Dataset columns:
	python_code: string (lengths 0 to 1.8M)
	repo_name: string (7 classes)
	file_path: string (lengths 5 to 99)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 Cavium, Inc.
 */

#include <linux/device.h>
#include <linux/gpio/consumer.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

#define DRV_VERSION "1.1"
#define DRV_DESCRIPTION "GPIO controlled MDIO bus multiplexer driver"

struct mdio_mux_gpio_state {
	struct gpio_descs *gpios;
	void *mux_handle;
};

static int mdio_mux_gpio_switch_fn(int current_child, int desired_child,
				   void *data)
{
	struct mdio_mux_gpio_state *s = data;
	DECLARE_BITMAP(values, BITS_PER_TYPE(desired_child));

	if (current_child == desired_child)
		return 0;

	values[0] = desired_child;

	gpiod_set_array_value_cansleep(s->gpios->ndescs, s->gpios->desc,
				       s->gpios->info, values);

	return 0;
}

static int mdio_mux_gpio_probe(struct platform_device *pdev)
{
	struct mdio_mux_gpio_state *s;
	struct gpio_descs *gpios;
	int r;

	gpios = devm_gpiod_get_array(&pdev->dev, NULL, GPIOD_OUT_LOW);
	if (IS_ERR(gpios))
		return PTR_ERR(gpios);

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->gpios = gpios;

	r = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
			  mdio_mux_gpio_switch_fn, &s->mux_handle, s, NULL);
	if (r != 0)
		return r;

	pdev->dev.platform_data = s;
	return 0;
}

static int mdio_mux_gpio_remove(struct platform_device *pdev)
{
	struct mdio_mux_gpio_state *s = dev_get_platdata(&pdev->dev);

	mdio_mux_uninit(s->mux_handle);
	return 0;
}

static const struct of_device_id mdio_mux_gpio_match[] = {
	{ .compatible = "mdio-mux-gpio", },
	{
		/* Legacy compatible property. */
		.compatible = "cavium,mdio-mux-sn74cbtlv3253",
	},
	{},
};
MODULE_DEVICE_TABLE(of, mdio_mux_gpio_match);

static struct platform_driver mdio_mux_gpio_driver = {
	.driver = {
		.name = "mdio-mux-gpio",
		.of_match_table = mdio_mux_gpio_match,
	},
	.probe = mdio_mux_gpio_probe,
	.remove = mdio_mux_gpio_remove,
};

module_platform_driver(mdio_mux_gpio_driver);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_VERSION(DRV_VERSION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-mux-gpio.c
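Editor's note: the driver above implements the switch_fn contract that the mdio-mux framework calls under the parent bus lock: current_child is -1 on the first call, and returning 0 means desired_child is now selected. A minimal userspace sketch of that contract, with a plain int standing in for the real mux hardware (all names here are hypothetical):

#include <stdio.h>

static int demo_switch_fn(int current_child, int desired_child, void *data)
{
	int *mux_reg = data;	/* stands in for the real mux register */

	if (current_child == desired_child)
		return 0;	/* already selected, nothing to do */

	*mux_reg = desired_child;	/* hardware would be poked here */
	return 0;
}

int main(void)
{
	int mux_reg = 0;

	demo_switch_fn(-1, 2, &mux_reg);	/* first call: current == -1 */
	printf("mux register now selects child %d\n", mux_reg);
	return 0;
}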
// SPDX-License-Identifier: GPL-2.0
/*
 * Simple memory-mapped device MDIO MUX driver
 *
 * Author: Timur Tabi <[email protected]>
 *
 * Copyright 2012 Freescale Semiconductor, Inc.
 */

#include <linux/device.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

struct mdio_mux_mmioreg_state {
	void *mux_handle;
	phys_addr_t phys;
	unsigned int iosize;
	unsigned int mask;
};

/*
 * MDIO multiplexing switch function
 *
 * This function is called by the mdio-mux layer when it thinks the mdio bus
 * multiplexer needs to switch.
 *
 * 'current_child' is the current value of the mux register (masked via
 * s->mask).
 *
 * 'desired_child' is the value of the 'reg' property of the target child MDIO
 * node.
 *
 * The first time this function is called, current_child == -1.
 *
 * If current_child == desired_child, then the mux is already set to the
 * correct bus.
 */
static int mdio_mux_mmioreg_switch_fn(int current_child, int desired_child,
				      void *data)
{
	struct mdio_mux_mmioreg_state *s = data;

	if (current_child ^ desired_child) {
		void __iomem *p = ioremap(s->phys, s->iosize);
		if (!p)
			return -ENOMEM;

		switch (s->iosize) {
		case sizeof(uint8_t): {
			uint8_t x, y;

			x = ioread8(p);
			y = (x & ~s->mask) | desired_child;
			if (x != y) {
				iowrite8((x & ~s->mask) | desired_child, p);
				pr_debug("%s: %02x -> %02x\n", __func__, x, y);
			}

			break;
		}
		case sizeof(uint16_t): {
			uint16_t x, y;

			x = ioread16(p);
			y = (x & ~s->mask) | desired_child;
			if (x != y) {
				iowrite16((x & ~s->mask) | desired_child, p);
				pr_debug("%s: %04x -> %04x\n", __func__, x, y);
			}

			break;
		}
		case sizeof(uint32_t): {
			uint32_t x, y;

			x = ioread32(p);
			y = (x & ~s->mask) | desired_child;
			if (x != y) {
				iowrite32((x & ~s->mask) | desired_child, p);
				pr_debug("%s: %08x -> %08x\n", __func__, x, y);
			}

			break;
		}
		}

		iounmap(p);
	}

	return 0;
}

static int mdio_mux_mmioreg_probe(struct platform_device *pdev)
{
	struct device_node *np2, *np = pdev->dev.of_node;
	struct mdio_mux_mmioreg_state *s;
	struct resource res;
	const __be32 *iprop;
	int len, ret;

	dev_dbg(&pdev->dev, "probing node %pOF\n", np);

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	ret = of_address_to_resource(np, 0, &res);
	if (ret) {
		dev_err(&pdev->dev, "could not obtain memory map for node %pOF\n",
			np);
		return ret;
	}
	s->phys = res.start;

	s->iosize = resource_size(&res);
	if (s->iosize != sizeof(uint8_t) &&
	    s->iosize != sizeof(uint16_t) &&
	    s->iosize != sizeof(uint32_t)) {
		dev_err(&pdev->dev, "only 8/16/32-bit registers are supported\n");
		return -EINVAL;
	}

	iprop = of_get_property(np, "mux-mask", &len);
	if (!iprop || len != sizeof(uint32_t)) {
		dev_err(&pdev->dev, "missing or invalid mux-mask property\n");
		return -ENODEV;
	}
	if (be32_to_cpup(iprop) >= BIT(s->iosize * 8)) {
		dev_err(&pdev->dev, "only 8/16/32-bit registers are supported\n");
		return -EINVAL;
	}
	s->mask = be32_to_cpup(iprop);

	/*
	 * Verify that the 'reg' property of each child MDIO bus does not
	 * set any bits outside of the 'mask'.
	 */
	for_each_available_child_of_node(np, np2) {
		u64 reg;

		if (of_property_read_reg(np2, 0, &reg, NULL)) {
			dev_err(&pdev->dev, "mdio-mux child node %pOF is "
				"missing a 'reg' property\n", np2);
			of_node_put(np2);
			return -ENODEV;
		}
		if ((u32)reg & ~s->mask) {
			dev_err(&pdev->dev, "mdio-mux child node %pOF has "
				"a 'reg' value with unmasked bits\n", np2);
			of_node_put(np2);
			return -ENODEV;
		}
	}

	ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
			    mdio_mux_mmioreg_switch_fn,
			    &s->mux_handle, s, NULL);
	if (ret)
		return dev_err_probe(&pdev->dev, ret,
				     "failed to register mdio-mux bus %pOF\n",
				     np);

	pdev->dev.platform_data = s;

	return 0;
}

static int mdio_mux_mmioreg_remove(struct platform_device *pdev)
{
	struct mdio_mux_mmioreg_state *s = dev_get_platdata(&pdev->dev);

	mdio_mux_uninit(s->mux_handle);

	return 0;
}

static const struct of_device_id mdio_mux_mmioreg_match[] = {
	{
		.compatible = "mdio-mux-mmioreg",
	},
	{},
};
MODULE_DEVICE_TABLE(of, mdio_mux_mmioreg_match);

static struct platform_driver mdio_mux_mmioreg_driver = {
	.driver = {
		.name		= "mdio-mux-mmioreg",
		.of_match_table = mdio_mux_mmioreg_match,
	},
	.probe		= mdio_mux_mmioreg_probe,
	.remove		= mdio_mux_mmioreg_remove,
};

module_platform_driver(mdio_mux_mmioreg_driver);

MODULE_AUTHOR("Timur Tabi <[email protected]>");
MODULE_DESCRIPTION("Memory-mapped device MDIO MUX driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-mux-mmioreg.c
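Editor's note: the switch function above does a read-modify-write that preserves every register bit outside 'mux-mask' and substitutes the child's 'reg' value into the masked bits. A minimal userspace sketch of that arithmetic; the register contents, mask, and child value are made-up examples:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint8_t x = 0xa5;		/* current register contents */
	uint8_t mask = 0x0f;		/* 'mux-mask' from the device tree */
	uint8_t desired_child = 0x3;	/* child bus 'reg' value */

	/* Bits outside the mask are preserved; masked bits select the bus. */
	uint8_t y = (x & ~mask) | desired_child;

	printf("%02x -> %02x\n", x, y);	/* prints "a5 -> a3" */
	return 0;
}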
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2011, 2012 Cavium, Inc.
 */

#include <linux/device.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

#define DRV_DESCRIPTION "MDIO bus multiplexer driver"

struct mdio_mux_child_bus;

struct mdio_mux_parent_bus {
	struct mii_bus *mii_bus;
	int current_child;
	int parent_id;
	void *switch_data;
	int (*switch_fn)(int current_child, int desired_child, void *data);

	/* List of our children linked through their next fields. */
	struct mdio_mux_child_bus *children;
};

struct mdio_mux_child_bus {
	struct mii_bus *mii_bus;
	struct mdio_mux_parent_bus *parent;
	struct mdio_mux_child_bus *next;
	int bus_number;
};

/*
 * The parent bus' lock is used to order access to the switch_fn.
 */
static int mdio_mux_read(struct mii_bus *bus, int phy_id, int regnum)
{
	struct mdio_mux_child_bus *cb = bus->priv;
	struct mdio_mux_parent_bus *pb = cb->parent;
	int r;

	mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
	if (r)
		goto out;

	pb->current_child = cb->bus_number;

	r = pb->mii_bus->read(pb->mii_bus, phy_id, regnum);
out:
	mutex_unlock(&pb->mii_bus->mdio_lock);

	return r;
}

/*
 * The parent bus' lock is used to order access to the switch_fn.
 */
static int mdio_mux_write(struct mii_bus *bus, int phy_id,
			  int regnum, u16 val)
{
	struct mdio_mux_child_bus *cb = bus->priv;
	struct mdio_mux_parent_bus *pb = cb->parent;

	int r;

	mutex_lock_nested(&pb->mii_bus->mdio_lock, MDIO_MUTEX_MUX);
	r = pb->switch_fn(pb->current_child, cb->bus_number, pb->switch_data);
	if (r)
		goto out;

	pb->current_child = cb->bus_number;

	r = pb->mii_bus->write(pb->mii_bus, phy_id, regnum, val);
out:
	mutex_unlock(&pb->mii_bus->mdio_lock);

	return r;
}

static int parent_count;

static void mdio_mux_uninit_children(struct mdio_mux_parent_bus *pb)
{
	struct mdio_mux_child_bus *cb = pb->children;

	while (cb) {
		mdiobus_unregister(cb->mii_bus);
		mdiobus_free(cb->mii_bus);
		cb = cb->next;
	}
}

int mdio_mux_init(struct device *dev,
		  struct device_node *mux_node,
		  int (*switch_fn)(int cur, int desired, void *data),
		  void **mux_handle,
		  void *data,
		  struct mii_bus *mux_bus)
{
	struct device_node *parent_bus_node;
	struct device_node *child_bus_node;
	int r, ret_val;
	struct mii_bus *parent_bus;
	struct mdio_mux_parent_bus *pb;
	struct mdio_mux_child_bus *cb;

	if (!mux_node)
		return -ENODEV;

	if (!mux_bus) {
		parent_bus_node = of_parse_phandle(mux_node,
						   "mdio-parent-bus", 0);
		if (!parent_bus_node)
			return -ENODEV;

		parent_bus = of_mdio_find_bus(parent_bus_node);
		if (!parent_bus) {
			ret_val = -EPROBE_DEFER;
			goto err_parent_bus;
		}
	} else {
		parent_bus_node = NULL;
		parent_bus = mux_bus;
		get_device(&parent_bus->dev);
	}

	pb = devm_kzalloc(dev, sizeof(*pb), GFP_KERNEL);
	if (!pb) {
		ret_val = -ENOMEM;
		goto err_pb_kz;
	}

	pb->switch_data = data;
	pb->switch_fn = switch_fn;
	pb->current_child = -1;
	pb->parent_id = parent_count++;
	pb->mii_bus = parent_bus;

	ret_val = -ENODEV;
	for_each_available_child_of_node(mux_node, child_bus_node) {
		int v;

		r = of_property_read_u32(child_bus_node, "reg", &v);
		if (r) {
			dev_err(dev,
				"Error: Failed to find reg for child %pOF\n",
				child_bus_node);
			continue;
		}

		cb = devm_kzalloc(dev, sizeof(*cb), GFP_KERNEL);
		if (!cb) {
			ret_val = -ENOMEM;
			goto err_loop;
		}
		cb->bus_number = v;
		cb->parent = pb;

		cb->mii_bus = mdiobus_alloc();
		if (!cb->mii_bus) {
			ret_val = -ENOMEM;
			goto err_loop;
		}
		cb->mii_bus->priv = cb;

		cb->mii_bus->name = "mdio_mux";
		snprintf(cb->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x.%x",
			 cb->mii_bus->name, pb->parent_id, v);
		cb->mii_bus->parent = dev;
		cb->mii_bus->read = mdio_mux_read;
		cb->mii_bus->write = mdio_mux_write;
		r = of_mdiobus_register(cb->mii_bus, child_bus_node);
		if (r) {
			mdiobus_free(cb->mii_bus);
			if (r == -EPROBE_DEFER) {
				ret_val = r;
				goto err_loop;
			}
			devm_kfree(dev, cb);
			dev_err(dev,
				"Error: Failed to register MDIO bus for child %pOF\n",
				child_bus_node);
		} else {
			cb->next = pb->children;
			pb->children = cb;
		}
	}
	if (pb->children) {
		*mux_handle = pb;
		return 0;
	}

	dev_err(dev, "Error: No acceptable child buses found\n");

err_loop:
	mdio_mux_uninit_children(pb);
	of_node_put(child_bus_node);
err_pb_kz:
	put_device(&parent_bus->dev);
err_parent_bus:
	of_node_put(parent_bus_node);
	return ret_val;
}
EXPORT_SYMBOL_GPL(mdio_mux_init);

void mdio_mux_uninit(void *mux_handle)
{
	struct mdio_mux_parent_bus *pb = mux_handle;

	mdio_mux_uninit_children(pb);
	put_device(&pb->mii_bus->dev);
}
EXPORT_SYMBOL_GPL(mdio_mux_uninit);

MODULE_DESCRIPTION(DRV_DESCRIPTION);
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-mux.c
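Editor's note: each child bus the core registers gets an id built with the "%s-%x.%x" format seen in mdio_mux_init() above. A minimal userspace sketch of that naming; MII_BUS_ID_SIZE is 61 in current kernels (include/linux/phy.h), and the parent id and reg value are made-up examples:

#include <stdio.h>

#define MII_BUS_ID_SIZE 61	/* matches include/linux/phy.h */

int main(void)
{
	char id[MII_BUS_ID_SIZE];
	int parent_id = 0, reg = 4;	/* example values */

	/* Same format string mdio_mux_init() uses for child bus ids */
	snprintf(id, sizeof(id), "%s-%x.%x", "mdio_mux", parent_id, reg);
	printf("%s\n", id);	/* prints "mdio_mux-0.4" */
	return 0;
}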
// SPDX-License-Identifier: GPL-2.0+
/* MDIO bus multiplexer using kernel multiplexer subsystem
 *
 * Copyright 2019 NXP
 */

#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/mux/consumer.h>
#include <linux/platform_device.h>

struct mdio_mux_multiplexer_state {
	struct mux_control *muxc;
	bool do_deselect;
	void *mux_handle;
};

/**
 * mdio_mux_multiplexer_switch_fn - This function is called by the mdio-mux
 *                                  layer when it thinks the mdio bus
 *                                  multiplexer needs to switch.
 * @current_child: current value of the mux register.
 * @desired_child: value of the 'reg' property of the target child MDIO node.
 * @data: Private data used by this switch_fn passed to mdio_mux_init function
 *        via mdio_mux_init(.., .., .., .., data, ..).
 *
 * The first time this function is called, current_child == -1.
 * If current_child == desired_child, then the mux is already set to the
 * correct bus.
 */
static int mdio_mux_multiplexer_switch_fn(int current_child, int desired_child,
					  void *data)
{
	struct platform_device *pdev;
	struct mdio_mux_multiplexer_state *s;
	int ret = 0;

	pdev = (struct platform_device *)data;
	s = platform_get_drvdata(pdev);

	if (!(current_child ^ desired_child))
		return 0;

	if (s->do_deselect)
		ret = mux_control_deselect(s->muxc);
	if (ret) {
		dev_err(&pdev->dev, "mux_control_deselect failed in %s: %d\n",
			__func__, ret);
		return ret;
	}

	ret = mux_control_select(s->muxc, desired_child);
	if (!ret) {
		dev_dbg(&pdev->dev, "%s %d -> %d\n", __func__, current_child,
			desired_child);
		s->do_deselect = true;
	} else {
		s->do_deselect = false;
	}

	return ret;
}

static int mdio_mux_multiplexer_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct mdio_mux_multiplexer_state *s;
	int ret = 0;

	s = devm_kzalloc(&pdev->dev, sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->muxc = devm_mux_control_get(dev, NULL);
	if (IS_ERR(s->muxc))
		return dev_err_probe(&pdev->dev, PTR_ERR(s->muxc),
				     "Failed to get mux\n");

	platform_set_drvdata(pdev, s);

	ret = mdio_mux_init(&pdev->dev, pdev->dev.of_node,
			    mdio_mux_multiplexer_switch_fn, &s->mux_handle,
			    pdev, NULL);

	return ret;
}

static int mdio_mux_multiplexer_remove(struct platform_device *pdev)
{
	struct mdio_mux_multiplexer_state *s = platform_get_drvdata(pdev);

	mdio_mux_uninit(s->mux_handle);

	if (s->do_deselect)
		mux_control_deselect(s->muxc);

	return 0;
}

static const struct of_device_id mdio_mux_multiplexer_match[] = {
	{ .compatible = "mdio-mux-multiplexer", },
	{},
};
MODULE_DEVICE_TABLE(of, mdio_mux_multiplexer_match);

static struct platform_driver mdio_mux_multiplexer_driver = {
	.driver	= {
		.name		= "mdio-mux-multiplexer",
		.of_match_table	= mdio_mux_multiplexer_match,
	},
	.probe	= mdio_mux_multiplexer_probe,
	.remove	= mdio_mux_multiplexer_remove,
};

module_platform_driver(mdio_mux_multiplexer_driver);

MODULE_DESCRIPTION("MDIO bus multiplexer using kernel multiplexer subsystem");
MODULE_AUTHOR("Pankaj Bansal <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/net/mdio/mdio-mux-multiplexer.c
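Editor's note: the do_deselect flag above encodes the mux subsystem rule that every successful mux_control_select() must be balanced by mux_control_deselect() before the next select. A purely illustrative userspace model of that bookkeeping (not the real mux API):

#include <stdbool.h>
#include <stdio.h>

static bool selected;	/* models the do_deselect state */

static int demo_select(int child)
{
	if (selected)
		return -1;	/* would block/deadlock in the real subsystem */
	selected = true;
	printf("selected child %d\n", child);
	return 0;
}

static void demo_deselect(void)
{
	selected = false;
}

int main(void)
{
	demo_select(1);
	demo_deselect();	/* balance before switching */
	demo_select(0);
	return 0;
}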
// SPDX-License-Identifier: GPL-2.0
/*
 * GPIO based MDIO bitbang driver.
 * Supports OpenFirmware.
 *
 * Copyright (c) 2008 CSE Semaphore Belgium.
 *  by Laurent Pinchart <[email protected]>
 *
 * Copyright (C) 2008, Paulius Zaleckas <[email protected]>
 *
 * Based on earlier work by
 *
 * Copyright (c) 2003 Intracom S.A.
 *  by Pantelis Antoniou <[email protected]>
 *
 * 2005 (c) MontaVista Software, Inc.
 * Vitaly Bordug <[email protected]>
 */

#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/mdio-bitbang.h>
#include <linux/mdio-gpio.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/platform_data/mdio-gpio.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct mdio_gpio_info {
	struct mdiobb_ctrl ctrl;
	struct gpio_desc *mdc, *mdio, *mdo;
};

static int mdio_gpio_get_data(struct device *dev,
			      struct mdio_gpio_info *bitbang)
{
	bitbang->mdc = devm_gpiod_get_index(dev, NULL, MDIO_GPIO_MDC,
					    GPIOD_OUT_LOW);
	if (IS_ERR(bitbang->mdc))
		return PTR_ERR(bitbang->mdc);

	bitbang->mdio = devm_gpiod_get_index(dev, NULL, MDIO_GPIO_MDIO,
					     GPIOD_IN);
	if (IS_ERR(bitbang->mdio))
		return PTR_ERR(bitbang->mdio);

	bitbang->mdo = devm_gpiod_get_index_optional(dev, NULL, MDIO_GPIO_MDO,
						     GPIOD_OUT_LOW);
	return PTR_ERR_OR_ZERO(bitbang->mdo);
}

static void mdio_dir(struct mdiobb_ctrl *ctrl, int dir)
{
	struct mdio_gpio_info *bitbang =
		container_of(ctrl, struct mdio_gpio_info, ctrl);

	if (bitbang->mdo) {
		/* Separate output pin. Always set its value to high
		 * when changing direction. If direction is input,
		 * assume the pin serves as pull-up. If direction is
		 * output, the default value is high.
		 */
		gpiod_set_value_cansleep(bitbang->mdo, 1);
		return;
	}

	if (dir)
		gpiod_direction_output(bitbang->mdio, 1);
	else
		gpiod_direction_input(bitbang->mdio);
}

static int mdio_get(struct mdiobb_ctrl *ctrl)
{
	struct mdio_gpio_info *bitbang =
		container_of(ctrl, struct mdio_gpio_info, ctrl);

	return gpiod_get_value_cansleep(bitbang->mdio);
}

static void mdio_set(struct mdiobb_ctrl *ctrl, int what)
{
	struct mdio_gpio_info *bitbang =
		container_of(ctrl, struct mdio_gpio_info, ctrl);

	if (bitbang->mdo)
		gpiod_set_value_cansleep(bitbang->mdo, what);
	else
		gpiod_set_value_cansleep(bitbang->mdio, what);
}

static void mdc_set(struct mdiobb_ctrl *ctrl, int what)
{
	struct mdio_gpio_info *bitbang =
		container_of(ctrl, struct mdio_gpio_info, ctrl);

	gpiod_set_value_cansleep(bitbang->mdc, what);
}

static const struct mdiobb_ops mdio_gpio_ops = {
	.owner = THIS_MODULE,
	.set_mdc = mdc_set,
	.set_mdio_dir = mdio_dir,
	.set_mdio_data = mdio_set,
	.get_mdio_data = mdio_get,
};

static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
					  struct mdio_gpio_info *bitbang,
					  int bus_id)
{
	struct mdio_gpio_platform_data *pdata = dev_get_platdata(dev);
	struct mii_bus *new_bus;

	bitbang->ctrl.ops = &mdio_gpio_ops;

	new_bus = alloc_mdio_bitbang(&bitbang->ctrl);
	if (!new_bus)
		return NULL;

	new_bus->name = "GPIO Bitbanged MDIO";
	new_bus->parent = dev;

	if (bus_id != -1)
		snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
	else
		strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE);

	if (pdata) {
		new_bus->phy_mask = pdata->phy_mask;
		new_bus->phy_ignore_ta_mask = pdata->phy_ignore_ta_mask;
	}

	if (dev->of_node &&
	    of_device_is_compatible(dev->of_node, "microchip,mdio-smi0")) {
		bitbang->ctrl.op_c22_read = 0;
		bitbang->ctrl.op_c22_write = 0;
		bitbang->ctrl.override_op_c22 = 1;
	}

	dev_set_drvdata(dev, new_bus);

	return new_bus;
}

static void mdio_gpio_bus_deinit(struct device *dev)
{
	struct mii_bus *bus = dev_get_drvdata(dev);

	free_mdio_bitbang(bus);
}

static void mdio_gpio_bus_destroy(struct device *dev)
{
	struct mii_bus *bus = dev_get_drvdata(dev);

	mdiobus_unregister(bus);
	mdio_gpio_bus_deinit(dev);
}

static int mdio_gpio_probe(struct platform_device *pdev)
{
	struct mdio_gpio_info *bitbang;
	struct mii_bus *new_bus;
	int ret, bus_id;

	bitbang = devm_kzalloc(&pdev->dev, sizeof(*bitbang), GFP_KERNEL);
	if (!bitbang)
		return -ENOMEM;

	ret = mdio_gpio_get_data(&pdev->dev, bitbang);
	if (ret)
		return ret;

	if (pdev->dev.of_node) {
		bus_id = of_alias_get_id(pdev->dev.of_node, "mdio-gpio");
		if (bus_id < 0) {
			dev_warn(&pdev->dev, "failed to get alias id\n");
			bus_id = 0;
		}
	} else {
		bus_id = pdev->id;
	}

	new_bus = mdio_gpio_bus_init(&pdev->dev, bitbang, bus_id);
	if (!new_bus)
		return -ENODEV;

	ret = of_mdiobus_register(new_bus, pdev->dev.of_node);
	if (ret)
		mdio_gpio_bus_deinit(&pdev->dev);

	return ret;
}

static int mdio_gpio_remove(struct platform_device *pdev)
{
	mdio_gpio_bus_destroy(&pdev->dev);

	return 0;
}

static const struct of_device_id mdio_gpio_of_match[] = {
	{ .compatible = "virtual,mdio-gpio", },
	{ .compatible = "microchip,mdio-smi0" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, mdio_gpio_of_match);

static struct platform_driver mdio_gpio_driver = {
	.probe = mdio_gpio_probe,
	.remove = mdio_gpio_remove,
	.driver		= {
		.name	= "mdio-gpio",
		.of_match_table = mdio_gpio_of_match,
	},
};

module_platform_driver(mdio_gpio_driver);

MODULE_ALIAS("platform:mdio-gpio");
MODULE_AUTHOR("Laurent Pinchart, Paulius Zaleckas");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Generic driver for MDIO bus emulation using GPIO");
linux-master
drivers/net/mdio/mdio-gpio.c
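Editor's note: besides device tree, the driver above also accepts platform data (the pdata->phy_mask and pdata->phy_ignore_ta_mask accesses). A condensed, hedged board-file sketch of that path; the device id and mask values are made up for illustration:

/* Hedged sketch: registering mdio-gpio from board code via platform data. */
#include <linux/platform_data/mdio-gpio.h>
#include <linux/platform_device.h>

static struct mdio_gpio_platform_data demo_mdio_pdata = {
	.phy_mask = ~(1 << 1),		/* only scan PHY address 1 */
	.phy_ignore_ta_mask = 0,
};

static struct platform_device demo_mdio_dev = {
	.name = "mdio-gpio",
	.id = 0,			/* non-OF path: bus id becomes "gpio-0" */
	.dev.platform_data = &demo_mdio_pdata,
};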
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2009-2015 Cavium, Inc.
 */

#include <linux/gfp.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

#include "mdio-cavium.h"

static int octeon_mdiobus_probe(struct platform_device *pdev)
{
	struct cavium_mdiobus *bus;
	struct mii_bus *mii_bus;
	struct resource *res_mem;
	resource_size_t mdio_phys;
	resource_size_t regsize;
	union cvmx_smix_en smi_en;
	int err = -ENOENT;

	mii_bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*bus));
	if (!mii_bus)
		return -ENOMEM;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res_mem == NULL) {
		dev_err(&pdev->dev, "found no memory resource\n");
		return -ENXIO;
	}

	bus = mii_bus->priv;
	bus->mii_bus = mii_bus;
	mdio_phys = res_mem->start;
	regsize = resource_size(res_mem);

	if (!devm_request_mem_region(&pdev->dev, mdio_phys, regsize,
				     res_mem->name)) {
		dev_err(&pdev->dev, "request_mem_region failed\n");
		return -ENXIO;
	}

	bus->register_base = devm_ioremap(&pdev->dev, mdio_phys, regsize);
	if (!bus->register_base) {
		dev_err(&pdev->dev, "dev_ioremap failed\n");
		return -ENOMEM;
	}

	smi_en.u64 = 0;
	smi_en.s.en = 1;
	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);

	bus->mii_bus->name = KBUILD_MODNAME;
	snprintf(bus->mii_bus->id, MII_BUS_ID_SIZE, "%px", bus->register_base);
	bus->mii_bus->parent = &pdev->dev;

	bus->mii_bus->read = cavium_mdiobus_read_c22;
	bus->mii_bus->write = cavium_mdiobus_write_c22;
	bus->mii_bus->read_c45 = cavium_mdiobus_read_c45;
	bus->mii_bus->write_c45 = cavium_mdiobus_write_c45;

	platform_set_drvdata(pdev, bus);

	err = of_mdiobus_register(bus->mii_bus, pdev->dev.of_node);
	if (err)
		goto fail_register;

	dev_info(&pdev->dev, "Probed\n");

	return 0;
fail_register:
	smi_en.u64 = 0;
	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
	return err;
}

static int octeon_mdiobus_remove(struct platform_device *pdev)
{
	struct cavium_mdiobus *bus;
	union cvmx_smix_en smi_en;

	bus = platform_get_drvdata(pdev);

	mdiobus_unregister(bus->mii_bus);
	smi_en.u64 = 0;
	oct_mdio_writeq(smi_en.u64, bus->register_base + SMI_EN);
	return 0;
}

static const struct of_device_id octeon_mdiobus_match[] = {
	{
		.compatible = "cavium,octeon-3860-mdio",
	},
	{},
};
MODULE_DEVICE_TABLE(of, octeon_mdiobus_match);

static struct platform_driver octeon_mdiobus_driver = {
	.driver = {
		.name		= KBUILD_MODNAME,
		.of_match_table = octeon_mdiobus_match,
	},
	.probe		= octeon_mdiobus_probe,
	.remove		= octeon_mdiobus_remove,
};

module_platform_driver(octeon_mdiobus_driver);

MODULE_DESCRIPTION("Cavium OCTEON MDIO bus driver");
MODULE_AUTHOR("David Daney");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-octeon.c
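Editor's note: the smi_en handling above uses the common Cavium pattern of a union that overlays named bitfields on a raw 64-bit register word. A rough userspace model of that pattern; the real cvmx_smix_en layout lives in mdio-cavium.h and the field position below is illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-in for cvmx_smix_en; field layout is assumed. */
union demo_smix_en {
	uint64_t u64;
	struct {
		uint64_t en : 1;	/* assumed bit position */
	} s;
};

int main(void)
{
	union demo_smix_en smi_en = { .u64 = 0 };

	smi_en.s.en = 1;	/* enable the SMI/MDIO engine */
	printf("SMI_EN = 0x%016llx\n", (unsigned long long)smi_en.u64);
	return 0;
}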
// SPDX-License-Identifier: GPL-2.0+
/*
 * Broadcom UniMAC MDIO bus controller driver
 *
 * Copyright (C) 2014-2017 Broadcom
 */

#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/of_platform.h>
#include <linux/phy.h>
#include <linux/platform_data/mdio-bcm-unimac.h>
#include <linux/platform_device.h>
#include <linux/sched.h>

#define MDIO_CMD		0x00
#define  MDIO_START_BUSY	(1 << 29)
#define  MDIO_READ_FAIL		(1 << 28)
#define  MDIO_RD		(2 << 26)
#define  MDIO_WR		(1 << 26)
#define  MDIO_PMD_SHIFT		21
#define  MDIO_PMD_MASK		0x1F
#define  MDIO_REG_SHIFT		16
#define  MDIO_REG_MASK		0x1F

#define MDIO_CFG		0x04
#define  MDIO_C22		(1 << 0)
#define  MDIO_C45		0
#define  MDIO_CLK_DIV_SHIFT	4
#define  MDIO_CLK_DIV_MASK	0x3F
#define  MDIO_SUPP_PREAMBLE	(1 << 12)

struct unimac_mdio_priv {
	struct mii_bus		*mii_bus;
	void __iomem		*base;
	int (*wait_func)	(void *wait_func_data);
	void			*wait_func_data;
	struct clk		*clk;
	u32			clk_freq;
};

static inline u32 unimac_mdio_readl(struct unimac_mdio_priv *priv, u32 offset)
{
	/* MIPS chips strapped for BE will automagically configure the
	 * peripheral registers for CPU-native byte order.
	 */
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		return __raw_readl(priv->base + offset);
	else
		return readl_relaxed(priv->base + offset);
}

static inline void unimac_mdio_writel(struct unimac_mdio_priv *priv, u32 val,
				      u32 offset)
{
	if (IS_ENABLED(CONFIG_MIPS) && IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		__raw_writel(val, priv->base + offset);
	else
		writel_relaxed(val, priv->base + offset);
}

static inline void unimac_mdio_start(struct unimac_mdio_priv *priv)
{
	u32 reg;

	reg = unimac_mdio_readl(priv, MDIO_CMD);
	reg |= MDIO_START_BUSY;
	unimac_mdio_writel(priv, reg, MDIO_CMD);
}

static inline unsigned int unimac_mdio_busy(struct unimac_mdio_priv *priv)
{
	return unimac_mdio_readl(priv, MDIO_CMD) & MDIO_START_BUSY;
}

static int unimac_mdio_poll(void *wait_func_data)
{
	struct unimac_mdio_priv *priv = wait_func_data;
	unsigned int timeout = 1000;

	do {
		if (!unimac_mdio_busy(priv))
			return 0;

		usleep_range(1000, 2000);
	} while (--timeout);

	return -ETIMEDOUT;
}

static int unimac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
{
	struct unimac_mdio_priv *priv = bus->priv;
	int ret;
	u32 cmd;

	/* Prepare the read operation */
	cmd = MDIO_RD | (phy_id << MDIO_PMD_SHIFT) | (reg << MDIO_REG_SHIFT);
	unimac_mdio_writel(priv, cmd, MDIO_CMD);

	/* Start MDIO transaction */
	unimac_mdio_start(priv);

	ret = priv->wait_func(priv->wait_func_data);
	if (ret)
		return ret;

	cmd = unimac_mdio_readl(priv, MDIO_CMD);

	/* Some broken devices are known not to release the line during
	 * turn-around, e.g: Broadcom BCM53125 external switches, so check for
	 * that condition here and ignore the MDIO controller read failure
	 * indication.
	 */
	if (!(bus->phy_ignore_ta_mask & 1 << phy_id) && (cmd & MDIO_READ_FAIL))
		return -EIO;

	return cmd & 0xffff;
}

static int unimac_mdio_write(struct mii_bus *bus, int phy_id,
			     int reg, u16 val)
{
	struct unimac_mdio_priv *priv = bus->priv;
	u32 cmd;

	/* Prepare the write operation */
	cmd = MDIO_WR | (phy_id << MDIO_PMD_SHIFT) |
		(reg << MDIO_REG_SHIFT) | (0xffff & val);
	unimac_mdio_writel(priv, cmd, MDIO_CMD);

	unimac_mdio_start(priv);

	return priv->wait_func(priv->wait_func_data);
}

/* Workaround for integrated BCM7xxx Gigabit PHYs which have a problem with
 * their internal MDIO management controller making them fail to successfully
 * be read from or written to for the first transaction.  We insert a dummy
 * BMSR read here to make sure that phy_get_device() and get_phy_id() can
 * correctly read the PHY MII_PHYSID1/2 registers and successfully register a
 * PHY device for this peripheral.
 *
 * Once the PHY driver is registered, we can workaround subsequent reads from
 * there (e.g: during system-wide power management).
 *
 * bus->reset is invoked before mdiobus_scan during mdiobus_register and is
 * therefore the right location to stick that workaround. Since we do not want
 * to read from non-existing PHYs, we either use bus->phy_mask or do a manual
 * Device Tree scan to limit the search area.
 */
static int unimac_mdio_reset(struct mii_bus *bus)
{
	struct device_node *np = bus->dev.of_node;
	struct device_node *child;
	u32 read_mask = 0;
	int addr;

	if (!np) {
		read_mask = ~bus->phy_mask;
	} else {
		for_each_available_child_of_node(np, child) {
			addr = of_mdio_parse_addr(&bus->dev, child);
			if (addr < 0)
				continue;

			read_mask |= 1 << addr;
		}
	}

	for (addr = 0; addr < PHY_MAX_ADDR; addr++) {
		if (read_mask & 1 << addr) {
			dev_dbg(&bus->dev, "Workaround for PHY @ %d\n", addr);
			mdiobus_read(bus, addr, MII_BMSR);
		}
	}

	return 0;
}

static void unimac_mdio_clk_set(struct unimac_mdio_priv *priv)
{
	unsigned long rate;
	u32 reg, div;

	/* Keep the hardware default values */
	if (!priv->clk_freq)
		return;

	if (!priv->clk)
		rate = 250000000;
	else
		rate = clk_get_rate(priv->clk);

	div = (rate / (2 * priv->clk_freq)) - 1;
	if (div & ~MDIO_CLK_DIV_MASK) {
		pr_warn("Incorrect MDIO clock frequency, ignoring\n");
		return;
	}

	/* The MDIO clock is the reference clock (typically 250MHz) divided by
	 * 2 x (MDIO_CLK_DIV + 1)
	 */
	reg = unimac_mdio_readl(priv, MDIO_CFG);
	reg &= ~(MDIO_CLK_DIV_MASK << MDIO_CLK_DIV_SHIFT);
	reg |= div << MDIO_CLK_DIV_SHIFT;
	unimac_mdio_writel(priv, reg, MDIO_CFG);
}

static int unimac_mdio_probe(struct platform_device *pdev)
{
	struct unimac_mdio_pdata *pdata = pdev->dev.platform_data;
	struct unimac_mdio_priv *priv;
	struct device_node *np;
	struct mii_bus *bus;
	struct resource *r;
	int ret;

	np = pdev->dev.of_node;

	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!r)
		return -EINVAL;

	/* Just ioremap, as this MDIO block is usually integrated into an
	 * Ethernet MAC controller register range
	 */
	priv->base = devm_ioremap(&pdev->dev, r->start, resource_size(r));
	if (!priv->base) {
		dev_err(&pdev->dev, "failed to remap register\n");
		return -ENOMEM;
	}

	priv->clk = devm_clk_get_optional(&pdev->dev, NULL);
	if (IS_ERR(priv->clk))
		return PTR_ERR(priv->clk);

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	if (of_property_read_u32(np, "clock-frequency", &priv->clk_freq))
		priv->clk_freq = 0;

	unimac_mdio_clk_set(priv);

	priv->mii_bus = mdiobus_alloc();
	if (!priv->mii_bus) {
		ret = -ENOMEM;
		goto out_clk_disable;
	}

	bus = priv->mii_bus;
	bus->priv = priv;
	if (pdata) {
		bus->name = pdata->bus_name;
		priv->wait_func = pdata->wait_func;
		priv->wait_func_data = pdata->wait_func_data;
		bus->phy_mask = ~pdata->phy_mask;
	} else {
		bus->name = "unimac MII bus";
		priv->wait_func_data = priv;
		priv->wait_func = unimac_mdio_poll;
	}
	bus->parent = &pdev->dev;
	bus->read = unimac_mdio_read;
	bus->write = unimac_mdio_write;
	bus->reset = unimac_mdio_reset;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);

	ret = of_mdiobus_register(bus, np);
	if (ret) {
		dev_err(&pdev->dev, "MDIO bus registration failed\n");
		goto out_mdio_free;
	}

	platform_set_drvdata(pdev, priv);

	dev_info(&pdev->dev, "Broadcom UniMAC MDIO bus\n");

	return 0;

out_mdio_free:
	mdiobus_free(bus);
out_clk_disable:
	clk_disable_unprepare(priv->clk);
	return ret;
}

static int unimac_mdio_remove(struct platform_device *pdev)
{
	struct unimac_mdio_priv *priv = platform_get_drvdata(pdev);

	mdiobus_unregister(priv->mii_bus);
	mdiobus_free(priv->mii_bus);
	clk_disable_unprepare(priv->clk);

	return 0;
}

static int __maybe_unused unimac_mdio_suspend(struct device *d)
{
	struct unimac_mdio_priv *priv = dev_get_drvdata(d);

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int __maybe_unused unimac_mdio_resume(struct device *d)
{
	struct unimac_mdio_priv *priv = dev_get_drvdata(d);
	int ret;

	ret = clk_prepare_enable(priv->clk);
	if (ret)
		return ret;

	unimac_mdio_clk_set(priv);

	return 0;
}

static SIMPLE_DEV_PM_OPS(unimac_mdio_pm_ops,
			 unimac_mdio_suspend, unimac_mdio_resume);

static const struct of_device_id unimac_mdio_ids[] = {
	{ .compatible = "brcm,asp-v2.1-mdio", },
	{ .compatible = "brcm,asp-v2.0-mdio", },
	{ .compatible = "brcm,genet-mdio-v5", },
	{ .compatible = "brcm,genet-mdio-v4", },
	{ .compatible = "brcm,genet-mdio-v3", },
	{ .compatible = "brcm,genet-mdio-v2", },
	{ .compatible = "brcm,genet-mdio-v1", },
	{ .compatible = "brcm,unimac-mdio", },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, unimac_mdio_ids);

static struct platform_driver unimac_mdio_driver = {
	.driver = {
		.name = UNIMAC_MDIO_DRV_NAME,
		.of_match_table = unimac_mdio_ids,
		.pm = &unimac_mdio_pm_ops,
	},
	.probe	= unimac_mdio_probe,
	.remove	= unimac_mdio_remove,
};
module_platform_driver(unimac_mdio_driver);

MODULE_AUTHOR("Broadcom Corporation");
MODULE_DESCRIPTION("Broadcom UniMAC MDIO bus controller");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:" UNIMAC_MDIO_DRV_NAME);
linux-master
drivers/net/mdio/mdio-bcm-unimac.c
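Editor's note: unimac_mdio_clk_set() above derives the divider from mdc = rate / (2 * (div + 1)), so div = rate / (2 * mdc) - 1, and rejects dividers that overflow the 6-bit field. A minimal userspace sketch of that arithmetic; the reference and target frequencies are example values:

#include <stdio.h>

#define MDIO_CLK_DIV_MASK 0x3F

int main(void)
{
	unsigned long rate = 250000000;	/* reference clock, 250 MHz */
	unsigned long want = 2500000;	/* desired MDC, 2.5 MHz */

	/* mdc = rate / (2 * (div + 1))  =>  div = rate / (2 * mdc) - 1 */
	unsigned long div = rate / (2 * want) - 1;

	if (div & ~MDIO_CLK_DIV_MASK)
		printf("frequency out of range\n");
	else
		printf("MDIO_CLK_DIV = %lu -> MDC = %lu Hz\n",
		       div, rate / (2 * (div + 1)));	/* 49 -> 2500000 Hz */
	return 0;
}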
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Baylibre, SAS.
 * Author: Jerome Brunet <[email protected]>
 */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

#define ETH_PLL_STS		0x40
#define ETH_PLL_CTL0		0x44
#define  PLL_CTL0_LOCK_DIG	BIT(30)
#define  PLL_CTL0_RST		BIT(29)
#define  PLL_CTL0_EN		BIT(28)
#define  PLL_CTL0_SEL		BIT(23)
#define  PLL_CTL0_N		GENMASK(14, 10)
#define  PLL_CTL0_M		GENMASK(8, 0)
#define  PLL_LOCK_TIMEOUT	1000000
#define  PLL_MUX_NUM_PARENT	2
#define ETH_PLL_CTL1		0x48
#define ETH_PLL_CTL2		0x4c
#define ETH_PLL_CTL3		0x50
#define ETH_PLL_CTL4		0x54
#define ETH_PLL_CTL5		0x58
#define ETH_PLL_CTL6		0x5c
#define ETH_PLL_CTL7		0x60

#define ETH_PHY_CNTL0		0x80
#define   EPHY_G12A_ID		0x33010180
#define ETH_PHY_CNTL1		0x84
#define  PHY_CNTL1_ST_MODE	GENMASK(2, 0)
#define  PHY_CNTL1_ST_PHYADD	GENMASK(7, 3)
#define   EPHY_DFLT_ADD		8
#define  PHY_CNTL1_MII_MODE	GENMASK(15, 14)
#define   EPHY_MODE_RMII	0x1
#define  PHY_CNTL1_CLK_EN	BIT(16)
#define  PHY_CNTL1_CLKFREQ	BIT(17)
#define  PHY_CNTL1_PHY_ENB	BIT(18)
#define ETH_PHY_CNTL2		0x88
#define  PHY_CNTL2_USE_INTERNAL	BIT(5)
#define  PHY_CNTL2_SMI_SRC_MAC	BIT(6)
#define  PHY_CNTL2_RX_CLK_EPHY	BIT(9)

#define MESON_G12A_MDIO_EXTERNAL_ID 0
#define MESON_G12A_MDIO_INTERNAL_ID 1

struct g12a_mdio_mux {
	void __iomem *regs;
	void *mux_handle;
	struct clk *pll;
};

struct g12a_ephy_pll {
	void __iomem *base;
	struct clk_hw hw;
};

#define g12a_ephy_pll_to_dev(_hw)			\
	container_of(_hw, struct g12a_ephy_pll, hw)

static unsigned long g12a_ephy_pll_recalc_rate(struct clk_hw *hw,
					       unsigned long parent_rate)
{
	struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
	u32 val, m, n;

	val = readl(pll->base + ETH_PLL_CTL0);
	m = FIELD_GET(PLL_CTL0_M, val);
	n = FIELD_GET(PLL_CTL0_N, val);

	return parent_rate * m / n;
}

static int g12a_ephy_pll_enable(struct clk_hw *hw)
{
	struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
	u32 val = readl(pll->base + ETH_PLL_CTL0);

	/* Apply both enable and reset */
	val |= PLL_CTL0_RST | PLL_CTL0_EN;
	writel(val, pll->base + ETH_PLL_CTL0);

	/* Clear the reset to let PLL lock */
	val &= ~PLL_CTL0_RST;
	writel(val, pll->base + ETH_PLL_CTL0);

	/* Poll on the digital lock instead of the usual analog lock.
	 * This is done because bit 31 is unreliable on some SoCs. Bit
	 * 31 may indicate that the PLL is not locked even though the
	 * clock is actually running.
	 */
	return readl_poll_timeout(pll->base + ETH_PLL_CTL0, val,
				  val & PLL_CTL0_LOCK_DIG, 0,
				  PLL_LOCK_TIMEOUT);
}

static void g12a_ephy_pll_disable(struct clk_hw *hw)
{
	struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
	u32 val;

	val = readl(pll->base + ETH_PLL_CTL0);
	val &= ~PLL_CTL0_EN;
	val |= PLL_CTL0_RST;
	writel(val, pll->base + ETH_PLL_CTL0);
}

static int g12a_ephy_pll_is_enabled(struct clk_hw *hw)
{
	struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);
	unsigned int val;

	val = readl(pll->base + ETH_PLL_CTL0);

	return (val & PLL_CTL0_LOCK_DIG) ? 1 : 0;
}

static int g12a_ephy_pll_init(struct clk_hw *hw)
{
	struct g12a_ephy_pll *pll = g12a_ephy_pll_to_dev(hw);

	/* Apply PLL HW settings */
	writel(0x29c0040a, pll->base + ETH_PLL_CTL0);
	writel(0x927e0000, pll->base + ETH_PLL_CTL1);
	writel(0xac5f49e5, pll->base + ETH_PLL_CTL2);
	writel(0x00000000, pll->base + ETH_PLL_CTL3);
	writel(0x00000000, pll->base + ETH_PLL_CTL4);
	writel(0x20200000, pll->base + ETH_PLL_CTL5);
	writel(0x0000c002, pll->base + ETH_PLL_CTL6);
	writel(0x00000023, pll->base + ETH_PLL_CTL7);

	return 0;
}

static const struct clk_ops g12a_ephy_pll_ops = {
	.recalc_rate	= g12a_ephy_pll_recalc_rate,
	.is_enabled	= g12a_ephy_pll_is_enabled,
	.enable		= g12a_ephy_pll_enable,
	.disable	= g12a_ephy_pll_disable,
	.init		= g12a_ephy_pll_init,
};

static int g12a_enable_internal_mdio(struct g12a_mdio_mux *priv)
{
	u32 value;
	int ret;

	/* Enable the phy clock */
	if (!__clk_is_enabled(priv->pll)) {
		ret = clk_prepare_enable(priv->pll);
		if (ret)
			return ret;
	}

	/* Initialize ephy control */
	writel(EPHY_G12A_ID, priv->regs + ETH_PHY_CNTL0);

	/* Make sure we get a 0 -> 1 transition on the enable bit */
	value = FIELD_PREP(PHY_CNTL1_ST_MODE, 3) |
		FIELD_PREP(PHY_CNTL1_ST_PHYADD, EPHY_DFLT_ADD) |
		FIELD_PREP(PHY_CNTL1_MII_MODE, EPHY_MODE_RMII) |
		PHY_CNTL1_CLK_EN |
		PHY_CNTL1_CLKFREQ;
	writel(value, priv->regs + ETH_PHY_CNTL1);
	writel(PHY_CNTL2_USE_INTERNAL |
	       PHY_CNTL2_SMI_SRC_MAC |
	       PHY_CNTL2_RX_CLK_EPHY,
	       priv->regs + ETH_PHY_CNTL2);

	value |= PHY_CNTL1_PHY_ENB;
	writel(value, priv->regs + ETH_PHY_CNTL1);

	/* The phy needs a bit of time to power up */
	mdelay(10);

	return 0;
}

static int g12a_enable_external_mdio(struct g12a_mdio_mux *priv)
{
	/* Reset the mdio bus mux */
	writel_relaxed(0x0, priv->regs + ETH_PHY_CNTL2);

	/* Disable the phy clock if enabled */
	if (__clk_is_enabled(priv->pll))
		clk_disable_unprepare(priv->pll);

	return 0;
}

static int g12a_mdio_switch_fn(int current_child, int desired_child,
			       void *data)
{
	struct g12a_mdio_mux *priv = dev_get_drvdata(data);

	if (current_child == desired_child)
		return 0;

	switch (desired_child) {
	case MESON_G12A_MDIO_EXTERNAL_ID:
		return g12a_enable_external_mdio(priv);
	case MESON_G12A_MDIO_INTERNAL_ID:
		return g12a_enable_internal_mdio(priv);
	default:
		return -EINVAL;
	}
}

static const struct of_device_id g12a_mdio_mux_match[] = {
	{ .compatible = "amlogic,g12a-mdio-mux", },
	{},
};
MODULE_DEVICE_TABLE(of, g12a_mdio_mux_match);

static int g12a_ephy_glue_clk_register(struct device *dev)
{
	struct g12a_mdio_mux *priv = dev_get_drvdata(dev);
	const char *parent_names[PLL_MUX_NUM_PARENT];
	struct clk_init_data init;
	struct g12a_ephy_pll *pll;
	struct clk_mux *mux;
	struct clk *clk;
	char *name;
	int i;

	/* get the mux parents */
	for (i = 0; i < PLL_MUX_NUM_PARENT; i++) {
		char in_name[8];

		snprintf(in_name, sizeof(in_name), "clkin%d", i);
		clk = devm_clk_get(dev, in_name);
		if (IS_ERR(clk))
			return dev_err_probe(dev, PTR_ERR(clk),
					     "Missing clock %s\n", in_name);

		parent_names[i] = __clk_get_name(clk);
	}

	/* create the input mux */
	mux = devm_kzalloc(dev, sizeof(*mux), GFP_KERNEL);
	if (!mux)
		return -ENOMEM;

	name = kasprintf(GFP_KERNEL, "%s#mux", dev_name(dev));
	if (!name)
		return -ENOMEM;

	init.name = name;
	init.ops = &clk_mux_ro_ops;
	init.flags = 0;
	init.parent_names = parent_names;
	init.num_parents = PLL_MUX_NUM_PARENT;

	mux->reg = priv->regs + ETH_PLL_CTL0;
	mux->shift = __ffs(PLL_CTL0_SEL);
	mux->mask = PLL_CTL0_SEL >> mux->shift;
	mux->hw.init = &init;

	clk = devm_clk_register(dev, &mux->hw);
	kfree(name);
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to register input mux\n");
		return PTR_ERR(clk);
	}

	/* create the pll */
	pll = devm_kzalloc(dev, sizeof(*pll), GFP_KERNEL);
	if (!pll)
		return -ENOMEM;

	name = kasprintf(GFP_KERNEL, "%s#pll", dev_name(dev));
	if (!name)
		return -ENOMEM;

	init.name = name;
	init.ops = &g12a_ephy_pll_ops;
	init.flags = 0;
	parent_names[0] = __clk_get_name(clk);
	init.parent_names = parent_names;
	init.num_parents = 1;

	pll->base = priv->regs;
	pll->hw.init = &init;

	clk = devm_clk_register(dev, &pll->hw);
	kfree(name);
	if (IS_ERR(clk)) {
		dev_err(dev, "failed to register ephy pll\n");
		return PTR_ERR(clk);
	}

	priv->pll = clk;

	return 0;
}

static int g12a_mdio_mux_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct g12a_mdio_mux *priv;
	struct clk *pclk;
	int ret;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	pclk = devm_clk_get_enabled(dev, "pclk");
	if (IS_ERR(pclk))
		return dev_err_probe(dev, PTR_ERR(pclk),
				     "failed to get peripheral clock\n");

	/* Register PLL in CCF */
	ret = g12a_ephy_glue_clk_register(dev);
	if (ret)
		return ret;

	ret = mdio_mux_init(dev, dev->of_node, g12a_mdio_switch_fn,
			    &priv->mux_handle, dev, NULL);
	if (ret)
		dev_err_probe(dev, ret, "mdio multiplexer init failed\n");

	return ret;
}

static int g12a_mdio_mux_remove(struct platform_device *pdev)
{
	struct g12a_mdio_mux *priv = platform_get_drvdata(pdev);

	mdio_mux_uninit(priv->mux_handle);

	if (__clk_is_enabled(priv->pll))
		clk_disable_unprepare(priv->pll);

	return 0;
}

static struct platform_driver g12a_mdio_mux_driver = {
	.probe  = g12a_mdio_mux_probe,
	.remove = g12a_mdio_mux_remove,
	.driver	= {
		.name = "g12a-mdio_mux",
		.of_match_table = g12a_mdio_mux_match,
	},
};
module_platform_driver(g12a_mdio_mux_driver);

MODULE_DESCRIPTION("Amlogic G12a MDIO multiplexer driver");
MODULE_AUTHOR("Jerome Brunet <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-mux-meson-g12a.c
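Editor's note: g12a_ephy_pll_recalc_rate() above computes rate = parent_rate * m / n from the M and N fields of ETH_PLL_CTL0. A minimal userspace sketch of that field extraction, using the CTL0 value written by g12a_ephy_pll_init(); the 24 MHz parent rate is an assumption for illustration:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t val = 0x29c0040a;	/* ETH_PLL_CTL0 value from the init hook */
	uint32_t m = val & 0x1ff;		/* PLL_CTL0_M: GENMASK(8, 0) */
	uint32_t n = (val >> 10) & 0x1f;	/* PLL_CTL0_N: GENMASK(14, 10) */
	unsigned long parent_rate = 24000000;	/* assumed 24 MHz input */

	/* m=10, n=1 -> 240000000 Hz with the assumed parent */
	printf("m=%u n=%u -> %lu Hz\n", m, n, parent_rate * m / n);
	return 0;
}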
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2016 Broadcom
 */
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/device.h>
#include <linux/iopoll.h>
#include <linux/mdio-mux.h>
#include <linux/module.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

#define MDIO_RATE_ADJ_EXT_OFFSET	0x000
#define MDIO_RATE_ADJ_INT_OFFSET	0x004
#define MDIO_RATE_ADJ_DIVIDENT_SHIFT	16

#define MDIO_SCAN_CTRL_OFFSET		0x008
#define MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR	28

#define MDIO_PARAM_OFFSET		0x23c
#define MDIO_PARAM_MIIM_CYCLE		29
#define MDIO_PARAM_INTERNAL_SEL		25
#define MDIO_PARAM_BUS_ID		22
#define MDIO_PARAM_C45_SEL		21
#define MDIO_PARAM_PHY_ID		16
#define MDIO_PARAM_PHY_DATA		0

#define MDIO_READ_OFFSET		0x240
#define MDIO_READ_DATA_MASK		0xffff
#define MDIO_ADDR_OFFSET		0x244

#define MDIO_CTRL_OFFSET		0x248
#define MDIO_CTRL_WRITE_OP		0x1
#define MDIO_CTRL_READ_OP		0x2

#define MDIO_STAT_OFFSET		0x24c
#define MDIO_STAT_DONE			1

#define BUS_MAX_ADDR			32
#define EXT_BUS_START_ADDR		16

#define MDIO_REG_ADDR_SPACE_SIZE	0x250

#define MDIO_OPERATING_FREQUENCY	11000000
#define MDIO_RATE_ADJ_DIVIDENT		1

struct iproc_mdiomux_desc {
	void *mux_handle;
	void __iomem *base;
	struct device *dev;
	struct mii_bus *mii_bus;
	struct clk *core_clk;
};

static void mdio_mux_iproc_config(struct iproc_mdiomux_desc *md)
{
	u32 divisor;
	u32 val;

	/* Disable external mdio master access */
	val = readl(md->base + MDIO_SCAN_CTRL_OFFSET);
	val |= BIT(MDIO_SCAN_CTRL_OVRIDE_EXT_MSTR);
	writel(val, md->base + MDIO_SCAN_CTRL_OFFSET);

	if (md->core_clk) {
		/* use rate adjust regs to derive the mdio's operating
		 * frequency from the specified core clock
		 */
		divisor = clk_get_rate(md->core_clk) / MDIO_OPERATING_FREQUENCY;
		divisor = divisor / (MDIO_RATE_ADJ_DIVIDENT + 1);
		val = divisor;
		val |= MDIO_RATE_ADJ_DIVIDENT << MDIO_RATE_ADJ_DIVIDENT_SHIFT;
		writel(val, md->base + MDIO_RATE_ADJ_EXT_OFFSET);
		writel(val, md->base + MDIO_RATE_ADJ_INT_OFFSET);
	}
}

static int iproc_mdio_wait_for_idle(void __iomem *base, bool result)
{
	u32 val;

	return readl_poll_timeout(base + MDIO_STAT_OFFSET, val,
				  (val & MDIO_STAT_DONE) == result,
				  2000, 1000000);
}

/* start_miim_ops - Program and start MDIO transaction over mdio bus.
 * @base: Base address
 * @phyid: phyid of the selected bus.
 * @reg: register offset to be read/written.
 * @val: 0 if read op, else value to be written in @reg.
 * @op: Operation that needs to be carried out.
 *      MDIO_CTRL_READ_OP: Read transaction.
 *      MDIO_CTRL_WRITE_OP: Write transaction.
 *
 * Return value: Successful Read operation returns read reg values and write
 *      operation returns 0. Failure operation returns negative error code.
 */
static int start_miim_ops(void __iomem *base, bool c45,
			  u16 phyid, u32 reg, u16 val, u32 op)
{
	u32 param;
	int ret;

	writel(0, base + MDIO_CTRL_OFFSET);
	ret = iproc_mdio_wait_for_idle(base, 0);
	if (ret)
		goto err;

	param = readl(base + MDIO_PARAM_OFFSET);
	param |= phyid << MDIO_PARAM_PHY_ID;
	param |= val << MDIO_PARAM_PHY_DATA;
	if (c45)
		param |= BIT(MDIO_PARAM_C45_SEL);
	writel(param, base + MDIO_PARAM_OFFSET);

	writel(reg, base + MDIO_ADDR_OFFSET);

	writel(op, base + MDIO_CTRL_OFFSET);

	ret = iproc_mdio_wait_for_idle(base, 1);
	if (ret)
		goto err;

	if (op == MDIO_CTRL_READ_OP)
		ret = readl(base + MDIO_READ_OFFSET) & MDIO_READ_DATA_MASK;
err:
	return ret;
}

static int iproc_mdiomux_read_c22(struct mii_bus *bus, int phyid, int reg)
{
	struct iproc_mdiomux_desc *md = bus->priv;
	int ret;

	ret = start_miim_ops(md->base, false, phyid, reg, 0,
			     MDIO_CTRL_READ_OP);
	if (ret < 0)
		dev_err(&bus->dev, "mdiomux c22 read operation failed!!!");

	return ret;
}

static int iproc_mdiomux_read_c45(struct mii_bus *bus, int phyid, int devad,
				  int reg)
{
	struct iproc_mdiomux_desc *md = bus->priv;
	int ret;

	ret = start_miim_ops(md->base, true, phyid, reg | devad << 16, 0,
			     MDIO_CTRL_READ_OP);
	if (ret < 0)
		dev_err(&bus->dev, "mdiomux read c45 operation failed!!!");

	return ret;
}

static int iproc_mdiomux_write_c22(struct mii_bus *bus, int phyid,
				   int reg, u16 val)
{
	struct iproc_mdiomux_desc *md = bus->priv;
	int ret;

	/* Write val at reg offset */
	ret = start_miim_ops(md->base, false, phyid, reg, val,
			     MDIO_CTRL_WRITE_OP);
	if (ret < 0)
		dev_err(&bus->dev, "mdiomux write c22 operation failed!!!");

	return ret;
}

static int iproc_mdiomux_write_c45(struct mii_bus *bus, int phyid,
				   int devad, int reg, u16 val)
{
	struct iproc_mdiomux_desc *md = bus->priv;
	int ret;

	/* Write val at reg offset */
	ret = start_miim_ops(md->base, true, phyid, reg | devad << 16, val,
			     MDIO_CTRL_WRITE_OP);
	if (ret < 0)
		dev_err(&bus->dev, "mdiomux write c45 operation failed!!!");

	return ret;
}

static int mdio_mux_iproc_switch_fn(int current_child, int desired_child,
				    void *data)
{
	struct iproc_mdiomux_desc *md = data;
	u32 param, bus_id;
	bool bus_dir;

	/* select bus and its properties */
	bus_dir = (desired_child < EXT_BUS_START_ADDR);
	bus_id = bus_dir ? desired_child :
			   (desired_child - EXT_BUS_START_ADDR);

	param = (bus_dir ? 1 : 0) << MDIO_PARAM_INTERNAL_SEL;
	param |= (bus_id << MDIO_PARAM_BUS_ID);

	writel(param, md->base + MDIO_PARAM_OFFSET);
	return 0;
}

static int mdio_mux_iproc_probe(struct platform_device *pdev)
{
	struct iproc_mdiomux_desc *md;
	struct mii_bus *bus;
	struct resource *res;
	int rc;

	md = devm_kzalloc(&pdev->dev, sizeof(*md), GFP_KERNEL);
	if (!md)
		return -ENOMEM;
	md->dev = &pdev->dev;

	md->base = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
	if (IS_ERR(md->base))
		return PTR_ERR(md->base);
	if (res->start & 0xfff) {
		/* For backward compatibility in case the
		 * base address is specified with an offset.
		 */
		dev_info(&pdev->dev, "fix base address in dt-blob\n");
		res->start &= ~0xfff;
		res->end = res->start + MDIO_REG_ADDR_SPACE_SIZE - 1;
	}

	md->mii_bus = devm_mdiobus_alloc(&pdev->dev);
	if (!md->mii_bus) {
		dev_err(&pdev->dev, "mdiomux bus alloc failed\n");
		return -ENOMEM;
	}

	md->core_clk = devm_clk_get(&pdev->dev, NULL);
	if (md->core_clk == ERR_PTR(-ENOENT) ||
	    md->core_clk == ERR_PTR(-EINVAL))
		md->core_clk = NULL;
	else if (IS_ERR(md->core_clk))
		return PTR_ERR(md->core_clk);

	rc = clk_prepare_enable(md->core_clk);
	if (rc) {
		dev_err(&pdev->dev, "failed to enable core clk\n");
		return rc;
	}

	bus = md->mii_bus;
	bus->priv = md;
	bus->name = "iProc MDIO mux bus";
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id);
	bus->parent = &pdev->dev;
	bus->read = iproc_mdiomux_read_c22;
	bus->write = iproc_mdiomux_write_c22;
	bus->read_c45 = iproc_mdiomux_read_c45;
	bus->write_c45 = iproc_mdiomux_write_c45;

	bus->phy_mask = ~0;
	bus->dev.of_node = pdev->dev.of_node;
	rc = mdiobus_register(bus);
	if (rc) {
		dev_err(&pdev->dev, "mdiomux registration failed\n");
		goto out_clk;
	}

	platform_set_drvdata(pdev, md);

	rc = mdio_mux_init(md->dev, md->dev->of_node, mdio_mux_iproc_switch_fn,
			   &md->mux_handle, md, md->mii_bus);
	if (rc) {
		dev_info(md->dev, "mdiomux initialization failed\n");
		goto out_register;
	}

	mdio_mux_iproc_config(md);

	dev_info(md->dev, "iProc mdiomux registered\n");
	return 0;

out_register:
	mdiobus_unregister(bus);
out_clk:
	clk_disable_unprepare(md->core_clk);
	return rc;
}

static int mdio_mux_iproc_remove(struct platform_device *pdev)
{
	struct iproc_mdiomux_desc *md = platform_get_drvdata(pdev);

	mdio_mux_uninit(md->mux_handle);
	mdiobus_unregister(md->mii_bus);
	clk_disable_unprepare(md->core_clk);

	return 0;
}

#ifdef CONFIG_PM_SLEEP
static int mdio_mux_iproc_suspend(struct device *dev)
{
	struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);

	clk_disable_unprepare(md->core_clk);

	return 0;
}

static int mdio_mux_iproc_resume(struct device *dev)
{
	struct iproc_mdiomux_desc *md = dev_get_drvdata(dev);
	int rc;

	rc = clk_prepare_enable(md->core_clk);
	if (rc) {
		dev_err(md->dev, "failed to enable core clk\n");
		return rc;
	}
	mdio_mux_iproc_config(md);

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(mdio_mux_iproc_pm_ops,
			 mdio_mux_iproc_suspend, mdio_mux_iproc_resume);

static const struct of_device_id mdio_mux_iproc_match[] = {
	{
		.compatible = "brcm,mdio-mux-iproc",
	},
	{},
};
MODULE_DEVICE_TABLE(of, mdio_mux_iproc_match);

static struct platform_driver mdiomux_iproc_driver = {
	.driver = {
		.name		= "mdio-mux-iproc",
		.of_match_table = mdio_mux_iproc_match,
		.pm		= &mdio_mux_iproc_pm_ops,
	},
	.probe		= mdio_mux_iproc_probe,
	.remove		= mdio_mux_iproc_remove,
};
module_platform_driver(mdiomux_iproc_driver);

MODULE_DESCRIPTION("iProc MDIO Mux Bus Driver");
MODULE_AUTHOR("Pramod Kumar <[email protected]>");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-mux-bcm-iproc.c
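Editor's note: the clause-45 accessors above pack the MMD device address and register into one word with "reg | devad << 16" before handing it to start_miim_ops(). A minimal userspace sketch of that packing; the devad and reg values are examples:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int devad = 1;		/* e.g. the PMA/PMD device */
	int reg = 0x0007;	/* register within that device */

	/* Same packing the c45 read/write paths use for MDIO_ADDR */
	uint32_t addr = reg | devad << 16;

	printf("MDIO_ADDR word: 0x%08x\n", addr);	/* 0x00010007 */
	return 0;
}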
// SPDX-License-Identifier: GPL-2.0-only
/*
 * fwnode helpers for the MDIO (Ethernet PHY) API
 *
 * This file provides helper functions for extracting PHY device information
 * out of the fwnode and using it to populate an mii_bus.
 */

#include <linux/acpi.h>
#include <linux/fwnode_mdio.h>
#include <linux/of.h>
#include <linux/phy.h>
#include <linux/pse-pd/pse.h>

MODULE_AUTHOR("Calvin Johnson <[email protected]>");
MODULE_LICENSE("GPL");

static struct pse_control *
fwnode_find_pse_control(struct fwnode_handle *fwnode)
{
	struct pse_control *psec;
	struct device_node *np;

	if (!IS_ENABLED(CONFIG_PSE_CONTROLLER))
		return NULL;

	np = to_of_node(fwnode);
	if (!np)
		return NULL;

	psec = of_pse_control_get(np);
	if (PTR_ERR(psec) == -ENOENT)
		return NULL;

	return psec;
}

static struct mii_timestamper *
fwnode_find_mii_timestamper(struct fwnode_handle *fwnode)
{
	struct of_phandle_args arg;
	int err;

	if (is_acpi_node(fwnode))
		return NULL;

	err = of_parse_phandle_with_fixed_args(to_of_node(fwnode),
					       "timestamper", 1, 0, &arg);
	if (err == -ENOENT)
		return NULL;
	else if (err)
		return ERR_PTR(err);

	if (arg.args_count != 1)
		return ERR_PTR(-EINVAL);

	return register_mii_timestamper(arg.np, arg.args[0]);
}

int fwnode_mdiobus_phy_device_register(struct mii_bus *mdio,
				       struct phy_device *phy,
				       struct fwnode_handle *child, u32 addr)
{
	int rc;

	rc = fwnode_irq_get(child, 0);
	/* Don't wait forever if the IRQ provider doesn't become available,
	 * just fall back to poll mode
	 */
	if (rc == -EPROBE_DEFER)
		rc = driver_deferred_probe_check_state(&phy->mdio.dev);
	if (rc == -EPROBE_DEFER)
		return rc;

	if (rc > 0) {
		phy->irq = rc;
		mdio->irq[addr] = rc;
	} else {
		phy->irq = mdio->irq[addr];
	}

	if (fwnode_property_read_bool(child, "broken-turn-around"))
		mdio->phy_ignore_ta_mask |= 1 << addr;

	fwnode_property_read_u32(child, "reset-assert-us",
				 &phy->mdio.reset_assert_delay);
	fwnode_property_read_u32(child, "reset-deassert-us",
				 &phy->mdio.reset_deassert_delay);

	/* Associate the fwnode with the device structure so it
	 * can be looked up later
	 */
	fwnode_handle_get(child);
	device_set_node(&phy->mdio.dev, child);

	/* All data is now stored in the phy struct;
	 * register it
	 */
	rc = phy_device_register(phy);
	if (rc) {
		device_set_node(&phy->mdio.dev, NULL);
		fwnode_handle_put(child);
		return rc;
	}

	dev_dbg(&mdio->dev, "registered phy %p fwnode at address %i\n",
		child, addr);
	return 0;
}
EXPORT_SYMBOL(fwnode_mdiobus_phy_device_register);

int fwnode_mdiobus_register_phy(struct mii_bus *bus,
				struct fwnode_handle *child, u32 addr)
{
	struct mii_timestamper *mii_ts = NULL;
	struct pse_control *psec = NULL;
	struct phy_device *phy;
	bool is_c45;
	u32 phy_id;
	int rc;

	psec = fwnode_find_pse_control(child);
	if (IS_ERR(psec))
		return PTR_ERR(psec);

	mii_ts = fwnode_find_mii_timestamper(child);
	if (IS_ERR(mii_ts)) {
		rc = PTR_ERR(mii_ts);
		goto clean_pse;
	}

	is_c45 = fwnode_device_is_compatible(child,
					     "ethernet-phy-ieee802.3-c45");
	if (is_c45 || fwnode_get_phy_id(child, &phy_id))
		phy = get_phy_device(bus, addr, is_c45);
	else
		phy = phy_device_create(bus, addr, phy_id, 0, NULL);
	if (IS_ERR(phy)) {
		rc = PTR_ERR(phy);
		goto clean_mii_ts;
	}

	if (is_acpi_node(child)) {
		phy->irq = bus->irq[addr];

		/* Associate the fwnode with the device structure so it
		 * can be looked up later.
		 */
		phy->mdio.dev.fwnode = fwnode_handle_get(child);

		/* All data is now stored in the phy struct, so register it */
		rc = phy_device_register(phy);
		if (rc) {
			phy->mdio.dev.fwnode = NULL;
			fwnode_handle_put(child);
			goto clean_phy;
		}
	} else if (is_of_node(child)) {
		rc = fwnode_mdiobus_phy_device_register(bus, phy, child, addr);
		if (rc)
			goto clean_phy;
	}

	phy->psec = psec;

	/* phy->mii_ts may already be defined by the PHY driver. A
	 * mii_timestamper probed via the device tree will still have
	 * precedence.
	 */
	if (mii_ts)
		phy->mii_ts = mii_ts;

	return 0;

clean_phy:
	phy_device_free(phy);
clean_mii_ts:
	unregister_mii_timestamper(mii_ts);
clean_pse:
	pse_control_put(psec);

	return rc;
}
EXPORT_SYMBOL(fwnode_mdiobus_register_phy);
linux-master
drivers/net/mdio/fwnode_mdio.c
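Editor's note: when a node carries an "ethernet-phy-idAAAA.BBBB" compatible, fwnode_get_phy_id() recovers the 32-bit PHY id from the two hex fields and the code above can create the device without probing the bus. A minimal userspace sketch of that parse; the example id below is just a plausible value:

#include <stdio.h>

int main(void)
{
	/* Compatible form parsed by fwnode_get_phy_id(); example value. */
	const char *compatible = "ethernet-phy-id001c.c915";
	unsigned int upper, lower;

	if (sscanf(compatible, "ethernet-phy-id%4x.%4x", &upper, &lower) == 2)
		printf("phy_id = 0x%08x\n", upper << 16 | lower);
	return 0;
}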
// SPDX-License-Identifier: GPL-2.0-or-later
/* Copyright (C) 2019 IBM Corp. */

#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/reset.h>
#include <linux/iopoll.h>
#include <linux/mdio.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>

#define DRV_NAME "mdio-aspeed"

#define ASPEED_MDIO_CTRL		0x0
#define   ASPEED_MDIO_CTRL_FIRE		BIT(31)
#define   ASPEED_MDIO_CTRL_ST		BIT(28)
#define     ASPEED_MDIO_CTRL_ST_C45	0
#define     ASPEED_MDIO_CTRL_ST_C22	1
#define   ASPEED_MDIO_CTRL_OP		GENMASK(27, 26)
#define     MDIO_C22_OP_WRITE		0b01
#define     MDIO_C22_OP_READ		0b10
#define     MDIO_C45_OP_ADDR		0b00
#define     MDIO_C45_OP_WRITE		0b01
#define     MDIO_C45_OP_PREAD		0b10
#define     MDIO_C45_OP_READ		0b11
#define   ASPEED_MDIO_CTRL_PHYAD	GENMASK(25, 21)
#define   ASPEED_MDIO_CTRL_REGAD	GENMASK(20, 16)
#define   ASPEED_MDIO_CTRL_MIIWDATA	GENMASK(15, 0)

#define ASPEED_MDIO_DATA		0x4
#define   ASPEED_MDIO_DATA_MDC_THRES	GENMASK(31, 24)
#define   ASPEED_MDIO_DATA_MDIO_EDGE	BIT(23)
#define   ASPEED_MDIO_DATA_MDIO_LATCH	GENMASK(22, 20)
#define   ASPEED_MDIO_DATA_IDLE		BIT(16)
#define   ASPEED_MDIO_DATA_MIIRDATA	GENMASK(15, 0)

#define ASPEED_MDIO_INTERVAL_US		100
#define ASPEED_MDIO_TIMEOUT_US		(ASPEED_MDIO_INTERVAL_US * 10)

struct aspeed_mdio {
	void __iomem *base;
	struct reset_control *reset;
};

static int aspeed_mdio_op(struct mii_bus *bus, u8 st, u8 op, u8 phyad,
			  u8 regad, u16 data)
{
	struct aspeed_mdio *ctx = bus->priv;
	u32 ctrl;

	dev_dbg(&bus->dev, "%s: st: %u op: %u, phyad: %u, regad: %u, data: %u\n",
		__func__, st, op, phyad, regad, data);

	ctrl = ASPEED_MDIO_CTRL_FIRE
		| FIELD_PREP(ASPEED_MDIO_CTRL_ST, st)
		| FIELD_PREP(ASPEED_MDIO_CTRL_OP, op)
		| FIELD_PREP(ASPEED_MDIO_CTRL_PHYAD, phyad)
		| FIELD_PREP(ASPEED_MDIO_CTRL_REGAD, regad)
		| FIELD_PREP(ASPEED_MDIO_DATA_MIIRDATA, data);

	iowrite32(ctrl, ctx->base + ASPEED_MDIO_CTRL);

	return readl_poll_timeout(ctx->base + ASPEED_MDIO_CTRL, ctrl,
				  !(ctrl & ASPEED_MDIO_CTRL_FIRE),
				  ASPEED_MDIO_INTERVAL_US,
				  ASPEED_MDIO_TIMEOUT_US);
}

static int aspeed_mdio_get_data(struct mii_bus *bus)
{
	struct aspeed_mdio *ctx = bus->priv;
	u32 data;
	int rc;

	rc = readl_poll_timeout(ctx->base + ASPEED_MDIO_DATA, data,
				data & ASPEED_MDIO_DATA_IDLE,
				ASPEED_MDIO_INTERVAL_US,
				ASPEED_MDIO_TIMEOUT_US);
	if (rc < 0)
		return rc;

	return FIELD_GET(ASPEED_MDIO_DATA_MIIRDATA, data);
}

static int aspeed_mdio_read_c22(struct mii_bus *bus, int addr, int regnum)
{
	int rc;

	rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_READ,
			    addr, regnum, 0);
	if (rc < 0)
		return rc;

	return aspeed_mdio_get_data(bus);
}

static int aspeed_mdio_write_c22(struct mii_bus *bus, int addr, int regnum,
				 u16 val)
{
	return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C22, MDIO_C22_OP_WRITE,
			      addr, regnum, val);
}

static int aspeed_mdio_read_c45(struct mii_bus *bus, int addr, int devad,
				int regnum)
{
	int rc;

	rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
			    addr, devad, regnum);
	if (rc < 0)
		return rc;

	rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_READ,
			    addr, devad, 0);
	if (rc < 0)
		return rc;

	return aspeed_mdio_get_data(bus);
}

static int aspeed_mdio_write_c45(struct mii_bus *bus, int addr, int devad,
				 int regnum, u16 val)
{
	int rc;

	rc = aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_ADDR,
			    addr, devad, regnum);
	if (rc < 0)
		return rc;

	return aspeed_mdio_op(bus, ASPEED_MDIO_CTRL_ST_C45, MDIO_C45_OP_WRITE,
			      addr, devad, val);
}

static int aspeed_mdio_probe(struct platform_device *pdev)
{
	struct aspeed_mdio *ctx;
	struct mii_bus *bus;
	int rc;

	bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*ctx));
	if (!bus)
		return -ENOMEM;

	ctx = bus->priv;
	ctx->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(ctx->base))
		return PTR_ERR(ctx->base);

	ctx->reset = devm_reset_control_get_optional_shared(&pdev->dev, NULL);
	if (IS_ERR(ctx->reset))
		return PTR_ERR(ctx->reset);

	reset_control_deassert(ctx->reset);

	bus->name = DRV_NAME;
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id);
	bus->parent = &pdev->dev;
	bus->read = aspeed_mdio_read_c22;
	bus->write = aspeed_mdio_write_c22;
	bus->read_c45 = aspeed_mdio_read_c45;
	bus->write_c45 = aspeed_mdio_write_c45;

	rc = of_mdiobus_register(bus, pdev->dev.of_node);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register MDIO bus!\n");
		reset_control_assert(ctx->reset);
		return rc;
	}

	platform_set_drvdata(pdev, bus);

	return 0;
}

static int aspeed_mdio_remove(struct platform_device *pdev)
{
	struct mii_bus *bus = (struct mii_bus *)platform_get_drvdata(pdev);
	struct aspeed_mdio *ctx = bus->priv;

	reset_control_assert(ctx->reset);
	mdiobus_unregister(bus);

	return 0;
}

static const struct of_device_id aspeed_mdio_of_match[] = {
	{ .compatible = "aspeed,ast2600-mdio", },
	{ },
};
MODULE_DEVICE_TABLE(of, aspeed_mdio_of_match);

static struct platform_driver aspeed_mdio_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = aspeed_mdio_of_match,
	},
	.probe = aspeed_mdio_probe,
	.remove = aspeed_mdio_remove,
};

module_platform_driver(aspeed_mdio_driver);

MODULE_AUTHOR("Andrew Jeffery <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/net/mdio/mdio-aspeed.c
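aspeed_mdio_op() above packs a whole MDIO transaction into a single 32-bit write to ASPEED_MDIO_CTRL. The standalone sketch below (plain C, not kernel code) redoes that FIELD_PREP() packing with open-coded shifts for a Clause-22 read of register 2 on PHY address 1; the bit positions are copied from the register defines in the driver, and the printed constant is purely illustrative.

#include <stdio.h>

#define CTRL_FIRE	(1u << 31)			/* ASPEED_MDIO_CTRL_FIRE */
#define CTRL_ST_C22	(1u << 28)			/* ASPEED_MDIO_CTRL_ST = 1 (C22) */
#define CTRL_OP(x)	((unsigned int)(x) << 26)	/* GENMASK(27, 26) */
#define CTRL_PHYAD(x)	(((unsigned int)(x) & 0x1f) << 21)	/* GENMASK(25, 21) */
#define CTRL_REGAD(x)	(((unsigned int)(x) & 0x1f) << 16)	/* GENMASK(20, 16) */

int main(void)
{
	/* MDIO_C22_OP_READ is 0b10 in the driver */
	unsigned int ctrl = CTRL_FIRE | CTRL_ST_C22 | CTRL_OP(0x2) |
			    CTRL_PHYAD(1) | CTRL_REGAD(2);

	printf("C22 read ctrl word: 0x%08x\n", ctrl);	/* prints 0x98220000 */
	return 0;
}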
// SPDX-License-Identifier: GPL-2.0-only /* * OF helpers for the MDIO (Ethernet PHY) API * * Copyright (c) 2009 Secret Lab Technologies, Ltd. * * This file provides helper functions for extracting PHY device information * out of the OpenFirmware device tree and using it to populate an mii_bus. */ #include <linux/device.h> #include <linux/err.h> #include <linux/fwnode_mdio.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/of_mdio.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/phy_fixed.h> #define DEFAULT_GPIO_RESET_DELAY 10 /* in microseconds */ MODULE_AUTHOR("Grant Likely <[email protected]>"); MODULE_LICENSE("GPL"); /* Extract the clause 22 phy ID from the compatible string of the form * ethernet-phy-idAAAA.BBBB */ static int of_get_phy_id(struct device_node *device, u32 *phy_id) { return fwnode_get_phy_id(of_fwnode_handle(device), phy_id); } int of_mdiobus_phy_device_register(struct mii_bus *mdio, struct phy_device *phy, struct device_node *child, u32 addr) { return fwnode_mdiobus_phy_device_register(mdio, phy, of_fwnode_handle(child), addr); } EXPORT_SYMBOL(of_mdiobus_phy_device_register); static int of_mdiobus_register_phy(struct mii_bus *mdio, struct device_node *child, u32 addr) { return fwnode_mdiobus_register_phy(mdio, of_fwnode_handle(child), addr); } static int of_mdiobus_register_device(struct mii_bus *mdio, struct device_node *child, u32 addr) { struct fwnode_handle *fwnode = of_fwnode_handle(child); struct mdio_device *mdiodev; int rc; mdiodev = mdio_device_create(mdio, addr); if (IS_ERR(mdiodev)) return PTR_ERR(mdiodev); /* Associate the OF node with the device structure so it * can be looked up later. */ fwnode_handle_get(fwnode); device_set_node(&mdiodev->dev, fwnode); /* All data is now stored in the mdiodev struct; register it. */ rc = mdio_device_register(mdiodev); if (rc) { device_set_node(&mdiodev->dev, NULL); fwnode_handle_put(fwnode); mdio_device_free(mdiodev); return rc; } dev_dbg(&mdio->dev, "registered mdio device %pOFn at address %i\n", child, addr); return 0; } /* The following is a list of PHY compatible strings which appear in * some DTBs. The compatible string is never matched against a PHY * driver, so is pointless. We only expect devices which are not PHYs * to have a compatible string, so they can be matched to an MDIO * driver. Encourage users to upgrade their DT blobs to remove these. */ static const struct of_device_id whitelist_phys[] = { { .compatible = "brcm,40nm-ephy" }, { .compatible = "broadcom,bcm5241" }, { .compatible = "marvell,88E1111", }, { .compatible = "marvell,88e1116", }, { .compatible = "marvell,88e1118", }, { .compatible = "marvell,88e1145", }, { .compatible = "marvell,88e1149r", }, { .compatible = "marvell,88e1310", }, { .compatible = "marvell,88E1510", }, { .compatible = "marvell,88E1514", }, { .compatible = "moxa,moxart-rtl8201cp", }, {} }; /* * Return true if the child node is for a phy. It must either: * o Compatible string of "ethernet-phy-idX.X" * o Compatible string of "ethernet-phy-ieee802.3-c45" * o Compatible string of "ethernet-phy-ieee802.3-c22" * o In the white list above (and issue a warning) * o No compatibility string * * A device which is not a phy is expected to have a compatible string * indicating what sort of device it is. 
*/ bool of_mdiobus_child_is_phy(struct device_node *child) { u32 phy_id; if (of_get_phy_id(child, &phy_id) != -EINVAL) return true; if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c45")) return true; if (of_device_is_compatible(child, "ethernet-phy-ieee802.3-c22")) return true; if (of_match_node(whitelist_phys, child)) { pr_warn(FW_WARN "%pOF: Whitelisted compatible string. Please remove\n", child); return true; } if (!of_property_present(child, "compatible")) return true; return false; } EXPORT_SYMBOL(of_mdiobus_child_is_phy); /** * __of_mdiobus_register - Register mii_bus and create PHYs from the device tree * @mdio: pointer to mii_bus structure * @np: pointer to device_node of MDIO bus. * @owner: module owning the @mdio object. * * This function registers the mii_bus structure and registers a phy_device * for each child node of @np. */ int __of_mdiobus_register(struct mii_bus *mdio, struct device_node *np, struct module *owner) { struct device_node *child; bool scanphys = false; int addr, rc; if (!np) return __mdiobus_register(mdio, owner); /* Do not continue if the node is disabled */ if (!of_device_is_available(np)) return -ENODEV; /* Mask out all PHYs from auto probing. Instead the PHYs listed in * the device tree are populated after the bus has been registered */ mdio->phy_mask = ~0; device_set_node(&mdio->dev, of_fwnode_handle(np)); /* Get bus level PHY reset GPIO details */ mdio->reset_delay_us = DEFAULT_GPIO_RESET_DELAY; of_property_read_u32(np, "reset-delay-us", &mdio->reset_delay_us); mdio->reset_post_delay_us = 0; of_property_read_u32(np, "reset-post-delay-us", &mdio->reset_post_delay_us); /* Register the MDIO bus */ rc = __mdiobus_register(mdio, owner); if (rc) return rc; /* Loop over the child nodes and register a phy_device for each phy */ for_each_available_child_of_node(np, child) { addr = of_mdio_parse_addr(&mdio->dev, child); if (addr < 0) { scanphys = true; continue; } if (of_mdiobus_child_is_phy(child)) rc = of_mdiobus_register_phy(mdio, child, addr); else rc = of_mdiobus_register_device(mdio, child, addr); if (rc == -ENODEV) dev_err(&mdio->dev, "MDIO device at address %d is missing.\n", addr); else if (rc) goto unregister; } if (!scanphys) return 0; /* auto scan for PHYs with empty reg property */ for_each_available_child_of_node(np, child) { /* Skip PHYs with reg property set */ if (of_property_present(child, "reg")) continue; for (addr = 0; addr < PHY_MAX_ADDR; addr++) { /* skip already registered PHYs */ if (mdiobus_is_registered_device(mdio, addr)) continue; /* be noisy to encourage people to set reg property */ dev_info(&mdio->dev, "scan phy %pOFn at address %i\n", child, addr); if (of_mdiobus_child_is_phy(child)) { /* -ENODEV is the return code that PHYLIB has * standardized on to indicate that bus * scanning should continue. */ rc = of_mdiobus_register_phy(mdio, child, addr); if (!rc) break; if (rc != -ENODEV) goto unregister; } } } return 0; unregister: of_node_put(child); mdiobus_unregister(mdio); return rc; } EXPORT_SYMBOL(__of_mdiobus_register); /** * of_mdio_find_device - Given a device tree node, find the mdio_device * @np: pointer to the mdio_device's device tree node * * If successful, returns a pointer to the mdio_device with the embedded * struct device refcount incremented by one, or NULL on failure. 
* The caller should call put_device() on the mdio_device after its use */ struct mdio_device *of_mdio_find_device(struct device_node *np) { return fwnode_mdio_find_device(of_fwnode_handle(np)); } EXPORT_SYMBOL(of_mdio_find_device); /** * of_phy_find_device - Give a PHY node, find the phy_device * @phy_np: Pointer to the phy's device tree node * * If successful, returns a pointer to the phy_device with the embedded * struct device refcount incremented by one, or NULL on failure. */ struct phy_device *of_phy_find_device(struct device_node *phy_np) { return fwnode_phy_find_device(of_fwnode_handle(phy_np)); } EXPORT_SYMBOL(of_phy_find_device); /** * of_phy_connect - Connect to the phy described in the device tree * @dev: pointer to net_device claiming the phy * @phy_np: Pointer to device tree node for the PHY * @hndlr: Link state callback for the network device * @flags: flags to pass to the PHY * @iface: PHY data interface type * * If successful, returns a pointer to the phy_device with the embedded * struct device refcount incremented by one, or NULL on failure. The * refcount must be dropped by calling phy_disconnect() or phy_detach(). */ struct phy_device *of_phy_connect(struct net_device *dev, struct device_node *phy_np, void (*hndlr)(struct net_device *), u32 flags, phy_interface_t iface) { struct phy_device *phy = of_phy_find_device(phy_np); int ret; if (!phy) return NULL; phy->dev_flags |= flags; ret = phy_connect_direct(dev, phy, hndlr, iface); /* refcount is held by phy_connect_direct() on success */ put_device(&phy->mdio.dev); return ret ? NULL : phy; } EXPORT_SYMBOL(of_phy_connect); /** * of_phy_get_and_connect * - Get phy node and connect to the phy described in the device tree * @dev: pointer to net_device claiming the phy * @np: Pointer to device tree node for the net_device claiming the phy * @hndlr: Link state callback for the network device * * If successful, returns a pointer to the phy_device with the embedded * struct device refcount incremented by one, or NULL on failure. The * refcount must be dropped by calling phy_disconnect() or phy_detach(). */ struct phy_device *of_phy_get_and_connect(struct net_device *dev, struct device_node *np, void (*hndlr)(struct net_device *)) { phy_interface_t iface; struct device_node *phy_np; struct phy_device *phy; int ret; ret = of_get_phy_mode(np, &iface); if (ret) return NULL; if (of_phy_is_fixed_link(np)) { ret = of_phy_register_fixed_link(np); if (ret < 0) { netdev_err(dev, "broken fixed-link specification\n"); return NULL; } phy_np = of_node_get(np); } else { phy_np = of_parse_phandle(np, "phy-handle", 0); if (!phy_np) return NULL; } phy = of_phy_connect(dev, phy_np, hndlr, 0, iface); of_node_put(phy_np); return phy; } EXPORT_SYMBOL(of_phy_get_and_connect); /* * of_phy_is_fixed_link() and of_phy_register_fixed_link() must * support two DT bindings: * - the old DT binding, where 'fixed-link' was a property with 5 * cells encoding various information about the fixed PHY * - the new DT binding, where 'fixed-link' is a sub-node of the * Ethernet device. 
*/ bool of_phy_is_fixed_link(struct device_node *np) { struct device_node *dn; int len, err; const char *managed; /* New binding */ dn = of_get_child_by_name(np, "fixed-link"); if (dn) { of_node_put(dn); return true; } err = of_property_read_string(np, "managed", &managed); if (err == 0 && strcmp(managed, "auto") != 0) return true; /* Old binding */ if (of_get_property(np, "fixed-link", &len) && len == (5 * sizeof(__be32))) return true; return false; } EXPORT_SYMBOL(of_phy_is_fixed_link); int of_phy_register_fixed_link(struct device_node *np) { struct fixed_phy_status status = {}; struct device_node *fixed_link_node; u32 fixed_link_prop[5]; const char *managed; if (of_property_read_string(np, "managed", &managed) == 0 && strcmp(managed, "in-band-status") == 0) { /* status is zeroed, namely its .link member */ goto register_phy; } /* New binding */ fixed_link_node = of_get_child_by_name(np, "fixed-link"); if (fixed_link_node) { status.link = 1; status.duplex = of_property_read_bool(fixed_link_node, "full-duplex"); if (of_property_read_u32(fixed_link_node, "speed", &status.speed)) { of_node_put(fixed_link_node); return -EINVAL; } status.pause = of_property_read_bool(fixed_link_node, "pause"); status.asym_pause = of_property_read_bool(fixed_link_node, "asym-pause"); of_node_put(fixed_link_node); goto register_phy; } /* Old binding */ if (of_property_read_u32_array(np, "fixed-link", fixed_link_prop, ARRAY_SIZE(fixed_link_prop)) == 0) { status.link = 1; status.duplex = fixed_link_prop[1]; status.speed = fixed_link_prop[2]; status.pause = fixed_link_prop[3]; status.asym_pause = fixed_link_prop[4]; goto register_phy; } return -ENODEV; register_phy: return PTR_ERR_OR_ZERO(fixed_phy_register(PHY_POLL, &status, np)); } EXPORT_SYMBOL(of_phy_register_fixed_link); void of_phy_deregister_fixed_link(struct device_node *np) { struct phy_device *phydev; phydev = of_phy_find_device(np); if (!phydev) return; fixed_phy_unregister(phydev); put_device(&phydev->mdio.dev); /* of_phy_find_device() */ phy_device_free(phydev); /* fixed_phy_register() */ } EXPORT_SYMBOL(of_phy_deregister_fixed_link);
linux-master
drivers/net/mdio/of_mdio.c
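of_mdio.c is consumed by MAC drivers rather than run on its own. The hedged sketch below shows how a hypothetical driver might use of_phy_get_and_connect() from the code above to resolve either a "phy-handle" phandle or a fixed-link node; the foo_* names are invented for illustration and error handling is reduced to the minimum.

#include <linux/netdevice.h>
#include <linux/of_mdio.h>
#include <linux/phy.h>

/* Link-state callback invoked by phylib when the PHY state changes */
static void foo_adjust_link(struct net_device *ndev)
{
	/* react to phydev->link / speed / duplex changes here */
}

static int foo_attach_phy(struct net_device *ndev, struct device_node *np)
{
	struct phy_device *phydev;

	/* Resolves "phy-handle" or a fixed-link node and connects it */
	phydev = of_phy_get_and_connect(ndev, np, foo_adjust_link);
	if (!phydev)
		return -ENODEV;

	phy_start(phydev);
	return 0;
}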
// SPDX-License-Identifier: GPL-2.0 /* MOXA ART Ethernet (RTL8201CP) MDIO interface driver * * Copyright (C) 2013 Jonas Jensen <[email protected]> */ #include <linux/delay.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> #define REG_PHY_CTRL 0 #define REG_PHY_WRITE_DATA 4 /* REG_PHY_CTRL */ #define MIIWR BIT(27) /* init write sequence (auto cleared)*/ #define MIIRD BIT(26) #define REGAD_MASK 0x3e00000 #define PHYAD_MASK 0x1f0000 #define MIIRDATA_MASK 0xffff /* REG_PHY_WRITE_DATA */ #define MIIWDATA_MASK 0xffff struct moxart_mdio_data { void __iomem *base; }; static int moxart_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct moxart_mdio_data *data = bus->priv; u32 ctrl = 0; unsigned int count = 5; dev_dbg(&bus->dev, "%s\n", __func__); ctrl |= MIIRD | ((mii_id << 16) & PHYAD_MASK) | ((regnum << 21) & REGAD_MASK); writel(ctrl, data->base + REG_PHY_CTRL); do { ctrl = readl(data->base + REG_PHY_CTRL); if (!(ctrl & MIIRD)) return ctrl & MIIRDATA_MASK; mdelay(10); count--; } while (count > 0); dev_dbg(&bus->dev, "%s timed out\n", __func__); return -ETIMEDOUT; } static int moxart_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct moxart_mdio_data *data = bus->priv; u32 ctrl = 0; unsigned int count = 5; dev_dbg(&bus->dev, "%s\n", __func__); ctrl |= MIIWR | ((mii_id << 16) & PHYAD_MASK) | ((regnum << 21) & REGAD_MASK); value &= MIIWDATA_MASK; writel(value, data->base + REG_PHY_WRITE_DATA); writel(ctrl, data->base + REG_PHY_CTRL); do { ctrl = readl(data->base + REG_PHY_CTRL); if (!(ctrl & MIIWR)) return 0; mdelay(10); count--; } while (count > 0); dev_dbg(&bus->dev, "%s timed out\n", __func__); return -ETIMEDOUT; } static int moxart_mdio_reset(struct mii_bus *bus) { int data, i; for (i = 0; i < PHY_MAX_ADDR; i++) { data = moxart_mdio_read(bus, i, MII_BMCR); if (data < 0) continue; data |= BMCR_RESET; if (moxart_mdio_write(bus, i, MII_BMCR, data) < 0) continue; } return 0; } static int moxart_mdio_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct mii_bus *bus; struct moxart_mdio_data *data; int ret, i; bus = mdiobus_alloc_size(sizeof(*data)); if (!bus) return -ENOMEM; bus->name = "MOXA ART Ethernet MII"; bus->read = &moxart_mdio_read; bus->write = &moxart_mdio_write; bus->reset = &moxart_mdio_reset; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d-mii", pdev->name, pdev->id); bus->parent = &pdev->dev; /* Setting PHY_MAC_INTERRUPT here even if it has no effect, * of_mdiobus_register() sets these PHY_POLL. * Ideally, the interrupt from MAC controller could be used to * detect link state changes, not polling, i.e. if there was * a way phy_driver could set PHY_HAS_INTERRUPT but have that * interrupt handled in ethernet drivercode. 
*/ for (i = 0; i < PHY_MAX_ADDR; i++) bus->irq[i] = PHY_MAC_INTERRUPT; data = bus->priv; data->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(data->base)) { ret = PTR_ERR(data->base); goto err_out_free_mdiobus; } ret = of_mdiobus_register(bus, np); if (ret < 0) goto err_out_free_mdiobus; platform_set_drvdata(pdev, bus); return 0; err_out_free_mdiobus: mdiobus_free(bus); return ret; } static int moxart_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); mdiobus_free(bus); return 0; } static const struct of_device_id moxart_mdio_dt_ids[] = { { .compatible = "moxa,moxart-mdio" }, { } }; MODULE_DEVICE_TABLE(of, moxart_mdio_dt_ids); static struct platform_driver moxart_mdio_driver = { .probe = moxart_mdio_probe, .remove = moxart_mdio_remove, .driver = { .name = "moxart-mdio", .of_match_table = moxart_mdio_dt_ids, }, }; module_platform_driver(moxart_mdio_driver); MODULE_DESCRIPTION("MOXA ART MDIO interface driver"); MODULE_AUTHOR("Jonas Jensen <[email protected]>"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-moxart.c
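moxart_mdio_read() and moxart_mdio_write() share one pattern: kick the MIIRD/MIIWR bit, then poll REG_PHY_CTRL until the hardware clears it, giving up after five 10 ms retries. Below is a standalone sketch of that bounded-retry loop (plain C); reg_read() is a fake stand-in for readl() so the example compiles and runs outside the kernel, and the returned data value is invented.

#include <stdio.h>

#define MIIRD		(1u << 26)
#define MIIRDATA_MASK	0xffffu

static int fake_cycles;

/* pretend the hardware clears MIIRD on the third poll */
static unsigned int reg_read(void)
{
	return (++fake_cycles < 3) ? MIIRD : 0x843du;	/* data in [15:0] */
}

static int poll_read(void)
{
	unsigned int count = 5;
	unsigned int ctrl;

	do {
		ctrl = reg_read();
		if (!(ctrl & MIIRD))
			return ctrl & MIIRDATA_MASK;
		/* the driver waits 10 ms here via mdelay(10) */
	} while (--count);

	return -1;	/* -ETIMEDOUT in the driver */
}

int main(void)
{
	printf("read: 0x%04x\n", poll_read());	/* prints 0x843d */
	return 0;
}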
// SPDX-License-Identifier: GPL-2.0 /* Copyright (c) 2022 Baylibre, SAS. * Author: Jerome Brunet <[email protected]> */ #include <linux/bitfield.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/mdio-mux.h> #include <linux/module.h> #include <linux/platform_device.h> #define ETH_REG2 0x0 #define REG2_PHYID GENMASK(21, 0) #define EPHY_GXL_ID 0x110181 #define REG2_LEDACT GENMASK(23, 22) #define REG2_LEDLINK GENMASK(25, 24) #define REG2_DIV4SEL BIT(27) #define REG2_ADCBYPASS BIT(30) #define REG2_CLKINSEL BIT(31) #define ETH_REG3 0x4 #define REG3_ENH BIT(3) #define REG3_CFGMODE GENMASK(6, 4) #define REG3_AUTOMDIX BIT(7) #define REG3_PHYADDR GENMASK(12, 8) #define REG3_PWRUPRST BIT(21) #define REG3_PWRDOWN BIT(22) #define REG3_LEDPOL BIT(23) #define REG3_PHYMDI BIT(26) #define REG3_CLKINEN BIT(29) #define REG3_PHYIP BIT(30) #define REG3_PHYEN BIT(31) #define ETH_REG4 0x8 #define REG4_PWRUPRSTSIG BIT(0) #define MESON_GXL_MDIO_EXTERNAL_ID 0 #define MESON_GXL_MDIO_INTERNAL_ID 1 struct gxl_mdio_mux { void __iomem *regs; void *mux_handle; }; static void gxl_enable_internal_mdio(struct gxl_mdio_mux *priv) { u32 val; /* Setup the internal phy */ val = (REG3_ENH | FIELD_PREP(REG3_CFGMODE, 0x7) | REG3_AUTOMDIX | FIELD_PREP(REG3_PHYADDR, 8) | REG3_LEDPOL | REG3_PHYMDI | REG3_CLKINEN | REG3_PHYIP); writel(REG4_PWRUPRSTSIG, priv->regs + ETH_REG4); writel(val, priv->regs + ETH_REG3); mdelay(10); /* NOTE: The HW kept the phy id configurable at runtime. * The id below is arbitrary. It is the one used in the vendor code. * The only constraint is that it must match the one in * drivers/net/phy/meson-gxl.c to properly match the PHY. */ writel(FIELD_PREP(REG2_PHYID, EPHY_GXL_ID), priv->regs + ETH_REG2); /* Enable the internal phy */ val |= REG3_PHYEN; writel(val, priv->regs + ETH_REG3); writel(0, priv->regs + ETH_REG4); /* The phy needs a bit of time to power up */ mdelay(10); } static void gxl_enable_external_mdio(struct gxl_mdio_mux *priv) { /* Reset the mdio bus mux to the external phy */ writel(0, priv->regs + ETH_REG3); } static int gxl_mdio_switch_fn(int current_child, int desired_child, void *data) { struct gxl_mdio_mux *priv = dev_get_drvdata(data); if (current_child == desired_child) return 0; switch (desired_child) { case MESON_GXL_MDIO_EXTERNAL_ID: gxl_enable_external_mdio(priv); break; case MESON_GXL_MDIO_INTERNAL_ID: gxl_enable_internal_mdio(priv); break; default: return -EINVAL; } return 0; } static const struct of_device_id gxl_mdio_mux_match[] = { { .compatible = "amlogic,gxl-mdio-mux", }, {}, }; MODULE_DEVICE_TABLE(of, gxl_mdio_mux_match); static int gxl_mdio_mux_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct gxl_mdio_mux *priv; struct clk *rclk; int ret; priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; platform_set_drvdata(pdev, priv); priv->regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->regs)) return PTR_ERR(priv->regs); rclk = devm_clk_get_enabled(dev, "ref"); if (IS_ERR(rclk)) return dev_err_probe(dev, PTR_ERR(rclk), "failed to get reference clock\n"); ret = mdio_mux_init(dev, dev->of_node, gxl_mdio_switch_fn, &priv->mux_handle, dev, NULL); if (ret) dev_err_probe(dev, ret, "mdio multiplexer init failed\n"); return ret; } static int gxl_mdio_mux_remove(struct platform_device *pdev) { struct gxl_mdio_mux *priv = platform_get_drvdata(pdev); mdio_mux_uninit(priv->mux_handle); return 0; } static struct platform_driver gxl_mdio_mux_driver = { .probe = gxl_mdio_mux_probe, .remove = 
gxl_mdio_mux_remove, .driver = { .name = "gxl-mdio-mux", .of_match_table = gxl_mdio_mux_match, }, }; module_platform_driver(gxl_mdio_mux_driver); MODULE_DESCRIPTION("Amlogic GXL MDIO multiplexer driver"); MODULE_AUTHOR("Jerome Brunet <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/net/mdio/mdio-mux-meson-gxl.c
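gxl_enable_internal_mdio() programs ETH_REG3 twice: first with the full PHY configuration, then once more with REG3_PHYEN OR'd in, so the embedded PHY only powers up after it has been configured. The standalone sketch below (plain C) recomputes those two register values; the field layout is taken from the defines above and the printed numbers are only for inspection.

#include <stdio.h>

#define REG3_ENH	(1u << 3)
#define REG3_CFGMODE(x)	((unsigned int)(x) << 4)	/* GENMASK(6, 4) */
#define REG3_AUTOMDIX	(1u << 7)
#define REG3_PHYADDR(x)	((unsigned int)(x) << 8)	/* GENMASK(12, 8) */
#define REG3_LEDPOL	(1u << 23)
#define REG3_PHYMDI	(1u << 26)
#define REG3_CLKINEN	(1u << 29)
#define REG3_PHYIP	(1u << 30)
#define REG3_PHYEN	(1u << 31)

int main(void)
{
	/* same configuration as gxl_enable_internal_mdio() */
	unsigned int val = REG3_ENH | REG3_CFGMODE(0x7) | REG3_AUTOMDIX |
			   REG3_PHYADDR(8) | REG3_LEDPOL | REG3_PHYMDI |
			   REG3_CLKINEN | REG3_PHYIP;

	printf("setup:  0x%08x\n", val);		/* 0x648008f8 */
	printf("enable: 0x%08x\n", val | REG3_PHYEN);	/* 0xe48008f8 */
	return 0;
}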
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause /* Copyright (c) 2015, The Linux Foundation. All rights reserved. */ /* Copyright (c) 2020 Sartura Ltd. */ #include <linux/delay.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/clk.h> #define MDIO_MODE_REG 0x40 #define MDIO_ADDR_REG 0x44 #define MDIO_DATA_WRITE_REG 0x48 #define MDIO_DATA_READ_REG 0x4c #define MDIO_CMD_REG 0x50 #define MDIO_CMD_ACCESS_BUSY BIT(16) #define MDIO_CMD_ACCESS_START BIT(8) #define MDIO_CMD_ACCESS_CODE_READ 0 #define MDIO_CMD_ACCESS_CODE_WRITE 1 #define MDIO_CMD_ACCESS_CODE_C45_ADDR 0 #define MDIO_CMD_ACCESS_CODE_C45_WRITE 1 #define MDIO_CMD_ACCESS_CODE_C45_READ 2 /* 0 = Clause 22, 1 = Clause 45 */ #define MDIO_MODE_C45 BIT(8) #define IPQ4019_MDIO_TIMEOUT 10000 #define IPQ4019_MDIO_SLEEP 10 /* MDIO clock source frequency is fixed to 100M */ #define IPQ_MDIO_CLK_RATE 100000000 #define IPQ_PHY_SET_DELAY_US 100000 struct ipq4019_mdio_data { void __iomem *membase; void __iomem *eth_ldo_rdy; struct clk *mdio_clk; }; static int ipq4019_mdio_wait_busy(struct mii_bus *bus) { struct ipq4019_mdio_data *priv = bus->priv; unsigned int busy; return readl_poll_timeout(priv->membase + MDIO_CMD_REG, busy, (busy & MDIO_CMD_ACCESS_BUSY) == 0, IPQ4019_MDIO_SLEEP, IPQ4019_MDIO_TIMEOUT); } static int ipq4019_mdio_read_c45(struct mii_bus *bus, int mii_id, int mmd, int reg) { struct ipq4019_mdio_data *priv = bus->priv; unsigned int data; unsigned int cmd; if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; data = readl(priv->membase + MDIO_MODE_REG); data |= MDIO_MODE_C45; writel(data, priv->membase + MDIO_MODE_REG); /* issue the phy address and mmd */ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG); /* issue reg */ writel(reg, priv->membase + MDIO_DATA_WRITE_REG); cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR; /* issue read command */ writel(cmd, priv->membase + MDIO_CMD_REG); /* Wait read complete */ if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_READ; writel(cmd, priv->membase + MDIO_CMD_REG); if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; /* Read and return data */ return readl(priv->membase + MDIO_DATA_READ_REG); } static int ipq4019_mdio_read_c22(struct mii_bus *bus, int mii_id, int regnum) { struct ipq4019_mdio_data *priv = bus->priv; unsigned int data; unsigned int cmd; if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; data = readl(priv->membase + MDIO_MODE_REG); data &= ~MDIO_MODE_C45; writel(data, priv->membase + MDIO_MODE_REG); /* issue the phy address and reg */ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG); cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_READ; /* issue read command */ writel(cmd, priv->membase + MDIO_CMD_REG); /* Wait read complete */ if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; /* Read and return data */ return readl(priv->membase + MDIO_DATA_READ_REG); } static int ipq4019_mdio_write_c45(struct mii_bus *bus, int mii_id, int mmd, int reg, u16 value) { struct ipq4019_mdio_data *priv = bus->priv; unsigned int data; unsigned int cmd; if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; data = readl(priv->membase + MDIO_MODE_REG); data |= MDIO_MODE_C45; writel(data, priv->membase + MDIO_MODE_REG); /* issue the phy address and mmd */ writel((mii_id << 8) | mmd, priv->membase + MDIO_ADDR_REG); /* issue reg */ 
writel(reg, priv->membase + MDIO_DATA_WRITE_REG); cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_ADDR; writel(cmd, priv->membase + MDIO_CMD_REG); if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; /* issue write data */ writel(value, priv->membase + MDIO_DATA_WRITE_REG); cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_C45_WRITE; writel(cmd, priv->membase + MDIO_CMD_REG); /* Wait write complete */ if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; return 0; } static int ipq4019_mdio_write_c22(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct ipq4019_mdio_data *priv = bus->priv; unsigned int data; unsigned int cmd; if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; /* Enter Clause 22 mode */ data = readl(priv->membase + MDIO_MODE_REG); data &= ~MDIO_MODE_C45; writel(data, priv->membase + MDIO_MODE_REG); /* issue the phy address and reg */ writel((mii_id << 8) | regnum, priv->membase + MDIO_ADDR_REG); /* issue write data */ writel(value, priv->membase + MDIO_DATA_WRITE_REG); /* issue write command */ cmd = MDIO_CMD_ACCESS_START | MDIO_CMD_ACCESS_CODE_WRITE; writel(cmd, priv->membase + MDIO_CMD_REG); /* Wait write complete */ if (ipq4019_mdio_wait_busy(bus)) return -ETIMEDOUT; return 0; } static int ipq_mdio_reset(struct mii_bus *bus) { struct ipq4019_mdio_data *priv = bus->priv; u32 val; int ret; /* To indicate CMN_PLL that ethernet_ldo has been ready if platform resource 1 * is specified in the device tree. */ if (priv->eth_ldo_rdy) { val = readl(priv->eth_ldo_rdy); val |= BIT(0); writel(val, priv->eth_ldo_rdy); fsleep(IPQ_PHY_SET_DELAY_US); } /* Configure MDIO clock source frequency if clock is specified in the device tree */ ret = clk_set_rate(priv->mdio_clk, IPQ_MDIO_CLK_RATE); if (ret) return ret; ret = clk_prepare_enable(priv->mdio_clk); if (ret == 0) mdelay(10); return ret; } static int ipq4019_mdio_probe(struct platform_device *pdev) { struct ipq4019_mdio_data *priv; struct mii_bus *bus; struct resource *res; int ret; bus = devm_mdiobus_alloc_size(&pdev->dev, sizeof(*priv)); if (!bus) return -ENOMEM; priv = bus->priv; priv->membase = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->membase)) return PTR_ERR(priv->membase); priv->mdio_clk = devm_clk_get_optional(&pdev->dev, "gcc_mdio_ahb_clk"); if (IS_ERR(priv->mdio_clk)) return PTR_ERR(priv->mdio_clk); /* The platform resource is provided on the chipset IPQ5018 */ /* This resource is optional */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) priv->eth_ldo_rdy = devm_ioremap_resource(&pdev->dev, res); bus->name = "ipq4019_mdio"; bus->read = ipq4019_mdio_read_c22; bus->write = ipq4019_mdio_write_c22; bus->read_c45 = ipq4019_mdio_read_c45; bus->write_c45 = ipq4019_mdio_write_c45; bus->reset = ipq_mdio_reset; bus->parent = &pdev->dev; snprintf(bus->id, MII_BUS_ID_SIZE, "%s%d", pdev->name, pdev->id); ret = of_mdiobus_register(bus, pdev->dev.of_node); if (ret) { dev_err(&pdev->dev, "Cannot register MDIO bus!\n"); return ret; } platform_set_drvdata(pdev, bus); return 0; } static int ipq4019_mdio_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); mdiobus_unregister(bus); return 0; } static const struct of_device_id ipq4019_mdio_dt_ids[] = { { .compatible = "qcom,ipq4019-mdio" }, { .compatible = "qcom,ipq5018-mdio" }, { } }; MODULE_DEVICE_TABLE(of, ipq4019_mdio_dt_ids); static struct platform_driver ipq4019_mdio_driver = { .probe = ipq4019_mdio_probe, .remove = ipq4019_mdio_remove, .driver = { .name = "ipq4019-mdio", .of_match_table = 
ipq4019_mdio_dt_ids, }, }; module_platform_driver(ipq4019_mdio_driver); MODULE_DESCRIPTION("ipq4019 MDIO interface driver"); MODULE_AUTHOR("Qualcomm Atheros"); MODULE_LICENSE("Dual BSD/GPL");
linux-master
drivers/net/mdio/mdio-ipq4019.c
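The ipq4019 Clause-45 paths are two-phase: an ADDR command first latches the 16-bit register number through MDIO_DATA_WRITE_REG, and only then does a READ (or WRITE) command move the data, with a busy-poll between the phases. The standalone sketch below traces that sequence; hw_write() is a fake stand-in for writel() so the example runs anywhere, and the command encodings mirror the defines above.

#include <stdio.h>

#define CMD_START	(1u << 8)	/* MDIO_CMD_ACCESS_START */
#define CMD_C45_ADDR	0u		/* MDIO_CMD_ACCESS_CODE_C45_ADDR */
#define CMD_C45_READ	2u		/* MDIO_CMD_ACCESS_CODE_C45_READ */

static void hw_write(const char *reg, unsigned int val)
{
	printf("write %-16s = 0x%08x\n", reg, val);
}

int main(void)
{
	unsigned int phy = 4, mmd = 1 /* PMA/PMD */, reg = 0x0007;

	hw_write("MDIO_ADDR_REG", (phy << 8) | mmd);	/* PHY addr + MMD */
	hw_write("MDIO_DATA_WRITE", reg);		/* register number */
	hw_write("MDIO_CMD_REG", CMD_START | CMD_C45_ADDR);
	/* ...the driver polls MDIO_CMD_ACCESS_BUSY here... */
	hw_write("MDIO_CMD_REG", CMD_START | CMD_C45_READ);
	/* ...poll again, then read MDIO_DATA_READ_REG for the result */
	return 0;
}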
// SPDX-License-Identifier: (GPL-2.0 OR MIT) /* * Driver for the MDIO interface of Microsemi network switches. * * Author: Alexandre Belloni <[email protected]> * Copyright (c) 2017 Microsemi Corporation */ #include <linux/bitops.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/mdio/mdio-mscc-miim.h> #include <linux/mfd/ocelot.h> #include <linux/module.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/regmap.h> #define MSCC_MIIM_REG_STATUS 0x0 #define MSCC_MIIM_STATUS_STAT_PENDING BIT(2) #define MSCC_MIIM_STATUS_STAT_BUSY BIT(3) #define MSCC_MIIM_REG_CMD 0x8 #define MSCC_MIIM_CMD_OPR_WRITE BIT(1) #define MSCC_MIIM_CMD_OPR_READ BIT(2) #define MSCC_MIIM_CMD_WRDATA_SHIFT 4 #define MSCC_MIIM_CMD_REGAD_SHIFT 20 #define MSCC_MIIM_CMD_PHYAD_SHIFT 25 #define MSCC_MIIM_CMD_VLD BIT(31) #define MSCC_MIIM_REG_DATA 0xC #define MSCC_MIIM_DATA_ERROR (BIT(16) | BIT(17)) #define MSCC_MIIM_REG_CFG 0x10 #define MSCC_MIIM_CFG_PRESCALE_MASK GENMASK(7, 0) #define MSCC_PHY_REG_PHY_CFG 0x0 #define PHY_CFG_PHY_ENA (BIT(0) | BIT(1) | BIT(2) | BIT(3)) #define PHY_CFG_PHY_COMMON_RESET BIT(4) #define PHY_CFG_PHY_RESET (BIT(5) | BIT(6) | BIT(7) | BIT(8)) #define MSCC_PHY_REG_PHY_STATUS 0x4 #define LAN966X_CUPHY_COMMON_CFG 0x0 #define CUPHY_COMMON_CFG_RESET_N BIT(0) struct mscc_miim_info { unsigned int phy_reset_offset; unsigned int phy_reset_bits; }; struct mscc_miim_dev { struct regmap *regs; int mii_status_offset; bool ignore_read_errors; struct regmap *phy_regs; const struct mscc_miim_info *info; struct clk *clk; u32 bus_freq; }; /* When high resolution timers aren't built-in: we can't use usleep_range() as * we would sleep way too long. Use udelay() instead. 
*/ #define mscc_readx_poll_timeout(op, addr, val, cond, delay_us, timeout_us)\ ({ \ if (!IS_ENABLED(CONFIG_HIGH_RES_TIMERS)) \ readx_poll_timeout_atomic(op, addr, val, cond, delay_us, \ timeout_us); \ readx_poll_timeout(op, addr, val, cond, delay_us, timeout_us); \ }) static int mscc_miim_status(struct mii_bus *bus) { struct mscc_miim_dev *miim = bus->priv; int val, ret; ret = regmap_read(miim->regs, MSCC_MIIM_REG_STATUS + miim->mii_status_offset, &val); if (ret < 0) { WARN_ONCE(1, "mscc miim status read error %d\n", ret); return ret; } return val; } static int mscc_miim_wait_ready(struct mii_bus *bus) { u32 val; return mscc_readx_poll_timeout(mscc_miim_status, bus, val, !(val & MSCC_MIIM_STATUS_STAT_BUSY), 50, 10000); } static int mscc_miim_wait_pending(struct mii_bus *bus) { u32 val; return mscc_readx_poll_timeout(mscc_miim_status, bus, val, !(val & MSCC_MIIM_STATUS_STAT_PENDING), 50, 10000); } static int mscc_miim_read(struct mii_bus *bus, int mii_id, int regnum) { struct mscc_miim_dev *miim = bus->priv; u32 val; int ret; ret = mscc_miim_wait_pending(bus); if (ret) goto out; ret = regmap_write(miim->regs, MSCC_MIIM_REG_CMD + miim->mii_status_offset, MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | MSCC_MIIM_CMD_OPR_READ); if (ret < 0) { WARN_ONCE(1, "mscc miim write cmd reg error %d\n", ret); goto out; } ret = mscc_miim_wait_ready(bus); if (ret) goto out; ret = regmap_read(miim->regs, MSCC_MIIM_REG_DATA + miim->mii_status_offset, &val); if (ret < 0) { WARN_ONCE(1, "mscc miim read data reg error %d\n", ret); goto out; } if (!miim->ignore_read_errors && !!(val & MSCC_MIIM_DATA_ERROR)) { ret = -EIO; goto out; } ret = val & 0xFFFF; out: return ret; } static int mscc_miim_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct mscc_miim_dev *miim = bus->priv; int ret; ret = mscc_miim_wait_pending(bus); if (ret < 0) goto out; ret = regmap_write(miim->regs, MSCC_MIIM_REG_CMD + miim->mii_status_offset, MSCC_MIIM_CMD_VLD | (mii_id << MSCC_MIIM_CMD_PHYAD_SHIFT) | (regnum << MSCC_MIIM_CMD_REGAD_SHIFT) | (value << MSCC_MIIM_CMD_WRDATA_SHIFT) | MSCC_MIIM_CMD_OPR_WRITE); if (ret < 0) WARN_ONCE(1, "mscc miim write error %d\n", ret); out: return ret; } static int mscc_miim_reset(struct mii_bus *bus) { struct mscc_miim_dev *miim = bus->priv; unsigned int offset, bits; int ret; if (!miim->phy_regs) return 0; offset = miim->info->phy_reset_offset; bits = miim->info->phy_reset_bits; ret = regmap_update_bits(miim->phy_regs, offset, bits, 0); if (ret < 0) { WARN_ONCE(1, "mscc reset set error %d\n", ret); return ret; } ret = regmap_update_bits(miim->phy_regs, offset, bits, bits); if (ret < 0) { WARN_ONCE(1, "mscc reset clear error %d\n", ret); return ret; } mdelay(500); return 0; } static const struct regmap_config mscc_miim_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, }; static const struct regmap_config mscc_miim_phy_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, .name = "phy", }; int mscc_miim_setup(struct device *dev, struct mii_bus **pbus, const char *name, struct regmap *mii_regmap, int status_offset, bool ignore_read_errors) { struct mscc_miim_dev *miim; struct mii_bus *bus; bus = devm_mdiobus_alloc_size(dev, sizeof(*miim)); if (!bus) return -ENOMEM; bus->name = name; bus->read = mscc_miim_read; bus->write = mscc_miim_write; bus->reset = mscc_miim_reset; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-mii", dev_name(dev)); bus->parent = dev; miim = bus->priv; *pbus = bus; miim->regs = mii_regmap; 
miim->mii_status_offset = status_offset; miim->ignore_read_errors = ignore_read_errors; *pbus = bus; return 0; } EXPORT_SYMBOL(mscc_miim_setup); static int mscc_miim_clk_set(struct mii_bus *bus) { struct mscc_miim_dev *miim = bus->priv; unsigned long rate; u32 div; /* Keep the current settings */ if (!miim->bus_freq) return 0; rate = clk_get_rate(miim->clk); div = DIV_ROUND_UP(rate, 2 * miim->bus_freq) - 1; if (div == 0 || div & ~MSCC_MIIM_CFG_PRESCALE_MASK) { dev_err(&bus->dev, "Incorrect MDIO clock frequency\n"); return -EINVAL; } return regmap_update_bits(miim->regs, MSCC_MIIM_REG_CFG, MSCC_MIIM_CFG_PRESCALE_MASK, div); } static int mscc_miim_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct regmap *mii_regmap, *phy_regmap; struct device *dev = &pdev->dev; struct mscc_miim_dev *miim; struct mii_bus *bus; int ret; mii_regmap = ocelot_regmap_from_resource(pdev, 0, &mscc_miim_regmap_config); if (IS_ERR(mii_regmap)) return dev_err_probe(dev, PTR_ERR(mii_regmap), "Unable to create MIIM regmap\n"); /* This resource is optional */ phy_regmap = ocelot_regmap_from_resource_optional(pdev, 1, &mscc_miim_phy_regmap_config); if (IS_ERR(phy_regmap)) return dev_err_probe(dev, PTR_ERR(phy_regmap), "Unable to create phy register regmap\n"); ret = mscc_miim_setup(dev, &bus, "mscc_miim", mii_regmap, 0, false); if (ret < 0) { dev_err(dev, "Unable to setup the MDIO bus\n"); return ret; } miim = bus->priv; miim->phy_regs = phy_regmap; miim->info = device_get_match_data(dev); if (!miim->info) return -EINVAL; miim->clk = devm_clk_get_optional(dev, NULL); if (IS_ERR(miim->clk)) return PTR_ERR(miim->clk); of_property_read_u32(np, "clock-frequency", &miim->bus_freq); if (miim->bus_freq && !miim->clk) { dev_err(dev, "cannot use clock-frequency without a clock\n"); return -EINVAL; } ret = clk_prepare_enable(miim->clk); if (ret) return ret; ret = mscc_miim_clk_set(bus); if (ret) goto out_disable_clk; ret = of_mdiobus_register(bus, np); if (ret < 0) { dev_err(dev, "Cannot register MDIO bus (%d)\n", ret); goto out_disable_clk; } platform_set_drvdata(pdev, bus); return 0; out_disable_clk: clk_disable_unprepare(miim->clk); return ret; } static int mscc_miim_remove(struct platform_device *pdev) { struct mii_bus *bus = platform_get_drvdata(pdev); struct mscc_miim_dev *miim = bus->priv; clk_disable_unprepare(miim->clk); mdiobus_unregister(bus); return 0; } static const struct mscc_miim_info mscc_ocelot_miim_info = { .phy_reset_offset = MSCC_PHY_REG_PHY_CFG, .phy_reset_bits = PHY_CFG_PHY_ENA | PHY_CFG_PHY_COMMON_RESET | PHY_CFG_PHY_RESET, }; static const struct mscc_miim_info microchip_lan966x_miim_info = { .phy_reset_offset = LAN966X_CUPHY_COMMON_CFG, .phy_reset_bits = CUPHY_COMMON_CFG_RESET_N, }; static const struct of_device_id mscc_miim_match[] = { { .compatible = "mscc,ocelot-miim", .data = &mscc_ocelot_miim_info }, { .compatible = "microchip,lan966x-miim", .data = &microchip_lan966x_miim_info }, { } }; MODULE_DEVICE_TABLE(of, mscc_miim_match); static struct platform_driver mscc_miim_driver = { .probe = mscc_miim_probe, .remove = mscc_miim_remove, .driver = { .name = "mscc-miim", .of_match_table = mscc_miim_match, }, }; module_platform_driver(mscc_miim_driver); MODULE_DESCRIPTION("Microsemi MIIM driver"); MODULE_AUTHOR("Alexandre Belloni <[email protected]>"); MODULE_LICENSE("Dual MIT/GPL");
linux-master
drivers/net/mdio/mdio-mscc-miim.c
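mscc_miim_clk_set() derives the MIIM prescaler as DIV_ROUND_UP(rate, 2 * bus_freq) - 1 and rejects a result of zero or anything that overflows the 8-bit field. The same math as a standalone function (plain C, DIV_ROUND_UP open-coded); the example clock rates are arbitrary.

#include <stdio.h>

#define PRESCALE_MASK	0xffu	/* MSCC_MIIM_CFG_PRESCALE_MASK */

static int miim_prescaler(unsigned long rate, unsigned long bus_freq)
{
	/* DIV_ROUND_UP(rate, 2 * bus_freq) - 1 */
	unsigned long div = (rate + 2 * bus_freq - 1) / (2 * bus_freq) - 1;

	if (div == 0 || (div & ~(unsigned long)PRESCALE_MASK))
		return -1;	/* -EINVAL in the driver */
	return (int)div;
}

int main(void)
{
	/* e.g. a 250 MHz system clock and a 2.5 MHz target MDC */
	printf("div = %d\n", miim_prescaler(250000000UL, 2500000UL)); /* 49 */
	return 0;
}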
// SPDX-License-Identifier: GPL-2.0 /* * Bitbanged MDIO support. * * Author: Scott Wood <[email protected]> * Copyright (c) 2007 Freescale Semiconductor * * Based on CPM2 MDIO code which is: * * Copyright (c) 2003 Intracom S.A. * by Pantelis Antoniou <[email protected]> * * 2005 (c) MontaVista Software, Inc. * Vitaly Bordug <[email protected]> */ #include <linux/delay.h> #include <linux/mdio-bitbang.h> #include <linux/module.h> #include <linux/types.h> #define MDIO_READ 2 #define MDIO_WRITE 1 #define MDIO_C45 (1<<15) #define MDIO_C45_ADDR (MDIO_C45 | 0) #define MDIO_C45_READ (MDIO_C45 | 3) #define MDIO_C45_WRITE (MDIO_C45 | 1) #define MDIO_SETUP_TIME 10 #define MDIO_HOLD_TIME 10 /* Minimum MDC period is 400 ns, plus some margin for error. MDIO_DELAY * is done twice per period. */ #define MDIO_DELAY 250 /* The PHY may take up to 300 ns to produce data, plus some margin * for error. */ #define MDIO_READ_DELAY 350 /* MDIO must already be configured as output. */ static void mdiobb_send_bit(struct mdiobb_ctrl *ctrl, int val) { const struct mdiobb_ops *ops = ctrl->ops; ops->set_mdio_data(ctrl, val); ndelay(MDIO_DELAY); ops->set_mdc(ctrl, 1); ndelay(MDIO_DELAY); ops->set_mdc(ctrl, 0); } /* MDIO must already be configured as input. */ static int mdiobb_get_bit(struct mdiobb_ctrl *ctrl) { const struct mdiobb_ops *ops = ctrl->ops; ndelay(MDIO_DELAY); ops->set_mdc(ctrl, 1); ndelay(MDIO_READ_DELAY); ops->set_mdc(ctrl, 0); return ops->get_mdio_data(ctrl); } /* MDIO must already be configured as output. */ static void mdiobb_send_num(struct mdiobb_ctrl *ctrl, u16 val, int bits) { int i; for (i = bits - 1; i >= 0; i--) mdiobb_send_bit(ctrl, (val >> i) & 1); } /* MDIO must already be configured as input. */ static u16 mdiobb_get_num(struct mdiobb_ctrl *ctrl, int bits) { int i; u16 ret = 0; for (i = bits - 1; i >= 0; i--) { ret <<= 1; ret |= mdiobb_get_bit(ctrl); } return ret; } /* Utility to send the preamble, address, and * register (common to read and write). */ static void mdiobb_cmd(struct mdiobb_ctrl *ctrl, int op, u8 phy, u8 reg) { const struct mdiobb_ops *ops = ctrl->ops; int i; ops->set_mdio_dir(ctrl, 1); /* * Send a 32 bit preamble ('1's) with an extra '1' bit for good * measure. The IEEE spec says this is a PHY optional * requirement. The AMD 79C874 requires one after power up and * one after a MII communications error. This means that we are * doing more preambles than we need, but it is safer and will be * much more robust. */ for (i = 0; i < 32; i++) mdiobb_send_bit(ctrl, 1); /* send the start bit (01) and the read opcode (10) or write (01). Clause 45 operation uses 00 for the start and 11, 10 for read/write */ mdiobb_send_bit(ctrl, 0); if (op & MDIO_C45) mdiobb_send_bit(ctrl, 0); else mdiobb_send_bit(ctrl, 1); mdiobb_send_bit(ctrl, (op >> 1) & 1); mdiobb_send_bit(ctrl, (op >> 0) & 1); mdiobb_send_num(ctrl, phy, 5); mdiobb_send_num(ctrl, reg, 5); } /* In clause 45 mode all commands are prefixed by MDIO_ADDR to specify the lower 16 bits of the 21 bit address. This transfer is done identically to a MDIO_WRITE except for a different code. Theoretically clause 45 and normal devices can exist on the same bus. Normal devices should ignore the MDIO_ADDR phase. 
*/ static void mdiobb_cmd_addr(struct mdiobb_ctrl *ctrl, int phy, int dev_addr, int reg) { mdiobb_cmd(ctrl, MDIO_C45_ADDR, phy, dev_addr); /* send the turnaround (10) */ mdiobb_send_bit(ctrl, 1); mdiobb_send_bit(ctrl, 0); mdiobb_send_num(ctrl, reg, 16); ctrl->ops->set_mdio_dir(ctrl, 0); mdiobb_get_bit(ctrl); } static int mdiobb_read_common(struct mii_bus *bus, int phy) { struct mdiobb_ctrl *ctrl = bus->priv; int ret, i; ctrl->ops->set_mdio_dir(ctrl, 0); /* check the turnaround bit: the PHY should be driving it to zero, if this * PHY is listed in phy_ignore_ta_mask as having broken TA, skip that */ if (mdiobb_get_bit(ctrl) != 0 && !(bus->phy_ignore_ta_mask & (1 << phy))) { /* PHY didn't drive TA low -- flush any bits it * may be trying to send. */ for (i = 0; i < 32; i++) mdiobb_get_bit(ctrl); return 0xffff; } ret = mdiobb_get_num(ctrl, 16); mdiobb_get_bit(ctrl); return ret; } int mdiobb_read_c22(struct mii_bus *bus, int phy, int reg) { struct mdiobb_ctrl *ctrl = bus->priv; mdiobb_cmd(ctrl, ctrl->op_c22_read, phy, reg); return mdiobb_read_common(bus, phy); } EXPORT_SYMBOL(mdiobb_read_c22); int mdiobb_read_c45(struct mii_bus *bus, int phy, int devad, int reg) { struct mdiobb_ctrl *ctrl = bus->priv; mdiobb_cmd_addr(ctrl, phy, devad, reg); mdiobb_cmd(ctrl, MDIO_C45_READ, phy, devad); return mdiobb_read_common(bus, phy); } EXPORT_SYMBOL(mdiobb_read_c45); static int mdiobb_write_common(struct mii_bus *bus, u16 val) { struct mdiobb_ctrl *ctrl = bus->priv; /* send the turnaround (10) */ mdiobb_send_bit(ctrl, 1); mdiobb_send_bit(ctrl, 0); mdiobb_send_num(ctrl, val, 16); ctrl->ops->set_mdio_dir(ctrl, 0); mdiobb_get_bit(ctrl); return 0; } int mdiobb_write_c22(struct mii_bus *bus, int phy, int reg, u16 val) { struct mdiobb_ctrl *ctrl = bus->priv; mdiobb_cmd(ctrl, ctrl->op_c22_write, phy, reg); return mdiobb_write_common(bus, val); } EXPORT_SYMBOL(mdiobb_write_c22); int mdiobb_write_c45(struct mii_bus *bus, int phy, int devad, int reg, u16 val) { struct mdiobb_ctrl *ctrl = bus->priv; mdiobb_cmd_addr(ctrl, phy, devad, reg); mdiobb_cmd(ctrl, MDIO_C45_WRITE, phy, devad); return mdiobb_write_common(bus, val); } EXPORT_SYMBOL(mdiobb_write_c45); struct mii_bus *alloc_mdio_bitbang(struct mdiobb_ctrl *ctrl) { struct mii_bus *bus; bus = mdiobus_alloc(); if (!bus) return NULL; __module_get(ctrl->ops->owner); bus->read = mdiobb_read_c22; bus->write = mdiobb_write_c22; bus->read_c45 = mdiobb_read_c45; bus->write_c45 = mdiobb_write_c45; bus->priv = ctrl; if (!ctrl->override_op_c22) { ctrl->op_c22_read = MDIO_READ; ctrl->op_c22_write = MDIO_WRITE; } return bus; } EXPORT_SYMBOL(alloc_mdio_bitbang); void free_mdio_bitbang(struct mii_bus *bus) { struct mdiobb_ctrl *ctrl = bus->priv; module_put(ctrl->ops->owner); mdiobus_free(bus); } EXPORT_SYMBOL(free_mdio_bitbang); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/mdio/mdio-bitbang.c
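mdiobb_cmd() clocks the management-frame header out one bit at a time: 32 preamble ones, the start bits, the 2-bit opcode, then the 5-bit PHY and register addresses. The standalone sketch below reuses the same send_bit()/send_num() shape but accumulates the 14 header bits (preamble omitted) into an integer so the Clause-22 framing can be inspected; the example operands are arbitrary.

#include <stdio.h>

static unsigned int frame;

/* stands in for mdiobb_send_bit(): collect instead of driving MDIO */
static void send_bit(int val)
{
	frame = (frame << 1) | (val & 1);
}

static void send_num(unsigned int val, int bits)
{
	for (int i = bits - 1; i >= 0; i--)
		send_bit((val >> i) & 1);
}

int main(void)
{
	int op = 2 /* MDIO_READ */, phy = 1, reg = 3;

	send_bit(0);			/* Clause-22 start bits: 01 */
	send_bit(1);
	send_bit((op >> 1) & 1);	/* read opcode: 10 */
	send_bit(op & 1);
	send_num(phy, 5);		/* PHYAD */
	send_num(reg, 5);		/* REGAD */

	printf("header bits: 0x%04x (14 bits)\n", frame);	/* 0x1823 */
	return 0;
}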
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2015 Broadcom Corporation */ #include <linux/delay.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_platform.h> #include <linux/of_mdio.h> #include <linux/phy.h> #include <linux/platform_device.h> #include <linux/sched.h> #define IPROC_GPHY_MDCDIV 0x1a #define MII_CTRL_OFFSET 0x000 #define MII_CTRL_DIV_SHIFT 0 #define MII_CTRL_PRE_SHIFT 7 #define MII_CTRL_BUSY_SHIFT 8 #define MII_DATA_OFFSET 0x004 #define MII_DATA_MASK 0xffff #define MII_DATA_TA_SHIFT 16 #define MII_DATA_TA_VAL 2 #define MII_DATA_RA_SHIFT 18 #define MII_DATA_PA_SHIFT 23 #define MII_DATA_OP_SHIFT 28 #define MII_DATA_OP_WRITE 1 #define MII_DATA_OP_READ 2 #define MII_DATA_SB_SHIFT 30 struct iproc_mdio_priv { struct mii_bus *mii_bus; void __iomem *base; }; static inline int iproc_mdio_wait_for_idle(void __iomem *base) { u32 val; unsigned int timeout = 1000; /* loop for 1s */ do { val = readl(base + MII_CTRL_OFFSET); if ((val & BIT(MII_CTRL_BUSY_SHIFT)) == 0) return 0; usleep_range(1000, 2000); } while (timeout--); return -ETIMEDOUT; } static inline void iproc_mdio_config_clk(void __iomem *base) { u32 val; val = (IPROC_GPHY_MDCDIV << MII_CTRL_DIV_SHIFT) | BIT(MII_CTRL_PRE_SHIFT); writel(val, base + MII_CTRL_OFFSET); } static int iproc_mdio_read(struct mii_bus *bus, int phy_id, int reg) { struct iproc_mdio_priv *priv = bus->priv; u32 cmd; int rc; rc = iproc_mdio_wait_for_idle(priv->base); if (rc) return rc; /* Prepare the read operation */ cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | (reg << MII_DATA_RA_SHIFT) | (phy_id << MII_DATA_PA_SHIFT) | BIT(MII_DATA_SB_SHIFT) | (MII_DATA_OP_READ << MII_DATA_OP_SHIFT); writel(cmd, priv->base + MII_DATA_OFFSET); rc = iproc_mdio_wait_for_idle(priv->base); if (rc) return rc; cmd = readl(priv->base + MII_DATA_OFFSET) & MII_DATA_MASK; return cmd; } static int iproc_mdio_write(struct mii_bus *bus, int phy_id, int reg, u16 val) { struct iproc_mdio_priv *priv = bus->priv; u32 cmd; int rc; rc = iproc_mdio_wait_for_idle(priv->base); if (rc) return rc; /* Prepare the write operation */ cmd = (MII_DATA_TA_VAL << MII_DATA_TA_SHIFT) | (reg << MII_DATA_RA_SHIFT) | (phy_id << MII_DATA_PA_SHIFT) | BIT(MII_DATA_SB_SHIFT) | (MII_DATA_OP_WRITE << MII_DATA_OP_SHIFT) | ((u32)(val) & MII_DATA_MASK); writel(cmd, priv->base + MII_DATA_OFFSET); rc = iproc_mdio_wait_for_idle(priv->base); if (rc) return rc; return 0; } static int iproc_mdio_probe(struct platform_device *pdev) { struct iproc_mdio_priv *priv; struct mii_bus *bus; int rc; priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; priv->base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(priv->base)) { dev_err(&pdev->dev, "failed to ioremap register\n"); return PTR_ERR(priv->base); } priv->mii_bus = mdiobus_alloc(); if (!priv->mii_bus) { dev_err(&pdev->dev, "MDIO bus alloc failed\n"); return -ENOMEM; } bus = priv->mii_bus; bus->priv = priv; bus->name = "iProc MDIO bus"; snprintf(bus->id, MII_BUS_ID_SIZE, "%s-%d", pdev->name, pdev->id); bus->parent = &pdev->dev; bus->read = iproc_mdio_read; bus->write = iproc_mdio_write; iproc_mdio_config_clk(priv->base); rc = of_mdiobus_register(bus, pdev->dev.of_node); if (rc) { dev_err(&pdev->dev, "MDIO bus registration failed\n"); goto err_iproc_mdio; } platform_set_drvdata(pdev, priv); dev_info(&pdev->dev, "Broadcom iProc MDIO bus registered\n"); return 0; err_iproc_mdio: mdiobus_free(bus); return rc; } static int iproc_mdio_remove(struct platform_device 
*pdev) { struct iproc_mdio_priv *priv = platform_get_drvdata(pdev); mdiobus_unregister(priv->mii_bus); mdiobus_free(priv->mii_bus); return 0; } #ifdef CONFIG_PM_SLEEP static int iproc_mdio_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct iproc_mdio_priv *priv = platform_get_drvdata(pdev); /* restore the mii clock configuration */ iproc_mdio_config_clk(priv->base); return 0; } static const struct dev_pm_ops iproc_mdio_pm_ops = { .resume = iproc_mdio_resume }; #endif /* CONFIG_PM_SLEEP */ static const struct of_device_id iproc_mdio_of_match[] = { { .compatible = "brcm,iproc-mdio", }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, iproc_mdio_of_match); static struct platform_driver iproc_mdio_driver = { .driver = { .name = "iproc-mdio", .of_match_table = iproc_mdio_of_match, #ifdef CONFIG_PM_SLEEP .pm = &iproc_mdio_pm_ops, #endif }, .probe = iproc_mdio_probe, .remove = iproc_mdio_remove, }; module_platform_driver(iproc_mdio_driver); MODULE_AUTHOR("Broadcom Corporation"); MODULE_DESCRIPTION("Broadcom iProc MDIO bus controller"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:iproc-mdio");
linux-master
drivers/net/mdio/mdio-bcm-iproc.c
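iproc_mdio_read() composes its entire request as one write to MII_DATA_OFFSET: start bit, opcode, PHY and register addresses, and the turnaround value. A standalone recomputation of that command word (plain C); the shift positions come from the defines above and the example operands are arbitrary.

#include <stdio.h>

#define TA_SHIFT	16
#define TA_VAL		2u
#define RA_SHIFT	18
#define PA_SHIFT	23
#define OP_SHIFT	28
#define OP_READ		2u
#define SB_SHIFT	30

int main(void)
{
	unsigned int phy = 1, reg = 2;
	unsigned int cmd = (TA_VAL << TA_SHIFT) | (reg << RA_SHIFT) |
			   (phy << PA_SHIFT) | (1u << SB_SHIFT) |
			   (OP_READ << OP_SHIFT);

	printf("read cmd: 0x%08x\n", cmd);	/* prints 0x608a0000 */
	return 0;
}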
// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI helpers for the MDIO (Ethernet PHY) API
 *
 * This file provides helper functions for extracting PHY device information
 * out of the ACPI ASL and using it to populate an mii_bus.
 */

#include <linux/acpi.h>
#include <linux/acpi_mdio.h>
#include <linux/bits.h>
#include <linux/dev_printk.h>
#include <linux/fwnode_mdio.h>
#include <linux/module.h>
#include <linux/types.h>

MODULE_AUTHOR("Calvin Johnson <[email protected]>");
MODULE_LICENSE("GPL");

/**
 * __acpi_mdiobus_register - Register mii_bus and create PHYs from the ACPI ASL.
 * @mdio: pointer to mii_bus structure
 * @fwnode: pointer to fwnode of MDIO bus. This fwnode is expected to represent
 * an ACPI device object corresponding to the MDIO bus and its children are
 * expected to correspond to the PHY devices on that bus.
 * @owner: module owning this @mdio object.
 *
 * This function registers the mii_bus structure and registers a phy_device
 * for each child node of @fwnode.
 */
int __acpi_mdiobus_register(struct mii_bus *mdio, struct fwnode_handle *fwnode,
			    struct module *owner)
{
	struct fwnode_handle *child;
	u32 addr;
	int ret;

	/* Mask out all PHYs from auto probing. */
	mdio->phy_mask = GENMASK(31, 0);
	ret = __mdiobus_register(mdio, owner);
	if (ret)
		return ret;

	ACPI_COMPANION_SET(&mdio->dev, to_acpi_device_node(fwnode));

	/* Loop over the child nodes and register a phy_device for each PHY */
	fwnode_for_each_child_node(fwnode, child) {
		ret = acpi_get_local_address(ACPI_HANDLE_FWNODE(child), &addr);
		if (ret || addr >= PHY_MAX_ADDR)
			continue;

		ret = fwnode_mdiobus_register_phy(mdio, child, addr);
		if (ret == -ENODEV)
			dev_err(&mdio->dev,
				"MDIO device at address %d is missing.\n",
				addr);
	}

	return 0;
}
EXPORT_SYMBOL(__acpi_mdiobus_register);
linux-master
drivers/net/mdio/acpi_mdio.c
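A hedged usage sketch for the helper above: a hypothetical platform driver whose MDIO controller is described in ACPI. The foo_* names are invented, it assumes the acpi_mdiobus_register() wrapper from <linux/acpi_mdio.h> that supplies THIS_MODULE to __acpi_mdiobus_register(), and a real driver would still wire up its bus read/write accessors before registering.

#include <linux/acpi_mdio.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/property.h>

static int foo_mdio_probe(struct platform_device *pdev)
{
	struct fwnode_handle *fwnode = dev_fwnode(&pdev->dev);
	struct mii_bus *bus;

	bus = devm_mdiobus_alloc(&pdev->dev);
	if (!bus)
		return -ENOMEM;

	bus->name = "foo-mdio";
	bus->parent = &pdev->dev;
	/* bus->read / bus->write would point at the controller here */
	snprintf(bus->id, MII_BUS_ID_SIZE, "%s", dev_name(&pdev->dev));

	/* registers the bus, then one phy_device per ACPI child node */
	return acpi_mdiobus_register(bus, fwnode);
}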
// SPDX-License-Identifier: GPL-2.0-or-later /* * Janz MODULbus VMOD-ICAN3 CAN Interface Driver * * Copyright (c) 2010 Ira W. Snyder <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/skb.h> #include <linux/can/error.h> #include <linux/mfd/janz.h> #include <asm/io.h> /* the DPM has 64k of memory, organized into 256x 256 byte pages */ #define DPM_NUM_PAGES 256 #define DPM_PAGE_SIZE 256 #define DPM_PAGE_ADDR(p) ((p) * DPM_PAGE_SIZE) /* JANZ ICAN3 "old-style" host interface queue page numbers */ #define QUEUE_OLD_CONTROL 0 #define QUEUE_OLD_RB0 1 #define QUEUE_OLD_RB1 2 #define QUEUE_OLD_WB0 3 #define QUEUE_OLD_WB1 4 /* Janz ICAN3 "old-style" host interface control registers */ #define MSYNC_PEER 0x00 /* ICAN only */ #define MSYNC_LOCL 0x01 /* host only */ #define TARGET_RUNNING 0x02 #define FIRMWARE_STAMP 0x60 /* big endian firmware stamp */ #define MSYNC_RB0 0x01 #define MSYNC_RB1 0x02 #define MSYNC_RBLW 0x04 #define MSYNC_RB_MASK (MSYNC_RB0 | MSYNC_RB1) #define MSYNC_WB0 0x10 #define MSYNC_WB1 0x20 #define MSYNC_WBLW 0x40 #define MSYNC_WB_MASK (MSYNC_WB0 | MSYNC_WB1) /* Janz ICAN3 "new-style" host interface queue page numbers */ #define QUEUE_TOHOST 5 #define QUEUE_FROMHOST_MID 6 #define QUEUE_FROMHOST_HIGH 7 #define QUEUE_FROMHOST_LOW 8 /* The first free page in the DPM is #9 */ #define DPM_FREE_START 9 /* Janz ICAN3 "new-style" and "fast" host interface descriptor flags */ #define DESC_VALID 0x80 #define DESC_WRAP 0x40 #define DESC_INTERRUPT 0x20 #define DESC_IVALID 0x10 #define DESC_LEN(len) (len) /* Janz ICAN3 Firmware Messages */ #define MSG_CONNECTI 0x02 #define MSG_DISCONNECT 0x03 #define MSG_IDVERS 0x04 #define MSG_MSGLOST 0x05 #define MSG_NEWHOSTIF 0x08 #define MSG_INQUIRY 0x0a #define MSG_SETAFILMASK 0x10 #define MSG_INITFDPMQUEUE 0x11 #define MSG_HWCONF 0x12 #define MSG_FMSGLOST 0x15 #define MSG_CEVTIND 0x37 #define MSG_CBTRREQ 0x41 #define MSG_COFFREQ 0x42 #define MSG_CONREQ 0x43 #define MSG_CCONFREQ 0x47 #define MSG_NMTS 0xb0 #define MSG_LMTS 0xb4 /* * Janz ICAN3 CAN Inquiry Message Types * * NOTE: there appears to be a firmware bug here. You must send * NOTE: INQUIRY_STATUS and expect to receive an INQUIRY_EXTENDED * NOTE: response. 
The controller never responds to a message with * NOTE: the INQUIRY_EXTENDED subspec :( */ #define INQUIRY_STATUS 0x00 #define INQUIRY_TERMINATION 0x01 #define INQUIRY_EXTENDED 0x04 /* Janz ICAN3 CAN Set Acceptance Filter Mask Message Types */ #define SETAFILMASK_REJECT 0x00 #define SETAFILMASK_FASTIF 0x02 /* Janz ICAN3 CAN Hardware Configuration Message Types */ #define HWCONF_TERMINATE_ON 0x01 #define HWCONF_TERMINATE_OFF 0x00 /* Janz ICAN3 CAN Event Indication Message Types */ #define CEVTIND_EI 0x01 #define CEVTIND_DOI 0x02 #define CEVTIND_LOST 0x04 #define CEVTIND_FULL 0x08 #define CEVTIND_BEI 0x10 #define CEVTIND_CHIP_SJA1000 0x02 #define ICAN3_BUSERR_QUOTA_MAX 255 /* Janz ICAN3 CAN Frame Conversion */ #define ICAN3_SNGL 0x02 #define ICAN3_ECHO 0x10 #define ICAN3_EFF_RTR 0x40 #define ICAN3_SFF_RTR 0x10 #define ICAN3_EFF 0x80 #define ICAN3_CAN_TYPE_MASK 0x0f #define ICAN3_CAN_TYPE_SFF 0x00 #define ICAN3_CAN_TYPE_EFF 0x01 #define ICAN3_CAN_DLC_MASK 0x0f /* Janz ICAN3 NMTS subtypes */ #define NMTS_CREATE_NODE_REQ 0x0 #define NMTS_SLAVE_STATE_IND 0x8 #define NMTS_SLAVE_EVENT_IND 0x9 /* Janz ICAN3 LMTS subtypes */ #define LMTS_BUSON_REQ 0x0 #define LMTS_BUSOFF_REQ 0x1 #define LMTS_CAN_CONF_REQ 0x2 /* Janz ICAN3 NMTS Event indications */ #define NE_LOCAL_OCCURRED 0x3 #define NE_LOCAL_RESOLVED 0x2 #define NE_REMOTE_OCCURRED 0xc #define NE_REMOTE_RESOLVED 0x8 /* * SJA1000 Status and Error Register Definitions * * Copied from drivers/net/can/sja1000/sja1000.h */ /* status register content */ #define SR_BS 0x80 #define SR_ES 0x40 #define SR_TS 0x20 #define SR_RS 0x10 #define SR_TCS 0x08 #define SR_TBS 0x04 #define SR_DOS 0x02 #define SR_RBS 0x01 #define SR_CRIT (SR_BS|SR_ES) /* ECC register */ #define ECC_SEG 0x1F #define ECC_DIR 0x20 #define ECC_ERR 6 #define ECC_BIT 0x00 #define ECC_FORM 0x40 #define ECC_STUFF 0x80 #define ECC_MASK 0xc0 /* Number of buffers for use in the "new-style" host interface */ #define ICAN3_NEW_BUFFERS 16 /* Number of buffers for use in the "fast" host interface */ #define ICAN3_TX_BUFFERS 512 #define ICAN3_RX_BUFFERS 1024 /* SJA1000 Clock Input */ #define ICAN3_CAN_CLOCK 8000000 /* Janz ICAN3 firmware types */ enum ican3_fwtype { ICAN3_FWTYPE_ICANOS, ICAN3_FWTYPE_CAL_CANOPEN, }; /* Driver Name */ #define DRV_NAME "janz-ican3" /* DPM Control Registers -- starts at offset 0x100 in the MODULbus registers */ struct ican3_dpm_control { /* window address register */ u8 window_address; u8 unused1; /* * Read access: clear interrupt from microcontroller * Write access: send interrupt to microcontroller */ u8 interrupt; u8 unused2; /* write-only: reset all hardware on the module */ u8 hwreset; u8 unused3; /* write-only: generate an interrupt to the TPU */ u8 tpuinterrupt; }; struct ican3_dev { /* must be the first member */ struct can_priv can; /* CAN network device */ struct net_device *ndev; struct napi_struct napi; /* module number */ unsigned int num; /* base address of registers and IRQ */ struct janz_cmodio_onboard_regs __iomem *ctrl; struct ican3_dpm_control __iomem *dpmctrl; void __iomem *dpm; int irq; /* CAN bus termination status */ struct completion termination_comp; bool termination_enabled; /* CAN bus error status registers */ struct completion buserror_comp; struct can_berr_counter bec; /* firmware type */ enum ican3_fwtype fwtype; char fwinfo[32]; /* old and new style host interface */ unsigned int iftype; /* queue for echo packets */ struct sk_buff_head echoq; /* * Any function which changes the current DPM page must hold this * lock while it is performing 
data accesses. This ensures that the * function will not be preempted and end up reading data from a * different DPM page than it expects. */ spinlock_t lock; /* new host interface */ unsigned int rx_int; unsigned int rx_num; unsigned int tx_num; /* fast host interface */ unsigned int fastrx_start; unsigned int fastrx_num; unsigned int fasttx_start; unsigned int fasttx_num; /* first free DPM page */ unsigned int free_page; }; struct ican3_msg { u8 control; u8 spec; __le16 len; u8 data[252]; }; struct ican3_new_desc { u8 control; u8 pointer; }; struct ican3_fast_desc { u8 control; u8 command; u8 data[14]; }; /* write to the window basic address register */ static inline void ican3_set_page(struct ican3_dev *mod, unsigned int page) { BUG_ON(page >= DPM_NUM_PAGES); iowrite8(page, &mod->dpmctrl->window_address); } /* * ICAN3 "old-style" host interface */ /* * Receive a message from the ICAN3 "old-style" firmware interface * * LOCKING: must hold mod->lock * * returns 0 on success, -ENOMEM when no message exists */ static int ican3_old_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned int mbox, mbox_page; u8 locl, peer, xord; /* get the MSYNC registers */ ican3_set_page(mod, QUEUE_OLD_CONTROL); peer = ioread8(mod->dpm + MSYNC_PEER); locl = ioread8(mod->dpm + MSYNC_LOCL); xord = locl ^ peer; if ((xord & MSYNC_RB_MASK) == 0x00) { netdev_dbg(mod->ndev, "no mbox for reading\n"); return -ENOMEM; } /* find the first free mbox to read */ if ((xord & MSYNC_RB_MASK) == MSYNC_RB_MASK) mbox = (xord & MSYNC_RBLW) ? MSYNC_RB0 : MSYNC_RB1; else mbox = (xord & MSYNC_RB0) ? MSYNC_RB0 : MSYNC_RB1; /* copy the message */ mbox_page = (mbox == MSYNC_RB0) ? QUEUE_OLD_RB0 : QUEUE_OLD_RB1; ican3_set_page(mod, mbox_page); memcpy_fromio(msg, mod->dpm, sizeof(*msg)); /* * notify the firmware that the read buffer is available * for it to fill again */ locl ^= mbox; ican3_set_page(mod, QUEUE_OLD_CONTROL); iowrite8(locl, mod->dpm + MSYNC_LOCL); return 0; } /* * Send a message through the "old-style" firmware interface * * LOCKING: must hold mod->lock * * returns 0 on success, -ENOMEM when no free space exists */ static int ican3_old_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned int mbox, mbox_page; u8 locl, peer, xord; /* get the MSYNC registers */ ican3_set_page(mod, QUEUE_OLD_CONTROL); peer = ioread8(mod->dpm + MSYNC_PEER); locl = ioread8(mod->dpm + MSYNC_LOCL); xord = locl ^ peer; if ((xord & MSYNC_WB_MASK) == MSYNC_WB_MASK) { netdev_err(mod->ndev, "no mbox for writing\n"); return -ENOMEM; } /* calculate a free mbox to use */ mbox = (xord & MSYNC_WB0) ? MSYNC_WB1 : MSYNC_WB0; /* copy the message to the DPM */ mbox_page = (mbox == MSYNC_WB0) ? 
QUEUE_OLD_WB0 : QUEUE_OLD_WB1; ican3_set_page(mod, mbox_page); memcpy_toio(mod->dpm, msg, sizeof(*msg)); locl ^= mbox; if (mbox == MSYNC_WB1) locl |= MSYNC_WBLW; ican3_set_page(mod, QUEUE_OLD_CONTROL); iowrite8(locl, mod->dpm + MSYNC_LOCL); return 0; } /* * ICAN3 "new-style" Host Interface Setup */ static void ican3_init_new_host_interface(struct ican3_dev *mod) { struct ican3_new_desc desc; unsigned long flags; void __iomem *dst; int i; spin_lock_irqsave(&mod->lock, flags); /* setup the internal datastructures for RX */ mod->rx_num = 0; mod->rx_int = 0; /* tohost queue descriptors are in page 5 */ ican3_set_page(mod, QUEUE_TOHOST); dst = mod->dpm; /* initialize the tohost (rx) queue descriptors: pages 9-24 */ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) { desc.control = DESC_INTERRUPT | DESC_LEN(1); /* I L=1 */ desc.pointer = mod->free_page; /* set wrap flag on last buffer */ if (i == ICAN3_NEW_BUFFERS - 1) desc.control |= DESC_WRAP; memcpy_toio(dst, &desc, sizeof(desc)); dst += sizeof(desc); mod->free_page++; } /* fromhost (tx) mid queue descriptors are in page 6 */ ican3_set_page(mod, QUEUE_FROMHOST_MID); dst = mod->dpm; /* setup the internal datastructures for TX */ mod->tx_num = 0; /* initialize the fromhost mid queue descriptors: pages 25-40 */ for (i = 0; i < ICAN3_NEW_BUFFERS; i++) { desc.control = DESC_VALID | DESC_LEN(1); /* V L=1 */ desc.pointer = mod->free_page; /* set wrap flag on last buffer */ if (i == ICAN3_NEW_BUFFERS - 1) desc.control |= DESC_WRAP; memcpy_toio(dst, &desc, sizeof(desc)); dst += sizeof(desc); mod->free_page++; } /* fromhost hi queue descriptors are in page 7 */ ican3_set_page(mod, QUEUE_FROMHOST_HIGH); dst = mod->dpm; /* initialize only a single buffer in the fromhost hi queue (unused) */ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */ desc.pointer = mod->free_page; memcpy_toio(dst, &desc, sizeof(desc)); mod->free_page++; /* fromhost low queue descriptors are in page 8 */ ican3_set_page(mod, QUEUE_FROMHOST_LOW); dst = mod->dpm; /* initialize only a single buffer in the fromhost low queue (unused) */ desc.control = DESC_VALID | DESC_WRAP | DESC_LEN(1); /* VW L=1 */ desc.pointer = mod->free_page; memcpy_toio(dst, &desc, sizeof(desc)); mod->free_page++; spin_unlock_irqrestore(&mod->lock, flags); } /* * ICAN3 Fast Host Interface Setup */ static void ican3_init_fast_host_interface(struct ican3_dev *mod) { struct ican3_fast_desc desc; unsigned long flags; unsigned int addr; void __iomem *dst; int i; spin_lock_irqsave(&mod->lock, flags); /* save the start recv page */ mod->fastrx_start = mod->free_page; mod->fastrx_num = 0; /* build a single fast tohost queue descriptor */ memset(&desc, 0, sizeof(desc)); desc.control = 0x00; desc.command = 1; /* build the tohost queue descriptor ring in memory */ addr = 0; for (i = 0; i < ICAN3_RX_BUFFERS; i++) { /* set the wrap bit on the last buffer */ if (i == ICAN3_RX_BUFFERS - 1) desc.control |= DESC_WRAP; /* switch to the correct page */ ican3_set_page(mod, mod->free_page); /* copy the descriptor to the DPM */ dst = mod->dpm + addr; memcpy_toio(dst, &desc, sizeof(desc)); addr += sizeof(desc); /* move to the next page if necessary */ if (addr >= DPM_PAGE_SIZE) { addr = 0; mod->free_page++; } } /* make sure we page-align the next queue */ if (addr != 0) mod->free_page++; /* save the start xmit page */ mod->fasttx_start = mod->free_page; mod->fasttx_num = 0; /* build a single fast fromhost queue descriptor */ memset(&desc, 0, sizeof(desc)); desc.control = DESC_VALID; desc.command = 1; /* build the fromhost queue 
descriptor ring in memory */ addr = 0; for (i = 0; i < ICAN3_TX_BUFFERS; i++) { /* set the wrap bit on the last buffer */ if (i == ICAN3_TX_BUFFERS - 1) desc.control |= DESC_WRAP; /* switch to the correct page */ ican3_set_page(mod, mod->free_page); /* copy the descriptor to the DPM */ dst = mod->dpm + addr; memcpy_toio(dst, &desc, sizeof(desc)); addr += sizeof(desc); /* move to the next page if necessary */ if (addr >= DPM_PAGE_SIZE) { addr = 0; mod->free_page++; } } spin_unlock_irqrestore(&mod->lock, flags); } /* * ICAN3 "new-style" Host Interface Message Helpers */ /* * LOCKING: must hold mod->lock */ static int ican3_new_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) { struct ican3_new_desc desc; void __iomem *desc_addr = mod->dpm + (mod->tx_num * sizeof(desc)); /* switch to the fromhost mid queue, and read the buffer descriptor */ ican3_set_page(mod, QUEUE_FROMHOST_MID); memcpy_fromio(&desc, desc_addr, sizeof(desc)); if (!(desc.control & DESC_VALID)) { netdev_dbg(mod->ndev, "%s: no free buffers\n", __func__); return -ENOMEM; } /* switch to the data page, copy the data */ ican3_set_page(mod, desc.pointer); memcpy_toio(mod->dpm, msg, sizeof(*msg)); /* switch back to the descriptor, set the valid bit, write it back */ ican3_set_page(mod, QUEUE_FROMHOST_MID); desc.control ^= DESC_VALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* update the tx number */ mod->tx_num = (desc.control & DESC_WRAP) ? 0 : (mod->tx_num + 1); return 0; } /* * LOCKING: must hold mod->lock */ static int ican3_new_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) { struct ican3_new_desc desc; void __iomem *desc_addr = mod->dpm + (mod->rx_num * sizeof(desc)); /* switch to the tohost queue, and read the buffer descriptor */ ican3_set_page(mod, QUEUE_TOHOST); memcpy_fromio(&desc, desc_addr, sizeof(desc)); if (!(desc.control & DESC_VALID)) { netdev_dbg(mod->ndev, "%s: no buffers to recv\n", __func__); return -ENOMEM; } /* switch to the data page, copy the data */ ican3_set_page(mod, desc.pointer); memcpy_fromio(msg, mod->dpm, sizeof(*msg)); /* switch back to the descriptor, toggle the valid bit, write it back */ ican3_set_page(mod, QUEUE_TOHOST); desc.control ^= DESC_VALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* update the rx number */ mod->rx_num = (desc.control & DESC_WRAP) ? 
0 : (mod->rx_num + 1); return 0; } /* * Message Send / Recv Helpers */ static int ican3_send_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned long flags; int ret; spin_lock_irqsave(&mod->lock, flags); if (mod->iftype == 0) ret = ican3_old_send_msg(mod, msg); else ret = ican3_new_send_msg(mod, msg); spin_unlock_irqrestore(&mod->lock, flags); return ret; } static int ican3_recv_msg(struct ican3_dev *mod, struct ican3_msg *msg) { unsigned long flags; int ret; spin_lock_irqsave(&mod->lock, flags); if (mod->iftype == 0) ret = ican3_old_recv_msg(mod, msg); else ret = ican3_new_recv_msg(mod, msg); spin_unlock_irqrestore(&mod->lock, flags); return ret; } /* * Quick Pre-constructed Messages */ static int ican3_msg_connect(struct ican3_dev *mod) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_CONNECTI; msg.len = cpu_to_le16(0); return ican3_send_msg(mod, &msg); } static int ican3_msg_disconnect(struct ican3_dev *mod) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_DISCONNECT; msg.len = cpu_to_le16(0); return ican3_send_msg(mod, &msg); } static int ican3_msg_newhostif(struct ican3_dev *mod) { struct ican3_msg msg; int ret; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_NEWHOSTIF; msg.len = cpu_to_le16(0); /* If we're not using the old interface, switching seems bogus */ WARN_ON(mod->iftype != 0); ret = ican3_send_msg(mod, &msg); if (ret) return ret; /* mark the module as using the new host interface */ mod->iftype = 1; return 0; } static int ican3_msg_fasthostif(struct ican3_dev *mod) { struct ican3_msg msg; unsigned int addr; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_INITFDPMQUEUE; msg.len = cpu_to_le16(8); /* write the tohost queue start address */ addr = DPM_PAGE_ADDR(mod->fastrx_start); msg.data[0] = addr & 0xff; msg.data[1] = (addr >> 8) & 0xff; msg.data[2] = (addr >> 16) & 0xff; msg.data[3] = (addr >> 24) & 0xff; /* write the fromhost queue start address */ addr = DPM_PAGE_ADDR(mod->fasttx_start); msg.data[4] = addr & 0xff; msg.data[5] = (addr >> 8) & 0xff; msg.data[6] = (addr >> 16) & 0xff; msg.data[7] = (addr >> 24) & 0xff; /* If we're not using the new interface yet, we cannot do this */ WARN_ON(mod->iftype != 1); return ican3_send_msg(mod, &msg); } /* * Setup the CAN filter to either accept or reject all * messages from the CAN bus. */ static int ican3_set_id_filter(struct ican3_dev *mod, bool accept) { struct ican3_msg msg; int ret; /* Standard Frame Format */ memset(&msg, 0, sizeof(msg)); msg.spec = MSG_SETAFILMASK; msg.len = cpu_to_le16(5); msg.data[0] = 0x00; /* IDLo LSB */ msg.data[1] = 0x00; /* IDLo MSB */ msg.data[2] = 0xff; /* IDHi LSB */ msg.data[3] = 0x07; /* IDHi MSB */ /* accept all frames for fast host if, or reject all frames */ msg.data[4] = accept ? SETAFILMASK_FASTIF : SETAFILMASK_REJECT; ret = ican3_send_msg(mod, &msg); if (ret) return ret; /* Extended Frame Format */ memset(&msg, 0, sizeof(msg)); msg.spec = MSG_SETAFILMASK; msg.len = cpu_to_le16(13); msg.data[0] = 0; /* MUX = 0 */ msg.data[1] = 0x00; /* IDLo LSB */ msg.data[2] = 0x00; msg.data[3] = 0x00; msg.data[4] = 0x20; /* IDLo MSB */ msg.data[5] = 0xff; /* IDHi LSB */ msg.data[6] = 0xff; msg.data[7] = 0xff; msg.data[8] = 0x3f; /* IDHi MSB */ /* accept all frames for fast host if, or reject all frames */ msg.data[9] = accept ? 
SETAFILMASK_FASTIF : SETAFILMASK_REJECT; return ican3_send_msg(mod, &msg); } /* * Bring the CAN bus online or offline */ static int ican3_set_bus_state(struct ican3_dev *mod, bool on) { struct can_bittiming *bt = &mod->can.bittiming; struct ican3_msg msg; u8 btr0, btr1; int res; /* This algorithm was stolen from drivers/net/can/sja1000/sja1000.c */ /* The bittiming register command for the ICAN3 just sets the bit timing */ /* registers on the SJA1000 chip directly */ btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | (((bt->phase_seg2 - 1) & 0x7) << 4); if (mod->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) btr1 |= 0x80; if (mod->fwtype == ICAN3_FWTYPE_ICANOS) { if (on) { /* set bittiming */ memset(&msg, 0, sizeof(msg)); msg.spec = MSG_CBTRREQ; msg.len = cpu_to_le16(4); msg.data[0] = 0x00; msg.data[1] = 0x00; msg.data[2] = btr0; msg.data[3] = btr1; res = ican3_send_msg(mod, &msg); if (res) return res; } /* can-on/off request */ memset(&msg, 0, sizeof(msg)); msg.spec = on ? MSG_CONREQ : MSG_COFFREQ; msg.len = cpu_to_le16(0); return ican3_send_msg(mod, &msg); } else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) { /* bittiming + can-on/off request */ memset(&msg, 0, sizeof(msg)); msg.spec = MSG_LMTS; if (on) { msg.len = cpu_to_le16(4); msg.data[0] = LMTS_BUSON_REQ; msg.data[1] = 0; msg.data[2] = btr0; msg.data[3] = btr1; } else { msg.len = cpu_to_le16(2); msg.data[0] = LMTS_BUSOFF_REQ; msg.data[1] = 0; } res = ican3_send_msg(mod, &msg); if (res) return res; if (on) { /* create NMT Slave Node for error processing * class 2 (with error capability, see CiA/DS203-1) * id 1 * name locnod1 (must be exactly 7 bytes) */ memset(&msg, 0, sizeof(msg)); msg.spec = MSG_NMTS; msg.len = cpu_to_le16(11); msg.data[0] = NMTS_CREATE_NODE_REQ; msg.data[1] = 0; msg.data[2] = 2; /* node class */ msg.data[3] = 1; /* node id */ strcpy(msg.data + 4, "locnod1"); /* node name */ return ican3_send_msg(mod, &msg); } return 0; } return -ENOTSUPP; } static int ican3_set_termination(struct ican3_dev *mod, bool on) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_HWCONF; msg.len = cpu_to_le16(2); msg.data[0] = 0x00; msg.data[1] = on ? 
HWCONF_TERMINATE_ON : HWCONF_TERMINATE_OFF; return ican3_send_msg(mod, &msg); } static int ican3_send_inquiry(struct ican3_dev *mod, u8 subspec) { struct ican3_msg msg; memset(&msg, 0, sizeof(msg)); msg.spec = MSG_INQUIRY; msg.len = cpu_to_le16(2); msg.data[0] = subspec; msg.data[1] = 0x00; return ican3_send_msg(mod, &msg); } static int ican3_set_buserror(struct ican3_dev *mod, u8 quota) { struct ican3_msg msg; if (mod->fwtype == ICAN3_FWTYPE_ICANOS) { memset(&msg, 0, sizeof(msg)); msg.spec = MSG_CCONFREQ; msg.len = cpu_to_le16(2); msg.data[0] = 0x00; msg.data[1] = quota; } else if (mod->fwtype == ICAN3_FWTYPE_CAL_CANOPEN) { memset(&msg, 0, sizeof(msg)); msg.spec = MSG_LMTS; msg.len = cpu_to_le16(4); msg.data[0] = LMTS_CAN_CONF_REQ; msg.data[1] = 0x00; msg.data[2] = 0x00; msg.data[3] = quota; } else { return -ENOTSUPP; } return ican3_send_msg(mod, &msg); } /* * ICAN3 to Linux CAN Frame Conversion */ static void ican3_to_can_frame(struct ican3_dev *mod, struct ican3_fast_desc *desc, struct can_frame *cf) { if ((desc->command & ICAN3_CAN_TYPE_MASK) == ICAN3_CAN_TYPE_SFF) { if (desc->data[1] & ICAN3_SFF_RTR) cf->can_id |= CAN_RTR_FLAG; cf->can_id |= desc->data[0] << 3; cf->can_id |= (desc->data[1] & 0xe0) >> 5; cf->len = can_cc_dlc2len(desc->data[1] & ICAN3_CAN_DLC_MASK); memcpy(cf->data, &desc->data[2], cf->len); } else { cf->len = can_cc_dlc2len(desc->data[0] & ICAN3_CAN_DLC_MASK); if (desc->data[0] & ICAN3_EFF_RTR) cf->can_id |= CAN_RTR_FLAG; if (desc->data[0] & ICAN3_EFF) { cf->can_id |= CAN_EFF_FLAG; cf->can_id |= desc->data[2] << 21; /* 28-21 */ cf->can_id |= desc->data[3] << 13; /* 20-13 */ cf->can_id |= desc->data[4] << 5; /* 12-5 */ cf->can_id |= (desc->data[5] & 0xf8) >> 3; } else { cf->can_id |= desc->data[2] << 3; /* 10-3 */ cf->can_id |= desc->data[3] >> 5; /* 2-0 */ } memcpy(cf->data, &desc->data[6], cf->len); } } static void can_frame_to_ican3(struct ican3_dev *mod, struct can_frame *cf, struct ican3_fast_desc *desc) { /* clear out any stale data in the descriptor */ memset(desc->data, 0, sizeof(desc->data)); /* we always use the extended format, with the ECHO flag set */ desc->command = ICAN3_CAN_TYPE_EFF; desc->data[0] |= cf->len; desc->data[1] |= ICAN3_ECHO; /* support single transmission (no retries) mode */ if (mod->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) desc->data[1] |= ICAN3_SNGL; if (cf->can_id & CAN_RTR_FLAG) desc->data[0] |= ICAN3_EFF_RTR; /* pack the id into the correct places */ if (cf->can_id & CAN_EFF_FLAG) { desc->data[0] |= ICAN3_EFF; desc->data[2] = (cf->can_id & 0x1fe00000) >> 21; /* 28-21 */ desc->data[3] = (cf->can_id & 0x001fe000) >> 13; /* 20-13 */ desc->data[4] = (cf->can_id & 0x00001fe0) >> 5; /* 12-5 */ desc->data[5] = (cf->can_id & 0x0000001f) << 3; /* 4-0 */ } else { desc->data[2] = (cf->can_id & 0x7F8) >> 3; /* bits 10-3 */ desc->data[3] = (cf->can_id & 0x007) << 5; /* bits 2-0 */ } /* copy the data bits into the descriptor */ memcpy(&desc->data[6], cf->data, cf->len); } /* * Interrupt Handling */ /* * Handle an ID + Version message response from the firmware. We never generate * this message in production code, but it is very useful when debugging to be * able to display this message. 
*/ static void ican3_handle_idvers(struct ican3_dev *mod, struct ican3_msg *msg) { netdev_dbg(mod->ndev, "IDVERS response: %s\n", msg->data); } static void ican3_handle_msglost(struct ican3_dev *mod, struct ican3_msg *msg) { struct net_device *dev = mod->ndev; struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; /* * Report that communication messages with the microcontroller firmware * are being lost. These are never CAN frames, so we do not generate an * error frame for userspace */ if (msg->spec == MSG_MSGLOST) { netdev_err(mod->ndev, "lost %d control messages\n", msg->data[0]); return; } /* * Oops, this indicates that we have lost messages in the fast queue, * which are exclusively CAN messages. Our driver isn't reading CAN * frames fast enough. * * We'll pretend that the SJA1000 told us that it ran out of buffer * space, because there is not a better message for this. */ skb = alloc_can_err_skb(dev, &cf); if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; netif_rx(skb); } } /* * Handle CAN Event Indication Messages from the firmware * * The ICAN3 firmware provides the values of some SJA1000 registers when it * generates this message. The code below is largely copied from the * drivers/net/can/sja1000/sja1000.c file, and adapted as necessary */ static int ican3_handle_cevtind(struct ican3_dev *mod, struct ican3_msg *msg) { struct net_device *dev = mod->ndev; struct net_device_stats *stats = &dev->stats; enum can_state state = mod->can.state; u8 isrc, ecc, status, rxerr, txerr; struct can_frame *cf; struct sk_buff *skb; /* we can only handle the SJA1000 part */ if (msg->data[1] != CEVTIND_CHIP_SJA1000) { netdev_err(mod->ndev, "unable to handle errors on non-SJA1000\n"); return -ENODEV; } /* check the message length for sanity */ if (le16_to_cpu(msg->len) < 6) { netdev_err(mod->ndev, "error message too short\n"); return -EINVAL; } isrc = msg->data[0]; ecc = msg->data[2]; status = msg->data[3]; rxerr = msg->data[4]; txerr = msg->data[5]; /* * This hardware lacks any support other than bus error messages to * determine if packet transmission has failed. * * When TX errors happen, one echo skb needs to be dropped from the * front of the queue. * * A small bit of code is duplicated here and below, to avoid error * skb allocation when it will just be freed immediately. */ if (isrc == CEVTIND_BEI) { int ret; netdev_dbg(mod->ndev, "bus error interrupt\n"); /* TX error */ if (!(ecc & ECC_DIR)) { kfree_skb(skb_dequeue(&mod->echoq)); stats->tx_errors++; } else { stats->rx_errors++; } /* * The controller automatically disables bus-error interrupts * and therefore we must re-enable them. 
*/ ret = ican3_set_buserror(mod, 1); if (ret) { netdev_err(mod->ndev, "unable to re-enable bus-error\n"); return ret; } /* bus error reporting is off, return immediately */ if (!(mod->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) return 0; } skb = alloc_can_err_skb(dev, &cf); if (skb == NULL) return -ENOMEM; /* data overrun interrupt */ if (isrc == CEVTIND_DOI || isrc == CEVTIND_LOST) { netdev_dbg(mod->ndev, "data overrun interrupt\n"); cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; } /* error warning + passive interrupt */ if (isrc == CEVTIND_EI) { netdev_dbg(mod->ndev, "error warning + passive interrupt\n"); if (status & SR_BS) { state = CAN_STATE_BUS_OFF; cf->can_id |= CAN_ERR_BUSOFF; mod->can.can_stats.bus_off++; can_bus_off(dev); } else if (status & SR_ES) { if (rxerr >= 128 || txerr >= 128) state = CAN_STATE_ERROR_PASSIVE; else state = CAN_STATE_ERROR_WARNING; } else { state = CAN_STATE_ERROR_ACTIVE; } } /* bus error interrupt */ if (isrc == CEVTIND_BEI) { mod->can.can_stats.bus_error++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR | CAN_ERR_CNT; switch (ecc & ECC_MASK) { case ECC_BIT: cf->data[2] |= CAN_ERR_PROT_BIT; break; case ECC_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; break; case ECC_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: cf->data[3] = ecc & ECC_SEG; break; } if (!(ecc & ECC_DIR)) cf->data[2] |= CAN_ERR_PROT_TX; cf->data[6] = txerr; cf->data[7] = rxerr; } if (state != mod->can.state && (state == CAN_STATE_ERROR_WARNING || state == CAN_STATE_ERROR_PASSIVE)) { cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; if (state == CAN_STATE_ERROR_WARNING) { mod->can.can_stats.error_warning++; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } else { mod->can.can_stats.error_passive++; cf->data[1] = (txerr > rxerr) ? 
CAN_ERR_CRTL_TX_PASSIVE :
				CAN_ERR_CRTL_RX_PASSIVE;
		}

		cf->data[6] = txerr;
		cf->data[7] = rxerr;
	}

	mod->can.state = state;
	netif_rx(skb);
	return 0;
}

static void ican3_handle_inquiry(struct ican3_dev *mod, struct ican3_msg *msg)
{
	switch (msg->data[0]) {
	case INQUIRY_STATUS:
	case INQUIRY_EXTENDED:
		mod->bec.rxerr = msg->data[5];
		mod->bec.txerr = msg->data[6];
		complete(&mod->buserror_comp);
		break;
	case INQUIRY_TERMINATION:
		mod->termination_enabled = msg->data[6] & HWCONF_TERMINATE_ON;
		complete(&mod->termination_comp);
		break;
	default:
		netdev_err(mod->ndev, "received an unknown inquiry response\n");
		break;
	}
}

/* Handle NMTS Slave Event Indication Messages from the firmware */
static void ican3_handle_nmtsind(struct ican3_dev *mod, struct ican3_msg *msg)
{
	u16 subspec;

	subspec = msg->data[0] + msg->data[1] * 0x100;
	if (subspec == NMTS_SLAVE_EVENT_IND) {
		switch (msg->data[2]) {
		case NE_LOCAL_OCCURRED:
		case NE_LOCAL_RESOLVED:
			/* now follows the same message as Raw ICANOS CEVTIND
			 * shift the data to the same place and call that function
			 */
			le16_add_cpu(&msg->len, -3);
			memmove(msg->data, msg->data + 3, le16_to_cpu(msg->len));
			ican3_handle_cevtind(mod, msg);
			break;
		case NE_REMOTE_OCCURRED:
		case NE_REMOTE_RESOLVED:
			/* should not occur, ignore */
			break;
		default:
			netdev_warn(mod->ndev, "unknown NMTS event indication %x\n",
				    msg->data[2]);
			break;
		}
	} else if (subspec == NMTS_SLAVE_STATE_IND) {
		/* ignore state indications */
	} else {
		netdev_warn(mod->ndev, "unhandled NMTS indication %x\n",
			    subspec);
		return;
	}
}

static void ican3_handle_unknown_message(struct ican3_dev *mod,
					 struct ican3_msg *msg)
{
	netdev_warn(mod->ndev, "received unknown message: spec 0x%.2x length %d\n",
		    msg->spec, le16_to_cpu(msg->len));
}

/*
 * Handle a control message from the firmware
 */
static void ican3_handle_message(struct ican3_dev *mod, struct ican3_msg *msg)
{
	netdev_dbg(mod->ndev, "%s: modno %d spec 0x%.2x len %d bytes\n",
		   __func__, mod->num, msg->spec, le16_to_cpu(msg->len));

	switch (msg->spec) {
	case MSG_IDVERS:
		ican3_handle_idvers(mod, msg);
		break;
	case MSG_MSGLOST:
	case MSG_FMSGLOST:
		ican3_handle_msglost(mod, msg);
		break;
	case MSG_CEVTIND:
		ican3_handle_cevtind(mod, msg);
		break;
	case MSG_INQUIRY:
		ican3_handle_inquiry(mod, msg);
		break;
	case MSG_NMTS:
		ican3_handle_nmtsind(mod, msg);
		break;
	default:
		ican3_handle_unknown_message(mod, msg);
		break;
	}
}

/*
 * The ican3 needs to store all echo skbs, and therefore cannot
 * use the generic infrastructure for this.
 */
static void ican3_put_echo_skb(struct ican3_dev *mod, struct sk_buff *skb)
{
	skb = can_create_echo_skb(skb);
	if (!skb)
		return;

	skb_tx_timestamp(skb);

	/* save this skb for tx interrupt echo handling */
	skb_queue_tail(&mod->echoq, skb);
}

static unsigned int ican3_get_echo_skb(struct ican3_dev *mod)
{
	struct sk_buff *skb = skb_dequeue(&mod->echoq);
	struct can_frame *cf;
	u8 dlc = 0;

	/* this should never trigger unless there is a driver bug */
	if (!skb) {
		netdev_err(mod->ndev, "BUG: echo skb not occupied\n");
		return 0;
	}

	cf = (struct can_frame *)skb->data;
	if (!(cf->can_id & CAN_RTR_FLAG))
		dlc = cf->len;

	/* check flag whether this packet has to be looped back */
	if (skb->pkt_type != PACKET_LOOPBACK) {
		kfree_skb(skb);
		return dlc;
	}

	skb->protocol = htons(ETH_P_CAN);
	skb->pkt_type = PACKET_BROADCAST;
	skb->ip_summed = CHECKSUM_UNNECESSARY;
	skb->dev = mod->ndev;
	netif_receive_skb(skb);
	return dlc;
}

/*
 * Compare an skb with an existing echo skb
 *
 * This function will be used on devices which have a hardware loopback.
* On these devices, this function can be used to compare a received skb * with the saved echo skbs so that the hardware echo skb can be dropped. * * Returns true if the skb's are identical, false otherwise. */ static bool ican3_echo_skb_matches(struct ican3_dev *mod, struct sk_buff *skb) { struct can_frame *cf = (struct can_frame *)skb->data; struct sk_buff *echo_skb = skb_peek(&mod->echoq); struct can_frame *echo_cf; if (!echo_skb) return false; echo_cf = (struct can_frame *)echo_skb->data; if (cf->can_id != echo_cf->can_id) return false; if (cf->len != echo_cf->len) return false; return memcmp(cf->data, echo_cf->data, cf->len) == 0; } /* * Check that there is room in the TX ring to transmit another skb * * LOCKING: must hold mod->lock */ static bool ican3_txok(struct ican3_dev *mod) { struct ican3_fast_desc __iomem *desc; u8 control; /* check that we have echo queue space */ if (skb_queue_len(&mod->echoq) >= ICAN3_TX_BUFFERS) return false; /* copy the control bits of the descriptor */ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16)); desc = mod->dpm + ((mod->fasttx_num % 16) * sizeof(*desc)); control = ioread8(&desc->control); /* if the control bits are not valid, then we have no more space */ if (!(control & DESC_VALID)) return false; return true; } /* * Receive one CAN frame from the hardware * * CONTEXT: must be called from user context */ static int ican3_recv_skb(struct ican3_dev *mod) { struct net_device *ndev = mod->ndev; struct net_device_stats *stats = &ndev->stats; struct ican3_fast_desc desc; void __iomem *desc_addr; struct can_frame *cf; struct sk_buff *skb; unsigned long flags; spin_lock_irqsave(&mod->lock, flags); /* copy the whole descriptor */ ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16)); desc_addr = mod->dpm + ((mod->fastrx_num % 16) * sizeof(desc)); memcpy_fromio(&desc, desc_addr, sizeof(desc)); spin_unlock_irqrestore(&mod->lock, flags); /* check that we actually have a CAN frame */ if (!(desc.control & DESC_VALID)) return -ENOBUFS; /* allocate an skb */ skb = alloc_can_skb(ndev, &cf); if (unlikely(skb == NULL)) { stats->rx_dropped++; goto err_noalloc; } /* convert the ICAN3 frame into Linux CAN format */ ican3_to_can_frame(mod, &desc, cf); /* * If this is an ECHO frame received from the hardware loopback * feature, use the skb saved in the ECHO stack instead. This allows * the Linux CAN core to support CAN_RAW_RECV_OWN_MSGS correctly. * * Since this is a confirmation of a successfully transmitted packet * sent from this host, update the transmit statistics. * * Also, the netdevice queue needs to be allowed to send packets again. */ if (ican3_echo_skb_matches(mod, skb)) { stats->tx_packets++; stats->tx_bytes += ican3_get_echo_skb(mod); kfree_skb(skb); goto err_noalloc; } /* update statistics, receive the skb */ stats->rx_packets++; if (!(cf->can_id & CAN_RTR_FLAG)) stats->rx_bytes += cf->len; netif_receive_skb(skb); err_noalloc: /* toggle the valid bit and return the descriptor to the ring */ desc.control ^= DESC_VALID; spin_lock_irqsave(&mod->lock, flags); ican3_set_page(mod, mod->fastrx_start + (mod->fastrx_num / 16)); memcpy_toio(desc_addr, &desc, 1); /* update the next buffer pointer */ mod->fastrx_num = (desc.control & DESC_WRAP) ? 
0 : (mod->fastrx_num + 1); /* there are still more buffers to process */ spin_unlock_irqrestore(&mod->lock, flags); return 0; } static int ican3_napi(struct napi_struct *napi, int budget) { struct ican3_dev *mod = container_of(napi, struct ican3_dev, napi); unsigned long flags; int received = 0; int ret; /* process all communication messages */ while (true) { struct ican3_msg msg; ret = ican3_recv_msg(mod, &msg); if (ret) break; ican3_handle_message(mod, &msg); } /* process all CAN frames from the fast interface */ while (received < budget) { ret = ican3_recv_skb(mod); if (ret) break; received++; } /* We have processed all packets that the adapter had, but it * was less than our budget, stop polling */ if (received < budget) napi_complete_done(napi, received); spin_lock_irqsave(&mod->lock, flags); /* Wake up the transmit queue if necessary */ if (netif_queue_stopped(mod->ndev) && ican3_txok(mod)) netif_wake_queue(mod->ndev); spin_unlock_irqrestore(&mod->lock, flags); /* re-enable interrupt generation */ iowrite8(1 << mod->num, &mod->ctrl->int_enable); return received; } static irqreturn_t ican3_irq(int irq, void *dev_id) { struct ican3_dev *mod = dev_id; u8 stat; /* * The interrupt status register on this device reports interrupts * as zeroes instead of using ones like most other devices */ stat = ioread8(&mod->ctrl->int_disable) & (1 << mod->num); if (stat == (1 << mod->num)) return IRQ_NONE; /* clear the MODULbus interrupt from the microcontroller */ ioread8(&mod->dpmctrl->interrupt); /* disable interrupt generation, schedule the NAPI poller */ iowrite8(1 << mod->num, &mod->ctrl->int_disable); napi_schedule(&mod->napi); return IRQ_HANDLED; } /* * Firmware reset, startup, and shutdown */ /* * Reset an ICAN module to its power-on state * * CONTEXT: no network device registered */ static int ican3_reset_module(struct ican3_dev *mod) { unsigned long start; u8 runold, runnew; /* disable interrupts so no more work is scheduled */ iowrite8(1 << mod->num, &mod->ctrl->int_disable); /* the first unallocated page in the DPM is #9 */ mod->free_page = DPM_FREE_START; ican3_set_page(mod, QUEUE_OLD_CONTROL); runold = ioread8(mod->dpm + TARGET_RUNNING); /* reset the module */ iowrite8(0x00, &mod->dpmctrl->hwreset); /* wait until the module has finished resetting and is running */ start = jiffies; do { ican3_set_page(mod, QUEUE_OLD_CONTROL); runnew = ioread8(mod->dpm + TARGET_RUNNING); if (runnew == (runold ^ 0xff)) return 0; msleep(10); } while (time_before(jiffies, start + HZ / 2)); netdev_err(mod->ndev, "failed to reset CAN module\n"); return -ETIMEDOUT; } static void ican3_shutdown_module(struct ican3_dev *mod) { ican3_msg_disconnect(mod); ican3_reset_module(mod); } /* * Startup an ICAN module, bringing it into fast mode */ static int ican3_startup_module(struct ican3_dev *mod) { int ret; ret = ican3_reset_module(mod); if (ret) { netdev_err(mod->ndev, "unable to reset module\n"); return ret; } /* detect firmware */ memcpy_fromio(mod->fwinfo, mod->dpm + FIRMWARE_STAMP, sizeof(mod->fwinfo) - 1); if (strncmp(mod->fwinfo, "JANZ-ICAN3", 10)) { netdev_err(mod->ndev, "ICAN3 not detected (found %s)\n", mod->fwinfo); return -ENODEV; } if (strstr(mod->fwinfo, "CAL/CANopen")) mod->fwtype = ICAN3_FWTYPE_CAL_CANOPEN; else mod->fwtype = ICAN3_FWTYPE_ICANOS; /* re-enable interrupts so we can send messages */ iowrite8(1 << mod->num, &mod->ctrl->int_enable); ret = ican3_msg_connect(mod); if (ret) { netdev_err(mod->ndev, "unable to connect to module\n"); return ret; } ican3_init_new_host_interface(mod); ret = 
ican3_msg_newhostif(mod); if (ret) { netdev_err(mod->ndev, "unable to switch to new-style interface\n"); return ret; } /* default to "termination on" */ ret = ican3_set_termination(mod, true); if (ret) { netdev_err(mod->ndev, "unable to enable termination\n"); return ret; } /* default to "bus errors enabled" */ ret = ican3_set_buserror(mod, 1); if (ret) { netdev_err(mod->ndev, "unable to set bus-error\n"); return ret; } ican3_init_fast_host_interface(mod); ret = ican3_msg_fasthostif(mod); if (ret) { netdev_err(mod->ndev, "unable to switch to fast host interface\n"); return ret; } ret = ican3_set_id_filter(mod, true); if (ret) { netdev_err(mod->ndev, "unable to set acceptance filter\n"); return ret; } return 0; } /* * CAN Network Device */ static int ican3_open(struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); int ret; /* open the CAN layer */ ret = open_candev(ndev); if (ret) { netdev_err(mod->ndev, "unable to start CAN layer\n"); return ret; } /* bring the bus online */ ret = ican3_set_bus_state(mod, true); if (ret) { netdev_err(mod->ndev, "unable to set bus-on\n"); close_candev(ndev); return ret; } /* start up the network device */ mod->can.state = CAN_STATE_ERROR_ACTIVE; netif_start_queue(ndev); return 0; } static int ican3_stop(struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); int ret; /* stop the network device xmit routine */ netif_stop_queue(ndev); mod->can.state = CAN_STATE_STOPPED; /* bring the bus offline, stop receiving packets */ ret = ican3_set_bus_state(mod, false); if (ret) { netdev_err(mod->ndev, "unable to set bus-off\n"); return ret; } /* drop all outstanding echo skbs */ skb_queue_purge(&mod->echoq); /* close the CAN layer */ close_candev(ndev); return 0; } static netdev_tx_t ican3_xmit(struct sk_buff *skb, struct net_device *ndev) { struct ican3_dev *mod = netdev_priv(ndev); struct can_frame *cf = (struct can_frame *)skb->data; struct ican3_fast_desc desc; void __iomem *desc_addr; unsigned long flags; if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; spin_lock_irqsave(&mod->lock, flags); /* check that we can actually transmit */ if (!ican3_txok(mod)) { netdev_err(mod->ndev, "BUG: no free descriptors\n"); spin_unlock_irqrestore(&mod->lock, flags); return NETDEV_TX_BUSY; } /* copy the control bits of the descriptor */ ican3_set_page(mod, mod->fasttx_start + (mod->fasttx_num / 16)); desc_addr = mod->dpm + ((mod->fasttx_num % 16) * sizeof(desc)); memset(&desc, 0, sizeof(desc)); memcpy_fromio(&desc, desc_addr, 1); /* convert the Linux CAN frame into ICAN3 format */ can_frame_to_ican3(mod, cf, &desc); /* * This hardware doesn't have TX-done notifications, so we'll try and * emulate it the best we can using ECHO skbs. Add the skb to the ECHO * stack. Upon packet reception, check if the ECHO skb and received * skb match, and use that to wake the queue. */ ican3_put_echo_skb(mod, skb); /* * the programming manual says that you must set the IVALID bit, then * interrupt, then set the valid bit. Quite weird, but it seems to be * required for this to work */ desc.control |= DESC_IVALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* generate a MODULbus interrupt to the microcontroller */ iowrite8(0x01, &mod->dpmctrl->interrupt); desc.control ^= DESC_VALID; memcpy_toio(desc_addr, &desc, sizeof(desc)); /* update the next buffer pointer */ mod->fasttx_num = (desc.control & DESC_WRAP) ? 
0 : (mod->fasttx_num + 1); /* if there is no free descriptor space, stop the transmit queue */ if (!ican3_txok(mod)) netif_stop_queue(ndev); spin_unlock_irqrestore(&mod->lock, flags); return NETDEV_TX_OK; } static const struct net_device_ops ican3_netdev_ops = { .ndo_open = ican3_open, .ndo_stop = ican3_stop, .ndo_start_xmit = ican3_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ican3_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; /* * Low-level CAN Device */ /* This structure was stolen from drivers/net/can/sja1000/sja1000.c */ static const struct can_bittiming_const ican3_bittiming_const = { .name = DRV_NAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; static int ican3_set_mode(struct net_device *ndev, enum can_mode mode) { struct ican3_dev *mod = netdev_priv(ndev); int ret; if (mode != CAN_MODE_START) return -ENOTSUPP; /* bring the bus online */ ret = ican3_set_bus_state(mod, true); if (ret) { netdev_err(ndev, "unable to set bus-on\n"); return ret; } /* start up the network device */ mod->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_queue_stopped(ndev)) netif_wake_queue(ndev); return 0; } static int ican3_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { struct ican3_dev *mod = netdev_priv(ndev); int ret; ret = ican3_send_inquiry(mod, INQUIRY_STATUS); if (ret) return ret; if (!wait_for_completion_timeout(&mod->buserror_comp, HZ)) { netdev_info(mod->ndev, "%s timed out\n", __func__); return -ETIMEDOUT; } bec->rxerr = mod->bec.rxerr; bec->txerr = mod->bec.txerr; return 0; } /* * Sysfs Attributes */ static ssize_t termination_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); int ret; ret = ican3_send_inquiry(mod, INQUIRY_TERMINATION); if (ret) return ret; if (!wait_for_completion_timeout(&mod->termination_comp, HZ)) { netdev_info(mod->ndev, "%s timed out\n", __func__); return -ETIMEDOUT; } return sysfs_emit(buf, "%u\n", mod->termination_enabled); } static ssize_t termination_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); unsigned long enable; int ret; if (kstrtoul(buf, 0, &enable)) return -EINVAL; ret = ican3_set_termination(mod, enable); if (ret) return ret; return count; } static ssize_t fwinfo_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ican3_dev *mod = netdev_priv(to_net_dev(dev)); return scnprintf(buf, PAGE_SIZE, "%s\n", mod->fwinfo); } static DEVICE_ATTR_RW(termination); static DEVICE_ATTR_RO(fwinfo); static struct attribute *ican3_sysfs_attrs[] = { &dev_attr_termination.attr, &dev_attr_fwinfo.attr, NULL, }; static const struct attribute_group ican3_sysfs_attr_group = { .attrs = ican3_sysfs_attrs, }; /* * PCI Subsystem */ static int ican3_probe(struct platform_device *pdev) { struct janz_platform_data *pdata; struct net_device *ndev; struct ican3_dev *mod; struct resource *res; struct device *dev; int ret; pdata = dev_get_platdata(&pdev->dev); if (!pdata) return -ENXIO; dev_dbg(&pdev->dev, "probe: module number %d\n", pdata->modno); /* save the struct device for printing */ dev = &pdev->dev; /* allocate the CAN device and private data */ ndev = alloc_candev(sizeof(*mod), 0); if (!ndev) { dev_err(dev, "unable to allocate CANdev\n"); ret = -ENOMEM; goto out_return; } platform_set_drvdata(pdev, ndev); mod = netdev_priv(ndev); mod->ndev = 
ndev; mod->num = pdata->modno; netif_napi_add_weight(ndev, &mod->napi, ican3_napi, ICAN3_RX_BUFFERS); skb_queue_head_init(&mod->echoq); spin_lock_init(&mod->lock); init_completion(&mod->termination_comp); init_completion(&mod->buserror_comp); /* setup device-specific sysfs attributes */ ndev->sysfs_groups[0] = &ican3_sysfs_attr_group; /* the first unallocated page in the DPM is 9 */ mod->free_page = DPM_FREE_START; ndev->netdev_ops = &ican3_netdev_ops; ndev->ethtool_ops = &ican3_ethtool_ops; ndev->flags |= IFF_ECHO; SET_NETDEV_DEV(ndev, &pdev->dev); mod->can.clock.freq = ICAN3_CAN_CLOCK; mod->can.bittiming_const = &ican3_bittiming_const; mod->can.do_set_mode = ican3_set_mode; mod->can.do_get_berr_counter = ican3_get_berr_counter; mod->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_ONE_SHOT; /* find our IRQ number */ mod->irq = platform_get_irq(pdev, 0); if (mod->irq < 0) { ret = -ENODEV; goto out_free_ndev; } ndev->irq = mod->irq; /* get access to the MODULbus registers for this module */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(dev, "MODULbus registers not found\n"); ret = -ENODEV; goto out_free_ndev; } mod->dpm = ioremap(res->start, resource_size(res)); if (!mod->dpm) { dev_err(dev, "MODULbus registers not ioremap\n"); ret = -ENOMEM; goto out_free_ndev; } mod->dpmctrl = mod->dpm + DPM_PAGE_SIZE; /* get access to the control registers for this module */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!res) { dev_err(dev, "CONTROL registers not found\n"); ret = -ENODEV; goto out_iounmap_dpm; } mod->ctrl = ioremap(res->start, resource_size(res)); if (!mod->ctrl) { dev_err(dev, "CONTROL registers not ioremap\n"); ret = -ENOMEM; goto out_iounmap_dpm; } /* disable our IRQ, then hookup the IRQ handler */ iowrite8(1 << mod->num, &mod->ctrl->int_disable); ret = request_irq(mod->irq, ican3_irq, IRQF_SHARED, DRV_NAME, mod); if (ret) { dev_err(dev, "unable to request IRQ\n"); goto out_iounmap_ctrl; } /* reset and initialize the CAN controller into fast mode */ napi_enable(&mod->napi); ret = ican3_startup_module(mod); if (ret) { dev_err(dev, "%s: unable to start CANdev\n", __func__); goto out_free_irq; } /* register with the Linux CAN layer */ ret = register_candev(ndev); if (ret) { dev_err(dev, "%s: unable to register CANdev\n", __func__); goto out_free_irq; } netdev_info(mod->ndev, "module %d: registered CAN device\n", pdata->modno); return 0; out_free_irq: napi_disable(&mod->napi); iowrite8(1 << mod->num, &mod->ctrl->int_disable); free_irq(mod->irq, mod); out_iounmap_ctrl: iounmap(mod->ctrl); out_iounmap_dpm: iounmap(mod->dpm); out_free_ndev: free_candev(ndev); out_return: return ret; } static void ican3_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ican3_dev *mod = netdev_priv(ndev); /* unregister the netdevice, stop interrupts */ unregister_netdev(ndev); napi_disable(&mod->napi); iowrite8(1 << mod->num, &mod->ctrl->int_disable); free_irq(mod->irq, mod); /* put the module into reset */ ican3_shutdown_module(mod); /* unmap all registers */ iounmap(mod->ctrl); iounmap(mod->dpm); free_candev(ndev); } static struct platform_driver ican3_driver = { .driver = { .name = DRV_NAME, }, .probe = ican3_probe, .remove_new = ican3_remove, }; module_platform_driver(ican3_driver); MODULE_AUTHOR("Ira W. Snyder <[email protected]>"); MODULE_DESCRIPTION("Janz MODULbus VMOD-ICAN3 Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:janz-ican3");
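/*
 * Editor's note: a minimal, standalone sketch (not part of the driver)
 * mirroring the 29-bit extended-ID packing used by can_frame_to_ican3()
 * and ican3_to_can_frame() above. It round-trips a sample ID through the
 * four descriptor data bytes to show the two layouts are inverses. Builds
 * with any hosted C compiler; the sample ID is arbitrary.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static void pack_eff_id(uint32_t id, uint8_t data[4])
{
	data[0] = (id & 0x1fe00000) >> 21;	/* ID bits 28-21 */
	data[1] = (id & 0x001fe000) >> 13;	/* ID bits 20-13 */
	data[2] = (id & 0x00001fe0) >> 5;	/* ID bits 12-5 */
	data[3] = (id & 0x0000001f) << 3;	/* ID bits 4-0 */
}

static uint32_t unpack_eff_id(const uint8_t data[4])
{
	return (data[0] << 21) | (data[1] << 13) | (data[2] << 5) |
	       ((data[3] & 0xf8) >> 3);
}

int main(void)
{
	uint32_t id = 0x1234567 & 0x1fffffff;	/* any 29-bit identifier */
	uint8_t buf[4];

	pack_eff_id(id, buf);
	assert(unpack_eff_id(buf) == id);
	printf("0x%08x round-trips through the ICAN3 layout\n", id);
	return 0;
}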
linux-master
drivers/net/can/janz-ican3.c
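/*
 * Editor's note: a standalone sketch of the fast-queue addressing used by
 * ican3_txok() and ican3_recv_skb() in janz-ican3.c above. Each
 * ican3_fast_desc is 16 bytes, and the driver's "/ 16" and "% 16"
 * arithmetic implies 16 descriptors per 256-byte DPM page (an assumption
 * here, since DPM_PAGE_SIZE is defined elsewhere). The start page value
 * below is hypothetical.
 */
#include <stdio.h>

#define FAST_DESC_SIZE	16	/* sizeof(struct ican3_fast_desc) */
#define DESC_PER_PAGE	16	/* assumed 256-byte page / 16-byte desc */

/* map a fast-queue buffer index to a (DPM page, byte offset) pair */
static void fast_desc_addr(unsigned int start_page, unsigned int num,
			   unsigned int *page, unsigned int *offset)
{
	*page = start_page + num / DESC_PER_PAGE;
	*offset = (num % DESC_PER_PAGE) * FAST_DESC_SIZE;
}

int main(void)
{
	unsigned int page, offset, i;

	/* walk a few of the fastrx buffers, crossing a page boundary */
	for (i = 0; i < 40; i += 15) {
		fast_desc_addr(/* hypothetical fastrx_start */ 41, i,
			       &page, &offset);
		printf("buf %3u -> page %u offset 0x%02x\n", i, page, offset);
	}
	return 0;
}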
// SPDX-License-Identifier: GPL-2.0-only /* * vxcan.c - Virtual CAN Tunnel for cross namespace communication * * This code is derived from drivers/net/can/vcan.c for the virtual CAN * specific parts and from drivers/net/veth.c to implement the netlink API * for network interface pairs in a common and established way. * * Copyright (c) 2017 Oliver Hartkopp <[email protected]> */ #include <linux/ethtool.h> #include <linux/module.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/skb.h> #include <linux/can/vxcan.h> #include <linux/can/can-ml.h> #include <linux/slab.h> #include <net/rtnetlink.h> #define DRV_NAME "vxcan" MODULE_DESCRIPTION("Virtual CAN Tunnel"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Oliver Hartkopp <[email protected]>"); MODULE_ALIAS_RTNL_LINK(DRV_NAME); struct vxcan_priv { struct net_device __rcu *peer; }; static netdev_tx_t vxcan_xmit(struct sk_buff *oskb, struct net_device *dev) { struct vxcan_priv *priv = netdev_priv(dev); struct net_device *peer; struct net_device_stats *peerstats, *srcstats = &dev->stats; struct sk_buff *skb; unsigned int len; if (can_dropped_invalid_skb(dev, oskb)) return NETDEV_TX_OK; rcu_read_lock(); peer = rcu_dereference(priv->peer); if (unlikely(!peer)) { kfree_skb(oskb); dev->stats.tx_dropped++; goto out_unlock; } skb_tx_timestamp(oskb); skb = skb_clone(oskb, GFP_ATOMIC); if (skb) { consume_skb(oskb); } else { kfree_skb(oskb); goto out_unlock; } /* reset CAN GW hop counter */ skb->csum_start = 0; skb->pkt_type = PACKET_BROADCAST; skb->dev = peer; skb->ip_summed = CHECKSUM_UNNECESSARY; len = can_skb_get_data_len(skb); if (netif_rx(skb) == NET_RX_SUCCESS) { srcstats->tx_packets++; srcstats->tx_bytes += len; peerstats = &peer->stats; peerstats->rx_packets++; peerstats->rx_bytes += len; } out_unlock: rcu_read_unlock(); return NETDEV_TX_OK; } static int vxcan_open(struct net_device *dev) { struct vxcan_priv *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); if (!peer) return -ENOTCONN; if (peer->flags & IFF_UP) { netif_carrier_on(dev); netif_carrier_on(peer); } return 0; } static int vxcan_close(struct net_device *dev) { struct vxcan_priv *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); netif_carrier_off(dev); if (peer) netif_carrier_off(peer); return 0; } static int vxcan_get_iflink(const struct net_device *dev) { struct vxcan_priv *priv = netdev_priv(dev); struct net_device *peer; int iflink; rcu_read_lock(); peer = rcu_dereference(priv->peer); iflink = peer ? 
peer->ifindex : 0; rcu_read_unlock(); return iflink; } static int vxcan_change_mtu(struct net_device *dev, int new_mtu) { /* Do not allow changing the MTU while running */ if (dev->flags & IFF_UP) return -EBUSY; if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU && !can_is_canxl_dev_mtu(new_mtu)) return -EINVAL; dev->mtu = new_mtu; return 0; } static const struct net_device_ops vxcan_netdev_ops = { .ndo_open = vxcan_open, .ndo_stop = vxcan_close, .ndo_start_xmit = vxcan_xmit, .ndo_get_iflink = vxcan_get_iflink, .ndo_change_mtu = vxcan_change_mtu, }; static const struct ethtool_ops vxcan_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static void vxcan_setup(struct net_device *dev) { struct can_ml_priv *can_ml; dev->type = ARPHRD_CAN; dev->mtu = CANFD_MTU; dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 0; dev->flags = IFF_NOARP; dev->netdev_ops = &vxcan_netdev_ops; dev->ethtool_ops = &vxcan_ethtool_ops; dev->needs_free_netdev = true; can_ml = netdev_priv(dev) + ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN); can_set_ml_priv(dev, can_ml); } /* forward declaration for rtnl_create_link() */ static struct rtnl_link_ops vxcan_link_ops; static int vxcan_newlink(struct net *net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct vxcan_priv *priv; struct net_device *peer; struct net *peer_net; struct nlattr *peer_tb[IFLA_MAX + 1], **tbp = tb; char ifname[IFNAMSIZ]; unsigned char name_assign_type; struct ifinfomsg *ifmp = NULL; int err; /* register peer device */ if (data && data[VXCAN_INFO_PEER]) { struct nlattr *nla_peer; nla_peer = data[VXCAN_INFO_PEER]; ifmp = nla_data(nla_peer); err = rtnl_nla_parse_ifinfomsg(peer_tb, nla_peer, extack); if (err < 0) return err; tbp = peer_tb; } if (ifmp && tbp[IFLA_IFNAME]) { nla_strscpy(ifname, tbp[IFLA_IFNAME], IFNAMSIZ); name_assign_type = NET_NAME_USER; } else { snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); name_assign_type = NET_NAME_ENUM; } peer_net = rtnl_link_get_net(net, tbp); if (IS_ERR(peer_net)) return PTR_ERR(peer_net); peer = rtnl_create_link(peer_net, ifname, name_assign_type, &vxcan_link_ops, tbp, extack); if (IS_ERR(peer)) { put_net(peer_net); return PTR_ERR(peer); } if (ifmp && dev->ifindex) peer->ifindex = ifmp->ifi_index; err = register_netdevice(peer); put_net(peer_net); peer_net = NULL; if (err < 0) { free_netdev(peer); return err; } netif_carrier_off(peer); err = rtnl_configure_link(peer, ifmp, 0, NULL); if (err < 0) goto unregister_network_device; /* register first device */ if (tb[IFLA_IFNAME]) nla_strscpy(dev->name, tb[IFLA_IFNAME], IFNAMSIZ); else snprintf(dev->name, IFNAMSIZ, DRV_NAME "%%d"); err = register_netdevice(dev); if (err < 0) goto unregister_network_device; netif_carrier_off(dev); /* cross link the device pair */ priv = netdev_priv(dev); rcu_assign_pointer(priv->peer, peer); priv = netdev_priv(peer); rcu_assign_pointer(priv->peer, dev); return 0; unregister_network_device: unregister_netdevice(peer); return err; } static void vxcan_dellink(struct net_device *dev, struct list_head *head) { struct vxcan_priv *priv; struct net_device *peer; priv = netdev_priv(dev); peer = rtnl_dereference(priv->peer); /* Note : dellink() is called from default_device_exit_batch(), * before a rcu_synchronize() point. The devices are guaranteed * not being freed before one RCU grace period. 
*/ RCU_INIT_POINTER(priv->peer, NULL); unregister_netdevice_queue(dev, head); if (peer) { priv = netdev_priv(peer); RCU_INIT_POINTER(priv->peer, NULL); unregister_netdevice_queue(peer, head); } } static const struct nla_policy vxcan_policy[VXCAN_INFO_MAX + 1] = { [VXCAN_INFO_PEER] = { .len = sizeof(struct ifinfomsg) }, }; static struct net *vxcan_get_link_net(const struct net_device *dev) { struct vxcan_priv *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); return peer ? dev_net(peer) : dev_net(dev); } static struct rtnl_link_ops vxcan_link_ops = { .kind = DRV_NAME, .priv_size = ALIGN(sizeof(struct vxcan_priv), NETDEV_ALIGN) + sizeof(struct can_ml_priv), .setup = vxcan_setup, .newlink = vxcan_newlink, .dellink = vxcan_dellink, .policy = vxcan_policy, .maxtype = VXCAN_INFO_MAX, .get_link_net = vxcan_get_link_net, }; static __init int vxcan_init(void) { pr_info("vxcan: Virtual CAN Tunnel driver\n"); return rtnl_link_register(&vxcan_link_ops); } static __exit void vxcan_exit(void) { rtnl_link_unregister(&vxcan_link_ops); } module_init(vxcan_init); module_exit(vxcan_exit);
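/*
 * Editor's note: a minimal userspace sketch (not part of the driver)
 * showing how a vxcan pair is typically exercised via SocketCAN. It
 * assumes the pair was created first with iproute2, e.g.:
 *
 *   ip link add vxcan0 type vxcan peer name vxcan1
 *   ip link set vxcan0 up && ip link set vxcan1 up
 *
 * A frame written to vxcan0 is then delivered to vxcan1 by vxcan_xmit()
 * above. The ".len" initializer assumes post-5.11 UAPI headers (older
 * headers spell the field "can_dlc").
 */
#include <stdio.h>
#include <unistd.h>
#include <net/if.h>
#include <sys/socket.h>
#include <linux/can.h>
#include <linux/can/raw.h>

int main(void)
{
	struct sockaddr_can addr = { .can_family = AF_CAN };
	struct can_frame frame = { .can_id = 0x123, .len = 2,
				   .data = { 0xde, 0xad } };
	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);

	if (s < 0 || !(addr.can_ifindex = if_nametoindex("vxcan0")))
		return perror("setup"), 1;
	if (bind(s, (struct sockaddr *)&addr, sizeof(addr)) < 0)
		return perror("bind"), 1;
	if (write(s, &frame, sizeof(frame)) != sizeof(frame))
		return perror("write"), 1;
	return close(s), 0;
}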
linux-master
drivers/net/can/vxcan.c
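/*
 * Editor's note: a standalone sketch of the BRPR/BTR encoding performed by
 * xcan_set_bittiming() in xilinx_can.c below, using the Zynq CANPS field
 * shifts from the driver (XCAN_BTR_TS2_SHIFT = 4, XCAN_BTR_SJW_SHIFT = 7).
 * The example timing (24 MHz CAN clock, brp = 3, 16 tq per bit, i.e.
 * 500 kbit/s) is an illustrative assumption, not a board-specific value.
 */
#include <stdio.h>

static unsigned int xcan_btr1(unsigned int prop_seg, unsigned int phase_seg1,
			      unsigned int phase_seg2, unsigned int sjw,
			      unsigned int ts2_shift, unsigned int sjw_shift)
{
	unsigned int btr1 = prop_seg + phase_seg1 - 1;	/* TS1 field */

	btr1 |= (phase_seg2 - 1) << ts2_shift;		/* TS2 field */
	btr1 |= (sjw - 1) << sjw_shift;			/* SJW field */
	return btr1;
}

int main(void)
{
	/* 24 MHz / brp 3 = 8 MHz tq clock; 16 tq/bit = 500 kbit/s */
	unsigned int brp = 3, prop_seg = 7, phase_seg1 = 4;
	unsigned int phase_seg2 = 4, sjw = 3;

	printf("BRPR=0x%02x BTR=0x%03x\n", brp - 1,
	       xcan_btr1(prop_seg, phase_seg1, phase_seg2, sjw, 4, 7));
	return 0;
}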
// SPDX-License-Identifier: GPL-2.0-or-later /* Xilinx CAN device driver * * Copyright (C) 2012 - 2022 Xilinx, Inc. * Copyright (C) 2009 PetaLogix. All rights reserved. * Copyright (C) 2017 - 2018 Sandvik Mining and Construction Oy * * Description: * This driver is developed for Axi CAN IP and for Zynq CANPS Controller. */ #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/phy/phy.h> #include <linux/pm_runtime.h> #include <linux/reset.h> #define DRIVER_NAME "xilinx_can" /* CAN registers set */ enum xcan_reg { XCAN_SRR_OFFSET = 0x00, /* Software reset */ XCAN_MSR_OFFSET = 0x04, /* Mode select */ XCAN_BRPR_OFFSET = 0x08, /* Baud rate prescaler */ XCAN_BTR_OFFSET = 0x0C, /* Bit timing */ XCAN_ECR_OFFSET = 0x10, /* Error counter */ XCAN_ESR_OFFSET = 0x14, /* Error status */ XCAN_SR_OFFSET = 0x18, /* Status */ XCAN_ISR_OFFSET = 0x1C, /* Interrupt status */ XCAN_IER_OFFSET = 0x20, /* Interrupt enable */ XCAN_ICR_OFFSET = 0x24, /* Interrupt clear */ /* not on CAN FD cores */ XCAN_TXFIFO_OFFSET = 0x30, /* TX FIFO base */ XCAN_RXFIFO_OFFSET = 0x50, /* RX FIFO base */ XCAN_AFR_OFFSET = 0x60, /* Acceptance Filter */ /* only on CAN FD cores */ XCAN_F_BRPR_OFFSET = 0x088, /* Data Phase Baud Rate * Prescaler */ XCAN_F_BTR_OFFSET = 0x08C, /* Data Phase Bit Timing */ XCAN_TRR_OFFSET = 0x0090, /* TX Buffer Ready Request */ XCAN_AFR_EXT_OFFSET = 0x00E0, /* Acceptance Filter */ XCAN_FSR_OFFSET = 0x00E8, /* RX FIFO Status */ XCAN_TXMSG_BASE_OFFSET = 0x0100, /* TX Message Space */ XCAN_RXMSG_BASE_OFFSET = 0x1100, /* RX Message Space */ XCAN_RXMSG_2_BASE_OFFSET = 0x2100, /* RX Message Space */ XCAN_AFR_2_MASK_OFFSET = 0x0A00, /* Acceptance Filter MASK */ XCAN_AFR_2_ID_OFFSET = 0x0A04, /* Acceptance Filter ID */ }; #define XCAN_FRAME_ID_OFFSET(frame_base) ((frame_base) + 0x00) #define XCAN_FRAME_DLC_OFFSET(frame_base) ((frame_base) + 0x04) #define XCAN_FRAME_DW1_OFFSET(frame_base) ((frame_base) + 0x08) #define XCAN_FRAME_DW2_OFFSET(frame_base) ((frame_base) + 0x0C) #define XCANFD_FRAME_DW_OFFSET(frame_base) ((frame_base) + 0x08) #define XCAN_CANFD_FRAME_SIZE 0x48 #define XCAN_TXMSG_FRAME_OFFSET(n) (XCAN_TXMSG_BASE_OFFSET + \ XCAN_CANFD_FRAME_SIZE * (n)) #define XCAN_RXMSG_FRAME_OFFSET(n) (XCAN_RXMSG_BASE_OFFSET + \ XCAN_CANFD_FRAME_SIZE * (n)) #define XCAN_RXMSG_2_FRAME_OFFSET(n) (XCAN_RXMSG_2_BASE_OFFSET + \ XCAN_CANFD_FRAME_SIZE * (n)) /* the single TX mailbox used by this driver on CAN FD HW */ #define XCAN_TX_MAILBOX_IDX 0 /* CAN register bit masks - XCAN_<REG>_<BIT>_MASK */ #define XCAN_SRR_CEN_MASK 0x00000002 /* CAN enable */ #define XCAN_SRR_RESET_MASK 0x00000001 /* Soft Reset the CAN core */ #define XCAN_MSR_LBACK_MASK 0x00000002 /* Loop back mode select */ #define XCAN_MSR_SLEEP_MASK 0x00000001 /* Sleep mode select */ #define XCAN_BRPR_BRP_MASK 0x000000FF /* Baud rate prescaler */ #define XCAN_BRPR_TDCO_MASK GENMASK(12, 8) /* TDCO */ #define XCAN_2_BRPR_TDCO_MASK GENMASK(13, 8) /* TDCO for CANFD 2.0 */ #define XCAN_BTR_SJW_MASK 0x00000180 /* Synchronous jump width */ #define XCAN_BTR_TS2_MASK 0x00000070 /* Time segment 2 */ #define 
XCAN_BTR_TS1_MASK 0x0000000F /* Time segment 1 */ #define XCAN_BTR_SJW_MASK_CANFD 0x000F0000 /* Synchronous jump width */ #define XCAN_BTR_TS2_MASK_CANFD 0x00000F00 /* Time segment 2 */ #define XCAN_BTR_TS1_MASK_CANFD 0x0000003F /* Time segment 1 */ #define XCAN_ECR_REC_MASK 0x0000FF00 /* Receive error counter */ #define XCAN_ECR_TEC_MASK 0x000000FF /* Transmit error counter */ #define XCAN_ESR_ACKER_MASK 0x00000010 /* ACK error */ #define XCAN_ESR_BERR_MASK 0x00000008 /* Bit error */ #define XCAN_ESR_STER_MASK 0x00000004 /* Stuff error */ #define XCAN_ESR_FMER_MASK 0x00000002 /* Form error */ #define XCAN_ESR_CRCER_MASK 0x00000001 /* CRC error */ #define XCAN_SR_TDCV_MASK GENMASK(22, 16) /* TDCV Value */ #define XCAN_SR_TXFLL_MASK 0x00000400 /* TX FIFO is full */ #define XCAN_SR_ESTAT_MASK 0x00000180 /* Error status */ #define XCAN_SR_ERRWRN_MASK 0x00000040 /* Error warning */ #define XCAN_SR_NORMAL_MASK 0x00000008 /* Normal mode */ #define XCAN_SR_LBACK_MASK 0x00000002 /* Loop back mode */ #define XCAN_SR_CONFIG_MASK 0x00000001 /* Configuration mode */ #define XCAN_IXR_RXMNF_MASK 0x00020000 /* RX match not finished */ #define XCAN_IXR_TXFEMP_MASK 0x00004000 /* TX FIFO Empty */ #define XCAN_IXR_WKUP_MASK 0x00000800 /* Wake up interrupt */ #define XCAN_IXR_SLP_MASK 0x00000400 /* Sleep interrupt */ #define XCAN_IXR_BSOFF_MASK 0x00000200 /* Bus off interrupt */ #define XCAN_IXR_ERROR_MASK 0x00000100 /* Error interrupt */ #define XCAN_IXR_RXNEMP_MASK 0x00000080 /* RX FIFO NotEmpty intr */ #define XCAN_IXR_RXOFLW_MASK 0x00000040 /* RX FIFO Overflow intr */ #define XCAN_IXR_RXOK_MASK 0x00000010 /* Message received intr */ #define XCAN_IXR_TXFLL_MASK 0x00000004 /* Tx FIFO Full intr */ #define XCAN_IXR_TXOK_MASK 0x00000002 /* TX successful intr */ #define XCAN_IXR_ARBLST_MASK 0x00000001 /* Arbitration lost intr */ #define XCAN_IDR_ID1_MASK 0xFFE00000 /* Standard msg identifier */ #define XCAN_IDR_SRR_MASK 0x00100000 /* Substitute remote TXreq */ #define XCAN_IDR_IDE_MASK 0x00080000 /* Identifier extension */ #define XCAN_IDR_ID2_MASK 0x0007FFFE /* Extended message ident */ #define XCAN_IDR_RTR_MASK 0x00000001 /* Remote TX request */ #define XCAN_DLCR_DLC_MASK 0xF0000000 /* Data length code */ #define XCAN_FSR_FL_MASK 0x00003F00 /* RX Fill Level */ #define XCAN_2_FSR_FL_MASK 0x00007F00 /* RX Fill Level */ #define XCAN_FSR_IRI_MASK 0x00000080 /* RX Increment Read Index */ #define XCAN_FSR_RI_MASK 0x0000001F /* RX Read Index */ #define XCAN_2_FSR_RI_MASK 0x0000003F /* RX Read Index */ #define XCAN_DLCR_EDL_MASK 0x08000000 /* EDL Mask in DLC */ #define XCAN_DLCR_BRS_MASK 0x04000000 /* BRS Mask in DLC */ /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */ #define XCAN_BRPR_TDC_ENABLE BIT(16) /* Transmitter Delay Compensation (TDC) Enable */ #define XCAN_BTR_SJW_SHIFT 7 /* Synchronous jump width */ #define XCAN_BTR_TS2_SHIFT 4 /* Time segment 2 */ #define XCAN_BTR_SJW_SHIFT_CANFD 16 /* Synchronous jump width */ #define XCAN_BTR_TS2_SHIFT_CANFD 8 /* Time segment 2 */ #define XCAN_IDR_ID1_SHIFT 21 /* Standard Messg Identifier */ #define XCAN_IDR_ID2_SHIFT 1 /* Extended Message Identifier */ #define XCAN_DLCR_DLC_SHIFT 28 /* Data length code */ #define XCAN_ESR_REC_SHIFT 8 /* Rx Error Count */ /* CAN frame length constants */ #define XCAN_FRAME_MAX_DATA_LEN 8 #define XCANFD_DW_BYTES 4 #define XCAN_TIMEOUT (1 * HZ) /* TX-FIFO-empty interrupt available */ #define XCAN_FLAG_TXFEMP 0x0001 /* RX Match Not Finished interrupt available */ #define XCAN_FLAG_RXMNF 0x0002 /* Extended acceptance filters with 
control at 0xE0 */ #define XCAN_FLAG_EXT_FILTERS 0x0004 /* TX mailboxes instead of TX FIFO */ #define XCAN_FLAG_TX_MAILBOXES 0x0008 /* RX FIFO with each buffer in separate registers at 0x1100 * instead of the regular FIFO at 0x50 */ #define XCAN_FLAG_RX_FIFO_MULTI 0x0010 #define XCAN_FLAG_CANFD_2 0x0020 enum xcan_ip_type { XAXI_CAN = 0, XZYNQ_CANPS, XAXI_CANFD, XAXI_CANFD_2_0, }; struct xcan_devtype_data { enum xcan_ip_type cantype; unsigned int flags; const struct can_bittiming_const *bittiming_const; const char *bus_clk_name; unsigned int btr_ts2_shift; unsigned int btr_sjw_shift; }; /** * struct xcan_priv - This definition define CAN driver instance * @can: CAN private data structure. * @tx_lock: Lock for synchronizing TX interrupt handling * @tx_head: Tx CAN packets ready to send on the queue * @tx_tail: Tx CAN packets successfully sended on the queue * @tx_max: Maximum number packets the driver can send * @napi: NAPI structure * @read_reg: For reading data from CAN registers * @write_reg: For writing data to CAN registers * @dev: Network device data structure * @reg_base: Ioremapped address to registers * @irq_flags: For request_irq() * @bus_clk: Pointer to struct clk * @can_clk: Pointer to struct clk * @devtype: Device type specific constants * @transceiver: Optional pointer to associated CAN transceiver * @rstc: Pointer to reset control */ struct xcan_priv { struct can_priv can; spinlock_t tx_lock; /* Lock for synchronizing TX interrupt handling */ unsigned int tx_head; unsigned int tx_tail; unsigned int tx_max; struct napi_struct napi; u32 (*read_reg)(const struct xcan_priv *priv, enum xcan_reg reg); void (*write_reg)(const struct xcan_priv *priv, enum xcan_reg reg, u32 val); struct device *dev; void __iomem *reg_base; unsigned long irq_flags; struct clk *bus_clk; struct clk *can_clk; struct xcan_devtype_data devtype; struct phy *transceiver; struct reset_control *rstc; }; /* CAN Bittiming constants as per Xilinx CAN specs */ static const struct can_bittiming_const xcan_bittiming_const = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; /* AXI CANFD Arbitration Bittiming constants as per AXI CANFD 1.0 spec */ static const struct can_bittiming_const xcan_bittiming_const_canfd = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 64, .tseg2_min = 1, .tseg2_max = 16, .sjw_max = 16, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; /* AXI CANFD Data Bittiming constants as per AXI CANFD 1.0 specs */ static const struct can_bittiming_const xcan_data_bittiming_const_canfd = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 8, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; /* AXI CANFD 2.0 Arbitration Bittiming constants as per AXI CANFD 2.0 spec */ static const struct can_bittiming_const xcan_bittiming_const_canfd2 = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 256, .tseg2_min = 1, .tseg2_max = 128, .sjw_max = 128, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; /* AXI CANFD 2.0 Data Bittiming constants as per AXI CANFD 2.0 spec */ static const struct can_bittiming_const xcan_data_bittiming_const_canfd2 = { .name = DRIVER_NAME, .tseg1_min = 1, .tseg1_max = 32, .tseg2_min = 1, .tseg2_max = 16, .sjw_max = 16, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; /* Transmission Delay Compensation constants for CANFD 1.0 */ static const struct can_tdc_const xcan_tdc_const_canfd = { .tdcv_min = 0, .tdcv_max = 0, /* Manual mode not supported. 
*/ .tdco_min = 0, .tdco_max = 32, .tdcf_min = 0, /* Filter window not supported */ .tdcf_max = 0, }; /* Transmission Delay Compensation constants for CANFD 2.0 */ static const struct can_tdc_const xcan_tdc_const_canfd2 = { .tdcv_min = 0, .tdcv_max = 0, /* Manual mode not supported. */ .tdco_min = 0, .tdco_max = 64, .tdcf_min = 0, /* Filter window not supported */ .tdcf_max = 0, }; /** * xcan_write_reg_le - Write a value to the device register little endian * @priv: Driver private data structure * @reg: Register offset * @val: Value to write at the Register offset * * Write data to the paricular CAN register */ static void xcan_write_reg_le(const struct xcan_priv *priv, enum xcan_reg reg, u32 val) { iowrite32(val, priv->reg_base + reg); } /** * xcan_read_reg_le - Read a value from the device register little endian * @priv: Driver private data structure * @reg: Register offset * * Read data from the particular CAN register * Return: value read from the CAN register */ static u32 xcan_read_reg_le(const struct xcan_priv *priv, enum xcan_reg reg) { return ioread32(priv->reg_base + reg); } /** * xcan_write_reg_be - Write a value to the device register big endian * @priv: Driver private data structure * @reg: Register offset * @val: Value to write at the Register offset * * Write data to the paricular CAN register */ static void xcan_write_reg_be(const struct xcan_priv *priv, enum xcan_reg reg, u32 val) { iowrite32be(val, priv->reg_base + reg); } /** * xcan_read_reg_be - Read a value from the device register big endian * @priv: Driver private data structure * @reg: Register offset * * Read data from the particular CAN register * Return: value read from the CAN register */ static u32 xcan_read_reg_be(const struct xcan_priv *priv, enum xcan_reg reg) { return ioread32be(priv->reg_base + reg); } /** * xcan_rx_int_mask - Get the mask for the receive interrupt * @priv: Driver private data structure * * Return: The receive interrupt mask used by the driver on this HW */ static u32 xcan_rx_int_mask(const struct xcan_priv *priv) { /* RXNEMP is better suited for our use case as it cannot be cleared * while the FIFO is non-empty, but CAN FD HW does not have it */ if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) return XCAN_IXR_RXOK_MASK; else return XCAN_IXR_RXNEMP_MASK; } /** * set_reset_mode - Resets the CAN device mode * @ndev: Pointer to net_device structure * * This is the driver reset mode routine.The driver * enters into configuration mode. * * Return: 0 on success and failure value on error */ static int set_reset_mode(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); unsigned long timeout; priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); timeout = jiffies + XCAN_TIMEOUT; while (!(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK)) { if (time_after(jiffies, timeout)) { netdev_warn(ndev, "timed out for config mode\n"); return -ETIMEDOUT; } usleep_range(500, 10000); } /* reset clears FIFOs */ priv->tx_head = 0; priv->tx_tail = 0; return 0; } /** * xcan_set_bittiming - CAN set bit timing routine * @ndev: Pointer to net_device structure * * This is the driver set bittiming routine. * Return: 0 on success and failure value on error */ static int xcan_set_bittiming(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); struct can_bittiming *bt = &priv->can.bittiming; struct can_bittiming *dbt = &priv->can.data_bittiming; u32 btr0, btr1; u32 is_config_mode; /* Check whether Xilinx CAN is in configuration mode. 
* It cannot set bit timing if Xilinx CAN is not in configuration mode. */ is_config_mode = priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_CONFIG_MASK; if (!is_config_mode) { netdev_alert(ndev, "BUG! Cannot set bittiming - CAN is not in config mode\n"); return -EPERM; } /* Setting Baud Rate prescaler value in BRPR Register */ btr0 = (bt->brp - 1); /* Setting Time Segment 1 in BTR Register */ btr1 = (bt->prop_seg + bt->phase_seg1 - 1); /* Setting Time Segment 2 in BTR Register */ btr1 |= (bt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift; /* Setting Synchronous jump width in BTR Register */ btr1 |= (bt->sjw - 1) << priv->devtype.btr_sjw_shift; priv->write_reg(priv, XCAN_BRPR_OFFSET, btr0); priv->write_reg(priv, XCAN_BTR_OFFSET, btr1); if (priv->devtype.cantype == XAXI_CANFD || priv->devtype.cantype == XAXI_CANFD_2_0) { /* Setting Baud Rate prescaler value in F_BRPR Register */ btr0 = dbt->brp - 1; if (can_tdc_is_enabled(&priv->can)) { if (priv->devtype.cantype == XAXI_CANFD) btr0 |= FIELD_PREP(XCAN_BRPR_TDCO_MASK, priv->can.tdc.tdco) | XCAN_BRPR_TDC_ENABLE; else btr0 |= FIELD_PREP(XCAN_2_BRPR_TDCO_MASK, priv->can.tdc.tdco) | XCAN_BRPR_TDC_ENABLE; } /* Setting Time Segment 1 in BTR Register */ btr1 = dbt->prop_seg + dbt->phase_seg1 - 1; /* Setting Time Segment 2 in BTR Register */ btr1 |= (dbt->phase_seg2 - 1) << priv->devtype.btr_ts2_shift; /* Setting Synchronous jump width in BTR Register */ btr1 |= (dbt->sjw - 1) << priv->devtype.btr_sjw_shift; priv->write_reg(priv, XCAN_F_BRPR_OFFSET, btr0); priv->write_reg(priv, XCAN_F_BTR_OFFSET, btr1); } netdev_dbg(ndev, "BRPR=0x%08x, BTR=0x%08x\n", priv->read_reg(priv, XCAN_BRPR_OFFSET), priv->read_reg(priv, XCAN_BTR_OFFSET)); return 0; } /** * xcan_chip_start - This the drivers start routine * @ndev: Pointer to net_device structure * * This is the drivers start routine. * Based on the State of the CAN device it puts * the CAN device into a proper mode. * * Return: 0 on success and failure value on error */ static int xcan_chip_start(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); u32 reg_msr; int err; u32 ier; /* Check if it is in reset mode */ err = set_reset_mode(ndev); if (err < 0) return err; err = xcan_set_bittiming(ndev); if (err < 0) return err; /* Enable interrupts * * We enable the ERROR interrupt even with * CAN_CTRLMODE_BERR_REPORTING disabled as there is no * dedicated interrupt for a state change to * ERROR_WARNING/ERROR_PASSIVE. 
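 * Instead, whenever the ERROR interrupt fires, xcan_err_interrupt()
 * re-reads the ESTAT/ERRWRN bits from the status register and moves
 * the CAN state accordingly.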
*/ ier = XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK | XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK | xcan_rx_int_mask(priv); if (priv->devtype.flags & XCAN_FLAG_RXMNF) ier |= XCAN_IXR_RXMNF_MASK; priv->write_reg(priv, XCAN_IER_OFFSET, ier); /* Check whether it is loopback mode or normal mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) reg_msr = XCAN_MSR_LBACK_MASK; else reg_msr = 0x0; /* enable the first extended filter, if any, as cores with extended * filtering default to non-receipt if all filters are disabled */ if (priv->devtype.flags & XCAN_FLAG_EXT_FILTERS) priv->write_reg(priv, XCAN_AFR_EXT_OFFSET, 0x00000001); priv->write_reg(priv, XCAN_MSR_OFFSET, reg_msr); priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_CEN_MASK); netdev_dbg(ndev, "status:#x%08x\n", priv->read_reg(priv, XCAN_SR_OFFSET)); priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; } /** * xcan_do_set_mode - This sets the mode of the driver * @ndev: Pointer to net_device structure * @mode: Tells the mode of the driver * * This check the drivers state and calls the corresponding modes to set. * * Return: 0 on success and failure value on error */ static int xcan_do_set_mode(struct net_device *ndev, enum can_mode mode) { int ret; switch (mode) { case CAN_MODE_START: ret = xcan_chip_start(ndev); if (ret < 0) { netdev_err(ndev, "xcan_chip_start failed!\n"); return ret; } netif_wake_queue(ndev); break; default: ret = -EOPNOTSUPP; break; } return ret; } /** * xcan_write_frame - Write a frame to HW * @ndev: Pointer to net_device structure * @skb: sk_buff pointer that contains data to be Txed * @frame_offset: Register offset to write the frame to */ static void xcan_write_frame(struct net_device *ndev, struct sk_buff *skb, int frame_offset) { u32 id, dlc, data[2] = {0, 0}; struct canfd_frame *cf = (struct canfd_frame *)skb->data; u32 ramoff, dwindex = 0, i; struct xcan_priv *priv = netdev_priv(ndev); /* Watch carefully on the bit sequence */ if (cf->can_id & CAN_EFF_FLAG) { /* Extended CAN ID format */ id = ((cf->can_id & CAN_EFF_MASK) << XCAN_IDR_ID2_SHIFT) & XCAN_IDR_ID2_MASK; id |= (((cf->can_id & CAN_EFF_MASK) >> (CAN_EFF_ID_BITS - CAN_SFF_ID_BITS)) << XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK; /* The substibute remote TX request bit should be "1" * for extended frames as in the Xilinx CAN datasheet */ id |= XCAN_IDR_IDE_MASK | XCAN_IDR_SRR_MASK; if (cf->can_id & CAN_RTR_FLAG) /* Extended frames remote TX request */ id |= XCAN_IDR_RTR_MASK; } else { /* Standard CAN ID format */ id = ((cf->can_id & CAN_SFF_MASK) << XCAN_IDR_ID1_SHIFT) & XCAN_IDR_ID1_MASK; if (cf->can_id & CAN_RTR_FLAG) /* Standard frames remote TX request */ id |= XCAN_IDR_SRR_MASK; } dlc = can_fd_len2dlc(cf->len) << XCAN_DLCR_DLC_SHIFT; if (can_is_canfd_skb(skb)) { if (cf->flags & CANFD_BRS) dlc |= XCAN_DLCR_BRS_MASK; dlc |= XCAN_DLCR_EDL_MASK; } if (!(priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) && (priv->devtype.flags & XCAN_FLAG_TXFEMP)) can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max, 0); else can_put_echo_skb(skb, ndev, 0, 0); priv->tx_head++; priv->write_reg(priv, XCAN_FRAME_ID_OFFSET(frame_offset), id); /* If the CAN frame is RTR frame this write triggers transmission * (not on CAN FD) */ priv->write_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_offset), dlc); if (priv->devtype.cantype == XAXI_CANFD || priv->devtype.cantype == XAXI_CANFD_2_0) { for (i = 0; i < cf->len; i += 4) { ramoff = XCANFD_FRAME_DW_OFFSET(frame_offset) + (dwindex * XCANFD_DW_BYTES); priv->write_reg(priv, ramoff, 
be32_to_cpup((__be32 *)(cf->data + i))); dwindex++; } } else { if (cf->len > 0) data[0] = be32_to_cpup((__be32 *)(cf->data + 0)); if (cf->len > 4) data[1] = be32_to_cpup((__be32 *)(cf->data + 4)); if (!(cf->can_id & CAN_RTR_FLAG)) { priv->write_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_offset), data[0]); /* If the CAN frame is Standard/Extended frame this * write triggers transmission (not on CAN FD) */ priv->write_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_offset), data[1]); } } } /** * xcan_start_xmit_fifo - Starts the transmission (FIFO mode) * @skb: sk_buff pointer that contains data to be Txed * @ndev: Pointer to net_device structure * * Return: 0 on success, -ENOSPC if FIFO is full. */ static int xcan_start_xmit_fifo(struct sk_buff *skb, struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); unsigned long flags; /* Check if the TX buffer is full */ if (unlikely(priv->read_reg(priv, XCAN_SR_OFFSET) & XCAN_SR_TXFLL_MASK)) return -ENOSPC; spin_lock_irqsave(&priv->tx_lock, flags); xcan_write_frame(ndev, skb, XCAN_TXFIFO_OFFSET); /* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */ if (priv->tx_max > 1) priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK); /* Check if the TX buffer is full */ if ((priv->tx_head - priv->tx_tail) == priv->tx_max) netif_stop_queue(ndev); spin_unlock_irqrestore(&priv->tx_lock, flags); return 0; } /** * xcan_start_xmit_mailbox - Starts the transmission (mailbox mode) * @skb: sk_buff pointer that contains data to be Txed * @ndev: Pointer to net_device structure * * Return: 0 on success, -ENOSPC if there is no space */ static int xcan_start_xmit_mailbox(struct sk_buff *skb, struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); unsigned long flags; if (unlikely(priv->read_reg(priv, XCAN_TRR_OFFSET) & BIT(XCAN_TX_MAILBOX_IDX))) return -ENOSPC; spin_lock_irqsave(&priv->tx_lock, flags); xcan_write_frame(ndev, skb, XCAN_TXMSG_FRAME_OFFSET(XCAN_TX_MAILBOX_IDX)); /* Mark buffer as ready for transmit */ priv->write_reg(priv, XCAN_TRR_OFFSET, BIT(XCAN_TX_MAILBOX_IDX)); netif_stop_queue(ndev); spin_unlock_irqrestore(&priv->tx_lock, flags); return 0; } /** * xcan_start_xmit - Starts the transmission * @skb: sk_buff pointer that contains data to be Txed * @ndev: Pointer to net_device structure * * This function is invoked from upper layers to initiate transmission. * * Return: NETDEV_TX_OK on success and NETDEV_TX_BUSY when the tx queue is full */ static netdev_tx_t xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); int ret; if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (priv->devtype.flags & XCAN_FLAG_TX_MAILBOXES) ret = xcan_start_xmit_mailbox(skb, ndev); else ret = xcan_start_xmit_fifo(skb, ndev); if (ret < 0) { netdev_err(ndev, "BUG!, TX full when queue awake!\n"); netif_stop_queue(ndev); return NETDEV_TX_BUSY; } return NETDEV_TX_OK; } /** * xcan_rx - Is called from CAN isr to complete the received * frame processing * @ndev: Pointer to net_device structure * @frame_base: Register offset to the frame to be read * * This function is invoked from the CAN isr(poll) to process the Rx frames. It * does minimal processing and invokes "netif_receive_skb" to complete further * processing. * Return: 1 on success and 0 on failure. 
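 *
 * Example: a standard-frame ID of 0x123 is stored left-justified in the
 * ID register, i.e. id_xcan = 0x123 << XCAN_IDR_ID1_SHIFT = 0x24600000,
 * and is recovered as (id_xcan & XCAN_IDR_ID1_MASK) >> XCAN_IDR_ID1_SHIFT.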
*/ static int xcan_rx(struct net_device *ndev, int frame_base) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; u32 id_xcan, dlc, data[2] = {0, 0}; skb = alloc_can_skb(ndev, &cf); if (unlikely(!skb)) { stats->rx_dropped++; return 0; } /* Read a frame from Xilinx zynq CANPS */ id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base)); dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)) >> XCAN_DLCR_DLC_SHIFT; /* Change Xilinx CAN data length format to socketCAN data format */ cf->len = can_cc_dlc2len(dlc); /* Change Xilinx CAN ID format to socketCAN ID format */ if (id_xcan & XCAN_IDR_IDE_MASK) { /* The received frame is an Extended format frame */ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3; cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >> XCAN_IDR_ID2_SHIFT; cf->can_id |= CAN_EFF_FLAG; if (id_xcan & XCAN_IDR_RTR_MASK) cf->can_id |= CAN_RTR_FLAG; } else { /* The received frame is a standard format frame */ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> XCAN_IDR_ID1_SHIFT; if (id_xcan & XCAN_IDR_SRR_MASK) cf->can_id |= CAN_RTR_FLAG; } /* DW1/DW2 must always be read to remove message from RXFIFO */ data[0] = priv->read_reg(priv, XCAN_FRAME_DW1_OFFSET(frame_base)); data[1] = priv->read_reg(priv, XCAN_FRAME_DW2_OFFSET(frame_base)); if (!(cf->can_id & CAN_RTR_FLAG)) { /* Change Xilinx CAN data format to socketCAN data format */ if (cf->len > 0) *(__be32 *)(cf->data) = cpu_to_be32(data[0]); if (cf->len > 4) *(__be32 *)(cf->data + 4) = cpu_to_be32(data[1]); stats->rx_bytes += cf->len; } stats->rx_packets++; netif_receive_skb(skb); return 1; } /** * xcanfd_rx - Is called from CAN isr to complete the received * frame processing * @ndev: Pointer to net_device structure * @frame_base: Register offset to the frame to be read * * This function is invoked from the CAN isr(poll) to process the Rx frames. It * does minimal processing and invokes "netif_receive_skb" to complete further * processing. * Return: 1 on success and 0 on failure. 
*/ static int xcanfd_rx(struct net_device *ndev, int frame_base) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct canfd_frame *cf; struct sk_buff *skb; u32 id_xcan, dlc, data[2] = {0, 0}, dwindex = 0, i, dw_offset; id_xcan = priv->read_reg(priv, XCAN_FRAME_ID_OFFSET(frame_base)); dlc = priv->read_reg(priv, XCAN_FRAME_DLC_OFFSET(frame_base)); if (dlc & XCAN_DLCR_EDL_MASK) skb = alloc_canfd_skb(ndev, &cf); else skb = alloc_can_skb(ndev, (struct can_frame **)&cf); if (unlikely(!skb)) { stats->rx_dropped++; return 0; } /* Change Xilinx CANFD data length format to socketCAN data * format */ if (dlc & XCAN_DLCR_EDL_MASK) cf->len = can_fd_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >> XCAN_DLCR_DLC_SHIFT); else cf->len = can_cc_dlc2len((dlc & XCAN_DLCR_DLC_MASK) >> XCAN_DLCR_DLC_SHIFT); /* Change Xilinx CAN ID format to socketCAN ID format */ if (id_xcan & XCAN_IDR_IDE_MASK) { /* The received frame is an Extended format frame */ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> 3; cf->can_id |= (id_xcan & XCAN_IDR_ID2_MASK) >> XCAN_IDR_ID2_SHIFT; cf->can_id |= CAN_EFF_FLAG; if (id_xcan & XCAN_IDR_RTR_MASK) cf->can_id |= CAN_RTR_FLAG; } else { /* The received frame is a standard format frame */ cf->can_id = (id_xcan & XCAN_IDR_ID1_MASK) >> XCAN_IDR_ID1_SHIFT; if (!(dlc & XCAN_DLCR_EDL_MASK) && (id_xcan & XCAN_IDR_SRR_MASK)) cf->can_id |= CAN_RTR_FLAG; } /* Check the frame received is FD or not*/ if (dlc & XCAN_DLCR_EDL_MASK) { for (i = 0; i < cf->len; i += 4) { dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base) + (dwindex * XCANFD_DW_BYTES); data[0] = priv->read_reg(priv, dw_offset); *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]); dwindex++; } } else { for (i = 0; i < cf->len; i += 4) { dw_offset = XCANFD_FRAME_DW_OFFSET(frame_base); data[0] = priv->read_reg(priv, dw_offset + i); *(__be32 *)(cf->data + i) = cpu_to_be32(data[0]); } } if (!(cf->can_id & CAN_RTR_FLAG)) stats->rx_bytes += cf->len; stats->rx_packets++; netif_receive_skb(skb); return 1; } /** * xcan_current_error_state - Get current error state from HW * @ndev: Pointer to net_device structure * * Checks the current CAN error state from the HW. Note that this * only checks for ERROR_PASSIVE and ERROR_WARNING. * * Return: * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE * otherwise. */ static enum can_state xcan_current_error_state(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); u32 status = priv->read_reg(priv, XCAN_SR_OFFSET); if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) return CAN_STATE_ERROR_PASSIVE; else if (status & XCAN_SR_ERRWRN_MASK) return CAN_STATE_ERROR_WARNING; else return CAN_STATE_ERROR_ACTIVE; } /** * xcan_set_error_state - Set new CAN error state * @ndev: Pointer to net_device structure * @new_state: The new CAN state to be set * @cf: Error frame to be populated or NULL * * Set new CAN error state for the device, updating statistics and * populating the error frame if given. */ static void xcan_set_error_state(struct net_device *ndev, enum can_state new_state, struct can_frame *cf) { struct xcan_priv *priv = netdev_priv(ndev); u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET); u32 txerr = ecr & XCAN_ECR_TEC_MASK; u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT; enum can_state tx_state = txerr >= rxerr ? new_state : 0; enum can_state rx_state = txerr <= rxerr ? 
new_state : 0; /* non-ERROR states are handled elsewhere */ if (WARN_ON(new_state > CAN_STATE_ERROR_PASSIVE)) return; can_change_state(ndev, cf, tx_state, rx_state); if (cf) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; } } /** * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX * @ndev: Pointer to net_device structure * * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if * the performed RX/TX has caused it to drop to a lesser state and set * the interface state accordingly. */ static void xcan_update_error_state_after_rxtx(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); enum can_state old_state = priv->can.state; enum can_state new_state; /* changing error state due to successful frame RX/TX can only * occur from these states */ if (old_state != CAN_STATE_ERROR_WARNING && old_state != CAN_STATE_ERROR_PASSIVE) return; new_state = xcan_current_error_state(ndev); if (new_state != old_state) { struct sk_buff *skb; struct can_frame *cf; skb = alloc_can_err_skb(ndev, &cf); xcan_set_error_state(ndev, new_state, skb ? cf : NULL); if (skb) netif_rx(skb); } } /** * xcan_err_interrupt - error frame Isr * @ndev: net_device pointer * @isr: interrupt status register value * * This is the CAN error interrupt and it will * check the type of error and forward the error * frame to upper layers. */ static void xcan_err_interrupt(struct net_device *ndev, u32 isr) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct can_frame cf = { }; u32 err_status; err_status = priv->read_reg(priv, XCAN_ESR_OFFSET); priv->write_reg(priv, XCAN_ESR_OFFSET, err_status); if (isr & XCAN_IXR_BSOFF_MASK) { priv->can.state = CAN_STATE_BUS_OFF; priv->can.can_stats.bus_off++; /* Leave device in Config Mode in bus-off state */ priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK); can_bus_off(ndev); cf.can_id |= CAN_ERR_BUSOFF; } else { enum can_state new_state = xcan_current_error_state(ndev); if (new_state != priv->can.state) xcan_set_error_state(ndev, new_state, &cf); } /* Check for Arbitration lost interrupt */ if (isr & XCAN_IXR_ARBLST_MASK) { priv->can.can_stats.arbitration_lost++; cf.can_id |= CAN_ERR_LOSTARB; cf.data[0] = CAN_ERR_LOSTARB_UNSPEC; } /* Check for RX FIFO Overflow interrupt */ if (isr & XCAN_IXR_RXOFLW_MASK) { stats->rx_over_errors++; stats->rx_errors++; cf.can_id |= CAN_ERR_CRTL; cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; } /* Check for RX Match Not Finished interrupt */ if (isr & XCAN_IXR_RXMNF_MASK) { stats->rx_dropped++; stats->rx_errors++; netdev_err(ndev, "RX match not finished, frame discarded\n"); cf.can_id |= CAN_ERR_CRTL; cf.data[1] |= CAN_ERR_CRTL_UNSPEC; } /* Check for error interrupt */ if (isr & XCAN_IXR_ERROR_MASK) { bool berr_reporting = false; if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) { berr_reporting = true; cf.can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; } /* Check for Ack error interrupt */ if (err_status & XCAN_ESR_ACKER_MASK) { stats->tx_errors++; if (berr_reporting) { cf.can_id |= CAN_ERR_ACK; cf.data[3] = CAN_ERR_PROT_LOC_ACK; } } /* Check for Bit error interrupt */ if (err_status & XCAN_ESR_BERR_MASK) { stats->tx_errors++; if (berr_reporting) { cf.can_id |= CAN_ERR_PROT; cf.data[2] = CAN_ERR_PROT_BIT; } } /* Check for Stuff error interrupt */ if (err_status & XCAN_ESR_STER_MASK) { stats->rx_errors++; if (berr_reporting) { cf.can_id |= CAN_ERR_PROT; cf.data[2] = CAN_ERR_PROT_STUFF; } } /* Check for Form error interrupt */ if (err_status & 
		    XCAN_ESR_FMER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[2] = CAN_ERR_PROT_FORM;
			}
		}

		/* Check for CRC error interrupt */
		if (err_status & XCAN_ESR_CRCER_MASK) {
			stats->rx_errors++;
			if (berr_reporting) {
				cf.can_id |= CAN_ERR_PROT;
				cf.data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
			}
		}
		priv->can.can_stats.bus_error++;
	}

	if (cf.can_id) {
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(ndev, &skb_cf);

		if (skb) {
			skb_cf->can_id |= cf.can_id;
			memcpy(skb_cf->data, cf.data, CAN_ERR_DLC);
			netif_rx(skb);
		}
	}

	netdev_dbg(ndev, "%s: error status register:0x%x\n",
		   __func__, priv->read_reg(priv, XCAN_ESR_OFFSET));
}

/**
 * xcan_state_interrupt - Check the state of the CAN device
 * @ndev: net_device pointer
 * @isr: interrupt status register value
 *
 * This checks the state of the CAN device
 * and puts it into the appropriate state.
 */
static void xcan_state_interrupt(struct net_device *ndev, u32 isr)
{
	struct xcan_priv *priv = netdev_priv(ndev);

	/* Check for Sleep interrupt; if set, put CAN device in sleep state */
	if (isr & XCAN_IXR_SLP_MASK)
		priv->can.state = CAN_STATE_SLEEPING;

	/* Check for Wake up interrupt; if set, put CAN device in Active state */
	if (isr & XCAN_IXR_WKUP_MASK)
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
}

/**
 * xcan_rx_fifo_get_next_frame - Get register offset of next RX frame
 * @priv: Driver private data structure
 *
 * Return: Register offset of the next frame in RX FIFO.
 */
static int xcan_rx_fifo_get_next_frame(struct xcan_priv *priv)
{
	int offset;

	if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) {
		u32 fsr, mask;

		/* clear RXOK before the is-empty check so that any newly
		 * received frame will reassert it without a race
		 */
		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXOK_MASK);

		fsr = priv->read_reg(priv, XCAN_FSR_OFFSET);

		/* check if RX FIFO is empty */
		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			mask = XCAN_2_FSR_FL_MASK;
		else
			mask = XCAN_FSR_FL_MASK;

		if (!(fsr & mask))
			return -ENOENT;

		if (priv->devtype.flags & XCAN_FLAG_CANFD_2)
			offset = XCAN_RXMSG_2_FRAME_OFFSET(fsr & XCAN_2_FSR_RI_MASK);
		else
			offset = XCAN_RXMSG_FRAME_OFFSET(fsr & XCAN_FSR_RI_MASK);
	} else {
		/* check if RX FIFO is empty */
		if (!(priv->read_reg(priv, XCAN_ISR_OFFSET) &
		      XCAN_IXR_RXNEMP_MASK))
			return -ENOENT;

		/* frames are read from a static offset */
		offset = XCAN_RXFIFO_OFFSET;
	}

	return offset;
}

/**
 * xcan_rx_poll - Poll routine for rx packets (NAPI)
 * @napi: napi structure pointer
 * @quota: Max number of rx packets to be processed.
 *
 * This is the poll routine for the rx part.
 * It processes at most quota packets per call.
* * Return: number of packets received */ static int xcan_rx_poll(struct napi_struct *napi, int quota) { struct net_device *ndev = napi->dev; struct xcan_priv *priv = netdev_priv(ndev); u32 ier; int work_done = 0; int frame_offset; while ((frame_offset = xcan_rx_fifo_get_next_frame(priv)) >= 0 && (work_done < quota)) { if (xcan_rx_int_mask(priv) & XCAN_IXR_RXOK_MASK) work_done += xcanfd_rx(ndev, frame_offset); else work_done += xcan_rx(ndev, frame_offset); if (priv->devtype.flags & XCAN_FLAG_RX_FIFO_MULTI) /* increment read index */ priv->write_reg(priv, XCAN_FSR_OFFSET, XCAN_FSR_IRI_MASK); else /* clear rx-not-empty (will actually clear only if * empty) */ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK); } if (work_done) xcan_update_error_state_after_rxtx(ndev); if (work_done < quota) { if (napi_complete_done(napi, work_done)) { ier = priv->read_reg(priv, XCAN_IER_OFFSET); ier |= xcan_rx_int_mask(priv); priv->write_reg(priv, XCAN_IER_OFFSET, ier); } } return work_done; } /** * xcan_tx_interrupt - Tx Done Isr * @ndev: net_device pointer * @isr: Interrupt status register value */ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr) { struct xcan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; unsigned int frames_in_fifo; int frames_sent = 1; /* TXOK => at least 1 frame was sent */ unsigned long flags; int retries = 0; /* Synchronize with xmit as we need to know the exact number * of frames in the FIFO to stay in sync due to the TXFEMP * handling. * This also prevents a race between netif_wake_queue() and * netif_stop_queue(). */ spin_lock_irqsave(&priv->tx_lock, flags); frames_in_fifo = priv->tx_head - priv->tx_tail; if (WARN_ON_ONCE(frames_in_fifo == 0)) { /* clear TXOK anyway to avoid getting back here */ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); spin_unlock_irqrestore(&priv->tx_lock, flags); return; } /* Check if 2 frames were sent (TXOK only means that at least 1 * frame was sent). */ if (frames_in_fifo > 1) { WARN_ON(frames_in_fifo > priv->tx_max); /* Synchronize TXOK and isr so that after the loop: * (1) isr variable is up-to-date at least up to TXOK clear * time. This avoids us clearing a TXOK of a second frame * but not noticing that the FIFO is now empty and thus * marking only a single frame as sent. * (2) No TXOK is left. Having one could mean leaving a * stray TXOK as we might process the associated frame * via TXFEMP handling as we read TXFEMP *after* TXOK * clear to satisfy (1). */ while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) { priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); isr = priv->read_reg(priv, XCAN_ISR_OFFSET); } if (isr & XCAN_IXR_TXFEMP_MASK) { /* nothing in FIFO anymore */ frames_sent = frames_in_fifo; } } else { /* single frame in fifo, just clear TXOK */ priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK); } while (frames_sent--) { stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail % priv->tx_max, NULL); priv->tx_tail++; stats->tx_packets++; } netif_wake_queue(ndev); spin_unlock_irqrestore(&priv->tx_lock, flags); xcan_update_error_state_after_rxtx(ndev); } /** * xcan_interrupt - CAN Isr * @irq: irq number * @dev_id: device id pointer * * This is the xilinx CAN Isr. It checks for the type of interrupt * and invokes the corresponding ISR. 
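 * Note that RX interrupts are only masked here; xcan_rx_poll()
 * re-enables them once the frames have been drained within the
 * NAPI budget.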
* * Return: * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise */ static irqreturn_t xcan_interrupt(int irq, void *dev_id) { struct net_device *ndev = (struct net_device *)dev_id; struct xcan_priv *priv = netdev_priv(ndev); u32 isr, ier; u32 isr_errors; u32 rx_int_mask = xcan_rx_int_mask(priv); /* Get the interrupt status from Xilinx CAN */ isr = priv->read_reg(priv, XCAN_ISR_OFFSET); if (!isr) return IRQ_NONE; /* Check for the type of interrupt and Processing it */ if (isr & (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)) { priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_SLP_MASK | XCAN_IXR_WKUP_MASK)); xcan_state_interrupt(ndev, isr); } /* Check for Tx interrupt and Processing it */ if (isr & XCAN_IXR_TXOK_MASK) xcan_tx_interrupt(ndev, isr); /* Check for the type of error interrupt and Processing it */ isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXMNF_MASK); if (isr_errors) { priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors); xcan_err_interrupt(ndev, isr); } /* Check for the type of receive interrupt and Processing it */ if (isr & rx_int_mask) { ier = priv->read_reg(priv, XCAN_IER_OFFSET); ier &= ~rx_int_mask; priv->write_reg(priv, XCAN_IER_OFFSET, ier); napi_schedule(&priv->napi); } return IRQ_HANDLED; } /** * xcan_chip_stop - Driver stop routine * @ndev: Pointer to net_device structure * * This is the drivers stop routine. It will disable the * interrupts and put the device into configuration mode. */ static void xcan_chip_stop(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); int ret; /* Disable interrupts and leave the can in configuration mode */ ret = set_reset_mode(ndev); if (ret < 0) netdev_dbg(ndev, "set_reset_mode() Failed\n"); priv->can.state = CAN_STATE_STOPPED; } /** * xcan_open - Driver open routine * @ndev: Pointer to net_device structure * * This is the driver open routine. * Return: 0 on success and failure value on error */ static int xcan_open(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); int ret; ret = phy_power_on(priv->transceiver); if (ret) return ret; ret = pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); goto err; } ret = request_irq(ndev->irq, xcan_interrupt, priv->irq_flags, ndev->name, ndev); if (ret < 0) { netdev_err(ndev, "irq allocation for CAN failed\n"); goto err; } /* Set chip into reset mode */ ret = set_reset_mode(ndev); if (ret < 0) { netdev_err(ndev, "mode resetting failed!\n"); goto err_irq; } /* Common open */ ret = open_candev(ndev); if (ret) goto err_irq; ret = xcan_chip_start(ndev); if (ret < 0) { netdev_err(ndev, "xcan_chip_start failed!\n"); goto err_candev; } napi_enable(&priv->napi); netif_start_queue(ndev); return 0; err_candev: close_candev(ndev); err_irq: free_irq(ndev->irq, ndev); err: pm_runtime_put(priv->dev); phy_power_off(priv->transceiver); return ret; } /** * xcan_close - Driver close routine * @ndev: Pointer to net_device structure * * Return: 0 always */ static int xcan_close(struct net_device *ndev) { struct xcan_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); napi_disable(&priv->napi); xcan_chip_stop(ndev); free_irq(ndev->irq, ndev); close_candev(ndev); pm_runtime_put(priv->dev); phy_power_off(priv->transceiver); return 0; } /** * xcan_get_berr_counter - error counter routine * @ndev: Pointer to net_device structure * @bec: Pointer to can_berr_counter structure * * This is the driver error counter routine. 
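 * The ECR register packs both counters: bits [7:0] hold the transmit
 * error count (XCAN_ECR_TEC_MASK) and bits [15:8] the receive error
 * count (XCAN_ECR_REC_MASK).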
* Return: 0 on success and failure value on error */ static int xcan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { struct xcan_priv *priv = netdev_priv(ndev); int ret; ret = pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); pm_runtime_put(priv->dev); return ret; } bec->txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK; bec->rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT); pm_runtime_put(priv->dev); return 0; } /** * xcan_get_auto_tdcv - Get Transmitter Delay Compensation Value * @ndev: Pointer to net_device structure * @tdcv: Pointer to TDCV value * * Return: 0 on success */ static int xcan_get_auto_tdcv(const struct net_device *ndev, u32 *tdcv) { struct xcan_priv *priv = netdev_priv(ndev); *tdcv = FIELD_GET(XCAN_SR_TDCV_MASK, priv->read_reg(priv, XCAN_SR_OFFSET)); return 0; } static const struct net_device_ops xcan_netdev_ops = { .ndo_open = xcan_open, .ndo_stop = xcan_close, .ndo_start_xmit = xcan_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops xcan_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; /** * xcan_suspend - Suspend method for the driver * @dev: Address of the device structure * * Put the driver into low power mode. * Return: 0 on success and failure value on error */ static int __maybe_unused xcan_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); if (netif_running(ndev)) { netif_stop_queue(ndev); netif_device_detach(ndev); xcan_chip_stop(ndev); } return pm_runtime_force_suspend(dev); } /** * xcan_resume - Resume from suspend * @dev: Address of the device structure * * Resume operation after suspend. * Return: 0 on success and failure value on error */ static int __maybe_unused xcan_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); int ret; ret = pm_runtime_force_resume(dev); if (ret) { dev_err(dev, "pm_runtime_force_resume failed on resume\n"); return ret; } if (netif_running(ndev)) { ret = xcan_chip_start(ndev); if (ret) { dev_err(dev, "xcan_chip_start failed on resume\n"); return ret; } netif_device_attach(ndev); netif_start_queue(ndev); } return 0; } /** * xcan_runtime_suspend - Runtime suspend method for the driver * @dev: Address of the device structure * * Put the driver into low power mode. * Return: 0 always */ static int __maybe_unused xcan_runtime_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct xcan_priv *priv = netdev_priv(ndev); clk_disable_unprepare(priv->bus_clk); clk_disable_unprepare(priv->can_clk); return 0; } /** * xcan_runtime_resume - Runtime resume from suspend * @dev: Address of the device structure * * Resume operation after suspend. 
* Return: 0 on success and failure value on error */ static int __maybe_unused xcan_runtime_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct xcan_priv *priv = netdev_priv(ndev); int ret; ret = clk_prepare_enable(priv->bus_clk); if (ret) { dev_err(dev, "Cannot enable clock.\n"); return ret; } ret = clk_prepare_enable(priv->can_clk); if (ret) { dev_err(dev, "Cannot enable clock.\n"); clk_disable_unprepare(priv->bus_clk); return ret; } return 0; } static const struct dev_pm_ops xcan_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(xcan_suspend, xcan_resume) SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL) }; static const struct xcan_devtype_data xcan_zynq_data = { .cantype = XZYNQ_CANPS, .flags = XCAN_FLAG_TXFEMP, .bittiming_const = &xcan_bittiming_const, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, .bus_clk_name = "pclk", }; static const struct xcan_devtype_data xcan_axi_data = { .cantype = XAXI_CAN, .bittiming_const = &xcan_bittiming_const, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT, .bus_clk_name = "s_axi_aclk", }; static const struct xcan_devtype_data xcan_canfd_data = { .cantype = XAXI_CANFD, .flags = XCAN_FLAG_EXT_FILTERS | XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | XCAN_FLAG_RX_FIFO_MULTI, .bittiming_const = &xcan_bittiming_const_canfd, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, .bus_clk_name = "s_axi_aclk", }; static const struct xcan_devtype_data xcan_canfd2_data = { .cantype = XAXI_CANFD_2_0, .flags = XCAN_FLAG_EXT_FILTERS | XCAN_FLAG_RXMNF | XCAN_FLAG_TX_MAILBOXES | XCAN_FLAG_CANFD_2 | XCAN_FLAG_RX_FIFO_MULTI, .bittiming_const = &xcan_bittiming_const_canfd2, .btr_ts2_shift = XCAN_BTR_TS2_SHIFT_CANFD, .btr_sjw_shift = XCAN_BTR_SJW_SHIFT_CANFD, .bus_clk_name = "s_axi_aclk", }; /* Match table for OF platform binding */ static const struct of_device_id xcan_of_match[] = { { .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data }, { .compatible = "xlnx,axi-can-1.00.a", .data = &xcan_axi_data }, { .compatible = "xlnx,canfd-1.0", .data = &xcan_canfd_data }, { .compatible = "xlnx,canfd-2.0", .data = &xcan_canfd2_data }, { /* end of list */ }, }; MODULE_DEVICE_TABLE(of, xcan_of_match); /** * xcan_probe - Platform registration call * @pdev: Handle to the platform device structure * * This function does all the memory allocation and registration for the CAN * device. * * Return: 0 on success and failure value on error */ static int xcan_probe(struct platform_device *pdev) { struct net_device *ndev; struct xcan_priv *priv; struct phy *transceiver; const struct of_device_id *of_id; const struct xcan_devtype_data *devtype = &xcan_axi_data; void __iomem *addr; int ret; int rx_max, tx_max; u32 hw_tx_max = 0, hw_rx_max = 0; const char *hw_tx_max_property; /* Get the virtual base address for the device */ addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) { ret = PTR_ERR(addr); goto err; } of_id = of_match_device(xcan_of_match, &pdev->dev); if (of_id && of_id->data) devtype = of_id->data; hw_tx_max_property = devtype->flags & XCAN_FLAG_TX_MAILBOXES ? 
"tx-mailbox-count" : "tx-fifo-depth"; ret = of_property_read_u32(pdev->dev.of_node, hw_tx_max_property, &hw_tx_max); if (ret < 0) { dev_err(&pdev->dev, "missing %s property\n", hw_tx_max_property); goto err; } ret = of_property_read_u32(pdev->dev.of_node, "rx-fifo-depth", &hw_rx_max); if (ret < 0) { dev_err(&pdev->dev, "missing rx-fifo-depth property (mailbox mode is not supported)\n"); goto err; } /* With TX FIFO: * * There is no way to directly figure out how many frames have been * sent when the TXOK interrupt is processed. If TXFEMP * is supported, we can have 2 frames in the FIFO and use TXFEMP * to determine if 1 or 2 frames have been sent. * Theoretically we should be able to use TXFWMEMP to determine up * to 3 frames, but it seems that after putting a second frame in the * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less * than 2 frames in FIFO) is set anyway with no TXOK (a frame was * sent), which is not a sensible state - possibly TXFWMEMP is not * completely synchronized with the rest of the bits? * * With TX mailboxes: * * HW sends frames in CAN ID priority order. To preserve FIFO ordering * we submit frames one at a time. */ if (!(devtype->flags & XCAN_FLAG_TX_MAILBOXES) && (devtype->flags & XCAN_FLAG_TXFEMP)) tx_max = min(hw_tx_max, 2U); else tx_max = 1; rx_max = hw_rx_max; /* Create a CAN device instance */ ndev = alloc_candev(sizeof(struct xcan_priv), tx_max); if (!ndev) return -ENOMEM; priv = netdev_priv(ndev); priv->dev = &pdev->dev; priv->can.bittiming_const = devtype->bittiming_const; priv->can.do_set_mode = xcan_do_set_mode; priv->can.do_get_berr_counter = xcan_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_BERR_REPORTING; priv->rstc = devm_reset_control_get_optional_exclusive(&pdev->dev, NULL); if (IS_ERR(priv->rstc)) { dev_err(&pdev->dev, "Cannot get CAN reset.\n"); ret = PTR_ERR(priv->rstc); goto err_free; } ret = reset_control_reset(priv->rstc); if (ret) goto err_free; if (devtype->cantype == XAXI_CANFD) { priv->can.data_bittiming_const = &xcan_data_bittiming_const_canfd; priv->can.tdc_const = &xcan_tdc_const_canfd; } if (devtype->cantype == XAXI_CANFD_2_0) { priv->can.data_bittiming_const = &xcan_data_bittiming_const_canfd2; priv->can.tdc_const = &xcan_tdc_const_canfd2; } if (devtype->cantype == XAXI_CANFD || devtype->cantype == XAXI_CANFD_2_0) { priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD | CAN_CTRLMODE_TDC_AUTO; priv->can.do_get_auto_tdcv = xcan_get_auto_tdcv; } priv->reg_base = addr; priv->tx_max = tx_max; priv->devtype = *devtype; spin_lock_init(&priv->tx_lock); /* Get IRQ for the device */ ret = platform_get_irq(pdev, 0); if (ret < 0) goto err_reset; ndev->irq = ret; ndev->flags |= IFF_ECHO; /* We support local echo */ platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); ndev->netdev_ops = &xcan_netdev_ops; ndev->ethtool_ops = &xcan_ethtool_ops; /* Getting the CAN can_clk info */ priv->can_clk = devm_clk_get(&pdev->dev, "can_clk"); if (IS_ERR(priv->can_clk)) { ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->can_clk), "device clock not found\n"); goto err_reset; } priv->bus_clk = devm_clk_get(&pdev->dev, devtype->bus_clk_name); if (IS_ERR(priv->bus_clk)) { ret = dev_err_probe(&pdev->dev, PTR_ERR(priv->bus_clk), "bus clock not found\n"); goto err_reset; } transceiver = devm_phy_optional_get(&pdev->dev, NULL); if (IS_ERR(transceiver)) { ret = PTR_ERR(transceiver); dev_err_probe(&pdev->dev, ret, "failed to get phy\n"); goto err_reset; } priv->transceiver = transceiver; priv->write_reg = 
			 xcan_write_reg_le;
	priv->read_reg = xcan_read_reg_le;

	pm_runtime_enable(&pdev->dev);
	ret = pm_runtime_get_sync(&pdev->dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		goto err_disableclks;
	}

	if (priv->read_reg(priv, XCAN_SR_OFFSET) != XCAN_SR_CONFIG_MASK) {
		priv->write_reg = xcan_write_reg_be;
		priv->read_reg = xcan_read_reg_be;
	}

	priv->can.clock.freq = clk_get_rate(priv->can_clk);

	netif_napi_add_weight(ndev, &priv->napi, xcan_rx_poll, rx_max);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register (err=%d)\n", ret);
		goto err_disableclks;
	}

	of_can_transceiver(ndev);
	pm_runtime_put(&pdev->dev);

	if (priv->devtype.flags & XCAN_FLAG_CANFD_2) {
		priv->write_reg(priv, XCAN_AFR_2_ID_OFFSET, 0x00000000);
		priv->write_reg(priv, XCAN_AFR_2_MASK_OFFSET, 0x00000000);
	}

	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx buffers: actual %d, using %d\n",
		   priv->reg_base, ndev->irq, priv->can.clock.freq,
		   hw_tx_max, priv->tx_max);

	return 0;

err_disableclks:
	pm_runtime_put(priv->dev);
	pm_runtime_disable(&pdev->dev);
err_reset:
	reset_control_assert(priv->rstc);
err_free:
	free_candev(ndev);
err:
	return ret;
}

/**
 * xcan_remove - Unregister the device after releasing the resources
 * @pdev: Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 */
static void xcan_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct xcan_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	reset_control_assert(priv->rstc);
	free_candev(ndev);
}

static struct platform_driver xcan_driver = {
	.probe = xcan_probe,
	.remove_new = xcan_remove,
	.driver	= {
		.name = DRIVER_NAME,
		.pm = &xcan_dev_pm_ops,
		.of_match_table	= xcan_of_match,
	},
};

module_platform_driver(xcan_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Xilinx Inc");
MODULE_DESCRIPTION("Xilinx CAN interface");
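
/* Illustrative device tree node for the Zynq CANPS variant, showing the
 * properties consumed by xcan_probe(). The node name, addresses, interrupt
 * specifier, clock phandles and depths below are made-up example values,
 * not taken from a real board or the binding document:
 *
 *	can0: can@e0008000 {
 *		compatible = "xlnx,zynq-can-1.0";
 *		reg = <0xe0008000 0x1000>;
 *		interrupts = <0 28 4>;
 *		clocks = <&clkc 19>, <&clkc 36>;
 *		clock-names = "can_clk", "pclk";
 *		tx-fifo-depth = <0x40>;
 *		rx-fifo-depth = <0x40>;
 *	};
 */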
linux-master
drivers/net/can/xilinx_can.c
// SPDX-License-Identifier: GPL-2.0-only /* * at91_can.c - CAN network driver for AT91 SoC CAN controller * * (C) 2007 by Hans J. Koch <[email protected]> * (C) 2008, 2009, 2010, 2011 by Marc Kleine-Budde <[email protected]> */ #include <linux/clk.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/if_arp.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/can/dev.h> #include <linux/can/error.h> #define AT91_MB_MASK(i) ((1 << (i)) - 1) /* Common registers */ enum at91_reg { AT91_MR = 0x000, AT91_IER = 0x004, AT91_IDR = 0x008, AT91_IMR = 0x00C, AT91_SR = 0x010, AT91_BR = 0x014, AT91_TIM = 0x018, AT91_TIMESTP = 0x01C, AT91_ECR = 0x020, AT91_TCR = 0x024, AT91_ACR = 0x028, }; /* Mailbox registers (0 <= i <= 15) */ #define AT91_MMR(i) ((enum at91_reg)(0x200 + ((i) * 0x20))) #define AT91_MAM(i) ((enum at91_reg)(0x204 + ((i) * 0x20))) #define AT91_MID(i) ((enum at91_reg)(0x208 + ((i) * 0x20))) #define AT91_MFID(i) ((enum at91_reg)(0x20C + ((i) * 0x20))) #define AT91_MSR(i) ((enum at91_reg)(0x210 + ((i) * 0x20))) #define AT91_MDL(i) ((enum at91_reg)(0x214 + ((i) * 0x20))) #define AT91_MDH(i) ((enum at91_reg)(0x218 + ((i) * 0x20))) #define AT91_MCR(i) ((enum at91_reg)(0x21C + ((i) * 0x20))) /* Register bits */ #define AT91_MR_CANEN BIT(0) #define AT91_MR_LPM BIT(1) #define AT91_MR_ABM BIT(2) #define AT91_MR_OVL BIT(3) #define AT91_MR_TEOF BIT(4) #define AT91_MR_TTM BIT(5) #define AT91_MR_TIMFRZ BIT(6) #define AT91_MR_DRPT BIT(7) #define AT91_SR_RBSY BIT(29) #define AT91_MMR_PRIO_SHIFT (16) #define AT91_MID_MIDE BIT(29) #define AT91_MSR_MRTR BIT(20) #define AT91_MSR_MABT BIT(22) #define AT91_MSR_MRDY BIT(23) #define AT91_MSR_MMI BIT(24) #define AT91_MCR_MRTR BIT(20) #define AT91_MCR_MTCR BIT(23) /* Mailbox Modes */ enum at91_mb_mode { AT91_MB_MODE_DISABLED = 0, AT91_MB_MODE_RX = 1, AT91_MB_MODE_RX_OVRWR = 2, AT91_MB_MODE_TX = 3, AT91_MB_MODE_CONSUMER = 4, AT91_MB_MODE_PRODUCER = 5, }; /* Interrupt mask bits */ #define AT91_IRQ_ERRA BIT(16) #define AT91_IRQ_WARN BIT(17) #define AT91_IRQ_ERRP BIT(18) #define AT91_IRQ_BOFF BIT(19) #define AT91_IRQ_SLEEP BIT(20) #define AT91_IRQ_WAKEUP BIT(21) #define AT91_IRQ_TOVF BIT(22) #define AT91_IRQ_TSTP BIT(23) #define AT91_IRQ_CERR BIT(24) #define AT91_IRQ_SERR BIT(25) #define AT91_IRQ_AERR BIT(26) #define AT91_IRQ_FERR BIT(27) #define AT91_IRQ_BERR BIT(28) #define AT91_IRQ_ERR_ALL (0x1fff0000) #define AT91_IRQ_ERR_FRAME (AT91_IRQ_CERR | AT91_IRQ_SERR | \ AT91_IRQ_AERR | AT91_IRQ_FERR | AT91_IRQ_BERR) #define AT91_IRQ_ERR_LINE (AT91_IRQ_ERRA | AT91_IRQ_WARN | \ AT91_IRQ_ERRP | AT91_IRQ_BOFF) #define AT91_IRQ_ALL (0x1fffffff) enum at91_devtype { AT91_DEVTYPE_SAM9263, AT91_DEVTYPE_SAM9X5, }; struct at91_devtype_data { unsigned int rx_first; unsigned int rx_split; unsigned int rx_last; unsigned int tx_shift; enum at91_devtype type; }; struct at91_priv { struct can_priv can; /* must be the first member! 
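 * (alloc_candev() hands the private area out via netdev_priv() and the
 * can-dev core casts that pointer to struct can_priv, hence offset 0)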
*/ struct napi_struct napi; void __iomem *reg_base; u32 reg_sr; unsigned int tx_next; unsigned int tx_echo; unsigned int rx_next; struct at91_devtype_data devtype_data; struct clk *clk; struct at91_can_data *pdata; canid_t mb0_id; }; static const struct at91_devtype_data at91_at91sam9263_data = { .rx_first = 1, .rx_split = 8, .rx_last = 11, .tx_shift = 2, .type = AT91_DEVTYPE_SAM9263, }; static const struct at91_devtype_data at91_at91sam9x5_data = { .rx_first = 0, .rx_split = 4, .rx_last = 5, .tx_shift = 1, .type = AT91_DEVTYPE_SAM9X5, }; static const struct can_bittiming_const at91_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 4, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 2, .brp_max = 128, .brp_inc = 1, }; #define AT91_IS(_model) \ static inline int __maybe_unused at91_is_sam##_model(const struct at91_priv *priv) \ { \ return priv->devtype_data.type == AT91_DEVTYPE_SAM##_model; \ } AT91_IS(9263); AT91_IS(9X5); static inline unsigned int get_mb_rx_first(const struct at91_priv *priv) { return priv->devtype_data.rx_first; } static inline unsigned int get_mb_rx_last(const struct at91_priv *priv) { return priv->devtype_data.rx_last; } static inline unsigned int get_mb_rx_split(const struct at91_priv *priv) { return priv->devtype_data.rx_split; } static inline unsigned int get_mb_rx_num(const struct at91_priv *priv) { return get_mb_rx_last(priv) - get_mb_rx_first(priv) + 1; } static inline unsigned int get_mb_rx_low_last(const struct at91_priv *priv) { return get_mb_rx_split(priv) - 1; } static inline unsigned int get_mb_rx_low_mask(const struct at91_priv *priv) { return AT91_MB_MASK(get_mb_rx_split(priv)) & ~AT91_MB_MASK(get_mb_rx_first(priv)); } static inline unsigned int get_mb_tx_shift(const struct at91_priv *priv) { return priv->devtype_data.tx_shift; } static inline unsigned int get_mb_tx_num(const struct at91_priv *priv) { return 1 << get_mb_tx_shift(priv); } static inline unsigned int get_mb_tx_first(const struct at91_priv *priv) { return get_mb_rx_last(priv) + 1; } static inline unsigned int get_mb_tx_last(const struct at91_priv *priv) { return get_mb_tx_first(priv) + get_mb_tx_num(priv) - 1; } static inline unsigned int get_next_prio_shift(const struct at91_priv *priv) { return get_mb_tx_shift(priv); } static inline unsigned int get_next_prio_mask(const struct at91_priv *priv) { return 0xf << get_mb_tx_shift(priv); } static inline unsigned int get_next_mb_mask(const struct at91_priv *priv) { return AT91_MB_MASK(get_mb_tx_shift(priv)); } static inline unsigned int get_next_mask(const struct at91_priv *priv) { return get_next_mb_mask(priv) | get_next_prio_mask(priv); } static inline unsigned int get_irq_mb_rx(const struct at91_priv *priv) { return AT91_MB_MASK(get_mb_rx_last(priv) + 1) & ~AT91_MB_MASK(get_mb_rx_first(priv)); } static inline unsigned int get_irq_mb_tx(const struct at91_priv *priv) { return AT91_MB_MASK(get_mb_tx_last(priv) + 1) & ~AT91_MB_MASK(get_mb_tx_first(priv)); } static inline unsigned int get_tx_next_mb(const struct at91_priv *priv) { return (priv->tx_next & get_next_mb_mask(priv)) + get_mb_tx_first(priv); } static inline unsigned int get_tx_next_prio(const struct at91_priv *priv) { return (priv->tx_next >> get_next_prio_shift(priv)) & 0xf; } static inline unsigned int get_tx_echo_mb(const struct at91_priv *priv) { return (priv->tx_echo & get_next_mb_mask(priv)) + get_mb_tx_first(priv); } static inline u32 at91_read(const struct at91_priv *priv, enum at91_reg reg) { return readl_relaxed(priv->reg_base + reg); } static 
inline void at91_write(const struct at91_priv *priv, enum at91_reg reg, u32 value) { writel_relaxed(value, priv->reg_base + reg); } static inline void set_mb_mode_prio(const struct at91_priv *priv, unsigned int mb, enum at91_mb_mode mode, int prio) { at91_write(priv, AT91_MMR(mb), (mode << 24) | (prio << 16)); } static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb, enum at91_mb_mode mode) { set_mb_mode_prio(priv, mb, mode, 0); } static inline u32 at91_can_id_to_reg_mid(canid_t can_id) { u32 reg_mid; if (can_id & CAN_EFF_FLAG) reg_mid = (can_id & CAN_EFF_MASK) | AT91_MID_MIDE; else reg_mid = (can_id & CAN_SFF_MASK) << 18; return reg_mid; } static void at91_setup_mailboxes(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); unsigned int i; u32 reg_mid; /* Due to a chip bug (errata 50.2.6.3 & 50.3.5.3) the first * mailbox is disabled. The next 11 mailboxes are used as a * reception FIFO. The last mailbox is configured with * overwrite option. The overwrite flag indicates a FIFO * overflow. */ reg_mid = at91_can_id_to_reg_mid(priv->mb0_id); for (i = 0; i < get_mb_rx_first(priv); i++) { set_mb_mode(priv, i, AT91_MB_MODE_DISABLED); at91_write(priv, AT91_MID(i), reg_mid); at91_write(priv, AT91_MCR(i), 0x0); /* clear dlc */ } for (i = get_mb_rx_first(priv); i < get_mb_rx_last(priv); i++) set_mb_mode(priv, i, AT91_MB_MODE_RX); set_mb_mode(priv, get_mb_rx_last(priv), AT91_MB_MODE_RX_OVRWR); /* reset acceptance mask and id register */ for (i = get_mb_rx_first(priv); i <= get_mb_rx_last(priv); i++) { at91_write(priv, AT91_MAM(i), 0x0); at91_write(priv, AT91_MID(i), AT91_MID_MIDE); } /* The last 4 mailboxes are used for transmitting. */ for (i = get_mb_tx_first(priv); i <= get_mb_tx_last(priv); i++) set_mb_mode_prio(priv, i, AT91_MB_MODE_TX, 0); /* Reset tx and rx helper pointers */ priv->tx_next = priv->tx_echo = 0; priv->rx_next = get_mb_rx_first(priv); } static int at91_set_bittiming(struct net_device *dev) { const struct at91_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; u32 reg_br; reg_br = ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
1 << 24 : 0) | ((bt->brp - 1) << 16) | ((bt->sjw - 1) << 12) | ((bt->prop_seg - 1) << 8) | ((bt->phase_seg1 - 1) << 4) | ((bt->phase_seg2 - 1) << 0); netdev_info(dev, "writing AT91_BR: 0x%08x\n", reg_br); at91_write(priv, AT91_BR, reg_br); return 0; } static int at91_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { const struct at91_priv *priv = netdev_priv(dev); u32 reg_ecr = at91_read(priv, AT91_ECR); bec->rxerr = reg_ecr & 0xff; bec->txerr = reg_ecr >> 16; return 0; } static void at91_chip_start(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); u32 reg_mr, reg_ier; /* disable interrupts */ at91_write(priv, AT91_IDR, AT91_IRQ_ALL); /* disable chip */ reg_mr = at91_read(priv, AT91_MR); at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN); at91_set_bittiming(dev); at91_setup_mailboxes(dev); /* enable chip */ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) reg_mr = AT91_MR_CANEN | AT91_MR_ABM; else reg_mr = AT91_MR_CANEN; at91_write(priv, AT91_MR, reg_mr); priv->can.state = CAN_STATE_ERROR_ACTIVE; /* Enable interrupts */ reg_ier = get_irq_mb_rx(priv) | AT91_IRQ_ERRP | AT91_IRQ_ERR_FRAME; at91_write(priv, AT91_IDR, AT91_IRQ_ALL); at91_write(priv, AT91_IER, reg_ier); } static void at91_chip_stop(struct net_device *dev, enum can_state state) { struct at91_priv *priv = netdev_priv(dev); u32 reg_mr; /* disable interrupts */ at91_write(priv, AT91_IDR, AT91_IRQ_ALL); reg_mr = at91_read(priv, AT91_MR); at91_write(priv, AT91_MR, reg_mr & ~AT91_MR_CANEN); priv->can.state = state; } /* theory of operation: * * According to the datasheet priority 0 is the highest priority, 15 * is the lowest. If two mailboxes have the same priority level the * message of the mailbox with the lowest number is sent first. * * We use the first TX mailbox (AT91_MB_TX_FIRST) with prio 0, then * the next mailbox with prio 0, and so on, until all mailboxes are * used. Then we start from the beginning with mailbox * AT91_MB_TX_FIRST, but with prio 1, mailbox AT91_MB_TX_FIRST + 1 * prio 1. When we reach the last mailbox with prio 15, we have to * stop sending, waiting for all messages to be delivered, then start * again with mailbox AT91_MB_TX_FIRST prio 0. * * We use the priv->tx_next as counter for the next transmission * mailbox, but without the offset AT91_MB_TX_FIRST. The lower bits * encode the mailbox number, the upper 4 bits the mailbox priority: * * priv->tx_next = (prio << get_next_prio_shift(priv)) | * (mb - get_mb_tx_first(priv)); * */ static netdev_tx_t at91_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); struct can_frame *cf = (struct can_frame *)skb->data; unsigned int mb, prio; u32 reg_mid, reg_mcr; if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; mb = get_tx_next_mb(priv); prio = get_tx_next_prio(priv); if (unlikely(!(at91_read(priv, AT91_MSR(mb)) & AT91_MSR_MRDY))) { netif_stop_queue(dev); netdev_err(dev, "BUG! TX buffer full when queue awake!\n"); return NETDEV_TX_BUSY; } reg_mid = at91_can_id_to_reg_mid(cf->can_id); reg_mcr = ((cf->can_id & CAN_RTR_FLAG) ? 
		  AT91_MCR_MRTR : 0) | (cf->len << 16) | AT91_MCR_MTCR;

	/* disable MB while writing ID (see datasheet) */
	set_mb_mode(priv, mb, AT91_MB_MODE_DISABLED);
	at91_write(priv, AT91_MID(mb), reg_mid);
	set_mb_mode_prio(priv, mb, AT91_MB_MODE_TX, prio);

	at91_write(priv, AT91_MDL(mb), *(u32 *)(cf->data + 0));
	at91_write(priv, AT91_MDH(mb), *(u32 *)(cf->data + 4));

	/* This triggers transmission */
	at91_write(priv, AT91_MCR(mb), reg_mcr);

	/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
	can_put_echo_skb(skb, dev, mb - get_mb_tx_first(priv), 0);

	/* we have to stop the queue and deliver all messages in case
	 * of a prio+mb counter wrap around. This is the case if
	 * tx_next buffer prio and mailbox equals 0.
	 *
	 * also stop the queue if next buffer is still in use
	 * (== not ready)
	 */
	priv->tx_next++;
	if (!(at91_read(priv, AT91_MSR(get_tx_next_mb(priv))) &
	      AT91_MSR_MRDY) ||
	    (priv->tx_next & get_next_mask(priv)) == 0)
		netif_stop_queue(dev);

	/* Enable interrupt for this mailbox */
	at91_write(priv, AT91_IER, 1 << mb);

	return NETDEV_TX_OK;
}

/**
 * at91_activate_rx_low - activate lower rx mailboxes
 * @priv: at91 context
 *
 * Reenables the lower mailboxes for reception of new CAN messages
 */
static inline void at91_activate_rx_low(const struct at91_priv *priv)
{
	u32 mask = get_mb_rx_low_mask(priv);

	at91_write(priv, AT91_TCR, mask);
}

/**
 * at91_activate_rx_mb - reactivate a single rx mailbox
 * @priv: at91 context
 * @mb: mailbox to reactivate
 *
 * Reenables the given mailbox for reception of new CAN messages
 */
static inline void at91_activate_rx_mb(const struct at91_priv *priv,
				       unsigned int mb)
{
	u32 mask = 1 << mb;

	at91_write(priv, AT91_TCR, mask);
}

/**
 * at91_rx_overflow_err - send error frame due to rx overflow
 * @dev: net device
 */
static void at91_rx_overflow_err(struct net_device *dev)
{
	struct net_device_stats *stats = &dev->stats;
	struct sk_buff *skb;
	struct can_frame *cf;

	netdev_dbg(dev, "RX buffer overflow\n");
	stats->rx_over_errors++;
	stats->rx_errors++;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return;

	cf->can_id |= CAN_ERR_CRTL;
	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

	netif_receive_skb(skb);
}

/**
 * at91_read_mb - read CAN msg from mailbox (lowlevel impl)
 * @dev: net device
 * @mb: mailbox number to read from
 * @cf: can frame where to store message
 *
 * Reads a CAN message from the given mailbox and stores data into
 * the given can frame. "mb" and "cf" must be valid.
 */
static void at91_read_mb(struct net_device *dev, unsigned int mb,
			 struct can_frame *cf)
{
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr, reg_mid;

	reg_mid = at91_read(priv, AT91_MID(mb));
	if (reg_mid & AT91_MID_MIDE)
		cf->can_id = ((reg_mid >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (reg_mid >> 18) & CAN_SFF_MASK;

	reg_msr = at91_read(priv, AT91_MSR(mb));
	cf->len = can_cc_dlc2len((reg_msr >> 16) & 0xf);

	if (reg_msr & AT91_MSR_MRTR) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		*(u32 *)(cf->data + 0) = at91_read(priv, AT91_MDL(mb));
		*(u32 *)(cf->data + 4) = at91_read(priv, AT91_MDH(mb));
	}

	/* allow RX of extended frames */
	at91_write(priv, AT91_MID(mb), AT91_MID_MIDE);

	if (unlikely(mb == get_mb_rx_last(priv) && reg_msr & AT91_MSR_MMI))
		at91_rx_overflow_err(dev);
}

/**
 * at91_read_msg - read CAN message from mailbox
 * @dev: net device
 * @mb: mail box to read from
 *
 * Reads a CAN message from the given mailbox, puts it into the linux
 * network RX queue and does all the housekeeping chores (stats, ...)
 */
static void at91_read_msg(struct net_device *dev, unsigned int mb)
{
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	skb = alloc_can_skb(dev, &cf);
	if (unlikely(!skb)) {
		stats->rx_dropped++;
		return;
	}

	at91_read_mb(dev, mb, cf);

	stats->rx_packets++;
	if (!(cf->can_id & CAN_RTR_FLAG))
		stats->rx_bytes += cf->len;

	netif_receive_skb(skb);
}

/**
 * at91_poll_rx - read multiple CAN messages from mailboxes
 * @dev: net device
 * @quota: max number of packets we're allowed to receive
 *
 * Theory of Operation:
 *
 * About 3/4 of the mailboxes (get_mb_rx_first()...get_mb_rx_last())
 * on the chip are reserved for RX. We split them into 2 groups. The
 * lower group ranges from get_mb_rx_first() to get_mb_rx_low_last().
 *
 * Like it or not, the chip always saves a received CAN message into
 * the first free mailbox it finds (starting with the lowest). This
 * makes it very difficult to read the messages in the right order
 * from the chip. This is how we work around that problem:
 *
 * The first message goes into mb nr. 1 and issues an interrupt. All
 * rx ints are disabled in the interrupt handler and a napi poll is
 * scheduled. We read the mailbox, but do _not_ re-enable the mb (to
 * receive another message).
 *
 *  lower mbxs        upper
 *   ____^______      __^__
 *  /           \    /     \
 * +-+-+-+-+-+-+-+-++-+-+-+-+
 * | |x|x|x|x|x|x|x|| | | | |
 * +-+-+-+-+-+-+-+-++-+-+-+-+
 *  0 0 0 0 0 0 0 0  0 0 1 1  \ mail
 *  0 1 2 3 4 5 6 7  8 9 0 1  / box
 *  ^
 *  |
 *   \
 *     unused, due to chip bug
 *
 * The variable priv->rx_next points to the next mailbox to read a
 * message from. As long as we're in the lower mailboxes we just read
 * a mailbox but do not re-enable it.
 *
 * With completion of the last of the lower mailboxes, we re-enable the
 * whole first group, but continue to look for filled mailboxes in the
 * upper mailboxes. Imagine the second group like overflow mailboxes,
 * which take CAN messages if the lower group is full. While in the
 * upper group we re-enable each mailbox right after reading it, giving
 * the chip more room to store messages.
 *
 * After finishing we look again in the lower group if we still have
 * quota left.
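 *
 * Worked example (illustrative only, using the 12-mailbox layout from
 * the diagram above, i.e. lower group = mb 1..7, upper group =
 * mb 8..11): four frames A, B, C, D arrive back-to-back. A lands in
 * mb 1, B in mb 2, and so on. We read mb 1..4 in order without
 * re-enabling them, so a fifth frame E must land in mb 5 and ordering
 * is preserved. Only once mb 7 has been read is the whole lower group
 * re-enabled at once; frames that arrived in the meantime sit in the
 * upper group and are drained (and re-enabled individually) afterwards.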
 */
static int at91_poll_rx(struct net_device *dev, int quota)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_sr = at91_read(priv, AT91_SR);
	const unsigned long *addr = (unsigned long *)&reg_sr;
	unsigned int mb;
	int received = 0;

	if (priv->rx_next > get_mb_rx_low_last(priv) &&
	    reg_sr & get_mb_rx_low_mask(priv))
		netdev_info(dev,
			    "order of incoming frames cannot be guaranteed\n");

again:
	for (mb = find_next_bit(addr, get_mb_tx_first(priv), priv->rx_next);
	     mb < get_mb_tx_first(priv) && quota > 0;
	     reg_sr = at91_read(priv, AT91_SR),
	     mb = find_next_bit(addr, get_mb_tx_first(priv), ++priv->rx_next)) {
		at91_read_msg(dev, mb);

		/* reactivate mailboxes */
		if (mb == get_mb_rx_low_last(priv))
			/* all lower mailboxes, if we just finished it */
			at91_activate_rx_low(priv);
		else if (mb > get_mb_rx_low_last(priv))
			/* only the mailbox we read */
			at91_activate_rx_mb(priv, mb);

		received++;
		quota--;
	}

	/* upper group completed, look again in lower */
	if (priv->rx_next > get_mb_rx_low_last(priv) &&
	    mb > get_mb_rx_last(priv)) {
		priv->rx_next = get_mb_rx_first(priv);
		if (quota > 0)
			goto again;
	}

	return received;
}

static void at91_poll_err_frame(struct net_device *dev,
				struct can_frame *cf, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);

	/* CRC error */
	if (reg_sr & AT91_IRQ_CERR) {
		netdev_dbg(dev, "CERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
	}

	/* Stuffing Error */
	if (reg_sr & AT91_IRQ_SERR) {
		netdev_dbg(dev, "SERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_STUFF;
	}

	/* Acknowledgement Error */
	if (reg_sr & AT91_IRQ_AERR) {
		netdev_dbg(dev, "AERR irq\n");
		dev->stats.tx_errors++;
		cf->can_id |= CAN_ERR_ACK;
	}

	/* Form error */
	if (reg_sr & AT91_IRQ_FERR) {
		netdev_dbg(dev, "FERR irq\n");
		dev->stats.rx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_FORM;
	}

	/* Bit Error */
	if (reg_sr & AT91_IRQ_BERR) {
		netdev_dbg(dev, "BERR irq\n");
		dev->stats.tx_errors++;
		priv->can.can_stats.bus_error++;
		cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
		cf->data[2] |= CAN_ERR_PROT_BIT;
	}
}

static int at91_poll_err(struct net_device *dev, int quota, u32 reg_sr)
{
	struct sk_buff *skb;
	struct can_frame *cf;

	if (quota == 0)
		return 0;

	skb = alloc_can_err_skb(dev, &cf);
	if (unlikely(!skb))
		return 0;

	at91_poll_err_frame(dev, cf, reg_sr);

	netif_receive_skb(skb);

	return 1;
}

static int at91_poll(struct napi_struct *napi, int quota)
{
	struct net_device *dev = napi->dev;
	const struct at91_priv *priv = netdev_priv(dev);
	u32 reg_sr = at91_read(priv, AT91_SR);
	int work_done = 0;

	if (reg_sr & get_irq_mb_rx(priv))
		work_done += at91_poll_rx(dev, quota - work_done);

	/* The error bits are clear on read,
	 * so use saved value from irq handler.
	 */
	reg_sr |= priv->reg_sr;
	if (reg_sr & AT91_IRQ_ERR_FRAME)
		work_done += at91_poll_err(dev, quota - work_done, reg_sr);

	if (work_done < quota) {
		/* enable IRQs for frame errors and all mailboxes >= rx_next */
		u32 reg_ier = AT91_IRQ_ERR_FRAME;

		reg_ier |= get_irq_mb_rx(priv) & ~AT91_MB_MASK(priv->rx_next);

		napi_complete_done(napi, work_done);
		at91_write(priv, AT91_IER, reg_ier);
	}

	return work_done;
}

/* theory of operation:
 *
 * priv->tx_echo holds the number of the oldest can_frame put for
 * transmission into the hardware, but not yet ACKed by the CAN tx
 * complete IRQ.
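 *
 * Illustrative example of the tx_next/tx_echo encoding (assuming, for
 * the sake of the example, 4 TX mailboxes and thus a prio shift of 2;
 * the real values come from the devtype data): tx_next = 5 = 0b0101
 * encodes prio 1, mailbox offset 1, i.e. hardware mailbox
 * get_mb_tx_first(priv) + 1. The difference tx_next - tx_echo is then
 * the number of frames still in flight, and a wrap of both prio and
 * mailbox offset to 0 is exactly what the queue-stop logic in
 * at91_start_xmit() checks for.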
 *
 * We iterate from priv->tx_echo to priv->tx_next and check if a
 * packet has been transmitted; if so, echo it back to the CAN
 * framework. If we discover a not yet transmitted packet, stop
 * looking for more.
 */
static void at91_irq_tx(struct net_device *dev, u32 reg_sr)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_msr;
	unsigned int mb;

	/* masking of reg_sr not needed, already done by at91_irq */

	for (/* nix */; (priv->tx_next - priv->tx_echo) > 0; priv->tx_echo++) {
		mb = get_tx_echo_mb(priv);

		/* no event in mailbox? */
		if (!(reg_sr & (1 << mb)))
			break;

		/* Disable irq for this TX mailbox */
		at91_write(priv, AT91_IDR, 1 << mb);

		/* only echo if mailbox signals us a transfer
		 * complete (MSR_MRDY). Otherwise it's a transfer
		 * abort. "can_bus_off()" takes care of the skbs
		 * parked in the echo queue.
		 */
		reg_msr = at91_read(priv, AT91_MSR(mb));
		if (likely(reg_msr & AT91_MSR_MRDY &&
			   ~reg_msr & AT91_MSR_MABT)) {
			/* _NOTE_: subtract AT91_MB_TX_FIRST offset from mb! */
			dev->stats.tx_bytes +=
				can_get_echo_skb(dev,
						 mb - get_mb_tx_first(priv),
						 NULL);
			dev->stats.tx_packets++;
		}
	}

	/* restart queue if we don't have a wrap around but restart if
	 * we get a TX int for the last can frame directly before a
	 * wrap around.
	 */
	if ((priv->tx_next & get_next_mask(priv)) != 0 ||
	    (priv->tx_echo & get_next_mask(priv)) == 0)
		netif_wake_queue(dev);
}

static void at91_irq_err_state(struct net_device *dev,
			       struct can_frame *cf, enum can_state new_state)
{
	struct at91_priv *priv = netdev_priv(dev);
	u32 reg_idr = 0, reg_ier = 0;
	struct can_berr_counter bec;

	at91_get_berr_counter(dev, &bec);

	switch (priv->can.state) {
	case CAN_STATE_ERROR_ACTIVE:
		/* from: ERROR_ACTIVE
		 * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
		 * =>  : there was a warning int
		 */
		if (new_state >= CAN_STATE_ERROR_WARNING &&
		    new_state <= CAN_STATE_BUS_OFF) {
			netdev_dbg(dev, "Error Warning IRQ\n");
			priv->can.can_stats.error_warning++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (bec.txerr > bec.rxerr) ?
				CAN_ERR_CRTL_TX_WARNING :
				CAN_ERR_CRTL_RX_WARNING;
		}
		fallthrough;
	case CAN_STATE_ERROR_WARNING:
		/* from: ERROR_ACTIVE, ERROR_WARNING
		 * to  : ERROR_PASSIVE, BUS_OFF
		 * =>  : error passive int
		 */
		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
		    new_state <= CAN_STATE_BUS_OFF) {
			netdev_dbg(dev, "Error Passive IRQ\n");
			priv->can.can_stats.error_passive++;

			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] = (bec.txerr > bec.rxerr) ?
				CAN_ERR_CRTL_TX_PASSIVE :
				CAN_ERR_CRTL_RX_PASSIVE;
		}
		break;
	case CAN_STATE_BUS_OFF:
		/* from: BUS_OFF
		 * to  : ERROR_ACTIVE, ERROR_WARNING, ERROR_PASSIVE
		 */
		if (new_state <= CAN_STATE_ERROR_PASSIVE) {
			cf->can_id |= CAN_ERR_RESTARTED;

			netdev_dbg(dev, "restarted\n");
			priv->can.can_stats.restarts++;

			netif_carrier_on(dev);
			netif_wake_queue(dev);
		}
		break;
	default:
		break;
	}

	/* process state changes depending on the new state */
	switch (new_state) {
	case CAN_STATE_ERROR_ACTIVE:
		/* actually we want to enable AT91_IRQ_WARN here, but
		 * it screws up the system under certain
		 * circumstances.
so just enable AT91_IRQ_ERRP, thus * the "fallthrough" */ netdev_dbg(dev, "Error Active\n"); cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; fallthrough; case CAN_STATE_ERROR_WARNING: reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_BOFF; reg_ier = AT91_IRQ_ERRP; break; case CAN_STATE_ERROR_PASSIVE: reg_idr = AT91_IRQ_ERRA | AT91_IRQ_WARN | AT91_IRQ_ERRP; reg_ier = AT91_IRQ_BOFF; break; case CAN_STATE_BUS_OFF: reg_idr = AT91_IRQ_ERRA | AT91_IRQ_ERRP | AT91_IRQ_WARN | AT91_IRQ_BOFF; reg_ier = 0; cf->can_id |= CAN_ERR_BUSOFF; netdev_dbg(dev, "bus-off\n"); netif_carrier_off(dev); priv->can.can_stats.bus_off++; /* turn off chip, if restart is disabled */ if (!priv->can.restart_ms) { at91_chip_stop(dev, CAN_STATE_BUS_OFF); return; } break; default: break; } at91_write(priv, AT91_IDR, reg_idr); at91_write(priv, AT91_IER, reg_ier); } static int at91_get_state_by_bec(const struct net_device *dev, enum can_state *state) { struct can_berr_counter bec; int err; err = at91_get_berr_counter(dev, &bec); if (err) return err; if (bec.txerr < 96 && bec.rxerr < 96) *state = CAN_STATE_ERROR_ACTIVE; else if (bec.txerr < 128 && bec.rxerr < 128) *state = CAN_STATE_ERROR_WARNING; else if (bec.txerr < 256 && bec.rxerr < 256) *state = CAN_STATE_ERROR_PASSIVE; else *state = CAN_STATE_BUS_OFF; return 0; } static void at91_irq_err(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); struct sk_buff *skb; struct can_frame *cf; enum can_state new_state; u32 reg_sr; int err; if (at91_is_sam9263(priv)) { reg_sr = at91_read(priv, AT91_SR); /* we need to look at the unmasked reg_sr */ if (unlikely(reg_sr & AT91_IRQ_BOFF)) { new_state = CAN_STATE_BUS_OFF; } else if (unlikely(reg_sr & AT91_IRQ_ERRP)) { new_state = CAN_STATE_ERROR_PASSIVE; } else if (unlikely(reg_sr & AT91_IRQ_WARN)) { new_state = CAN_STATE_ERROR_WARNING; } else if (likely(reg_sr & AT91_IRQ_ERRA)) { new_state = CAN_STATE_ERROR_ACTIVE; } else { netdev_err(dev, "BUG! hardware in undefined state\n"); return; } } else { err = at91_get_state_by_bec(dev, &new_state); if (err) return; } /* state hasn't changed */ if (likely(new_state == priv->can.state)) return; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return; at91_irq_err_state(dev, cf, new_state); netif_rx(skb); priv->can.state = new_state; } /* interrupt handler */ static irqreturn_t at91_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; struct at91_priv *priv = netdev_priv(dev); irqreturn_t handled = IRQ_NONE; u32 reg_sr, reg_imr; reg_sr = at91_read(priv, AT91_SR); reg_imr = at91_read(priv, AT91_IMR); /* Ignore masked interrupts */ reg_sr &= reg_imr; if (!reg_sr) goto exit; handled = IRQ_HANDLED; /* Receive or error interrupt? -> napi */ if (reg_sr & (get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME)) { /* The error bits are clear on read, * save for later use. 
*/ priv->reg_sr = reg_sr; at91_write(priv, AT91_IDR, get_irq_mb_rx(priv) | AT91_IRQ_ERR_FRAME); napi_schedule(&priv->napi); } /* Transmission complete interrupt */ if (reg_sr & get_irq_mb_tx(priv)) at91_irq_tx(dev, reg_sr); at91_irq_err(dev); exit: return handled; } static int at91_open(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); int err; err = clk_prepare_enable(priv->clk); if (err) return err; /* check or determine and set bittime */ err = open_candev(dev); if (err) goto out; /* register interrupt handler */ if (request_irq(dev->irq, at91_irq, IRQF_SHARED, dev->name, dev)) { err = -EAGAIN; goto out_close; } /* start chip and queuing */ at91_chip_start(dev); napi_enable(&priv->napi); netif_start_queue(dev); return 0; out_close: close_candev(dev); out: clk_disable_unprepare(priv->clk); return err; } /* stop CAN bus activity */ static int at91_close(struct net_device *dev) { struct at91_priv *priv = netdev_priv(dev); netif_stop_queue(dev); napi_disable(&priv->napi); at91_chip_stop(dev, CAN_STATE_STOPPED); free_irq(dev->irq, dev); clk_disable_unprepare(priv->clk); close_candev(dev); return 0; } static int at91_set_mode(struct net_device *dev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: at91_chip_start(dev); netif_wake_queue(dev); break; default: return -EOPNOTSUPP; } return 0; } static const struct net_device_ops at91_netdev_ops = { .ndo_open = at91_open, .ndo_stop = at91_close, .ndo_start_xmit = at91_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops at91_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static ssize_t mb0_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct at91_priv *priv = netdev_priv(to_net_dev(dev)); if (priv->mb0_id & CAN_EFF_FLAG) return sysfs_emit(buf, "0x%08x\n", priv->mb0_id); else return sysfs_emit(buf, "0x%03x\n", priv->mb0_id); } static ssize_t mb0_id_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct net_device *ndev = to_net_dev(dev); struct at91_priv *priv = netdev_priv(ndev); unsigned long can_id; ssize_t ret; int err; rtnl_lock(); if (ndev->flags & IFF_UP) { ret = -EBUSY; goto out; } err = kstrtoul(buf, 0, &can_id); if (err) { ret = err; goto out; } if (can_id & CAN_EFF_FLAG) can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; else can_id &= CAN_SFF_MASK; priv->mb0_id = can_id; ret = count; out: rtnl_unlock(); return ret; } static DEVICE_ATTR_RW(mb0_id); static struct attribute *at91_sysfs_attrs[] = { &dev_attr_mb0_id.attr, NULL, }; static const struct attribute_group at91_sysfs_attr_group = { .attrs = at91_sysfs_attrs, }; #if defined(CONFIG_OF) static const struct of_device_id at91_can_dt_ids[] = { { .compatible = "atmel,at91sam9x5-can", .data = &at91_at91sam9x5_data, }, { .compatible = "atmel,at91sam9263-can", .data = &at91_at91sam9263_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, at91_can_dt_ids); #endif static const struct at91_devtype_data *at91_can_get_driver_data(struct platform_device *pdev) { if (pdev->dev.of_node) { const struct of_device_id *match; match = of_match_node(at91_can_dt_ids, pdev->dev.of_node); if (!match) { dev_err(&pdev->dev, "no matching node found in dtb\n"); return NULL; } return (const struct at91_devtype_data *)match->data; } return (const struct at91_devtype_data *) platform_get_device_id(pdev)->driver_data; } static int at91_can_probe(struct platform_device *pdev) { const struct at91_devtype_data *devtype_data; struct net_device *dev; struct at91_priv *priv; struct resource *res; 
struct clk *clk; void __iomem *addr; int err, irq; devtype_data = at91_can_get_driver_data(pdev); if (!devtype_data) { dev_err(&pdev->dev, "no driver data\n"); err = -ENODEV; goto exit; } clk = clk_get(&pdev->dev, "can_clk"); if (IS_ERR(clk)) { dev_err(&pdev->dev, "no clock defined\n"); err = -ENODEV; goto exit; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || irq <= 0) { err = -ENODEV; goto exit_put; } if (!request_mem_region(res->start, resource_size(res), pdev->name)) { err = -EBUSY; goto exit_put; } addr = ioremap(res->start, resource_size(res)); if (!addr) { err = -ENOMEM; goto exit_release; } dev = alloc_candev(sizeof(struct at91_priv), 1 << devtype_data->tx_shift); if (!dev) { err = -ENOMEM; goto exit_iounmap; } dev->netdev_ops = &at91_netdev_ops; dev->ethtool_ops = &at91_ethtool_ops; dev->irq = irq; dev->flags |= IFF_ECHO; priv = netdev_priv(dev); priv->can.clock.freq = clk_get_rate(clk); priv->can.bittiming_const = &at91_bittiming_const; priv->can.do_set_mode = at91_set_mode; priv->can.do_get_berr_counter = at91_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY; priv->reg_base = addr; priv->devtype_data = *devtype_data; priv->clk = clk; priv->pdata = dev_get_platdata(&pdev->dev); priv->mb0_id = 0x7ff; netif_napi_add_weight(dev, &priv->napi, at91_poll, get_mb_rx_num(priv)); if (at91_is_sam9263(priv)) dev->sysfs_groups[0] = &at91_sysfs_attr_group; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_candev(dev); if (err) { dev_err(&pdev->dev, "registering netdev failed\n"); goto exit_free; } dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n", priv->reg_base, dev->irq); return 0; exit_free: free_candev(dev); exit_iounmap: iounmap(addr); exit_release: release_mem_region(res->start, resource_size(res)); exit_put: clk_put(clk); exit: return err; } static void at91_can_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct at91_priv *priv = netdev_priv(dev); struct resource *res; unregister_netdev(dev); iounmap(priv->reg_base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); clk_put(priv->clk); free_candev(dev); } static const struct platform_device_id at91_can_id_table[] = { { .name = "at91sam9x5_can", .driver_data = (kernel_ulong_t)&at91_at91sam9x5_data, }, { .name = "at91_can", .driver_data = (kernel_ulong_t)&at91_at91sam9263_data, }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, at91_can_id_table); static struct platform_driver at91_can_driver = { .probe = at91_can_probe, .remove_new = at91_can_remove, .driver = { .name = KBUILD_MODNAME, .of_match_table = of_match_ptr(at91_can_dt_ids), }, .id_table = at91_can_id_table, }; module_platform_driver(at91_can_driver); MODULE_AUTHOR("Marc Kleine-Budde <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");
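/* Illustrative device tree node for this driver. This is a sketch only:
 * the compatible string and the "can_clk" clock name match the lookups
 * done in at91_can_probe() above, but the unit address, reg size,
 * interrupt specifier and clock phandle are made-up placeholder values.
 *
 *	can0: can@f000c000 {
 *		compatible = "atmel,at91sam9x5-can";
 *		reg = <0xf000c000 0x300>;
 *		interrupts = <29 4>;
 *		clocks = <&can_clk>;
 *		clock-names = "can_clk";
 *	};
 */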
linux-master
drivers/net/can/at91_can.c
/* * sun4i_can.c - CAN bus controller driver for Allwinner SUN4I&SUN7I based SoCs * * Copyright (C) 2013 Peter Chen * Copyright (C) 2015 Gerhard Bertelsmann * All rights reserved. * * Parts of this software are based on (derived from) the SJA1000 code by: * Copyright (C) 2014 Oliver Hartkopp <[email protected]> * Copyright (C) 2007 Wolfgang Grandegger <[email protected]> * Copyright (C) 2002-2007 Volkswagen Group Electronic Research * Copyright (C) 2003 Matthias Brukner, Trajet Gmbh, Rebenring 33, * 38106 Braunschweig, GERMANY * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Volkswagen nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
* */ #include <linux/netdevice.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/ethtool.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/io.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/reset.h> #define DRV_NAME "sun4i_can" /* Registers address (physical base address 0x01C2BC00) */ #define SUN4I_REG_MSEL_ADDR 0x0000 /* CAN Mode Select */ #define SUN4I_REG_CMD_ADDR 0x0004 /* CAN Command */ #define SUN4I_REG_STA_ADDR 0x0008 /* CAN Status */ #define SUN4I_REG_INT_ADDR 0x000c /* CAN Interrupt Flag */ #define SUN4I_REG_INTEN_ADDR 0x0010 /* CAN Interrupt Enable */ #define SUN4I_REG_BTIME_ADDR 0x0014 /* CAN Bus Timing 0 */ #define SUN4I_REG_TEWL_ADDR 0x0018 /* CAN Tx Error Warning Limit */ #define SUN4I_REG_ERRC_ADDR 0x001c /* CAN Error Counter */ #define SUN4I_REG_RMCNT_ADDR 0x0020 /* CAN Receive Message Counter */ #define SUN4I_REG_RBUFSA_ADDR 0x0024 /* CAN Receive Buffer Start Address */ #define SUN4I_REG_BUF0_ADDR 0x0040 /* CAN Tx/Rx Buffer 0 */ #define SUN4I_REG_BUF1_ADDR 0x0044 /* CAN Tx/Rx Buffer 1 */ #define SUN4I_REG_BUF2_ADDR 0x0048 /* CAN Tx/Rx Buffer 2 */ #define SUN4I_REG_BUF3_ADDR 0x004c /* CAN Tx/Rx Buffer 3 */ #define SUN4I_REG_BUF4_ADDR 0x0050 /* CAN Tx/Rx Buffer 4 */ #define SUN4I_REG_BUF5_ADDR 0x0054 /* CAN Tx/Rx Buffer 5 */ #define SUN4I_REG_BUF6_ADDR 0x0058 /* CAN Tx/Rx Buffer 6 */ #define SUN4I_REG_BUF7_ADDR 0x005c /* CAN Tx/Rx Buffer 7 */ #define SUN4I_REG_BUF8_ADDR 0x0060 /* CAN Tx/Rx Buffer 8 */ #define SUN4I_REG_BUF9_ADDR 0x0064 /* CAN Tx/Rx Buffer 9 */ #define SUN4I_REG_BUF10_ADDR 0x0068 /* CAN Tx/Rx Buffer 10 */ #define SUN4I_REG_BUF11_ADDR 0x006c /* CAN Tx/Rx Buffer 11 */ #define SUN4I_REG_BUF12_ADDR 0x0070 /* CAN Tx/Rx Buffer 12 */ #define SUN4I_REG_ACPC_ADDR 0x0040 /* CAN Acceptance Code 0 */ #define SUN4I_REG_ACPM_ADDR 0x0044 /* CAN Acceptance Mask 0 */ #define SUN4I_REG_ACPC_ADDR_D1 0x0028 /* CAN Acceptance Code 0 on the D1 */ #define SUN4I_REG_ACPM_ADDR_D1 0x002C /* CAN Acceptance Mask 0 on the D1 */ #define SUN4I_REG_RBUF_RBACK_START_ADDR 0x0180 /* CAN transmit buffer start */ #define SUN4I_REG_RBUF_RBACK_END_ADDR 0x01b0 /* CAN transmit buffer end */ /* Controller Register Description */ /* mode select register (r/w) * offset:0x0000 default:0x0000_0001 */ #define SUN4I_MSEL_SLEEP_MODE (0x01 << 4) /* write in reset mode */ #define SUN4I_MSEL_WAKE_UP (0x00 << 4) #define SUN4I_MSEL_SINGLE_FILTER (0x01 << 3) /* write in reset mode */ #define SUN4I_MSEL_DUAL_FILTERS (0x00 << 3) #define SUN4I_MSEL_LOOPBACK_MODE BIT(2) #define SUN4I_MSEL_LISTEN_ONLY_MODE BIT(1) #define SUN4I_MSEL_RESET_MODE BIT(0) /* command register (w) * offset:0x0004 default:0x0000_0000 */ #define SUN4I_CMD_BUS_OFF_REQ BIT(5) #define SUN4I_CMD_SELF_RCV_REQ BIT(4) #define SUN4I_CMD_CLEAR_OR_FLAG BIT(3) #define SUN4I_CMD_RELEASE_RBUF BIT(2) #define SUN4I_CMD_ABORT_REQ BIT(1) #define SUN4I_CMD_TRANS_REQ BIT(0) /* status register (r) * offset:0x0008 default:0x0000_003c */ #define SUN4I_STA_BIT_ERR (0x00 << 22) #define SUN4I_STA_FORM_ERR (0x01 << 22) #define SUN4I_STA_STUFF_ERR (0x02 << 22) #define SUN4I_STA_OTHER_ERR (0x03 << 22) #define SUN4I_STA_MASK_ERR (0x03 << 22) #define SUN4I_STA_ERR_DIR BIT(21) #define SUN4I_STA_ERR_SEG_CODE (0x1f << 16) #define SUN4I_STA_START (0x03 << 16) #define SUN4I_STA_ID28_21 (0x02 << 16) #define SUN4I_STA_ID20_18 (0x06 << 16) #define SUN4I_STA_SRTR (0x04 << 16) #define SUN4I_STA_IDE (0x05 << 16) 
#define SUN4I_STA_ID17_13 (0x07 << 16) #define SUN4I_STA_ID12_5 (0x0f << 16) #define SUN4I_STA_ID4_0 (0x0e << 16) #define SUN4I_STA_RTR (0x0c << 16) #define SUN4I_STA_RB1 (0x0d << 16) #define SUN4I_STA_RB0 (0x09 << 16) #define SUN4I_STA_DLEN (0x0b << 16) #define SUN4I_STA_DATA_FIELD (0x0a << 16) #define SUN4I_STA_CRC_SEQUENCE (0x08 << 16) #define SUN4I_STA_CRC_DELIMITER (0x18 << 16) #define SUN4I_STA_ACK (0x19 << 16) #define SUN4I_STA_ACK_DELIMITER (0x1b << 16) #define SUN4I_STA_END (0x1a << 16) #define SUN4I_STA_INTERMISSION (0x12 << 16) #define SUN4I_STA_ACTIVE_ERROR (0x11 << 16) #define SUN4I_STA_PASSIVE_ERROR (0x16 << 16) #define SUN4I_STA_TOLERATE_DOMINANT_BITS (0x13 << 16) #define SUN4I_STA_ERROR_DELIMITER (0x17 << 16) #define SUN4I_STA_OVERLOAD (0x1c << 16) #define SUN4I_STA_BUS_OFF BIT(7) #define SUN4I_STA_ERR_STA BIT(6) #define SUN4I_STA_TRANS_BUSY BIT(5) #define SUN4I_STA_RCV_BUSY BIT(4) #define SUN4I_STA_TRANS_OVER BIT(3) #define SUN4I_STA_TBUF_RDY BIT(2) #define SUN4I_STA_DATA_ORUN BIT(1) #define SUN4I_STA_RBUF_RDY BIT(0) /* interrupt register (r) * offset:0x000c default:0x0000_0000 */ #define SUN4I_INT_BUS_ERR BIT(7) #define SUN4I_INT_ARB_LOST BIT(6) #define SUN4I_INT_ERR_PASSIVE BIT(5) #define SUN4I_INT_WAKEUP BIT(4) #define SUN4I_INT_DATA_OR BIT(3) #define SUN4I_INT_ERR_WRN BIT(2) #define SUN4I_INT_TBUF_VLD BIT(1) #define SUN4I_INT_RBUF_VLD BIT(0) /* interrupt enable register (r/w) * offset:0x0010 default:0x0000_0000 */ #define SUN4I_INTEN_BERR BIT(7) #define SUN4I_INTEN_ARB_LOST BIT(6) #define SUN4I_INTEN_ERR_PASSIVE BIT(5) #define SUN4I_INTEN_WAKEUP BIT(4) #define SUN4I_INTEN_OR BIT(3) #define SUN4I_INTEN_ERR_WRN BIT(2) #define SUN4I_INTEN_TX BIT(1) #define SUN4I_INTEN_RX BIT(0) /* error code */ #define SUN4I_ERR_INRCV (0x1 << 5) #define SUN4I_ERR_INTRANS (0x0 << 5) /* filter mode */ #define SUN4I_FILTER_CLOSE 0 #define SUN4I_SINGLE_FLTER_MODE 1 #define SUN4I_DUAL_FILTER_MODE 2 /* message buffer flags */ #define SUN4I_MSG_EFF_FLAG BIT(7) #define SUN4I_MSG_RTR_FLAG BIT(6) /* max. number of interrupts handled in ISR */ #define SUN4I_CAN_MAX_IRQ 20 #define SUN4I_MODE_MAX_RETRIES 100 /** * struct sun4ican_quirks - Differences between SoC variants. * * @has_reset: SoC needs reset deasserted. 
* @acp_offset: Offset of ACPC and ACPM registers */ struct sun4ican_quirks { bool has_reset; int acp_offset; }; struct sun4ican_priv { struct can_priv can; void __iomem *base; struct clk *clk; struct reset_control *reset; spinlock_t cmdreg_lock; /* lock for concurrent cmd register writes */ int acp_offset; }; static const struct can_bittiming_const sun4ican_bittiming_const = { .name = DRV_NAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; static void sun4i_can_write_cmdreg(struct sun4ican_priv *priv, u8 val) { unsigned long flags; spin_lock_irqsave(&priv->cmdreg_lock, flags); writel(val, priv->base + SUN4I_REG_CMD_ADDR); spin_unlock_irqrestore(&priv->cmdreg_lock, flags); } static int set_normal_mode(struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); int retry = SUN4I_MODE_MAX_RETRIES; u32 mod_reg_val = 0; do { mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR); mod_reg_val &= ~SUN4I_MSEL_RESET_MODE; writel(mod_reg_val, priv->base + SUN4I_REG_MSEL_ADDR); } while (retry-- && (mod_reg_val & SUN4I_MSEL_RESET_MODE)); if (readl(priv->base + SUN4I_REG_MSEL_ADDR) & SUN4I_MSEL_RESET_MODE) { netdev_err(dev, "setting controller into normal mode failed!\n"); return -ETIMEDOUT; } return 0; } static int set_reset_mode(struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); int retry = SUN4I_MODE_MAX_RETRIES; u32 mod_reg_val = 0; do { mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR); mod_reg_val |= SUN4I_MSEL_RESET_MODE; writel(mod_reg_val, priv->base + SUN4I_REG_MSEL_ADDR); } while (retry-- && !(mod_reg_val & SUN4I_MSEL_RESET_MODE)); if (!(readl(priv->base + SUN4I_REG_MSEL_ADDR) & SUN4I_MSEL_RESET_MODE)) { netdev_err(dev, "setting controller into reset mode failed!\n"); return -ETIMEDOUT; } return 0; } /* bittiming is called in reset_mode only */ static int sun4ican_set_bittiming(struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); struct can_bittiming *bt = &priv->can.bittiming; u32 cfg; cfg = ((bt->brp - 1) & 0x3FF) | (((bt->sjw - 1) & 0x3) << 14) | (((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) << 16) | (((bt->phase_seg2 - 1) & 0x7) << 20); if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) cfg |= 0x800000; netdev_dbg(dev, "setting BITTIMING=0x%08x\n", cfg); writel(cfg, priv->base + SUN4I_REG_BTIME_ADDR); return 0; } static int sun4ican_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct sun4ican_priv *priv = netdev_priv(dev); u32 errors; int err; err = clk_prepare_enable(priv->clk); if (err) { netdev_err(dev, "could not enable clock\n"); return err; } errors = readl(priv->base + SUN4I_REG_ERRC_ADDR); bec->txerr = errors & 0xFF; bec->rxerr = (errors >> 16) & 0xFF; clk_disable_unprepare(priv->clk); return 0; } static int sun4i_can_start(struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); int err; u32 mod_reg_val; /* we need to enter the reset mode */ err = set_reset_mode(dev); if (err) { netdev_err(dev, "could not enter reset mode\n"); return err; } /* set filters - we accept all */ writel(0x00000000, priv->base + SUN4I_REG_ACPC_ADDR + priv->acp_offset); writel(0xFFFFFFFF, priv->base + SUN4I_REG_ACPM_ADDR + priv->acp_offset); /* clear error counters and error code capture */ writel(0, priv->base + SUN4I_REG_ERRC_ADDR); /* enable interrupts */ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) writel(0xFF, priv->base + SUN4I_REG_INTEN_ADDR); else writel(0xFF & ~SUN4I_INTEN_BERR, priv->base + 
SUN4I_REG_INTEN_ADDR); /* enter the selected mode */ mod_reg_val = readl(priv->base + SUN4I_REG_MSEL_ADDR); if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) mod_reg_val |= SUN4I_MSEL_LOOPBACK_MODE; else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) mod_reg_val |= SUN4I_MSEL_LISTEN_ONLY_MODE; writel(mod_reg_val, priv->base + SUN4I_REG_MSEL_ADDR); err = sun4ican_set_bittiming(dev); if (err) return err; /* we are ready to enter the normal mode */ err = set_normal_mode(dev); if (err) { netdev_err(dev, "could not enter normal mode\n"); return err; } priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; } static int sun4i_can_stop(struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); int err; priv->can.state = CAN_STATE_STOPPED; /* we need to enter reset mode */ err = set_reset_mode(dev); if (err) { netdev_err(dev, "could not enter reset mode\n"); return err; } /* disable all interrupts */ writel(0, priv->base + SUN4I_REG_INTEN_ADDR); return 0; } static int sun4ican_set_mode(struct net_device *dev, enum can_mode mode) { int err; switch (mode) { case CAN_MODE_START: err = sun4i_can_start(dev); if (err) { netdev_err(dev, "starting CAN controller failed!\n"); return err; } if (netif_queue_stopped(dev)) netif_wake_queue(dev); break; default: return -EOPNOTSUPP; } return 0; } /* transmit a CAN message * message layout in the sk_buff should be like this: * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 * [ can_id ] [flags] [len] [can data (up to 8 bytes] */ static netdev_tx_t sun4ican_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); struct can_frame *cf = (struct can_frame *)skb->data; u8 dlc; u32 dreg, msg_flag_n; canid_t id; int i; if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); id = cf->can_id; dlc = cf->len; msg_flag_n = dlc; if (id & CAN_RTR_FLAG) msg_flag_n |= SUN4I_MSG_RTR_FLAG; if (id & CAN_EFF_FLAG) { msg_flag_n |= SUN4I_MSG_EFF_FLAG; dreg = SUN4I_REG_BUF5_ADDR; writel((id >> 21) & 0xFF, priv->base + SUN4I_REG_BUF1_ADDR); writel((id >> 13) & 0xFF, priv->base + SUN4I_REG_BUF2_ADDR); writel((id >> 5) & 0xFF, priv->base + SUN4I_REG_BUF3_ADDR); writel((id << 3) & 0xF8, priv->base + SUN4I_REG_BUF4_ADDR); } else { dreg = SUN4I_REG_BUF3_ADDR; writel((id >> 3) & 0xFF, priv->base + SUN4I_REG_BUF1_ADDR); writel((id << 5) & 0xE0, priv->base + SUN4I_REG_BUF2_ADDR); } for (i = 0; i < dlc; i++) writel(cf->data[i], priv->base + (dreg + i * 4)); writel(msg_flag_n, priv->base + SUN4I_REG_BUF0_ADDR); can_put_echo_skb(skb, dev, 0, 0); if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) sun4i_can_write_cmdreg(priv, SUN4I_CMD_SELF_RCV_REQ); else sun4i_can_write_cmdreg(priv, SUN4I_CMD_TRANS_REQ); return NETDEV_TX_OK; } static void sun4i_can_rx(struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; u8 fi; u32 dreg; canid_t id; int i; /* create zero'ed CAN frame buffer */ skb = alloc_can_skb(dev, &cf); if (!skb) return; fi = readl(priv->base + SUN4I_REG_BUF0_ADDR); cf->len = can_cc_dlc2len(fi & 0x0F); if (fi & SUN4I_MSG_EFF_FLAG) { dreg = SUN4I_REG_BUF5_ADDR; id = (readl(priv->base + SUN4I_REG_BUF1_ADDR) << 21) | (readl(priv->base + SUN4I_REG_BUF2_ADDR) << 13) | (readl(priv->base + SUN4I_REG_BUF3_ADDR) << 5) | ((readl(priv->base + SUN4I_REG_BUF4_ADDR) >> 3) & 0x1f); id |= CAN_EFF_FLAG; } else { dreg = SUN4I_REG_BUF3_ADDR; id = (readl(priv->base + SUN4I_REG_BUF1_ADDR) << 3) | ((readl(priv->base + 
SUN4I_REG_BUF2_ADDR) >> 5) & 0x7); } /* remote frame ? */ if (fi & SUN4I_MSG_RTR_FLAG) { id |= CAN_RTR_FLAG; } else { for (i = 0; i < cf->len; i++) cf->data[i] = readl(priv->base + dreg + i * 4); stats->rx_bytes += cf->len; } stats->rx_packets++; cf->can_id = id; sun4i_can_write_cmdreg(priv, SUN4I_CMD_RELEASE_RBUF); netif_rx(skb); } static int sun4i_can_err(struct net_device *dev, u8 isrc, u8 status) { struct sun4ican_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; enum can_state state = priv->can.state; enum can_state rx_state, tx_state; unsigned int rxerr, txerr, errc; u32 ecc, alc; /* we don't skip if alloc fails because we want the stats anyhow */ skb = alloc_can_err_skb(dev, &cf); errc = readl(priv->base + SUN4I_REG_ERRC_ADDR); rxerr = (errc >> 16) & 0xFF; txerr = errc & 0xFF; if (isrc & SUN4I_INT_DATA_OR) { /* data overrun interrupt */ netdev_dbg(dev, "data overrun interrupt\n"); if (likely(skb)) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; } stats->rx_over_errors++; stats->rx_errors++; /* reset the CAN IP by entering reset mode * ignoring timeout error */ set_reset_mode(dev); set_normal_mode(dev); /* clear bit */ sun4i_can_write_cmdreg(priv, SUN4I_CMD_CLEAR_OR_FLAG); } if (isrc & SUN4I_INT_ERR_WRN) { /* error warning interrupt */ netdev_dbg(dev, "error warning interrupt\n"); if (status & SUN4I_STA_BUS_OFF) state = CAN_STATE_BUS_OFF; else if (status & SUN4I_STA_ERR_STA) state = CAN_STATE_ERROR_WARNING; else state = CAN_STATE_ERROR_ACTIVE; } if (skb && state != CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; } if (isrc & SUN4I_INT_BUS_ERR) { /* bus error interrupt */ netdev_dbg(dev, "bus error interrupt\n"); priv->can.can_stats.bus_error++; stats->rx_errors++; if (likely(skb)) { ecc = readl(priv->base + SUN4I_REG_STA_ADDR); cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (ecc & SUN4I_STA_MASK_ERR) { case SUN4I_STA_BIT_ERR: cf->data[2] |= CAN_ERR_PROT_BIT; break; case SUN4I_STA_FORM_ERR: cf->data[2] |= CAN_ERR_PROT_FORM; break; case SUN4I_STA_STUFF_ERR: cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: cf->data[3] = (ecc & SUN4I_STA_ERR_SEG_CODE) >> 16; break; } /* error occurred during transmission? */ if ((ecc & SUN4I_STA_ERR_DIR) == 0) cf->data[2] |= CAN_ERR_PROT_TX; } } if (isrc & SUN4I_INT_ERR_PASSIVE) { /* error passive interrupt */ netdev_dbg(dev, "error passive interrupt\n"); if (state == CAN_STATE_ERROR_PASSIVE) state = CAN_STATE_ERROR_WARNING; else state = CAN_STATE_ERROR_PASSIVE; } if (isrc & SUN4I_INT_ARB_LOST) { /* arbitration lost interrupt */ netdev_dbg(dev, "arbitration lost interrupt\n"); alc = readl(priv->base + SUN4I_REG_STA_ADDR); priv->can.can_stats.arbitration_lost++; if (likely(skb)) { cf->can_id |= CAN_ERR_LOSTARB; cf->data[0] = (alc >> 8) & 0x1f; } } if (state != priv->can.state) { tx_state = txerr >= rxerr ? state : 0; rx_state = txerr <= rxerr ? 
state : 0; if (likely(skb)) can_change_state(dev, cf, tx_state, rx_state); else priv->can.state = state; if (state == CAN_STATE_BUS_OFF) can_bus_off(dev); } if (likely(skb)) netif_rx(skb); else return -ENOMEM; return 0; } static irqreturn_t sun4i_can_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct sun4ican_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; u8 isrc, status; int n = 0; while ((isrc = readl(priv->base + SUN4I_REG_INT_ADDR)) && (n < SUN4I_CAN_MAX_IRQ)) { n++; status = readl(priv->base + SUN4I_REG_STA_ADDR); if (isrc & SUN4I_INT_WAKEUP) netdev_warn(dev, "wakeup interrupt\n"); if (isrc & SUN4I_INT_TBUF_VLD) { /* transmission complete interrupt */ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); stats->tx_packets++; netif_wake_queue(dev); } if ((isrc & SUN4I_INT_RBUF_VLD) && !(isrc & SUN4I_INT_DATA_OR)) { /* receive interrupt - don't read if overrun occurred */ while (status & SUN4I_STA_RBUF_RDY) { /* RX buffer is not empty */ sun4i_can_rx(dev); status = readl(priv->base + SUN4I_REG_STA_ADDR); } } if (isrc & (SUN4I_INT_DATA_OR | SUN4I_INT_ERR_WRN | SUN4I_INT_BUS_ERR | SUN4I_INT_ERR_PASSIVE | SUN4I_INT_ARB_LOST)) { /* error interrupt */ if (sun4i_can_err(dev, isrc, status)) netdev_err(dev, "can't allocate buffer - clearing pending interrupts\n"); } /* clear interrupts */ writel(isrc, priv->base + SUN4I_REG_INT_ADDR); readl(priv->base + SUN4I_REG_INT_ADDR); } if (n >= SUN4I_CAN_MAX_IRQ) netdev_dbg(dev, "%d messages handled in ISR", n); return (n) ? IRQ_HANDLED : IRQ_NONE; } static int sun4ican_open(struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); int err; /* common open */ err = open_candev(dev); if (err) return err; /* register interrupt handler */ err = request_irq(dev->irq, sun4i_can_interrupt, 0, dev->name, dev); if (err) { netdev_err(dev, "request_irq err: %d\n", err); goto exit_irq; } /* software reset deassert */ err = reset_control_deassert(priv->reset); if (err) { netdev_err(dev, "could not deassert CAN reset\n"); goto exit_soft_reset; } /* turn on clocking for CAN peripheral block */ err = clk_prepare_enable(priv->clk); if (err) { netdev_err(dev, "could not enable CAN peripheral clock\n"); goto exit_clock; } err = sun4i_can_start(dev); if (err) { netdev_err(dev, "could not start CAN peripheral\n"); goto exit_can_start; } netif_start_queue(dev); return 0; exit_can_start: clk_disable_unprepare(priv->clk); exit_clock: reset_control_assert(priv->reset); exit_soft_reset: free_irq(dev->irq, dev); exit_irq: close_candev(dev); return err; } static int sun4ican_close(struct net_device *dev) { struct sun4ican_priv *priv = netdev_priv(dev); netif_stop_queue(dev); sun4i_can_stop(dev); clk_disable_unprepare(priv->clk); reset_control_assert(priv->reset); free_irq(dev->irq, dev); close_candev(dev); return 0; } static const struct net_device_ops sun4ican_netdev_ops = { .ndo_open = sun4ican_open, .ndo_stop = sun4ican_close, .ndo_start_xmit = sun4ican_start_xmit, }; static const struct ethtool_ops sun4ican_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const struct sun4ican_quirks sun4ican_quirks_a10 = { .has_reset = false, .acp_offset = 0, }; static const struct sun4ican_quirks sun4ican_quirks_r40 = { .has_reset = true, .acp_offset = 0, }; static const struct sun4ican_quirks sun4ican_quirks_d1 = { .has_reset = true, .acp_offset = (SUN4I_REG_ACPC_ADDR_D1 - SUN4I_REG_ACPC_ADDR), }; static const struct of_device_id sun4ican_of_match[] = { { .compatible = 
"allwinner,sun4i-a10-can", .data = &sun4ican_quirks_a10 }, { .compatible = "allwinner,sun7i-a20-can", .data = &sun4ican_quirks_a10 }, { .compatible = "allwinner,sun8i-r40-can", .data = &sun4ican_quirks_r40 }, { .compatible = "allwinner,sun20i-d1-can", .data = &sun4ican_quirks_d1 }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, sun4ican_of_match); static void sun4ican_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_netdev(dev); free_candev(dev); } static int sun4ican_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct clk *clk; struct reset_control *reset = NULL; void __iomem *addr; int err, irq; struct net_device *dev; struct sun4ican_priv *priv; const struct sun4ican_quirks *quirks; quirks = of_device_get_match_data(&pdev->dev); if (!quirks) { dev_err(&pdev->dev, "failed to determine the quirks to use\n"); err = -ENODEV; goto exit; } if (quirks->has_reset) { reset = devm_reset_control_get_exclusive(&pdev->dev, NULL); if (IS_ERR(reset)) { dev_err(&pdev->dev, "unable to request reset\n"); err = PTR_ERR(reset); goto exit; } } clk = of_clk_get(np, 0); if (IS_ERR(clk)) { dev_err(&pdev->dev, "unable to request clock\n"); err = -ENODEV; goto exit; } irq = platform_get_irq(pdev, 0); if (irq < 0) { err = -ENODEV; goto exit; } addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) { err = PTR_ERR(addr); goto exit; } dev = alloc_candev(sizeof(struct sun4ican_priv), 1); if (!dev) { dev_err(&pdev->dev, "could not allocate memory for CAN device\n"); err = -ENOMEM; goto exit; } dev->netdev_ops = &sun4ican_netdev_ops; dev->ethtool_ops = &sun4ican_ethtool_ops; dev->irq = irq; dev->flags |= IFF_ECHO; priv = netdev_priv(dev); priv->can.clock.freq = clk_get_rate(clk); priv->can.bittiming_const = &sun4ican_bittiming_const; priv->can.do_set_mode = sun4ican_set_mode; priv->can.do_get_berr_counter = sun4ican_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_3_SAMPLES; priv->base = addr; priv->clk = clk; priv->reset = reset; priv->acp_offset = quirks->acp_offset; spin_lock_init(&priv->cmdreg_lock); platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_candev(dev); if (err) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); goto exit_free; } dev_info(&pdev->dev, "device registered (base=%p, irq=%d)\n", priv->base, dev->irq); return 0; exit_free: free_candev(dev); exit: return err; } static struct platform_driver sun4i_can_driver = { .driver = { .name = DRV_NAME, .of_match_table = sun4ican_of_match, }, .probe = sun4ican_probe, .remove_new = sun4ican_remove, }; module_platform_driver(sun4i_can_driver); MODULE_AUTHOR("Peter Chen <[email protected]>"); MODULE_AUTHOR("Gerhard Bertelsmann <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION("CAN driver for Allwinner SoCs (A10/A20/D1)");
linux-master
drivers/net/can/sun4i_can.c
// SPDX-License-Identifier: GPL-2.0-only /* * TI HECC (CAN) device driver * * This driver supports TI's HECC (High End CAN Controller module) and the * specs for the same is available at <http://www.ti.com> * * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ * Copyright (C) 2019 Jeroen Hofstee <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> #include <linux/regulator/consumer.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/rx-offload.h> #define DRV_NAME "ti_hecc" #define HECC_MODULE_VERSION "0.7" MODULE_VERSION(HECC_MODULE_VERSION); #define DRV_DESC "TI High End CAN Controller Driver " HECC_MODULE_VERSION /* TX / RX Mailbox Configuration */ #define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */ #define MAX_TX_PRIO 0x3F /* hardware value - do not change */ /* Important Note: TX mailbox configuration * TX mailboxes should be restricted to the number of SKB buffers to avoid * maintaining SKB buffers separately. TX mailboxes should be a power of 2 * for the mailbox logic to work. Top mailbox numbers are reserved for RX * and lower mailboxes for TX. * * HECC_MAX_TX_MBOX HECC_MB_TX_SHIFT * 4 (default) 2 * 8 3 * 16 4 */ #define HECC_MB_TX_SHIFT 2 /* as per table above */ #define HECC_MAX_TX_MBOX BIT(HECC_MB_TX_SHIFT) #define HECC_TX_PRIO_SHIFT (HECC_MB_TX_SHIFT) #define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT) #define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1) #define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK) /* RX mailbox configuration * * The remaining mailboxes are used for reception and are delivered * based on their timestamp, to avoid a hardware race when CANME is * changed while CAN-bus traffic is being received. 
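 *
 * With the default HECC_MB_TX_SHIFT of 2 this works out to: TX
 * mailboxes 0..3 (HECC_MAX_TX_MBOX = 4) and RX mailboxes 4..31
 * (HECC_MAX_RX_MBOX = 28), with received frames taken from
 * HECC_RX_FIRST_MBOX = 31 down to HECC_RX_LAST_MBOX = 4.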
*/ #define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX) #define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1) #define HECC_RX_LAST_MBOX (HECC_MAX_TX_MBOX) /* TI HECC module registers */ #define HECC_CANME 0x0 /* Mailbox enable */ #define HECC_CANMD 0x4 /* Mailbox direction */ #define HECC_CANTRS 0x8 /* Transmit request set */ #define HECC_CANTRR 0xC /* Transmit request */ #define HECC_CANTA 0x10 /* Transmission acknowledge */ #define HECC_CANAA 0x14 /* Abort acknowledge */ #define HECC_CANRMP 0x18 /* Receive message pending */ #define HECC_CANRML 0x1C /* Receive message lost */ #define HECC_CANRFP 0x20 /* Remote frame pending */ #define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */ #define HECC_CANMC 0x28 /* Master control */ #define HECC_CANBTC 0x2C /* Bit timing configuration */ #define HECC_CANES 0x30 /* Error and status */ #define HECC_CANTEC 0x34 /* Transmit error counter */ #define HECC_CANREC 0x38 /* Receive error counter */ #define HECC_CANGIF0 0x3C /* Global interrupt flag 0 */ #define HECC_CANGIM 0x40 /* Global interrupt mask */ #define HECC_CANGIF1 0x44 /* Global interrupt flag 1 */ #define HECC_CANMIM 0x48 /* Mailbox interrupt mask */ #define HECC_CANMIL 0x4C /* Mailbox interrupt level */ #define HECC_CANOPC 0x50 /* Overwrite protection control */ #define HECC_CANTIOC 0x54 /* Transmit I/O control */ #define HECC_CANRIOC 0x58 /* Receive I/O control */ #define HECC_CANLNT 0x5C /* HECC only: Local network time */ #define HECC_CANTOC 0x60 /* HECC only: Time-out control */ #define HECC_CANTOS 0x64 /* HECC only: Time-out status */ #define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */ #define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */ /* TI HECC RAM registers */ #define HECC_CANMOTS 0x80 /* Message object time stamp */ /* Mailbox registers */ #define HECC_CANMID 0x0 #define HECC_CANMCF 0x4 #define HECC_CANMDL 0x8 #define HECC_CANMDH 0xC #define HECC_SET_REG 0xFFFFFFFF #define HECC_CANID_MASK 0x3FF /* 18 bits mask for extended id's */ #define HECC_CCE_WAIT_COUNT 100 /* Wait for ~1 sec for CCE bit */ #define HECC_CANMC_SCM BIT(13) /* SCC compat mode */ #define HECC_CANMC_CCR BIT(12) /* Change config request */ #define HECC_CANMC_PDR BIT(11) /* Local Power down - for sleep mode */ #define HECC_CANMC_ABO BIT(7) /* Auto Bus On */ #define HECC_CANMC_STM BIT(6) /* Self test mode - loopback */ #define HECC_CANMC_SRES BIT(5) /* Software reset */ #define HECC_CANTIOC_EN BIT(3) /* Enable CAN TX I/O pin */ #define HECC_CANRIOC_EN BIT(3) /* Enable CAN RX I/O pin */ #define HECC_CANMID_IDE BIT(31) /* Extended frame format */ #define HECC_CANMID_AME BIT(30) /* Acceptance mask enable */ #define HECC_CANMID_AAM BIT(29) /* Auto answer mode */ #define HECC_CANES_FE BIT(24) /* form error */ #define HECC_CANES_BE BIT(23) /* bit error */ #define HECC_CANES_SA1 BIT(22) /* stuck at dominant error */ #define HECC_CANES_CRCE BIT(21) /* CRC error */ #define HECC_CANES_SE BIT(20) /* stuff bit error */ #define HECC_CANES_ACKE BIT(19) /* ack error */ #define HECC_CANES_BO BIT(18) /* Bus off status */ #define HECC_CANES_EP BIT(17) /* Error passive status */ #define HECC_CANES_EW BIT(16) /* Error warning status */ #define HECC_CANES_SMA BIT(5) /* suspend mode ack */ #define HECC_CANES_CCE BIT(4) /* Change config enabled */ #define HECC_CANES_PDA BIT(3) /* Power down mode ack */ #define HECC_CANBTC_SAM BIT(7) /* sample points */ #define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\ HECC_CANES_CRCE | HECC_CANES_SE |\ HECC_CANES_ACKE) #define HECC_CANES_FLAGS 
(HECC_BUS_ERROR | HECC_CANES_BO |\ HECC_CANES_EP | HECC_CANES_EW) #define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */ #define HECC_CANGIF_MAIF BIT(17) /* Message alarm interrupt */ #define HECC_CANGIF_TCOIF BIT(16) /* Timer counter overflow int */ #define HECC_CANGIF_GMIF BIT(15) /* Global mailbox interrupt */ #define HECC_CANGIF_AAIF BIT(14) /* Abort ack interrupt */ #define HECC_CANGIF_WDIF BIT(13) /* Write denied interrupt */ #define HECC_CANGIF_WUIF BIT(12) /* Wake up interrupt */ #define HECC_CANGIF_RMLIF BIT(11) /* Receive message lost interrupt */ #define HECC_CANGIF_BOIF BIT(10) /* Bus off interrupt */ #define HECC_CANGIF_EPIF BIT(9) /* Error passive interrupt */ #define HECC_CANGIF_WLIF BIT(8) /* Warning level interrupt */ #define HECC_CANGIF_MBOX_MASK 0x1F /* Mailbox number mask */ #define HECC_CANGIM_I1EN BIT(1) /* Int line 1 enable */ #define HECC_CANGIM_I0EN BIT(0) /* Int line 0 enable */ #define HECC_CANGIM_DEF_MASK 0x700 /* only busoff/warning/passive */ #define HECC_CANGIM_SIL BIT(2) /* system interrupts to int line 1 */ /* CAN Bittiming constants as per HECC specs */ static const struct can_bittiming_const ti_hecc_bittiming_const = { .name = DRV_NAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; struct ti_hecc_priv { struct can_priv can; /* MUST be first member/field */ struct can_rx_offload offload; struct net_device *ndev; struct clk *clk; void __iomem *base; void __iomem *hecc_ram; void __iomem *mbx; bool use_hecc1int; spinlock_t mbx_lock; /* CANME register needs protection */ u32 tx_head; u32 tx_tail; struct regulator *reg_xceiver; }; static inline int get_tx_head_mb(struct ti_hecc_priv *priv) { return priv->tx_head & HECC_TX_MB_MASK; } static inline int get_tx_tail_mb(struct ti_hecc_priv *priv) { return priv->tx_tail & HECC_TX_MB_MASK; } static inline int get_tx_head_prio(struct ti_hecc_priv *priv) { return (priv->tx_head >> HECC_TX_PRIO_SHIFT) & MAX_TX_PRIO; } static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val) { __raw_writel(val, priv->hecc_ram + mbxno * 4); } static inline u32 hecc_read_stamp(struct ti_hecc_priv *priv, u32 mbxno) { return __raw_readl(priv->hecc_ram + HECC_CANMOTS + mbxno * 4); } static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg, u32 val) { __raw_writel(val, priv->mbx + mbxno * 0x10 + reg); } static inline u32 hecc_read_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg) { return __raw_readl(priv->mbx + mbxno * 0x10 + reg); } static inline void hecc_write(struct ti_hecc_priv *priv, u32 reg, u32 val) { __raw_writel(val, priv->base + reg); } static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg) { return __raw_readl(priv->base + reg); } static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask) { hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask); } static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask) { hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask); } static inline u32 hecc_get_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask) { return (hecc_read(priv, reg) & bit_mask) ? 
1 : 0; } static int ti_hecc_set_btc(struct ti_hecc_priv *priv) { struct can_bittiming *bit_timing = &priv->can.bittiming; u32 can_btc; can_btc = (bit_timing->phase_seg2 - 1) & 0x7; can_btc |= ((bit_timing->phase_seg1 + bit_timing->prop_seg - 1) & 0xF) << 3; if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) { if (bit_timing->brp > 4) can_btc |= HECC_CANBTC_SAM; else netdev_warn(priv->ndev, "WARN: Triple sampling not set due to h/w limitations"); } can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8; can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16; /* ERM being set to 0 by default meaning resync at falling edge */ hecc_write(priv, HECC_CANBTC, can_btc); netdev_info(priv->ndev, "setting CANBTC=%#x\n", can_btc); return 0; } static int ti_hecc_transceiver_switch(const struct ti_hecc_priv *priv, int on) { if (!priv->reg_xceiver) return 0; if (on) return regulator_enable(priv->reg_xceiver); else return regulator_disable(priv->reg_xceiver); } static void ti_hecc_reset(struct net_device *ndev) { u32 cnt; struct ti_hecc_priv *priv = netdev_priv(ndev); netdev_dbg(ndev, "resetting hecc ...\n"); hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SRES); /* Set change control request and wait till enabled */ hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR); /* INFO: It has been observed that at times CCE bit may not be * set and hw seems to be ok even if this bit is not set so * timing out with a timing of 1ms to respect the specs */ cnt = HECC_CCE_WAIT_COUNT; while (!hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) { --cnt; udelay(10); } /* Note: On HECC, BTC can be programmed only in initialization mode, so * it is expected that the can bittiming parameters are set via ip * utility before the device is opened */ ti_hecc_set_btc(priv); /* Clear CCR (and CANMC register) and wait for CCE = 0 enable */ hecc_write(priv, HECC_CANMC, 0); /* INFO: CAN net stack handles bus off and hence disabling auto-bus-on * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO); */ /* INFO: It has been observed that at times CCE bit may not be * set and hw seems to be ok even if this bit is not set so */ cnt = HECC_CCE_WAIT_COUNT; while (hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) { --cnt; udelay(10); } /* Enable TX and RX I/O Control pins */ hecc_write(priv, HECC_CANTIOC, HECC_CANTIOC_EN); hecc_write(priv, HECC_CANRIOC, HECC_CANRIOC_EN); /* Clear registers for clean operation */ hecc_write(priv, HECC_CANTA, HECC_SET_REG); hecc_write(priv, HECC_CANRMP, HECC_SET_REG); hecc_write(priv, HECC_CANGIF0, HECC_SET_REG); hecc_write(priv, HECC_CANGIF1, HECC_SET_REG); hecc_write(priv, HECC_CANME, 0); hecc_write(priv, HECC_CANMD, 0); /* SCC compat mode NOT supported (and not needed too) */ hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SCM); } static void ti_hecc_start(struct net_device *ndev) { struct ti_hecc_priv *priv = netdev_priv(ndev); u32 cnt, mbxno, mbx_mask; /* put HECC in initialization mode and set btc */ ti_hecc_reset(ndev); priv->tx_head = HECC_TX_MASK; priv->tx_tail = HECC_TX_MASK; /* Enable local and global acceptance mask registers */ hecc_write(priv, HECC_CANGAM, HECC_SET_REG); /* Prepare configured mailboxes to receive messages */ for (cnt = 0; cnt < HECC_MAX_RX_MBOX; cnt++) { mbxno = HECC_MAX_MAILBOXES - 1 - cnt; mbx_mask = BIT(mbxno); hecc_clear_bit(priv, HECC_CANME, mbx_mask); hecc_write_mbx(priv, mbxno, HECC_CANMID, HECC_CANMID_AME); hecc_write_lam(priv, mbxno, HECC_SET_REG); hecc_set_bit(priv, HECC_CANMD, mbx_mask); hecc_set_bit(priv, HECC_CANME, mbx_mask); hecc_set_bit(priv, HECC_CANMIM, mbx_mask); } /* Enable tx 
interrupts */ hecc_set_bit(priv, HECC_CANMIM, BIT(HECC_MAX_TX_MBOX) - 1); /* Prevent message over-write to create a rx fifo, but not for * the lowest priority mailbox, since that allows detecting * overflows instead of the hardware silently dropping the * messages. */ mbx_mask = ~BIT(HECC_RX_LAST_MBOX); hecc_write(priv, HECC_CANOPC, mbx_mask); /* Enable interrupts */ if (priv->use_hecc1int) { hecc_write(priv, HECC_CANMIL, HECC_SET_REG); hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK | HECC_CANGIM_I1EN | HECC_CANGIM_SIL); } else { hecc_write(priv, HECC_CANMIL, 0); hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN); } priv->can.state = CAN_STATE_ERROR_ACTIVE; } static void ti_hecc_stop(struct net_device *ndev) { struct ti_hecc_priv *priv = netdev_priv(ndev); /* Disable the CPK; stop sending, erroring and acking */ hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR); /* Disable interrupts and disable mailboxes */ hecc_write(priv, HECC_CANGIM, 0); hecc_write(priv, HECC_CANMIM, 0); hecc_write(priv, HECC_CANME, 0); priv->can.state = CAN_STATE_STOPPED; } static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode) { int ret = 0; switch (mode) { case CAN_MODE_START: ti_hecc_start(ndev); netif_wake_queue(ndev); break; default: ret = -EOPNOTSUPP; break; } return ret; } static int ti_hecc_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { struct ti_hecc_priv *priv = netdev_priv(ndev); bec->txerr = hecc_read(priv, HECC_CANTEC); bec->rxerr = hecc_read(priv, HECC_CANREC); return 0; } /* ti_hecc_xmit: HECC Transmit * * The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the * priority of the mailbox for transmission is dependent upon priority setting * field in mailbox registers. The mailbox with highest value in priority field * is transmitted first. Only when two mailboxes have the same value in * priority field the highest numbered mailbox is transmitted first. * * To utilize the HECC priority feature as described above we start with the * highest numbered mailbox with highest priority level and move on to the next * mailbox with the same priority level and so on. Once we loop through all the * transmit mailboxes we choose the next priority level (lower) and so on * until we reach the lowest priority level on the lowest numbered mailbox * when we stop transmission until all mailboxes are transmitted and then * restart at highest numbered mailbox with highest priority. * * Two counters (head and tail) are used to track the next mailbox to transmit * and to track the echo buffer for already transmitted mailbox. The queue * is stopped when all the mailboxes are busy or when there is a priority * value roll-over happens. 
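 *
 * Worked example with the default HECC_MB_TX_SHIFT of 2 (4 TX
 * mailboxes, HECC_TX_MB_MASK = 3): tx_head starts at HECC_TX_MASK =
 * 0xff, i.e. mailbox 3 with priority 0x3f, and is decremented after
 * every transmission: mailbox 2 prio 0x3f, ..., mailbox 0 prio 0x3f,
 * then mailbox 3 prio 0x3e, and so on, until mailbox 0 prio 0 has
 * been queued, at which point the queue is stopped for the roll-over.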
static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 mbxno, mbx_mask, data;
	unsigned long flags;

	if (can_dev_dropped_skb(ndev, skb))
		return NETDEV_TX_OK;

	mbxno = get_tx_head_mb(priv);
	mbx_mask = BIT(mbxno);
	spin_lock_irqsave(&priv->mbx_lock, flags);
	if (unlikely(hecc_read(priv, HECC_CANME) & mbx_mask)) {
		spin_unlock_irqrestore(&priv->mbx_lock, flags);
		netif_stop_queue(ndev);
		netdev_err(priv->ndev,
			   "BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
			   priv->tx_head, priv->tx_tail);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	/* Prepare mailbox for transmission */
	data = cf->len | (get_tx_head_prio(priv) << 8);
	if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
		data |= HECC_CANMCF_RTR;
	hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);

	if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
		data = (cf->can_id & CAN_EFF_MASK) | HECC_CANMID_IDE;
	else /* Standard frame format */
		data = (cf->can_id & CAN_SFF_MASK) << 18;
	hecc_write_mbx(priv, mbxno, HECC_CANMID, data);

	hecc_write_mbx(priv, mbxno, HECC_CANMDL,
		       be32_to_cpu(*(__be32 *)(cf->data)));
	if (cf->len > 4)
		hecc_write_mbx(priv, mbxno, HECC_CANMDH,
			       be32_to_cpu(*(__be32 *)(cf->data + 4)));
	else
		*(u32 *)(cf->data + 4) = 0;
	can_put_echo_skb(skb, ndev, mbxno, 0);

	spin_lock_irqsave(&priv->mbx_lock, flags);
	--priv->tx_head;
	if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
	    (priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
		netif_stop_queue(ndev);
	}
	hecc_set_bit(priv, HECC_CANME, mbx_mask);
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	hecc_write(priv, HECC_CANTRS, mbx_mask);

	return NETDEV_TX_OK;
}

static inline
struct ti_hecc_priv *rx_offload_to_priv(struct can_rx_offload *offload)
{
	return container_of(offload, struct ti_hecc_priv, offload);
}

static struct sk_buff *ti_hecc_mailbox_read(struct can_rx_offload *offload,
					    unsigned int mbxno, u32 *timestamp,
					    bool drop)
{
	struct ti_hecc_priv *priv = rx_offload_to_priv(offload);
	struct sk_buff *skb;
	struct can_frame *cf;
	u32 data, mbx_mask;

	mbx_mask = BIT(mbxno);

	if (unlikely(drop)) {
		skb = ERR_PTR(-ENOBUFS);
		goto mark_as_read;
	}

	skb = alloc_can_skb(offload->dev, &cf);
	if (unlikely(!skb)) {
		skb = ERR_PTR(-ENOMEM);
		goto mark_as_read;
	}

	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
	if (data & HECC_CANMID_IDE)
		cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (data >> 18) & CAN_SFF_MASK;

	data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
	if (data & HECC_CANMCF_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	cf->len = can_cc_dlc2len(data & 0xF);

	data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
	*(__be32 *)(cf->data) = cpu_to_be32(data);
	if (cf->len > 4) {
		data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
		*(__be32 *)(cf->data + 4) = cpu_to_be32(data);
	}

	*timestamp = hecc_read_stamp(priv, mbxno);

	/* Check for FIFO overrun.
	 *
	 * All but the last RX mailbox have activated overwrite
	 * protection. So skip check for overrun, if we're not
	 * handling the last RX mailbox.
	 *
	 * As the overwrite protection for the last RX mailbox is
	 * disabled, the CAN core might update while we're reading
	 * it. This means the skb might be inconsistent.
	 *
	 * Return an error to let rx-offload discard this CAN frame.
	 */
	if (unlikely(mbxno == HECC_RX_LAST_MBOX &&
		     hecc_read(priv, HECC_CANRML) & mbx_mask))
		skb = ERR_PTR(-ENOBUFS);

 mark_as_read:
	hecc_write(priv, HECC_CANRMP, mbx_mask);

	return skb;
}

static int ti_hecc_error(struct net_device *ndev, int int_status,
			 int err_status)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp;
	int err;

	if (err_status & HECC_BUS_ERROR) {
		/* propagate the error condition to the can stack */
		skb = alloc_can_err_skb(ndev, &cf);
		if (!skb) {
			if (net_ratelimit())
				netdev_err(priv->ndev,
					   "%s: alloc_can_err_skb() failed\n",
					   __func__);
			return -ENOMEM;
		}

		++priv->can.can_stats.bus_error;
		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
		if (err_status & HECC_CANES_FE)
			cf->data[2] |= CAN_ERR_PROT_FORM;
		if (err_status & HECC_CANES_BE)
			cf->data[2] |= CAN_ERR_PROT_BIT;
		if (err_status & HECC_CANES_SE)
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		if (err_status & HECC_CANES_CRCE)
			cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
		if (err_status & HECC_CANES_ACKE)
			cf->data[3] = CAN_ERR_PROT_LOC_ACK;

		timestamp = hecc_read(priv, HECC_CANLNT);
		err = can_rx_offload_queue_timestamp(&priv->offload, skb,
						     timestamp);
		if (err)
			ndev->stats.rx_fifo_errors++;
	}

	hecc_write(priv, HECC_CANES, HECC_CANES_FLAGS);

	return 0;
}

static void ti_hecc_change_state(struct net_device *ndev,
				 enum can_state rx_state,
				 enum can_state tx_state)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 timestamp;
	int err;

	skb = alloc_can_err_skb(priv->ndev, &cf);
	if (unlikely(!skb)) {
		priv->can.state = max(tx_state, rx_state);
		return;
	}

	can_change_state(priv->ndev, cf, tx_state, rx_state);

	if (max(tx_state, rx_state) != CAN_STATE_BUS_OFF) {
		cf->can_id |= CAN_ERR_CNT;
		cf->data[6] = hecc_read(priv, HECC_CANTEC);
		cf->data[7] = hecc_read(priv, HECC_CANREC);
	}

	timestamp = hecc_read(priv, HECC_CANLNT);
	err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp);
	if (err)
		ndev->stats.rx_fifo_errors++;
}
static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 mbxno, mbx_mask, int_status, err_status, stamp;
	unsigned long flags, rx_pending;
	u32 handled = 0;

	int_status = hecc_read(priv,
			       priv->use_hecc1int ?
			       HECC_CANGIF1 : HECC_CANGIF0);

	if (!int_status)
		return IRQ_NONE;

	err_status = hecc_read(priv, HECC_CANES);
	if (unlikely(err_status & HECC_CANES_FLAGS))
		ti_hecc_error(ndev, int_status, err_status);

	if (unlikely(int_status & HECC_CANGIM_DEF_MASK)) {
		enum can_state rx_state, tx_state;
		u32 rec = hecc_read(priv, HECC_CANREC);
		u32 tec = hecc_read(priv, HECC_CANTEC);

		if (int_status & HECC_CANGIF_WLIF) {
			handled |= HECC_CANGIF_WLIF;
			rx_state = rec >= tec ? CAN_STATE_ERROR_WARNING : 0;
			tx_state = rec <= tec ? CAN_STATE_ERROR_WARNING : 0;
			netdev_dbg(priv->ndev, "Error Warning interrupt\n");
			ti_hecc_change_state(ndev, rx_state, tx_state);
		}

		if (int_status & HECC_CANGIF_EPIF) {
			handled |= HECC_CANGIF_EPIF;
			rx_state = rec >= tec ? CAN_STATE_ERROR_PASSIVE : 0;
			tx_state = rec <= tec ? CAN_STATE_ERROR_PASSIVE : 0;
			netdev_dbg(priv->ndev, "Error passive interrupt\n");
			ti_hecc_change_state(ndev, rx_state, tx_state);
		}

		if (int_status & HECC_CANGIF_BOIF) {
			handled |= HECC_CANGIF_BOIF;
			rx_state = CAN_STATE_BUS_OFF;
			tx_state = CAN_STATE_BUS_OFF;
			netdev_dbg(priv->ndev, "Bus off interrupt\n");

			/* Disable all interrupts */
			hecc_write(priv, HECC_CANGIM, 0);
			can_bus_off(ndev);
			ti_hecc_change_state(ndev, rx_state, tx_state);
		}
	} else if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE)) {
		enum can_state new_state, tx_state, rx_state;
		u32 rec = hecc_read(priv, HECC_CANREC);
		u32 tec = hecc_read(priv, HECC_CANTEC);

		if (rec >= 128 || tec >= 128)
			new_state = CAN_STATE_ERROR_PASSIVE;
		else if (rec >= 96 || tec >= 96)
			new_state = CAN_STATE_ERROR_WARNING;
		else
			new_state = CAN_STATE_ERROR_ACTIVE;

		if (new_state < priv->can.state) {
			rx_state = rec >= tec ? new_state : 0;
			tx_state = rec <= tec ? new_state : 0;
			ti_hecc_change_state(ndev, rx_state, tx_state);
		}
	}

	if (int_status & HECC_CANGIF_GMIF) {
		while (priv->tx_tail - priv->tx_head > 0) {
			mbxno = get_tx_tail_mb(priv);
			mbx_mask = BIT(mbxno);
			if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
				break;
			hecc_write(priv, HECC_CANTA, mbx_mask);
			spin_lock_irqsave(&priv->mbx_lock, flags);
			hecc_clear_bit(priv, HECC_CANME, mbx_mask);
			spin_unlock_irqrestore(&priv->mbx_lock, flags);
			stamp = hecc_read_stamp(priv, mbxno);
			stats->tx_bytes +=
				can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload,
									    mbxno, stamp, NULL);
			stats->tx_packets++;
			--priv->tx_tail;
		}

		/* restart queue if wrap-up or if queue stalled on last pkt */
		if ((priv->tx_head == priv->tx_tail &&
		     ((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
		    (((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
		     ((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
			netif_wake_queue(ndev);

		/* offload RX mailboxes and let NAPI deliver them */
		while ((rx_pending = hecc_read(priv, HECC_CANRMP))) {
			can_rx_offload_irq_offload_timestamp(&priv->offload,
							     rx_pending);
		}
	}

	/* clear all interrupt conditions - read back to avoid spurious ints */
	if (priv->use_hecc1int) {
		hecc_write(priv, HECC_CANGIF1, handled);
		int_status = hecc_read(priv, HECC_CANGIF1);
	} else {
		hecc_write(priv, HECC_CANGIF0, handled);
		int_status = hecc_read(priv, HECC_CANGIF0);
	}

	can_rx_offload_irq_finish(&priv->offload);

	return IRQ_HANDLED;
}
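/* Reading the queue-restart condition in ti_hecc_interrupt above: the first
 * clause catches a "wrap-up", where all pending mailboxes have been drained
 * (tx_head == tx_tail) somewhere in the middle of the counter space; the
 * second catches the stall that ti_hecc_xmit creates on a priority
 * roll-over, where both counters have been walked back to the HECC_TX_MASK
 * reset value and transmission can safely restart from the highest
 * mailbox/priority again.
 */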
static int ti_hecc_open(struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	int err;

	err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
			  ndev->name, ndev);
	if (err) {
		netdev_err(ndev, "error requesting interrupt\n");
		return err;
	}

	ti_hecc_transceiver_switch(priv, 1);

	/* Open common can device */
	err = open_candev(ndev);
	if (err) {
		netdev_err(ndev, "open_candev() failed %d\n", err);
		ti_hecc_transceiver_switch(priv, 0);
		free_irq(ndev->irq, ndev);
		return err;
	}

	ti_hecc_start(ndev);
	can_rx_offload_enable(&priv->offload);
	netif_start_queue(ndev);

	return 0;
}

static int ti_hecc_close(struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	netif_stop_queue(ndev);
	can_rx_offload_disable(&priv->offload);
	ti_hecc_stop(ndev);
	free_irq(ndev->irq, ndev);
	close_candev(ndev);
	ti_hecc_transceiver_switch(priv, 0);

	return 0;
}

static const struct net_device_ops ti_hecc_netdev_ops = {
	.ndo_open		= ti_hecc_open,
	.ndo_stop		= ti_hecc_close,
	.ndo_start_xmit		= ti_hecc_xmit,
	.ndo_change_mtu		= can_change_mtu,
};

static const struct ethtool_ops ti_hecc_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};

static const struct of_device_id ti_hecc_dt_ids[] = {
	{
		.compatible = "ti,am3517-hecc",
	},
	{ }
};
MODULE_DEVICE_TABLE(of, ti_hecc_dt_ids);

static int ti_hecc_probe(struct platform_device *pdev)
{
	struct net_device *ndev = NULL;
	struct ti_hecc_priv *priv;
	struct device_node *np = pdev->dev.of_node;
	struct regulator *reg_xceiver;
	int err = -ENODEV;

	if (!IS_ENABLED(CONFIG_OF) || !np)
		return -EINVAL;

	reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
	if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
		return -EPROBE_DEFER;
	else if (IS_ERR(reg_xceiver))
		reg_xceiver = NULL;

	ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX);
	if (!ndev) {
		dev_err(&pdev->dev, "alloc_candev failed\n");
		return -ENOMEM;
	}
	priv = netdev_priv(ndev);

	/* handle hecc memory */
	priv->base = devm_platform_ioremap_resource_byname(pdev, "hecc");
	if (IS_ERR(priv->base)) {
		dev_err(&pdev->dev, "hecc ioremap failed\n");
		err = PTR_ERR(priv->base);
		goto probe_exit_candev;
	}

	/* handle hecc-ram memory */
	priv->hecc_ram = devm_platform_ioremap_resource_byname(pdev,
							       "hecc-ram");
	if (IS_ERR(priv->hecc_ram)) {
		dev_err(&pdev->dev, "hecc-ram ioremap failed\n");
		err = PTR_ERR(priv->hecc_ram);
		goto probe_exit_candev;
	}

	/* handle mbx memory */
	priv->mbx = devm_platform_ioremap_resource_byname(pdev, "mbx");
	if (IS_ERR(priv->mbx)) {
		dev_err(&pdev->dev, "mbx ioremap failed\n");
		err = PTR_ERR(priv->mbx);
		goto probe_exit_candev;
	}

	ndev->irq = platform_get_irq(pdev, 0);
	if (ndev->irq < 0) {
		err = ndev->irq;
		goto probe_exit_candev;
	}

	priv->ndev = ndev;
	priv->reg_xceiver = reg_xceiver;
	priv->use_hecc1int = of_property_read_bool(np, "ti,use-hecc1int");

	priv->can.bittiming_const = &ti_hecc_bittiming_const;
	priv->can.do_set_mode = ti_hecc_do_set_mode;
	priv->can.do_get_berr_counter = ti_hecc_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES;

	spin_lock_init(&priv->mbx_lock);
	ndev->flags |= IFF_ECHO;
	platform_set_drvdata(pdev, ndev);
	SET_NETDEV_DEV(ndev, &pdev->dev);
	ndev->netdev_ops = &ti_hecc_netdev_ops;
	ndev->ethtool_ops = &ti_hecc_ethtool_ops;

	priv->clk = clk_get(&pdev->dev, "hecc_ck");
	if (IS_ERR(priv->clk)) {
		dev_err(&pdev->dev, "No clock available\n");
		err = PTR_ERR(priv->clk);
		priv->clk = NULL;
		goto probe_exit_candev;
	}
	priv->can.clock.freq = clk_get_rate(priv->clk);

	err = clk_prepare_enable(priv->clk);
	if (err) {
		dev_err(&pdev->dev, "clk_prepare_enable() failed\n");
		goto probe_exit_release_clk;
	}

	priv->offload.mailbox_read = ti_hecc_mailbox_read;
	priv->offload.mb_first = HECC_RX_FIRST_MBOX;
	priv->offload.mb_last = HECC_RX_LAST_MBOX;
	err = can_rx_offload_add_timestamp(ndev, &priv->offload);
	if (err) {
		dev_err(&pdev->dev, "can_rx_offload_add_timestamp() failed\n");
		goto probe_exit_disable_clk;
	}

	err = register_candev(ndev);
	if (err) {
		dev_err(&pdev->dev, "register_candev() failed\n");
		goto probe_exit_offload;
	}

	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
		 priv->base, (u32)ndev->irq);

	return 0;

probe_exit_offload:
	can_rx_offload_del(&priv->offload);
probe_exit_disable_clk:
	clk_disable_unprepare(priv->clk);
probe_exit_release_clk:
	clk_put(priv->clk);
probe_exit_candev:
	free_candev(ndev);

	return err;
}

static void ti_hecc_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	unregister_candev(ndev);
	clk_disable_unprepare(priv->clk);
	clk_put(priv->clk);
	can_rx_offload_del(&priv->offload);
	free_candev(ndev);
}

#ifdef CONFIG_PM
static int ti_hecc_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ti_hecc_priv *priv = netdev_priv(dev);

	if (netif_running(dev)) {
		netif_stop_queue(dev);
		netif_device_detach(dev);
	}

	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
	priv->can.state = CAN_STATE_SLEEPING;

	clk_disable_unprepare(priv->clk);

	return 0;
}

static int ti_hecc_resume(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct ti_hecc_priv *priv = netdev_priv(dev);
	int err;

	err = clk_prepare_enable(priv->clk);
	if (err)
		return err;

	hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_PDR);
	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	if (netif_running(dev)) {
		netif_device_attach(dev);
		netif_start_queue(dev);
	}

	return 0;
}
#else
#define ti_hecc_suspend NULL
#define ti_hecc_resume NULL
#endif

/* TI HECC netdevice driver: platform driver structure */
static struct platform_driver ti_hecc_driver = {
	.driver = {
		.name = DRV_NAME,
		.of_match_table = ti_hecc_dt_ids,
	},
	.probe = ti_hecc_probe,
	.remove_new = ti_hecc_remove,
	.suspend = ti_hecc_suspend,
	.resume = ti_hecc_resume,
};

module_platform_driver(ti_hecc_driver);

MODULE_AUTHOR("Anant Gole <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(DRV_DESC);
MODULE_ALIAS("platform:" DRV_NAME);
linux-master
drivers/net/can/ti_hecc.c
/* vcan.c - Virtual CAN interface
 *
 * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of Volkswagen nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * The provided data structures and external interfaces from this code
 * are not restricted to be used by modules with a GPL compatible license.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/can.h>
#include <linux/can/can-ml.h>
#include <linux/can/dev.h>
#include <linux/can/skb.h>
#include <linux/slab.h>
#include <net/rtnetlink.h>

#define DRV_NAME "vcan"

MODULE_DESCRIPTION("virtual CAN interface");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Urs Thuermann <[email protected]>");
MODULE_ALIAS_RTNL_LINK(DRV_NAME);

/* CAN test feature:
 * Enable the echo on driver level for testing the CAN core echo modes.
 * See Documentation/networking/can.rst for details.
 */

static bool echo; /* echo testing. Default: 0 (Off) */
module_param(echo, bool, 0444);
MODULE_PARM_DESC(echo, "Echo sent frames (for testing). Default: 0 (Off)");
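/* Typical usage (illustrative shell session, not part of the driver):
 *
 *   modprobe vcan echo=1           # load with driver-level echo enabled
 *   ip link add dev vcan0 type vcan
 *   ip link set up vcan0
 *
 * With echo=0 (the default) the CAN core performs the loopback itself and
 * this driver only accounts the packets.
 */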
Default: 0 (Off)"); static void vcan_rx(struct sk_buff *skb, struct net_device *dev) { struct net_device_stats *stats = &dev->stats; stats->rx_packets++; stats->rx_bytes += can_skb_get_data_len(skb); skb->pkt_type = PACKET_BROADCAST; skb->dev = dev; skb->ip_summed = CHECKSUM_UNNECESSARY; netif_rx(skb); } static netdev_tx_t vcan_tx(struct sk_buff *skb, struct net_device *dev) { struct net_device_stats *stats = &dev->stats; unsigned int len; int loop; if (can_dropped_invalid_skb(dev, skb)) return NETDEV_TX_OK; len = can_skb_get_data_len(skb); stats->tx_packets++; stats->tx_bytes += len; /* set flag whether this packet has to be looped back */ loop = skb->pkt_type == PACKET_LOOPBACK; skb_tx_timestamp(skb); if (!echo) { /* no echo handling available inside this driver */ if (loop) { /* only count the packets here, because the * CAN core already did the echo for us */ stats->rx_packets++; stats->rx_bytes += len; } consume_skb(skb); return NETDEV_TX_OK; } /* perform standard echo handling for CAN network interfaces */ if (loop) { skb = can_create_echo_skb(skb); if (!skb) return NETDEV_TX_OK; /* receive with packet counting */ vcan_rx(skb, dev); } else { /* no looped packets => no counting */ consume_skb(skb); } return NETDEV_TX_OK; } static int vcan_change_mtu(struct net_device *dev, int new_mtu) { /* Do not allow changing the MTU while running */ if (dev->flags & IFF_UP) return -EBUSY; if (new_mtu != CAN_MTU && new_mtu != CANFD_MTU && !can_is_canxl_dev_mtu(new_mtu)) return -EINVAL; dev->mtu = new_mtu; return 0; } static const struct net_device_ops vcan_netdev_ops = { .ndo_start_xmit = vcan_tx, .ndo_change_mtu = vcan_change_mtu, }; static const struct ethtool_ops vcan_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static void vcan_setup(struct net_device *dev) { dev->type = ARPHRD_CAN; dev->mtu = CANFD_MTU; dev->hard_header_len = 0; dev->addr_len = 0; dev->tx_queue_len = 0; dev->flags = IFF_NOARP; can_set_ml_priv(dev, netdev_priv(dev)); /* set flags according to driver capabilities */ if (echo) dev->flags |= IFF_ECHO; dev->netdev_ops = &vcan_netdev_ops; dev->ethtool_ops = &vcan_ethtool_ops; dev->needs_free_netdev = true; } static struct rtnl_link_ops vcan_link_ops __read_mostly = { .kind = DRV_NAME, .priv_size = sizeof(struct can_ml_priv), .setup = vcan_setup, }; static __init int vcan_init_module(void) { pr_info("Virtual CAN interface driver\n"); if (echo) pr_info("enabled echo on driver level.\n"); return rtnl_link_register(&vcan_link_ops); } static __exit void vcan_cleanup_module(void) { rtnl_link_unregister(&vcan_link_ops); } module_init(vcan_init_module); module_exit(vcan_cleanup_module);
linux-master
drivers/net/can/vcan.c
// SPDX-License-Identifier: GPL-2.0-or-later
/* Socket CAN driver for Aeroflex Gaisler GRCAN and GRHCAN.
 *
 * 2012 (c) Aeroflex Gaisler AB
 *
 * This driver supports GRCAN and GRHCAN CAN controllers available in the GRLIB
 * VHDL IP core library.
 *
 * Full documentation of the GRCAN core can be found here:
 * http://www.gaisler.com/products/grlib/grip.pdf
 *
 * See "Documentation/devicetree/bindings/net/can/grcan.txt" for information on
 * open firmware properties.
 *
 * See "Documentation/ABI/testing/sysfs-class-net-grcan" for information on the
 * sysfs interface.
 *
 * See "Documentation/admin-guide/kernel-parameters.rst" for information on the
 * module parameters.
 *
 * Contributors: Andreas Larsson <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/ethtool.h>
#include <linux/io.h>
#include <linux/can/dev.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/dma-mapping.h>

#define DRV_NAME	"grcan"

#define GRCAN_NAPI_WEIGHT	32

#define GRCAN_RESERVE_SIZE(slot1, slot2) (((slot2) - (slot1)) / 4 - 1)

struct grcan_registers {
	u32 conf;	/* 0x00 */
	u32 stat;	/* 0x04 */
	u32 ctrl;	/* 0x08 */
	u32 __reserved1[GRCAN_RESERVE_SIZE(0x08, 0x18)];
	u32 smask;	/* 0x18 - CanMASK */
	u32 scode;	/* 0x1c - CanCODE */
	u32 __reserved2[GRCAN_RESERVE_SIZE(0x1c, 0x100)];
	u32 pimsr;	/* 0x100 */
	u32 pimr;	/* 0x104 */
	u32 pisr;	/* 0x108 */
	u32 pir;	/* 0x10C */
	u32 imr;	/* 0x110 */
	u32 picr;	/* 0x114 */
	u32 __reserved3[GRCAN_RESERVE_SIZE(0x114, 0x200)];
	u32 txctrl;	/* 0x200 */
	u32 txaddr;	/* 0x204 */
	u32 txsize;	/* 0x208 */
	u32 txwr;	/* 0x20C */
	u32 txrd;	/* 0x210 */
	u32 txirq;	/* 0x214 */
	u32 __reserved4[GRCAN_RESERVE_SIZE(0x214, 0x300)];
	u32 rxctrl;	/* 0x300 */
	u32 rxaddr;	/* 0x304 */
	u32 rxsize;	/* 0x308 */
	u32 rxwr;	/* 0x30C */
	u32 rxrd;	/* 0x310 */
	u32 rxirq;	/* 0x314 */
	u32 rxmask;	/* 0x318 */
	u32 rxcode;	/* 0x31C */
};

#define GRCAN_CONF_ABORT	0x00000001
#define GRCAN_CONF_ENABLE0	0x00000002
#define GRCAN_CONF_ENABLE1	0x00000004
#define GRCAN_CONF_SELECT	0x00000008
#define GRCAN_CONF_SILENT	0x00000010
#define GRCAN_CONF_SAM		0x00000020 /* Available in some hardware */
#define GRCAN_CONF_BPR		0x00000300 /* Note: not BRP */
#define GRCAN_CONF_RSJ		0x00007000
#define GRCAN_CONF_PS1		0x00f00000
#define GRCAN_CONF_PS2		0x000f0000
#define GRCAN_CONF_SCALER	0xff000000
#define GRCAN_CONF_OPERATION						\
	(GRCAN_CONF_ABORT | GRCAN_CONF_ENABLE0 | GRCAN_CONF_ENABLE1	\
	 | GRCAN_CONF_SELECT | GRCAN_CONF_SILENT | GRCAN_CONF_SAM)
#define GRCAN_CONF_TIMING						\
	(GRCAN_CONF_BPR | GRCAN_CONF_RSJ | GRCAN_CONF_PS1		\
	 | GRCAN_CONF_PS2 | GRCAN_CONF_SCALER)

#define GRCAN_CONF_RSJ_MIN	1
#define GRCAN_CONF_RSJ_MAX	4
#define GRCAN_CONF_PS1_MIN	1
#define GRCAN_CONF_PS1_MAX	15
#define GRCAN_CONF_PS2_MIN	2
#define GRCAN_CONF_PS2_MAX	8
#define GRCAN_CONF_SCALER_MIN	0
#define GRCAN_CONF_SCALER_MAX	255
#define GRCAN_CONF_SCALER_INC	1

#define GRCAN_CONF_BPR_BIT	8
#define GRCAN_CONF_RSJ_BIT	12
#define GRCAN_CONF_PS1_BIT	20
#define GRCAN_CONF_PS2_BIT	16
#define GRCAN_CONF_SCALER_BIT	24

#define GRCAN_STAT_PASS		0x000001
#define GRCAN_STAT_OFF		0x000002
#define GRCAN_STAT_OR		0x000004
#define GRCAN_STAT_AHBERR	0x000008
#define GRCAN_STAT_ACTIVE	0x000010
#define GRCAN_STAT_RXERRCNT	0x00ff00
#define GRCAN_STAT_TXERRCNT	0xff0000

#define GRCAN_STAT_ERRCTR_RELATED	(GRCAN_STAT_PASS | GRCAN_STAT_OFF)

#define GRCAN_STAT_RXERRCNT_BIT	8
#define GRCAN_STAT_TXERRCNT_BIT	16
#define GRCAN_STAT_ERRCNT_WARNING_LIMIT	96
#define GRCAN_STAT_ERRCNT_PASSIVE_LIMIT	127

#define GRCAN_CTRL_RESET	0x2
#define GRCAN_CTRL_ENABLE	0x1

#define GRCAN_TXCTRL_ENABLE	0x1
#define GRCAN_TXCTRL_ONGOING	0x2
#define GRCAN_TXCTRL_SINGLE	0x4

#define GRCAN_RXCTRL_ENABLE	0x1
#define GRCAN_RXCTRL_ONGOING	0x2

/* Relative offset of IRQ sources to AMBA Plug&Play */
#define GRCAN_IRQIX_IRQ		0
#define GRCAN_IRQIX_TXSYNC	1
#define GRCAN_IRQIX_RXSYNC	2

#define GRCAN_IRQ_PASS		0x00001
#define GRCAN_IRQ_OFF		0x00002
#define GRCAN_IRQ_OR		0x00004
#define GRCAN_IRQ_RXAHBERR	0x00008
#define GRCAN_IRQ_TXAHBERR	0x00010
#define GRCAN_IRQ_RXIRQ		0x00020
#define GRCAN_IRQ_TXIRQ		0x00040
#define GRCAN_IRQ_RXFULL	0x00080
#define GRCAN_IRQ_TXEMPTY	0x00100
#define GRCAN_IRQ_RX		0x00200
#define GRCAN_IRQ_TX		0x00400
#define GRCAN_IRQ_RXSYNC	0x00800
#define GRCAN_IRQ_TXSYNC	0x01000
#define GRCAN_IRQ_RXERRCTR	0x02000
#define GRCAN_IRQ_TXERRCTR	0x04000
#define GRCAN_IRQ_RXMISS	0x08000
#define GRCAN_IRQ_TXLOSS	0x10000

#define GRCAN_IRQ_NONE	0
#define GRCAN_IRQ_ALL							\
	(GRCAN_IRQ_PASS | GRCAN_IRQ_OFF | GRCAN_IRQ_OR			\
	 | GRCAN_IRQ_RXAHBERR | GRCAN_IRQ_TXAHBERR			\
	 | GRCAN_IRQ_RXIRQ | GRCAN_IRQ_TXIRQ				\
	 | GRCAN_IRQ_RXFULL | GRCAN_IRQ_TXEMPTY				\
	 | GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_RXSYNC		\
	 | GRCAN_IRQ_TXSYNC | GRCAN_IRQ_RXERRCTR			\
	 | GRCAN_IRQ_TXERRCTR | GRCAN_IRQ_RXMISS			\
	 | GRCAN_IRQ_TXLOSS)

#define GRCAN_IRQ_ERRCTR_RELATED	(GRCAN_IRQ_RXERRCTR | GRCAN_IRQ_TXERRCTR \
					 | GRCAN_IRQ_PASS | GRCAN_IRQ_OFF)
#define GRCAN_IRQ_ERRORS		(GRCAN_IRQ_ERRCTR_RELATED | GRCAN_IRQ_OR \
					 | GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR \
					 | GRCAN_IRQ_TXLOSS)
#define GRCAN_IRQ_DEFAULT		(GRCAN_IRQ_RX | GRCAN_IRQ_TX | GRCAN_IRQ_ERRORS)

#define GRCAN_MSG_SIZE		16

#define GRCAN_MSG_IDE		0x80000000
#define GRCAN_MSG_RTR		0x40000000
#define GRCAN_MSG_BID		0x1ffc0000
#define GRCAN_MSG_EID		0x1fffffff
#define GRCAN_MSG_IDE_BIT	31
#define GRCAN_MSG_RTR_BIT	30
#define GRCAN_MSG_BID_BIT	18
#define GRCAN_MSG_EID_BIT	0

#define GRCAN_MSG_DLC		0xf0000000
#define GRCAN_MSG_TXERRC	0x00ff0000
#define GRCAN_MSG_RXERRC	0x0000ff00
#define GRCAN_MSG_DLC_BIT	28
#define GRCAN_MSG_TXERRC_BIT	16
#define GRCAN_MSG_RXERRC_BIT	8
#define GRCAN_MSG_AHBERR	0x00000008
#define GRCAN_MSG_OR		0x00000004
#define GRCAN_MSG_OFF		0x00000002
#define GRCAN_MSG_PASS		0x00000001

#define GRCAN_MSG_DATA_SLOT_INDEX(i)	(2 + (i) / 4)
#define GRCAN_MSG_DATA_SHIFT(i)		((3 - (i) % 4) * 8)
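/* Worked example of the data layout macros above (values chosen for
 * illustration): a 16-byte message slot holds the payload big-endian in
 * slot words 2 and 3, so data byte i = 5 lands in
 * slot[GRCAN_MSG_DATA_SLOT_INDEX(5)] = slot[3] at
 * GRCAN_MSG_DATA_SHIFT(5) = 16, i.e. bits 23..16 of the fourth word.
 */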
#define GRCAN_BUFFER_ALIGNMENT		1024
#define GRCAN_DEFAULT_BUFFER_SIZE	1024
#define GRCAN_VALID_TR_SIZE_MASK	0x001fffc0

#define GRCAN_INVALID_BUFFER_SIZE(s)	\
	((s) == 0 || ((s) & ~GRCAN_VALID_TR_SIZE_MASK))

#if GRCAN_INVALID_BUFFER_SIZE(GRCAN_DEFAULT_BUFFER_SIZE)
#error "Invalid default buffer size"
#endif

struct grcan_dma_buffer {
	size_t size;
	void *buf;
	dma_addr_t handle;
};

struct grcan_dma {
	size_t base_size;
	void *base_buf;
	dma_addr_t base_handle;
	struct grcan_dma_buffer tx;
	struct grcan_dma_buffer rx;
};

/* GRCAN configuration parameters */
struct grcan_device_config {
	unsigned short enable0;
	unsigned short enable1;
	unsigned short select;
	unsigned int txsize;
	unsigned int rxsize;
};

#define GRCAN_DEFAULT_DEVICE_CONFIG {				\
		.enable0	= 0,				\
		.enable1	= 0,				\
		.select		= 0,				\
		.txsize		= GRCAN_DEFAULT_BUFFER_SIZE,	\
		.rxsize		= GRCAN_DEFAULT_BUFFER_SIZE,	\
	}

#define GRCAN_TXBUG_SAFE_GRLIB_VERSION	4100
#define GRLIB_VERSION_MASK		0xffff

/* GRCAN private data structure */
struct grcan_priv {
	struct can_priv can;	/* must be the first member */
	struct net_device *dev;
	struct device *ofdev_dev;
	struct napi_struct napi;

	struct grcan_registers __iomem *regs;	/* ioremap'ed registers */
	struct grcan_device_config config;
	struct grcan_dma dma;

	struct sk_buff **echo_skb;	/* We allocate this on our own */

	/* The echo skb pointer, pointing into echo_skb and indicating which
	 * frames can be echoed back. See the "Notes on the tx cyclic buffer
	 * handling"-comment for grcan_start_xmit for more details.
	 */
	u32 eskbp;

	/* Lock for controlling changes to the netif tx queue state, accesses
	 * to the echo_skb pointer eskbp and for making sure that a running
	 * reset and/or a close of the interface is done without interference
	 * from other parts of the code.
	 *
	 * The echo_skb pointer, eskbp, should only be accessed under this
	 * lock as it can be changed in several places and together with
	 * decisions on whether to wake up the tx queue.
	 *
	 * The tx queue must never be woken up if there is a running reset or
	 * close in progress.
	 *
	 * A running reset (see below on need_txbug_workaround) should never
	 * be done if the interface is closing down and several running resets
	 * should never be scheduled simultaneously.
	 */
	spinlock_t lock;

	/* Whether a workaround is needed due to a bug in older hardware. In
	 * this case, the driver both tries to prevent the bug from being
	 * triggered and recovers, if the bug nevertheless happens, by doing a
	 * running reset. A running reset resets the device and continues from
	 * where it was without being noticeable from outside the driver
	 * (apart from slight delays).
	 */
	bool need_txbug_workaround;

	/* To trigger initialization of a running reset and to trigger the
	 * running reset respectively in the case of a hanged device due to
	 * a txbug.
	 */
	struct timer_list hang_timer;
	struct timer_list rr_timer;

	/* To avoid waking up the netif queue and restarting timers
	 * when a reset is scheduled or when closing of the device is
	 * undergoing
	 */
	bool resetting;
	bool closing;
};

/* Wait time for a short wait for ongoing to clear */
#define GRCAN_SHORTWAIT_USECS	10

/* Limit on the number of transmitted bits of an eff frame according to the
 * CAN specification: 1 bit start of frame, 32 bits arbitration field, 6 bits
 * control field, 8 bytes data field, 16 bits crc field, 2 bits ACK field and
 * 7 bits end of frame
 */
#define GRCAN_EFF_FRAME_MAX_BITS	(1 + 32 + 6 + 8 * 8 + 16 + 2 + 7)

#if defined(__BIG_ENDIAN)
static inline u32 grcan_read_reg(u32 __iomem *reg)
{
	return ioread32be(reg);
}

static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
{
	iowrite32be(val, reg);
}
#else
static inline u32 grcan_read_reg(u32 __iomem *reg)
{
	return ioread32(reg);
}

static inline void grcan_write_reg(u32 __iomem *reg, u32 val)
{
	iowrite32(val, reg);
}
#endif

static inline void grcan_clear_bits(u32 __iomem *reg, u32 mask)
{
	grcan_write_reg(reg, grcan_read_reg(reg) & ~mask);
}

static inline void grcan_set_bits(u32 __iomem *reg, u32 mask)
{
	grcan_write_reg(reg, grcan_read_reg(reg) | mask);
}

static inline u32 grcan_read_bits(u32 __iomem *reg, u32 mask)
{
	return grcan_read_reg(reg) & mask;
}

static inline void grcan_write_bits(u32 __iomem *reg, u32 value, u32 mask)
{
	u32 old = grcan_read_reg(reg);

	grcan_write_reg(reg, (old & ~mask) | (value & mask));
}

/* a and b should both be in [0,size] and a == b == size should not hold */
static inline u32 grcan_ring_add(u32 a, u32 b, u32 size)
{
	u32 sum = a + b;

	if (sum < size)
		return sum;
	else
		return sum - size;
}

/* a and b should both be in [0,size) */
static inline u32 grcan_ring_sub(u32 a, u32 b, u32 size)
{
	return grcan_ring_add(a, size - b, size);
}
/* Available slots for new transmissions */
static inline u32 grcan_txspace(size_t txsize, u32 txwr, u32 eskbp)
{
	u32 slots = txsize / GRCAN_MSG_SIZE - 1;
	u32 used = grcan_ring_sub(txwr, eskbp, txsize) / GRCAN_MSG_SIZE;

	return slots - used;
}
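/* Worked example (values chosen for illustration): with txsize = 1024 the
 * ring holds 1024 / GRCAN_MSG_SIZE = 64 slots, of which 63 are usable (one
 * is kept free to distinguish full from empty). With txwr = 160 and
 * eskbp = 32, grcan_ring_sub(160, 32, 1024) = 128, i.e. 8 slots are in
 * flight, leaving 63 - 8 = 55 slots for new frames.
 */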
/* Configuration parameters that can be set via module parameters */
static struct grcan_device_config grcan_module_config =
	GRCAN_DEFAULT_DEVICE_CONFIG;

static const struct can_bittiming_const grcan_bittiming_const = {
	.name		= DRV_NAME,
	.tseg1_min	= GRCAN_CONF_PS1_MIN + 1,
	.tseg1_max	= GRCAN_CONF_PS1_MAX + 1,
	.tseg2_min	= GRCAN_CONF_PS2_MIN,
	.tseg2_max	= GRCAN_CONF_PS2_MAX,
	.sjw_max	= GRCAN_CONF_RSJ_MAX,
	.brp_min	= GRCAN_CONF_SCALER_MIN + 1,
	.brp_max	= GRCAN_CONF_SCALER_MAX + 1,
	.brp_inc	= GRCAN_CONF_SCALER_INC,
};

static int grcan_set_bittiming(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct can_bittiming *bt = &priv->can.bittiming;
	u32 timing = 0;
	int bpr, rsj, ps1, ps2, scaler;

	/* Should never happen - function will not be called when
	 * device is up
	 */
	if (grcan_read_bits(&regs->ctrl, GRCAN_CTRL_ENABLE))
		return -EBUSY;

	bpr = 0; /* Note bpr and brp are different concepts */
	rsj = bt->sjw;
	ps1 = (bt->prop_seg + bt->phase_seg1) - 1; /* tseg1 - 1 */
	ps2 = bt->phase_seg2;
	scaler = (bt->brp - 1);
	netdev_dbg(dev, "Request for BPR=%d, RSJ=%d, PS1=%d, PS2=%d, SCALER=%d",
		   bpr, rsj, ps1, ps2, scaler);
	if (!(ps1 > ps2)) {
		netdev_err(dev, "PS1 > PS2 must hold: PS1=%d, PS2=%d\n",
			   ps1, ps2);
		return -EINVAL;
	}
	if (!(ps2 >= rsj)) {
		netdev_err(dev, "PS2 >= RSJ must hold: PS2=%d, RSJ=%d\n",
			   ps2, rsj);
		return -EINVAL;
	}

	timing |= (bpr << GRCAN_CONF_BPR_BIT) & GRCAN_CONF_BPR;
	timing |= (rsj << GRCAN_CONF_RSJ_BIT) & GRCAN_CONF_RSJ;
	timing |= (ps1 << GRCAN_CONF_PS1_BIT) & GRCAN_CONF_PS1;
	timing |= (ps2 << GRCAN_CONF_PS2_BIT) & GRCAN_CONF_PS2;
	timing |= (scaler << GRCAN_CONF_SCALER_BIT) & GRCAN_CONF_SCALER;
	netdev_info(dev, "setting timing=0x%x\n", timing);
	grcan_write_bits(&regs->conf, timing, GRCAN_CONF_TIMING);

	return 0;
}
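/* Worked example of the timing encoding above (illustrative values): for
 * sjw = 1, prop_seg + phase_seg1 = 8, phase_seg2 = 4 and brp = 5, the
 * fields become RSJ = 1, PS1 = 7, PS2 = 4, SCALER = 4, so
 *
 *   timing = (1 << 12) | (7 << 20) | (4 << 16) | (4 << 24) = 0x04741000
 *
 * which satisfies both sanity checks (PS1 > PS2 and PS2 >= RSJ).
 */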
static int grcan_get_berr_counter(const struct net_device *dev,
				  struct can_berr_counter *bec)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	u32 status = grcan_read_reg(&regs->stat);

	bec->txerr = (status & GRCAN_STAT_TXERRCNT) >> GRCAN_STAT_TXERRCNT_BIT;
	bec->rxerr = (status & GRCAN_STAT_RXERRCNT) >> GRCAN_STAT_RXERRCNT_BIT;
	return 0;
}

static int grcan_poll(struct napi_struct *napi, int budget);

/* Reset device, but keep configuration information */
static void grcan_reset(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	u32 config = grcan_read_reg(&regs->conf);

	grcan_set_bits(&regs->ctrl, GRCAN_CTRL_RESET);
	grcan_write_reg(&regs->conf, config);

	priv->eskbp = grcan_read_reg(&regs->txrd);
	priv->can.state = CAN_STATE_STOPPED;

	/* Turn off hardware filtering - regs->rxcode set to 0 by reset */
	grcan_write_reg(&regs->rxmask, 0);
}

/* stop device without changing any configurations */
static void grcan_stop_hardware(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;

	grcan_write_reg(&regs->imr, GRCAN_IRQ_NONE);
	grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
	grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
	grcan_clear_bits(&regs->ctrl, GRCAN_CTRL_ENABLE);
}

/* Let priv->eskbp catch up to regs->txrd and echo back the skbs if echo
 * is true and free them otherwise.
 *
 * If budget is >= 0, stop after handling at most budget skbs. Otherwise,
 * continue until priv->eskbp catches up to regs->txrd.
 *
 * priv->lock *must* be held when calling this function
 */
static int catch_up_echo_skb(struct net_device *dev, int budget, bool echo)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	struct net_device_stats *stats = &dev->stats;
	int i, work_done;

	/* Updates to priv->eskbp and wake-ups of the queue needs to
	 * be atomic towards the reads of priv->eskbp and shut-downs
	 * of the queue in grcan_start_xmit.
	 */
	u32 txrd = grcan_read_reg(&regs->txrd);

	for (work_done = 0; work_done < budget || budget < 0; work_done++) {
		if (priv->eskbp == txrd)
			break;
		i = priv->eskbp / GRCAN_MSG_SIZE;
		if (echo) {
			/* Normal echo of messages */
			stats->tx_packets++;
			stats->tx_bytes += can_get_echo_skb(dev, i, NULL);
		} else {
			/* For cleanup of untransmitted messages */
			can_free_echo_skb(dev, i, NULL);
		}

		priv->eskbp = grcan_ring_add(priv->eskbp, GRCAN_MSG_SIZE,
					     dma->tx.size);
		txrd = grcan_read_reg(&regs->txrd);
	}

	return work_done;
}

static void grcan_lost_one_shot_frame(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	u32 txrd;
	unsigned long flags;

	spin_lock_irqsave(&priv->lock, flags);

	catch_up_echo_skb(dev, -1, true);

	if (unlikely(grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE))) {
		/* Should never happen */
		netdev_err(dev, "TXCTRL enabled at TXLOSS in one shot mode\n");
	} else {
		/* By the time an GRCAN_IRQ_TXLOSS is generated in
		 * one-shot mode there is no problem in writing
		 * to TXRD even in versions of the hardware in
		 * which GRCAN_TXCTRL_ONGOING is not cleared properly
		 * in one-shot mode.
		 */

		/* Skip message and discard echo-skb */
		txrd = grcan_read_reg(&regs->txrd);
		txrd = grcan_ring_add(txrd, GRCAN_MSG_SIZE, dma->tx.size);
		grcan_write_reg(&regs->txrd, txrd);
		catch_up_echo_skb(dev, -1, false);

		if (!priv->resetting && !priv->closing &&
		    !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)) {
			netif_wake_queue(dev);
			grcan_set_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
		}
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void grcan_err(struct net_device *dev, u32 sources, u32 status)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	struct net_device_stats *stats = &dev->stats;
	struct can_frame cf;

	/* Zero potential error_frame */
	memset(&cf, 0, sizeof(cf));

	/* Message lost interrupt. This might be due to arbitration error, but
	 * is also triggered when there is no one else on the can bus or when
	 * there is a problem with the hardware interface or the bus itself. As
	 * arbitration errors can not be singled out, no error frames are
	 * generated reporting this event as an arbitration error.
	 */
	if (sources & GRCAN_IRQ_TXLOSS) {
		/* Take care of failed one-shot transmit */
		if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
			grcan_lost_one_shot_frame(dev);

		/* Stop printing as soon as error passive or bus off is in
		 * effect to limit the amount of txloss debug printouts.
		 */
		if (!(status & GRCAN_STAT_ERRCTR_RELATED)) {
			netdev_dbg(dev, "tx message lost\n");
			stats->tx_errors++;
		}
	}

	/* Conditions dealing with the error counters. There is no interrupt
	 * for error warning, but there are interrupts for increases of the
	 * error counters.
	 */
	if ((sources & GRCAN_IRQ_ERRCTR_RELATED) ||
	    (status & GRCAN_STAT_ERRCTR_RELATED)) {
		enum can_state state = priv->can.state;
		enum can_state oldstate = state;
		u32 txerr = (status & GRCAN_STAT_TXERRCNT) >>
			    GRCAN_STAT_TXERRCNT_BIT;
		u32 rxerr = (status & GRCAN_STAT_RXERRCNT) >>
			    GRCAN_STAT_RXERRCNT_BIT;

		/* Figure out current state */
		if (status & GRCAN_STAT_OFF) {
			state = CAN_STATE_BUS_OFF;
		} else if (status & GRCAN_STAT_PASS) {
			state = CAN_STATE_ERROR_PASSIVE;
		} else if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT ||
			   rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT) {
			state = CAN_STATE_ERROR_WARNING;
		} else {
			state = CAN_STATE_ERROR_ACTIVE;
		}

		/* Handle and report state changes */
		if (state != oldstate) {
			switch (state) {
			case CAN_STATE_BUS_OFF:
				netdev_dbg(dev, "bus-off\n");
				netif_carrier_off(dev);
				priv->can.can_stats.bus_off++;

				/* Prevent the hardware from recovering from
				 * bus off on its own if restart is disabled.
				 */
				if (!priv->can.restart_ms)
					grcan_stop_hardware(dev);

				cf.can_id |= CAN_ERR_BUSOFF;
				break;

			case CAN_STATE_ERROR_PASSIVE:
				netdev_dbg(dev, "Error passive condition\n");
				priv->can.can_stats.error_passive++;

				cf.can_id |= CAN_ERR_CRTL;
				if (txerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
					cf.data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
				if (rxerr >= GRCAN_STAT_ERRCNT_PASSIVE_LIMIT)
					cf.data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
				break;

			case CAN_STATE_ERROR_WARNING:
				netdev_dbg(dev, "Error warning condition\n");
				priv->can.can_stats.error_warning++;

				cf.can_id |= CAN_ERR_CRTL;
				if (txerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
					cf.data[1] |= CAN_ERR_CRTL_TX_WARNING;
				if (rxerr >= GRCAN_STAT_ERRCNT_WARNING_LIMIT)
					cf.data[1] |= CAN_ERR_CRTL_RX_WARNING;
				break;

			case CAN_STATE_ERROR_ACTIVE:
				netdev_dbg(dev, "Error active condition\n");
				cf.can_id |= CAN_ERR_CRTL;
				break;

			default:
				/* There are no others at this point */
				break;
			}
			cf.can_id |= CAN_ERR_CNT;
			cf.data[6] = txerr;
			cf.data[7] = rxerr;
			priv->can.state = state;
		}

		/* Report automatic restarts */
		if (priv->can.restart_ms && oldstate == CAN_STATE_BUS_OFF) {
			unsigned long flags;

			cf.can_id |= CAN_ERR_RESTARTED;
			netdev_dbg(dev, "restarted\n");
			priv->can.can_stats.restarts++;
			netif_carrier_on(dev);

			spin_lock_irqsave(&priv->lock, flags);

			if (!priv->resetting && !priv->closing) {
				u32 txwr = grcan_read_reg(&regs->txwr);

				if (grcan_txspace(dma->tx.size, txwr,
						  priv->eskbp))
					netif_wake_queue(dev);
			}

			spin_unlock_irqrestore(&priv->lock, flags);
		}
	}

	/* Data overrun interrupt */
	if ((sources & GRCAN_IRQ_OR) || (status & GRCAN_STAT_OR)) {
		netdev_dbg(dev, "got data overrun interrupt\n");
		stats->rx_over_errors++;
		stats->rx_errors++;

		cf.can_id |= CAN_ERR_CRTL;
		cf.data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
	}

	/* AHB bus error interrupts (not CAN bus errors) - shut down the
	 * device.
	 */
	if (sources & (GRCAN_IRQ_TXAHBERR | GRCAN_IRQ_RXAHBERR) ||
	    (status & GRCAN_STAT_AHBERR)) {
		char *txrx = "";
		unsigned long flags;

		if (sources & GRCAN_IRQ_TXAHBERR) {
			txrx = "on tx ";
			stats->tx_errors++;
		} else if (sources & GRCAN_IRQ_RXAHBERR) {
			txrx = "on rx ";
			stats->rx_errors++;
		}
		netdev_err(dev, "Fatal AHB bus error %s- halting device\n",
			   txrx);

		spin_lock_irqsave(&priv->lock, flags);

		/* Prevent anything to be enabled again and halt device */
		priv->closing = true;
		netif_stop_queue(dev);
		grcan_stop_hardware(dev);
		priv->can.state = CAN_STATE_STOPPED;

		spin_unlock_irqrestore(&priv->lock, flags);
	}
	/* Pass on error frame if something to report,
	 * i.e. id contains some information
	 */
	if (cf.can_id) {
		struct can_frame *skb_cf;
		struct sk_buff *skb = alloc_can_err_skb(dev, &skb_cf);

		if (skb == NULL) {
			netdev_dbg(dev, "could not allocate error frame\n");
			return;
		}
		skb_cf->can_id |= cf.can_id;
		memcpy(skb_cf->data, cf.data, sizeof(cf.data));

		netif_rx(skb);
	}
}

static irqreturn_t grcan_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	u32 sources, status;

	/* Find out the source */
	sources = grcan_read_reg(&regs->pimsr);
	if (!sources)
		return IRQ_NONE;
	grcan_write_reg(&regs->picr, sources);
	status = grcan_read_reg(&regs->stat);

	/* If we got TX progress, the device has not hanged,
	 * so disable the hang timer
	 */
	if (priv->need_txbug_workaround &&
	    (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_TXLOSS))) {
		del_timer(&priv->hang_timer);
	}

	/* Frame(s) received or transmitted */
	if (sources & (GRCAN_IRQ_TX | GRCAN_IRQ_RX)) {
		/* Disable tx/rx interrupts and schedule poll(). No need for
		 * locking as interference from a running reset at worst leads
		 * to an extra interrupt.
		 */
		grcan_clear_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);
		napi_schedule(&priv->napi);
	}

	/* (Potential) error conditions to take care of */
	if (sources & GRCAN_IRQ_ERRORS)
		grcan_err(dev, sources, status);

	return IRQ_HANDLED;
}
/* Reset device and restart operations from where they were.
 *
 * This assumes that TXCTRL & RXCTRL are properly disabled and that RX
 * is not ONGOING (TX might be stuck in ONGOING due to a hardware bug
 * for single shot)
 */
static void grcan_running_reset(struct timer_list *t)
{
	struct grcan_priv *priv = from_timer(priv, t, rr_timer);
	struct net_device *dev = priv->dev;
	struct grcan_registers __iomem *regs = priv->regs;
	unsigned long flags;

	/* This temporarily messes with eskbp, so we need to lock
	 * priv->lock
	 */
	spin_lock_irqsave(&priv->lock, flags);

	priv->resetting = false;
	del_timer(&priv->hang_timer);
	del_timer(&priv->rr_timer);

	if (!priv->closing) {
		/* Save and reset - config register preserved by grcan_reset */
		u32 imr = grcan_read_reg(&regs->imr);

		u32 txaddr = grcan_read_reg(&regs->txaddr);
		u32 txsize = grcan_read_reg(&regs->txsize);
		u32 txwr = grcan_read_reg(&regs->txwr);
		u32 txrd = grcan_read_reg(&regs->txrd);
		u32 eskbp = priv->eskbp;

		u32 rxaddr = grcan_read_reg(&regs->rxaddr);
		u32 rxsize = grcan_read_reg(&regs->rxsize);
		u32 rxwr = grcan_read_reg(&regs->rxwr);
		u32 rxrd = grcan_read_reg(&regs->rxrd);

		grcan_reset(dev);

		/* Restore */
		grcan_write_reg(&regs->txaddr, txaddr);
		grcan_write_reg(&regs->txsize, txsize);
		grcan_write_reg(&regs->txwr, txwr);
		grcan_write_reg(&regs->txrd, txrd);
		priv->eskbp = eskbp;

		grcan_write_reg(&regs->rxaddr, rxaddr);
		grcan_write_reg(&regs->rxsize, rxsize);
		grcan_write_reg(&regs->rxwr, rxwr);
		grcan_write_reg(&regs->rxrd, rxrd);

		/* Turn on device again */
		grcan_write_reg(&regs->imr, imr);
		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		grcan_write_reg(&regs->txctrl, GRCAN_TXCTRL_ENABLE |
				(priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT ?
				 GRCAN_TXCTRL_SINGLE : 0));
		grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
		grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);

		/* Start queue if there is size and listen-only mode is not
		 * enabled
		 */
		if (grcan_txspace(priv->dma.tx.size, txwr, priv->eskbp) &&
		    !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
			netif_wake_queue(dev);
	}

	spin_unlock_irqrestore(&priv->lock, flags);

	netdev_err(dev, "Device reset and restored\n");
}

/* Waiting time in usecs corresponding to the transmission of three maximum
 * sized can frames in the given bitrate (in bits/sec). Waiting for this
 * amount of time makes sure that the can controller has time to finish
 * sending or receiving a frame with a good margin.
 *
 * usecs/sec * number of frames * bits/frame / bits/sec
 */
static inline u32 grcan_ongoing_wait_usecs(__u32 bitrate)
{
	return 1000000 * 3 * GRCAN_EFF_FRAME_MAX_BITS / bitrate;
}
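/* With GRCAN_EFF_FRAME_MAX_BITS = 128, this evaluates, for example, to
 * 1000000 * 3 * 128 / 125000 = 3072 usecs at 125 kbit/s, and to 384 usecs
 * at 1 Mbit/s.
 */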
/* Set timer so that it will not fire until after a period in which the can
 * controller has a good margin to finish transmitting a frame unless it has
 * hanged
 */
static inline void grcan_reset_timer(struct timer_list *timer, __u32 bitrate)
{
	u32 wait_jiffies = usecs_to_jiffies(grcan_ongoing_wait_usecs(bitrate));

	mod_timer(timer, jiffies + wait_jiffies);
}

/* Disable channels and schedule a running reset */
static void grcan_initiate_running_reset(struct timer_list *t)
{
	struct grcan_priv *priv = from_timer(priv, t, hang_timer);
	struct net_device *dev = priv->dev;
	struct grcan_registers __iomem *regs = priv->regs;
	unsigned long flags;

	netdev_err(dev, "Device seems hanged - reset scheduled\n");

	spin_lock_irqsave(&priv->lock, flags);

	/* The main body of this function must never be executed again
	 * until after an execution of grcan_running_reset
	 */
	if (!priv->resetting && !priv->closing) {
		priv->resetting = true;
		netif_stop_queue(dev);
		grcan_clear_bits(&regs->txctrl, GRCAN_TXCTRL_ENABLE);
		grcan_clear_bits(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
		grcan_reset_timer(&priv->rr_timer, priv->can.bittiming.bitrate);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}

static void grcan_free_dma_buffers(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_dma *dma = &priv->dma;

	dma_free_coherent(priv->ofdev_dev, dma->base_size, dma->base_buf,
			  dma->base_handle);
	memset(dma, 0, sizeof(*dma));
}

static int grcan_allocate_dma_buffers(struct net_device *dev,
				      size_t tsize, size_t rsize)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_dma *dma = &priv->dma;
	struct grcan_dma_buffer *large = rsize > tsize ? &dma->rx : &dma->tx;
	struct grcan_dma_buffer *small = rsize > tsize ? &dma->tx : &dma->rx;
	size_t shift;

	/* Need a whole number of GRCAN_BUFFER_ALIGNMENT for the large,
	 * i.e. first buffer
	 */
	size_t maxs = max(tsize, rsize);
	size_t lsize = ALIGN(maxs, GRCAN_BUFFER_ALIGNMENT);

	/* Put the small buffer after that */
	size_t ssize = min(tsize, rsize);

	/* Extra GRCAN_BUFFER_ALIGNMENT to allow for alignment */
	dma->base_size = lsize + ssize + GRCAN_BUFFER_ALIGNMENT;
	dma->base_buf = dma_alloc_coherent(priv->ofdev_dev,
					   dma->base_size,
					   &dma->base_handle,
					   GFP_KERNEL);
	if (!dma->base_buf)
		return -ENOMEM;

	dma->tx.size = tsize;
	dma->rx.size = rsize;

	large->handle = ALIGN(dma->base_handle, GRCAN_BUFFER_ALIGNMENT);
	small->handle = large->handle + lsize;
	shift = large->handle - dma->base_handle;

	large->buf = dma->base_buf + shift;
	small->buf = large->buf + lsize;

	return 0;
}
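/* Layout example (illustrative): with tsize = rsize = 1024 the large buffer
 * needs lsize = 1024 bytes and the allocation is
 * base_size = 1024 + 1024 + 1024 = 3072 bytes. If base_handle happens to be
 * 1024-byte aligned already, shift = 0 and the large buffer starts at the
 * allocation base with the small one directly after it; otherwise the start
 * is rounded up and the slack comes out of the extra GRCAN_BUFFER_ALIGNMENT
 * bytes.
 */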
/* priv->lock *must* be held when calling this function */
static int grcan_start(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	u32 confop, txctrl;

	grcan_reset(dev);

	grcan_write_reg(&regs->txaddr, priv->dma.tx.handle);
	grcan_write_reg(&regs->txsize, priv->dma.tx.size);
	/* regs->txwr, regs->txrd and priv->eskbp already set to 0 by reset */

	grcan_write_reg(&regs->rxaddr, priv->dma.rx.handle);
	grcan_write_reg(&regs->rxsize, priv->dma.rx.size);
	/* regs->rxwr and regs->rxrd already set to 0 by reset */

	/* Enable interrupts */
	grcan_read_reg(&regs->pir);
	grcan_write_reg(&regs->imr, GRCAN_IRQ_DEFAULT);

	/* Enable interfaces, channels and device */
	confop = GRCAN_CONF_ABORT |
		(priv->config.enable0 ? GRCAN_CONF_ENABLE0 : 0) |
		(priv->config.enable1 ? GRCAN_CONF_ENABLE1 : 0) |
		(priv->config.select ? GRCAN_CONF_SELECT : 0) |
		(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY ?
		 GRCAN_CONF_SILENT : 0) |
		(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
		 GRCAN_CONF_SAM : 0);
	grcan_write_bits(&regs->conf, confop, GRCAN_CONF_OPERATION);
	txctrl = GRCAN_TXCTRL_ENABLE |
		(priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT ?
		 GRCAN_TXCTRL_SINGLE : 0);
	grcan_write_reg(&regs->txctrl, txctrl);
	grcan_write_reg(&regs->rxctrl, GRCAN_RXCTRL_ENABLE);
	grcan_write_reg(&regs->ctrl, GRCAN_CTRL_ENABLE);

	priv->can.state = CAN_STATE_ERROR_ACTIVE;

	return 0;
}

static int grcan_set_mode(struct net_device *dev, enum can_mode mode)
{
	struct grcan_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int err = 0;

	if (mode == CAN_MODE_START) {
		/* This might be called to restart the device to recover from
		 * bus off errors
		 */
		spin_lock_irqsave(&priv->lock, flags);
		if (priv->closing || priv->resetting) {
			err = -EBUSY;
		} else {
			netdev_info(dev, "Restarting device\n");
			grcan_start(dev);
			if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
				netif_wake_queue(dev);
		}
		spin_unlock_irqrestore(&priv->lock, flags);
		return err;
	}
	return -EOPNOTSUPP;
}

static int grcan_open(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_dma *dma = &priv->dma;
	unsigned long flags;
	int err;

	/* Allocate memory */
	err = grcan_allocate_dma_buffers(dev, priv->config.txsize,
					 priv->config.rxsize);
	if (err) {
		netdev_err(dev, "could not allocate DMA buffers\n");
		return err;
	}

	priv->echo_skb = kcalloc(dma->tx.size, sizeof(*priv->echo_skb),
				 GFP_KERNEL);
	if (!priv->echo_skb) {
		err = -ENOMEM;
		goto exit_free_dma_buffers;
	}
	priv->can.echo_skb_max = dma->tx.size;
	priv->can.echo_skb = priv->echo_skb;

	/* Get can device up */
	err = open_candev(dev);
	if (err)
		goto exit_free_echo_skb;

	err = request_irq(dev->irq, grcan_interrupt, IRQF_SHARED,
			  dev->name, dev);
	if (err)
		goto exit_close_candev;

	spin_lock_irqsave(&priv->lock, flags);

	napi_enable(&priv->napi);
	grcan_start(dev);
	if (!(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
		netif_start_queue(dev);
	priv->resetting = false;
	priv->closing = false;

	spin_unlock_irqrestore(&priv->lock, flags);

	return 0;

exit_close_candev:
	close_candev(dev);
exit_free_echo_skb:
	kfree(priv->echo_skb);
exit_free_dma_buffers:
	grcan_free_dma_buffers(dev);
	return err;
}

static int grcan_close(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	unsigned long flags;

	napi_disable(&priv->napi);

	spin_lock_irqsave(&priv->lock, flags);

	priv->closing = true;
	if (priv->need_txbug_workaround) {
		spin_unlock_irqrestore(&priv->lock, flags);
		del_timer_sync(&priv->hang_timer);
		del_timer_sync(&priv->rr_timer);
		spin_lock_irqsave(&priv->lock, flags);
	}
	netif_stop_queue(dev);
	grcan_stop_hardware(dev);
	priv->can.state = CAN_STATE_STOPPED;

	spin_unlock_irqrestore(&priv->lock, flags);

	free_irq(dev->irq, dev);
	close_candev(dev);

	grcan_free_dma_buffers(dev);
	priv->can.echo_skb_max = 0;
	priv->can.echo_skb = NULL;
	kfree(priv->echo_skb);

	return 0;
}

static void grcan_transmit_catch_up(struct net_device *dev)
{
	struct grcan_priv *priv = netdev_priv(dev);
	unsigned long flags;
	int work_done;

	spin_lock_irqsave(&priv->lock, flags);

	work_done = catch_up_echo_skb(dev, -1, true);
	if (work_done) {
		if (!priv->resetting && !priv->closing &&
		    !(priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
			netif_wake_queue(dev);

		/* With napi we don't get TX interrupts for a while,
		 * so prevent a running reset while catching up
		 */
		if (priv->need_txbug_workaround)
			del_timer(&priv->hang_timer);
	}

	spin_unlock_irqrestore(&priv->lock, flags);
}
static int grcan_receive(struct net_device *dev, int budget)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	struct net_device_stats *stats = &dev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 wr, rd, startrd;
	u32 *slot;
	u32 i, rtr, eff, j, shift;
	int work_done = 0;

	rd = grcan_read_reg(&regs->rxrd);
	startrd = rd;
	for (work_done = 0; work_done < budget; work_done++) {
		/* Check for packet to receive */
		wr = grcan_read_reg(&regs->rxwr);
		if (rd == wr)
			break;

		/* Take care of packet */
		skb = alloc_can_skb(dev, &cf);
		if (skb == NULL) {
			netdev_err(dev,
				   "dropping frame: skb allocation failed\n");
			stats->rx_dropped++;
			continue;
		}

		slot = dma->rx.buf + rd;
		eff = slot[0] & GRCAN_MSG_IDE;
		rtr = slot[0] & GRCAN_MSG_RTR;
		if (eff) {
			cf->can_id = ((slot[0] & GRCAN_MSG_EID)
				      >> GRCAN_MSG_EID_BIT);
			cf->can_id |= CAN_EFF_FLAG;
		} else {
			cf->can_id = ((slot[0] & GRCAN_MSG_BID)
				      >> GRCAN_MSG_BID_BIT);
		}
		cf->len = can_cc_dlc2len((slot[1] & GRCAN_MSG_DLC)
					 >> GRCAN_MSG_DLC_BIT);
		if (rtr) {
			cf->can_id |= CAN_RTR_FLAG;
		} else {
			for (i = 0; i < cf->len; i++) {
				j = GRCAN_MSG_DATA_SLOT_INDEX(i);
				shift = GRCAN_MSG_DATA_SHIFT(i);
				cf->data[i] = (u8)(slot[j] >> shift);
			}

			stats->rx_bytes += cf->len;
		}
		stats->rx_packets++;

		netif_receive_skb(skb);

		rd = grcan_ring_add(rd, GRCAN_MSG_SIZE, dma->rx.size);
	}

	/* Make sure everything is read before allowing hardware to
	 * use the memory
	 */
	mb();

	/* Update read pointer - no need to check for ongoing */
	if (likely(rd != startrd))
		grcan_write_reg(&regs->rxrd, rd);

	return work_done;
}

static int grcan_poll(struct napi_struct *napi, int budget)
{
	struct grcan_priv *priv = container_of(napi, struct grcan_priv, napi);
	struct net_device *dev = priv->dev;
	struct grcan_registers __iomem *regs = priv->regs;
	unsigned long flags;
	int work_done;

	work_done = grcan_receive(dev, budget);

	grcan_transmit_catch_up(dev);

	if (work_done < budget) {
		napi_complete(napi);

		/* Guarantee no interference with a running reset that
		 * otherwise could turn off interrupts.
		 */
		spin_lock_irqsave(&priv->lock, flags);

		/* Enable tx and rx interrupts again. No need to check
		 * priv->closing as napi_disable in grcan_close is waiting for
		 * scheduled napi calls to finish.
		 */
		grcan_set_bits(&regs->imr, GRCAN_IRQ_TX | GRCAN_IRQ_RX);

		spin_unlock_irqrestore(&priv->lock, flags);
	}

	return work_done;
}
/* Work around the tx bug by waiting a while for the risky situation to
 * clear. If that fails, drop a frame in one-shot mode or indicate a busy
 * device otherwise.
 *
 * Returns 0 on successful wait. Otherwise it sets *netdev_tx_status to the
 * value that should be returned by grcan_start_xmit when aborting the xmit.
 */
static int grcan_txbug_workaround(struct net_device *dev, struct sk_buff *skb,
				  u32 txwr, u32 oneshotmode,
				  netdev_tx_t *netdev_tx_status)
{
	struct grcan_priv *priv = netdev_priv(dev);
	struct grcan_registers __iomem *regs = priv->regs;
	struct grcan_dma *dma = &priv->dma;
	int i;
	unsigned long flags;

	/* Wait a while for ongoing to be cleared or read pointer to catch up
	 * to write pointer. The latter is needed due to a bug in older
	 * versions of GRCAN in which ONGOING is not cleared properly in
	 * one-shot mode when a transmission fails.
	 */
	for (i = 0; i < GRCAN_SHORTWAIT_USECS; i++) {
		udelay(1);
		if (!grcan_read_bits(&regs->txctrl, GRCAN_TXCTRL_ONGOING) ||
		    grcan_read_reg(&regs->txrd) == txwr) {
			return 0;
		}
	}

	/* Clean up, in case the situation was not resolved */
	spin_lock_irqsave(&priv->lock, flags);
	if (!priv->resetting && !priv->closing) {
		/* Queue might have been stopped earlier in grcan_start_xmit */
		if (grcan_txspace(dma->tx.size, txwr, priv->eskbp))
			netif_wake_queue(dev);
		/* Set a timer to resolve a hanged tx controller */
		if (!timer_pending(&priv->hang_timer))
			grcan_reset_timer(&priv->hang_timer,
					  priv->can.bittiming.bitrate);
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (oneshotmode) {
		/* In one-shot mode we should never end up here because
		 * then the interrupt handler increases txrd on TXLOSS,
		 * but it is consistent with one-shot mode to drop the
		 * frame in this case.
		 */
		kfree_skb(skb);
		*netdev_tx_status = NETDEV_TX_OK;
	} else {
		/* In normal mode the socket-can transmission queue gets
		 * to keep the frame so that it can be retransmitted
		 * later
		 */
		*netdev_tx_status = NETDEV_TX_BUSY;
	}

	return -EBUSY;
}

/* Notes on the tx cyclic buffer handling:
 *
 * regs->txwr	- the next slot for the driver to put data to be sent
 * regs->txrd	- the next slot for the device to read data
 * priv->eskbp	- the next slot for the driver to call can_put_echo_skb for
 *
 * grcan_start_xmit can enter more messages as long as regs->txwr does
 * not reach priv->eskbp (within 1 message gap)
 *
 * The device sends messages until regs->txrd reaches regs->txwr
 *
 * The interrupt handler calls can_get_echo_skb (via catch_up_echo_skb)
 * until priv->eskbp reaches regs->txrd
 */
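/* Worked example of the pointer relationships above (illustrative, with a
 * 1024-byte tx buffer = 64 slots of GRCAN_MSG_SIZE):
 *
 *   eskbp = 0x040, txrd = 0x080, txwr = 0x0c0
 *
 * means slots 4..7 have been transmitted but not yet echoed back to the
 * stack, slots 8..11 are queued for the hardware to send, and the driver
 * may keep adding frames until txwr wraps around to one slot short of
 * eskbp.
 */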
GRCAN_MSG_RTR : 0) | tmp; slot[1] = ((dlc << GRCAN_MSG_DLC_BIT) & GRCAN_MSG_DLC); slot[2] = 0; slot[3] = 0; for (i = 0; i < dlc; i++) { j = GRCAN_MSG_DATA_SLOT_INDEX(i); shift = GRCAN_MSG_DATA_SHIFT(i); slot[j] |= cf->data[i] << shift; } /* Checking that channel has not been disabled. These cases * should never happen */ txctrl = grcan_read_reg(&regs->txctrl); if (!(txctrl & GRCAN_TXCTRL_ENABLE)) netdev_err(dev, "tx channel spuriously disabled\n"); if (oneshotmode && !(txctrl & GRCAN_TXCTRL_SINGLE)) netdev_err(dev, "one-shot mode spuriously disabled\n"); /* Bug workaround for old version of grcan where updating txwr * in the same clock cycle as the controller updates txrd to * the current txwr could hang the can controller */ if (priv->need_txbug_workaround) { txrd = grcan_read_reg(&regs->txrd); if (unlikely(grcan_ring_sub(txwr, txrd, dma->tx.size) == 1)) { netdev_tx_t txstatus; err = grcan_txbug_workaround(dev, skb, txwr, oneshotmode, &txstatus); if (err) return txstatus; } } /* Prepare skb for echoing. This must be after the bug workaround above * as ownership of the skb is passed on by calling can_put_echo_skb. * Returning NETDEV_TX_BUSY or accessing skb or cf after a call to * can_put_echo_skb would be an error unless other measures are * taken. */ can_put_echo_skb(skb, dev, slotindex, 0); /* Make sure everything is written before allowing hardware to * read from the memory */ wmb(); /* Update write pointer to start transmission */ grcan_write_reg(&regs->txwr, grcan_ring_add(txwr, GRCAN_MSG_SIZE, dma->tx.size)); return NETDEV_TX_OK; } /* ========== Setting up sysfs interface and module parameters ========== */ #define GRCAN_NOT_BOOL(unsigned_val) ((unsigned_val) > 1) #define GRCAN_MODULE_PARAM(name, mtype, valcheckf, desc) \ static void grcan_sanitize_##name(struct platform_device *pd) \ { \ struct grcan_device_config grcan_default_config \ = GRCAN_DEFAULT_DEVICE_CONFIG; \ if (valcheckf(grcan_module_config.name)) { \ dev_err(&pd->dev, \ "Invalid module parameter value for " \ #name " - setting default\n"); \ grcan_module_config.name = \ grcan_default_config.name; \ } \ } \ module_param_named(name, grcan_module_config.name, \ mtype, 0444); \ MODULE_PARM_DESC(name, desc) #define GRCAN_CONFIG_ATTR(name, desc) \ static ssize_t grcan_store_##name(struct device *sdev, \ struct device_attribute *att, \ const char *buf, \ size_t count) \ { \ struct net_device *dev = to_net_dev(sdev); \ struct grcan_priv *priv = netdev_priv(dev); \ u8 val; \ int ret; \ if (dev->flags & IFF_UP) \ return -EBUSY; \ ret = kstrtou8(buf, 0, &val); \ if (ret < 0 || val > 1) \ return -EINVAL; \ priv->config.name = val; \ return count; \ } \ static ssize_t grcan_show_##name(struct device *sdev, \ struct device_attribute *att, \ char *buf) \ { \ struct net_device *dev = to_net_dev(sdev); \ struct grcan_priv *priv = netdev_priv(dev); \ return sprintf(buf, "%d\n", priv->config.name); \ } \ static DEVICE_ATTR(name, 0644, \ grcan_show_##name, \ grcan_store_##name); \ GRCAN_MODULE_PARAM(name, ushort, GRCAN_NOT_BOOL, desc) /* The following configuration options are made available both via module * parameters and writable sysfs files. See the chapter about GRCAN in the * documentation for the GRLIB VHDL library for further details. */ GRCAN_CONFIG_ATTR(enable0, "Configuration of physical interface 0. Determines\n" \ "the \"Enable 0\" bit of the configuration register.\n" \ "Format: 0 | 1\nDefault: 0\n"); GRCAN_CONFIG_ATTR(enable1, "Configuration of physical interface 1. 
Determines\n" \ "the \"Enable 1\" bit of the configuration register.\n" \ "Format: 0 | 1\nDefault: 0\n"); GRCAN_CONFIG_ATTR(select, "Select which physical interface to use.\n" \ "Format: 0 | 1\nDefault: 0\n"); /* The tx and rx buffer size configuration options are only available via module * parameters. */ GRCAN_MODULE_PARAM(txsize, uint, GRCAN_INVALID_BUFFER_SIZE, "Sets the size of the tx buffer.\n" \ "Format: <unsigned int> where (txsize & ~0x1fffc0) == 0\n" \ "Default: 1024\n"); GRCAN_MODULE_PARAM(rxsize, uint, GRCAN_INVALID_BUFFER_SIZE, "Sets the size of the rx buffer.\n" \ "Format: <unsigned int> where (rxsize & ~0x1fffc0) == 0\n" \ "Default: 1024\n"); /* Function that makes sure that configuration done using * module parameters is set to valid values */ static void grcan_sanitize_module_config(struct platform_device *ofdev) { grcan_sanitize_enable0(ofdev); grcan_sanitize_enable1(ofdev); grcan_sanitize_select(ofdev); grcan_sanitize_txsize(ofdev); grcan_sanitize_rxsize(ofdev); } static const struct attribute *const sysfs_grcan_attrs[] = { /* Config attrs */ &dev_attr_enable0.attr, &dev_attr_enable1.attr, &dev_attr_select.attr, NULL, }; static const struct attribute_group sysfs_grcan_group = { .name = "grcan", .attrs = (struct attribute **)sysfs_grcan_attrs, }; /* ========== Setting up the driver ========== */ static const struct net_device_ops grcan_netdev_ops = { .ndo_open = grcan_open, .ndo_stop = grcan_close, .ndo_start_xmit = grcan_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops grcan_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static int grcan_setup_netdev(struct platform_device *ofdev, void __iomem *base, int irq, u32 ambafreq, bool txbug) { struct net_device *dev; struct grcan_priv *priv; struct grcan_registers __iomem *regs; int err; dev = alloc_candev(sizeof(struct grcan_priv), 0); if (!dev) return -ENOMEM; dev->irq = irq; dev->flags |= IFF_ECHO; dev->netdev_ops = &grcan_netdev_ops; dev->ethtool_ops = &grcan_ethtool_ops; dev->sysfs_groups[0] = &sysfs_grcan_group; priv = netdev_priv(dev); memcpy(&priv->config, &grcan_module_config, sizeof(struct grcan_device_config)); priv->dev = dev; priv->ofdev_dev = &ofdev->dev; priv->regs = base; priv->can.bittiming_const = &grcan_bittiming_const; priv->can.do_set_bittiming = grcan_set_bittiming; priv->can.do_set_mode = grcan_set_mode; priv->can.do_get_berr_counter = grcan_get_berr_counter; priv->can.clock.freq = ambafreq; priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT; priv->need_txbug_workaround = txbug; /* Discover if triple sampling is supported by hardware */ regs = priv->regs; grcan_set_bits(&regs->ctrl, GRCAN_CTRL_RESET); grcan_set_bits(&regs->conf, GRCAN_CONF_SAM); if (grcan_read_bits(&regs->conf, GRCAN_CONF_SAM)) { priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; dev_dbg(&ofdev->dev, "Hardware supports triple-sampling\n"); } spin_lock_init(&priv->lock); if (priv->need_txbug_workaround) { timer_setup(&priv->rr_timer, grcan_running_reset, 0); timer_setup(&priv->hang_timer, grcan_initiate_running_reset, 0); } netif_napi_add_weight(dev, &priv->napi, grcan_poll, GRCAN_NAPI_WEIGHT); SET_NETDEV_DEV(dev, &ofdev->dev); dev_info(&ofdev->dev, "regs=0x%p, irq=%d, clock=%d\n", priv->regs, dev->irq, priv->can.clock.freq); err = register_candev(dev); if (err) goto exit_free_candev; platform_set_drvdata(ofdev, dev); /* Reset device to allow bit-timing to be set. No need to call * grcan_reset at this stage. That is done in grcan_open. 
*/ grcan_write_reg(&regs->ctrl, GRCAN_CTRL_RESET); return 0; exit_free_candev: free_candev(dev); return err; } static int grcan_probe(struct platform_device *ofdev) { struct device_node *np = ofdev->dev.of_node; struct device_node *sysid_parent; u32 sysid, ambafreq; int irq, err; void __iomem *base; bool txbug = true; /* Compare GRLIB version number with the first that does not * have the tx bug (see start_xmit) */ sysid_parent = of_find_node_by_path("/ambapp0"); if (sysid_parent) { err = of_property_read_u32(sysid_parent, "systemid", &sysid); if (!err && ((sysid & GRLIB_VERSION_MASK) >= GRCAN_TXBUG_SAFE_GRLIB_VERSION)) txbug = false; of_node_put(sysid_parent); } err = of_property_read_u32(np, "freq", &ambafreq); if (err) { dev_err(&ofdev->dev, "unable to fetch \"freq\" property\n"); goto exit_error; } base = devm_platform_ioremap_resource(ofdev, 0); if (IS_ERR(base)) { err = PTR_ERR(base); goto exit_error; } irq = irq_of_parse_and_map(np, GRCAN_IRQIX_IRQ); if (!irq) { dev_err(&ofdev->dev, "no irq found\n"); err = -ENODEV; goto exit_error; } grcan_sanitize_module_config(ofdev); err = grcan_setup_netdev(ofdev, base, irq, ambafreq, txbug); if (err) goto exit_dispose_irq; return 0; exit_dispose_irq: irq_dispose_mapping(irq); exit_error: dev_err(&ofdev->dev, "%s socket CAN driver initialization failed with error %d\n", DRV_NAME, err); return err; } static void grcan_remove(struct platform_device *ofdev) { struct net_device *dev = platform_get_drvdata(ofdev); struct grcan_priv *priv = netdev_priv(dev); unregister_candev(dev); /* Will in turn call grcan_close */ irq_dispose_mapping(dev->irq); netif_napi_del(&priv->napi); free_candev(dev); } static const struct of_device_id grcan_match[] = { {.name = "GAISLER_GRCAN"}, {.name = "01_03d"}, {.name = "GAISLER_GRHCAN"}, {.name = "01_034"}, {}, }; MODULE_DEVICE_TABLE(of, grcan_match); static struct platform_driver grcan_driver = { .driver = { .name = DRV_NAME, .of_match_table = grcan_match, }, .probe = grcan_probe, .remove_new = grcan_remove, }; module_platform_driver(grcan_driver); MODULE_AUTHOR("Aeroflex Gaisler AB."); MODULE_DESCRIPTION("Socket CAN driver for Aeroflex Gaisler GRCAN"); MODULE_LICENSE("GPL");
linux-master
drivers/net/can/grcan.c
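The "Notes on the tx cyclic buffer handling" comment in grcan_start_xmit above is easiest to check against a small stand-alone model. The sketch below is illustrative only: ring_add(), ring_sub() and tx_space() play the roles of grcan_ring_add(), grcan_ring_sub() and grcan_txspace(), but SLOT_SIZE, RING_SIZE, the simplified formulas and the main() harness are assumptions made for this example, not code taken from the driver.

#include <assert.h>
#include <stdio.h>

#define SLOT_SIZE 16U			/* stand-in for GRCAN_MSG_SIZE */
#define RING_SIZE (8U * SLOT_SIZE)	/* stand-in for dma->tx.size */

/* Advance a byte offset by b, wrapping at the end of the ring */
static unsigned int ring_add(unsigned int a, unsigned int b)
{
	unsigned int sum = a + b;

	return sum >= RING_SIZE ? sum - RING_SIZE : sum;
}

/* Distance from b forward to a, modulo the ring size */
static unsigned int ring_sub(unsigned int a, unsigned int b)
{
	return ring_add(a, RING_SIZE - b);
}

/* Free slots seen by the producer. One slot is always kept empty so that
 * txwr == eskbp can only mean "ring empty", never "ring full".
 */
static unsigned int tx_space(unsigned int txwr, unsigned int eskbp)
{
	unsigned int used = ring_sub(txwr, eskbp) / SLOT_SIZE;

	return RING_SIZE / SLOT_SIZE - 1 - used;
}

int main(void)
{
	unsigned int txwr = 0, txrd, eskbp = 0;

	/* Producer (grcan_start_xmit): enqueue until the ring is full */
	while (tx_space(txwr, eskbp) > 0)
		txwr = ring_add(txwr, SLOT_SIZE);
	assert(tx_space(txwr, eskbp) == 0);	/* queue would be stopped */

	/* Hardware: txrd chases txwr until all messages are sent */
	txrd = txwr;

	/* Interrupt handler: eskbp chases txrd, freeing one slot per echo */
	while (eskbp != txrd) {
		eskbp = ring_add(eskbp, SLOT_SIZE);
		printf("echoed a frame, space is now %u\n",
		       tx_space(txwr, eskbp));
	}
	return 0;
}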
// SPDX-License-Identifier: GPL-2.0 OR BSD-2-Clause /* Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. * Parts of this driver are based on the following: * - Kvaser linux pciefd driver (version 5.42) * - PEAK linux canfd driver */ #include <linux/bitfield.h> #include <linux/can/dev.h> #include <linux/device.h> #include <linux/ethtool.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/minmax.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/timer.h> MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Kvaser AB <[email protected]>"); MODULE_DESCRIPTION("CAN driver for Kvaser CAN/PCIe devices"); #define KVASER_PCIEFD_DRV_NAME "kvaser_pciefd" #define KVASER_PCIEFD_WAIT_TIMEOUT msecs_to_jiffies(1000) #define KVASER_PCIEFD_BEC_POLL_FREQ (jiffies + msecs_to_jiffies(200)) #define KVASER_PCIEFD_MAX_ERR_REP 256U #define KVASER_PCIEFD_CAN_TX_MAX_COUNT 17U #define KVASER_PCIEFD_MAX_CAN_CHANNELS 4UL #define KVASER_PCIEFD_DMA_COUNT 2U #define KVASER_PCIEFD_DMA_SIZE (4U * 1024U) #define KVASER_PCIEFD_VENDOR 0x1a07 /* Altera based devices */ #define KVASER_PCIEFD_4HS_DEVICE_ID 0x000d #define KVASER_PCIEFD_2HS_V2_DEVICE_ID 0x000e #define KVASER_PCIEFD_HS_V2_DEVICE_ID 0x000f #define KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID 0x0010 #define KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID 0x0011 /* SmartFusion2 based devices */ #define KVASER_PCIEFD_2CAN_V3_DEVICE_ID 0x0012 #define KVASER_PCIEFD_1CAN_V3_DEVICE_ID 0x0013 #define KVASER_PCIEFD_4CAN_V2_DEVICE_ID 0x0014 #define KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID 0x0015 #define KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID 0x0016 /* Altera SerDes Enable 64-bit DMA address translation */ #define KVASER_PCIEFD_ALTERA_DMA_64BIT BIT(0) /* SmartFusion2 SerDes LSB address translation mask */ #define KVASER_PCIEFD_SF2_DMA_LSB_MASK GENMASK(31, 12) /* Kvaser KCAN CAN controller registers */ #define KVASER_PCIEFD_KCAN_FIFO_REG 0x100 #define KVASER_PCIEFD_KCAN_FIFO_LAST_REG 0x180 #define KVASER_PCIEFD_KCAN_CTRL_REG 0x2c0 #define KVASER_PCIEFD_KCAN_CMD_REG 0x400 #define KVASER_PCIEFD_KCAN_IEN_REG 0x408 #define KVASER_PCIEFD_KCAN_IRQ_REG 0x410 #define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG 0x414 #define KVASER_PCIEFD_KCAN_STAT_REG 0x418 #define KVASER_PCIEFD_KCAN_MODE_REG 0x41c #define KVASER_PCIEFD_KCAN_BTRN_REG 0x420 #define KVASER_PCIEFD_KCAN_BUS_LOAD_REG 0x424 #define KVASER_PCIEFD_KCAN_BTRD_REG 0x428 #define KVASER_PCIEFD_KCAN_PWM_REG 0x430 /* System identification and information registers */ #define KVASER_PCIEFD_SYSID_VERSION_REG 0x8 #define KVASER_PCIEFD_SYSID_CANFREQ_REG 0xc #define KVASER_PCIEFD_SYSID_BUSFREQ_REG 0x10 #define KVASER_PCIEFD_SYSID_BUILD_REG 0x14 /* Shared receive buffer FIFO registers */ #define KVASER_PCIEFD_SRB_FIFO_LAST_REG 0x1f4 /* Shared receive buffer registers */ #define KVASER_PCIEFD_SRB_CMD_REG 0x0 #define KVASER_PCIEFD_SRB_IEN_REG 0x04 #define KVASER_PCIEFD_SRB_IRQ_REG 0x0c #define KVASER_PCIEFD_SRB_STAT_REG 0x10 #define KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG 0x14 #define KVASER_PCIEFD_SRB_CTRL_REG 0x18 /* System build information fields */ #define KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK GENMASK(31, 24) #define KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK GENMASK(23, 16) #define KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK GENMASK(7, 0) #define KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK GENMASK(15, 1) /* Reset DMA buffer 0, 1 and FIFO offset */ #define KVASER_PCIEFD_SRB_CMD_RDB1 BIT(5) #define KVASER_PCIEFD_SRB_CMD_RDB0 BIT(4) #define KVASER_PCIEFD_SRB_CMD_FOR BIT(0) /* DMA underflow, buffer 0 and 1 */ 
#define KVASER_PCIEFD_SRB_IRQ_DUF1 BIT(13) #define KVASER_PCIEFD_SRB_IRQ_DUF0 BIT(12) /* DMA overflow, buffer 0 and 1 */ #define KVASER_PCIEFD_SRB_IRQ_DOF1 BIT(11) #define KVASER_PCIEFD_SRB_IRQ_DOF0 BIT(10) /* DMA packet done, buffer 0 and 1 */ #define KVASER_PCIEFD_SRB_IRQ_DPD1 BIT(9) #define KVASER_PCIEFD_SRB_IRQ_DPD0 BIT(8) /* Got DMA support */ #define KVASER_PCIEFD_SRB_STAT_DMA BIT(24) /* DMA idle */ #define KVASER_PCIEFD_SRB_STAT_DI BIT(15) /* SRB current packet level */ #define KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK GENMASK(7, 0) /* DMA Enable */ #define KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE BIT(0) /* KCAN CTRL packet types */ #define KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK GENMASK(31, 29) #define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH 0x4 #define KVASER_PCIEFD_KCAN_CTRL_TYPE_EFRAME 0x5 /* Command sequence number */ #define KVASER_PCIEFD_KCAN_CMD_SEQ_MASK GENMASK(23, 16) /* Command bits */ #define KVASER_PCIEFD_KCAN_CMD_MASK GENMASK(5, 0) /* Abort, flush and reset */ #define KVASER_PCIEFD_KCAN_CMD_AT BIT(1) /* Request status packet */ #define KVASER_PCIEFD_KCAN_CMD_SRQ BIT(0) /* Transmitter unaligned */ #define KVASER_PCIEFD_KCAN_IRQ_TAL BIT(17) /* Tx FIFO empty */ #define KVASER_PCIEFD_KCAN_IRQ_TE BIT(16) /* Tx FIFO overflow */ #define KVASER_PCIEFD_KCAN_IRQ_TOF BIT(15) /* Tx buffer flush done */ #define KVASER_PCIEFD_KCAN_IRQ_TFD BIT(14) /* Abort done */ #define KVASER_PCIEFD_KCAN_IRQ_ABD BIT(13) /* Rx FIFO overflow */ #define KVASER_PCIEFD_KCAN_IRQ_ROF BIT(5) /* FDF bit when controller is in classic CAN mode */ #define KVASER_PCIEFD_KCAN_IRQ_FDIC BIT(3) /* Bus parameter protection error */ #define KVASER_PCIEFD_KCAN_IRQ_BPP BIT(2) /* Tx FIFO unaligned end */ #define KVASER_PCIEFD_KCAN_IRQ_TAE BIT(1) /* Tx FIFO unaligned read */ #define KVASER_PCIEFD_KCAN_IRQ_TAR BIT(0) /* Tx FIFO size */ #define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK GENMASK(23, 16) /* Tx FIFO current packet level */ #define KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK GENMASK(7, 0) /* Current status packet sequence number */ #define KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK GENMASK(31, 24) /* Controller got CAN FD capability */ #define KVASER_PCIEFD_KCAN_STAT_FD BIT(19) /* Controller got one-shot capability */ #define KVASER_PCIEFD_KCAN_STAT_CAP BIT(16) /* Controller in reset mode */ #define KVASER_PCIEFD_KCAN_STAT_IRM BIT(15) /* Reset mode request */ #define KVASER_PCIEFD_KCAN_STAT_RMR BIT(14) /* Bus off */ #define KVASER_PCIEFD_KCAN_STAT_BOFF BIT(11) /* Idle state. Controller in reset mode and no abort or flush pending */ #define KVASER_PCIEFD_KCAN_STAT_IDLE BIT(10) /* Abort request */ #define KVASER_PCIEFD_KCAN_STAT_AR BIT(7) /* Controller is bus off */ #define KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK \ (KVASER_PCIEFD_KCAN_STAT_AR | KVASER_PCIEFD_KCAN_STAT_BOFF | \ KVASER_PCIEFD_KCAN_STAT_RMR | KVASER_PCIEFD_KCAN_STAT_IRM) /* Classic CAN mode */ #define KVASER_PCIEFD_KCAN_MODE_CCM BIT(31) /* Active error flag enable. 
Clear to force error passive */ #define KVASER_PCIEFD_KCAN_MODE_EEN BIT(23) /* Acknowledgment packet type */ #define KVASER_PCIEFD_KCAN_MODE_APT BIT(20) /* CAN FD non-ISO */ #define KVASER_PCIEFD_KCAN_MODE_NIFDEN BIT(15) /* Error packet enable */ #define KVASER_PCIEFD_KCAN_MODE_EPEN BIT(12) /* Listen only mode */ #define KVASER_PCIEFD_KCAN_MODE_LOM BIT(9) /* Reset mode */ #define KVASER_PCIEFD_KCAN_MODE_RM BIT(8) /* BTRN and BTRD fields */ #define KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK GENMASK(30, 26) #define KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK GENMASK(25, 17) #define KVASER_PCIEFD_KCAN_BTRN_SJW_MASK GENMASK(16, 13) #define KVASER_PCIEFD_KCAN_BTRN_BRP_MASK GENMASK(12, 0) /* PWM Control fields */ #define KVASER_PCIEFD_KCAN_PWM_TOP_MASK GENMASK(23, 16) #define KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK GENMASK(7, 0) /* KCAN packet type IDs */ #define KVASER_PCIEFD_PACK_TYPE_DATA 0x0 #define KVASER_PCIEFD_PACK_TYPE_ACK 0x1 #define KVASER_PCIEFD_PACK_TYPE_TXRQ 0x2 #define KVASER_PCIEFD_PACK_TYPE_ERROR 0x3 #define KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK 0x4 #define KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK 0x5 #define KVASER_PCIEFD_PACK_TYPE_ACK_DATA 0x6 #define KVASER_PCIEFD_PACK_TYPE_STATUS 0x8 #define KVASER_PCIEFD_PACK_TYPE_BUS_LOAD 0x9 /* Common KCAN packet definitions, second word */ #define KVASER_PCIEFD_PACKET_TYPE_MASK GENMASK(31, 28) #define KVASER_PCIEFD_PACKET_CHID_MASK GENMASK(27, 25) #define KVASER_PCIEFD_PACKET_SEQ_MASK GENMASK(7, 0) /* KCAN Transmit/Receive data packet, first word */ #define KVASER_PCIEFD_RPACKET_IDE BIT(30) #define KVASER_PCIEFD_RPACKET_RTR BIT(29) #define KVASER_PCIEFD_RPACKET_ID_MASK GENMASK(28, 0) /* KCAN Transmit data packet, second word */ #define KVASER_PCIEFD_TPACKET_AREQ BIT(31) #define KVASER_PCIEFD_TPACKET_SMS BIT(16) /* KCAN Transmit/Receive data packet, second word */ #define KVASER_PCIEFD_RPACKET_FDF BIT(15) #define KVASER_PCIEFD_RPACKET_BRS BIT(14) #define KVASER_PCIEFD_RPACKET_ESI BIT(13) #define KVASER_PCIEFD_RPACKET_DLC_MASK GENMASK(11, 8) /* KCAN Transmit acknowledge packet, first word */ #define KVASER_PCIEFD_APACKET_NACK BIT(11) #define KVASER_PCIEFD_APACKET_ABL BIT(10) #define KVASER_PCIEFD_APACKET_CT BIT(9) #define KVASER_PCIEFD_APACKET_FLU BIT(8) /* KCAN Status packet, first word */ #define KVASER_PCIEFD_SPACK_RMCD BIT(22) #define KVASER_PCIEFD_SPACK_IRM BIT(21) #define KVASER_PCIEFD_SPACK_IDET BIT(20) #define KVASER_PCIEFD_SPACK_BOFF BIT(16) #define KVASER_PCIEFD_SPACK_RXERR_MASK GENMASK(15, 8) #define KVASER_PCIEFD_SPACK_TXERR_MASK GENMASK(7, 0) /* KCAN Status packet, second word */ #define KVASER_PCIEFD_SPACK_EPLR BIT(24) #define KVASER_PCIEFD_SPACK_EWLR BIT(23) #define KVASER_PCIEFD_SPACK_AUTO BIT(21) /* KCAN Error detected packet, second word */ #define KVASER_PCIEFD_EPACK_DIR_TX BIT(0) /* Macros for calculating addresses of registers */ #define KVASER_PCIEFD_GET_BLOCK_ADDR(pcie, block) \ ((pcie)->reg_base + (pcie)->driver_data->address_offset->block) #define KVASER_PCIEFD_PCI_IEN_ADDR(pcie) \ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_ien)) #define KVASER_PCIEFD_PCI_IRQ_ADDR(pcie) \ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), pci_irq)) #define KVASER_PCIEFD_SERDES_ADDR(pcie) \ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), serdes)) #define KVASER_PCIEFD_SYSID_ADDR(pcie) \ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), sysid)) #define KVASER_PCIEFD_LOOPBACK_ADDR(pcie) \ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), loopback)) #define KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) \ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb_fifo)) #define KVASER_PCIEFD_SRB_ADDR(pcie) \ 
(KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_srb)) #define KVASER_PCIEFD_KCAN_CH0_ADDR(pcie) \ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch0)) #define KVASER_PCIEFD_KCAN_CH1_ADDR(pcie) \ (KVASER_PCIEFD_GET_BLOCK_ADDR((pcie), kcan_ch1)) #define KVASER_PCIEFD_KCAN_CHANNEL_SPAN(pcie) \ (KVASER_PCIEFD_KCAN_CH1_ADDR((pcie)) - KVASER_PCIEFD_KCAN_CH0_ADDR((pcie))) #define KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i) \ (KVASER_PCIEFD_KCAN_CH0_ADDR((pcie)) + (i) * KVASER_PCIEFD_KCAN_CHANNEL_SPAN((pcie))) struct kvaser_pciefd; static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, dma_addr_t addr, int index); static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, dma_addr_t addr, int index); struct kvaser_pciefd_address_offset { u32 serdes; u32 pci_ien; u32 pci_irq; u32 sysid; u32 loopback; u32 kcan_srb_fifo; u32 kcan_srb; u32 kcan_ch0; u32 kcan_ch1; }; struct kvaser_pciefd_dev_ops { void (*kvaser_pciefd_write_dma_map)(struct kvaser_pciefd *pcie, dma_addr_t addr, int index); }; struct kvaser_pciefd_irq_mask { u32 kcan_rx0; u32 kcan_tx[KVASER_PCIEFD_MAX_CAN_CHANNELS]; u32 all; }; struct kvaser_pciefd_driver_data { const struct kvaser_pciefd_address_offset *address_offset; const struct kvaser_pciefd_irq_mask *irq_mask; const struct kvaser_pciefd_dev_ops *ops; }; static const struct kvaser_pciefd_address_offset kvaser_pciefd_altera_address_offset = { .serdes = 0x1000, .pci_ien = 0x50, .pci_irq = 0x40, .sysid = 0x1f020, .loopback = 0x1f000, .kcan_srb_fifo = 0x1f200, .kcan_srb = 0x1f400, .kcan_ch0 = 0x10000, .kcan_ch1 = 0x11000, }; static const struct kvaser_pciefd_address_offset kvaser_pciefd_sf2_address_offset = { .serdes = 0x280c8, .pci_ien = 0x102004, .pci_irq = 0x102008, .sysid = 0x100000, .loopback = 0x103000, .kcan_srb_fifo = 0x120000, .kcan_srb = 0x121000, .kcan_ch0 = 0x140000, .kcan_ch1 = 0x142000, }; static const struct kvaser_pciefd_irq_mask kvaser_pciefd_altera_irq_mask = { .kcan_rx0 = BIT(4), .kcan_tx = { BIT(0), BIT(1), BIT(2), BIT(3) }, .all = GENMASK(4, 0), }; static const struct kvaser_pciefd_irq_mask kvaser_pciefd_sf2_irq_mask = { .kcan_rx0 = BIT(4), .kcan_tx = { BIT(16), BIT(17), BIT(18), BIT(19) }, .all = GENMASK(19, 16) | BIT(4), }; static const struct kvaser_pciefd_dev_ops kvaser_pciefd_altera_dev_ops = { .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_altera, }; static const struct kvaser_pciefd_dev_ops kvaser_pciefd_sf2_dev_ops = { .kvaser_pciefd_write_dma_map = kvaser_pciefd_write_dma_map_sf2, }; static const struct kvaser_pciefd_driver_data kvaser_pciefd_altera_driver_data = { .address_offset = &kvaser_pciefd_altera_address_offset, .irq_mask = &kvaser_pciefd_altera_irq_mask, .ops = &kvaser_pciefd_altera_dev_ops, }; static const struct kvaser_pciefd_driver_data kvaser_pciefd_sf2_driver_data = { .address_offset = &kvaser_pciefd_sf2_address_offset, .irq_mask = &kvaser_pciefd_sf2_irq_mask, .ops = &kvaser_pciefd_sf2_dev_ops, }; struct kvaser_pciefd_can { struct can_priv can; struct kvaser_pciefd *kv_pcie; void __iomem *reg_base; struct can_berr_counter bec; u8 cmd_seq; int err_rep_cnt; int echo_idx; spinlock_t lock; /* Locks sensitive registers (e.g. 
MODE) */ spinlock_t echo_lock; /* Locks the message echo buffer */ struct timer_list bec_poll_timer; struct completion start_comp, flush_comp; }; struct kvaser_pciefd { struct pci_dev *pci; void __iomem *reg_base; struct kvaser_pciefd_can *can[KVASER_PCIEFD_MAX_CAN_CHANNELS]; const struct kvaser_pciefd_driver_data *driver_data; void *dma_data[KVASER_PCIEFD_DMA_COUNT]; u8 nr_channels; u32 bus_freq; u32 freq; u32 freq_to_ticks_div; }; struct kvaser_pciefd_rx_packet { u32 header[2]; u64 timestamp; }; struct kvaser_pciefd_tx_packet { u32 header[2]; u8 data[64]; }; static const struct can_bittiming_const kvaser_pciefd_bittiming_const = { .name = KVASER_PCIEFD_DRV_NAME, .tseg1_min = 1, .tseg1_max = 512, .tseg2_min = 1, .tseg2_max = 32, .sjw_max = 16, .brp_min = 1, .brp_max = 8192, .brp_inc = 1, }; static struct pci_device_id kvaser_pciefd_id_table[] = { { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4HS_DEVICE_ID), .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2HS_V2_DEVICE_ID), .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_HS_V2_DEVICE_ID), .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_HS_V2_DEVICE_ID), .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2HS_V2_DEVICE_ID), .driver_data = (kernel_ulong_t)&kvaser_pciefd_altera_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_2CAN_V3_DEVICE_ID), .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_1CAN_V3_DEVICE_ID), .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_4CAN_V2_DEVICE_ID), .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_2CAN_V3_DEVICE_ID), .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, }, { PCI_DEVICE(KVASER_PCIEFD_VENDOR, KVASER_PCIEFD_MINIPCIE_1CAN_V3_DEVICE_ID), .driver_data = (kernel_ulong_t)&kvaser_pciefd_sf2_driver_data, }, { 0, }, }; MODULE_DEVICE_TABLE(pci, kvaser_pciefd_id_table); static inline void kvaser_pciefd_send_kcan_cmd(struct kvaser_pciefd_can *can, u32 cmd) { iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_MASK, cmd) | FIELD_PREP(KVASER_PCIEFD_KCAN_CMD_SEQ_MASK, ++can->cmd_seq), can->reg_base + KVASER_PCIEFD_KCAN_CMD_REG); } static inline void kvaser_pciefd_request_status(struct kvaser_pciefd_can *can) { kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_SRQ); } static inline void kvaser_pciefd_abort_flush_reset(struct kvaser_pciefd_can *can) { kvaser_pciefd_send_kcan_cmd(can, KVASER_PCIEFD_KCAN_CMD_AT); } static void kvaser_pciefd_enable_err_gen(struct kvaser_pciefd_can *can) { u32 mode; unsigned long irq; spin_lock_irqsave(&can->lock, irq); mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); if (!(mode & KVASER_PCIEFD_KCAN_MODE_EPEN)) { mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); } spin_unlock_irqrestore(&can->lock, irq); } static void kvaser_pciefd_disable_err_gen(struct kvaser_pciefd_can *can) { u32 mode; unsigned long irq; spin_lock_irqsave(&can->lock, irq); mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); mode &= ~KVASER_PCIEFD_KCAN_MODE_EPEN; iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); 
spin_unlock_irqrestore(&can->lock, irq); } static void kvaser_pciefd_set_tx_irq(struct kvaser_pciefd_can *can) { u32 msk; msk = KVASER_PCIEFD_KCAN_IRQ_TE | KVASER_PCIEFD_KCAN_IRQ_ROF | KVASER_PCIEFD_KCAN_IRQ_TOF | KVASER_PCIEFD_KCAN_IRQ_ABD | KVASER_PCIEFD_KCAN_IRQ_TAE | KVASER_PCIEFD_KCAN_IRQ_TAL | KVASER_PCIEFD_KCAN_IRQ_FDIC | KVASER_PCIEFD_KCAN_IRQ_BPP | KVASER_PCIEFD_KCAN_IRQ_TAR; iowrite32(msk, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); } static inline void kvaser_pciefd_set_skb_timestamp(const struct kvaser_pciefd *pcie, struct sk_buff *skb, u64 timestamp) { skb_hwtstamps(skb)->hwtstamp = ns_to_ktime(div_u64(timestamp * 1000, pcie->freq_to_ticks_div)); } static void kvaser_pciefd_setup_controller(struct kvaser_pciefd_can *can) { u32 mode; unsigned long irq; spin_lock_irqsave(&can->lock, irq); mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); if (can->can.ctrlmode & CAN_CTRLMODE_FD) { mode &= ~KVASER_PCIEFD_KCAN_MODE_CCM; if (can->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) mode |= KVASER_PCIEFD_KCAN_MODE_NIFDEN; else mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; } else { mode |= KVASER_PCIEFD_KCAN_MODE_CCM; mode &= ~KVASER_PCIEFD_KCAN_MODE_NIFDEN; } if (can->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) mode |= KVASER_PCIEFD_KCAN_MODE_LOM; else mode &= ~KVASER_PCIEFD_KCAN_MODE_LOM; mode |= KVASER_PCIEFD_KCAN_MODE_EEN; mode |= KVASER_PCIEFD_KCAN_MODE_EPEN; /* Use ACK packet type */ mode &= ~KVASER_PCIEFD_KCAN_MODE_APT; mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); spin_unlock_irqrestore(&can->lock, irq); } static void kvaser_pciefd_start_controller_flush(struct kvaser_pciefd_can *can) { u32 status; unsigned long irq; spin_lock_irqsave(&can->lock, irq); iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); if (status & KVASER_PCIEFD_KCAN_STAT_IDLE) { /* If controller is already idle, run abort, flush and reset */ kvaser_pciefd_abort_flush_reset(can); } else if (!(status & KVASER_PCIEFD_KCAN_STAT_RMR)) { u32 mode; /* Put controller in reset mode */ mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); mode |= KVASER_PCIEFD_KCAN_MODE_RM; iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); } spin_unlock_irqrestore(&can->lock, irq); } static int kvaser_pciefd_bus_on(struct kvaser_pciefd_can *can) { u32 mode; unsigned long irq; del_timer(&can->bec_poll_timer); if (!completion_done(&can->flush_comp)) kvaser_pciefd_start_controller_flush(can); if (!wait_for_completion_timeout(&can->flush_comp, KVASER_PCIEFD_WAIT_TIMEOUT)) { netdev_err(can->can.dev, "Timeout during bus on flush\n"); return -ETIMEDOUT; } spin_lock_irqsave(&can->lock, irq); iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); mode &= ~KVASER_PCIEFD_KCAN_MODE_RM; iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); spin_unlock_irqrestore(&can->lock, irq); if (!wait_for_completion_timeout(&can->start_comp, KVASER_PCIEFD_WAIT_TIMEOUT)) { netdev_err(can->can.dev, "Timeout during bus on reset\n"); return -ETIMEDOUT; } /* Reset interrupt handling */ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); 
kvaser_pciefd_set_tx_irq(can); kvaser_pciefd_setup_controller(can); can->can.state = CAN_STATE_ERROR_ACTIVE; netif_wake_queue(can->can.dev); can->bec.txerr = 0; can->bec.rxerr = 0; can->err_rep_cnt = 0; return 0; } static void kvaser_pciefd_pwm_stop(struct kvaser_pciefd_can *can) { u8 top; u32 pwm_ctrl; unsigned long irq; spin_lock_irqsave(&can->lock, irq); pwm_ctrl = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); top = FIELD_GET(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, pwm_ctrl); /* Set duty cycle to zero */ pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top); iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); spin_unlock_irqrestore(&can->lock, irq); } static void kvaser_pciefd_pwm_start(struct kvaser_pciefd_can *can) { int top, trigger; u32 pwm_ctrl; unsigned long irq; kvaser_pciefd_pwm_stop(can); spin_lock_irqsave(&can->lock, irq); /* Set frequency to 500 kHz */ top = can->kv_pcie->bus_freq / (2 * 500000) - 1; pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, top); pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top); iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); /* Set duty cycle to 95% */ trigger = (100 * top - 95 * (top + 1) + 50) / 100; pwm_ctrl = FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TRIGGER_MASK, trigger); pwm_ctrl |= FIELD_PREP(KVASER_PCIEFD_KCAN_PWM_TOP_MASK, top); iowrite32(pwm_ctrl, can->reg_base + KVASER_PCIEFD_KCAN_PWM_REG); spin_unlock_irqrestore(&can->lock, irq); } static int kvaser_pciefd_open(struct net_device *netdev) { int err; struct kvaser_pciefd_can *can = netdev_priv(netdev); err = open_candev(netdev); if (err) return err; err = kvaser_pciefd_bus_on(can); if (err) { close_candev(netdev); return err; } return 0; } static int kvaser_pciefd_stop(struct net_device *netdev) { struct kvaser_pciefd_can *can = netdev_priv(netdev); int ret = 0; /* Don't interrupt ongoing flush */ if (!completion_done(&can->flush_comp)) kvaser_pciefd_start_controller_flush(can); if (!wait_for_completion_timeout(&can->flush_comp, KVASER_PCIEFD_WAIT_TIMEOUT)) { netdev_err(can->can.dev, "Timeout during stop\n"); ret = -ETIMEDOUT; } else { iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); del_timer(&can->bec_poll_timer); } can->can.state = CAN_STATE_STOPPED; close_candev(netdev); return ret; } static int kvaser_pciefd_prepare_tx_packet(struct kvaser_pciefd_tx_packet *p, struct kvaser_pciefd_can *can, struct sk_buff *skb) { struct canfd_frame *cf = (struct canfd_frame *)skb->data; int packet_size; int seq = can->echo_idx; memset(p, 0, sizeof(*p)); if (can->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) p->header[1] |= KVASER_PCIEFD_TPACKET_SMS; if (cf->can_id & CAN_RTR_FLAG) p->header[0] |= KVASER_PCIEFD_RPACKET_RTR; if (cf->can_id & CAN_EFF_FLAG) p->header[0] |= KVASER_PCIEFD_RPACKET_IDE; p->header[0] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_ID_MASK, cf->can_id); p->header[1] |= KVASER_PCIEFD_TPACKET_AREQ; if (can_is_canfd_skb(skb)) { p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, can_fd_len2dlc(cf->len)); p->header[1] |= KVASER_PCIEFD_RPACKET_FDF; if (cf->flags & CANFD_BRS) p->header[1] |= KVASER_PCIEFD_RPACKET_BRS; if (cf->flags & CANFD_ESI) p->header[1] |= KVASER_PCIEFD_RPACKET_ESI; } else { p->header[1] |= FIELD_PREP(KVASER_PCIEFD_RPACKET_DLC_MASK, can_get_cc_dlc((struct can_frame *)cf, can->can.ctrlmode)); } p->header[1] |= FIELD_PREP(KVASER_PCIEFD_PACKET_SEQ_MASK, seq); packet_size = cf->len; memcpy(p->data, cf->data, packet_size); return DIV_ROUND_UP(packet_size, 4); } static netdev_tx_t kvaser_pciefd_start_xmit(struct sk_buff 
*skb, struct net_device *netdev) { struct kvaser_pciefd_can *can = netdev_priv(netdev); unsigned long irq_flags; struct kvaser_pciefd_tx_packet packet; int nr_words; u8 count; if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; nr_words = kvaser_pciefd_prepare_tx_packet(&packet, can, skb); spin_lock_irqsave(&can->echo_lock, irq_flags); /* Prepare and save echo skb in internal slot */ can_put_echo_skb(skb, netdev, can->echo_idx, 0); /* Move echo index to the next slot */ can->echo_idx = (can->echo_idx + 1) % can->can.echo_skb_max; /* Write header to fifo */ iowrite32(packet.header[0], can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); iowrite32(packet.header[1], can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG); if (nr_words) { u32 data_last = ((u32 *)packet.data)[nr_words - 1]; /* Write data to fifo, except last word */ iowrite32_rep(can->reg_base + KVASER_PCIEFD_KCAN_FIFO_REG, packet.data, nr_words - 1); /* Write last word to end of fifo */ __raw_writel(data_last, can->reg_base + KVASER_PCIEFD_KCAN_FIFO_LAST_REG); } else { /* Complete write to fifo */ __raw_writel(0, can->reg_base + KVASER_PCIEFD_KCAN_FIFO_LAST_REG); } count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); /* No room for a new message, stop the queue until at least one * successful transmit */ if (count >= can->can.echo_skb_max || can->can.echo_skb[can->echo_idx]) netif_stop_queue(netdev); spin_unlock_irqrestore(&can->echo_lock, irq_flags); return NETDEV_TX_OK; } static int kvaser_pciefd_set_bittiming(struct kvaser_pciefd_can *can, bool data) { u32 mode, test, btrn; unsigned long irq_flags; int ret; struct can_bittiming *bt; if (data) bt = &can->can.data_bittiming; else bt = &can->can.bittiming; btrn = FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG2_MASK, bt->phase_seg2 - 1) | FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) | FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_SJW_MASK, bt->sjw - 1) | FIELD_PREP(KVASER_PCIEFD_KCAN_BTRN_BRP_MASK, bt->brp - 1); spin_lock_irqsave(&can->lock, irq_flags); mode = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); /* Put the circuit in reset mode */ iowrite32(mode | KVASER_PCIEFD_KCAN_MODE_RM, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); /* Can only set bittiming if in reset mode */ ret = readl_poll_timeout(can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG, test, test & KVASER_PCIEFD_KCAN_MODE_RM, 0, 10); if (ret) { spin_unlock_irqrestore(&can->lock, irq_flags); return -EBUSY; } if (data) iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRD_REG); else iowrite32(btrn, can->reg_base + KVASER_PCIEFD_KCAN_BTRN_REG); /* Restore previous reset mode status */ iowrite32(mode, can->reg_base + KVASER_PCIEFD_KCAN_MODE_REG); spin_unlock_irqrestore(&can->lock, irq_flags); return 0; } static int kvaser_pciefd_set_nominal_bittiming(struct net_device *ndev) { return kvaser_pciefd_set_bittiming(netdev_priv(ndev), false); } static int kvaser_pciefd_set_data_bittiming(struct net_device *ndev) { return kvaser_pciefd_set_bittiming(netdev_priv(ndev), true); } static int kvaser_pciefd_set_mode(struct net_device *ndev, enum can_mode mode) { struct kvaser_pciefd_can *can = netdev_priv(ndev); int ret = 0; switch (mode) { case CAN_MODE_START: if (!can->can.restart_ms) ret = kvaser_pciefd_bus_on(can); break; default: return -EOPNOTSUPP; } return ret; } static int kvaser_pciefd_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { struct kvaser_pciefd_can *can = netdev_priv(ndev); bec->rxerr = 
can->bec.rxerr; bec->txerr = can->bec.txerr; return 0; } static void kvaser_pciefd_bec_poll_timer(struct timer_list *data) { struct kvaser_pciefd_can *can = from_timer(can, data, bec_poll_timer); kvaser_pciefd_enable_err_gen(can); kvaser_pciefd_request_status(can); can->err_rep_cnt = 0; } static const struct net_device_ops kvaser_pciefd_netdev_ops = { .ndo_open = kvaser_pciefd_open, .ndo_stop = kvaser_pciefd_stop, .ndo_eth_ioctl = can_eth_ioctl_hwts, .ndo_start_xmit = kvaser_pciefd_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops kvaser_pciefd_ethtool_ops = { .get_ts_info = can_ethtool_op_get_ts_info_hwts, }; static int kvaser_pciefd_setup_can_ctrls(struct kvaser_pciefd *pcie) { int i; for (i = 0; i < pcie->nr_channels; i++) { struct net_device *netdev; struct kvaser_pciefd_can *can; u32 status, tx_nr_packets_max; netdev = alloc_candev(sizeof(struct kvaser_pciefd_can), KVASER_PCIEFD_CAN_TX_MAX_COUNT); if (!netdev) return -ENOMEM; can = netdev_priv(netdev); netdev->netdev_ops = &kvaser_pciefd_netdev_ops; netdev->ethtool_ops = &kvaser_pciefd_ethtool_ops; can->reg_base = KVASER_PCIEFD_KCAN_CHX_ADDR(pcie, i); can->kv_pcie = pcie; can->cmd_seq = 0; can->err_rep_cnt = 0; can->bec.txerr = 0; can->bec.rxerr = 0; init_completion(&can->start_comp); init_completion(&can->flush_comp); timer_setup(&can->bec_poll_timer, kvaser_pciefd_bec_poll_timer, 0); /* Disable Bus load reporting */ iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_BUS_LOAD_REG); tx_nr_packets_max = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_MAX_MASK, ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); can->can.clock.freq = pcie->freq; can->can.echo_skb_max = min(KVASER_PCIEFD_CAN_TX_MAX_COUNT, tx_nr_packets_max - 1); can->echo_idx = 0; spin_lock_init(&can->echo_lock); spin_lock_init(&can->lock); can->can.bittiming_const = &kvaser_pciefd_bittiming_const; can->can.data_bittiming_const = &kvaser_pciefd_bittiming_const; can->can.do_set_bittiming = kvaser_pciefd_set_nominal_bittiming; can->can.do_set_data_bittiming = kvaser_pciefd_set_data_bittiming; can->can.do_set_mode = kvaser_pciefd_set_mode; can->can.do_get_berr_counter = kvaser_pciefd_get_berr_counter; can->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | CAN_CTRLMODE_CC_LEN8_DLC; status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); if (!(status & KVASER_PCIEFD_KCAN_STAT_FD)) { dev_err(&pcie->pci->dev, "CAN FD not supported as expected %d\n", i); free_candev(netdev); return -ENODEV; } if (status & KVASER_PCIEFD_KCAN_STAT_CAP) can->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; netdev->flags |= IFF_ECHO; SET_NETDEV_DEV(netdev, &pcie->pci->dev); iowrite32(GENMASK(31, 0), can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); pcie->can[i] = can; kvaser_pciefd_pwm_start(can); } return 0; } static int kvaser_pciefd_reg_candev(struct kvaser_pciefd *pcie) { int i; for (i = 0; i < pcie->nr_channels; i++) { int err = register_candev(pcie->can[i]->can.dev); if (err) { int j; /* Unregister all successfully registered devices. 
*/ for (j = 0; j < i; j++) unregister_candev(pcie->can[j]->can.dev); return err; } } return 0; } static void kvaser_pciefd_write_dma_map_altera(struct kvaser_pciefd *pcie, dma_addr_t addr, int index) { void __iomem *serdes_base; u32 word1, word2; #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT word1 = addr | KVASER_PCIEFD_ALTERA_DMA_64BIT; word2 = addr >> 32; #else word1 = addr; word2 = 0; #endif serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x8 * index; iowrite32(word1, serdes_base); iowrite32(word2, serdes_base + 0x4); } static void kvaser_pciefd_write_dma_map_sf2(struct kvaser_pciefd *pcie, dma_addr_t addr, int index) { void __iomem *serdes_base; u32 lsb = addr & KVASER_PCIEFD_SF2_DMA_LSB_MASK; u32 msb = 0x0; #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT msb = addr >> 32; #endif serdes_base = KVASER_PCIEFD_SERDES_ADDR(pcie) + 0x10 * index; iowrite32(lsb, serdes_base); iowrite32(msb, serdes_base + 0x4); } static int kvaser_pciefd_setup_dma(struct kvaser_pciefd *pcie) { int i; u32 srb_status; u32 srb_packet_count; dma_addr_t dma_addr[KVASER_PCIEFD_DMA_COUNT]; /* Disable the DMA */ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); for (i = 0; i < KVASER_PCIEFD_DMA_COUNT; i++) { pcie->dma_data[i] = dmam_alloc_coherent(&pcie->pci->dev, KVASER_PCIEFD_DMA_SIZE, &dma_addr[i], GFP_KERNEL); if (!pcie->dma_data[i] || !dma_addr[i]) { dev_err(&pcie->pci->dev, "Rx dma_alloc(%u) failure\n", KVASER_PCIEFD_DMA_SIZE); return -ENOMEM; } pcie->driver_data->ops->kvaser_pciefd_write_dma_map(pcie, dma_addr[i], i); } /* Reset Rx FIFO, and both DMA buffers */ iowrite32(KVASER_PCIEFD_SRB_CMD_FOR | KVASER_PCIEFD_SRB_CMD_RDB0 | KVASER_PCIEFD_SRB_CMD_RDB1, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); /* Empty Rx FIFO */ srb_packet_count = FIELD_GET(KVASER_PCIEFD_SRB_RX_NR_PACKETS_MASK, ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_RX_NR_PACKETS_REG)); while (srb_packet_count) { /* Drop current packet in FIFO */ ioread32(KVASER_PCIEFD_SRB_FIFO_ADDR(pcie) + KVASER_PCIEFD_SRB_FIFO_LAST_REG); srb_packet_count--; } srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DI)) { dev_err(&pcie->pci->dev, "DMA not idle before enabling\n"); return -EIO; } /* Enable the DMA */ iowrite32(KVASER_PCIEFD_SRB_CTRL_DMA_ENABLE, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); return 0; } static int kvaser_pciefd_setup_board(struct kvaser_pciefd *pcie) { u32 version, srb_status, build; version = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_VERSION_REG); pcie->nr_channels = min(KVASER_PCIEFD_MAX_CAN_CHANNELS, FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_NR_CHAN_MASK, version)); build = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUILD_REG); dev_dbg(&pcie->pci->dev, "Version %lu.%lu.%lu\n", FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MAJOR_MASK, version), FIELD_GET(KVASER_PCIEFD_SYSID_VERSION_MINOR_MASK, version), FIELD_GET(KVASER_PCIEFD_SYSID_BUILD_SEQ_MASK, build)); srb_status = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_STAT_REG); if (!(srb_status & KVASER_PCIEFD_SRB_STAT_DMA)) { dev_err(&pcie->pci->dev, "Hardware without DMA is not supported\n"); return -ENODEV; } pcie->bus_freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_BUSFREQ_REG); pcie->freq = ioread32(KVASER_PCIEFD_SYSID_ADDR(pcie) + KVASER_PCIEFD_SYSID_CANFREQ_REG); pcie->freq_to_ticks_div = pcie->freq / 1000000; if (pcie->freq_to_ticks_div == 0) pcie->freq_to_ticks_div = 1; /* Turn off all loopback functionality 
*/ iowrite32(0, KVASER_PCIEFD_LOOPBACK_ADDR(pcie)); return 0; } static int kvaser_pciefd_handle_data_packet(struct kvaser_pciefd *pcie, struct kvaser_pciefd_rx_packet *p, __le32 *data) { struct sk_buff *skb; struct canfd_frame *cf; struct can_priv *priv; u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); u8 dlc; if (ch_id >= pcie->nr_channels) return -EIO; priv = &pcie->can[ch_id]->can; dlc = FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1]); if (p->header[1] & KVASER_PCIEFD_RPACKET_FDF) { skb = alloc_canfd_skb(priv->dev, &cf); if (!skb) { priv->dev->stats.rx_dropped++; return -ENOMEM; } cf->len = can_fd_dlc2len(dlc); if (p->header[1] & KVASER_PCIEFD_RPACKET_BRS) cf->flags |= CANFD_BRS; if (p->header[1] & KVASER_PCIEFD_RPACKET_ESI) cf->flags |= CANFD_ESI; } else { skb = alloc_can_skb(priv->dev, (struct can_frame **)&cf); if (!skb) { priv->dev->stats.rx_dropped++; return -ENOMEM; } can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->ctrlmode); } cf->can_id = FIELD_GET(KVASER_PCIEFD_RPACKET_ID_MASK, p->header[0]); if (p->header[0] & KVASER_PCIEFD_RPACKET_IDE) cf->can_id |= CAN_EFF_FLAG; if (p->header[0] & KVASER_PCIEFD_RPACKET_RTR) { cf->can_id |= CAN_RTR_FLAG; } else { memcpy(cf->data, data, cf->len); priv->dev->stats.rx_bytes += cf->len; } priv->dev->stats.rx_packets++; kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); return netif_rx(skb); } static void kvaser_pciefd_change_state(struct kvaser_pciefd_can *can, struct can_frame *cf, enum can_state new_state, enum can_state tx_state, enum can_state rx_state) { can_change_state(can->can.dev, cf, tx_state, rx_state); if (new_state == CAN_STATE_BUS_OFF) { struct net_device *ndev = can->can.dev; unsigned long irq_flags; spin_lock_irqsave(&can->lock, irq_flags); netif_stop_queue(can->can.dev); spin_unlock_irqrestore(&can->lock, irq_flags); /* Prevent CAN controller from automatically recovering from bus off */ if (!can->can.restart_ms) { kvaser_pciefd_start_controller_flush(can); can_bus_off(ndev); } } } static void kvaser_pciefd_packet_to_state(struct kvaser_pciefd_rx_packet *p, struct can_berr_counter *bec, enum can_state *new_state, enum can_state *tx_state, enum can_state *rx_state) { if (p->header[0] & KVASER_PCIEFD_SPACK_BOFF || p->header[0] & KVASER_PCIEFD_SPACK_IRM) *new_state = CAN_STATE_BUS_OFF; else if (bec->txerr >= 255 || bec->rxerr >= 255) *new_state = CAN_STATE_BUS_OFF; else if (p->header[1] & KVASER_PCIEFD_SPACK_EPLR) *new_state = CAN_STATE_ERROR_PASSIVE; else if (bec->txerr >= 128 || bec->rxerr >= 128) *new_state = CAN_STATE_ERROR_PASSIVE; else if (p->header[1] & KVASER_PCIEFD_SPACK_EWLR) *new_state = CAN_STATE_ERROR_WARNING; else if (bec->txerr >= 96 || bec->rxerr >= 96) *new_state = CAN_STATE_ERROR_WARNING; else *new_state = CAN_STATE_ERROR_ACTIVE; *tx_state = bec->txerr >= bec->rxerr ? *new_state : 0; *rx_state = bec->txerr <= bec->rxerr ? 
*new_state : 0; } static int kvaser_pciefd_rx_error_frame(struct kvaser_pciefd_can *can, struct kvaser_pciefd_rx_packet *p) { struct can_berr_counter bec; enum can_state old_state, new_state, tx_state, rx_state; struct net_device *ndev = can->can.dev; struct sk_buff *skb; struct can_frame *cf = NULL; old_state = can->can.state; bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]); bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]); kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state); skb = alloc_can_err_skb(ndev, &cf); if (new_state != old_state) { kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state); if (old_state == CAN_STATE_BUS_OFF && new_state == CAN_STATE_ERROR_ACTIVE && can->can.restart_ms) { can->can.can_stats.restarts++; if (skb) cf->can_id |= CAN_ERR_RESTARTED; } } can->err_rep_cnt++; can->can.can_stats.bus_error++; if (p->header[1] & KVASER_PCIEFD_EPACK_DIR_TX) ndev->stats.tx_errors++; else ndev->stats.rx_errors++; can->bec.txerr = bec.txerr; can->bec.rxerr = bec.rxerr; if (!skb) { ndev->stats.rx_dropped++; return -ENOMEM; } kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp); cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_CNT; cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; netif_rx(skb); return 0; } static int kvaser_pciefd_handle_error_packet(struct kvaser_pciefd *pcie, struct kvaser_pciefd_rx_packet *p) { struct kvaser_pciefd_can *can; u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); if (ch_id >= pcie->nr_channels) return -EIO; can = pcie->can[ch_id]; kvaser_pciefd_rx_error_frame(can, p); if (can->err_rep_cnt >= KVASER_PCIEFD_MAX_ERR_REP) /* Do not report more errors, until bec_poll_timer expires */ kvaser_pciefd_disable_err_gen(can); /* Start polling the error counters */ mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); return 0; } static int kvaser_pciefd_handle_status_resp(struct kvaser_pciefd_can *can, struct kvaser_pciefd_rx_packet *p) { struct can_berr_counter bec; enum can_state old_state, new_state, tx_state, rx_state; old_state = can->can.state; bec.txerr = FIELD_GET(KVASER_PCIEFD_SPACK_TXERR_MASK, p->header[0]); bec.rxerr = FIELD_GET(KVASER_PCIEFD_SPACK_RXERR_MASK, p->header[0]); kvaser_pciefd_packet_to_state(p, &bec, &new_state, &tx_state, &rx_state); if (new_state != old_state) { struct net_device *ndev = can->can.dev; struct sk_buff *skb; struct can_frame *cf; skb = alloc_can_err_skb(ndev, &cf); if (!skb) { ndev->stats.rx_dropped++; return -ENOMEM; } kvaser_pciefd_change_state(can, cf, new_state, tx_state, rx_state); if (old_state == CAN_STATE_BUS_OFF && new_state == CAN_STATE_ERROR_ACTIVE && can->can.restart_ms) { can->can.can_stats.restarts++; cf->can_id |= CAN_ERR_RESTARTED; } kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp); cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; netif_rx(skb); } can->bec.txerr = bec.txerr; can->bec.rxerr = bec.rxerr; /* Check if we need to poll the error counters */ if (bec.txerr || bec.rxerr) mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); return 0; } static int kvaser_pciefd_handle_status_packet(struct kvaser_pciefd *pcie, struct kvaser_pciefd_rx_packet *p) { struct kvaser_pciefd_can *can; u8 cmdseq; u32 status; u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); if (ch_id >= pcie->nr_channels) return -EIO; can = pcie->can[ch_id]; status = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_STAT_REG); cmdseq = FIELD_GET(KVASER_PCIEFD_KCAN_STAT_SEQNO_MASK, status); /* Reset done, start 
abort and flush */ if (p->header[0] & KVASER_PCIEFD_SPACK_IRM && p->header[0] & KVASER_PCIEFD_SPACK_RMCD && p->header[1] & KVASER_PCIEFD_SPACK_AUTO && cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) && status & KVASER_PCIEFD_KCAN_STAT_IDLE) { iowrite32(KVASER_PCIEFD_KCAN_IRQ_ABD, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); kvaser_pciefd_abort_flush_reset(can); } else if (p->header[0] & KVASER_PCIEFD_SPACK_IDET && p->header[0] & KVASER_PCIEFD_SPACK_IRM && cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1]) && status & KVASER_PCIEFD_KCAN_STAT_IDLE) { /* Reset detected, send end of flush if no packets are in the FIFO */ u8 count; count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); if (!count) iowrite32(FIELD_PREP(KVASER_PCIEFD_KCAN_CTRL_TYPE_MASK, KVASER_PCIEFD_KCAN_CTRL_TYPE_EFLUSH), can->reg_base + KVASER_PCIEFD_KCAN_CTRL_REG); } else if (!(p->header[1] & KVASER_PCIEFD_SPACK_AUTO) && cmdseq == FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[1])) { /* Response to status request received */ kvaser_pciefd_handle_status_resp(can, p); if (can->can.state != CAN_STATE_BUS_OFF && can->can.state != CAN_STATE_ERROR_ACTIVE) { mod_timer(&can->bec_poll_timer, KVASER_PCIEFD_BEC_POLL_FREQ); } } else if (p->header[0] & KVASER_PCIEFD_SPACK_RMCD && !(status & KVASER_PCIEFD_KCAN_STAT_BUS_OFF_MASK)) { /* Reset to bus on detected */ if (!completion_done(&can->start_comp)) complete(&can->start_comp); } return 0; } static void kvaser_pciefd_handle_nack_packet(struct kvaser_pciefd_can *can, struct kvaser_pciefd_rx_packet *p) { struct sk_buff *skb; struct can_frame *cf; skb = alloc_can_err_skb(can->can.dev, &cf); can->can.dev->stats.tx_errors++; if (p->header[0] & KVASER_PCIEFD_APACKET_ABL) { if (skb) cf->can_id |= CAN_ERR_LOSTARB; can->can.can_stats.arbitration_lost++; } else if (skb) { cf->can_id |= CAN_ERR_ACK; } if (skb) { cf->can_id |= CAN_ERR_BUSERROR; kvaser_pciefd_set_skb_timestamp(can->kv_pcie, skb, p->timestamp); netif_rx(skb); } else { can->can.dev->stats.rx_dropped++; netdev_warn(can->can.dev, "No memory left for err_skb\n"); } } static int kvaser_pciefd_handle_ack_packet(struct kvaser_pciefd *pcie, struct kvaser_pciefd_rx_packet *p) { struct kvaser_pciefd_can *can; bool one_shot_fail = false; u8 ch_id = FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); if (ch_id >= pcie->nr_channels) return -EIO; can = pcie->can[ch_id]; /* Ignore control packet ACK */ if (p->header[0] & KVASER_PCIEFD_APACKET_CT) return 0; if (p->header[0] & KVASER_PCIEFD_APACKET_NACK) { kvaser_pciefd_handle_nack_packet(can, p); one_shot_fail = true; } if (p->header[0] & KVASER_PCIEFD_APACKET_FLU) { netdev_dbg(can->can.dev, "Packet was flushed\n"); } else { int echo_idx = FIELD_GET(KVASER_PCIEFD_PACKET_SEQ_MASK, p->header[0]); int len; u8 count; struct sk_buff *skb; skb = can->can.echo_skb[echo_idx]; if (skb) kvaser_pciefd_set_skb_timestamp(pcie, skb, p->timestamp); len = can_get_echo_skb(can->can.dev, echo_idx, NULL); count = FIELD_GET(KVASER_PCIEFD_KCAN_TX_NR_PACKETS_CURRENT_MASK, ioread32(can->reg_base + KVASER_PCIEFD_KCAN_TX_NR_PACKETS_REG)); if (count < can->can.echo_skb_max && netif_queue_stopped(can->can.dev)) netif_wake_queue(can->can.dev); if (!one_shot_fail) { can->can.dev->stats.tx_bytes += len; can->can.dev->stats.tx_packets++; } } return 0; } static int kvaser_pciefd_handle_eflush_packet(struct kvaser_pciefd *pcie, struct kvaser_pciefd_rx_packet *p) { struct kvaser_pciefd_can *can; u8 ch_id = 
FIELD_GET(KVASER_PCIEFD_PACKET_CHID_MASK, p->header[1]); if (ch_id >= pcie->nr_channels) return -EIO; can = pcie->can[ch_id]; if (!completion_done(&can->flush_comp)) complete(&can->flush_comp); return 0; } static int kvaser_pciefd_read_packet(struct kvaser_pciefd *pcie, int *start_pos, int dma_buf) { __le32 *buffer = pcie->dma_data[dma_buf]; __le64 timestamp; struct kvaser_pciefd_rx_packet packet; struct kvaser_pciefd_rx_packet *p = &packet; u8 type; int pos = *start_pos; int size; int ret = 0; size = le32_to_cpu(buffer[pos++]); if (!size) { *start_pos = 0; return 0; } p->header[0] = le32_to_cpu(buffer[pos++]); p->header[1] = le32_to_cpu(buffer[pos++]); /* Read 64-bit timestamp */ memcpy(&timestamp, &buffer[pos], sizeof(__le64)); pos += 2; p->timestamp = le64_to_cpu(timestamp); type = FIELD_GET(KVASER_PCIEFD_PACKET_TYPE_MASK, p->header[1]); switch (type) { case KVASER_PCIEFD_PACK_TYPE_DATA: ret = kvaser_pciefd_handle_data_packet(pcie, p, &buffer[pos]); if (!(p->header[0] & KVASER_PCIEFD_RPACKET_RTR)) { u8 data_len; data_len = can_fd_dlc2len(FIELD_GET(KVASER_PCIEFD_RPACKET_DLC_MASK, p->header[1])); pos += DIV_ROUND_UP(data_len, 4); } break; case KVASER_PCIEFD_PACK_TYPE_ACK: ret = kvaser_pciefd_handle_ack_packet(pcie, p); break; case KVASER_PCIEFD_PACK_TYPE_STATUS: ret = kvaser_pciefd_handle_status_packet(pcie, p); break; case KVASER_PCIEFD_PACK_TYPE_ERROR: ret = kvaser_pciefd_handle_error_packet(pcie, p); break; case KVASER_PCIEFD_PACK_TYPE_EFLUSH_ACK: ret = kvaser_pciefd_handle_eflush_packet(pcie, p); break; case KVASER_PCIEFD_PACK_TYPE_ACK_DATA: case KVASER_PCIEFD_PACK_TYPE_BUS_LOAD: case KVASER_PCIEFD_PACK_TYPE_EFRAME_ACK: case KVASER_PCIEFD_PACK_TYPE_TXRQ: dev_info(&pcie->pci->dev, "Received unexpected packet type 0x%08X\n", type); break; default: dev_err(&pcie->pci->dev, "Unknown packet type 0x%08X\n", type); ret = -EIO; break; } if (ret) return ret; /* Position does not point to the end of the packet, * corrupted packet size? 
*/ if ((*start_pos + size) != pos) return -EIO; /* Point to the next packet header, if any */ *start_pos = pos; return ret; } static int kvaser_pciefd_read_buffer(struct kvaser_pciefd *pcie, int dma_buf) { int pos = 0; int res = 0; do { res = kvaser_pciefd_read_packet(pcie, &pos, dma_buf); } while (!res && pos > 0 && pos < KVASER_PCIEFD_DMA_SIZE); return res; } static void kvaser_pciefd_receive_irq(struct kvaser_pciefd *pcie) { u32 irq = ioread32(KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); if (irq & KVASER_PCIEFD_SRB_IRQ_DPD0) { kvaser_pciefd_read_buffer(pcie, 0); /* Reset DMA buffer 0 */ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); } if (irq & KVASER_PCIEFD_SRB_IRQ_DPD1) { kvaser_pciefd_read_buffer(pcie, 1); /* Reset DMA buffer 1 */ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); } if (irq & KVASER_PCIEFD_SRB_IRQ_DOF0 || irq & KVASER_PCIEFD_SRB_IRQ_DOF1 || irq & KVASER_PCIEFD_SRB_IRQ_DUF0 || irq & KVASER_PCIEFD_SRB_IRQ_DUF1) dev_err(&pcie->pci->dev, "DMA IRQ error 0x%08X\n", irq); iowrite32(irq, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); } static void kvaser_pciefd_transmit_irq(struct kvaser_pciefd_can *can) { u32 irq = ioread32(can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); if (irq & KVASER_PCIEFD_KCAN_IRQ_TOF) netdev_err(can->can.dev, "Tx FIFO overflow\n"); if (irq & KVASER_PCIEFD_KCAN_IRQ_BPP) netdev_err(can->can.dev, "Fail to change bittiming, when not in reset mode\n"); if (irq & KVASER_PCIEFD_KCAN_IRQ_FDIC) netdev_err(can->can.dev, "CAN FD frame in CAN mode\n"); if (irq & KVASER_PCIEFD_KCAN_IRQ_ROF) netdev_err(can->can.dev, "Rx FIFO overflow\n"); iowrite32(irq, can->reg_base + KVASER_PCIEFD_KCAN_IRQ_REG); } static irqreturn_t kvaser_pciefd_irq_handler(int irq, void *dev) { struct kvaser_pciefd *pcie = (struct kvaser_pciefd *)dev; const struct kvaser_pciefd_irq_mask *irq_mask = pcie->driver_data->irq_mask; u32 board_irq = ioread32(KVASER_PCIEFD_PCI_IRQ_ADDR(pcie)); int i; if (!(board_irq & irq_mask->all)) return IRQ_NONE; if (board_irq & irq_mask->kcan_rx0) kvaser_pciefd_receive_irq(pcie); for (i = 0; i < pcie->nr_channels; i++) { if (!pcie->can[i]) { dev_err(&pcie->pci->dev, "IRQ mask points to unallocated controller\n"); break; } /* Check that mask matches channel (i) IRQ mask */ if (board_irq & irq_mask->kcan_tx[i]) kvaser_pciefd_transmit_irq(pcie->can[i]); } return IRQ_HANDLED; } static void kvaser_pciefd_teardown_can_ctrls(struct kvaser_pciefd *pcie) { int i; for (i = 0; i < pcie->nr_channels; i++) { struct kvaser_pciefd_can *can = pcie->can[i]; if (can) { iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); kvaser_pciefd_pwm_stop(can); free_candev(can->can.dev); } } } static int kvaser_pciefd_probe(struct pci_dev *pdev, const struct pci_device_id *id) { int err; struct kvaser_pciefd *pcie; const struct kvaser_pciefd_irq_mask *irq_mask; void __iomem *irq_en_base; pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pci_set_drvdata(pdev, pcie); pcie->pci = pdev; pcie->driver_data = (const struct kvaser_pciefd_driver_data *)id->driver_data; irq_mask = pcie->driver_data->irq_mask; err = pci_enable_device(pdev); if (err) return err; err = pci_request_regions(pdev, KVASER_PCIEFD_DRV_NAME); if (err) goto err_disable_pci; pcie->reg_base = pci_iomap(pdev, 0, 0); if (!pcie->reg_base) { err = -ENOMEM; goto err_release_regions; } err = kvaser_pciefd_setup_board(pcie); if (err) goto err_pci_iounmap; err = 
kvaser_pciefd_setup_dma(pcie); if (err) goto err_pci_iounmap; pci_set_master(pdev); err = kvaser_pciefd_setup_can_ctrls(pcie); if (err) goto err_teardown_can_ctrls; err = request_irq(pcie->pci->irq, kvaser_pciefd_irq_handler, IRQF_SHARED, KVASER_PCIEFD_DRV_NAME, pcie); if (err) goto err_teardown_can_ctrls; iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IRQ_REG); iowrite32(KVASER_PCIEFD_SRB_IRQ_DPD0 | KVASER_PCIEFD_SRB_IRQ_DPD1 | KVASER_PCIEFD_SRB_IRQ_DOF0 | KVASER_PCIEFD_SRB_IRQ_DOF1 | KVASER_PCIEFD_SRB_IRQ_DUF0 | KVASER_PCIEFD_SRB_IRQ_DUF1, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_IEN_REG); /* Enable PCI interrupts */ irq_en_base = KVASER_PCIEFD_PCI_IEN_ADDR(pcie); iowrite32(irq_mask->all, irq_en_base); /* Ready the DMA buffers */ iowrite32(KVASER_PCIEFD_SRB_CMD_RDB0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); iowrite32(KVASER_PCIEFD_SRB_CMD_RDB1, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CMD_REG); err = kvaser_pciefd_reg_candev(pcie); if (err) goto err_free_irq; return 0; err_free_irq: /* Disable PCI interrupts */ iowrite32(0, irq_en_base); free_irq(pcie->pci->irq, pcie); err_teardown_can_ctrls: kvaser_pciefd_teardown_can_ctrls(pcie); iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); pci_clear_master(pdev); err_pci_iounmap: pci_iounmap(pdev, pcie->reg_base); err_release_regions: pci_release_regions(pdev); err_disable_pci: pci_disable_device(pdev); return err; } static void kvaser_pciefd_remove_all_ctrls(struct kvaser_pciefd *pcie) { int i; for (i = 0; i < pcie->nr_channels; i++) { struct kvaser_pciefd_can *can = pcie->can[i]; if (can) { iowrite32(0, can->reg_base + KVASER_PCIEFD_KCAN_IEN_REG); unregister_candev(can->can.dev); del_timer(&can->bec_poll_timer); kvaser_pciefd_pwm_stop(can); free_candev(can->can.dev); } } } static void kvaser_pciefd_remove(struct pci_dev *pdev) { struct kvaser_pciefd *pcie = pci_get_drvdata(pdev); kvaser_pciefd_remove_all_ctrls(pcie); /* Disable interrupts */ iowrite32(0, KVASER_PCIEFD_SRB_ADDR(pcie) + KVASER_PCIEFD_SRB_CTRL_REG); iowrite32(0, KVASER_PCIEFD_PCI_IEN_ADDR(pcie)); free_irq(pcie->pci->irq, pcie); pci_iounmap(pdev, pcie->reg_base); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_driver kvaser_pciefd = { .name = KVASER_PCIEFD_DRV_NAME, .id_table = kvaser_pciefd_id_table, .probe = kvaser_pciefd_probe, .remove = kvaser_pciefd_remove, }; module_pci_driver(kvaser_pciefd)
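/*
 * Stand-alone sketch (not part of the driver) of the record walk that
 * kvaser_pciefd_read_packet() and kvaser_pciefd_read_buffer() implement
 * above: each record in the DMA buffer starts with a 32-bit word count,
 * followed by two header words, a 64-bit timestamp and, for data packets,
 * payload words; a zero count tells the reader to wrap back to the start
 * of the buffer. The names and the host-endian timestamp below are
 * illustrative simplifications, not the device's exact layout.
 */
#include <stdint.h>
#include <stddef.h>

struct rec {
	uint32_t header[2];
	uint64_t timestamp;
};

/* Returns 1 if a record was parsed, 0 on a wrap marker, -1 on corruption. */
static int walk_one_record(const uint32_t *buf, size_t *pos, struct rec *out)
{
	size_t start = *pos;
	uint32_t size = buf[(*pos)++];	/* record length in 32-bit words */

	if (!size) {
		*pos = 0;		/* wrap marker: restart at the top */
		return 0;
	}
	out->header[0] = buf[(*pos)++];
	out->header[1] = buf[(*pos)++];
	out->timestamp = (uint64_t)buf[*pos] | ((uint64_t)buf[*pos + 1] << 32);
	*pos += 2;
	/* A data record would advance *pos further by its payload words,
	 * mirroring the DIV_ROUND_UP(data_len, 4) step above.
	 */
	if (start + size != *pos)
		return -1;		/* corrupted record size */
	return 1;
}

int main(void)
{
	/* One 5-word status-style record followed by a wrap marker. */
	const uint32_t buf[] = { 5, 0x11, 0x22, 0x33, 0x44, 0 };
	size_t pos = 0;
	struct rec r;

	return !(walk_one_record(buf, &pos, &r) == 1 &&
		 walk_one_record(buf, &pos, &r) == 0 && pos == 0);
}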
linux-master
drivers/net/can/kvaser_pciefd.c
// SPDX-License-Identifier: GPL-2.0 /* ELM327 based CAN interface driver (tty line discipline) * * This driver started as a derivative of linux/drivers/net/can/slcan.c * and my thanks go to the original authors for their inspiration. * * can327.c Author : Max Staudt <[email protected]> * slcan.c Author : Oliver Hartkopp <[email protected]> * slip.c Authors : Laurence Culhane <[email protected]> * Fred N. van Kempen <[email protected]> */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/module.h> #include <linux/bitops.h> #include <linux/ctype.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/lockdep.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/tty.h> #include <linux/tty_ldisc.h> #include <linux/workqueue.h> #include <uapi/linux/tty.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/rx-offload.h> #define CAN327_NAPI_WEIGHT 4 #define CAN327_SIZE_TXBUF 32 #define CAN327_SIZE_RXBUF 1024 #define CAN327_CAN_CONFIG_SEND_SFF 0x8000 #define CAN327_CAN_CONFIG_VARIABLE_DLC 0x4000 #define CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF 0x2000 #define CAN327_CAN_CONFIG_BAUDRATE_MULT_8_7 0x1000 #define CAN327_DUMMY_CHAR 'y' #define CAN327_DUMMY_STRING "y" #define CAN327_READY_CHAR '>' /* Bits in elm->cmds_todo */ enum can327_tx_do { CAN327_TX_DO_CAN_DATA = 0, CAN327_TX_DO_CANID_11BIT, CAN327_TX_DO_CANID_29BIT_LOW, CAN327_TX_DO_CANID_29BIT_HIGH, CAN327_TX_DO_CAN_CONFIG_PART2, CAN327_TX_DO_CAN_CONFIG, CAN327_TX_DO_RESPONSES, CAN327_TX_DO_SILENT_MONITOR, CAN327_TX_DO_INIT, }; struct can327 { /* This must be the first member when using alloc_candev() */ struct can_priv can; struct can_rx_offload offload; /* TTY buffers */ u8 txbuf[CAN327_SIZE_TXBUF]; u8 rxbuf[CAN327_SIZE_RXBUF]; /* Per-channel lock */ spinlock_t lock; /* TTY and netdev devices that we're bridging */ struct tty_struct *tty; struct net_device *dev; /* TTY buffer accounting */ struct work_struct tx_work; /* Flushes TTY TX buffer */ u8 *txhead; /* Next TX byte */ size_t txleft; /* Bytes left to TX */ int rxfill; /* Bytes already RX'd in buffer */ /* State machine */ enum { CAN327_STATE_NOTINIT = 0, CAN327_STATE_GETDUMMYCHAR, CAN327_STATE_GETPROMPT, CAN327_STATE_RECEIVING, } state; /* Things we have yet to send */ char **next_init_cmd; unsigned long cmds_todo; /* The CAN frame and config the ELM327 is sending/using, * or will send/use after finishing all cmds_todo */ struct can_frame can_frame_to_send; u16 can_config; u8 can_bitrate_divisor; /* Parser state */ bool drop_next_line; /* Stop the channel on UART side hardware failure, e.g. stray * characters or neverending lines. This may be caused by bad * UART wiring, a bad ELM327, a bad UART bridge... * Once this is true, nothing will be sent to the TTY. */ bool uart_side_failure; }; static inline void can327_uart_side_failure(struct can327 *elm); static void can327_send(struct can327 *elm, const void *buf, size_t len) { int written; lockdep_assert_held(&elm->lock); if (elm->uart_side_failure) return; memcpy(elm->txbuf, buf, len); /* Order of next two lines is *very* important. * When we are sending a little amount of data, * the transfer may be completed inside the ops->write() * routine, because it's running with interrupts enabled. * In this case we *never* got WRITE_WAKEUP event, * if we did not request it before write operation. * 14 Oct 1994 Dmitry Gorodchanin. 
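 *
 * Illustrative interleaving with the order reversed (the race this
 * avoids): ops->write() accepts only part of the buffer, the driver
 * drains that part and raises its write wakeup before we reach
 * set_bit(TTY_DO_WRITE_WAKEUP, ...) - the wakeup is discarded, nobody
 * schedules tx_work, and the tail of the buffer is never sent. With
 * the bit set first, can327_ldisc_tx_wakeup() is guaranteed to see it
 * and schedule can327_ldisc_tx_worker() to flush the remainder.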
*/ set_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags); written = elm->tty->ops->write(elm->tty, elm->txbuf, len); if (written < 0) { netdev_err(elm->dev, "Failed to write to tty %s.\n", elm->tty->name); can327_uart_side_failure(elm); return; } elm->txleft = len - written; elm->txhead = elm->txbuf + written; } /* Take the ELM327 out of almost any state and back into command mode. * We send CAN327_DUMMY_CHAR which will either abort any running * operation, or be echoed back to us in case we're already in command * mode. */ static void can327_kick_into_cmd_mode(struct can327 *elm) { lockdep_assert_held(&elm->lock); if (elm->state != CAN327_STATE_GETDUMMYCHAR && elm->state != CAN327_STATE_GETPROMPT) { can327_send(elm, CAN327_DUMMY_STRING, 1); elm->state = CAN327_STATE_GETDUMMYCHAR; } } /* Schedule a CAN frame and necessary config changes to be sent to the TTY. */ static void can327_send_frame(struct can327 *elm, struct can_frame *frame) { lockdep_assert_held(&elm->lock); /* Schedule any necessary changes in ELM327's CAN configuration */ if (elm->can_frame_to_send.can_id != frame->can_id) { /* Set the new CAN ID for transmission. */ if ((frame->can_id ^ elm->can_frame_to_send.can_id) & CAN_EFF_FLAG) { elm->can_config = (frame->can_id & CAN_EFF_FLAG ? 0 : CAN327_CAN_CONFIG_SEND_SFF) | CAN327_CAN_CONFIG_VARIABLE_DLC | CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF | elm->can_bitrate_divisor; set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo); } if (frame->can_id & CAN_EFF_FLAG) { clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo); set_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo); set_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo); } else { set_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo); clear_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo); clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo); } } /* Schedule the CAN frame itself. */ elm->can_frame_to_send = *frame; set_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo); can327_kick_into_cmd_mode(elm); } /* ELM327 initialisation sequence. * The line length is limited by the buffer in can327_handle_prompt(). */ static char *can327_init_script[] = { "AT WS\r", /* v1.0: Warm Start */ "AT PP FF OFF\r", /* v1.0: All Programmable Parameters Off */ "AT M0\r", /* v1.0: Memory Off */ "AT AL\r", /* v1.0: Allow Long messages */ "AT BI\r", /* v1.0: Bypass Initialisation */ "AT CAF0\r", /* v1.0: CAN Auto Formatting Off */ "AT CFC0\r", /* v1.0: CAN Flow Control Off */ "AT CF 000\r", /* v1.0: Reset CAN ID Filter */ "AT CM 000\r", /* v1.0: Reset CAN ID Mask */ "AT E1\r", /* v1.0: Echo On */ "AT H1\r", /* v1.0: Headers On */ "AT L0\r", /* v1.0: Linefeeds Off */ "AT SH 7DF\r", /* v1.0: Set CAN sending ID to 0x7df */ "AT ST FF\r", /* v1.0: Set maximum Timeout for response after TX */ "AT AT0\r", /* v1.2: Adaptive Timing Off */ "AT D1\r", /* v1.3: Print DLC On */ "AT S1\r", /* v1.3: Spaces On */ "AT TP B\r", /* v1.0: Try Protocol B */ NULL }; static void can327_init_device(struct can327 *elm) { lockdep_assert_held(&elm->lock); elm->state = CAN327_STATE_NOTINIT; elm->can_frame_to_send.can_id = 0x7df; /* ELM327 HW default */ elm->rxfill = 0; elm->drop_next_line = 0; /* We can only set the bitrate as a fraction of 500000. * The bitrates listed in can327_bitrate_const will * limit the user to the right values. 
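 *
 * Worked example: a configured bitrate of 250000 bit/s yields
 * 500000 / 250000 = 2, and 10000 bit/s yields 50. The divisor is
 * folded into elm->can_config and reaches the adapter via the
 * "ATPB%04X" command assembled in can327_handle_prompt().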
*/ elm->can_bitrate_divisor = 500000 / elm->can.bittiming.bitrate; elm->can_config = CAN327_CAN_CONFIG_SEND_SFF | CAN327_CAN_CONFIG_VARIABLE_DLC | CAN327_CAN_CONFIG_RECV_BOTH_SFF_EFF | elm->can_bitrate_divisor; /* Configure ELM327 and then start monitoring */ elm->next_init_cmd = &can327_init_script[0]; set_bit(CAN327_TX_DO_INIT, &elm->cmds_todo); set_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo); set_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo); set_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo); can327_kick_into_cmd_mode(elm); } static void can327_feed_frame_to_netdev(struct can327 *elm, struct sk_buff *skb) { lockdep_assert_held(&elm->lock); if (!netif_running(elm->dev)) { kfree_skb(skb); return; } /* Queue for NAPI pickup. * rx-offload will update stats and LEDs for us. */ if (can_rx_offload_queue_tail(&elm->offload, skb)) elm->dev->stats.rx_fifo_errors++; /* Wake NAPI */ can_rx_offload_irq_finish(&elm->offload); } /* Called when we're out of ideas and just want it all to end. */ static inline void can327_uart_side_failure(struct can327 *elm) { struct can_frame *frame; struct sk_buff *skb; lockdep_assert_held(&elm->lock); elm->uart_side_failure = true; clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags); elm->can.can_stats.bus_off++; netif_stop_queue(elm->dev); elm->can.state = CAN_STATE_BUS_OFF; can_bus_off(elm->dev); netdev_err(elm->dev, "ELM327 misbehaved. Blocking further communication.\n"); skb = alloc_can_err_skb(elm->dev, &frame); if (!skb) return; frame->can_id |= CAN_ERR_BUSOFF; can327_feed_frame_to_netdev(elm, skb); } /* Compares a byte buffer (non-NUL terminated) to the payload part of * a string, and returns true iff the buffer (content *and* length) is * exactly that string, without the terminating NUL byte. * * Example: If reference is "BUS ERROR", then this returns true iff nbytes == 9 * and !memcmp(buf, "BUS ERROR", 9). * * The reason to use strings is so we can easily include them in the C * code, and to avoid hardcoding lengths. */ static inline bool can327_rxbuf_cmp(const u8 *buf, size_t nbytes, const char *reference) { size_t ref_len = strlen(reference); return (nbytes == ref_len) && !memcmp(buf, reference, ref_len); } static void can327_parse_error(struct can327 *elm, size_t len) { struct can_frame *frame; struct sk_buff *skb; lockdep_assert_held(&elm->lock); skb = alloc_can_err_skb(elm->dev, &frame); if (!skb) /* It's okay to return here: * The outer parsing loop will drop this UART buffer. */ return; /* Filter possible error messages based on length of RX'd line */ if (can327_rxbuf_cmp(elm->rxbuf, len, "UNABLE TO CONNECT")) { netdev_err(elm->dev, "ELM327 reported UNABLE TO CONNECT. Please check your setup.\n"); } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUFFER FULL")) { /* This will only happen if the last data line was complete. * Otherwise, can327_parse_frame() will heuristically * emit this kind of error frame instead. 
*/ frame->can_id |= CAN_ERR_CRTL; frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS ERROR")) { frame->can_id |= CAN_ERR_BUSERROR; } else if (can327_rxbuf_cmp(elm->rxbuf, len, "CAN ERROR")) { frame->can_id |= CAN_ERR_PROT; } else if (can327_rxbuf_cmp(elm->rxbuf, len, "<RX ERROR")) { frame->can_id |= CAN_ERR_PROT; } else if (can327_rxbuf_cmp(elm->rxbuf, len, "BUS BUSY")) { frame->can_id |= CAN_ERR_PROT; frame->data[2] = CAN_ERR_PROT_OVERLOAD; } else if (can327_rxbuf_cmp(elm->rxbuf, len, "FB ERROR")) { frame->can_id |= CAN_ERR_PROT; frame->data[2] = CAN_ERR_PROT_TX; } else if (len == 5 && !memcmp(elm->rxbuf, "ERR", 3)) { /* ERR is followed by two digits, hence line length 5 */ netdev_err(elm->dev, "ELM327 reported an ERR%c%c. Please power it off and on again.\n", elm->rxbuf[3], elm->rxbuf[4]); frame->can_id |= CAN_ERR_CRTL; } else { /* Something else has happened. * Maybe garbage on the UART line. * Emit a generic error frame. */ } can327_feed_frame_to_netdev(elm, skb); } /* Parse CAN frames coming as ASCII from ELM327. * They can be of various formats: * * 29-bit ID (EFF): 12 34 56 78 D PL PL PL PL PL PL PL PL * 11-bit ID (!EFF): 123 D PL PL PL PL PL PL PL PL * * where D = DLC, PL = payload byte * * Instead of a payload, RTR indicates a remote request. * * We will use the spaces and line length to guess the format. */ static int can327_parse_frame(struct can327 *elm, size_t len) { struct can_frame *frame; struct sk_buff *skb; int hexlen; int datastart; int i; lockdep_assert_held(&elm->lock); skb = alloc_can_skb(elm->dev, &frame); if (!skb) return -ENOMEM; /* Find first non-hex and non-space character: * - In the simplest case, there is none. * - For RTR frames, 'R' is the first non-hex character. * - An error message may replace the end of the data line. */ for (hexlen = 0; hexlen <= len; hexlen++) { if (hex_to_bin(elm->rxbuf[hexlen]) < 0 && elm->rxbuf[hexlen] != ' ') { break; } } /* Sanity check whether the line is really a clean hexdump, * or terminated by an error message, or contains garbage. */ if (hexlen < len && !isdigit(elm->rxbuf[hexlen]) && !isupper(elm->rxbuf[hexlen]) && '<' != elm->rxbuf[hexlen] && ' ' != elm->rxbuf[hexlen]) { /* The line is likely garbled anyway, so bail. * The main code will restart listening. */ kfree_skb(skb); return -ENODATA; } /* Use spaces in CAN ID to distinguish 29 or 11 bit address length. * No out-of-bounds access: * We use the fact that we can always read from elm->rxbuf. */ if (elm->rxbuf[2] == ' ' && elm->rxbuf[5] == ' ' && elm->rxbuf[8] == ' ' && elm->rxbuf[11] == ' ' && elm->rxbuf[13] == ' ') { frame->can_id = CAN_EFF_FLAG; datastart = 14; } else if (elm->rxbuf[3] == ' ' && elm->rxbuf[5] == ' ') { datastart = 6; } else { /* This is not a well-formatted data line. * Assume it's an error message. */ kfree_skb(skb); return -ENODATA; } if (hexlen < datastart) { /* The line is too short to be a valid frame hex dump. * Something interrupted the hex dump or it is invalid. */ kfree_skb(skb); return -ENODATA; } /* From here on all chars up to buf[hexlen] are hex or spaces, * at well-defined offsets. 
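 *
 * Worked example (11-bit ID): for the line "7DF 2 01 02" we get
 * rxbuf[3] == ' ' and rxbuf[5] == ' ', so datastart = 6. The DLC
 * nibble sits at datastart - 2 (the '2' at offset 4), and payload
 * byte i starts at datastart + 3 * i. For 29-bit IDs the spaces at
 * offsets 2, 5, 8, 11 and 13 select datastart = 14 instead.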
*/ /* Read CAN data length */ frame->len = (hex_to_bin(elm->rxbuf[datastart - 2]) << 0); /* Read CAN ID */ if (frame->can_id & CAN_EFF_FLAG) { frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 28) | (hex_to_bin(elm->rxbuf[1]) << 24) | (hex_to_bin(elm->rxbuf[3]) << 20) | (hex_to_bin(elm->rxbuf[4]) << 16) | (hex_to_bin(elm->rxbuf[6]) << 12) | (hex_to_bin(elm->rxbuf[7]) << 8) | (hex_to_bin(elm->rxbuf[9]) << 4) | (hex_to_bin(elm->rxbuf[10]) << 0); } else { frame->can_id |= (hex_to_bin(elm->rxbuf[0]) << 8) | (hex_to_bin(elm->rxbuf[1]) << 4) | (hex_to_bin(elm->rxbuf[2]) << 0); } /* Check for RTR frame */ if (elm->rxfill >= hexlen + 3 && !memcmp(&elm->rxbuf[hexlen], "RTR", 3)) { frame->can_id |= CAN_RTR_FLAG; } /* Is the line long enough to hold the advertised payload? * Note: RTR frames have a DLC, but no actual payload. */ if (!(frame->can_id & CAN_RTR_FLAG) && (hexlen < frame->len * 3 + datastart)) { /* Incomplete frame. * Probably the ELM327's RS232 TX buffer was full. * Emit an error frame and exit. */ frame->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL; frame->len = CAN_ERR_DLC; frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; can327_feed_frame_to_netdev(elm, skb); /* Signal failure to parse. * The line will be re-parsed as an error line, which will fail. * However, this will correctly drop the state machine back into * command mode. */ return -ENODATA; } /* Parse the data nibbles. */ for (i = 0; i < frame->len; i++) { frame->data[i] = (hex_to_bin(elm->rxbuf[datastart + 3 * i]) << 4) | (hex_to_bin(elm->rxbuf[datastart + 3 * i + 1])); } /* Feed the frame to the network layer. */ can327_feed_frame_to_netdev(elm, skb); return 0; } static void can327_parse_line(struct can327 *elm, size_t len) { lockdep_assert_held(&elm->lock); /* Skip empty lines */ if (!len) return; /* Skip echo lines */ if (elm->drop_next_line) { elm->drop_next_line = 0; return; } else if (!memcmp(elm->rxbuf, "AT", 2)) { return; } /* Regular parsing */ if (elm->state == CAN327_STATE_RECEIVING && can327_parse_frame(elm, len)) { /* Parse an error line. */ can327_parse_error(elm, len); /* Start afresh. */ can327_kick_into_cmd_mode(elm); } } static void can327_handle_prompt(struct can327 *elm) { struct can_frame *frame = &elm->can_frame_to_send; /* Size this buffer for the largest ELM327 line we may generate, * which is currently an 8 byte CAN frame's payload hexdump. * Items in can327_init_script must fit here, too! */ char local_txbuf[sizeof("0102030405060708\r")]; lockdep_assert_held(&elm->lock); if (!elm->cmds_todo) { /* Enter CAN monitor mode */ can327_send(elm, "ATMA\r", 5); elm->state = CAN327_STATE_RECEIVING; /* We will be in the default state once this command is * sent, so enable the TX packet queue. */ netif_wake_queue(elm->dev); return; } /* Reconfigure ELM327 step by step as indicated by elm->cmds_todo */ if (test_bit(CAN327_TX_DO_INIT, &elm->cmds_todo)) { snprintf(local_txbuf, sizeof(local_txbuf), "%s", *elm->next_init_cmd); elm->next_init_cmd++; if (!(*elm->next_init_cmd)) { clear_bit(CAN327_TX_DO_INIT, &elm->cmds_todo); /* Init finished. 
*/ } } else if (test_and_clear_bit(CAN327_TX_DO_SILENT_MONITOR, &elm->cmds_todo)) { snprintf(local_txbuf, sizeof(local_txbuf), "ATCSM%i\r", !!(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)); } else if (test_and_clear_bit(CAN327_TX_DO_RESPONSES, &elm->cmds_todo)) { snprintf(local_txbuf, sizeof(local_txbuf), "ATR%i\r", !(elm->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)); } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG, &elm->cmds_todo)) { snprintf(local_txbuf, sizeof(local_txbuf), "ATPC\r"); set_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo); } else if (test_and_clear_bit(CAN327_TX_DO_CAN_CONFIG_PART2, &elm->cmds_todo)) { snprintf(local_txbuf, sizeof(local_txbuf), "ATPB%04X\r", elm->can_config); } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_HIGH, &elm->cmds_todo)) { snprintf(local_txbuf, sizeof(local_txbuf), "ATCP%02X\r", (frame->can_id & CAN_EFF_MASK) >> 24); } else if (test_and_clear_bit(CAN327_TX_DO_CANID_29BIT_LOW, &elm->cmds_todo)) { snprintf(local_txbuf, sizeof(local_txbuf), "ATSH%06X\r", frame->can_id & CAN_EFF_MASK & ((1 << 24) - 1)); } else if (test_and_clear_bit(CAN327_TX_DO_CANID_11BIT, &elm->cmds_todo)) { snprintf(local_txbuf, sizeof(local_txbuf), "ATSH%03X\r", frame->can_id & CAN_SFF_MASK); } else if (test_and_clear_bit(CAN327_TX_DO_CAN_DATA, &elm->cmds_todo)) { if (frame->can_id & CAN_RTR_FLAG) { /* Send an RTR frame. Their DLC is fixed. * Some chips don't send them at all. */ snprintf(local_txbuf, sizeof(local_txbuf), "ATRTR\r"); } else { /* Send a regular CAN data frame */ int i; for (i = 0; i < frame->len; i++) { snprintf(&local_txbuf[2 * i], sizeof(local_txbuf), "%02X", frame->data[i]); } snprintf(&local_txbuf[2 * i], sizeof(local_txbuf), "\r"); } elm->drop_next_line = 1; elm->state = CAN327_STATE_RECEIVING; /* We will be in the default state once this command is * sent, so enable the TX packet queue. */ netif_wake_queue(elm->dev); } can327_send(elm, local_txbuf, strlen(local_txbuf)); } static bool can327_is_ready_char(char c) { /* Bits 0xc0 are sometimes set (randomly), hence the mask. * Probably bad hardware. */ return (c & 0x3f) == CAN327_READY_CHAR; } static void can327_drop_bytes(struct can327 *elm, size_t i) { lockdep_assert_held(&elm->lock); memmove(&elm->rxbuf[0], &elm->rxbuf[i], CAN327_SIZE_RXBUF - i); elm->rxfill -= i; } static void can327_parse_rxbuf(struct can327 *elm, size_t first_new_char_idx) { size_t len, pos; lockdep_assert_held(&elm->lock); switch (elm->state) { case CAN327_STATE_NOTINIT: elm->rxfill = 0; break; case CAN327_STATE_GETDUMMYCHAR: /* Wait for 'y' or '>' */ for (pos = 0; pos < elm->rxfill; pos++) { if (elm->rxbuf[pos] == CAN327_DUMMY_CHAR) { can327_send(elm, "\r", 1); elm->state = CAN327_STATE_GETPROMPT; pos++; break; } else if (can327_is_ready_char(elm->rxbuf[pos])) { can327_send(elm, CAN327_DUMMY_STRING, 1); pos++; break; } } can327_drop_bytes(elm, pos); break; case CAN327_STATE_GETPROMPT: /* Wait for '>' */ if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1])) can327_handle_prompt(elm); elm->rxfill = 0; break; case CAN327_STATE_RECEIVING: /* Find <CR> delimiting feedback lines. */ len = first_new_char_idx; while (len < elm->rxfill && elm->rxbuf[len] != '\r') len++; if (len == CAN327_SIZE_RXBUF) { /* Assume the buffer ran full with garbage. * Did we even connect at the right baud rate? */ netdev_err(elm->dev, "RX buffer overflow. 
Faulty ELM327 or UART?\n"); can327_uart_side_failure(elm); } else if (len == elm->rxfill) { if (can327_is_ready_char(elm->rxbuf[elm->rxfill - 1])) { /* The ELM327's AT ST response timeout ran out, * so we got a prompt. * Clear RX buffer and restart listening. */ elm->rxfill = 0; can327_handle_prompt(elm); } /* No <CR> found - we haven't received a full line yet. * Wait for more data. */ } else { /* We have a full line to parse. */ can327_parse_line(elm, len); /* Remove parsed data from RX buffer. */ can327_drop_bytes(elm, len + 1); /* More data to parse? */ if (elm->rxfill) can327_parse_rxbuf(elm, 0); } } } static int can327_netdev_open(struct net_device *dev) { struct can327 *elm = netdev_priv(dev); int err; spin_lock_bh(&elm->lock); if (!elm->tty) { spin_unlock_bh(&elm->lock); return -ENODEV; } if (elm->uart_side_failure) netdev_warn(elm->dev, "Reopening netdev after a UART side fault has been detected.\n"); /* Clear TTY buffers */ elm->rxfill = 0; elm->txleft = 0; /* open_candev() checks for elm->can.bittiming.bitrate != 0 */ err = open_candev(dev); if (err) { spin_unlock_bh(&elm->lock); return err; } can327_init_device(elm); spin_unlock_bh(&elm->lock); err = can_rx_offload_add_manual(dev, &elm->offload, CAN327_NAPI_WEIGHT); if (err) { close_candev(dev); return err; } can_rx_offload_enable(&elm->offload); elm->can.state = CAN_STATE_ERROR_ACTIVE; netif_start_queue(dev); return 0; } static int can327_netdev_close(struct net_device *dev) { struct can327 *elm = netdev_priv(dev); /* Interrupt whatever the ELM327 is doing right now */ spin_lock_bh(&elm->lock); can327_send(elm, CAN327_DUMMY_STRING, 1); spin_unlock_bh(&elm->lock); netif_stop_queue(dev); /* We don't flush the UART TX queue here, as we want final stop * commands (like the above dummy char) to be flushed out. */ can_rx_offload_disable(&elm->offload); elm->can.state = CAN_STATE_STOPPED; can_rx_offload_del(&elm->offload); close_candev(dev); return 0; } /* Send a can_frame to a TTY. */ static netdev_tx_t can327_netdev_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct can327 *elm = netdev_priv(dev); struct can_frame *frame = (struct can_frame *)skb->data; if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; /* We shouldn't get here after a hardware fault: * can_bus_off() calls netif_carrier_off() */ if (elm->uart_side_failure) { WARN_ON_ONCE(elm->uart_side_failure); goto out; } netif_stop_queue(dev); /* BHs are already disabled, so no spin_lock_bh(). * See Documentation/networking/netdevices.rst */ spin_lock(&elm->lock); can327_send_frame(elm, frame); spin_unlock(&elm->lock); dev->stats.tx_packets++; dev->stats.tx_bytes += frame->can_id & CAN_RTR_FLAG ? 
0 : frame->len; skb_tx_timestamp(skb); out: kfree_skb(skb); return NETDEV_TX_OK; } static const struct net_device_ops can327_netdev_ops = { .ndo_open = can327_netdev_open, .ndo_stop = can327_netdev_close, .ndo_start_xmit = can327_netdev_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops can327_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static bool can327_is_valid_rx_char(u8 c) { static const bool lut_char_is_valid['z'] = { ['\r'] = true, [' '] = true, ['.'] = true, ['0'] = true, true, true, true, true, ['5'] = true, true, true, true, true, ['<'] = true, [CAN327_READY_CHAR] = true, ['?'] = true, ['A'] = true, true, true, true, true, true, true, ['H'] = true, true, true, true, true, true, true, ['O'] = true, true, true, true, true, true, true, ['V'] = true, true, true, true, true, ['a'] = true, ['b'] = true, ['v'] = true, [CAN327_DUMMY_CHAR] = true, }; BUILD_BUG_ON(CAN327_DUMMY_CHAR >= 'z'); return (c < ARRAY_SIZE(lut_char_is_valid) && lut_char_is_valid[c]); } /* Handle incoming ELM327 ASCII data. * This will not be re-entered while running, but other ldisc * functions may be called in parallel. */ static void can327_ldisc_rx(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct can327 *elm = tty->disc_data; size_t first_new_char_idx; if (elm->uart_side_failure) return; spin_lock_bh(&elm->lock); /* Store old rxfill, so can327_parse_rxbuf() will have * the option of skipping already checked characters. */ first_new_char_idx = elm->rxfill; while (count--) { if (elm->rxfill >= CAN327_SIZE_RXBUF) { netdev_err(elm->dev, "Receive buffer overflowed. Bad chip or wiring? count = %zu", count); goto uart_failure; } if (fp && *fp++) { netdev_err(elm->dev, "Error in received character stream. Check your wiring."); goto uart_failure; } /* Ignore NUL characters, which the PIC microcontroller may * inadvertently insert due to a known hardware bug. * See ELM327 documentation, which refers to a Microchip PIC * bug description. */ if (*cp) { /* Check for stray characters on the UART line. * Likely caused by bad hardware. */ if (!can327_is_valid_rx_char(*cp)) { netdev_err(elm->dev, "Received illegal character %02x.\n", *cp); goto uart_failure; } elm->rxbuf[elm->rxfill++] = *cp; } cp++; } can327_parse_rxbuf(elm, first_new_char_idx); spin_unlock_bh(&elm->lock); return; uart_failure: can327_uart_side_failure(elm); spin_unlock_bh(&elm->lock); } /* Write out remaining transmit buffer. * Scheduled when TTY is writable. */ static void can327_ldisc_tx_worker(struct work_struct *work) { struct can327 *elm = container_of(work, struct can327, tx_work); ssize_t written; if (elm->uart_side_failure) return; spin_lock_bh(&elm->lock); if (elm->txleft) { written = elm->tty->ops->write(elm->tty, elm->txhead, elm->txleft); if (written < 0) { netdev_err(elm->dev, "Failed to write to tty %s.\n", elm->tty->name); can327_uart_side_failure(elm); spin_unlock_bh(&elm->lock); return; } elm->txleft -= written; elm->txhead += written; } if (!elm->txleft) clear_bit(TTY_DO_WRITE_WAKEUP, &elm->tty->flags); spin_unlock_bh(&elm->lock); } /* Called by the driver when there's room for more data. */ static void can327_ldisc_tx_wakeup(struct tty_struct *tty) { struct can327 *elm = tty->disc_data; schedule_work(&elm->tx_work); } /* ELM327 can only handle bitrates that are integer divisors of 500 kHz, * or 7/8 of that. Divisors are 1 to 64. * Currently we don't implement support for 7/8 rates. 
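 *
 * The table below is simply 500000 / n, truncated, for n = 64 down
 * to 1: 500000 / 64 = 7812, 500000 / 63 = 7936, ..., 500000 / 2 =
 * 250000, 500000 / 1 = 500000.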
*/ static const u32 can327_bitrate_const[] = { 7812, 7936, 8064, 8196, 8333, 8474, 8620, 8771, 8928, 9090, 9259, 9433, 9615, 9803, 10000, 10204, 10416, 10638, 10869, 11111, 11363, 11627, 11904, 12195, 12500, 12820, 13157, 13513, 13888, 14285, 14705, 15151, 15625, 16129, 16666, 17241, 17857, 18518, 19230, 20000, 20833, 21739, 22727, 23809, 25000, 26315, 27777, 29411, 31250, 33333, 35714, 38461, 41666, 45454, 50000, 55555, 62500, 71428, 83333, 100000, 125000, 166666, 250000, 500000 }; static int can327_ldisc_open(struct tty_struct *tty) { struct net_device *dev; struct can327 *elm; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!tty->ops->write) return -EOPNOTSUPP; dev = alloc_candev(sizeof(struct can327), 0); if (!dev) return -ENFILE; elm = netdev_priv(dev); /* Configure TTY interface */ tty->receive_room = 65536; /* We don't flow control */ spin_lock_init(&elm->lock); INIT_WORK(&elm->tx_work, can327_ldisc_tx_worker); /* Configure CAN metadata */ elm->can.bitrate_const = can327_bitrate_const; elm->can.bitrate_const_cnt = ARRAY_SIZE(can327_bitrate_const); elm->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY; /* Configure netdev interface */ elm->dev = dev; dev->netdev_ops = &can327_netdev_ops; dev->ethtool_ops = &can327_ethtool_ops; /* Mark ldisc channel as alive */ elm->tty = tty; tty->disc_data = elm; /* Let 'er rip */ err = register_candev(elm->dev); if (err) { free_candev(elm->dev); return err; } netdev_info(elm->dev, "can327 on %s.\n", tty->name); return 0; } /* Close down a can327 channel. * This means flushing out any pending queues, and then returning. * This call is serialized against other ldisc functions: * Once this is called, no other ldisc function of ours is entered. * * We also use this function for a hangup event. */ static void can327_ldisc_close(struct tty_struct *tty) { struct can327 *elm = tty->disc_data; /* unregister_netdev() calls .ndo_stop() so we don't have to. */ unregister_candev(elm->dev); /* Give UART one final chance to flush. * No need to clear TTY_DO_WRITE_WAKEUP since .write_wakeup() is * serialised against .close() and will not be called once we return. */ flush_work(&elm->tx_work); /* Mark channel as dead */ spin_lock_bh(&elm->lock); tty->disc_data = NULL; elm->tty = NULL; spin_unlock_bh(&elm->lock); netdev_info(elm->dev, "can327 off %s.\n", tty->name); free_candev(elm->dev); } static int can327_ldisc_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct can327 *elm = tty->disc_data; unsigned int tmp; switch (cmd) { case SIOCGIFNAME: tmp = strnlen(elm->dev->name, IFNAMSIZ - 1) + 1; if (copy_to_user((void __user *)arg, elm->dev->name, tmp)) return -EFAULT; return 0; case SIOCSIFHWADDR: return -EINVAL; default: return tty_mode_ioctl(tty, cmd, arg); } } static struct tty_ldisc_ops can327_ldisc = { .owner = THIS_MODULE, .name = KBUILD_MODNAME, .num = N_CAN327, .receive_buf = can327_ldisc_rx, .write_wakeup = can327_ldisc_tx_wakeup, .open = can327_ldisc_open, .close = can327_ldisc_close, .ioctl = can327_ldisc_ioctl, }; static int __init can327_init(void) { int status; status = tty_register_ldisc(&can327_ldisc); if (status) pr_err("Can't register line discipline\n"); return status; } static void __exit can327_exit(void) { /* This will only be called when all channels have been closed by * userspace - tty_ldisc.c takes care of the module's refcount. 
*/ tty_unregister_ldisc(&can327_ldisc); } module_init(can327_init); module_exit(can327_exit); MODULE_ALIAS_LDISC(N_CAN327); MODULE_DESCRIPTION("ELM327 based CAN interface"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Max Staudt <[email protected]>");
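/*
 * Minimal user-space sketch of binding a serial port to this line
 * discipline, assuming N_CAN327 is exported via <linux/tty.h> and that
 * the port (the "/dev/ttyUSB0" path is just an example) is already set
 * to the adapter's baud rate. Error handling and termios setup elided.
 */
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/tty.h>		/* N_CAN327 */

int main(void)
{
	int ldisc = N_CAN327;
	int fd = open("/dev/ttyUSB0", O_RDWR | O_NOCTTY);

	if (fd < 0 || ioctl(fd, TIOCSETD, &ldisc) < 0)
		return 1;
	for (;;)	/* keep the TTY open; a canX netdev now exists */
		pause();
}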
linux-master
drivers/net/can/can327.c
// SPDX-License-Identifier: GPL-2.0 // // bxcan.c - STM32 Basic Extended CAN controller driver // // Copyright (c) 2022 Dario Binacchi <[email protected]> // // NOTE: The ST documentation uses the terms master/slave instead of // primary/secondary. #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/bitfield.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/rx-offload.h> #include <linux/clk.h> #include <linux/ethtool.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/regmap.h> #define BXCAN_NAPI_WEIGHT 3 #define BXCAN_TIMEOUT_US 10000 #define BXCAN_RX_MB_NUM 2 #define BXCAN_TX_MB_NUM 3
/* Primary control register (MCR) bits */ #define BXCAN_MCR_RESET BIT(15) #define BXCAN_MCR_TTCM BIT(7) #define BXCAN_MCR_ABOM BIT(6) #define BXCAN_MCR_AWUM BIT(5) #define BXCAN_MCR_NART BIT(4) #define BXCAN_MCR_RFLM BIT(3) #define BXCAN_MCR_TXFP BIT(2) #define BXCAN_MCR_SLEEP BIT(1) #define BXCAN_MCR_INRQ BIT(0)
/* Primary status register (MSR) bits */ #define BXCAN_MSR_ERRI BIT(2) #define BXCAN_MSR_SLAK BIT(1) #define BXCAN_MSR_INAK BIT(0)
/* Transmit status register (TSR) bits */ #define BXCAN_TSR_RQCP2 BIT(16) #define BXCAN_TSR_RQCP1 BIT(8) #define BXCAN_TSR_RQCP0 BIT(0)
/* Receive FIFO 0 register (RF0R) bits */ #define BXCAN_RF0R_RFOM0 BIT(5) #define BXCAN_RF0R_FMP0_MASK GENMASK(1, 0)
/* Interrupt enable register (IER) bits */ #define BXCAN_IER_SLKIE BIT(17) #define BXCAN_IER_WKUIE BIT(16) #define BXCAN_IER_ERRIE BIT(15) #define BXCAN_IER_LECIE BIT(11) #define BXCAN_IER_BOFIE BIT(10) #define BXCAN_IER_EPVIE BIT(9) #define BXCAN_IER_EWGIE BIT(8) #define BXCAN_IER_FOVIE1 BIT(6) #define BXCAN_IER_FFIE1 BIT(5) #define BXCAN_IER_FMPIE1 BIT(4) #define BXCAN_IER_FOVIE0 BIT(3) #define BXCAN_IER_FFIE0 BIT(2) #define BXCAN_IER_FMPIE0 BIT(1) #define BXCAN_IER_TMEIE BIT(0)
/* Error status register (ESR) bits */ #define BXCAN_ESR_REC_MASK GENMASK(31, 24) #define BXCAN_ESR_TEC_MASK GENMASK(23, 16) #define BXCAN_ESR_LEC_MASK GENMASK(6, 4) #define BXCAN_ESR_BOFF BIT(2) #define BXCAN_ESR_EPVF BIT(1) #define BXCAN_ESR_EWGF BIT(0)
/* Bit timing register (BTR) bits */ #define BXCAN_BTR_SILM BIT(31) #define BXCAN_BTR_LBKM BIT(30) #define BXCAN_BTR_SJW_MASK GENMASK(25, 24) #define BXCAN_BTR_TS2_MASK GENMASK(22, 20) #define BXCAN_BTR_TS1_MASK GENMASK(19, 16) #define BXCAN_BTR_BRP_MASK GENMASK(9, 0)
/* TX mailbox identifier register (TIxR, x = 0..2) bits */ #define BXCAN_TIxR_STID_MASK GENMASK(31, 21) #define BXCAN_TIxR_EXID_MASK GENMASK(31, 3) #define BXCAN_TIxR_IDE BIT(2) #define BXCAN_TIxR_RTR BIT(1) #define BXCAN_TIxR_TXRQ BIT(0)
/* TX mailbox data length and time stamp register (TDTxR, x = 0..2) bits */ #define BXCAN_TDTxR_DLC_MASK GENMASK(3, 0)
/* RX FIFO mailbox identifier register (RIxR, x = 0..1) bits */ #define BXCAN_RIxR_STID_MASK GENMASK(31, 21) #define BXCAN_RIxR_EXID_MASK GENMASK(31, 3) #define BXCAN_RIxR_IDE BIT(2) #define BXCAN_RIxR_RTR BIT(1)
/* RX FIFO mailbox data length and timestamp register (RDTxR, x = 0..1) bits */ #define BXCAN_RDTxR_TIME_MASK GENMASK(31, 16) #define BXCAN_RDTxR_DLC_MASK GENMASK(3, 0)
#define BXCAN_FMR_REG 0x00 #define BXCAN_FM1R_REG 0x04 #define BXCAN_FS1R_REG 0x0c #define BXCAN_FFA1R_REG 0x14 #define BXCAN_FA1R_REG 0x1c #define BXCAN_FiR1_REG(b) (0x40 + (b) * 8) #define BXCAN_FiR2_REG(b) (0x44 + (b) * 8) #define BXCAN_FILTER_ID(cfg) ((cfg) == BXCAN_CFG_DUAL_SECONDARY ? 14 : 0)
/* Filter primary register (FMR) bits */ #define BXCAN_FMR_CANSB_MASK GENMASK(13, 8) #define BXCAN_FMR_FINIT BIT(0)
enum bxcan_lec_code { BXCAN_LEC_NO_ERROR = 0, BXCAN_LEC_STUFF_ERROR, BXCAN_LEC_FORM_ERROR, BXCAN_LEC_ACK_ERROR, BXCAN_LEC_BIT1_ERROR, BXCAN_LEC_BIT0_ERROR, BXCAN_LEC_CRC_ERROR, BXCAN_LEC_UNUSED };
enum bxcan_cfg { BXCAN_CFG_SINGLE = 0, BXCAN_CFG_DUAL_PRIMARY, BXCAN_CFG_DUAL_SECONDARY };
/* Structure of the message buffer */ struct bxcan_mb { u32 id; /* can identifier */ u32 dlc; /* data length control and timestamp */ u32 data[2]; /* data */ };
/* Structure of the hardware registers */ struct bxcan_regs { u32 mcr; /* 0x00 - primary control */ u32 msr; /* 0x04 - primary status */ u32 tsr; /* 0x08 - transmit status */ u32 rf0r; /* 0x0c - FIFO 0 */ u32 rf1r; /* 0x10 - FIFO 1 */ u32 ier; /* 0x14 - interrupt enable */ u32 esr; /* 0x18 - error status */ u32 btr; /* 0x1c - bit timing */ u32 reserved0[88]; /* 0x20 */ struct bxcan_mb tx_mb[BXCAN_TX_MB_NUM]; /* 0x180 - tx mailbox */ struct bxcan_mb rx_mb[BXCAN_RX_MB_NUM]; /* 0x1b0 - rx mailbox */ };
struct bxcan_priv { struct can_priv can; struct can_rx_offload offload; struct device *dev; struct net_device *ndev; struct bxcan_regs __iomem *regs; struct regmap *gcan; int tx_irq; int sce_irq; enum bxcan_cfg cfg; struct clk *clk; spinlock_t rmw_lock; /* lock for read-modify-write operations */ unsigned int tx_head; unsigned int tx_tail; u32 timestamp; };
static const struct can_bittiming_const bxcan_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, };
static inline void bxcan_rmw(struct bxcan_priv *priv, void __iomem *addr, u32 clear, u32 set) { unsigned long flags; u32 old, val; spin_lock_irqsave(&priv->rmw_lock, flags); old = readl(addr); val = (old & ~clear) | set; if (val != old) writel(val, addr); spin_unlock_irqrestore(&priv->rmw_lock, flags); }
static void bxcan_disable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg) { unsigned int fid = BXCAN_FILTER_ID(cfg); u32 fmask = BIT(fid); regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0); }
static void bxcan_enable_filters(struct bxcan_priv *priv, enum bxcan_cfg cfg) { unsigned int fid = BXCAN_FILTER_ID(cfg); u32 fmask = BIT(fid); /* Filter settings: * * Accept all messages. * Assign filter 0 to CAN1 and filter 14 to CAN2 in identifier * mask mode with 32-bit width. */ /* Enter filter initialization mode and assign filters to CAN * controllers.
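 *
 * With BXCAN_FMR_CANSB set to 14 below, filter banks 0..13 belong to
 * the primary controller and banks 14..27 to the secondary one; this
 * is why BXCAN_FILTER_ID() selects bank 14 for BXCAN_CFG_DUAL_SECONDARY
 * and bank 0 otherwise.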
*/ regmap_update_bits(priv->gcan, BXCAN_FMR_REG, BXCAN_FMR_CANSB_MASK | BXCAN_FMR_FINIT, FIELD_PREP(BXCAN_FMR_CANSB_MASK, 14) | BXCAN_FMR_FINIT); /* Deactivate filter */ regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, 0); /* Two 32-bit registers in identifier mask mode */ regmap_update_bits(priv->gcan, BXCAN_FM1R_REG, fmask, 0); /* Single 32-bit scale configuration */ regmap_update_bits(priv->gcan, BXCAN_FS1R_REG, fmask, fmask); /* Assign filter to FIFO 0 */ regmap_update_bits(priv->gcan, BXCAN_FFA1R_REG, fmask, 0); /* Accept all messages */ regmap_write(priv->gcan, BXCAN_FiR1_REG(fid), 0); regmap_write(priv->gcan, BXCAN_FiR2_REG(fid), 0); /* Activate filter */ regmap_update_bits(priv->gcan, BXCAN_FA1R_REG, fmask, fmask); /* Exit filter initialization mode */ regmap_update_bits(priv->gcan, BXCAN_FMR_REG, BXCAN_FMR_FINIT, 0); } static inline u8 bxcan_get_tx_head(const struct bxcan_priv *priv) { return priv->tx_head % BXCAN_TX_MB_NUM; } static inline u8 bxcan_get_tx_tail(const struct bxcan_priv *priv) { return priv->tx_tail % BXCAN_TX_MB_NUM; } static inline u8 bxcan_get_tx_free(const struct bxcan_priv *priv) { return BXCAN_TX_MB_NUM - (priv->tx_head - priv->tx_tail); } static bool bxcan_tx_busy(const struct bxcan_priv *priv) { if (bxcan_get_tx_free(priv) > 0) return false; netif_stop_queue(priv->ndev); /* Memory barrier before checking tx_free (head and tail) */ smp_mb(); if (bxcan_get_tx_free(priv) == 0) { netdev_dbg(priv->ndev, "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n", priv->tx_head, priv->tx_tail, priv->tx_head - priv->tx_tail); return true; } netif_start_queue(priv->ndev); return false; } static int bxcan_chip_softreset(struct bxcan_priv *priv) { struct bxcan_regs __iomem *regs = priv->regs; u32 value; bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_RESET); return readx_poll_timeout(readl, &regs->msr, value, value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US, USEC_PER_SEC); } static int bxcan_enter_init_mode(struct bxcan_priv *priv) { struct bxcan_regs __iomem *regs = priv->regs; u32 value; bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_INRQ); return readx_poll_timeout(readl, &regs->msr, value, value & BXCAN_MSR_INAK, BXCAN_TIMEOUT_US, USEC_PER_SEC); } static int bxcan_leave_init_mode(struct bxcan_priv *priv) { struct bxcan_regs __iomem *regs = priv->regs; u32 value; bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_INRQ, 0); return readx_poll_timeout(readl, &regs->msr, value, !(value & BXCAN_MSR_INAK), BXCAN_TIMEOUT_US, USEC_PER_SEC); } static int bxcan_enter_sleep_mode(struct bxcan_priv *priv) { struct bxcan_regs __iomem *regs = priv->regs; u32 value; bxcan_rmw(priv, &regs->mcr, 0, BXCAN_MCR_SLEEP); return readx_poll_timeout(readl, &regs->msr, value, value & BXCAN_MSR_SLAK, BXCAN_TIMEOUT_US, USEC_PER_SEC); } static int bxcan_leave_sleep_mode(struct bxcan_priv *priv) { struct bxcan_regs __iomem *regs = priv->regs; u32 value; bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_SLEEP, 0); return readx_poll_timeout(readl, &regs->msr, value, !(value & BXCAN_MSR_SLAK), BXCAN_TIMEOUT_US, USEC_PER_SEC); } static inline struct bxcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) { return container_of(offload, struct bxcan_priv, offload); } static struct sk_buff *bxcan_mailbox_read(struct can_rx_offload *offload, unsigned int mbxno, u32 *timestamp, bool drop) { struct bxcan_priv *priv = rx_offload_to_priv(offload); struct bxcan_regs __iomem *regs = priv->regs; struct bxcan_mb __iomem *mb_regs = &regs->rx_mb[0]; struct sk_buff *skb = NULL; struct can_frame *cf; u32 rf0r, id, dlc; rf0r = 
readl(&regs->rf0r); if (unlikely(drop)) { skb = ERR_PTR(-ENOBUFS); goto mark_as_read; } if (!(rf0r & BXCAN_RF0R_FMP0_MASK)) goto mark_as_read; skb = alloc_can_skb(offload->dev, &cf); if (unlikely(!skb)) { skb = ERR_PTR(-ENOMEM); goto mark_as_read; } id = readl(&mb_regs->id); if (id & BXCAN_RIxR_IDE) cf->can_id = FIELD_GET(BXCAN_RIxR_EXID_MASK, id) | CAN_EFF_FLAG; else cf->can_id = FIELD_GET(BXCAN_RIxR_STID_MASK, id) & CAN_SFF_MASK; dlc = readl(&mb_regs->dlc); priv->timestamp = FIELD_GET(BXCAN_RDTxR_TIME_MASK, dlc); cf->len = can_cc_dlc2len(FIELD_GET(BXCAN_RDTxR_DLC_MASK, dlc)); if (id & BXCAN_RIxR_RTR) { cf->can_id |= CAN_RTR_FLAG; } else { int i, j; for (i = 0, j = 0; i < cf->len; i += 4, j++) *(u32 *)(cf->data + i) = readl(&mb_regs->data[j]); } mark_as_read: rf0r |= BXCAN_RF0R_RFOM0; writel(rf0r, &regs->rf0r); return skb; } static irqreturn_t bxcan_rx_isr(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct bxcan_priv *priv = netdev_priv(ndev); struct bxcan_regs __iomem *regs = priv->regs; u32 rf0r; rf0r = readl(&regs->rf0r); if (!(rf0r & BXCAN_RF0R_FMP0_MASK)) return IRQ_NONE; can_rx_offload_irq_offload_fifo(&priv->offload); can_rx_offload_irq_finish(&priv->offload); return IRQ_HANDLED; } static irqreturn_t bxcan_tx_isr(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct bxcan_priv *priv = netdev_priv(ndev); struct bxcan_regs __iomem *regs = priv->regs; struct net_device_stats *stats = &ndev->stats; u32 tsr, rqcp_bit; int idx; tsr = readl(&regs->tsr); if (!(tsr & (BXCAN_TSR_RQCP0 | BXCAN_TSR_RQCP1 | BXCAN_TSR_RQCP2))) return IRQ_NONE; while (priv->tx_head - priv->tx_tail > 0) { idx = bxcan_get_tx_tail(priv); rqcp_bit = BXCAN_TSR_RQCP0 << (idx << 3); if (!(tsr & rqcp_bit)) break; stats->tx_packets++; stats->tx_bytes += can_get_echo_skb(ndev, idx, NULL); priv->tx_tail++; } writel(tsr, &regs->tsr); if (bxcan_get_tx_free(priv)) { /* Make sure that anybody stopping the queue after * this sees the new tx_ring->tail. */ smp_mb(); netif_wake_queue(ndev); } return IRQ_HANDLED; } static void bxcan_handle_state_change(struct net_device *ndev, u32 esr) { struct bxcan_priv *priv = netdev_priv(ndev); enum can_state new_state = priv->can.state; struct can_berr_counter bec; enum can_state rx_state, tx_state; struct sk_buff *skb; struct can_frame *cf; /* Early exit if no error flag is set */ if (!(esr & (BXCAN_ESR_EWGF | BXCAN_ESR_EPVF | BXCAN_ESR_BOFF))) return; bec.txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr); bec.rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr); if (esr & BXCAN_ESR_BOFF) new_state = CAN_STATE_BUS_OFF; else if (esr & BXCAN_ESR_EPVF) new_state = CAN_STATE_ERROR_PASSIVE; else if (esr & BXCAN_ESR_EWGF) new_state = CAN_STATE_ERROR_WARNING; /* state hasn't changed */ if (unlikely(new_state == priv->can.state)) return; skb = alloc_can_err_skb(ndev, &cf); tx_state = bec.txerr >= bec.rxerr ? new_state : 0; rx_state = bec.txerr <= bec.rxerr ? new_state : 0; can_change_state(ndev, cf, tx_state, rx_state); if (new_state == CAN_STATE_BUS_OFF) { can_bus_off(ndev); } else if (skb) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; } if (skb) { int err; err = can_rx_offload_queue_timestamp(&priv->offload, skb, priv->timestamp); if (err) ndev->stats.rx_fifo_errors++; } } static void bxcan_handle_bus_err(struct net_device *ndev, u32 esr) { struct bxcan_priv *priv = netdev_priv(ndev); enum bxcan_lec_code lec_code; struct can_frame *cf; struct sk_buff *skb; lec_code = FIELD_GET(BXCAN_ESR_LEC_MASK, esr); /* Early exit if no lec update or no error. 
* No lec update means that no CAN bus event has been detected * since CPU wrote BXCAN_LEC_UNUSED value to status reg. */ if (lec_code == BXCAN_LEC_UNUSED || lec_code == BXCAN_LEC_NO_ERROR) return; /* Common for all type of bus errors */ priv->can.can_stats.bus_error++; /* Propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(ndev, &cf); if (skb) cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (lec_code) { case BXCAN_LEC_STUFF_ERROR: netdev_dbg(ndev, "Stuff error\n"); ndev->stats.rx_errors++; if (skb) cf->data[2] |= CAN_ERR_PROT_STUFF; break; case BXCAN_LEC_FORM_ERROR: netdev_dbg(ndev, "Form error\n"); ndev->stats.rx_errors++; if (skb) cf->data[2] |= CAN_ERR_PROT_FORM; break; case BXCAN_LEC_ACK_ERROR: netdev_dbg(ndev, "Ack error\n"); ndev->stats.tx_errors++; if (skb) { cf->can_id |= CAN_ERR_ACK; cf->data[3] = CAN_ERR_PROT_LOC_ACK; } break; case BXCAN_LEC_BIT1_ERROR: netdev_dbg(ndev, "Bit error (recessive)\n"); ndev->stats.tx_errors++; if (skb) cf->data[2] |= CAN_ERR_PROT_BIT1; break; case BXCAN_LEC_BIT0_ERROR: netdev_dbg(ndev, "Bit error (dominant)\n"); ndev->stats.tx_errors++; if (skb) cf->data[2] |= CAN_ERR_PROT_BIT0; break; case BXCAN_LEC_CRC_ERROR: netdev_dbg(ndev, "CRC error\n"); ndev->stats.rx_errors++; if (skb) { cf->data[2] |= CAN_ERR_PROT_BIT; cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } break; default: break; } if (skb) { int err; err = can_rx_offload_queue_timestamp(&priv->offload, skb, priv->timestamp); if (err) ndev->stats.rx_fifo_errors++; } } static irqreturn_t bxcan_state_change_isr(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct bxcan_priv *priv = netdev_priv(ndev); struct bxcan_regs __iomem *regs = priv->regs; u32 msr, esr; msr = readl(&regs->msr); if (!(msr & BXCAN_MSR_ERRI)) return IRQ_NONE; esr = readl(&regs->esr); bxcan_handle_state_change(ndev, esr); if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) bxcan_handle_bus_err(ndev, esr); msr |= BXCAN_MSR_ERRI; writel(msr, &regs->msr); can_rx_offload_irq_finish(&priv->offload); return IRQ_HANDLED; } static int bxcan_chip_start(struct net_device *ndev) { struct bxcan_priv *priv = netdev_priv(ndev); struct bxcan_regs __iomem *regs = priv->regs; struct can_bittiming *bt = &priv->can.bittiming; u32 clr, set; int err; err = bxcan_chip_softreset(priv); if (err) { netdev_err(ndev, "failed to reset chip, error %pe\n", ERR_PTR(err)); return err; } err = bxcan_leave_sleep_mode(priv); if (err) { netdev_err(ndev, "failed to leave sleep mode, error %pe\n", ERR_PTR(err)); goto failed_leave_sleep; } err = bxcan_enter_init_mode(priv); if (err) { netdev_err(ndev, "failed to enter init mode, error %pe\n", ERR_PTR(err)); goto failed_enter_init; } /* MCR * * select request order priority * enable time triggered mode * bus-off state left on sw request * sleep mode left on sw request * retransmit automatically on error * do not lock RX FIFO on overrun */ bxcan_rmw(priv, &regs->mcr, BXCAN_MCR_ABOM | BXCAN_MCR_AWUM | BXCAN_MCR_NART | BXCAN_MCR_RFLM, BXCAN_MCR_TTCM | BXCAN_MCR_TXFP); /* Bit timing register settings */ set = FIELD_PREP(BXCAN_BTR_BRP_MASK, bt->brp - 1) | FIELD_PREP(BXCAN_BTR_TS1_MASK, bt->phase_seg1 + bt->prop_seg - 1) | FIELD_PREP(BXCAN_BTR_TS2_MASK, bt->phase_seg2 - 1) | FIELD_PREP(BXCAN_BTR_SJW_MASK, bt->sjw - 1); /* loopback + silent mode put the controller in test mode, * useful for hot self-test */ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) set |= BXCAN_BTR_LBKM; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) set |= BXCAN_BTR_SILM; bxcan_rmw(priv, &regs->btr, BXCAN_BTR_SILM | 
BXCAN_BTR_LBKM | BXCAN_BTR_BRP_MASK | BXCAN_BTR_TS1_MASK | BXCAN_BTR_TS2_MASK | BXCAN_BTR_SJW_MASK, set); bxcan_enable_filters(priv, priv->cfg); /* Clear all internal status */ priv->tx_head = 0; priv->tx_tail = 0; err = bxcan_leave_init_mode(priv); if (err) { netdev_err(ndev, "failed to leave init mode, error %pe\n", ERR_PTR(err)); goto failed_leave_init; } /* Set a `lec` value so that we can check for updates later */ bxcan_rmw(priv, &regs->esr, BXCAN_ESR_LEC_MASK, FIELD_PREP(BXCAN_ESR_LEC_MASK, BXCAN_LEC_UNUSED)); /* IER * * Enable interrupt for: * bus-off * passive error * warning error * last error code * RX FIFO pending message * TX mailbox empty */ clr = BXCAN_IER_WKUIE | BXCAN_IER_SLKIE | BXCAN_IER_FOVIE1 | BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 | BXCAN_IER_FFIE0; set = BXCAN_IER_ERRIE | BXCAN_IER_BOFIE | BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE; if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) set |= BXCAN_IER_LECIE; else clr |= BXCAN_IER_LECIE; bxcan_rmw(priv, &regs->ier, clr, set); priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; failed_leave_init: failed_enter_init: failed_leave_sleep: bxcan_chip_softreset(priv); return err; } static int bxcan_open(struct net_device *ndev) { struct bxcan_priv *priv = netdev_priv(ndev); int err; err = clk_prepare_enable(priv->clk); if (err) { netdev_err(ndev, "failed to enable clock, error %pe\n", ERR_PTR(err)); return err; } err = open_candev(ndev); if (err) { netdev_err(ndev, "open_candev() failed, error %pe\n", ERR_PTR(err)); goto out_disable_clock; } can_rx_offload_enable(&priv->offload); err = request_irq(ndev->irq, bxcan_rx_isr, IRQF_SHARED, ndev->name, ndev); if (err) { netdev_err(ndev, "failed to register rx irq(%d), error %pe\n", ndev->irq, ERR_PTR(err)); goto out_close_candev; } err = request_irq(priv->tx_irq, bxcan_tx_isr, IRQF_SHARED, ndev->name, ndev); if (err) { netdev_err(ndev, "failed to register tx irq(%d), error %pe\n", priv->tx_irq, ERR_PTR(err)); goto out_free_rx_irq; } err = request_irq(priv->sce_irq, bxcan_state_change_isr, IRQF_SHARED, ndev->name, ndev); if (err) { netdev_err(ndev, "failed to register sce irq(%d), error %pe\n", priv->sce_irq, ERR_PTR(err)); goto out_free_tx_irq; } err = bxcan_chip_start(ndev); if (err) goto out_free_sce_irq; netif_start_queue(ndev); return 0; out_free_sce_irq: free_irq(priv->sce_irq, ndev); out_free_tx_irq: free_irq(priv->tx_irq, ndev); out_free_rx_irq: free_irq(ndev->irq, ndev); out_close_candev: can_rx_offload_disable(&priv->offload); close_candev(ndev); out_disable_clock: clk_disable_unprepare(priv->clk); return err; } static void bxcan_chip_stop(struct net_device *ndev) { struct bxcan_priv *priv = netdev_priv(ndev); struct bxcan_regs __iomem *regs = priv->regs; /* disable all interrupts */ bxcan_rmw(priv, &regs->ier, BXCAN_IER_SLKIE | BXCAN_IER_WKUIE | BXCAN_IER_ERRIE | BXCAN_IER_LECIE | BXCAN_IER_BOFIE | BXCAN_IER_EPVIE | BXCAN_IER_EWGIE | BXCAN_IER_FOVIE1 | BXCAN_IER_FFIE1 | BXCAN_IER_FMPIE1 | BXCAN_IER_FOVIE0 | BXCAN_IER_FFIE0 | BXCAN_IER_FMPIE0 | BXCAN_IER_TMEIE, 0); bxcan_disable_filters(priv, priv->cfg); bxcan_enter_sleep_mode(priv); priv->can.state = CAN_STATE_STOPPED; } static int bxcan_stop(struct net_device *ndev) { struct bxcan_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); bxcan_chip_stop(ndev); free_irq(ndev->irq, ndev); free_irq(priv->tx_irq, ndev); free_irq(priv->sce_irq, ndev); can_rx_offload_disable(&priv->offload); close_candev(ndev); clk_disable_unprepare(priv->clk); return 0; } static netdev_tx_t 
bxcan_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct bxcan_priv *priv = netdev_priv(ndev); struct can_frame *cf = (struct can_frame *)skb->data; struct bxcan_regs __iomem *regs = priv->regs; struct bxcan_mb __iomem *mb_regs; unsigned int idx; u32 id; int i, j; if (can_dropped_invalid_skb(ndev, skb)) return NETDEV_TX_OK; if (bxcan_tx_busy(priv)) return NETDEV_TX_BUSY; idx = bxcan_get_tx_head(priv); priv->tx_head++; if (bxcan_get_tx_free(priv) == 0) netif_stop_queue(ndev); mb_regs = &regs->tx_mb[idx]; if (cf->can_id & CAN_EFF_FLAG) id = FIELD_PREP(BXCAN_TIxR_EXID_MASK, cf->can_id) | BXCAN_TIxR_IDE; else id = FIELD_PREP(BXCAN_TIxR_STID_MASK, cf->can_id); if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */ id |= BXCAN_TIxR_RTR; } else { for (i = 0, j = 0; i < cf->len; i += 4, j++) writel(*(u32 *)(cf->data + i), &mb_regs->data[j]); } writel(FIELD_PREP(BXCAN_TDTxR_DLC_MASK, cf->len), &mb_regs->dlc); can_put_echo_skb(skb, ndev, idx, 0); /* Start transmission */ writel(id | BXCAN_TIxR_TXRQ, &mb_regs->id); return NETDEV_TX_OK; } static const struct net_device_ops bxcan_netdev_ops = { .ndo_open = bxcan_open, .ndo_stop = bxcan_stop, .ndo_start_xmit = bxcan_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops bxcan_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static int bxcan_do_set_mode(struct net_device *ndev, enum can_mode mode) { int err; switch (mode) { case CAN_MODE_START: err = bxcan_chip_start(ndev); if (err) return err; netif_wake_queue(ndev); break; default: return -EOPNOTSUPP; } return 0; } static int bxcan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { struct bxcan_priv *priv = netdev_priv(ndev); struct bxcan_regs __iomem *regs = priv->regs; u32 esr; int err; err = clk_prepare_enable(priv->clk); if (err) return err; esr = readl(&regs->esr); bec->txerr = FIELD_GET(BXCAN_ESR_TEC_MASK, esr); bec->rxerr = FIELD_GET(BXCAN_ESR_REC_MASK, esr); clk_disable_unprepare(priv->clk); return 0; } static int bxcan_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct device *dev = &pdev->dev; struct net_device *ndev; struct bxcan_priv *priv; struct clk *clk = NULL; void __iomem *regs; struct regmap *gcan; enum bxcan_cfg cfg; int err, rx_irq, tx_irq, sce_irq; regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) { dev_err(dev, "failed to get base address\n"); return PTR_ERR(regs); } gcan = syscon_regmap_lookup_by_phandle(np, "st,gcan"); if (IS_ERR(gcan)) { dev_err(dev, "failed to get shared memory base address\n"); return PTR_ERR(gcan); } if (of_property_read_bool(np, "st,can-primary")) cfg = BXCAN_CFG_DUAL_PRIMARY; else if (of_property_read_bool(np, "st,can-secondary")) cfg = BXCAN_CFG_DUAL_SECONDARY; else cfg = BXCAN_CFG_SINGLE; clk = devm_clk_get(dev, NULL); if (IS_ERR(clk)) { dev_err(dev, "failed to get clock\n"); return PTR_ERR(clk); } rx_irq = platform_get_irq_byname(pdev, "rx0"); if (rx_irq < 0) return rx_irq; tx_irq = platform_get_irq_byname(pdev, "tx"); if (tx_irq < 0) return tx_irq; sce_irq = platform_get_irq_byname(pdev, "sce"); if (sce_irq < 0) return sce_irq; ndev = alloc_candev(sizeof(struct bxcan_priv), BXCAN_TX_MB_NUM); if (!ndev) { dev_err(dev, "alloc_candev() failed\n"); return -ENOMEM; } priv = netdev_priv(ndev); platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, dev); ndev->netdev_ops = &bxcan_netdev_ops; ndev->ethtool_ops = &bxcan_ethtool_ops; ndev->irq = rx_irq; ndev->flags |= IFF_ECHO; priv->dev = dev; priv->ndev = ndev; 
priv->regs = regs; priv->gcan = gcan; priv->clk = clk; priv->tx_irq = tx_irq; priv->sce_irq = sce_irq; priv->cfg = cfg; priv->can.clock.freq = clk_get_rate(clk); spin_lock_init(&priv->rmw_lock); priv->tx_head = 0; priv->tx_tail = 0; priv->can.bittiming_const = &bxcan_bittiming_const; priv->can.do_set_mode = bxcan_do_set_mode; priv->can.do_get_berr_counter = bxcan_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING; priv->offload.mailbox_read = bxcan_mailbox_read; err = can_rx_offload_add_fifo(ndev, &priv->offload, BXCAN_NAPI_WEIGHT); if (err) { dev_err(dev, "failed to add FIFO rx_offload\n"); goto out_free_candev; } err = register_candev(ndev); if (err) { dev_err(dev, "failed to register netdev\n"); goto out_can_rx_offload_del; } dev_info(dev, "clk: %d Hz, IRQs: %d, %d, %d\n", priv->can.clock.freq, tx_irq, rx_irq, sce_irq); return 0; out_can_rx_offload_del: can_rx_offload_del(&priv->offload); out_free_candev: free_candev(ndev); return err; } static void bxcan_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct bxcan_priv *priv = netdev_priv(ndev); unregister_candev(ndev); clk_disable_unprepare(priv->clk); can_rx_offload_del(&priv->offload); free_candev(ndev); } static int __maybe_unused bxcan_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct bxcan_priv *priv = netdev_priv(ndev); if (!netif_running(ndev)) return 0; netif_stop_queue(ndev); netif_device_detach(ndev); bxcan_enter_sleep_mode(priv); priv->can.state = CAN_STATE_SLEEPING; clk_disable_unprepare(priv->clk); return 0; } static int __maybe_unused bxcan_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct bxcan_priv *priv = netdev_priv(ndev); if (!netif_running(ndev)) return 0; clk_prepare_enable(priv->clk); bxcan_leave_sleep_mode(priv); priv->can.state = CAN_STATE_ERROR_ACTIVE; netif_device_attach(ndev); netif_start_queue(ndev); return 0; } static SIMPLE_DEV_PM_OPS(bxcan_pm_ops, bxcan_suspend, bxcan_resume); static const struct of_device_id bxcan_of_match[] = { {.compatible = "st,stm32f4-bxcan"}, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, bxcan_of_match); static struct platform_driver bxcan_driver = { .driver = { .name = KBUILD_MODNAME, .pm = &bxcan_pm_ops, .of_match_table = bxcan_of_match, }, .probe = bxcan_probe, .remove_new = bxcan_remove, }; module_platform_driver(bxcan_driver); MODULE_AUTHOR("Dario Binacchi <[email protected]>"); MODULE_DESCRIPTION("STMicroelectronics Basic Extended CAN controller driver"); MODULE_LICENSE("GPL");
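/*
 * Stand-alone sketch (not part of the driver) of the TX mailbox
 * accounting used above: tx_head and tx_tail are free-running counters,
 * the mailbox index is the counter modulo BXCAN_TX_MB_NUM, and the free
 * space is the counter difference - the same arithmetic as
 * bxcan_get_tx_head(), bxcan_get_tx_tail() and bxcan_get_tx_free().
 */
#include <assert.h>

#define NUM_MB 3U	/* BXCAN_TX_MB_NUM */

static unsigned int head, tail;

static unsigned int mb_free(void)
{
	return NUM_MB - (head - tail);
}

int main(void)
{
	unsigned int i;

	for (i = 0; i < 2 * NUM_MB + 1; i++) {
		assert(mb_free() > 0);	/* queue only while a mailbox is free */
		head++;			/* bxcan_start_xmit() claims a mailbox */
		tail++;			/* bxcan_tx_isr() completes it */
	}
	/* Unsigned wrap-around keeps head - tail correct indefinitely. */
	assert(mb_free() == NUM_MB && head % NUM_MB == tail % NUM_MB);
	return 0;
}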
linux-master
drivers/net/can/bxcan.c
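The tx_head/tx_tail bookkeeping in bxcan_start_xmit() above is the usual free-running ring-counter idiom: the number of free mailboxes is the mailbox count minus (head - tail), and unsigned wraparound keeps that subtraction correct. Below is a minimal stand-alone sketch of just that accounting, assuming three TX mailboxes as in bxcan; TX_MB_NUM, queue_one() and complete_one() are invented names for illustration, not kernel APIs.

#include <assert.h>
#include <stdio.h>

#define TX_MB_NUM 3 /* assumption: three TX mailboxes, as in bxcan */

static unsigned int tx_head, tx_tail; /* free-running counters */

static unsigned int tx_free(void)
{
	/* unsigned subtraction stays correct across wraparound */
	return TX_MB_NUM - (tx_head - tx_tail);
}

static int queue_one(void)
{
	if (tx_free() == 0)
		return -1; /* full: this is where the driver stops the queue */
	printf("using mailbox %u\n", tx_head % TX_MB_NUM);
	tx_head++;
	return 0;
}

static void complete_one(void)
{
	/* a TX completion interrupt frees one slot */
	assert(tx_head != tx_tail);
	tx_tail++;
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		if (queue_one())
			puts("stop queue");
	complete_one();
	queue_one();
	return 0;
}

Because both counters only ever increase, no masking of the counters themselves is needed; only the slot index is reduced modulo the mailbox count.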
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAN bus driver for the MSCAN controller, kept as generic as possible.
 *
 * Copyright (C) 2005-2006 Andrey Volkov <[email protected]>,
 *                         Varma Electronics Oy
 * Copyright (C) 2008-2009 Wolfgang Grandegger <[email protected]>
 * Copyright (C) 2008-2009 Pengutronix <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/list.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/io.h>

#include "mscan.h"

static const struct can_bittiming_const mscan_bittiming_const = {
	.name = "mscan",
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};

struct mscan_state {
	u8 mode;
	u8 canrier;
	u8 cantier;
};

static enum can_state state_map[] = {
	CAN_STATE_ERROR_ACTIVE,
	CAN_STATE_ERROR_WARNING,
	CAN_STATE_ERROR_PASSIVE,
	CAN_STATE_BUS_OFF
};

static int mscan_set_mode(struct net_device *dev, u8 mode)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int ret = 0;
	int i;
	u8 canctl1;

	if (mode != MSCAN_NORMAL_MODE) {
		if (priv->tx_active) {
			/* Abort transfers before going to sleep */
			out_8(&regs->cantarq, priv->tx_active);
			/* Suppress TX done interrupts */
			out_8(&regs->cantier, 0);
		}

		canctl1 = in_8(&regs->canctl1);
		if ((mode & MSCAN_SLPRQ) && !(canctl1 & MSCAN_SLPAK)) {
			setbits8(&regs->canctl0, MSCAN_SLPRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				if (in_8(&regs->canctl1) & MSCAN_SLPAK)
					break;
				udelay(100);
			}
			/*
			 * The MSCAN controller will fail to enter sleep mode
			 * while there is irregular activity on the bus, e.g.
			 * somebody keeps retransmitting. This behavior is
			 * undocumented and seems to differ between the MSCAN
			 * built into the MPC5200B and the MPC5200. We proceed
			 * in that case, since otherwise SLPRQ would be kept
			 * set and the controller would get stuck. NOTE: INITRQ
			 * or CSWAI will abort all active transmit actions, if
			 * still any, at once.
			 */
			if (i >= MSCAN_SET_MODE_RETRIES)
				netdev_dbg(dev,
					   "device failed to enter sleep mode. We proceed anyhow.\n");
			else
				priv->can.state = CAN_STATE_SLEEPING;
		}

		if ((mode & MSCAN_INITRQ) && !(canctl1 & MSCAN_INITAK)) {
			setbits8(&regs->canctl0, MSCAN_INITRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				if (in_8(&regs->canctl1) & MSCAN_INITAK)
					break;
			}
			if (i >= MSCAN_SET_MODE_RETRIES)
				ret = -ENODEV;
		}
		if (!ret)
			priv->can.state = CAN_STATE_STOPPED;

		if (mode & MSCAN_CSWAI)
			setbits8(&regs->canctl0, MSCAN_CSWAI);
	} else {
		canctl1 = in_8(&regs->canctl1);
		if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
			clrbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ);
			for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
				canctl1 = in_8(&regs->canctl1);
				if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
					break;
			}
			if (i >= MSCAN_SET_MODE_RETRIES)
				ret = -ENODEV;
			else
				priv->can.state = CAN_STATE_ERROR_ACTIVE;
		}
	}
	return ret;
}

static int mscan_start(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	u8 canrflg;
	int err;

	out_8(&regs->canrier, 0);

	INIT_LIST_HEAD(&priv->tx_head);
	priv->prev_buf_id = 0;
	priv->cur_pri = 0;
	priv->tx_active = 0;
	priv->shadow_canrier = 0;
	priv->flags = 0;

	if (priv->type == MSCAN_TYPE_MPC5121) {
		/* Clear pending bus-off condition */
		if (in_8(&regs->canmisc) & MSCAN_BOHOLD)
			out_8(&regs->canmisc, MSCAN_BOHOLD);
	}

	err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
	if (err)
		return err;

	canrflg = in_8(&regs->canrflg);
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
					MSCAN_STATE_TX(canrflg))];
	out_8(&regs->cantier, 0);

	/* Enable receive interrupts. */
	out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);

	return 0;
}

static int mscan_restart(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);

	if (priv->type == MSCAN_TYPE_MPC5121) {
		struct mscan_regs __iomem *regs = priv->reg_base;

		priv->can.state = CAN_STATE_ERROR_ACTIVE;
		WARN(!(in_8(&regs->canmisc) & MSCAN_BOHOLD),
		     "bus-off state expected\n");
		out_8(&regs->canmisc, MSCAN_BOHOLD);
		/* Re-enable receive interrupts. */
		out_8(&regs->canrier, MSCAN_RX_INTS_ENABLE);
	} else {
		if (priv->can.state <= CAN_STATE_BUS_OFF)
			mscan_set_mode(dev, MSCAN_INIT_MODE);
		return mscan_start(dev);
	}

	return 0;
}

static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs __iomem *regs = priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	if (can_dev_dropped_skb(dev, skb))
		return NETDEV_TX_OK;

	out_8(&regs->cantier, 0);

	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		netdev_err(dev, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * if buf_id < 3, then the current frame will be sent out of
		 * order, since buffers with a lower id have higher priority
		 * (hell..)
*/ netif_stop_queue(dev); fallthrough; case 2: if (buf_id < priv->prev_buf_id) { priv->cur_pri++; if (priv->cur_pri == 0xff) { set_bit(F_TX_WAIT_ALL, &priv->flags); netif_stop_queue(dev); } } set_bit(F_TX_PROGRESS, &priv->flags); break; } priv->prev_buf_id = buf_id; out_8(&regs->cantbsel, i); rtr = frame->can_id & CAN_RTR_FLAG; /* RTR is always the lowest bit of interest, then IDs follow */ if (frame->can_id & CAN_EFF_FLAG) { can_id = (frame->can_id & CAN_EFF_MASK) << (MSCAN_EFF_RTR_SHIFT + 1); if (rtr) can_id |= 1 << MSCAN_EFF_RTR_SHIFT; out_be16(&regs->tx.idr3_2, can_id); can_id >>= 16; /* EFF_FLAGS are between the IDs :( */ can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0) | MSCAN_EFF_FLAGS; } else { can_id = (frame->can_id & CAN_SFF_MASK) << (MSCAN_SFF_RTR_SHIFT + 1); if (rtr) can_id |= 1 << MSCAN_SFF_RTR_SHIFT; } out_be16(&regs->tx.idr1_0, can_id); if (!rtr) { void __iomem *data = &regs->tx.dsr1_0; u16 *payload = (u16 *)frame->data; for (i = 0; i < frame->len / 2; i++) { out_be16(data, *payload++); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } /* write remaining byte if necessary */ if (frame->len & 1) out_8(data, frame->data[frame->len - 1]); } out_8(&regs->tx.dlr, frame->len); out_8(&regs->tx.tbpr, priv->cur_pri); /* Start transmission. */ out_8(&regs->cantflg, 1 << buf_id); if (!test_bit(F_TX_PROGRESS, &priv->flags)) netif_trans_update(dev); list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head); can_put_echo_skb(skb, dev, buf_id, 0); /* Enable interrupt. */ priv->tx_active |= 1 << buf_id; out_8(&regs->cantier, priv->tx_active); return NETDEV_TX_OK; } static enum can_state get_new_state(struct net_device *dev, u8 canrflg) { struct mscan_priv *priv = netdev_priv(dev); if (unlikely(canrflg & MSCAN_CSCIF)) return state_map[max(MSCAN_STATE_RX(canrflg), MSCAN_STATE_TX(canrflg))]; return priv->can.state; } static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; u32 can_id; int i; can_id = in_be16(&regs->rx.idr1_0); if (can_id & (1 << 3)) { frame->can_id = CAN_EFF_FLAG; can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2)); can_id = ((can_id & 0xffe00000) | ((can_id & 0x7ffff) << 2)) >> 2; } else { can_id >>= 4; frame->can_id = 0; } frame->can_id |= can_id >> 1; if (can_id & 1) frame->can_id |= CAN_RTR_FLAG; frame->len = can_cc_dlc2len(in_8(&regs->rx.dlr) & 0xf); if (!(frame->can_id & CAN_RTR_FLAG)) { void __iomem *data = &regs->rx.dsr1_0; u16 *payload = (u16 *)frame->data; for (i = 0; i < frame->len / 2; i++) { *payload++ = in_be16(data); data += 2 + _MSCAN_RESERVED_DSR_SIZE; } /* read remaining byte if necessary */ if (frame->len & 1) frame->data[frame->len - 1] = in_8(data); } out_8(&regs->canrflg, MSCAN_RXF); } static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame, u8 canrflg) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; enum can_state new_state; netdev_dbg(dev, "error interrupt (canrflg=%#x)\n", canrflg); frame->can_id = CAN_ERR_FLAG; if (canrflg & MSCAN_OVRIF) { frame->can_id |= CAN_ERR_CRTL; frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; } else { frame->data[1] = 0; } new_state = get_new_state(dev, canrflg); if (new_state != priv->can.state) { can_change_state(dev, frame, state_map[MSCAN_STATE_TX(canrflg)], state_map[MSCAN_STATE_RX(canrflg)]); if (priv->can.state == CAN_STATE_BUS_OFF) { /* * The MSCAN 
on the MPC5200 does recover from bus-off * automatically. To avoid that we stop the chip doing * a light-weight stop (we are in irq-context). */ if (priv->type != MSCAN_TYPE_MPC5121) { out_8(&regs->cantier, 0); out_8(&regs->canrier, 0); setbits8(&regs->canctl0, MSCAN_SLPRQ | MSCAN_INITRQ); } can_bus_off(dev); } } priv->shadow_statflg = canrflg & MSCAN_STAT_MSK; frame->len = CAN_ERR_DLC; out_8(&regs->canrflg, MSCAN_ERR_IF); } static int mscan_rx_poll(struct napi_struct *napi, int quota) { struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi); struct net_device *dev = napi->dev; struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; int work_done = 0; struct sk_buff *skb; struct can_frame *frame; u8 canrflg; while (work_done < quota) { canrflg = in_8(&regs->canrflg); if (!(canrflg & (MSCAN_RXF | MSCAN_ERR_IF))) break; skb = alloc_can_skb(dev, &frame); if (!skb) { if (printk_ratelimit()) netdev_notice(dev, "packet dropped\n"); stats->rx_dropped++; out_8(&regs->canrflg, canrflg); continue; } if (canrflg & MSCAN_RXF) { mscan_get_rx_frame(dev, frame); stats->rx_packets++; if (!(frame->can_id & CAN_RTR_FLAG)) stats->rx_bytes += frame->len; } else if (canrflg & MSCAN_ERR_IF) { mscan_get_err_frame(dev, frame, canrflg); } work_done++; netif_receive_skb(skb); } if (work_done < quota) { if (likely(napi_complete_done(&priv->napi, work_done))) { clear_bit(F_RX_PROGRESS, &priv->flags); if (priv->can.state < CAN_STATE_BUS_OFF) out_8(&regs->canrier, priv->shadow_canrier); } } return work_done; } static irqreturn_t mscan_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; struct net_device_stats *stats = &dev->stats; u8 cantier, cantflg, canrflg; irqreturn_t ret = IRQ_NONE; cantier = in_8(&regs->cantier) & MSCAN_TXE; cantflg = in_8(&regs->cantflg) & cantier; if (cantier && cantflg) { struct list_head *tmp, *pos; list_for_each_safe(pos, tmp, &priv->tx_head) { struct tx_queue_entry *entry = list_entry(pos, struct tx_queue_entry, list); u8 mask = entry->mask; if (!(cantflg & mask)) continue; out_8(&regs->cantbsel, mask); stats->tx_bytes += can_get_echo_skb(dev, entry->id, NULL); stats->tx_packets++; priv->tx_active &= ~mask; list_del(pos); } if (list_empty(&priv->tx_head)) { clear_bit(F_TX_WAIT_ALL, &priv->flags); clear_bit(F_TX_PROGRESS, &priv->flags); priv->cur_pri = 0; } else { netif_trans_update(dev); } if (!test_bit(F_TX_WAIT_ALL, &priv->flags)) netif_wake_queue(dev); out_8(&regs->cantier, priv->tx_active); ret = IRQ_HANDLED; } canrflg = in_8(&regs->canrflg); if ((canrflg & ~MSCAN_STAT_MSK) && !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) { if (canrflg & ~MSCAN_STAT_MSK) { priv->shadow_canrier = in_8(&regs->canrier); out_8(&regs->canrier, 0); napi_schedule(&priv->napi); ret = IRQ_HANDLED; } else { clear_bit(F_RX_PROGRESS, &priv->flags); } } return ret; } static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode) { int ret = 0; switch (mode) { case CAN_MODE_START: ret = mscan_restart(dev); if (ret) break; if (netif_queue_stopped(dev)) netif_wake_queue(dev); break; default: ret = -EOPNOTSUPP; break; } return ret; } static int mscan_do_set_bittiming(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; struct can_bittiming *bt = &priv->can.bittiming; u8 btr0, btr1; btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw); btr1 = 
(BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) | BTR1_SET_TSEG2(bt->phase_seg2) | BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)); netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); out_8(&regs->canbtr0, btr0); out_8(&regs->canbtr1, btr1); return 0; } static int mscan_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; bec->txerr = in_8(&regs->cantxerr); bec->rxerr = in_8(&regs->canrxerr); return 0; } static int mscan_open(struct net_device *dev) { int ret; struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; ret = clk_prepare_enable(priv->clk_ipg); if (ret) goto exit_retcode; ret = clk_prepare_enable(priv->clk_can); if (ret) goto exit_dis_ipg_clock; /* common open */ ret = open_candev(dev); if (ret) goto exit_dis_can_clock; napi_enable(&priv->napi); ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev); if (ret < 0) { netdev_err(dev, "failed to attach interrupt\n"); goto exit_napi_disable; } if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) setbits8(&regs->canctl1, MSCAN_LISTEN); else clrbits8(&regs->canctl1, MSCAN_LISTEN); ret = mscan_start(dev); if (ret) goto exit_free_irq; netif_start_queue(dev); return 0; exit_free_irq: free_irq(dev->irq, dev); exit_napi_disable: napi_disable(&priv->napi); close_candev(dev); exit_dis_can_clock: clk_disable_unprepare(priv->clk_can); exit_dis_ipg_clock: clk_disable_unprepare(priv->clk_ipg); exit_retcode: return ret; } static int mscan_close(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; netif_stop_queue(dev); napi_disable(&priv->napi); out_8(&regs->cantier, 0); out_8(&regs->canrier, 0); mscan_set_mode(dev, MSCAN_INIT_MODE); close_candev(dev); free_irq(dev->irq, dev); clk_disable_unprepare(priv->clk_can); clk_disable_unprepare(priv->clk_ipg); return 0; } static const struct net_device_ops mscan_netdev_ops = { .ndo_open = mscan_open, .ndo_stop = mscan_close, .ndo_start_xmit = mscan_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops mscan_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; int register_mscandev(struct net_device *dev, int mscan_clksrc) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; u8 ctl1; ctl1 = in_8(&regs->canctl1); if (mscan_clksrc) ctl1 |= MSCAN_CLKSRC; else ctl1 &= ~MSCAN_CLKSRC; if (priv->type == MSCAN_TYPE_MPC5121) { priv->can.do_get_berr_counter = mscan_get_berr_counter; ctl1 |= MSCAN_BORM; /* bus-off recovery upon request */ } ctl1 |= MSCAN_CANE; out_8(&regs->canctl1, ctl1); udelay(100); /* acceptance mask/acceptance code (accept everything) */ out_be16(&regs->canidar1_0, 0); out_be16(&regs->canidar3_2, 0); out_be16(&regs->canidar5_4, 0); out_be16(&regs->canidar7_6, 0); out_be16(&regs->canidmr1_0, 0xffff); out_be16(&regs->canidmr3_2, 0xffff); out_be16(&regs->canidmr5_4, 0xffff); out_be16(&regs->canidmr7_6, 0xffff); /* Two 32 bit Acceptance Filters */ out_8(&regs->canidac, MSCAN_AF_32BIT); mscan_set_mode(dev, MSCAN_INIT_MODE); return register_candev(dev); } void unregister_mscandev(struct net_device *dev) { struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs __iomem *regs = priv->reg_base; mscan_set_mode(dev, MSCAN_INIT_MODE); clrbits8(&regs->canctl1, MSCAN_CANE); unregister_candev(dev); } struct net_device *alloc_mscandev(void) { struct net_device *dev; struct mscan_priv 
	*priv;
	int i;

	dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
	if (!dev)
		return NULL;
	priv = netdev_priv(dev);

	dev->netdev_ops = &mscan_netdev_ops;
	dev->ethtool_ops = &mscan_ethtool_ops;

	dev->flags |= IFF_ECHO;	/* we support local echo */

	netif_napi_add_weight(dev, &priv->napi, mscan_rx_poll, 8);

	priv->can.bittiming_const = &mscan_bittiming_const;
	priv->can.do_set_bittiming = mscan_do_set_bittiming;
	priv->can.do_set_mode = mscan_do_set_mode;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES |
		CAN_CTRLMODE_LISTENONLY;

	for (i = 0; i < TX_QUEUE_SIZE; i++) {
		priv->tx_queue[i].id = i;
		priv->tx_queue[i].mask = 1 << i;
	}

	return dev;
}

MODULE_AUTHOR("Andrey Volkov <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("CAN port driver for MSCAN-based chips");
linux-master
drivers/net/can/mscan/mscan.c
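The hardest part of mscan_start_xmit() and mscan_get_rx_frame() above is the extended-ID layout, where the SRR/IDE flag bits sit between ID bits in the IDR registers. The stand-alone sketch below reproduces the two transformations and checks that they are inverses. EFF_RTR_SHIFT and EFF_FLAGS are stand-ins for the mscan.h constants; their values (0 and 0x18) are assumptions made for this illustration.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define EFF_RTR_SHIFT 0    /* assumed: RTR in bit 0 of IDR3_2 */
#define EFF_FLAGS     0x18 /* assumed: SRR + IDE bits in IDR1 */

/* Pack a 29-bit ID the way mscan_start_xmit() does. */
static void pack_eff(uint32_t id, int rtr, uint16_t *idr1_0, uint16_t *idr3_2)
{
	uint32_t v = (id & 0x1fffffffu) << (EFF_RTR_SHIFT + 1);

	if (rtr)
		v |= 1u << EFF_RTR_SHIFT;
	*idr3_2 = v & 0xffff;
	v >>= 16;
	/* the flag bits are wedged between ID fields here */
	*idr1_0 = (v & 0x7) | ((v << 2) & 0xffe0) | EFF_FLAGS;
}

/* Undo it the way mscan_get_rx_frame() does. */
static uint32_t unpack_eff(uint16_t idr1_0, uint16_t idr3_2, int *rtr)
{
	uint32_t v = ((uint32_t)idr1_0 << 16) | idr3_2;

	v = ((v & 0xffe00000) | ((v & 0x7ffff) << 2)) >> 2;
	*rtr = v & 1;
	return v >> 1;
}

int main(void)
{
	uint16_t hi, lo;
	int rtr;

	pack_eff(0x1abcdef0, 0, &hi, &lo);
	assert(unpack_eff(hi, lo, &rtr) == 0x1abcdef0 && !rtr);
	puts("EFF pack/unpack round-trips");
	return 0;
}

Note that the flag bits land in positions 20..19 of the combined 32-bit value, exactly the bits the RX masks 0xffe00000 and 0x7ffff drop, which is why the round trip works.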
// SPDX-License-Identifier: GPL-2.0-only /* * CAN bus driver for the Freescale MPC5xxx embedded CPU. * * Copyright (C) 2004-2005 Andrey Volkov <[email protected]>, * Varma Electronics Oy * Copyright (C) 2008-2009 Wolfgang Grandegger <[email protected]> * Copyright (C) 2009 Wolfram Sang, Pengutronix <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/can/dev.h> #include <linux/of_address.h> #include <linux/of_irq.h> #include <linux/of_platform.h> #include <sysdev/fsl_soc.h> #include <linux/clk.h> #include <linux/io.h> #include <asm/mpc52xx.h> #include "mscan.h" #define DRV_NAME "mpc5xxx_can" struct mpc5xxx_can_data { unsigned int type; u32 (*get_clock)(struct platform_device *ofdev, const char *clock_name, int *mscan_clksrc); void (*put_clock)(struct platform_device *ofdev); }; #ifdef CONFIG_PPC_MPC52xx static const struct of_device_id mpc52xx_cdm_ids[] = { { .compatible = "fsl,mpc5200-cdm", }, {} }; static u32 mpc52xx_can_get_clock(struct platform_device *ofdev, const char *clock_name, int *mscan_clksrc) { unsigned int pvr; struct mpc52xx_cdm __iomem *cdm; struct device_node *np_cdm; unsigned int freq; u32 val; pvr = mfspr(SPRN_PVR); /* * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock * (IP_CLK) can be selected as MSCAN clock source. According to * the MPC5200 user's manual, the oscillator clock is the better * choice as it has less jitter. For this reason, it is selected * by default. Unfortunately, it can not be selected for the old * MPC5200 Rev. A chips due to a hardware bug (check errata). */ if (clock_name && strcmp(clock_name, "ip") == 0) *mscan_clksrc = MSCAN_CLKSRC_BUS; else *mscan_clksrc = MSCAN_CLKSRC_XTAL; freq = mpc5xxx_get_bus_frequency(&ofdev->dev); if (!freq) return 0; if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011) return freq; /* Determine SYS_XTAL_IN frequency from the clock domain settings */ np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids); if (!np_cdm) { dev_err(&ofdev->dev, "can't get clock node!\n"); return 0; } cdm = of_iomap(np_cdm, 0); if (!cdm) { of_node_put(np_cdm); dev_err(&ofdev->dev, "can't map clock node!\n"); return 0; } if (in_8(&cdm->ipb_clk_sel) & 0x1) freq *= 2; val = in_be32(&cdm->rstcfg); freq *= (val & (1 << 5)) ? 8 : 4; freq /= (val & (1 << 6)) ? 12 : 16; of_node_put(np_cdm); iounmap(cdm); return freq; } #else /* !CONFIG_PPC_MPC52xx */ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev, const char *clock_name, int *mscan_clksrc) { return 0; } #endif /* CONFIG_PPC_MPC52xx */ #ifdef CONFIG_PPC_MPC512x static u32 mpc512x_can_get_clock(struct platform_device *ofdev, const char *clock_source, int *mscan_clksrc) { struct device_node *np; u32 clockdiv; enum { CLK_FROM_AUTO, CLK_FROM_IPS, CLK_FROM_SYS, CLK_FROM_REF, } clk_from; struct clk *clk_in, *clk_can; unsigned long freq_calc; struct mscan_priv *priv; struct clk *clk_ipg; /* the caller passed in the clock source spec that was read from * the device tree, get the optional clock divider as well */ np = ofdev->dev.of_node; clockdiv = 1; of_property_read_u32(np, "fsl,mscan-clock-divider", &clockdiv); dev_dbg(&ofdev->dev, "device tree specs: clk src[%s] div[%d]\n", clock_source ? 
clock_source : "<NULL>", clockdiv); /* when clock-source is 'ip', the CANCTL1[CLKSRC] bit needs to * get set, and the 'ips' clock is the input to the MSCAN * component * * for clock-source values of 'ref' or 'sys' the CANCTL1[CLKSRC] * bit needs to get cleared, an optional clock-divider may have * been specified (the default value is 1), the appropriate * MSCAN related MCLK is the input to the MSCAN component * * in the absence of a clock-source spec, first an optimal clock * gets determined based on the 'sys' clock, if that fails the * 'ref' clock is used */ clk_from = CLK_FROM_AUTO; if (clock_source) { /* interpret the device tree's spec for the clock source */ if (!strcmp(clock_source, "ip")) clk_from = CLK_FROM_IPS; else if (!strcmp(clock_source, "sys")) clk_from = CLK_FROM_SYS; else if (!strcmp(clock_source, "ref")) clk_from = CLK_FROM_REF; else goto err_invalid; dev_dbg(&ofdev->dev, "got a clk source spec[%d]\n", clk_from); } if (clk_from == CLK_FROM_AUTO) { /* no spec so far, try the 'sys' clock; round to the * next MHz and see if we can get a multiple of 16MHz */ dev_dbg(&ofdev->dev, "no clk source spec, trying SYS\n"); clk_in = devm_clk_get(&ofdev->dev, "sys"); if (IS_ERR(clk_in)) goto err_notavail; freq_calc = clk_get_rate(clk_in); freq_calc += 499999; freq_calc /= 1000000; freq_calc *= 1000000; if ((freq_calc % 16000000) == 0) { clk_from = CLK_FROM_SYS; clockdiv = freq_calc / 16000000; dev_dbg(&ofdev->dev, "clk fit, sys[%lu] div[%d] freq[%lu]\n", freq_calc, clockdiv, freq_calc / clockdiv); } } if (clk_from == CLK_FROM_AUTO) { /* no spec so far, use the 'ref' clock */ dev_dbg(&ofdev->dev, "no clk source spec, trying REF\n"); clk_in = devm_clk_get(&ofdev->dev, "ref"); if (IS_ERR(clk_in)) goto err_notavail; clk_from = CLK_FROM_REF; freq_calc = clk_get_rate(clk_in); dev_dbg(&ofdev->dev, "clk fit, ref[%lu] (no div) freq[%lu]\n", freq_calc, freq_calc); } /* select IPS or MCLK as the MSCAN input (returned to the caller), * setup the MCLK mux source and rate if applicable, apply the * optionally specified or derived above divider, and determine * the actual resulting clock rate to return to the caller */ switch (clk_from) { case CLK_FROM_IPS: clk_can = devm_clk_get(&ofdev->dev, "ips"); if (IS_ERR(clk_can)) goto err_notavail; priv = netdev_priv(dev_get_drvdata(&ofdev->dev)); priv->clk_can = clk_can; freq_calc = clk_get_rate(clk_can); *mscan_clksrc = MSCAN_CLKSRC_IPS; dev_dbg(&ofdev->dev, "clk from IPS, clksrc[%d] freq[%lu]\n", *mscan_clksrc, freq_calc); break; case CLK_FROM_SYS: case CLK_FROM_REF: clk_can = devm_clk_get(&ofdev->dev, "mclk"); if (IS_ERR(clk_can)) goto err_notavail; priv = netdev_priv(dev_get_drvdata(&ofdev->dev)); priv->clk_can = clk_can; if (clk_from == CLK_FROM_SYS) clk_in = devm_clk_get(&ofdev->dev, "sys"); if (clk_from == CLK_FROM_REF) clk_in = devm_clk_get(&ofdev->dev, "ref"); if (IS_ERR(clk_in)) goto err_notavail; clk_set_parent(clk_can, clk_in); freq_calc = clk_get_rate(clk_in); freq_calc /= clockdiv; clk_set_rate(clk_can, freq_calc); freq_calc = clk_get_rate(clk_can); *mscan_clksrc = MSCAN_CLKSRC_BUS; dev_dbg(&ofdev->dev, "clk from MCLK, clksrc[%d] freq[%lu]\n", *mscan_clksrc, freq_calc); break; default: goto err_invalid; } /* the above clk_can item is used for the bitrate, access to * the peripheral's register set needs the clk_ipg item */ clk_ipg = devm_clk_get(&ofdev->dev, "ipg"); if (IS_ERR(clk_ipg)) goto err_notavail_ipg; if (clk_prepare_enable(clk_ipg)) goto err_notavail_ipg; priv = netdev_priv(dev_get_drvdata(&ofdev->dev)); priv->clk_ipg = clk_ipg; /* 
return the determined clock source rate */ return freq_calc; err_invalid: dev_err(&ofdev->dev, "invalid clock source specification\n"); /* clock source rate could not get determined */ return 0; err_notavail: dev_err(&ofdev->dev, "cannot acquire or setup bitrate clock source\n"); /* clock source rate could not get determined */ return 0; err_notavail_ipg: dev_err(&ofdev->dev, "cannot acquire or setup register clock\n"); /* clock source rate could not get determined */ return 0; } static void mpc512x_can_put_clock(struct platform_device *ofdev) { struct mscan_priv *priv; priv = netdev_priv(dev_get_drvdata(&ofdev->dev)); if (priv->clk_ipg) clk_disable_unprepare(priv->clk_ipg); } #else /* !CONFIG_PPC_MPC512x */ static u32 mpc512x_can_get_clock(struct platform_device *ofdev, const char *clock_name, int *mscan_clksrc) { return 0; } #define mpc512x_can_put_clock NULL #endif /* CONFIG_PPC_MPC512x */ static const struct of_device_id mpc5xxx_can_table[]; static int mpc5xxx_can_probe(struct platform_device *ofdev) { const struct mpc5xxx_can_data *data; struct device_node *np = ofdev->dev.of_node; struct net_device *dev; struct mscan_priv *priv; void __iomem *base; const char *clock_name = NULL; int irq, mscan_clksrc = 0; int err = -ENOMEM; data = of_device_get_match_data(&ofdev->dev); if (!data) return -EINVAL; base = of_iomap(np, 0); if (!base) return dev_err_probe(&ofdev->dev, err, "couldn't ioremap\n"); irq = irq_of_parse_and_map(np, 0); if (!irq) { dev_err(&ofdev->dev, "no irq found\n"); err = -ENODEV; goto exit_unmap_mem; } dev = alloc_mscandev(); if (!dev) goto exit_dispose_irq; platform_set_drvdata(ofdev, dev); SET_NETDEV_DEV(dev, &ofdev->dev); priv = netdev_priv(dev); priv->reg_base = base; dev->irq = irq; clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL); priv->type = data->type; priv->can.clock.freq = data->get_clock(ofdev, clock_name, &mscan_clksrc); if (!priv->can.clock.freq) { dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n"); goto exit_put_clock; } err = register_mscandev(dev, mscan_clksrc); if (err) { dev_err(&ofdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); goto exit_put_clock; } dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n", priv->reg_base, dev->irq, priv->can.clock.freq); return 0; exit_put_clock: if (data->put_clock) data->put_clock(ofdev); free_candev(dev); exit_dispose_irq: irq_dispose_mapping(irq); exit_unmap_mem: iounmap(base); return err; } static void mpc5xxx_can_remove(struct platform_device *ofdev) { const struct of_device_id *match; const struct mpc5xxx_can_data *data; struct net_device *dev = platform_get_drvdata(ofdev); struct mscan_priv *priv = netdev_priv(dev); match = of_match_device(mpc5xxx_can_table, &ofdev->dev); data = match ? 
match->data : NULL; unregister_mscandev(dev); if (data && data->put_clock) data->put_clock(ofdev); iounmap(priv->reg_base); irq_dispose_mapping(dev->irq); free_candev(dev); } #ifdef CONFIG_PM static struct mscan_regs saved_regs; static int mpc5xxx_can_suspend(struct platform_device *ofdev, pm_message_t state) { struct net_device *dev = platform_get_drvdata(ofdev); struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; _memcpy_fromio(&saved_regs, regs, sizeof(*regs)); return 0; } static int mpc5xxx_can_resume(struct platform_device *ofdev) { struct net_device *dev = platform_get_drvdata(ofdev); struct mscan_priv *priv = netdev_priv(dev); struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base; regs->canctl0 |= MSCAN_INITRQ; while (!(regs->canctl1 & MSCAN_INITAK)) udelay(10); regs->canctl1 = saved_regs.canctl1; regs->canbtr0 = saved_regs.canbtr0; regs->canbtr1 = saved_regs.canbtr1; regs->canidac = saved_regs.canidac; /* restore masks, buffers etc. */ _memcpy_toio(&regs->canidar1_0, (void *)&saved_regs.canidar1_0, sizeof(*regs) - offsetof(struct mscan_regs, canidar1_0)); regs->canctl0 &= ~MSCAN_INITRQ; regs->cantbsel = saved_regs.cantbsel; regs->canrier = saved_regs.canrier; regs->cantier = saved_regs.cantier; regs->canctl0 = saved_regs.canctl0; return 0; } #endif static const struct mpc5xxx_can_data mpc5200_can_data = { .type = MSCAN_TYPE_MPC5200, .get_clock = mpc52xx_can_get_clock, /* .put_clock not applicable */ }; static const struct mpc5xxx_can_data mpc5121_can_data = { .type = MSCAN_TYPE_MPC5121, .get_clock = mpc512x_can_get_clock, .put_clock = mpc512x_can_put_clock, }; static const struct of_device_id mpc5xxx_can_table[] = { { .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, }, /* Note that only MPC5121 Rev. 2 (and later) is supported */ { .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, }, {}, }; MODULE_DEVICE_TABLE(of, mpc5xxx_can_table); static struct platform_driver mpc5xxx_can_driver = { .driver = { .name = "mpc5xxx_can", .of_match_table = mpc5xxx_can_table, }, .probe = mpc5xxx_can_probe, .remove_new = mpc5xxx_can_remove, #ifdef CONFIG_PM .suspend = mpc5xxx_can_suspend, .resume = mpc5xxx_can_resume, #endif }; module_platform_driver(mpc5xxx_can_driver); MODULE_AUTHOR("Wolfgang Grandegger <[email protected]>"); MODULE_DESCRIPTION("Freescale MPC5xxx CAN driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/can/mscan/mpc5xxx_can.c
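The CLK_FROM_AUTO branch of mpc512x_can_get_clock() above rounds the 'sys' clock rate to the nearest MHz and accepts it only if an integer divider yields exactly 16 MHz, falling back to the 'ref' clock otherwise. A small stand-alone model of that arithmetic; the sample rate is made up.

#include <stdio.h>

int main(void)
{
	unsigned long sys_rate = 399999888; /* hypothetical sys clock in Hz */
	/* round to the next MHz, as the driver does */
	unsigned long freq = (sys_rate + 499999) / 1000000 * 1000000;

	if (freq % 16000000 == 0)
		printf("use SYS: div=%lu, mscan clk=%lu Hz\n",
		       freq / 16000000, freq / (freq / 16000000));
	else
		puts("fall back to REF clock");
	return 0;
}

With the hypothetical 399999888 Hz input, rounding gives 400 MHz, the divider is 25, and the MSCAN input clock comes out at exactly 16 MHz.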
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Driver for CC770 and AN82527 CAN controllers on the legacy ISA bus
 *
 * Copyright (C) 2009, 2011 Wolfgang Grandegger <[email protected]>
 */

/*
 * Bosch CC770 and Intel AN82527 CAN controllers on the ISA or PC-104 bus.
 * The I/O port or memory address and the IRQ number must be specified via
 * module parameters:
 *
 *    insmod cc770_isa.ko port=0x310,0x380 irq=7,11
 *
 * for ISA devices using I/O ports or:
 *
 *    insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11
 *
 * for memory mapped ISA devices.
 *
 * Indirect access via address and data port is supported as well:
 *
 *    insmod cc770_isa.ko port=0x310,0x380 indirect=1 irq=7,11
 *
 * Furthermore, the following mode parameters can be defined:
 *
 *   clk: External oscillator clock frequency (default=16000000 [16 MHz])
 *   cir: CPU interface register (default=0x40 [DSC])
 *   bcr: Bus configuration register (default=0x40 [CBY])
 *   cor: Clockout register (default=0x00)
 *
 * Note: for clk, cir, bcr and cor, the first argument redefines the
 * default for all other devices, e.g.:
 *
 *    insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000
 *
 * is equivalent to
 *
 *    insmod cc770_isa.ko mem=0xd1000,0xd1000 irq=7,11 clk=24000000,24000000
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/netdevice.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/io.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/platform/cc770.h>

#include "cc770.h"

#define MAXDEV 8

MODULE_AUTHOR("Wolfgang Grandegger <[email protected]>");
MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the ISA bus");
MODULE_LICENSE("GPL v2");

#define CLK_DEFAULT	16000000	/* 16 MHz */
#define COR_DEFAULT	0x00
#define BCR_DEFAULT	BUSCFG_CBY

static unsigned long port[MAXDEV];
static unsigned long mem[MAXDEV];
static int irq[MAXDEV];
static int clk[MAXDEV];
static u8 cir[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
static u8 cor[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
static u8 bcr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff};
static int indirect[MAXDEV] = {[0 ...
(MAXDEV - 1)] = -1}; module_param_hw_array(port, ulong, ioport, NULL, 0444); MODULE_PARM_DESC(port, "I/O port number"); module_param_hw_array(mem, ulong, iomem, NULL, 0444); MODULE_PARM_DESC(mem, "I/O memory address"); module_param_hw_array(indirect, int, ioport, NULL, 0444); MODULE_PARM_DESC(indirect, "Indirect access via address and data port"); module_param_hw_array(irq, int, irq, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ number"); module_param_array(clk, int, NULL, 0444); MODULE_PARM_DESC(clk, "External oscillator clock frequency " "(default=16000000 [16 MHz])"); module_param_array(cir, byte, NULL, 0444); MODULE_PARM_DESC(cir, "CPU interface register (default=0x40 [DSC])"); module_param_array(cor, byte, NULL, 0444); MODULE_PARM_DESC(cor, "Clockout register (default=0x00)"); module_param_array(bcr, byte, NULL, 0444); MODULE_PARM_DESC(bcr, "Bus configuration register (default=0x40 [CBY])"); #define CC770_IOSIZE 0x20 #define CC770_IOSIZE_INDIRECT 0x02 /* Spinlock for cc770_isa_port_write_reg_indirect * and cc770_isa_port_read_reg_indirect */ static DEFINE_SPINLOCK(cc770_isa_port_lock); static struct platform_device *cc770_isa_devs[MAXDEV]; static u8 cc770_isa_mem_read_reg(const struct cc770_priv *priv, int reg) { return readb(priv->reg_base + reg); } static void cc770_isa_mem_write_reg(const struct cc770_priv *priv, int reg, u8 val) { writeb(val, priv->reg_base + reg); } static u8 cc770_isa_port_read_reg(const struct cc770_priv *priv, int reg) { return inb((unsigned long)priv->reg_base + reg); } static void cc770_isa_port_write_reg(const struct cc770_priv *priv, int reg, u8 val) { outb(val, (unsigned long)priv->reg_base + reg); } static u8 cc770_isa_port_read_reg_indirect(const struct cc770_priv *priv, int reg) { unsigned long base = (unsigned long)priv->reg_base; unsigned long flags; u8 val; spin_lock_irqsave(&cc770_isa_port_lock, flags); outb(reg, base); val = inb(base + 1); spin_unlock_irqrestore(&cc770_isa_port_lock, flags); return val; } static void cc770_isa_port_write_reg_indirect(const struct cc770_priv *priv, int reg, u8 val) { unsigned long base = (unsigned long)priv->reg_base; unsigned long flags; spin_lock_irqsave(&cc770_isa_port_lock, flags); outb(reg, base); outb(val, base + 1); spin_unlock_irqrestore(&cc770_isa_port_lock, flags); } static int cc770_isa_probe(struct platform_device *pdev) { struct net_device *dev; struct cc770_priv *priv; void __iomem *base = NULL; int iosize = CC770_IOSIZE; int idx = pdev->id; int err; u32 clktmp; dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n", idx, port[idx], mem[idx], irq[idx]); if (mem[idx]) { if (!request_mem_region(mem[idx], iosize, KBUILD_MODNAME)) { err = -EBUSY; goto exit; } base = ioremap(mem[idx], iosize); if (!base) { err = -ENOMEM; goto exit_release; } } else { if (indirect[idx] > 0 || (indirect[idx] == -1 && indirect[0] > 0)) iosize = CC770_IOSIZE_INDIRECT; if (!request_region(port[idx], iosize, KBUILD_MODNAME)) { err = -EBUSY; goto exit; } } dev = alloc_cc770dev(0); if (!dev) { err = -ENOMEM; goto exit_unmap; } priv = netdev_priv(dev); dev->irq = irq[idx]; priv->irq_flags = IRQF_SHARED; if (mem[idx]) { priv->reg_base = base; dev->base_addr = mem[idx]; priv->read_reg = cc770_isa_mem_read_reg; priv->write_reg = cc770_isa_mem_write_reg; } else { priv->reg_base = (void __iomem *)port[idx]; dev->base_addr = port[idx]; if (iosize == CC770_IOSIZE_INDIRECT) { priv->read_reg = cc770_isa_port_read_reg_indirect; priv->write_reg = cc770_isa_port_write_reg_indirect; } else { priv->read_reg = cc770_isa_port_read_reg; 
priv->write_reg = cc770_isa_port_write_reg; } } if (clk[idx]) clktmp = clk[idx]; else if (clk[0]) clktmp = clk[0]; else clktmp = CLK_DEFAULT; priv->can.clock.freq = clktmp; if (cir[idx] != 0xff) { priv->cpu_interface = cir[idx]; } else if (cir[0] != 0xff) { priv->cpu_interface = cir[0]; } else { /* The system clock may not exceed 10 MHz */ if (clktmp > 10000000) { priv->cpu_interface |= CPUIF_DSC; clktmp /= 2; } /* The memory clock may not exceed 8 MHz */ if (clktmp > 8000000) priv->cpu_interface |= CPUIF_DMC; } if (priv->cpu_interface & CPUIF_DSC) priv->can.clock.freq /= 2; if (bcr[idx] != 0xff) priv->bus_config = bcr[idx]; else if (bcr[0] != 0xff) priv->bus_config = bcr[0]; else priv->bus_config = BCR_DEFAULT; if (cor[idx] != 0xff) priv->clkout = cor[idx]; else if (cor[0] != 0xff) priv->clkout = cor[0]; else priv->clkout = COR_DEFAULT; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_cc770dev(dev); if (err) { dev_err(&pdev->dev, "couldn't register device (err=%d)\n", err); goto exit_free; } dev_info(&pdev->dev, "device registered (reg_base=0x%p, irq=%d)\n", priv->reg_base, dev->irq); return 0; exit_free: free_cc770dev(dev); exit_unmap: if (mem[idx]) iounmap(base); exit_release: if (mem[idx]) release_mem_region(mem[idx], iosize); else release_region(port[idx], iosize); exit: return err; } static void cc770_isa_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct cc770_priv *priv = netdev_priv(dev); int idx = pdev->id; unregister_cc770dev(dev); if (mem[idx]) { iounmap(priv->reg_base); release_mem_region(mem[idx], CC770_IOSIZE); } else { if (priv->read_reg == cc770_isa_port_read_reg_indirect) release_region(port[idx], CC770_IOSIZE_INDIRECT); else release_region(port[idx], CC770_IOSIZE); } free_cc770dev(dev); } static struct platform_driver cc770_isa_driver = { .probe = cc770_isa_probe, .remove_new = cc770_isa_remove, .driver = { .name = KBUILD_MODNAME, }, }; static int __init cc770_isa_init(void) { int idx, err; for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) { if ((port[idx] || mem[idx]) && irq[idx]) { cc770_isa_devs[idx] = platform_device_alloc(KBUILD_MODNAME, idx); if (!cc770_isa_devs[idx]) { err = -ENOMEM; goto exit_free_devices; } err = platform_device_add(cc770_isa_devs[idx]); if (err) { platform_device_put(cc770_isa_devs[idx]); goto exit_free_devices; } pr_debug("platform device %d: port=%#lx, mem=%#lx, " "irq=%d\n", idx, port[idx], mem[idx], irq[idx]); } else if (idx == 0 || port[idx] || mem[idx]) { pr_err("insufficient parameters supplied\n"); err = -EINVAL; goto exit_free_devices; } } err = platform_driver_register(&cc770_isa_driver); if (err) goto exit_free_devices; pr_info("driver for max. %d devices registered\n", MAXDEV); return 0; exit_free_devices: while (--idx >= 0) { if (cc770_isa_devs[idx]) platform_device_unregister(cc770_isa_devs[idx]); } return err; } module_init(cc770_isa_init); static void __exit cc770_isa_exit(void) { int idx; platform_driver_unregister(&cc770_isa_driver); for (idx = 0; idx < ARRAY_SIZE(cc770_isa_devs); idx++) { if (cc770_isa_devs[idx]) platform_device_unregister(cc770_isa_devs[idx]); } } module_exit(cc770_isa_exit);
linux-master
drivers/net/can/cc770/cc770_isa.c
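cc770_isa_port_read_reg_indirect() and its write counterpart above need two bus cycles per register access: first a write to the address port to latch the register number, then an access to the data port. That is why both accessors serialize on cc770_isa_port_lock. Below is a userspace model of the idiom, with plain variables standing in for the outb()/inb() ports; everything here is an illustration, not kernel API.

#include <stdint.h>
#include <stdio.h>

static uint8_t regs[0x100]; /* fake chip registers */
static uint8_t addr_latch;  /* last value written to the address port */

static void outb_model(uint8_t val, int port)
{
	if (port == 0)
		addr_latch = val;       /* address port selects a register */
	else
		regs[addr_latch] = val; /* data port accesses it */
}

static uint8_t inb_model(int port)
{
	return port == 0 ? addr_latch : regs[addr_latch];
}

int main(void)
{
	/* write register 0x25, then read it back */
	outb_model(0x25, 0);
	outb_model(0xc3, 1);
	outb_model(0x25, 0);
	printf("reg 0x25 = 0x%02x\n", inb_model(1));
	return 0;
}

If two contexts interleaved between the address write and the data access, one of them would hit the wrong register; the spinlock in the driver closes exactly that window.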
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Core driver for the CC770 and AN82527 CAN controllers
 *
 * Copyright (C) 2009, 2011 Wolfgang Grandegger <[email protected]>
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/interrupt.h>
#include <linux/ptrace.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/if_ether.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/platform/cc770.h>

#include "cc770.h"

MODULE_AUTHOR("Wolfgang Grandegger <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION(KBUILD_MODNAME " CAN netdevice driver");

/*
 * The CC770 is a CAN controller from Bosch, which is 100% compatible
 * with the AN82527 from Intel, but with "bugs" being fixed and some
 * additional functionality, mainly:
 *
 * 1. RX and TX error counters are readable.
 * 2. Support of silent (listen-only) mode.
 * 3. Message object 15 can receive all types of frames, also RTR and EFF.
 *
 * Details are available from Bosch's "CC770_Product_Info_2007-01.pdf",
 * which explains in detail the compatibility between the CC770 and the
 * 82527. This driver uses additional function 3 on real CC770 devices.
 * Unfortunately, the CC770 still does not store the message identifier
 * of received remote transmission request frames, so it is set to 0.
 *
 * Message objects 1..14 can be used for TX and RX, while message
 * object 15 is optimized for RX. It has a shadow register for reliable
 * data reception under heavy bus load. Therefore it makes sense to use
 * this message object for reception. The frame type (EFF/SFF) for
 * message object 15 can be selected via the kernel module parameter
 * "msgobj15_eff". If nonzero, it will receive 29-bit EFF frames;
 * otherwise, 11-bit SFF frames.
 */
static int msgobj15_eff;
module_param(msgobj15_eff, int, 0444);
MODULE_PARM_DESC(msgobj15_eff, "Extended 29-bit frames for message object 15 "
		 "(default: 11-bit standard frames)");

static int i82527_compat;
module_param(i82527_compat, int, 0444);
MODULE_PARM_DESC(i82527_compat, "Strict Intel 82527 compatibility mode "
		 "without using additional functions");

/*
 * This driver uses the last 5 message objects 11..15. The definitions
 * and structure below allow configuring them and assigning them to the
 * real message objects.
*/ static unsigned char cc770_obj_flags[CC770_OBJ_MAX] = { [CC770_OBJ_RX0] = CC770_OBJ_FLAG_RX, [CC770_OBJ_RX1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_EFF, [CC770_OBJ_RX_RTR0] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR, [CC770_OBJ_RX_RTR1] = CC770_OBJ_FLAG_RX | CC770_OBJ_FLAG_RTR | CC770_OBJ_FLAG_EFF, [CC770_OBJ_TX] = 0, }; static const struct can_bittiming_const cc770_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; static inline int intid2obj(unsigned int intid) { if (intid == 2) return 0; else return MSGOBJ_LAST + 2 - intid; } static void enable_all_objs(const struct net_device *dev) { struct cc770_priv *priv = netdev_priv(dev); u8 msgcfg; unsigned char obj_flags; unsigned int o, mo; for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) { obj_flags = priv->obj_flags[o]; mo = obj2msgobj(o); if (obj_flags & CC770_OBJ_FLAG_RX) { /* * We don't need extra objects for RTR and EFF if * the additional CC770 functions are enabled. */ if (priv->control_normal_mode & CTRL_EAF) { if (o > 0) continue; netdev_dbg(dev, "Message object %d for " "RX data, RTR, SFF and EFF\n", mo); } else { netdev_dbg(dev, "Message object %d for RX %s %s\n", mo, obj_flags & CC770_OBJ_FLAG_RTR ? "RTR" : "data", obj_flags & CC770_OBJ_FLAG_EFF ? "EFF" : "SFF"); } if (obj_flags & CC770_OBJ_FLAG_EFF) msgcfg = MSGCFG_XTD; else msgcfg = 0; if (obj_flags & CC770_OBJ_FLAG_RTR) msgcfg |= MSGCFG_DIR; cc770_write_reg(priv, msgobj[mo].config, msgcfg); cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_SET | TXIE_RES | RXIE_SET | INTPND_RES); if (obj_flags & CC770_OBJ_FLAG_RTR) cc770_write_reg(priv, msgobj[mo].ctrl1, NEWDAT_RES | CPUUPD_SET | TXRQST_RES | RMTPND_RES); else cc770_write_reg(priv, msgobj[mo].ctrl1, NEWDAT_RES | MSGLST_RES | TXRQST_RES | RMTPND_RES); } else { netdev_dbg(dev, "Message object %d for " "TX data, RTR, SFF and EFF\n", mo); cc770_write_reg(priv, msgobj[mo].ctrl1, RMTPND_RES | TXRQST_RES | CPUUPD_RES | NEWDAT_RES); cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); } } } static void disable_all_objs(const struct cc770_priv *priv) { int o, mo; for (o = 0; o < ARRAY_SIZE(priv->obj_flags); o++) { mo = obj2msgobj(o); if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) { if (o > 0 && priv->control_normal_mode & CTRL_EAF) continue; cc770_write_reg(priv, msgobj[mo].ctrl1, NEWDAT_RES | MSGLST_RES | TXRQST_RES | RMTPND_RES); cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); } else { /* Clear message object for send */ cc770_write_reg(priv, msgobj[mo].ctrl1, RMTPND_RES | TXRQST_RES | CPUUPD_RES | NEWDAT_RES); cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); } } } static void set_reset_mode(struct net_device *dev) { struct cc770_priv *priv = netdev_priv(dev); /* Enable configuration and puts chip in bus-off, disable interrupts */ cc770_write_reg(priv, control, CTRL_CCE | CTRL_INI); priv->can.state = CAN_STATE_STOPPED; /* Clear interrupts */ cc770_read_reg(priv, interrupt); /* Clear status register */ cc770_write_reg(priv, status, 0); /* Disable all used message objects */ disable_all_objs(priv); } static void set_normal_mode(struct net_device *dev) { struct cc770_priv *priv = netdev_priv(dev); /* Clear interrupts */ cc770_read_reg(priv, interrupt); /* Clear status register and pre-set last error code */ cc770_write_reg(priv, status, STAT_LEC_MASK); /* Enable all used message objects*/ enable_all_objs(dev); /* * 
Clear bus-off, interrupts only for errors, * not for status change */ cc770_write_reg(priv, control, priv->control_normal_mode); priv->can.state = CAN_STATE_ERROR_ACTIVE; } static void chipset_init(struct cc770_priv *priv) { int mo, id, data; /* Enable configuration and put chip in bus-off, disable interrupts */ cc770_write_reg(priv, control, (CTRL_CCE | CTRL_INI)); /* Set CLKOUT divider and slew rates */ cc770_write_reg(priv, clkout, priv->clkout); /* Configure CPU interface / CLKOUT enable */ cc770_write_reg(priv, cpu_interface, priv->cpu_interface); /* Set bus configuration */ cc770_write_reg(priv, bus_config, priv->bus_config); /* Clear interrupts */ cc770_read_reg(priv, interrupt); /* Clear status register */ cc770_write_reg(priv, status, 0); /* Clear and invalidate message objects */ for (mo = MSGOBJ_FIRST; mo <= MSGOBJ_LAST; mo++) { cc770_write_reg(priv, msgobj[mo].ctrl0, INTPND_UNC | RXIE_RES | TXIE_RES | MSGVAL_RES); cc770_write_reg(priv, msgobj[mo].ctrl0, INTPND_RES | RXIE_RES | TXIE_RES | MSGVAL_RES); cc770_write_reg(priv, msgobj[mo].ctrl1, NEWDAT_RES | MSGLST_RES | TXRQST_RES | RMTPND_RES); for (data = 0; data < 8; data++) cc770_write_reg(priv, msgobj[mo].data[data], 0); for (id = 0; id < 4; id++) cc770_write_reg(priv, msgobj[mo].id[id], 0); cc770_write_reg(priv, msgobj[mo].config, 0); } /* Set all global ID masks to "don't care" */ cc770_write_reg(priv, global_mask_std[0], 0); cc770_write_reg(priv, global_mask_std[1], 0); cc770_write_reg(priv, global_mask_ext[0], 0); cc770_write_reg(priv, global_mask_ext[1], 0); cc770_write_reg(priv, global_mask_ext[2], 0); cc770_write_reg(priv, global_mask_ext[3], 0); } static int cc770_probe_chip(struct net_device *dev) { struct cc770_priv *priv = netdev_priv(dev); /* Enable configuration, put chip in bus-off, disable ints */ cc770_write_reg(priv, control, CTRL_CCE | CTRL_EAF | CTRL_INI); /* Configure cpu interface / CLKOUT disable */ cc770_write_reg(priv, cpu_interface, priv->cpu_interface); /* * Check if hardware reset is still inactive or maybe there * is no chip in this address space */ if (cc770_read_reg(priv, cpu_interface) & CPUIF_RST) { netdev_info(dev, "probing @0x%p failed (reset)\n", priv->reg_base); return -ENODEV; } /* Write and read back test pattern (some arbitrary values) */ cc770_write_reg(priv, msgobj[1].data[1], 0x25); cc770_write_reg(priv, msgobj[2].data[3], 0x52); cc770_write_reg(priv, msgobj[10].data[6], 0xc3); if ((cc770_read_reg(priv, msgobj[1].data[1]) != 0x25) || (cc770_read_reg(priv, msgobj[2].data[3]) != 0x52) || (cc770_read_reg(priv, msgobj[10].data[6]) != 0xc3)) { netdev_info(dev, "probing @0x%p failed (pattern)\n", priv->reg_base); return -ENODEV; } /* Check if this chip is a CC770 supporting additional functions */ if (cc770_read_reg(priv, control) & CTRL_EAF) priv->control_normal_mode |= CTRL_EAF; return 0; } static void cc770_start(struct net_device *dev) { struct cc770_priv *priv = netdev_priv(dev); /* leave reset mode */ if (priv->can.state != CAN_STATE_STOPPED) set_reset_mode(dev); /* leave reset mode */ set_normal_mode(dev); } static int cc770_set_mode(struct net_device *dev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: cc770_start(dev); netif_wake_queue(dev); break; default: return -EOPNOTSUPP; } return 0; } static int cc770_set_bittiming(struct net_device *dev) { struct cc770_priv *priv = netdev_priv(dev); struct can_bittiming *bt = &priv->can.bittiming; u8 btr0, btr1; btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | 
(((bt->phase_seg2 - 1) & 0x7) << 4); if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) btr1 |= 0x80; netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); cc770_write_reg(priv, bit_timing_0, btr0); cc770_write_reg(priv, bit_timing_1, btr1); return 0; } static int cc770_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct cc770_priv *priv = netdev_priv(dev); bec->txerr = cc770_read_reg(priv, tx_error_counter); bec->rxerr = cc770_read_reg(priv, rx_error_counter); return 0; } static void cc770_tx(struct net_device *dev, int mo) { struct cc770_priv *priv = netdev_priv(dev); struct can_frame *cf = (struct can_frame *)priv->tx_skb->data; u8 dlc, rtr; u32 id; int i; dlc = cf->len; id = cf->can_id; rtr = cf->can_id & CAN_RTR_FLAG ? 0 : MSGCFG_DIR; cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); cc770_write_reg(priv, msgobj[mo].ctrl1, RMTPND_RES | TXRQST_RES | CPUUPD_SET | NEWDAT_RES); if (id & CAN_EFF_FLAG) { id &= CAN_EFF_MASK; cc770_write_reg(priv, msgobj[mo].config, (dlc << 4) | rtr | MSGCFG_XTD); cc770_write_reg(priv, msgobj[mo].id[3], id << 3); cc770_write_reg(priv, msgobj[mo].id[2], id >> 5); cc770_write_reg(priv, msgobj[mo].id[1], id >> 13); cc770_write_reg(priv, msgobj[mo].id[0], id >> 21); } else { id &= CAN_SFF_MASK; cc770_write_reg(priv, msgobj[mo].config, (dlc << 4) | rtr); cc770_write_reg(priv, msgobj[mo].id[0], id >> 3); cc770_write_reg(priv, msgobj[mo].id[1], id << 5); } for (i = 0; i < dlc; i++) cc770_write_reg(priv, msgobj[mo].data[i], cf->data[i]); cc770_write_reg(priv, msgobj[mo].ctrl1, RMTPND_UNC | TXRQST_SET | CPUUPD_RES | NEWDAT_UNC); cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_SET | TXIE_SET | RXIE_SET | INTPND_UNC); } static netdev_tx_t cc770_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct cc770_priv *priv = netdev_priv(dev); unsigned int mo = obj2msgobj(CC770_OBJ_TX); if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); if ((cc770_read_reg(priv, msgobj[mo].ctrl1) & TXRQST_UNC) == TXRQST_SET) { netdev_err(dev, "TX register is still occupied!\n"); return NETDEV_TX_BUSY; } priv->tx_skb = skb; cc770_tx(dev, mo); return NETDEV_TX_OK; } static void cc770_rx(struct net_device *dev, unsigned int mo, u8 ctrl1) { struct cc770_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; u8 config; u32 id; int i; skb = alloc_can_skb(dev, &cf); if (!skb) return; config = cc770_read_reg(priv, msgobj[mo].config); if (ctrl1 & RMTPND_SET) { /* * Unfortunately, the chip does not store the real message * identifier of the received remote transmission request * frame. Therefore we set it to 0. 
*/ cf->can_id = CAN_RTR_FLAG; if (config & MSGCFG_XTD) cf->can_id |= CAN_EFF_FLAG; cf->len = 0; } else { if (config & MSGCFG_XTD) { id = cc770_read_reg(priv, msgobj[mo].id[3]); id |= cc770_read_reg(priv, msgobj[mo].id[2]) << 8; id |= cc770_read_reg(priv, msgobj[mo].id[1]) << 16; id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 24; id >>= 3; id |= CAN_EFF_FLAG; } else { id = cc770_read_reg(priv, msgobj[mo].id[1]); id |= cc770_read_reg(priv, msgobj[mo].id[0]) << 8; id >>= 5; } cf->can_id = id; cf->len = can_cc_dlc2len((config & 0xf0) >> 4); for (i = 0; i < cf->len; i++) cf->data[i] = cc770_read_reg(priv, msgobj[mo].data[i]); stats->rx_bytes += cf->len; } stats->rx_packets++; netif_rx(skb); } static int cc770_err(struct net_device *dev, u8 status) { struct cc770_priv *priv = netdev_priv(dev); struct can_frame *cf; struct sk_buff *skb; u8 lec; netdev_dbg(dev, "status interrupt (%#x)\n", status); skb = alloc_can_err_skb(dev, &cf); if (!skb) return -ENOMEM; /* Use extended functions of the CC770 */ if (priv->control_normal_mode & CTRL_EAF) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = cc770_read_reg(priv, tx_error_counter); cf->data[7] = cc770_read_reg(priv, rx_error_counter); } if (status & STAT_BOFF) { /* Disable interrupts */ cc770_write_reg(priv, control, CTRL_INI); cf->can_id |= CAN_ERR_BUSOFF; priv->can.state = CAN_STATE_BUS_OFF; priv->can.can_stats.bus_off++; can_bus_off(dev); } else if (status & STAT_WARN) { cf->can_id |= CAN_ERR_CRTL; /* Only the CC770 does show error passive */ if (cf->data[7] > 127) { cf->data[1] = CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE; priv->can.state = CAN_STATE_ERROR_PASSIVE; priv->can.can_stats.error_passive++; } else { cf->data[1] = CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING; priv->can.state = CAN_STATE_ERROR_WARNING; priv->can.can_stats.error_warning++; } } else { /* Back to error active */ cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; priv->can.state = CAN_STATE_ERROR_ACTIVE; } lec = status & STAT_LEC_MASK; if (lec < 7 && lec > 0) { if (lec == STAT_LEC_ACK) { cf->can_id |= CAN_ERR_ACK; } else { cf->can_id |= CAN_ERR_PROT; switch (lec) { case STAT_LEC_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; break; case STAT_LEC_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; break; case STAT_LEC_BIT1: cf->data[2] |= CAN_ERR_PROT_BIT1; break; case STAT_LEC_BIT0: cf->data[2] |= CAN_ERR_PROT_BIT0; break; case STAT_LEC_CRC: cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; } } } netif_rx(skb); return 0; } static int cc770_status_interrupt(struct net_device *dev) { struct cc770_priv *priv = netdev_priv(dev); u8 status; status = cc770_read_reg(priv, status); /* Reset the status register including RXOK and TXOK */ cc770_write_reg(priv, status, STAT_LEC_MASK); if (status & (STAT_WARN | STAT_BOFF) || (status & STAT_LEC_MASK) != STAT_LEC_MASK) { cc770_err(dev, status); return status & STAT_BOFF; } return 0; } static void cc770_rx_interrupt(struct net_device *dev, unsigned int o) { struct cc770_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned int mo = obj2msgobj(o); u8 ctrl1; int n = CC770_MAX_MSG; while (n--) { ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); if (!(ctrl1 & NEWDAT_SET)) { /* Check for RTR if additional functions are enabled */ if (priv->control_normal_mode & CTRL_EAF) { if (!(cc770_read_reg(priv, msgobj[mo].ctrl0) & INTPND_SET)) break; } else { break; } } if (ctrl1 & MSGLST_SET) { stats->rx_over_errors++; stats->rx_errors++; } if (mo < MSGOBJ_LAST) cc770_write_reg(priv, msgobj[mo].ctrl1, NEWDAT_RES | MSGLST_RES | 
TXRQST_UNC | RMTPND_UNC); cc770_rx(dev, mo, ctrl1); cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_SET | TXIE_RES | RXIE_SET | INTPND_RES); cc770_write_reg(priv, msgobj[mo].ctrl1, NEWDAT_RES | MSGLST_RES | TXRQST_RES | RMTPND_RES); } } static void cc770_rtr_interrupt(struct net_device *dev, unsigned int o) { struct cc770_priv *priv = netdev_priv(dev); unsigned int mo = obj2msgobj(o); u8 ctrl0, ctrl1; int n = CC770_MAX_MSG; while (n--) { ctrl0 = cc770_read_reg(priv, msgobj[mo].ctrl0); if (!(ctrl0 & INTPND_SET)) break; ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); cc770_rx(dev, mo, ctrl1); cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_SET | TXIE_RES | RXIE_SET | INTPND_RES); cc770_write_reg(priv, msgobj[mo].ctrl1, NEWDAT_RES | CPUUPD_SET | TXRQST_RES | RMTPND_RES); } } static void cc770_tx_interrupt(struct net_device *dev, unsigned int o) { struct cc770_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; unsigned int mo = obj2msgobj(o); u8 ctrl1; ctrl1 = cc770_read_reg(priv, msgobj[mo].ctrl1); cc770_write_reg(priv, msgobj[mo].ctrl0, MSGVAL_RES | TXIE_RES | RXIE_RES | INTPND_RES); cc770_write_reg(priv, msgobj[mo].ctrl1, RMTPND_RES | TXRQST_RES | MSGLST_RES | NEWDAT_RES); if (unlikely(!priv->tx_skb)) { netdev_err(dev, "missing tx skb in tx interrupt\n"); return; } if (unlikely(ctrl1 & MSGLST_SET)) { stats->rx_over_errors++; stats->rx_errors++; } /* When the CC770 is sending an RTR message and it receives a regular * message that matches the id of the RTR message, it will overwrite the * outgoing message in the TX register. When this happens we must * process the received message and try to transmit the outgoing skb * again. */ if (unlikely(ctrl1 & NEWDAT_SET)) { cc770_rx(dev, mo, ctrl1); cc770_tx(dev, mo); return; } can_put_echo_skb(priv->tx_skb, dev, 0, 0); stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); stats->tx_packets++; priv->tx_skb = NULL; netif_wake_queue(dev); } static irqreturn_t cc770_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct cc770_priv *priv = netdev_priv(dev); u8 intid; int o, n = 0; /* Shared interrupts and IRQ off? */ if (priv->can.state == CAN_STATE_STOPPED) return IRQ_NONE; if (priv->pre_irq) priv->pre_irq(priv); while (n < CC770_MAX_IRQ) { /* Read the highest pending interrupt request */ intid = cc770_read_reg(priv, interrupt); if (!intid) break; n++; if (intid == 1) { /* Exit in case of bus-off */ if (cc770_status_interrupt(dev)) break; } else { o = intid2obj(intid); if (o >= CC770_OBJ_MAX) { netdev_err(dev, "Unexpected interrupt id %d\n", intid); continue; } if (priv->obj_flags[o] & CC770_OBJ_FLAG_RTR) cc770_rtr_interrupt(dev, o); else if (priv->obj_flags[o] & CC770_OBJ_FLAG_RX) cc770_rx_interrupt(dev, o); else cc770_tx_interrupt(dev, o); } } if (priv->post_irq) priv->post_irq(priv); if (n >= CC770_MAX_IRQ) netdev_dbg(dev, "%d messages handled in ISR", n); return (n) ? 
IRQ_HANDLED : IRQ_NONE; } static int cc770_open(struct net_device *dev) { struct cc770_priv *priv = netdev_priv(dev); int err; /* set chip into reset mode */ set_reset_mode(dev); /* common open */ err = open_candev(dev); if (err) return err; err = request_irq(dev->irq, &cc770_interrupt, priv->irq_flags, dev->name, dev); if (err) { close_candev(dev); return -EAGAIN; } /* init and start chip */ cc770_start(dev); netif_start_queue(dev); return 0; } static int cc770_close(struct net_device *dev) { netif_stop_queue(dev); set_reset_mode(dev); free_irq(dev->irq, dev); close_candev(dev); return 0; } struct net_device *alloc_cc770dev(int sizeof_priv) { struct net_device *dev; struct cc770_priv *priv; dev = alloc_candev(sizeof(struct cc770_priv) + sizeof_priv, CC770_ECHO_SKB_MAX); if (!dev) return NULL; priv = netdev_priv(dev); priv->dev = dev; priv->can.bittiming_const = &cc770_bittiming_const; priv->can.do_set_bittiming = cc770_set_bittiming; priv->can.do_set_mode = cc770_set_mode; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; priv->tx_skb = NULL; memcpy(priv->obj_flags, cc770_obj_flags, sizeof(cc770_obj_flags)); if (sizeof_priv) priv->priv = (void *)priv + sizeof(struct cc770_priv); return dev; } EXPORT_SYMBOL_GPL(alloc_cc770dev); void free_cc770dev(struct net_device *dev) { free_candev(dev); } EXPORT_SYMBOL_GPL(free_cc770dev); static const struct net_device_ops cc770_netdev_ops = { .ndo_open = cc770_open, .ndo_stop = cc770_close, .ndo_start_xmit = cc770_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops cc770_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; int register_cc770dev(struct net_device *dev) { struct cc770_priv *priv = netdev_priv(dev); int err; err = cc770_probe_chip(dev); if (err) return err; dev->netdev_ops = &cc770_netdev_ops; dev->ethtool_ops = &cc770_ethtool_ops; dev->flags |= IFF_ECHO; /* we support local echo */ /* Should we use additional functions? */ if (!i82527_compat && priv->control_normal_mode & CTRL_EAF) { priv->can.do_get_berr_counter = cc770_get_berr_counter; priv->control_normal_mode = CTRL_IE | CTRL_EAF | CTRL_EIE; netdev_dbg(dev, "i82527 mode with additional functions\n"); } else { priv->control_normal_mode = CTRL_IE | CTRL_EIE; netdev_dbg(dev, "strict i82527 compatibility mode\n"); } chipset_init(priv); set_reset_mode(dev); return register_candev(dev); } EXPORT_SYMBOL_GPL(register_cc770dev); void unregister_cc770dev(struct net_device *dev) { set_reset_mode(dev); unregister_candev(dev); } EXPORT_SYMBOL_GPL(unregister_cc770dev); static __init int cc770_init(void) { if (msgobj15_eff) { cc770_obj_flags[CC770_OBJ_RX0] |= CC770_OBJ_FLAG_EFF; cc770_obj_flags[CC770_OBJ_RX1] &= ~CC770_OBJ_FLAG_EFF; } pr_info("CAN netdevice driver\n"); return 0; } module_init(cc770_init); static __exit void cc770_exit(void) { pr_info("driver removed\n"); } module_exit(cc770_exit);
linux-master
drivers/net/can/cc770/cc770.c
// SPDX-License-Identifier: GPL-2.0-only /* * Driver for CC770 and AN82527 CAN controllers on the platform bus * * Copyright (C) 2009, 2011 Wolfgang Grandegger <[email protected]> */ /* * If platform data are used you should have similar definitions * in your board-specific code: * * static struct cc770_platform_data myboard_cc770_pdata = { * .osc_freq = 16000000, * .cir = 0x41, * .cor = 0x20, * .bcr = 0x40, * }; * * Please see include/linux/can/platform/cc770.h for description of * above fields. * * If the device tree is used, you need a CAN node definition in your * DTS file similar to: * * can@3,100 { * compatible = "bosch,cc770"; * reg = <3 0x100 0x80>; * interrupts = <2 0>; * interrupt-parent = <&mpic>; * bosch,external-clock-frequency = <16000000>; * }; * * See "Documentation/devicetree/bindings/net/can/cc770.txt" for further * information. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/of.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/platform/cc770.h> #include "cc770.h" #define DRV_NAME "cc770_platform" MODULE_AUTHOR("Wolfgang Grandegger <[email protected]>"); MODULE_DESCRIPTION("Socket-CAN driver for CC770 on the platform bus"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:" DRV_NAME); #define CC770_PLATFORM_CAN_CLOCK 16000000 static u8 cc770_platform_read_reg(const struct cc770_priv *priv, int reg) { return ioread8(priv->reg_base + reg); } static void cc770_platform_write_reg(const struct cc770_priv *priv, int reg, u8 val) { iowrite8(val, priv->reg_base + reg); } static int cc770_get_of_node_data(struct platform_device *pdev, struct cc770_priv *priv) { struct device_node *np = pdev->dev.of_node; const u32 *prop; int prop_size; u32 clkext; prop = of_get_property(np, "bosch,external-clock-frequency", &prop_size); if (prop && (prop_size == sizeof(u32))) clkext = *prop; else clkext = CC770_PLATFORM_CAN_CLOCK; /* default */ priv->can.clock.freq = clkext; /* The system clock may not exceed 10 MHz */ if (priv->can.clock.freq > 10000000) { priv->cpu_interface |= CPUIF_DSC; priv->can.clock.freq /= 2; } /* The memory clock may not exceed 8 MHz */ if (priv->can.clock.freq > 8000000) priv->cpu_interface |= CPUIF_DMC; if (of_property_read_bool(np, "bosch,divide-memory-clock")) priv->cpu_interface |= CPUIF_DMC; if (of_property_read_bool(np, "bosch,iso-low-speed-mux")) priv->cpu_interface |= CPUIF_MUX; if (!of_get_property(np, "bosch,no-comperator-bypass", NULL)) priv->bus_config |= BUSCFG_CBY; if (of_property_read_bool(np, "bosch,disconnect-rx0-input")) priv->bus_config |= BUSCFG_DR0; if (of_property_read_bool(np, "bosch,disconnect-rx1-input")) priv->bus_config |= BUSCFG_DR1; if (of_property_read_bool(np, "bosch,disconnect-tx1-output")) priv->bus_config |= BUSCFG_DT1; if (of_property_read_bool(np, "bosch,polarity-dominant")) priv->bus_config |= BUSCFG_POL; prop = of_get_property(np, "bosch,clock-out-frequency", &prop_size); if (prop && (prop_size == sizeof(u32)) && *prop > 0) { u32 cdv = clkext / *prop; int slew; if (cdv > 0 && cdv < 16) { priv->cpu_interface |= CPUIF_CEN; priv->clkout |= (cdv - 1) & CLKOUT_CD_MASK; prop = of_get_property(np, "bosch,slew-rate", &prop_size); if (prop && (prop_size == sizeof(u32))) { slew = *prop; } else { /* Determine default slew rate */ slew = (CLKOUT_SL_MASK >> CLKOUT_SL_SHIFT) - ((cdv * clkext - 1) / 8000000); if (slew < 0) slew = 0; } priv->clkout |= (slew << CLKOUT_SL_SHIFT) & 
CLKOUT_SL_MASK; } else { dev_dbg(&pdev->dev, "invalid clock-out-frequency\n"); } } return 0; } static int cc770_get_platform_data(struct platform_device *pdev, struct cc770_priv *priv) { struct cc770_platform_data *pdata = dev_get_platdata(&pdev->dev); priv->can.clock.freq = pdata->osc_freq; if (priv->cpu_interface & CPUIF_DSC) priv->can.clock.freq /= 2; priv->clkout = pdata->cor; priv->bus_config = pdata->bcr; priv->cpu_interface = pdata->cir; return 0; } static int cc770_platform_probe(struct platform_device *pdev) { struct net_device *dev; struct cc770_priv *priv; struct resource *mem; resource_size_t mem_size; void __iomem *base; int err, irq; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!mem || irq <= 0) return -ENODEV; mem_size = resource_size(mem); if (!request_mem_region(mem->start, mem_size, pdev->name)) return -EBUSY; base = ioremap(mem->start, mem_size); if (!base) { err = -ENOMEM; goto exit_release_mem; } dev = alloc_cc770dev(0); if (!dev) { err = -ENOMEM; goto exit_unmap_mem; } dev->irq = irq; priv = netdev_priv(dev); priv->read_reg = cc770_platform_read_reg; priv->write_reg = cc770_platform_write_reg; priv->irq_flags = IRQF_SHARED; priv->reg_base = base; if (pdev->dev.of_node) err = cc770_get_of_node_data(pdev, priv); else if (dev_get_platdata(&pdev->dev)) err = cc770_get_platform_data(pdev, priv); else err = -ENODEV; if (err) goto exit_free_cc770; dev_dbg(&pdev->dev, "reg_base=0x%p irq=%d clock=%d cpu_interface=0x%02x " "bus_config=0x%02x clkout=0x%02x\n", priv->reg_base, dev->irq, priv->can.clock.freq, priv->cpu_interface, priv->bus_config, priv->clkout); platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_cc770dev(dev); if (err) { dev_err(&pdev->dev, "couldn't register CC700 device (err=%d)\n", err); goto exit_free_cc770; } return 0; exit_free_cc770: free_cc770dev(dev); exit_unmap_mem: iounmap(base); exit_release_mem: release_mem_region(mem->start, mem_size); return err; } static void cc770_platform_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct cc770_priv *priv = netdev_priv(dev); struct resource *mem; unregister_cc770dev(dev); iounmap(priv->reg_base); free_cc770dev(dev); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); } static const struct of_device_id cc770_platform_table[] = { {.compatible = "bosch,cc770"}, /* CC770 from Bosch */ {.compatible = "intc,82527"}, /* AN82527 from Intel CP */ {}, }; MODULE_DEVICE_TABLE(of, cc770_platform_table); static struct platform_driver cc770_platform_driver = { .driver = { .name = DRV_NAME, .of_match_table = cc770_platform_table, }, .probe = cc770_platform_probe, .remove_new = cc770_platform_remove, }; module_platform_driver(cc770_platform_driver);
linux-master
drivers/net/can/cc770/cc770_platform.c
/* * slcan.c - serial line CAN interface driver (using tty line discipline) * * This file is derived from linux/drivers/net/slip/slip.c and got * inspiration from linux/drivers/net/can/can327.c for the rework made * on the line discipline code. * * slip.c Authors : Laurence Culhane <[email protected]> * Fred N. van Kempen <[email protected]> * slcan.c Author : Oliver Hartkopp <[email protected]> * can327.c Author : Max Staudt <[email protected]> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, see http://www.gnu.org/licenses/gpl.html * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/uaccess.h> #include <linux/bitops.h> #include <linux/string.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/rtnetlink.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/workqueue.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/skb.h> #include "slcan.h" MODULE_ALIAS_LDISC(N_SLCAN); MODULE_DESCRIPTION("serial line CAN interface"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Oliver Hartkopp <[email protected]>"); MODULE_AUTHOR("Dario Binacchi <[email protected]>"); /* maximum rx buffer len: extended CAN frame with timestamp */ #define SLCAN_MTU (sizeof("T1111222281122334455667788EA5F\r") + 1) #define SLCAN_CMD_LEN 1 #define SLCAN_SFF_ID_LEN 3 #define SLCAN_EFF_ID_LEN 8 #define SLCAN_STATE_LEN 1 #define SLCAN_STATE_BE_RXCNT_LEN 3 #define SLCAN_STATE_BE_TXCNT_LEN 3 #define SLCAN_STATE_FRAME_LEN (1 + SLCAN_CMD_LEN + \ SLCAN_STATE_BE_RXCNT_LEN + \ SLCAN_STATE_BE_TXCNT_LEN) struct slcan { struct can_priv can; /* Various fields. */ struct tty_struct *tty; /* ptr to TTY structure */ struct net_device *dev; /* easy for intr handling */ spinlock_t lock; struct work_struct tx_work; /* Flushes transmit buffer */ /* These are pointers to the malloc()ed frame buffers. 
*/ unsigned char rbuff[SLCAN_MTU]; /* receiver buffer */ int rcount; /* received chars counter */ unsigned char xbuff[SLCAN_MTU]; /* transmitter buffer*/ unsigned char *xhead; /* pointer to next XMIT byte */ int xleft; /* bytes left in XMIT queue */ unsigned long flags; /* Flag values/ mode etc */ #define SLF_ERROR 0 /* Parity, etc. error */ #define SLF_XCMD 1 /* Command transmission */ unsigned long cmd_flags; /* Command flags */ #define CF_ERR_RST 0 /* Reset errors on open */ wait_queue_head_t xcmd_wait; /* Wait queue for commands */ /* transmission */ }; static const u32 slcan_bitrate_const[] = { 10000, 20000, 50000, 100000, 125000, 250000, 500000, 800000, 1000000 }; bool slcan_err_rst_on_open(struct net_device *ndev) { struct slcan *sl = netdev_priv(ndev); return !!test_bit(CF_ERR_RST, &sl->cmd_flags); } int slcan_enable_err_rst_on_open(struct net_device *ndev, bool on) { struct slcan *sl = netdev_priv(ndev); if (netif_running(ndev)) return -EBUSY; if (on) set_bit(CF_ERR_RST, &sl->cmd_flags); else clear_bit(CF_ERR_RST, &sl->cmd_flags); return 0; } /************************************************************************* * SLCAN ENCAPSULATION FORMAT * *************************************************************************/ /* A CAN frame has a can_id (11 bit standard frame format OR 29 bit extended * frame format) a data length code (len) which can be from 0 to 8 * and up to <len> data bytes as payload. * Additionally a CAN frame may become a remote transmission frame if the * RTR-bit is set. This causes another ECU to send a CAN frame with the * given can_id. * * The SLCAN ASCII representation of these different frame types is: * <type> <id> <dlc> <data>* * * Extended frames (29 bit) are defined by capital characters in the type. * RTR frames are defined as 'r' types - normal frames have 't' type: * t => 11 bit data frame * r => 11 bit RTR frame * T => 29 bit data frame * R => 29 bit RTR frame * * The <id> is 3 (standard) or 8 (extended) bytes in ASCII Hex (base64). 
* The <dlc> is a one byte ASCII number ('0' - '8') * The <data> section has at much ASCII Hex bytes as defined by the <dlc> * * Examples: * * t1230 : can_id 0x123, len 0, no data * t4563112233 : can_id 0x456, len 3, data 0x11 0x22 0x33 * T12ABCDEF2AA55 : extended can_id 0x12ABCDEF, len 2, data 0xAA 0x55 * r1230 : can_id 0x123, len 0, no data, remote transmission request * */ /************************************************************************* * STANDARD SLCAN DECAPSULATION * *************************************************************************/ /* Send one completely decapsulated can_frame to the network layer */ static void slcan_bump_frame(struct slcan *sl) { struct sk_buff *skb; struct can_frame *cf; int i, tmp; u32 tmpid; char *cmd = sl->rbuff; skb = alloc_can_skb(sl->dev, &cf); if (unlikely(!skb)) { sl->dev->stats.rx_dropped++; return; } switch (*cmd) { case 'r': cf->can_id = CAN_RTR_FLAG; fallthrough; case 't': /* store dlc ASCII value and terminate SFF CAN ID string */ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN]; sl->rbuff[SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN] = 0; /* point to payload data behind the dlc */ cmd += SLCAN_CMD_LEN + SLCAN_SFF_ID_LEN + 1; break; case 'R': cf->can_id = CAN_RTR_FLAG; fallthrough; case 'T': cf->can_id |= CAN_EFF_FLAG; /* store dlc ASCII value and terminate EFF CAN ID string */ cf->len = sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN]; sl->rbuff[SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN] = 0; /* point to payload data behind the dlc */ cmd += SLCAN_CMD_LEN + SLCAN_EFF_ID_LEN + 1; break; default: goto decode_failed; } if (kstrtou32(sl->rbuff + SLCAN_CMD_LEN, 16, &tmpid)) goto decode_failed; cf->can_id |= tmpid; /* get len from sanitized ASCII value */ if (cf->len >= '0' && cf->len < '9') cf->len -= '0'; else goto decode_failed; /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf->can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf->len; i++) { tmp = hex_to_bin(*cmd++); if (tmp < 0) goto decode_failed; cf->data[i] = (tmp << 4); tmp = hex_to_bin(*cmd++); if (tmp < 0) goto decode_failed; cf->data[i] |= tmp; } } sl->dev->stats.rx_packets++; if (!(cf->can_id & CAN_RTR_FLAG)) sl->dev->stats.rx_bytes += cf->len; netif_rx(skb); return; decode_failed: sl->dev->stats.rx_errors++; dev_kfree_skb(skb); } /* A change state frame must contain state info and receive and transmit * error counters. * * Examples: * * sb256256 : state bus-off: rx counter 256, tx counter 256 * sa057033 : state active, rx counter 57, tx counter 33 */ static void slcan_bump_state(struct slcan *sl) { struct net_device *dev = sl->dev; struct sk_buff *skb; struct can_frame *cf; char *cmd = sl->rbuff; u32 rxerr, txerr; enum can_state state, rx_state, tx_state; switch (cmd[1]) { case 'a': state = CAN_STATE_ERROR_ACTIVE; break; case 'w': state = CAN_STATE_ERROR_WARNING; break; case 'p': state = CAN_STATE_ERROR_PASSIVE; break; case 'b': state = CAN_STATE_BUS_OFF; break; default: return; } if (state == sl->can.state || sl->rcount < SLCAN_STATE_FRAME_LEN) return; cmd += SLCAN_STATE_BE_RXCNT_LEN + SLCAN_CMD_LEN + 1; cmd[SLCAN_STATE_BE_TXCNT_LEN] = 0; if (kstrtou32(cmd, 10, &txerr)) return; *cmd = 0; cmd -= SLCAN_STATE_BE_RXCNT_LEN; if (kstrtou32(cmd, 10, &rxerr)) return; skb = alloc_can_err_skb(dev, &cf); tx_state = txerr >= rxerr ? state : 0; rx_state = txerr <= rxerr ? 
state : 0; can_change_state(dev, cf, tx_state, rx_state); if (state == CAN_STATE_BUS_OFF) { can_bus_off(dev); } else if (skb) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; } if (skb) netif_rx(skb); } /* An error frame can contain more than one type of error. * * Examples: * * e1a : len 1, errors: ACK error * e3bcO: len 3, errors: Bit0 error, CRC error, Tx overrun error */ static void slcan_bump_err(struct slcan *sl) { struct net_device *dev = sl->dev; struct sk_buff *skb; struct can_frame *cf; char *cmd = sl->rbuff; bool rx_errors = false, tx_errors = false, rx_over_errors = false; int i, len; /* get len from sanitized ASCII value */ len = cmd[1]; if (len >= '0' && len < '9') len -= '0'; else return; if ((len + SLCAN_CMD_LEN + 1) > sl->rcount) return; skb = alloc_can_err_skb(dev, &cf); if (skb) cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; cmd += SLCAN_CMD_LEN + 1; for (i = 0; i < len; i++, cmd++) { switch (*cmd) { case 'a': netdev_dbg(dev, "ACK error\n"); tx_errors = true; if (skb) { cf->can_id |= CAN_ERR_ACK; cf->data[3] = CAN_ERR_PROT_LOC_ACK; } break; case 'b': netdev_dbg(dev, "Bit0 error\n"); tx_errors = true; if (skb) cf->data[2] |= CAN_ERR_PROT_BIT0; break; case 'B': netdev_dbg(dev, "Bit1 error\n"); tx_errors = true; if (skb) cf->data[2] |= CAN_ERR_PROT_BIT1; break; case 'c': netdev_dbg(dev, "CRC error\n"); rx_errors = true; if (skb) { cf->data[2] |= CAN_ERR_PROT_BIT; cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } break; case 'f': netdev_dbg(dev, "Form Error\n"); rx_errors = true; if (skb) cf->data[2] |= CAN_ERR_PROT_FORM; break; case 'o': netdev_dbg(dev, "Rx overrun error\n"); rx_over_errors = true; rx_errors = true; if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; } break; case 'O': netdev_dbg(dev, "Tx overrun error\n"); tx_errors = true; if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_TX_OVERFLOW; } break; case 's': netdev_dbg(dev, "Stuff error\n"); rx_errors = true; if (skb) cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: if (skb) dev_kfree_skb(skb); return; } } if (rx_errors) dev->stats.rx_errors++; if (rx_over_errors) dev->stats.rx_over_errors++; if (tx_errors) dev->stats.tx_errors++; if (skb) netif_rx(skb); } static void slcan_bump(struct slcan *sl) { switch (sl->rbuff[0]) { case 'r': fallthrough; case 't': fallthrough; case 'R': fallthrough; case 'T': return slcan_bump_frame(sl); case 'e': return slcan_bump_err(sl); case 's': return slcan_bump_state(sl); default: return; } } /* parse tty input stream */ static void slcan_unesc(struct slcan *sl, unsigned char s) { if ((s == '\r') || (s == '\a')) { /* CR or BEL ends the pdu */ if (!test_and_clear_bit(SLF_ERROR, &sl->flags) && sl->rcount > 4) slcan_bump(sl); sl->rcount = 0; } else { if (!test_bit(SLF_ERROR, &sl->flags)) { if (sl->rcount < SLCAN_MTU) { sl->rbuff[sl->rcount++] = s; return; } sl->dev->stats.rx_over_errors++; set_bit(SLF_ERROR, &sl->flags); } } } /************************************************************************* * STANDARD SLCAN ENCAPSULATION * *************************************************************************/ /* Encapsulate one can_frame and stuff into a TTY queue. 
*/ static void slcan_encaps(struct slcan *sl, struct can_frame *cf) { int actual, i; unsigned char *pos; unsigned char *endpos; canid_t id = cf->can_id; pos = sl->xbuff; if (cf->can_id & CAN_RTR_FLAG) *pos = 'R'; /* becomes 'r' in standard frame format (SFF) */ else *pos = 'T'; /* becomes 't' in standard frame format (SSF) */ /* determine number of chars for the CAN-identifier */ if (cf->can_id & CAN_EFF_FLAG) { id &= CAN_EFF_MASK; endpos = pos + SLCAN_EFF_ID_LEN; } else { *pos |= 0x20; /* convert R/T to lower case for SFF */ id &= CAN_SFF_MASK; endpos = pos + SLCAN_SFF_ID_LEN; } /* build 3 (SFF) or 8 (EFF) digit CAN identifier */ pos++; while (endpos >= pos) { *endpos-- = hex_asc_upper[id & 0xf]; id >>= 4; } pos += (cf->can_id & CAN_EFF_FLAG) ? SLCAN_EFF_ID_LEN : SLCAN_SFF_ID_LEN; *pos++ = cf->len + '0'; /* RTR frames may have a dlc > 0 but they never have any data bytes */ if (!(cf->can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf->len; i++) pos = hex_byte_pack_upper(pos, cf->data[i]); sl->dev->stats.tx_bytes += cf->len; } *pos++ = '\r'; /* Order of next two lines is *very* important. * When we are sending a little amount of data, * the transfer may be completed inside the ops->write() * routine, because it's running with interrupts enabled. * In this case we *never* got WRITE_WAKEUP event, * if we did not request it before write operation. * 14 Oct 1994 Dmitry Gorodchanin. */ set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); actual = sl->tty->ops->write(sl->tty, sl->xbuff, pos - sl->xbuff); sl->xleft = (pos - sl->xbuff) - actual; sl->xhead = sl->xbuff + actual; } /* Write out any remaining transmit buffer. Scheduled when tty is writable */ static void slcan_transmit(struct work_struct *work) { struct slcan *sl = container_of(work, struct slcan, tx_work); int actual; spin_lock_bh(&sl->lock); /* First make sure we're connected. */ if (unlikely(!netif_running(sl->dev)) && likely(!test_bit(SLF_XCMD, &sl->flags))) { spin_unlock_bh(&sl->lock); return; } if (sl->xleft <= 0) { if (unlikely(test_bit(SLF_XCMD, &sl->flags))) { clear_bit(SLF_XCMD, &sl->flags); clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); spin_unlock_bh(&sl->lock); wake_up(&sl->xcmd_wait); return; } /* Now serial buffer is almost free & we can start * transmission of another packet */ sl->dev->stats.tx_packets++; clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); spin_unlock_bh(&sl->lock); netif_wake_queue(sl->dev); return; } actual = sl->tty->ops->write(sl->tty, sl->xhead, sl->xleft); sl->xleft -= actual; sl->xhead += actual; spin_unlock_bh(&sl->lock); } /* Called by the driver when there's room for more data. * Schedule the transmit. */ static void slcan_write_wakeup(struct tty_struct *tty) { struct slcan *sl = tty->disc_data; schedule_work(&sl->tx_work); } /* Send a can_frame to a TTY queue. */ static netdev_tx_t slcan_netdev_xmit(struct sk_buff *skb, struct net_device *dev) { struct slcan *sl = netdev_priv(dev); if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; spin_lock(&sl->lock); if (!netif_running(dev)) { spin_unlock(&sl->lock); netdev_warn(dev, "xmit: iface is down\n"); goto out; } if (!sl->tty) { spin_unlock(&sl->lock); goto out; } netif_stop_queue(sl->dev); slcan_encaps(sl, (struct can_frame *)skb->data); /* encaps & send */ spin_unlock(&sl->lock); skb_tx_timestamp(skb); out: kfree_skb(skb); return NETDEV_TX_OK; } /****************************************** * Routines looking at netdevice side. 
******************************************/ static int slcan_transmit_cmd(struct slcan *sl, const unsigned char *cmd) { int ret, actual, n; spin_lock(&sl->lock); if (!sl->tty) { spin_unlock(&sl->lock); return -ENODEV; } n = scnprintf(sl->xbuff, sizeof(sl->xbuff), "%s", cmd); set_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); actual = sl->tty->ops->write(sl->tty, sl->xbuff, n); sl->xleft = n - actual; sl->xhead = sl->xbuff + actual; set_bit(SLF_XCMD, &sl->flags); spin_unlock(&sl->lock); ret = wait_event_interruptible_timeout(sl->xcmd_wait, !test_bit(SLF_XCMD, &sl->flags), HZ); clear_bit(SLF_XCMD, &sl->flags); if (ret == -ERESTARTSYS) return ret; if (ret == 0) return -ETIMEDOUT; return 0; } /* Netdevice UP -> DOWN routine */ static int slcan_netdev_close(struct net_device *dev) { struct slcan *sl = netdev_priv(dev); int err; if (sl->can.bittiming.bitrate && sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) { err = slcan_transmit_cmd(sl, "C\r"); if (err) netdev_warn(dev, "failed to send close command 'C\\r'\n"); } /* TTY discipline is running. */ clear_bit(TTY_DO_WRITE_WAKEUP, &sl->tty->flags); flush_work(&sl->tx_work); netif_stop_queue(dev); sl->rcount = 0; sl->xleft = 0; close_candev(dev); sl->can.state = CAN_STATE_STOPPED; if (sl->can.bittiming.bitrate == CAN_BITRATE_UNKNOWN) sl->can.bittiming.bitrate = CAN_BITRATE_UNSET; return 0; } /* Netdevice DOWN -> UP routine */ static int slcan_netdev_open(struct net_device *dev) { struct slcan *sl = netdev_priv(dev); unsigned char cmd[SLCAN_MTU]; int err, s; /* The baud rate is not set with the command * `ip link set <iface> type can bitrate <baud>' and therefore * can.bittiming.bitrate is CAN_BITRATE_UNSET (0), causing * open_candev() to fail. So let's set to a fake value. */ if (sl->can.bittiming.bitrate == CAN_BITRATE_UNSET) sl->can.bittiming.bitrate = CAN_BITRATE_UNKNOWN; err = open_candev(dev); if (err) { netdev_err(dev, "failed to open can device\n"); return err; } if (sl->can.bittiming.bitrate != CAN_BITRATE_UNKNOWN) { for (s = 0; s < ARRAY_SIZE(slcan_bitrate_const); s++) { if (sl->can.bittiming.bitrate == slcan_bitrate_const[s]) break; } /* The CAN framework has already validate the bitrate value, * so we can avoid to check if `s' has been properly set. */ snprintf(cmd, sizeof(cmd), "C\rS%d\r", s); err = slcan_transmit_cmd(sl, cmd); if (err) { netdev_err(dev, "failed to send bitrate command 'C\\rS%d\\r'\n", s); goto cmd_transmit_failed; } if (test_bit(CF_ERR_RST, &sl->cmd_flags)) { err = slcan_transmit_cmd(sl, "F\r"); if (err) { netdev_err(dev, "failed to send error command 'F\\r'\n"); goto cmd_transmit_failed; } } if (sl->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { err = slcan_transmit_cmd(sl, "L\r"); if (err) { netdev_err(dev, "failed to send listen-only command 'L\\r'\n"); goto cmd_transmit_failed; } } else { err = slcan_transmit_cmd(sl, "O\r"); if (err) { netdev_err(dev, "failed to send open command 'O\\r'\n"); goto cmd_transmit_failed; } } } sl->can.state = CAN_STATE_ERROR_ACTIVE; netif_start_queue(dev); return 0; cmd_transmit_failed: close_candev(dev); return err; } static const struct net_device_ops slcan_netdev_ops = { .ndo_open = slcan_netdev_open, .ndo_stop = slcan_netdev_close, .ndo_start_xmit = slcan_netdev_xmit, .ndo_change_mtu = can_change_mtu, }; /****************************************** * Routines looking at TTY side. ******************************************/ /* Handle the 'receiver data ready' interrupt. 
* This function is called by the 'tty_io' module in the kernel when * a block of SLCAN data has been received, which can now be decapsulated * and sent on to some IP layer for further processing. This will not * be re-entered while running but other ldisc functions may be called * in parallel */ static void slcan_receive_buf(struct tty_struct *tty, const u8 *cp, const u8 *fp, size_t count) { struct slcan *sl = tty->disc_data; if (!netif_running(sl->dev)) return; /* Read the characters out of the buffer */ while (count--) { if (fp && *fp++) { if (!test_and_set_bit(SLF_ERROR, &sl->flags)) sl->dev->stats.rx_errors++; cp++; continue; } slcan_unesc(sl, *cp++); } } /* Open the high-level part of the SLCAN channel. * This function is called by the TTY module when the * SLCAN line discipline is called for. * * Called in process context serialized from other ldisc calls. */ static int slcan_open(struct tty_struct *tty) { struct net_device *dev; struct slcan *sl; int err; if (!capable(CAP_NET_ADMIN)) return -EPERM; if (!tty->ops->write) return -EOPNOTSUPP; dev = alloc_candev(sizeof(*sl), 1); if (!dev) return -ENFILE; sl = netdev_priv(dev); /* Configure TTY interface */ tty->receive_room = 65536; /* We don't flow control */ sl->rcount = 0; sl->xleft = 0; spin_lock_init(&sl->lock); INIT_WORK(&sl->tx_work, slcan_transmit); init_waitqueue_head(&sl->xcmd_wait); /* Configure CAN metadata */ sl->can.bitrate_const = slcan_bitrate_const; sl->can.bitrate_const_cnt = ARRAY_SIZE(slcan_bitrate_const); sl->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY; /* Configure netdev interface */ sl->dev = dev; dev->netdev_ops = &slcan_netdev_ops; dev->ethtool_ops = &slcan_ethtool_ops; /* Mark ldisc channel as alive */ sl->tty = tty; tty->disc_data = sl; err = register_candev(dev); if (err) { free_candev(dev); pr_err("can't register candev\n"); return err; } netdev_info(dev, "slcan on %s.\n", tty->name); /* TTY layer expects 0 on success */ return 0; } /* Close down a SLCAN channel. * This means flushing out any pending queues, and then returning. This * call is serialized against other ldisc functions. * Once this is called, no other ldisc function of ours is entered. * * We also use this method for a hangup event. */ static void slcan_close(struct tty_struct *tty) { struct slcan *sl = tty->disc_data; unregister_candev(sl->dev); /* * The netdev needn't be UP (so .ndo_stop() is not called). Hence make * sure this is not running before freeing it up. */ flush_work(&sl->tx_work); /* Mark channel as dead */ spin_lock_bh(&sl->lock); tty->disc_data = NULL; sl->tty = NULL; spin_unlock_bh(&sl->lock); netdev_info(sl->dev, "slcan off %s.\n", tty->name); free_candev(sl->dev); } /* Perform I/O control on an active SLCAN channel. 
*/ static int slcan_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct slcan *sl = tty->disc_data; unsigned int tmp; switch (cmd) { case SIOCGIFNAME: tmp = strlen(sl->dev->name) + 1; if (copy_to_user((void __user *)arg, sl->dev->name, tmp)) return -EFAULT; return 0; case SIOCSIFHWADDR: return -EINVAL; default: return tty_mode_ioctl(tty, cmd, arg); } } static struct tty_ldisc_ops slcan_ldisc = { .owner = THIS_MODULE, .num = N_SLCAN, .name = KBUILD_MODNAME, .open = slcan_open, .close = slcan_close, .ioctl = slcan_ioctl, .receive_buf = slcan_receive_buf, .write_wakeup = slcan_write_wakeup, }; static int __init slcan_init(void) { int status; pr_info("serial line CAN interface driver\n"); /* Fill in our line protocol discipline, and register it */ status = tty_register_ldisc(&slcan_ldisc); if (status) pr_err("can't register line discipline\n"); return status; } static void __exit slcan_exit(void) { /* This will only be called when all channels have been closed by * userspace - tty_ldisc.c takes care of the module's refcount. */ tty_unregister_ldisc(&slcan_ldisc); } module_init(slcan_init); module_exit(slcan_exit);
linux-master
drivers/net/can/slcan/slcan-core.c
// SPDX-License-Identifier: GPL-2.0+ /* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <[email protected]> * */ #include <linux/can/dev.h> #include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include "slcan.h" static const char slcan_priv_flags_strings[][ETH_GSTRING_LEN] = { #define SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN BIT(0) "err-rst-on-open", }; static void slcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_PRIV_FLAGS: memcpy(data, slcan_priv_flags_strings, sizeof(slcan_priv_flags_strings)); } } static u32 slcan_get_priv_flags(struct net_device *ndev) { u32 flags = 0; if (slcan_err_rst_on_open(ndev)) flags |= SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN; return flags; } static int slcan_set_priv_flags(struct net_device *ndev, u32 flags) { bool err_rst_op_open = !!(flags & SLCAN_PRIV_FLAGS_ERR_RST_ON_OPEN); return slcan_enable_err_rst_on_open(ndev, err_rst_op_open); } static int slcan_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_PRIV_FLAGS: return ARRAY_SIZE(slcan_priv_flags_strings); default: return -EOPNOTSUPP; } } const struct ethtool_ops slcan_ethtool_ops = { .get_strings = slcan_get_strings, .get_priv_flags = slcan_get_priv_flags, .set_priv_flags = slcan_set_priv_flags, .get_sset_count = slcan_get_sset_count, .get_ts_info = ethtool_op_get_ts_info, };
linux-master
drivers/net/can/slcan/slcan-ethtool.c
// SPDX-License-Identifier: GPL-2.0+ /* Renesas R-Car CAN device driver * * Copyright (C) 2013 Cogent Embedded, Inc. <[email protected]> * Copyright (C) 2013 Renesas Solutions Corp. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/can/dev.h> #include <linux/clk.h> #include <linux/of.h> #define RCAR_CAN_DRV_NAME "rcar_can" /* Clock Select Register settings */ enum CLKR { CLKR_CLKP1 = 0, /* Peripheral clock (clkp1) */ CLKR_CLKP2 = 1, /* Peripheral clock (clkp2) */ CLKR_CLKEXT = 3, /* Externally input clock */ }; #define RCAR_SUPPORTED_CLOCKS (BIT(CLKR_CLKP1) | BIT(CLKR_CLKP2) | \ BIT(CLKR_CLKEXT)) /* Mailbox configuration: * mailbox 60 - 63 - Rx FIFO mailboxes * mailbox 56 - 59 - Tx FIFO mailboxes * non-FIFO mailboxes are not used */ #define RCAR_CAN_N_MBX 64 /* Number of mailboxes in non-FIFO mode */ #define RCAR_CAN_RX_FIFO_MBX 60 /* Mailbox - window to Rx FIFO */ #define RCAR_CAN_TX_FIFO_MBX 56 /* Mailbox - window to Tx FIFO */ #define RCAR_CAN_FIFO_DEPTH 4 /* Mailbox registers structure */ struct rcar_can_mbox_regs { u32 id; /* IDE and RTR bits, SID and EID */ u8 stub; /* Not used */ u8 dlc; /* Data Length Code - bits [0..3] */ u8 data[8]; /* Data Bytes */ u8 tsh; /* Time Stamp Higher Byte */ u8 tsl; /* Time Stamp Lower Byte */ }; struct rcar_can_regs { struct rcar_can_mbox_regs mb[RCAR_CAN_N_MBX]; /* Mailbox registers */ u32 mkr_2_9[8]; /* Mask Registers 2-9 */ u32 fidcr[2]; /* FIFO Received ID Compare Register */ u32 mkivlr1; /* Mask Invalid Register 1 */ u32 mier1; /* Mailbox Interrupt Enable Register 1 */ u32 mkr_0_1[2]; /* Mask Registers 0-1 */ u32 mkivlr0; /* Mask Invalid Register 0*/ u32 mier0; /* Mailbox Interrupt Enable Register 0 */ u8 pad_440[0x3c0]; u8 mctl[64]; /* Message Control Registers */ u16 ctlr; /* Control Register */ u16 str; /* Status register */ u8 bcr[3]; /* Bit Configuration Register */ u8 clkr; /* Clock Select Register */ u8 rfcr; /* Receive FIFO Control Register */ u8 rfpcr; /* Receive FIFO Pointer Control Register */ u8 tfcr; /* Transmit FIFO Control Register */ u8 tfpcr; /* Transmit FIFO Pointer Control Register */ u8 eier; /* Error Interrupt Enable Register */ u8 eifr; /* Error Interrupt Factor Judge Register */ u8 recr; /* Receive Error Count Register */ u8 tecr; /* Transmit Error Count Register */ u8 ecsr; /* Error Code Store Register */ u8 cssr; /* Channel Search Support Register */ u8 mssr; /* Mailbox Search Status Register */ u8 msmr; /* Mailbox Search Mode Register */ u16 tsr; /* Time Stamp Register */ u8 afsr; /* Acceptance Filter Support Register */ u8 pad_857; u8 tcr; /* Test Control Register */ u8 pad_859[7]; u8 ier; /* Interrupt Enable Register */ u8 isr; /* Interrupt Status Register */ u8 pad_862; u8 mbsmr; /* Mailbox Search Mask Register */ }; struct rcar_can_priv { struct can_priv can; /* Must be the first member! 
*/ struct net_device *ndev; struct napi_struct napi; struct rcar_can_regs __iomem *regs; struct clk *clk; struct clk *can_clk; u32 tx_head; u32 tx_tail; u8 clock_select; u8 ier; }; static const struct can_bittiming_const rcar_can_bittiming_const = { .name = RCAR_CAN_DRV_NAME, .tseg1_min = 4, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; /* Control Register bits */ #define RCAR_CAN_CTLR_BOM (3 << 11) /* Bus-Off Recovery Mode Bits */ #define RCAR_CAN_CTLR_BOM_ENT (1 << 11) /* Entry to halt mode */ /* at bus-off entry */ #define RCAR_CAN_CTLR_SLPM (1 << 10) #define RCAR_CAN_CTLR_CANM (3 << 8) /* Operating Mode Select Bit */ #define RCAR_CAN_CTLR_CANM_HALT (1 << 9) #define RCAR_CAN_CTLR_CANM_RESET (1 << 8) #define RCAR_CAN_CTLR_CANM_FORCE_RESET (3 << 8) #define RCAR_CAN_CTLR_MLM (1 << 3) /* Message Lost Mode Select */ #define RCAR_CAN_CTLR_IDFM (3 << 1) /* ID Format Mode Select Bits */ #define RCAR_CAN_CTLR_IDFM_MIXED (1 << 2) /* Mixed ID mode */ #define RCAR_CAN_CTLR_MBM (1 << 0) /* Mailbox Mode select */ /* Status Register bits */ #define RCAR_CAN_STR_RSTST (1 << 8) /* Reset Status Bit */ /* FIFO Received ID Compare Registers 0 and 1 bits */ #define RCAR_CAN_FIDCR_IDE (1 << 31) /* ID Extension Bit */ #define RCAR_CAN_FIDCR_RTR (1 << 30) /* Remote Transmission Request Bit */ /* Receive FIFO Control Register bits */ #define RCAR_CAN_RFCR_RFEST (1 << 7) /* Receive FIFO Empty Status Flag */ #define RCAR_CAN_RFCR_RFE (1 << 0) /* Receive FIFO Enable */ /* Transmit FIFO Control Register bits */ #define RCAR_CAN_TFCR_TFUST (7 << 1) /* Transmit FIFO Unsent Message */ /* Number Status Bits */ #define RCAR_CAN_TFCR_TFUST_SHIFT 1 /* Offset of Transmit FIFO Unsent */ /* Message Number Status Bits */ #define RCAR_CAN_TFCR_TFE (1 << 0) /* Transmit FIFO Enable */ #define RCAR_CAN_N_RX_MKREGS1 2 /* Number of mask registers */ /* for Rx mailboxes 0-31 */ #define RCAR_CAN_N_RX_MKREGS2 8 /* Bit Configuration Register settings */ #define RCAR_CAN_BCR_TSEG1(x) (((x) & 0x0f) << 20) #define RCAR_CAN_BCR_BPR(x) (((x) & 0x3ff) << 8) #define RCAR_CAN_BCR_SJW(x) (((x) & 0x3) << 4) #define RCAR_CAN_BCR_TSEG2(x) ((x) & 0x07) /* Mailbox and Mask Registers bits */ #define RCAR_CAN_IDE (1 << 31) #define RCAR_CAN_RTR (1 << 30) #define RCAR_CAN_SID_SHIFT 18 /* Mailbox Interrupt Enable Register 1 bits */ #define RCAR_CAN_MIER1_RXFIE (1 << 28) /* Receive FIFO Interrupt Enable */ #define RCAR_CAN_MIER1_TXFIE (1 << 24) /* Transmit FIFO Interrupt Enable */ /* Interrupt Enable Register bits */ #define RCAR_CAN_IER_ERSIE (1 << 5) /* Error (ERS) Interrupt Enable Bit */ #define RCAR_CAN_IER_RXFIE (1 << 4) /* Reception FIFO Interrupt */ /* Enable Bit */ #define RCAR_CAN_IER_TXFIE (1 << 3) /* Transmission FIFO Interrupt */ /* Enable Bit */ /* Interrupt Status Register bits */ #define RCAR_CAN_ISR_ERSF (1 << 5) /* Error (ERS) Interrupt Status Bit */ #define RCAR_CAN_ISR_RXFF (1 << 4) /* Reception FIFO Interrupt */ /* Status Bit */ #define RCAR_CAN_ISR_TXFF (1 << 3) /* Transmission FIFO Interrupt */ /* Status Bit */ /* Error Interrupt Enable Register bits */ #define RCAR_CAN_EIER_BLIE (1 << 7) /* Bus Lock Interrupt Enable */ #define RCAR_CAN_EIER_OLIE (1 << 6) /* Overload Frame Transmit */ /* Interrupt Enable */ #define RCAR_CAN_EIER_ORIE (1 << 5) /* Receive Overrun Interrupt Enable */ #define RCAR_CAN_EIER_BORIE (1 << 4) /* Bus-Off Recovery Interrupt Enable */ #define RCAR_CAN_EIER_BOEIE (1 << 3) /* Bus-Off Entry Interrupt Enable */ #define RCAR_CAN_EIER_EPIE (1 << 2) /* 
Error Passive Interrupt Enable */ #define RCAR_CAN_EIER_EWIE (1 << 1) /* Error Warning Interrupt Enable */ #define RCAR_CAN_EIER_BEIE (1 << 0) /* Bus Error Interrupt Enable */ /* Error Interrupt Factor Judge Register bits */ #define RCAR_CAN_EIFR_BLIF (1 << 7) /* Bus Lock Detect Flag */ #define RCAR_CAN_EIFR_OLIF (1 << 6) /* Overload Frame Transmission */ /* Detect Flag */ #define RCAR_CAN_EIFR_ORIF (1 << 5) /* Receive Overrun Detect Flag */ #define RCAR_CAN_EIFR_BORIF (1 << 4) /* Bus-Off Recovery Detect Flag */ #define RCAR_CAN_EIFR_BOEIF (1 << 3) /* Bus-Off Entry Detect Flag */ #define RCAR_CAN_EIFR_EPIF (1 << 2) /* Error Passive Detect Flag */ #define RCAR_CAN_EIFR_EWIF (1 << 1) /* Error Warning Detect Flag */ #define RCAR_CAN_EIFR_BEIF (1 << 0) /* Bus Error Detect Flag */ /* Error Code Store Register bits */ #define RCAR_CAN_ECSR_EDPM (1 << 7) /* Error Display Mode Select Bit */ #define RCAR_CAN_ECSR_ADEF (1 << 6) /* ACK Delimiter Error Flag */ #define RCAR_CAN_ECSR_BE0F (1 << 5) /* Bit Error (dominant) Flag */ #define RCAR_CAN_ECSR_BE1F (1 << 4) /* Bit Error (recessive) Flag */ #define RCAR_CAN_ECSR_CEF (1 << 3) /* CRC Error Flag */ #define RCAR_CAN_ECSR_AEF (1 << 2) /* ACK Error Flag */ #define RCAR_CAN_ECSR_FEF (1 << 1) /* Form Error Flag */ #define RCAR_CAN_ECSR_SEF (1 << 0) /* Stuff Error Flag */ #define RCAR_CAN_NAPI_WEIGHT 4 #define MAX_STR_READS 0x100 static void tx_failure_cleanup(struct net_device *ndev) { int i; for (i = 0; i < RCAR_CAN_FIFO_DEPTH; i++) can_free_echo_skb(ndev, i, NULL); } static void rcar_can_error(struct net_device *ndev) { struct rcar_can_priv *priv = netdev_priv(ndev); struct can_frame *cf; struct sk_buff *skb; u8 eifr, txerr = 0, rxerr = 0; /* Propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(ndev, &cf); eifr = readb(&priv->regs->eifr); if (eifr & (RCAR_CAN_EIFR_EWIF | RCAR_CAN_EIFR_EPIF)) { txerr = readb(&priv->regs->tecr); rxerr = readb(&priv->regs->recr); if (skb) cf->can_id |= CAN_ERR_CRTL; } if (eifr & RCAR_CAN_EIFR_BEIF) { int rx_errors = 0, tx_errors = 0; u8 ecsr; netdev_dbg(priv->ndev, "Bus error interrupt:\n"); if (skb) cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; ecsr = readb(&priv->regs->ecsr); if (ecsr & RCAR_CAN_ECSR_ADEF) { netdev_dbg(priv->ndev, "ACK Delimiter Error\n"); tx_errors++; writeb(~RCAR_CAN_ECSR_ADEF, &priv->regs->ecsr); if (skb) cf->data[3] = CAN_ERR_PROT_LOC_ACK_DEL; } if (ecsr & RCAR_CAN_ECSR_BE0F) { netdev_dbg(priv->ndev, "Bit Error (dominant)\n"); tx_errors++; writeb(~RCAR_CAN_ECSR_BE0F, &priv->regs->ecsr); if (skb) cf->data[2] |= CAN_ERR_PROT_BIT0; } if (ecsr & RCAR_CAN_ECSR_BE1F) { netdev_dbg(priv->ndev, "Bit Error (recessive)\n"); tx_errors++; writeb(~RCAR_CAN_ECSR_BE1F, &priv->regs->ecsr); if (skb) cf->data[2] |= CAN_ERR_PROT_BIT1; } if (ecsr & RCAR_CAN_ECSR_CEF) { netdev_dbg(priv->ndev, "CRC Error\n"); rx_errors++; writeb(~RCAR_CAN_ECSR_CEF, &priv->regs->ecsr); if (skb) cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; } if (ecsr & RCAR_CAN_ECSR_AEF) { netdev_dbg(priv->ndev, "ACK Error\n"); tx_errors++; writeb(~RCAR_CAN_ECSR_AEF, &priv->regs->ecsr); if (skb) { cf->can_id |= CAN_ERR_ACK; cf->data[3] = CAN_ERR_PROT_LOC_ACK; } } if (ecsr & RCAR_CAN_ECSR_FEF) { netdev_dbg(priv->ndev, "Form Error\n"); rx_errors++; writeb(~RCAR_CAN_ECSR_FEF, &priv->regs->ecsr); if (skb) cf->data[2] |= CAN_ERR_PROT_FORM; } if (ecsr & RCAR_CAN_ECSR_SEF) { netdev_dbg(priv->ndev, "Stuff Error\n"); rx_errors++; writeb(~RCAR_CAN_ECSR_SEF, &priv->regs->ecsr); if (skb) cf->data[2] |= CAN_ERR_PROT_STUFF; } 
priv->can.can_stats.bus_error++; ndev->stats.rx_errors += rx_errors; ndev->stats.tx_errors += tx_errors; writeb(~RCAR_CAN_EIFR_BEIF, &priv->regs->eifr); } if (eifr & RCAR_CAN_EIFR_EWIF) { netdev_dbg(priv->ndev, "Error warning interrupt\n"); priv->can.state = CAN_STATE_ERROR_WARNING; priv->can.can_stats.error_warning++; /* Clear interrupt condition */ writeb(~RCAR_CAN_EIFR_EWIF, &priv->regs->eifr); if (skb) cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; } if (eifr & RCAR_CAN_EIFR_EPIF) { netdev_dbg(priv->ndev, "Error passive interrupt\n"); priv->can.state = CAN_STATE_ERROR_PASSIVE; priv->can.can_stats.error_passive++; /* Clear interrupt condition */ writeb(~RCAR_CAN_EIFR_EPIF, &priv->regs->eifr); if (skb) cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } if (eifr & RCAR_CAN_EIFR_BOEIF) { netdev_dbg(priv->ndev, "Bus-off entry interrupt\n"); tx_failure_cleanup(ndev); priv->ier = RCAR_CAN_IER_ERSIE; writeb(priv->ier, &priv->regs->ier); priv->can.state = CAN_STATE_BUS_OFF; /* Clear interrupt condition */ writeb(~RCAR_CAN_EIFR_BOEIF, &priv->regs->eifr); priv->can.can_stats.bus_off++; can_bus_off(ndev); if (skb) cf->can_id |= CAN_ERR_BUSOFF; } else if (skb) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; } if (eifr & RCAR_CAN_EIFR_ORIF) { netdev_dbg(priv->ndev, "Receive overrun error interrupt\n"); ndev->stats.rx_over_errors++; ndev->stats.rx_errors++; writeb(~RCAR_CAN_EIFR_ORIF, &priv->regs->eifr); if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; } } if (eifr & RCAR_CAN_EIFR_OLIF) { netdev_dbg(priv->ndev, "Overload Frame Transmission error interrupt\n"); ndev->stats.rx_over_errors++; ndev->stats.rx_errors++; writeb(~RCAR_CAN_EIFR_OLIF, &priv->regs->eifr); if (skb) { cf->can_id |= CAN_ERR_PROT; cf->data[2] |= CAN_ERR_PROT_OVERLOAD; } } if (skb) netif_rx(skb); } static void rcar_can_tx_done(struct net_device *ndev) { struct rcar_can_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; u8 isr; while (1) { u8 unsent = readb(&priv->regs->tfcr); unsent = (unsent & RCAR_CAN_TFCR_TFUST) >> RCAR_CAN_TFCR_TFUST_SHIFT; if (priv->tx_head - priv->tx_tail <= unsent) break; stats->tx_packets++; stats->tx_bytes += can_get_echo_skb(ndev, priv->tx_tail % RCAR_CAN_FIFO_DEPTH, NULL); priv->tx_tail++; netif_wake_queue(ndev); } /* Clear interrupt */ isr = readb(&priv->regs->isr); writeb(isr & ~RCAR_CAN_ISR_TXFF, &priv->regs->isr); } static irqreturn_t rcar_can_interrupt(int irq, void *dev_id) { struct net_device *ndev = dev_id; struct rcar_can_priv *priv = netdev_priv(ndev); u8 isr; isr = readb(&priv->regs->isr); if (!(isr & priv->ier)) return IRQ_NONE; if (isr & RCAR_CAN_ISR_ERSF) rcar_can_error(ndev); if (isr & RCAR_CAN_ISR_TXFF) rcar_can_tx_done(ndev); if (isr & RCAR_CAN_ISR_RXFF) { if (napi_schedule_prep(&priv->napi)) { /* Disable Rx FIFO interrupts */ priv->ier &= ~RCAR_CAN_IER_RXFIE; writeb(priv->ier, &priv->regs->ier); __napi_schedule(&priv->napi); } } return IRQ_HANDLED; } static void rcar_can_set_bittiming(struct net_device *dev) { struct rcar_can_priv *priv = netdev_priv(dev); struct can_bittiming *bt = &priv->can.bittiming; u32 bcr; bcr = RCAR_CAN_BCR_TSEG1(bt->phase_seg1 + bt->prop_seg - 1) | RCAR_CAN_BCR_BPR(bt->brp - 1) | RCAR_CAN_BCR_SJW(bt->sjw - 1) | RCAR_CAN_BCR_TSEG2(bt->phase_seg2 - 1); /* Don't overwrite CLKR with 32-bit BCR access; CLKR has 8-bit access. 
* All the registers are big-endian but they get byte-swapped on 32-bit * read/write (but not on 8-bit, contrary to the manuals)... */ writel((bcr << 8) | priv->clock_select, &priv->regs->bcr); } static void rcar_can_start(struct net_device *ndev) { struct rcar_can_priv *priv = netdev_priv(ndev); u16 ctlr; int i; /* Set controller to known mode: * - FIFO mailbox mode * - accept all messages * - overrun mode * CAN is in sleep mode after MCU hardware or software reset. */ ctlr = readw(&priv->regs->ctlr); ctlr &= ~RCAR_CAN_CTLR_SLPM; writew(ctlr, &priv->regs->ctlr); /* Go to reset mode */ ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET; writew(ctlr, &priv->regs->ctlr); for (i = 0; i < MAX_STR_READS; i++) { if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST) break; } rcar_can_set_bittiming(ndev); ctlr |= RCAR_CAN_CTLR_IDFM_MIXED; /* Select mixed ID mode */ ctlr |= RCAR_CAN_CTLR_BOM_ENT; /* Entry to halt mode automatically */ /* at bus-off */ ctlr |= RCAR_CAN_CTLR_MBM; /* Select FIFO mailbox mode */ ctlr |= RCAR_CAN_CTLR_MLM; /* Overrun mode */ writew(ctlr, &priv->regs->ctlr); /* Accept all SID and EID */ writel(0, &priv->regs->mkr_2_9[6]); writel(0, &priv->regs->mkr_2_9[7]); /* In FIFO mailbox mode, write "0" to bits 24 to 31 */ writel(0, &priv->regs->mkivlr1); /* Accept all frames */ writel(0, &priv->regs->fidcr[0]); writel(RCAR_CAN_FIDCR_IDE | RCAR_CAN_FIDCR_RTR, &priv->regs->fidcr[1]); /* Enable and configure FIFO mailbox interrupts */ writel(RCAR_CAN_MIER1_RXFIE | RCAR_CAN_MIER1_TXFIE, &priv->regs->mier1); priv->ier = RCAR_CAN_IER_ERSIE | RCAR_CAN_IER_RXFIE | RCAR_CAN_IER_TXFIE; writeb(priv->ier, &priv->regs->ier); /* Accumulate error codes */ writeb(RCAR_CAN_ECSR_EDPM, &priv->regs->ecsr); /* Enable error interrupts */ writeb(RCAR_CAN_EIER_EWIE | RCAR_CAN_EIER_EPIE | RCAR_CAN_EIER_BOEIE | (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING ? 
RCAR_CAN_EIER_BEIE : 0) | RCAR_CAN_EIER_ORIE | RCAR_CAN_EIER_OLIE, &priv->regs->eier); priv->can.state = CAN_STATE_ERROR_ACTIVE; /* Go to operation mode */ writew(ctlr & ~RCAR_CAN_CTLR_CANM, &priv->regs->ctlr); for (i = 0; i < MAX_STR_READS; i++) { if (!(readw(&priv->regs->str) & RCAR_CAN_STR_RSTST)) break; } /* Enable Rx and Tx FIFO */ writeb(RCAR_CAN_RFCR_RFE, &priv->regs->rfcr); writeb(RCAR_CAN_TFCR_TFE, &priv->regs->tfcr); } static int rcar_can_open(struct net_device *ndev) { struct rcar_can_priv *priv = netdev_priv(ndev); int err; err = clk_prepare_enable(priv->clk); if (err) { netdev_err(ndev, "failed to enable peripheral clock, error %d\n", err); goto out; } err = clk_prepare_enable(priv->can_clk); if (err) { netdev_err(ndev, "failed to enable CAN clock, error %d\n", err); goto out_clock; } err = open_candev(ndev); if (err) { netdev_err(ndev, "open_candev() failed, error %d\n", err); goto out_can_clock; } napi_enable(&priv->napi); err = request_irq(ndev->irq, rcar_can_interrupt, 0, ndev->name, ndev); if (err) { netdev_err(ndev, "request_irq(%d) failed, error %d\n", ndev->irq, err); goto out_close; } rcar_can_start(ndev); netif_start_queue(ndev); return 0; out_close: napi_disable(&priv->napi); close_candev(ndev); out_can_clock: clk_disable_unprepare(priv->can_clk); out_clock: clk_disable_unprepare(priv->clk); out: return err; } static void rcar_can_stop(struct net_device *ndev) { struct rcar_can_priv *priv = netdev_priv(ndev); u16 ctlr; int i; /* Go to (force) reset mode */ ctlr = readw(&priv->regs->ctlr); ctlr |= RCAR_CAN_CTLR_CANM_FORCE_RESET; writew(ctlr, &priv->regs->ctlr); for (i = 0; i < MAX_STR_READS; i++) { if (readw(&priv->regs->str) & RCAR_CAN_STR_RSTST) break; } writel(0, &priv->regs->mier0); writel(0, &priv->regs->mier1); writeb(0, &priv->regs->ier); writeb(0, &priv->regs->eier); /* Go to sleep mode */ ctlr |= RCAR_CAN_CTLR_SLPM; writew(ctlr, &priv->regs->ctlr); priv->can.state = CAN_STATE_STOPPED; } static int rcar_can_close(struct net_device *ndev) { struct rcar_can_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); rcar_can_stop(ndev); free_irq(ndev->irq, ndev); napi_disable(&priv->napi); clk_disable_unprepare(priv->can_clk); clk_disable_unprepare(priv->clk); close_candev(ndev); return 0; } static netdev_tx_t rcar_can_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct rcar_can_priv *priv = netdev_priv(ndev); struct can_frame *cf = (struct can_frame *)skb->data; u32 data, i; if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */ data = (cf->can_id & CAN_EFF_MASK) | RCAR_CAN_IDE; else /* Standard frame format */ data = (cf->can_id & CAN_SFF_MASK) << RCAR_CAN_SID_SHIFT; if (cf->can_id & CAN_RTR_FLAG) { /* Remote transmission request */ data |= RCAR_CAN_RTR; } else { for (i = 0; i < cf->len; i++) writeb(cf->data[i], &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].data[i]); } writel(data, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].id); writeb(cf->len, &priv->regs->mb[RCAR_CAN_TX_FIFO_MBX].dlc); can_put_echo_skb(skb, ndev, priv->tx_head % RCAR_CAN_FIFO_DEPTH, 0); priv->tx_head++; /* Start Tx: write 0xff to the TFPCR register to increment * the CPU-side pointer for the transmit FIFO to the next * mailbox location */ writeb(0xff, &priv->regs->tfpcr); /* Stop the queue if we've filled all FIFO entries */ if (priv->tx_head - priv->tx_tail >= RCAR_CAN_FIFO_DEPTH) netif_stop_queue(ndev); return NETDEV_TX_OK; } static const struct net_device_ops rcar_can_netdev_ops = { .ndo_open = rcar_can_open, .ndo_stop = 
rcar_can_close, .ndo_start_xmit = rcar_can_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops rcar_can_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static void rcar_can_rx_pkt(struct rcar_can_priv *priv) { struct net_device_stats *stats = &priv->ndev->stats; struct can_frame *cf; struct sk_buff *skb; u32 data; u8 dlc; skb = alloc_can_skb(priv->ndev, &cf); if (!skb) { stats->rx_dropped++; return; } data = readl(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].id); if (data & RCAR_CAN_IDE) cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG; else cf->can_id = (data >> RCAR_CAN_SID_SHIFT) & CAN_SFF_MASK; dlc = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].dlc); cf->len = can_cc_dlc2len(dlc); if (data & RCAR_CAN_RTR) { cf->can_id |= CAN_RTR_FLAG; } else { for (dlc = 0; dlc < cf->len; dlc++) cf->data[dlc] = readb(&priv->regs->mb[RCAR_CAN_RX_FIFO_MBX].data[dlc]); stats->rx_bytes += cf->len; } stats->rx_packets++; netif_receive_skb(skb); } static int rcar_can_rx_poll(struct napi_struct *napi, int quota) { struct rcar_can_priv *priv = container_of(napi, struct rcar_can_priv, napi); int num_pkts; for (num_pkts = 0; num_pkts < quota; num_pkts++) { u8 rfcr, isr; isr = readb(&priv->regs->isr); /* Clear interrupt bit */ if (isr & RCAR_CAN_ISR_RXFF) writeb(isr & ~RCAR_CAN_ISR_RXFF, &priv->regs->isr); rfcr = readb(&priv->regs->rfcr); if (rfcr & RCAR_CAN_RFCR_RFEST) break; rcar_can_rx_pkt(priv); /* Write 0xff to the RFPCR register to increment * the CPU-side pointer for the receive FIFO * to the next mailbox location */ writeb(0xff, &priv->regs->rfpcr); } /* All packets processed */ if (num_pkts < quota) { napi_complete_done(napi, num_pkts); priv->ier |= RCAR_CAN_IER_RXFIE; writeb(priv->ier, &priv->regs->ier); } return num_pkts; } static int rcar_can_do_set_mode(struct net_device *ndev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: rcar_can_start(ndev); netif_wake_queue(ndev); return 0; default: return -EOPNOTSUPP; } } static int rcar_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct rcar_can_priv *priv = netdev_priv(dev); int err; err = clk_prepare_enable(priv->clk); if (err) return err; bec->txerr = readb(&priv->regs->tecr); bec->rxerr = readb(&priv->regs->recr); clk_disable_unprepare(priv->clk); return 0; } static const char * const clock_names[] = { [CLKR_CLKP1] = "clkp1", [CLKR_CLKP2] = "clkp2", [CLKR_CLKEXT] = "can_clk", }; static int rcar_can_probe(struct platform_device *pdev) { struct rcar_can_priv *priv; struct net_device *ndev; void __iomem *addr; u32 clock_select = CLKR_CLKP1; int err = -ENODEV; int irq; of_property_read_u32(pdev->dev.of_node, "renesas,can-clock-select", &clock_select); irq = platform_get_irq(pdev, 0); if (irq < 0) { err = irq; goto fail; } addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) { err = PTR_ERR(addr); goto fail; } ndev = alloc_candev(sizeof(struct rcar_can_priv), RCAR_CAN_FIFO_DEPTH); if (!ndev) { dev_err(&pdev->dev, "alloc_candev() failed\n"); err = -ENOMEM; goto fail; } priv = netdev_priv(ndev); priv->clk = devm_clk_get(&pdev->dev, "clkp1"); if (IS_ERR(priv->clk)) { err = PTR_ERR(priv->clk); dev_err(&pdev->dev, "cannot get peripheral clock, error %d\n", err); goto fail_clk; } if (!(BIT(clock_select) & RCAR_SUPPORTED_CLOCKS)) { err = -EINVAL; dev_err(&pdev->dev, "invalid CAN clock selected\n"); goto fail_clk; } priv->can_clk = devm_clk_get(&pdev->dev, clock_names[clock_select]); if (IS_ERR(priv->can_clk)) { err = PTR_ERR(priv->can_clk); dev_err(&pdev->dev, "cannot get 
CAN clock, error %d\n", err); goto fail_clk; } ndev->netdev_ops = &rcar_can_netdev_ops; ndev->ethtool_ops = &rcar_can_ethtool_ops; ndev->irq = irq; ndev->flags |= IFF_ECHO; priv->ndev = ndev; priv->regs = addr; priv->clock_select = clock_select; priv->can.clock.freq = clk_get_rate(priv->can_clk); priv->can.bittiming_const = &rcar_can_bittiming_const; priv->can.do_set_mode = rcar_can_do_set_mode; priv->can.do_get_berr_counter = rcar_can_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; platform_set_drvdata(pdev, ndev); SET_NETDEV_DEV(ndev, &pdev->dev); netif_napi_add_weight(ndev, &priv->napi, rcar_can_rx_poll, RCAR_CAN_NAPI_WEIGHT); err = register_candev(ndev); if (err) { dev_err(&pdev->dev, "register_candev() failed, error %d\n", err); goto fail_candev; } dev_info(&pdev->dev, "device registered (IRQ%d)\n", ndev->irq); return 0; fail_candev: netif_napi_del(&priv->napi); fail_clk: free_candev(ndev); fail: return err; } static void rcar_can_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct rcar_can_priv *priv = netdev_priv(ndev); unregister_candev(ndev); netif_napi_del(&priv->napi); free_candev(ndev); } static int __maybe_unused rcar_can_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct rcar_can_priv *priv = netdev_priv(ndev); u16 ctlr; if (!netif_running(ndev)) return 0; netif_stop_queue(ndev); netif_device_detach(ndev); ctlr = readw(&priv->regs->ctlr); ctlr |= RCAR_CAN_CTLR_CANM_HALT; writew(ctlr, &priv->regs->ctlr); ctlr |= RCAR_CAN_CTLR_SLPM; writew(ctlr, &priv->regs->ctlr); priv->can.state = CAN_STATE_SLEEPING; clk_disable(priv->clk); return 0; } static int __maybe_unused rcar_can_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct rcar_can_priv *priv = netdev_priv(ndev); u16 ctlr; int err; if (!netif_running(ndev)) return 0; err = clk_enable(priv->clk); if (err) { netdev_err(ndev, "clk_enable() failed, error %d\n", err); return err; } ctlr = readw(&priv->regs->ctlr); ctlr &= ~RCAR_CAN_CTLR_SLPM; writew(ctlr, &priv->regs->ctlr); ctlr &= ~RCAR_CAN_CTLR_CANM; writew(ctlr, &priv->regs->ctlr); priv->can.state = CAN_STATE_ERROR_ACTIVE; netif_device_attach(ndev); netif_start_queue(ndev); return 0; } static SIMPLE_DEV_PM_OPS(rcar_can_pm_ops, rcar_can_suspend, rcar_can_resume); static const struct of_device_id rcar_can_of_table[] __maybe_unused = { { .compatible = "renesas,can-r8a7778" }, { .compatible = "renesas,can-r8a7779" }, { .compatible = "renesas,can-r8a7790" }, { .compatible = "renesas,can-r8a7791" }, { .compatible = "renesas,rcar-gen1-can" }, { .compatible = "renesas,rcar-gen2-can" }, { .compatible = "renesas,rcar-gen3-can" }, { } }; MODULE_DEVICE_TABLE(of, rcar_can_of_table); static struct platform_driver rcar_can_driver = { .driver = { .name = RCAR_CAN_DRV_NAME, .of_match_table = of_match_ptr(rcar_can_of_table), .pm = &rcar_can_pm_ops, }, .probe = rcar_can_probe, .remove_new = rcar_can_remove, }; module_platform_driver(rcar_can_driver); MODULE_AUTHOR("Cogent Embedded, Inc."); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CAN driver for Renesas R-Car SoC"); MODULE_ALIAS("platform:" RCAR_CAN_DRV_NAME);
linux-master
drivers/net/can/rcar/rcar_can.c
// SPDX-License-Identifier: GPL-2.0+ /* Renesas R-Car CAN FD device driver * * Copyright (C) 2015 Renesas Electronics Corp. */ /* The R-Car CAN FD controller can operate in either one of the below two modes * - CAN FD only mode * - Classical CAN (CAN 2.0) only mode * * This driver puts the controller in CAN FD only mode by default. In this * mode, the controller acts as a CAN FD node that can also interoperate with * CAN 2.0 nodes. * * To switch the controller to Classical CAN (CAN 2.0) only mode, add * "renesas,no-can-fd" optional property to the device tree node. A h/w reset is * also required to switch modes. * * Note: The h/w manual register naming convention is clumsy and not acceptable * to use as it is in the driver. However, those names are added as comments * wherever it is modified to a readable name. */ #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/can/dev.h> #include <linux/clk.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/interrupt.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/reset.h> #include <linux/types.h> #define RCANFD_DRV_NAME "rcar_canfd" /* Global register bits */ /* RSCFDnCFDGRMCFG */ #define RCANFD_GRMCFG_RCMC BIT(0) /* RSCFDnCFDGCFG / RSCFDnGCFG */ #define RCANFD_GCFG_EEFE BIT(6) #define RCANFD_GCFG_CMPOC BIT(5) /* CAN FD only */ #define RCANFD_GCFG_DCS BIT(4) #define RCANFD_GCFG_DCE BIT(1) #define RCANFD_GCFG_TPRI BIT(0) /* RSCFDnCFDGCTR / RSCFDnGCTR */ #define RCANFD_GCTR_TSRST BIT(16) #define RCANFD_GCTR_CFMPOFIE BIT(11) /* CAN FD only */ #define RCANFD_GCTR_THLEIE BIT(10) #define RCANFD_GCTR_MEIE BIT(9) #define RCANFD_GCTR_DEIE BIT(8) #define RCANFD_GCTR_GSLPR BIT(2) #define RCANFD_GCTR_GMDC_MASK (0x3) #define RCANFD_GCTR_GMDC_GOPM (0x0) #define RCANFD_GCTR_GMDC_GRESET (0x1) #define RCANFD_GCTR_GMDC_GTEST (0x2) /* RSCFDnCFDGSTS / RSCFDnGSTS */ #define RCANFD_GSTS_GRAMINIT BIT(3) #define RCANFD_GSTS_GSLPSTS BIT(2) #define RCANFD_GSTS_GHLTSTS BIT(1) #define RCANFD_GSTS_GRSTSTS BIT(0) /* Non-operational status */ #define RCANFD_GSTS_GNOPM (BIT(0) | BIT(1) | BIT(2) | BIT(3)) /* RSCFDnCFDGERFL / RSCFDnGERFL */ #define RCANFD_GERFL_EEF0_7 GENMASK(23, 16) #define RCANFD_GERFL_EEF(ch) BIT(16 + (ch)) #define RCANFD_GERFL_CMPOF BIT(3) /* CAN FD only */ #define RCANFD_GERFL_THLES BIT(2) #define RCANFD_GERFL_MES BIT(1) #define RCANFD_GERFL_DEF BIT(0) #define RCANFD_GERFL_ERR(gpriv, x) \ ((x) & (reg_gen4(gpriv, RCANFD_GERFL_EEF0_7, \ RCANFD_GERFL_EEF(0) | RCANFD_GERFL_EEF(1)) | \ RCANFD_GERFL_MES | \ ((gpriv)->fdmode ? 
RCANFD_GERFL_CMPOF : 0))) /* AFL Rx rules registers */ /* RSCFDnCFDGAFLCFG0 / RSCFDnGAFLCFG0 */ #define RCANFD_GAFLCFG_SETRNC(gpriv, n, x) \ (((x) & reg_gen4(gpriv, 0x1ff, 0xff)) << \ (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) #define RCANFD_GAFLCFG_GETRNC(gpriv, n, x) \ (((x) >> (reg_gen4(gpriv, 16, 24) - ((n) & 1) * reg_gen4(gpriv, 16, 8))) & \ reg_gen4(gpriv, 0x1ff, 0xff)) /* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */ #define RCANFD_GAFLECTR_AFLDAE BIT(8) #define RCANFD_GAFLECTR_AFLPN(gpriv, x) ((x) & reg_gen4(gpriv, 0x7f, 0x1f)) /* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */ #define RCANFD_GAFLID_GAFLLB BIT(29) /* RSCFDnCFDGAFLP1_j / RSCFDnGAFLP1_j */ #define RCANFD_GAFLP1_GAFLFDP(x) (1 << (x)) /* Channel register bits */ /* RSCFDnCmCFG - Classical CAN only */ #define RCANFD_CFG_SJW(x) (((x) & 0x3) << 24) #define RCANFD_CFG_TSEG2(x) (((x) & 0x7) << 20) #define RCANFD_CFG_TSEG1(x) (((x) & 0xf) << 16) #define RCANFD_CFG_BRP(x) (((x) & 0x3ff) << 0) /* RSCFDnCFDCmNCFG - CAN FD only */ #define RCANFD_NCFG_NTSEG2(gpriv, x) \ (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 25, 24)) #define RCANFD_NCFG_NTSEG1(gpriv, x) \ (((x) & reg_gen4(gpriv, 0xff, 0x7f)) << reg_gen4(gpriv, 17, 16)) #define RCANFD_NCFG_NSJW(gpriv, x) \ (((x) & reg_gen4(gpriv, 0x7f, 0x1f)) << reg_gen4(gpriv, 10, 11)) #define RCANFD_NCFG_NBRP(x) (((x) & 0x3ff) << 0) /* RSCFDnCFDCmCTR / RSCFDnCmCTR */ #define RCANFD_CCTR_CTME BIT(24) #define RCANFD_CCTR_ERRD BIT(23) #define RCANFD_CCTR_BOM_MASK (0x3 << 21) #define RCANFD_CCTR_BOM_ISO (0x0 << 21) #define RCANFD_CCTR_BOM_BENTRY (0x1 << 21) #define RCANFD_CCTR_BOM_BEND (0x2 << 21) #define RCANFD_CCTR_TDCVFIE BIT(19) #define RCANFD_CCTR_SOCOIE BIT(18) #define RCANFD_CCTR_EOCOIE BIT(17) #define RCANFD_CCTR_TAIE BIT(16) #define RCANFD_CCTR_ALIE BIT(15) #define RCANFD_CCTR_BLIE BIT(14) #define RCANFD_CCTR_OLIE BIT(13) #define RCANFD_CCTR_BORIE BIT(12) #define RCANFD_CCTR_BOEIE BIT(11) #define RCANFD_CCTR_EPIE BIT(10) #define RCANFD_CCTR_EWIE BIT(9) #define RCANFD_CCTR_BEIE BIT(8) #define RCANFD_CCTR_CSLPR BIT(2) #define RCANFD_CCTR_CHMDC_MASK (0x3) #define RCANFD_CCTR_CHDMC_COPM (0x0) #define RCANFD_CCTR_CHDMC_CRESET (0x1) #define RCANFD_CCTR_CHDMC_CHLT (0x2) /* RSCFDnCFDCmSTS / RSCFDnCmSTS */ #define RCANFD_CSTS_COMSTS BIT(7) #define RCANFD_CSTS_RECSTS BIT(6) #define RCANFD_CSTS_TRMSTS BIT(5) #define RCANFD_CSTS_BOSTS BIT(4) #define RCANFD_CSTS_EPSTS BIT(3) #define RCANFD_CSTS_SLPSTS BIT(2) #define RCANFD_CSTS_HLTSTS BIT(1) #define RCANFD_CSTS_CRSTSTS BIT(0) #define RCANFD_CSTS_TECCNT(x) (((x) >> 24) & 0xff) #define RCANFD_CSTS_RECCNT(x) (((x) >> 16) & 0xff) /* RSCFDnCFDCmERFL / RSCFDnCmERFL */ #define RCANFD_CERFL_ADERR BIT(14) #define RCANFD_CERFL_B0ERR BIT(13) #define RCANFD_CERFL_B1ERR BIT(12) #define RCANFD_CERFL_CERR BIT(11) #define RCANFD_CERFL_AERR BIT(10) #define RCANFD_CERFL_FERR BIT(9) #define RCANFD_CERFL_SERR BIT(8) #define RCANFD_CERFL_ALF BIT(7) #define RCANFD_CERFL_BLF BIT(6) #define RCANFD_CERFL_OVLF BIT(5) #define RCANFD_CERFL_BORF BIT(4) #define RCANFD_CERFL_BOEF BIT(3) #define RCANFD_CERFL_EPF BIT(2) #define RCANFD_CERFL_EWF BIT(1) #define RCANFD_CERFL_BEF BIT(0) #define RCANFD_CERFL_ERR(x) ((x) & (0x7fff)) /* above bits 14:0 */ /* RSCFDnCFDCmDCFG */ #define RCANFD_DCFG_DSJW(gpriv, x) (((x) & reg_gen4(gpriv, 0xf, 0x7)) << 24) #define RCANFD_DCFG_DTSEG2(gpriv, x) \ (((x) & reg_gen4(gpriv, 0x0f, 0x7)) << reg_gen4(gpriv, 16, 20)) #define RCANFD_DCFG_DTSEG1(gpriv, x) \ (((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 8, 16)) #define 
RCANFD_DCFG_DBRP(x)		(((x) & 0xff) << 0)

/* RSCFDnCFDCmFDCFG */
#define RCANFD_GEN4_FDCFG_CLOE		BIT(30)
#define RCANFD_GEN4_FDCFG_FDOE		BIT(28)
#define RCANFD_FDCFG_TDCE		BIT(9)
#define RCANFD_FDCFG_TDCOC		BIT(8)
#define RCANFD_FDCFG_TDCO(x)		(((x) & 0x7f) << 16)

/* RSCFDnCFDRFCCx */
#define RCANFD_RFCC_RFIM		BIT(12)
#define RCANFD_RFCC_RFDC(x)		(((x) & 0x7) << 8)
#define RCANFD_RFCC_RFPLS(x)		(((x) & 0x7) << 4)
#define RCANFD_RFCC_RFIE		BIT(1)
#define RCANFD_RFCC_RFE			BIT(0)

/* RSCFDnCFDRFSTSx */
#define RCANFD_RFSTS_RFIF		BIT(3)
#define RCANFD_RFSTS_RFMLT		BIT(2)
#define RCANFD_RFSTS_RFFLL		BIT(1)
#define RCANFD_RFSTS_RFEMP		BIT(0)

/* RSCFDnCFDRFIDx */
#define RCANFD_RFID_RFIDE		BIT(31)
#define RCANFD_RFID_RFRTR		BIT(30)

/* RSCFDnCFDRFPTRx */
#define RCANFD_RFPTR_RFDLC(x)		(((x) >> 28) & 0xf)
#define RCANFD_RFPTR_RFPTR(x)		(((x) >> 16) & 0xfff)
#define RCANFD_RFPTR_RFTS(x)		(((x) >> 0) & 0xffff)

/* RSCFDnCFDRFFDSTSx */
#define RCANFD_RFFDSTS_RFFDF		BIT(2)
#define RCANFD_RFFDSTS_RFBRS		BIT(1)
#define RCANFD_RFFDSTS_RFESI		BIT(0)

/* Common FIFO bits */

/* RSCFDnCFDCFCCk */
#define RCANFD_CFCC_CFTML(gpriv, x)	\
	(((x) & reg_gen4(gpriv, 0x1f, 0xf)) << reg_gen4(gpriv, 16, 20))
#define RCANFD_CFCC_CFM(gpriv, x)	(((x) & 0x3) << reg_gen4(gpriv, 8, 16))
#define RCANFD_CFCC_CFIM		BIT(12)
#define RCANFD_CFCC_CFDC(gpriv, x)	(((x) & 0x7) << reg_gen4(gpriv, 21, 8))
#define RCANFD_CFCC_CFPLS(x)		(((x) & 0x7) << 4)
#define RCANFD_CFCC_CFTXIE		BIT(2)
#define RCANFD_CFCC_CFE			BIT(0)

/* RSCFDnCFDCFSTSk */
#define RCANFD_CFSTS_CFMC(x)		(((x) >> 8) & 0xff)
#define RCANFD_CFSTS_CFTXIF		BIT(4)
#define RCANFD_CFSTS_CFMLT		BIT(2)
#define RCANFD_CFSTS_CFFLL		BIT(1)
#define RCANFD_CFSTS_CFEMP		BIT(0)

/* RSCFDnCFDCFIDk */
#define RCANFD_CFID_CFIDE		BIT(31)
#define RCANFD_CFID_CFRTR		BIT(30)
#define RCANFD_CFID_CFID_MASK(x)	((x) & 0x1fffffff)

/* RSCFDnCFDCFPTRk */
#define RCANFD_CFPTR_CFDLC(x)		(((x) & 0xf) << 28)
#define RCANFD_CFPTR_CFPTR(x)		(((x) & 0xfff) << 16)
#define RCANFD_CFPTR_CFTS(x)		(((x) & 0xff) << 0)

/* RSCFDnCFDCFFDCSTSk */
#define RCANFD_CFFDCSTS_CFFDF		BIT(2)
#define RCANFD_CFFDCSTS_CFBRS		BIT(1)
#define RCANFD_CFFDCSTS_CFESI		BIT(0)

/* This controller supports either Classical CAN only mode or CAN FD only mode.
 * These modes are supported in two separate sets of register maps & names.
 * However, some of the register offsets are common for both modes. Those
 * offsets are listed below as Common registers.
 *
 * The CAN FD only mode specific registers & Classical CAN only mode specific
 * registers are listed separately. Their register names start with
 * RCANFD_F_xxx & RCANFD_C_xxx respectively.
 */

/* Common registers */

/* RSCFDnCFDCmNCFG / RSCFDnCmCFG */
#define RCANFD_CCFG(m)			(0x0000 + (0x10 * (m)))
/* RSCFDnCFDCmCTR / RSCFDnCmCTR */
#define RCANFD_CCTR(m)			(0x0004 + (0x10 * (m)))
/* RSCFDnCFDCmSTS / RSCFDnCmSTS */
#define RCANFD_CSTS(m)			(0x0008 + (0x10 * (m)))
/* RSCFDnCFDCmERFL / RSCFDnCmERFL */
#define RCANFD_CERFL(m)			(0x000C + (0x10 * (m)))
/* RSCFDnCFDGCFG / RSCFDnGCFG */
#define RCANFD_GCFG			(0x0084)
/* RSCFDnCFDGCTR / RSCFDnGCTR */
#define RCANFD_GCTR			(0x0088)
/* RSCFDnCFDGSTS / RSCFDnGSTS */
#define RCANFD_GSTS			(0x008c)
/* RSCFDnCFDGERFL / RSCFDnGERFL */
#define RCANFD_GERFL			(0x0090)
/* RSCFDnCFDGTSC / RSCFDnGTSC */
#define RCANFD_GTSC			(0x0094)
/* RSCFDnCFDGAFLECTR / RSCFDnGAFLECTR */
#define RCANFD_GAFLECTR			(0x0098)
/* RSCFDnCFDGAFLCFG / RSCFDnGAFLCFG */
#define RCANFD_GAFLCFG(ch)		(0x009c + (0x04 * ((ch) / 2)))
/* RSCFDnCFDRMNB / RSCFDnRMNB */
#define RCANFD_RMNB			(0x00a4)
/* RSCFDnCFDRMND / RSCFDnRMND */
#define RCANFD_RMND(y)			(0x00a8 + (0x04 * (y)))
/* RSCFDnCFDRFCCx / RSCFDnRFCCx */
#define RCANFD_RFCC(gpriv, x)		(reg_gen4(gpriv, 0x00c0, 0x00b8) + (0x04 * (x)))
/* RSCFDnCFDRFSTSx / RSCFDnRFSTSx */
#define RCANFD_RFSTS(gpriv, x)		(RCANFD_RFCC(gpriv, x) + 0x20)
/* RSCFDnCFDRFPCTRx / RSCFDnRFPCTRx */
#define RCANFD_RFPCTR(gpriv, x)		(RCANFD_RFCC(gpriv, x) + 0x40)

/* Common FIFO Control registers */

/* RSCFDnCFDCFCCx / RSCFDnCFCCx */
#define RCANFD_CFCC(gpriv, ch, idx)	\
	(reg_gen4(gpriv, 0x0120, 0x0118) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFSTSx / RSCFDnCFSTSx */
#define RCANFD_CFSTS(gpriv, ch, idx)	\
	(reg_gen4(gpriv, 0x01e0, 0x0178) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDCFPCTRx / RSCFDnCFPCTRx */
#define RCANFD_CFPCTR(gpriv, ch, idx)	\
	(reg_gen4(gpriv, 0x0240, 0x01d8) + (0x0c * (ch)) + (0x04 * (idx)))
/* RSCFDnCFDFESTS / RSCFDnFESTS */
#define RCANFD_FESTS			(0x0238)
/* RSCFDnCFDFFSTS / RSCFDnFFSTS */
#define RCANFD_FFSTS			(0x023c)
/* RSCFDnCFDFMSTS / RSCFDnFMSTS */
#define RCANFD_FMSTS			(0x0240)
/* RSCFDnCFDRFISTS / RSCFDnRFISTS */
#define RCANFD_RFISTS			(0x0244)
/* RSCFDnCFDCFRISTS / RSCFDnCFRISTS */
#define RCANFD_CFRISTS			(0x0248)
/* RSCFDnCFDCFTISTS / RSCFDnCFTISTS */
#define RCANFD_CFTISTS			(0x024c)
/* RSCFDnCFDTMCp / RSCFDnTMCp */
#define RCANFD_TMC(p)			(0x0250 + (0x01 * (p)))
/* RSCFDnCFDTMSTSp / RSCFDnTMSTSp */
#define RCANFD_TMSTS(p)			(0x02d0 + (0x01 * (p)))
/* RSCFDnCFDTMTRSTSp / RSCFDnTMTRSTSp */
#define RCANFD_TMTRSTS(y)		(0x0350 + (0x04 * (y)))
/* RSCFDnCFDTMTARSTSp / RSCFDnTMTARSTSp */
#define RCANFD_TMTARSTS(y)		(0x0360 + (0x04 * (y)))
/* RSCFDnCFDTMTCSTSp / RSCFDnTMTCSTSp */
#define RCANFD_TMTCSTS(y)		(0x0370 + (0x04 * (y)))
/* RSCFDnCFDTMTASTSp / RSCFDnTMTASTSp */
#define RCANFD_TMTASTS(y)		(0x0380 + (0x04 * (y)))
/* RSCFDnCFDTMIECy / RSCFDnTMIECy */
#define RCANFD_TMIEC(y)			(0x0390 + (0x04 * (y)))
/* RSCFDnCFDTXQCCm / RSCFDnTXQCCm */
#define RCANFD_TXQCC(m)			(0x03a0 + (0x04 * (m)))
/* RSCFDnCFDTXQSTSm / RSCFDnTXQSTSm */
#define RCANFD_TXQSTS(m)		(0x03c0 + (0x04 * (m)))
/* RSCFDnCFDTXQPCTRm / RSCFDnTXQPCTRm */
#define RCANFD_TXQPCTR(m)		(0x03e0 + (0x04 * (m)))
/* RSCFDnCFDTHLCCm / RSCFDnTHLCCm */
#define RCANFD_THLCC(m)			(0x0400 + (0x04 * (m)))
/* RSCFDnCFDTHLSTSm / RSCFDnTHLSTSm */
#define RCANFD_THLSTS(m)		(0x0420 + (0x04 * (m)))
/* RSCFDnCFDTHLPCTRm / RSCFDnTHLPCTRm */
#define RCANFD_THLPCTR(m)		(0x0440 + (0x04 * (m)))
/* RSCFDnCFDGTINTSTS0 / RSCFDnGTINTSTS0 */
#define RCANFD_GTINTSTS0		(0x0460)
/* RSCFDnCFDGTINTSTS1 / RSCFDnGTINTSTS1 */
#define RCANFD_GTINTSTS1		(0x0464)
/* RSCFDnCFDGTSTCFG / RSCFDnGTSTCFG */
#define RCANFD_GTSTCFG			(0x0468)
/*
RSCFDnCFDGTSTCTR / RSCFDnGTSTCTR */ #define RCANFD_GTSTCTR (0x046c) /* RSCFDnCFDGLOCKK / RSCFDnGLOCKK */ #define RCANFD_GLOCKK (0x047c) /* RSCFDnCFDGRMCFG */ #define RCANFD_GRMCFG (0x04fc) /* RSCFDnCFDGAFLIDj / RSCFDnGAFLIDj */ #define RCANFD_GAFLID(offset, j) ((offset) + (0x10 * (j))) /* RSCFDnCFDGAFLMj / RSCFDnGAFLMj */ #define RCANFD_GAFLM(offset, j) ((offset) + 0x04 + (0x10 * (j))) /* RSCFDnCFDGAFLP0j / RSCFDnGAFLP0j */ #define RCANFD_GAFLP0(offset, j) ((offset) + 0x08 + (0x10 * (j))) /* RSCFDnCFDGAFLP1j / RSCFDnGAFLP1j */ #define RCANFD_GAFLP1(offset, j) ((offset) + 0x0c + (0x10 * (j))) /* Classical CAN only mode register map */ /* RSCFDnGAFLXXXj offset */ #define RCANFD_C_GAFL_OFFSET (0x0500) /* RSCFDnRMXXXq -> RCANFD_C_RMXXX(q) */ #define RCANFD_C_RMID(q) (0x0600 + (0x10 * (q))) #define RCANFD_C_RMPTR(q) (0x0604 + (0x10 * (q))) #define RCANFD_C_RMDF0(q) (0x0608 + (0x10 * (q))) #define RCANFD_C_RMDF1(q) (0x060c + (0x10 * (q))) /* RSCFDnRFXXx -> RCANFD_C_RFXX(x) */ #define RCANFD_C_RFOFFSET (0x0e00) #define RCANFD_C_RFID(x) (RCANFD_C_RFOFFSET + (0x10 * (x))) #define RCANFD_C_RFPTR(x) (RCANFD_C_RFOFFSET + 0x04 + (0x10 * (x))) #define RCANFD_C_RFDF(x, df) \ (RCANFD_C_RFOFFSET + 0x08 + (0x10 * (x)) + (0x04 * (df))) /* RSCFDnCFXXk -> RCANFD_C_CFXX(ch, k) */ #define RCANFD_C_CFOFFSET (0x0e80) #define RCANFD_C_CFID(ch, idx) \ (RCANFD_C_CFOFFSET + (0x30 * (ch)) + (0x10 * (idx))) #define RCANFD_C_CFPTR(ch, idx) \ (RCANFD_C_CFOFFSET + 0x04 + (0x30 * (ch)) + (0x10 * (idx))) #define RCANFD_C_CFDF(ch, idx, df) \ (RCANFD_C_CFOFFSET + 0x08 + (0x30 * (ch)) + (0x10 * (idx)) + (0x04 * (df))) /* RSCFDnTMXXp -> RCANFD_C_TMXX(p) */ #define RCANFD_C_TMID(p) (0x1000 + (0x10 * (p))) #define RCANFD_C_TMPTR(p) (0x1004 + (0x10 * (p))) #define RCANFD_C_TMDF0(p) (0x1008 + (0x10 * (p))) #define RCANFD_C_TMDF1(p) (0x100c + (0x10 * (p))) /* RSCFDnTHLACCm */ #define RCANFD_C_THLACC(m) (0x1800 + (0x04 * (m))) /* RSCFDnRPGACCr */ #define RCANFD_C_RPGACC(r) (0x1900 + (0x04 * (r))) /* R-Car Gen4 Classical and CAN FD mode specific register map */ #define RCANFD_GEN4_FDCFG(m) (0x1404 + (0x20 * (m))) #define RCANFD_GEN4_GAFL_OFFSET (0x1800) /* CAN FD mode specific register map */ /* RSCFDnCFDCmXXX -> RCANFD_F_XXX(m) */ #define RCANFD_F_DCFG(gpriv, m) (reg_gen4(gpriv, 0x1400, 0x0500) + (0x20 * (m))) #define RCANFD_F_CFDCFG(m) (0x0504 + (0x20 * (m))) #define RCANFD_F_CFDCTR(m) (0x0508 + (0x20 * (m))) #define RCANFD_F_CFDSTS(m) (0x050c + (0x20 * (m))) #define RCANFD_F_CFDCRC(m) (0x0510 + (0x20 * (m))) /* RSCFDnCFDGAFLXXXj offset */ #define RCANFD_F_GAFL_OFFSET (0x1000) /* RSCFDnCFDRMXXXq -> RCANFD_F_RMXXX(q) */ #define RCANFD_F_RMID(q) (0x2000 + (0x20 * (q))) #define RCANFD_F_RMPTR(q) (0x2004 + (0x20 * (q))) #define RCANFD_F_RMFDSTS(q) (0x2008 + (0x20 * (q))) #define RCANFD_F_RMDF(q, b) (0x200c + (0x04 * (b)) + (0x20 * (q))) /* RSCFDnCFDRFXXx -> RCANFD_F_RFXX(x) */ #define RCANFD_F_RFOFFSET(gpriv) reg_gen4(gpriv, 0x6000, 0x3000) #define RCANFD_F_RFID(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + (0x80 * (x))) #define RCANFD_F_RFPTR(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x04 + (0x80 * (x))) #define RCANFD_F_RFFDSTS(gpriv, x) (RCANFD_F_RFOFFSET(gpriv) + 0x08 + (0x80 * (x))) #define RCANFD_F_RFDF(gpriv, x, df) \ (RCANFD_F_RFOFFSET(gpriv) + 0x0c + (0x80 * (x)) + (0x04 * (df))) /* RSCFDnCFDCFXXk -> RCANFD_F_CFXX(ch, k) */ #define RCANFD_F_CFOFFSET(gpriv) reg_gen4(gpriv, 0x6400, 0x3400) #define RCANFD_F_CFID(gpriv, ch, idx) \ (RCANFD_F_CFOFFSET(gpriv) + (0x180 * (ch)) + (0x80 * (idx))) #define RCANFD_F_CFPTR(gpriv, ch, idx) \ 
(RCANFD_F_CFOFFSET(gpriv) + 0x04 + (0x180 * (ch)) + (0x80 * (idx))) #define RCANFD_F_CFFDCSTS(gpriv, ch, idx) \ (RCANFD_F_CFOFFSET(gpriv) + 0x08 + (0x180 * (ch)) + (0x80 * (idx))) #define RCANFD_F_CFDF(gpriv, ch, idx, df) \ (RCANFD_F_CFOFFSET(gpriv) + 0x0c + (0x180 * (ch)) + (0x80 * (idx)) + \ (0x04 * (df))) /* RSCFDnCFDTMXXp -> RCANFD_F_TMXX(p) */ #define RCANFD_F_TMID(p) (0x4000 + (0x20 * (p))) #define RCANFD_F_TMPTR(p) (0x4004 + (0x20 * (p))) #define RCANFD_F_TMFDCTR(p) (0x4008 + (0x20 * (p))) #define RCANFD_F_TMDF(p, b) (0x400c + (0x20 * (p)) + (0x04 * (b))) /* RSCFDnCFDTHLACCm */ #define RCANFD_F_THLACC(m) (0x6000 + (0x04 * (m))) /* RSCFDnCFDRPGACCr */ #define RCANFD_F_RPGACC(r) (0x6400 + (0x04 * (r))) /* Constants */ #define RCANFD_FIFO_DEPTH 8 /* Tx FIFO depth */ #define RCANFD_NAPI_WEIGHT 8 /* Rx poll quota */ #define RCANFD_NUM_CHANNELS 8 /* Eight channels max */ #define RCANFD_CHANNELS_MASK BIT((RCANFD_NUM_CHANNELS) - 1) #define RCANFD_GAFL_PAGENUM(entry) ((entry) / 16) #define RCANFD_CHANNEL_NUMRULES 1 /* only one rule per channel */ /* Rx FIFO is a global resource of the controller. There are 8 such FIFOs * available. Each channel gets a dedicated Rx FIFO (i.e.) the channel * number is added to RFFIFO index. */ #define RCANFD_RFFIFO_IDX 0 /* Tx/Rx or Common FIFO is a per channel resource. Each channel has 3 Common * FIFOs dedicated to them. Use the first (index 0) FIFO out of the 3 for Tx. */ #define RCANFD_CFFIFO_IDX 0 /* fCAN clock select register settings */ enum rcar_canfd_fcanclk { RCANFD_CANFDCLK = 0, /* CANFD clock */ RCANFD_EXTCLK, /* Externally input clock */ }; struct rcar_canfd_global; struct rcar_canfd_hw_info { u8 max_channels; u8 postdiv; /* hardware features */ unsigned shared_global_irqs:1; /* Has shared global irqs */ unsigned multi_channel_irqs:1; /* Has multiple channel irqs */ }; /* Channel priv data */ struct rcar_canfd_channel { struct can_priv can; /* Must be the first member */ struct net_device *ndev; struct rcar_canfd_global *gpriv; /* Controller reference */ void __iomem *base; /* Register base address */ struct phy *transceiver; /* Optional transceiver */ struct napi_struct napi; u32 tx_head; /* Incremented on xmit */ u32 tx_tail; /* Incremented on xmit done */ u32 channel; /* Channel number */ spinlock_t tx_lock; /* To protect tx path */ }; /* Global priv data */ struct rcar_canfd_global { struct rcar_canfd_channel *ch[RCANFD_NUM_CHANNELS]; void __iomem *base; /* Register base address */ struct platform_device *pdev; /* Respective platform device */ struct clk *clkp; /* Peripheral clock */ struct clk *can_clk; /* fCAN clock */ enum rcar_canfd_fcanclk fcan; /* CANFD or Ext clock */ unsigned long channels_mask; /* Enabled channels mask */ bool fdmode; /* CAN FD or Classical CAN only mode */ struct reset_control *rstc1; struct reset_control *rstc2; const struct rcar_canfd_hw_info *info; }; /* CAN FD mode nominal rate constants */ static const struct can_bittiming_const rcar_canfd_nom_bittiming_const = { .name = RCANFD_DRV_NAME, .tseg1_min = 2, .tseg1_max = 128, .tseg2_min = 2, .tseg2_max = 32, .sjw_max = 32, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; /* CAN FD mode data rate constants */ static const struct can_bittiming_const rcar_canfd_data_bittiming_const = { .name = RCANFD_DRV_NAME, .tseg1_min = 2, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 8, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; /* Classical CAN mode bitrate constants */ static const struct can_bittiming_const rcar_canfd_bittiming_const = { .name = RCANFD_DRV_NAME, 
.tseg1_min = 4, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; static const struct rcar_canfd_hw_info rcar_gen3_hw_info = { .max_channels = 2, .postdiv = 2, .shared_global_irqs = 1, }; static const struct rcar_canfd_hw_info rcar_gen4_hw_info = { .max_channels = 8, .postdiv = 2, .shared_global_irqs = 1, }; static const struct rcar_canfd_hw_info rzg2l_hw_info = { .max_channels = 2, .postdiv = 1, .multi_channel_irqs = 1, }; /* Helper functions */ static inline bool is_gen4(struct rcar_canfd_global *gpriv) { return gpriv->info == &rcar_gen4_hw_info; } static inline u32 reg_gen4(struct rcar_canfd_global *gpriv, u32 gen4, u32 not_gen4) { return is_gen4(gpriv) ? gen4 : not_gen4; } static inline void rcar_canfd_update(u32 mask, u32 val, u32 __iomem *reg) { u32 data = readl(reg); data &= ~mask; data |= (val & mask); writel(data, reg); } static inline u32 rcar_canfd_read(void __iomem *base, u32 offset) { return readl(base + (offset)); } static inline void rcar_canfd_write(void __iomem *base, u32 offset, u32 val) { writel(val, base + (offset)); } static void rcar_canfd_set_bit(void __iomem *base, u32 reg, u32 val) { rcar_canfd_update(val, val, base + (reg)); } static void rcar_canfd_clear_bit(void __iomem *base, u32 reg, u32 val) { rcar_canfd_update(val, 0, base + (reg)); } static void rcar_canfd_update_bit(void __iomem *base, u32 reg, u32 mask, u32 val) { rcar_canfd_update(mask, val, base + (reg)); } static void rcar_canfd_get_data(struct rcar_canfd_channel *priv, struct canfd_frame *cf, u32 off) { u32 i, lwords; lwords = DIV_ROUND_UP(cf->len, sizeof(u32)); for (i = 0; i < lwords; i++) *((u32 *)cf->data + i) = rcar_canfd_read(priv->base, off + (i * sizeof(u32))); } static void rcar_canfd_put_data(struct rcar_canfd_channel *priv, struct canfd_frame *cf, u32 off) { u32 i, lwords; lwords = DIV_ROUND_UP(cf->len, sizeof(u32)); for (i = 0; i < lwords; i++) rcar_canfd_write(priv->base, off + (i * sizeof(u32)), *((u32 *)cf->data + i)); } static void rcar_canfd_tx_failure_cleanup(struct net_device *ndev) { u32 i; for (i = 0; i < RCANFD_FIFO_DEPTH; i++) can_free_echo_skb(ndev, i, NULL); } static void rcar_canfd_set_mode(struct rcar_canfd_global *gpriv) { if (is_gen4(gpriv)) { u32 ch, val = gpriv->fdmode ? 
RCANFD_GEN4_FDCFG_FDOE : RCANFD_GEN4_FDCFG_CLOE; for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) rcar_canfd_set_bit(gpriv->base, RCANFD_GEN4_FDCFG(ch), val); } else { if (gpriv->fdmode) rcar_canfd_set_bit(gpriv->base, RCANFD_GRMCFG, RCANFD_GRMCFG_RCMC); else rcar_canfd_clear_bit(gpriv->base, RCANFD_GRMCFG, RCANFD_GRMCFG_RCMC); } } static int rcar_canfd_reset_controller(struct rcar_canfd_global *gpriv) { u32 sts, ch; int err; /* Check RAMINIT flag as CAN RAM initialization takes place * after the MCU reset */ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts, !(sts & RCANFD_GSTS_GRAMINIT), 2, 500000); if (err) { dev_dbg(&gpriv->pdev->dev, "global raminit failed\n"); return err; } /* Transition to Global Reset mode */ rcar_canfd_clear_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR); rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK, RCANFD_GCTR_GMDC_GRESET); /* Ensure Global reset mode */ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts, (sts & RCANFD_GSTS_GRSTSTS), 2, 500000); if (err) { dev_dbg(&gpriv->pdev->dev, "global reset failed\n"); return err; } /* Reset Global error flags */ rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0x0); /* Set the controller into appropriate mode */ rcar_canfd_set_mode(gpriv); /* Transition all Channels to reset mode */ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { rcar_canfd_clear_bit(gpriv->base, RCANFD_CCTR(ch), RCANFD_CCTR_CSLPR); rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch), RCANFD_CCTR_CHMDC_MASK, RCANFD_CCTR_CHDMC_CRESET); /* Ensure Channel reset mode */ err = readl_poll_timeout((gpriv->base + RCANFD_CSTS(ch)), sts, (sts & RCANFD_CSTS_CRSTSTS), 2, 500000); if (err) { dev_dbg(&gpriv->pdev->dev, "channel %u reset failed\n", ch); return err; } } return 0; } static void rcar_canfd_configure_controller(struct rcar_canfd_global *gpriv) { u32 cfg, ch; /* Global configuration settings */ /* ECC Error flag Enable */ cfg = RCANFD_GCFG_EEFE; if (gpriv->fdmode) /* Truncate payload to configured message size RFPLS */ cfg |= RCANFD_GCFG_CMPOC; /* Set External Clock if selected */ if (gpriv->fcan != RCANFD_CANFDCLK) cfg |= RCANFD_GCFG_DCS; rcar_canfd_set_bit(gpriv->base, RCANFD_GCFG, cfg); /* Channel configuration settings */ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { rcar_canfd_set_bit(gpriv->base, RCANFD_CCTR(ch), RCANFD_CCTR_ERRD); rcar_canfd_update_bit(gpriv->base, RCANFD_CCTR(ch), RCANFD_CCTR_BOM_MASK, RCANFD_CCTR_BOM_BENTRY); } } static void rcar_canfd_configure_afl_rules(struct rcar_canfd_global *gpriv, u32 ch) { u32 cfg; int offset, start, page, num_rules = RCANFD_CHANNEL_NUMRULES; u32 ridx = ch + RCANFD_RFFIFO_IDX; if (ch == 0) { start = 0; /* Channel 0 always starts from 0th rule */ } else { /* Get number of Channel 0 rules and adjust */ cfg = rcar_canfd_read(gpriv->base, RCANFD_GAFLCFG(ch)); start = RCANFD_GAFLCFG_GETRNC(gpriv, 0, cfg); } /* Enable write access to entry */ page = RCANFD_GAFL_PAGENUM(start); rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLECTR, (RCANFD_GAFLECTR_AFLPN(gpriv, page) | RCANFD_GAFLECTR_AFLDAE)); /* Write number of rules for channel */ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLCFG(ch), RCANFD_GAFLCFG_SETRNC(gpriv, ch, num_rules)); if (is_gen4(gpriv)) offset = RCANFD_GEN4_GAFL_OFFSET; else if (gpriv->fdmode) offset = RCANFD_F_GAFL_OFFSET; else offset = RCANFD_C_GAFL_OFFSET; /* Accept all IDs */ rcar_canfd_write(gpriv->base, RCANFD_GAFLID(offset, start), 0); /* IDE or RTR is not considered for matching */ 
rcar_canfd_write(gpriv->base, RCANFD_GAFLM(offset, start), 0); /* Any data length accepted */ rcar_canfd_write(gpriv->base, RCANFD_GAFLP0(offset, start), 0); /* Place the msg in corresponding Rx FIFO entry */ rcar_canfd_set_bit(gpriv->base, RCANFD_GAFLP1(offset, start), RCANFD_GAFLP1_GAFLFDP(ridx)); /* Disable write access to page */ rcar_canfd_clear_bit(gpriv->base, RCANFD_GAFLECTR, RCANFD_GAFLECTR_AFLDAE); } static void rcar_canfd_configure_rx(struct rcar_canfd_global *gpriv, u32 ch) { /* Rx FIFO is used for reception */ u32 cfg; u16 rfdc, rfpls; /* Select Rx FIFO based on channel */ u32 ridx = ch + RCANFD_RFFIFO_IDX; rfdc = 2; /* b010 - 8 messages Rx FIFO depth */ if (gpriv->fdmode) rfpls = 7; /* b111 - Max 64 bytes payload */ else rfpls = 0; /* b000 - Max 8 bytes payload */ cfg = (RCANFD_RFCC_RFIM | RCANFD_RFCC_RFDC(rfdc) | RCANFD_RFCC_RFPLS(rfpls) | RCANFD_RFCC_RFIE); rcar_canfd_write(gpriv->base, RCANFD_RFCC(gpriv, ridx), cfg); } static void rcar_canfd_configure_tx(struct rcar_canfd_global *gpriv, u32 ch) { /* Tx/Rx(Common) FIFO configured in Tx mode is * used for transmission * * Each channel has 3 Common FIFO dedicated to them. * Use the 1st (index 0) out of 3 */ u32 cfg; u16 cftml, cfm, cfdc, cfpls; cftml = 0; /* 0th buffer */ cfm = 1; /* b01 - Transmit mode */ cfdc = 2; /* b010 - 8 messages Tx FIFO depth */ if (gpriv->fdmode) cfpls = 7; /* b111 - Max 64 bytes payload */ else cfpls = 0; /* b000 - Max 8 bytes payload */ cfg = (RCANFD_CFCC_CFTML(gpriv, cftml) | RCANFD_CFCC_CFM(gpriv, cfm) | RCANFD_CFCC_CFIM | RCANFD_CFCC_CFDC(gpriv, cfdc) | RCANFD_CFCC_CFPLS(cfpls) | RCANFD_CFCC_CFTXIE); rcar_canfd_write(gpriv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), cfg); if (gpriv->fdmode) /* Clear FD mode specific control/status register */ rcar_canfd_write(gpriv->base, RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), 0); } static void rcar_canfd_enable_global_interrupts(struct rcar_canfd_global *gpriv) { u32 ctr; /* Clear any stray error interrupt flags */ rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0); /* Global interrupts setup */ ctr = RCANFD_GCTR_MEIE; if (gpriv->fdmode) ctr |= RCANFD_GCTR_CFMPOFIE; rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, ctr); } static void rcar_canfd_disable_global_interrupts(struct rcar_canfd_global *gpriv) { /* Disable all interrupts */ rcar_canfd_write(gpriv->base, RCANFD_GCTR, 0); /* Clear any stray error interrupt flags */ rcar_canfd_write(gpriv->base, RCANFD_GERFL, 0); } static void rcar_canfd_enable_channel_interrupts(struct rcar_canfd_channel *priv) { u32 ctr, ch = priv->channel; /* Clear any stray error flags */ rcar_canfd_write(priv->base, RCANFD_CERFL(ch), 0); /* Channel interrupts setup */ ctr = (RCANFD_CCTR_TAIE | RCANFD_CCTR_ALIE | RCANFD_CCTR_BLIE | RCANFD_CCTR_OLIE | RCANFD_CCTR_BORIE | RCANFD_CCTR_BOEIE | RCANFD_CCTR_EPIE | RCANFD_CCTR_EWIE | RCANFD_CCTR_BEIE); rcar_canfd_set_bit(priv->base, RCANFD_CCTR(ch), ctr); } static void rcar_canfd_disable_channel_interrupts(struct rcar_canfd_channel *priv) { u32 ctr, ch = priv->channel; ctr = (RCANFD_CCTR_TAIE | RCANFD_CCTR_ALIE | RCANFD_CCTR_BLIE | RCANFD_CCTR_OLIE | RCANFD_CCTR_BORIE | RCANFD_CCTR_BOEIE | RCANFD_CCTR_EPIE | RCANFD_CCTR_EWIE | RCANFD_CCTR_BEIE); rcar_canfd_clear_bit(priv->base, RCANFD_CCTR(ch), ctr); /* Clear any stray error flags */ rcar_canfd_write(priv->base, RCANFD_CERFL(ch), 0); } static void rcar_canfd_global_error(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); struct rcar_canfd_global *gpriv = priv->gpriv; struct net_device_stats *stats = 
&ndev->stats; u32 ch = priv->channel; u32 gerfl, sts; u32 ridx = ch + RCANFD_RFFIFO_IDX; gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL); if (gerfl & RCANFD_GERFL_EEF(ch)) { netdev_dbg(ndev, "Ch%u: ECC Error flag\n", ch); stats->tx_dropped++; } if (gerfl & RCANFD_GERFL_MES) { sts = rcar_canfd_read(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX)); if (sts & RCANFD_CFSTS_CFMLT) { netdev_dbg(ndev, "Tx Message Lost flag\n"); stats->tx_dropped++; rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts & ~RCANFD_CFSTS_CFMLT); } sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx)); if (sts & RCANFD_RFSTS_RFMLT) { netdev_dbg(ndev, "Rx Message Lost flag\n"); stats->rx_dropped++; rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx), sts & ~RCANFD_RFSTS_RFMLT); } } if (gpriv->fdmode && gerfl & RCANFD_GERFL_CMPOF) { /* Message Lost flag will be set for respective channel * when this condition happens with counters and flags * already updated. */ netdev_dbg(ndev, "global payload overflow interrupt\n"); } /* Clear all global error interrupts. Only affected channels bits * get cleared */ rcar_canfd_write(priv->base, RCANFD_GERFL, 0); } static void rcar_canfd_error(struct net_device *ndev, u32 cerfl, u16 txerr, u16 rxerr) { struct rcar_canfd_channel *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; u32 ch = priv->channel; netdev_dbg(ndev, "ch erfl %x txerr %u rxerr %u\n", cerfl, txerr, rxerr); /* Propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(ndev, &cf); if (!skb) { stats->rx_dropped++; return; } /* Channel error interrupts */ if (cerfl & RCANFD_CERFL_BEF) { netdev_dbg(ndev, "Bus error\n"); cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_UNSPEC; priv->can.can_stats.bus_error++; } if (cerfl & RCANFD_CERFL_ADERR) { netdev_dbg(ndev, "ACK Delimiter Error\n"); stats->tx_errors++; cf->data[3] |= CAN_ERR_PROT_LOC_ACK_DEL; } if (cerfl & RCANFD_CERFL_B0ERR) { netdev_dbg(ndev, "Bit Error (dominant)\n"); stats->tx_errors++; cf->data[2] |= CAN_ERR_PROT_BIT0; } if (cerfl & RCANFD_CERFL_B1ERR) { netdev_dbg(ndev, "Bit Error (recessive)\n"); stats->tx_errors++; cf->data[2] |= CAN_ERR_PROT_BIT1; } if (cerfl & RCANFD_CERFL_CERR) { netdev_dbg(ndev, "CRC Error\n"); stats->rx_errors++; cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; } if (cerfl & RCANFD_CERFL_AERR) { netdev_dbg(ndev, "ACK Error\n"); stats->tx_errors++; cf->can_id |= CAN_ERR_ACK; cf->data[3] |= CAN_ERR_PROT_LOC_ACK; } if (cerfl & RCANFD_CERFL_FERR) { netdev_dbg(ndev, "Form Error\n"); stats->rx_errors++; cf->data[2] |= CAN_ERR_PROT_FORM; } if (cerfl & RCANFD_CERFL_SERR) { netdev_dbg(ndev, "Stuff Error\n"); stats->rx_errors++; cf->data[2] |= CAN_ERR_PROT_STUFF; } if (cerfl & RCANFD_CERFL_ALF) { netdev_dbg(ndev, "Arbitration lost Error\n"); priv->can.can_stats.arbitration_lost++; cf->can_id |= CAN_ERR_LOSTARB; cf->data[0] |= CAN_ERR_LOSTARB_UNSPEC; } if (cerfl & RCANFD_CERFL_BLF) { netdev_dbg(ndev, "Bus Lock Error\n"); stats->rx_errors++; cf->can_id |= CAN_ERR_BUSERROR; } if (cerfl & RCANFD_CERFL_EWF) { netdev_dbg(ndev, "Error warning interrupt\n"); priv->can.state = CAN_STATE_ERROR_WARNING; priv->can.can_stats.error_warning++; cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = txerr > rxerr ? 
CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; cf->data[6] = txerr; cf->data[7] = rxerr; } if (cerfl & RCANFD_CERFL_EPF) { netdev_dbg(ndev, "Error passive interrupt\n"); priv->can.state = CAN_STATE_ERROR_PASSIVE; priv->can.can_stats.error_passive++; cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = txerr > rxerr ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; cf->data[6] = txerr; cf->data[7] = rxerr; } if (cerfl & RCANFD_CERFL_BOEF) { netdev_dbg(ndev, "Bus-off entry interrupt\n"); rcar_canfd_tx_failure_cleanup(ndev); priv->can.state = CAN_STATE_BUS_OFF; priv->can.can_stats.bus_off++; can_bus_off(ndev); cf->can_id |= CAN_ERR_BUSOFF; } if (cerfl & RCANFD_CERFL_OVLF) { netdev_dbg(ndev, "Overload Frame Transmission error interrupt\n"); stats->tx_errors++; cf->can_id |= CAN_ERR_PROT; cf->data[2] |= CAN_ERR_PROT_OVERLOAD; } /* Clear channel error interrupts that are handled */ rcar_canfd_write(priv->base, RCANFD_CERFL(ch), RCANFD_CERFL_ERR(~cerfl)); netif_rx(skb); } static void rcar_canfd_tx_done(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); struct rcar_canfd_global *gpriv = priv->gpriv; struct net_device_stats *stats = &ndev->stats; u32 sts; unsigned long flags; u32 ch = priv->channel; do { u8 unsent, sent; sent = priv->tx_tail % RCANFD_FIFO_DEPTH; stats->tx_packets++; stats->tx_bytes += can_get_echo_skb(ndev, sent, NULL); spin_lock_irqsave(&priv->tx_lock, flags); priv->tx_tail++; sts = rcar_canfd_read(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX)); unsent = RCANFD_CFSTS_CFMC(sts); /* Wake producer only when there is room */ if (unsent != RCANFD_FIFO_DEPTH) netif_wake_queue(ndev); if (priv->tx_head - priv->tx_tail <= unsent) { spin_unlock_irqrestore(&priv->tx_lock, flags); break; } spin_unlock_irqrestore(&priv->tx_lock, flags); } while (1); /* Clear interrupt */ rcar_canfd_write(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts & ~RCANFD_CFSTS_CFTXIF); } static void rcar_canfd_handle_global_err(struct rcar_canfd_global *gpriv, u32 ch) { struct rcar_canfd_channel *priv = gpriv->ch[ch]; struct net_device *ndev = priv->ndev; u32 gerfl; /* Handle global error interrupts */ gerfl = rcar_canfd_read(priv->base, RCANFD_GERFL); if (unlikely(RCANFD_GERFL_ERR(gpriv, gerfl))) rcar_canfd_global_error(ndev); } static irqreturn_t rcar_canfd_global_err_interrupt(int irq, void *dev_id) { struct rcar_canfd_global *gpriv = dev_id; u32 ch; for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) rcar_canfd_handle_global_err(gpriv, ch); return IRQ_HANDLED; } static void rcar_canfd_handle_global_receive(struct rcar_canfd_global *gpriv, u32 ch) { struct rcar_canfd_channel *priv = gpriv->ch[ch]; u32 ridx = ch + RCANFD_RFFIFO_IDX; u32 sts, cc; /* Handle Rx interrupts */ sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx)); cc = rcar_canfd_read(priv->base, RCANFD_RFCC(gpriv, ridx)); if (likely(sts & RCANFD_RFSTS_RFIF && cc & RCANFD_RFCC_RFIE)) { if (napi_schedule_prep(&priv->napi)) { /* Disable Rx FIFO interrupts */ rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFIE); __napi_schedule(&priv->napi); } } } static irqreturn_t rcar_canfd_global_receive_fifo_interrupt(int irq, void *dev_id) { struct rcar_canfd_global *gpriv = dev_id; u32 ch; for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) rcar_canfd_handle_global_receive(gpriv, ch); return IRQ_HANDLED; } static irqreturn_t rcar_canfd_global_interrupt(int irq, void *dev_id) { struct rcar_canfd_global *gpriv = dev_id; u32 ch; /* Global error 
interrupts still indicate a condition specific * to a channel. RxFIFO interrupt is a global interrupt. */ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { rcar_canfd_handle_global_err(gpriv, ch); rcar_canfd_handle_global_receive(gpriv, ch); } return IRQ_HANDLED; } static void rcar_canfd_state_change(struct net_device *ndev, u16 txerr, u16 rxerr) { struct rcar_canfd_channel *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; enum can_state rx_state, tx_state, state = priv->can.state; struct can_frame *cf; struct sk_buff *skb; /* Handle transition from error to normal states */ if (txerr < 96 && rxerr < 96) state = CAN_STATE_ERROR_ACTIVE; else if (txerr < 128 && rxerr < 128) state = CAN_STATE_ERROR_WARNING; if (state != priv->can.state) { netdev_dbg(ndev, "state: new %d, old %d: txerr %u, rxerr %u\n", state, priv->can.state, txerr, rxerr); skb = alloc_can_err_skb(ndev, &cf); if (!skb) { stats->rx_dropped++; return; } tx_state = txerr >= rxerr ? state : 0; rx_state = txerr <= rxerr ? state : 0; can_change_state(ndev, cf, tx_state, rx_state); netif_rx(skb); } } static void rcar_canfd_handle_channel_tx(struct rcar_canfd_global *gpriv, u32 ch) { struct rcar_canfd_channel *priv = gpriv->ch[ch]; struct net_device *ndev = priv->ndev; u32 sts; /* Handle Tx interrupts */ sts = rcar_canfd_read(priv->base, RCANFD_CFSTS(gpriv, ch, RCANFD_CFFIFO_IDX)); if (likely(sts & RCANFD_CFSTS_CFTXIF)) rcar_canfd_tx_done(ndev); } static irqreturn_t rcar_canfd_channel_tx_interrupt(int irq, void *dev_id) { struct rcar_canfd_channel *priv = dev_id; rcar_canfd_handle_channel_tx(priv->gpriv, priv->channel); return IRQ_HANDLED; } static void rcar_canfd_handle_channel_err(struct rcar_canfd_global *gpriv, u32 ch) { struct rcar_canfd_channel *priv = gpriv->ch[ch]; struct net_device *ndev = priv->ndev; u16 txerr, rxerr; u32 sts, cerfl; /* Handle channel error interrupts */ cerfl = rcar_canfd_read(priv->base, RCANFD_CERFL(ch)); sts = rcar_canfd_read(priv->base, RCANFD_CSTS(ch)); txerr = RCANFD_CSTS_TECCNT(sts); rxerr = RCANFD_CSTS_RECCNT(sts); if (unlikely(RCANFD_CERFL_ERR(cerfl))) rcar_canfd_error(ndev, cerfl, txerr, rxerr); /* Handle state change to lower states */ if (unlikely(priv->can.state != CAN_STATE_ERROR_ACTIVE && priv->can.state != CAN_STATE_BUS_OFF)) rcar_canfd_state_change(ndev, txerr, rxerr); } static irqreturn_t rcar_canfd_channel_err_interrupt(int irq, void *dev_id) { struct rcar_canfd_channel *priv = dev_id; rcar_canfd_handle_channel_err(priv->gpriv, priv->channel); return IRQ_HANDLED; } static irqreturn_t rcar_canfd_channel_interrupt(int irq, void *dev_id) { struct rcar_canfd_global *gpriv = dev_id; u32 ch; /* Common FIFO is a per channel resource */ for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { rcar_canfd_handle_channel_err(gpriv, ch); rcar_canfd_handle_channel_tx(gpriv, ch); } return IRQ_HANDLED; } static void rcar_canfd_set_bittiming(struct net_device *dev) { struct rcar_canfd_channel *priv = netdev_priv(dev); struct rcar_canfd_global *gpriv = priv->gpriv; const struct can_bittiming *bt = &priv->can.bittiming; const struct can_bittiming *dbt = &priv->can.data_bittiming; u16 brp, sjw, tseg1, tseg2; u32 cfg; u32 ch = priv->channel; /* Nominal bit timing settings */ brp = bt->brp - 1; sjw = bt->sjw - 1; tseg1 = bt->prop_seg + bt->phase_seg1 - 1; tseg2 = bt->phase_seg2 - 1; if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { /* CAN FD only mode */ cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) | RCANFD_NCFG_NSJW(gpriv, sjw) | 
RCANFD_NCFG_NTSEG2(gpriv, tseg2)); rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg); netdev_dbg(priv->ndev, "nrate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", brp, sjw, tseg1, tseg2); /* Data bit timing settings */ brp = dbt->brp - 1; sjw = dbt->sjw - 1; tseg1 = dbt->prop_seg + dbt->phase_seg1 - 1; tseg2 = dbt->phase_seg2 - 1; cfg = (RCANFD_DCFG_DTSEG1(gpriv, tseg1) | RCANFD_DCFG_DBRP(brp) | RCANFD_DCFG_DSJW(gpriv, sjw) | RCANFD_DCFG_DTSEG2(gpriv, tseg2)); rcar_canfd_write(priv->base, RCANFD_F_DCFG(gpriv, ch), cfg); netdev_dbg(priv->ndev, "drate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", brp, sjw, tseg1, tseg2); } else { /* Classical CAN only mode */ if (is_gen4(gpriv)) { cfg = (RCANFD_NCFG_NTSEG1(gpriv, tseg1) | RCANFD_NCFG_NBRP(brp) | RCANFD_NCFG_NSJW(gpriv, sjw) | RCANFD_NCFG_NTSEG2(gpriv, tseg2)); } else { cfg = (RCANFD_CFG_TSEG1(tseg1) | RCANFD_CFG_BRP(brp) | RCANFD_CFG_SJW(sjw) | RCANFD_CFG_TSEG2(tseg2)); } rcar_canfd_write(priv->base, RCANFD_CCFG(ch), cfg); netdev_dbg(priv->ndev, "rate: brp %u, sjw %u, tseg1 %u, tseg2 %u\n", brp, sjw, tseg1, tseg2); } } static int rcar_canfd_start(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); struct rcar_canfd_global *gpriv = priv->gpriv; int err = -EOPNOTSUPP; u32 sts, ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; rcar_canfd_set_bittiming(ndev); rcar_canfd_enable_channel_interrupts(priv); /* Set channel to Operational mode */ rcar_canfd_update_bit(priv->base, RCANFD_CCTR(ch), RCANFD_CCTR_CHMDC_MASK, RCANFD_CCTR_CHDMC_COPM); /* Verify channel mode change */ err = readl_poll_timeout((priv->base + RCANFD_CSTS(ch)), sts, (sts & RCANFD_CSTS_COMSTS), 2, 500000); if (err) { netdev_err(ndev, "channel %u communication state failed\n", ch); goto fail_mode_change; } /* Enable Common & Rx FIFO */ rcar_canfd_set_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), RCANFD_CFCC_CFE); rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE); priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; fail_mode_change: rcar_canfd_disable_channel_interrupts(priv); return err; } static int rcar_canfd_open(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); struct rcar_canfd_global *gpriv = priv->gpriv; int err; err = phy_power_on(priv->transceiver); if (err) { netdev_err(ndev, "failed to power on PHY: %pe\n", ERR_PTR(err)); return err; } /* Peripheral clock is already enabled in probe */ err = clk_prepare_enable(gpriv->can_clk); if (err) { netdev_err(ndev, "failed to enable CAN clock: %pe\n", ERR_PTR(err)); goto out_phy; } err = open_candev(ndev); if (err) { netdev_err(ndev, "open_candev() failed: %pe\n", ERR_PTR(err)); goto out_can_clock; } napi_enable(&priv->napi); err = rcar_canfd_start(ndev); if (err) goto out_close; netif_start_queue(ndev); return 0; out_close: napi_disable(&priv->napi); close_candev(ndev); out_can_clock: clk_disable_unprepare(gpriv->can_clk); out_phy: phy_power_off(priv->transceiver); return err; } static void rcar_canfd_stop(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); struct rcar_canfd_global *gpriv = priv->gpriv; int err; u32 sts, ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; /* Transition to channel reset mode */ rcar_canfd_update_bit(priv->base, RCANFD_CCTR(ch), RCANFD_CCTR_CHMDC_MASK, RCANFD_CCTR_CHDMC_CRESET); /* Check Channel reset mode */ err = readl_poll_timeout((priv->base + RCANFD_CSTS(ch)), sts, (sts & RCANFD_CSTS_CRSTSTS), 2, 500000); if (err) netdev_err(ndev, "channel %u reset failed\n", ch); 
rcar_canfd_disable_channel_interrupts(priv); /* Disable Common & Rx FIFO */ rcar_canfd_clear_bit(priv->base, RCANFD_CFCC(gpriv, ch, RCANFD_CFFIFO_IDX), RCANFD_CFCC_CFE); rcar_canfd_clear_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFE); /* Set the state as STOPPED */ priv->can.state = CAN_STATE_STOPPED; } static int rcar_canfd_close(struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); struct rcar_canfd_global *gpriv = priv->gpriv; netif_stop_queue(ndev); rcar_canfd_stop(ndev); napi_disable(&priv->napi); clk_disable_unprepare(gpriv->can_clk); close_candev(ndev); phy_power_off(priv->transceiver); return 0; } static netdev_tx_t rcar_canfd_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct rcar_canfd_channel *priv = netdev_priv(ndev); struct rcar_canfd_global *gpriv = priv->gpriv; struct canfd_frame *cf = (struct canfd_frame *)skb->data; u32 sts = 0, id, dlc; unsigned long flags; u32 ch = priv->channel; if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; if (cf->can_id & CAN_EFF_FLAG) { id = cf->can_id & CAN_EFF_MASK; id |= RCANFD_CFID_CFIDE; } else { id = cf->can_id & CAN_SFF_MASK; } if (cf->can_id & CAN_RTR_FLAG) id |= RCANFD_CFID_CFRTR; dlc = RCANFD_CFPTR_CFDLC(can_fd_len2dlc(cf->len)); if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) { rcar_canfd_write(priv->base, RCANFD_F_CFID(gpriv, ch, RCANFD_CFFIFO_IDX), id); rcar_canfd_write(priv->base, RCANFD_F_CFPTR(gpriv, ch, RCANFD_CFFIFO_IDX), dlc); if (can_is_canfd_skb(skb)) { /* CAN FD frame format */ sts |= RCANFD_CFFDCSTS_CFFDF; if (cf->flags & CANFD_BRS) sts |= RCANFD_CFFDCSTS_CFBRS; if (priv->can.state == CAN_STATE_ERROR_PASSIVE) sts |= RCANFD_CFFDCSTS_CFESI; } rcar_canfd_write(priv->base, RCANFD_F_CFFDCSTS(gpriv, ch, RCANFD_CFFIFO_IDX), sts); rcar_canfd_put_data(priv, cf, RCANFD_F_CFDF(gpriv, ch, RCANFD_CFFIFO_IDX, 0)); } else { rcar_canfd_write(priv->base, RCANFD_C_CFID(ch, RCANFD_CFFIFO_IDX), id); rcar_canfd_write(priv->base, RCANFD_C_CFPTR(ch, RCANFD_CFFIFO_IDX), dlc); rcar_canfd_put_data(priv, cf, RCANFD_C_CFDF(ch, RCANFD_CFFIFO_IDX, 0)); } can_put_echo_skb(skb, ndev, priv->tx_head % RCANFD_FIFO_DEPTH, 0); spin_lock_irqsave(&priv->tx_lock, flags); priv->tx_head++; /* Stop the queue if we've filled all FIFO entries */ if (priv->tx_head - priv->tx_tail >= RCANFD_FIFO_DEPTH) netif_stop_queue(ndev); /* Start Tx: Write 0xff to CFPC to increment the CPU-side * pointer for the Common FIFO */ rcar_canfd_write(priv->base, RCANFD_CFPCTR(gpriv, ch, RCANFD_CFFIFO_IDX), 0xff); spin_unlock_irqrestore(&priv->tx_lock, flags); return NETDEV_TX_OK; } static void rcar_canfd_rx_pkt(struct rcar_canfd_channel *priv) { struct net_device_stats *stats = &priv->ndev->stats; struct rcar_canfd_global *gpriv = priv->gpriv; struct canfd_frame *cf; struct sk_buff *skb; u32 sts = 0, id, dlc; u32 ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) || is_gen4(gpriv)) { id = rcar_canfd_read(priv->base, RCANFD_F_RFID(gpriv, ridx)); dlc = rcar_canfd_read(priv->base, RCANFD_F_RFPTR(gpriv, ridx)); sts = rcar_canfd_read(priv->base, RCANFD_F_RFFDSTS(gpriv, ridx)); if ((priv->can.ctrlmode & CAN_CTRLMODE_FD) && sts & RCANFD_RFFDSTS_RFFDF) skb = alloc_canfd_skb(priv->ndev, &cf); else skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf); } else { id = rcar_canfd_read(priv->base, RCANFD_C_RFID(ridx)); dlc = rcar_canfd_read(priv->base, RCANFD_C_RFPTR(ridx)); skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf); } if (!skb) { stats->rx_dropped++; return; } if 
(id & RCANFD_RFID_RFIDE) cf->can_id = (id & CAN_EFF_MASK) | CAN_EFF_FLAG; else cf->can_id = id & CAN_SFF_MASK; if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { if (sts & RCANFD_RFFDSTS_RFFDF) cf->len = can_fd_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); else cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); if (sts & RCANFD_RFFDSTS_RFESI) { cf->flags |= CANFD_ESI; netdev_dbg(priv->ndev, "ESI Error\n"); } if (!(sts & RCANFD_RFFDSTS_RFFDF) && (id & RCANFD_RFID_RFRTR)) { cf->can_id |= CAN_RTR_FLAG; } else { if (sts & RCANFD_RFFDSTS_RFBRS) cf->flags |= CANFD_BRS; rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0)); } } else { cf->len = can_cc_dlc2len(RCANFD_RFPTR_RFDLC(dlc)); if (id & RCANFD_RFID_RFRTR) cf->can_id |= CAN_RTR_FLAG; else if (is_gen4(gpriv)) rcar_canfd_get_data(priv, cf, RCANFD_F_RFDF(gpriv, ridx, 0)); else rcar_canfd_get_data(priv, cf, RCANFD_C_RFDF(ridx, 0)); } /* Write 0xff to RFPC to increment the CPU-side * pointer of the Rx FIFO */ rcar_canfd_write(priv->base, RCANFD_RFPCTR(gpriv, ridx), 0xff); if (!(cf->can_id & CAN_RTR_FLAG)) stats->rx_bytes += cf->len; stats->rx_packets++; netif_receive_skb(skb); } static int rcar_canfd_rx_poll(struct napi_struct *napi, int quota) { struct rcar_canfd_channel *priv = container_of(napi, struct rcar_canfd_channel, napi); struct rcar_canfd_global *gpriv = priv->gpriv; int num_pkts; u32 sts; u32 ch = priv->channel; u32 ridx = ch + RCANFD_RFFIFO_IDX; for (num_pkts = 0; num_pkts < quota; num_pkts++) { sts = rcar_canfd_read(priv->base, RCANFD_RFSTS(gpriv, ridx)); /* Check FIFO empty condition */ if (sts & RCANFD_RFSTS_RFEMP) break; rcar_canfd_rx_pkt(priv); /* Clear interrupt bit */ if (sts & RCANFD_RFSTS_RFIF) rcar_canfd_write(priv->base, RCANFD_RFSTS(gpriv, ridx), sts & ~RCANFD_RFSTS_RFIF); } /* All packets processed */ if (num_pkts < quota) { if (napi_complete_done(napi, num_pkts)) { /* Enable Rx FIFO interrupts */ rcar_canfd_set_bit(priv->base, RCANFD_RFCC(gpriv, ridx), RCANFD_RFCC_RFIE); } } return num_pkts; } static int rcar_canfd_do_set_mode(struct net_device *ndev, enum can_mode mode) { int err; switch (mode) { case CAN_MODE_START: err = rcar_canfd_start(ndev); if (err) return err; netif_wake_queue(ndev); return 0; default: return -EOPNOTSUPP; } } static int rcar_canfd_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct rcar_canfd_channel *priv = netdev_priv(dev); u32 val, ch = priv->channel; /* Peripheral clock is already enabled in probe */ val = rcar_canfd_read(priv->base, RCANFD_CSTS(ch)); bec->txerr = RCANFD_CSTS_TECCNT(val); bec->rxerr = RCANFD_CSTS_RECCNT(val); return 0; } static const struct net_device_ops rcar_canfd_netdev_ops = { .ndo_open = rcar_canfd_open, .ndo_stop = rcar_canfd_close, .ndo_start_xmit = rcar_canfd_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops rcar_canfd_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static int rcar_canfd_channel_probe(struct rcar_canfd_global *gpriv, u32 ch, u32 fcan_freq, struct phy *transceiver) { const struct rcar_canfd_hw_info *info = gpriv->info; struct platform_device *pdev = gpriv->pdev; struct device *dev = &pdev->dev; struct rcar_canfd_channel *priv; struct net_device *ndev; int err = -ENODEV; ndev = alloc_candev(sizeof(*priv), RCANFD_FIFO_DEPTH); if (!ndev) return -ENOMEM; priv = netdev_priv(ndev); ndev->netdev_ops = &rcar_canfd_netdev_ops; ndev->ethtool_ops = &rcar_canfd_ethtool_ops; ndev->flags |= IFF_ECHO; priv->ndev = ndev; priv->base = gpriv->base; priv->transceiver = transceiver; priv->channel = ch; 
priv->gpriv = gpriv; if (transceiver) priv->can.bitrate_max = transceiver->attrs.max_link_rate; priv->can.clock.freq = fcan_freq; dev_info(dev, "can_clk rate is %u\n", priv->can.clock.freq); if (info->multi_channel_irqs) { char *irq_name; int err_irq; int tx_irq; err_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_err" : "ch1_err"); if (err_irq < 0) { err = err_irq; goto fail; } tx_irq = platform_get_irq_byname(pdev, ch == 0 ? "ch0_trx" : "ch1_trx"); if (tx_irq < 0) { err = tx_irq; goto fail; } irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_err", ch); if (!irq_name) { err = -ENOMEM; goto fail; } err = devm_request_irq(dev, err_irq, rcar_canfd_channel_err_interrupt, 0, irq_name, priv); if (err) { dev_err(dev, "devm_request_irq CH Err %d failed: %pe\n", err_irq, ERR_PTR(err)); goto fail; } irq_name = devm_kasprintf(dev, GFP_KERNEL, "canfd.ch%d_trx", ch); if (!irq_name) { err = -ENOMEM; goto fail; } err = devm_request_irq(dev, tx_irq, rcar_canfd_channel_tx_interrupt, 0, irq_name, priv); if (err) { dev_err(dev, "devm_request_irq Tx %d failed: %pe\n", tx_irq, ERR_PTR(err)); goto fail; } } if (gpriv->fdmode) { priv->can.bittiming_const = &rcar_canfd_nom_bittiming_const; priv->can.data_bittiming_const = &rcar_canfd_data_bittiming_const; /* Controller starts in CAN FD only mode */ err = can_set_static_ctrlmode(ndev, CAN_CTRLMODE_FD); if (err) goto fail; priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; } else { /* Controller starts in Classical CAN only mode */ priv->can.bittiming_const = &rcar_canfd_bittiming_const; priv->can.ctrlmode_supported = CAN_CTRLMODE_BERR_REPORTING; } priv->can.do_set_mode = rcar_canfd_do_set_mode; priv->can.do_get_berr_counter = rcar_canfd_get_berr_counter; SET_NETDEV_DEV(ndev, dev); netif_napi_add_weight(ndev, &priv->napi, rcar_canfd_rx_poll, RCANFD_NAPI_WEIGHT); spin_lock_init(&priv->tx_lock); gpriv->ch[priv->channel] = priv; err = register_candev(ndev); if (err) { dev_err(dev, "register_candev() failed: %pe\n", ERR_PTR(err)); goto fail_candev; } dev_info(dev, "device registered (channel %u)\n", priv->channel); return 0; fail_candev: netif_napi_del(&priv->napi); fail: free_candev(ndev); return err; } static void rcar_canfd_channel_remove(struct rcar_canfd_global *gpriv, u32 ch) { struct rcar_canfd_channel *priv = gpriv->ch[ch]; if (priv) { unregister_candev(priv->ndev); netif_napi_del(&priv->napi); free_candev(priv->ndev); } } static int rcar_canfd_probe(struct platform_device *pdev) { struct phy *transceivers[RCANFD_NUM_CHANNELS] = { NULL, }; const struct rcar_canfd_hw_info *info; struct device *dev = &pdev->dev; void __iomem *addr; u32 sts, ch, fcan_freq; struct rcar_canfd_global *gpriv; struct device_node *of_child; unsigned long channels_mask = 0; int err, ch_irq, g_irq; int g_err_irq, g_recc_irq; bool fdmode = true; /* CAN FD only mode - default */ char name[9] = "channelX"; int i; info = of_device_get_match_data(dev); if (of_property_read_bool(dev->of_node, "renesas,no-can-fd")) fdmode = false; /* Classical CAN only mode */ for (i = 0; i < info->max_channels; ++i) { name[7] = '0' + i; of_child = of_get_child_by_name(dev->of_node, name); if (of_child && of_device_is_available(of_child)) { channels_mask |= BIT(i); transceivers[i] = devm_of_phy_optional_get(dev, of_child, NULL); } of_node_put(of_child); if (IS_ERR(transceivers[i])) return PTR_ERR(transceivers[i]); } if (info->shared_global_irqs) { ch_irq = platform_get_irq_byname_optional(pdev, "ch_int"); if (ch_irq < 0) { /* For backward compatibility get irq by index */ ch_irq = 
platform_get_irq(pdev, 0); if (ch_irq < 0) return ch_irq; } g_irq = platform_get_irq_byname_optional(pdev, "g_int"); if (g_irq < 0) { /* For backward compatibility get irq by index */ g_irq = platform_get_irq(pdev, 1); if (g_irq < 0) return g_irq; } } else { g_err_irq = platform_get_irq_byname(pdev, "g_err"); if (g_err_irq < 0) return g_err_irq; g_recc_irq = platform_get_irq_byname(pdev, "g_recc"); if (g_recc_irq < 0) return g_recc_irq; } /* Global controller context */ gpriv = devm_kzalloc(dev, sizeof(*gpriv), GFP_KERNEL); if (!gpriv) return -ENOMEM; gpriv->pdev = pdev; gpriv->channels_mask = channels_mask; gpriv->fdmode = fdmode; gpriv->info = info; gpriv->rstc1 = devm_reset_control_get_optional_exclusive(dev, "rstp_n"); if (IS_ERR(gpriv->rstc1)) return dev_err_probe(dev, PTR_ERR(gpriv->rstc1), "failed to get rstp_n\n"); gpriv->rstc2 = devm_reset_control_get_optional_exclusive(dev, "rstc_n"); if (IS_ERR(gpriv->rstc2)) return dev_err_probe(dev, PTR_ERR(gpriv->rstc2), "failed to get rstc_n\n"); /* Peripheral clock */ gpriv->clkp = devm_clk_get(dev, "fck"); if (IS_ERR(gpriv->clkp)) return dev_err_probe(dev, PTR_ERR(gpriv->clkp), "cannot get peripheral clock\n"); /* fCAN clock: Pick External clock. If not available fallback to * CANFD clock */ gpriv->can_clk = devm_clk_get(dev, "can_clk"); if (IS_ERR(gpriv->can_clk) || (clk_get_rate(gpriv->can_clk) == 0)) { gpriv->can_clk = devm_clk_get(dev, "canfd"); if (IS_ERR(gpriv->can_clk)) return dev_err_probe(dev, PTR_ERR(gpriv->can_clk), "cannot get canfd clock\n"); gpriv->fcan = RCANFD_CANFDCLK; } else { gpriv->fcan = RCANFD_EXTCLK; } fcan_freq = clk_get_rate(gpriv->can_clk); if (gpriv->fcan == RCANFD_CANFDCLK) /* CANFD clock is further divided by (1/2) within the IP */ fcan_freq /= info->postdiv; addr = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(addr)) { err = PTR_ERR(addr); goto fail_dev; } gpriv->base = addr; /* Request IRQ that's common for both channels */ if (info->shared_global_irqs) { err = devm_request_irq(dev, ch_irq, rcar_canfd_channel_interrupt, 0, "canfd.ch_int", gpriv); if (err) { dev_err(dev, "devm_request_irq %d failed: %pe\n", ch_irq, ERR_PTR(err)); goto fail_dev; } err = devm_request_irq(dev, g_irq, rcar_canfd_global_interrupt, 0, "canfd.g_int", gpriv); if (err) { dev_err(dev, "devm_request_irq %d failed: %pe\n", g_irq, ERR_PTR(err)); goto fail_dev; } } else { err = devm_request_irq(dev, g_recc_irq, rcar_canfd_global_receive_fifo_interrupt, 0, "canfd.g_recc", gpriv); if (err) { dev_err(dev, "devm_request_irq %d failed: %pe\n", g_recc_irq, ERR_PTR(err)); goto fail_dev; } err = devm_request_irq(dev, g_err_irq, rcar_canfd_global_err_interrupt, 0, "canfd.g_err", gpriv); if (err) { dev_err(dev, "devm_request_irq %d failed: %pe\n", g_err_irq, ERR_PTR(err)); goto fail_dev; } } err = reset_control_reset(gpriv->rstc1); if (err) goto fail_dev; err = reset_control_reset(gpriv->rstc2); if (err) { reset_control_assert(gpriv->rstc1); goto fail_dev; } /* Enable peripheral clock for register access */ err = clk_prepare_enable(gpriv->clkp); if (err) { dev_err(dev, "failed to enable peripheral clock: %pe\n", ERR_PTR(err)); goto fail_reset; } err = rcar_canfd_reset_controller(gpriv); if (err) { dev_err(dev, "reset controller failed: %pe\n", ERR_PTR(err)); goto fail_clk; } /* Controller in Global reset & Channel reset mode */ rcar_canfd_configure_controller(gpriv); /* Configure per channel attributes */ for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) { /* Configure Channel's Rx fifo */ rcar_canfd_configure_rx(gpriv, ch); /* 
Configure Channel's Tx (Common) fifo */ rcar_canfd_configure_tx(gpriv, ch); /* Configure receive rules */ rcar_canfd_configure_afl_rules(gpriv, ch); } /* Configure common interrupts */ rcar_canfd_enable_global_interrupts(gpriv); /* Start Global operation mode */ rcar_canfd_update_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GMDC_MASK, RCANFD_GCTR_GMDC_GOPM); /* Verify mode change */ err = readl_poll_timeout((gpriv->base + RCANFD_GSTS), sts, !(sts & RCANFD_GSTS_GNOPM), 2, 500000); if (err) { dev_err(dev, "global operational mode failed\n"); goto fail_mode; } for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) { err = rcar_canfd_channel_probe(gpriv, ch, fcan_freq, transceivers[ch]); if (err) goto fail_channel; } platform_set_drvdata(pdev, gpriv); dev_info(dev, "global operational state (clk %d, fdmode %d)\n", gpriv->fcan, gpriv->fdmode); return 0; fail_channel: for_each_set_bit(ch, &gpriv->channels_mask, info->max_channels) rcar_canfd_channel_remove(gpriv, ch); fail_mode: rcar_canfd_disable_global_interrupts(gpriv); fail_clk: clk_disable_unprepare(gpriv->clkp); fail_reset: reset_control_assert(gpriv->rstc1); reset_control_assert(gpriv->rstc2); fail_dev: return err; } static void rcar_canfd_remove(struct platform_device *pdev) { struct rcar_canfd_global *gpriv = platform_get_drvdata(pdev); u32 ch; rcar_canfd_reset_controller(gpriv); rcar_canfd_disable_global_interrupts(gpriv); for_each_set_bit(ch, &gpriv->channels_mask, gpriv->info->max_channels) { rcar_canfd_disable_channel_interrupts(gpriv->ch[ch]); rcar_canfd_channel_remove(gpriv, ch); } /* Enter global sleep mode */ rcar_canfd_set_bit(gpriv->base, RCANFD_GCTR, RCANFD_GCTR_GSLPR); clk_disable_unprepare(gpriv->clkp); reset_control_assert(gpriv->rstc1); reset_control_assert(gpriv->rstc2); } static int __maybe_unused rcar_canfd_suspend(struct device *dev) { return 0; } static int __maybe_unused rcar_canfd_resume(struct device *dev) { return 0; } static SIMPLE_DEV_PM_OPS(rcar_canfd_pm_ops, rcar_canfd_suspend, rcar_canfd_resume); static const __maybe_unused struct of_device_id rcar_canfd_of_table[] = { { .compatible = "renesas,r8a779a0-canfd", .data = &rcar_gen4_hw_info }, { .compatible = "renesas,rcar-gen3-canfd", .data = &rcar_gen3_hw_info }, { .compatible = "renesas,rcar-gen4-canfd", .data = &rcar_gen4_hw_info }, { .compatible = "renesas,rzg2l-canfd", .data = &rzg2l_hw_info }, { } }; MODULE_DEVICE_TABLE(of, rcar_canfd_of_table); static struct platform_driver rcar_canfd_driver = { .driver = { .name = RCANFD_DRV_NAME, .of_match_table = of_match_ptr(rcar_canfd_of_table), .pm = &rcar_canfd_pm_ops, }, .probe = rcar_canfd_probe, .remove_new = rcar_canfd_remove, }; module_platform_driver(rcar_canfd_driver); MODULE_AUTHOR("Ramesh Shanmugasundaram <[email protected]>"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("CAN FD driver for Renesas R-Car SoC"); MODULE_ALIAS("platform:" RCANFD_DRV_NAME);
linux-master
drivers/net/can/rcar/rcar_canfd.c
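The probe above prefers the external "can_clk" and only falls back to the internal "canfd" clock, whose rate the IP divides by the per-SoC postdiv before it reaches the protocol engine. Below is a minimal sketch of that selection in isolation; the names fcan_pick_clock and struct fcan_clk_pick are hypothetical, while the clock-consumer calls are the standard ones the driver itself uses.

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

struct fcan_clk_pick {
	struct clk *clk;	/* the clock that was selected */
	unsigned long freq;	/* effective fCAN frequency in Hz */
	bool external;		/* true when "can_clk" was usable */
};

static int fcan_pick_clock(struct device *dev, unsigned int postdiv,
			   struct fcan_clk_pick *out)
{
	/* Prefer the external clock when it exists and is running. */
	out->clk = devm_clk_get(dev, "can_clk");
	if (!IS_ERR(out->clk) && clk_get_rate(out->clk)) {
		out->external = true;
		out->freq = clk_get_rate(out->clk);
		return 0;
	}

	/* Otherwise fall back to the internal CANFD clock ... */
	out->clk = devm_clk_get(dev, "canfd");
	if (IS_ERR(out->clk))
		return PTR_ERR(out->clk);

	out->external = false;
	/* ... whose rate is divided inside the IP before use. */
	out->freq = clk_get_rate(out->clk) / postdiv;
	return 0;
}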
/* * Platform CAN bus driver for Bosch C_CAN controller * * Copyright (C) 2010 ST Microelectronics * Bhupesh Sharma <[email protected]> * * Borrowed heavily from the C_CAN driver originally written by: * Copyright (C) 2007 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <[email protected]> * - Simon Kallweit, intefo AG <[email protected]> * * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B. * Bosch C_CAN user manual can be obtained from: * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/ * users_manual_c_can.pdf * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/list.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/clk.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/mfd/syscon.h> #include <linux/regmap.h> #include <linux/can/dev.h> #include "c_can.h" #define DCAN_RAM_INIT_BIT BIT(3) static DEFINE_SPINLOCK(raminit_lock); /* 16-bit c_can registers can be arranged differently in the memory * architecture of different implementations. For example: 16-bit * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. * Handle the same by providing a common read/write interface. */ static u16 c_can_plat_read_reg_aligned_to_16bit(const struct c_can_priv *priv, enum reg index) { return readw(priv->base + priv->regs[index]); } static void c_can_plat_write_reg_aligned_to_16bit(const struct c_can_priv *priv, enum reg index, u16 val) { writew(val, priv->base + priv->regs[index]); } static u16 c_can_plat_read_reg_aligned_to_32bit(const struct c_can_priv *priv, enum reg index) { return readw(priv->base + 2 * priv->regs[index]); } static void c_can_plat_write_reg_aligned_to_32bit(const struct c_can_priv *priv, enum reg index, u16 val) { writew(val, priv->base + 2 * priv->regs[index]); } static void c_can_hw_raminit_wait_syscon(const struct c_can_priv *priv, u32 mask, u32 val) { const struct c_can_raminit *raminit = &priv->raminit_sys; int timeout = 0; u32 ctrl = 0; /* We look only at the bits of our instance. */ val &= mask; do { udelay(1); timeout++; regmap_read(raminit->syscon, raminit->reg, &ctrl); if (timeout == 1000) { dev_err(&priv->dev->dev, "%s: time out\n", __func__); break; } } while ((ctrl & mask) != val); } static void c_can_hw_raminit_syscon(const struct c_can_priv *priv, bool enable) { const struct c_can_raminit *raminit = &priv->raminit_sys; u32 ctrl = 0; u32 mask; spin_lock(&raminit_lock); mask = 1 << raminit->bits.start | 1 << raminit->bits.done; regmap_read(raminit->syscon, raminit->reg, &ctrl); /* We clear the start bit first. The start bit is * looking at the 0 -> transition, but is not self clearing; * NOTE: DONE must be written with 1 to clear it. * We can't clear the DONE bit here using regmap_update_bits() * as it will bypass the write if initial condition is START:0 DONE:1 * e.g. on DRA7 which needs START pulse. */ ctrl &= ~mask; /* START = 0, DONE = 0 */ regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl); /* check if START bit is 0. Ignore DONE bit for now * as it can be either 0 or 1. 
*/ c_can_hw_raminit_wait_syscon(priv, 1 << raminit->bits.start, ctrl); if (enable) { /* Clear DONE bit & set START bit. */ ctrl |= 1 << raminit->bits.start; /* DONE must be written with 1 to clear it */ ctrl |= 1 << raminit->bits.done; regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl); /* prevent further clearing of DONE bit */ ctrl &= ~(1 << raminit->bits.done); /* clear START bit if start pulse is needed */ if (raminit->needs_pulse) { ctrl &= ~(1 << raminit->bits.start); regmap_update_bits(raminit->syscon, raminit->reg, mask, ctrl); } ctrl |= 1 << raminit->bits.done; c_can_hw_raminit_wait_syscon(priv, mask, ctrl); } spin_unlock(&raminit_lock); } static u32 c_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index) { u32 val; val = priv->read_reg(priv, index); val |= ((u32)priv->read_reg(priv, index + 1)) << 16; return val; } static void c_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index, u32 val) { priv->write_reg(priv, index + 1, val >> 16); priv->write_reg(priv, index, val); } static u32 d_can_plat_read_reg32(const struct c_can_priv *priv, enum reg index) { return readl(priv->base + priv->regs[index]); } static void d_can_plat_write_reg32(const struct c_can_priv *priv, enum reg index, u32 val) { writel(val, priv->base + priv->regs[index]); } static void c_can_hw_raminit_wait(const struct c_can_priv *priv, u32 mask) { while (priv->read_reg32(priv, C_CAN_FUNCTION_REG) & mask) udelay(1); } static void c_can_hw_raminit(const struct c_can_priv *priv, bool enable) { u32 ctrl; ctrl = priv->read_reg32(priv, C_CAN_FUNCTION_REG); ctrl &= ~DCAN_RAM_INIT_BIT; priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl); c_can_hw_raminit_wait(priv, ctrl); if (enable) { ctrl |= DCAN_RAM_INIT_BIT; priv->write_reg32(priv, C_CAN_FUNCTION_REG, ctrl); c_can_hw_raminit_wait(priv, ctrl); } } static const struct c_can_driver_data c_can_drvdata = { .id = BOSCH_C_CAN, .msg_obj_num = 32, }; static const struct c_can_driver_data d_can_drvdata = { .id = BOSCH_D_CAN, .msg_obj_num = 32, }; static const struct raminit_bits dra7_raminit_bits[] = { [0] = { .start = 3, .done = 1, }, [1] = { .start = 5, .done = 2, }, }; static const struct c_can_driver_data dra7_dcan_drvdata = { .id = BOSCH_D_CAN, .msg_obj_num = 64, .raminit_num = ARRAY_SIZE(dra7_raminit_bits), .raminit_bits = dra7_raminit_bits, .raminit_pulse = true, }; static const struct raminit_bits am3352_raminit_bits[] = { [0] = { .start = 0, .done = 8, }, [1] = { .start = 1, .done = 9, }, }; static const struct c_can_driver_data am3352_dcan_drvdata = { .id = BOSCH_D_CAN, .msg_obj_num = 64, .raminit_num = ARRAY_SIZE(am3352_raminit_bits), .raminit_bits = am3352_raminit_bits, }; static const struct platform_device_id c_can_id_table[] = { { .name = KBUILD_MODNAME, .driver_data = (kernel_ulong_t)&c_can_drvdata, }, { .name = "c_can", .driver_data = (kernel_ulong_t)&c_can_drvdata, }, { .name = "d_can", .driver_data = (kernel_ulong_t)&d_can_drvdata, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(platform, c_can_id_table); static const struct of_device_id c_can_of_table[] = { { .compatible = "bosch,c_can", .data = &c_can_drvdata }, { .compatible = "bosch,d_can", .data = &d_can_drvdata }, { .compatible = "ti,dra7-d_can", .data = &dra7_dcan_drvdata }, { .compatible = "ti,am3352-d_can", .data = &am3352_dcan_drvdata }, { .compatible = "ti,am4372-d_can", .data = &am3352_dcan_drvdata }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, c_can_of_table); static int c_can_plat_probe(struct platform_device *pdev) { int ret; void __iomem *addr; struct 
net_device *dev; struct c_can_priv *priv; const struct of_device_id *match; struct resource *mem; int irq; struct clk *clk; const struct c_can_driver_data *drvdata; struct device_node *np = pdev->dev.of_node; match = of_match_device(c_can_of_table, &pdev->dev); if (match) { drvdata = match->data; } else if (pdev->id_entry->driver_data) { drvdata = (struct c_can_driver_data *) platform_get_device_id(pdev)->driver_data; } else { return -ENODEV; } /* get the appropriate clk */ clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) { ret = PTR_ERR(clk); goto exit; } /* get the platform data */ irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = irq; goto exit; } addr = devm_platform_get_and_ioremap_resource(pdev, 0, &mem); if (IS_ERR(addr)) { ret = PTR_ERR(addr); goto exit; } /* allocate the c_can device */ dev = alloc_c_can_dev(drvdata->msg_obj_num); if (!dev) { ret = -ENOMEM; goto exit; } priv = netdev_priv(dev); switch (drvdata->id) { case BOSCH_C_CAN: priv->regs = reg_map_c_can; switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) { case IORESOURCE_MEM_32BIT: priv->read_reg = c_can_plat_read_reg_aligned_to_32bit; priv->write_reg = c_can_plat_write_reg_aligned_to_32bit; priv->read_reg32 = c_can_plat_read_reg32; priv->write_reg32 = c_can_plat_write_reg32; break; case IORESOURCE_MEM_16BIT: default: priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; priv->read_reg32 = c_can_plat_read_reg32; priv->write_reg32 = c_can_plat_write_reg32; break; } break; case BOSCH_D_CAN: priv->regs = reg_map_d_can; priv->read_reg = c_can_plat_read_reg_aligned_to_16bit; priv->write_reg = c_can_plat_write_reg_aligned_to_16bit; priv->read_reg32 = d_can_plat_read_reg32; priv->write_reg32 = d_can_plat_write_reg32; /* Check if we need custom RAMINIT via syscon. Mostly for TI * platforms. Only supported with DT boot. */ if (np && of_property_read_bool(np, "syscon-raminit")) { u32 id; struct c_can_raminit *raminit = &priv->raminit_sys; ret = -EINVAL; raminit->syscon = syscon_regmap_lookup_by_phandle(np, "syscon-raminit"); if (IS_ERR(raminit->syscon)) { /* can fail with -EPROBE_DEFER */ ret = PTR_ERR(raminit->syscon); free_c_can_dev(dev); return ret; } if (of_property_read_u32_index(np, "syscon-raminit", 1, &raminit->reg)) { dev_err(&pdev->dev, "couldn't get the RAMINIT reg. 
offset!\n"); goto exit_free_device; } if (of_property_read_u32_index(np, "syscon-raminit", 2, &id)) { dev_err(&pdev->dev, "couldn't get the CAN instance ID\n"); goto exit_free_device; } if (id >= drvdata->raminit_num) { dev_err(&pdev->dev, "Invalid CAN instance ID\n"); goto exit_free_device; } raminit->bits = drvdata->raminit_bits[id]; raminit->needs_pulse = drvdata->raminit_pulse; priv->raminit = c_can_hw_raminit_syscon; } else { priv->raminit = c_can_hw_raminit; } break; default: ret = -EINVAL; goto exit_free_device; } dev->irq = irq; priv->base = addr; priv->device = &pdev->dev; priv->can.clock.freq = clk_get_rate(clk); priv->type = drvdata->id; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); pm_runtime_enable(priv->device); ret = register_c_can_dev(dev); if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", KBUILD_MODNAME, ret); goto exit_free_device; } dev_info(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", KBUILD_MODNAME, priv->base, dev->irq); return 0; exit_free_device: pm_runtime_disable(priv->device); free_c_can_dev(dev); exit: dev_err(&pdev->dev, "probe failed\n"); return ret; } static void c_can_plat_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(dev); unregister_c_can_dev(dev); pm_runtime_disable(priv->device); free_c_can_dev(dev); } #ifdef CONFIG_PM static int c_can_suspend(struct platform_device *pdev, pm_message_t state) { int ret; struct net_device *ndev = platform_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(ndev); if (priv->type != BOSCH_D_CAN) { dev_warn(&pdev->dev, "Not supported\n"); return 0; } if (netif_running(ndev)) { netif_stop_queue(ndev); netif_device_detach(ndev); } ret = c_can_power_down(ndev); if (ret) { netdev_err(ndev, "failed to enter power down mode\n"); return ret; } priv->can.state = CAN_STATE_SLEEPING; return 0; } static int c_can_resume(struct platform_device *pdev) { int ret; struct net_device *ndev = platform_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(ndev); if (priv->type != BOSCH_D_CAN) { dev_warn(&pdev->dev, "Not supported\n"); return 0; } ret = c_can_power_up(ndev); if (ret) { netdev_err(ndev, "Still in power down mode\n"); return ret; } priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(ndev)) { netif_device_attach(ndev); netif_start_queue(ndev); } return 0; } #else #define c_can_suspend NULL #define c_can_resume NULL #endif static struct platform_driver c_can_plat_driver = { .driver = { .name = KBUILD_MODNAME, .of_match_table = c_can_of_table, }, .probe = c_can_plat_probe, .remove_new = c_can_plat_remove, .suspend = c_can_suspend, .resume = c_can_resume, .id_table = c_can_id_table, }; module_platform_driver(c_can_plat_driver); MODULE_AUTHOR("Bhupesh Sharma <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Platform CAN bus driver for Bosch C_CAN controller");
linux-master
drivers/net/can/c_can/c_can_platform.c
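c_can_platform.c installs paired 16-bit accessors because implementations place the 16-bit C_CAN registers either packed on 16-bit boundaries or spread on 32-bit boundaries, so a register's byte offset is either the table offset or twice it. A condensed sketch of the same dispatch, with hypothetical names (ccan_reg_layout, ccan_read16, ccan_write16):

#include <linux/io.h>
#include <linux/types.h>

enum ccan_reg_layout {
	CCAN_REGS_16BIT,	/* registers packed on 16-bit boundaries */
	CCAN_REGS_32BIT,	/* registers aligned to 32-bit boundaries */
};

static u16 ccan_read16(void __iomem *base, unsigned int off,
		       enum ccan_reg_layout layout)
{
	/* On the 32-bit layout each 16-bit register occupies 4 bytes,
	 * so the byte offset from the register table is doubled.
	 */
	return readw(base + (layout == CCAN_REGS_32BIT ? 2 * off : off));
}

static void ccan_write16(void __iomem *base, unsigned int off,
			 enum ccan_reg_layout layout, u16 val)
{
	writew(val, base + (layout == CCAN_REGS_32BIT ? 2 * off : off));
}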
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright 2021, Dario Binacchi <[email protected]> */ #include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/netdevice.h> #include <linux/can/dev.h> #include "c_can.h" static void c_can_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct c_can_priv *priv = netdev_priv(netdev); ring->rx_max_pending = priv->msg_obj_num; ring->tx_max_pending = priv->msg_obj_num; ring->rx_pending = priv->msg_obj_rx_num; ring->tx_pending = priv->msg_obj_tx_num; } const struct ethtool_ops c_can_ethtool_ops = { .get_ringparam = c_can_get_ringparam, .get_ts_info = ethtool_op_get_ts_info, };
linux-master
drivers/net/can/c_can/c_can_ethtool.c
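The get_ringparam callback above is what services an ETHTOOL_GRINGPARAM request, e.g. from "ethtool -g can0". A minimal userspace sketch of that query through the SIOCETHTOOL ioctl follows; the interface name "can0" is an assumption.

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <net/if.h>
#include <linux/ethtool.h>
#include <linux/sockios.h>

int main(void)
{
	struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
	struct ifreq ifr;
	int fd, ret = 1;

	memset(&ifr, 0, sizeof(ifr));
	strncpy(ifr.ifr_name, "can0", IFNAMSIZ - 1);	/* assumed ifname */
	ifr.ifr_data = (void *)&ring;

	fd = socket(AF_INET, SOCK_DGRAM, 0);	/* any socket works for ethtool */
	if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0) {
		/* Mirrors what c_can_get_ringparam() filled in. */
		printf("rx %u/%u tx %u/%u\n",
		       ring.rx_pending, ring.rx_max_pending,
		       ring.tx_pending, ring.tx_max_pending);
		ret = 0;
	}
	if (fd >= 0)
		close(fd);
	return ret;
}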
/* * CAN bus driver for Bosch C_CAN controller * * Copyright (C) 2010 ST Microelectronics * Bhupesh Sharma <[email protected]> * * Borrowed heavily from the C_CAN driver originally written by: * Copyright (C) 2007 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix <[email protected]> * - Simon Kallweit, intefo AG <[email protected]> * * TX and RX NAPI implementation has been borrowed from at91 CAN driver * written by: * Copyright * (C) 2007 by Hans J. Koch <[email protected]> * (C) 2008, 2009 by Marc Kleine-Budde <[email protected]> * * Bosch C_CAN controller is compliant to CAN protocol version 2.0 part A and B. * Bosch C_CAN user manual can be obtained from: * http://www.semiconductors.bosch.de/media/en/pdf/ipmodules_1/c_can/ * users_manual_c_can.pdf * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/list.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <linux/pinctrl/consumer.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include "c_can.h" /* Number of interface registers */ #define IF_ENUM_REG_LEN 11 #define C_CAN_IFACE(reg, iface) (C_CAN_IF1_##reg + (iface) * IF_ENUM_REG_LEN) /* control extension register D_CAN specific */ #define CONTROL_EX_PDR BIT(8) /* control register */ #define CONTROL_SWR BIT(15) #define CONTROL_TEST BIT(7) #define CONTROL_CCE BIT(6) #define CONTROL_DISABLE_AR BIT(5) #define CONTROL_ENABLE_AR (0 << 5) #define CONTROL_EIE BIT(3) #define CONTROL_SIE BIT(2) #define CONTROL_IE BIT(1) #define CONTROL_INIT BIT(0) #define CONTROL_IRQMSK (CONTROL_EIE | CONTROL_IE | CONTROL_SIE) /* test register */ #define TEST_RX BIT(7) #define TEST_TX1 BIT(6) #define TEST_TX2 BIT(5) #define TEST_LBACK BIT(4) #define TEST_SILENT BIT(3) #define TEST_BASIC BIT(2) /* status register */ #define STATUS_PDA BIT(10) #define STATUS_BOFF BIT(7) #define STATUS_EWARN BIT(6) #define STATUS_EPASS BIT(5) #define STATUS_RXOK BIT(4) #define STATUS_TXOK BIT(3) /* error counter register */ #define ERR_CNT_TEC_MASK 0xff #define ERR_CNT_TEC_SHIFT 0 #define ERR_CNT_REC_SHIFT 8 #define ERR_CNT_REC_MASK (0x7f << ERR_CNT_REC_SHIFT) #define ERR_CNT_RP_SHIFT 15 #define ERR_CNT_RP_MASK (0x1 << ERR_CNT_RP_SHIFT) /* bit-timing register */ #define BTR_BRP_MASK 0x3f #define BTR_BRP_SHIFT 0 #define BTR_SJW_SHIFT 6 #define BTR_SJW_MASK (0x3 << BTR_SJW_SHIFT) #define BTR_TSEG1_SHIFT 8 #define BTR_TSEG1_MASK (0xf << BTR_TSEG1_SHIFT) #define BTR_TSEG2_SHIFT 12 #define BTR_TSEG2_MASK (0x7 << BTR_TSEG2_SHIFT) /* interrupt register */ #define INT_STS_PENDING 0x8000 /* brp extension register */ #define BRP_EXT_BRPE_MASK 0x0f #define BRP_EXT_BRPE_SHIFT 0 /* IFx command request */ #define IF_COMR_BUSY BIT(15) /* IFx command mask */ #define IF_COMM_WR BIT(7) #define IF_COMM_MASK BIT(6) #define IF_COMM_ARB BIT(5) #define IF_COMM_CONTROL BIT(4) #define IF_COMM_CLR_INT_PND BIT(3) #define IF_COMM_TXRQST BIT(2) #define IF_COMM_CLR_NEWDAT IF_COMM_TXRQST #define IF_COMM_DATAA BIT(1) #define IF_COMM_DATAB BIT(0) /* TX buffer setup */ #define IF_COMM_TX (IF_COMM_ARB | IF_COMM_CONTROL | \ IF_COMM_TXRQST | \ IF_COMM_DATAA | IF_COMM_DATAB) /* For the low buffers we clear the interrupt bit, but keep newdat */ #define IF_COMM_RCV_LOW (IF_COMM_MASK | 
IF_COMM_ARB | \ IF_COMM_CONTROL | IF_COMM_CLR_INT_PND | \ IF_COMM_DATAA | IF_COMM_DATAB) /* For the high buffers we clear the interrupt bit and newdat */ #define IF_COMM_RCV_HIGH (IF_COMM_RCV_LOW | IF_COMM_CLR_NEWDAT) /* Receive setup of message objects */ #define IF_COMM_RCV_SETUP (IF_COMM_MASK | IF_COMM_ARB | IF_COMM_CONTROL) /* Invalidation of message objects */ #define IF_COMM_INVAL (IF_COMM_ARB | IF_COMM_CONTROL) /* IFx arbitration */ #define IF_ARB_MSGVAL BIT(31) #define IF_ARB_MSGXTD BIT(30) #define IF_ARB_TRANSMIT BIT(29) /* IFx message control */ #define IF_MCONT_NEWDAT BIT(15) #define IF_MCONT_MSGLST BIT(14) #define IF_MCONT_INTPND BIT(13) #define IF_MCONT_UMASK BIT(12) #define IF_MCONT_TXIE BIT(11) #define IF_MCONT_RXIE BIT(10) #define IF_MCONT_RMTEN BIT(9) #define IF_MCONT_TXRQST BIT(8) #define IF_MCONT_EOB BIT(7) #define IF_MCONT_DLC_MASK 0xf #define IF_MCONT_RCV (IF_MCONT_RXIE | IF_MCONT_UMASK) #define IF_MCONT_RCV_EOB (IF_MCONT_RCV | IF_MCONT_EOB) #define IF_MCONT_TX (IF_MCONT_TXIE | IF_MCONT_EOB) /* Use IF1 in NAPI path and IF2 in TX path */ #define IF_NAPI 0 #define IF_TX 1 /* minimum timeout for checking BUSY status */ #define MIN_TIMEOUT_VALUE 6 /* Wait for ~1 sec for INIT bit */ #define INIT_WAIT_MS 1000 /* c_can lec values */ enum c_can_lec_type { LEC_NO_ERROR = 0, LEC_STUFF_ERROR, LEC_FORM_ERROR, LEC_ACK_ERROR, LEC_BIT1_ERROR, LEC_BIT0_ERROR, LEC_CRC_ERROR, LEC_UNUSED, LEC_MASK = LEC_UNUSED, }; /* c_can error types: * Bus errors (BUS_OFF, ERROR_WARNING, ERROR_PASSIVE) are supported */ enum c_can_bus_error_types { C_CAN_NO_ERROR = 0, C_CAN_BUS_OFF, C_CAN_ERROR_WARNING, C_CAN_ERROR_PASSIVE, }; static const struct can_bittiming_const c_can_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 2, /* Time segment 1 = prop_seg + phase_seg1 */ .tseg1_max = 16, .tseg2_min = 1, /* Time segment 2 = phase_seg2 */ .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, /* 6-bit BRP field + 4-bit BRPE field*/ .brp_inc = 1, }; static inline void c_can_pm_runtime_get_sync(const struct c_can_priv *priv) { if (priv->device) pm_runtime_get_sync(priv->device); } static inline void c_can_pm_runtime_put_sync(const struct c_can_priv *priv) { if (priv->device) pm_runtime_put_sync(priv->device); } static inline void c_can_reset_ram(const struct c_can_priv *priv, bool enable) { if (priv->raminit) priv->raminit(priv, enable); } static void c_can_irq_control(struct c_can_priv *priv, bool enable) { u32 ctrl = priv->read_reg(priv, C_CAN_CTRL_REG) & ~CONTROL_IRQMSK; if (enable) ctrl |= CONTROL_IRQMSK; priv->write_reg(priv, C_CAN_CTRL_REG, ctrl); } static void c_can_obj_update(struct net_device *dev, int iface, u32 cmd, u32 obj) { struct c_can_priv *priv = netdev_priv(dev); int cnt, reg = C_CAN_IFACE(COMREQ_REG, iface); priv->write_reg32(priv, reg, (cmd << 16) | obj); for (cnt = MIN_TIMEOUT_VALUE; cnt; cnt--) { if (!(priv->read_reg(priv, reg) & IF_COMR_BUSY)) return; udelay(1); } netdev_err(dev, "Updating object timed out\n"); } static inline void c_can_object_get(struct net_device *dev, int iface, u32 obj, u32 cmd) { c_can_obj_update(dev, iface, cmd, obj); } static inline void c_can_object_put(struct net_device *dev, int iface, u32 obj, u32 cmd) { c_can_obj_update(dev, iface, cmd | IF_COMM_WR, obj); } /* Note: According to documentation clearing TXIE while MSGVAL is set * is not allowed, but works nicely on C/DCAN. And that lowers the I/O * load significantly. 
*/ static void c_can_inval_tx_object(struct net_device *dev, int iface, int obj) { struct c_can_priv *priv = netdev_priv(dev); priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), 0); c_can_object_put(dev, iface, obj, IF_COMM_INVAL); } static void c_can_inval_msg_object(struct net_device *dev, int iface, int obj) { struct c_can_priv *priv = netdev_priv(dev); priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), 0); c_can_inval_tx_object(dev, iface, obj); } static void c_can_setup_tx_object(struct net_device *dev, int iface, struct can_frame *frame, int idx) { struct c_can_priv *priv = netdev_priv(dev); u16 ctrl = IF_MCONT_TX | frame->len; bool rtr = frame->can_id & CAN_RTR_FLAG; u32 arb = IF_ARB_MSGVAL; int i; if (frame->can_id & CAN_EFF_FLAG) { arb |= frame->can_id & CAN_EFF_MASK; arb |= IF_ARB_MSGXTD; } else { arb |= (frame->can_id & CAN_SFF_MASK) << 18; } if (!rtr) arb |= IF_ARB_TRANSMIT; /* If we change the DIR bit, we need to invalidate the buffer * first, i.e. clear the MSGVAL flag in the arbiter. */ if (rtr != (bool)test_bit(idx, &priv->tx_dir)) { u32 obj = idx + priv->msg_obj_tx_first; c_can_inval_msg_object(dev, iface, obj); change_bit(idx, &priv->tx_dir); } priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), arb); priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); if (priv->type == BOSCH_D_CAN) { u32 data = 0, dreg = C_CAN_IFACE(DATA1_REG, iface); for (i = 0; i < frame->len; i += 4, dreg += 2) { data = (u32)frame->data[i]; data |= (u32)frame->data[i + 1] << 8; data |= (u32)frame->data[i + 2] << 16; data |= (u32)frame->data[i + 3] << 24; priv->write_reg32(priv, dreg, data); } } else { for (i = 0; i < frame->len; i += 2) { priv->write_reg(priv, C_CAN_IFACE(DATA1_REG, iface) + i / 2, frame->data[i] | (frame->data[i + 1] << 8)); } } } static int c_can_handle_lost_msg_obj(struct net_device *dev, int iface, int objno, u32 ctrl) { struct net_device_stats *stats = &dev->stats; struct c_can_priv *priv = netdev_priv(dev); struct can_frame *frame; struct sk_buff *skb; ctrl &= ~(IF_MCONT_MSGLST | IF_MCONT_INTPND | IF_MCONT_NEWDAT); priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), ctrl); c_can_object_put(dev, iface, objno, IF_COMM_CONTROL); stats->rx_errors++; stats->rx_over_errors++; /* create an error msg */ skb = alloc_can_err_skb(dev, &frame); if (unlikely(!skb)) return 0; frame->can_id |= CAN_ERR_CRTL; frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; netif_receive_skb(skb); return 1; } static int c_can_read_msg_object(struct net_device *dev, int iface, u32 ctrl) { struct net_device_stats *stats = &dev->stats; struct c_can_priv *priv = netdev_priv(dev); struct can_frame *frame; struct sk_buff *skb; u32 arb, data; skb = alloc_can_skb(dev, &frame); if (!skb) { stats->rx_dropped++; return -ENOMEM; } frame->len = can_cc_dlc2len(ctrl & 0x0F); arb = priv->read_reg32(priv, C_CAN_IFACE(ARB1_REG, iface)); if (arb & IF_ARB_MSGXTD) frame->can_id = (arb & CAN_EFF_MASK) | CAN_EFF_FLAG; else frame->can_id = (arb >> 18) & CAN_SFF_MASK; if (arb & IF_ARB_TRANSMIT) { frame->can_id |= CAN_RTR_FLAG; } else { int i, dreg = C_CAN_IFACE(DATA1_REG, iface); if (priv->type == BOSCH_D_CAN) { for (i = 0; i < frame->len; i += 4, dreg += 2) { data = priv->read_reg32(priv, dreg); frame->data[i] = data; frame->data[i + 1] = data >> 8; frame->data[i + 2] = data >> 16; frame->data[i + 3] = data >> 24; } } else { for (i = 0; i < frame->len; i += 2, dreg++) { data = priv->read_reg(priv, dreg); frame->data[i] = data; frame->data[i + 1] = data >> 8; } } stats->rx_bytes += frame->len; } stats->rx_packets++; 
netif_receive_skb(skb); return 0; } static void c_can_setup_receive_object(struct net_device *dev, int iface, u32 obj, u32 mask, u32 id, u32 mcont) { struct c_can_priv *priv = netdev_priv(dev); mask |= BIT(29); priv->write_reg32(priv, C_CAN_IFACE(MASK1_REG, iface), mask); id |= IF_ARB_MSGVAL; priv->write_reg32(priv, C_CAN_IFACE(ARB1_REG, iface), id); priv->write_reg(priv, C_CAN_IFACE(MSGCTRL_REG, iface), mcont); c_can_object_put(dev, iface, obj, IF_COMM_RCV_SETUP); } static bool c_can_tx_busy(const struct c_can_priv *priv, const struct c_can_tx_ring *tx_ring) { if (c_can_get_tx_free(priv, tx_ring) > 0) return false; netif_stop_queue(priv->dev); /* Memory barrier before checking tx_free (head and tail) */ smp_mb(); if (c_can_get_tx_free(priv, tx_ring) == 0) { netdev_dbg(priv->dev, "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n", tx_ring->head, tx_ring->tail, tx_ring->head - tx_ring->tail); return true; } netif_start_queue(priv->dev); return false; } static netdev_tx_t c_can_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct can_frame *frame = (struct can_frame *)skb->data; struct c_can_priv *priv = netdev_priv(dev); struct c_can_tx_ring *tx_ring = &priv->tx; u32 idx, obj, cmd = IF_COMM_TX; if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; if (c_can_tx_busy(priv, tx_ring)) return NETDEV_TX_BUSY; idx = c_can_get_tx_head(tx_ring); tx_ring->head++; if (c_can_get_tx_free(priv, tx_ring) == 0) netif_stop_queue(dev); if (idx < c_can_get_tx_tail(tx_ring)) cmd &= ~IF_COMM_TXRQST; /* Cache the message */ /* Store the message in the interface so we can call * can_put_echo_skb(). We must do this before we enable * transmit as we might race against do_tx(). */ c_can_setup_tx_object(dev, IF_TX, frame, idx); can_put_echo_skb(skb, dev, idx, 0); obj = idx + priv->msg_obj_tx_first; c_can_object_put(dev, IF_TX, obj, cmd); return NETDEV_TX_OK; } static int c_can_wait_for_ctrl_init(struct net_device *dev, struct c_can_priv *priv, u32 init) { int retry = 0; while (init != (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_INIT)) { udelay(10); if (retry++ > 1000) { netdev_err(dev, "CCTRL: set CONTROL_INIT failed\n"); return -EIO; } } return 0; } static int c_can_set_bittiming(struct net_device *dev) { unsigned int reg_btr, reg_brpe, ctrl_save; u8 brp, brpe, sjw, tseg1, tseg2; u32 ten_bit_brp; struct c_can_priv *priv = netdev_priv(dev); const struct can_bittiming *bt = &priv->can.bittiming; int res; /* c_can provides a 6-bit brp and 4-bit brpe fields */ ten_bit_brp = bt->brp - 1; brp = ten_bit_brp & BTR_BRP_MASK; brpe = ten_bit_brp >> 6; sjw = bt->sjw - 1; tseg1 = bt->prop_seg + bt->phase_seg1 - 1; tseg2 = bt->phase_seg2 - 1; reg_btr = brp | (sjw << BTR_SJW_SHIFT) | (tseg1 << BTR_TSEG1_SHIFT) | (tseg2 << BTR_TSEG2_SHIFT); reg_brpe = brpe & BRP_EXT_BRPE_MASK; netdev_info(dev, "setting BTR=%04x BRPE=%04x\n", reg_btr, reg_brpe); ctrl_save = priv->read_reg(priv, C_CAN_CTRL_REG); ctrl_save &= ~CONTROL_INIT; priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_CCE | CONTROL_INIT); res = c_can_wait_for_ctrl_init(dev, priv, CONTROL_INIT); if (res) return res; priv->write_reg(priv, C_CAN_BTR_REG, reg_btr); priv->write_reg(priv, C_CAN_BRPEXT_REG, reg_brpe); priv->write_reg(priv, C_CAN_CTRL_REG, ctrl_save); return c_can_wait_for_ctrl_init(dev, priv, 0); } /* Configure C_CAN message objects for Tx and Rx purposes: * C_CAN provides a total of 32 message objects that can be configured * either for Tx or Rx purposes. Here the first 16 message objects are used as * a reception FIFO. 
The end of reception FIFO is signified by the EoB bit * being SET. The remaining 16 message objects are kept aside for Tx purposes. * See user guide document for further details on configuring message * objects. */ static void c_can_configure_msg_objects(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); int i; /* first invalidate all message objects */ for (i = priv->msg_obj_rx_first; i <= priv->msg_obj_num; i++) c_can_inval_msg_object(dev, IF_NAPI, i); /* setup receive message objects */ for (i = priv->msg_obj_rx_first; i < priv->msg_obj_rx_last; i++) c_can_setup_receive_object(dev, IF_NAPI, i, 0, 0, IF_MCONT_RCV); c_can_setup_receive_object(dev, IF_NAPI, priv->msg_obj_rx_last, 0, 0, IF_MCONT_RCV_EOB); } static int c_can_software_reset(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); int retry = 0; if (priv->type != BOSCH_D_CAN) return 0; priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_SWR | CONTROL_INIT); while (priv->read_reg(priv, C_CAN_CTRL_REG) & CONTROL_SWR) { msleep(20); if (retry++ > 100) { netdev_err(dev, "CCTRL: software reset failed\n"); return -EIO; } } return 0; } /* Configure C_CAN chip: * - enable/disable auto-retransmission * - set operating mode * - configure message objects */ static int c_can_chip_config(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); struct c_can_tx_ring *tx_ring = &priv->tx; int err; err = c_can_software_reset(dev); if (err) return err; /* enable automatic retransmission */ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_ENABLE_AR); if ((priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) && (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)) { /* loopback + silent mode : useful for hot self-test */ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST); priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK | TEST_SILENT); } else if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { /* loopback mode : useful for self-test function */ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST); priv->write_reg(priv, C_CAN_TEST_REG, TEST_LBACK); } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { /* silent mode : bus-monitoring mode */ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_TEST); priv->write_reg(priv, C_CAN_TEST_REG, TEST_SILENT); } /* configure message objects */ c_can_configure_msg_objects(dev); /* set a `lec` value so that we can check for updates later */ priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); /* Clear all internal status */ tx_ring->head = 0; tx_ring->tail = 0; priv->tx_dir = 0; /* set bittiming params */ return c_can_set_bittiming(dev); } static int c_can_start(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); int err; struct pinctrl *p; /* basic c_can configuration */ err = c_can_chip_config(dev); if (err) return err; /* Setup the command for new messages */ priv->comm_rcv_high = priv->type != BOSCH_D_CAN ? 
IF_COMM_RCV_LOW : IF_COMM_RCV_HIGH; priv->can.state = CAN_STATE_ERROR_ACTIVE; /* Attempt to use "active" if available else use "default" */ p = pinctrl_get_select(priv->device, "active"); if (!IS_ERR(p)) pinctrl_put(p); else pinctrl_pm_select_default_state(priv->device); return 0; } static void c_can_stop(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); c_can_irq_control(priv, false); /* put ctrl to init on stop to end ongoing transmission */ priv->write_reg(priv, C_CAN_CTRL_REG, CONTROL_INIT); /* deactivate pins */ pinctrl_pm_select_sleep_state(dev->dev.parent); priv->can.state = CAN_STATE_STOPPED; } static int c_can_set_mode(struct net_device *dev, enum can_mode mode) { struct c_can_priv *priv = netdev_priv(dev); int err; switch (mode) { case CAN_MODE_START: err = c_can_start(dev); if (err) return err; netif_wake_queue(dev); c_can_irq_control(priv, true); break; default: return -EOPNOTSUPP; } return 0; } static int __c_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { unsigned int reg_err_counter; struct c_can_priv *priv = netdev_priv(dev); reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); bec->rxerr = (reg_err_counter & ERR_CNT_REC_MASK) >> ERR_CNT_REC_SHIFT; bec->txerr = reg_err_counter & ERR_CNT_TEC_MASK; return 0; } static int c_can_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct c_can_priv *priv = netdev_priv(dev); int err; c_can_pm_runtime_get_sync(priv); err = __c_can_get_berr_counter(dev, bec); c_can_pm_runtime_put_sync(priv); return err; } static void c_can_do_tx(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); struct c_can_tx_ring *tx_ring = &priv->tx; struct net_device_stats *stats = &dev->stats; u32 idx, obj, pkts = 0, bytes = 0, pend; u8 tail; if (priv->msg_obj_tx_last > 32) pend = priv->read_reg32(priv, C_CAN_INTPND3_REG); else pend = priv->read_reg(priv, C_CAN_INTPND2_REG); while ((idx = ffs(pend))) { idx--; pend &= ~BIT(idx); obj = idx + priv->msg_obj_tx_first; /* We use IF_NAPI interface instead of IF_TX because we * are called from c_can_poll(), which runs inside * NAPI. We are not transmitting. */ c_can_inval_tx_object(dev, IF_NAPI, obj); bytes += can_get_echo_skb(dev, idx, NULL); pkts++; } if (!pkts) return; tx_ring->tail += pkts; if (c_can_get_tx_free(priv, tx_ring)) { /* Make sure that anybody stopping the queue after * this sees the new tx_ring->tail. */ smp_mb(); netif_wake_queue(priv->dev); } stats->tx_bytes += bytes; stats->tx_packets += pkts; tail = c_can_get_tx_tail(tx_ring); if (priv->type == BOSCH_D_CAN && tail == 0) { u8 head = c_can_get_tx_head(tx_ring); /* Start transmission for all cached messages */ for (idx = tail; idx < head; idx++) { obj = idx + priv->msg_obj_tx_first; c_can_object_put(dev, IF_NAPI, obj, IF_COMM_TXRQST); } } } /* If we have a gap in the pending bits, that means we either * raced with the hardware or failed to readout all upper * objects in the last run due to quota limit. */ static u32 c_can_adjust_pending(u32 pend, u32 rx_mask) { u32 weight, lasts; if (pend == rx_mask) return pend; /* If the last set bit is larger than the number of pending * bits we have a gap. */ weight = hweight32(pend); lasts = fls(pend); /* If the bits are linear, nothing to do */ if (lasts == weight) return pend; /* Find the first set bit after the gap. We walk backwards * from the last set bit. 
*/ for (lasts--; pend & BIT(lasts - 1); lasts--) ; return pend & ~GENMASK(lasts - 1, 0); } static inline void c_can_rx_object_get(struct net_device *dev, struct c_can_priv *priv, u32 obj) { c_can_object_get(dev, IF_NAPI, obj, priv->comm_rcv_high); } static inline void c_can_rx_finalize(struct net_device *dev, struct c_can_priv *priv, u32 obj) { if (priv->type != BOSCH_D_CAN) c_can_object_get(dev, IF_NAPI, obj, IF_COMM_CLR_NEWDAT); } static int c_can_read_objects(struct net_device *dev, struct c_can_priv *priv, u32 pend, int quota) { u32 pkts = 0, ctrl, obj; while ((obj = ffs(pend)) && quota > 0) { pend &= ~BIT(obj - 1); c_can_rx_object_get(dev, priv, obj); ctrl = priv->read_reg(priv, C_CAN_IFACE(MSGCTRL_REG, IF_NAPI)); if (ctrl & IF_MCONT_MSGLST) { int n; n = c_can_handle_lost_msg_obj(dev, IF_NAPI, obj, ctrl); pkts += n; quota -= n; continue; } /* This really should not happen, but this covers some * odd HW behaviour. Do not remove that unless you * want to brick your machine. */ if (!(ctrl & IF_MCONT_NEWDAT)) continue; /* read the data from the message object */ c_can_read_msg_object(dev, IF_NAPI, ctrl); c_can_rx_finalize(dev, priv, obj); pkts++; quota--; } return pkts; } static inline u32 c_can_get_pending(struct c_can_priv *priv) { u32 pend; if (priv->msg_obj_rx_last > 16) pend = priv->read_reg32(priv, C_CAN_NEWDAT1_REG); else pend = priv->read_reg(priv, C_CAN_NEWDAT1_REG); return pend; } /* theory of operation: * * c_can core saves a received CAN message into the first free message * object it finds free (starting with the lowest). Bits NEWDAT and * INTPND are set for this message object indicating that a new message * has arrived. * * We clear the newdat bit right away. * * This can result in packet reordering when the readout is slow. */ static int c_can_do_rx_poll(struct net_device *dev, int quota) { struct c_can_priv *priv = netdev_priv(dev); u32 pkts = 0, pend = 0, toread, n; while (quota > 0) { if (!pend) { pend = c_can_get_pending(priv); if (!pend) break; /* If the pending field has a gap, handle the * bits above the gap first. 
*/ toread = c_can_adjust_pending(pend, priv->msg_obj_rx_mask); } else { toread = pend; } /* Remove the bits from pend */ pend &= ~toread; /* Read the objects */ n = c_can_read_objects(dev, priv, toread, quota); pkts += n; quota -= n; } return pkts; } static int c_can_handle_state_change(struct net_device *dev, enum c_can_bus_error_types error_type) { unsigned int reg_err_counter; unsigned int rx_err_passive; struct c_can_priv *priv = netdev_priv(dev); struct can_frame *cf; struct sk_buff *skb; struct can_berr_counter bec; switch (error_type) { case C_CAN_NO_ERROR: priv->can.state = CAN_STATE_ERROR_ACTIVE; break; case C_CAN_ERROR_WARNING: /* error warning state */ priv->can.can_stats.error_warning++; priv->can.state = CAN_STATE_ERROR_WARNING; break; case C_CAN_ERROR_PASSIVE: /* error passive state */ priv->can.can_stats.error_passive++; priv->can.state = CAN_STATE_ERROR_PASSIVE; break; case C_CAN_BUS_OFF: /* bus-off state */ priv->can.state = CAN_STATE_BUS_OFF; priv->can.can_stats.bus_off++; break; default: break; } /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return 0; __c_can_get_berr_counter(dev, &bec); reg_err_counter = priv->read_reg(priv, C_CAN_ERR_CNT_REG); rx_err_passive = (reg_err_counter & ERR_CNT_RP_MASK) >> ERR_CNT_RP_SHIFT; switch (error_type) { case C_CAN_NO_ERROR: cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = CAN_ERR_CRTL_ACTIVE; cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; break; case C_CAN_ERROR_WARNING: /* error warning state */ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = (bec.txerr > bec.rxerr) ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; break; case C_CAN_ERROR_PASSIVE: /* error passive state */ cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; if (rx_err_passive) cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; if (bec.txerr > 127) cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; break; case C_CAN_BUS_OFF: /* bus-off state */ cf->can_id |= CAN_ERR_BUSOFF; can_bus_off(dev); break; default: break; } netif_receive_skb(skb); return 1; } static int c_can_handle_bus_err(struct net_device *dev, enum c_can_lec_type lec_type) { struct c_can_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; /* early exit if no lec update or no error. * no lec update means that no CAN bus event has been detected * since CPU wrote 0x7 value to status reg. 
*/ if (lec_type == LEC_UNUSED || lec_type == LEC_NO_ERROR) return 0; if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) return 0; /* common for all type of bus errors */ priv->can.can_stats.bus_error++; stats->rx_errors++; /* propagate the error condition to the CAN stack */ skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return 0; /* check for 'last error code' which tells us the * type of the last error to occur on the CAN bus */ cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (lec_type) { case LEC_STUFF_ERROR: netdev_dbg(dev, "stuff error\n"); cf->data[2] |= CAN_ERR_PROT_STUFF; break; case LEC_FORM_ERROR: netdev_dbg(dev, "form error\n"); cf->data[2] |= CAN_ERR_PROT_FORM; break; case LEC_ACK_ERROR: netdev_dbg(dev, "ack error\n"); cf->data[3] = CAN_ERR_PROT_LOC_ACK; break; case LEC_BIT1_ERROR: netdev_dbg(dev, "bit1 error\n"); cf->data[2] |= CAN_ERR_PROT_BIT1; break; case LEC_BIT0_ERROR: netdev_dbg(dev, "bit0 error\n"); cf->data[2] |= CAN_ERR_PROT_BIT0; break; case LEC_CRC_ERROR: netdev_dbg(dev, "CRC error\n"); cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; break; default: break; } netif_receive_skb(skb); return 1; } static int c_can_poll(struct napi_struct *napi, int quota) { struct net_device *dev = napi->dev; struct c_can_priv *priv = netdev_priv(dev); u16 curr, last = priv->last_status; int work_done = 0; /* Only read the status register if a status interrupt was pending */ if (atomic_xchg(&priv->sie_pending, 0)) { priv->last_status = priv->read_reg(priv, C_CAN_STS_REG); curr = priv->last_status; /* Ack status on C_CAN. D_CAN is self clearing */ if (priv->type != BOSCH_D_CAN) priv->write_reg(priv, C_CAN_STS_REG, LEC_UNUSED); } else { /* no change detected ... */ curr = last; } /* handle state changes */ if ((curr & STATUS_EWARN) && (!(last & STATUS_EWARN))) { netdev_dbg(dev, "entered error warning state\n"); work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING); } if ((curr & STATUS_EPASS) && (!(last & STATUS_EPASS))) { netdev_dbg(dev, "entered error passive state\n"); work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE); } if ((curr & STATUS_BOFF) && (!(last & STATUS_BOFF))) { netdev_dbg(dev, "entered bus off state\n"); work_done += c_can_handle_state_change(dev, C_CAN_BUS_OFF); goto end; } /* handle bus recovery events */ if ((!(curr & STATUS_BOFF)) && (last & STATUS_BOFF)) { netdev_dbg(dev, "left bus off state\n"); work_done += c_can_handle_state_change(dev, C_CAN_ERROR_PASSIVE); } if ((!(curr & STATUS_EPASS)) && (last & STATUS_EPASS)) { netdev_dbg(dev, "left error passive state\n"); work_done += c_can_handle_state_change(dev, C_CAN_ERROR_WARNING); } if ((!(curr & STATUS_EWARN)) && (last & STATUS_EWARN)) { netdev_dbg(dev, "left error warning state\n"); work_done += c_can_handle_state_change(dev, C_CAN_NO_ERROR); } /* handle lec errors on the bus */ work_done += c_can_handle_bus_err(dev, curr & LEC_MASK); /* Handle Tx/Rx events. 
We do this unconditionally */ work_done += c_can_do_rx_poll(dev, (quota - work_done)); c_can_do_tx(dev); end: if (work_done < quota) { napi_complete_done(napi, work_done); /* enable all IRQs if we are not in bus off state */ if (priv->can.state != CAN_STATE_BUS_OFF) c_can_irq_control(priv, true); } return work_done; } static irqreturn_t c_can_isr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct c_can_priv *priv = netdev_priv(dev); int reg_int; reg_int = priv->read_reg(priv, C_CAN_INT_REG); if (!reg_int) return IRQ_NONE; /* save for later use */ if (reg_int & INT_STS_PENDING) atomic_set(&priv->sie_pending, 1); /* disable all interrupts and schedule the NAPI */ c_can_irq_control(priv, false); napi_schedule(&priv->napi); return IRQ_HANDLED; } static int c_can_open(struct net_device *dev) { int err; struct c_can_priv *priv = netdev_priv(dev); c_can_pm_runtime_get_sync(priv); c_can_reset_ram(priv, true); /* open the can device */ err = open_candev(dev); if (err) { netdev_err(dev, "failed to open can device\n"); goto exit_open_fail; } /* register interrupt handler */ err = request_irq(dev->irq, &c_can_isr, IRQF_SHARED, dev->name, dev); if (err < 0) { netdev_err(dev, "failed to request interrupt\n"); goto exit_irq_fail; } /* start the c_can controller */ err = c_can_start(dev); if (err) goto exit_start_fail; napi_enable(&priv->napi); /* enable status change, error and module interrupts */ c_can_irq_control(priv, true); netif_start_queue(dev); return 0; exit_start_fail: free_irq(dev->irq, dev); exit_irq_fail: close_candev(dev); exit_open_fail: c_can_reset_ram(priv, false); c_can_pm_runtime_put_sync(priv); return err; } static int c_can_close(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); netif_stop_queue(dev); napi_disable(&priv->napi); c_can_stop(dev); free_irq(dev->irq, dev); close_candev(dev); c_can_reset_ram(priv, false); c_can_pm_runtime_put_sync(priv); return 0; } struct net_device *alloc_c_can_dev(int msg_obj_num) { struct net_device *dev; struct c_can_priv *priv; int msg_obj_tx_num = msg_obj_num / 2; dev = alloc_candev(sizeof(*priv), msg_obj_tx_num); if (!dev) return NULL; priv = netdev_priv(dev); priv->msg_obj_num = msg_obj_num; priv->msg_obj_rx_num = msg_obj_num - msg_obj_tx_num; priv->msg_obj_rx_first = 1; priv->msg_obj_rx_last = priv->msg_obj_rx_first + priv->msg_obj_rx_num - 1; priv->msg_obj_rx_mask = GENMASK(priv->msg_obj_rx_num - 1, 0); priv->msg_obj_tx_num = msg_obj_tx_num; priv->msg_obj_tx_first = priv->msg_obj_rx_last + 1; priv->msg_obj_tx_last = priv->msg_obj_tx_first + priv->msg_obj_tx_num - 1; priv->tx.head = 0; priv->tx.tail = 0; priv->tx.obj_num = msg_obj_tx_num; netif_napi_add_weight(dev, &priv->napi, c_can_poll, priv->msg_obj_rx_num); priv->dev = dev; priv->can.bittiming_const = &c_can_bittiming_const; priv->can.do_set_mode = c_can_set_mode; priv->can.do_get_berr_counter = c_can_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING; return dev; } EXPORT_SYMBOL_GPL(alloc_c_can_dev); #ifdef CONFIG_PM int c_can_power_down(struct net_device *dev) { u32 val; unsigned long time_out; struct c_can_priv *priv = netdev_priv(dev); if (!(dev->flags & IFF_UP)) return 0; WARN_ON(priv->type != BOSCH_D_CAN); /* set PDR value so the device goes to power down mode */ val = priv->read_reg(priv, C_CAN_CTRL_EX_REG); val |= CONTROL_EX_PDR; priv->write_reg(priv, C_CAN_CTRL_EX_REG, val); /* Wait for the PDA bit to get set */ time_out = jiffies + 
msecs_to_jiffies(INIT_WAIT_MS); while (!(priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) && time_after(time_out, jiffies)) cpu_relax(); if (time_after(jiffies, time_out)) return -ETIMEDOUT; c_can_stop(dev); c_can_reset_ram(priv, false); c_can_pm_runtime_put_sync(priv); return 0; } EXPORT_SYMBOL_GPL(c_can_power_down); int c_can_power_up(struct net_device *dev) { u32 val; unsigned long time_out; struct c_can_priv *priv = netdev_priv(dev); int ret; if (!(dev->flags & IFF_UP)) return 0; WARN_ON(priv->type != BOSCH_D_CAN); c_can_pm_runtime_get_sync(priv); c_can_reset_ram(priv, true); /* Clear PDR and INIT bits */ val = priv->read_reg(priv, C_CAN_CTRL_EX_REG); val &= ~CONTROL_EX_PDR; priv->write_reg(priv, C_CAN_CTRL_EX_REG, val); val = priv->read_reg(priv, C_CAN_CTRL_REG); val &= ~CONTROL_INIT; priv->write_reg(priv, C_CAN_CTRL_REG, val); /* Wait for the PDA bit to get clear */ time_out = jiffies + msecs_to_jiffies(INIT_WAIT_MS); while ((priv->read_reg(priv, C_CAN_STS_REG) & STATUS_PDA) && time_after(time_out, jiffies)) cpu_relax(); if (time_after(jiffies, time_out)) { ret = -ETIMEDOUT; goto err_out; } ret = c_can_start(dev); if (ret) goto err_out; c_can_irq_control(priv, true); return 0; err_out: c_can_reset_ram(priv, false); c_can_pm_runtime_put_sync(priv); return ret; } EXPORT_SYMBOL_GPL(c_can_power_up); #endif void free_c_can_dev(struct net_device *dev) { struct c_can_priv *priv = netdev_priv(dev); netif_napi_del(&priv->napi); free_candev(dev); } EXPORT_SYMBOL_GPL(free_c_can_dev); static const struct net_device_ops c_can_netdev_ops = { .ndo_open = c_can_open, .ndo_stop = c_can_close, .ndo_start_xmit = c_can_start_xmit, .ndo_change_mtu = can_change_mtu, }; int register_c_can_dev(struct net_device *dev) { /* Deactivate pins to prevent DRA7 DCAN IP from being * stuck in transition when module is disabled. * Pins are activated in c_can_start() and deactivated * in c_can_stop() */ pinctrl_pm_select_sleep_state(dev->dev.parent); dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = &c_can_netdev_ops; dev->ethtool_ops = &c_can_ethtool_ops; return register_candev(dev); } EXPORT_SYMBOL_GPL(register_c_can_dev); void unregister_c_can_dev(struct net_device *dev) { unregister_candev(dev); } EXPORT_SYMBOL_GPL(unregister_c_can_dev); MODULE_AUTHOR("Bhupesh Sharma <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CAN bus driver for Bosch C_CAN controller");
linux-master
drivers/net/can/c_can/c_can_main.c
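The subtlest part of the main driver is arguably c_can_adjust_pending(): the hardware fills message objects from the lowest number up, so a gap in the NEWDAT mask means the set bits above the gap belong to older frames and must be read first to avoid reordering. A standalone, runnable restatement of that bit manipulation (hypothetical userspace code, not the kernel function itself):

#include <stdio.h>
#include <stdint.h>

/* pend must be nonzero (the kernel caller guarantees that too). */
static uint32_t adjust_pending(uint32_t pend, uint32_t rx_mask)
{
	unsigned int weight = __builtin_popcount(pend);
	unsigned int lasts = 32 - __builtin_clz(pend);	/* like fls() */

	/* All objects pending, or set bits contiguous from bit 0: no gap. */
	if (pend == rx_mask || lasts == weight)
		return pend;

	/* Walk down from the top set bit until the gap is reached. */
	for (lasts--; pend & (1u << (lasts - 1)); lasts--)
		;
	/* Keep only the bits above the gap for this pass. */
	return pend & ~((1u << lasts) - 1);
}

int main(void)
{
	/* Objects 6..7 were filled before 0..2, so they are read first. */
	printf("0x%02x\n", adjust_pending(0xc7, 0xffff));	/* prints 0xc0 */
	return 0;
}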
/* * PCI bus driver for Bosch C_CAN/D_CAN controller * * Copyright (C) 2012 Federico Vaga <[email protected]> * * Borrowed from c_can_platform.c * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/can/dev.h> #include "c_can.h" #define PCI_DEVICE_ID_PCH_CAN 0x8818 #define PCH_PCI_SOFT_RESET 0x01fc enum c_can_pci_reg_align { C_CAN_REG_ALIGN_16, C_CAN_REG_ALIGN_32, C_CAN_REG_32, }; struct c_can_pci_data { /* Specify if is C_CAN or D_CAN */ enum c_can_dev_id type; /* Number of message objects */ unsigned int msg_obj_num; /* Set the register alignment in the memory */ enum c_can_pci_reg_align reg_align; /* Set the frequency */ unsigned int freq; /* PCI bar number */ int bar; /* Callback for reset */ void (*init)(const struct c_can_priv *priv, bool enable); }; /* 16-bit c_can registers can be arranged differently in the memory * architecture of different implementations. For example: 16-bit * registers can be aligned to a 16-bit boundary or 32-bit boundary etc. * Handle the same by providing a common read/write interface. */ static u16 c_can_pci_read_reg_aligned_to_16bit(const struct c_can_priv *priv, enum reg index) { return readw(priv->base + priv->regs[index]); } static void c_can_pci_write_reg_aligned_to_16bit(const struct c_can_priv *priv, enum reg index, u16 val) { writew(val, priv->base + priv->regs[index]); } static u16 c_can_pci_read_reg_aligned_to_32bit(const struct c_can_priv *priv, enum reg index) { return readw(priv->base + 2 * priv->regs[index]); } static void c_can_pci_write_reg_aligned_to_32bit(const struct c_can_priv *priv, enum reg index, u16 val) { writew(val, priv->base + 2 * priv->regs[index]); } static u16 c_can_pci_read_reg_32bit(const struct c_can_priv *priv, enum reg index) { return (u16)ioread32(priv->base + 2 * priv->regs[index]); } static void c_can_pci_write_reg_32bit(const struct c_can_priv *priv, enum reg index, u16 val) { iowrite32((u32)val, priv->base + 2 * priv->regs[index]); } static u32 c_can_pci_read_reg32(const struct c_can_priv *priv, enum reg index) { u32 val; val = priv->read_reg(priv, index); val |= ((u32)priv->read_reg(priv, index + 1)) << 16; return val; } static void c_can_pci_write_reg32(const struct c_can_priv *priv, enum reg index, u32 val) { priv->write_reg(priv, index + 1, val >> 16); priv->write_reg(priv, index, val); } static void c_can_pci_reset_pch(const struct c_can_priv *priv, bool enable) { if (enable) { u32 __iomem *addr = priv->base + PCH_PCI_SOFT_RESET; /* write to sw reset register */ iowrite32(1, addr); iowrite32(0, addr); } } static int c_can_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct c_can_pci_data *c_can_pci_data = (void *)ent->driver_data; struct c_can_priv *priv; struct net_device *dev; void __iomem *addr; int ret; ret = pci_enable_device(pdev); if (ret) { dev_err(&pdev->dev, "pci_enable_device FAILED\n"); goto out; } ret = pci_request_regions(pdev, KBUILD_MODNAME); if (ret) { dev_err(&pdev->dev, "pci_request_regions FAILED\n"); goto out_disable_device; } ret = pci_enable_msi(pdev); if (!ret) { dev_info(&pdev->dev, "MSI enabled\n"); pci_set_master(pdev); } addr = pci_iomap(pdev, c_can_pci_data->bar, pci_resource_len(pdev, c_can_pci_data->bar)); if (!addr) { dev_err(&pdev->dev, "device has no PCI memory resources, failing adapter\n"); ret = 
-ENOMEM; goto out_release_regions; } /* allocate the c_can device */ dev = alloc_c_can_dev(c_can_pci_data->msg_obj_num); if (!dev) { ret = -ENOMEM; goto out_iounmap; } priv = netdev_priv(dev); pci_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); dev->irq = pdev->irq; priv->base = addr; priv->device = &pdev->dev; if (!c_can_pci_data->freq) { dev_err(&pdev->dev, "no clock frequency defined\n"); ret = -ENODEV; goto out_free_c_can; } else { priv->can.clock.freq = c_can_pci_data->freq; } /* Configure CAN type */ switch (c_can_pci_data->type) { case BOSCH_C_CAN: priv->regs = reg_map_c_can; break; case BOSCH_D_CAN: priv->regs = reg_map_d_can; break; default: ret = -EINVAL; goto out_free_c_can; } priv->type = c_can_pci_data->type; /* Configure access to registers */ switch (c_can_pci_data->reg_align) { case C_CAN_REG_ALIGN_32: priv->read_reg = c_can_pci_read_reg_aligned_to_32bit; priv->write_reg = c_can_pci_write_reg_aligned_to_32bit; break; case C_CAN_REG_ALIGN_16: priv->read_reg = c_can_pci_read_reg_aligned_to_16bit; priv->write_reg = c_can_pci_write_reg_aligned_to_16bit; break; case C_CAN_REG_32: priv->read_reg = c_can_pci_read_reg_32bit; priv->write_reg = c_can_pci_write_reg_32bit; break; default: ret = -EINVAL; goto out_free_c_can; } priv->read_reg32 = c_can_pci_read_reg32; priv->write_reg32 = c_can_pci_write_reg32; priv->raminit = c_can_pci_data->init; ret = register_c_can_dev(dev); if (ret) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", KBUILD_MODNAME, ret); goto out_free_c_can; } dev_dbg(&pdev->dev, "%s device registered (regs=%p, irq=%d)\n", KBUILD_MODNAME, priv->regs, dev->irq); return 0; out_free_c_can: free_c_can_dev(dev); out_iounmap: pci_iounmap(pdev, addr); out_release_regions: pci_disable_msi(pdev); pci_release_regions(pdev); out_disable_device: pci_disable_device(pdev); out: return ret; } static void c_can_pci_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct c_can_priv *priv = netdev_priv(dev); void __iomem *addr = priv->base; unregister_c_can_dev(dev); free_c_can_dev(dev); pci_iounmap(pdev, addr); pci_disable_msi(pdev); pci_release_regions(pdev); pci_disable_device(pdev); } static const struct c_can_pci_data c_can_sta2x11 = { .type = BOSCH_C_CAN, .msg_obj_num = 32, .reg_align = C_CAN_REG_ALIGN_32, .freq = 52000000, /* 52 Mhz */ .bar = 0, }; static const struct c_can_pci_data c_can_pch = { .type = BOSCH_C_CAN, .msg_obj_num = 32, .reg_align = C_CAN_REG_32, .freq = 50000000, /* 50 MHz */ .init = c_can_pci_reset_pch, .bar = 1, }; #define C_CAN_ID(_vend, _dev, _driverdata) { \ PCI_DEVICE(_vend, _dev), \ .driver_data = (unsigned long)&(_driverdata), \ } static const struct pci_device_id c_can_pci_tbl[] = { C_CAN_ID(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_CAN, c_can_sta2x11), C_CAN_ID(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_PCH_CAN, c_can_pch), {}, }; static struct pci_driver c_can_pci_driver = { .name = KBUILD_MODNAME, .id_table = c_can_pci_tbl, .probe = c_can_pci_probe, .remove = c_can_pci_remove, }; module_pci_driver(c_can_pci_driver); MODULE_AUTHOR("Federico Vaga <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("PCI CAN bus driver for Bosch C_CAN/D_CAN controller"); MODULE_DEVICE_TABLE(pci, c_can_pci_tbl);
linux-master
drivers/net/can/c_can/c_can_pci.c
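Both the PCI and platform back ends synthesize 32-bit accesses from two 16-bit ones, and the 32-bit write helper deliberately stores the high half first: for the IFx command pair the low half is the command request register, and writing the message number there is what starts the hardware transfer, so the command mask in the high half must already be in place. A sketch of that ordering, with hypothetical helper names:

#include <linux/io.h>
#include <linux/types.h>

/* lo/hi point at two consecutive 16-bit registers, e.g. IFx command
 * request (message number) and IFx command mask (command bits).
 */
static void ccan_write32_pair(void __iomem *lo, void __iomem *hi, u32 val)
{
	writew(val >> 16, hi);	/* command bits must land first */
	writew(val, lo);	/* message number last: this starts the transfer */
}

static u32 ccan_read32_pair(void __iomem *lo, void __iomem *hi)
{
	return readw(lo) | ((u32)readw(hi) << 16);
}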
// SPDX-License-Identifier: GPL-2.0-or-later /******************************************************************************* * * CTU CAN FD IP Core * * Copyright (C) 2015-2018 Ondrej Ille <[email protected]> FEE CTU * Copyright (C) 2018-2021 Ondrej Ille <[email protected]> self-funded * Copyright (C) 2018-2019 Martin Jerabek <[email protected]> FEE CTU * Copyright (C) 2018-2022 Pavel Pisa <[email protected]> FEE CTU/self-funded * * Project advisors: * Jiri Novak <[email protected]> * Pavel Pisa <[email protected]> * * Department of Measurement (http://meas.fel.cvut.cz/) * Faculty of Electrical Engineering (http://www.fel.cvut.cz) * Czech Technical University (http://www.cvut.cz/) ******************************************************************************/ #include <linux/module.h> #include <linux/pci.h> #include "ctucanfd.h" #ifndef PCI_DEVICE_DATA #define PCI_DEVICE_DATA(vend, dev, data) \ .vendor = PCI_VENDOR_ID_##vend, \ .device = PCI_DEVICE_ID_##vend##_##dev, \ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID, 0, 0, \ .driver_data = (kernel_ulong_t)(data) #endif #ifndef PCI_VENDOR_ID_TEDIA #define PCI_VENDOR_ID_TEDIA 0x1760 #endif #ifndef PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 #define PCI_DEVICE_ID_TEDIA_CTUCAN_VER21 0xff00 #endif #define CTUCAN_BAR0_CTUCAN_ID 0x0000 #define CTUCAN_BAR0_CRA_BASE 0x4000 #define CYCLONE_IV_CRA_A2P_IE (0x0050) #define CTUCAN_WITHOUT_CTUCAN_ID 0 #define CTUCAN_WITH_CTUCAN_ID 1 struct ctucan_pci_board_data { void __iomem *bar0_base; void __iomem *cra_base; void __iomem *bar1_base; struct list_head ndev_list_head; int use_msi; }; static struct ctucan_pci_board_data *ctucan_pci_get_bdata(struct pci_dev *pdev) { return (struct ctucan_pci_board_data *)pci_get_drvdata(pdev); } static void ctucan_pci_set_drvdata(struct device *dev, struct net_device *ndev) { struct pci_dev *pdev = container_of(dev, struct pci_dev, dev); struct ctucan_priv *priv = netdev_priv(ndev); struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev); list_add(&priv->peers_on_pdev, &bdata->ndev_list_head); priv->irq_flags = IRQF_SHARED; } /** * ctucan_pci_probe - PCI registration call * @pdev: Handle to the pci device structure * @ent: Pointer to the entry from ctucan_pci_tbl * * This function does all the memory allocation and registration for the CAN * device. 
 *
 * Return: 0 on success and failure value on error
 */
static int ctucan_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	struct device *dev = &pdev->dev;
	unsigned long driver_data = ent->driver_data;
	struct ctucan_pci_board_data *bdata;
	void __iomem *addr;
	void __iomem *cra_addr;
	void __iomem *bar0_base;
	u32 cra_a2p_ie;
	u32 ctucan_id = 0;
	int ret;
	unsigned int ntxbufs;
	unsigned int num_cores = 1;
	unsigned int core_i = 0;
	int irq;
	int msi_ok = 0;

	ret = pci_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pci_enable_device FAILED\n");
		goto err;
	}

	ret = pci_request_regions(pdev, KBUILD_MODNAME);
	if (ret) {
		dev_err(dev, "pci_request_regions FAILED\n");
		goto err_disable_device;
	}

	ret = pci_enable_msi(pdev);
	if (!ret) {
		dev_info(dev, "MSI enabled\n");
		pci_set_master(pdev);
		msi_ok = 1;
	}

	dev_info(dev, "ctucan BAR0 0x%08llx 0x%08llx\n",
		 (long long)pci_resource_start(pdev, 0),
		 (long long)pci_resource_len(pdev, 0));
	dev_info(dev, "ctucan BAR1 0x%08llx 0x%08llx\n",
		 (long long)pci_resource_start(pdev, 1),
		 (long long)pci_resource_len(pdev, 1));

	addr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
	if (!addr) {
		dev_err(dev, "PCI BAR 1 cannot be mapped\n");
		ret = -ENOMEM;
		goto err_release_regions;
	}

	/* Cyclone IV PCI Express Control Registers Area */
	bar0_base = pci_iomap(pdev, 0, pci_resource_len(pdev, 0));
	if (!bar0_base) {
		dev_err(dev, "PCI BAR 0 cannot be mapped\n");
		ret = -EIO;
		goto err_pci_iounmap_bar1;
	}

	if (driver_data == CTUCAN_WITHOUT_CTUCAN_ID) {
		cra_addr = bar0_base;
		num_cores = 2;
	} else {
		cra_addr = bar0_base + CTUCAN_BAR0_CRA_BASE;
		ctucan_id = ioread32(bar0_base + CTUCAN_BAR0_CTUCAN_ID);
		dev_info(dev, "ctucan_id 0x%08lx\n", (unsigned long)ctucan_id);
		num_cores = ctucan_id & 0xf;
	}

	irq = pdev->irq;

	ntxbufs = 4;

	bdata = kzalloc(sizeof(*bdata), GFP_KERNEL);
	if (!bdata) {
		ret = -ENOMEM;
		goto err_pci_iounmap_bar0;
	}

	INIT_LIST_HEAD(&bdata->ndev_list_head);
	bdata->bar0_base = bar0_base;
	bdata->cra_base = cra_addr;
	bdata->bar1_base = addr;
	bdata->use_msi = msi_ok;

	pci_set_drvdata(pdev, bdata);

	ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
				  0, ctucan_pci_set_drvdata);
	if (ret < 0)
		goto err_free_board;

	core_i++;

	while (core_i < num_cores) {
		addr += 0x4000;

		ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 100000000,
					  0, ctucan_pci_set_drvdata);
		if (ret < 0) {
			dev_info(dev, "CTU CAN FD core %d initialization failed\n",
				 core_i);
			break;
		}
		core_i++;
	}

	/* enable interrupt in
	 * Avalon-MM to PCI Express Interrupt Enable Register
	 */
	cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
	dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);
	cra_a2p_ie |= 1;
	iowrite32(cra_a2p_ie, cra_addr + CYCLONE_IV_CRA_A2P_IE);
	cra_a2p_ie = ioread32(cra_addr + CYCLONE_IV_CRA_A2P_IE);
	dev_info(dev, "cra_a2p_ie 0x%08x\n", cra_a2p_ie);

	return 0;

err_free_board:
	pci_set_drvdata(pdev, NULL);
	kfree(bdata);
err_pci_iounmap_bar0:
	pci_iounmap(pdev, bar0_base);
err_pci_iounmap_bar1:
	pci_iounmap(pdev, addr);
err_release_regions:
	if (msi_ok)
		pci_disable_msi(pdev);
	pci_release_regions(pdev);
err_disable_device:
	pci_disable_device(pdev);
err:
	return ret;
}

/**
 * ctucan_pci_remove - Unregister the device after releasing the resources
 * @pdev:	Handle to the pci device structure
 *
 * This function frees all the resources allocated to the device.
 */
static void ctucan_pci_remove(struct pci_dev *pdev)
{
	struct net_device *ndev;
	struct ctucan_priv *priv = NULL;
	struct ctucan_pci_board_data *bdata = ctucan_pci_get_bdata(pdev);

	dev_dbg(&pdev->dev, "ctucan_remove");

	if (!bdata) {
		dev_err(&pdev->dev, "%s: no list of devices\n", __func__);
		return;
	}

	/* disable interrupt in
	 * Avalon-MM to PCI Express Interrupt Enable Register
	 */
	if (bdata->cra_base)
		iowrite32(0, bdata->cra_base + CYCLONE_IV_CRA_A2P_IE);

	while ((priv = list_first_entry_or_null(&bdata->ndev_list_head,
						struct ctucan_priv,
						peers_on_pdev)) != NULL) {
		ndev = priv->can.dev;

		unregister_candev(ndev);

		netif_napi_del(&priv->napi);

		list_del_init(&priv->peers_on_pdev);
		free_candev(ndev);
	}

	pci_iounmap(pdev, bdata->bar1_base);

	if (bdata->use_msi)
		pci_disable_msi(pdev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	pci_iounmap(pdev, bdata->bar0_base);

	pci_set_drvdata(pdev, NULL);
	kfree(bdata);
}

static SIMPLE_DEV_PM_OPS(ctucan_pci_pm_ops, ctucan_suspend, ctucan_resume);

static const struct pci_device_id ctucan_pci_tbl[] = {
	{PCI_DEVICE_DATA(TEDIA, CTUCAN_VER21, CTUCAN_WITH_CTUCAN_ID)},
	{},
};

static struct pci_driver ctucan_pci_driver = {
	.name = KBUILD_MODNAME,
	.id_table = ctucan_pci_tbl,
	.probe = ctucan_pci_probe,
	.remove = ctucan_pci_remove,
	.driver.pm = &ctucan_pci_pm_ops,
};

module_pci_driver(ctucan_pci_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pavel Pisa <[email protected]>");
MODULE_DESCRIPTION("CTU CAN FD for PCI bus");
linux-master
drivers/net/can/ctucanfd/ctucanfd_pci.c
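The PCI probe above instantiates one CTU CAN FD core per 0x4000-byte window of BAR1 (note the addr += 0x4000 step in the probe loop). Below is a standalone sketch of just that address arithmetic; the BAR1 base address and core count are invented example values, not anything read from hardware.

#include <stdio.h>

#define CTUCAN_CORE_STRIDE 0x4000	/* per-core window size used by the probe loop */

int main(void)
{
	unsigned long bar1_base = 0xfe000000UL;	/* hypothetical BAR1 start */
	unsigned int num_cores = 2;		/* e.g. a board without the CTUCAN_ID register */
	unsigned int i;

	for (i = 0; i < num_cores; i++)
		printf("core %u mapped at 0x%lx\n", i, bar1_base + i * CTUCAN_CORE_STRIDE);
	return 0;
}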
// SPDX-License-Identifier: GPL-2.0-or-later
/*******************************************************************************
 *
 * CTU CAN FD IP Core
 *
 * Copyright (C) 2015-2018 Ondrej Ille <[email protected]> FEE CTU
 * Copyright (C) 2018-2021 Ondrej Ille <[email protected]> self-funded
 * Copyright (C) 2018-2019 Martin Jerabek <[email protected]> FEE CTU
 * Copyright (C) 2018-2022 Pavel Pisa <[email protected]> FEE CTU/self-funded
 *
 * Project advisors:
 *     Jiri Novak <[email protected]>
 *     Pavel Pisa <[email protected]>
 *
 * Department of Measurement         (http://meas.fel.cvut.cz/)
 * Faculty of Electrical Engineering (http://www.fel.cvut.cz)
 * Czech Technical University        (http://www.cvut.cz/)
 ******************************************************************************/

#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>

#include "ctucanfd.h"

#define DRV_NAME "ctucanfd"

static void ctucan_platform_set_drvdata(struct device *dev,
					struct net_device *ndev)
{
	struct platform_device *pdev = container_of(dev, struct platform_device,
						    dev);

	platform_set_drvdata(pdev, ndev);
}

/**
 * ctucan_platform_probe - Platform registration call
 * @pdev:	Handle to the platform device structure
 *
 * This function does all the memory allocation and registration for the CAN
 * device.
 *
 * Return: 0 on success and failure value on error
 */
static int ctucan_platform_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	void __iomem *addr;
	int ret;
	unsigned int ntxbufs;
	int irq;

	/* Get the virtual base address for the device */
	addr = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto err;
	}
	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		goto err;
	}

	/* The number of TX buffers might change in future HW versions. If so,
	 * it will be passed as a property via the device tree.
	 */
	ntxbufs = 4;
	ret = ctucan_probe_common(dev, addr, irq, ntxbufs, 0,
				  1, ctucan_platform_set_drvdata);
	if (ret < 0)
		platform_set_drvdata(pdev, NULL);
err:
	return ret;
}

/**
 * ctucan_platform_remove - Unregister the device after releasing the resources
 * @pdev:	Handle to the platform device structure
 *
 * This function frees all the resources allocated to the device.
 */
static void ctucan_platform_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct ctucan_priv *priv = netdev_priv(ndev);

	netdev_dbg(ndev, "ctucan_remove");

	unregister_candev(ndev);
	pm_runtime_disable(&pdev->dev);
	netif_napi_del(&priv->napi);
	free_candev(ndev);
}

static SIMPLE_DEV_PM_OPS(ctucan_platform_pm_ops, ctucan_suspend,
			 ctucan_resume);

/* Match table for OF platform binding */
static const struct of_device_id ctucan_of_match[] = {
	{ .compatible = "ctu,ctucanfd-2", },
	{ .compatible = "ctu,ctucanfd", },
	{ /* end of list */ },
};
MODULE_DEVICE_TABLE(of, ctucan_of_match);

static struct platform_driver ctucanfd_driver = {
	.probe	= ctucan_platform_probe,
	.remove_new = ctucan_platform_remove,
	.driver	= {
		.name = DRV_NAME,
		.pm = &ctucan_platform_pm_ops,
		.of_match_table = ctucan_of_match,
	},
};

module_platform_driver(ctucanfd_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Jerabek");
MODULE_DESCRIPTION("CTU CAN FD for platform");
linux-master
drivers/net/can/ctucanfd/ctucanfd_platform.c
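ctucan_platform_set_drvdata() above recovers the enclosing platform_device from its embedded struct device with container_of(). A standalone illustration of that pointer arithmetic follows; the macro is re-implemented for userspace and the struct layouts are simplified stand-ins, not the kernel's definitions.

#include <stdio.h>
#include <stddef.h>

/* userspace re-implementation of the kernel's container_of() */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct device { int id; };
struct platform_device { const char *name; struct device dev; };

int main(void)
{
	struct platform_device pdev = { .name = "ctucanfd", .dev = { .id = 7 } };
	struct device *dev = &pdev.dev;	/* what a callback typically receives */
	struct platform_device *back =
		container_of(dev, struct platform_device, dev);

	printf("recovered: %s (dev id %d)\n", back->name, back->dev.id);
	return 0;
}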
// SPDX-License-Identifier: GPL-2.0-or-later /******************************************************************************* * * CTU CAN FD IP Core * * Copyright (C) 2015-2018 Ondrej Ille <[email protected]> FEE CTU * Copyright (C) 2018-2021 Ondrej Ille <[email protected]> self-funded * Copyright (C) 2018-2019 Martin Jerabek <[email protected]> FEE CTU * Copyright (C) 2018-2022 Pavel Pisa <[email protected]> FEE CTU/self-funded * * Project advisors: * Jiri Novak <[email protected]> * Pavel Pisa <[email protected]> * * Department of Measurement (http://meas.fel.cvut.cz/) * Faculty of Electrical Engineering (http://www.fel.cvut.cz) * Czech Technical University (http://www.cvut.cz/) ******************************************************************************/ #include <linux/clk.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/init.h> #include <linux/bitfield.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/types.h> #include <linux/can/error.h> #include <linux/pm_runtime.h> #include "ctucanfd.h" #include "ctucanfd_kregs.h" #include "ctucanfd_kframe.h" #ifdef DEBUG #define ctucan_netdev_dbg(ndev, args...) \ netdev_dbg(ndev, args) #else #define ctucan_netdev_dbg(...) do { } while (0) #endif #define CTUCANFD_ID 0xCAFD /* TX buffer rotation: * - when a buffer transitions to empty state, rotate order and priorities * - if more buffers seem to transition at the same time, rotate by the number of buffers * - it may be assumed that buffers transition to empty state in FIFO order (because we manage * priorities that way) * - at frame filling, do not rotate anything, just increment buffer modulo counter */ #define CTUCANFD_FLAG_RX_FFW_BUFFERED 1 #define CTUCAN_STATE_TO_TEXT_ENTRY(st) \ [st] = #st enum ctucan_txtb_status { TXT_NOT_EXIST = 0x0, TXT_RDY = 0x1, TXT_TRAN = 0x2, TXT_ABTP = 0x3, TXT_TOK = 0x4, TXT_ERR = 0x6, TXT_ABT = 0x7, TXT_ETY = 0x8, }; enum ctucan_txtb_command { TXT_CMD_SET_EMPTY = 0x01, TXT_CMD_SET_READY = 0x02, TXT_CMD_SET_ABORT = 0x04 }; static const struct can_bittiming_const ctu_can_fd_bit_timing_max = { .name = "ctu_can_fd", .tseg1_min = 2, .tseg1_max = 190, .tseg2_min = 1, .tseg2_max = 63, .sjw_max = 31, .brp_min = 1, .brp_max = 8, .brp_inc = 1, }; static const struct can_bittiming_const ctu_can_fd_bit_timing_data_max = { .name = "ctu_can_fd", .tseg1_min = 2, .tseg1_max = 94, .tseg2_min = 1, .tseg2_max = 31, .sjw_max = 31, .brp_min = 1, .brp_max = 2, .brp_inc = 1, }; static const char * const ctucan_state_strings[CAN_STATE_MAX] = { CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_ACTIVE), CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_WARNING), CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_ERROR_PASSIVE), CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_BUS_OFF), CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_STOPPED), CTUCAN_STATE_TO_TEXT_ENTRY(CAN_STATE_SLEEPING) }; static void ctucan_write32_le(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg, u32 val) { iowrite32(val, priv->mem_base + reg); } static void ctucan_write32_be(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg, u32 val) { iowrite32be(val, priv->mem_base + reg); } static u32 ctucan_read32_le(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg) { return ioread32(priv->mem_base + reg); } static u32 ctucan_read32_be(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg) { return ioread32be(priv->mem_base + reg); } static void ctucan_write32(struct ctucan_priv *priv, enum 
ctu_can_fd_can_registers reg, u32 val) { priv->write_reg(priv, reg, val); } static u32 ctucan_read32(struct ctucan_priv *priv, enum ctu_can_fd_can_registers reg) { return priv->read_reg(priv, reg); } static void ctucan_write_txt_buf(struct ctucan_priv *priv, enum ctu_can_fd_can_registers buf_base, u32 offset, u32 val) { priv->write_reg(priv, buf_base + offset, val); } #define CTU_CAN_FD_TXTNF(priv) (!!FIELD_GET(REG_STATUS_TXNF, ctucan_read32(priv, CTUCANFD_STATUS))) #define CTU_CAN_FD_ENABLED(priv) (!!FIELD_GET(REG_MODE_ENA, ctucan_read32(priv, CTUCANFD_MODE))) /** * ctucan_state_to_str() - Converts CAN controller state code to corresponding text * @state: CAN controller state code * * Return: Pointer to string representation of the error state */ static const char *ctucan_state_to_str(enum can_state state) { const char *txt = NULL; if (state >= 0 && state < CAN_STATE_MAX) txt = ctucan_state_strings[state]; return txt ? txt : "UNKNOWN"; } /** * ctucan_reset() - Issues software reset request to CTU CAN FD * @ndev: Pointer to net_device structure * * Return: 0 for success, -%ETIMEDOUT if CAN controller does not leave reset */ static int ctucan_reset(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); int i = 100; ctucan_write32(priv, CTUCANFD_MODE, REG_MODE_RST); clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags); do { u16 device_id = FIELD_GET(REG_DEVICE_ID_DEVICE_ID, ctucan_read32(priv, CTUCANFD_DEVICE_ID)); if (device_id == 0xCAFD) return 0; if (!i--) { netdev_warn(ndev, "device did not leave reset\n"); return -ETIMEDOUT; } usleep_range(100, 200); } while (1); } /** * ctucan_set_btr() - Sets CAN bus bit timing in CTU CAN FD * @ndev: Pointer to net_device structure * @bt: Pointer to Bit timing structure * @nominal: True - Nominal bit timing, False - Data bit timing * * Return: 0 - OK, -%EPERM if controller is enabled */ static int ctucan_set_btr(struct net_device *ndev, struct can_bittiming *bt, bool nominal) { struct ctucan_priv *priv = netdev_priv(ndev); int max_ph1_len = 31; u32 btr = 0; u32 prop_seg = bt->prop_seg; u32 phase_seg1 = bt->phase_seg1; if (CTU_CAN_FD_ENABLED(priv)) { netdev_err(ndev, "BUG! Cannot set bittiming - CAN is enabled\n"); return -EPERM; } if (nominal) max_ph1_len = 63; /* The timing calculation functions have only constraints on tseg1, which is prop_seg + * phase1_seg combined. tseg1 is then split in half and stored into prog_seg and phase_seg1. * In CTU CAN FD, PROP is 6/7 bits wide but PH1 only 6/5, so we must re-distribute the * values here. 
*/ if (phase_seg1 > max_ph1_len) { prop_seg += phase_seg1 - max_ph1_len; phase_seg1 = max_ph1_len; bt->prop_seg = prop_seg; bt->phase_seg1 = phase_seg1; } if (nominal) { btr = FIELD_PREP(REG_BTR_PROP, prop_seg); btr |= FIELD_PREP(REG_BTR_PH1, phase_seg1); btr |= FIELD_PREP(REG_BTR_PH2, bt->phase_seg2); btr |= FIELD_PREP(REG_BTR_BRP, bt->brp); btr |= FIELD_PREP(REG_BTR_SJW, bt->sjw); ctucan_write32(priv, CTUCANFD_BTR, btr); } else { btr = FIELD_PREP(REG_BTR_FD_PROP_FD, prop_seg); btr |= FIELD_PREP(REG_BTR_FD_PH1_FD, phase_seg1); btr |= FIELD_PREP(REG_BTR_FD_PH2_FD, bt->phase_seg2); btr |= FIELD_PREP(REG_BTR_FD_BRP_FD, bt->brp); btr |= FIELD_PREP(REG_BTR_FD_SJW_FD, bt->sjw); ctucan_write32(priv, CTUCANFD_BTR_FD, btr); } return 0; } /** * ctucan_set_bittiming() - CAN set nominal bit timing routine * @ndev: Pointer to net_device structure * * Return: 0 on success, -%EPERM on error */ static int ctucan_set_bittiming(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); struct can_bittiming *bt = &priv->can.bittiming; /* Note that bt may be modified here */ return ctucan_set_btr(ndev, bt, true); } /** * ctucan_set_data_bittiming() - CAN set data bit timing routine * @ndev: Pointer to net_device structure * * Return: 0 on success, -%EPERM on error */ static int ctucan_set_data_bittiming(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); struct can_bittiming *dbt = &priv->can.data_bittiming; /* Note that dbt may be modified here */ return ctucan_set_btr(ndev, dbt, false); } /** * ctucan_set_secondary_sample_point() - Sets secondary sample point in CTU CAN FD * @ndev: Pointer to net_device structure * * Return: 0 on success, -%EPERM if controller is enabled */ static int ctucan_set_secondary_sample_point(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); struct can_bittiming *dbt = &priv->can.data_bittiming; int ssp_offset = 0; u32 ssp_cfg = 0; /* No SSP by default */ if (CTU_CAN_FD_ENABLED(priv)) { netdev_err(ndev, "BUG! Cannot set SSP - CAN is enabled\n"); return -EPERM; } /* Use SSP for bit-rates above 1 Mbits/s */ if (dbt->bitrate > 1000000) { /* Calculate SSP in minimal time quanta */ ssp_offset = (priv->can.clock.freq / 1000) * dbt->sample_point / dbt->bitrate; if (ssp_offset > 127) { netdev_warn(ndev, "SSP offset saturated to 127\n"); ssp_offset = 127; } ssp_cfg = FIELD_PREP(REG_TRV_DELAY_SSP_OFFSET, ssp_offset); ssp_cfg |= FIELD_PREP(REG_TRV_DELAY_SSP_SRC, 0x1); } ctucan_write32(priv, CTUCANFD_TRV_DELAY, ssp_cfg); return 0; } /** * ctucan_set_mode() - Sets CTU CAN FDs mode * @priv: Pointer to private data * @mode: Pointer to controller modes to be set */ static void ctucan_set_mode(struct ctucan_priv *priv, const struct can_ctrlmode *mode) { u32 mode_reg = ctucan_read32(priv, CTUCANFD_MODE); mode_reg = (mode->flags & CAN_CTRLMODE_LOOPBACK) ? (mode_reg | REG_MODE_ILBP) : (mode_reg & ~REG_MODE_ILBP); mode_reg = (mode->flags & CAN_CTRLMODE_LISTENONLY) ? (mode_reg | REG_MODE_BMM) : (mode_reg & ~REG_MODE_BMM); mode_reg = (mode->flags & CAN_CTRLMODE_FD) ? (mode_reg | REG_MODE_FDE) : (mode_reg & ~REG_MODE_FDE); mode_reg = (mode->flags & CAN_CTRLMODE_PRESUME_ACK) ? (mode_reg | REG_MODE_ACF) : (mode_reg & ~REG_MODE_ACF); mode_reg = (mode->flags & CAN_CTRLMODE_FD_NON_ISO) ? (mode_reg | REG_MODE_NISOFD) : (mode_reg & ~REG_MODE_NISOFD); /* One shot mode supported indirectly via Retransmit limit */ mode_reg &= ~FIELD_PREP(REG_MODE_RTRTH, 0xF); mode_reg = (mode->flags & CAN_CTRLMODE_ONE_SHOT) ? 
		   (mode_reg | REG_MODE_RTRLE) : (mode_reg & ~REG_MODE_RTRLE);

	/* Some bits fixed:
	 *   TSTM  - Off, User shall not be able to change REC/TEC by hand during operation
	 */
	mode_reg &= ~REG_MODE_TSTM;

	ctucan_write32(priv, CTUCANFD_MODE, mode_reg);
}

/**
 * ctucan_chip_start() - This routine starts the driver
 * @ndev:	Pointer to net_device structure
 *
 * The routine expects that the chip is in reset state. It sets up initial
 * Tx buffers for FIFO priorities, sets bittiming, enables interrupts,
 * switches core to operational mode and changes controller
 * state to %CAN_STATE_STOPPED.
 *
 * Return: 0 on success and failure value on error
 */
static int ctucan_chip_start(struct net_device *ndev)
{
	struct ctucan_priv *priv = netdev_priv(ndev);
	u32 int_ena, int_msk;
	u32 mode_reg;
	int err;
	struct can_ctrlmode mode;

	priv->txb_prio = 0x01234567;
	priv->txb_head = 0;
	priv->txb_tail = 0;
	ctucan_write32(priv, CTUCANFD_TX_PRIORITY, priv->txb_prio);

	/* Configure bit-rates and ssp */
	err = ctucan_set_bittiming(ndev);
	if (err < 0)
		return err;

	err = ctucan_set_data_bittiming(ndev);
	if (err < 0)
		return err;

	err = ctucan_set_secondary_sample_point(ndev);
	if (err < 0)
		return err;

	/* Configure modes */
	mode.flags = priv->can.ctrlmode;
	mode.mask = 0xFFFFFFFF;
	ctucan_set_mode(priv, &mode);

	/* Configure interrupts */
	int_ena = REG_INT_STAT_RBNEI |
		  REG_INT_STAT_TXBHCI |
		  REG_INT_STAT_EWLI |
		  REG_INT_STAT_FCSI;

	/* Bus error reporting -> Allow Error/Arb.lost interrupts */
	if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		int_ena |= REG_INT_STAT_ALI | REG_INT_STAT_BEI;

	int_msk = ~int_ena; /* Mask all disabled interrupts */

	/* It's after reset, so there is no need to clear anything */
	ctucan_write32(priv, CTUCANFD_INT_MASK_SET, int_msk);
	ctucan_write32(priv, CTUCANFD_INT_ENA_SET, int_ena);

	/* Controller enters ERROR_ACTIVE on initial FCSI */
	priv->can.state = CAN_STATE_STOPPED;

	/* Enable the controller */
	mode_reg = ctucan_read32(priv, CTUCANFD_MODE);
	mode_reg |= REG_MODE_ENA;
	ctucan_write32(priv, CTUCANFD_MODE, mode_reg);

	return 0;
}

/**
 * ctucan_do_set_mode() - Sets mode of the driver
 * @ndev:	Pointer to net_device structure
 * @mode:	Tells the mode of the driver
 *
 * This checks the driver state and calls the corresponding mode setup.
* * Return: 0 on success and failure value on error */ static int ctucan_do_set_mode(struct net_device *ndev, enum can_mode mode) { int ret; switch (mode) { case CAN_MODE_START: ret = ctucan_reset(ndev); if (ret < 0) return ret; ret = ctucan_chip_start(ndev); if (ret < 0) { netdev_err(ndev, "ctucan_chip_start failed!\n"); return ret; } netif_wake_queue(ndev); break; default: ret = -EOPNOTSUPP; break; } return ret; } /** * ctucan_get_tx_status() - Gets status of TXT buffer * @priv: Pointer to private data * @buf: Buffer index (0-based) * * Return: Status of TXT buffer */ static enum ctucan_txtb_status ctucan_get_tx_status(struct ctucan_priv *priv, u8 buf) { u32 tx_status = ctucan_read32(priv, CTUCANFD_TX_STATUS); enum ctucan_txtb_status status = (tx_status >> (buf * 4)) & 0x7; return status; } /** * ctucan_is_txt_buf_writable() - Checks if frame can be inserted to TXT Buffer * @priv: Pointer to private data * @buf: Buffer index (0-based) * * Return: True - Frame can be inserted to TXT Buffer, False - If attempted, frame will not be * inserted to TXT Buffer */ static bool ctucan_is_txt_buf_writable(struct ctucan_priv *priv, u8 buf) { enum ctucan_txtb_status buf_status; buf_status = ctucan_get_tx_status(priv, buf); if (buf_status == TXT_RDY || buf_status == TXT_TRAN || buf_status == TXT_ABTP) return false; return true; } /** * ctucan_insert_frame() - Inserts frame to TXT buffer * @priv: Pointer to private data * @cf: Pointer to CAN frame to be inserted * @buf: TXT Buffer index to which frame is inserted (0-based) * @isfdf: True - CAN FD Frame, False - CAN 2.0 Frame * * Return: True - Frame inserted successfully * False - Frame was not inserted due to one of: * 1. TXT Buffer is not writable (it is in wrong state) * 2. Invalid TXT buffer index * 3. Invalid frame length */ static bool ctucan_insert_frame(struct ctucan_priv *priv, const struct canfd_frame *cf, u8 buf, bool isfdf) { u32 buf_base; u32 ffw = 0; u32 idw = 0; unsigned int i; if (buf >= priv->ntxbufs) return false; if (!ctucan_is_txt_buf_writable(priv, buf)) return false; if (cf->len > CANFD_MAX_DLEN) return false; /* Prepare Frame format */ if (cf->can_id & CAN_RTR_FLAG) ffw |= REG_FRAME_FORMAT_W_RTR; if (cf->can_id & CAN_EFF_FLAG) ffw |= REG_FRAME_FORMAT_W_IDE; if (isfdf) { ffw |= REG_FRAME_FORMAT_W_FDF; if (cf->flags & CANFD_BRS) ffw |= REG_FRAME_FORMAT_W_BRS; } ffw |= FIELD_PREP(REG_FRAME_FORMAT_W_DLC, can_fd_len2dlc(cf->len)); /* Prepare identifier */ if (cf->can_id & CAN_EFF_FLAG) idw = cf->can_id & CAN_EFF_MASK; else idw = FIELD_PREP(REG_IDENTIFIER_W_IDENTIFIER_BASE, cf->can_id & CAN_SFF_MASK); /* Write ID, Frame format, Don't write timestamp -> Time triggered transmission disabled */ buf_base = (buf + 1) * 0x100; ctucan_write_txt_buf(priv, buf_base, CTUCANFD_FRAME_FORMAT_W, ffw); ctucan_write_txt_buf(priv, buf_base, CTUCANFD_IDENTIFIER_W, idw); /* Write Data payload */ if (!(cf->can_id & CAN_RTR_FLAG)) { for (i = 0; i < cf->len; i += 4) { u32 data = le32_to_cpu(*(__le32 *)(cf->data + i)); ctucan_write_txt_buf(priv, buf_base, CTUCANFD_DATA_1_4_W + i, data); } } return true; } /** * ctucan_give_txtb_cmd() - Applies command on TXT buffer * @priv: Pointer to private data * @cmd: Command to give * @buf: Buffer index (0-based) */ static void ctucan_give_txtb_cmd(struct ctucan_priv *priv, enum ctucan_txtb_command cmd, u8 buf) { u32 tx_cmd = cmd; tx_cmd |= 1 << (buf + 8); ctucan_write32(priv, CTUCANFD_TX_COMMAND, tx_cmd); } /** * ctucan_start_xmit() - Starts the transmission * @skb: sk_buff pointer that contains data to be Txed * 
@ndev:	Pointer to net_device structure
 *
 * Invoked from upper layers to initiate transmission. Uses the next available free TXT Buffer and
 * populates its fields to start the transmission.
 *
 * Return: %NETDEV_TX_OK on success, %NETDEV_TX_BUSY when no free TXT buffer is available,
 *         negative return values reserved for error cases
 */
static netdev_tx_t ctucan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ctucan_priv *priv = netdev_priv(ndev);
	struct canfd_frame *cf = (struct canfd_frame *)skb->data;
	u32 txtb_id;
	bool ok;
	unsigned long flags;

	if (can_dev_dropped_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (unlikely(!CTU_CAN_FD_TXTNF(priv))) {
		netif_stop_queue(ndev);
		netdev_err(ndev, "BUG!, no TXB free when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	txtb_id = priv->txb_head % priv->ntxbufs;
	ctucan_netdev_dbg(ndev, "%s: using TXB#%u\n", __func__, txtb_id);
	ok = ctucan_insert_frame(priv, cf, txtb_id, can_is_canfd_skb(skb));

	if (!ok) {
		netdev_err(ndev, "BUG! TXNF set but cannot insert frame into TXTB! HW Bug?");
		kfree_skb(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	can_put_echo_skb(skb, ndev, txtb_id, 0);

	spin_lock_irqsave(&priv->tx_lock, flags);
	ctucan_give_txtb_cmd(priv, TXT_CMD_SET_READY, txtb_id);
	priv->txb_head++;

	/* Check if all TX buffers are full */
	if (!CTU_CAN_FD_TXTNF(priv))
		netif_stop_queue(ndev);

	spin_unlock_irqrestore(&priv->tx_lock, flags);

	return NETDEV_TX_OK;
}

/**
 * ctucan_read_rx_frame() - Reads frame from RX FIFO
 * @priv:	Pointer to CTU CAN FD's private data
 * @cf:		Pointer to CAN frame struct
 * @ffw:	Previously read frame format word
 *
 * Note: Frame format word must be read separately and provided in 'ffw'.
 */
static void ctucan_read_rx_frame(struct ctucan_priv *priv, struct canfd_frame *cf, u32 ffw)
{
	u32 idw;
	unsigned int i;
	unsigned int wc;
	unsigned int len;

	idw = ctucan_read32(priv, CTUCANFD_RX_DATA);
	if (FIELD_GET(REG_FRAME_FORMAT_W_IDE, ffw))
		cf->can_id = (idw & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (idw >> 18) & CAN_SFF_MASK;

	/* BRS, ESI, RTR Flags */
	if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw)) {
		if (FIELD_GET(REG_FRAME_FORMAT_W_BRS, ffw))
			cf->flags |= CANFD_BRS;
		if (FIELD_GET(REG_FRAME_FORMAT_W_ESI_RSV, ffw))
			cf->flags |= CANFD_ESI;
	} else if (FIELD_GET(REG_FRAME_FORMAT_W_RTR, ffw)) {
		cf->can_id |= CAN_RTR_FLAG;
	}

	wc = FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw) - 3;

	/* DLC */
	if (FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw) <= 8) {
		len = FIELD_GET(REG_FRAME_FORMAT_W_DLC, ffw);
	} else {
		if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
			len = wc << 2;
		else
			len = 8;
	}
	cf->len = len;
	if (unlikely(len > wc * 4))
		len = wc * 4;

	/* Timestamp - Read and throw away */
	ctucan_read32(priv, CTUCANFD_RX_DATA);
	ctucan_read32(priv, CTUCANFD_RX_DATA);

	/* Data */
	for (i = 0; i < len; i += 4) {
		u32 data = ctucan_read32(priv, CTUCANFD_RX_DATA);

		*(__le32 *)(cf->data + i) = cpu_to_le32(data);
	}
	while (unlikely(i < wc * 4)) {
		ctucan_read32(priv, CTUCANFD_RX_DATA);
		i += 4;
	}
}

/**
 * ctucan_rx() -  Called from CAN ISR to complete the received frame processing
 * @ndev:	Pointer to net_device structure
 *
 * This function is invoked from the CAN ISR (NAPI poll) to process the Rx frames. It does minimal
 * processing and invokes "netif_receive_skb" to complete further processing.
 * Return: 1 when a frame is passed to the network layer, 0 when the first frame word was read
 *         but the system is temporarily out of free SKBs and processing is deferred until an
 *         SKB can be allocated, -%EAGAIN in case of an empty Rx FIFO.
 */
static int ctucan_rx(struct net_device *ndev)
{
	struct ctucan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct canfd_frame *cf;
	struct sk_buff *skb;
	u32 ffw;

	if (test_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags)) {
		ffw = priv->rxfrm_first_word;
		clear_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
	} else {
		ffw = ctucan_read32(priv, CTUCANFD_RX_DATA);
	}

	if (!FIELD_GET(REG_FRAME_FORMAT_W_RWCNT, ffw))
		return -EAGAIN;

	if (FIELD_GET(REG_FRAME_FORMAT_W_FDF, ffw))
		skb = alloc_canfd_skb(ndev, &cf);
	else
		skb = alloc_can_skb(ndev, (struct can_frame **)&cf);

	if (unlikely(!skb)) {
		priv->rxfrm_first_word = ffw;
		set_bit(CTUCANFD_FLAG_RX_FFW_BUFFERED, &priv->drv_flags);
		return 0;
	}

	ctucan_read_rx_frame(priv, cf, ffw);

	stats->rx_bytes += cf->len;
	stats->rx_packets++;
	netif_receive_skb(skb);

	return 1;
}

/**
 * ctucan_read_fault_state() - Reads CTU CAN FD's fault confinement state.
 * @priv:	Pointer to private data
 *
 * Return: Fault confinement state of the controller
 */
static enum can_state ctucan_read_fault_state(struct ctucan_priv *priv)
{
	u32 fs;
	u32 rec_tec;
	u32 ewl;

	fs = ctucan_read32(priv, CTUCANFD_EWL);
	rec_tec = ctucan_read32(priv, CTUCANFD_REC);
	ewl = FIELD_GET(REG_EWL_EW_LIMIT, fs);

	if (FIELD_GET(REG_EWL_ERA, fs)) {
		if (ewl > FIELD_GET(REG_REC_REC_VAL, rec_tec) &&
		    ewl > FIELD_GET(REG_REC_TEC_VAL, rec_tec))
			return CAN_STATE_ERROR_ACTIVE;
		else
			return CAN_STATE_ERROR_WARNING;
	} else if (FIELD_GET(REG_EWL_ERP, fs)) {
		return CAN_STATE_ERROR_PASSIVE;
	} else if (FIELD_GET(REG_EWL_BOF, fs)) {
		return CAN_STATE_BUS_OFF;
	}

	WARN(true, "Invalid error state");
	return CAN_STATE_ERROR_PASSIVE;
}

/**
 * ctucan_get_rec_tec() - Reads REC/TEC counter values from controller
 * @priv:	Pointer to private data
 * @bec:	Pointer to Error counter structure
 */
static void ctucan_get_rec_tec(struct ctucan_priv *priv, struct can_berr_counter *bec)
{
	u32 err_ctrs = ctucan_read32(priv, CTUCANFD_REC);

	bec->rxerr = FIELD_GET(REG_REC_REC_VAL, err_ctrs);
	bec->txerr = FIELD_GET(REG_REC_TEC_VAL, err_ctrs);
}

/**
 * ctucan_err_interrupt() - Error frame ISR
 * @ndev:	net_device pointer
 * @isr:	interrupt status register value
 *
 * This is the CAN error interrupt; it checks the type of error and forwards the error
 * frame to upper layers.
 */
static void ctucan_err_interrupt(struct net_device *ndev, u32 isr)
{
	struct ctucan_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	enum can_state state;
	struct can_berr_counter bec;
	u32 err_capt_alc;
	int dologerr = net_ratelimit();

	ctucan_get_rec_tec(priv, &bec);
	state = ctucan_read_fault_state(priv);
	err_capt_alc = ctucan_read32(priv, CTUCANFD_ERR_CAPT);

	if (dologerr)
		netdev_info(ndev, "%s: ISR = 0x%08x, rxerr %d, txerr %d, error type %lu, pos %lu, ALC id_field %lu, bit %lu\n",
			    __func__, isr, bec.rxerr, bec.txerr,
			    FIELD_GET(REG_ERR_CAPT_ERR_TYPE, err_capt_alc),
			    FIELD_GET(REG_ERR_CAPT_ERR_POS, err_capt_alc),
			    FIELD_GET(REG_ERR_CAPT_ALC_ID_FIELD, err_capt_alc),
			    FIELD_GET(REG_ERR_CAPT_ALC_BIT, err_capt_alc));

	skb = alloc_can_err_skb(ndev, &cf);

	/* EWLI: error warning limit condition met
	 * FCSI: fault confinement state changed
	 * ALI:  arbitration lost (just informative)
	 * BEI:  bus error interrupt
	 */
	if (FIELD_GET(REG_INT_STAT_FCSI, isr) || FIELD_GET(REG_INT_STAT_EWLI, isr)) {
		netdev_info(ndev, "state changes from %s to %s\n",
			    ctucan_state_to_str(priv->can.state),
			    ctucan_state_to_str(state));

		if (priv->can.state == state)
			netdev_warn(ndev,
				    "current and previous state is the same! (missed interrupt?)\n");

		priv->can.state = state;
		switch (state) {
		case CAN_STATE_BUS_OFF:
			priv->can.can_stats.bus_off++;
			can_bus_off(ndev);
			if (skb)
				cf->can_id |= CAN_ERR_BUSOFF;
			break;
		case CAN_STATE_ERROR_PASSIVE:
			priv->can.can_stats.error_passive++;
			if (skb) {
				cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
				cf->data[1] = (bec.rxerr > 127) ?
						CAN_ERR_CRTL_RX_PASSIVE :
						CAN_ERR_CRTL_TX_PASSIVE;
				cf->data[6] = bec.txerr;
				cf->data[7] = bec.rxerr;
			}
			break;
		case CAN_STATE_ERROR_WARNING:
			priv->can.can_stats.error_warning++;
			if (skb) {
				cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT;
				cf->data[1] |= (bec.txerr > bec.rxerr) ?
						CAN_ERR_CRTL_TX_WARNING :
						CAN_ERR_CRTL_RX_WARNING;
				cf->data[6] = bec.txerr;
				cf->data[7] = bec.rxerr;
			}
			break;
		case CAN_STATE_ERROR_ACTIVE:
			if (skb) {
				cf->can_id |= CAN_ERR_CNT;
				cf->data[1] = CAN_ERR_CRTL_ACTIVE;
				cf->data[6] = bec.txerr;
				cf->data[7] = bec.rxerr;
			}
			break;
		default:
			netdev_warn(ndev, "unhandled error state (%d:%s)!\n",
				    state, ctucan_state_to_str(state));
			break;
		}
	}

	/* Check for Arbitration Lost interrupt */
	if (FIELD_GET(REG_INT_STAT_ALI, isr)) {
		if (dologerr)
			netdev_info(ndev, "arbitration lost\n");
		priv->can.can_stats.arbitration_lost++;
		if (skb) {
			cf->can_id |= CAN_ERR_LOSTARB;
			cf->data[0] = CAN_ERR_LOSTARB_UNSPEC;
		}
	}

	/* Check for Bus Error interrupt */
	if (FIELD_GET(REG_INT_STAT_BEI, isr)) {
		netdev_info(ndev, "bus error\n");
		priv->can.can_stats.bus_error++;
		stats->rx_errors++;
		if (skb) {
			cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
			cf->data[2] = CAN_ERR_PROT_UNSPEC;
			cf->data[3] = CAN_ERR_PROT_LOC_UNSPEC;
		}
	}

	if (skb) {
		stats->rx_packets++;
		stats->rx_bytes += cf->can_dlc;
		netif_rx(skb);
	}
}

/**
 * ctucan_rx_poll() - Poll routine for rx packets (NAPI)
 * @napi:	NAPI structure pointer
 * @quota:	Max number of rx packets to be processed.
 *
 * This is the poll routine for the rx part. It processes packets up to the maximum quota value.
 *
 * Return: Number of packets received
 */
static int ctucan_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct ctucan_priv *priv = netdev_priv(ndev);
	int work_done = 0;
	u32 status;
	u32 framecnt;
	int res = 1;

	framecnt = FIELD_GET(REG_RX_STATUS_RXFRC, ctucan_read32(priv, CTUCANFD_RX_STATUS));
	while (framecnt && work_done < quota && res > 0) {
		res = ctucan_rx(ndev);
		work_done++;
		framecnt = FIELD_GET(REG_RX_STATUS_RXFRC,
				     ctucan_read32(priv, CTUCANFD_RX_STATUS));
	}

	/* Check for RX FIFO Overflow */
	status = ctucan_read32(priv, CTUCANFD_STATUS);
	if (FIELD_GET(REG_STATUS_DOR, status)) {
		struct net_device_stats *stats = &ndev->stats;
		struct can_frame *cf;
		struct sk_buff *skb;

		netdev_info(ndev, "rx_poll: rx fifo overflow\n");
		stats->rx_over_errors++;
		stats->rx_errors++;
		skb = alloc_can_err_skb(ndev, &cf);
		if (skb) {
			cf->can_id |= CAN_ERR_CRTL;
			cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
			stats->rx_packets++;
			stats->rx_bytes += cf->can_dlc;
			netif_rx(skb);
		}

		/* Clear Data Overrun */
		ctucan_write32(priv, CTUCANFD_COMMAND, REG_COMMAND_CDO);
	}

	if (!framecnt && res != 0) {
		if (napi_complete_done(napi, work_done)) {
			/* Clear and enable RBNEI. It is level-triggered, so
			 * there is no race condition.
*/ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_RBNEI); ctucan_write32(priv, CTUCANFD_INT_MASK_CLR, REG_INT_STAT_RBNEI); } } return work_done; } /** * ctucan_rotate_txb_prio() - Rotates priorities of TXT Buffers * @ndev: net_device pointer */ static void ctucan_rotate_txb_prio(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); u32 prio = priv->txb_prio; prio = (prio << 4) | ((prio >> ((priv->ntxbufs - 1) * 4)) & 0xF); ctucan_netdev_dbg(ndev, "%s: from 0x%08x to 0x%08x\n", __func__, priv->txb_prio, prio); priv->txb_prio = prio; ctucan_write32(priv, CTUCANFD_TX_PRIORITY, prio); } /** * ctucan_tx_interrupt() - Tx done Isr * @ndev: net_device pointer */ static void ctucan_tx_interrupt(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; bool first = true; bool some_buffers_processed; unsigned long flags; enum ctucan_txtb_status txtb_status; u32 txtb_id; /* read tx_status * if txb[n].finished (bit 2) * if ok -> echo * if error / aborted -> ?? (find how to handle oneshot mode) * txb_tail++ */ do { spin_lock_irqsave(&priv->tx_lock, flags); some_buffers_processed = false; while ((int)(priv->txb_head - priv->txb_tail) > 0) { txtb_id = priv->txb_tail % priv->ntxbufs; txtb_status = ctucan_get_tx_status(priv, txtb_id); ctucan_netdev_dbg(ndev, "TXI: TXB#%u: status 0x%x\n", txtb_id, txtb_status); switch (txtb_status) { case TXT_TOK: ctucan_netdev_dbg(ndev, "TXT_OK\n"); stats->tx_bytes += can_get_echo_skb(ndev, txtb_id, NULL); stats->tx_packets++; break; case TXT_ERR: /* This indicated that retransmit limit has been reached. Obviously * we should not echo the frame, but also not indicate any kind of * error. If desired, it was already reported (possible multiple * times) on each arbitration lost. */ netdev_warn(ndev, "TXB in Error state\n"); can_free_echo_skb(ndev, txtb_id, NULL); stats->tx_dropped++; break; case TXT_ABT: /* Same as for TXT_ERR, only with different cause. We *could* * re-queue the frame, but multiqueue/abort is not supported yet * anyway. */ netdev_warn(ndev, "TXB in Aborted state\n"); can_free_echo_skb(ndev, txtb_id, NULL); stats->tx_dropped++; break; default: /* Bug only if the first buffer is not finished, otherwise it is * pretty much expected. */ if (first) { netdev_err(ndev, "BUG: TXB#%u not in a finished state (0x%x)!\n", txtb_id, txtb_status); spin_unlock_irqrestore(&priv->tx_lock, flags); /* do not clear nor wake */ return; } goto clear; } priv->txb_tail++; first = false; some_buffers_processed = true; /* Adjust priorities *before* marking the buffer as empty. */ ctucan_rotate_txb_prio(ndev); ctucan_give_txtb_cmd(priv, TXT_CMD_SET_EMPTY, txtb_id); } clear: spin_unlock_irqrestore(&priv->tx_lock, flags); /* If no buffers were processed this time, we cannot clear - that would introduce * a race condition. */ if (some_buffers_processed) { /* Clear the interrupt again. We do not want to receive again interrupt for * the buffer already handled. If it is the last finished one then it would * cause log of spurious interrupt. */ ctucan_write32(priv, CTUCANFD_INT_STAT, REG_INT_STAT_TXBHCI); } } while (some_buffers_processed); spin_lock_irqsave(&priv->tx_lock, flags); /* Check if at least one TX buffer is free */ if (CTU_CAN_FD_TXTNF(priv)) netif_wake_queue(ndev); spin_unlock_irqrestore(&priv->tx_lock, flags); } /** * ctucan_interrupt() - CAN Isr * @irq: irq number * @dev_id: device id pointer * * This is the CTU CAN FD ISR. 
It checks for the type of interrupt * and invokes the corresponding ISR. * * Return: * IRQ_NONE - If CAN device is in sleep mode, IRQ_HANDLED otherwise */ static irqreturn_t ctucan_interrupt(int irq, void *dev_id) { struct net_device *ndev = (struct net_device *)dev_id; struct ctucan_priv *priv = netdev_priv(ndev); u32 isr, icr; u32 imask; int irq_loops; for (irq_loops = 0; irq_loops < 10000; irq_loops++) { /* Get the interrupt status */ isr = ctucan_read32(priv, CTUCANFD_INT_STAT); if (!isr) return irq_loops ? IRQ_HANDLED : IRQ_NONE; /* Receive Buffer Not Empty Interrupt */ if (FIELD_GET(REG_INT_STAT_RBNEI, isr)) { ctucan_netdev_dbg(ndev, "RXBNEI\n"); /* Mask RXBNEI the first, then clear interrupt and schedule NAPI. Even if * another IRQ fires, RBNEI will always be 0 (masked). */ icr = REG_INT_STAT_RBNEI; ctucan_write32(priv, CTUCANFD_INT_MASK_SET, icr); ctucan_write32(priv, CTUCANFD_INT_STAT, icr); napi_schedule(&priv->napi); } /* TXT Buffer HW Command Interrupt */ if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) { ctucan_netdev_dbg(ndev, "TXBHCI\n"); /* Cleared inside */ ctucan_tx_interrupt(ndev); } /* Error interrupts */ if (FIELD_GET(REG_INT_STAT_EWLI, isr) || FIELD_GET(REG_INT_STAT_FCSI, isr) || FIELD_GET(REG_INT_STAT_ALI, isr)) { icr = isr & (REG_INT_STAT_EWLI | REG_INT_STAT_FCSI | REG_INT_STAT_ALI); ctucan_netdev_dbg(ndev, "some ERR interrupt: clearing 0x%08x\n", icr); ctucan_write32(priv, CTUCANFD_INT_STAT, icr); ctucan_err_interrupt(ndev, isr); } /* Ignore RI, TI, LFI, RFI, BSI */ } netdev_err(ndev, "%s: stuck interrupt (isr=0x%08x), stopping\n", __func__, isr); if (FIELD_GET(REG_INT_STAT_TXBHCI, isr)) { int i; netdev_err(ndev, "txb_head=0x%08x txb_tail=0x%08x\n", priv->txb_head, priv->txb_tail); for (i = 0; i < priv->ntxbufs; i++) { u32 status = ctucan_get_tx_status(priv, i); netdev_err(ndev, "txb[%d] txb status=0x%08x\n", i, status); } } imask = 0xffffffff; ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, imask); ctucan_write32(priv, CTUCANFD_INT_MASK_SET, imask); return IRQ_HANDLED; } /** * ctucan_chip_stop() - Driver stop routine * @ndev: Pointer to net_device structure * * This is the drivers stop routine. It will disable the * interrupts and disable the controller. */ static void ctucan_chip_stop(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); u32 mask = 0xffffffff; u32 mode; /* Disable interrupts and disable CAN */ ctucan_write32(priv, CTUCANFD_INT_ENA_CLR, mask); ctucan_write32(priv, CTUCANFD_INT_MASK_SET, mask); mode = ctucan_read32(priv, CTUCANFD_MODE); mode &= ~REG_MODE_ENA; ctucan_write32(priv, CTUCANFD_MODE, mode); priv->can.state = CAN_STATE_STOPPED; } /** * ctucan_open() - Driver open routine * @ndev: Pointer to net_device structure * * This is the driver open routine. 
* Return: 0 on success and failure value on error */ static int ctucan_open(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); int ret; ret = pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); pm_runtime_put_noidle(priv->dev); return ret; } ret = ctucan_reset(ndev); if (ret < 0) goto err_reset; /* Common open */ ret = open_candev(ndev); if (ret) { netdev_warn(ndev, "open_candev failed!\n"); goto err_open; } ret = request_irq(ndev->irq, ctucan_interrupt, priv->irq_flags, ndev->name, ndev); if (ret < 0) { netdev_err(ndev, "irq allocation for CAN failed\n"); goto err_irq; } ret = ctucan_chip_start(ndev); if (ret < 0) { netdev_err(ndev, "ctucan_chip_start failed!\n"); goto err_chip_start; } netdev_info(ndev, "ctu_can_fd device registered\n"); napi_enable(&priv->napi); netif_start_queue(ndev); return 0; err_chip_start: free_irq(ndev->irq, ndev); err_irq: close_candev(ndev); err_open: err_reset: pm_runtime_put(priv->dev); return ret; } /** * ctucan_close() - Driver close routine * @ndev: Pointer to net_device structure * * Return: 0 always */ static int ctucan_close(struct net_device *ndev) { struct ctucan_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); napi_disable(&priv->napi); ctucan_chip_stop(ndev); free_irq(ndev->irq, ndev); close_candev(ndev); pm_runtime_put(priv->dev); return 0; } /** * ctucan_get_berr_counter() - error counter routine * @ndev: Pointer to net_device structure * @bec: Pointer to can_berr_counter structure * * This is the driver error counter routine. * Return: 0 on success and failure value on error */ static int ctucan_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { struct ctucan_priv *priv = netdev_priv(ndev); int ret; ret = pm_runtime_get_sync(priv->dev); if (ret < 0) { netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n", __func__, ret); pm_runtime_put_noidle(priv->dev); return ret; } ctucan_get_rec_tec(priv, bec); pm_runtime_put(priv->dev); return 0; } static const struct net_device_ops ctucan_netdev_ops = { .ndo_open = ctucan_open, .ndo_stop = ctucan_close, .ndo_start_xmit = ctucan_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ctucan_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; int ctucan_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct ctucan_priv *priv = netdev_priv(ndev); if (netif_running(ndev)) { netif_stop_queue(ndev); netif_device_detach(ndev); } priv->can.state = CAN_STATE_SLEEPING; return 0; } EXPORT_SYMBOL(ctucan_suspend); int ctucan_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); struct ctucan_priv *priv = netdev_priv(ndev); priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(ndev)) { netif_device_attach(ndev); netif_start_queue(ndev); } return 0; } EXPORT_SYMBOL(ctucan_resume); int ctucan_probe_common(struct device *dev, void __iomem *addr, int irq, unsigned int ntxbufs, unsigned long can_clk_rate, int pm_enable_call, void (*set_drvdata_fnc)(struct device *dev, struct net_device *ndev)) { struct ctucan_priv *priv; struct net_device *ndev; int ret; /* Create a CAN device instance */ ndev = alloc_candev(sizeof(struct ctucan_priv), ntxbufs); if (!ndev) return -ENOMEM; priv = netdev_priv(ndev); spin_lock_init(&priv->tx_lock); INIT_LIST_HEAD(&priv->peers_on_pdev); priv->ntxbufs = ntxbufs; priv->dev = dev; priv->can.bittiming_const = &ctu_can_fd_bit_timing_max; priv->can.data_bittiming_const = 
						&ctu_can_fd_bit_timing_data_max;

	priv->can.do_set_mode = ctucan_do_set_mode;

	/* Needed for timing adjustment to be performed as soon as possible */
	priv->can.do_set_bittiming = ctucan_set_bittiming;
	priv->can.do_set_data_bittiming = ctucan_set_data_bittiming;

	priv->can.do_get_berr_counter = ctucan_get_berr_counter;
	priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK
					| CAN_CTRLMODE_LISTENONLY
					| CAN_CTRLMODE_FD
					| CAN_CTRLMODE_PRESUME_ACK
					| CAN_CTRLMODE_BERR_REPORTING
					| CAN_CTRLMODE_FD_NON_ISO
					| CAN_CTRLMODE_ONE_SHOT;
	priv->mem_base = addr;

	/* Get IRQ for the device */
	ndev->irq = irq;
	ndev->flags |= IFF_ECHO;	/* We support local echo */

	if (set_drvdata_fnc)
		set_drvdata_fnc(dev, ndev);
	SET_NETDEV_DEV(ndev, dev);
	ndev->netdev_ops = &ctucan_netdev_ops;
	ndev->ethtool_ops = &ctucan_ethtool_ops;

	/* Getting the can_clk info */
	if (!can_clk_rate) {
		priv->can_clk = devm_clk_get(dev, NULL);
		if (IS_ERR(priv->can_clk)) {
			dev_err(dev, "Device clock not found.\n");
			ret = PTR_ERR(priv->can_clk);
			goto err_free;
		}
		can_clk_rate = clk_get_rate(priv->can_clk);
	}

	priv->write_reg = ctucan_write32_le;
	priv->read_reg = ctucan_read32_le;

	if (pm_enable_call)
		pm_runtime_enable(dev);
	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		netdev_err(ndev, "%s: pm_runtime_get failed(%d)\n",
			   __func__, ret);
		pm_runtime_put_noidle(priv->dev);
		goto err_pmdisable;
	}

	/* Check for big-endianness and set the matching IO-accessors */
	if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
		priv->write_reg = ctucan_write32_be;
		priv->read_reg = ctucan_read32_be;
		if ((ctucan_read32(priv, CTUCANFD_DEVICE_ID) & 0xFFFF) != CTUCANFD_ID) {
			netdev_err(ndev, "CTU_CAN_FD signature not found\n");
			ret = -ENODEV;
			goto err_deviceoff;
		}
	}

	ret = ctucan_reset(ndev);
	if (ret < 0)
		goto err_deviceoff;

	priv->can.clock.freq = can_clk_rate;

	netif_napi_add(ndev, &priv->napi, ctucan_rx_poll);

	ret = register_candev(ndev);
	if (ret) {
		dev_err(dev, "registering candev failed (err=%d)\n", ret);
		goto err_deviceoff;
	}

	pm_runtime_put(dev);

	netdev_dbg(ndev, "mem_base=0x%p irq=%d clock=%d, no. of txt buffers:%d\n",
		   priv->mem_base, ndev->irq, priv->can.clock.freq, priv->ntxbufs);

	return 0;

err_deviceoff:
	pm_runtime_put(priv->dev);
err_pmdisable:
	if (pm_enable_call)
		pm_runtime_disable(dev);
err_free:
	list_del_init(&priv->peers_on_pdev);
	free_candev(ndev);
	return ret;
}
EXPORT_SYMBOL(ctucan_probe_common);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Martin Jerabek <[email protected]>");
MODULE_AUTHOR("Pavel Pisa <[email protected]>");
MODULE_AUTHOR("Ondrej Ille <[email protected]>");
MODULE_DESCRIPTION("CTU CAN FD interface");
linux-master
drivers/net/can/ctucanfd/ctucanfd_base.c
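A worked example of the secondary sample point computation in ctucan_set_secondary_sample_point() above: ssp_offset = (clock_freq / 1000) * sample_point / bitrate, saturated at 127 as the driver does. The clock rate, sample point (in tenths of a percent, as in struct can_bittiming) and data bitrate below are assumed example values.

#include <stdio.h>

int main(void)
{
	unsigned long clock_freq = 100000000;	/* assumed 100 MHz core clock */
	unsigned int sample_point = 750;	/* 75.0 %, in tenths of a percent */
	unsigned int bitrate = 4000000;		/* assumed 4 Mbit/s data phase */
	unsigned long ssp = (clock_freq / 1000) * sample_point / bitrate;

	if (ssp > 127)
		ssp = 127;	/* the driver saturates the offset at 127 */
	printf("ssp_offset = %lu\n", ssp);	/* prints 18 for these values */
	return 0;
}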
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2007, 2011 Wolfgang Grandegger <[email protected]> * Copyright (C) 2012 Stephane Grosjean <[email protected]> * * Copyright (C) 2016 PEAK System-Technik GmbH */ #include <linux/can.h> #include <linux/can/dev.h> #include <linux/ethtool.h> #include "peak_canfd_user.h" /* internal IP core cache size (used as default echo skbs max number) */ #define PCANFD_ECHO_SKB_MAX 24 /* bittiming ranges of the PEAK-System PC CAN-FD interfaces */ static const struct can_bittiming_const peak_canfd_nominal_const = { .name = "peak_canfd", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), .brp_inc = 1, }; static const struct can_bittiming_const peak_canfd_data_const = { .name = "peak_canfd", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TFAST_BRP_BITS), .brp_inc = 1, }; static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv) { priv->cmd_len = 0; return priv; } static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op) { struct pucan_command *cmd; if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen) return NULL; cmd = priv->cmd_buffer + priv->cmd_len; /* reset all unused bit to default */ memset(cmd, 0, sizeof(*cmd)); cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op); priv->cmd_len += sizeof(*cmd); return cmd; } static int pucan_write_cmd(struct peak_canfd_priv *priv) { int err; if (priv->pre_cmd) { err = priv->pre_cmd(priv); if (err) return err; } err = priv->write_cmd(priv); if (err) return err; if (priv->post_cmd) err = priv->post_cmd(priv); return err; } /* uCAN commands interface functions */ static int pucan_set_reset_mode(struct peak_canfd_priv *priv) { pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE); return pucan_write_cmd(priv); } static int pucan_set_normal_mode(struct peak_canfd_priv *priv) { int err; pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE); err = pucan_write_cmd(priv); if (!err) priv->can.state = CAN_STATE_ERROR_ACTIVE; return err; } static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv) { int err; pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE); err = pucan_write_cmd(priv); if (!err) priv->can.state = CAN_STATE_ERROR_ACTIVE; return err; } static int pucan_set_timing_slow(struct peak_canfd_priv *priv, const struct can_bittiming *pbt) { struct pucan_timing_slow *cmd; cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW); cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->sjw - 1, priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES); cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1); cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->phase_seg2 - 1); cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->brp - 1)); cmd->ewl = 96; /* default */ netdev_dbg(priv->ndev, "nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n", le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t); return pucan_write_cmd(priv); } static int pucan_set_timing_fast(struct peak_canfd_priv *priv, const struct can_bittiming *pbt) { struct pucan_timing_fast *cmd; cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_FAST); cmd->sjw = PUCAN_TFAST_SJW(pbt->sjw - 1); cmd->tseg1 = PUCAN_TFAST_TSEG1(pbt->prop_seg + pbt->phase_seg1 - 1); cmd->tseg2 = PUCAN_TFAST_TSEG2(pbt->phase_seg2 
- 1); cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(pbt->brp - 1)); netdev_dbg(priv->ndev, "data: brp=%u tseg1=%u tseg2=%u sjw=%u\n", le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw); return pucan_write_cmd(priv); } static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask) { struct pucan_std_filter *cmd; cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER); /* all the 11-bits CAN ID values are represented by one bit in a * 64 rows array of 32 bits: the upper 6 bits of the CAN ID select the * row while the lowest 5 bits select the bit in that row. * * bit filter * 1 passed * 0 discarded */ /* select the row */ cmd->idx = row; /* set/unset bits in the row */ cmd->mask = cpu_to_le32(mask); return pucan_write_cmd(priv); } static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags) { struct pucan_tx_abort *cmd; cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT); cmd->flags = cpu_to_le16(flags); return pucan_write_cmd(priv); } static int pucan_clr_err_counters(struct peak_canfd_priv *priv) { struct pucan_wr_err_cnt *cmd; cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT); cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE); cmd->tx_counter = 0; cmd->rx_counter = 0; return pucan_write_cmd(priv); } static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask) { struct pucan_options *cmd; cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION); cmd->options = cpu_to_le16(opt_mask); return pucan_write_cmd(priv); } static int pucan_clr_options(struct peak_canfd_priv *priv, u16 opt_mask) { struct pucan_options *cmd; cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_CLR_DIS_OPTION); cmd->options = cpu_to_le16(opt_mask); return pucan_write_cmd(priv); } static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv) { pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER); return pucan_write_cmd(priv); } static int pucan_netif_rx(struct sk_buff *skb, __le32 ts_low, __le32 ts_high) { struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); u64 ts_us; ts_us = (u64)le32_to_cpu(ts_high) << 32; ts_us |= le32_to_cpu(ts_low); /* IP core timestamps are µs. */ hwts->hwtstamp = ns_to_ktime(ts_us * NSEC_PER_USEC); return netif_rx(skb); } /* handle the reception of one CAN frame */ static int pucan_handle_can_rx(struct peak_canfd_priv *priv, struct pucan_rx_msg *msg) { struct net_device_stats *stats = &priv->ndev->stats; struct canfd_frame *cf; struct sk_buff *skb; const u16 rx_msg_flags = le16_to_cpu(msg->flags); u8 cf_len; if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) cf_len = can_fd_dlc2len(pucan_msg_get_dlc(msg)); else cf_len = can_cc_dlc2len(pucan_msg_get_dlc(msg)); /* if this frame is an echo, */ if (rx_msg_flags & PUCAN_MSG_LOOPED_BACK) { unsigned long flags; spin_lock_irqsave(&priv->echo_lock, flags); /* count bytes of the echo instead of skb */ stats->tx_bytes += can_get_echo_skb(priv->ndev, msg->client, NULL); stats->tx_packets++; /* restart tx queue (a slot is free) */ netif_wake_queue(priv->ndev); spin_unlock_irqrestore(&priv->echo_lock, flags); /* if this frame is only an echo, stop here. Otherwise, * continue to push this application self-received frame into * its own rx queue. 
*/ if (!(rx_msg_flags & PUCAN_MSG_SELF_RECEIVE)) return 0; } /* otherwise, it should be pushed into rx fifo */ if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) { /* CANFD frame case */ skb = alloc_canfd_skb(priv->ndev, &cf); if (!skb) return -ENOMEM; if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH) cf->flags |= CANFD_BRS; if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND) cf->flags |= CANFD_ESI; } else { /* CAN 2.0 frame case */ skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cf); if (!skb) return -ENOMEM; } cf->can_id = le32_to_cpu(msg->can_id); cf->len = cf_len; if (rx_msg_flags & PUCAN_MSG_EXT_ID) cf->can_id |= CAN_EFF_FLAG; if (rx_msg_flags & PUCAN_MSG_RTR) { cf->can_id |= CAN_RTR_FLAG; } else { memcpy(cf->data, msg->d, cf->len); stats->rx_bytes += cf->len; } stats->rx_packets++; pucan_netif_rx(skb, msg->ts_low, msg->ts_high); return 0; } /* handle rx/tx error counters notification */ static int pucan_handle_error(struct peak_canfd_priv *priv, struct pucan_error_msg *msg) { priv->bec.txerr = msg->tx_err_cnt; priv->bec.rxerr = msg->rx_err_cnt; return 0; } /* handle status notification */ static int pucan_handle_status(struct peak_canfd_priv *priv, struct pucan_status_msg *msg) { struct net_device *ndev = priv->ndev; struct net_device_stats *stats = &ndev->stats; struct can_frame *cf; struct sk_buff *skb; /* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */ if (pucan_status_is_rx_barrier(msg)) { if (priv->enable_tx_path) { int err = priv->enable_tx_path(priv); if (err) return err; } /* wake network queue up (echo_skb array is empty) */ netif_wake_queue(ndev); return 0; } skb = alloc_can_err_skb(ndev, &cf); /* test state error bits according to their priority */ if (pucan_status_is_busoff(msg)) { netdev_dbg(ndev, "Bus-off entry status\n"); priv->can.state = CAN_STATE_BUS_OFF; priv->can.can_stats.bus_off++; can_bus_off(ndev); if (skb) cf->can_id |= CAN_ERR_BUSOFF; } else if (pucan_status_is_passive(msg)) { netdev_dbg(ndev, "Error passive status\n"); priv->can.state = CAN_STATE_ERROR_PASSIVE; priv->can.can_stats.error_passive++; if (skb) { cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; cf->data[6] = priv->bec.txerr; cf->data[7] = priv->bec.rxerr; } } else if (pucan_status_is_warning(msg)) { netdev_dbg(ndev, "Error warning status\n"); priv->can.state = CAN_STATE_ERROR_WARNING; priv->can.can_stats.error_warning++; if (skb) { cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ? 
CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; cf->data[6] = priv->bec.txerr; cf->data[7] = priv->bec.rxerr; } } else if (priv->can.state != CAN_STATE_ERROR_ACTIVE) { /* back to ERROR_ACTIVE */ netdev_dbg(ndev, "Error active status\n"); can_change_state(ndev, cf, CAN_STATE_ERROR_ACTIVE, CAN_STATE_ERROR_ACTIVE); } else { dev_kfree_skb(skb); return 0; } if (!skb) { stats->rx_dropped++; return -ENOMEM; } pucan_netif_rx(skb, msg->ts_low, msg->ts_high); return 0; } /* handle uCAN Rx overflow notification */ static int pucan_handle_cache_critical(struct peak_canfd_priv *priv) { struct net_device_stats *stats = &priv->ndev->stats; struct can_frame *cf; struct sk_buff *skb; stats->rx_over_errors++; stats->rx_errors++; skb = alloc_can_err_skb(priv->ndev, &cf); if (!skb) { stats->rx_dropped++; return -ENOMEM; } cf->can_id |= CAN_ERR_CRTL | CAN_ERR_CNT; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; cf->data[6] = priv->bec.txerr; cf->data[7] = priv->bec.rxerr; netif_rx(skb); return 0; } /* handle a single uCAN message */ int peak_canfd_handle_msg(struct peak_canfd_priv *priv, struct pucan_rx_msg *msg) { u16 msg_type = le16_to_cpu(msg->type); int msg_size = le16_to_cpu(msg->size); int err; if (!msg_size || !msg_type) { /* null packet found: end of list */ goto exit; } switch (msg_type) { case PUCAN_MSG_CAN_RX: err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg); break; case PUCAN_MSG_ERROR: err = pucan_handle_error(priv, (struct pucan_error_msg *)msg); break; case PUCAN_MSG_STATUS: err = pucan_handle_status(priv, (struct pucan_status_msg *)msg); break; case PUCAN_MSG_CACHE_CRITICAL: err = pucan_handle_cache_critical(priv); break; default: err = 0; } if (err < 0) return err; exit: return msg_size; } /* handle a list of rx_count messages from rx_msg memory address */ int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv, struct pucan_rx_msg *msg_list, int msg_count) { void *msg_ptr = msg_list; int i, msg_size = 0; for (i = 0; i < msg_count; i++) { msg_size = peak_canfd_handle_msg(priv, msg_ptr); /* a null packet can be found at the end of a list */ if (msg_size <= 0) break; msg_ptr += ALIGN(msg_size, 4); } if (msg_size < 0) return msg_size; return i; } static int peak_canfd_start(struct peak_canfd_priv *priv) { int err; err = pucan_clr_err_counters(priv); if (err) goto err_exit; priv->echo_idx = 0; priv->bec.txerr = 0; priv->bec.rxerr = 0; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) err = pucan_set_listen_only_mode(priv); else err = pucan_set_normal_mode(priv); err_exit: return err; } static void peak_canfd_stop(struct peak_canfd_priv *priv) { int err; /* go back to RESET mode */ err = pucan_set_reset_mode(priv); if (err) { netdev_err(priv->ndev, "channel %u reset failed\n", priv->index); } else { /* abort last Tx (MUST be done in RESET mode only!) 
*/ pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH); } } static int peak_canfd_set_mode(struct net_device *ndev, enum can_mode mode) { struct peak_canfd_priv *priv = netdev_priv(ndev); switch (mode) { case CAN_MODE_START: peak_canfd_start(priv); netif_wake_queue(ndev); break; default: return -EOPNOTSUPP; } return 0; } static int peak_canfd_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { struct peak_canfd_priv *priv = netdev_priv(ndev); *bec = priv->bec; return 0; } static int peak_canfd_open(struct net_device *ndev) { struct peak_canfd_priv *priv = netdev_priv(ndev); int i, err = 0; err = open_candev(ndev); if (err) { netdev_err(ndev, "open_candev() failed, error %d\n", err); goto err_exit; } err = pucan_set_reset_mode(priv); if (err) goto err_close; if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) err = pucan_clr_options(priv, PUCAN_OPTION_CANDFDISO); else err = pucan_set_options(priv, PUCAN_OPTION_CANDFDISO); if (err) goto err_close; } /* set option: get rx/tx error counters */ err = pucan_set_options(priv, PUCAN_OPTION_ERROR); if (err) goto err_close; /* accept all standard CAN ID */ for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++) pucan_set_std_filter(priv, i, 0xffffffff); err = peak_canfd_start(priv); if (err) goto err_close; /* receiving the RB status says when Tx path is ready */ err = pucan_setup_rx_barrier(priv); if (!err) goto err_exit; err_close: close_candev(ndev); err_exit: return err; } static int peak_canfd_set_bittiming(struct net_device *ndev) { struct peak_canfd_priv *priv = netdev_priv(ndev); return pucan_set_timing_slow(priv, &priv->can.bittiming); } static int peak_canfd_set_data_bittiming(struct net_device *ndev) { struct peak_canfd_priv *priv = netdev_priv(ndev); return pucan_set_timing_fast(priv, &priv->can.data_bittiming); } static int peak_canfd_close(struct net_device *ndev) { struct peak_canfd_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); peak_canfd_stop(priv); close_candev(ndev); return 0; } static netdev_tx_t peak_canfd_start_xmit(struct sk_buff *skb, struct net_device *ndev) { struct peak_canfd_priv *priv = netdev_priv(ndev); struct net_device_stats *stats = &ndev->stats; struct canfd_frame *cf = (struct canfd_frame *)skb->data; struct pucan_tx_msg *msg; u16 msg_size, msg_flags; unsigned long flags; bool should_stop_tx_queue; int room_left; u8 len; if (can_dev_dropped_skb(ndev, skb)) return NETDEV_TX_OK; msg_size = ALIGN(sizeof(*msg) + cf->len, 4); msg = priv->alloc_tx_msg(priv, msg_size, &room_left); /* should never happen except under bus-off condition and (auto-)restart * mechanism */ if (!msg) { stats->tx_dropped++; netif_stop_queue(ndev); return NETDEV_TX_BUSY; } msg->size = cpu_to_le16(msg_size); msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX); msg_flags = 0; if (cf->can_id & CAN_EFF_FLAG) { msg_flags |= PUCAN_MSG_EXT_ID; msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK); } else { msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK); } if (can_is_canfd_skb(skb)) { /* CAN FD frame format */ len = can_fd_len2dlc(cf->len); msg_flags |= PUCAN_MSG_EXT_DATA_LEN; if (cf->flags & CANFD_BRS) msg_flags |= PUCAN_MSG_BITRATE_SWITCH; if (cf->flags & CANFD_ESI) msg_flags |= PUCAN_MSG_ERROR_STATE_IND; } else { /* CAN 2.0 frame format */ len = cf->len; if (cf->can_id & CAN_RTR_FLAG) msg_flags |= PUCAN_MSG_RTR; } /* always ask loopback for echo management */ msg_flags |= PUCAN_MSG_LOOPED_BACK; /* set driver specific bit to differentiate with application loopback */ if (priv->can.ctrlmode & 
CAN_CTRLMODE_LOOPBACK) msg_flags |= PUCAN_MSG_SELF_RECEIVE; msg->flags = cpu_to_le16(msg_flags); msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, len); memcpy(msg->d, cf->data, cf->len); /* struct msg client field is used as an index in the echo skbs ring */ msg->client = priv->echo_idx; spin_lock_irqsave(&priv->echo_lock, flags); /* prepare and save echo skb in internal slot */ can_put_echo_skb(skb, ndev, priv->echo_idx, 0); /* move echo index to the next slot */ priv->echo_idx = (priv->echo_idx + 1) % priv->can.echo_skb_max; /* if next slot is not free, stop network queue (no slot free in echo * skb ring means that the controller did not write these frames on * the bus: no need to continue). */ should_stop_tx_queue = !!(priv->can.echo_skb[priv->echo_idx]); /* stop network tx queue if not enough room to save one more msg too */ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) should_stop_tx_queue |= (room_left < (sizeof(*msg) + CANFD_MAX_DLEN)); else should_stop_tx_queue |= (room_left < (sizeof(*msg) + CAN_MAX_DLEN)); if (should_stop_tx_queue) netif_stop_queue(ndev); spin_unlock_irqrestore(&priv->echo_lock, flags); /* write the skb on the interface */ priv->write_tx_msg(priv, msg); return NETDEV_TX_OK; } static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct hwtstamp_config hwts_cfg = { 0 }; switch (cmd) { case SIOCSHWTSTAMP: /* set */ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg))) return -EFAULT; if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF && hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL) return 0; return -ERANGE; case SIOCGHWTSTAMP: /* get */ hwts_cfg.tx_type = HWTSTAMP_TX_OFF; hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL; if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg))) return -EFAULT; return 0; default: return -EOPNOTSUPP; } } static const struct net_device_ops peak_canfd_netdev_ops = { .ndo_open = peak_canfd_open, .ndo_stop = peak_canfd_close, .ndo_eth_ioctl = peak_eth_ioctl, .ndo_start_xmit = peak_canfd_start_xmit, .ndo_change_mtu = can_change_mtu, }; static int peak_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; info->phc_index = -1; info->tx_types = BIT(HWTSTAMP_TX_OFF); info->rx_filters = BIT(HWTSTAMP_FILTER_ALL); return 0; } static const struct ethtool_ops peak_canfd_ethtool_ops = { .get_ts_info = peak_get_ts_info, }; struct net_device *alloc_peak_canfd_dev(int sizeof_priv, int index, int echo_skb_max) { struct net_device *ndev; struct peak_canfd_priv *priv; /* we DO support local echo */ if (echo_skb_max < 0) echo_skb_max = PCANFD_ECHO_SKB_MAX; /* allocate the candev object */ ndev = alloc_candev(sizeof_priv, echo_skb_max); if (!ndev) return NULL; priv = netdev_priv(ndev); /* complete now socket-can initialization side */ priv->can.state = CAN_STATE_STOPPED; priv->can.bittiming_const = &peak_canfd_nominal_const; priv->can.data_bittiming_const = &peak_canfd_data_const; priv->can.do_set_mode = peak_canfd_set_mode; priv->can.do_get_berr_counter = peak_canfd_get_berr_counter; priv->can.do_set_bittiming = peak_canfd_set_bittiming; priv->can.do_set_data_bittiming = peak_canfd_set_data_bittiming; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | CAN_CTRLMODE_BERR_REPORTING; priv->ndev = ndev; priv->index = index; priv->cmd_len = 0; 
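	/* echo_lock serializes echo_idx and the echo skb ring that
	 * peak_canfd_start_xmit() shares with the Tx-done/IRQ path
	 */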
spin_lock_init(&priv->echo_lock); ndev->flags |= IFF_ECHO; ndev->netdev_ops = &peak_canfd_netdev_ops; ndev->ethtool_ops = &peak_canfd_ethtool_ops; ndev->dev_id = index; return ndev; }
linux-master
drivers/net/can/peak_canfd/peak_canfd.c
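/*
 * Editor's note: a minimal, self-contained sketch (not part of the kernel
 * sources) of the list walk performed by peak_canfd_handle_msgs_list()
 * above: each record starts with a little-endian 16-bit size and type,
 * records are padded to a 4-byte boundary, and a null size/type terminates
 * the list. The demo_* names and the header layout are hypothetical
 * stand-ins for struct pucan_rx_msg, kept only to show the pointer
 * arithmetic.
 */
#include <stdint.h>
#include <stdio.h>

#define ALIGN4(x) (((x) + 3u) & ~3u)		/* same rounding as ALIGN(x, 4) */

static uint16_t get_le16(const uint8_t *p)	/* like le16_to_cpu() on a byte stream */
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static int demo_handle_msgs_list(const uint8_t *buf, int msg_count)
{
	const uint8_t *p = buf;
	int i;

	for (i = 0; i < msg_count; i++) {
		uint16_t size = get_le16(p);
		uint16_t type = get_le16(p + 2);

		if (!size || !type)		/* null record: end of list */
			break;
		printf("msg %d: type=%u size=%u\n", i, (unsigned)type,
		       (unsigned)size);
		p += ALIGN4(size);		/* next record is 4-byte aligned */
	}
	return i;				/* records consumed, as the driver returns */
}

int main(void)
{
	/* two records (sizes 6 and 8) followed by a null terminator */
	static const uint8_t buf[20] = {
		6, 0, 1, 0, 0xAA, 0xBB, 0, 0,	/* size=6, type=1, 2 pad bytes */
		8, 0, 2, 0, 1, 2, 3, 4,		/* size=8, type=2 */
		0, 0, 0, 0			/* end of list */
	};

	return demo_handle_msgs_list(buf, 8) == 2 ? 0 : 1;
}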
// SPDX-License-Identifier: GPL-2.0-only /* Copyright (C) 2007, 2011 Wolfgang Grandegger <[email protected]> * Copyright (C) 2012 Stephane Grosjean <[email protected]> * * Derived from the PCAN project file driver/src/pcan_pci.c: * * Copyright (C) 2001-2006 PEAK System-Technik GmbH */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/can.h> #include <linux/can/dev.h> #include "peak_canfd_user.h" MODULE_AUTHOR("Stephane Grosjean <[email protected]>"); MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCIe/M.2 FD family cards"); MODULE_LICENSE("GPL v2"); #define PCIEFD_DRV_NAME "peak_pciefd" #define PEAK_PCI_VENDOR_ID 0x001c /* The PCI device and vendor IDs */ #define PEAK_PCIEFD_ID 0x0013 /* for PCIe slot cards */ #define PCAN_CPCIEFD_ID 0x0014 /* for Compact-PCI Serial slot cards */ #define PCAN_PCIE104FD_ID 0x0017 /* for PCIe-104 Express slot cards */ #define PCAN_MINIPCIEFD_ID 0x0018 /* for mini-PCIe slot cards */ #define PCAN_PCIEFD_OEM_ID 0x0019 /* for PCIe slot OEM cards */ #define PCAN_M2_ID 0x001a /* for M2 slot cards */ /* PEAK PCIe board access description */ #define PCIEFD_BAR0_SIZE (64 * 1024) #define PCIEFD_RX_DMA_SIZE (4 * 1024) #define PCIEFD_TX_DMA_SIZE (4 * 1024) #define PCIEFD_TX_PAGE_SIZE (2 * 1024) /* System Control Registers */ #define PCIEFD_REG_SYS_CTL_SET 0x0000 /* set bits */ #define PCIEFD_REG_SYS_CTL_CLR 0x0004 /* clear bits */ /* Version info registers */ #define PCIEFD_REG_SYS_VER1 0x0040 /* version reg #1 */ #define PCIEFD_REG_SYS_VER2 0x0044 /* version reg #2 */ #define PCIEFD_FW_VERSION(x, y, z) (((u32)(x) << 24) | \ ((u32)(y) << 16) | \ ((u32)(z) << 8)) /* System Control Registers Bits */ #define PCIEFD_SYS_CTL_TS_RST 0x00000001 /* timestamp clock */ #define PCIEFD_SYS_CTL_CLK_EN 0x00000002 /* system clock */ /* CAN-FD channel addresses */ #define PCIEFD_CANX_OFF(c) (((c) + 1) * 0x1000) #define PCIEFD_ECHO_SKB_MAX PCANFD_ECHO_SKB_DEF /* CAN-FD channel registers */ #define PCIEFD_REG_CAN_MISC 0x0000 /* Misc. 
control */ #define PCIEFD_REG_CAN_CLK_SEL 0x0008 /* Clock selector */ #define PCIEFD_REG_CAN_CMD_PORT_L 0x0010 /* 64-bits command port */ #define PCIEFD_REG_CAN_CMD_PORT_H 0x0014 #define PCIEFD_REG_CAN_TX_REQ_ACC 0x0020 /* Tx request accumulator */ #define PCIEFD_REG_CAN_TX_CTL_SET 0x0030 /* Tx control set register */ #define PCIEFD_REG_CAN_TX_CTL_CLR 0x0038 /* Tx control clear register */ #define PCIEFD_REG_CAN_TX_DMA_ADDR_L 0x0040 /* 64-bits addr for Tx DMA */ #define PCIEFD_REG_CAN_TX_DMA_ADDR_H 0x0044 #define PCIEFD_REG_CAN_RX_CTL_SET 0x0050 /* Rx control set register */ #define PCIEFD_REG_CAN_RX_CTL_CLR 0x0058 /* Rx control clear register */ #define PCIEFD_REG_CAN_RX_CTL_WRT 0x0060 /* Rx control write register */ #define PCIEFD_REG_CAN_RX_CTL_ACK 0x0068 /* Rx control ACK register */ #define PCIEFD_REG_CAN_RX_DMA_ADDR_L 0x0070 /* 64-bits addr for Rx DMA */ #define PCIEFD_REG_CAN_RX_DMA_ADDR_H 0x0074 /* CAN-FD channel misc register bits */ #define CANFD_MISC_TS_RST 0x00000001 /* timestamp cnt rst */ /* CAN-FD channel Clock SELector Source & DIVider */ #define CANFD_CLK_SEL_DIV_MASK 0x00000007 #define CANFD_CLK_SEL_DIV_60MHZ 0x00000000 /* SRC=240MHz only */ #define CANFD_CLK_SEL_DIV_40MHZ 0x00000001 /* SRC=240MHz only */ #define CANFD_CLK_SEL_DIV_30MHZ 0x00000002 /* SRC=240MHz only */ #define CANFD_CLK_SEL_DIV_24MHZ 0x00000003 /* SRC=240MHz only */ #define CANFD_CLK_SEL_DIV_20MHZ 0x00000004 /* SRC=240MHz only */ #define CANFD_CLK_SEL_SRC_MASK 0x00000008 /* 0=80MHz, 1=240MHz */ #define CANFD_CLK_SEL_SRC_240MHZ 0x00000008 #define CANFD_CLK_SEL_SRC_80MHZ (~CANFD_CLK_SEL_SRC_240MHZ & \ CANFD_CLK_SEL_SRC_MASK) #define CANFD_CLK_SEL_20MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ CANFD_CLK_SEL_DIV_20MHZ) #define CANFD_CLK_SEL_24MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ CANFD_CLK_SEL_DIV_24MHZ) #define CANFD_CLK_SEL_30MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ CANFD_CLK_SEL_DIV_30MHZ) #define CANFD_CLK_SEL_40MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ CANFD_CLK_SEL_DIV_40MHZ) #define CANFD_CLK_SEL_60MHZ (CANFD_CLK_SEL_SRC_240MHZ |\ CANFD_CLK_SEL_DIV_60MHZ) #define CANFD_CLK_SEL_80MHZ (CANFD_CLK_SEL_SRC_80MHZ) /* CAN-FD channel Rx/Tx control register bits */ #define CANFD_CTL_UNC_BIT 0x00010000 /* Uncached DMA mem */ #define CANFD_CTL_RST_BIT 0x00020000 /* reset DMA action */ #define CANFD_CTL_IEN_BIT 0x00040000 /* IRQ enable */ /* Rx IRQ Count and Time Limits */ #define CANFD_CTL_IRQ_CL_DEF 16 /* Rx msg max nb per IRQ in Rx DMA */ #define CANFD_CTL_IRQ_TL_DEF 10 /* Time before IRQ if < CL (x100 µs) */ /* Tx anticipation window (link logical address should be aligned on 2K * boundary) */ #define PCIEFD_TX_PAGE_COUNT (PCIEFD_TX_DMA_SIZE / PCIEFD_TX_PAGE_SIZE) #define CANFD_MSG_LNK_TX 0x1001 /* Tx msgs link */ /* 32-bits IRQ status fields, heading Rx DMA area */ static inline int pciefd_irq_tag(u32 irq_status) { return irq_status & 0x0000000f; } static inline int pciefd_irq_rx_cnt(u32 irq_status) { return (irq_status & 0x000007f0) >> 4; } static inline int pciefd_irq_is_lnk(u32 irq_status) { return irq_status & 0x00010000; } /* Rx record */ struct pciefd_rx_dma { __le32 irq_status; __le32 sys_time_low; __le32 sys_time_high; struct pucan_rx_msg msg[]; } __packed __aligned(4); /* Tx Link record */ struct pciefd_tx_link { __le16 size; __le16 type; __le32 laddr_lo; __le32 laddr_hi; } __packed __aligned(4); /* Tx page descriptor */ struct pciefd_page { void *vbase; /* page virtual address */ dma_addr_t lbase; /* page logical address */ u32 offset; u32 size; }; /* CAN-FD channel object */ struct pciefd_board; struct pciefd_can { struct 
peak_canfd_priv ucan; /* must be the first member */ void __iomem *reg_base; /* channel config base addr */ struct pciefd_board *board; /* reverse link */ struct pucan_command pucan_cmd; /* command buffer */ dma_addr_t rx_dma_laddr; /* DMA virtual and logical addr */ void *rx_dma_vaddr; /* for Rx and Tx areas */ dma_addr_t tx_dma_laddr; void *tx_dma_vaddr; struct pciefd_page tx_pages[PCIEFD_TX_PAGE_COUNT]; u16 tx_pages_free; /* free Tx pages counter */ u16 tx_page_index; /* current page used for Tx */ spinlock_t tx_lock; u32 irq_status; u32 irq_tag; /* next irq tag */ }; /* PEAK-PCIe FD board object */ struct pciefd_board { void __iomem *reg_base; struct pci_dev *pci_dev; int can_count; spinlock_t cmd_lock; /* 64-bits cmds must be atomic */ struct pciefd_can *can[]; /* array of network devices */ }; /* supported device ids. */ static const struct pci_device_id peak_pciefd_tbl[] = { {PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,}, {PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,}, {0,} }; MODULE_DEVICE_TABLE(pci, peak_pciefd_tbl); /* read a 32 bits value from a SYS block register */ static inline u32 pciefd_sys_readreg(const struct pciefd_board *priv, u16 reg) { return readl(priv->reg_base + reg); } /* write a 32 bits value into a SYS block register */ static inline void pciefd_sys_writereg(const struct pciefd_board *priv, u32 val, u16 reg) { writel(val, priv->reg_base + reg); } /* read a 32 bits value from CAN-FD block register */ static inline u32 pciefd_can_readreg(const struct pciefd_can *priv, u16 reg) { return readl(priv->reg_base + reg); } /* write a 32 bits value into a CAN-FD block register */ static inline void pciefd_can_writereg(const struct pciefd_can *priv, u32 val, u16 reg) { writel(val, priv->reg_base + reg); } /* give a channel logical Rx DMA address to the board */ static void pciefd_can_setup_rx_dma(struct pciefd_can *priv) { #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT const u32 dma_addr_h = (u32)(priv->rx_dma_laddr >> 32); #else const u32 dma_addr_h = 0; #endif /* (DMA must be reset for Rx) */ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_RX_CTL_SET); /* write the logical address of the Rx DMA area for this channel */ pciefd_can_writereg(priv, (u32)priv->rx_dma_laddr, PCIEFD_REG_CAN_RX_DMA_ADDR_L); pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_RX_DMA_ADDR_H); /* also indicates that Rx DMA is cacheable */ pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT, PCIEFD_REG_CAN_RX_CTL_CLR); } /* clear channel logical Rx DMA address from the board */ static void pciefd_can_clear_rx_dma(struct pciefd_can *priv) { /* DMA must be reset for Rx */ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_RX_CTL_SET); /* clear the logical address of the Rx DMA area for this channel */ pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_L); pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_H); } /* give a channel logical Tx DMA address to the board */ static void pciefd_can_setup_tx_dma(struct pciefd_can *priv) { #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT const u32 dma_addr_h = (u32)(priv->tx_dma_laddr >> 32); #else const u32 dma_addr_h = 0; #endif /* (DMA must be reset for Tx) */ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_SET); /* write the logical address of the Tx DMA 
area for this channel */ pciefd_can_writereg(priv, (u32)priv->tx_dma_laddr, PCIEFD_REG_CAN_TX_DMA_ADDR_L); pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_TX_DMA_ADDR_H); /* also indicates that Tx DMA is cacheable */ pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT, PCIEFD_REG_CAN_TX_CTL_CLR); } /* clear channel logical Tx DMA address from the board */ static void pciefd_can_clear_tx_dma(struct pciefd_can *priv) { /* DMA must be reset for Tx */ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_SET); /* clear the logical address of the Tx DMA area for this channel */ pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_L); pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_H); } static void pciefd_can_ack_rx_dma(struct pciefd_can *priv) { /* read value of current IRQ tag and inc it for next one */ priv->irq_tag = le32_to_cpu(*(__le32 *)priv->rx_dma_vaddr); priv->irq_tag++; priv->irq_tag &= 0xf; /* write the next IRQ tag for this CAN */ pciefd_can_writereg(priv, priv->irq_tag, PCIEFD_REG_CAN_RX_CTL_ACK); } /* IRQ handler */ static irqreturn_t pciefd_irq_handler(int irq, void *arg) { struct pciefd_can *priv = arg; struct pciefd_rx_dma *rx_dma = priv->rx_dma_vaddr; /* INTA mode only to sync with PCIe transaction */ if (!pci_dev_msi_enabled(priv->board->pci_dev)) (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1); /* read IRQ status from the first 32-bits of the Rx DMA area */ priv->irq_status = le32_to_cpu(rx_dma->irq_status); /* check if this (shared) IRQ is for this CAN */ if (pciefd_irq_tag(priv->irq_status) != priv->irq_tag) return IRQ_NONE; /* handle rx messages (if any) */ peak_canfd_handle_msgs_list(&priv->ucan, rx_dma->msg, pciefd_irq_rx_cnt(priv->irq_status)); /* handle tx link interrupt (if any) */ if (pciefd_irq_is_lnk(priv->irq_status)) { unsigned long flags; spin_lock_irqsave(&priv->tx_lock, flags); priv->tx_pages_free++; spin_unlock_irqrestore(&priv->tx_lock, flags); /* wake producer up (only if enough room in echo_skb array) */ spin_lock_irqsave(&priv->ucan.echo_lock, flags); if (!priv->ucan.can.echo_skb[priv->ucan.echo_idx]) netif_wake_queue(priv->ucan.ndev); spin_unlock_irqrestore(&priv->ucan.echo_lock, flags); } /* re-enable Rx DMA transfer for this CAN */ pciefd_can_ack_rx_dma(priv); return IRQ_HANDLED; } static int pciefd_enable_tx_path(struct peak_canfd_priv *ucan) { struct pciefd_can *priv = (struct pciefd_can *)ucan; int i; /* initialize the Tx pages descriptors */ priv->tx_pages_free = PCIEFD_TX_PAGE_COUNT - 1; priv->tx_page_index = 0; priv->tx_pages[0].vbase = priv->tx_dma_vaddr; priv->tx_pages[0].lbase = priv->tx_dma_laddr; for (i = 0; i < PCIEFD_TX_PAGE_COUNT; i++) { priv->tx_pages[i].offset = 0; priv->tx_pages[i].size = PCIEFD_TX_PAGE_SIZE - sizeof(struct pciefd_tx_link); if (i) { priv->tx_pages[i].vbase = priv->tx_pages[i - 1].vbase + PCIEFD_TX_PAGE_SIZE; priv->tx_pages[i].lbase = priv->tx_pages[i - 1].lbase + PCIEFD_TX_PAGE_SIZE; } } /* setup Tx DMA addresses into IP core */ pciefd_can_setup_tx_dma(priv); /* start (TX_RST=0) Tx Path */ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_TX_CTL_CLR); return 0; } /* board specific CANFD command pre-processing */ static int pciefd_pre_cmd(struct peak_canfd_priv *ucan) { struct pciefd_can *priv = (struct pciefd_can *)ucan; u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd); int err; /* pre-process command */ switch (cmd) { case PUCAN_CMD_NORMAL_MODE: case PUCAN_CMD_LISTEN_ONLY_MODE: if (ucan->can.state == CAN_STATE_BUS_OFF) break; /* going into operational mode: setup IRQ handler 
*/ err = request_irq(priv->ucan.ndev->irq, pciefd_irq_handler, IRQF_SHARED, PCIEFD_DRV_NAME, priv); if (err) return err; /* setup Rx DMA address */ pciefd_can_setup_rx_dma(priv); /* setup max count of msgs per IRQ */ pciefd_can_writereg(priv, (CANFD_CTL_IRQ_TL_DEF) << 8 | CANFD_CTL_IRQ_CL_DEF, PCIEFD_REG_CAN_RX_CTL_WRT); /* clear DMA RST for Rx (Rx start) */ pciefd_can_writereg(priv, CANFD_CTL_RST_BIT, PCIEFD_REG_CAN_RX_CTL_CLR); /* reset timestamps */ pciefd_can_writereg(priv, !CANFD_MISC_TS_RST, PCIEFD_REG_CAN_MISC); /* do an initial ACK */ pciefd_can_ack_rx_dma(priv); /* enable IRQ for this CAN after having set next irq_tag */ pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, PCIEFD_REG_CAN_RX_CTL_SET); /* Tx path will be setup as soon as RX_BARRIER is received */ break; default: break; } return 0; } /* write a command */ static int pciefd_write_cmd(struct peak_canfd_priv *ucan) { struct pciefd_can *priv = (struct pciefd_can *)ucan; unsigned long flags; /* 64-bits command is atomic */ spin_lock_irqsave(&priv->board->cmd_lock, flags); pciefd_can_writereg(priv, *(u32 *)ucan->cmd_buffer, PCIEFD_REG_CAN_CMD_PORT_L); pciefd_can_writereg(priv, *(u32 *)(ucan->cmd_buffer + 4), PCIEFD_REG_CAN_CMD_PORT_H); spin_unlock_irqrestore(&priv->board->cmd_lock, flags); return 0; } /* board specific CANFD command post-processing */ static int pciefd_post_cmd(struct peak_canfd_priv *ucan) { struct pciefd_can *priv = (struct pciefd_can *)ucan; u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd); switch (cmd) { case PUCAN_CMD_RESET_MODE: if (ucan->can.state == CAN_STATE_STOPPED) break; /* controller now in reset mode: */ /* disable IRQ for this CAN */ pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT, PCIEFD_REG_CAN_RX_CTL_CLR); /* stop and reset DMA addresses in Tx/Rx engines */ pciefd_can_clear_tx_dma(priv); pciefd_can_clear_rx_dma(priv); /* wait for above commands to complete (read cycle) */ (void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1); free_irq(priv->ucan.ndev->irq, priv); ucan->can.state = CAN_STATE_STOPPED; break; } return 0; } static void *pciefd_alloc_tx_msg(struct peak_canfd_priv *ucan, u16 msg_size, int *room_left) { struct pciefd_can *priv = (struct pciefd_can *)ucan; struct pciefd_page *page = priv->tx_pages + priv->tx_page_index; unsigned long flags; void *msg; spin_lock_irqsave(&priv->tx_lock, flags); if (page->offset + msg_size > page->size) { struct pciefd_tx_link *lk; /* not enough space in this page: try another one */ if (!priv->tx_pages_free) { spin_unlock_irqrestore(&priv->tx_lock, flags); /* Tx overflow */ return NULL; } priv->tx_pages_free--; /* keep address of the very last free slot of current page */ lk = page->vbase + page->offset; /* next, move on a new free page */ priv->tx_page_index = (priv->tx_page_index + 1) % PCIEFD_TX_PAGE_COUNT; page = priv->tx_pages + priv->tx_page_index; /* put link record to this new page at the end of prev one */ lk->size = cpu_to_le16(sizeof(*lk)); lk->type = cpu_to_le16(CANFD_MSG_LNK_TX); lk->laddr_lo = cpu_to_le32(page->lbase); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT lk->laddr_hi = cpu_to_le32(page->lbase >> 32); #else lk->laddr_hi = 0; #endif /* next msgs will be put from the begininng of this new page */ page->offset = 0; } *room_left = priv->tx_pages_free * page->size; spin_unlock_irqrestore(&priv->tx_lock, flags); msg = page->vbase + page->offset; /* give back room left in the tx ring */ *room_left += page->size - (page->offset + msg_size); return msg; } static int pciefd_write_tx_msg(struct peak_canfd_priv *ucan, struct pucan_tx_msg *msg) { struct 
pciefd_can *priv = (struct pciefd_can *)ucan; struct pciefd_page *page = priv->tx_pages + priv->tx_page_index; /* this slot is now reserved for writing the frame */ page->offset += le16_to_cpu(msg->size); /* tell the board a frame has been written in Tx DMA area */ pciefd_can_writereg(priv, 1, PCIEFD_REG_CAN_TX_REQ_ACC); return 0; } /* probe for CAN-FD channel #pciefd_board->can_count */ static int pciefd_can_probe(struct pciefd_board *pciefd) { struct net_device *ndev; struct pciefd_can *priv; u32 clk; int err; /* allocate the candev object with default isize of echo skbs ring */ ndev = alloc_peak_canfd_dev(sizeof(*priv), pciefd->can_count, PCIEFD_ECHO_SKB_MAX); if (!ndev) { dev_err(&pciefd->pci_dev->dev, "failed to alloc candev object\n"); goto failure; } priv = netdev_priv(ndev); /* fill-in candev private object: */ /* setup PCIe-FD own callbacks */ priv->ucan.pre_cmd = pciefd_pre_cmd; priv->ucan.write_cmd = pciefd_write_cmd; priv->ucan.post_cmd = pciefd_post_cmd; priv->ucan.enable_tx_path = pciefd_enable_tx_path; priv->ucan.alloc_tx_msg = pciefd_alloc_tx_msg; priv->ucan.write_tx_msg = pciefd_write_tx_msg; /* setup PCIe-FD own command buffer */ priv->ucan.cmd_buffer = &priv->pucan_cmd; priv->ucan.cmd_maxlen = sizeof(priv->pucan_cmd); priv->board = pciefd; /* CAN config regs block address */ priv->reg_base = pciefd->reg_base + PCIEFD_CANX_OFF(priv->ucan.index); /* allocate non-cacheable DMA'able 4KB memory area for Rx */ priv->rx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev, PCIEFD_RX_DMA_SIZE, &priv->rx_dma_laddr, GFP_KERNEL); if (!priv->rx_dma_vaddr) { dev_err(&pciefd->pci_dev->dev, "Rx dmam_alloc_coherent(%u) failure\n", PCIEFD_RX_DMA_SIZE); goto err_free_candev; } /* allocate non-cacheable DMA'able 4KB memory area for Tx */ priv->tx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev, PCIEFD_TX_DMA_SIZE, &priv->tx_dma_laddr, GFP_KERNEL); if (!priv->tx_dma_vaddr) { dev_err(&pciefd->pci_dev->dev, "Tx dmam_alloc_coherent(%u) failure\n", PCIEFD_TX_DMA_SIZE); goto err_free_candev; } /* CAN clock in RST mode */ pciefd_can_writereg(priv, CANFD_MISC_TS_RST, PCIEFD_REG_CAN_MISC); /* read current clock value */ clk = pciefd_can_readreg(priv, PCIEFD_REG_CAN_CLK_SEL); switch (clk) { case CANFD_CLK_SEL_20MHZ: priv->ucan.can.clock.freq = 20 * 1000 * 1000; break; case CANFD_CLK_SEL_24MHZ: priv->ucan.can.clock.freq = 24 * 1000 * 1000; break; case CANFD_CLK_SEL_30MHZ: priv->ucan.can.clock.freq = 30 * 1000 * 1000; break; case CANFD_CLK_SEL_40MHZ: priv->ucan.can.clock.freq = 40 * 1000 * 1000; break; case CANFD_CLK_SEL_60MHZ: priv->ucan.can.clock.freq = 60 * 1000 * 1000; break; default: pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ, PCIEFD_REG_CAN_CLK_SEL); fallthrough; case CANFD_CLK_SEL_80MHZ: priv->ucan.can.clock.freq = 80 * 1000 * 1000; break; } ndev->irq = pciefd->pci_dev->irq; SET_NETDEV_DEV(ndev, &pciefd->pci_dev->dev); err = register_candev(ndev); if (err) { dev_err(&pciefd->pci_dev->dev, "couldn't register CAN device: %d\n", err); goto err_free_candev; } spin_lock_init(&priv->tx_lock); /* save the object address in the board structure */ pciefd->can[pciefd->can_count] = priv; dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n", ndev->name, priv->reg_base, ndev->irq); return 0; err_free_candev: free_candev(ndev); failure: return -ENOMEM; } /* remove a CAN-FD channel by releasing all of its resources */ static void pciefd_can_remove(struct pciefd_can *priv) { /* unregister (close) the can device to go back to RST mode first */ unregister_candev(priv->ucan.ndev); /* finally, 
free the candev object */ free_candev(priv->ucan.ndev); } /* remove all CAN-FD channels by releasing their own resources */ static void pciefd_can_remove_all(struct pciefd_board *pciefd) { while (pciefd->can_count > 0) pciefd_can_remove(pciefd->can[--pciefd->can_count]); } /* probe for the entire device */ static int peak_pciefd_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct pciefd_board *pciefd; int err, can_count; u16 sub_sys_id; u8 hw_ver_major; u8 hw_ver_minor; u8 hw_ver_sub; u32 v2; err = pci_enable_device(pdev); if (err) return err; err = pci_request_regions(pdev, PCIEFD_DRV_NAME); if (err) goto err_disable_pci; /* the number of channels depends on sub-system id */ err = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sub_sys_id); if (err) goto err_release_regions; dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n", pdev->vendor, pdev->device, sub_sys_id); if (sub_sys_id >= 0x0012) can_count = 4; else if (sub_sys_id >= 0x0010) can_count = 3; else if (sub_sys_id >= 0x0004) can_count = 2; else can_count = 1; /* allocate board structure object */ pciefd = devm_kzalloc(&pdev->dev, struct_size(pciefd, can, can_count), GFP_KERNEL); if (!pciefd) { err = -ENOMEM; goto err_release_regions; } /* initialize the board structure */ pciefd->pci_dev = pdev; spin_lock_init(&pciefd->cmd_lock); /* save the PCI BAR0 virtual address for further system regs access */ pciefd->reg_base = pci_iomap(pdev, 0, PCIEFD_BAR0_SIZE); if (!pciefd->reg_base) { dev_err(&pdev->dev, "failed to map PCI resource #0\n"); err = -ENOMEM; goto err_release_regions; } /* read the firmware version number */ v2 = pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER2); hw_ver_major = (v2 & 0x0000f000) >> 12; hw_ver_minor = (v2 & 0x00000f00) >> 8; hw_ver_sub = (v2 & 0x000000f0) >> 4; dev_info(&pdev->dev, "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count, hw_ver_major, hw_ver_minor, hw_ver_sub); #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT /* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and * 64-bit logical addresses: this workaround forces usage of 32-bit * DMA addresses only when such a fw is detected. 
*/ if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) < PCIEFD_FW_VERSION(3, 3, 0)) { err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); if (err) dev_warn(&pdev->dev, "warning: can't set DMA mask %llxh (err %d)\n", DMA_BIT_MASK(32), err); } #endif /* stop system clock */ pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, PCIEFD_REG_SYS_CTL_CLR); pci_set_master(pdev); /* create now the corresponding channels objects */ while (pciefd->can_count < can_count) { err = pciefd_can_probe(pciefd); if (err) goto err_free_canfd; pciefd->can_count++; } /* set system timestamps counter in RST mode */ pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST, PCIEFD_REG_SYS_CTL_SET); /* wait a bit (read cycle) */ (void)pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER1); /* free all clocks */ pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST, PCIEFD_REG_SYS_CTL_CLR); /* start system clock */ pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN, PCIEFD_REG_SYS_CTL_SET); /* remember the board structure address in the device user data */ pci_set_drvdata(pdev, pciefd); return 0; err_free_canfd: pciefd_can_remove_all(pciefd); pci_iounmap(pdev, pciefd->reg_base); err_release_regions: pci_release_regions(pdev); err_disable_pci: pci_disable_device(pdev); /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while * the probe() function must return a negative errno in case of failure * (err is unchanged if negative) */ return pcibios_err_to_errno(err); } /* free the board structure object, as well as its resources: */ static void peak_pciefd_remove(struct pci_dev *pdev) { struct pciefd_board *pciefd = pci_get_drvdata(pdev); /* release CAN-FD channels resources */ pciefd_can_remove_all(pciefd); pci_iounmap(pdev, pciefd->reg_base); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_driver peak_pciefd_driver = { .name = PCIEFD_DRV_NAME, .id_table = peak_pciefd_tbl, .probe = peak_pciefd_probe, .remove = peak_pciefd_remove, }; module_pci_driver(peak_pciefd_driver);
linux-master
drivers/net/can/peak_canfd/peak_pciefd_main.c
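/*
 * Editor's note: a host-side model (not part of the kernel sources) of the
 * 4-bit Rx IRQ tag handshake between pciefd_irq_handler() and
 * pciefd_can_ack_rx_dma() above. The board stamps a tag into the low
 * nibble of the irq_status word heading the Rx DMA area; on the shared
 * interrupt line the driver claims only interrupts whose tag matches the
 * one it last acknowledged, then ACKs by writing the incremented tag
 * (modulo 16) to the RX_CTL_ACK register. The demo_* names are
 * hypothetical.
 */
#include <assert.h>
#include <stdint.h>

struct demo_chan {
	uint32_t irq_tag;	/* next tag we expect / will ACK */
};

static int demo_irq_is_mine(const struct demo_chan *c, uint32_t irq_status)
{
	return (irq_status & 0xf) == c->irq_tag;	/* cf. pciefd_irq_tag() */
}

static uint32_t demo_ack_rx_dma(struct demo_chan *c, uint32_t dma_word)
{
	c->irq_tag = (dma_word + 1) & 0xf;	/* increment and wrap like the driver */
	return c->irq_tag;			/* value written to RX_CTL_ACK */
}

int main(void)
{
	struct demo_chan c = { .irq_tag = 0 };

	assert(!demo_irq_is_mine(&c, 0x131));		/* tag 1: not ours yet */
	assert(demo_irq_is_mine(&c, 0x130));		/* tag 0: matches */
	assert(demo_ack_rx_dma(&c, 0x130) == 1);	/* ACK moves on to tag 1 */
	assert(demo_ack_rx_dma(&c, 0x13f) == 0);	/* wraps modulo 16 */
	return 0;
}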
// SPDX-License-Identifier: GPL-2.0 /* Fintek F81604 USB-to-2CAN controller driver. * * Copyright (C) 2023 Ji-Ze Hong (Peter Hong) <[email protected]> */ #include <linux/bitfield.h> #include <linux/netdevice.h> #include <linux/units.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/platform/sja1000.h> #include <asm-generic/unaligned.h> /* vendor and product id */ #define F81604_VENDOR_ID 0x2c42 #define F81604_PRODUCT_ID 0x1709 #define F81604_CAN_CLOCK (12 * MEGA) #define F81604_MAX_DEV 2 #define F81604_SET_DEVICE_RETRY 10 #define F81604_USB_TIMEOUT 2000 #define F81604_SET_GET_REGISTER 0xA0 #define F81604_PORT_OFFSET 0x1000 #define F81604_MAX_RX_URBS 4 #define F81604_CMD_DATA 0x00 #define F81604_DLC_LEN_MASK GENMASK(3, 0) #define F81604_DLC_EFF_BIT BIT(7) #define F81604_DLC_RTR_BIT BIT(6) #define F81604_SFF_SHIFT 5 #define F81604_EFF_SHIFT 3 #define F81604_BRP_MASK GENMASK(5, 0) #define F81604_SJW_MASK GENMASK(7, 6) #define F81604_SEG1_MASK GENMASK(3, 0) #define F81604_SEG2_MASK GENMASK(6, 4) #define F81604_CLEAR_ALC 0 #define F81604_CLEAR_ECC 1 #define F81604_CLEAR_OVERRUN 2 /* device setting */ #define F81604_CTRL_MODE_REG 0x80 #define F81604_TX_ONESHOT (0x03 << 3) #define F81604_TX_NORMAL (0x01 << 3) #define F81604_RX_AUTO_RELEASE_BUF BIT(1) #define F81604_INT_WHEN_CHANGE BIT(0) #define F81604_TERMINATOR_REG 0x105 #define F81604_CAN0_TERM BIT(2) #define F81604_CAN1_TERM BIT(3) #define F81604_TERMINATION_DISABLED CAN_TERMINATION_DISABLED #define F81604_TERMINATION_ENABLED 120 /* SJA1000 registers - manual section 6.4 (Pelican Mode) */ #define F81604_SJA1000_MOD 0x00 #define F81604_SJA1000_CMR 0x01 #define F81604_SJA1000_IR 0x03 #define F81604_SJA1000_IER 0x04 #define F81604_SJA1000_ALC 0x0B #define F81604_SJA1000_ECC 0x0C #define F81604_SJA1000_RXERR 0x0E #define F81604_SJA1000_TXERR 0x0F #define F81604_SJA1000_ACCC0 0x10 #define F81604_SJA1000_ACCM0 0x14 #define F81604_MAX_FILTER_CNT 4 /* Common registers - manual section 6.5 */ #define F81604_SJA1000_BTR0 0x06 #define F81604_SJA1000_BTR1 0x07 #define F81604_SJA1000_BTR1_SAMPLE_TRIPLE BIT(7) #define F81604_SJA1000_OCR 0x08 #define F81604_SJA1000_CDR 0x1F /* mode register */ #define F81604_SJA1000_MOD_RM 0x01 #define F81604_SJA1000_MOD_LOM 0x02 #define F81604_SJA1000_MOD_STM 0x04 /* commands */ #define F81604_SJA1000_CMD_CDO 0x08 /* interrupt sources */ #define F81604_SJA1000_IRQ_BEI 0x80 #define F81604_SJA1000_IRQ_ALI 0x40 #define F81604_SJA1000_IRQ_EPI 0x20 #define F81604_SJA1000_IRQ_DOI 0x08 #define F81604_SJA1000_IRQ_EI 0x04 #define F81604_SJA1000_IRQ_TI 0x02 #define F81604_SJA1000_IRQ_RI 0x01 #define F81604_SJA1000_IRQ_ALL 0xFF #define F81604_SJA1000_IRQ_OFF 0x00 /* status register content */ #define F81604_SJA1000_SR_BS 0x80 #define F81604_SJA1000_SR_ES 0x40 #define F81604_SJA1000_SR_TCS 0x08 /* ECC register */ #define F81604_SJA1000_ECC_SEG 0x1F #define F81604_SJA1000_ECC_DIR 0x20 #define F81604_SJA1000_ECC_BIT 0x00 #define F81604_SJA1000_ECC_FORM 0x40 #define F81604_SJA1000_ECC_STUFF 0x80 #define F81604_SJA1000_ECC_MASK 0xc0 /* ALC register */ #define F81604_SJA1000_ALC_MASK 0x1f /* table of devices that work with this driver */ static const struct usb_device_id f81604_table[] = { { USB_DEVICE(F81604_VENDOR_ID, F81604_PRODUCT_ID) }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, f81604_table); static const struct ethtool_ops f81604_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const u16 f81604_termination[] = { 
F81604_TERMINATION_DISABLED, F81604_TERMINATION_ENABLED }; struct f81604_priv { struct net_device *netdev[F81604_MAX_DEV]; }; struct f81604_port_priv { struct can_priv can; struct net_device *netdev; struct sk_buff *echo_skb; unsigned long clear_flags; struct work_struct clear_reg_work; struct usb_device *dev; struct usb_interface *intf; struct usb_anchor urbs_anchor; }; /* Interrupt endpoint data format: * Byte 0: Status register. * Byte 1: Interrupt register. * Byte 2: Interrupt enable register. * Byte 3: Arbitration lost capture(ALC) register. * Byte 4: Error code capture(ECC) register. * Byte 5: Error warning limit register. * Byte 6: RX error counter register. * Byte 7: TX error counter register. * Byte 8: Reserved. */ struct f81604_int_data { u8 sr; u8 isrc; u8 ier; u8 alc; u8 ecc; u8 ewlr; u8 rxerr; u8 txerr; u8 val; } __packed __aligned(4); struct f81604_sff { __be16 id; u8 data[CAN_MAX_DLEN]; } __packed __aligned(2); struct f81604_eff { __be32 id; u8 data[CAN_MAX_DLEN]; } __packed __aligned(2); struct f81604_can_frame { u8 cmd; /* According for F81604 DLC define: * bit 3~0: data length (0~8) * bit6: is RTR flag. * bit7: is EFF frame. */ u8 dlc; union { struct f81604_sff sff; struct f81604_eff eff; }; } __packed __aligned(2); static const u8 bulk_in_addr[F81604_MAX_DEV] = { 2, 4 }; static const u8 bulk_out_addr[F81604_MAX_DEV] = { 1, 3 }; static const u8 int_in_addr[F81604_MAX_DEV] = { 1, 3 }; static int f81604_write(struct usb_device *dev, u16 reg, u8 data) { int ret; ret = usb_control_msg_send(dev, 0, F81604_SET_GET_REGISTER, USB_TYPE_VENDOR | USB_DIR_OUT, 0, reg, &data, sizeof(data), F81604_USB_TIMEOUT, GFP_KERNEL); if (ret) dev_err(&dev->dev, "%s: reg: %x data: %x failed: %pe\n", __func__, reg, data, ERR_PTR(ret)); return ret; } static int f81604_read(struct usb_device *dev, u16 reg, u8 *data) { int ret; ret = usb_control_msg_recv(dev, 0, F81604_SET_GET_REGISTER, USB_TYPE_VENDOR | USB_DIR_IN, 0, reg, data, sizeof(*data), F81604_USB_TIMEOUT, GFP_KERNEL); if (ret < 0) dev_err(&dev->dev, "%s: reg: %x failed: %pe\n", __func__, reg, ERR_PTR(ret)); return ret; } static int f81604_update_bits(struct usb_device *dev, u16 reg, u8 mask, u8 data) { int ret; u8 tmp; ret = f81604_read(dev, reg, &tmp); if (ret) return ret; tmp &= ~mask; tmp |= (mask & data); return f81604_write(dev, reg, tmp); } static int f81604_sja1000_write(struct f81604_port_priv *priv, u16 reg, u8 data) { int port = priv->netdev->dev_port; int real_reg; real_reg = reg + F81604_PORT_OFFSET * port + F81604_PORT_OFFSET; return f81604_write(priv->dev, real_reg, data); } static int f81604_sja1000_read(struct f81604_port_priv *priv, u16 reg, u8 *data) { int port = priv->netdev->dev_port; int real_reg; real_reg = reg + F81604_PORT_OFFSET * port + F81604_PORT_OFFSET; return f81604_read(priv->dev, real_reg, data); } static int f81604_set_reset_mode(struct f81604_port_priv *priv) { int ret, i; u8 tmp; /* disable interrupts */ ret = f81604_sja1000_write(priv, F81604_SJA1000_IER, F81604_SJA1000_IRQ_OFF); if (ret) return ret; for (i = 0; i < F81604_SET_DEVICE_RETRY; i++) { ret = f81604_sja1000_read(priv, F81604_SJA1000_MOD, &tmp); if (ret) return ret; /* check reset bit */ if (tmp & F81604_SJA1000_MOD_RM) { priv->can.state = CAN_STATE_STOPPED; return 0; } /* reset chip */ ret = f81604_sja1000_write(priv, F81604_SJA1000_MOD, F81604_SJA1000_MOD_RM); if (ret) return ret; } return -EPERM; } static int f81604_set_normal_mode(struct f81604_port_priv *priv) { u8 tmp, ier = 0; u8 mod_reg = 0; int ret, i; for (i = 0; i < 
F81604_SET_DEVICE_RETRY; i++) { ret = f81604_sja1000_read(priv, F81604_SJA1000_MOD, &tmp); if (ret) return ret; /* check reset bit */ if ((tmp & F81604_SJA1000_MOD_RM) == 0) { priv->can.state = CAN_STATE_ERROR_ACTIVE; /* enable interrupts, RI handled by bulk-in */ ier = F81604_SJA1000_IRQ_ALL & ~F81604_SJA1000_IRQ_RI; if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) ier &= ~F81604_SJA1000_IRQ_BEI; return f81604_sja1000_write(priv, F81604_SJA1000_IER, ier); } /* set chip to normal mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) mod_reg |= F81604_SJA1000_MOD_LOM; if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK) mod_reg |= F81604_SJA1000_MOD_STM; ret = f81604_sja1000_write(priv, F81604_SJA1000_MOD, mod_reg); if (ret) return ret; } return -EPERM; } static int f81604_chipset_init(struct f81604_port_priv *priv) { int i, ret; /* set clock divider and output control register */ ret = f81604_sja1000_write(priv, F81604_SJA1000_CDR, CDR_CBP | CDR_PELICAN); if (ret) return ret; /* set acceptance filter (accept all) */ for (i = 0; i < F81604_MAX_FILTER_CNT; ++i) { ret = f81604_sja1000_write(priv, F81604_SJA1000_ACCC0 + i, 0); if (ret) return ret; } for (i = 0; i < F81604_MAX_FILTER_CNT; ++i) { ret = f81604_sja1000_write(priv, F81604_SJA1000_ACCM0 + i, 0xFF); if (ret) return ret; } return f81604_sja1000_write(priv, F81604_SJA1000_OCR, OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL | OCR_MODE_NORMAL); } static void f81604_process_rx_packet(struct net_device *netdev, struct f81604_can_frame *frame) { struct net_device_stats *stats = &netdev->stats; struct can_frame *cf; struct sk_buff *skb; if (frame->cmd != F81604_CMD_DATA) return; skb = alloc_can_skb(netdev, &cf); if (!skb) { stats->rx_dropped++; return; } cf->len = can_cc_dlc2len(frame->dlc & F81604_DLC_LEN_MASK); if (frame->dlc & F81604_DLC_EFF_BIT) { cf->can_id = get_unaligned_be32(&frame->eff.id) >> F81604_EFF_SHIFT; cf->can_id |= CAN_EFF_FLAG; if (!(frame->dlc & F81604_DLC_RTR_BIT)) memcpy(cf->data, frame->eff.data, cf->len); } else { cf->can_id = get_unaligned_be16(&frame->sff.id) >> F81604_SFF_SHIFT; if (!(frame->dlc & F81604_DLC_RTR_BIT)) memcpy(cf->data, frame->sff.data, cf->len); } if (frame->dlc & F81604_DLC_RTR_BIT) cf->can_id |= CAN_RTR_FLAG; else stats->rx_bytes += cf->len; stats->rx_packets++; netif_rx(skb); } static void f81604_read_bulk_callback(struct urb *urb) { struct f81604_can_frame *frame = urb->transfer_buffer; struct net_device *netdev = urb->context; int ret; if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "%s: URB aborted %pe\n", __func__, ERR_PTR(urb->status)); switch (urb->status) { case 0: /* success */ break; case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: return; default: goto resubmit_urb; } if (urb->actual_length != sizeof(*frame)) { netdev_warn(netdev, "URB length %u not equal to %zu\n", urb->actual_length, sizeof(*frame)); goto resubmit_urb; } f81604_process_rx_packet(netdev, frame); resubmit_urb: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret == -ENODEV) netif_device_detach(netdev); else if (ret) netdev_err(netdev, "%s: failed to resubmit read bulk urb: %pe\n", __func__, ERR_PTR(ret)); } static void f81604_handle_tx(struct f81604_port_priv *priv, struct f81604_int_data *data) { struct net_device *netdev = priv->netdev; struct net_device_stats *stats = &netdev->stats; /* transmission buffer released */ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT && !(data->sr & F81604_SJA1000_SR_TCS)) { stats->tx_errors++; can_free_echo_skb(netdev, 0, NULL); } else { /* 
transmission complete */ stats->tx_bytes += can_get_echo_skb(netdev, 0, NULL); stats->tx_packets++; } netif_wake_queue(netdev); } static void f81604_handle_can_bus_errors(struct f81604_port_priv *priv, struct f81604_int_data *data) { enum can_state can_state = priv->can.state; struct net_device *netdev = priv->netdev; struct net_device_stats *stats = &netdev->stats; struct can_frame *cf; struct sk_buff *skb; /* Note: ALC/ECC will not auto clear by read here, must be cleared by * read register (via clear_reg_work). */ skb = alloc_can_err_skb(netdev, &cf); if (skb) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = data->txerr; cf->data[7] = data->rxerr; } if (data->isrc & F81604_SJA1000_IRQ_DOI) { /* data overrun interrupt */ netdev_dbg(netdev, "data overrun interrupt\n"); if (skb) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; } stats->rx_over_errors++; stats->rx_errors++; set_bit(F81604_CLEAR_OVERRUN, &priv->clear_flags); } if (data->isrc & F81604_SJA1000_IRQ_EI) { /* error warning interrupt */ netdev_dbg(netdev, "error warning interrupt\n"); if (data->sr & F81604_SJA1000_SR_BS) can_state = CAN_STATE_BUS_OFF; else if (data->sr & F81604_SJA1000_SR_ES) can_state = CAN_STATE_ERROR_WARNING; else can_state = CAN_STATE_ERROR_ACTIVE; } if (data->isrc & F81604_SJA1000_IRQ_BEI) { /* bus error interrupt */ netdev_dbg(netdev, "bus error interrupt\n"); priv->can.can_stats.bus_error++; stats->rx_errors++; if (skb) { cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; /* set error type */ switch (data->ecc & F81604_SJA1000_ECC_MASK) { case F81604_SJA1000_ECC_BIT: cf->data[2] |= CAN_ERR_PROT_BIT; break; case F81604_SJA1000_ECC_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; break; case F81604_SJA1000_ECC_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: break; } /* set error location */ cf->data[3] = data->ecc & F81604_SJA1000_ECC_SEG; /* Error occurred during transmission? */ if ((data->ecc & F81604_SJA1000_ECC_DIR) == 0) cf->data[2] |= CAN_ERR_PROT_TX; } set_bit(F81604_CLEAR_ECC, &priv->clear_flags); } if (data->isrc & F81604_SJA1000_IRQ_EPI) { if (can_state == CAN_STATE_ERROR_PASSIVE) can_state = CAN_STATE_ERROR_WARNING; else can_state = CAN_STATE_ERROR_PASSIVE; /* error passive interrupt */ netdev_dbg(netdev, "error passive interrupt: %d\n", can_state); } if (data->isrc & F81604_SJA1000_IRQ_ALI) { /* arbitration lost interrupt */ netdev_dbg(netdev, "arbitration lost interrupt\n"); priv->can.can_stats.arbitration_lost++; if (skb) { cf->can_id |= CAN_ERR_LOSTARB; cf->data[0] = data->alc & F81604_SJA1000_ALC_MASK; } set_bit(F81604_CLEAR_ALC, &priv->clear_flags); } if (can_state != priv->can.state) { enum can_state tx_state, rx_state; tx_state = data->txerr >= data->rxerr ? can_state : 0; rx_state = data->txerr <= data->rxerr ? 
can_state : 0; can_change_state(netdev, cf, tx_state, rx_state); if (can_state == CAN_STATE_BUS_OFF) can_bus_off(netdev); } if (priv->clear_flags) schedule_work(&priv->clear_reg_work); if (skb) netif_rx(skb); } static void f81604_read_int_callback(struct urb *urb) { struct f81604_int_data *data = urb->transfer_buffer; struct net_device *netdev = urb->context; struct f81604_port_priv *priv; int ret; priv = netdev_priv(netdev); if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "%s: Int URB aborted: %pe\n", __func__, ERR_PTR(urb->status)); switch (urb->status) { case 0: /* success */ break; case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: return; default: goto resubmit_urb; } /* handle Errors */ if (data->isrc & (F81604_SJA1000_IRQ_DOI | F81604_SJA1000_IRQ_EI | F81604_SJA1000_IRQ_BEI | F81604_SJA1000_IRQ_EPI | F81604_SJA1000_IRQ_ALI)) f81604_handle_can_bus_errors(priv, data); /* handle TX */ if (priv->can.state != CAN_STATE_BUS_OFF && (data->isrc & F81604_SJA1000_IRQ_TI)) f81604_handle_tx(priv, data); resubmit_urb: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret == -ENODEV) netif_device_detach(netdev); else if (ret) netdev_err(netdev, "%s: failed to resubmit int urb: %pe\n", __func__, ERR_PTR(ret)); } static void f81604_unregister_urbs(struct f81604_port_priv *priv) { usb_kill_anchored_urbs(&priv->urbs_anchor); } static int f81604_register_urbs(struct f81604_port_priv *priv) { struct net_device *netdev = priv->netdev; struct f81604_int_data *int_data; int id = netdev->dev_port; struct urb *int_urb; int rx_urb_cnt; int ret; for (rx_urb_cnt = 0; rx_urb_cnt < F81604_MAX_RX_URBS; ++rx_urb_cnt) { struct f81604_can_frame *frame; struct urb *rx_urb; rx_urb = usb_alloc_urb(0, GFP_KERNEL); if (!rx_urb) { ret = -ENOMEM; break; } frame = kmalloc(sizeof(*frame), GFP_KERNEL); if (!frame) { usb_free_urb(rx_urb); ret = -ENOMEM; break; } usb_fill_bulk_urb(rx_urb, priv->dev, usb_rcvbulkpipe(priv->dev, bulk_in_addr[id]), frame, sizeof(*frame), f81604_read_bulk_callback, netdev); rx_urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(rx_urb, &priv->urbs_anchor); ret = usb_submit_urb(rx_urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(rx_urb); usb_free_urb(rx_urb); break; } /* Drop reference, USB core will take care of freeing it */ usb_free_urb(rx_urb); } if (rx_urb_cnt == 0) { netdev_warn(netdev, "%s: submit rx urb failed: %pe\n", __func__, ERR_PTR(ret)); goto error; } int_urb = usb_alloc_urb(0, GFP_KERNEL); if (!int_urb) { ret = -ENOMEM; goto error; } int_data = kmalloc(sizeof(*int_data), GFP_KERNEL); if (!int_data) { usb_free_urb(int_urb); ret = -ENOMEM; goto error; } usb_fill_int_urb(int_urb, priv->dev, usb_rcvintpipe(priv->dev, int_in_addr[id]), int_data, sizeof(*int_data), f81604_read_int_callback, netdev, 1); int_urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(int_urb, &priv->urbs_anchor); ret = usb_submit_urb(int_urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(int_urb); usb_free_urb(int_urb); netdev_warn(netdev, "%s: submit int urb failed: %pe\n", __func__, ERR_PTR(ret)); goto error; } /* Drop reference, USB core will take care of freeing it */ usb_free_urb(int_urb); return 0; error: f81604_unregister_urbs(priv); return ret; } static int f81604_start(struct net_device *netdev) { struct f81604_port_priv *priv = netdev_priv(netdev); int ret; u8 mode; u8 tmp; mode = F81604_RX_AUTO_RELEASE_BUF | F81604_INT_WHEN_CHANGE; /* Set TR/AT mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) mode |= F81604_TX_ONESHOT; else mode |= F81604_TX_NORMAL; ret = 
f81604_sja1000_write(priv, F81604_CTRL_MODE_REG, mode); if (ret) return ret; /* set reset mode */ ret = f81604_set_reset_mode(priv); if (ret) return ret; ret = f81604_chipset_init(priv); if (ret) return ret; /* Clear error counters and error code capture */ ret = f81604_sja1000_write(priv, F81604_SJA1000_TXERR, 0); if (ret) return ret; ret = f81604_sja1000_write(priv, F81604_SJA1000_RXERR, 0); if (ret) return ret; /* Read clear for ECC/ALC/IR register */ ret = f81604_sja1000_read(priv, F81604_SJA1000_ECC, &tmp); if (ret) return ret; ret = f81604_sja1000_read(priv, F81604_SJA1000_ALC, &tmp); if (ret) return ret; ret = f81604_sja1000_read(priv, F81604_SJA1000_IR, &tmp); if (ret) return ret; ret = f81604_register_urbs(priv); if (ret) return ret; ret = f81604_set_normal_mode(priv); if (ret) { f81604_unregister_urbs(priv); return ret; } return 0; } static int f81604_set_bittiming(struct net_device *dev) { struct f81604_port_priv *priv = netdev_priv(dev); struct can_bittiming *bt = &priv->can.bittiming; u8 btr0, btr1; int ret; btr0 = FIELD_PREP(F81604_BRP_MASK, bt->brp - 1) | FIELD_PREP(F81604_SJW_MASK, bt->sjw - 1); btr1 = FIELD_PREP(F81604_SEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) | FIELD_PREP(F81604_SEG2_MASK, bt->phase_seg2 - 1); if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) btr1 |= F81604_SJA1000_BTR1_SAMPLE_TRIPLE; ret = f81604_sja1000_write(priv, F81604_SJA1000_BTR0, btr0); if (ret) { netdev_warn(dev, "%s: Set BTR0 failed: %pe\n", __func__, ERR_PTR(ret)); return ret; } ret = f81604_sja1000_write(priv, F81604_SJA1000_BTR1, btr1); if (ret) { netdev_warn(dev, "%s: Set BTR1 failed: %pe\n", __func__, ERR_PTR(ret)); return ret; } return 0; } static int f81604_set_mode(struct net_device *netdev, enum can_mode mode) { int ret; switch (mode) { case CAN_MODE_START: ret = f81604_start(netdev); if (!ret && netif_queue_stopped(netdev)) netif_wake_queue(netdev); break; default: ret = -EOPNOTSUPP; } return ret; } static void f81604_write_bulk_callback(struct urb *urb) { struct net_device *netdev = urb->context; if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "%s: Tx URB error: %pe\n", __func__, ERR_PTR(urb->status)); } static void f81604_clear_reg_work(struct work_struct *work) { struct f81604_port_priv *priv; u8 tmp; priv = container_of(work, struct f81604_port_priv, clear_reg_work); /* dummy read for clear Arbitration lost capture(ALC) register. */ if (test_and_clear_bit(F81604_CLEAR_ALC, &priv->clear_flags)) f81604_sja1000_read(priv, F81604_SJA1000_ALC, &tmp); /* dummy read for clear Error code capture(ECC) register. */ if (test_and_clear_bit(F81604_CLEAR_ECC, &priv->clear_flags)) f81604_sja1000_read(priv, F81604_SJA1000_ECC, &tmp); /* dummy write for clear data overrun flag. 
*/ if (test_and_clear_bit(F81604_CLEAR_OVERRUN, &priv->clear_flags)) f81604_sja1000_write(priv, F81604_SJA1000_CMR, F81604_SJA1000_CMD_CDO); } static netdev_tx_t f81604_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct can_frame *cf = (struct can_frame *)skb->data; struct f81604_port_priv *priv = netdev_priv(netdev); struct net_device_stats *stats = &netdev->stats; struct f81604_can_frame *frame; struct urb *write_urb; int ret; if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; netif_stop_queue(netdev); write_urb = usb_alloc_urb(0, GFP_ATOMIC); if (!write_urb) goto nomem_urb; frame = kzalloc(sizeof(*frame), GFP_ATOMIC); if (!frame) goto nomem_buf; usb_fill_bulk_urb(write_urb, priv->dev, usb_sndbulkpipe(priv->dev, bulk_out_addr[netdev->dev_port]), frame, sizeof(*frame), f81604_write_bulk_callback, priv->netdev); write_urb->transfer_flags |= URB_FREE_BUFFER; frame->cmd = F81604_CMD_DATA; frame->dlc = cf->len; if (cf->can_id & CAN_RTR_FLAG) frame->dlc |= F81604_DLC_RTR_BIT; if (cf->can_id & CAN_EFF_FLAG) { u32 id = (cf->can_id & CAN_EFF_MASK) << F81604_EFF_SHIFT; put_unaligned_be32(id, &frame->eff.id); frame->dlc |= F81604_DLC_EFF_BIT; if (!(cf->can_id & CAN_RTR_FLAG)) memcpy(&frame->eff.data, cf->data, cf->len); } else { u32 id = (cf->can_id & CAN_SFF_MASK) << F81604_SFF_SHIFT; put_unaligned_be16(id, &frame->sff.id); if (!(cf->can_id & CAN_RTR_FLAG)) memcpy(&frame->sff.data, cf->data, cf->len); } can_put_echo_skb(skb, netdev, 0, 0); ret = usb_submit_urb(write_urb, GFP_ATOMIC); if (ret) { netdev_err(netdev, "%s: failed to resubmit tx bulk urb: %pe\n", __func__, ERR_PTR(ret)); can_free_echo_skb(netdev, 0, NULL); stats->tx_dropped++; stats->tx_errors++; if (ret == -ENODEV) netif_device_detach(netdev); else netif_wake_queue(netdev); } /* let usb core take care of this urb */ usb_free_urb(write_urb); return NETDEV_TX_OK; nomem_buf: usb_free_urb(write_urb); nomem_urb: dev_kfree_skb(skb); stats->tx_dropped++; stats->tx_errors++; netif_wake_queue(netdev); return NETDEV_TX_OK; } static int f81604_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct f81604_port_priv *priv = netdev_priv(netdev); u8 txerr, rxerr; int ret; ret = f81604_sja1000_read(priv, F81604_SJA1000_TXERR, &txerr); if (ret) return ret; ret = f81604_sja1000_read(priv, F81604_SJA1000_RXERR, &rxerr); if (ret) return ret; bec->txerr = txerr; bec->rxerr = rxerr; return 0; } /* Open USB device */ static int f81604_open(struct net_device *netdev) { int ret; ret = open_candev(netdev); if (ret) return ret; ret = f81604_start(netdev); if (ret) { if (ret == -ENODEV) netif_device_detach(netdev); close_candev(netdev); return ret; } netif_start_queue(netdev); return 0; } /* Close USB device */ static int f81604_close(struct net_device *netdev) { struct f81604_port_priv *priv = netdev_priv(netdev); f81604_set_reset_mode(priv); netif_stop_queue(netdev); cancel_work_sync(&priv->clear_reg_work); close_candev(netdev); f81604_unregister_urbs(priv); return 0; } static const struct net_device_ops f81604_netdev_ops = { .ndo_open = f81604_open, .ndo_stop = f81604_close, .ndo_start_xmit = f81604_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct can_bittiming_const f81604_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; /* Called by the usb core when driver is unloaded or device is removed */ static void f81604_disconnect(struct usb_interface *intf) { struct 
f81604_priv *priv = usb_get_intfdata(intf); int i; for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { if (!priv->netdev[i]) continue; unregister_netdev(priv->netdev[i]); free_candev(priv->netdev[i]); } } static int __f81604_set_termination(struct usb_device *dev, int idx, u16 term) { u8 mask, data = 0; if (idx == 0) mask = F81604_CAN0_TERM; else mask = F81604_CAN1_TERM; if (term) data = mask; return f81604_update_bits(dev, F81604_TERMINATOR_REG, mask, data); } static int f81604_set_termination(struct net_device *netdev, u16 term) { struct f81604_port_priv *port_priv = netdev_priv(netdev); ASSERT_RTNL(); return __f81604_set_termination(port_priv->dev, netdev->dev_port, term); } static int f81604_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *dev = interface_to_usbdev(intf); struct net_device *netdev; struct f81604_priv *priv; int i, ret; priv = devm_kzalloc(&intf->dev, sizeof(*priv), GFP_KERNEL); if (!priv) return -ENOMEM; usb_set_intfdata(intf, priv); for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { ret = __f81604_set_termination(dev, i, 0); if (ret) { dev_err(&intf->dev, "Setting termination of CH#%d failed: %pe\n", i, ERR_PTR(ret)); return ret; } } for (i = 0; i < ARRAY_SIZE(priv->netdev); ++i) { struct f81604_port_priv *port_priv; netdev = alloc_candev(sizeof(*port_priv), 1); if (!netdev) { dev_err(&intf->dev, "Couldn't alloc candev: %d\n", i); ret = -ENOMEM; goto failure_cleanup; } port_priv = netdev_priv(netdev); INIT_WORK(&port_priv->clear_reg_work, f81604_clear_reg_work); init_usb_anchor(&port_priv->urbs_anchor); port_priv->intf = intf; port_priv->dev = dev; port_priv->netdev = netdev; port_priv->can.clock.freq = F81604_CAN_CLOCK; port_priv->can.termination_const = f81604_termination; port_priv->can.termination_const_cnt = ARRAY_SIZE(f81604_termination); port_priv->can.bittiming_const = &f81604_bittiming_const; port_priv->can.do_set_bittiming = f81604_set_bittiming; port_priv->can.do_set_mode = f81604_set_mode; port_priv->can.do_set_termination = f81604_set_termination; port_priv->can.do_get_berr_counter = f81604_get_berr_counter; port_priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_PRESUME_ACK; netdev->ethtool_ops = &f81604_ethtool_ops; netdev->netdev_ops = &f81604_netdev_ops; netdev->flags |= IFF_ECHO; netdev->dev_port = i; SET_NETDEV_DEV(netdev, &intf->dev); ret = register_candev(netdev); if (ret) { netdev_err(netdev, "register CAN device failed: %pe\n", ERR_PTR(ret)); free_candev(netdev); goto failure_cleanup; } priv->netdev[i] = netdev; } return 0; failure_cleanup: f81604_disconnect(intf); return ret; } static struct usb_driver f81604_driver = { .name = KBUILD_MODNAME, .probe = f81604_probe, .disconnect = f81604_disconnect, .id_table = f81604_table, }; module_usb_driver(f81604_driver); MODULE_AUTHOR("Ji-Ze Hong (Peter Hong) <[email protected]>"); MODULE_DESCRIPTION("Fintek F81604 USB to 2xCANBUS"); MODULE_LICENSE("GPL");
linux-master
drivers/net/can/usb/f81604.c
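/*
 * Editor's note: a worked sketch (not part of the kernel sources) of how
 * f81604_set_bittiming() above packs the SJA1000-style BTR0/BTR1 bytes;
 * the FIELD_PREP() calls with the F81604_*_MASK definitions reduce to the
 * classic shifts shown here: BRP-1 in BTR0 bits 5..0, SJW-1 in bits 7..6,
 * tseg1-1 in BTR1 bits 3..0, tseg2-1 in bits 6..4, triple sampling in
 * bit 7. The 500 kbit/s timing values are illustrative, derived from the
 * 12 MHz F81604_CAN_CLOCK.
 */
#include <assert.h>
#include <stdint.h>

static uint8_t demo_btr0(unsigned int brp, unsigned int sjw)
{
	return (uint8_t)(((sjw - 1) << 6) | ((brp - 1) & 0x3f));
}

static uint8_t demo_btr1(unsigned int tseg1, unsigned int tseg2, int triple)
{
	return (uint8_t)((triple ? 0x80 : 0) |
			 (((tseg2 - 1) & 0x7) << 4) | ((tseg1 - 1) & 0xf));
}

int main(void)
{
	/* 500 kbit/s from the 12 MHz core clock: 24 tq per bit with brp=1,
	 * tseg1 (= prop_seg + phase_seg1) = 16, tseg2 = 7, sjw = 1
	 */
	assert(12000000u / (1u * (1 + 16 + 7)) == 500000u);
	assert(demo_btr0(1, 1) == 0x00);
	assert(demo_btr1(16, 7, 0) == 0x6f);
	return 0;
}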
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro
 *
 * Copyright (C) 2010-2012 esd electronic system design gmbh, Matthias Fuchs <[email protected]>
 * Copyright (C) 2022-2023 esd electronics gmbh, Frank Jungclaus <[email protected]>
 */

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/units.h>
#include <linux/usb.h>

MODULE_AUTHOR("Matthias Fuchs <[email protected]>");
MODULE_AUTHOR("Frank Jungclaus <[email protected]>");
MODULE_DESCRIPTION("CAN driver for esd electronics gmbh CAN-USB/2, CAN-USB/3 and CAN-USB/Micro interfaces");
MODULE_LICENSE("GPL v2");

/* USB vendor and product ID */
#define ESD_USB_ESDGMBH_VENDOR_ID	0x0ab4
#define ESD_USB_CANUSB2_PRODUCT_ID	0x0010
#define ESD_USB_CANUSBM_PRODUCT_ID	0x0011
#define ESD_USB_CANUSB3_PRODUCT_ID	0x0014

/* CAN controller clock frequencies */
#define ESD_USB_2_CAN_CLOCK	(60 * MEGA) /* Hz */
#define ESD_USB_M_CAN_CLOCK	(36 * MEGA) /* Hz */
#define ESD_USB_3_CAN_CLOCK	(80 * MEGA) /* Hz */

/* Maximum number of CAN nets */
#define ESD_USB_MAX_NETS	2

/* USB commands */
#define ESD_USB_CMD_VERSION	1 /* also used for VERSION_REPLY */
#define ESD_USB_CMD_CAN_RX	2 /* device to host only */
#define ESD_USB_CMD_CAN_TX	3 /* also used for TX_DONE */
#define ESD_USB_CMD_SETBAUD	4 /* also used for SETBAUD_REPLY */
#define ESD_USB_CMD_TS		5 /* also used for TS_REPLY */
#define ESD_USB_CMD_IDADD	6 /* also used for IDADD_REPLY */

/* esd CAN message flags - dlc field */
#define ESD_USB_RTR	BIT(4)
#define ESD_USB_NO_BRS	BIT(4)
#define ESD_USB_ESI	BIT(5)
#define ESD_USB_FD	BIT(7)

/* esd CAN message flags - id field */
#define ESD_USB_EXTID	BIT(29)
#define ESD_USB_EVENT	BIT(30)
#define ESD_USB_IDMASK	GENMASK(28, 0)

/* esd CAN event ids */
#define ESD_USB_EV_CAN_ERROR_EXT	2 /* CAN controller specific diagnostic data */

/* baudrate message flags */
#define ESD_USB_LOM	BIT(30) /* Listen Only Mode */
#define ESD_USB_UBR	BIT(31) /* User Bit Rate (controller BTR) in bits 0..27 */
#define ESD_USB_NO_BAUDRATE	GENMASK(30, 0) /* bit rate unconfigured */

/* bit timing esd CAN-USB */
#define ESD_USB_2_TSEG1_SHIFT	16
#define ESD_USB_2_TSEG2_SHIFT	20
#define ESD_USB_2_SJW_SHIFT	14
#define ESD_USB_M_SJW_SHIFT	24
#define ESD_USB_TRIPLE_SAMPLES	BIT(23)

/* Transmitter Delay Compensation */
#define ESD_USB_3_TDC_MODE_AUTO	0

/* esd IDADD message */
#define ESD_USB_ID_ENABLE	BIT(7)
#define ESD_USB_MAX_ID_SEGMENT	64

/* SJA1000 ECC register (emulated by usb firmware) */
#define ESD_USB_SJA1000_ECC_SEG		GENMASK(4, 0)
#define ESD_USB_SJA1000_ECC_DIR		BIT(5)
#define ESD_USB_SJA1000_ECC_ERR		GENMASK(2, 1)
#define ESD_USB_SJA1000_ECC_BIT		0x00
#define ESD_USB_SJA1000_ECC_FORM	BIT(6)
#define ESD_USB_SJA1000_ECC_STUFF	BIT(7)
#define ESD_USB_SJA1000_ECC_MASK	GENMASK(7, 6)

/* esd bus state event codes */
#define ESD_USB_BUSSTATE_MASK		GENMASK(7, 6)
#define ESD_USB_BUSSTATE_WARN		BIT(6)
#define ESD_USB_BUSSTATE_ERRPASSIVE	BIT(7)
#define ESD_USB_BUSSTATE_BUSOFF		GENMASK(7, 6)

#define ESD_USB_RX_BUFFER_SIZE	1024
#define ESD_USB_MAX_RX_URBS	4
#define ESD_USB_MAX_TX_URBS	16 /* must be power of 2 */

/* Modes for CAN-USB/3, to be used for esd_usb_3_set_baudrate_msg_x.mode */
#define ESD_USB_3_BAUDRATE_MODE_DISABLE		0 /* remove from bus */
#define ESD_USB_3_BAUDRATE_MODE_INDEX		1 /* ESD (CiA) bit rate idx */
#define ESD_USB_3_BAUDRATE_MODE_BTR_CTRL	2 /* BTR values
(controller)*/ #define ESD_USB_3_BAUDRATE_MODE_BTR_CANONICAL 3 /* BTR values (canonical) */ #define ESD_USB_3_BAUDRATE_MODE_NUM 4 /* numerical bit rate */ #define ESD_USB_3_BAUDRATE_MODE_AUTOBAUD 5 /* autobaud */ /* Flags for CAN-USB/3, to be used for esd_usb_3_set_baudrate_msg_x.flags */ #define ESD_USB_3_BAUDRATE_FLAG_FD BIT(0) /* enable CAN FD mode */ #define ESD_USB_3_BAUDRATE_FLAG_LOM BIT(1) /* enable listen only mode */ #define ESD_USB_3_BAUDRATE_FLAG_STM BIT(2) /* enable self test mode */ #define ESD_USB_3_BAUDRATE_FLAG_TRS BIT(3) /* enable triple sampling */ #define ESD_USB_3_BAUDRATE_FLAG_TXP BIT(4) /* enable transmit pause */ struct esd_usb_header_msg { u8 len; /* total message length in 32bit words */ u8 cmd; u8 rsvd[2]; }; struct esd_usb_version_msg { u8 len; /* total message length in 32bit words */ u8 cmd; u8 rsvd; u8 flags; __le32 drv_version; }; struct esd_usb_version_reply_msg { u8 len; /* total message length in 32bit words */ u8 cmd; u8 nets; u8 features; __le32 version; u8 name[16]; __le32 rsvd; __le32 ts; }; struct esd_usb_rx_msg { u8 len; /* total message length in 32bit words */ u8 cmd; u8 net; u8 dlc; __le32 ts; __le32 id; /* upper 3 bits contain flags */ union { u8 data[CAN_MAX_DLEN]; u8 data_fd[CANFD_MAX_DLEN]; struct { u8 status; /* CAN Controller Status */ u8 ecc; /* Error Capture Register */ u8 rec; /* RX Error Counter */ u8 tec; /* TX Error Counter */ } ev_can_err_ext; /* For ESD_EV_CAN_ERROR_EXT */ }; }; struct esd_usb_tx_msg { u8 len; /* total message length in 32bit words */ u8 cmd; u8 net; u8 dlc; u32 hnd; /* opaque handle, not used by device */ __le32 id; /* upper 3 bits contain flags */ union { u8 data[CAN_MAX_DLEN]; u8 data_fd[CANFD_MAX_DLEN]; }; }; struct esd_usb_tx_done_msg { u8 len; /* total message length in 32bit words */ u8 cmd; u8 net; u8 status; u32 hnd; /* opaque handle, not used by device */ __le32 ts; }; struct esd_usb_id_filter_msg { u8 len; /* total message length in 32bit words */ u8 cmd; u8 net; u8 option; __le32 mask[ESD_USB_MAX_ID_SEGMENT + 1]; /* +1 for 29bit extended IDs */ }; struct esd_usb_set_baudrate_msg { u8 len; /* total message length in 32bit words */ u8 cmd; u8 net; u8 rsvd; __le32 baud; }; /* CAN-USB/3 baudrate configuration, used for nominal as well as for data bit rate */ struct esd_usb_3_baudrate_cfg { __le16 brp; /* bit rate pre-scaler */ __le16 tseg1; /* time segment before sample point */ __le16 tseg2; /* time segment after sample point */ __le16 sjw; /* synchronization jump Width */ }; /* In principle, the esd CAN-USB/3 supports Transmitter Delay Compensation (TDC), * but currently only the automatic TDC mode is supported by this driver. * An implementation for manual TDC configuration will follow. * * For information about struct esd_usb_3_tdc_cfg, see * NTCAN Application Developers Manual, 6.2.25 NTCAN_TDC_CFG + related chapters * https://esd.eu/fileadmin/esd/docs/manuals/NTCAN_Part1_Function_API_Manual_en_56.pdf */ struct esd_usb_3_tdc_cfg { u8 tdc_mode; /* transmitter delay compensation mode */ u8 ssp_offset; /* secondary sample point offset in mtq */ s8 ssp_shift; /* secondary sample point shift in mtq */ u8 tdc_filter; /* TDC filter in mtq */ }; /* Extended version of the above set_baudrate_msg for a CAN-USB/3 * to define the CAN bit timing configuration of the CAN controller in * CAN FD mode as well as in Classical CAN mode. 
* * The payload of this command is a NTCAN_BAUDRATE_X structure according to * esd electronics gmbh, NTCAN Application Developers Manual, 6.2.15 NTCAN_BAUDRATE_X * https://esd.eu/fileadmin/esd/docs/manuals/NTCAN_Part1_Function_API_Manual_en_56.pdf */ struct esd_usb_3_set_baudrate_msg_x { u8 len; /* total message length in 32bit words */ u8 cmd; u8 net; u8 rsvd; /*reserved */ /* Payload ... */ __le16 mode; /* mode word, see ESD_USB_3_BAUDRATE_MODE_xxx */ __le16 flags; /* control flags, see ESD_USB_3_BAUDRATE_FLAG_xxx */ struct esd_usb_3_tdc_cfg tdc; /* TDC configuration */ struct esd_usb_3_baudrate_cfg nom; /* nominal bit rate */ struct esd_usb_3_baudrate_cfg data; /* data bit rate */ }; /* Main message type used between library and application */ union __packed esd_usb_msg { struct esd_usb_header_msg hdr; struct esd_usb_version_msg version; struct esd_usb_version_reply_msg version_reply; struct esd_usb_rx_msg rx; struct esd_usb_tx_msg tx; struct esd_usb_tx_done_msg txdone; struct esd_usb_set_baudrate_msg setbaud; struct esd_usb_3_set_baudrate_msg_x setbaud_x; struct esd_usb_id_filter_msg filter; }; static struct usb_device_id esd_usb_table[] = { {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSB2_PRODUCT_ID)}, {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSBM_PRODUCT_ID)}, {USB_DEVICE(ESD_USB_ESDGMBH_VENDOR_ID, ESD_USB_CANUSB3_PRODUCT_ID)}, {} }; MODULE_DEVICE_TABLE(usb, esd_usb_table); struct esd_usb_net_priv; struct esd_tx_urb_context { struct esd_usb_net_priv *priv; u32 echo_index; }; struct esd_usb { struct usb_device *udev; struct esd_usb_net_priv *nets[ESD_USB_MAX_NETS]; struct usb_anchor rx_submitted; int net_count; u32 version; int rxinitdone; void *rxbuf[ESD_USB_MAX_RX_URBS]; dma_addr_t rxbuf_dma[ESD_USB_MAX_RX_URBS]; }; struct esd_usb_net_priv { struct can_priv can; /* must be the first member */ atomic_t active_tx_jobs; struct usb_anchor tx_submitted; struct esd_tx_urb_context tx_contexts[ESD_USB_MAX_TX_URBS]; struct esd_usb *usb; struct net_device *netdev; int index; u8 old_state; struct can_berr_counter bec; }; static void esd_usb_rx_event(struct esd_usb_net_priv *priv, union esd_usb_msg *msg) { struct net_device_stats *stats = &priv->netdev->stats; struct can_frame *cf; struct sk_buff *skb; u32 id = le32_to_cpu(msg->rx.id) & ESD_USB_IDMASK; if (id == ESD_USB_EV_CAN_ERROR_EXT) { u8 state = msg->rx.ev_can_err_ext.status; u8 ecc = msg->rx.ev_can_err_ext.ecc; priv->bec.rxerr = msg->rx.ev_can_err_ext.rec; priv->bec.txerr = msg->rx.ev_can_err_ext.tec; netdev_dbg(priv->netdev, "CAN_ERR_EV_EXT: dlc=%#02x state=%02x ecc=%02x rec=%02x tec=%02x\n", msg->rx.dlc, state, ecc, priv->bec.rxerr, priv->bec.txerr); /* if berr-reporting is off, only pass through on state change ... */ if (!(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && state == priv->old_state) return; skb = alloc_can_err_skb(priv->netdev, &cf); if (!skb) stats->rx_dropped++; if (state != priv->old_state) { enum can_state tx_state, rx_state; enum can_state new_state = CAN_STATE_ERROR_ACTIVE; priv->old_state = state; switch (state & ESD_USB_BUSSTATE_MASK) { case ESD_USB_BUSSTATE_BUSOFF: new_state = CAN_STATE_BUS_OFF; can_bus_off(priv->netdev); break; case ESD_USB_BUSSTATE_WARN: new_state = CAN_STATE_ERROR_WARNING; break; case ESD_USB_BUSSTATE_ERRPASSIVE: new_state = CAN_STATE_ERROR_PASSIVE; break; default: new_state = CAN_STATE_ERROR_ACTIVE; priv->bec.txerr = 0; priv->bec.rxerr = 0; break; } if (new_state != priv->can.state) { tx_state = (priv->bec.txerr >= priv->bec.rxerr) ? 
new_state : 0; rx_state = (priv->bec.txerr <= priv->bec.rxerr) ? new_state : 0; can_change_state(priv->netdev, cf, tx_state, rx_state); } } else if (skb) { priv->can.can_stats.bus_error++; stats->rx_errors++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (ecc & ESD_USB_SJA1000_ECC_MASK) { case ESD_USB_SJA1000_ECC_BIT: cf->data[2] |= CAN_ERR_PROT_BIT; break; case ESD_USB_SJA1000_ECC_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; break; case ESD_USB_SJA1000_ECC_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: break; } /* Error occurred during transmission? */ if (!(ecc & ESD_USB_SJA1000_ECC_DIR)) cf->data[2] |= CAN_ERR_PROT_TX; /* Bit stream position in CAN frame as the error was detected */ cf->data[3] = ecc & ESD_USB_SJA1000_ECC_SEG; } if (skb) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = priv->bec.txerr; cf->data[7] = priv->bec.rxerr; netif_rx(skb); } } } static void esd_usb_rx_can_msg(struct esd_usb_net_priv *priv, union esd_usb_msg *msg) { struct net_device_stats *stats = &priv->netdev->stats; struct can_frame *cf; struct canfd_frame *cfd; struct sk_buff *skb; u32 id; u8 len; if (!netif_device_present(priv->netdev)) return; id = le32_to_cpu(msg->rx.id); if (id & ESD_USB_EVENT) { esd_usb_rx_event(priv, msg); } else { if (msg->rx.dlc & ESD_USB_FD) { skb = alloc_canfd_skb(priv->netdev, &cfd); } else { skb = alloc_can_skb(priv->netdev, &cf); cfd = (struct canfd_frame *)cf; } if (skb == NULL) { stats->rx_dropped++; return; } cfd->can_id = id & ESD_USB_IDMASK; if (msg->rx.dlc & ESD_USB_FD) { /* masking by 0x0F is already done within can_fd_dlc2len() */ cfd->len = can_fd_dlc2len(msg->rx.dlc); len = cfd->len; if ((msg->rx.dlc & ESD_USB_NO_BRS) == 0) cfd->flags |= CANFD_BRS; if (msg->rx.dlc & ESD_USB_ESI) cfd->flags |= CANFD_ESI; } else { can_frame_set_cc_len(cf, msg->rx.dlc & ~ESD_USB_RTR, priv->can.ctrlmode); len = cf->len; if (msg->rx.dlc & ESD_USB_RTR) { cf->can_id |= CAN_RTR_FLAG; len = 0; } } if (id & ESD_USB_EXTID) cfd->can_id |= CAN_EFF_FLAG; memcpy(cfd->data, msg->rx.data_fd, len); stats->rx_bytes += len; stats->rx_packets++; netif_rx(skb); } } static void esd_usb_tx_done_msg(struct esd_usb_net_priv *priv, union esd_usb_msg *msg) { struct net_device_stats *stats = &priv->netdev->stats; struct net_device *netdev = priv->netdev; struct esd_tx_urb_context *context; if (!netif_device_present(netdev)) return; context = &priv->tx_contexts[msg->txdone.hnd & (ESD_USB_MAX_TX_URBS - 1)]; if (!msg->txdone.status) { stats->tx_packets++; stats->tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL); } else { stats->tx_errors++; can_free_echo_skb(netdev, context->echo_index, NULL); } /* Release context */ context->echo_index = ESD_USB_MAX_TX_URBS; atomic_dec(&priv->active_tx_jobs); netif_wake_queue(netdev); } static void esd_usb_read_bulk_callback(struct urb *urb) { struct esd_usb *dev = urb->context; int retval; int pos = 0; int i; switch (urb->status) { case 0: /* success */ break; case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: return; default: dev_info(dev->udev->dev.parent, "Rx URB aborted (%d)\n", urb->status); goto resubmit_urb; } while (pos < urb->actual_length) { union esd_usb_msg *msg; msg = (union esd_usb_msg *)(urb->transfer_buffer + pos); switch (msg->hdr.cmd) { case ESD_USB_CMD_CAN_RX: if (msg->rx.net >= dev->net_count) { dev_err(dev->udev->dev.parent, "format error\n"); break; } esd_usb_rx_can_msg(dev->nets[msg->rx.net], msg); break; case ESD_USB_CMD_CAN_TX: if (msg->txdone.net >= dev->net_count) { dev_err(dev->udev->dev.parent, "format error\n"); break; } 
esd_usb_tx_done_msg(dev->nets[msg->txdone.net], msg); break; } pos += msg->hdr.len * sizeof(u32); /* convert to # of bytes */ if (pos > urb->actual_length) { dev_err(dev->udev->dev.parent, "format error\n"); break; } } resubmit_urb: usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), urb->transfer_buffer, ESD_USB_RX_BUFFER_SIZE, esd_usb_read_bulk_callback, dev); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval == -ENODEV) { for (i = 0; i < dev->net_count; i++) { if (dev->nets[i]) netif_device_detach(dev->nets[i]->netdev); } } else if (retval) { dev_err(dev->udev->dev.parent, "failed resubmitting read bulk urb: %d\n", retval); } } /* callback for bulk IN urb */ static void esd_usb_write_bulk_callback(struct urb *urb) { struct esd_tx_urb_context *context = urb->context; struct esd_usb_net_priv *priv; struct net_device *netdev; size_t size = sizeof(union esd_usb_msg); WARN_ON(!context); priv = context->priv; netdev = priv->netdev; /* free up our allocated buffer */ usb_free_coherent(urb->dev, size, urb->transfer_buffer, urb->transfer_dma); if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); netif_trans_update(netdev); } static ssize_t firmware_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); struct esd_usb *dev = usb_get_intfdata(intf); return sprintf(buf, "%d.%d.%d\n", (dev->version >> 12) & 0xf, (dev->version >> 8) & 0xf, dev->version & 0xff); } static DEVICE_ATTR_RO(firmware); static ssize_t hardware_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); struct esd_usb *dev = usb_get_intfdata(intf); return sprintf(buf, "%d.%d.%d\n", (dev->version >> 28) & 0xf, (dev->version >> 24) & 0xf, (dev->version >> 16) & 0xff); } static DEVICE_ATTR_RO(hardware); static ssize_t nets_show(struct device *d, struct device_attribute *attr, char *buf) { struct usb_interface *intf = to_usb_interface(d); struct esd_usb *dev = usb_get_intfdata(intf); return sprintf(buf, "%d", dev->net_count); } static DEVICE_ATTR_RO(nets); static int esd_usb_send_msg(struct esd_usb *dev, union esd_usb_msg *msg) { int actual_length; return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), msg, msg->hdr.len * sizeof(u32), /* convert to # of bytes */ &actual_length, 1000); } static int esd_usb_wait_msg(struct esd_usb *dev, union esd_usb_msg *msg) { int actual_length; return usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, 1), msg, sizeof(*msg), &actual_length, 1000); } static int esd_usb_setup_rx_urbs(struct esd_usb *dev) { int i, err = 0; if (dev->rxinitdone) return 0; for (i = 0; i < ESD_USB_MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf = NULL; dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { err = -ENOMEM; break; } buf = usb_alloc_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE, GFP_KERNEL, &buf_dma); if (!buf) { dev_warn(dev->udev->dev.parent, "No memory left for USB buffer\n"); err = -ENOMEM; goto freeurb; } urb->transfer_dma = buf_dma; usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 1), buf, ESD_USB_RX_BUFFER_SIZE, esd_usb_read_bulk_callback, dev); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &dev->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { usb_unanchor_urb(urb); usb_free_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE, buf, urb->transfer_dma); goto freeurb; } dev->rxbuf[i] = buf; 
dev->rxbuf_dma[i] = buf_dma; freeurb: /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); if (err) break; } /* Did we submit any URBs */ if (i == 0) { dev_err(dev->udev->dev.parent, "couldn't setup read URBs\n"); return err; } /* Warn if we've couldn't transmit all the URBs */ if (i < ESD_USB_MAX_RX_URBS) { dev_warn(dev->udev->dev.parent, "rx performance may be slow\n"); } dev->rxinitdone = 1; return 0; } /* Start interface */ static int esd_usb_start(struct esd_usb_net_priv *priv) { struct esd_usb *dev = priv->usb; struct net_device *netdev = priv->netdev; union esd_usb_msg *msg; int err, i; msg = kmalloc(sizeof(*msg), GFP_KERNEL); if (!msg) { err = -ENOMEM; goto out; } /* Enable all IDs * The IDADD message takes up to 64 32 bit bitmasks (2048 bits). * Each bit represents one 11 bit CAN identifier. A set bit * enables reception of the corresponding CAN identifier. A cleared * bit disabled this identifier. An additional bitmask value * following the CAN 2.0A bits is used to enable reception of * extended CAN frames. Only the LSB of this final mask is checked * for the complete 29 bit ID range. The IDADD message also allows * filter configuration for an ID subset. In this case you can add * the number of the starting bitmask (0..64) to the filter.option * field followed by only some bitmasks. */ msg->hdr.cmd = ESD_USB_CMD_IDADD; msg->hdr.len = sizeof(struct esd_usb_id_filter_msg) / sizeof(u32); /* # of 32bit words */ msg->filter.net = priv->index; msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */ for (i = 0; i < ESD_USB_MAX_ID_SEGMENT; i++) msg->filter.mask[i] = cpu_to_le32(GENMASK(31, 0)); /* enable 29bit extended IDs */ msg->filter.mask[ESD_USB_MAX_ID_SEGMENT] = cpu_to_le32(BIT(0)); err = esd_usb_send_msg(dev, msg); if (err) goto out; err = esd_usb_setup_rx_urbs(dev); if (err) goto out; priv->can.state = CAN_STATE_ERROR_ACTIVE; out: if (err == -ENODEV) netif_device_detach(netdev); if (err) netdev_err(netdev, "couldn't start device: %d\n", err); kfree(msg); return err; } static void unlink_all_urbs(struct esd_usb *dev) { struct esd_usb_net_priv *priv; int i, j; usb_kill_anchored_urbs(&dev->rx_submitted); for (i = 0; i < ESD_USB_MAX_RX_URBS; ++i) usb_free_coherent(dev->udev, ESD_USB_RX_BUFFER_SIZE, dev->rxbuf[i], dev->rxbuf_dma[i]); for (i = 0; i < dev->net_count; i++) { priv = dev->nets[i]; if (priv) { usb_kill_anchored_urbs(&priv->tx_submitted); atomic_set(&priv->active_tx_jobs, 0); for (j = 0; j < ESD_USB_MAX_TX_URBS; j++) priv->tx_contexts[j].echo_index = ESD_USB_MAX_TX_URBS; } } } static int esd_usb_open(struct net_device *netdev) { struct esd_usb_net_priv *priv = netdev_priv(netdev); int err; /* common open */ err = open_candev(netdev); if (err) return err; /* finally start device */ err = esd_usb_start(priv); if (err) { netdev_warn(netdev, "couldn't start device: %d\n", err); close_candev(netdev); return err; } netif_start_queue(netdev); return 0; } static netdev_tx_t esd_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct esd_usb_net_priv *priv = netdev_priv(netdev); struct esd_usb *dev = priv->usb; struct esd_tx_urb_context *context = NULL; struct net_device_stats *stats = &netdev->stats; struct canfd_frame *cfd = (struct canfd_frame *)skb->data; union esd_usb_msg *msg; struct urb *urb; u8 *buf; int i, err; int ret = NETDEV_TX_OK; size_t size = sizeof(union esd_usb_msg); if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* create a URB, and a buffer for it, and copy the data to the URB */ urb = 
usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { stats->tx_dropped++; dev_kfree_skb(skb); goto nourbmem; } buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); stats->tx_dropped++; dev_kfree_skb(skb); goto nobufmem; } msg = (union esd_usb_msg *)buf; /* minimal length as # of 32bit words */ msg->hdr.len = offsetof(struct esd_usb_tx_msg, data) / sizeof(u32); msg->hdr.cmd = ESD_USB_CMD_CAN_TX; msg->tx.net = priv->index; if (can_is_canfd_skb(skb)) { msg->tx.dlc = can_fd_len2dlc(cfd->len); msg->tx.dlc |= ESD_USB_FD; if ((cfd->flags & CANFD_BRS) == 0) msg->tx.dlc |= ESD_USB_NO_BRS; } else { msg->tx.dlc = can_get_cc_dlc((struct can_frame *)cfd, priv->can.ctrlmode); if (cfd->can_id & CAN_RTR_FLAG) msg->tx.dlc |= ESD_USB_RTR; } msg->tx.id = cpu_to_le32(cfd->can_id & CAN_ERR_MASK); if (cfd->can_id & CAN_EFF_FLAG) msg->tx.id |= cpu_to_le32(ESD_USB_EXTID); memcpy(msg->tx.data_fd, cfd->data, cfd->len); /* round up, then divide by 4 to add the payload length as # of 32bit words */ msg->hdr.len += DIV_ROUND_UP(cfd->len, sizeof(u32)); for (i = 0; i < ESD_USB_MAX_TX_URBS; i++) { if (priv->tx_contexts[i].echo_index == ESD_USB_MAX_TX_URBS) { context = &priv->tx_contexts[i]; break; } } /* This may never happen */ if (!context) { netdev_warn(netdev, "couldn't find free context\n"); ret = NETDEV_TX_BUSY; goto releasebuf; } context->priv = priv; context->echo_index = i; /* hnd must not be 0 - MSB is stripped in txdone handling */ msg->tx.hnd = BIT(31) | i; /* returned in TX done message */ usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf, msg->hdr.len * sizeof(u32), /* convert to # of bytes */ esd_usb_write_bulk_callback, context); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &priv->tx_submitted); can_put_echo_skb(skb, netdev, context->echo_index, 0); atomic_inc(&priv->active_tx_jobs); /* Slow down tx path */ if (atomic_read(&priv->active_tx_jobs) >= ESD_USB_MAX_TX_URBS) netif_stop_queue(netdev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { can_free_echo_skb(netdev, context->echo_index, NULL); atomic_dec(&priv->active_tx_jobs); usb_unanchor_urb(urb); stats->tx_dropped++; if (err == -ENODEV) netif_device_detach(netdev); else netdev_warn(netdev, "failed tx_urb %d\n", err); goto releasebuf; } netif_trans_update(netdev); /* Release our reference to this URB, the USB core will eventually free * it entirely. 
*/ usb_free_urb(urb); return NETDEV_TX_OK; releasebuf: usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); nobufmem: usb_free_urb(urb); nourbmem: return ret; } static int esd_usb_close(struct net_device *netdev) { struct esd_usb_net_priv *priv = netdev_priv(netdev); union esd_usb_msg *msg; int i; msg = kmalloc(sizeof(*msg), GFP_KERNEL); if (!msg) return -ENOMEM; /* Disable all IDs (see esd_usb_start()) */ msg->hdr.cmd = ESD_USB_CMD_IDADD; msg->hdr.len = sizeof(struct esd_usb_id_filter_msg) / sizeof(u32);/* # of 32bit words */ msg->filter.net = priv->index; msg->filter.option = ESD_USB_ID_ENABLE; /* start with segment 0 */ for (i = 0; i <= ESD_USB_MAX_ID_SEGMENT; i++) msg->filter.mask[i] = 0; if (esd_usb_send_msg(priv->usb, msg) < 0) netdev_err(netdev, "sending idadd message failed\n"); /* set CAN controller to reset mode */ msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */ msg->hdr.cmd = ESD_USB_CMD_SETBAUD; msg->setbaud.net = priv->index; msg->setbaud.rsvd = 0; msg->setbaud.baud = cpu_to_le32(ESD_USB_NO_BAUDRATE); if (esd_usb_send_msg(priv->usb, msg) < 0) netdev_err(netdev, "sending setbaud message failed\n"); priv->can.state = CAN_STATE_STOPPED; netif_stop_queue(netdev); close_candev(netdev); kfree(msg); return 0; } static const struct net_device_ops esd_usb_netdev_ops = { .ndo_open = esd_usb_open, .ndo_stop = esd_usb_close, .ndo_start_xmit = esd_usb_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops esd_usb_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const struct can_bittiming_const esd_usb_2_bittiming_const = { .name = "esd_usb_2", .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; static int esd_usb_2_set_bittiming(struct net_device *netdev) { const struct can_bittiming_const *btc = &esd_usb_2_bittiming_const; struct esd_usb_net_priv *priv = netdev_priv(netdev); struct can_bittiming *bt = &priv->can.bittiming; union esd_usb_msg *msg; int err; u32 canbtr; int sjw_shift; canbtr = ESD_USB_UBR; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) canbtr |= ESD_USB_LOM; canbtr |= (bt->brp - 1) & (btc->brp_max - 1); if (le16_to_cpu(priv->usb->udev->descriptor.idProduct) == ESD_USB_CANUSBM_PRODUCT_ID) sjw_shift = ESD_USB_M_SJW_SHIFT; else sjw_shift = ESD_USB_2_SJW_SHIFT; canbtr |= ((bt->sjw - 1) & (btc->sjw_max - 1)) << sjw_shift; canbtr |= ((bt->prop_seg + bt->phase_seg1 - 1) & (btc->tseg1_max - 1)) << ESD_USB_2_TSEG1_SHIFT; canbtr |= ((bt->phase_seg2 - 1) & (btc->tseg2_max - 1)) << ESD_USB_2_TSEG2_SHIFT; if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) canbtr |= ESD_USB_TRIPLE_SAMPLES; msg = kmalloc(sizeof(*msg), GFP_KERNEL); if (!msg) return -ENOMEM; msg->hdr.len = sizeof(struct esd_usb_set_baudrate_msg) / sizeof(u32); /* # of 32bit words */ msg->hdr.cmd = ESD_USB_CMD_SETBAUD; msg->setbaud.net = priv->index; msg->setbaud.rsvd = 0; msg->setbaud.baud = cpu_to_le32(canbtr); netdev_dbg(netdev, "setting BTR=%#x\n", canbtr); err = esd_usb_send_msg(priv->usb, msg); kfree(msg); return err; } /* Nominal bittiming constants, see * Microchip SAM E70/S70/V70/V71, Data Sheet, Rev. 
G - 07/2022 * 48.6.8 MCAN Nominal Bit Timing and Prescaler Register */ static const struct can_bittiming_const esd_usb_3_nom_bittiming_const = { .name = "esd_usb_3", .tseg1_min = 2, .tseg1_max = 256, .tseg2_min = 2, .tseg2_max = 128, .sjw_max = 128, .brp_min = 1, .brp_max = 512, .brp_inc = 1, }; /* Data bittiming constants, see * Microchip SAM E70/S70/V70/V71, Data Sheet, Rev. G - 07/2022 * 48.6.4 MCAN Data Bit Timing and Prescaler Register */ static const struct can_bittiming_const esd_usb_3_data_bittiming_const = { .name = "esd_usb_3", .tseg1_min = 2, .tseg1_max = 32, .tseg2_min = 1, .tseg2_max = 16, .sjw_max = 8, .brp_min = 1, .brp_max = 32, .brp_inc = 1, }; static int esd_usb_3_set_bittiming(struct net_device *netdev) { const struct can_bittiming_const *nom_btc = &esd_usb_3_nom_bittiming_const; const struct can_bittiming_const *data_btc = &esd_usb_3_data_bittiming_const; struct esd_usb_net_priv *priv = netdev_priv(netdev); struct can_bittiming *nom_bt = &priv->can.bittiming; struct can_bittiming *data_bt = &priv->can.data_bittiming; struct esd_usb_3_set_baudrate_msg_x *baud_x; union esd_usb_msg *msg; u16 flags = 0; int err; msg = kmalloc(sizeof(*msg), GFP_KERNEL); if (!msg) return -ENOMEM; baud_x = &msg->setbaud_x; /* Canonical is the most reasonable mode for SocketCAN on CAN-USB/3 ... */ baud_x->mode = cpu_to_le16(ESD_USB_3_BAUDRATE_MODE_BTR_CANONICAL); if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) flags |= ESD_USB_3_BAUDRATE_FLAG_LOM; if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) flags |= ESD_USB_3_BAUDRATE_FLAG_TRS; baud_x->nom.brp = cpu_to_le16(nom_bt->brp & (nom_btc->brp_max - 1)); baud_x->nom.sjw = cpu_to_le16(nom_bt->sjw & (nom_btc->sjw_max - 1)); baud_x->nom.tseg1 = cpu_to_le16((nom_bt->prop_seg + nom_bt->phase_seg1) & (nom_btc->tseg1_max - 1)); baud_x->nom.tseg2 = cpu_to_le16(nom_bt->phase_seg2 & (nom_btc->tseg2_max - 1)); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { baud_x->data.brp = cpu_to_le16(data_bt->brp & (data_btc->brp_max - 1)); baud_x->data.sjw = cpu_to_le16(data_bt->sjw & (data_btc->sjw_max - 1)); baud_x->data.tseg1 = cpu_to_le16((data_bt->prop_seg + data_bt->phase_seg1) & (data_btc->tseg1_max - 1)); baud_x->data.tseg2 = cpu_to_le16(data_bt->phase_seg2 & (data_btc->tseg2_max - 1)); flags |= ESD_USB_3_BAUDRATE_FLAG_FD; } /* Currently this driver only supports the automatic TDC mode */ baud_x->tdc.tdc_mode = ESD_USB_3_TDC_MODE_AUTO; baud_x->tdc.ssp_offset = 0; baud_x->tdc.ssp_shift = 0; baud_x->tdc.tdc_filter = 0; baud_x->flags = cpu_to_le16(flags); baud_x->net = priv->index; baud_x->rsvd = 0; /* set len as # of 32bit words */ msg->hdr.len = sizeof(struct esd_usb_3_set_baudrate_msg_x) / sizeof(u32); msg->hdr.cmd = ESD_USB_CMD_SETBAUD; netdev_dbg(netdev, "ctrlmode=%#x/%#x, esd-net=%u, esd-mode=%#x, esd-flags=%#x\n", priv->can.ctrlmode, priv->can.ctrlmode_supported, priv->index, le16_to_cpu(baud_x->mode), flags); err = esd_usb_send_msg(priv->usb, msg); kfree(msg); return err; } static int esd_usb_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct esd_usb_net_priv *priv = netdev_priv(netdev); bec->txerr = priv->bec.txerr; bec->rxerr = priv->bec.rxerr; return 0; } static int esd_usb_set_mode(struct net_device *netdev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: netif_wake_queue(netdev); break; default: return -EOPNOTSUPP; } return 0; } static int esd_usb_probe_one_net(struct usb_interface *intf, int index) { struct esd_usb *dev = usb_get_intfdata(intf); struct net_device *netdev; struct esd_usb_net_priv *priv; int 
err = 0; int i; netdev = alloc_candev(sizeof(*priv), ESD_USB_MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "couldn't alloc candev\n"); err = -ENOMEM; goto done; } priv = netdev_priv(netdev); init_usb_anchor(&priv->tx_submitted); atomic_set(&priv->active_tx_jobs, 0); for (i = 0; i < ESD_USB_MAX_TX_URBS; i++) priv->tx_contexts[i].echo_index = ESD_USB_MAX_TX_URBS; priv->usb = dev; priv->netdev = netdev; priv->index = index; priv->can.state = CAN_STATE_STOPPED; priv->can.ctrlmode_supported = CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_BERR_REPORTING; switch (le16_to_cpu(dev->udev->descriptor.idProduct)) { case ESD_USB_CANUSB3_PRODUCT_ID: priv->can.clock.freq = ESD_USB_3_CAN_CLOCK; priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD; priv->can.bittiming_const = &esd_usb_3_nom_bittiming_const; priv->can.data_bittiming_const = &esd_usb_3_data_bittiming_const; priv->can.do_set_bittiming = esd_usb_3_set_bittiming; priv->can.do_set_data_bittiming = esd_usb_3_set_bittiming; break; case ESD_USB_CANUSBM_PRODUCT_ID: priv->can.clock.freq = ESD_USB_M_CAN_CLOCK; priv->can.bittiming_const = &esd_usb_2_bittiming_const; priv->can.do_set_bittiming = esd_usb_2_set_bittiming; break; case ESD_USB_CANUSB2_PRODUCT_ID: default: priv->can.clock.freq = ESD_USB_2_CAN_CLOCK; priv->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; priv->can.bittiming_const = &esd_usb_2_bittiming_const; priv->can.do_set_bittiming = esd_usb_2_set_bittiming; break; } priv->can.do_set_mode = esd_usb_set_mode; priv->can.do_get_berr_counter = esd_usb_get_berr_counter; netdev->flags |= IFF_ECHO; /* we support local echo */ netdev->netdev_ops = &esd_usb_netdev_ops; netdev->ethtool_ops = &esd_usb_ethtool_ops; SET_NETDEV_DEV(netdev, &intf->dev); netdev->dev_id = index; err = register_candev(netdev); if (err) { dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); free_candev(netdev); err = -ENOMEM; goto done; } dev->nets[index] = priv; netdev_info(netdev, "device %s registered\n", netdev->name); done: return err; } /* probe function for new USB devices * * check version information and number of available * CAN interfaces */ static int esd_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct esd_usb *dev; union esd_usb_msg *msg; int i, err; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { err = -ENOMEM; goto done; } dev->udev = interface_to_usbdev(intf); init_usb_anchor(&dev->rx_submitted); usb_set_intfdata(intf, dev); msg = kmalloc(sizeof(*msg), GFP_KERNEL); if (!msg) { err = -ENOMEM; goto free_msg; } /* query number of CAN interfaces (nets) */ msg->hdr.cmd = ESD_USB_CMD_VERSION; msg->hdr.len = sizeof(struct esd_usb_version_msg) / sizeof(u32); /* # of 32bit words */ msg->version.rsvd = 0; msg->version.flags = 0; msg->version.drv_version = 0; err = esd_usb_send_msg(dev, msg); if (err < 0) { dev_err(&intf->dev, "sending version message failed\n"); goto free_msg; } err = esd_usb_wait_msg(dev, msg); if (err < 0) { dev_err(&intf->dev, "no version message answer\n"); goto free_msg; } dev->net_count = (int)msg->version_reply.nets; dev->version = le32_to_cpu(msg->version_reply.version); if (device_create_file(&intf->dev, &dev_attr_firmware)) dev_err(&intf->dev, "Couldn't create device file for firmware\n"); if (device_create_file(&intf->dev, &dev_attr_hardware)) dev_err(&intf->dev, "Couldn't create device file for hardware\n"); if (device_create_file(&intf->dev, &dev_attr_nets)) dev_err(&intf->dev, "Couldn't create device file for 
nets\n"); /* do per device probing */ for (i = 0; i < dev->net_count; i++) esd_usb_probe_one_net(intf, i); free_msg: kfree(msg); if (err) kfree(dev); done: return err; } /* called by the usb core when the device is removed from the system */ static void esd_usb_disconnect(struct usb_interface *intf) { struct esd_usb *dev = usb_get_intfdata(intf); struct net_device *netdev; int i; device_remove_file(&intf->dev, &dev_attr_firmware); device_remove_file(&intf->dev, &dev_attr_hardware); device_remove_file(&intf->dev, &dev_attr_nets); usb_set_intfdata(intf, NULL); if (dev) { for (i = 0; i < dev->net_count; i++) { if (dev->nets[i]) { netdev = dev->nets[i]->netdev; unregister_netdev(netdev); free_candev(netdev); } } unlink_all_urbs(dev); kfree(dev); } } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver esd_usb_driver = { .name = KBUILD_MODNAME, .probe = esd_usb_probe, .disconnect = esd_usb_disconnect, .id_table = esd_usb_table, }; module_usb_driver(esd_usb_driver);
linux-master
drivers/net/can/usb/esd_usb.c
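The next file, ucan.c, documents an IN endpoint that carries multiple messages back to back, each one padded to start on a 4-byte boundary. As a minimal sketch of that offset rule (the ucan_sketch_* name is hypothetical and the code is user-space, not part of the driver):

#include <assert.h>
#include <stdint.h>

/* A UCAN IN buffer holds messages back to back; each starts on a 4-byte
 * boundary, so the next offset is the current one plus the message
 * length, rounded up: offset(m[n+1]) = (offset(m[n]) + len + 3) & ~3.
 */
static uint32_t ucan_sketch_next_offset(uint32_t pos, uint16_t len)
{
	return (pos + len + 3u) & ~3u;
}

int main(void)
{
	/* an 11-byte message at offset 0 is followed at offset 12 */
	assert(ucan_sketch_next_offset(0, 11) == 12);
	/* a 16-byte message at offset 12 is followed at offset 28 */
	assert(ucan_sketch_next_offset(12, 16) == 28);
	return 0;
}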
// SPDX-License-Identifier: GPL-2.0
/* Driver for Theobroma Systems UCAN devices, Protocol Version 3
 *
 * Copyright (C) 2018 Theobroma Systems Design und Consulting GmbH
 *
 *
 * General Description:
 *
 * The USB Device uses three Endpoints:
 *
 *   CONTROL Endpoint: Is used to set up the device (start, stop,
 *   info, configure).
 *
 *   IN Endpoint: The device sends CAN Frame Messages and Device
 *   Information using the IN endpoint.
 *
 *   OUT Endpoint: The driver sends configuration requests, and CAN
 *   Frames on the out endpoint.
 *
 * Error Handling:
 *
 *   If error reporting is turned on the device encodes errors into CAN
 *   error frames (see uapi/linux/can/error.h) and sends them using the
 *   IN Endpoint. The driver updates statistics and forwards them.
 */

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/usb.h>

#define UCAN_DRIVER_NAME "ucan"
#define UCAN_MAX_RX_URBS 8
/* the CAN controller needs a while to enable/disable the bus */
#define UCAN_USB_CTL_PIPE_TIMEOUT 1000
/* this driver currently supports protocol version 3 only */
#define UCAN_PROTOCOL_VERSION_MIN 3
#define UCAN_PROTOCOL_VERSION_MAX 3

/* UCAN Message Definitions
 * ------------------------
 *
 * ucan_message_out_t and ucan_message_in_t define the messages
 * transmitted on the OUT and IN endpoint.
 *
 * Multibyte fields are transmitted in little-endian byte order.
 *
 * INTR Endpoint: a single uint32_t storing the current space in the fifo
 *
 * OUT Endpoint: a single message of type ucan_message_out_t is
 * transmitted on the out endpoint
 *
 * IN Endpoint: multiple messages ucan_message_in_t concatenated in
 * the following way:
 *
 *   m[n].len <=> the length of message n (including the header) in bytes
 *   m[n] is aligned to a 4 byte boundary, hence
 *     offset(m[0])   := 0;
 *     offset(m[n+1]) := (offset(m[n]) + m[n].len + 3) & ~3
 *
 *   this implies that
 *     offset(m[n]) % 4 <=> 0
 */

/* Device Global Commands */
enum {
	UCAN_DEVICE_GET_FW_STRING = 0,
};

/* UCAN Commands */
enum {
	/* start the can transceiver - val defines the operation mode */
	UCAN_COMMAND_START = 0,
	/* cancel pending transmissions and stop the can transceiver */
	UCAN_COMMAND_STOP = 1,
	/* send can transceiver into low-power sleep mode */
	UCAN_COMMAND_SLEEP = 2,
	/* wake up can transceiver from low-power sleep mode */
	UCAN_COMMAND_WAKEUP = 3,
	/* reset the can transceiver */
	UCAN_COMMAND_RESET = 4,
	/* get piece of info from the can transceiver - subcmd defines what
	 * piece
	 */
	UCAN_COMMAND_GET = 5,
	/* clear or disable hardware filter - subcmd defines which of the two */
	UCAN_COMMAND_FILTER = 6,
	/* Setup bittiming */
	UCAN_COMMAND_SET_BITTIMING = 7,
	/* recover from bus-off state */
	UCAN_COMMAND_RESTART = 8,
};

/* UCAN_COMMAND_START and UCAN_COMMAND_GET_INFO operation modes (bitmap).
 * Undefined bits must be set to 0.
*/ enum { UCAN_MODE_LOOPBACK = BIT(0), UCAN_MODE_SILENT = BIT(1), UCAN_MODE_3_SAMPLES = BIT(2), UCAN_MODE_ONE_SHOT = BIT(3), UCAN_MODE_BERR_REPORT = BIT(4), }; /* UCAN_COMMAND_GET subcommands */ enum { UCAN_COMMAND_GET_INFO = 0, UCAN_COMMAND_GET_PROTOCOL_VERSION = 1, }; /* UCAN_COMMAND_FILTER subcommands */ enum { UCAN_FILTER_CLEAR = 0, UCAN_FILTER_DISABLE = 1, UCAN_FILTER_ENABLE = 2, }; /* OUT endpoint message types */ enum { UCAN_OUT_TX = 2, /* transmit a CAN frame */ }; /* IN endpoint message types */ enum { UCAN_IN_TX_COMPLETE = 1, /* CAN frame transmission completed */ UCAN_IN_RX = 2, /* CAN frame received */ }; struct ucan_ctl_cmd_start { __le16 mode; /* OR-ing any of UCAN_MODE_* */ } __packed; struct ucan_ctl_cmd_set_bittiming { __le32 tq; /* Time quanta (TQ) in nanoseconds */ __le16 brp; /* TQ Prescaler */ __le16 sample_point; /* Samplepoint on tenth percent */ u8 prop_seg; /* Propagation segment in TQs */ u8 phase_seg1; /* Phase buffer segment 1 in TQs */ u8 phase_seg2; /* Phase buffer segment 2 in TQs */ u8 sjw; /* Synchronisation jump width in TQs */ } __packed; struct ucan_ctl_cmd_device_info { __le32 freq; /* Clock Frequency for tq generation */ u8 tx_fifo; /* Size of the transmission fifo */ u8 sjw_max; /* can_bittiming fields... */ u8 tseg1_min; u8 tseg1_max; u8 tseg2_min; u8 tseg2_max; __le16 brp_inc; __le32 brp_min; __le32 brp_max; /* ...can_bittiming fields */ __le16 ctrlmodes; /* supported control modes */ __le16 hwfilter; /* Number of HW filter banks */ __le16 rxmboxes; /* Number of receive Mailboxes */ } __packed; struct ucan_ctl_cmd_get_protocol_version { __le32 version; } __packed; union ucan_ctl_payload { /* Setup Bittiming * bmRequest == UCAN_COMMAND_START */ struct ucan_ctl_cmd_start cmd_start; /* Setup Bittiming * bmRequest == UCAN_COMMAND_SET_BITTIMING */ struct ucan_ctl_cmd_set_bittiming cmd_set_bittiming; /* Get Device Information * bmRequest == UCAN_COMMAND_GET; wValue = UCAN_COMMAND_GET_INFO */ struct ucan_ctl_cmd_device_info cmd_get_device_info; /* Get Protocol Version * bmRequest == UCAN_COMMAND_GET; * wValue = UCAN_COMMAND_GET_PROTOCOL_VERSION */ struct ucan_ctl_cmd_get_protocol_version cmd_get_protocol_version; u8 raw[128]; } __packed; enum { UCAN_TX_COMPLETE_SUCCESS = BIT(0), }; /* Transmission Complete within ucan_message_in */ struct ucan_tx_complete_entry_t { u8 echo_index; u8 flags; } __packed __aligned(0x2); /* CAN Data message format within ucan_message_in/out */ struct ucan_can_msg { /* note DLC is computed by * msg.len - sizeof (msg.len) * - sizeof (msg.type) * - sizeof (msg.can_msg.id) */ __le32 id; union { u8 data[CAN_MAX_DLEN]; /* Data of CAN frames */ u8 dlc; /* RTR dlc */ }; } __packed; /* OUT Endpoint, outbound messages */ struct ucan_message_out { __le16 len; /* Length of the content include header */ u8 type; /* UCAN_OUT_TX and friends */ u8 subtype; /* command sub type */ union { /* Transmit CAN frame * (type == UCAN_TX) && ((msg.can_msg.id & CAN_RTR_FLAG) == 0) * subtype stores the echo id */ struct ucan_can_msg can_msg; } msg; } __packed __aligned(0x4); /* IN Endpoint, inbound messages */ struct ucan_message_in { __le16 len; /* Length of the content include header */ u8 type; /* UCAN_IN_RX and friends */ u8 subtype; /* command sub type */ union { /* CAN Frame received * (type == UCAN_IN_RX) * && ((msg.can_msg.id & CAN_RTR_FLAG) == 0) */ struct ucan_can_msg can_msg; /* CAN transmission complete * (type == UCAN_IN_TX_COMPLETE) */ DECLARE_FLEX_ARRAY(struct ucan_tx_complete_entry_t, can_tx_complete_msg); } __aligned(0x4) msg; } __packed 
__aligned(0x4); /* Macros to calculate message lengths */ #define UCAN_OUT_HDR_SIZE offsetof(struct ucan_message_out, msg) #define UCAN_IN_HDR_SIZE offsetof(struct ucan_message_in, msg) #define UCAN_IN_LEN(member) (UCAN_OUT_HDR_SIZE + sizeof(member)) struct ucan_priv; /* Context Information for transmission URBs */ struct ucan_urb_context { struct ucan_priv *up; bool allocated; }; /* Information reported by the USB device */ struct ucan_device_info { struct can_bittiming_const bittiming_const; u8 tx_fifo; }; /* Driver private data */ struct ucan_priv { /* must be the first member */ struct can_priv can; /* linux USB device structures */ struct usb_device *udev; struct net_device *netdev; /* lock for can->echo_skb (used around * can_put/get/free_echo_skb */ spinlock_t echo_skb_lock; /* usb device information */ u8 intf_index; u8 in_ep_addr; u8 out_ep_addr; u16 in_ep_size; /* transmission and reception buffers */ struct usb_anchor rx_urbs; struct usb_anchor tx_urbs; union ucan_ctl_payload *ctl_msg_buffer; struct ucan_device_info device_info; /* transmission control information and locks */ spinlock_t context_lock; unsigned int available_tx_urbs; struct ucan_urb_context *context_array; }; static u8 ucan_can_cc_dlc2len(struct ucan_can_msg *msg, u16 len) { if (le32_to_cpu(msg->id) & CAN_RTR_FLAG) return can_cc_dlc2len(msg->dlc); else return can_cc_dlc2len(len - (UCAN_IN_HDR_SIZE + sizeof(msg->id))); } static void ucan_release_context_array(struct ucan_priv *up) { if (!up->context_array) return; /* lock is not needed because, driver is currently opening or closing */ up->available_tx_urbs = 0; kfree(up->context_array); up->context_array = NULL; } static int ucan_alloc_context_array(struct ucan_priv *up) { int i; /* release contexts if any */ ucan_release_context_array(up); up->context_array = kcalloc(up->device_info.tx_fifo, sizeof(*up->context_array), GFP_KERNEL); if (!up->context_array) { netdev_err(up->netdev, "Not enough memory to allocate tx contexts\n"); return -ENOMEM; } for (i = 0; i < up->device_info.tx_fifo; i++) { up->context_array[i].allocated = false; up->context_array[i].up = up; } /* lock is not needed because, driver is currently opening */ up->available_tx_urbs = up->device_info.tx_fifo; return 0; } static struct ucan_urb_context *ucan_alloc_context(struct ucan_priv *up) { int i; unsigned long flags; struct ucan_urb_context *ret = NULL; if (WARN_ON_ONCE(!up->context_array)) return NULL; /* execute context operation atomically */ spin_lock_irqsave(&up->context_lock, flags); for (i = 0; i < up->device_info.tx_fifo; i++) { if (!up->context_array[i].allocated) { /* update context */ ret = &up->context_array[i]; up->context_array[i].allocated = true; /* stop queue if necessary */ up->available_tx_urbs--; if (!up->available_tx_urbs) netif_stop_queue(up->netdev); break; } } spin_unlock_irqrestore(&up->context_lock, flags); return ret; } static bool ucan_release_context(struct ucan_priv *up, struct ucan_urb_context *ctx) { unsigned long flags; bool ret = false; if (WARN_ON_ONCE(!up->context_array)) return false; /* execute context operation atomically */ spin_lock_irqsave(&up->context_lock, flags); /* context was not allocated, maybe the device sent garbage */ if (ctx->allocated) { ctx->allocated = false; /* check if the queue needs to be woken */ if (!up->available_tx_urbs) netif_wake_queue(up->netdev); up->available_tx_urbs++; ret = true; } spin_unlock_irqrestore(&up->context_lock, flags); return ret; } static int ucan_ctrl_command_out(struct ucan_priv *up, u8 cmd, u16 subcmd, u16 
datalen) { return usb_control_msg(up->udev, usb_sndctrlpipe(up->udev, 0), cmd, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, subcmd, up->intf_index, up->ctl_msg_buffer, datalen, UCAN_USB_CTL_PIPE_TIMEOUT); } static int ucan_device_request_in(struct ucan_priv *up, u8 cmd, u16 subcmd, u16 datalen) { return usb_control_msg(up->udev, usb_rcvctrlpipe(up->udev, 0), cmd, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, subcmd, 0, up->ctl_msg_buffer, datalen, UCAN_USB_CTL_PIPE_TIMEOUT); } /* Parse the device information structure reported by the device and * setup private variables accordingly */ static void ucan_parse_device_info(struct ucan_priv *up, struct ucan_ctl_cmd_device_info *device_info) { struct can_bittiming_const *bittiming = &up->device_info.bittiming_const; u16 ctrlmodes; /* store the data */ up->can.clock.freq = le32_to_cpu(device_info->freq); up->device_info.tx_fifo = device_info->tx_fifo; strcpy(bittiming->name, "ucan"); bittiming->tseg1_min = device_info->tseg1_min; bittiming->tseg1_max = device_info->tseg1_max; bittiming->tseg2_min = device_info->tseg2_min; bittiming->tseg2_max = device_info->tseg2_max; bittiming->sjw_max = device_info->sjw_max; bittiming->brp_min = le32_to_cpu(device_info->brp_min); bittiming->brp_max = le32_to_cpu(device_info->brp_max); bittiming->brp_inc = le16_to_cpu(device_info->brp_inc); ctrlmodes = le16_to_cpu(device_info->ctrlmodes); up->can.ctrlmode_supported = 0; if (ctrlmodes & UCAN_MODE_LOOPBACK) up->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK; if (ctrlmodes & UCAN_MODE_SILENT) up->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; if (ctrlmodes & UCAN_MODE_3_SAMPLES) up->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; if (ctrlmodes & UCAN_MODE_ONE_SHOT) up->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; if (ctrlmodes & UCAN_MODE_BERR_REPORT) up->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING; } /* Handle a CAN error frame that we have received from the device. * Returns true if the can state has changed. 
*/ static bool ucan_handle_error_frame(struct ucan_priv *up, struct ucan_message_in *m, canid_t canid) { enum can_state new_state = up->can.state; struct net_device_stats *net_stats = &up->netdev->stats; struct can_device_stats *can_stats = &up->can.can_stats; if (canid & CAN_ERR_LOSTARB) can_stats->arbitration_lost++; if (canid & CAN_ERR_BUSERROR) can_stats->bus_error++; if (canid & CAN_ERR_ACK) net_stats->tx_errors++; if (canid & CAN_ERR_BUSOFF) new_state = CAN_STATE_BUS_OFF; /* controller problems, details in data[1] */ if (canid & CAN_ERR_CRTL) { u8 d1 = m->msg.can_msg.data[1]; if (d1 & CAN_ERR_CRTL_RX_OVERFLOW) net_stats->rx_over_errors++; /* controller state bits: if multiple are set the worst wins */ if (d1 & CAN_ERR_CRTL_ACTIVE) new_state = CAN_STATE_ERROR_ACTIVE; if (d1 & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING)) new_state = CAN_STATE_ERROR_WARNING; if (d1 & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE)) new_state = CAN_STATE_ERROR_PASSIVE; } /* protocol error, details in data[2] */ if (canid & CAN_ERR_PROT) { u8 d2 = m->msg.can_msg.data[2]; if (d2 & CAN_ERR_PROT_TX) net_stats->tx_errors++; else net_stats->rx_errors++; } /* no state change - we are done */ if (up->can.state == new_state) return false; /* we switched into a better state */ if (up->can.state > new_state) { up->can.state = new_state; return true; } /* we switched into a worse state */ up->can.state = new_state; switch (new_state) { case CAN_STATE_BUS_OFF: can_stats->bus_off++; can_bus_off(up->netdev); break; case CAN_STATE_ERROR_PASSIVE: can_stats->error_passive++; break; case CAN_STATE_ERROR_WARNING: can_stats->error_warning++; break; default: break; } return true; } /* Callback on reception of a can frame via the IN endpoint * * This function allocates an skb and transferres it to the Linux * network stack */ static void ucan_rx_can_msg(struct ucan_priv *up, struct ucan_message_in *m) { int len; canid_t canid; struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats = &up->netdev->stats; /* get the contents of the length field */ len = le16_to_cpu(m->len); /* check sanity */ if (len < UCAN_IN_HDR_SIZE + sizeof(m->msg.can_msg.id)) { netdev_warn(up->netdev, "invalid input message len: %d\n", len); return; } /* handle error frames */ canid = le32_to_cpu(m->msg.can_msg.id); if (canid & CAN_ERR_FLAG) { bool busstate_changed = ucan_handle_error_frame(up, m, canid); /* if berr-reporting is off only state changes get through */ if (!(up->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) && !busstate_changed) return; } else { canid_t canid_mask; /* compute the mask for canid */ canid_mask = CAN_RTR_FLAG; if (canid & CAN_EFF_FLAG) canid_mask |= CAN_EFF_MASK | CAN_EFF_FLAG; else canid_mask |= CAN_SFF_MASK; if (canid & ~canid_mask) netdev_warn(up->netdev, "unexpected bits set (canid %x, mask %x)", canid, canid_mask); canid &= canid_mask; } /* allocate skb */ skb = alloc_can_skb(up->netdev, &cf); if (!skb) return; /* fill the can frame */ cf->can_id = canid; /* compute DLC taking RTR_FLAG into account */ cf->len = ucan_can_cc_dlc2len(&m->msg.can_msg, len); /* copy the payload of non RTR frames */ if (!(cf->can_id & CAN_RTR_FLAG) || (cf->can_id & CAN_ERR_FLAG)) memcpy(cf->data, m->msg.can_msg.data, cf->len); /* don't count error frames as real packets */ if (!(cf->can_id & CAN_ERR_FLAG)) { stats->rx_packets++; if (!(cf->can_id & CAN_RTR_FLAG)) stats->rx_bytes += cf->len; } /* pass it to Linux */ netif_rx(skb); } /* callback indicating completed transmission */ static void ucan_tx_complete_msg(struct 
ucan_priv *up, struct ucan_message_in *m) { unsigned long flags; u16 count, i; u8 echo_index; u16 len = le16_to_cpu(m->len); struct ucan_urb_context *context; if (len < UCAN_IN_HDR_SIZE || (len % 2 != 0)) { netdev_err(up->netdev, "invalid tx complete length\n"); return; } count = (len - UCAN_IN_HDR_SIZE) / 2; for (i = 0; i < count; i++) { /* we did not submit such echo ids */ echo_index = m->msg.can_tx_complete_msg[i].echo_index; if (echo_index >= up->device_info.tx_fifo) { up->netdev->stats.tx_errors++; netdev_err(up->netdev, "invalid echo_index %d received\n", echo_index); continue; } /* gather information from the context */ context = &up->context_array[echo_index]; /* Release context and restart queue if necessary. * Also check if the context was allocated */ if (!ucan_release_context(up, context)) continue; spin_lock_irqsave(&up->echo_skb_lock, flags); if (m->msg.can_tx_complete_msg[i].flags & UCAN_TX_COMPLETE_SUCCESS) { /* update statistics */ up->netdev->stats.tx_packets++; up->netdev->stats.tx_bytes += can_get_echo_skb(up->netdev, echo_index, NULL); } else { up->netdev->stats.tx_dropped++; can_free_echo_skb(up->netdev, echo_index, NULL); } spin_unlock_irqrestore(&up->echo_skb_lock, flags); } } /* callback on reception of a USB message */ static void ucan_read_bulk_callback(struct urb *urb) { int ret; int pos; struct ucan_priv *up = urb->context; struct net_device *netdev = up->netdev; struct ucan_message_in *m; /* the device is not up and the driver should not receive any * data on the bulk in pipe */ if (WARN_ON(!up->context_array)) { usb_free_coherent(up->udev, up->in_ep_size, urb->transfer_buffer, urb->transfer_dma); return; } /* check URB status */ switch (urb->status) { case 0: break; case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: case -ETIME: /* urb is not resubmitted -> free dma data */ usb_free_coherent(up->udev, up->in_ep_size, urb->transfer_buffer, urb->transfer_dma); netdev_dbg(up->netdev, "not resubmitting urb; status: %d\n", urb->status); return; default: goto resubmit; } /* sanity check */ if (!netif_device_present(netdev)) return; /* iterate over input */ pos = 0; while (pos < urb->actual_length) { int len; /* check sanity (length of header) */ if ((urb->actual_length - pos) < UCAN_IN_HDR_SIZE) { netdev_warn(up->netdev, "invalid message (short; no hdr; l:%d)\n", urb->actual_length); goto resubmit; } /* setup the message address */ m = (struct ucan_message_in *) ((u8 *)urb->transfer_buffer + pos); len = le16_to_cpu(m->len); /* check sanity (length of content) */ if (urb->actual_length - pos < len) { netdev_warn(up->netdev, "invalid message (short; no data; l:%d)\n", urb->actual_length); print_hex_dump(KERN_WARNING, "raw data: ", DUMP_PREFIX_ADDRESS, 16, 1, urb->transfer_buffer, urb->actual_length, true); goto resubmit; } switch (m->type) { case UCAN_IN_RX: ucan_rx_can_msg(up, m); break; case UCAN_IN_TX_COMPLETE: ucan_tx_complete_msg(up, m); break; default: netdev_warn(up->netdev, "invalid message (type; t:%d)\n", m->type); break; } /* proceed to next message */ pos += len; /* align to 4 byte boundary */ pos = round_up(pos, 4); } resubmit: /* resubmit urb when done */ usb_fill_bulk_urb(urb, up->udev, usb_rcvbulkpipe(up->udev, up->in_ep_addr), urb->transfer_buffer, up->in_ep_size, ucan_read_bulk_callback, up); usb_anchor_urb(urb, &up->rx_urbs); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret < 0) { netdev_err(up->netdev, "failed resubmitting read bulk urb: %d\n", ret); usb_unanchor_urb(urb); usb_free_coherent(up->udev, up->in_ep_size, urb->transfer_buffer, 
urb->transfer_dma); if (ret == -ENODEV) netif_device_detach(netdev); } } /* callback after transmission of a USB message */ static void ucan_write_bulk_callback(struct urb *urb) { unsigned long flags; struct ucan_priv *up; struct ucan_urb_context *context = urb->context; /* get the urb context */ if (WARN_ON_ONCE(!context)) return; /* free up our allocated buffer */ usb_free_coherent(urb->dev, sizeof(struct ucan_message_out), urb->transfer_buffer, urb->transfer_dma); up = context->up; if (WARN_ON_ONCE(!up)) return; /* sanity check */ if (!netif_device_present(up->netdev)) return; /* transmission failed (USB - the device will not send a TX complete) */ if (urb->status) { netdev_warn(up->netdev, "failed to transmit USB message to device: %d\n", urb->status); /* update counters an cleanup */ spin_lock_irqsave(&up->echo_skb_lock, flags); can_free_echo_skb(up->netdev, context - up->context_array, NULL); spin_unlock_irqrestore(&up->echo_skb_lock, flags); up->netdev->stats.tx_dropped++; /* release context and restart the queue if necessary */ if (!ucan_release_context(up, context)) netdev_err(up->netdev, "urb failed, failed to release context\n"); } } static void ucan_cleanup_rx_urbs(struct ucan_priv *up, struct urb **urbs) { int i; for (i = 0; i < UCAN_MAX_RX_URBS; i++) { if (urbs[i]) { usb_unanchor_urb(urbs[i]); usb_free_coherent(up->udev, up->in_ep_size, urbs[i]->transfer_buffer, urbs[i]->transfer_dma); usb_free_urb(urbs[i]); } } memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS); } static int ucan_prepare_and_anchor_rx_urbs(struct ucan_priv *up, struct urb **urbs) { int i; memset(urbs, 0, sizeof(*urbs) * UCAN_MAX_RX_URBS); for (i = 0; i < UCAN_MAX_RX_URBS; i++) { void *buf; urbs[i] = usb_alloc_urb(0, GFP_KERNEL); if (!urbs[i]) goto err; buf = usb_alloc_coherent(up->udev, up->in_ep_size, GFP_KERNEL, &urbs[i]->transfer_dma); if (!buf) { /* cleanup this urb */ usb_free_urb(urbs[i]); urbs[i] = NULL; goto err; } usb_fill_bulk_urb(urbs[i], up->udev, usb_rcvbulkpipe(up->udev, up->in_ep_addr), buf, up->in_ep_size, ucan_read_bulk_callback, up); urbs[i]->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urbs[i], &up->rx_urbs); } return 0; err: /* cleanup other unsubmitted urbs */ ucan_cleanup_rx_urbs(up, urbs); return -ENOMEM; } /* Submits rx urbs with the semantic: Either submit all, or cleanup * everything. I case of errors submitted urbs are killed and all urbs in * the array are freed. I case of no errors every entry in the urb * array is set to NULL. */ static int ucan_submit_rx_urbs(struct ucan_priv *up, struct urb **urbs) { int i, ret; /* Iterate over all urbs to submit. On success remove the urb * from the list. 
 */
	for (i = 0; i < UCAN_MAX_RX_URBS; i++) {
		ret = usb_submit_urb(urbs[i], GFP_KERNEL);
		if (ret) {
			netdev_err(up->netdev,
				   "could not submit urb; code: %d\n", ret);
			goto err;
		}

		/* Anchor URB and drop reference, USB core will take
		 * care of freeing it
		 */
		usb_free_urb(urbs[i]);
		urbs[i] = NULL;
	}
	return 0;

err:
	/* Cleanup unsubmitted urbs */
	ucan_cleanup_rx_urbs(up, urbs);
	/* Kill urbs that are already submitted */
	usb_kill_anchored_urbs(&up->rx_urbs);
	return ret;
}

/* Open the network device */
static int ucan_open(struct net_device *netdev)
{
	int ret, ret_cleanup;
	u16 ctrlmode;
	struct urb *urbs[UCAN_MAX_RX_URBS];
	struct ucan_priv *up = netdev_priv(netdev);

	ret = ucan_alloc_context_array(up);
	if (ret)
		return ret;

	/* Allocate and prepare IN URBS - allocated and anchored
	 * urbs are stored in urbs[] for cleanup
	 */
	ret = ucan_prepare_and_anchor_rx_urbs(up, urbs);
	if (ret)
		goto err_contexts;

	/* Check the control mode */
	ctrlmode = 0;
	if (up->can.ctrlmode & CAN_CTRLMODE_LOOPBACK)
		ctrlmode |= UCAN_MODE_LOOPBACK;
	if (up->can.ctrlmode & CAN_CTRLMODE_LISTENONLY)
		ctrlmode |= UCAN_MODE_SILENT;
	if (up->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		ctrlmode |= UCAN_MODE_3_SAMPLES;
	if (up->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		ctrlmode |= UCAN_MODE_ONE_SHOT;

	/* Enable this in any case - filtering is done within the
	 * receive path
	 */
	ctrlmode |= UCAN_MODE_BERR_REPORT;
	up->ctl_msg_buffer->cmd_start.mode = cpu_to_le16(ctrlmode);

	/* Driver is ready to receive data - start the USB device */
	ret = ucan_ctrl_command_out(up, UCAN_COMMAND_START, 0, 2);
	if (ret < 0) {
		netdev_err(up->netdev,
			   "could not start device, code: %d\n", ret);
		goto err_reset;
	}

	/* Call CAN layer open */
	ret = open_candev(netdev);
	if (ret)
		goto err_stop;

	/* Driver is ready to receive data. Submit RX URBS */
	ret = ucan_submit_rx_urbs(up, urbs);
	if (ret)
		goto err_stop;

	up->can.state = CAN_STATE_ERROR_ACTIVE;

	/* Start the network queue */
	netif_start_queue(netdev);

	return 0;

err_stop:
	/* The device has already been started, stop it */
	ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0);
	if (ret_cleanup < 0)
		netdev_err(up->netdev,
			   "could not stop device, code: %d\n", ret_cleanup);

err_reset:
	/* The device might have received data, reset it for
	 * consistent state
	 */
	ret_cleanup = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0);
	if (ret_cleanup < 0)
		netdev_err(up->netdev,
			   "could not reset device, code: %d\n", ret_cleanup);

	/* clean up unsubmitted urbs */
	ucan_cleanup_rx_urbs(up, urbs);

err_contexts:
	ucan_release_context_array(up);
	return ret;
}

static struct urb *ucan_prepare_tx_urb(struct ucan_priv *up,
				       struct ucan_urb_context *context,
				       struct can_frame *cf,
				       u8 echo_index)
{
	int mlen;
	struct urb *urb;
	struct ucan_message_out *m;

	/* create a URB, and a buffer for it, and copy the data to the URB */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(up->netdev, "no memory left for URBs\n");
		return NULL;
	}

	m = usb_alloc_coherent(up->udev,
			       sizeof(struct ucan_message_out),
			       GFP_ATOMIC,
			       &urb->transfer_dma);
	if (!m) {
		netdev_err(up->netdev, "no memory left for USB buffer\n");
		usb_free_urb(urb);
		return NULL;
	}

	/* build the USB message */
	m->type = UCAN_OUT_TX;
	m->msg.can_msg.id = cpu_to_le32(cf->can_id);

	if (cf->can_id & CAN_RTR_FLAG) {
		mlen = UCAN_OUT_HDR_SIZE +
			offsetof(struct ucan_can_msg, dlc) +
			sizeof(m->msg.can_msg.dlc);
		m->msg.can_msg.dlc = cf->len;
	} else {
		mlen = UCAN_OUT_HDR_SIZE +
			sizeof(m->msg.can_msg.id) + cf->len;
		memcpy(m->msg.can_msg.data, cf->data, cf->len);
	}
	m->len = cpu_to_le16(mlen);

	m->subtype = echo_index;

	/* build the urb */
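	/* Worked length example (illustrative sketch based on the struct
	 * layouts above, not part of the original source):
	 * UCAN_OUT_HDR_SIZE is 4 bytes (the 2-byte len plus type and
	 * subtype). A classic data frame with cf->len == 8 therefore
	 * yields mlen = 4 + 4 (id) + 8 = 16 bytes, while an RTR frame
	 * carries only the id plus the 1-byte dlc: mlen = 4 + 4 + 1 = 9.
	 */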
usb_fill_bulk_urb(urb, up->udev, usb_sndbulkpipe(up->udev, up->out_ep_addr), m, mlen, ucan_write_bulk_callback, context); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; return urb; } static void ucan_clean_up_tx_urb(struct ucan_priv *up, struct urb *urb) { usb_free_coherent(up->udev, sizeof(struct ucan_message_out), urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); } /* callback when Linux needs to send a can frame */ static netdev_tx_t ucan_start_xmit(struct sk_buff *skb, struct net_device *netdev) { unsigned long flags; int ret; u8 echo_index; struct urb *urb; struct ucan_urb_context *context; struct ucan_priv *up = netdev_priv(netdev); struct can_frame *cf = (struct can_frame *)skb->data; /* check skb */ if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* allocate a context and slow down tx path, if fifo state is low */ context = ucan_alloc_context(up); echo_index = context - up->context_array; if (WARN_ON_ONCE(!context)) return NETDEV_TX_BUSY; /* prepare urb for transmission */ urb = ucan_prepare_tx_urb(up, context, cf, echo_index); if (!urb) goto drop; /* put the skb on can loopback stack */ spin_lock_irqsave(&up->echo_skb_lock, flags); can_put_echo_skb(skb, up->netdev, echo_index, 0); spin_unlock_irqrestore(&up->echo_skb_lock, flags); /* transmit it */ usb_anchor_urb(urb, &up->tx_urbs); ret = usb_submit_urb(urb, GFP_ATOMIC); /* cleanup urb */ if (ret) { /* on error, clean up */ usb_unanchor_urb(urb); ucan_clean_up_tx_urb(up, urb); if (!ucan_release_context(up, context)) netdev_err(up->netdev, "xmit err: failed to release context\n"); /* remove the skb from the echo stack - this also * frees the skb */ spin_lock_irqsave(&up->echo_skb_lock, flags); can_free_echo_skb(up->netdev, echo_index, NULL); spin_unlock_irqrestore(&up->echo_skb_lock, flags); if (ret == -ENODEV) { netif_device_detach(up->netdev); } else { netdev_warn(up->netdev, "xmit err: failed to submit urb %d\n", ret); up->netdev->stats.tx_dropped++; } return NETDEV_TX_OK; } netif_trans_update(netdev); /* release ref, as we do not need the urb anymore */ usb_free_urb(urb); return NETDEV_TX_OK; drop: if (!ucan_release_context(up, context)) netdev_err(up->netdev, "xmit drop: failed to release context\n"); dev_kfree_skb(skb); up->netdev->stats.tx_dropped++; return NETDEV_TX_OK; } /* Device goes down * * Clean up used resources */ static int ucan_close(struct net_device *netdev) { int ret; struct ucan_priv *up = netdev_priv(netdev); up->can.state = CAN_STATE_STOPPED; /* stop sending data */ usb_kill_anchored_urbs(&up->tx_urbs); /* stop receiving data */ usb_kill_anchored_urbs(&up->rx_urbs); /* stop and reset can device */ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_STOP, 0, 0); if (ret < 0) netdev_err(up->netdev, "could not stop device, code: %d\n", ret); ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); if (ret < 0) netdev_err(up->netdev, "could not reset device, code: %d\n", ret); netif_stop_queue(netdev); ucan_release_context_array(up); close_candev(up->netdev); return 0; } /* CAN driver callbacks */ static const struct net_device_ops ucan_netdev_ops = { .ndo_open = ucan_open, .ndo_stop = ucan_close, .ndo_start_xmit = ucan_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ucan_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; /* Request to set bittiming * * This function generates an USB set bittiming message and transmits * it to the device */ static int ucan_set_bittiming(struct net_device *netdev) { int ret; struct ucan_priv *up = netdev_priv(netdev); 
struct ucan_ctl_cmd_set_bittiming *cmd_set_bittiming; cmd_set_bittiming = &up->ctl_msg_buffer->cmd_set_bittiming; cmd_set_bittiming->tq = cpu_to_le32(up->can.bittiming.tq); cmd_set_bittiming->brp = cpu_to_le16(up->can.bittiming.brp); cmd_set_bittiming->sample_point = cpu_to_le16(up->can.bittiming.sample_point); cmd_set_bittiming->prop_seg = up->can.bittiming.prop_seg; cmd_set_bittiming->phase_seg1 = up->can.bittiming.phase_seg1; cmd_set_bittiming->phase_seg2 = up->can.bittiming.phase_seg2; cmd_set_bittiming->sjw = up->can.bittiming.sjw; ret = ucan_ctrl_command_out(up, UCAN_COMMAND_SET_BITTIMING, 0, sizeof(*cmd_set_bittiming)); return (ret < 0) ? ret : 0; } /* Restart the device to get it out of BUS-OFF state. * Called when the user runs "ip link set can1 type can restart". */ static int ucan_set_mode(struct net_device *netdev, enum can_mode mode) { int ret; unsigned long flags; struct ucan_priv *up = netdev_priv(netdev); switch (mode) { case CAN_MODE_START: netdev_dbg(up->netdev, "restarting device\n"); ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESTART, 0, 0); up->can.state = CAN_STATE_ERROR_ACTIVE; /* check if queue can be restarted, * up->available_tx_urbs must be protected by the * lock */ spin_lock_irqsave(&up->context_lock, flags); if (up->available_tx_urbs > 0) netif_wake_queue(up->netdev); spin_unlock_irqrestore(&up->context_lock, flags); return ret; default: return -EOPNOTSUPP; } } /* Probe the device, reset it and gather general device information */ static int ucan_probe(struct usb_interface *intf, const struct usb_device_id *id) { int ret; int i; u32 protocol_version; struct usb_device *udev; struct net_device *netdev; struct usb_host_interface *iface_desc; struct ucan_priv *up; struct usb_endpoint_descriptor *ep; u16 in_ep_size; u16 out_ep_size; u8 in_ep_addr; u8 out_ep_addr; union ucan_ctl_payload *ctl_msg_buffer; char firmware_str[sizeof(union ucan_ctl_payload) + 1]; udev = interface_to_usbdev(intf); /* Stage 1 - Interface Parsing * --------------------------- * * Identifie the device USB interface descriptor and its * endpoints. Probing is aborted on errors. 
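	 *
	 * The interface must expose exactly two bulk endpoints (one IN,
	 * one OUT) whose wMaxPacketSize can hold the ucan message
	 * structures; anything else is treated as incompatible (or
	 * outdated) device firmware.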
*/ /* check if the interface is sane */ iface_desc = intf->cur_altsetting; if (!iface_desc) return -ENODEV; dev_info(&udev->dev, "%s: probing device on interface #%d\n", UCAN_DRIVER_NAME, iface_desc->desc.bInterfaceNumber); /* interface sanity check */ if (iface_desc->desc.bNumEndpoints != 2) { dev_err(&udev->dev, "%s: invalid EP count (%d)", UCAN_DRIVER_NAME, iface_desc->desc.bNumEndpoints); goto err_firmware_needs_update; } /* check interface endpoints */ in_ep_addr = 0; out_ep_addr = 0; in_ep_size = 0; out_ep_size = 0; for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) { ep = &iface_desc->endpoint[i].desc; if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) != 0) && ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) { /* In Endpoint */ in_ep_addr = ep->bEndpointAddress; in_ep_addr &= USB_ENDPOINT_NUMBER_MASK; in_ep_size = le16_to_cpu(ep->wMaxPacketSize); } else if (((ep->bEndpointAddress & USB_ENDPOINT_DIR_MASK) == 0) && ((ep->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) { /* Out Endpoint */ out_ep_addr = ep->bEndpointAddress; out_ep_addr &= USB_ENDPOINT_NUMBER_MASK; out_ep_size = le16_to_cpu(ep->wMaxPacketSize); } } /* check if interface is sane */ if (!in_ep_addr || !out_ep_addr) { dev_err(&udev->dev, "%s: invalid endpoint configuration\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } if (in_ep_size < sizeof(struct ucan_message_in)) { dev_err(&udev->dev, "%s: invalid in_ep MaxPacketSize\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } if (out_ep_size < sizeof(struct ucan_message_out)) { dev_err(&udev->dev, "%s: invalid out_ep MaxPacketSize\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } /* Stage 2 - Device Identification * ------------------------------- * * The device interface seems to be a ucan device. Do further * compatibility checks. On error probing is aborted, on * success this stage leaves the ctl_msg_buffer with the * reported contents of a GET_INFO command (supported * bittimings, tx_fifo depth). This information is used in * Stage 3 for the final driver initialisation. 
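	 *
	 * Compatibility here means: GET_PROTOCOL_VERSION must succeed
	 * and report a version within the supported range, and GET_INFO
	 * must return a plausible (non-zero) tx_fifo depth.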
*/ /* Prepare Memory for control transfers */ ctl_msg_buffer = devm_kzalloc(&udev->dev, sizeof(union ucan_ctl_payload), GFP_KERNEL); if (!ctl_msg_buffer) { dev_err(&udev->dev, "%s: failed to allocate control pipe memory\n", UCAN_DRIVER_NAME); return -ENOMEM; } /* get protocol version * * note: ucan_ctrl_command_* wrappers cannot be used yet * because `up` is initialised in Stage 3 */ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), UCAN_COMMAND_GET, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, UCAN_COMMAND_GET_PROTOCOL_VERSION, iface_desc->desc.bInterfaceNumber, ctl_msg_buffer, sizeof(union ucan_ctl_payload), UCAN_USB_CTL_PIPE_TIMEOUT); /* older firmware version do not support this command - those * are not supported by this drive */ if (ret != 4) { dev_err(&udev->dev, "%s: could not read protocol version, ret=%d\n", UCAN_DRIVER_NAME, ret); if (ret >= 0) ret = -EINVAL; goto err_firmware_needs_update; } /* this driver currently supports protocol version 3 only */ protocol_version = le32_to_cpu(ctl_msg_buffer->cmd_get_protocol_version.version); if (protocol_version < UCAN_PROTOCOL_VERSION_MIN || protocol_version > UCAN_PROTOCOL_VERSION_MAX) { dev_err(&udev->dev, "%s: device protocol version %d is not supported\n", UCAN_DRIVER_NAME, protocol_version); goto err_firmware_needs_update; } /* request the device information and store it in ctl_msg_buffer * * note: ucan_ctrl_command_* wrappers cannot be used yet * because `up` is initialised in Stage 3 */ ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), UCAN_COMMAND_GET, USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE, UCAN_COMMAND_GET_INFO, iface_desc->desc.bInterfaceNumber, ctl_msg_buffer, sizeof(ctl_msg_buffer->cmd_get_device_info), UCAN_USB_CTL_PIPE_TIMEOUT); if (ret < 0) { dev_err(&udev->dev, "%s: failed to retrieve device info\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } if (ret < sizeof(ctl_msg_buffer->cmd_get_device_info)) { dev_err(&udev->dev, "%s: device reported invalid device info\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } if (ctl_msg_buffer->cmd_get_device_info.tx_fifo == 0) { dev_err(&udev->dev, "%s: device reported invalid tx-fifo size\n", UCAN_DRIVER_NAME); goto err_firmware_needs_update; } /* Stage 3 - Driver Initialisation * ------------------------------- * * Register device to Linux, prepare private structures and * reset the device. 
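	 *
	 * The candev is allocated with the tx_fifo depth reported in
	 * Stage 2 as its echo-skb count, so the number of echo skbs
	 * matches the number of frames the device can hold in flight.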
*/ /* allocate driver resources */ netdev = alloc_candev(sizeof(struct ucan_priv), ctl_msg_buffer->cmd_get_device_info.tx_fifo); if (!netdev) { dev_err(&udev->dev, "%s: cannot allocate candev\n", UCAN_DRIVER_NAME); return -ENOMEM; } up = netdev_priv(netdev); /* initialize data */ up->udev = udev; up->netdev = netdev; up->intf_index = iface_desc->desc.bInterfaceNumber; up->in_ep_addr = in_ep_addr; up->out_ep_addr = out_ep_addr; up->in_ep_size = in_ep_size; up->ctl_msg_buffer = ctl_msg_buffer; up->context_array = NULL; up->available_tx_urbs = 0; up->can.state = CAN_STATE_STOPPED; up->can.bittiming_const = &up->device_info.bittiming_const; up->can.do_set_bittiming = ucan_set_bittiming; up->can.do_set_mode = &ucan_set_mode; spin_lock_init(&up->context_lock); spin_lock_init(&up->echo_skb_lock); netdev->netdev_ops = &ucan_netdev_ops; netdev->ethtool_ops = &ucan_ethtool_ops; usb_set_intfdata(intf, up); SET_NETDEV_DEV(netdev, &intf->dev); /* parse device information * the data retrieved in Stage 2 is still available in * up->ctl_msg_buffer */ ucan_parse_device_info(up, &ctl_msg_buffer->cmd_get_device_info); /* just print some device information - if available */ ret = ucan_device_request_in(up, UCAN_DEVICE_GET_FW_STRING, 0, sizeof(union ucan_ctl_payload)); if (ret > 0) { /* copy string while ensuring zero termination */ strscpy(firmware_str, up->ctl_msg_buffer->raw, sizeof(union ucan_ctl_payload) + 1); } else { strcpy(firmware_str, "unknown"); } /* device is compatible, reset it */ ret = ucan_ctrl_command_out(up, UCAN_COMMAND_RESET, 0, 0); if (ret < 0) goto err_free_candev; init_usb_anchor(&up->rx_urbs); init_usb_anchor(&up->tx_urbs); up->can.state = CAN_STATE_STOPPED; /* register the device */ ret = register_candev(netdev); if (ret) goto err_free_candev; /* initialisation complete, log device info */ netdev_info(up->netdev, "registered device\n"); netdev_info(up->netdev, "firmware string: %s\n", firmware_str); /* success */ return 0; err_free_candev: free_candev(netdev); return ret; err_firmware_needs_update: dev_err(&udev->dev, "%s: probe failed; try to update the device firmware\n", UCAN_DRIVER_NAME); return -ENODEV; } /* disconnect the device */ static void ucan_disconnect(struct usb_interface *intf) { struct ucan_priv *up = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (up) { unregister_candev(up->netdev); free_candev(up->netdev); } } static struct usb_device_id ucan_table[] = { /* Mule (soldered onto compute modules) */ {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425a, 0)}, /* Seal (standalone USB stick) */ {USB_DEVICE_INTERFACE_NUMBER(0x2294, 0x425b, 0)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ucan_table); /* driver callbacks */ static struct usb_driver ucan_driver = { .name = UCAN_DRIVER_NAME, .probe = ucan_probe, .disconnect = ucan_disconnect, .id_table = ucan_table, }; module_usb_driver(ucan_driver); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Martin Elshuber <[email protected]>"); MODULE_AUTHOR("Jakob Unterwurzacher <[email protected]>"); MODULE_DESCRIPTION("Driver for Theobroma Systems UCAN devices");
linux-master
drivers/net/can/usb/ucan.c
// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for EMS Dr. Thomas Wuensche CPC-USB/ARM7 * * Copyright (C) 2004-2009 EMS Dr. Thomas Wuensche */ #include <linux/ethtool.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> MODULE_AUTHOR("Sebastian Haas <[email protected]>"); MODULE_DESCRIPTION("CAN driver for EMS Dr. Thomas Wuensche CAN/USB interfaces"); MODULE_LICENSE("GPL v2"); /* Control-Values for CPC_Control() Command Subject Selection */ #define CONTR_CAN_MESSAGE 0x04 #define CONTR_CAN_STATE 0x0C #define CONTR_BUS_ERROR 0x1C /* Control Command Actions */ #define CONTR_CONT_OFF 0 #define CONTR_CONT_ON 1 #define CONTR_ONCE 2 /* Messages from CPC to PC */ #define CPC_MSG_TYPE_CAN_FRAME 1 /* CAN data frame */ #define CPC_MSG_TYPE_RTR_FRAME 8 /* CAN remote frame */ #define CPC_MSG_TYPE_CAN_PARAMS 12 /* Actual CAN parameters */ #define CPC_MSG_TYPE_CAN_STATE 14 /* CAN state message */ #define CPC_MSG_TYPE_EXT_CAN_FRAME 16 /* Extended CAN data frame */ #define CPC_MSG_TYPE_EXT_RTR_FRAME 17 /* Extended remote frame */ #define CPC_MSG_TYPE_CONTROL 19 /* change interface behavior */ #define CPC_MSG_TYPE_CONFIRM 20 /* command processed confirmation */ #define CPC_MSG_TYPE_OVERRUN 21 /* overrun events */ #define CPC_MSG_TYPE_CAN_FRAME_ERROR 23 /* detected bus errors */ #define CPC_MSG_TYPE_ERR_COUNTER 25 /* RX/TX error counter */ /* Messages from the PC to the CPC interface */ #define CPC_CMD_TYPE_CAN_FRAME 1 /* CAN data frame */ #define CPC_CMD_TYPE_CONTROL 3 /* control of interface behavior */ #define CPC_CMD_TYPE_CAN_PARAMS 6 /* set CAN parameters */ #define CPC_CMD_TYPE_RTR_FRAME 13 /* CAN remote frame */ #define CPC_CMD_TYPE_CAN_STATE 14 /* CAN state message */ #define CPC_CMD_TYPE_EXT_CAN_FRAME 15 /* Extended CAN data frame */ #define CPC_CMD_TYPE_EXT_RTR_FRAME 16 /* Extended CAN remote frame */ #define CPC_CMD_TYPE_CAN_EXIT 200 /* exit the CAN */ #define CPC_CMD_TYPE_INQ_ERR_COUNTER 25 /* request the CAN error counters */ #define CPC_CMD_TYPE_CLEAR_MSG_QUEUE 8 /* clear CPC_MSG queue */ #define CPC_CMD_TYPE_CLEAR_CMD_QUEUE 28 /* clear CPC_CMD queue */ #define CPC_CC_TYPE_SJA1000 2 /* Philips basic CAN controller */ #define CPC_CAN_ECODE_ERRFRAME 0x01 /* Ecode type */ /* Overrun types */ #define CPC_OVR_EVENT_CAN 0x01 #define CPC_OVR_EVENT_CANSTATE 0x02 #define CPC_OVR_EVENT_BUSERROR 0x04 /* * If the CAN controller lost a message we indicate it with the highest bit * set in the count field. */ #define CPC_OVR_HW 0x80 /* Size of the "struct ems_cpc_msg" without the union */ #define CPC_MSG_HEADER_LEN 11 #define CPC_CAN_MSG_MIN_SIZE 5 /* Define these values to match your devices */ #define USB_CPCUSB_VENDOR_ID 0x12D6 #define USB_CPCUSB_ARM7_PRODUCT_ID 0x0444 /* Mode register NXP LPC2119/SJA1000 CAN Controller */ #define SJA1000_MOD_NORMAL 0x00 #define SJA1000_MOD_RM 0x01 /* ECC register NXP LPC2119/SJA1000 CAN Controller */ #define SJA1000_ECC_SEG 0x1F #define SJA1000_ECC_DIR 0x20 #define SJA1000_ECC_ERR 0x06 #define SJA1000_ECC_BIT 0x00 #define SJA1000_ECC_FORM 0x40 #define SJA1000_ECC_STUFF 0x80 #define SJA1000_ECC_MASK 0xc0 /* Status register content */ #define SJA1000_SR_BS 0x80 #define SJA1000_SR_ES 0x40 #define SJA1000_DEFAULT_OUTPUT_CONTROL 0xDA /* * The device actually uses a 16MHz clock to generate the CAN clock * but it expects SJA1000 bit settings based on 8MHz (is internally * converted). 
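 *
 * Example: with can.clock.freq = 8 MHz, brp = 1, sjw = 1,
 * prop_seg + phase_seg1 = 13 and phase_seg2 = 2, the nominal bit
 * takes 16 time quanta (500 kbit/s); ems_usb_set_bittiming()
 * encodes this as BTR0 = 0x00, BTR1 = 0x1c.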
*/ #define EMS_USB_ARM7_CLOCK 8000000 #define CPC_TX_QUEUE_TRIGGER_LOW 25 #define CPC_TX_QUEUE_TRIGGER_HIGH 35 /* * CAN-Message representation in a CPC_MSG. Message object type is * CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or * CPC_MSG_TYPE_EXT_CAN_FRAME or CPC_MSG_TYPE_EXT_RTR_FRAME. */ struct cpc_can_msg { __le32 id; u8 length; u8 msg[8]; }; /* Representation of the CAN parameters for the SJA1000 controller */ struct cpc_sja1000_params { u8 mode; u8 acc_code0; u8 acc_code1; u8 acc_code2; u8 acc_code3; u8 acc_mask0; u8 acc_mask1; u8 acc_mask2; u8 acc_mask3; u8 btr0; u8 btr1; u8 outp_contr; }; /* CAN params message representation */ struct cpc_can_params { u8 cc_type; /* Will support M16C CAN controller in the future */ union { struct cpc_sja1000_params sja1000; } cc_params; }; /* Structure for confirmed message handling */ struct cpc_confirm { u8 error; /* error code */ }; /* Structure for overrun conditions */ struct cpc_overrun { u8 event; u8 count; }; /* SJA1000 CAN errors (compatible to NXP LPC2119) */ struct cpc_sja1000_can_error { u8 ecc; u8 rxerr; u8 txerr; }; /* structure for CAN error conditions */ struct cpc_can_error { u8 ecode; struct { u8 cc_type; /* Other controllers may also provide error code capture regs */ union { struct cpc_sja1000_can_error sja1000; } regs; } cc; }; /* * Structure containing RX/TX error counter. This structure is used to request * the values of the CAN controllers TX and RX error counter. */ struct cpc_can_err_counter { u8 rx; u8 tx; }; /* Main message type used between library and application */ struct __packed ems_cpc_msg { u8 type; /* type of message */ u8 length; /* length of data within union 'msg' */ u8 msgid; /* confirmation handle */ __le32 ts_sec; /* timestamp in seconds */ __le32 ts_nsec; /* timestamp in nano seconds */ union __packed { u8 generic[64]; struct cpc_can_msg can_msg; struct cpc_can_params can_params; struct cpc_confirm confirmation; struct cpc_overrun overrun; struct cpc_can_error error; struct cpc_can_err_counter err_counter; u8 can_state; } msg; }; /* * Table of devices that work with this driver * NOTE: This driver supports only CPC-USB/ARM7 (LPC2119) yet. 
*/ static struct usb_device_id ems_usb_table[] = { {USB_DEVICE(USB_CPCUSB_VENDOR_ID, USB_CPCUSB_ARM7_PRODUCT_ID)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ems_usb_table); #define RX_BUFFER_SIZE 64 #define CPC_HEADER_SIZE 4 #define INTR_IN_BUFFER_SIZE 4 #define MAX_RX_URBS 10 #define MAX_TX_URBS 10 struct ems_usb; struct ems_tx_urb_context { struct ems_usb *dev; u32 echo_index; }; struct ems_usb { struct can_priv can; /* must be the first member */ struct sk_buff *echo_skb[MAX_TX_URBS]; struct usb_device *udev; struct net_device *netdev; atomic_t active_tx_urbs; struct usb_anchor tx_submitted; struct ems_tx_urb_context tx_contexts[MAX_TX_URBS]; struct usb_anchor rx_submitted; struct urb *intr_urb; u8 *tx_msg_buffer; u8 *intr_in_buffer; unsigned int free_slots; /* remember number of available slots */ struct ems_cpc_msg active_params; /* active controller parameters */ void *rxbuf[MAX_RX_URBS]; dma_addr_t rxbuf_dma[MAX_RX_URBS]; }; static void ems_usb_read_interrupt_callback(struct urb *urb) { struct ems_usb *dev = urb->context; struct net_device *netdev = dev->netdev; int err; if (!netif_device_present(netdev)) return; switch (urb->status) { case 0: dev->free_slots = dev->intr_in_buffer[1]; if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH && netif_queue_stopped(netdev)) netif_wake_queue(netdev); break; case -ECONNRESET: /* unlink */ case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: return; default: netdev_info(netdev, "Rx interrupt aborted %d\n", urb->status); break; } err = usb_submit_urb(urb, GFP_ATOMIC); if (err == -ENODEV) netif_device_detach(netdev); else if (err) netdev_err(netdev, "failed resubmitting intr urb: %d\n", err); } static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) { struct can_frame *cf; struct sk_buff *skb; int i; struct net_device_stats *stats = &dev->netdev->stats; skb = alloc_can_skb(dev->netdev, &cf); if (skb == NULL) return; cf->can_id = le32_to_cpu(msg->msg.can_msg.id); cf->len = can_cc_dlc2len(msg->msg.can_msg.length & 0xF); if (msg->type == CPC_MSG_TYPE_EXT_CAN_FRAME || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) cf->can_id |= CAN_EFF_FLAG; if (msg->type == CPC_MSG_TYPE_RTR_FRAME || msg->type == CPC_MSG_TYPE_EXT_RTR_FRAME) { cf->can_id |= CAN_RTR_FLAG; } else { for (i = 0; i < cf->len; i++) cf->data[i] = msg->msg.can_msg.msg[i]; stats->rx_bytes += cf->len; } stats->rx_packets++; netif_rx(skb); } static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg) { struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats = &dev->netdev->stats; skb = alloc_can_err_skb(dev->netdev, &cf); if (skb == NULL) return; if (msg->type == CPC_MSG_TYPE_CAN_STATE) { u8 state = msg->msg.can_state; if (state & SJA1000_SR_BS) { dev->can.state = CAN_STATE_BUS_OFF; cf->can_id |= CAN_ERR_BUSOFF; dev->can.can_stats.bus_off++; can_bus_off(dev->netdev); } else if (state & SJA1000_SR_ES) { dev->can.state = CAN_STATE_ERROR_WARNING; dev->can.can_stats.error_warning++; } else { dev->can.state = CAN_STATE_ERROR_ACTIVE; dev->can.can_stats.error_passive++; } } else if (msg->type == CPC_MSG_TYPE_CAN_FRAME_ERROR) { u8 ecc = msg->msg.error.cc.regs.sja1000.ecc; u8 txerr = msg->msg.error.cc.regs.sja1000.txerr; u8 rxerr = msg->msg.error.cc.regs.sja1000.rxerr; /* bus error interrupt */ dev->can.can_stats.bus_error++; stats->rx_errors++; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; switch (ecc & SJA1000_ECC_MASK) { case SJA1000_ECC_BIT: cf->data[2] |= CAN_ERR_PROT_BIT; break; case SJA1000_ECC_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; 
break; case SJA1000_ECC_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: cf->data[3] = ecc & SJA1000_ECC_SEG; break; } /* Error occurred during transmission? */ if ((ecc & SJA1000_ECC_DIR) == 0) cf->data[2] |= CAN_ERR_PROT_TX; if (dev->can.state == CAN_STATE_ERROR_WARNING || dev->can.state == CAN_STATE_ERROR_PASSIVE) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; } } else if (msg->type == CPC_MSG_TYPE_OVERRUN) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; } netif_rx(skb); } /* * callback for bulk IN urb */ static void ems_usb_read_bulk_callback(struct urb *urb) { struct ems_usb *dev = urb->context; struct net_device *netdev; int retval; netdev = dev->netdev; if (!netif_device_present(netdev)) return; switch (urb->status) { case 0: /* success */ break; case -ENOENT: return; default: netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status); goto resubmit_urb; } if (urb->actual_length > CPC_HEADER_SIZE) { struct ems_cpc_msg *msg; u8 *ibuf = urb->transfer_buffer; u8 msg_count, start; msg_count = ibuf[0] & ~0x80; start = CPC_HEADER_SIZE; while (msg_count) { msg = (struct ems_cpc_msg *)&ibuf[start]; switch (msg->type) { case CPC_MSG_TYPE_CAN_STATE: /* Process CAN state changes */ ems_usb_rx_err(dev, msg); break; case CPC_MSG_TYPE_CAN_FRAME: case CPC_MSG_TYPE_EXT_CAN_FRAME: case CPC_MSG_TYPE_RTR_FRAME: case CPC_MSG_TYPE_EXT_RTR_FRAME: ems_usb_rx_can_msg(dev, msg); break; case CPC_MSG_TYPE_CAN_FRAME_ERROR: /* Process errorframe */ ems_usb_rx_err(dev, msg); break; case CPC_MSG_TYPE_OVERRUN: /* Message lost while receiving */ ems_usb_rx_err(dev, msg); break; } start += CPC_MSG_HEADER_LEN + msg->length; msg_count--; if (start > urb->transfer_buffer_length) { netdev_err(netdev, "format error\n"); break; } } } resubmit_urb: usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2), urb->transfer_buffer, RX_BUFFER_SIZE, ems_usb_read_bulk_callback, dev); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval == -ENODEV) netif_device_detach(netdev); else if (retval) netdev_err(netdev, "failed resubmitting read bulk urb: %d\n", retval); } /* * callback for bulk IN urb */ static void ems_usb_write_bulk_callback(struct urb *urb) { struct ems_tx_urb_context *context = urb->context; struct ems_usb *dev; struct net_device *netdev; BUG_ON(!context); dev = context->dev; netdev = dev->netdev; /* free up our allocated buffer */ usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); atomic_dec(&dev->active_tx_urbs); if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); netif_trans_update(netdev); /* transmission complete interrupt */ netdev->stats.tx_packets++; netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL); /* Release context */ context->echo_index = MAX_TX_URBS; } /* * Send the given CPC command synchronously */ static int ems_usb_command_msg(struct ems_usb *dev, struct ems_cpc_msg *msg) { int actual_length; /* Copy payload */ memcpy(&dev->tx_msg_buffer[CPC_HEADER_SIZE], msg, msg->length + CPC_MSG_HEADER_LEN); /* Clear header */ memset(&dev->tx_msg_buffer[0], 0, CPC_HEADER_SIZE); return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, 2), &dev->tx_msg_buffer[0], msg->length + CPC_MSG_HEADER_LEN + CPC_HEADER_SIZE, &actual_length, 1000); } /* * Change CAN controllers' mode register */ static int ems_usb_write_mode(struct ems_usb 
*dev, u8 mode) { dev->active_params.msg.can_params.cc_params.sja1000.mode = mode; return ems_usb_command_msg(dev, &dev->active_params); } /* * Send a CPC_Control command to change behaviour when interface receives a CAN * message, bus error or CAN state changed notifications. */ static int ems_usb_control_cmd(struct ems_usb *dev, u8 val) { struct ems_cpc_msg cmd; cmd.type = CPC_CMD_TYPE_CONTROL; cmd.length = CPC_MSG_HEADER_LEN + 1; cmd.msgid = 0; cmd.msg.generic[0] = val; return ems_usb_command_msg(dev, &cmd); } /* * Start interface */ static int ems_usb_start(struct ems_usb *dev) { struct net_device *netdev = dev->netdev; int err, i; dev->intr_in_buffer[0] = 0; dev->free_slots = 50; /* initial size */ for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf = NULL; dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { err = -ENOMEM; break; } buf = usb_alloc_coherent(dev->udev, RX_BUFFER_SIZE, GFP_KERNEL, &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; } urb->transfer_dma = buf_dma; usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, 2), buf, RX_BUFFER_SIZE, ems_usb_read_bulk_callback, dev); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &dev->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { usb_unanchor_urb(urb); usb_free_coherent(dev->udev, RX_BUFFER_SIZE, buf, urb->transfer_dma); usb_free_urb(urb); break; } dev->rxbuf[i] = buf; dev->rxbuf_dma[i] = buf_dma; /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } /* Did we submit any URBs */ if (i == 0) { netdev_warn(netdev, "couldn't setup read URBs\n"); return err; } /* Warn if we've couldn't transmit all the URBs */ if (i < MAX_RX_URBS) netdev_warn(netdev, "rx performance may be slow\n"); /* Setup and start interrupt URB */ usb_fill_int_urb(dev->intr_urb, dev->udev, usb_rcvintpipe(dev->udev, 1), dev->intr_in_buffer, INTR_IN_BUFFER_SIZE, ems_usb_read_interrupt_callback, dev, 1); err = usb_submit_urb(dev->intr_urb, GFP_KERNEL); if (err) { netdev_warn(netdev, "intr URB submit failed: %d\n", err); return err; } /* CPC-USB will transfer received message to host */ err = ems_usb_control_cmd(dev, CONTR_CAN_MESSAGE | CONTR_CONT_ON); if (err) goto failed; /* CPC-USB will transfer CAN state changes to host */ err = ems_usb_control_cmd(dev, CONTR_CAN_STATE | CONTR_CONT_ON); if (err) goto failed; /* CPC-USB will transfer bus errors to host */ err = ems_usb_control_cmd(dev, CONTR_BUS_ERROR | CONTR_CONT_ON); if (err) goto failed; err = ems_usb_write_mode(dev, SJA1000_MOD_NORMAL); if (err) goto failed; dev->can.state = CAN_STATE_ERROR_ACTIVE; return 0; failed: netdev_warn(netdev, "couldn't submit control: %d\n", err); return err; } static void unlink_all_urbs(struct ems_usb *dev) { int i; usb_unlink_urb(dev->intr_urb); usb_kill_anchored_urbs(&dev->rx_submitted); for (i = 0; i < MAX_RX_URBS; ++i) usb_free_coherent(dev->udev, RX_BUFFER_SIZE, dev->rxbuf[i], dev->rxbuf_dma[i]); usb_kill_anchored_urbs(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); for (i = 0; i < MAX_TX_URBS; i++) dev->tx_contexts[i].echo_index = MAX_TX_URBS; } static int ems_usb_open(struct net_device *netdev) { struct ems_usb *dev = netdev_priv(netdev); int err; err = ems_usb_write_mode(dev, SJA1000_MOD_RM); if (err) return err; /* common open */ err = open_candev(netdev); if (err) return err; /* finally start device */ err = ems_usb_start(dev); if (err) { if (err == 
-ENODEV) netif_device_detach(dev->netdev); netdev_warn(netdev, "couldn't start device: %d\n", err); close_candev(netdev); return err; } netif_start_queue(netdev); return 0; } static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct ems_usb *dev = netdev_priv(netdev); struct ems_tx_urb_context *context = NULL; struct net_device_stats *stats = &netdev->stats; struct can_frame *cf = (struct can_frame *)skb->data; struct ems_cpc_msg *msg; struct urb *urb; u8 *buf; int i, err; size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN + sizeof(struct cpc_can_msg); if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* create a URB, and a buffer for it, and copy the data to the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) goto nomem; buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); goto nomem; } msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE]; msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK); msg->msg.can_msg.length = cf->len; if (cf->can_id & CAN_RTR_FLAG) { msg->type = cf->can_id & CAN_EFF_FLAG ? CPC_CMD_TYPE_EXT_RTR_FRAME : CPC_CMD_TYPE_RTR_FRAME; msg->length = CPC_CAN_MSG_MIN_SIZE; } else { msg->type = cf->can_id & CAN_EFF_FLAG ? CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME; for (i = 0; i < cf->len; i++) msg->msg.can_msg.msg[i] = cf->data[i]; msg->length = CPC_CAN_MSG_MIN_SIZE + cf->len; } for (i = 0; i < MAX_TX_URBS; i++) { if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) { context = &dev->tx_contexts[i]; break; } } /* * May never happen! When this happens we'd more URBs in flight as * allowed (MAX_TX_URBS). */ if (!context) { usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); usb_free_urb(urb); netdev_warn(netdev, "couldn't find free context\n"); return NETDEV_TX_BUSY; } context->dev = dev; context->echo_index = i; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf, size, ems_usb_write_bulk_callback, context); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &dev->tx_submitted); can_put_echo_skb(skb, netdev, context->echo_index, 0); atomic_inc(&dev->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { can_free_echo_skb(netdev, context->echo_index, NULL); usb_unanchor_urb(urb); usb_free_coherent(dev->udev, size, buf, urb->transfer_dma); atomic_dec(&dev->active_tx_urbs); if (err == -ENODEV) { netif_device_detach(netdev); } else { netdev_warn(netdev, "failed tx_urb %d\n", err); stats->tx_dropped++; } } else { netif_trans_update(netdev); /* Slow down tx path */ if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS || dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) { netif_stop_queue(netdev); } } /* * Release our reference to this URB, the USB core will eventually free * it entirely. 
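	 *
	 * The URB remains anchored on tx_submitted; usb_anchor_urb()
	 * took an extra reference that the USB core drops again when
	 * the transfer completes, so dropping ours here is safe.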
*/ usb_free_urb(urb); return NETDEV_TX_OK; nomem: dev_kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; } static int ems_usb_close(struct net_device *netdev) { struct ems_usb *dev = netdev_priv(netdev); /* Stop polling */ unlink_all_urbs(dev); netif_stop_queue(netdev); /* Set CAN controller to reset mode */ if (ems_usb_write_mode(dev, SJA1000_MOD_RM)) netdev_warn(netdev, "couldn't stop device"); close_candev(netdev); return 0; } static const struct net_device_ops ems_usb_netdev_ops = { .ndo_open = ems_usb_open, .ndo_stop = ems_usb_close, .ndo_start_xmit = ems_usb_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops ems_usb_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const struct can_bittiming_const ems_usb_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; static int ems_usb_set_mode(struct net_device *netdev, enum can_mode mode) { struct ems_usb *dev = netdev_priv(netdev); switch (mode) { case CAN_MODE_START: if (ems_usb_write_mode(dev, SJA1000_MOD_NORMAL)) netdev_warn(netdev, "couldn't start device"); if (netif_queue_stopped(netdev)) netif_wake_queue(netdev); break; default: return -EOPNOTSUPP; } return 0; } static int ems_usb_set_bittiming(struct net_device *netdev) { struct ems_usb *dev = netdev_priv(netdev); struct can_bittiming *bt = &dev->can.bittiming; u8 btr0, btr1; btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | (((bt->phase_seg2 - 1) & 0x7) << 4); if (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) btr1 |= 0x80; netdev_info(netdev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); dev->active_params.msg.can_params.cc_params.sja1000.btr0 = btr0; dev->active_params.msg.can_params.cc_params.sja1000.btr1 = btr1; return ems_usb_command_msg(dev, &dev->active_params); } static void init_params_sja1000(struct ems_cpc_msg *msg) { struct cpc_sja1000_params *sja1000 = &msg->msg.can_params.cc_params.sja1000; msg->type = CPC_CMD_TYPE_CAN_PARAMS; msg->length = sizeof(struct cpc_can_params); msg->msgid = 0; msg->msg.can_params.cc_type = CPC_CC_TYPE_SJA1000; /* Acceptance filter open */ sja1000->acc_code0 = 0x00; sja1000->acc_code1 = 0x00; sja1000->acc_code2 = 0x00; sja1000->acc_code3 = 0x00; /* Acceptance filter open */ sja1000->acc_mask0 = 0xFF; sja1000->acc_mask1 = 0xFF; sja1000->acc_mask2 = 0xFF; sja1000->acc_mask3 = 0xFF; sja1000->btr0 = 0; sja1000->btr1 = 0; sja1000->outp_contr = SJA1000_DEFAULT_OUTPUT_CONTROL; sja1000->mode = SJA1000_MOD_RM; } /* * probe function for new CPC-USB devices */ static int ems_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct net_device *netdev; struct ems_usb *dev; int i, err = -ENOMEM; netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "ems_usb: Couldn't alloc candev\n"); return -ENOMEM; } dev = netdev_priv(netdev); dev->udev = interface_to_usbdev(intf); dev->netdev = netdev; dev->can.state = CAN_STATE_STOPPED; dev->can.clock.freq = EMS_USB_ARM7_CLOCK; dev->can.bittiming_const = &ems_usb_bittiming_const; dev->can.do_set_bittiming = ems_usb_set_bittiming; dev->can.do_set_mode = ems_usb_set_mode; dev->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; netdev->netdev_ops = &ems_usb_netdev_ops; netdev->ethtool_ops = &ems_usb_ethtool_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ init_usb_anchor(&dev->rx_submitted); 
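	/* TX bookkeeping: echo_index == MAX_TX_URBS marks a context slot
	 * as free; ems_usb_start_xmit() scans for such a slot before
	 * submitting a frame.
	 */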
init_usb_anchor(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); for (i = 0; i < MAX_TX_URBS; i++) dev->tx_contexts[i].echo_index = MAX_TX_URBS; dev->intr_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->intr_urb) goto cleanup_candev; dev->intr_in_buffer = kzalloc(INTR_IN_BUFFER_SIZE, GFP_KERNEL); if (!dev->intr_in_buffer) goto cleanup_intr_urb; dev->tx_msg_buffer = kzalloc(CPC_HEADER_SIZE + sizeof(struct ems_cpc_msg), GFP_KERNEL); if (!dev->tx_msg_buffer) goto cleanup_intr_in_buffer; usb_set_intfdata(intf, dev); SET_NETDEV_DEV(netdev, &intf->dev); init_params_sja1000(&dev->active_params); err = ems_usb_command_msg(dev, &dev->active_params); if (err) { netdev_err(netdev, "couldn't initialize controller: %d\n", err); goto cleanup_tx_msg_buffer; } err = register_candev(netdev); if (err) { netdev_err(netdev, "couldn't register CAN device: %d\n", err); goto cleanup_tx_msg_buffer; } return 0; cleanup_tx_msg_buffer: kfree(dev->tx_msg_buffer); cleanup_intr_in_buffer: kfree(dev->intr_in_buffer); cleanup_intr_urb: usb_free_urb(dev->intr_urb); cleanup_candev: free_candev(netdev); return err; } /* * called by the usb core when the device is removed from the system */ static void ems_usb_disconnect(struct usb_interface *intf) { struct ems_usb *dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (dev) { unregister_netdev(dev->netdev); unlink_all_urbs(dev); usb_free_urb(dev->intr_urb); kfree(dev->intr_in_buffer); kfree(dev->tx_msg_buffer); free_candev(dev->netdev); } } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver ems_usb_driver = { .name = KBUILD_MODNAME, .probe = ems_usb_probe, .disconnect = ems_usb_disconnect, .id_table = ems_usb_table, }; module_usb_driver(ems_usb_driver);
linux-master
drivers/net/can/usb/ems_usb.c
// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for "8 devices" USB2CAN converter * * Copyright (C) 2012 Bernd Krumboeck ([email protected]) * * This driver is inspired by the 3.2.0 version of drivers/net/can/usb/ems_usb.c * and drivers/net/can/usb/esd_usb2.c * * Many thanks to Gerhard Bertelsmann ([email protected]) * for testing and fixing this driver. Also many thanks to "8 devices", * who were very cooperative and answered my questions. */ #include <linux/ethtool.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> /* driver constants */ #define MAX_RX_URBS 20 #define MAX_TX_URBS 20 #define RX_BUFFER_SIZE 64 /* vendor and product id */ #define USB_8DEV_VENDOR_ID 0x0483 #define USB_8DEV_PRODUCT_ID 0x1234 /* endpoints */ enum usb_8dev_endpoint { USB_8DEV_ENDP_DATA_RX = 1, USB_8DEV_ENDP_DATA_TX, USB_8DEV_ENDP_CMD_RX, USB_8DEV_ENDP_CMD_TX }; /* device CAN clock */ #define USB_8DEV_ABP_CLOCK 32000000 /* setup flags */ #define USB_8DEV_SILENT 0x01 #define USB_8DEV_LOOPBACK 0x02 #define USB_8DEV_DISABLE_AUTO_RESTRANS 0x04 #define USB_8DEV_STATUS_FRAME 0x08 /* commands */ enum usb_8dev_cmd { USB_8DEV_RESET = 1, USB_8DEV_OPEN, USB_8DEV_CLOSE, USB_8DEV_SET_SPEED, USB_8DEV_SET_MASK_FILTER, USB_8DEV_GET_STATUS, USB_8DEV_GET_STATISTICS, USB_8DEV_GET_SERIAL, USB_8DEV_GET_SOFTW_VER, USB_8DEV_GET_HARDW_VER, USB_8DEV_RESET_TIMESTAMP, USB_8DEV_GET_SOFTW_HARDW_VER }; /* command options */ #define USB_8DEV_BAUD_MANUAL 0x09 #define USB_8DEV_CMD_START 0x11 #define USB_8DEV_CMD_END 0x22 #define USB_8DEV_CMD_SUCCESS 0 #define USB_8DEV_CMD_ERROR 255 #define USB_8DEV_CMD_TIMEOUT 1000 /* frames */ #define USB_8DEV_DATA_START 0x55 #define USB_8DEV_DATA_END 0xAA #define USB_8DEV_TYPE_CAN_FRAME 0 #define USB_8DEV_TYPE_ERROR_FRAME 3 #define USB_8DEV_EXTID 0x01 #define USB_8DEV_RTR 0x02 #define USB_8DEV_ERR_FLAG 0x04 /* status */ #define USB_8DEV_STATUSMSG_OK 0x00 /* Normal condition. */ #define USB_8DEV_STATUSMSG_OVERRUN 0x01 /* Overrun occurred when sending */ #define USB_8DEV_STATUSMSG_BUSLIGHT 0x02 /* Error counter has reached 96 */ #define USB_8DEV_STATUSMSG_BUSHEAVY 0x03 /* Error count. 
has reached 128 */ #define USB_8DEV_STATUSMSG_BUSOFF 0x04 /* Device is in BUSOFF */ #define USB_8DEV_STATUSMSG_STUFF 0x20 /* Stuff Error */ #define USB_8DEV_STATUSMSG_FORM 0x21 /* Form Error */ #define USB_8DEV_STATUSMSG_ACK 0x23 /* Ack Error */ #define USB_8DEV_STATUSMSG_BIT0 0x24 /* Bit1 Error */ #define USB_8DEV_STATUSMSG_BIT1 0x25 /* Bit0 Error */ #define USB_8DEV_STATUSMSG_CRC 0x27 /* CRC Error */ #define USB_8DEV_RP_MASK 0x7F /* Mask for Receive Error Bit */ /* table of devices that work with this driver */ static const struct usb_device_id usb_8dev_table[] = { { USB_DEVICE(USB_8DEV_VENDOR_ID, USB_8DEV_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, usb_8dev_table); struct usb_8dev_tx_urb_context { struct usb_8dev_priv *priv; u32 echo_index; }; /* Structure to hold all of our device specific stuff */ struct usb_8dev_priv { struct can_priv can; /* must be the first member */ struct usb_device *udev; struct net_device *netdev; atomic_t active_tx_urbs; struct usb_anchor tx_submitted; struct usb_8dev_tx_urb_context tx_contexts[MAX_TX_URBS]; struct usb_anchor rx_submitted; struct can_berr_counter bec; u8 *cmd_msg_buffer; struct mutex usb_8dev_cmd_lock; void *rxbuf[MAX_RX_URBS]; dma_addr_t rxbuf_dma[MAX_RX_URBS]; }; /* tx frame */ struct __packed usb_8dev_tx_msg { u8 begin; u8 flags; /* RTR and EXT_ID flag */ __be32 id; /* upper 3 bits not used */ u8 dlc; /* data length code 0-8 bytes */ u8 data[8]; /* 64-bit data */ u8 end; }; /* rx frame */ struct __packed usb_8dev_rx_msg { u8 begin; u8 type; /* frame type */ u8 flags; /* RTR and EXT_ID flag */ __be32 id; /* upper 3 bits not used */ u8 dlc; /* data length code 0-8 bytes */ u8 data[8]; /* 64-bit data */ __be32 timestamp; /* 32-bit timestamp */ u8 end; }; /* command frame */ struct __packed usb_8dev_cmd_msg { u8 begin; u8 channel; /* unknown - always 0 */ u8 command; /* command to execute */ u8 opt1; /* optional parameter / return value */ u8 opt2; /* optional parameter 2 */ u8 data[10]; /* optional parameter and data */ u8 end; }; static int usb_8dev_send_cmd_msg(struct usb_8dev_priv *priv, u8 *msg, int size) { int actual_length; return usb_bulk_msg(priv->udev, usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_CMD_TX), msg, size, &actual_length, USB_8DEV_CMD_TIMEOUT); } static int usb_8dev_wait_cmd_msg(struct usb_8dev_priv *priv, u8 *msg, int size, int *actual_length) { return usb_bulk_msg(priv->udev, usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_CMD_RX), msg, size, actual_length, USB_8DEV_CMD_TIMEOUT); } /* Send command to device and receive result. * Command was successful when opt1 = 0. 
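 *
 * Commands and responses are fixed 16-byte frames delimited by
 * USB_8DEV_CMD_START (0x11) and USB_8DEV_CMD_END (0x22); the shared
 * cmd_msg_buffer is serialized by usb_8dev_cmd_lock so that
 * concurrent callers cannot interleave their command/response pairs.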
*/ static int usb_8dev_send_cmd(struct usb_8dev_priv *priv, struct usb_8dev_cmd_msg *out, struct usb_8dev_cmd_msg *in) { int err; int num_bytes_read; struct net_device *netdev; netdev = priv->netdev; out->begin = USB_8DEV_CMD_START; out->end = USB_8DEV_CMD_END; mutex_lock(&priv->usb_8dev_cmd_lock); memcpy(priv->cmd_msg_buffer, out, sizeof(struct usb_8dev_cmd_msg)); err = usb_8dev_send_cmd_msg(priv, priv->cmd_msg_buffer, sizeof(struct usb_8dev_cmd_msg)); if (err < 0) { netdev_err(netdev, "sending command message failed\n"); goto failed; } err = usb_8dev_wait_cmd_msg(priv, priv->cmd_msg_buffer, sizeof(struct usb_8dev_cmd_msg), &num_bytes_read); if (err < 0) { netdev_err(netdev, "no command message answer\n"); goto failed; } memcpy(in, priv->cmd_msg_buffer, sizeof(struct usb_8dev_cmd_msg)); if (in->begin != USB_8DEV_CMD_START || in->end != USB_8DEV_CMD_END || num_bytes_read != 16 || in->opt1 != 0) err = -EPROTO; failed: mutex_unlock(&priv->usb_8dev_cmd_lock); return err; } /* Send open command to device */ static int usb_8dev_cmd_open(struct usb_8dev_priv *priv) { struct can_bittiming *bt = &priv->can.bittiming; struct usb_8dev_cmd_msg outmsg; struct usb_8dev_cmd_msg inmsg; u32 ctrlmode = priv->can.ctrlmode; u32 flags = USB_8DEV_STATUS_FRAME; __be32 beflags; __be16 bebrp; memset(&outmsg, 0, sizeof(outmsg)); outmsg.command = USB_8DEV_OPEN; outmsg.opt1 = USB_8DEV_BAUD_MANUAL; outmsg.data[0] = bt->prop_seg + bt->phase_seg1; outmsg.data[1] = bt->phase_seg2; outmsg.data[2] = bt->sjw; /* BRP */ bebrp = cpu_to_be16((u16)bt->brp); memcpy(&outmsg.data[3], &bebrp, sizeof(bebrp)); /* flags */ if (ctrlmode & CAN_CTRLMODE_LOOPBACK) flags |= USB_8DEV_LOOPBACK; if (ctrlmode & CAN_CTRLMODE_LISTENONLY) flags |= USB_8DEV_SILENT; if (ctrlmode & CAN_CTRLMODE_ONE_SHOT) flags |= USB_8DEV_DISABLE_AUTO_RESTRANS; beflags = cpu_to_be32(flags); memcpy(&outmsg.data[5], &beflags, sizeof(beflags)); return usb_8dev_send_cmd(priv, &outmsg, &inmsg); } /* Send close command to device */ static int usb_8dev_cmd_close(struct usb_8dev_priv *priv) { struct usb_8dev_cmd_msg inmsg; struct usb_8dev_cmd_msg outmsg = { .channel = 0, .command = USB_8DEV_CLOSE, .opt1 = 0, .opt2 = 0 }; return usb_8dev_send_cmd(priv, &outmsg, &inmsg); } /* Get firmware and hardware version */ static int usb_8dev_cmd_version(struct usb_8dev_priv *priv, u32 *res) { struct usb_8dev_cmd_msg inmsg; struct usb_8dev_cmd_msg outmsg = { .channel = 0, .command = USB_8DEV_GET_SOFTW_HARDW_VER, .opt1 = 0, .opt2 = 0 }; int err = usb_8dev_send_cmd(priv, &outmsg, &inmsg); if (err) return err; *res = be32_to_cpup((__be32 *)inmsg.data); return err; } /* Set network device mode * * Maybe we should leave this function empty, because the device * set mode variable with open command. 
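 *
 * As implemented, CAN_MODE_START just re-sends the OPEN command,
 * which is sufficient to restart the controller after bus-off.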
*/ static int usb_8dev_set_mode(struct net_device *netdev, enum can_mode mode) { struct usb_8dev_priv *priv = netdev_priv(netdev); int err = 0; switch (mode) { case CAN_MODE_START: err = usb_8dev_cmd_open(priv); if (err) netdev_warn(netdev, "couldn't start device"); break; default: return -EOPNOTSUPP; } return err; } /* Read error/status frames */ static void usb_8dev_rx_err_msg(struct usb_8dev_priv *priv, struct usb_8dev_rx_msg *msg) { struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats = &priv->netdev->stats; /* Error message: * byte 0: Status * byte 1: bit 7: Receive Passive * byte 1: bit 0-6: Receive Error Counter * byte 2: Transmit Error Counter * byte 3: Always 0 (maybe reserved for future use) */ u8 state = msg->data[0]; u8 rxerr = msg->data[1] & USB_8DEV_RP_MASK; u8 txerr = msg->data[2]; int rx_errors = 0; int tx_errors = 0; skb = alloc_can_err_skb(priv->netdev, &cf); if (!skb) return; switch (state) { case USB_8DEV_STATUSMSG_OK: priv->can.state = CAN_STATE_ERROR_ACTIVE; cf->can_id |= CAN_ERR_PROT; cf->data[2] = CAN_ERR_PROT_ACTIVE; break; case USB_8DEV_STATUSMSG_BUSOFF: priv->can.state = CAN_STATE_BUS_OFF; cf->can_id |= CAN_ERR_BUSOFF; priv->can.can_stats.bus_off++; can_bus_off(priv->netdev); break; case USB_8DEV_STATUSMSG_OVERRUN: case USB_8DEV_STATUSMSG_BUSLIGHT: case USB_8DEV_STATUSMSG_BUSHEAVY: cf->can_id |= CAN_ERR_CRTL; break; default: priv->can.state = CAN_STATE_ERROR_WARNING; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; priv->can.can_stats.bus_error++; break; } switch (state) { case USB_8DEV_STATUSMSG_OK: case USB_8DEV_STATUSMSG_BUSOFF: break; case USB_8DEV_STATUSMSG_ACK: cf->can_id |= CAN_ERR_ACK; tx_errors = 1; break; case USB_8DEV_STATUSMSG_CRC: cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; rx_errors = 1; break; case USB_8DEV_STATUSMSG_BIT0: cf->data[2] |= CAN_ERR_PROT_BIT0; tx_errors = 1; break; case USB_8DEV_STATUSMSG_BIT1: cf->data[2] |= CAN_ERR_PROT_BIT1; tx_errors = 1; break; case USB_8DEV_STATUSMSG_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; rx_errors = 1; break; case USB_8DEV_STATUSMSG_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; rx_errors = 1; break; case USB_8DEV_STATUSMSG_OVERRUN: cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; rx_errors = 1; break; case USB_8DEV_STATUSMSG_BUSLIGHT: priv->can.state = CAN_STATE_ERROR_WARNING; cf->data[1] = (txerr > rxerr) ? CAN_ERR_CRTL_TX_WARNING : CAN_ERR_CRTL_RX_WARNING; priv->can.can_stats.error_warning++; break; case USB_8DEV_STATUSMSG_BUSHEAVY: priv->can.state = CAN_STATE_ERROR_PASSIVE; cf->data[1] = (txerr > rxerr) ? 
CAN_ERR_CRTL_TX_PASSIVE : CAN_ERR_CRTL_RX_PASSIVE; priv->can.can_stats.error_passive++; break; default: netdev_warn(priv->netdev, "Unknown status/error message (%d)\n", state); break; } if (tx_errors) { cf->data[2] |= CAN_ERR_PROT_TX; stats->tx_errors++; } if (rx_errors) stats->rx_errors++; if (priv->can.state != CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; } priv->bec.txerr = txerr; priv->bec.rxerr = rxerr; netif_rx(skb); } /* Read data and status frames */ static void usb_8dev_rx_can_msg(struct usb_8dev_priv *priv, struct usb_8dev_rx_msg *msg) { struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats = &priv->netdev->stats; if (msg->type == USB_8DEV_TYPE_ERROR_FRAME && msg->flags == USB_8DEV_ERR_FLAG) { usb_8dev_rx_err_msg(priv, msg); } else if (msg->type == USB_8DEV_TYPE_CAN_FRAME) { skb = alloc_can_skb(priv->netdev, &cf); if (!skb) return; cf->can_id = be32_to_cpu(msg->id); can_frame_set_cc_len(cf, msg->dlc & 0xF, priv->can.ctrlmode); if (msg->flags & USB_8DEV_EXTID) cf->can_id |= CAN_EFF_FLAG; if (msg->flags & USB_8DEV_RTR) { cf->can_id |= CAN_RTR_FLAG; } else { memcpy(cf->data, msg->data, cf->len); stats->rx_bytes += cf->len; } stats->rx_packets++; netif_rx(skb); } else { netdev_warn(priv->netdev, "frame type %d unknown", msg->type); } } /* Callback for reading data from device * * Check urb status, call read function and resubmit urb read operation. */ static void usb_8dev_read_bulk_callback(struct urb *urb) { struct usb_8dev_priv *priv = urb->context; struct net_device *netdev; int retval; int pos = 0; netdev = priv->netdev; if (!netif_device_present(netdev)) return; switch (urb->status) { case 0: /* success */ break; case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: return; default: netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status); goto resubmit_urb; } while (pos < urb->actual_length) { struct usb_8dev_rx_msg *msg; if (pos + sizeof(struct usb_8dev_rx_msg) > urb->actual_length) { netdev_err(priv->netdev, "format error\n"); break; } msg = (struct usb_8dev_rx_msg *)(urb->transfer_buffer + pos); usb_8dev_rx_can_msg(priv, msg); pos += sizeof(struct usb_8dev_rx_msg); } resubmit_urb: usb_fill_bulk_urb(urb, priv->udev, usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX), urb->transfer_buffer, RX_BUFFER_SIZE, usb_8dev_read_bulk_callback, priv); retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval == -ENODEV) netif_device_detach(netdev); else if (retval) netdev_err(netdev, "failed resubmitting read bulk urb: %d\n", retval); } /* Callback handler for write operations * * Free allocated buffers, check transmit status and * calculate statistic. 
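 *
 * The completion also returns the context slot to the free pool
 * (echo_index is reset to the MAX_TX_URBS sentinel) and wakes the
 * TX queue so a stalled xmit path can continue.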
*/ static void usb_8dev_write_bulk_callback(struct urb *urb) { struct usb_8dev_tx_urb_context *context = urb->context; struct usb_8dev_priv *priv; struct net_device *netdev; BUG_ON(!context); priv = context->priv; netdev = priv->netdev; /* free up our allocated buffer */ usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); atomic_dec(&priv->active_tx_urbs); if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); netdev->stats.tx_packets++; netdev->stats.tx_bytes += can_get_echo_skb(netdev, context->echo_index, NULL); /* Release context */ context->echo_index = MAX_TX_URBS; netif_wake_queue(netdev); } /* Send data to device */ static netdev_tx_t usb_8dev_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct usb_8dev_priv *priv = netdev_priv(netdev); struct net_device_stats *stats = &netdev->stats; struct can_frame *cf = (struct can_frame *) skb->data; struct usb_8dev_tx_msg *msg; struct urb *urb; struct usb_8dev_tx_urb_context *context = NULL; u8 *buf; int i, err; size_t size = sizeof(struct usb_8dev_tx_msg); if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; /* create a URB, and a buffer for it, and copy the data to the URB */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) goto nomem; buf = usb_alloc_coherent(priv->udev, size, GFP_ATOMIC, &urb->transfer_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); goto nomembuf; } memset(buf, 0, size); msg = (struct usb_8dev_tx_msg *)buf; msg->begin = USB_8DEV_DATA_START; msg->flags = 0x00; if (cf->can_id & CAN_RTR_FLAG) msg->flags |= USB_8DEV_RTR; if (cf->can_id & CAN_EFF_FLAG) msg->flags |= USB_8DEV_EXTID; msg->id = cpu_to_be32(cf->can_id & CAN_ERR_MASK); msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); memcpy(msg->data, cf->data, cf->len); msg->end = USB_8DEV_DATA_END; for (i = 0; i < MAX_TX_URBS; i++) { if (priv->tx_contexts[i].echo_index == MAX_TX_URBS) { context = &priv->tx_contexts[i]; break; } } /* May never happen! When this happens we'd more URBs in flight as * allowed (MAX_TX_URBS). */ if (!context) goto nofreecontext; context->priv = priv; context->echo_index = i; usb_fill_bulk_urb(urb, priv->udev, usb_sndbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_TX), buf, size, usb_8dev_write_bulk_callback, context); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &priv->tx_submitted); can_put_echo_skb(skb, netdev, context->echo_index, 0); atomic_inc(&priv->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { can_free_echo_skb(netdev, context->echo_index, NULL); usb_unanchor_urb(urb); usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); atomic_dec(&priv->active_tx_urbs); if (err == -ENODEV) netif_device_detach(netdev); else netdev_warn(netdev, "failed tx_urb %d\n", err); stats->tx_dropped++; } else if (atomic_read(&priv->active_tx_urbs) >= MAX_TX_URBS) /* Slow down tx path */ netif_stop_queue(netdev); /* Release our reference to this URB, the USB core will eventually free * it entirely. 
*/ usb_free_urb(urb); return NETDEV_TX_OK; nofreecontext: usb_free_coherent(priv->udev, size, buf, urb->transfer_dma); usb_free_urb(urb); netdev_warn(netdev, "couldn't find free context"); return NETDEV_TX_BUSY; nomembuf: usb_free_urb(urb); nomem: dev_kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; } static int usb_8dev_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct usb_8dev_priv *priv = netdev_priv(netdev); bec->txerr = priv->bec.txerr; bec->rxerr = priv->bec.rxerr; return 0; } /* Start USB device */ static int usb_8dev_start(struct usb_8dev_priv *priv) { struct net_device *netdev = priv->netdev; int err, i; for (i = 0; i < MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf; dma_addr_t buf_dma; /* create a URB, and a buffer for it */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { err = -ENOMEM; break; } buf = usb_alloc_coherent(priv->udev, RX_BUFFER_SIZE, GFP_KERNEL, &buf_dma); if (!buf) { netdev_err(netdev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; } urb->transfer_dma = buf_dma; usb_fill_bulk_urb(urb, priv->udev, usb_rcvbulkpipe(priv->udev, USB_8DEV_ENDP_DATA_RX), buf, RX_BUFFER_SIZE, usb_8dev_read_bulk_callback, priv); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &priv->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { usb_unanchor_urb(urb); usb_free_coherent(priv->udev, RX_BUFFER_SIZE, buf, urb->transfer_dma); usb_free_urb(urb); break; } priv->rxbuf[i] = buf; priv->rxbuf_dma[i] = buf_dma; /* Drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } /* Did we submit any URBs */ if (i == 0) { netdev_warn(netdev, "couldn't setup read URBs\n"); return err; } /* Warn if we've couldn't transmit all the URBs */ if (i < MAX_RX_URBS) netdev_warn(netdev, "rx performance may be slow\n"); err = usb_8dev_cmd_open(priv); if (err) goto failed; priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; failed: if (err == -ENODEV) netif_device_detach(priv->netdev); netdev_warn(netdev, "couldn't submit control: %d\n", err); return err; } /* Open USB device */ static int usb_8dev_open(struct net_device *netdev) { struct usb_8dev_priv *priv = netdev_priv(netdev); int err; /* common open */ err = open_candev(netdev); if (err) return err; /* finally start device */ err = usb_8dev_start(priv); if (err) { if (err == -ENODEV) netif_device_detach(priv->netdev); netdev_warn(netdev, "couldn't start device: %d\n", err); close_candev(netdev); return err; } netif_start_queue(netdev); return 0; } static void unlink_all_urbs(struct usb_8dev_priv *priv) { int i; usb_kill_anchored_urbs(&priv->rx_submitted); for (i = 0; i < MAX_RX_URBS; ++i) usb_free_coherent(priv->udev, RX_BUFFER_SIZE, priv->rxbuf[i], priv->rxbuf_dma[i]); usb_kill_anchored_urbs(&priv->tx_submitted); atomic_set(&priv->active_tx_urbs, 0); for (i = 0; i < MAX_TX_URBS; i++) priv->tx_contexts[i].echo_index = MAX_TX_URBS; } /* Close USB device */ static int usb_8dev_close(struct net_device *netdev) { struct usb_8dev_priv *priv = netdev_priv(netdev); int err = 0; /* Send CLOSE command to CAN controller */ err = usb_8dev_cmd_close(priv); if (err) netdev_warn(netdev, "couldn't stop device"); priv->can.state = CAN_STATE_STOPPED; netif_stop_queue(netdev); /* Stop polling */ unlink_all_urbs(priv); close_candev(netdev); return err; } static const struct net_device_ops usb_8dev_netdev_ops = { .ndo_open = usb_8dev_open, .ndo_stop = usb_8dev_close, .ndo_start_xmit = usb_8dev_start_xmit, .ndo_change_mtu = can_change_mtu, }; static 
const struct ethtool_ops usb_8dev_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const struct can_bittiming_const usb_8dev_bittiming_const = { .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; /* Probe USB device * * Check device and firmware. * Set supported modes and bittiming constants. * Allocate some memory. */ static int usb_8dev_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct net_device *netdev; struct usb_8dev_priv *priv; int i, err = -ENOMEM; u32 version; char buf[18]; struct usb_device *usbdev = interface_to_usbdev(intf); /* product id looks strange, better we also check iProduct string */ if (usb_string(usbdev, usbdev->descriptor.iProduct, buf, sizeof(buf)) > 0 && strcmp(buf, "USB2CAN converter")) { dev_info(&usbdev->dev, "ignoring: not an USB2CAN converter\n"); return -ENODEV; } netdev = alloc_candev(sizeof(struct usb_8dev_priv), MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "Couldn't alloc candev\n"); return -ENOMEM; } priv = netdev_priv(netdev); priv->udev = usbdev; priv->netdev = netdev; priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = USB_8DEV_ABP_CLOCK; priv->can.bittiming_const = &usb_8dev_bittiming_const; priv->can.do_set_mode = usb_8dev_set_mode; priv->can.do_get_berr_counter = usb_8dev_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC; netdev->netdev_ops = &usb_8dev_netdev_ops; netdev->ethtool_ops = &usb_8dev_ethtool_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ init_usb_anchor(&priv->rx_submitted); init_usb_anchor(&priv->tx_submitted); atomic_set(&priv->active_tx_urbs, 0); for (i = 0; i < MAX_TX_URBS; i++) priv->tx_contexts[i].echo_index = MAX_TX_URBS; priv->cmd_msg_buffer = devm_kzalloc(&intf->dev, sizeof(struct usb_8dev_cmd_msg), GFP_KERNEL); if (!priv->cmd_msg_buffer) goto cleanup_candev; usb_set_intfdata(intf, priv); SET_NETDEV_DEV(netdev, &intf->dev); mutex_init(&priv->usb_8dev_cmd_lock); err = register_candev(netdev); if (err) { netdev_err(netdev, "couldn't register CAN device: %d\n", err); goto cleanup_candev; } err = usb_8dev_cmd_version(priv, &version); if (err) { netdev_err(netdev, "can't get firmware version\n"); goto cleanup_unregister_candev; } else { netdev_info(netdev, "firmware: %d.%d, hardware: %d.%d\n", (version>>24) & 0xff, (version>>16) & 0xff, (version>>8) & 0xff, version & 0xff); } return 0; cleanup_unregister_candev: unregister_netdev(priv->netdev); cleanup_candev: free_candev(netdev); return err; } /* Called by the usb core when driver is unloaded or device is removed */ static void usb_8dev_disconnect(struct usb_interface *intf) { struct usb_8dev_priv *priv = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (priv) { netdev_info(priv->netdev, "device disconnected\n"); unregister_netdev(priv->netdev); unlink_all_urbs(priv); free_candev(priv->netdev); } } static struct usb_driver usb_8dev_driver = { .name = KBUILD_MODNAME, .probe = usb_8dev_probe, .disconnect = usb_8dev_disconnect, .id_table = usb_8dev_table, }; module_usb_driver(usb_8dev_driver); MODULE_AUTHOR("Bernd Krumboeck <[email protected]>"); MODULE_DESCRIPTION("CAN driver for 8 devices USB2CAN interfaces"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/can/usb/usb_8dev.c
// SPDX-License-Identifier: GPL-2.0-only
/* SocketCAN driver for Microchip CAN BUS Analyzer Tool
 *
 * Copyright (C) 2017 Mobica Limited
 *
 * This driver is inspired by the 4.6.2 version of net/can/usb/usb_8dev.c
 */

#include <asm/unaligned.h>
#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/usb.h>

/* vendor and product id */
#define MCBA_MODULE_NAME "mcba_usb"
#define MCBA_VENDOR_ID 0x04d8
#define MCBA_PRODUCT_ID 0x0a30

/* driver constants */
#define MCBA_MAX_RX_URBS 20
#define MCBA_MAX_TX_URBS 20
#define MCBA_CTX_FREE MCBA_MAX_TX_URBS

/* RX buffer must be bigger than msg size since at the
 * beginning USB messages are stacked.
 */
#define MCBA_USB_RX_BUFF_SIZE 64
#define MCBA_USB_TX_BUFF_SIZE (sizeof(struct mcba_usb_msg))

/* Microchip command id */
#define MBCA_CMD_RECEIVE_MESSAGE 0xE3
#define MBCA_CMD_I_AM_ALIVE_FROM_CAN 0xF5
#define MBCA_CMD_I_AM_ALIVE_FROM_USB 0xF7
#define MBCA_CMD_CHANGE_BIT_RATE 0xA1
#define MBCA_CMD_TRANSMIT_MESSAGE_EV 0xA3
#define MBCA_CMD_SETUP_TERMINATION_RESISTANCE 0xA8
#define MBCA_CMD_READ_FW_VERSION 0xA9
#define MBCA_CMD_NOTHING_TO_SEND 0xFF
#define MBCA_CMD_TRANSMIT_MESSAGE_RSP 0xE2

#define MCBA_VER_REQ_USB 1
#define MCBA_VER_REQ_CAN 2

/* Drive the CAN_RES signal LOW "0" to activate R24 and R25 */
#define MCBA_VER_TERMINATION_ON 0
#define MCBA_VER_TERMINATION_OFF 1

#define MCBA_SIDL_EXID_MASK 0x8
#define MCBA_DLC_MASK 0xf
#define MCBA_DLC_RTR_MASK 0x40

#define MCBA_CAN_STATE_WRN_TH 95
#define MCBA_CAN_STATE_ERR_PSV_TH 127

#define MCBA_TERMINATION_DISABLED CAN_TERMINATION_DISABLED
#define MCBA_TERMINATION_ENABLED 120

struct mcba_usb_ctx {
	struct mcba_priv *priv;
	u32 ndx;
	bool can;
};

/* Structure to hold all of our device specific stuff */
struct mcba_priv {
	struct can_priv can; /* must be the first member */
	struct sk_buff *echo_skb[MCBA_MAX_TX_URBS];
	struct mcba_usb_ctx tx_context[MCBA_MAX_TX_URBS];
	struct usb_device *udev;
	struct net_device *netdev;
	struct usb_anchor tx_submitted;
	struct usb_anchor rx_submitted;
	struct can_berr_counter bec;
	bool usb_ka_first_pass;
	bool can_ka_first_pass;
	bool can_speed_check;
	atomic_t free_ctx_cnt;
	void *rxbuf[MCBA_MAX_RX_URBS];
	dma_addr_t rxbuf_dma[MCBA_MAX_RX_URBS];
	int rx_pipe;
	int tx_pipe;
};

/* CAN frame */
struct __packed mcba_usb_msg_can {
	u8 cmd_id;
	__be16 eid;
	__be16 sid;
	u8 dlc;
	u8 data[8];
	u8 timestamp[4];
	u8 checksum;
};

/* command frame */
struct __packed mcba_usb_msg {
	u8 cmd_id;
	u8 unused[18];
};

struct __packed mcba_usb_msg_ka_usb {
	u8 cmd_id;
	u8 termination_state;
	u8 soft_ver_major;
	u8 soft_ver_minor;
	u8 unused[15];
};

struct __packed mcba_usb_msg_ka_can {
	u8 cmd_id;
	u8 tx_err_cnt;
	u8 rx_err_cnt;
	u8 rx_buff_ovfl;
	u8 tx_bus_off;
	__be16 can_bitrate;
	__le16 rx_lost;
	u8 can_stat;
	u8 soft_ver_major;
	u8 soft_ver_minor;
	u8 debug_mode;
	u8 test_complete;
	u8 test_result;
	u8 unused[4];
};

struct __packed mcba_usb_msg_change_bitrate {
	u8 cmd_id;
	__be16 bitrate;
	u8 unused[16];
};

struct __packed mcba_usb_msg_termination {
	u8 cmd_id;
	u8 termination;
	u8 unused[17];
};

struct __packed mcba_usb_msg_fw_ver {
	u8 cmd_id;
	u8 pic;
	u8 unused[17];
};

static const struct usb_device_id mcba_usb_table[] = {
	{ USB_DEVICE(MCBA_VENDOR_ID, MCBA_PRODUCT_ID) },
	{} /* Terminating entry */
};

MODULE_DEVICE_TABLE(usb, mcba_usb_table);

static const u16 mcba_termination[] = { MCBA_TERMINATION_DISABLED,
					MCBA_TERMINATION_ENABLED };
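/* Note on the table below (derived from this driver's own bitrate
 * handling, not from device documentation): the values are bus bitrates
 * in bit/s, advertised to the CAN core via priv->can.bitrate_const in
 * mcba_usb_probe(). The 33333 and 83333 entries are the fractional
 * rates that the firmware reports truncated to whole kbit/s, see
 * convert_can2host_bitrate() further down.
 */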
static const u32 mcba_bitrate[] = { 20000, 33333, 50000, 80000, 83333, 100000,
				    125000, 150000, 175000, 200000, 225000,
				    250000, 275000, 300000, 500000, 625000,
				    800000, 1000000 };

static inline void mcba_init_ctx(struct mcba_priv *priv)
{
	int i = 0;

	for (i = 0; i < MCBA_MAX_TX_URBS; i++) {
		priv->tx_context[i].ndx = MCBA_CTX_FREE;
		priv->tx_context[i].priv = priv;
	}

	atomic_set(&priv->free_ctx_cnt, ARRAY_SIZE(priv->tx_context));
}

static inline struct mcba_usb_ctx *mcba_usb_get_free_ctx(struct mcba_priv *priv,
							 struct can_frame *cf)
{
	int i = 0;
	struct mcba_usb_ctx *ctx = NULL;

	for (i = 0; i < MCBA_MAX_TX_URBS; i++) {
		if (priv->tx_context[i].ndx == MCBA_CTX_FREE) {
			ctx = &priv->tx_context[i];
			ctx->ndx = i;

			if (cf)
				ctx->can = true;
			else
				ctx->can = false;

			atomic_dec(&priv->free_ctx_cnt);
			break;
		}
	}

	if (!atomic_read(&priv->free_ctx_cnt))
		/* That was the last free ctx. Slow down tx path */
		netif_stop_queue(priv->netdev);

	return ctx;
}

/* mcba_usb_free_ctx and mcba_usb_get_free_ctx are executed by different
 * threads. The order of execution in the function below is important.
 */
static inline void mcba_usb_free_ctx(struct mcba_usb_ctx *ctx)
{
	/* Increase number of free ctxs before freeing ctx */
	atomic_inc(&ctx->priv->free_ctx_cnt);

	ctx->ndx = MCBA_CTX_FREE;

	/* Wake up the queue once ctx is marked free */
	netif_wake_queue(ctx->priv->netdev);
}

static void mcba_usb_write_bulk_callback(struct urb *urb)
{
	struct mcba_usb_ctx *ctx = urb->context;
	struct net_device *netdev;

	WARN_ON(!ctx);

	netdev = ctx->priv->netdev;

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);

	if (ctx->can) {
		if (!netif_device_present(netdev))
			return;

		netdev->stats.tx_packets++;
		netdev->stats.tx_bytes += can_get_echo_skb(netdev, ctx->ndx,
							   NULL);
	}

	if (urb->status)
		netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);

	/* Release the context */
	mcba_usb_free_ctx(ctx);
}
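/* TX path summary (a summary of the code below, not of device
 * documentation): each transmit grabs a free context, copies the
 * message into a freshly allocated coherent DMA buffer, anchors the URB
 * on tx_submitted and drops its own URB reference after a successful
 * submit; the buffer is freed in mcba_usb_write_bulk_callback() and the
 * context is recycled by mcba_usb_free_ctx().
 */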
/* Send data to device */
static netdev_tx_t mcba_usb_xmit(struct mcba_priv *priv,
				 struct mcba_usb_msg *usb_msg,
				 struct mcba_usb_ctx *ctx)
{
	struct urb *urb;
	u8 *buf;
	int err;

	/* create a URB, and a buffer for it, and copy the data to the URB */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		return -ENOMEM;

	buf = usb_alloc_coherent(priv->udev, MCBA_USB_TX_BUFF_SIZE, GFP_ATOMIC,
				 &urb->transfer_dma);
	if (!buf) {
		err = -ENOMEM;
		goto nomembuf;
	}

	memcpy(buf, usb_msg, MCBA_USB_TX_BUFF_SIZE);

	usb_fill_bulk_urb(urb, priv->udev, priv->tx_pipe, buf,
			  MCBA_USB_TX_BUFF_SIZE, mcba_usb_write_bulk_callback,
			  ctx);

	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &priv->tx_submitted);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err))
		goto failed;

	/* Release our reference to this URB, the USB core will eventually free
	 * it entirely.
	 */
	usb_free_urb(urb);

	return 0;

failed:
	usb_unanchor_urb(urb);
	usb_free_coherent(priv->udev, MCBA_USB_TX_BUFF_SIZE, buf,
			  urb->transfer_dma);

	if (err == -ENODEV)
		netif_device_detach(priv->netdev);
	else
		netdev_warn(priv->netdev, "failed tx_urb %d\n", err);

nomembuf:
	usb_free_urb(urb);

	return err;
}

/* Send data to device */
static netdev_tx_t mcba_usb_start_xmit(struct sk_buff *skb,
				       struct net_device *netdev)
{
	struct mcba_priv *priv = netdev_priv(netdev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct mcba_usb_ctx *ctx = NULL;
	struct net_device_stats *stats = &priv->netdev->stats;
	u16 sid;
	int err;
	struct mcba_usb_msg_can usb_msg = {
		.cmd_id = MBCA_CMD_TRANSMIT_MESSAGE_EV
	};

	if (can_dev_dropped_skb(netdev, skb))
		return NETDEV_TX_OK;

	ctx = mcba_usb_get_free_ctx(priv, cf);
	if (!ctx)
		return NETDEV_TX_BUSY;

	if (cf->can_id & CAN_EFF_FLAG) {
		/* SIDH    | SIDL                 | EIDH   | EIDL
		 * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0
		 */
		sid = MCBA_SIDL_EXID_MASK;
		/* store 28-18 bits */
		sid |= (cf->can_id & 0x1ffc0000) >> 13;
		/* store 17-16 bits */
		sid |= (cf->can_id & 0x30000) >> 16;
		put_unaligned_be16(sid, &usb_msg.sid);

		/* store 15-0 bits */
		put_unaligned_be16(cf->can_id & 0xffff, &usb_msg.eid);
	} else {
		/* SIDH   | SIDL
		 * 10 - 3 | 2 1 0 x x x x x
		 */
		put_unaligned_be16((cf->can_id & CAN_SFF_MASK) << 5,
				   &usb_msg.sid);
		usb_msg.eid = 0;
	}

	usb_msg.dlc = cf->len;

	memcpy(usb_msg.data, cf->data, usb_msg.dlc);

	if (cf->can_id & CAN_RTR_FLAG)
		usb_msg.dlc |= MCBA_DLC_RTR_MASK;

	can_put_echo_skb(skb, priv->netdev, ctx->ndx, 0);

	err = mcba_usb_xmit(priv, (struct mcba_usb_msg *)&usb_msg, ctx);
	if (err)
		goto xmit_failed;

	return NETDEV_TX_OK;

xmit_failed:
	can_free_echo_skb(priv->netdev, ctx->ndx, NULL);
	mcba_usb_free_ctx(ctx);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}
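/* Worked example for the ID packing in mcba_usb_start_xmit() above
 * (derived from the masks and shifts in the code, not from device
 * documentation). For the 29-bit extended CAN ID 0x1234abcd:
 *
 *   (0x1234abcd & 0x1ffc0000) >> 13 = 0x91a0    (ID bits 28-18)
 *   (0x1234abcd & 0x30000) >> 16    = 0x0       (ID bits 17-16)
 *   sid = 0x91a0 | 0x0 | MCBA_SIDL_EXID_MASK    = 0x91a8
 *   eid = 0x1234abcd & 0xffff                   = 0xabcd
 *
 * mcba_usb_process_can() applies the inverse transformation on the
 * receive side.
 */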
/* Send cmd to device */
static void mcba_usb_xmit_cmd(struct mcba_priv *priv,
			      struct mcba_usb_msg *usb_msg)
{
	struct mcba_usb_ctx *ctx = NULL;
	int err;

	ctx = mcba_usb_get_free_ctx(priv, NULL);
	if (!ctx) {
		netdev_err(priv->netdev,
			   "Lack of free ctx. Sending (%d) cmd aborted",
			   usb_msg->cmd_id);

		return;
	}

	err = mcba_usb_xmit(priv, usb_msg, ctx);
	if (err)
		netdev_err(priv->netdev, "Failed to send cmd (%d)",
			   usb_msg->cmd_id);
}

static void mcba_usb_xmit_change_bitrate(struct mcba_priv *priv, u16 bitrate)
{
	struct mcba_usb_msg_change_bitrate usb_msg = {
		.cmd_id = MBCA_CMD_CHANGE_BIT_RATE
	};

	put_unaligned_be16(bitrate, &usb_msg.bitrate);

	mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg);
}

static void mcba_usb_xmit_read_fw_ver(struct mcba_priv *priv, u8 pic)
{
	struct mcba_usb_msg_fw_ver usb_msg = {
		.cmd_id = MBCA_CMD_READ_FW_VERSION,
		.pic = pic
	};

	mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg);
}

static void mcba_usb_process_can(struct mcba_priv *priv,
				 struct mcba_usb_msg_can *msg)
{
	struct can_frame *cf;
	struct sk_buff *skb;
	struct net_device_stats *stats = &priv->netdev->stats;
	u16 sid;

	skb = alloc_can_skb(priv->netdev, &cf);
	if (!skb)
		return;

	sid = get_unaligned_be16(&msg->sid);

	if (sid & MCBA_SIDL_EXID_MASK) {
		/* SIDH    | SIDL                 | EIDH   | EIDL
		 * 28 - 21 | 20 19 18 x x x 17 16 | 15 - 8 | 7 - 0
		 */
		cf->can_id = CAN_EFF_FLAG;
		/* store 28-18 bits */
		cf->can_id |= (sid & 0xffe0) << 13;
		/* store 17-16 bits */
		cf->can_id |= (sid & 3) << 16;
		/* store 15-0 bits */
		cf->can_id |= get_unaligned_be16(&msg->eid);
	} else {
		/* SIDH   | SIDL
		 * 10 - 3 | 2 1 0 x x x x x
		 */
		cf->can_id = (sid & 0xffe0) >> 5;
	}

	cf->len = can_cc_dlc2len(msg->dlc & MCBA_DLC_MASK);

	if (msg->dlc & MCBA_DLC_RTR_MASK) {
		cf->can_id |= CAN_RTR_FLAG;
	} else {
		memcpy(cf->data, msg->data, cf->len);

		stats->rx_bytes += cf->len;
	}
	stats->rx_packets++;

	netif_rx(skb);
}

static void mcba_usb_process_ka_usb(struct mcba_priv *priv,
				    struct mcba_usb_msg_ka_usb *msg)
{
	if (unlikely(priv->usb_ka_first_pass)) {
		netdev_info(priv->netdev, "PIC USB version %u.%u\n",
			    msg->soft_ver_major, msg->soft_ver_minor);

		priv->usb_ka_first_pass = false;
	}

	if (msg->termination_state == MCBA_VER_TERMINATION_ON)
		priv->can.termination = MCBA_TERMINATION_ENABLED;
	else
		priv->can.termination = MCBA_TERMINATION_DISABLED;
}

static u32 convert_can2host_bitrate(struct mcba_usb_msg_ka_can *msg)
{
	const u32 bitrate = get_unaligned_be16(&msg->can_bitrate);

	if ((bitrate == 33) || (bitrate == 83))
		return bitrate * 1000 + 333;
	else
		return bitrate * 1000;
}
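/* Worked example for convert_can2host_bitrate() above (derived from the
 * code, not from device documentation): the keep-alive message carries
 * the bitrate in whole kbit/s, so a bus running at 33333 bit/s is
 * reported as 33; the function restores the truncated 333 bit/s so that
 * the comparison against priv->can.bittiming.bitrate in
 * mcba_usb_process_ka_can() does not raise a false mismatch.
 */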
static void mcba_usb_process_ka_can(struct mcba_priv *priv,
				    struct mcba_usb_msg_ka_can *msg)
{
	if (unlikely(priv->can_ka_first_pass)) {
		netdev_info(priv->netdev, "PIC CAN version %u.%u\n",
			    msg->soft_ver_major, msg->soft_ver_minor);

		priv->can_ka_first_pass = false;
	}

	if (unlikely(priv->can_speed_check)) {
		const u32 bitrate = convert_can2host_bitrate(msg);

		priv->can_speed_check = false;

		if (bitrate != priv->can.bittiming.bitrate)
			netdev_err(priv->netdev,
				   "Wrong bitrate reported by the device (%u). Expected %u",
				   bitrate, priv->can.bittiming.bitrate);
	}

	priv->bec.txerr = msg->tx_err_cnt;
	priv->bec.rxerr = msg->rx_err_cnt;

	if (msg->tx_bus_off)
		priv->can.state = CAN_STATE_BUS_OFF;
	else if ((priv->bec.txerr > MCBA_CAN_STATE_ERR_PSV_TH) ||
		 (priv->bec.rxerr > MCBA_CAN_STATE_ERR_PSV_TH))
		priv->can.state = CAN_STATE_ERROR_PASSIVE;
	else if ((priv->bec.txerr > MCBA_CAN_STATE_WRN_TH) ||
		 (priv->bec.rxerr > MCBA_CAN_STATE_WRN_TH))
		priv->can.state = CAN_STATE_ERROR_WARNING;
}

static void mcba_usb_process_rx(struct mcba_priv *priv,
				struct mcba_usb_msg *msg)
{
	switch (msg->cmd_id) {
	case MBCA_CMD_I_AM_ALIVE_FROM_CAN:
		mcba_usb_process_ka_can(priv,
					(struct mcba_usb_msg_ka_can *)msg);
		break;

	case MBCA_CMD_I_AM_ALIVE_FROM_USB:
		mcba_usb_process_ka_usb(priv,
					(struct mcba_usb_msg_ka_usb *)msg);
		break;

	case MBCA_CMD_RECEIVE_MESSAGE:
		mcba_usb_process_can(priv, (struct mcba_usb_msg_can *)msg);
		break;

	case MBCA_CMD_NOTHING_TO_SEND:
		/* Side effect of communication between PIC_USB and PIC_CAN.
		 * PIC_CAN is telling us that it has nothing to send
		 */
		break;

	case MBCA_CMD_TRANSMIT_MESSAGE_RSP:
		/* Transmission response from the device containing timestamp */
		break;

	default:
		netdev_warn(priv->netdev, "Unsupported msg (0x%X)",
			    msg->cmd_id);
		break;
	}
}

/* Callback for reading data from device
 *
 * Check urb status, call read function and resubmit urb read operation.
 */
static void mcba_usb_read_bulk_callback(struct urb *urb)
{
	struct mcba_priv *priv = urb->context;
	struct net_device *netdev;
	int retval;
	int pos = 0;

	netdev = priv->netdev;

	if (!netif_device_present(netdev))
		return;

	switch (urb->status) {
	case 0: /* success */
		break;

	case -ENOENT:
	case -EPIPE:
	case -EPROTO:
	case -ESHUTDOWN:
		return;

	default:
		netdev_info(netdev, "Rx URB aborted (%d)\n", urb->status);

		goto resubmit_urb;
	}

	while (pos < urb->actual_length) {
		struct mcba_usb_msg *msg;

		if (pos + sizeof(struct mcba_usb_msg) > urb->actual_length) {
			netdev_err(priv->netdev, "format error\n");
			break;
		}

		msg = (struct mcba_usb_msg *)(urb->transfer_buffer + pos);
		mcba_usb_process_rx(priv, msg);

		pos += sizeof(struct mcba_usb_msg);
	}

resubmit_urb:

	usb_fill_bulk_urb(urb, priv->udev, priv->rx_pipe,
			  urb->transfer_buffer, MCBA_USB_RX_BUFF_SIZE,
			  mcba_usb_read_bulk_callback, priv);

	retval = usb_submit_urb(urb, GFP_ATOMIC);

	if (retval == -ENODEV)
		netif_device_detach(netdev);
	else if (retval)
		netdev_err(netdev, "failed resubmitting read bulk urb: %d\n",
			   retval);
}

/* Start USB device */
static int mcba_usb_start(struct mcba_priv *priv)
{
	struct net_device *netdev = priv->netdev;
	int err, i;

	mcba_init_ctx(priv);

	for (i = 0; i < MCBA_MAX_RX_URBS; i++) {
		struct urb *urb = NULL;
		u8 *buf;
		dma_addr_t buf_dma;

		/* create a URB, and a buffer for it */
		urb = usb_alloc_urb(0, GFP_KERNEL);
		if (!urb) {
			err = -ENOMEM;
			break;
		}

		buf = usb_alloc_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
					 GFP_KERNEL, &buf_dma);
		if (!buf) {
			netdev_err(netdev, "No memory left for USB buffer\n");
			usb_free_urb(urb);
			err = -ENOMEM;
			break;
		}

		urb->transfer_dma = buf_dma;

		usb_fill_bulk_urb(urb, priv->udev, priv->rx_pipe,
				  buf, MCBA_USB_RX_BUFF_SIZE,
				  mcba_usb_read_bulk_callback, priv);
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
		usb_anchor_urb(urb, &priv->rx_submitted);

		err = usb_submit_urb(urb, GFP_KERNEL);
		if (err) {
			usb_unanchor_urb(urb);
			usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE,
					  buf, buf_dma);
			usb_free_urb(urb);
			break;
		}

		priv->rxbuf[i] = buf;
		priv->rxbuf_dma[i] = buf_dma;

		/* Drop reference, USB core will take care of freeing it */
		usb_free_urb(urb);
	}
netdev_warn(netdev, "couldn't setup read URBs\n"); return err; } /* Warn if we've couldn't transmit all the URBs */ if (i < MCBA_MAX_RX_URBS) netdev_warn(netdev, "rx performance may be slow\n"); mcba_usb_xmit_read_fw_ver(priv, MCBA_VER_REQ_USB); mcba_usb_xmit_read_fw_ver(priv, MCBA_VER_REQ_CAN); return err; } /* Open USB device */ static int mcba_usb_open(struct net_device *netdev) { struct mcba_priv *priv = netdev_priv(netdev); int err; /* common open */ err = open_candev(netdev); if (err) return err; priv->can_speed_check = true; priv->can.state = CAN_STATE_ERROR_ACTIVE; netif_start_queue(netdev); return 0; } static void mcba_urb_unlink(struct mcba_priv *priv) { int i; usb_kill_anchored_urbs(&priv->rx_submitted); for (i = 0; i < MCBA_MAX_RX_URBS; ++i) usb_free_coherent(priv->udev, MCBA_USB_RX_BUFF_SIZE, priv->rxbuf[i], priv->rxbuf_dma[i]); usb_kill_anchored_urbs(&priv->tx_submitted); } /* Close USB device */ static int mcba_usb_close(struct net_device *netdev) { struct mcba_priv *priv = netdev_priv(netdev); priv->can.state = CAN_STATE_STOPPED; netif_stop_queue(netdev); /* Stop polling */ mcba_urb_unlink(priv); close_candev(netdev); return 0; } /* Set network device mode * * Maybe we should leave this function empty, because the device * set mode variable with open command. */ static int mcba_net_set_mode(struct net_device *netdev, enum can_mode mode) { return 0; } static int mcba_net_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct mcba_priv *priv = netdev_priv(netdev); bec->txerr = priv->bec.txerr; bec->rxerr = priv->bec.rxerr; return 0; } static const struct net_device_ops mcba_netdev_ops = { .ndo_open = mcba_usb_open, .ndo_stop = mcba_usb_close, .ndo_start_xmit = mcba_usb_start_xmit, }; static const struct ethtool_ops mcba_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; /* Microchip CANBUS has hardcoded bittiming values by default. 
static const struct net_device_ops mcba_netdev_ops = {
	.ndo_open = mcba_usb_open,
	.ndo_stop = mcba_usb_close,
	.ndo_start_xmit = mcba_usb_start_xmit,
};

static const struct ethtool_ops mcba_ethtool_ops = {
	.get_ts_info = ethtool_op_get_ts_info,
};

/* Microchip CANBUS has hardcoded bittiming values by default.
 * This function sends a request via USB to change the speed and align
 * bittiming values for presentation purposes only.
 */
static int mcba_net_set_bittiming(struct net_device *netdev)
{
	struct mcba_priv *priv = netdev_priv(netdev);
	const u16 bitrate_kbps = priv->can.bittiming.bitrate / 1000;

	mcba_usb_xmit_change_bitrate(priv, bitrate_kbps);

	return 0;
}

static int mcba_set_termination(struct net_device *netdev, u16 term)
{
	struct mcba_priv *priv = netdev_priv(netdev);
	struct mcba_usb_msg_termination usb_msg = {
		.cmd_id = MBCA_CMD_SETUP_TERMINATION_RESISTANCE
	};

	if (term == MCBA_TERMINATION_ENABLED)
		usb_msg.termination = MCBA_VER_TERMINATION_ON;
	else
		usb_msg.termination = MCBA_VER_TERMINATION_OFF;

	mcba_usb_xmit_cmd(priv, (struct mcba_usb_msg *)&usb_msg);

	return 0;
}

static int mcba_usb_probe(struct usb_interface *intf,
			  const struct usb_device_id *id)
{
	struct net_device *netdev;
	struct mcba_priv *priv;
	int err;
	struct usb_device *usbdev = interface_to_usbdev(intf);
	struct usb_endpoint_descriptor *in, *out;

	err = usb_find_common_endpoints(intf->cur_altsetting, &in, &out,
					NULL, NULL);
	if (err) {
		dev_err(&intf->dev, "Can't find endpoints\n");
		return err;
	}

	netdev = alloc_candev(sizeof(struct mcba_priv), MCBA_MAX_TX_URBS);
	if (!netdev) {
		dev_err(&intf->dev, "Couldn't alloc candev\n");
		return -ENOMEM;
	}

	priv = netdev_priv(netdev);

	priv->udev = usbdev;
	priv->netdev = netdev;
	priv->usb_ka_first_pass = true;
	priv->can_ka_first_pass = true;
	priv->can_speed_check = false;

	init_usb_anchor(&priv->rx_submitted);
	init_usb_anchor(&priv->tx_submitted);

	usb_set_intfdata(intf, priv);

	/* Init CAN device */
	priv->can.state = CAN_STATE_STOPPED;
	priv->can.termination_const = mcba_termination;
	priv->can.termination_const_cnt = ARRAY_SIZE(mcba_termination);
	priv->can.bitrate_const = mcba_bitrate;
	priv->can.bitrate_const_cnt = ARRAY_SIZE(mcba_bitrate);
	priv->can.do_set_termination = mcba_set_termination;
	priv->can.do_set_mode = mcba_net_set_mode;
	priv->can.do_get_berr_counter = mcba_net_get_berr_counter;
	priv->can.do_set_bittiming = mcba_net_set_bittiming;

	netdev->netdev_ops = &mcba_netdev_ops;
	netdev->ethtool_ops = &mcba_ethtool_ops;

	netdev->flags |= IFF_ECHO; /* we support local echo */

	SET_NETDEV_DEV(netdev, &intf->dev);

	err = register_candev(netdev);
	if (err) {
		netdev_err(netdev, "couldn't register CAN device: %d\n", err);
		goto cleanup_free_candev;
	}

	priv->rx_pipe = usb_rcvbulkpipe(priv->udev, in->bEndpointAddress);
	priv->tx_pipe = usb_sndbulkpipe(priv->udev, out->bEndpointAddress);

	/* Start USB dev only if we have successfully registered CAN device */
	err = mcba_usb_start(priv);
	if (err) {
		if (err == -ENODEV)
			netif_device_detach(priv->netdev);

		netdev_warn(netdev, "couldn't start device: %d\n", err);

		goto cleanup_unregister_candev;
	}

	dev_info(&intf->dev, "Microchip CAN BUS Analyzer connected\n");

	return 0;

cleanup_unregister_candev:
	unregister_candev(priv->netdev);

cleanup_free_candev:
	free_candev(netdev);

	return err;
}

/* Called by the usb core when driver is unloaded or device is removed */
static void mcba_usb_disconnect(struct usb_interface *intf)
{
	struct mcba_priv *priv = usb_get_intfdata(intf);

	usb_set_intfdata(intf, NULL);

	netdev_info(priv->netdev, "device disconnected\n");

	unregister_candev(priv->netdev);
	mcba_urb_unlink(priv);
	free_candev(priv->netdev);
}

static struct usb_driver mcba_usb_driver = {
	.name = MCBA_MODULE_NAME,
	.probe = mcba_usb_probe,
	.disconnect = mcba_usb_disconnect,
	.id_table = mcba_usb_table,
};

module_usb_driver(mcba_usb_driver);
MODULE_AUTHOR("Remigiusz Kołłątaj <[email protected]>");
MODULE_DESCRIPTION("SocketCAN driver for Microchip CAN BUS Analyzer Tool");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/can/usb/mcba_usb.c
// SPDX-License-Identifier: GPL-2.0-only
/* CAN driver for Geschwister Schneider USB/CAN devices
 * and bytewerk.org candleLight USB CAN interfaces.
 *
 * Copyright (C) 2013-2016 Geschwister Schneider Technologie-,
 * Entwicklungs- und Vertriebs UG (Haftungsbeschränkt).
 * Copyright (C) 2016 Hubert Denkmair
 * Copyright (c) 2023 Pengutronix, Marc Kleine-Budde <[email protected]>
 *
 * Many thanks to all socketcan devs!
 */

#include <linux/bitfield.h>
#include <linux/clocksource.h>
#include <linux/ethtool.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/signal.h>
#include <linux/timecounter.h>
#include <linux/units.h>
#include <linux/usb.h>
#include <linux/workqueue.h>

#include <linux/can.h>
#include <linux/can/dev.h>
#include <linux/can/error.h>
#include <linux/can/rx-offload.h>

/* Device specific constants */
#define USB_GS_USB_1_VENDOR_ID 0x1d50
#define USB_GS_USB_1_PRODUCT_ID 0x606f

#define USB_CANDLELIGHT_VENDOR_ID 0x1209
#define USB_CANDLELIGHT_PRODUCT_ID 0x2323

#define USB_CES_CANEXT_FD_VENDOR_ID 0x1cd2
#define USB_CES_CANEXT_FD_PRODUCT_ID 0x606f

#define USB_ABE_CANDEBUGGER_FD_VENDOR_ID 0x16d0
#define USB_ABE_CANDEBUGGER_FD_PRODUCT_ID 0x10b8

#define GS_USB_ENDPOINT_IN 1
#define GS_USB_ENDPOINT_OUT 2

/* Timestamp 32 bit timer runs at 1 MHz (1 µs tick). Worker accounts
 * for timer overflow (will be after ~71 minutes)
 */
#define GS_USB_TIMESTAMP_TIMER_HZ (1 * HZ_PER_MHZ)
#define GS_USB_TIMESTAMP_WORK_DELAY_SEC 1800
static_assert(GS_USB_TIMESTAMP_WORK_DELAY_SEC <
	      CYCLECOUNTER_MASK(32) / GS_USB_TIMESTAMP_TIMER_HZ / 2);

/* Device specific constants */
enum gs_usb_breq {
	GS_USB_BREQ_HOST_FORMAT = 0,
	GS_USB_BREQ_BITTIMING,
	GS_USB_BREQ_MODE,
	GS_USB_BREQ_BERR,
	GS_USB_BREQ_BT_CONST,
	GS_USB_BREQ_DEVICE_CONFIG,
	GS_USB_BREQ_TIMESTAMP,
	GS_USB_BREQ_IDENTIFY,
	GS_USB_BREQ_GET_USER_ID,
	GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING = GS_USB_BREQ_GET_USER_ID,
	GS_USB_BREQ_SET_USER_ID,
	GS_USB_BREQ_DATA_BITTIMING,
	GS_USB_BREQ_BT_CONST_EXT,
	GS_USB_BREQ_SET_TERMINATION,
	GS_USB_BREQ_GET_TERMINATION,
	GS_USB_BREQ_GET_STATE,
};

enum gs_can_mode {
	/* reset a channel. turns it off */
	GS_CAN_MODE_RESET = 0,
	/* starts a channel */
	GS_CAN_MODE_START
};

enum gs_can_state {
	GS_CAN_STATE_ERROR_ACTIVE = 0,
	GS_CAN_STATE_ERROR_WARNING,
	GS_CAN_STATE_ERROR_PASSIVE,
	GS_CAN_STATE_BUS_OFF,
	GS_CAN_STATE_STOPPED,
	GS_CAN_STATE_SLEEPING
};

enum gs_can_identify_mode {
	GS_CAN_IDENTIFY_OFF = 0,
	GS_CAN_IDENTIFY_ON
};

enum gs_can_termination_state {
	GS_CAN_TERMINATION_STATE_OFF = 0,
	GS_CAN_TERMINATION_STATE_ON
};

#define GS_USB_TERMINATION_DISABLED CAN_TERMINATION_DISABLED
#define GS_USB_TERMINATION_ENABLED 120

/* data types passed between host and device */

/* The firmware on the original USB2CAN by Geschwister Schneider
 * Technologie Entwicklungs- und Vertriebs UG exchanges all data
 * between the host and the device in host byte order. This is done
 * with the struct gs_host_config::byte_order member, which is sent
 * first to indicate the desired byte order.
 *
 * The widely used open source firmware candleLight doesn't support
 * this feature and exchanges the data in little endian byte order.
 */
struct gs_host_config {
	__le32 byte_order;
} __packed;

struct gs_device_config {
	u8 reserved1;
	u8 reserved2;
	u8 reserved3;
	u8 icount;
	__le32 sw_version;
	__le32 hw_version;
} __packed;

#define GS_CAN_MODE_NORMAL 0
#define GS_CAN_MODE_LISTEN_ONLY BIT(0)
#define GS_CAN_MODE_LOOP_BACK BIT(1)
#define GS_CAN_MODE_TRIPLE_SAMPLE BIT(2)
#define GS_CAN_MODE_ONE_SHOT BIT(3)
#define GS_CAN_MODE_HW_TIMESTAMP BIT(4)
/* GS_CAN_FEATURE_IDENTIFY BIT(5) */
/* GS_CAN_FEATURE_USER_ID BIT(6) */
#define GS_CAN_MODE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
#define GS_CAN_MODE_FD BIT(8)
/* GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9) */
/* GS_CAN_FEATURE_BT_CONST_EXT BIT(10) */
/* GS_CAN_FEATURE_TERMINATION BIT(11) */
#define GS_CAN_MODE_BERR_REPORTING BIT(12)
/* GS_CAN_FEATURE_GET_STATE BIT(13) */

struct gs_device_mode {
	__le32 mode;
	__le32 flags;
} __packed;

struct gs_device_state {
	__le32 state;
	__le32 rxerr;
	__le32 txerr;
} __packed;

struct gs_device_bittiming {
	__le32 prop_seg;
	__le32 phase_seg1;
	__le32 phase_seg2;
	__le32 sjw;
	__le32 brp;
} __packed;

struct gs_identify_mode {
	__le32 mode;
} __packed;

struct gs_device_termination_state {
	__le32 state;
} __packed;

#define GS_CAN_FEATURE_LISTEN_ONLY BIT(0)
#define GS_CAN_FEATURE_LOOP_BACK BIT(1)
#define GS_CAN_FEATURE_TRIPLE_SAMPLE BIT(2)
#define GS_CAN_FEATURE_ONE_SHOT BIT(3)
#define GS_CAN_FEATURE_HW_TIMESTAMP BIT(4)
#define GS_CAN_FEATURE_IDENTIFY BIT(5)
#define GS_CAN_FEATURE_USER_ID BIT(6)
#define GS_CAN_FEATURE_PAD_PKTS_TO_MAX_PKT_SIZE BIT(7)
#define GS_CAN_FEATURE_FD BIT(8)
#define GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX BIT(9)
#define GS_CAN_FEATURE_BT_CONST_EXT BIT(10)
#define GS_CAN_FEATURE_TERMINATION BIT(11)
#define GS_CAN_FEATURE_BERR_REPORTING BIT(12)
#define GS_CAN_FEATURE_GET_STATE BIT(13)
#define GS_CAN_FEATURE_MASK GENMASK(13, 0)

/* internal quirks - keep in GS_CAN_FEATURE space for now */

/* CANtact Pro original firmware:
 * BREQ DATA_BITTIMING overlaps with GET_USER_ID
 */
#define GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO BIT(31)

struct gs_device_bt_const {
	__le32 feature;
	__le32 fclk_can;
	__le32 tseg1_min;
	__le32 tseg1_max;
	__le32 tseg2_min;
	__le32 tseg2_max;
	__le32 sjw_max;
	__le32 brp_min;
	__le32 brp_max;
	__le32 brp_inc;
} __packed;

struct gs_device_bt_const_extended {
	__le32 feature;
	__le32 fclk_can;
	__le32 tseg1_min;
	__le32 tseg1_max;
	__le32 tseg2_min;
	__le32 tseg2_max;
	__le32 sjw_max;
	__le32 brp_min;
	__le32 brp_max;
	__le32 brp_inc;

	__le32 dtseg1_min;
	__le32 dtseg1_max;
	__le32 dtseg2_min;
	__le32 dtseg2_max;
	__le32 dsjw_max;
	__le32 dbrp_min;
	__le32 dbrp_max;
	__le32 dbrp_inc;
} __packed;

#define GS_CAN_FLAG_OVERFLOW BIT(0)
#define GS_CAN_FLAG_FD BIT(1)
#define GS_CAN_FLAG_BRS BIT(2)
#define GS_CAN_FLAG_ESI BIT(3)

struct classic_can {
	u8 data[8];
} __packed;

struct classic_can_ts {
	u8 data[8];
	__le32 timestamp_us;
} __packed;

struct classic_can_quirk {
	u8 data[8];
	u8 quirk;
} __packed;

struct canfd {
	u8 data[64];
} __packed;

struct canfd_ts {
	u8 data[64];
	__le32 timestamp_us;
} __packed;

struct canfd_quirk {
	u8 data[64];
	u8 quirk;
} __packed;

struct gs_host_frame {
	u32 echo_id;
	__le32 can_id;

	u8 can_dlc;
	u8 channel;
	u8 flags;
	u8 reserved;

	union {
		DECLARE_FLEX_ARRAY(struct classic_can, classic_can);
		DECLARE_FLEX_ARRAY(struct classic_can_ts, classic_can_ts);
		DECLARE_FLEX_ARRAY(struct classic_can_quirk, classic_can_quirk);
		DECLARE_FLEX_ARRAY(struct canfd, canfd);
		DECLARE_FLEX_ARRAY(struct canfd_ts, canfd_ts);
		DECLARE_FLEX_ARRAY(struct canfd_quirk, canfd_quirk);
	};
} __packed;
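/* Size sanity check for the frame layouts above (computed from the
 * struct definitions; all of them are __packed, so there is no
 * padding): the fixed part of struct gs_host_frame is 12 bytes, giving
 *
 *   struct_size(hf, classic_can, 1)    = 12 +  8 = 20 bytes
 *   struct_size(hf, classic_can_ts, 1) = 12 + 12 = 24 bytes
 *   struct_size(hf, canfd, 1)          = 12 + 64 = 76 bytes
 *   struct_size(hf, canfd_ts, 1)       = 12 + 68 = 80 bytes
 *
 * which are the hf_size_tx/hf_size_rx values computed in gs_can_open()
 * and gs_usb_probe().
 */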
/* The GS USB devices make use of the same flags and masks as in
 * linux/can.h and linux/can/error.h, and no additional mapping is
 * necessary.
 */

/* Only send a max of GS_MAX_TX_URBS frames per channel at a time. */
#define GS_MAX_TX_URBS 10
/* Only launch a max of GS_MAX_RX_URBS usb requests at a time. */
#define GS_MAX_RX_URBS 30
#define GS_NAPI_WEIGHT 32

/* Maximum number of interfaces the driver supports per device.
 * Current hardware only supports 3 interfaces. The future may vary.
 */
#define GS_MAX_INTF 3

struct gs_tx_context {
	struct gs_can *dev;
	unsigned int echo_id;
};

struct gs_can {
	struct can_priv can; /* must be the first member */

	struct can_rx_offload offload;
	struct gs_usb *parent;

	struct net_device *netdev;
	struct usb_device *udev;

	struct can_bittiming_const bt_const, data_bt_const;
	unsigned int channel; /* channel number */

	u32 feature;
	unsigned int hf_size_tx;

	/* This lock prevents a race condition between xmit and receive. */
	spinlock_t tx_ctx_lock;
	struct gs_tx_context tx_context[GS_MAX_TX_URBS];

	struct usb_anchor tx_submitted;
	atomic_t active_tx_urbs;
};

/* usb interface struct */
struct gs_usb {
	struct gs_can *canch[GS_MAX_INTF];
	struct usb_anchor rx_submitted;
	struct usb_device *udev;

	/* time counter for hardware timestamps */
	struct cyclecounter cc;
	struct timecounter tc;
	spinlock_t tc_lock; /* spinlock to guard access to tc->cycle_last */
	struct delayed_work timestamp;

	unsigned int hf_size_rx;
	u8 active_channels;
};

/* 'allocate' a tx context.
 * returns a valid tx context or NULL if there is no space.
 */
static struct gs_tx_context *gs_alloc_tx_context(struct gs_can *dev)
{
	int i = 0;
	unsigned long flags;

	spin_lock_irqsave(&dev->tx_ctx_lock, flags);

	for (; i < GS_MAX_TX_URBS; i++) {
		if (dev->tx_context[i].echo_id == GS_MAX_TX_URBS) {
			dev->tx_context[i].echo_id = i;
			spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
			return &dev->tx_context[i];
		}
	}

	spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
	return NULL;
}

/* releases a tx context */
static void gs_free_tx_context(struct gs_tx_context *txc)
{
	txc->echo_id = GS_MAX_TX_URBS;
}

/* Get a tx context by id. */
static struct gs_tx_context *gs_get_tx_context(struct gs_can *dev,
					       unsigned int id)
{
	unsigned long flags;

	if (id < GS_MAX_TX_URBS) {
		spin_lock_irqsave(&dev->tx_ctx_lock, flags);
		if (dev->tx_context[id].echo_id == id) {
			spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
			return &dev->tx_context[id];
		}
		spin_unlock_irqrestore(&dev->tx_ctx_lock, flags);
	}
	return NULL;
}

static int gs_cmd_reset(struct gs_can *dev)
{
	struct gs_device_mode dm = {
		.mode = GS_CAN_MODE_RESET,
	};

	return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE,
				    USB_DIR_OUT | USB_TYPE_VENDOR |
				    USB_RECIP_INTERFACE,
				    dev->channel, 0, &dm, sizeof(dm), 1000,
				    GFP_KERNEL);
}

static inline int gs_usb_get_timestamp(const struct gs_usb *parent,
				       u32 *timestamp_p)
{
	__le32 timestamp;
	int rc;

	rc = usb_control_msg_recv(parent->udev, 0, GS_USB_BREQ_TIMESTAMP,
				  USB_DIR_IN | USB_TYPE_VENDOR |
				  USB_RECIP_INTERFACE,
				  0, 0, &timestamp, sizeof(timestamp),
				  USB_CTRL_GET_TIMEOUT,
				  GFP_KERNEL);
	if (rc)
		return rc;

	*timestamp_p = le32_to_cpu(timestamp);

	return 0;
}
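/* Arithmetic behind the cyclecounter setup in gs_usb_timestamp_init()
 * below, worked through for the 1 MHz device timer (derived from the
 * code and the kernel helpers it calls): one tick is
 * NSEC_PER_SEC / 1 MHz = 1000 ns, bits_per(1000) = 10, so shift becomes
 * 32 - 10 = 22 and mult = clocksource_hz2mult(1 MHz, 22) = 1000 << 22,
 * which still fits into the 32-bit mult field.
 */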
static u64 gs_usb_timestamp_read(const struct cyclecounter *cc) __must_hold(&dev->tc_lock)
{
	struct gs_usb *parent = container_of(cc, struct gs_usb, cc);
	u32 timestamp = 0;
	int err;

	lockdep_assert_held(&parent->tc_lock);

	/* drop lock for synchronous USB transfer */
	spin_unlock_bh(&parent->tc_lock);
	err = gs_usb_get_timestamp(parent, &timestamp);
	spin_lock_bh(&parent->tc_lock);
	if (err)
		dev_err(&parent->udev->dev,
			"Error %d while reading timestamp. HW timestamps may be inaccurate.",
			err);

	return timestamp;
}

static void gs_usb_timestamp_work(struct work_struct *work)
{
	struct delayed_work *delayed_work = to_delayed_work(work);
	struct gs_usb *parent;

	parent = container_of(delayed_work, struct gs_usb, timestamp);
	spin_lock_bh(&parent->tc_lock);
	timecounter_read(&parent->tc);
	spin_unlock_bh(&parent->tc_lock);

	schedule_delayed_work(&parent->timestamp,
			      GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
}

static void gs_usb_skb_set_timestamp(struct gs_can *dev,
				     struct sk_buff *skb, u32 timestamp)
{
	struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb);
	struct gs_usb *parent = dev->parent;
	u64 ns;

	spin_lock_bh(&parent->tc_lock);
	ns = timecounter_cyc2time(&parent->tc, timestamp);
	spin_unlock_bh(&parent->tc_lock);

	hwtstamps->hwtstamp = ns_to_ktime(ns);
}

static void gs_usb_timestamp_init(struct gs_usb *parent)
{
	struct cyclecounter *cc = &parent->cc;

	cc->read = gs_usb_timestamp_read;
	cc->mask = CYCLECOUNTER_MASK(32);
	cc->shift = 32 - bits_per(NSEC_PER_SEC / GS_USB_TIMESTAMP_TIMER_HZ);
	cc->mult = clocksource_hz2mult(GS_USB_TIMESTAMP_TIMER_HZ, cc->shift);

	spin_lock_init(&parent->tc_lock);
	spin_lock_bh(&parent->tc_lock);
	timecounter_init(&parent->tc, &parent->cc, ktime_get_real_ns());
	spin_unlock_bh(&parent->tc_lock);

	INIT_DELAYED_WORK(&parent->timestamp, gs_usb_timestamp_work);
	schedule_delayed_work(&parent->timestamp,
			      GS_USB_TIMESTAMP_WORK_DELAY_SEC * HZ);
}

static void gs_usb_timestamp_stop(struct gs_usb *parent)
{
	cancel_delayed_work_sync(&parent->timestamp);
}

static void gs_update_state(struct gs_can *dev, struct can_frame *cf)
{
	struct can_device_stats *can_stats = &dev->can.can_stats;

	if (cf->can_id & CAN_ERR_RESTARTED) {
		dev->can.state = CAN_STATE_ERROR_ACTIVE;
		can_stats->restarts++;
	} else if (cf->can_id & CAN_ERR_BUSOFF) {
		dev->can.state = CAN_STATE_BUS_OFF;
		can_stats->bus_off++;
	} else if (cf->can_id & CAN_ERR_CRTL) {
		if ((cf->data[1] & CAN_ERR_CRTL_TX_WARNING) ||
		    (cf->data[1] & CAN_ERR_CRTL_RX_WARNING)) {
			dev->can.state = CAN_STATE_ERROR_WARNING;
			can_stats->error_warning++;
		} else if ((cf->data[1] & CAN_ERR_CRTL_TX_PASSIVE) ||
			   (cf->data[1] & CAN_ERR_CRTL_RX_PASSIVE)) {
			dev->can.state = CAN_STATE_ERROR_PASSIVE;
			can_stats->error_passive++;
		} else {
			dev->can.state = CAN_STATE_ERROR_ACTIVE;
		}
	}
}

static u32 gs_usb_set_timestamp(struct gs_can *dev, struct sk_buff *skb,
				const struct gs_host_frame *hf)
{
	u32 timestamp;

	if (hf->flags & GS_CAN_FLAG_FD)
		timestamp = le32_to_cpu(hf->canfd_ts->timestamp_us);
	else
		timestamp = le32_to_cpu(hf->classic_can_ts->timestamp_us);

	if (skb)
		gs_usb_skb_set_timestamp(dev, skb, timestamp);

	return timestamp;
}

static void gs_usb_rx_offload(struct gs_can *dev, struct sk_buff *skb,
			      const struct gs_host_frame *hf)
{
	struct can_rx_offload *offload = &dev->offload;
	int rc;

	if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) {
		const u32 ts = gs_usb_set_timestamp(dev, skb, hf);

		rc = can_rx_offload_queue_timestamp(offload, skb, ts);
	} else {
		rc = can_rx_offload_queue_tail(offload, skb);
	}

	if (rc)
		dev->netdev->stats.rx_fifo_errors++;
}

static unsigned int
gs_usb_get_echo_skb(struct gs_can *dev, struct sk_buff *skb,
		    const struct gs_host_frame *hf)
{
	struct can_rx_offload *offload = &dev->offload;
	const u32 echo_id = hf->echo_id;
	unsigned int len;

	if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP) {
		const u32 ts = gs_usb_set_timestamp(dev, skb, hf);

		len = can_rx_offload_get_echo_skb_queue_timestamp(offload,
								  echo_id, ts,
								  NULL);
	} else {
		len = can_rx_offload_get_echo_skb_queue_tail(offload,
							     echo_id, NULL);
	}

	return len;
}
static void gs_usb_receive_bulk_callback(struct urb *urb)
{
	struct gs_usb *parent = urb->context;
	struct gs_can *dev;
	struct net_device *netdev;
	int rc;
	struct net_device_stats *stats;
	struct gs_host_frame *hf = urb->transfer_buffer;
	struct gs_tx_context *txc;
	struct can_frame *cf;
	struct canfd_frame *cfd;
	struct sk_buff *skb;

	BUG_ON(!parent);

	switch (urb->status) {
	case 0: /* success */
		break;
	case -ENOENT:
	case -ESHUTDOWN:
		return;
	default:
		/* do not resubmit aborted urbs. eg: when device goes down */
		return;
	}

	/* device reports out of range channel id */
	if (hf->channel >= GS_MAX_INTF)
		goto device_detach;

	dev = parent->canch[hf->channel];

	netdev = dev->netdev;
	stats = &netdev->stats;

	if (!netif_device_present(netdev))
		return;

	if (!netif_running(netdev))
		goto resubmit_urb;

	if (hf->echo_id == -1) { /* normal rx */
		if (hf->flags & GS_CAN_FLAG_FD) {
			skb = alloc_canfd_skb(netdev, &cfd);
			if (!skb)
				return;

			cfd->can_id = le32_to_cpu(hf->can_id);
			cfd->len = can_fd_dlc2len(hf->can_dlc);
			if (hf->flags & GS_CAN_FLAG_BRS)
				cfd->flags |= CANFD_BRS;
			if (hf->flags & GS_CAN_FLAG_ESI)
				cfd->flags |= CANFD_ESI;

			memcpy(cfd->data, hf->canfd->data, cfd->len);
		} else {
			skb = alloc_can_skb(netdev, &cf);
			if (!skb)
				return;

			cf->can_id = le32_to_cpu(hf->can_id);
			can_frame_set_cc_len(cf, hf->can_dlc,
					     dev->can.ctrlmode);

			memcpy(cf->data, hf->classic_can->data, 8);

			/* ERROR frames tell us information about the controller */
			if (le32_to_cpu(hf->can_id) & CAN_ERR_FLAG)
				gs_update_state(dev, cf);
		}

		gs_usb_rx_offload(dev, skb, hf);
	} else { /* echo_id == hf->echo_id */
		if (hf->echo_id >= GS_MAX_TX_URBS) {
			netdev_err(netdev,
				   "Unexpected out of range echo id %u\n",
				   hf->echo_id);
			goto resubmit_urb;
		}

		txc = gs_get_tx_context(dev, hf->echo_id);

		/* bad devices send bad echo_ids. */
		if (!txc) {
			netdev_err(netdev,
				   "Unexpected unused echo id %u\n",
				   hf->echo_id);
			goto resubmit_urb;
		}

		skb = dev->can.echo_skb[hf->echo_id];
		stats->tx_packets++;
		stats->tx_bytes += gs_usb_get_echo_skb(dev, skb, hf);

		gs_free_tx_context(txc);

		atomic_dec(&dev->active_tx_urbs);

		netif_wake_queue(netdev);
	}

	if (hf->flags & GS_CAN_FLAG_OVERFLOW) {
		stats->rx_over_errors++;
		stats->rx_errors++;

		skb = alloc_can_err_skb(netdev, &cf);
		if (!skb)
			goto resubmit_urb;

		cf->can_id |= CAN_ERR_CRTL;
		cf->len = CAN_ERR_DLC;
		cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;

		gs_usb_rx_offload(dev, skb, hf);
	}

	can_rx_offload_irq_finish(&dev->offload);

resubmit_urb:
	usb_fill_bulk_urb(urb, parent->udev,
			  usb_rcvbulkpipe(parent->udev, GS_USB_ENDPOINT_IN),
			  hf, dev->parent->hf_size_rx,
			  gs_usb_receive_bulk_callback, parent);

	rc = usb_submit_urb(urb, GFP_ATOMIC);

	/* On USB failure, take down all interfaces */
	if (rc == -ENODEV) {
device_detach:
		for (rc = 0; rc < GS_MAX_INTF; rc++) {
			if (parent->canch[rc])
				netif_device_detach(parent->canch[rc]->netdev);
		}
	}
}

static int gs_usb_set_bittiming(struct net_device *netdev)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct can_bittiming *bt = &dev->can.bittiming;
	struct gs_device_bittiming dbt = {
		.prop_seg = cpu_to_le32(bt->prop_seg),
		.phase_seg1 = cpu_to_le32(bt->phase_seg1),
		.phase_seg2 = cpu_to_le32(bt->phase_seg2),
		.sjw = cpu_to_le32(bt->sjw),
		.brp = cpu_to_le32(bt->brp),
	};

	/* request bit timings */
	return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_BITTIMING,
				    USB_DIR_OUT | USB_TYPE_VENDOR |
				    USB_RECIP_INTERFACE,
				    dev->channel, 0, &dbt, sizeof(dbt), 1000,
				    GFP_KERNEL);
}
static int gs_usb_set_data_bittiming(struct net_device *netdev)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct can_bittiming *bt = &dev->can.data_bittiming;
	struct gs_device_bittiming dbt = {
		.prop_seg = cpu_to_le32(bt->prop_seg),
		.phase_seg1 = cpu_to_le32(bt->phase_seg1),
		.phase_seg2 = cpu_to_le32(bt->phase_seg2),
		.sjw = cpu_to_le32(bt->sjw),
		.brp = cpu_to_le32(bt->brp),
	};
	u8 request = GS_USB_BREQ_DATA_BITTIMING;

	if (dev->feature & GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO)
		request = GS_USB_BREQ_QUIRK_CANTACT_PRO_DATA_BITTIMING;

	/* request data bit timings */
	return usb_control_msg_send(dev->udev, 0, request,
				    USB_DIR_OUT | USB_TYPE_VENDOR |
				    USB_RECIP_INTERFACE,
				    dev->channel, 0, &dbt, sizeof(dbt), 1000,
				    GFP_KERNEL);
}

static void gs_usb_xmit_callback(struct urb *urb)
{
	struct gs_tx_context *txc = urb->context;
	struct gs_can *dev = txc->dev;
	struct net_device *netdev = dev->netdev;

	if (urb->status)
		netdev_info(netdev, "usb xmit fail %u\n", txc->echo_id);
}

static netdev_tx_t gs_can_start_xmit(struct sk_buff *skb,
				     struct net_device *netdev)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct net_device_stats *stats = &dev->netdev->stats;
	struct urb *urb;
	struct gs_host_frame *hf;
	struct can_frame *cf;
	struct canfd_frame *cfd;
	int rc;
	unsigned int idx;
	struct gs_tx_context *txc;

	if (can_dev_dropped_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* find an empty context to keep track of transmission */
	txc = gs_alloc_tx_context(dev);
	if (!txc)
		return NETDEV_TX_BUSY;

	/* create a URB, and a buffer for it */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb)
		goto nomem_urb;

	hf = kmalloc(dev->hf_size_tx, GFP_ATOMIC);
	if (!hf)
		goto nomem_hf;

	idx = txc->echo_id;

	if (idx >= GS_MAX_TX_URBS) {
		netdev_err(netdev, "Invalid tx context %u\n", idx);
		goto badidx;
	}

	hf->echo_id = idx;
	hf->channel = dev->channel;
	hf->flags = 0;
	hf->reserved = 0;

	if (can_is_canfd_skb(skb)) {
		cfd = (struct canfd_frame *)skb->data;

		hf->can_id = cpu_to_le32(cfd->can_id);
		hf->can_dlc = can_fd_len2dlc(cfd->len);
		hf->flags |= GS_CAN_FLAG_FD;
		if (cfd->flags & CANFD_BRS)
			hf->flags |= GS_CAN_FLAG_BRS;
		if (cfd->flags & CANFD_ESI)
			hf->flags |= GS_CAN_FLAG_ESI;

		memcpy(hf->canfd->data, cfd->data, cfd->len);
	} else {
		cf = (struct can_frame *)skb->data;

		hf->can_id = cpu_to_le32(cf->can_id);
		hf->can_dlc = can_get_cc_dlc(cf, dev->can.ctrlmode);

		memcpy(hf->classic_can->data, cf->data, cf->len);
	}

	usb_fill_bulk_urb(urb, dev->udev,
			  usb_sndbulkpipe(dev->udev, GS_USB_ENDPOINT_OUT),
			  hf, dev->hf_size_tx,
			  gs_usb_xmit_callback, txc);

	urb->transfer_flags |= URB_FREE_BUFFER;
	usb_anchor_urb(urb, &dev->tx_submitted);

	can_put_echo_skb(skb, netdev, idx, 0);

	atomic_inc(&dev->active_tx_urbs);

	rc = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(rc)) { /* usb send failed */
		atomic_dec(&dev->active_tx_urbs);

		can_free_echo_skb(netdev, idx, NULL);
		gs_free_tx_context(txc);

		usb_unanchor_urb(urb);

		if (rc == -ENODEV) {
			netif_device_detach(netdev);
		} else {
			netdev_err(netdev, "usb_submit failed (err=%d)\n", rc);
			stats->tx_dropped++;
		}
	} else {
		/* Slow down tx path */
		if (atomic_read(&dev->active_tx_urbs) >= GS_MAX_TX_URBS)
			netif_stop_queue(netdev);
	}

	/* let usb core take care of this urb */
	usb_free_urb(urb);

	return NETDEV_TX_OK;

badidx:
	kfree(hf);
nomem_hf:
	usb_free_urb(urb);

nomem_urb:
	gs_free_tx_context(txc);
	dev_kfree_skb(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}
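/* TX flow summary for gs_can_start_xmit() above (a summary of the code,
 * not of device documentation): the skb is parked with
 * can_put_echo_skb() under the echo_id of the allocated tx context, the
 * URB owns its buffer via URB_FREE_BUFFER, and the echo skb is only
 * looped back (and the context freed) once the device returns the same
 * echo_id in gs_usb_receive_bulk_callback().
 */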
static int gs_can_open(struct net_device *netdev)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct gs_usb *parent = dev->parent;
	struct gs_device_mode dm = {
		.mode = cpu_to_le32(GS_CAN_MODE_START),
	};
	struct gs_host_frame *hf;
	struct urb *urb = NULL;
	u32 ctrlmode;
	u32 flags = 0;
	int rc, i;

	rc = open_candev(netdev);
	if (rc)
		return rc;

	ctrlmode = dev->can.ctrlmode;
	if (ctrlmode & CAN_CTRLMODE_FD) {
		if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
			dev->hf_size_tx = struct_size(hf, canfd_quirk, 1);
		else
			dev->hf_size_tx = struct_size(hf, canfd, 1);
	} else {
		if (dev->feature & GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX)
			dev->hf_size_tx = struct_size(hf, classic_can_quirk, 1);
		else
			dev->hf_size_tx = struct_size(hf, classic_can, 1);
	}

	can_rx_offload_enable(&dev->offload);

	if (!parent->active_channels) {
		if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
			gs_usb_timestamp_init(parent);

		for (i = 0; i < GS_MAX_RX_URBS; i++) {
			u8 *buf;

			/* alloc rx urb */
			urb = usb_alloc_urb(0, GFP_KERNEL);
			if (!urb) {
				rc = -ENOMEM;
				goto out_usb_kill_anchored_urbs;
			}

			/* alloc rx buffer */
			buf = kmalloc(dev->parent->hf_size_rx, GFP_KERNEL);
			if (!buf) {
				rc = -ENOMEM;
				goto out_usb_free_urb;
			}

			/* fill, anchor, and submit rx urb */
			usb_fill_bulk_urb(urb, dev->udev,
					  usb_rcvbulkpipe(dev->udev,
							  GS_USB_ENDPOINT_IN),
					  buf, dev->parent->hf_size_rx,
					  gs_usb_receive_bulk_callback, parent);
			urb->transfer_flags |= URB_FREE_BUFFER;

			usb_anchor_urb(urb, &parent->rx_submitted);

			rc = usb_submit_urb(urb, GFP_KERNEL);
			if (rc) {
				if (rc == -ENODEV)
					netif_device_detach(dev->netdev);

				netdev_err(netdev,
					   "usb_submit_urb() failed, error %pe\n",
					   ERR_PTR(rc));

				goto out_usb_unanchor_urb;
			}

			/* Drop reference,
			 * USB core will take care of freeing it
			 */
			usb_free_urb(urb);
		}
	}

	/* flags */
	if (ctrlmode & CAN_CTRLMODE_LOOPBACK)
		flags |= GS_CAN_MODE_LOOP_BACK;

	if (ctrlmode & CAN_CTRLMODE_LISTENONLY)
		flags |= GS_CAN_MODE_LISTEN_ONLY;

	if (ctrlmode & CAN_CTRLMODE_3_SAMPLES)
		flags |= GS_CAN_MODE_TRIPLE_SAMPLE;

	if (ctrlmode & CAN_CTRLMODE_ONE_SHOT)
		flags |= GS_CAN_MODE_ONE_SHOT;

	if (ctrlmode & CAN_CTRLMODE_BERR_REPORTING)
		flags |= GS_CAN_MODE_BERR_REPORTING;

	if (ctrlmode & CAN_CTRLMODE_FD)
		flags |= GS_CAN_MODE_FD;

	/* if hardware supports timestamps, enable it */
	if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
		flags |= GS_CAN_MODE_HW_TIMESTAMP;

	/* finally start device */
	dev->can.state = CAN_STATE_ERROR_ACTIVE;
	dm.flags = cpu_to_le32(flags);
	rc = usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_MODE,
				  USB_DIR_OUT | USB_TYPE_VENDOR |
				  USB_RECIP_INTERFACE,
				  dev->channel, 0, &dm, sizeof(dm), 1000,
				  GFP_KERNEL);
	if (rc) {
		netdev_err(netdev, "Couldn't start device (err=%d)\n", rc);
		dev->can.state = CAN_STATE_STOPPED;

		goto out_usb_kill_anchored_urbs;
	}

	parent->active_channels++;
	if (!(dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY))
		netif_start_queue(netdev);

	return 0;

out_usb_unanchor_urb:
	usb_unanchor_urb(urb);
out_usb_free_urb:
	usb_free_urb(urb);
out_usb_kill_anchored_urbs:
	if (!parent->active_channels) {
		usb_kill_anchored_urbs(&dev->tx_submitted);

		if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
			gs_usb_timestamp_stop(parent);
	}

	can_rx_offload_disable(&dev->offload);
	close_candev(netdev);

	return rc;
}

static int gs_usb_get_state(const struct net_device *netdev,
			    struct can_berr_counter *bec,
			    enum can_state *state)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct gs_device_state ds;
	int rc;

	rc = usb_control_msg_recv(dev->udev, 0, GS_USB_BREQ_GET_STATE,
				  USB_DIR_IN | USB_TYPE_VENDOR |
				  USB_RECIP_INTERFACE,
				  dev->channel, 0, &ds, sizeof(ds),
				  USB_CTRL_GET_TIMEOUT,
				  GFP_KERNEL);
	if (rc)
		return rc;

	if (le32_to_cpu(ds.state) >= CAN_STATE_MAX)
		return -EOPNOTSUPP;

	*state = le32_to_cpu(ds.state);
	bec->txerr = le32_to_cpu(ds.txerr);
	bec->rxerr = le32_to_cpu(ds.rxerr);

	return 0;
}

static int gs_usb_can_get_berr_counter(const struct net_device *netdev,
				       struct can_berr_counter *bec)
{
	enum can_state state;

	return gs_usb_get_state(netdev, bec, &state);
}
static int gs_can_close(struct net_device *netdev)
{
	int rc;
	struct gs_can *dev = netdev_priv(netdev);
	struct gs_usb *parent = dev->parent;

	netif_stop_queue(netdev);

	/* Stop polling */
	parent->active_channels--;
	if (!parent->active_channels) {
		usb_kill_anchored_urbs(&parent->rx_submitted);

		if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
			gs_usb_timestamp_stop(parent);
	}

	/* Stop sending URBs */
	usb_kill_anchored_urbs(&dev->tx_submitted);
	atomic_set(&dev->active_tx_urbs, 0);

	dev->can.state = CAN_STATE_STOPPED;

	/* reset the device */
	gs_cmd_reset(dev);

	/* reset tx contexts */
	for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
		dev->tx_context[rc].dev = dev;
		dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
	}

	can_rx_offload_disable(&dev->offload);

	/* close the netdev */
	close_candev(netdev);

	return 0;
}

static int gs_can_eth_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	const struct gs_can *dev = netdev_priv(netdev);

	if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
		return can_eth_ioctl_hwts(netdev, ifr, cmd);

	return -EOPNOTSUPP;
}

static const struct net_device_ops gs_usb_netdev_ops = {
	.ndo_open = gs_can_open,
	.ndo_stop = gs_can_close,
	.ndo_start_xmit = gs_can_start_xmit,
	.ndo_change_mtu = can_change_mtu,
	.ndo_eth_ioctl = gs_can_eth_ioctl,
};

static int gs_usb_set_identify(struct net_device *netdev, bool do_identify)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct gs_identify_mode imode;

	if (do_identify)
		imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_ON);
	else
		imode.mode = cpu_to_le32(GS_CAN_IDENTIFY_OFF);

	return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_IDENTIFY,
				    USB_DIR_OUT | USB_TYPE_VENDOR |
				    USB_RECIP_INTERFACE,
				    dev->channel, 0, &imode, sizeof(imode),
				    100, GFP_KERNEL);
}

/* blink LEDs to identify this interface */
static int gs_usb_set_phys_id(struct net_device *netdev,
			      enum ethtool_phys_id_state state)
{
	const struct gs_can *dev = netdev_priv(netdev);
	int rc = 0;

	if (!(dev->feature & GS_CAN_FEATURE_IDENTIFY))
		return -EOPNOTSUPP;

	switch (state) {
	case ETHTOOL_ID_ACTIVE:
		rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_ON);
		break;
	case ETHTOOL_ID_INACTIVE:
		rc = gs_usb_set_identify(netdev, GS_CAN_IDENTIFY_OFF);
		break;
	default:
		break;
	}

	return rc;
}

static int gs_usb_get_ts_info(struct net_device *netdev,
			      struct ethtool_ts_info *info)
{
	struct gs_can *dev = netdev_priv(netdev);

	/* report if device supports HW timestamps */
	if (dev->feature & GS_CAN_FEATURE_HW_TIMESTAMP)
		return can_ethtool_op_get_ts_info_hwts(netdev, info);

	return ethtool_op_get_ts_info(netdev, info);
}

static const struct ethtool_ops gs_usb_ethtool_ops = {
	.set_phys_id = gs_usb_set_phys_id,
	.get_ts_info = gs_usb_get_ts_info,
};

static int gs_usb_get_termination(struct net_device *netdev, u16 *term)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct gs_device_termination_state term_state;
	int rc;

	rc = usb_control_msg_recv(dev->udev, 0, GS_USB_BREQ_GET_TERMINATION,
				  USB_DIR_IN | USB_TYPE_VENDOR |
				  USB_RECIP_INTERFACE,
				  dev->channel, 0,
				  &term_state, sizeof(term_state), 1000,
				  GFP_KERNEL);
	if (rc)
		return rc;

	if (term_state.state == cpu_to_le32(GS_CAN_TERMINATION_STATE_ON))
		*term = GS_USB_TERMINATION_ENABLED;
	else
		*term = GS_USB_TERMINATION_DISABLED;

	return 0;
}

static int gs_usb_set_termination(struct net_device *netdev, u16 term)
{
	struct gs_can *dev = netdev_priv(netdev);
	struct gs_device_termination_state term_state;

	if (term == GS_USB_TERMINATION_ENABLED)
		term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_ON);
	else
		term_state.state = cpu_to_le32(GS_CAN_TERMINATION_STATE_OFF);

	return usb_control_msg_send(dev->udev, 0, GS_USB_BREQ_SET_TERMINATION,
				    USB_DIR_OUT | USB_TYPE_VENDOR |
				    USB_RECIP_INTERFACE,
				    dev->channel, 0,
				    &term_state, sizeof(term_state), 1000,
				    GFP_KERNEL);
}
static const u16 gs_usb_termination_const[] = {
	GS_USB_TERMINATION_DISABLED,
	GS_USB_TERMINATION_ENABLED
};

static struct gs_can *gs_make_candev(unsigned int channel,
				     struct usb_interface *intf,
				     struct gs_device_config *dconf)
{
	struct gs_can *dev;
	struct net_device *netdev;
	int rc;
	struct gs_device_bt_const_extended bt_const_extended;
	struct gs_device_bt_const bt_const;
	u32 feature;

	/* fetch bit timing constants */
	rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
				  GS_USB_BREQ_BT_CONST,
				  USB_DIR_IN | USB_TYPE_VENDOR |
				  USB_RECIP_INTERFACE,
				  channel, 0, &bt_const, sizeof(bt_const),
				  1000, GFP_KERNEL);
	if (rc) {
		dev_err(&intf->dev,
			"Couldn't get bit timing const for channel %d (%pe)\n",
			channel, ERR_PTR(rc));
		return ERR_PTR(rc);
	}

	/* create netdev */
	netdev = alloc_candev(sizeof(struct gs_can), GS_MAX_TX_URBS);
	if (!netdev) {
		dev_err(&intf->dev, "Couldn't allocate candev\n");
		return ERR_PTR(-ENOMEM);
	}

	dev = netdev_priv(netdev);

	netdev->netdev_ops = &gs_usb_netdev_ops;
	netdev->ethtool_ops = &gs_usb_ethtool_ops;

	netdev->flags |= IFF_ECHO; /* we support full roundtrip echo */
	netdev->dev_id = channel;

	/* dev setup */
	strcpy(dev->bt_const.name, KBUILD_MODNAME);
	dev->bt_const.tseg1_min = le32_to_cpu(bt_const.tseg1_min);
	dev->bt_const.tseg1_max = le32_to_cpu(bt_const.tseg1_max);
	dev->bt_const.tseg2_min = le32_to_cpu(bt_const.tseg2_min);
	dev->bt_const.tseg2_max = le32_to_cpu(bt_const.tseg2_max);
	dev->bt_const.sjw_max = le32_to_cpu(bt_const.sjw_max);
	dev->bt_const.brp_min = le32_to_cpu(bt_const.brp_min);
	dev->bt_const.brp_max = le32_to_cpu(bt_const.brp_max);
	dev->bt_const.brp_inc = le32_to_cpu(bt_const.brp_inc);

	dev->udev = interface_to_usbdev(intf);
	dev->netdev = netdev;
	dev->channel = channel;

	init_usb_anchor(&dev->tx_submitted);
	atomic_set(&dev->active_tx_urbs, 0);
	spin_lock_init(&dev->tx_ctx_lock);
	for (rc = 0; rc < GS_MAX_TX_URBS; rc++) {
		dev->tx_context[rc].dev = dev;
		dev->tx_context[rc].echo_id = GS_MAX_TX_URBS;
	}

	/* can setup */
	dev->can.state = CAN_STATE_STOPPED;
	dev->can.clock.freq = le32_to_cpu(bt_const.fclk_can);
	dev->can.bittiming_const = &dev->bt_const;
	dev->can.do_set_bittiming = gs_usb_set_bittiming;

	dev->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC;

	feature = le32_to_cpu(bt_const.feature);
	dev->feature = FIELD_GET(GS_CAN_FEATURE_MASK, feature);
	if (feature & GS_CAN_FEATURE_LISTEN_ONLY)
		dev->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY;

	if (feature & GS_CAN_FEATURE_LOOP_BACK)
		dev->can.ctrlmode_supported |= CAN_CTRLMODE_LOOPBACK;

	if (feature & GS_CAN_FEATURE_TRIPLE_SAMPLE)
		dev->can.ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES;

	if (feature & GS_CAN_FEATURE_ONE_SHOT)
		dev->can.ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT;

	if (feature & GS_CAN_FEATURE_FD) {
		dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD;
		/* The data bit timing will be overwritten, if
		 * GS_CAN_FEATURE_BT_CONST_EXT is set.
		 */
		dev->can.data_bittiming_const = &dev->bt_const;
		dev->can.do_set_data_bittiming = gs_usb_set_data_bittiming;
	}

	if (feature & GS_CAN_FEATURE_TERMINATION) {
		rc = gs_usb_get_termination(netdev, &dev->can.termination);
		if (rc) {
			dev->feature &= ~GS_CAN_FEATURE_TERMINATION;

			dev_info(&intf->dev,
				 "Disabling termination support for channel %d (%pe)\n",
				 channel, ERR_PTR(rc));
		} else {
			dev->can.termination_const = gs_usb_termination_const;
			dev->can.termination_const_cnt = ARRAY_SIZE(gs_usb_termination_const);
			dev->can.do_set_termination = gs_usb_set_termination;
		}
	}

	if (feature & GS_CAN_FEATURE_BERR_REPORTING)
		dev->can.ctrlmode_supported |= CAN_CTRLMODE_BERR_REPORTING;

	if (feature & GS_CAN_FEATURE_GET_STATE)
		dev->can.do_get_berr_counter = gs_usb_can_get_berr_counter;

	/* The CANtact Pro from LinkLayer Labs is based on the
	 * LPC54616 µC, which is affected by the NXP LPC USB transfer
	 * erratum. However, the current firmware (version 2) doesn't
	 * set the GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX bit. Set the
	 * feature GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX to workaround
	 * this issue.
	 *
	 * For the GS_USB_BREQ_DATA_BITTIMING USB control message the
	 * CANtact Pro firmware uses a request value, which is already
	 * used by the candleLight firmware for a different purpose
	 * (GS_USB_BREQ_GET_USER_ID). Set the feature
	 * GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO to workaround this
	 * issue.
	 */
	if (dev->udev->descriptor.idVendor == cpu_to_le16(USB_GS_USB_1_VENDOR_ID) &&
	    dev->udev->descriptor.idProduct == cpu_to_le16(USB_GS_USB_1_PRODUCT_ID) &&
	    dev->udev->manufacturer && dev->udev->product &&
	    !strcmp(dev->udev->manufacturer, "LinkLayer Labs") &&
	    !strcmp(dev->udev->product, "CANtact Pro") &&
	    (le32_to_cpu(dconf->sw_version) <= 2))
		dev->feature |= GS_CAN_FEATURE_REQ_USB_QUIRK_LPC546XX |
			GS_CAN_FEATURE_QUIRK_BREQ_CANTACT_PRO;

	/* GS_CAN_FEATURE_IDENTIFY is only supported for sw_version > 1 */
	if (!(le32_to_cpu(dconf->sw_version) > 1 &&
	      feature & GS_CAN_FEATURE_IDENTIFY))
		dev->feature &= ~GS_CAN_FEATURE_IDENTIFY;

	/* fetch extended bit timing constants if device has feature
	 * GS_CAN_FEATURE_FD and GS_CAN_FEATURE_BT_CONST_EXT
	 */
	if (feature & GS_CAN_FEATURE_FD &&
	    feature & GS_CAN_FEATURE_BT_CONST_EXT) {
		rc = usb_control_msg_recv(interface_to_usbdev(intf), 0,
					  GS_USB_BREQ_BT_CONST_EXT,
					  USB_DIR_IN | USB_TYPE_VENDOR |
					  USB_RECIP_INTERFACE,
					  channel, 0, &bt_const_extended,
					  sizeof(bt_const_extended),
					  1000, GFP_KERNEL);
		if (rc) {
			dev_err(&intf->dev,
				"Couldn't get extended bit timing const for channel %d (%pe)\n",
				channel, ERR_PTR(rc));
			goto out_free_candev;
		}

		strcpy(dev->data_bt_const.name, KBUILD_MODNAME);
		dev->data_bt_const.tseg1_min = le32_to_cpu(bt_const_extended.dtseg1_min);
		dev->data_bt_const.tseg1_max = le32_to_cpu(bt_const_extended.dtseg1_max);
		dev->data_bt_const.tseg2_min = le32_to_cpu(bt_const_extended.dtseg2_min);
		dev->data_bt_const.tseg2_max = le32_to_cpu(bt_const_extended.dtseg2_max);
		dev->data_bt_const.sjw_max = le32_to_cpu(bt_const_extended.dsjw_max);
		dev->data_bt_const.brp_min = le32_to_cpu(bt_const_extended.dbrp_min);
		dev->data_bt_const.brp_max = le32_to_cpu(bt_const_extended.dbrp_max);
		dev->data_bt_const.brp_inc = le32_to_cpu(bt_const_extended.dbrp_inc);

		dev->can.data_bittiming_const = &dev->data_bt_const;
	}

	can_rx_offload_add_manual(netdev, &dev->offload, GS_NAPI_WEIGHT);
	SET_NETDEV_DEV(netdev, &intf->dev);

	rc = register_candev(dev->netdev);
	if (rc) {
		dev_err(&intf->dev,
			"Couldn't register candev for channel %d (%pe)\n",
			channel, ERR_PTR(rc));
		goto out_can_rx_offload_del;
	}

	return dev;

out_can_rx_offload_del:
	can_rx_offload_del(&dev->offload);
out_free_candev:
	free_candev(dev->netdev);

	return ERR_PTR(rc);
}

static void gs_destroy_candev(struct gs_can *dev)
{
	unregister_candev(dev->netdev);
	can_rx_offload_del(&dev->offload);
	free_candev(dev->netdev);
}

static int gs_usb_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct usb_device *udev = interface_to_usbdev(intf);
	struct gs_host_frame *hf;
	struct gs_usb *parent;
	struct gs_host_config hconf = {
		.byte_order = cpu_to_le32(0x0000beef),
	};
	struct gs_device_config dconf;
	unsigned int icount, i;
	int rc;

	/* send host config */
	rc = usb_control_msg_send(udev, 0, GS_USB_BREQ_HOST_FORMAT,
				  USB_DIR_OUT | USB_TYPE_VENDOR |
				  USB_RECIP_INTERFACE,
				  1, intf->cur_altsetting->desc.bInterfaceNumber,
				  &hconf, sizeof(hconf), 1000,
				  GFP_KERNEL);
	if (rc) {
		dev_err(&intf->dev, "Couldn't send data format (err=%d)\n",
			rc);
		return rc;
	}

	/* read device config */
	rc = usb_control_msg_recv(udev, 0, GS_USB_BREQ_DEVICE_CONFIG,
				  USB_DIR_IN | USB_TYPE_VENDOR |
				  USB_RECIP_INTERFACE,
				  1, intf->cur_altsetting->desc.bInterfaceNumber,
				  &dconf, sizeof(dconf), 1000,
				  GFP_KERNEL);
	if (rc) {
		dev_err(&intf->dev, "Couldn't get device config (err=%d)\n",
			rc);
		return rc;
	}

	icount = dconf.icount + 1;
	dev_info(&intf->dev, "Configuring for %u interfaces\n", icount);

	if (icount > GS_MAX_INTF) {
		dev_err(&intf->dev,
			"Driver cannot handle more than %u CAN interfaces\n",
			GS_MAX_INTF);
		return -EINVAL;
	}

	parent = kzalloc(sizeof(*parent), GFP_KERNEL);
	if (!parent)
		return -ENOMEM;

	init_usb_anchor(&parent->rx_submitted);

	usb_set_intfdata(intf, parent);
	parent->udev = udev;

	for (i = 0; i < icount; i++) {
		unsigned int hf_size_rx = 0;

		parent->canch[i] = gs_make_candev(i, intf, &dconf);
		if (IS_ERR_OR_NULL(parent->canch[i])) {
			/* save error code to return later */
			rc = PTR_ERR(parent->canch[i]);

			/* on failure destroy previously created candevs */
			icount = i;
			for (i = 0; i < icount; i++)
				gs_destroy_candev(parent->canch[i]);

			usb_kill_anchored_urbs(&parent->rx_submitted);
			kfree(parent);
			return rc;
		}
		parent->canch[i]->parent = parent;

		/* set RX packet size based on FD and if hardware
		 * timestamps are supported.
*/ if (parent->canch[i]->can.ctrlmode_supported & CAN_CTRLMODE_FD) { if (parent->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) hf_size_rx = struct_size(hf, canfd_ts, 1); else hf_size_rx = struct_size(hf, canfd, 1); } else { if (parent->canch[i]->feature & GS_CAN_FEATURE_HW_TIMESTAMP) hf_size_rx = struct_size(hf, classic_can_ts, 1); else hf_size_rx = struct_size(hf, classic_can, 1); } parent->hf_size_rx = max(parent->hf_size_rx, hf_size_rx); } return 0; } static void gs_usb_disconnect(struct usb_interface *intf) { struct gs_usb *parent = usb_get_intfdata(intf); unsigned int i; usb_set_intfdata(intf, NULL); if (!parent) { dev_err(&intf->dev, "Disconnect (nodata)\n"); return; } for (i = 0; i < GS_MAX_INTF; i++) if (parent->canch[i]) gs_destroy_candev(parent->canch[i]); kfree(parent); } static const struct usb_device_id gs_usb_table[] = { { USB_DEVICE_INTERFACE_NUMBER(USB_GS_USB_1_VENDOR_ID, USB_GS_USB_1_PRODUCT_ID, 0) }, { USB_DEVICE_INTERFACE_NUMBER(USB_CANDLELIGHT_VENDOR_ID, USB_CANDLELIGHT_PRODUCT_ID, 0) }, { USB_DEVICE_INTERFACE_NUMBER(USB_CES_CANEXT_FD_VENDOR_ID, USB_CES_CANEXT_FD_PRODUCT_ID, 0) }, { USB_DEVICE_INTERFACE_NUMBER(USB_ABE_CANDEBUGGER_FD_VENDOR_ID, USB_ABE_CANDEBUGGER_FD_PRODUCT_ID, 0) }, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, gs_usb_table); static struct usb_driver gs_usb_driver = { .name = KBUILD_MODNAME, .probe = gs_usb_probe, .disconnect = gs_usb_disconnect, .id_table = gs_usb_table, }; module_usb_driver(gs_usb_driver); MODULE_AUTHOR("Maximilian Schneider <[email protected]>"); MODULE_DESCRIPTION( "Socket CAN device driver for Geschwister Schneider Technologie-, " "Entwicklungs- und Vertriebs UG. USB2.0 to CAN interfaces\n" "and bytewerk.org candleLight USB CAN interfaces."); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/can/usb/gs_usb.c
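The probe loop above sizes the device's shared RX buffer to the largest host frame any configured channel can produce. Below is a minimal userspace sketch of that max-of-variants computation; the struct layouts are hypothetical stand-ins for the driver's struct gs_host_frame, which the real code sizes with struct_size() over flexible-array members.

#include <stdio.h>
#include <stddef.h>

/* Hypothetical payload layouts standing in for the driver's variants. */
struct classic_can    { unsigned char data[8]; };
struct classic_can_ts { unsigned char data[8]; unsigned int ts; };
struct canfd          { unsigned char data[64]; };
struct canfd_ts       { unsigned char data[64]; unsigned int ts; };

/* Hypothetical common header preceding the payload. */
struct host_frame_hdr {
	unsigned int echo_id, can_id;
	unsigned char can_dlc, channel, flags, reserved;
};

/* Mirror of the per-channel choice: FD capability and hardware
 * timestamp support select one of four payload layouts. */
static size_t rx_frame_size(int is_fd, int has_hw_ts)
{
	size_t payload;

	if (is_fd)
		payload = has_hw_ts ? sizeof(struct canfd_ts) : sizeof(struct canfd);
	else
		payload = has_hw_ts ? sizeof(struct classic_can_ts) : sizeof(struct classic_can);

	return sizeof(struct host_frame_hdr) + payload;
}

int main(void)
{
	size_t hf_size_rx = 0;
	int cfg[2][2] = { { 0, 0 }, { 1, 1 } }; /* {is_fd, has_hw_ts} per channel */

	for (int i = 0; i < 2; i++) {
		size_t s = rx_frame_size(cfg[i][0], cfg[i][1]);
		if (s > hf_size_rx)
			hf_size_rx = s; /* keep the maximum, like the probe loop */
	}
	printf("hf_size_rx = %zu bytes\n", hf_size_rx);
	return 0;
}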
// SPDX-License-Identifier: GPL-2.0 /* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. * * File es58x_core.c: Core logic to manage the network devices and the * USB interface. * * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. * Copyright (c) 2020 ETAS K.K.. All rights reserved. * Copyright (c) 2020-2022 Vincent Mailhol <[email protected]> */ #include <asm/unaligned.h> #include <linux/crc16.h> #include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/usb.h> #include <net/devlink.h> #include "es58x_core.h" MODULE_AUTHOR("Vincent Mailhol <[email protected]>"); MODULE_AUTHOR("Arunachalam Santhanam <[email protected]>"); MODULE_DESCRIPTION("Socket CAN driver for ETAS ES58X USB adapters"); MODULE_LICENSE("GPL v2"); #define ES58X_VENDOR_ID 0x108C #define ES581_4_PRODUCT_ID 0x0159 #define ES582_1_PRODUCT_ID 0x0168 #define ES584_1_PRODUCT_ID 0x0169 /* ES58X FD has some interface protocols unsupported by this driver. */ #define ES58X_FD_INTERFACE_PROTOCOL 0 /* Table of devices which work with this driver. */ static const struct usb_device_id es58x_id_table[] = { { /* ETAS GmbH ES581.4 USB dual-channel CAN Bus Interface module. */ USB_DEVICE(ES58X_VENDOR_ID, ES581_4_PRODUCT_ID), .driver_info = ES58X_DUAL_CHANNEL }, { /* ETAS GmbH ES582.1 USB dual-channel CAN FD Bus Interface module. */ USB_DEVICE_INTERFACE_PROTOCOL(ES58X_VENDOR_ID, ES582_1_PRODUCT_ID, ES58X_FD_INTERFACE_PROTOCOL), .driver_info = ES58X_DUAL_CHANNEL | ES58X_FD_FAMILY }, { /* ETAS GmbH ES584.1 USB single-channel CAN FD Bus Interface module. */ USB_DEVICE_INTERFACE_PROTOCOL(ES58X_VENDOR_ID, ES584_1_PRODUCT_ID, ES58X_FD_INTERFACE_PROTOCOL), .driver_info = ES58X_FD_FAMILY }, { /* Terminating entry */ } }; MODULE_DEVICE_TABLE(usb, es58x_id_table); #define es58x_print_hex_dump(buf, len) \ print_hex_dump(KERN_DEBUG, \ KBUILD_MODNAME " " __stringify(buf) ": ", \ DUMP_PREFIX_NONE, 16, 1, buf, len, false) #define es58x_print_hex_dump_debug(buf, len) \ print_hex_dump_debug(KBUILD_MODNAME " " __stringify(buf) ": ",\ DUMP_PREFIX_NONE, 16, 1, buf, len, false) /* The last two bytes of an ES58X command is a CRC16. The first two * bytes (the start of frame) are skipped and the CRC calculation * starts on the third byte. */ #define ES58X_CRC_CALC_OFFSET sizeof_field(union es58x_urb_cmd, sof) /** * es58x_calculate_crc() - Compute the crc16 of a given URB. * @urb_cmd: The URB command for which we want to calculate the CRC. * @urb_len: Length of @urb_cmd. Must be at least bigger than 4 * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) * * Return: crc16 value. */ static u16 es58x_calculate_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len) { u16 crc; ssize_t len = urb_len - ES58X_CRC_CALC_OFFSET - sizeof(crc); crc = crc16(0, &urb_cmd->raw_cmd[ES58X_CRC_CALC_OFFSET], len); return crc; } /** * es58x_get_crc() - Get the CRC value of a given URB. * @urb_cmd: The URB command for which we want to get the CRC. * @urb_len: Length of @urb_cmd. Must be at least bigger than 4 * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) * * Return: crc16 value. */ static u16 es58x_get_crc(const union es58x_urb_cmd *urb_cmd, u16 urb_len) { u16 crc; const __le16 *crc_addr; crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)]; crc = get_unaligned_le16(crc_addr); return crc; } /** * es58x_set_crc() - Set the CRC value of a given URB. * @urb_cmd: The URB command for which we want to get the CRC. * @urb_len: Length of @urb_cmd. 
Must be at least bigger than 4 * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) */ static void es58x_set_crc(union es58x_urb_cmd *urb_cmd, u16 urb_len) { u16 crc; __le16 *crc_addr; crc = es58x_calculate_crc(urb_cmd, urb_len); crc_addr = (__le16 *)&urb_cmd->raw_cmd[urb_len - sizeof(crc)]; put_unaligned_le16(crc, crc_addr); } /** * es58x_check_crc() - Validate the CRC value of a given URB. * @es58x_dev: ES58X device. * @urb_cmd: The URB command for which we want to check the CRC. * @urb_len: Length of @urb_cmd. Must be at least bigger than 4 * (ES58X_CRC_CALC_OFFSET + sizeof(crc)) * * Return: zero on success, -EBADMSG if the CRC check fails. */ static int es58x_check_crc(struct es58x_device *es58x_dev, const union es58x_urb_cmd *urb_cmd, u16 urb_len) { u16 calculated_crc = es58x_calculate_crc(urb_cmd, urb_len); u16 expected_crc = es58x_get_crc(urb_cmd, urb_len); if (expected_crc != calculated_crc) { dev_err_ratelimited(es58x_dev->dev, "%s: Bad CRC, urb_len: %d\n", __func__, urb_len); return -EBADMSG; } return 0; } /** * es58x_timestamp_to_ns() - Convert a timestamp value received from a * ES58X device to nanoseconds. * @timestamp: Timestamp received from a ES58X device. * * The timestamp received from ES58X is expressed in multiples of 0.5 * micro seconds. This function converts it in to nanoseconds. * * Return: Timestamp value in nanoseconds. */ static u64 es58x_timestamp_to_ns(u64 timestamp) { const u64 es58x_timestamp_ns_mult_coef = 500ULL; return es58x_timestamp_ns_mult_coef * timestamp; } /** * es58x_set_skb_timestamp() - Set the hardware timestamp of an skb. * @netdev: CAN network device. * @skb: socket buffer of a CAN message. * @timestamp: Timestamp received from an ES58X device. * * Used for both received and echo messages. */ static void es58x_set_skb_timestamp(struct net_device *netdev, struct sk_buff *skb, u64 timestamp) { struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; struct skb_shared_hwtstamps *hwts; hwts = skb_hwtstamps(skb); /* Ignoring overflow (overflow on 64 bits timestamp with nano * second precision would occur after more than 500 years). */ hwts->hwtstamp = ns_to_ktime(es58x_timestamp_to_ns(timestamp) + es58x_dev->realtime_diff_ns); } /** * es58x_rx_timestamp() - Handle a received timestamp. * @es58x_dev: ES58X device. * @timestamp: Timestamp received from a ES58X device. * * Calculate the difference between the ES58X device and the kernel * internal clocks. This difference will be later used as an offset to * convert the timestamps of RX and echo messages to match the kernel * system time (e.g. convert to UNIX time). */ void es58x_rx_timestamp(struct es58x_device *es58x_dev, u64 timestamp) { u64 ktime_real_ns = ktime_get_real_ns(); u64 device_timestamp = es58x_timestamp_to_ns(timestamp); dev_dbg(es58x_dev->dev, "%s: request round-trip time: %llu ns\n", __func__, ktime_real_ns - es58x_dev->ktime_req_ns); es58x_dev->realtime_diff_ns = (es58x_dev->ktime_req_ns + ktime_real_ns) / 2 - device_timestamp; es58x_dev->ktime_req_ns = 0; dev_dbg(es58x_dev->dev, "%s: Device timestamp: %llu, diff with kernel: %llu\n", __func__, device_timestamp, es58x_dev->realtime_diff_ns); } /** * es58x_set_realtime_diff_ns() - Calculate difference between the * clocks of the ES58X device and the kernel * @es58x_dev: ES58X device. * * Request a timestamp from the ES58X device. Once the answer is * received, the timestamp difference will be set by the callback * function es58x_rx_timestamp(). * * Return: zero on success, errno when any error occurs. 
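 *
 * The computation in es58x_rx_timestamp() assumes a symmetric USB
 * round trip: if the request was sent at host time t_req and the
 * answer arrives at t_resp, the device timestamp is taken to match
 * (t_req + t_resp) / 2, and realtime_diff_ns is that midpoint minus
 * the device timestamp.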
*/ static int es58x_set_realtime_diff_ns(struct es58x_device *es58x_dev) { if (es58x_dev->ktime_req_ns) { dev_warn(es58x_dev->dev, "%s: Previous request to set timestamp has not completed yet\n", __func__); return -EBUSY; } es58x_dev->ktime_req_ns = ktime_get_real_ns(); return es58x_dev->ops->get_timestamp(es58x_dev); } /** * es58x_is_can_state_active() - Is the network device in an active * CAN state? * @netdev: CAN network device. * * The device is considered active if it is able to send or receive * CAN frames, that is to say if it is in any of * CAN_STATE_ERROR_ACTIVE, CAN_STATE_ERROR_WARNING or * CAN_STATE_ERROR_PASSIVE states. * * Caution: when recovering from a bus-off, * net/core/dev.c#can_restart() will call * net/core/dev.c#can_flush_echo_skb() without using any kind of * locks. For this reason, it is critical to guarantee that no TX or * echo operations (i.e. any access to priv->echo_skb[]) can be done * while this function is returning false. * * Return: true if the device is active, else returns false. */ static bool es58x_is_can_state_active(struct net_device *netdev) { return es58x_priv(netdev)->can.state < CAN_STATE_BUS_OFF; } /** * es58x_is_echo_skb_threshold_reached() - Determine the limit of how * many skb slots can be taken before we should stop the network * queue. * @priv: ES58X private parameters related to the network device. * * We need to save enough free skb slots in order to be able to do * bulk send. This function can be used to determine when to wake or * stop the network queue in regard to the number of skb slots already * taken if the echo FIFO. * * Return: boolean. */ static bool es58x_is_echo_skb_threshold_reached(struct es58x_priv *priv) { u32 num_echo_skb = priv->tx_head - priv->tx_tail; u32 threshold = priv->can.echo_skb_max - priv->es58x_dev->param->tx_bulk_max + 1; return num_echo_skb >= threshold; } /** * es58x_can_free_echo_skb_tail() - Remove the oldest echo skb of the * echo FIFO. * @netdev: CAN network device. * * Naming convention: the tail is the beginning of the FIFO, i.e. the * first skb to have entered the FIFO. */ static void es58x_can_free_echo_skb_tail(struct net_device *netdev) { struct es58x_priv *priv = es58x_priv(netdev); u16 fifo_mask = priv->es58x_dev->param->fifo_mask; unsigned int frame_len = 0; can_free_echo_skb(netdev, priv->tx_tail & fifo_mask, &frame_len); netdev_completed_queue(netdev, 1, frame_len); priv->tx_tail++; netdev->stats.tx_dropped++; } /** * es58x_can_get_echo_skb_recovery() - Try to re-sync the echo FIFO. * @netdev: CAN network device. * @rcv_packet_idx: Index * * This function should not be called under normal circumstances. In * the unlikely case that one or several URB packages get dropped by * the device, the index will get out of sync. Try to recover by * dropping the echo skb packets with older indexes. * * Return: zero if recovery was successful, -EINVAL otherwise. */ static int es58x_can_get_echo_skb_recovery(struct net_device *netdev, u32 rcv_packet_idx) { struct es58x_priv *priv = es58x_priv(netdev); int ret = 0; netdev->stats.tx_errors++; if (net_ratelimit()) netdev_warn(netdev, "Bad echo packet index: %u. First index: %u, end index %u, num_echo_skb: %02u/%02u\n", rcv_packet_idx, priv->tx_tail, priv->tx_head, priv->tx_head - priv->tx_tail, priv->can.echo_skb_max); if ((s32)(rcv_packet_idx - priv->tx_tail) < 0) { if (net_ratelimit()) netdev_warn(netdev, "Received echo index is from the past. 
Ignoring it\n"); ret = -EINVAL; } else if ((s32)(rcv_packet_idx - priv->tx_head) >= 0) { if (net_ratelimit()) netdev_err(netdev, "Received echo index is from the future. Ignoring it\n"); ret = -EINVAL; } else { if (net_ratelimit()) netdev_warn(netdev, "Recovery: dropping %u echo skb from index %u to %u\n", rcv_packet_idx - priv->tx_tail, priv->tx_tail, rcv_packet_idx - 1); while (priv->tx_tail != rcv_packet_idx) { if (priv->tx_tail == priv->tx_head) return -EINVAL; es58x_can_free_echo_skb_tail(netdev); } } return ret; } /** * es58x_can_get_echo_skb() - Get the skb from the echo FIFO and loop * it back locally. * @netdev: CAN network device. * @rcv_packet_idx: Index of the first packet received from the device. * @tstamps: Array of hardware timestamps received from a ES58X device. * @pkts: Number of packets (and so, length of @tstamps). * * Callback function for when we receive a self reception * acknowledgment. Retrieves the skb from the echo FIFO, sets its * hardware timestamp (the actual time it was sent) and loops it back * locally. * * The device has to be active (i.e. network interface UP and not in * bus off state or restarting). * * Packet indexes must be consecutive (i.e. index of first packet is * @rcv_packet_idx, index of second packet is @rcv_packet_idx + 1 and * index of last packet is @rcv_packet_idx + @pkts - 1). * * Return: zero on success. */ int es58x_can_get_echo_skb(struct net_device *netdev, u32 rcv_packet_idx, u64 *tstamps, unsigned int pkts) { struct es58x_priv *priv = es58x_priv(netdev); unsigned int rx_total_frame_len = 0; unsigned int num_echo_skb = priv->tx_head - priv->tx_tail; int i; u16 fifo_mask = priv->es58x_dev->param->fifo_mask; if (!netif_running(netdev)) { if (net_ratelimit()) netdev_info(netdev, "%s: %s is down, dropping %d echo packets\n", __func__, netdev->name, pkts); netdev->stats.tx_dropped += pkts; return 0; } else if (!es58x_is_can_state_active(netdev)) { if (net_ratelimit()) netdev_dbg(netdev, "Bus is off or device is restarting. Ignoring %u echo packets from index %u\n", pkts, rcv_packet_idx); /* stats.tx_dropped will be (or was already) * incremented by * drivers/net/can/net/dev.c:can_flush_echo_skb(). */ return 0; } else if (num_echo_skb == 0) { if (net_ratelimit()) netdev_warn(netdev, "Received %u echo packets from index: %u but echo skb queue is empty.\n", pkts, rcv_packet_idx); netdev->stats.tx_dropped += pkts; return 0; } if (priv->tx_tail != rcv_packet_idx) { if (es58x_can_get_echo_skb_recovery(netdev, rcv_packet_idx) < 0) { if (net_ratelimit()) netdev_warn(netdev, "Could not find echo skb for echo packet index: %u\n", rcv_packet_idx); return 0; } } if (num_echo_skb < pkts) { int pkts_drop = pkts - num_echo_skb; if (net_ratelimit()) netdev_err(netdev, "Received %u echo packets but have only %d echo skb. Dropping %d echo skb\n", pkts, num_echo_skb, pkts_drop); netdev->stats.tx_dropped += pkts_drop; pkts -= pkts_drop; } for (i = 0; i < pkts; i++) { unsigned int skb_idx = priv->tx_tail & fifo_mask; struct sk_buff *skb = priv->can.echo_skb[skb_idx]; unsigned int frame_len = 0; if (skb) es58x_set_skb_timestamp(netdev, skb, tstamps[i]); netdev->stats.tx_bytes += can_get_echo_skb(netdev, skb_idx, &frame_len); rx_total_frame_len += frame_len; priv->tx_tail++; } netdev_completed_queue(netdev, pkts, rx_total_frame_len); netdev->stats.tx_packets += pkts; priv->err_passive_before_rtx_success = 0; if (!es58x_is_echo_skb_threshold_reached(priv)) netif_wake_queue(netdev); return 0; } /** * es58x_can_reset_echo_fifo() - Reset the echo FIFO. 
* @netdev: CAN network device. * * The echo_skb array of struct can_priv will be flushed by * drivers/net/can/dev.c:can_flush_echo_skb(). This function resets * the parameters of the struct es58x_priv of our device and reset the * queue (c.f. BQL). */ static void es58x_can_reset_echo_fifo(struct net_device *netdev) { struct es58x_priv *priv = es58x_priv(netdev); priv->tx_tail = 0; priv->tx_head = 0; priv->tx_urb = NULL; priv->err_passive_before_rtx_success = 0; netdev_reset_queue(netdev); } /** * es58x_flush_pending_tx_msg() - Reset the buffer for transmission messages. * @netdev: CAN network device. * * es58x_start_xmit() will queue up to tx_bulk_max messages in * &tx_urb buffer and do a bulk send of all messages in one single URB * (c.f. xmit_more flag). When the device recovers from a bus off * state or when the device stops, the tx_urb buffer might still have * pending messages in it and thus need to be flushed. */ static void es58x_flush_pending_tx_msg(struct net_device *netdev) { struct es58x_priv *priv = es58x_priv(netdev); struct es58x_device *es58x_dev = priv->es58x_dev; if (priv->tx_urb) { netdev_warn(netdev, "%s: dropping %d TX messages\n", __func__, priv->tx_can_msg_cnt); netdev->stats.tx_dropped += priv->tx_can_msg_cnt; while (priv->tx_can_msg_cnt > 0) { unsigned int frame_len = 0; u16 fifo_mask = priv->es58x_dev->param->fifo_mask; priv->tx_head--; priv->tx_can_msg_cnt--; can_free_echo_skb(netdev, priv->tx_head & fifo_mask, &frame_len); netdev_completed_queue(netdev, 1, frame_len); } usb_anchor_urb(priv->tx_urb, &priv->es58x_dev->tx_urbs_idle); atomic_inc(&es58x_dev->tx_urbs_idle_cnt); usb_free_urb(priv->tx_urb); } priv->tx_urb = NULL; } /** * es58x_tx_ack_msg() - Handle acknowledgment messages. * @netdev: CAN network device. * @tx_free_entries: Number of free entries in the device transmit FIFO. * @rx_cmd_ret_u32: error code as returned by the ES58X device. * * ES58X sends an acknowledgment message after a transmission request * is done. This is mandatory for the ES581.4 but is optional (and * deactivated in this driver) for the ES58X_FD family. * * Under normal circumstances, this function should never throw an * error message. * * Return: zero on success, errno when any error occurs. */ int es58x_tx_ack_msg(struct net_device *netdev, u16 tx_free_entries, enum es58x_ret_u32 rx_cmd_ret_u32) { struct es58x_priv *priv = es58x_priv(netdev); if (tx_free_entries <= priv->es58x_dev->param->tx_bulk_max) { if (net_ratelimit()) netdev_err(netdev, "Only %d entries left in device queue, num_echo_skb: %d/%d\n", tx_free_entries, priv->tx_head - priv->tx_tail, priv->can.echo_skb_max); netif_stop_queue(netdev); } return es58x_rx_cmd_ret_u32(netdev, ES58X_RET_TYPE_TX_MSG, rx_cmd_ret_u32); } /** * es58x_rx_can_msg() - Handle a received a CAN message. * @netdev: CAN network device. * @timestamp: Hardware time stamp (only relevant in rx branches). * @data: CAN payload. * @can_id: CAN ID. * @es58x_flags: Please refer to enum es58x_flag. * @dlc: Data Length Code (raw value). * * Fill up a CAN skb and post it. * * This function handles the case where the DLC of a classical CAN * frame is greater than CAN_MAX_DLEN (c.f. the len8_dlc field of * struct can_frame). * * Return: zero on success. 
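 *
 * For example, a classical frame received with a raw DLC of 9..15
 * still carries at most CAN_MAX_DLEN (8) data bytes:
 * can_cc_dlc2len() clamps the length to 8 and, when the
 * CAN_CTRLMODE_CC_LEN8_DLC control mode is enabled,
 * can_frame_set_cc_len() preserves the raw DLC in len8_dlc.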
*/ int es58x_rx_can_msg(struct net_device *netdev, u64 timestamp, const u8 *data, canid_t can_id, enum es58x_flag es58x_flags, u8 dlc) { struct canfd_frame *cfd; struct can_frame *ccf; struct sk_buff *skb; u8 len; bool is_can_fd = !!(es58x_flags & ES58X_FLAG_FD_DATA); if (dlc > CAN_MAX_RAW_DLC) { netdev_err(netdev, "%s: DLC is %d but maximum should be %d\n", __func__, dlc, CAN_MAX_RAW_DLC); return -EMSGSIZE; } if (is_can_fd) { len = can_fd_dlc2len(dlc); skb = alloc_canfd_skb(netdev, &cfd); } else { len = can_cc_dlc2len(dlc); skb = alloc_can_skb(netdev, &ccf); cfd = (struct canfd_frame *)ccf; } if (!skb) { netdev->stats.rx_dropped++; return 0; } cfd->can_id = can_id; if (es58x_flags & ES58X_FLAG_EFF) cfd->can_id |= CAN_EFF_FLAG; if (is_can_fd) { cfd->len = len; if (es58x_flags & ES58X_FLAG_FD_BRS) cfd->flags |= CANFD_BRS; if (es58x_flags & ES58X_FLAG_FD_ESI) cfd->flags |= CANFD_ESI; } else { can_frame_set_cc_len(ccf, dlc, es58x_priv(netdev)->can.ctrlmode); if (es58x_flags & ES58X_FLAG_RTR) { ccf->can_id |= CAN_RTR_FLAG; len = 0; } } memcpy(cfd->data, data, len); netdev->stats.rx_packets++; netdev->stats.rx_bytes += len; es58x_set_skb_timestamp(netdev, skb, timestamp); netif_rx(skb); es58x_priv(netdev)->err_passive_before_rtx_success = 0; return 0; } /** * es58x_rx_err_msg() - Handle a received CAN event or error message. * @netdev: CAN network device. * @error: Error code. * @event: Event code. * @timestamp: Timestamp received from an ES58X device. * * Handle the errors and events received by the ES58X device, create * a CAN error skb and post it. * * In some rare cases the device might get stuck alternating between * CAN_STATE_ERROR_PASSIVE and CAN_STATE_ERROR_WARNING. To prevent * this behavior, we force a bus off state if the device reports * CAN_STATE_ERROR_PASSIVE for ES58X_CONSECUTIVE_ERR_PASSIVE_MAX * consecutive times with no successful transmission or reception in * between. * * Once the device is in bus off state, the only way to restart it is * through the drivers/net/can/dev.c:can_restart() function. The * device is technically capable of recovering by itself under certain * circumstances, however, allowing self recovery would create * complex race conditions with drivers/net/can/dev.c:can_restart() * and thus was not implemented. To activate automatic restart, please * set the restart-ms parameter (e.g. ip link set can0 type can * restart-ms 100). * * If the bus is really unstable, this function will try to send a * lot of log messages. Those are rate limited (i.e. you will see * messages such as "net_ratelimit: XXX callbacks suppressed" in * dmesg). * * Return: zero on success, errno when any error occurs. 
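 *
 * Note: the forced bus-off is implemented by this function calling
 * itself once with event %ES58X_EVENT_BUSOFF, so the statistics and
 * state handling are identical to a bus-off reported by the device.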
*/ int es58x_rx_err_msg(struct net_device *netdev, enum es58x_err error, enum es58x_event event, u64 timestamp) { struct es58x_priv *priv = es58x_priv(netdev); struct can_priv *can = netdev_priv(netdev); struct can_device_stats *can_stats = &can->can_stats; struct can_frame *cf = NULL; struct sk_buff *skb; int ret = 0; if (!netif_running(netdev)) { if (net_ratelimit()) netdev_info(netdev, "%s: %s is down, dropping packet\n", __func__, netdev->name); netdev->stats.rx_dropped++; return 0; } if (error == ES58X_ERR_OK && event == ES58X_EVENT_OK) { netdev_err(netdev, "%s: Both error and event are zero\n", __func__); return -EINVAL; } skb = alloc_can_err_skb(netdev, &cf); switch (error) { case ES58X_ERR_OK: /* 0: No error */ break; case ES58X_ERR_PROT_STUFF: if (net_ratelimit()) netdev_dbg(netdev, "Error BITSTUFF\n"); if (cf) cf->data[2] |= CAN_ERR_PROT_STUFF; break; case ES58X_ERR_PROT_FORM: if (net_ratelimit()) netdev_dbg(netdev, "Error FORMAT\n"); if (cf) cf->data[2] |= CAN_ERR_PROT_FORM; break; case ES58X_ERR_ACK: if (net_ratelimit()) netdev_dbg(netdev, "Error ACK\n"); if (cf) cf->can_id |= CAN_ERR_ACK; break; case ES58X_ERR_PROT_BIT: if (net_ratelimit()) netdev_dbg(netdev, "Error BIT\n"); if (cf) cf->data[2] |= CAN_ERR_PROT_BIT; break; case ES58X_ERR_PROT_CRC: if (net_ratelimit()) netdev_dbg(netdev, "Error CRC\n"); if (cf) cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; break; case ES58X_ERR_PROT_BIT1: if (net_ratelimit()) netdev_dbg(netdev, "Error: expected a recessive bit but monitored a dominant one\n"); if (cf) cf->data[2] |= CAN_ERR_PROT_BIT1; break; case ES58X_ERR_PROT_BIT0: if (net_ratelimit()) netdev_dbg(netdev, "Error expected a dominant bit but monitored a recessive one\n"); if (cf) cf->data[2] |= CAN_ERR_PROT_BIT0; break; case ES58X_ERR_PROT_OVERLOAD: if (net_ratelimit()) netdev_dbg(netdev, "Error OVERLOAD\n"); if (cf) cf->data[2] |= CAN_ERR_PROT_OVERLOAD; break; case ES58X_ERR_PROT_UNSPEC: if (net_ratelimit()) netdev_dbg(netdev, "Unspecified error\n"); if (cf) cf->can_id |= CAN_ERR_PROT; break; default: if (net_ratelimit()) netdev_err(netdev, "%s: Unspecified error code 0x%04X\n", __func__, (int)error); if (cf) cf->can_id |= CAN_ERR_PROT; break; } switch (event) { case ES58X_EVENT_OK: /* 0: No event */ break; case ES58X_EVENT_CRTL_ACTIVE: if (can->state == CAN_STATE_BUS_OFF) { netdev_err(netdev, "%s: state transition: BUS OFF -> ACTIVE\n", __func__); } if (net_ratelimit()) netdev_dbg(netdev, "Event CAN BUS ACTIVE\n"); if (cf) cf->data[1] |= CAN_ERR_CRTL_ACTIVE; can->state = CAN_STATE_ERROR_ACTIVE; break; case ES58X_EVENT_CRTL_PASSIVE: if (net_ratelimit()) netdev_dbg(netdev, "Event CAN BUS PASSIVE\n"); /* Either TX or RX error count reached passive state * but we do not know which. Setting both flags by * default. */ if (cf) { cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE; cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE; } if (can->state < CAN_STATE_BUS_OFF) can->state = CAN_STATE_ERROR_PASSIVE; can_stats->error_passive++; if (priv->err_passive_before_rtx_success < U8_MAX) priv->err_passive_before_rtx_success++; break; case ES58X_EVENT_CRTL_WARNING: if (net_ratelimit()) netdev_dbg(netdev, "Event CAN BUS WARNING\n"); /* Either TX or RX error count reached warning state * but we do not know which. Setting both flags by * default. 
*/ if (cf) { cf->data[1] |= CAN_ERR_CRTL_RX_WARNING; cf->data[1] |= CAN_ERR_CRTL_TX_WARNING; } if (can->state < CAN_STATE_BUS_OFF) can->state = CAN_STATE_ERROR_WARNING; can_stats->error_warning++; break; case ES58X_EVENT_BUSOFF: if (net_ratelimit()) netdev_dbg(netdev, "Event CAN BUS OFF\n"); if (cf) cf->can_id |= CAN_ERR_BUSOFF; can_stats->bus_off++; netif_stop_queue(netdev); if (can->state != CAN_STATE_BUS_OFF) { can->state = CAN_STATE_BUS_OFF; can_bus_off(netdev); ret = can->do_set_mode(netdev, CAN_MODE_STOP); } break; case ES58X_EVENT_SINGLE_WIRE: if (net_ratelimit()) netdev_warn(netdev, "Lost connection on either CAN high or CAN low\n"); /* Lost connection on either CAN high or CAN * low. Setting both flags by default. */ if (cf) { cf->data[4] |= CAN_ERR_TRX_CANH_NO_WIRE; cf->data[4] |= CAN_ERR_TRX_CANL_NO_WIRE; } break; default: if (net_ratelimit()) netdev_err(netdev, "%s: Unspecified event code 0x%04X\n", __func__, (int)event); if (cf) cf->can_id |= CAN_ERR_CRTL; break; } if (cf) { if (cf->data[1]) cf->can_id |= CAN_ERR_CRTL; if (cf->data[2] || cf->data[3]) { cf->can_id |= CAN_ERR_PROT; can_stats->bus_error++; } if (cf->data[4]) cf->can_id |= CAN_ERR_TRX; es58x_set_skb_timestamp(netdev, skb, timestamp); netif_rx(skb); } if ((event & ES58X_EVENT_CRTL_PASSIVE) && priv->err_passive_before_rtx_success == ES58X_CONSECUTIVE_ERR_PASSIVE_MAX) { netdev_info(netdev, "Got %d consecutive warning events with no successful RX or TX. Forcing bus-off\n", priv->err_passive_before_rtx_success); return es58x_rx_err_msg(netdev, ES58X_ERR_OK, ES58X_EVENT_BUSOFF, timestamp); } return ret; } /** * es58x_cmd_ret_desc() - Convert a command type to a string. * @cmd_ret_type: Type of the command which triggered the return code. * * The final line (return "<unknown>") should not be reached. If this * is the case, there is an implementation bug. * * Return: a readable description of the @cmd_ret_type. */ static const char *es58x_cmd_ret_desc(enum es58x_ret_type cmd_ret_type) { switch (cmd_ret_type) { case ES58X_RET_TYPE_SET_BITTIMING: return "Set bittiming"; case ES58X_RET_TYPE_ENABLE_CHANNEL: return "Enable channel"; case ES58X_RET_TYPE_DISABLE_CHANNEL: return "Disable channel"; case ES58X_RET_TYPE_TX_MSG: return "Transmit message"; case ES58X_RET_TYPE_RESET_RX: return "Reset RX"; case ES58X_RET_TYPE_RESET_TX: return "Reset TX"; case ES58X_RET_TYPE_DEVICE_ERR: return "Device error"; } return "<unknown>"; }; /** * es58x_rx_cmd_ret_u8() - Handle the command's return code received * from the ES58X device. * @dev: Device, only used for the dev_XXX() print functions. * @cmd_ret_type: Type of the command which triggered the return code. * @rx_cmd_ret_u8: Command error code as returned by the ES58X device. * * Handles the 8 bits command return code. Those are specific to the * ES581.4 device. The return value will eventually be used by * es58x_handle_urb_cmd() function which will take proper actions in * case of critical issues such and memory errors or bad CRC values. * * In contrast with es58x_rx_cmd_ret_u32(), the network device is * unknown. * * Return: zero on success, return errno when any error occurs. 
*/ int es58x_rx_cmd_ret_u8(struct device *dev, enum es58x_ret_type cmd_ret_type, enum es58x_ret_u8 rx_cmd_ret_u8) { const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type); switch (rx_cmd_ret_u8) { case ES58X_RET_U8_OK: dev_dbg_ratelimited(dev, "%s: OK\n", ret_desc); return 0; case ES58X_RET_U8_ERR_UNSPECIFIED_FAILURE: dev_err(dev, "%s: unspecified failure\n", ret_desc); return -EBADMSG; case ES58X_RET_U8_ERR_NO_MEM: dev_err(dev, "%s: device ran out of memory\n", ret_desc); return -ENOMEM; case ES58X_RET_U8_ERR_BAD_CRC: dev_err(dev, "%s: CRC of previous command is incorrect\n", ret_desc); return -EIO; default: dev_err(dev, "%s: returned unknown value: 0x%02X\n", ret_desc, rx_cmd_ret_u8); return -EBADMSG; } } /** * es58x_rx_cmd_ret_u32() - Handle the command return code received * from the ES58X device. * @netdev: CAN network device. * @cmd_ret_type: Type of the command which triggered the return code. * @rx_cmd_ret_u32: error code as returned by the ES58X device. * * Handles the 32 bits command return code. The return value will * eventually be used by es58x_handle_urb_cmd() function which will * take proper actions in case of critical issues such and memory * errors or bad CRC values. * * Return: zero on success, errno when any error occurs. */ int es58x_rx_cmd_ret_u32(struct net_device *netdev, enum es58x_ret_type cmd_ret_type, enum es58x_ret_u32 rx_cmd_ret_u32) { struct es58x_priv *priv = es58x_priv(netdev); const struct es58x_operators *ops = priv->es58x_dev->ops; const char *ret_desc = es58x_cmd_ret_desc(cmd_ret_type); switch (rx_cmd_ret_u32) { case ES58X_RET_U32_OK: switch (cmd_ret_type) { case ES58X_RET_TYPE_ENABLE_CHANNEL: es58x_can_reset_echo_fifo(netdev); priv->can.state = CAN_STATE_ERROR_ACTIVE; netif_wake_queue(netdev); netdev_info(netdev, "%s: %s (Serial Number %s): CAN%d channel becomes ready\n", ret_desc, priv->es58x_dev->udev->product, priv->es58x_dev->udev->serial, priv->channel_idx + 1); break; case ES58X_RET_TYPE_TX_MSG: if (IS_ENABLED(CONFIG_VERBOSE_DEBUG) && net_ratelimit()) netdev_vdbg(netdev, "%s: OK\n", ret_desc); break; default: netdev_dbg(netdev, "%s: OK\n", ret_desc); break; } return 0; case ES58X_RET_U32_ERR_UNSPECIFIED_FAILURE: if (cmd_ret_type == ES58X_RET_TYPE_ENABLE_CHANNEL) { int ret; netdev_warn(netdev, "%s: channel is already opened, closing and re-opening it to reflect new configuration\n", ret_desc); ret = ops->disable_channel(es58x_priv(netdev)); if (ret) return ret; return ops->enable_channel(es58x_priv(netdev)); } if (cmd_ret_type == ES58X_RET_TYPE_DISABLE_CHANNEL) { netdev_info(netdev, "%s: channel is already closed\n", ret_desc); return 0; } netdev_err(netdev, "%s: unspecified failure\n", ret_desc); return -EBADMSG; case ES58X_RET_U32_ERR_NO_MEM: netdev_err(netdev, "%s: device ran out of memory\n", ret_desc); return -ENOMEM; case ES58X_RET_U32_WARN_PARAM_ADJUSTED: netdev_warn(netdev, "%s: some incompatible parameters have been adjusted\n", ret_desc); return 0; case ES58X_RET_U32_WARN_TX_MAYBE_REORDER: netdev_warn(netdev, "%s: TX messages might have been reordered\n", ret_desc); return 0; case ES58X_RET_U32_ERR_TIMEDOUT: netdev_err(netdev, "%s: command timed out\n", ret_desc); return -ETIMEDOUT; case ES58X_RET_U32_ERR_FIFO_FULL: netdev_warn(netdev, "%s: fifo is full\n", ret_desc); return 0; case ES58X_RET_U32_ERR_BAD_CONFIG: netdev_err(netdev, "%s: bad configuration\n", ret_desc); return -EINVAL; case ES58X_RET_U32_ERR_NO_RESOURCE: netdev_err(netdev, "%s: no resource available\n", ret_desc); return -EBUSY; default: netdev_err(netdev, "%s returned 
unknown value: 0x%08X\n", ret_desc, rx_cmd_ret_u32); return -EBADMSG; } } /** * es58x_increment_rx_errors() - Increment the network devices' error * count. * @es58x_dev: ES58X device. * * If an error occurs on the early stages on receiving an URB command, * we might not be able to figure out on which network device the * error occurred. In such case, we arbitrarily increment the error * count of all the network devices attached to our ES58X device. */ static void es58x_increment_rx_errors(struct es58x_device *es58x_dev) { int i; for (i = 0; i < es58x_dev->num_can_ch; i++) if (es58x_dev->netdev[i]) es58x_dev->netdev[i]->stats.rx_errors++; } /** * es58x_handle_urb_cmd() - Handle the URB command * @es58x_dev: ES58X device. * @urb_cmd: The URB command received from the ES58X device, might not * be aligned. * * Sends the URB command to the device specific function. Manages the * errors thrown back by those functions. */ static void es58x_handle_urb_cmd(struct es58x_device *es58x_dev, const union es58x_urb_cmd *urb_cmd) { const struct es58x_operators *ops = es58x_dev->ops; size_t cmd_len; int i, ret; ret = ops->handle_urb_cmd(es58x_dev, urb_cmd); switch (ret) { case 0: /* OK */ return; case -ENODEV: dev_err_ratelimited(es58x_dev->dev, "Device is not ready\n"); break; case -EINVAL: case -EMSGSIZE: case -EBADRQC: case -EBADMSG: case -ECHRNG: case -ETIMEDOUT: cmd_len = es58x_get_urb_cmd_len(es58x_dev, ops->get_msg_len(urb_cmd)); dev_err(es58x_dev->dev, "ops->handle_urb_cmd() returned error %pe", ERR_PTR(ret)); es58x_print_hex_dump(urb_cmd, cmd_len); break; case -EFAULT: case -ENOMEM: case -EIO: default: dev_crit(es58x_dev->dev, "ops->handle_urb_cmd() returned error %pe, detaching all network devices\n", ERR_PTR(ret)); for (i = 0; i < es58x_dev->num_can_ch; i++) if (es58x_dev->netdev[i]) netif_device_detach(es58x_dev->netdev[i]); if (es58x_dev->ops->reset_device) es58x_dev->ops->reset_device(es58x_dev); break; } /* Because the urb command could not fully be parsed, * channel_id is not confirmed. Incrementing rx_errors count * of all channels. */ es58x_increment_rx_errors(es58x_dev); } /** * es58x_check_rx_urb() - Check the length and format of the URB command. * @es58x_dev: ES58X device. * @urb_cmd: The URB command received from the ES58X device, might not * be aligned. * @urb_actual_len: The actual length of the URB command. * * Check if the first message of the received urb is valid, that is to * say that both the header and the length are coherent. * * Return: * the length of the first message of the URB on success. * * -ENODATA if the URB command is incomplete (in which case, the URB * command should be buffered and combined with the next URB to try to * reconstitute the URB command). * * -EOVERFLOW if the length is bigger than the maximum expected one. * * -EBADRQC if the start of frame does not match the expected value. 
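 *
 * The expected framing is: a 2-byte start of frame, a device
 * specific header carrying the message length, the payload, and a
 * trailing little-endian CRC16 computed over everything between the
 * start of frame and the CRC itself (c.f. ES58X_CRC_CALC_OFFSET).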
*/ static signed int es58x_check_rx_urb(struct es58x_device *es58x_dev, const union es58x_urb_cmd *urb_cmd, u32 urb_actual_len) { const struct device *dev = es58x_dev->dev; const struct es58x_parameters *param = es58x_dev->param; u16 sof, msg_len; signed int urb_cmd_len, ret; if (urb_actual_len < param->urb_cmd_header_len) { dev_vdbg(dev, "%s: Received %d bytes [%*ph]: header incomplete\n", __func__, urb_actual_len, urb_actual_len, urb_cmd->raw_cmd); return -ENODATA; } sof = get_unaligned_le16(&urb_cmd->sof); if (sof != param->rx_start_of_frame) { dev_err_ratelimited(es58x_dev->dev, "%s: Expected sequence 0x%04X for start of frame but got 0x%04X.\n", __func__, param->rx_start_of_frame, sof); return -EBADRQC; } msg_len = es58x_dev->ops->get_msg_len(urb_cmd); urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len); if (urb_cmd_len > param->rx_urb_cmd_max_len) { dev_err_ratelimited(es58x_dev->dev, "%s: Biggest expected size for rx urb_cmd is %u but receive a command of size %d\n", __func__, param->rx_urb_cmd_max_len, urb_cmd_len); return -EOVERFLOW; } else if (urb_actual_len < urb_cmd_len) { dev_vdbg(dev, "%s: Received %02d/%02d bytes\n", __func__, urb_actual_len, urb_cmd_len); return -ENODATA; } ret = es58x_check_crc(es58x_dev, urb_cmd, urb_cmd_len); if (ret) return ret; return urb_cmd_len; } /** * es58x_copy_to_cmd_buf() - Copy an array to the URB command buffer. * @es58x_dev: ES58X device. * @raw_cmd: the buffer we want to copy. * @raw_cmd_len: length of @raw_cmd. * * Concatenates @raw_cmd_len bytes of @raw_cmd to the end of the URB * command buffer. * * Return: zero on success, -EMSGSIZE if not enough space is available * to do the copy. */ static int es58x_copy_to_cmd_buf(struct es58x_device *es58x_dev, u8 *raw_cmd, int raw_cmd_len) { if (es58x_dev->rx_cmd_buf_len + raw_cmd_len > es58x_dev->param->rx_urb_cmd_max_len) return -EMSGSIZE; memcpy(&es58x_dev->rx_cmd_buf.raw_cmd[es58x_dev->rx_cmd_buf_len], raw_cmd, raw_cmd_len); es58x_dev->rx_cmd_buf_len += raw_cmd_len; return 0; } /** * es58x_split_urb_try_recovery() - Try to recover bad URB sequences. * @es58x_dev: ES58X device. * @raw_cmd: pointer to the buffer we want to copy. * @raw_cmd_len: length of @raw_cmd. * * Under some rare conditions, we might get incorrect URBs from the * device. From our observations, one of the valid URB gets replaced * by one from the past. The full root cause is not identified. * * This function looks for the next start of frame in the urb buffer * in order to try to recover. * * Such behavior was not observed on the devices of the ES58X FD * family and only seems to impact the ES581.4. * * Return: the number of bytes dropped on success, -EBADMSG if recovery failed. */ static int es58x_split_urb_try_recovery(struct es58x_device *es58x_dev, u8 *raw_cmd, size_t raw_cmd_len) { union es58x_urb_cmd *urb_cmd; signed int urb_cmd_len; u16 sof; int dropped_bytes = 0; es58x_increment_rx_errors(es58x_dev); while (raw_cmd_len > sizeof(sof)) { urb_cmd = (union es58x_urb_cmd *)raw_cmd; sof = get_unaligned_le16(&urb_cmd->sof); if (sof == es58x_dev->param->rx_start_of_frame) { urb_cmd_len = es58x_check_rx_urb(es58x_dev, urb_cmd, raw_cmd_len); if ((urb_cmd_len == -ENODATA) || urb_cmd_len > 0) { dev_info_ratelimited(es58x_dev->dev, "Recovery successful! 
Dropped %d bytes (urb_cmd_len: %d)\n", dropped_bytes, urb_cmd_len); return dropped_bytes; } } raw_cmd++; raw_cmd_len--; dropped_bytes++; } dev_warn_ratelimited(es58x_dev->dev, "%s: Recovery failed\n", __func__); return -EBADMSG; } /** * es58x_handle_incomplete_cmd() - Reconstitute an URB command from * different URB pieces. * @es58x_dev: ES58X device. * @urb: last urb buffer received. * * The device might split the URB commands in an arbitrary amount of * pieces. This function concatenates those in an URB buffer until a * full URB command is reconstituted and consume it. * * Return: * number of bytes consumed from @urb if successful. * * -ENODATA if the URB command is still incomplete. * * -EBADMSG if the URB command is incorrect. */ static signed int es58x_handle_incomplete_cmd(struct es58x_device *es58x_dev, struct urb *urb) { size_t cpy_len; signed int urb_cmd_len, tmp_cmd_buf_len, ret; tmp_cmd_buf_len = es58x_dev->rx_cmd_buf_len; cpy_len = min_t(int, es58x_dev->param->rx_urb_cmd_max_len - es58x_dev->rx_cmd_buf_len, urb->actual_length); ret = es58x_copy_to_cmd_buf(es58x_dev, urb->transfer_buffer, cpy_len); if (ret < 0) return ret; urb_cmd_len = es58x_check_rx_urb(es58x_dev, &es58x_dev->rx_cmd_buf, es58x_dev->rx_cmd_buf_len); if (urb_cmd_len == -ENODATA) { return -ENODATA; } else if (urb_cmd_len < 0) { dev_err_ratelimited(es58x_dev->dev, "Could not reconstitute incomplete command from previous URB, dropping %d bytes\n", tmp_cmd_buf_len + urb->actual_length); dev_err_ratelimited(es58x_dev->dev, "Error code: %pe, es58x_dev->rx_cmd_buf_len: %d, urb->actual_length: %u\n", ERR_PTR(urb_cmd_len), tmp_cmd_buf_len, urb->actual_length); es58x_print_hex_dump(&es58x_dev->rx_cmd_buf, tmp_cmd_buf_len); es58x_print_hex_dump(urb->transfer_buffer, urb->actual_length); return urb->actual_length; } es58x_handle_urb_cmd(es58x_dev, &es58x_dev->rx_cmd_buf); return urb_cmd_len - tmp_cmd_buf_len; /* consumed length */ } /** * es58x_split_urb() - Cut the received URB in individual URB commands. * @es58x_dev: ES58X device. * @urb: last urb buffer received. * * The device might send urb in bulk format (i.e. several URB commands * concatenated together). This function will split all the commands * contained in the urb. * * Return: * number of bytes consumed from @urb if successful. * * -ENODATA if the URB command is incomplete. * * -EBADMSG if the URB command is incorrect. */ static signed int es58x_split_urb(struct es58x_device *es58x_dev, struct urb *urb) { union es58x_urb_cmd *urb_cmd; u8 *raw_cmd = urb->transfer_buffer; s32 raw_cmd_len = urb->actual_length; int ret; if (es58x_dev->rx_cmd_buf_len != 0) { ret = es58x_handle_incomplete_cmd(es58x_dev, urb); if (ret != -ENODATA) es58x_dev->rx_cmd_buf_len = 0; if (ret < 0) return ret; raw_cmd += ret; raw_cmd_len -= ret; } while (raw_cmd_len > 0) { if (raw_cmd[0] == ES58X_HEARTBEAT) { raw_cmd++; raw_cmd_len--; continue; } urb_cmd = (union es58x_urb_cmd *)raw_cmd; ret = es58x_check_rx_urb(es58x_dev, urb_cmd, raw_cmd_len); if (ret > 0) { es58x_handle_urb_cmd(es58x_dev, urb_cmd); } else if (ret == -ENODATA) { es58x_copy_to_cmd_buf(es58x_dev, raw_cmd, raw_cmd_len); return -ENODATA; } else if (ret < 0) { ret = es58x_split_urb_try_recovery(es58x_dev, raw_cmd, raw_cmd_len); if (ret < 0) return ret; } raw_cmd += ret; raw_cmd_len -= ret; } return 0; } /** * es58x_read_bulk_callback() - Callback for reading data from device. * @urb: last urb buffer received. * * This function gets eventually called each time an URB is received * from the ES58X device. 
* * Checks urb status, calls read function and resubmits urb read * operation. */ static void es58x_read_bulk_callback(struct urb *urb) { struct es58x_device *es58x_dev = urb->context; const struct device *dev = es58x_dev->dev; int i, ret; switch (urb->status) { case 0: /* success */ break; case -EOVERFLOW: dev_err_ratelimited(dev, "%s: error %pe\n", __func__, ERR_PTR(urb->status)); es58x_print_hex_dump_debug(urb->transfer_buffer, urb->transfer_buffer_length); goto resubmit_urb; case -EPROTO: dev_warn_ratelimited(dev, "%s: error %pe. Device unplugged?\n", __func__, ERR_PTR(urb->status)); goto free_urb; case -ENOENT: case -EPIPE: dev_err_ratelimited(dev, "%s: error %pe\n", __func__, ERR_PTR(urb->status)); goto free_urb; case -ESHUTDOWN: dev_dbg_ratelimited(dev, "%s: error %pe\n", __func__, ERR_PTR(urb->status)); goto free_urb; default: dev_err_ratelimited(dev, "%s: error %pe\n", __func__, ERR_PTR(urb->status)); goto resubmit_urb; } ret = es58x_split_urb(es58x_dev, urb); if ((ret != -ENODATA) && ret < 0) { dev_err(es58x_dev->dev, "es58x_split_urb() returned error %pe", ERR_PTR(ret)); es58x_print_hex_dump_debug(urb->transfer_buffer, urb->actual_length); /* Because the urb command could not be parsed, * channel_id is not confirmed. Incrementing rx_errors * count of all channels. */ es58x_increment_rx_errors(es58x_dev); } resubmit_urb: ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret == -ENODEV) { for (i = 0; i < es58x_dev->num_can_ch; i++) if (es58x_dev->netdev[i]) netif_device_detach(es58x_dev->netdev[i]); } else if (ret) dev_err_ratelimited(dev, "Failed resubmitting read bulk urb: %pe\n", ERR_PTR(ret)); return; free_urb: usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); } /** * es58x_write_bulk_callback() - Callback after writing data to the device. * @urb: urb buffer which was previously submitted. * * This function gets eventually called each time an URB was sent to * the ES58X device. * * Puts the @urb back to the urbs idle anchor and tries to restart the * network queue. */ static void es58x_write_bulk_callback(struct urb *urb) { struct net_device *netdev = urb->context; struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; switch (urb->status) { case 0: /* success */ break; case -EOVERFLOW: if (net_ratelimit()) netdev_err(netdev, "%s: error %pe\n", __func__, ERR_PTR(urb->status)); es58x_print_hex_dump(urb->transfer_buffer, urb->transfer_buffer_length); break; case -ENOENT: if (net_ratelimit()) netdev_dbg(netdev, "%s: error %pe\n", __func__, ERR_PTR(urb->status)); usb_free_coherent(urb->dev, es58x_dev->param->tx_urb_cmd_max_len, urb->transfer_buffer, urb->transfer_dma); return; default: if (net_ratelimit()) netdev_info(netdev, "%s: error %pe\n", __func__, ERR_PTR(urb->status)); break; } usb_anchor_urb(urb, &es58x_dev->tx_urbs_idle); atomic_inc(&es58x_dev->tx_urbs_idle_cnt); } /** * es58x_alloc_urb() - Allocate memory for an URB and its transfer * buffer. * @es58x_dev: ES58X device. * @urb: URB to be allocated. * @buf: used to return DMA address of buffer. * @buf_len: requested buffer size. * @mem_flags: affect whether allocation may block. * * Allocates an URB and its @transfer_buffer and set its @transfer_dma * address. * * This function is used at start-up to allocate all RX URBs at once * and during run time for TX URBs. * * Return: zero on success, -ENOMEM if no memory is available. 
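 *
 * Because the transfer buffer comes from usb_alloc_coherent(), the
 * URB_NO_TRANSFER_DMA_MAP flag is set so that the USB core reuses
 * the returned transfer_dma handle instead of remapping the buffer
 * on each submission.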
*/ static int es58x_alloc_urb(struct es58x_device *es58x_dev, struct urb **urb, u8 **buf, size_t buf_len, gfp_t mem_flags) { *urb = usb_alloc_urb(0, mem_flags); if (!*urb) { dev_err(es58x_dev->dev, "No memory left for URBs\n"); return -ENOMEM; } *buf = usb_alloc_coherent(es58x_dev->udev, buf_len, mem_flags, &(*urb)->transfer_dma); if (!*buf) { dev_err(es58x_dev->dev, "No memory left for USB buffer\n"); usb_free_urb(*urb); return -ENOMEM; } (*urb)->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; return 0; } /** * es58x_get_tx_urb() - Get an URB for transmission. * @es58x_dev: ES58X device. * * Gets an URB from the idle urbs anchor or allocate a new one if the * anchor is empty. * * If there are more than ES58X_TX_URBS_MAX in the idle anchor, do * some garbage collection. The garbage collection is done here * instead of within es58x_write_bulk_callback() because * usb_free_coherent() should not be used in IRQ context: * c.f. WARN_ON(irqs_disabled()) in dma_free_attrs(). * * Return: a pointer to an URB on success, NULL if no memory is * available. */ static struct urb *es58x_get_tx_urb(struct es58x_device *es58x_dev) { atomic_t *idle_cnt = &es58x_dev->tx_urbs_idle_cnt; struct urb *urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle); if (!urb) { size_t tx_buf_len; u8 *buf; tx_buf_len = es58x_dev->param->tx_urb_cmd_max_len; if (es58x_alloc_urb(es58x_dev, &urb, &buf, tx_buf_len, GFP_ATOMIC)) return NULL; usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->tx_pipe, buf, tx_buf_len, es58x_write_bulk_callback, NULL); return urb; } while (atomic_dec_return(idle_cnt) > ES58X_TX_URBS_MAX) { /* Garbage collector */ struct urb *tmp = usb_get_from_anchor(&es58x_dev->tx_urbs_idle); if (!tmp) break; usb_free_coherent(tmp->dev, es58x_dev->param->tx_urb_cmd_max_len, tmp->transfer_buffer, tmp->transfer_dma); usb_free_urb(tmp); } return urb; } /** * es58x_submit_urb() - Send data to the device. * @es58x_dev: ES58X device. * @urb: URB to be sent. * @netdev: CAN network device. * * Return: zero on success, errno when any error occurs. */ static int es58x_submit_urb(struct es58x_device *es58x_dev, struct urb *urb, struct net_device *netdev) { int ret; es58x_set_crc(urb->transfer_buffer, urb->transfer_buffer_length); urb->context = netdev; usb_anchor_urb(urb, &es58x_dev->tx_urbs_busy); ret = usb_submit_urb(urb, GFP_ATOMIC); if (ret) { netdev_err(netdev, "%s: USB send urb failure: %pe\n", __func__, ERR_PTR(ret)); usb_unanchor_urb(urb); usb_free_coherent(urb->dev, es58x_dev->param->tx_urb_cmd_max_len, urb->transfer_buffer, urb->transfer_dma); } usb_free_urb(urb); return ret; } /** * es58x_send_msg() - Prepare an URB and submit it. * @es58x_dev: ES58X device. * @cmd_type: Command type. * @cmd_id: Command ID. * @msg: ES58X message to be sent. * @msg_len: Length of @msg. * @channel_idx: Index of the network device. * * Creates an URB command from a given message, sets the header and the * CRC and then submits it. * * Return: zero on success, errno when any error occurs. 
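 *
 * The CRC itself is appended later, by es58x_set_crc() in
 * es58x_submit_urb(), once the header and the payload are both in
 * the transfer buffer.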
*/ int es58x_send_msg(struct es58x_device *es58x_dev, u8 cmd_type, u8 cmd_id, const void *msg, u16 msg_len, int channel_idx) { struct net_device *netdev; union es58x_urb_cmd *urb_cmd; struct urb *urb; int urb_cmd_len; if (channel_idx == ES58X_CHANNEL_IDX_NA) netdev = es58x_dev->netdev[0]; /* Default to first channel */ else netdev = es58x_dev->netdev[channel_idx]; urb_cmd_len = es58x_get_urb_cmd_len(es58x_dev, msg_len); if (urb_cmd_len > es58x_dev->param->tx_urb_cmd_max_len) return -EOVERFLOW; urb = es58x_get_tx_urb(es58x_dev); if (!urb) return -ENOMEM; urb_cmd = urb->transfer_buffer; es58x_dev->ops->fill_urb_header(urb_cmd, cmd_type, cmd_id, channel_idx, msg_len); memcpy(&urb_cmd->raw_cmd[es58x_dev->param->urb_cmd_header_len], msg, msg_len); urb->transfer_buffer_length = urb_cmd_len; return es58x_submit_urb(es58x_dev, urb, netdev); } /** * es58x_alloc_rx_urbs() - Allocate RX URBs. * @es58x_dev: ES58X device. * * Allocate URBs for reception and anchor them. * * Return: zero on success, errno when any error occurs. */ static int es58x_alloc_rx_urbs(struct es58x_device *es58x_dev) { const struct device *dev = es58x_dev->dev; const struct es58x_parameters *param = es58x_dev->param; u16 rx_buf_len = usb_maxpacket(es58x_dev->udev, es58x_dev->rx_pipe); struct urb *urb; u8 *buf; int i; int ret = -EINVAL; for (i = 0; i < param->rx_urb_max; i++) { ret = es58x_alloc_urb(es58x_dev, &urb, &buf, rx_buf_len, GFP_KERNEL); if (ret) break; usb_fill_bulk_urb(urb, es58x_dev->udev, es58x_dev->rx_pipe, buf, rx_buf_len, es58x_read_bulk_callback, es58x_dev); usb_anchor_urb(urb, &es58x_dev->rx_urbs); ret = usb_submit_urb(urb, GFP_KERNEL); if (ret) { usb_unanchor_urb(urb); usb_free_coherent(es58x_dev->udev, rx_buf_len, buf, urb->transfer_dma); usb_free_urb(urb); break; } usb_free_urb(urb); } if (i == 0) { dev_err(dev, "%s: Could not setup any rx URBs\n", __func__); return ret; } dev_dbg(dev, "%s: Allocated %d rx URBs each of size %u\n", __func__, i, rx_buf_len); return ret; } /** * es58x_free_urbs() - Free all the TX and RX URBs. * @es58x_dev: ES58X device. */ static void es58x_free_urbs(struct es58x_device *es58x_dev) { struct urb *urb; if (!usb_wait_anchor_empty_timeout(&es58x_dev->tx_urbs_busy, 1000)) { dev_err(es58x_dev->dev, "%s: Timeout, some TX urbs still remain\n", __func__); usb_kill_anchored_urbs(&es58x_dev->tx_urbs_busy); } while ((urb = usb_get_from_anchor(&es58x_dev->tx_urbs_idle)) != NULL) { usb_free_coherent(urb->dev, es58x_dev->param->tx_urb_cmd_max_len, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); atomic_dec(&es58x_dev->tx_urbs_idle_cnt); } if (atomic_read(&es58x_dev->tx_urbs_idle_cnt)) dev_err(es58x_dev->dev, "All idle urbs were freed but tx_urb_idle_cnt is %d\n", atomic_read(&es58x_dev->tx_urbs_idle_cnt)); usb_kill_anchored_urbs(&es58x_dev->rx_urbs); } /** * es58x_open() - Enable the network device. * @netdev: CAN network device. * * Called when the network transitions to the up state. Allocate the * URB resources if needed and open the channel. * * Return: zero on success, errno when any error occurs. 
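 *
 * opened_channel_cnt acts as a reference count: the RX URBs and the
 * device/host clock offset are set up when the first channel of the
 * device opens, and es58x_stop() releases the URB resources when the
 * last channel closes.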
*/ static int es58x_open(struct net_device *netdev) { struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; int ret; if (!es58x_dev->opened_channel_cnt) { ret = es58x_alloc_rx_urbs(es58x_dev); if (ret) return ret; ret = es58x_set_realtime_diff_ns(es58x_dev); if (ret) goto free_urbs; } ret = open_candev(netdev); if (ret) goto free_urbs; ret = es58x_dev->ops->enable_channel(es58x_priv(netdev)); if (ret) goto free_urbs; es58x_dev->opened_channel_cnt++; netif_start_queue(netdev); return ret; free_urbs: if (!es58x_dev->opened_channel_cnt) es58x_free_urbs(es58x_dev); netdev_err(netdev, "%s: Could not open the network device: %pe\n", __func__, ERR_PTR(ret)); return ret; } /** * es58x_stop() - Disable the network device. * @netdev: CAN network device. * * Called when the network transitions to the down state. If all the * channels of the device are closed, free the URB resources which are * not needed anymore. * * Return: zero on success, errno when any error occurs. */ static int es58x_stop(struct net_device *netdev) { struct es58x_priv *priv = es58x_priv(netdev); struct es58x_device *es58x_dev = priv->es58x_dev; int ret; netif_stop_queue(netdev); ret = es58x_dev->ops->disable_channel(priv); if (ret) return ret; priv->can.state = CAN_STATE_STOPPED; es58x_can_reset_echo_fifo(netdev); close_candev(netdev); es58x_flush_pending_tx_msg(netdev); es58x_dev->opened_channel_cnt--; if (!es58x_dev->opened_channel_cnt) es58x_free_urbs(es58x_dev); return 0; } /** * es58x_xmit_commit() - Send the bulk urb. * @netdev: CAN network device. * * Do the bulk send. This function should be called only once by bulk * transmission. * * Return: zero on success, errno when any error occurs. */ static int es58x_xmit_commit(struct net_device *netdev) { struct es58x_priv *priv = es58x_priv(netdev); int ret; if (!es58x_is_can_state_active(netdev)) return -ENETDOWN; if (es58x_is_echo_skb_threshold_reached(priv)) netif_stop_queue(netdev); ret = es58x_submit_urb(priv->es58x_dev, priv->tx_urb, netdev); if (ret == 0) priv->tx_urb = NULL; return ret; } /** * es58x_xmit_more() - Can we put more packets? * @priv: ES58X private parameters related to the network device. * * Return: true if we can put more, false if it is time to send. */ static bool es58x_xmit_more(struct es58x_priv *priv) { unsigned int free_slots = priv->can.echo_skb_max - (priv->tx_head - priv->tx_tail); return netdev_xmit_more() && free_slots > 0 && priv->tx_can_msg_cnt < priv->es58x_dev->param->tx_bulk_max; } /** * es58x_start_xmit() - Transmit an skb. * @skb: socket buffer of a CAN message. * @netdev: CAN network device. * * Called when a packet needs to be transmitted. * * This function relies on Byte Queue Limits (BQL). The main benefit * is to increase the throughput by allowing bulk transfers * (c.f. xmit_more flag). * * Queues up to tx_bulk_max messages in &tx_urb buffer and does * a bulk send of all messages in one single URB. * * Return: NETDEV_TX_OK regardless of if we could transmit the @skb or * had to drop it. */ static netdev_tx_t es58x_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct es58x_priv *priv = es58x_priv(netdev); struct es58x_device *es58x_dev = priv->es58x_dev; unsigned int frame_len; int ret; if (can_dev_dropped_skb(netdev, skb)) { if (priv->tx_urb) goto xmit_commit; return NETDEV_TX_OK; } if (priv->tx_urb && priv->tx_can_msg_is_fd != can_is_canfd_skb(skb)) { /* Can not do bulk send with mixed CAN and CAN FD frames. 
*/ ret = es58x_xmit_commit(netdev); if (ret) goto drop_skb; } if (!priv->tx_urb) { priv->tx_urb = es58x_get_tx_urb(es58x_dev); if (!priv->tx_urb) { ret = -ENOMEM; goto drop_skb; } priv->tx_can_msg_cnt = 0; priv->tx_can_msg_is_fd = can_is_canfd_skb(skb); } ret = es58x_dev->ops->tx_can_msg(priv, skb); if (ret) goto drop_skb; frame_len = can_skb_get_frame_len(skb); ret = can_put_echo_skb(skb, netdev, priv->tx_head & es58x_dev->param->fifo_mask, frame_len); if (ret) goto xmit_failure; netdev_sent_queue(netdev, frame_len); priv->tx_head++; priv->tx_can_msg_cnt++; xmit_commit: if (!es58x_xmit_more(priv)) { ret = es58x_xmit_commit(netdev); if (ret) goto xmit_failure; } return NETDEV_TX_OK; drop_skb: dev_kfree_skb(skb); netdev->stats.tx_dropped++; xmit_failure: netdev_warn(netdev, "%s: send message failure: %pe\n", __func__, ERR_PTR(ret)); netdev->stats.tx_errors++; es58x_flush_pending_tx_msg(netdev); return NETDEV_TX_OK; } static const struct net_device_ops es58x_netdev_ops = { .ndo_open = es58x_open, .ndo_stop = es58x_stop, .ndo_start_xmit = es58x_start_xmit, .ndo_eth_ioctl = can_eth_ioctl_hwts, }; static const struct ethtool_ops es58x_ethtool_ops = { .get_ts_info = can_ethtool_op_get_ts_info_hwts, }; /** * es58x_set_mode() - Change network device mode. * @netdev: CAN network device. * @mode: either %CAN_MODE_START, %CAN_MODE_STOP or %CAN_MODE_SLEEP * * Currently, this function is only used to stop and restart the * channel during a bus off event (c.f. es58x_rx_err_msg() and * drivers/net/can/dev.c:can_restart() which are the two only * callers). * * Return: zero on success, errno when any error occurs. */ static int es58x_set_mode(struct net_device *netdev, enum can_mode mode) { struct es58x_priv *priv = es58x_priv(netdev); switch (mode) { case CAN_MODE_START: switch (priv->can.state) { case CAN_STATE_BUS_OFF: return priv->es58x_dev->ops->enable_channel(priv); case CAN_STATE_STOPPED: return es58x_open(netdev); case CAN_STATE_ERROR_ACTIVE: case CAN_STATE_ERROR_WARNING: case CAN_STATE_ERROR_PASSIVE: default: return 0; } case CAN_MODE_STOP: switch (priv->can.state) { case CAN_STATE_STOPPED: return 0; case CAN_STATE_ERROR_ACTIVE: case CAN_STATE_ERROR_WARNING: case CAN_STATE_ERROR_PASSIVE: case CAN_STATE_BUS_OFF: default: return priv->es58x_dev->ops->disable_channel(priv); } case CAN_MODE_SLEEP: default: return -EOPNOTSUPP; } } /** * es58x_init_priv() - Initialize private parameters. * @es58x_dev: ES58X device. * @priv: ES58X private parameters related to the network device. * @channel_idx: Index of the network device. * * Return: zero on success, errno if devlink port could not be * properly registered. 
*/ static int es58x_init_priv(struct es58x_device *es58x_dev, struct es58x_priv *priv, int channel_idx) { struct devlink_port_attrs attrs = { .flavour = DEVLINK_PORT_FLAVOUR_PHYSICAL, }; const struct es58x_parameters *param = es58x_dev->param; struct can_priv *can = &priv->can; priv->es58x_dev = es58x_dev; priv->channel_idx = channel_idx; priv->tx_urb = NULL; priv->tx_can_msg_cnt = 0; can->bittiming_const = param->bittiming_const; if (param->ctrlmode_supported & CAN_CTRLMODE_FD) { can->data_bittiming_const = param->data_bittiming_const; can->tdc_const = param->tdc_const; } can->bitrate_max = param->bitrate_max; can->clock = param->clock; can->state = CAN_STATE_STOPPED; can->ctrlmode_supported = param->ctrlmode_supported; can->do_set_mode = es58x_set_mode; devlink_port_attrs_set(&priv->devlink_port, &attrs); return devlink_port_register(priv_to_devlink(es58x_dev), &priv->devlink_port, channel_idx); } /** * es58x_init_netdev() - Initialize the network device. * @es58x_dev: ES58X device. * @channel_idx: Index of the network device. * * Return: zero on success, errno when any error occurs. */ static int es58x_init_netdev(struct es58x_device *es58x_dev, int channel_idx) { struct net_device *netdev; struct device *dev = es58x_dev->dev; int ret; netdev = alloc_candev(sizeof(struct es58x_priv), es58x_dev->param->fifo_mask + 1); if (!netdev) { dev_err(dev, "Could not allocate candev\n"); return -ENOMEM; } SET_NETDEV_DEV(netdev, dev); es58x_dev->netdev[channel_idx] = netdev; ret = es58x_init_priv(es58x_dev, es58x_priv(netdev), channel_idx); if (ret) goto free_candev; SET_NETDEV_DEVLINK_PORT(netdev, &es58x_priv(netdev)->devlink_port); netdev->netdev_ops = &es58x_netdev_ops; netdev->ethtool_ops = &es58x_ethtool_ops; netdev->flags |= IFF_ECHO; /* We support local echo */ netdev->dev_port = channel_idx; ret = register_candev(netdev); if (ret) goto devlink_port_unregister; netdev_queue_set_dql_min_limit(netdev_get_tx_queue(netdev, 0), es58x_dev->param->dql_min_limit); return ret; devlink_port_unregister: devlink_port_unregister(&es58x_priv(netdev)->devlink_port); free_candev: es58x_dev->netdev[channel_idx] = NULL; free_candev(netdev); return ret; } /** * es58x_free_netdevs() - Release all network resources of the device. * @es58x_dev: ES58X device. */ static void es58x_free_netdevs(struct es58x_device *es58x_dev) { int i; for (i = 0; i < es58x_dev->num_can_ch; i++) { struct net_device *netdev = es58x_dev->netdev[i]; if (!netdev) continue; unregister_candev(netdev); devlink_port_unregister(&es58x_priv(netdev)->devlink_port); es58x_dev->netdev[i] = NULL; free_candev(netdev); } } /** * es58x_init_es58x_dev() - Initialize the ES58X device. * @intf: USB interface. * @driver_info: Quirks of the device. * * Return: pointer to an ES58X device on success, error pointer when * any error occurs. 
*/ static struct es58x_device *es58x_init_es58x_dev(struct usb_interface *intf, kernel_ulong_t driver_info) { struct device *dev = &intf->dev; struct es58x_device *es58x_dev; struct devlink *devlink; const struct es58x_parameters *param; const struct es58x_operators *ops; struct usb_device *udev = interface_to_usbdev(intf); struct usb_endpoint_descriptor *ep_in, *ep_out; int ret; dev_info(dev, "Starting %s %s (Serial Number %s)\n", udev->manufacturer, udev->product, udev->serial); ret = usb_find_common_endpoints(intf->cur_altsetting, &ep_in, &ep_out, NULL, NULL); if (ret) return ERR_PTR(ret); if (driver_info & ES58X_FD_FAMILY) { param = &es58x_fd_param; ops = &es58x_fd_ops; } else { param = &es581_4_param; ops = &es581_4_ops; } devlink = devlink_alloc(&es58x_dl_ops, es58x_sizeof_es58x_device(param), dev); if (!devlink) return ERR_PTR(-ENOMEM); es58x_dev = devlink_priv(devlink); es58x_dev->param = param; es58x_dev->ops = ops; es58x_dev->dev = dev; es58x_dev->udev = udev; if (driver_info & ES58X_DUAL_CHANNEL) es58x_dev->num_can_ch = 2; else es58x_dev->num_can_ch = 1; init_usb_anchor(&es58x_dev->rx_urbs); init_usb_anchor(&es58x_dev->tx_urbs_idle); init_usb_anchor(&es58x_dev->tx_urbs_busy); atomic_set(&es58x_dev->tx_urbs_idle_cnt, 0); usb_set_intfdata(intf, es58x_dev); es58x_dev->rx_pipe = usb_rcvbulkpipe(es58x_dev->udev, ep_in->bEndpointAddress); es58x_dev->tx_pipe = usb_sndbulkpipe(es58x_dev->udev, ep_out->bEndpointAddress); return es58x_dev; } /** * es58x_probe() - Initialize the USB device. * @intf: USB interface. * @id: USB device ID. * * Return: zero on success, -ENODEV if the interface is not supported * or errno when any other error occurs. */ static int es58x_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct es58x_device *es58x_dev; int ch_idx; es58x_dev = es58x_init_es58x_dev(intf, id->driver_info); if (IS_ERR(es58x_dev)) return PTR_ERR(es58x_dev); es58x_parse_product_info(es58x_dev); devlink_register(priv_to_devlink(es58x_dev)); for (ch_idx = 0; ch_idx < es58x_dev->num_can_ch; ch_idx++) { int ret = es58x_init_netdev(es58x_dev, ch_idx); if (ret) { es58x_free_netdevs(es58x_dev); return ret; } } return 0; } /** * es58x_disconnect() - Disconnect the USB device. * @intf: USB interface * * Called by the usb core when driver is unloaded or device is * removed. */ static void es58x_disconnect(struct usb_interface *intf) { struct es58x_device *es58x_dev = usb_get_intfdata(intf); dev_info(&intf->dev, "Disconnecting %s %s\n", es58x_dev->udev->manufacturer, es58x_dev->udev->product); devlink_unregister(priv_to_devlink(es58x_dev)); es58x_free_netdevs(es58x_dev); es58x_free_urbs(es58x_dev); devlink_free(priv_to_devlink(es58x_dev)); usb_set_intfdata(intf, NULL); } static struct usb_driver es58x_driver = { .name = KBUILD_MODNAME, .probe = es58x_probe, .disconnect = es58x_disconnect, .id_table = es58x_id_table }; module_usb_driver(es58x_driver);
linux-master
drivers/net/can/usb/etas_es58x/es58x_core.c
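The transmit path in es58x_core.c above leans on two free-running counters, tx_head and tx_tail, and derives both the echo-skb slot index and the number of in-flight frames from them. The standalone userspace sketch below illustrates that accounting trick in isolation; the DEMO_* names and constants are invented for the demonstration and are not part of the driver.

#include <assert.h>
#include <limits.h>
#include <stdio.h>

/* Sketch of the free-running counter accounting used by
 * es58x_xmit_more() and the can_put_echo_skb() indexing above: head
 * and tail only ever increase, the unsigned difference (head - tail)
 * counts in-flight frames correctly even across wraparound, and a
 * power-of-two mask turns head into a FIFO slot index.
 */
#define DEMO_ECHO_SKB_MAX 256u
#define DEMO_FIFO_MASK (DEMO_ECHO_SKB_MAX - 1)

static unsigned int demo_free_slots(unsigned int head, unsigned int tail)
{
	return DEMO_ECHO_SKB_MAX - (head - tail);
}

int main(void)
{
	unsigned int head = UINT_MAX - 5; /* close to wraparound on purpose */
	unsigned int tail = head;
	int i;

	for (i = 0; i < 10; i++) {
		printf("slot %3u, free %3u\n", head & DEMO_FIFO_MASK,
		       demo_free_slots(head, tail));
		head++; /* one frame queued */
	}
	tail += 10; /* device echoed all ten frames back */
	assert(demo_free_slots(head, tail) == DEMO_ECHO_SKB_MAX);
	return 0;
}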
// SPDX-License-Identifier: GPL-2.0 /* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. * * File es58x_fd.c: Adds support to ETAS ES582.1 and ES584.1 (naming * convention: we use the term "ES58X FD" when referring to those two * variants together). * * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. * Copyright (c) 2020 ETAS K.K.. All rights reserved. * Copyright (c) 2020-2022 Vincent Mailhol <[email protected]> */ #include <asm/unaligned.h> #include <linux/kernel.h> #include <linux/units.h> #include "es58x_core.h" #include "es58x_fd.h" /** * es58x_fd_sizeof_rx_tx_msg() - Calculate the actual length of the * structure of a rx or tx message. * @msg: message of variable length, must have a dlc and a len fields. * * Even if RTR frames have actually no payload, the ES58X devices * still expect it. Must be a macro in order to accept several types * (struct es58x_fd_tx_can_msg and struct es58x_fd_rx_can_msg) as an * input. * * Return: length of the message. */ #define es58x_fd_sizeof_rx_tx_msg(msg) \ ({ \ typeof(msg) __msg = (msg); \ size_t __msg_len; \ \ if (__msg.flags & ES58X_FLAG_FD_DATA) \ __msg_len = canfd_sanitize_len(__msg.len); \ else \ __msg_len = can_cc_dlc2len(__msg.dlc); \ \ offsetof(typeof(__msg), data[__msg_len]); \ }) static enum es58x_fd_cmd_type es58x_fd_cmd_type(struct net_device *netdev) { u32 ctrlmode = es58x_priv(netdev)->can.ctrlmode; if (ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) return ES58X_FD_CMD_TYPE_CANFD; else return ES58X_FD_CMD_TYPE_CAN; } static u16 es58x_fd_get_msg_len(const union es58x_urb_cmd *urb_cmd) { return get_unaligned_le16(&urb_cmd->es58x_fd_urb_cmd.msg_len); } static int es58x_fd_echo_msg(struct net_device *netdev, const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) { struct es58x_priv *priv = es58x_priv(netdev); const struct es58x_fd_echo_msg *echo_msg; struct es58x_device *es58x_dev = priv->es58x_dev; u64 *tstamps = es58x_dev->timestamps; u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); int i, num_element; u32 rcv_packet_idx; const u32 mask = GENMASK(BITS_PER_TYPE(mask) - 1, BITS_PER_TYPE(echo_msg->packet_idx)); num_element = es58x_msg_num_element(es58x_dev->dev, es58x_fd_urb_cmd->echo_msg, msg_len); if (num_element < 0) return num_element; echo_msg = es58x_fd_urb_cmd->echo_msg; rcv_packet_idx = (priv->tx_tail & mask) | echo_msg[0].packet_idx; for (i = 0; i < num_element; i++) { if ((u8)rcv_packet_idx != echo_msg[i].packet_idx) { netdev_err(netdev, "Packet idx jumped from %u to %u\n", (u8)rcv_packet_idx - 1, echo_msg[i].packet_idx); return -EBADMSG; } tstamps[i] = get_unaligned_le64(&echo_msg[i].timestamp); rcv_packet_idx++; } return es58x_can_get_echo_skb(netdev, priv->tx_tail, tstamps, num_element); } static int es58x_fd_rx_can_msg(struct net_device *netdev, const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) { struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; const u8 *rx_can_msg_buf = es58x_fd_urb_cmd->rx_can_msg_buf; u16 rx_can_msg_buf_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); int pkts, ret; ret = es58x_check_msg_max_len(es58x_dev->dev, es58x_fd_urb_cmd->rx_can_msg_buf, rx_can_msg_buf_len); if (ret) return ret; for (pkts = 0; rx_can_msg_buf_len > 0; pkts++) { const struct es58x_fd_rx_can_msg *rx_can_msg = (const struct es58x_fd_rx_can_msg *)rx_can_msg_buf; bool is_can_fd = !!(rx_can_msg->flags & ES58X_FLAG_FD_DATA); /* rx_can_msg_len is the length of the rx_can_msg * buffer. 
Not to be confused with rx_can_msg->len * which is the length of the CAN payload * rx_can_msg->data. */ u16 rx_can_msg_len = es58x_fd_sizeof_rx_tx_msg(*rx_can_msg); if (rx_can_msg_len > rx_can_msg_buf_len) { netdev_err(netdev, "%s: Expected a rx_can_msg of size %d but only %d bytes are left in rx_can_msg_buf\n", __func__, rx_can_msg_len, rx_can_msg_buf_len); return -EMSGSIZE; } if (rx_can_msg->len > CANFD_MAX_DLEN) { netdev_err(netdev, "%s: Data length is %d but maximum should be %d\n", __func__, rx_can_msg->len, CANFD_MAX_DLEN); return -EMSGSIZE; } if (netif_running(netdev)) { u64 tstamp = get_unaligned_le64(&rx_can_msg->timestamp); canid_t can_id = get_unaligned_le32(&rx_can_msg->can_id); u8 dlc; if (is_can_fd) dlc = can_fd_len2dlc(rx_can_msg->len); else dlc = rx_can_msg->dlc; ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data, can_id, rx_can_msg->flags, dlc); if (ret) break; } rx_can_msg_buf_len -= rx_can_msg_len; rx_can_msg_buf += rx_can_msg_len; } if (!netif_running(netdev)) { if (net_ratelimit()) netdev_info(netdev, "%s: %s is down, dropping %d rx packets\n", __func__, netdev->name, pkts); netdev->stats.rx_dropped += pkts; } return ret; } static int es58x_fd_rx_event_msg(struct net_device *netdev, const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) { struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); const struct es58x_fd_rx_event_msg *rx_event_msg; int ret; rx_event_msg = &es58x_fd_urb_cmd->rx_event_msg; ret = es58x_check_msg_len(es58x_dev->dev, *rx_event_msg, msg_len); if (ret) return ret; return es58x_rx_err_msg(netdev, rx_event_msg->error_code, rx_event_msg->event_code, get_unaligned_le64(&rx_event_msg->timestamp)); } static int es58x_fd_rx_cmd_ret_u32(struct net_device *netdev, const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd, enum es58x_ret_type cmd_ret_type) { struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); int ret; ret = es58x_check_msg_len(es58x_dev->dev, es58x_fd_urb_cmd->rx_cmd_ret_le32, msg_len); if (ret) return ret; return es58x_rx_cmd_ret_u32(netdev, cmd_ret_type, get_unaligned_le32(&es58x_fd_urb_cmd->rx_cmd_ret_le32)); } static int es58x_fd_tx_ack_msg(struct net_device *netdev, const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) { struct es58x_device *es58x_dev = es58x_priv(netdev)->es58x_dev; const struct es58x_fd_tx_ack_msg *tx_ack_msg; u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); int ret; tx_ack_msg = &es58x_fd_urb_cmd->tx_ack_msg; ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len); if (ret) return ret; return es58x_tx_ack_msg(netdev, get_unaligned_le16(&tx_ack_msg->tx_free_entries), get_unaligned_le32(&tx_ack_msg->rx_cmd_ret_le32)); } static int es58x_fd_can_cmd_id(struct es58x_device *es58x_dev, const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) { struct net_device *netdev; int ret; ret = es58x_get_netdev(es58x_dev, es58x_fd_urb_cmd->channel_idx, ES58X_FD_CHANNEL_IDX_OFFSET, &netdev); if (ret) return ret; switch ((enum es58x_fd_can_cmd_id)es58x_fd_urb_cmd->cmd_id) { case ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL: return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, ES58X_RET_TYPE_ENABLE_CHANNEL); case ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL: return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, ES58X_RET_TYPE_DISABLE_CHANNEL); case ES58X_FD_CAN_CMD_ID_TX_MSG: return es58x_fd_tx_ack_msg(netdev, es58x_fd_urb_cmd); case ES58X_FD_CAN_CMD_ID_ECHO_MSG: return es58x_fd_echo_msg(netdev, 
es58x_fd_urb_cmd); case ES58X_FD_CAN_CMD_ID_RX_MSG: return es58x_fd_rx_can_msg(netdev, es58x_fd_urb_cmd); case ES58X_FD_CAN_CMD_ID_RESET_RX: return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, ES58X_RET_TYPE_RESET_RX); case ES58X_FD_CAN_CMD_ID_RESET_TX: return es58x_fd_rx_cmd_ret_u32(netdev, es58x_fd_urb_cmd, ES58X_RET_TYPE_RESET_TX); case ES58X_FD_CAN_CMD_ID_ERROR_OR_EVENT_MSG: return es58x_fd_rx_event_msg(netdev, es58x_fd_urb_cmd); default: return -EBADRQC; } } static int es58x_fd_device_cmd_id(struct es58x_device *es58x_dev, const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd) { u16 msg_len = get_unaligned_le16(&es58x_fd_urb_cmd->msg_len); int ret; switch ((enum es58x_fd_dev_cmd_id)es58x_fd_urb_cmd->cmd_id) { case ES58X_FD_DEV_CMD_ID_TIMESTAMP: ret = es58x_check_msg_len(es58x_dev->dev, es58x_fd_urb_cmd->timestamp, msg_len); if (ret) return ret; es58x_rx_timestamp(es58x_dev, get_unaligned_le64(&es58x_fd_urb_cmd->timestamp)); return 0; default: return -EBADRQC; } } static int es58x_fd_handle_urb_cmd(struct es58x_device *es58x_dev, const union es58x_urb_cmd *urb_cmd) { const struct es58x_fd_urb_cmd *es58x_fd_urb_cmd; int ret; es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; switch ((enum es58x_fd_cmd_type)es58x_fd_urb_cmd->cmd_type) { case ES58X_FD_CMD_TYPE_CAN: case ES58X_FD_CMD_TYPE_CANFD: ret = es58x_fd_can_cmd_id(es58x_dev, es58x_fd_urb_cmd); break; case ES58X_FD_CMD_TYPE_DEVICE: ret = es58x_fd_device_cmd_id(es58x_dev, es58x_fd_urb_cmd); break; default: ret = -EBADRQC; break; } if (ret == -EBADRQC) dev_err(es58x_dev->dev, "%s: Unknown command type (0x%02X) and command ID (0x%02X) combination\n", __func__, es58x_fd_urb_cmd->cmd_type, es58x_fd_urb_cmd->cmd_id); return ret; } static void es58x_fd_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type, u8 cmd_id, u8 channel_idx, u16 msg_len) { struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; es58x_fd_urb_cmd->SOF = cpu_to_le16(es58x_fd_param.tx_start_of_frame); es58x_fd_urb_cmd->cmd_type = cmd_type; es58x_fd_urb_cmd->cmd_id = cmd_id; es58x_fd_urb_cmd->channel_idx = channel_idx; es58x_fd_urb_cmd->msg_len = cpu_to_le16(msg_len); } static int es58x_fd_tx_can_msg(struct es58x_priv *priv, const struct sk_buff *skb) { struct es58x_device *es58x_dev = priv->es58x_dev; union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer; struct es58x_fd_urb_cmd *es58x_fd_urb_cmd = &urb_cmd->es58x_fd_urb_cmd; struct can_frame *cf = (struct can_frame *)skb->data; struct es58x_fd_tx_can_msg *tx_can_msg; bool is_fd = can_is_canfd_skb(skb); u16 msg_len; int ret; if (priv->tx_can_msg_cnt == 0) { msg_len = 0; es58x_fd_fill_urb_header(urb_cmd, is_fd ? ES58X_FD_CMD_TYPE_CANFD : ES58X_FD_CMD_TYPE_CAN, ES58X_FD_CAN_CMD_ID_TX_MSG_NO_ACK, priv->channel_idx, msg_len); } else { msg_len = es58x_fd_get_msg_len(urb_cmd); } ret = es58x_check_msg_max_len(es58x_dev->dev, es58x_fd_urb_cmd->tx_can_msg_buf, msg_len + sizeof(*tx_can_msg)); if (ret) return ret; /* Fill message contents. 
*/ tx_can_msg = (typeof(tx_can_msg))&es58x_fd_urb_cmd->raw_msg[msg_len]; tx_can_msg->packet_idx = (u8)priv->tx_head; put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); tx_can_msg->flags = (u8)es58x_get_flags(skb); if (is_fd) tx_can_msg->len = cf->len; else tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); memcpy(tx_can_msg->data, cf->data, cf->len); /* Calculate new sizes */ msg_len += es58x_fd_sizeof_rx_tx_msg(*tx_can_msg); priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev, msg_len); put_unaligned_le16(msg_len, &es58x_fd_urb_cmd->msg_len); return 0; } static void es58x_fd_convert_bittiming(struct es58x_fd_bittiming *es58x_fd_bt, struct can_bittiming *bt) { /* The actual value set in the hardware registers is one less * than the functional value. */ const int offset = 1; es58x_fd_bt->bitrate = cpu_to_le32(bt->bitrate); es58x_fd_bt->tseg1 = cpu_to_le16(bt->prop_seg + bt->phase_seg1 - offset); es58x_fd_bt->tseg2 = cpu_to_le16(bt->phase_seg2 - offset); es58x_fd_bt->brp = cpu_to_le16(bt->brp - offset); es58x_fd_bt->sjw = cpu_to_le16(bt->sjw - offset); } static int es58x_fd_enable_channel(struct es58x_priv *priv) { struct es58x_device *es58x_dev = priv->es58x_dev; struct net_device *netdev = es58x_dev->netdev[priv->channel_idx]; struct es58x_fd_tx_conf_msg tx_conf_msg = { 0 }; u32 ctrlmode; size_t conf_len = 0; es58x_fd_convert_bittiming(&tx_conf_msg.nominal_bittiming, &priv->can.bittiming); ctrlmode = priv->can.ctrlmode; if (ctrlmode & CAN_CTRLMODE_3_SAMPLES) tx_conf_msg.samples_per_bit = ES58X_SAMPLES_PER_BIT_THREE; else tx_conf_msg.samples_per_bit = ES58X_SAMPLES_PER_BIT_ONE; tx_conf_msg.sync_edge = ES58X_SYNC_EDGE_SINGLE; tx_conf_msg.physical_layer = ES58X_PHYSICAL_LAYER_HIGH_SPEED; tx_conf_msg.echo_mode = ES58X_ECHO_ON; if (ctrlmode & CAN_CTRLMODE_LISTENONLY) tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_PASSIVE; else tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_ACTIVE; if (ctrlmode & CAN_CTRLMODE_FD_NON_ISO) { tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_FD_NON_ISO; tx_conf_msg.canfd_enabled = 1; } else if (ctrlmode & CAN_CTRLMODE_FD) { tx_conf_msg.ctrlmode |= ES58X_FD_CTRLMODE_FD; tx_conf_msg.canfd_enabled = 1; } if (tx_conf_msg.canfd_enabled) { es58x_fd_convert_bittiming(&tx_conf_msg.data_bittiming, &priv->can.data_bittiming); if (can_tdc_is_enabled(&priv->can)) { tx_conf_msg.tdc_enabled = 1; tx_conf_msg.tdco = cpu_to_le16(priv->can.tdc.tdco); tx_conf_msg.tdcf = cpu_to_le16(priv->can.tdc.tdcf); } conf_len = ES58X_FD_CANFD_CONF_LEN; } else { conf_len = ES58X_FD_CAN_CONF_LEN; } return es58x_send_msg(es58x_dev, es58x_fd_cmd_type(netdev), ES58X_FD_CAN_CMD_ID_ENABLE_CHANNEL, &tx_conf_msg, conf_len, priv->channel_idx); } static int es58x_fd_disable_channel(struct es58x_priv *priv) { /* The type (ES58X_FD_CMD_TYPE_CAN or ES58X_FD_CMD_TYPE_CANFD) does * not matter here. */ return es58x_send_msg(priv->es58x_dev, ES58X_FD_CMD_TYPE_CAN, ES58X_FD_CAN_CMD_ID_DISABLE_CHANNEL, ES58X_EMPTY_MSG, 0, priv->channel_idx); } static int es58x_fd_get_timestamp(struct es58x_device *es58x_dev) { return es58x_send_msg(es58x_dev, ES58X_FD_CMD_TYPE_DEVICE, ES58X_FD_DEV_CMD_ID_TIMESTAMP, ES58X_EMPTY_MSG, 0, ES58X_CHANNEL_IDX_NA); } /* Nominal bittiming constants for ES582.1 and ES584.1 as specified in * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" section * 49.6.8 "MCAN Nominal Bit Timing and Prescaler Register" from * Microchip. * * The values from the specification are the hardware register * values. 
To convert them to the functional values, all ranges were * incremented by 1 (e.g. range [0..n-1] changed to [1..n]). */ static const struct can_bittiming_const es58x_fd_nom_bittiming_const = { .name = "ES582.1/ES584.1", .tseg1_min = 2, .tseg1_max = 256, .tseg2_min = 2, .tseg2_max = 128, .sjw_max = 128, .brp_min = 1, .brp_max = 512, .brp_inc = 1 }; /* Data bittiming constants for ES582.1 and ES584.1 as specified in * the microcontroller datasheet: "SAM E70/S70/V70/V71 Family" section * 49.6.4 "MCAN Data Bit Timing and Prescaler Register" from * Microchip. */ static const struct can_bittiming_const es58x_fd_data_bittiming_const = { .name = "ES582.1/ES584.1", .tseg1_min = 2, .tseg1_max = 32, .tseg2_min = 1, .tseg2_max = 16, .sjw_max = 8, .brp_min = 1, .brp_max = 32, .brp_inc = 1 }; /* Transmission Delay Compensation constants for ES582.1 and ES584.1 * as specified in the microcontroller datasheet: "SAM E70/S70/V70/V71 * Family" section 49.6.15 "MCAN Transmitter Delay Compensation * Register" from Microchip. */ static const struct can_tdc_const es58x_tdc_const = { .tdcv_min = 0, .tdcv_max = 0, /* Manual mode not supported. */ .tdco_min = 0, .tdco_max = 127, .tdcf_min = 0, .tdcf_max = 127 }; const struct es58x_parameters es58x_fd_param = { .bittiming_const = &es58x_fd_nom_bittiming_const, .data_bittiming_const = &es58x_fd_data_bittiming_const, .tdc_const = &es58x_tdc_const, /* The devices use NXP TJA1044G transceivers which guarantee * the timing for data rates up to 5 Mbps. Bitrates up to 8 * Mbps work in an optimal environment but are not recommended * for a production environment. */ .bitrate_max = 8 * MEGA /* BPS */, .clock = {.freq = 80 * MEGA /* Hz */}, .ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | CAN_CTRLMODE_CC_LEN8_DLC | CAN_CTRLMODE_TDC_AUTO, .tx_start_of_frame = 0xCEFA, /* FACE in little endian */ .rx_start_of_frame = 0xFECA, /* CAFE in little endian */ .tx_urb_cmd_max_len = ES58X_FD_TX_URB_CMD_MAX_LEN, .rx_urb_cmd_max_len = ES58X_FD_RX_URB_CMD_MAX_LEN, /* Size of internal device TX queue is 500. * * However, when reaching a value around 278, the device's busy * LED turns on and thus the maximum value of 500 is never reached * in practice. Also, when this value is too high, some errors * on the echo_msg were witnessed when the device is * recovering from bus off. * * For the above reasons, a value that would prevent the device * from becoming busy was chosen. In practice, BQL would * prevent the value from even getting close to the * maximum, so no impact on performance was measured. */ .fifo_mask = 255, /* echo_skb_max = 256 */ .dql_min_limit = CAN_FRAME_LEN_MAX * 15, /* Empirical value. */ .tx_bulk_max = ES58X_FD_TX_BULK_MAX, .urb_cmd_header_len = ES58X_FD_URB_CMD_HEADER_LEN, .rx_urb_max = ES58X_RX_URBS_MAX, .tx_urb_max = ES58X_TX_URBS_MAX }; const struct es58x_operators es58x_fd_ops = { .get_msg_len = es58x_fd_get_msg_len, .handle_urb_cmd = es58x_fd_handle_urb_cmd, .fill_urb_header = es58x_fd_fill_urb_header, .tx_can_msg = es58x_fd_tx_can_msg, .enable_channel = es58x_fd_enable_channel, .disable_channel = es58x_fd_disable_channel, .reset_device = NULL, /* Not implemented in the device firmware. */ .get_timestamp = es58x_fd_get_timestamp };
linux-master
drivers/net/can/usb/etas_es58x/es58x_fd.c
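The es58x_fd_sizeof_rx_tx_msg() macro at the top of es58x_fd.c sizes a variable-length wire message by taking offsetof() of the element one past the payload. The sketch below reproduces that technique on a made-up struct (demo_msg is not the device protocol); note that a runtime array index inside offsetof() relies on the GCC/Clang __builtin_offsetof behavior, the same extension the kernel macro depends on.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* demo_msg is an invented layout: a two-byte header followed by a
 * payload whose valid length is given by the len field.
 */
struct demo_msg {
	uint8_t flags;
	uint8_t len;
	uint8_t data[64];
};

/* Number of bytes actually occupied on the wire: header plus payload.
 * offsetof(type, data[n]) with a runtime n is a GCC/Clang extension.
 */
static size_t demo_msg_size(const struct demo_msg *msg)
{
	return offsetof(struct demo_msg, data[msg->len]);
}

int main(void)
{
	struct demo_msg msg = { .flags = 0, .len = 8 };

	printf("wire size: %zu bytes\n", demo_msg_size(&msg)); /* 2 + 8 */
	return 0;
}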
// SPDX-License-Identifier: GPL-2.0 /* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. * * File es58x_devlink.c: report the product information using devlink. * * Copyright (c) 2022 Vincent Mailhol <[email protected]> */ #include <linux/ctype.h> #include <linux/device.h> #include <linux/usb.h> #include <net/devlink.h> #include "es58x_core.h" /* USB descriptor index containing the product information string. */ #define ES58X_PROD_INFO_IDX 6 /** * es58x_parse_sw_version() - Extract boot loader or firmware version. * @es58x_dev: ES58X device. * @prod_info: USB custom string returned by the device. * @prefix: Select which information should be parsed. Set it to "FW" * to parse the firmware version or to "BL" to parse the * bootloader version. * * The @prod_info string contains the firmware and the bootloader * version numbers, each prefixed by a magic string and concatenated with * other numbers. Depending on the device, the firmware (bootloader) * format is either "FW_Vxx.xx.xx" ("BL_Vxx.xx.xx") or "FW:xx.xx.xx" * ("BL:xx.xx.xx") where 'x' represents a digit. @prod_info must * contain the common part of those prefixes: "FW" or "BL". * * Parse @prod_info and store the version number in * &es58x_dev.firmware_version or &es58x_dev.bootloader_version * according to the @prefix value. * * Return: zero on success, -EINVAL if @prefix contains an invalid * value and -EBADMSG if @prod_info could not be parsed. */ static int es58x_parse_sw_version(struct es58x_device *es58x_dev, const char *prod_info, const char *prefix) { struct es58x_sw_version *version; int major, minor, revision; if (!strcmp(prefix, "FW")) version = &es58x_dev->firmware_version; else if (!strcmp(prefix, "BL")) version = &es58x_dev->bootloader_version; else return -EINVAL; /* Go to prefix */ prod_info = strstr(prod_info, prefix); if (!prod_info) return -EBADMSG; /* Go to beginning of the version number */ while (!isdigit(*prod_info)) { prod_info++; if (!*prod_info) return -EBADMSG; } if (sscanf(prod_info, "%2u.%2u.%2u", &major, &minor, &revision) != 3) return -EBADMSG; version->major = major; version->minor = minor; version->revision = revision; return 0; } /** * es58x_parse_hw_rev() - Extract hardware revision number. * @es58x_dev: ES58X device. * @prod_info: USB custom string returned by the device. * * @prod_info contains the hardware revision prefixed by a magic * string and concatenated with other numbers. Depending on * the device, the hardware revision format is either * "HW_VER:axxx/xxx" or "HR:axxx/xxx" where 'a' represents a letter * and 'x' a digit. * * Parse @prod_info and store the hardware revision number in * &es58x_dev.hardware_revision. * * Return: zero on success, -EBADMSG if @prod_info could not be * parsed. */ static int es58x_parse_hw_rev(struct es58x_device *es58x_dev, const char *prod_info) { char letter; int major, minor; /* The only occurrence of 'H' is in the hardware revision prefix. */ prod_info = strchr(prod_info, 'H'); if (!prod_info) return -EBADMSG; /* Go to beginning of the hardware revision */ prod_info = strchr(prod_info, ':'); if (!prod_info) return -EBADMSG; prod_info++; if (sscanf(prod_info, "%c%3u/%3u", &letter, &major, &minor) != 3) return -EBADMSG; es58x_dev->hardware_revision.letter = letter; es58x_dev->hardware_revision.major = major; es58x_dev->hardware_revision.minor = minor; return 0; } /** * es58x_parse_product_info() - Parse the ES58x product information * string. * @es58x_dev: ES58X device. 
* * Retrieve the product information string and parse it to extract the * firmware version, the bootloader version and the hardware * revision. * * If the function fails, simply emit a log message and continue * because product information is not critical for the driver to * operate. */ void es58x_parse_product_info(struct es58x_device *es58x_dev) { char *prod_info; prod_info = usb_cache_string(es58x_dev->udev, ES58X_PROD_INFO_IDX); if (!prod_info) { dev_warn(es58x_dev->dev, "could not retrieve the product info string\n"); return; } if (es58x_parse_sw_version(es58x_dev, prod_info, "FW") || es58x_parse_sw_version(es58x_dev, prod_info, "BL") || es58x_parse_hw_rev(es58x_dev, prod_info)) dev_info(es58x_dev->dev, "could not parse product info: '%s'\n", prod_info); kfree(prod_info); } /** * es58x_sw_version_is_set() - Check if the version is a valid number. * @sw_ver: Version number of either the firmware or the bootloader. * * If &es58x_sw_version.major, &es58x_sw_version.minor and * &es58x_sw_version.revision are all zero, the product string could * not be parsed and the version number is invalid. */ static inline bool es58x_sw_version_is_set(struct es58x_sw_version *sw_ver) { return sw_ver->major || sw_ver->minor || sw_ver->revision; } /** * es58x_hw_revision_is_set() - Check if the revision is a valid number. * @hw_rev: Revision number of the hardware. * * If &es58x_hw_revision.letter is the null character, the product * string could not be parsed and the hardware revision number is * invalid. */ static inline bool es58x_hw_revision_is_set(struct es58x_hw_revision *hw_rev) { return hw_rev->letter != '\0'; } /** * es58x_devlink_info_get() - Report the product information. * @devlink: Devlink. * @req: skb wrapper where to put requested information. * @extack: Unused. * * Report the firmware version, the bootloader version, the hardware * revision and the serial number through netlink. * * Return: zero on success, errno when any error occurs. */ static int es58x_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req, struct netlink_ext_ack *extack) { struct es58x_device *es58x_dev = devlink_priv(devlink); struct es58x_sw_version *fw_ver = &es58x_dev->firmware_version; struct es58x_sw_version *bl_ver = &es58x_dev->bootloader_version; struct es58x_hw_revision *hw_rev = &es58x_dev->hardware_revision; char buf[max(sizeof("xx.xx.xx"), sizeof("axxx/xxx"))]; int ret = 0; if (es58x_sw_version_is_set(fw_ver)) { snprintf(buf, sizeof(buf), "%02u.%02u.%02u", fw_ver->major, fw_ver->minor, fw_ver->revision); ret = devlink_info_version_running_put(req, DEVLINK_INFO_VERSION_GENERIC_FW, buf); if (ret) return ret; } if (es58x_sw_version_is_set(bl_ver)) { snprintf(buf, sizeof(buf), "%02u.%02u.%02u", bl_ver->major, bl_ver->minor, bl_ver->revision); ret = devlink_info_version_running_put(req, DEVLINK_INFO_VERSION_GENERIC_FW_BOOTLOADER, buf); if (ret) return ret; } if (es58x_hw_revision_is_set(hw_rev)) { snprintf(buf, sizeof(buf), "%c%03u/%03u", hw_rev->letter, hw_rev->major, hw_rev->minor); ret = devlink_info_version_fixed_put(req, DEVLINK_INFO_VERSION_GENERIC_BOARD_REV, buf); if (ret) return ret; } return devlink_info_serial_number_put(req, es58x_dev->udev->serial); } const struct devlink_ops es58x_dl_ops = { .info_get = es58x_devlink_info_get, };
linux-master
drivers/net/can/usb/etas_es58x/es58x_devlink.c
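es58x_parse_sw_version() above combines strstr(), a digit scan and sscanf() to pull a dotted triplet out of the product string. Here is a minimal userspace rendition of the same strategy; the sample input string is invented for the demonstration and is not a real device string.

#include <ctype.h>
#include <stdio.h>
#include <string.h>

/* Find @prefix in @info, skip to the first digit after it, then let
 * sscanf() split the "xx.xx.xx" triplet, mirroring the driver logic.
 */
static int parse_version(const char *info, const char *prefix,
			 unsigned int v[3])
{
	info = strstr(info, prefix);
	if (!info)
		return -1;
	while (!isdigit((unsigned char)*info)) {
		info++;
		if (!*info)
			return -1;
	}
	if (sscanf(info, "%2u.%2u.%2u", &v[0], &v[1], &v[2]) != 3)
		return -1;
	return 0;
}

int main(void)
{
	unsigned int v[3];

	/* Invented sample resembling the documented "FW_Vxx.xx.xx" form. */
	if (!parse_version("ES58x FW_V03.00.02 BL_V01.02.03", "FW", v))
		printf("firmware %02u.%02u.%02u\n", v[0], v[1], v[2]);
	return 0;
}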
// SPDX-License-Identifier: GPL-2.0 /* Driver for ETAS GmbH ES58X USB CAN(-FD) Bus Interfaces. * * File es581_4.c: Adds support to ETAS ES581.4. * * Copyright (c) 2019 Robert Bosch Engineering and Business Solutions. All rights reserved. * Copyright (c) 2020 ETAS K.K.. All rights reserved. * Copyright (c) 2020-2022 Vincent Mailhol <[email protected]> */ #include <asm/unaligned.h> #include <linux/kernel.h> #include <linux/units.h> #include "es58x_core.h" #include "es581_4.h" /** * es581_4_sizeof_rx_tx_msg() - Calculate the actual length of the * structure of a rx or tx message. * @msg: message of variable length, must have a dlc field. * * Even if RTR frames have actually no payload, the ES58X devices * still expect it. Must be a macro in order to accept several types * (struct es581_4_tx_can_msg and struct es581_4_rx_can_msg) as an * input. * * Return: length of the message. */ #define es581_4_sizeof_rx_tx_msg(msg) \ offsetof(typeof(msg), data[can_cc_dlc2len((msg).dlc)]) static u16 es581_4_get_msg_len(const union es58x_urb_cmd *urb_cmd) { return get_unaligned_le16(&urb_cmd->es581_4_urb_cmd.msg_len); } static int es581_4_echo_msg(struct es58x_device *es58x_dev, const struct es581_4_urb_cmd *es581_4_urb_cmd) { struct net_device *netdev; const struct es581_4_bulk_echo_msg *bulk_echo_msg; const struct es581_4_echo_msg *echo_msg; u64 *tstamps = es58x_dev->timestamps; u16 msg_len; u32 first_packet_idx, packet_idx; unsigned int dropped = 0; int i, num_element, ret; bulk_echo_msg = &es581_4_urb_cmd->bulk_echo_msg; msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len) - sizeof(bulk_echo_msg->channel_no); num_element = es58x_msg_num_element(es58x_dev->dev, bulk_echo_msg->echo_msg, msg_len); if (num_element <= 0) return num_element; ret = es58x_get_netdev(es58x_dev, bulk_echo_msg->channel_no, ES581_4_CHANNEL_IDX_OFFSET, &netdev); if (ret) return ret; echo_msg = &bulk_echo_msg->echo_msg[0]; first_packet_idx = get_unaligned_le32(&echo_msg->packet_idx); packet_idx = first_packet_idx; for (i = 0; i < num_element; i++) { u32 tmp_idx; echo_msg = &bulk_echo_msg->echo_msg[i]; tmp_idx = get_unaligned_le32(&echo_msg->packet_idx); if (tmp_idx == packet_idx - 1) { if (net_ratelimit()) netdev_warn(netdev, "Received echo packet idx %u twice\n", packet_idx - 1); dropped++; continue; } if (tmp_idx != packet_idx) { netdev_err(netdev, "Echo packet idx jumped from %u to %u\n", packet_idx - 1, echo_msg->packet_idx); return -EBADMSG; } tstamps[i] = get_unaligned_le64(&echo_msg->timestamp); packet_idx++; } netdev->stats.tx_dropped += dropped; return es58x_can_get_echo_skb(netdev, first_packet_idx, tstamps, num_element - dropped); } static int es581_4_rx_can_msg(struct es58x_device *es58x_dev, const struct es581_4_urb_cmd *es581_4_urb_cmd, u16 msg_len) { const struct device *dev = es58x_dev->dev; struct net_device *netdev; int pkts, num_element, channel_no, ret; num_element = es58x_msg_num_element(dev, es581_4_urb_cmd->rx_can_msg, msg_len); if (num_element <= 0) return num_element; channel_no = es581_4_urb_cmd->rx_can_msg[0].channel_no; ret = es58x_get_netdev(es58x_dev, channel_no, ES581_4_CHANNEL_IDX_OFFSET, &netdev); if (ret) return ret; if (!netif_running(netdev)) { if (net_ratelimit()) netdev_info(netdev, "%s: %s is down, dropping %d rx packets\n", __func__, netdev->name, num_element); netdev->stats.rx_dropped += num_element; return 0; } for (pkts = 0; pkts < num_element; pkts++) { const struct es581_4_rx_can_msg *rx_can_msg = &es581_4_urb_cmd->rx_can_msg[pkts]; u64 tstamp = 
get_unaligned_le64(&rx_can_msg->timestamp); canid_t can_id = get_unaligned_le32(&rx_can_msg->can_id); if (channel_no != rx_can_msg->channel_no) return -EBADMSG; ret = es58x_rx_can_msg(netdev, tstamp, rx_can_msg->data, can_id, rx_can_msg->flags, rx_can_msg->dlc); if (ret) break; } return ret; } static int es581_4_rx_err_msg(struct es58x_device *es58x_dev, const struct es581_4_rx_err_msg *rx_err_msg) { struct net_device *netdev; enum es58x_err error = get_unaligned_le32(&rx_err_msg->error); int ret; ret = es58x_get_netdev(es58x_dev, rx_err_msg->channel_no, ES581_4_CHANNEL_IDX_OFFSET, &netdev); if (ret) return ret; return es58x_rx_err_msg(netdev, error, 0, get_unaligned_le64(&rx_err_msg->timestamp)); } static int es581_4_rx_event_msg(struct es58x_device *es58x_dev, const struct es581_4_rx_event_msg *rx_event_msg) { struct net_device *netdev; enum es58x_event event = get_unaligned_le32(&rx_event_msg->event); int ret; ret = es58x_get_netdev(es58x_dev, rx_event_msg->channel_no, ES581_4_CHANNEL_IDX_OFFSET, &netdev); if (ret) return ret; return es58x_rx_err_msg(netdev, 0, event, get_unaligned_le64(&rx_event_msg->timestamp)); } static int es581_4_rx_cmd_ret_u32(struct es58x_device *es58x_dev, const struct es581_4_urb_cmd *es581_4_urb_cmd, enum es58x_ret_type ret_type) { struct net_device *netdev; const struct es581_4_rx_cmd_ret *rx_cmd_ret; u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); int ret; ret = es58x_check_msg_len(es58x_dev->dev, es581_4_urb_cmd->rx_cmd_ret, msg_len); if (ret) return ret; rx_cmd_ret = &es581_4_urb_cmd->rx_cmd_ret; ret = es58x_get_netdev(es58x_dev, rx_cmd_ret->channel_no, ES581_4_CHANNEL_IDX_OFFSET, &netdev); if (ret) return ret; return es58x_rx_cmd_ret_u32(netdev, ret_type, get_unaligned_le32(&rx_cmd_ret->rx_cmd_ret_le32)); } static int es581_4_tx_ack_msg(struct es58x_device *es58x_dev, const struct es581_4_urb_cmd *es581_4_urb_cmd) { struct net_device *netdev; const struct es581_4_tx_ack_msg *tx_ack_msg; u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); int ret; tx_ack_msg = &es581_4_urb_cmd->tx_ack_msg; ret = es58x_check_msg_len(es58x_dev->dev, *tx_ack_msg, msg_len); if (ret) return ret; if (tx_ack_msg->rx_cmd_ret_u8 != ES58X_RET_U8_OK) return es58x_rx_cmd_ret_u8(es58x_dev->dev, ES58X_RET_TYPE_TX_MSG, tx_ack_msg->rx_cmd_ret_u8); ret = es58x_get_netdev(es58x_dev, tx_ack_msg->channel_no, ES581_4_CHANNEL_IDX_OFFSET, &netdev); if (ret) return ret; return es58x_tx_ack_msg(netdev, get_unaligned_le16(&tx_ack_msg->tx_free_entries), ES58X_RET_U32_OK); } static int es581_4_dispatch_rx_cmd(struct es58x_device *es58x_dev, const struct es581_4_urb_cmd *es581_4_urb_cmd) { const struct device *dev = es58x_dev->dev; u16 msg_len = get_unaligned_le16(&es581_4_urb_cmd->msg_len); enum es581_4_rx_type rx_type = es581_4_urb_cmd->rx_can_msg[0].rx_type; int ret = 0; switch (rx_type) { case ES581_4_RX_TYPE_MESSAGE: return es581_4_rx_can_msg(es58x_dev, es581_4_urb_cmd, msg_len); case ES581_4_RX_TYPE_ERROR: ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_err_msg, msg_len); if (ret < 0) return ret; return es581_4_rx_err_msg(es58x_dev, &es581_4_urb_cmd->rx_err_msg); case ES581_4_RX_TYPE_EVENT: ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_event_msg, msg_len); if (ret < 0) return ret; return es581_4_rx_event_msg(es58x_dev, &es581_4_urb_cmd->rx_event_msg); default: dev_err(dev, "%s: Unknown rx_type 0x%02X\n", __func__, rx_type); return -EBADRQC; } } static int es581_4_handle_urb_cmd(struct es58x_device *es58x_dev, const union es58x_urb_cmd *urb_cmd) { const struct 
es581_4_urb_cmd *es581_4_urb_cmd; struct device *dev = es58x_dev->dev; u16 msg_len = es581_4_get_msg_len(urb_cmd); int ret; es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; if (es581_4_urb_cmd->cmd_type != ES581_4_CAN_COMMAND_TYPE) { dev_err(dev, "%s: Unknown command type (0x%02X)\n", __func__, es581_4_urb_cmd->cmd_type); return -EBADRQC; } switch ((enum es581_4_cmd_id)es581_4_urb_cmd->cmd_id) { case ES581_4_CMD_ID_SET_BITTIMING: return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, ES58X_RET_TYPE_SET_BITTIMING); case ES581_4_CMD_ID_ENABLE_CHANNEL: return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, ES58X_RET_TYPE_ENABLE_CHANNEL); case ES581_4_CMD_ID_TX_MSG: return es581_4_tx_ack_msg(es58x_dev, es581_4_urb_cmd); case ES581_4_CMD_ID_RX_MSG: return es581_4_dispatch_rx_cmd(es58x_dev, es581_4_urb_cmd); case ES581_4_CMD_ID_RESET_RX: ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, ES58X_RET_TYPE_RESET_RX); return ret; case ES581_4_CMD_ID_RESET_TX: ret = es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, ES58X_RET_TYPE_RESET_TX); return ret; case ES581_4_CMD_ID_DISABLE_CHANNEL: return es581_4_rx_cmd_ret_u32(es58x_dev, es581_4_urb_cmd, ES58X_RET_TYPE_DISABLE_CHANNEL); case ES581_4_CMD_ID_TIMESTAMP: ret = es58x_check_msg_len(dev, es581_4_urb_cmd->timestamp, msg_len); if (ret < 0) return ret; es58x_rx_timestamp(es58x_dev, get_unaligned_le64(&es581_4_urb_cmd->timestamp)); return 0; case ES581_4_CMD_ID_ECHO: return es581_4_echo_msg(es58x_dev, es581_4_urb_cmd); case ES581_4_CMD_ID_DEVICE_ERR: ret = es58x_check_msg_len(dev, es581_4_urb_cmd->rx_cmd_ret_u8, msg_len); if (ret) return ret; return es58x_rx_cmd_ret_u8(dev, ES58X_RET_TYPE_DEVICE_ERR, es581_4_urb_cmd->rx_cmd_ret_u8); default: dev_warn(dev, "%s: Unexpected command ID: 0x%02X\n", __func__, es581_4_urb_cmd->cmd_id); return -EBADRQC; } } static void es581_4_fill_urb_header(union es58x_urb_cmd *urb_cmd, u8 cmd_type, u8 cmd_id, u8 channel_idx, u16 msg_len) { struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; es581_4_urb_cmd->SOF = cpu_to_le16(es581_4_param.tx_start_of_frame); es581_4_urb_cmd->cmd_type = cmd_type; es581_4_urb_cmd->cmd_id = cmd_id; es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len); } static int es581_4_tx_can_msg(struct es58x_priv *priv, const struct sk_buff *skb) { struct es58x_device *es58x_dev = priv->es58x_dev; union es58x_urb_cmd *urb_cmd = priv->tx_urb->transfer_buffer; struct es581_4_urb_cmd *es581_4_urb_cmd = &urb_cmd->es581_4_urb_cmd; struct can_frame *cf = (struct can_frame *)skb->data; struct es581_4_tx_can_msg *tx_can_msg; u16 msg_len; int ret; if (can_is_canfd_skb(skb)) return -EMSGSIZE; if (priv->tx_can_msg_cnt == 0) { msg_len = sizeof(es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg); es581_4_fill_urb_header(urb_cmd, ES581_4_CAN_COMMAND_TYPE, ES581_4_CMD_ID_TX_MSG, priv->channel_idx, msg_len); es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg = 0; } else { msg_len = es581_4_get_msg_len(urb_cmd); } ret = es58x_check_msg_max_len(es58x_dev->dev, es581_4_urb_cmd->bulk_tx_can_msg, msg_len + sizeof(*tx_can_msg)); if (ret) return ret; /* Fill message contents. 
*/ tx_can_msg = (typeof(tx_can_msg))&es581_4_urb_cmd->raw_msg[msg_len]; put_unaligned_le32(es58x_get_raw_can_id(cf), &tx_can_msg->can_id); put_unaligned_le32(priv->tx_head, &tx_can_msg->packet_idx); put_unaligned_le16((u16)es58x_get_flags(skb), &tx_can_msg->flags); tx_can_msg->channel_no = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; tx_can_msg->dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); memcpy(tx_can_msg->data, cf->data, cf->len); /* Calculate new sizes. */ es581_4_urb_cmd->bulk_tx_can_msg.num_can_msg++; msg_len += es581_4_sizeof_rx_tx_msg(*tx_can_msg); priv->tx_urb->transfer_buffer_length = es58x_get_urb_cmd_len(es58x_dev, msg_len); es581_4_urb_cmd->msg_len = cpu_to_le16(msg_len); return 0; } static int es581_4_set_bittiming(struct es58x_priv *priv) { struct es581_4_tx_conf_msg tx_conf_msg = { 0 }; struct can_bittiming *bt = &priv->can.bittiming; tx_conf_msg.bitrate = cpu_to_le32(bt->bitrate); /* bt->sample_point is in tenth of percent. Convert it to percent. */ tx_conf_msg.sample_point = cpu_to_le32(bt->sample_point / 10U); tx_conf_msg.samples_per_bit = cpu_to_le32(ES58X_SAMPLES_PER_BIT_ONE); tx_conf_msg.bit_time = cpu_to_le32(can_bit_time(bt)); tx_conf_msg.sjw = cpu_to_le32(bt->sjw); tx_conf_msg.sync_edge = cpu_to_le32(ES58X_SYNC_EDGE_SINGLE); tx_conf_msg.physical_layer = cpu_to_le32(ES58X_PHYSICAL_LAYER_HIGH_SPEED); tx_conf_msg.echo_mode = cpu_to_le32(ES58X_ECHO_ON); tx_conf_msg.channel_no = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, ES581_4_CMD_ID_SET_BITTIMING, &tx_conf_msg, sizeof(tx_conf_msg), priv->channel_idx); } static int es581_4_enable_channel(struct es58x_priv *priv) { int ret; u8 msg = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; ret = es581_4_set_bittiming(priv); if (ret) return ret; return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, ES581_4_CMD_ID_ENABLE_CHANNEL, &msg, sizeof(msg), priv->channel_idx); } static int es581_4_disable_channel(struct es58x_priv *priv) { u8 msg = priv->channel_idx + ES581_4_CHANNEL_IDX_OFFSET; return es58x_send_msg(priv->es58x_dev, ES581_4_CAN_COMMAND_TYPE, ES581_4_CMD_ID_DISABLE_CHANNEL, &msg, sizeof(msg), priv->channel_idx); } static int es581_4_reset_device(struct es58x_device *es58x_dev) { return es58x_send_msg(es58x_dev, ES581_4_CAN_COMMAND_TYPE, ES581_4_CMD_ID_RESET_DEVICE, ES58X_EMPTY_MSG, 0, ES58X_CHANNEL_IDX_NA); } static int es581_4_get_timestamp(struct es58x_device *es58x_dev) { return es58x_send_msg(es58x_dev, ES581_4_CAN_COMMAND_TYPE, ES581_4_CMD_ID_TIMESTAMP, ES58X_EMPTY_MSG, 0, ES58X_CHANNEL_IDX_NA); } /* Nominal bittiming constants for ES581.4 as specified in the * microcontroller datasheet: "Stellaris(R) LM3S5B91 Microcontroller" * table 17-4 "CAN Protocol Ranges" from Texas Instruments. */ static const struct can_bittiming_const es581_4_bittiming_const = { .name = "ES581.4", .tseg1_min = 1, .tseg1_max = 8, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 128, .brp_inc = 1 }; const struct es58x_parameters es581_4_param = { .bittiming_const = &es581_4_bittiming_const, .data_bittiming_const = NULL, .tdc_const = NULL, .bitrate_max = 1 * MEGA /* BPS */, .clock = {.freq = 50 * MEGA /* Hz */}, .ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC, .tx_start_of_frame = 0xAFAF, .rx_start_of_frame = 0xFAFA, .tx_urb_cmd_max_len = ES581_4_TX_URB_CMD_MAX_LEN, .rx_urb_cmd_max_len = ES581_4_RX_URB_CMD_MAX_LEN, /* Size of internal device TX queue is 330. 
* * However, we witnessed some ES58X_ERR_PROT_CRC errors from * the device and thus echo_skb_max was lowered to the * empirical value of 75, which seems stable, and then rounded * down to 64 to make it a power of two. * * The root cause of those ES58X_ERR_PROT_CRC errors is still * unclear. */ .fifo_mask = 63, /* echo_skb_max = 64 */ .dql_min_limit = CAN_FRAME_LEN_MAX * 50, /* Empirical value. */ .tx_bulk_max = ES581_4_TX_BULK_MAX, .urb_cmd_header_len = ES581_4_URB_CMD_HEADER_LEN, .rx_urb_max = ES58X_RX_URBS_MAX, .tx_urb_max = ES58X_TX_URBS_MAX }; const struct es58x_operators es581_4_ops = { .get_msg_len = es581_4_get_msg_len, .handle_urb_cmd = es581_4_handle_urb_cmd, .fill_urb_header = es581_4_fill_urb_header, .tx_can_msg = es581_4_tx_can_msg, .enable_channel = es581_4_enable_channel, .disable_channel = es581_4_disable_channel, .reset_device = es581_4_reset_device, .get_timestamp = es581_4_get_timestamp };
linux-master
drivers/net/can/usb/etas_es58x/es581_4.c
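es581_4_sizeof_rx_tx_msg() above sizes Classical CAN records through can_cc_dlc2len(), which clamps the on-wire DLC to at most 8 data bytes. The sketch below reimplements that clamp outside the kernel, purely for illustration.

#include <stdio.h>

/* On Classical CAN, DLC values 9..15 may appear on the wire but still
 * carry only 8 data bytes; this mirrors the kernel's can_cc_dlc2len().
 */
static unsigned char demo_cc_dlc2len(unsigned char dlc)
{
	return dlc > 8 ? 8 : dlc;
}

int main(void)
{
	unsigned char dlc;

	for (dlc = 0; dlc <= 15; dlc++)
		printf("dlc %2u -> %u data bytes\n", dlc, demo_cc_dlc2len(dlc));
	return 0;
}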
// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for PEAK System PCAN-USB FD / PCAN-USB Pro FD adapter * * Copyright (C) 2013-2014 Stephane Grosjean <[email protected]> */ #include <linux/ethtool.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/dev/peak_canfd.h> #include "pcan_usb_core.h" #include "pcan_usb_pro.h" #define PCAN_USBPROFD_CHANNEL_COUNT 2 #define PCAN_USBFD_CHANNEL_COUNT 1 /* PCAN-USB Pro FD adapter internal clock (Hz) */ #define PCAN_UFD_CRYSTAL_HZ 80000000 #define PCAN_UFD_CMD_BUFFER_SIZE 512 #define PCAN_UFD_LOSPD_PKT_SIZE 64 /* PCAN-USB Pro FD command timeout (ms.) */ #define PCAN_UFD_CMD_TIMEOUT_MS 1000 /* PCAN-USB Pro FD rx/tx buffers size */ #define PCAN_UFD_RX_BUFFER_SIZE 2048 #define PCAN_UFD_TX_BUFFER_SIZE 512 /* struct pcan_ufd_fw_info::type */ #define PCAN_USBFD_TYPE_STD 1 #define PCAN_USBFD_TYPE_EXT 2 /* includes EP numbers */ /* read some versions info from the hw device */ struct __packed pcan_ufd_fw_info { __le16 size_of; /* sizeof this */ __le16 type; /* type of this structure */ u8 hw_type; /* Type of hardware (HW_TYPE_xxx) */ u8 bl_version[3]; /* Bootloader version */ u8 hw_version; /* Hardware version (PCB) */ u8 fw_version[3]; /* Firmware version */ __le32 dev_id[2]; /* "device id" per CAN */ __le32 ser_no; /* S/N */ __le32 flags; /* special functions */ /* extended data when type == PCAN_USBFD_TYPE_EXT */ u8 cmd_out_ep; /* ep for cmd */ u8 cmd_in_ep; /* ep for replies */ u8 data_out_ep[2]; /* ep for CANx TX */ u8 data_in_ep; /* ep for CAN RX */ u8 dummy[3]; }; /* handle device specific info used by the netdevices */ struct pcan_usb_fd_if { struct peak_usb_device *dev[PCAN_USB_MAX_CHANNEL]; struct pcan_ufd_fw_info fw_info; struct peak_time_ref time_ref; int cm_ignore_count; int dev_opened_count; }; /* device information */ struct pcan_usb_fd_device { struct peak_usb_device dev; struct can_berr_counter bec; struct pcan_usb_fd_if *usb_if; u8 *cmd_buffer_addr; }; /* Extended USB commands (non uCAN commands) */ /* Clock Modes command */ #define PCAN_UFD_CMD_CLK_SET 0x80 #define PCAN_UFD_CLK_80MHZ 0x0 #define PCAN_UFD_CLK_60MHZ 0x1 #define PCAN_UFD_CLK_40MHZ 0x2 #define PCAN_UFD_CLK_30MHZ 0x3 #define PCAN_UFD_CLK_24MHZ 0x4 #define PCAN_UFD_CLK_20MHZ 0x5 #define PCAN_UFD_CLK_DEF PCAN_UFD_CLK_80MHZ struct __packed pcan_ufd_clock { __le16 opcode_channel; u8 mode; u8 unused[5]; }; /* LED control command */ #define PCAN_UFD_CMD_LED_SET 0x86 #define PCAN_UFD_LED_DEV 0x00 #define PCAN_UFD_LED_FAST 0x01 #define PCAN_UFD_LED_SLOW 0x02 #define PCAN_UFD_LED_ON 0x03 #define PCAN_UFD_LED_OFF 0x04 #define PCAN_UFD_LED_DEF PCAN_UFD_LED_DEV struct __packed pcan_ufd_led { __le16 opcode_channel; u8 mode; u8 unused[5]; }; /* Extended usage of uCAN commands CMD_xxx_xx_OPTION for PCAN-USB Pro FD */ #define PCAN_UFD_FLTEXT_CALIBRATION 0x8000 struct __packed pcan_ufd_options { __le16 opcode_channel; __le16 ucan_mask; u16 unused; __le16 usb_mask; }; /* Extended usage of uCAN messages for PCAN-USB Pro FD */ #define PCAN_UFD_MSG_CALIBRATION 0x100 struct __packed pcan_ufd_ts_msg { __le16 size; __le16 type; __le32 ts_low; __le32 ts_high; __le16 usb_frame_index; u16 unused; }; #define PCAN_UFD_MSG_OVERRUN 0x101 #define PCAN_UFD_OVMSG_CHANNEL(o) ((o)->channel & 0xf) struct __packed pcan_ufd_ovr_msg { __le16 size; __le16 type; __le32 ts_low; __le32 ts_high; u8 channel; u8 unused[3]; }; #define PCAN_UFD_CMD_DEVID_SET 0x81 struct __packed pcan_ufd_device_id 
{ __le16 opcode_channel; u16 unused; __le32 device_id; }; static inline int pufd_omsg_get_channel(struct pcan_ufd_ovr_msg *om) { return om->channel & 0xf; } /* Clock mode frequency values */ static const u32 pcan_usb_fd_clk_freq[6] = { [PCAN_UFD_CLK_80MHZ] = 80000000, [PCAN_UFD_CLK_60MHZ] = 60000000, [PCAN_UFD_CLK_40MHZ] = 40000000, [PCAN_UFD_CLK_30MHZ] = 30000000, [PCAN_UFD_CLK_24MHZ] = 24000000, [PCAN_UFD_CLK_20MHZ] = 20000000 }; /* return a device USB interface */ static inline struct pcan_usb_fd_if *pcan_usb_fd_dev_if(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); return pdev->usb_if; } /* return a device USB commands buffer */ static inline void *pcan_usb_fd_cmd_buffer(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); return pdev->cmd_buffer_addr; } /* send PCAN-USB Pro FD commands synchronously */ static int pcan_usb_fd_send_cmd(struct peak_usb_device *dev, void *cmd_tail) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info; void *cmd_head = pcan_usb_fd_cmd_buffer(dev); int err = 0; u8 *packet_ptr; int packet_len; ptrdiff_t cmd_len; /* usb device unregistered? */ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; /* if a packet is not filled completely by commands, the command list * is terminated with an "end of collection" record. */ cmd_len = cmd_tail - cmd_head; if (cmd_len <= (PCAN_UFD_CMD_BUFFER_SIZE - sizeof(u64))) { memset(cmd_tail, 0xff, sizeof(u64)); cmd_len += sizeof(u64); } packet_ptr = cmd_head; packet_len = cmd_len; /* firmware is not able to re-assemble 512 bytes buffer in full-speed */ if (unlikely(dev->udev->speed != USB_SPEED_HIGH)) packet_len = min(packet_len, PCAN_UFD_LOSPD_PKT_SIZE); do { err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, fw_info->cmd_out_ep), packet_ptr, packet_len, NULL, PCAN_UFD_CMD_TIMEOUT_MS); if (err) { netdev_err(dev->netdev, "sending command failure: %d\n", err); break; } packet_ptr += packet_len; cmd_len -= packet_len; if (cmd_len < PCAN_UFD_LOSPD_PKT_SIZE) packet_len = cmd_len; } while (packet_len > 0); return err; } static int pcan_usb_fd_read_fwinfo(struct peak_usb_device *dev, struct pcan_ufd_fw_info *fw_info) { return pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_FW, fw_info, sizeof(*fw_info)); } /* build the commands list in the given buffer, to enter operational mode */ static int pcan_usb_fd_build_restart_cmd(struct peak_usb_device *dev, u8 *buf) { struct pucan_wr_err_cnt *prc; struct pucan_command *cmd; u8 *pc = buf; /* 1st, reset error counters: */ prc = (struct pucan_wr_err_cnt *)pc; prc->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_WR_ERR_CNT); /* select both counters */ prc->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE|PUCAN_WRERRCNT_RE); /* and reset their values */ prc->tx_counter = 0; prc->rx_counter = 0; /* moves the pointer forward */ pc += sizeof(struct pucan_wr_err_cnt); /* add command to switch from ISO to non-ISO mode, if fw allows it */ if (dev->can.ctrlmode_supported & CAN_CTRLMODE_FD_NON_ISO) { struct pucan_options *puo = (struct pucan_options *)pc; puo->opcode_channel = (dev->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) ? 
pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_CLR_DIS_OPTION) : pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_SET_EN_OPTION); puo->options = cpu_to_le16(PUCAN_OPTION_CANDFDISO); /* to be sure that no other extended bits will be taken into * account */ puo->unused = 0; /* moves the pointer forward */ pc += sizeof(struct pucan_options); } /* next, go back to operational mode */ cmd = (struct pucan_command *)pc; cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, (dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) ? PUCAN_CMD_LISTEN_ONLY_MODE : PUCAN_CMD_NORMAL_MODE); pc += sizeof(struct pucan_command); return pc - buf; } /* set CAN bus on/off */ static int pcan_usb_fd_set_bus(struct peak_usb_device *dev, u8 onoff) { u8 *pc = pcan_usb_fd_cmd_buffer(dev); int l; if (onoff) { /* build the cmds list to enter operational mode */ l = pcan_usb_fd_build_restart_cmd(dev, pc); } else { struct pucan_command *cmd = (struct pucan_command *)pc; /* build cmd to go back to reset mode */ cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_RESET_MODE); l = sizeof(struct pucan_command); } /* send the command */ return pcan_usb_fd_send_cmd(dev, pc + l); } /* set filtering masks: * * idx in range [0..63] selects a row #idx, all rows otherwise * mask in range [0..0xffffffff] defines up to 32 CANIDs in the row(s) * * Each bit of this 64 x 32 bits array defines a CANID value: * * bit[i,j] = 1 implies that CANID=(i x 32)+j will be received, while * bit[i,j] = 0 implies that CANID=(i x 32)+j will be discarded. */ static int pcan_usb_fd_set_filter_std(struct peak_usb_device *dev, int idx, u32 mask) { struct pucan_filter_std *cmd = pcan_usb_fd_cmd_buffer(dev); int i, n; /* select all rows when idx is out of range [0..63] */ if ((idx < 0) || (idx >= (1 << PUCAN_FLTSTD_ROW_IDX_BITS))) { n = 1 << PUCAN_FLTSTD_ROW_IDX_BITS; idx = 0; /* select the row (and only the row) otherwise */ } else { n = idx + 1; } for (i = idx; i < n; i++, cmd++) { cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_FILTER_STD); cmd->idx = cpu_to_le16(i); cmd->mask = cpu_to_le32(mask); } /* send the command */ return pcan_usb_fd_send_cmd(dev, cmd); } /* set/unset options * * onoff set(1)/unset(0) options * mask each bit defines a kind of options to set/unset */ static int pcan_usb_fd_set_options(struct peak_usb_device *dev, bool onoff, u16 ucan_mask, u16 usb_mask) { struct pcan_ufd_options *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, (onoff) ? 
PUCAN_CMD_SET_EN_OPTION : PUCAN_CMD_CLR_DIS_OPTION); cmd->ucan_mask = cpu_to_le16(ucan_mask); cmd->usb_mask = cpu_to_le16(usb_mask); /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* setup LED control */ static int pcan_usb_fd_set_can_led(struct peak_usb_device *dev, u8 led_mode) { struct pcan_ufd_led *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PCAN_UFD_CMD_LED_SET); cmd->mode = led_mode; /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* set CAN clock domain */ static int pcan_usb_fd_set_clock_domain(struct peak_usb_device *dev, u8 clk_mode) { struct pcan_ufd_clock *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PCAN_UFD_CMD_CLK_SET); cmd->mode = clk_mode; /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* set bittiming for CAN and CAN-FD header */ static int pcan_usb_fd_set_bittiming_slow(struct peak_usb_device *dev, struct can_bittiming *bt) { struct pucan_timing_slow *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_TIMING_SLOW); cmd->sjw_t = PUCAN_TSLOW_SJW_T(bt->sjw - 1, dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES); cmd->tseg2 = PUCAN_TSLOW_TSEG2(bt->phase_seg2 - 1); cmd->tseg1 = PUCAN_TSLOW_TSEG1(bt->prop_seg + bt->phase_seg1 - 1); cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(bt->brp - 1)); cmd->ewl = 96; /* default */ /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* set CAN-FD bittiming for data */ static int pcan_usb_fd_set_bittiming_fast(struct peak_usb_device *dev, struct can_bittiming *bt) { struct pucan_timing_fast *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PUCAN_CMD_TIMING_FAST); cmd->sjw = PUCAN_TFAST_SJW(bt->sjw - 1); cmd->tseg2 = PUCAN_TFAST_TSEG2(bt->phase_seg2 - 1); cmd->tseg1 = PUCAN_TFAST_TSEG1(bt->prop_seg + bt->phase_seg1 - 1); cmd->brp = cpu_to_le16(PUCAN_TFAST_BRP(bt->brp - 1)); /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* read user CAN channel id from device */ static int pcan_usb_fd_get_can_channel_id(struct peak_usb_device *dev, u32 *can_ch_id) { int err; struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev); err = pcan_usb_fd_read_fwinfo(dev, &usb_if->fw_info); if (err) return err; *can_ch_id = le32_to_cpu(usb_if->fw_info.dev_id[dev->ctrl_idx]); return err; } /* set a new CAN channel id in the flash memory of the device */ static int pcan_usb_fd_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id) { struct pcan_ufd_device_id *cmd = pcan_usb_fd_cmd_buffer(dev); cmd->opcode_channel = pucan_cmd_opcode_channel(dev->ctrl_idx, PCAN_UFD_CMD_DEVID_SET); cmd->device_id = cpu_to_le32(can_ch_id); /* send the command */ return pcan_usb_fd_send_cmd(dev, ++cmd); } /* handle restart, but in an asynchronous way * (uses PCAN-USB Pro code to complete the asynchronous request) */ static int pcan_usb_fd_restart_async(struct peak_usb_device *dev, struct urb *urb, u8 *buf) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); struct pcan_ufd_fw_info *fw_info = &pdev->usb_if->fw_info; u8 *pc = buf; /* build the entire cmds list in the provided buffer, to go back into * operational mode. 
*/ pc += pcan_usb_fd_build_restart_cmd(dev, pc); /* add EOC */ memset(pc, 0xff, sizeof(struct pucan_command)); pc += sizeof(struct pucan_command); /* complete the URB */ usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, fw_info->cmd_out_ep), buf, pc - buf, pcan_usb_pro_restart_complete, dev); /* and submit it. */ return usb_submit_urb(urb, GFP_ATOMIC); } static int pcan_usb_fd_drv_loaded(struct peak_usb_device *dev, bool loaded) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); pdev->cmd_buffer_addr[0] = 0; pdev->cmd_buffer_addr[1] = !!loaded; return pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT, PCAN_USBPRO_FCT_DRVLD, pdev->cmd_buffer_addr, PCAN_USBPRO_FCT_DRVLD_REQ_LEN); } static int pcan_usb_fd_decode_canmsg(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_rx_msg *rm = (struct pucan_rx_msg *)rx_msg; struct peak_usb_device *dev; struct net_device *netdev; struct canfd_frame *cfd; struct sk_buff *skb; const u16 rx_msg_flags = le16_to_cpu(rm->flags); if (pucan_msg_get_channel(rm) >= ARRAY_SIZE(usb_if->dev)) return -ENOMEM; dev = usb_if->dev[pucan_msg_get_channel(rm)]; netdev = dev->netdev; if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) { /* CANFD frame case */ skb = alloc_canfd_skb(netdev, &cfd); if (!skb) return -ENOMEM; if (rx_msg_flags & PUCAN_MSG_BITRATE_SWITCH) cfd->flags |= CANFD_BRS; if (rx_msg_flags & PUCAN_MSG_ERROR_STATE_IND) cfd->flags |= CANFD_ESI; cfd->len = can_fd_dlc2len(pucan_msg_get_dlc(rm)); } else { /* CAN 2.0 frame case */ skb = alloc_can_skb(netdev, (struct can_frame **)&cfd); if (!skb) return -ENOMEM; can_frame_set_cc_len((struct can_frame *)cfd, pucan_msg_get_dlc(rm), dev->can.ctrlmode); } cfd->can_id = le32_to_cpu(rm->can_id); if (rx_msg_flags & PUCAN_MSG_EXT_ID) cfd->can_id |= CAN_EFF_FLAG; if (rx_msg_flags & PUCAN_MSG_RTR) { cfd->can_id |= CAN_RTR_FLAG; } else { memcpy(cfd->data, rm->d, cfd->len); netdev->stats.rx_bytes += cfd->len; } netdev->stats.rx_packets++; peak_usb_netif_rx_64(skb, le32_to_cpu(rm->ts_low), le32_to_cpu(rm->ts_high)); return 0; } /* handle uCAN status message */ static int pcan_usb_fd_decode_status(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_status_msg *sm = (struct pucan_status_msg *)rx_msg; struct pcan_usb_fd_device *pdev; enum can_state new_state = CAN_STATE_ERROR_ACTIVE; enum can_state rx_state, tx_state; struct peak_usb_device *dev; struct net_device *netdev; struct can_frame *cf; struct sk_buff *skb; if (pucan_stmsg_get_channel(sm) >= ARRAY_SIZE(usb_if->dev)) return -ENOMEM; dev = usb_if->dev[pucan_stmsg_get_channel(sm)]; pdev = container_of(dev, struct pcan_usb_fd_device, dev); netdev = dev->netdev; /* nothing should be sent while in BUS_OFF state */ if (dev->can.state == CAN_STATE_BUS_OFF) return 0; if (sm->channel_p_w_b & PUCAN_BUS_BUSOFF) { new_state = CAN_STATE_BUS_OFF; } else if (sm->channel_p_w_b & PUCAN_BUS_PASSIVE) { new_state = CAN_STATE_ERROR_PASSIVE; } else if (sm->channel_p_w_b & PUCAN_BUS_WARNING) { new_state = CAN_STATE_ERROR_WARNING; } else { /* back to (or still in) ERROR_ACTIVE state */ new_state = CAN_STATE_ERROR_ACTIVE; pdev->bec.txerr = 0; pdev->bec.rxerr = 0; } /* state hasn't changed */ if (new_state == dev->can.state) return 0; /* handle bus state change */ tx_state = (pdev->bec.txerr >= pdev->bec.rxerr) ? new_state : 0; rx_state = (pdev->bec.txerr <= pdev->bec.rxerr) ? 
new_state : 0; /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(netdev, &cf); can_change_state(netdev, cf, tx_state, rx_state); /* things must be done even in case of OOM */ if (new_state == CAN_STATE_BUS_OFF) can_bus_off(netdev); if (!skb) return -ENOMEM; peak_usb_netif_rx_64(skb, le32_to_cpu(sm->ts_low), le32_to_cpu(sm->ts_high)); return 0; } /* handle uCAN error message */ static int pcan_usb_fd_decode_error(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pucan_error_msg *er = (struct pucan_error_msg *)rx_msg; struct pcan_usb_fd_device *pdev; struct peak_usb_device *dev; if (pucan_ermsg_get_channel(er) >= ARRAY_SIZE(usb_if->dev)) return -EINVAL; dev = usb_if->dev[pucan_ermsg_get_channel(er)]; pdev = container_of(dev, struct pcan_usb_fd_device, dev); /* keep a trace of tx and rx error counters for later use */ pdev->bec.txerr = er->tx_err_cnt; pdev->bec.rxerr = er->rx_err_cnt; return 0; } /* handle uCAN overrun message */ static int pcan_usb_fd_decode_overrun(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pcan_ufd_ovr_msg *ov = (struct pcan_ufd_ovr_msg *)rx_msg; struct peak_usb_device *dev; struct net_device *netdev; struct can_frame *cf; struct sk_buff *skb; if (pufd_omsg_get_channel(ov) >= ARRAY_SIZE(usb_if->dev)) return -EINVAL; dev = usb_if->dev[pufd_omsg_get_channel(ov)]; netdev = dev->netdev; /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(netdev, &cf); if (!skb) return -ENOMEM; cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; peak_usb_netif_rx_64(skb, le32_to_cpu(ov->ts_low), le32_to_cpu(ov->ts_high)); netdev->stats.rx_over_errors++; netdev->stats.rx_errors++; return 0; } /* handle USB calibration message */ static void pcan_usb_fd_decode_ts(struct pcan_usb_fd_if *usb_if, struct pucan_msg *rx_msg) { struct pcan_ufd_ts_msg *ts = (struct pcan_ufd_ts_msg *)rx_msg; /* should wait until clock is stabilized */ if (usb_if->cm_ignore_count > 0) usb_if->cm_ignore_count--; else peak_usb_set_ts_now(&usb_if->time_ref, le32_to_cpu(ts->ts_low)); } /* callback for bulk IN urb */ static int pcan_usb_fd_decode_buf(struct peak_usb_device *dev, struct urb *urb) { struct pcan_usb_fd_if *usb_if = pcan_usb_fd_dev_if(dev); struct net_device *netdev = dev->netdev; struct pucan_msg *rx_msg; u8 *msg_ptr, *msg_end; int err = 0; /* loop reading all the records from the incoming message */ msg_ptr = urb->transfer_buffer; msg_end = urb->transfer_buffer + urb->actual_length; for (; msg_ptr < msg_end;) { u16 rx_msg_type, rx_msg_size; rx_msg = (struct pucan_msg *)msg_ptr; if (!rx_msg->size) { /* null packet found: end of list */ break; } rx_msg_size = le16_to_cpu(rx_msg->size); rx_msg_type = le16_to_cpu(rx_msg->type); /* check if the record goes out of current packet */ if (msg_ptr + rx_msg_size > msg_end) { netdev_err(netdev, "got frag rec: should inc usb rx buf size\n"); err = -EBADMSG; break; } switch (rx_msg_type) { case PUCAN_MSG_CAN_RX: err = pcan_usb_fd_decode_canmsg(usb_if, rx_msg); if (err < 0) goto fail; break; case PCAN_UFD_MSG_CALIBRATION: pcan_usb_fd_decode_ts(usb_if, rx_msg); break; case PUCAN_MSG_ERROR: err = pcan_usb_fd_decode_error(usb_if, rx_msg); if (err < 0) goto fail; break; case PUCAN_MSG_STATUS: err = pcan_usb_fd_decode_status(usb_if, rx_msg); if (err < 0) goto fail; break; case PCAN_UFD_MSG_OVERRUN: err = pcan_usb_fd_decode_overrun(usb_if, rx_msg); if (err < 0) goto fail; break; default: netdev_err(netdev, "unhandled msg type 0x%02x (%d): ignored\n", rx_msg_type, rx_msg_type); break; }
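/* jump to the next record (the size field covers the whole record, header included) */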
msg_ptr += rx_msg_size; } fail: if (err) pcan_dump_mem("received msg", urb->transfer_buffer, urb->actual_length); return err; } /* CAN/CANFD frames encoding callback */ static int pcan_usb_fd_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, u8 *obuf, size_t *size) { struct pucan_tx_msg *tx_msg = (struct pucan_tx_msg *)obuf; struct canfd_frame *cfd = (struct canfd_frame *)skb->data; u16 tx_msg_size, tx_msg_flags; u8 dlc; if (cfd->len > CANFD_MAX_DLEN) return -EINVAL; tx_msg_size = ALIGN(sizeof(struct pucan_tx_msg) + cfd->len, 4); tx_msg->size = cpu_to_le16(tx_msg_size); tx_msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX); tx_msg_flags = 0; if (cfd->can_id & CAN_EFF_FLAG) { tx_msg_flags |= PUCAN_MSG_EXT_ID; tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_EFF_MASK); } else { tx_msg->can_id = cpu_to_le32(cfd->can_id & CAN_SFF_MASK); } if (can_is_canfd_skb(skb)) { /* considering a CANFD frame */ dlc = can_fd_len2dlc(cfd->len); tx_msg_flags |= PUCAN_MSG_EXT_DATA_LEN; if (cfd->flags & CANFD_BRS) tx_msg_flags |= PUCAN_MSG_BITRATE_SWITCH; if (cfd->flags & CANFD_ESI) tx_msg_flags |= PUCAN_MSG_ERROR_STATE_IND; } else { /* CAN 2.0 frames */ dlc = can_get_cc_dlc((struct can_frame *)cfd, dev->can.ctrlmode); if (cfd->can_id & CAN_RTR_FLAG) tx_msg_flags |= PUCAN_MSG_RTR; } /* Single-Shot frame */ if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) tx_msg_flags |= PUCAN_MSG_SINGLE_SHOT; tx_msg->flags = cpu_to_le16(tx_msg_flags); tx_msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(dev->ctrl_idx, dlc); memcpy(tx_msg->d, cfd->data, cfd->len); /* add null size message to tag the end (messages are 32-bit aligned) */ tx_msg = (struct pucan_tx_msg *)(obuf + tx_msg_size); tx_msg->size = 0; /* set the whole size of the USB packet to send */ *size = tx_msg_size + sizeof(u32); return 0; } /* start the interface (last chance before set bus on) */ static int pcan_usb_fd_start(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); int err; /* set filter mode: all acceptance */ err = pcan_usb_fd_set_filter_std(dev, -1, 0xffffffff); if (err) return err; /* opening first device: */ if (pdev->usb_if->dev_opened_count == 0) { /* reset time_ref */ peak_usb_init_time_ref(&pdev->usb_if->time_ref, &pcan_usb_pro_fd); /* enable USB calibration messages */ err = pcan_usb_fd_set_options(dev, 1, PUCAN_OPTION_ERROR, PCAN_UFD_FLTEXT_CALIBRATION); } pdev->usb_if->dev_opened_count++; /* reset cached error counters */ pdev->bec.txerr = 0; pdev->bec.rxerr = 0; return err; } /* socket callback used to copy berr counters values received through USB */ static int pcan_usb_fd_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct peak_usb_device *dev = netdev_priv(netdev); struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); *bec = pdev->bec; /* must return 0 */ return 0; } /* probe function for all PCAN-USB FD family usb interfaces */ static int pcan_usb_fd_probe(struct usb_interface *intf) { struct usb_host_interface *iface_desc = &intf->altsetting[0]; /* CAN interface is always interface #0 */ return iface_desc->desc.bInterfaceNumber; } /* stop interface (last chance before set bus off) */ static int pcan_usb_fd_stop(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); /* turn off special msgs for that interface if no other dev opened */ if (pdev->usb_if->dev_opened_count == 1) pcan_usb_fd_set_options(dev, 0, PUCAN_OPTION_ERROR, PCAN_UFD_FLTEXT_CALIBRATION);
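/* balance the increment done in pcan_usb_fd_start() */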
pdev->usb_if->dev_opened_count--; return 0; } /* called when probing, to initialize a device object */ static int pcan_usb_fd_init(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); struct pcan_ufd_fw_info *fw_info; int i, err = -ENOMEM; /* do this for 1st channel only */ if (!dev->prev_siblings) { /* allocate netdevices common structure attached to first one */ pdev->usb_if = kzalloc(sizeof(*pdev->usb_if), GFP_KERNEL); if (!pdev->usb_if) goto err_out; /* allocate command buffer once for all for the interface */ pdev->cmd_buffer_addr = kzalloc(PCAN_UFD_CMD_BUFFER_SIZE, GFP_KERNEL); if (!pdev->cmd_buffer_addr) goto err_out_1; /* number of ts msgs to ignore before taking one into account */ pdev->usb_if->cm_ignore_count = 5; fw_info = &pdev->usb_if->fw_info; err = pcan_usb_fd_read_fwinfo(dev, fw_info); if (err) { dev_err(dev->netdev->dev.parent, "unable to read %s firmware info (err %d)\n", dev->adapter->name, err); goto err_out_2; } /* explicit use of dev_xxx() instead of netdev_xxx() here: * information displayed are related to the device itself, not * to the canx (channel) device. */ dev_info(dev->netdev->dev.parent, "PEAK-System %s v%u fw v%u.%u.%u (%u channels)\n", dev->adapter->name, fw_info->hw_version, fw_info->fw_version[0], fw_info->fw_version[1], fw_info->fw_version[2], dev->adapter->ctrl_count); /* check for ability to switch between ISO/non-ISO modes */ if (fw_info->fw_version[0] >= 2) { /* firmware >= 2.x supports ISO/non-ISO switching */ dev->can.ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO; } else { /* firmware < 2.x only supports fixed(!) non-ISO */ dev->can.ctrlmode |= CAN_CTRLMODE_FD_NON_ISO; } /* if vendor rsp is of type 2, then it contains EP numbers to * use for cmds pipes. If not, then default EP should be used. */ if (fw_info->type != cpu_to_le16(PCAN_USBFD_TYPE_EXT)) { fw_info->cmd_out_ep = PCAN_USBPRO_EP_CMDOUT; fw_info->cmd_in_ep = PCAN_USBPRO_EP_CMDIN; } /* tell the hardware the can driver is running */ err = pcan_usb_fd_drv_loaded(dev, 1); if (err) { dev_err(dev->netdev->dev.parent, "unable to tell %s driver is loaded (err %d)\n", dev->adapter->name, err); goto err_out_2; } } else { /* otherwise, simply copy previous sibling's values */ struct pcan_usb_fd_device *ppdev = container_of(dev->prev_siblings, struct pcan_usb_fd_device, dev); pdev->usb_if = ppdev->usb_if; pdev->cmd_buffer_addr = ppdev->cmd_buffer_addr; /* do a copy of the ctrlmode[_supported] too */ dev->can.ctrlmode = ppdev->dev.can.ctrlmode; dev->can.ctrlmode_supported = ppdev->dev.can.ctrlmode_supported; fw_info = &pdev->usb_if->fw_info; } pdev->usb_if->dev[dev->ctrl_idx] = dev; dev->can_channel_id = le32_to_cpu(pdev->usb_if->fw_info.dev_id[dev->ctrl_idx]); /* if vendor rsp is of type 2, then it contains EP numbers to * use for data pipes. If not, then statically defined EP are used * (see peak_usb_create_dev()). 
*/ if (fw_info->type == cpu_to_le16(PCAN_USBFD_TYPE_EXT)) { dev->ep_msg_in = fw_info->data_in_ep; dev->ep_msg_out = fw_info->data_out_ep[dev->ctrl_idx]; } /* set clock domain */ for (i = 0; i < ARRAY_SIZE(pcan_usb_fd_clk_freq); i++) if (dev->adapter->clock.freq == pcan_usb_fd_clk_freq[i]) break; if (i >= ARRAY_SIZE(pcan_usb_fd_clk_freq)) { dev_warn(dev->netdev->dev.parent, "incompatible clock frequencies\n"); err = -EINVAL; goto err_out_2; } pcan_usb_fd_set_clock_domain(dev, i); /* set LED in default state (end of init phase) */ pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_DEF); return 0; err_out_2: kfree(pdev->cmd_buffer_addr); err_out_1: kfree(pdev->usb_if); err_out: return err; } /* called when driver module is being unloaded */ static void pcan_usb_fd_exit(struct peak_usb_device *dev) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); /* when rmmod called before unplug and if down, should reset things * before leaving */ if (dev->can.state != CAN_STATE_STOPPED) { /* set bus off on the corresponding channel */ pcan_usb_fd_set_bus(dev, 0); } /* switch off corresponding CAN LEDs */ pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_OFF); /* if channel #0 (only) */ if (dev->ctrl_idx == 0) { /* turn off calibration message if any device were opened */ if (pdev->usb_if->dev_opened_count > 0) pcan_usb_fd_set_options(dev, 0, PUCAN_OPTION_ERROR, PCAN_UFD_FLTEXT_CALIBRATION); /* tell USB adapter that the driver is being unloaded */ pcan_usb_fd_drv_loaded(dev, 0); } } /* called when the USB adapter is unplugged */ static void pcan_usb_fd_free(struct peak_usb_device *dev) { /* last device: can free shared objects now */ if (!dev->prev_siblings && !dev->next_siblings) { struct pcan_usb_fd_device *pdev = container_of(dev, struct pcan_usb_fd_device, dev); /* free commands buffer */ kfree(pdev->cmd_buffer_addr); /* free usb interface object */ kfree(pdev->usb_if); } } /* blink LED's */ static int pcan_usb_fd_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct peak_usb_device *dev = netdev_priv(netdev); int err = 0; switch (state) { case ETHTOOL_ID_ACTIVE: err = pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_FAST); break; case ETHTOOL_ID_INACTIVE: err = pcan_usb_fd_set_can_led(dev, PCAN_UFD_LED_DEF); break; default: break; } return err; } static const struct ethtool_ops pcan_usb_fd_ethtool_ops = { .set_phys_id = pcan_usb_fd_set_phys_id, .get_ts_info = pcan_get_ts_info, .get_eeprom_len = peak_usb_get_eeprom_len, .get_eeprom = peak_usb_get_eeprom, .set_eeprom = peak_usb_set_eeprom, }; /* describes the PCAN-USB FD adapter */ static const struct can_bittiming_const pcan_usb_fd_const = { .name = "pcan_usb_fd", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), .brp_inc = 1, }; static const struct can_bittiming_const pcan_usb_fd_data_const = { .name = "pcan_usb_fd", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TFAST_BRP_BITS), .brp_inc = 1, }; const struct peak_usb_adapter pcan_usb_fd = { .name = "PCAN-USB FD", .device_id = PCAN_USBFD_PRODUCT_ID, .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = 
PCAN_UFD_CRYSTAL_HZ, }, .bittiming_const = &pcan_usb_fd_const, .data_bittiming_const = &pcan_usb_fd_data_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), .ethtool_ops = &pcan_usb_fd_ethtool_ops, /* timestamps usage */ .ts_used_bits = 32, .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USBPRO_EP_MSGIN, .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_fd_probe, .dev_init = pcan_usb_fd_init, .dev_exit = pcan_usb_fd_exit, .dev_free = pcan_usb_fd_free, .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, .dev_restart_async = pcan_usb_fd_restart_async, .dev_encode_msg = pcan_usb_fd_encode_msg, .do_get_berr_counter = pcan_usb_fd_get_berr_counter, }; /* describes the PCAN-CHIP USB */ static const struct can_bittiming_const pcan_usb_chip_const = { .name = "pcan_chip_usb", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), .brp_inc = 1, }; static const struct can_bittiming_const pcan_usb_chip_data_const = { .name = "pcan_chip_usb", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TFAST_BRP_BITS), .brp_inc = 1, }; const struct peak_usb_adapter pcan_usb_chip = { .name = "PCAN-Chip USB", .device_id = PCAN_USBCHIP_PRODUCT_ID, .ctrl_count = PCAN_USBFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, .bittiming_const = &pcan_usb_chip_const, .data_bittiming_const = &pcan_usb_chip_data_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), .ethtool_ops = &pcan_usb_fd_ethtool_ops, /* timestamps usage */ .ts_used_bits = 32, .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USBPRO_EP_MSGIN, .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */ .dev_init = pcan_usb_fd_init, .dev_exit = pcan_usb_fd_exit, .dev_free = pcan_usb_fd_free, .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, .dev_restart_async = pcan_usb_fd_restart_async, .dev_encode_msg = pcan_usb_fd_encode_msg, .do_get_berr_counter = pcan_usb_fd_get_berr_counter, }; /* 
describes the PCAN-USB Pro FD adapter */ static const struct can_bittiming_const pcan_usb_pro_fd_const = { .name = "pcan_usb_pro_fd", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), .brp_inc = 1, }; static const struct can_bittiming_const pcan_usb_pro_fd_data_const = { .name = "pcan_usb_pro_fd", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TFAST_BRP_BITS), .brp_inc = 1, }; const struct peak_usb_adapter pcan_usb_pro_fd = { .name = "PCAN-USB Pro FD", .device_id = PCAN_USBPROFD_PRODUCT_ID, .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, .bittiming_const = &pcan_usb_pro_fd_const, .data_bittiming_const = &pcan_usb_pro_fd_data_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), .ethtool_ops = &pcan_usb_fd_ethtool_ops, /* timestamps usage */ .ts_used_bits = 32, .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USBPRO_EP_MSGIN, .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */ .dev_init = pcan_usb_fd_init, .dev_exit = pcan_usb_fd_exit, .dev_free = pcan_usb_fd_free, .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, .dev_restart_async = pcan_usb_fd_restart_async, .dev_encode_msg = pcan_usb_fd_encode_msg, .do_get_berr_counter = pcan_usb_fd_get_berr_counter, }; /* describes the PCAN-USB X6 adapter */ static const struct can_bittiming_const pcan_usb_x6_const = { .name = "pcan_usb_x6", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TSLOW_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TSLOW_BRP_BITS), .brp_inc = 1, }; static const struct can_bittiming_const pcan_usb_x6_data_const = { .name = "pcan_usb_x6", .tseg1_min = 1, .tseg1_max = (1 << PUCAN_TFAST_TSGEG1_BITS), .tseg2_min = 1, .tseg2_max = (1 << PUCAN_TFAST_TSGEG2_BITS), .sjw_max = (1 << PUCAN_TFAST_SJW_BITS), .brp_min = 1, .brp_max = (1 << PUCAN_TFAST_BRP_BITS), .brp_inc = 1, }; const struct peak_usb_adapter pcan_usb_x6 = { .name = "PCAN-USB X6", .device_id = PCAN_USBX6_PRODUCT_ID, .ctrl_count = PCAN_USBPROFD_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_FD | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_UFD_CRYSTAL_HZ, }, .bittiming_const = &pcan_usb_x6_const, .data_bittiming_const = &pcan_usb_x6_data_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_fd_device), .ethtool_ops = 
&pcan_usb_fd_ethtool_ops, /* timestamps usage */ .ts_used_bits = 32, .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USBPRO_EP_MSGIN, .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_UFD_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_UFD_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_pro_probe, /* same as PCAN-USB Pro */ .dev_init = pcan_usb_fd_init, .dev_exit = pcan_usb_fd_exit, .dev_free = pcan_usb_fd_free, .dev_set_bus = pcan_usb_fd_set_bus, .dev_set_bittiming = pcan_usb_fd_set_bittiming_slow, .dev_set_data_bittiming = pcan_usb_fd_set_bittiming_fast, .dev_get_can_channel_id = pcan_usb_fd_get_can_channel_id, .dev_set_can_channel_id = pcan_usb_fd_set_can_channel_id, .dev_decode_buf = pcan_usb_fd_decode_buf, .dev_start = pcan_usb_fd_start, .dev_stop = pcan_usb_fd_stop, .dev_restart_async = pcan_usb_fd_restart_async, .dev_encode_msg = pcan_usb_fd_encode_msg, .do_get_berr_counter = pcan_usb_fd_get_berr_counter, };
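/* Editor's sketch (not part of the driver): a minimal user-space walk of the
 * length-prefixed uCAN record stream that pcan_usb_fd_decode_buf() above
 * parses. The 4-byte header mirrors the driver's struct pucan_msg (16-bit
 * little-endian size, then 16-bit type); struct, helper and buffer contents
 * here are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct ucan_rec_hdr {
	uint16_t size;	/* whole record length, header included (LE) */
	uint16_t type;	/* record type: CAN rx, status, error, ... */
};

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static int walk_records(const uint8_t *buf, size_t len)
{
	const uint8_t *p = buf, *end = buf + len;

	while (p + sizeof(struct ucan_rec_hdr) <= end) {
		uint16_t size = get_le16(p);

		if (!size)		/* null record: end of list */
			break;
		if (p + size > end)	/* fragmented record: buffer too small */
			return -1;
		printf("record type 0x%02x, %u bytes\n",
		       (unsigned int)get_le16(p + 2), (unsigned int)size);
		p += size;		/* records are packed back to back */
	}
	return 0;
}

int main(void)
{
	/* hypothetical stream: one 8-byte record of type 0x01, then a null
	 * record terminating the list, as the encode callback above appends
	 */
	const uint8_t buf[] = { 0x08, 0x00, 0x01, 0x00, 0xde, 0xad, 0xbe, 0xef,
				0x00, 0x00, 0x00, 0x00 };

	return walk_records(buf, sizeof(buf)) ? 1 : 0;
}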
linux-master
drivers/net/can/usb/peak_usb/pcan_usb_fd.c
// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for PEAK System PCAN-USB adapter * Derived from the PCAN project file driver/src/pcan_usb.c * * Copyright (C) 2003-2010 PEAK System-Technik GmbH * Copyright (C) 2011-2012 Stephane Grosjean <[email protected]> * * Many thanks to Klaus Hitschler <[email protected]> */ #include <asm/unaligned.h> #include <linux/ethtool.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include "pcan_usb_core.h" /* PCAN-USB Endpoints */ #define PCAN_USB_EP_CMDOUT 1 #define PCAN_USB_EP_CMDIN (PCAN_USB_EP_CMDOUT | USB_DIR_IN) #define PCAN_USB_EP_MSGOUT 2 #define PCAN_USB_EP_MSGIN (PCAN_USB_EP_MSGOUT | USB_DIR_IN) /* PCAN-USB command struct */ #define PCAN_USB_CMD_FUNC 0 #define PCAN_USB_CMD_NUM 1 #define PCAN_USB_CMD_ARGS 2 #define PCAN_USB_CMD_ARGS_LEN 14 #define PCAN_USB_CMD_LEN (PCAN_USB_CMD_ARGS + \ PCAN_USB_CMD_ARGS_LEN) /* PCAN-USB commands */ #define PCAN_USB_CMD_BITRATE 1 #define PCAN_USB_CMD_SET_BUS 3 #define PCAN_USB_CMD_DEVID 4 #define PCAN_USB_CMD_SN 6 #define PCAN_USB_CMD_REGISTER 9 #define PCAN_USB_CMD_EXT_VCC 10 #define PCAN_USB_CMD_ERR_FR 11 #define PCAN_USB_CMD_LED 12 /* PCAN_USB_CMD_SET_BUS number arg */ #define PCAN_USB_BUS_XCVER 2 #define PCAN_USB_BUS_SILENT_MODE 3 /* PCAN_USB_CMD_xxx functions */ #define PCAN_USB_GET 1 #define PCAN_USB_SET 2 /* PCAN-USB command timeout (ms.) */ #define PCAN_USB_COMMAND_TIMEOUT 1000 /* PCAN-USB startup timeout (ms.) */ #define PCAN_USB_STARTUP_TIMEOUT 10 /* PCAN-USB rx/tx buffers size */ #define PCAN_USB_RX_BUFFER_SIZE 64 #define PCAN_USB_TX_BUFFER_SIZE 64 #define PCAN_USB_MSG_HEADER_LEN 2 #define PCAN_USB_MSG_TX_CAN 2 /* Tx msg is a CAN frame */ /* PCAN-USB adapter internal clock (MHz) */ #define PCAN_USB_CRYSTAL_HZ 16000000 /* PCAN-USB USB message record status/len field */ #define PCAN_USB_STATUSLEN_TIMESTAMP (1 << 7) #define PCAN_USB_STATUSLEN_INTERNAL (1 << 6) #define PCAN_USB_STATUSLEN_EXT_ID (1 << 5) #define PCAN_USB_STATUSLEN_RTR (1 << 4) #define PCAN_USB_STATUSLEN_DLC (0xf) /* PCAN-USB 4.1 CAN Id tx extended flags */ #define PCAN_USB_TX_SRR 0x01 /* SJA1000 SRR command */ #define PCAN_USB_TX_AT 0x02 /* SJA1000 AT command */ /* PCAN-USB error flags */ #define PCAN_USB_ERROR_TXFULL 0x01 #define PCAN_USB_ERROR_RXQOVR 0x02 #define PCAN_USB_ERROR_BUS_LIGHT 0x04 #define PCAN_USB_ERROR_BUS_HEAVY 0x08 #define PCAN_USB_ERROR_BUS_OFF 0x10 #define PCAN_USB_ERROR_RXQEMPTY 0x20 #define PCAN_USB_ERROR_QOVR 0x40 #define PCAN_USB_ERROR_TXQFULL 0x80 #define PCAN_USB_ERROR_BUS (PCAN_USB_ERROR_BUS_LIGHT | \ PCAN_USB_ERROR_BUS_HEAVY | \ PCAN_USB_ERROR_BUS_OFF) /* SJA1000 modes */ #define SJA1000_MODE_NORMAL 0x00 #define SJA1000_MODE_INIT 0x01 /* * tick duration = 42.666 us => * (tick_number * 44739243) >> 20 ~ (tick_number * 42666) / 1000 * accuracy = 10^-7 */ #define PCAN_USB_TS_DIV_SHIFTER 20 #define PCAN_USB_TS_US_PER_TICK 44739243 /* PCAN-USB messages record types */ #define PCAN_USB_REC_ERROR 1 #define PCAN_USB_REC_ANALOG 2 #define PCAN_USB_REC_BUSLOAD 3 #define PCAN_USB_REC_TS 4 #define PCAN_USB_REC_BUSEVT 5 /* CAN bus events notifications selection mask */ #define PCAN_USB_ERR_RXERR 0x02 /* ask for rxerr counter */ #define PCAN_USB_ERR_TXERR 0x04 /* ask for txerr counter */ /* This mask generates an usb packet each time the state of the bus changes. * In other words, its interest is to know which side among rx and tx is * responsible of the change of the bus state. 
*/ #define PCAN_USB_BERR_MASK (PCAN_USB_ERR_RXERR | PCAN_USB_ERR_TXERR) /* identify bus event packets with rx/tx error counters */ #define PCAN_USB_ERR_CNT_DEC 0x00 /* counters are decreasing */ #define PCAN_USB_ERR_CNT_INC 0x80 /* counters are increasing */ /* private to PCAN-USB adapter */ struct pcan_usb { struct peak_usb_device dev; struct peak_time_ref time_ref; struct timer_list restart_timer; struct can_berr_counter bec; }; /* incoming message context for decoding */ struct pcan_usb_msg_context { u16 ts16; u8 prev_ts8; u8 *ptr; u8 *end; u8 rec_cnt; u8 rec_idx; u8 rec_ts_idx; struct net_device *netdev; struct pcan_usb *pdev; }; /* * send a command */ static int pcan_usb_send_cmd(struct peak_usb_device *dev, u8 f, u8 n, u8 *p) { int err; int actual_length; /* usb device unregistered? */ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; dev->cmd_buf[PCAN_USB_CMD_FUNC] = f; dev->cmd_buf[PCAN_USB_CMD_NUM] = n; if (p) memcpy(dev->cmd_buf + PCAN_USB_CMD_ARGS, p, PCAN_USB_CMD_ARGS_LEN); err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USB_EP_CMDOUT), dev->cmd_buf, PCAN_USB_CMD_LEN, &actual_length, PCAN_USB_COMMAND_TIMEOUT); if (err) netdev_err(dev->netdev, "sending cmd f=0x%x n=0x%x failure: %d\n", f, n, err); return err; } /* * send a command then wait for its response */ static int pcan_usb_wait_rsp(struct peak_usb_device *dev, u8 f, u8 n, u8 *p) { int err; int actual_length; /* usb device unregistered? */ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; /* first, send command */ err = pcan_usb_send_cmd(dev, f, n, NULL); if (err) return err; err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, PCAN_USB_EP_CMDIN), dev->cmd_buf, PCAN_USB_CMD_LEN, &actual_length, PCAN_USB_COMMAND_TIMEOUT); if (err) netdev_err(dev->netdev, "waiting rsp f=0x%x n=0x%x failure: %d\n", f, n, err); else if (p) memcpy(p, dev->cmd_buf + PCAN_USB_CMD_ARGS, PCAN_USB_CMD_ARGS_LEN); return err; } static int pcan_usb_set_sja1000(struct peak_usb_device *dev, u8 mode) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [1] = mode, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_REGISTER, PCAN_USB_SET, args); } static int pcan_usb_set_bus(struct peak_usb_device *dev, u8 onoff) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [0] = !!onoff, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS, PCAN_USB_BUS_XCVER, args); } static int pcan_usb_set_silent(struct peak_usb_device *dev, u8 onoff) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [0] = !!onoff, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_SET_BUS, PCAN_USB_BUS_SILENT_MODE, args); } /* send the cmd to be notified from bus errors */ static int pcan_usb_set_err_frame(struct peak_usb_device *dev, u8 err_mask) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [0] = err_mask, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_ERR_FR, PCAN_USB_SET, args); } static int pcan_usb_set_ext_vcc(struct peak_usb_device *dev, u8 onoff) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [0] = !!onoff, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_EXT_VCC, PCAN_USB_SET, args); } static int pcan_usb_set_led(struct peak_usb_device *dev, u8 onoff) { u8 args[PCAN_USB_CMD_ARGS_LEN] = { [0] = !!onoff, }; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_LED, PCAN_USB_SET, args); } /* * set bittiming value to can */ static int pcan_usb_set_bittiming(struct peak_usb_device *dev, struct can_bittiming *bt) { u8 args[PCAN_USB_CMD_ARGS_LEN]; u8 btr0, btr1; btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | (((bt->phase_seg2 - 1) & 0x7) << 4); if (dev->can.ctrlmode & 
CAN_CTRLMODE_3_SAMPLES) btr1 |= 0x80; netdev_info(dev->netdev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); args[0] = btr1; args[1] = btr0; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_BITRATE, PCAN_USB_SET, args); } /* * init/reset can */ static int pcan_usb_write_mode(struct peak_usb_device *dev, u8 onoff) { int err; err = pcan_usb_set_bus(dev, onoff); if (err) return err; if (!onoff) { err = pcan_usb_set_sja1000(dev, SJA1000_MODE_INIT); } else { /* the PCAN-USB needs time to init */ set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT)); } return err; } /* * handle end of waiting for the device to reset */ static void pcan_usb_restart(struct timer_list *t) { struct pcan_usb *pdev = from_timer(pdev, t, restart_timer); struct peak_usb_device *dev = &pdev->dev; /* notify candev and netdev */ peak_usb_restart_complete(dev); } /* * handle the submission of the restart urb */ static void pcan_usb_restart_pending(struct urb *urb) { struct pcan_usb *pdev = urb->context; /* the PCAN-USB needs time to restart */ mod_timer(&pdev->restart_timer, jiffies + msecs_to_jiffies(PCAN_USB_STARTUP_TIMEOUT)); /* can delete usb resources */ peak_usb_async_complete(urb); } /* * handle asynchronous restart */ static int pcan_usb_restart_async(struct peak_usb_device *dev, struct urb *urb, u8 *buf) { struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev); if (timer_pending(&pdev->restart_timer)) return -EBUSY; /* set bus on */ buf[PCAN_USB_CMD_FUNC] = 3; buf[PCAN_USB_CMD_NUM] = 2; buf[PCAN_USB_CMD_ARGS] = 1; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USB_EP_CMDOUT), buf, PCAN_USB_CMD_LEN, pcan_usb_restart_pending, pdev); return usb_submit_urb(urb, GFP_ATOMIC); } /* * read serial number from device */ static int pcan_usb_get_serial(struct peak_usb_device *dev, u32 *serial_number) { u8 args[PCAN_USB_CMD_ARGS_LEN]; int err; err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_SN, PCAN_USB_GET, args); if (err) return err; *serial_number = le32_to_cpup((__le32 *)args); return 0; } /* * read can channel id from device */ static int pcan_usb_get_can_channel_id(struct peak_usb_device *dev, u32 *can_ch_id) { u8 args[PCAN_USB_CMD_ARGS_LEN]; int err; err = pcan_usb_wait_rsp(dev, PCAN_USB_CMD_DEVID, PCAN_USB_GET, args); if (err) netdev_err(dev->netdev, "getting can channel id failure: %d\n", err); else *can_ch_id = args[0]; return err; } /* set a new CAN channel id in the flash memory of the device */ static int pcan_usb_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id) { u8 args[PCAN_USB_CMD_ARGS_LEN]; /* this kind of device supports 8-bit values only */ if (can_ch_id > U8_MAX) return -EINVAL; /* during the flash process the device disconnects during ~1.25 s.: * prohibit access when interface is UP */ if (dev->netdev->flags & IFF_UP) return -EBUSY; args[0] = can_ch_id; return pcan_usb_send_cmd(dev, PCAN_USB_CMD_DEVID, PCAN_USB_SET, args); } /* * update current time ref with received timestamp */ static int pcan_usb_update_ts(struct pcan_usb_msg_context *mc) { if ((mc->ptr + 2) > mc->end) return -EINVAL; mc->ts16 = get_unaligned_le16(mc->ptr); if (mc->rec_idx > 0) peak_usb_update_ts_now(&mc->pdev->time_ref, mc->ts16); else peak_usb_set_ts_now(&mc->pdev->time_ref, mc->ts16); return 0; } /* * decode received timestamp */ static int pcan_usb_decode_ts(struct pcan_usb_msg_context *mc, u8 first_packet) { /* only 1st packet supplies a word timestamp */ if (first_packet) { if ((mc->ptr + 2) > mc->end) return -EINVAL; mc->ts16 = 
get_unaligned_le16(mc->ptr); mc->prev_ts8 = mc->ts16 & 0x00ff; mc->ptr += 2; } else { u8 ts8; if ((mc->ptr + 1) > mc->end) return -EINVAL; ts8 = *mc->ptr++; if (ts8 < mc->prev_ts8) mc->ts16 += 0x100; mc->ts16 &= 0xff00; mc->ts16 |= ts8; mc->prev_ts8 = ts8; } return 0; } static int pcan_usb_decode_error(struct pcan_usb_msg_context *mc, u8 n, u8 status_len) { struct sk_buff *skb; struct can_frame *cf; enum can_state new_state = CAN_STATE_ERROR_ACTIVE; /* ignore this error until 1st ts received */ if (n == PCAN_USB_ERROR_QOVR) if (!mc->pdev->time_ref.tick_count) return 0; /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(mc->netdev, &cf); if (n & PCAN_USB_ERROR_RXQOVR) { /* data overrun interrupt */ netdev_dbg(mc->netdev, "data overrun interrupt\n"); mc->netdev->stats.rx_over_errors++; mc->netdev->stats.rx_errors++; if (cf) { cf->can_id |= CAN_ERR_CRTL; cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; } } if (n & PCAN_USB_ERROR_TXQFULL) netdev_dbg(mc->netdev, "device Tx queue full\n"); if (n & PCAN_USB_ERROR_BUS_OFF) { new_state = CAN_STATE_BUS_OFF; } else if (n & PCAN_USB_ERROR_BUS_HEAVY) { new_state = ((mc->pdev->bec.txerr >= 128) || (mc->pdev->bec.rxerr >= 128)) ? CAN_STATE_ERROR_PASSIVE : CAN_STATE_ERROR_WARNING; } else { new_state = CAN_STATE_ERROR_ACTIVE; } /* handle change of state */ if (new_state != mc->pdev->dev.can.state) { enum can_state tx_state = (mc->pdev->bec.txerr >= mc->pdev->bec.rxerr) ? new_state : 0; enum can_state rx_state = (mc->pdev->bec.txerr <= mc->pdev->bec.rxerr) ? new_state : 0; can_change_state(mc->netdev, cf, tx_state, rx_state); if (new_state == CAN_STATE_BUS_OFF) { can_bus_off(mc->netdev); } else if (cf && (cf->can_id & CAN_ERR_CRTL)) { /* Supply TX/RX error counters in case of * controller error. */ cf->can_id = CAN_ERR_CNT; cf->data[6] = mc->pdev->bec.txerr; cf->data[7] = mc->pdev->bec.rxerr; } } if (!skb) return -ENOMEM; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp); } netif_rx(skb); return 0; } /* decode bus event usb packet: first byte contains rxerr while 2nd one contains * txerr. */ static int pcan_usb_handle_bus_evt(struct pcan_usb_msg_context *mc, u8 ir) { struct pcan_usb *pdev = mc->pdev; /* according to the content of the packet */ switch (ir) { case PCAN_USB_ERR_CNT_DEC: case PCAN_USB_ERR_CNT_INC: /* save rx/tx error counters in the device context */ pdev->bec.rxerr = mc->ptr[1]; pdev->bec.txerr = mc->ptr[2]; break; default: /* reserved */ break; } return 0; } /* * decode non-data usb message */ static int pcan_usb_decode_status(struct pcan_usb_msg_context *mc, u8 status_len) { u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC; u8 f, n; int err; /* check whether function and number can be read */ if ((mc->ptr + 2) > mc->end) return -EINVAL; f = mc->ptr[PCAN_USB_CMD_FUNC]; n = mc->ptr[PCAN_USB_CMD_NUM]; mc->ptr += PCAN_USB_CMD_ARGS; if (status_len & PCAN_USB_STATUSLEN_TIMESTAMP) { int err = pcan_usb_decode_ts(mc, !mc->rec_ts_idx); if (err) return err; /* Next packet in the buffer will have a timestamp on a single * byte */ mc->rec_ts_idx++; } switch (f) { case PCAN_USB_REC_ERROR: err = pcan_usb_decode_error(mc, n, status_len); if (err) return err; break; case PCAN_USB_REC_ANALOG: /* analog values (ignored) */ rec_len = 2; break; case PCAN_USB_REC_BUSLOAD: /* bus load (ignored) */ rec_len = 1; break; case PCAN_USB_REC_TS: /* only timestamp */ if (pcan_usb_update_ts(mc)) return -EINVAL; break; case PCAN_USB_REC_BUSEVT: /* bus event notifications (get rxerr/txerr) */ err = pcan_usb_handle_bus_evt(mc, n); if (err) return err; break; default: netdev_err(mc->netdev, "unexpected function %u\n", f); break; } if ((mc->ptr + rec_len) > mc->end) return -EINVAL; mc->ptr += rec_len; return 0; } /* * decode data usb message */ static int pcan_usb_decode_data(struct pcan_usb_msg_context *mc, u8 status_len) { u8 rec_len = status_len & PCAN_USB_STATUSLEN_DLC; struct sk_buff *skb; struct can_frame *cf; struct skb_shared_hwtstamps *hwts; u32 can_id_flags; skb = alloc_can_skb(mc->netdev, &cf); if (!skb) return -ENOMEM; if (status_len & PCAN_USB_STATUSLEN_EXT_ID) { if ((mc->ptr + 4) > mc->end) goto decode_failed; can_id_flags = get_unaligned_le32(mc->ptr); cf->can_id = can_id_flags >> 3 | CAN_EFF_FLAG; mc->ptr += 4; } else { if ((mc->ptr + 2) > mc->end) goto decode_failed; can_id_flags = get_unaligned_le16(mc->ptr); cf->can_id = can_id_flags >> 5; mc->ptr += 2; } can_frame_set_cc_len(cf, rec_len, mc->pdev->dev.can.ctrlmode); /* Only first packet timestamp is a word */ if (pcan_usb_decode_ts(mc, !mc->rec_ts_idx)) goto decode_failed; /* Next packet in the buffer will have a timestamp on a single byte */ mc->rec_ts_idx++; /* read data */ memset(cf->data, 0x0, sizeof(cf->data)); if (status_len & PCAN_USB_STATUSLEN_RTR) { cf->can_id |= CAN_RTR_FLAG; } else { if ((mc->ptr + rec_len) > mc->end) goto decode_failed; memcpy(cf->data, mc->ptr, cf->len); mc->ptr += rec_len; /* Ignore next byte (client private id) if SRR bit is set */ if (can_id_flags & PCAN_USB_TX_SRR) mc->ptr++; /* update statistics */ mc->netdev->stats.rx_bytes += cf->len; } mc->netdev->stats.rx_packets++; /* convert timestamp into kernel time */ hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&mc->pdev->time_ref, mc->ts16, &hwts->hwtstamp); /* push the skb */ netif_rx(skb); return 0; decode_failed: dev_kfree_skb(skb); return -EINVAL; } /* * process incoming message */ static int pcan_usb_decode_msg(struct peak_usb_device *dev, u8 *ibuf, u32 lbuf) { struct pcan_usb_msg_context mc = { .rec_cnt = ibuf[1], .ptr = ibuf + PCAN_USB_MSG_HEADER_LEN, .end = ibuf + lbuf, .netdev = dev->netdev, .pdev = container_of(dev, struct
pcan_usb, dev), }; int err; for (err = 0; mc.rec_idx < mc.rec_cnt && !err; mc.rec_idx++) { u8 sl = *mc.ptr++; /* handle status and error frames here */ if (sl & PCAN_USB_STATUSLEN_INTERNAL) { err = pcan_usb_decode_status(&mc, sl); /* handle normal can frames here */ } else { err = pcan_usb_decode_data(&mc, sl); } } return err; } /* * process any incoming buffer */ static int pcan_usb_decode_buf(struct peak_usb_device *dev, struct urb *urb) { int err = 0; if (urb->actual_length > PCAN_USB_MSG_HEADER_LEN) { err = pcan_usb_decode_msg(dev, urb->transfer_buffer, urb->actual_length); } else if (urb->actual_length > 0) { netdev_err(dev->netdev, "usb message length error (%u)\n", urb->actual_length); err = -EINVAL; } return err; } /* * process outgoing packet */ static int pcan_usb_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, u8 *obuf, size_t *size) { struct net_device *netdev = dev->netdev; struct net_device_stats *stats = &netdev->stats; struct can_frame *cf = (struct can_frame *)skb->data; u32 can_id_flags = cf->can_id & CAN_ERR_MASK; u8 *pc; obuf[0] = PCAN_USB_MSG_TX_CAN; obuf[1] = 1; /* only one CAN frame is stored in the packet */ pc = obuf + PCAN_USB_MSG_HEADER_LEN; /* status/len byte */ *pc = can_get_cc_dlc(cf, dev->can.ctrlmode); if (cf->can_id & CAN_RTR_FLAG) *pc |= PCAN_USB_STATUSLEN_RTR; /* can id */ if (cf->can_id & CAN_EFF_FLAG) { *pc |= PCAN_USB_STATUSLEN_EXT_ID; pc++; can_id_flags <<= 3; if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) can_id_flags |= PCAN_USB_TX_SRR; if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) can_id_flags |= PCAN_USB_TX_AT; put_unaligned_le32(can_id_flags, pc); pc += 4; } else { pc++; can_id_flags <<= 5; if (dev->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) can_id_flags |= PCAN_USB_TX_SRR; if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) can_id_flags |= PCAN_USB_TX_AT; put_unaligned_le16(can_id_flags, pc); pc += 2; } /* can data */ if (!(cf->can_id & CAN_RTR_FLAG)) { memcpy(pc, cf->data, cf->len); pc += cf->len; } /* SRR bit needs a writer id (useless here) */ if (can_id_flags & PCAN_USB_TX_SRR) *pc++ = 0x80; obuf[(*size)-1] = (u8)(stats->tx_packets & 0xff); return 0; } /* socket callback used to copy berr counters values received through USB */ static int pcan_usb_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct peak_usb_device *dev = netdev_priv(netdev); struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev); *bec = pdev->bec; /* must return 0 */ return 0; } /* * start interface */ static int pcan_usb_start(struct peak_usb_device *dev) { struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev); int err; /* number of bits used in timestamps read from adapter struct */ peak_usb_init_time_ref(&pdev->time_ref, &pcan_usb); pdev->bec.rxerr = 0; pdev->bec.txerr = 0; /* always ask the device for BERR reporting, to be able to switch from * WARNING to PASSIVE state */ err = pcan_usb_set_err_frame(dev, PCAN_USB_BERR_MASK); if (err) netdev_warn(dev->netdev, "Asking for BERR reporting error %u\n", err); /* if revision greater than 3, can put silent mode on/off */ if (dev->device_rev > 3) { err = pcan_usb_set_silent(dev, dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY); if (err) return err; } return pcan_usb_set_ext_vcc(dev, 0); } static int pcan_usb_init(struct peak_usb_device *dev) { struct pcan_usb *pdev = container_of(dev, struct pcan_usb, dev); u32 serial_number; int err; /* initialize a timer needed to wait for hardware restart */ timer_setup(&pdev->restart_timer, pcan_usb_restart, 0); /* * explicit use of 
dev_xxx() instead of netdev_xxx() here: * information displayed are related to the device itself, not * to the canx netdevice. */ err = pcan_usb_get_serial(dev, &serial_number); if (err) { dev_err(dev->netdev->dev.parent, "unable to read %s serial number (err %d)\n", pcan_usb.name, err); return err; } dev_info(dev->netdev->dev.parent, "PEAK-System %s adapter hwrev %u serial %08X (%u channel)\n", pcan_usb.name, dev->device_rev, serial_number, pcan_usb.ctrl_count); /* Since rev 4.1, PCAN-USB is able to make single-shot as well as * looped back frames. */ if (dev->device_rev >= 41) { struct can_priv *priv = netdev_priv(dev->netdev); priv->ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_LOOPBACK; } else { dev_info(dev->netdev->dev.parent, "Firmware update available. Please contact [email protected]\n"); } return 0; } /* * probe function for new PCAN-USB usb interface */ static int pcan_usb_probe(struct usb_interface *intf) { struct usb_host_interface *if_desc; int i; if_desc = intf->altsetting; /* check interface endpoint addresses */ for (i = 0; i < if_desc->desc.bNumEndpoints; i++) { struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc; switch (ep->bEndpointAddress) { case PCAN_USB_EP_CMDOUT: case PCAN_USB_EP_CMDIN: case PCAN_USB_EP_MSGOUT: case PCAN_USB_EP_MSGIN: break; default: return -ENODEV; } } return 0; } static int pcan_usb_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct peak_usb_device *dev = netdev_priv(netdev); int err = 0; switch (state) { case ETHTOOL_ID_ACTIVE: /* call ON/OFF twice a second */ return 2; case ETHTOOL_ID_OFF: err = pcan_usb_set_led(dev, 0); break; case ETHTOOL_ID_ON: fallthrough; case ETHTOOL_ID_INACTIVE: /* restore LED default */ err = pcan_usb_set_led(dev, 1); break; default: break; } return err; } /* This device only handles 8-bit CAN channel id. 
*/ static int pcan_usb_get_eeprom_len(struct net_device *netdev) { return sizeof(u8); } static const struct ethtool_ops pcan_usb_ethtool_ops = { .set_phys_id = pcan_usb_set_phys_id, .get_ts_info = pcan_get_ts_info, .get_eeprom_len = pcan_usb_get_eeprom_len, .get_eeprom = peak_usb_get_eeprom, .set_eeprom = peak_usb_set_eeprom, }; /* * describe the PCAN-USB adapter */ static const struct can_bittiming_const pcan_usb_const = { .name = "pcan_usb", .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; const struct peak_usb_adapter pcan_usb = { .name = "PCAN-USB", .device_id = PCAN_USB_PRODUCT_ID, .ctrl_count = 1, .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_CC_LEN8_DLC, .clock = { .freq = PCAN_USB_CRYSTAL_HZ / 2, }, .bittiming_const = &pcan_usb_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb), .ethtool_ops = &pcan_usb_ethtool_ops, /* timestamps usage */ .ts_used_bits = 16, .us_per_ts_scale = PCAN_USB_TS_US_PER_TICK, /* us=(ts*scale) */ .us_per_ts_shift = PCAN_USB_TS_DIV_SHIFTER, /* >> shift */ /* give here messages in/out endpoints */ .ep_msg_in = PCAN_USB_EP_MSGIN, .ep_msg_out = {PCAN_USB_EP_MSGOUT}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_USB_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_USB_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_probe, .dev_init = pcan_usb_init, .dev_set_bus = pcan_usb_write_mode, .dev_set_bittiming = pcan_usb_set_bittiming, .dev_get_can_channel_id = pcan_usb_get_can_channel_id, .dev_set_can_channel_id = pcan_usb_set_can_channel_id, .dev_decode_buf = pcan_usb_decode_buf, .dev_encode_msg = pcan_usb_encode_msg, .dev_start = pcan_usb_start, .dev_restart_async = pcan_usb_restart_async, .do_get_berr_counter = pcan_usb_get_berr_counter, };
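/* Editor's sketch (not part of the driver): the fixed-point timestamp
 * conversion behind the us_per_ts_scale/us_per_ts_shift fields above, using
 * the same constants as PCAN_USB_TS_US_PER_TICK and PCAN_USB_TS_DIV_SHIFTER;
 * the helper name is hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define TS_US_PER_TICK	44739243ULL	/* ~= 42.666 us << 20 */
#define TS_DIV_SHIFTER	20

/* us = (ticks * 44739243) >> 20 ~= ticks * 42.666: the multiply-and-shift
 * approximates the tick period with about 1e-7 relative accuracy while
 * staying in integer arithmetic
 */
static uint64_t ticks_to_us(uint16_t ticks)
{
	return ((uint64_t)ticks * TS_US_PER_TICK) >> TS_DIV_SHIFTER;
}

int main(void)
{
	/* 0xffff ticks ~= 2.796 s: the rollover period of the 16-bit counter */
	printf("%llu us\n", (unsigned long long)ticks_to_us(0xffff));
	return 0;
}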
linux-master
drivers/net/can/usb/peak_usb/pcan_usb.c
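/* Editor's sketch (not part of the driver): how the 32-bit CCBT bus-timing
 * word is packed by pcan_usb_pro_set_bittiming() further below; the field
 * layout is copied from that function, while the helper name and the example
 * values are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t pack_ccbt(uint32_t brp, uint32_t sjw, uint32_t tseg1,
			  uint32_t tseg2, int three_samples)
{
	uint32_t ccbt = three_samples ? 0x00800000 : 0;

	ccbt |= (sjw - 1) << 24;
	ccbt |= (tseg2 - 1) << 20;
	ccbt |= (tseg1 - 1) << 16;	/* tseg1 = prop_seg + phase_seg1 */
	ccbt |= brp - 1;
	return ccbt;
}

int main(void)
{
	/* hypothetical 500 kbit/s on the 56 MHz clock: brp=7 gives a 125 ns
	 * time quantum, and 1 + 13 + 2 = 16 tq per bit gives a 2 us bit time
	 */
	printf("ccbt=0x%08x\n", pack_ccbt(7, 1, 13, 2, 0));
	return 0;
}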
// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for PEAK System PCAN-USB Pro adapter * Derived from the PCAN project file driver/src/pcan_usbpro.c * * Copyright (C) 2003-2011 PEAK System-Technik GmbH * Copyright (C) 2011-2012 Stephane Grosjean <[email protected]> */ #include <linux/ethtool.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include "pcan_usb_core.h" #include "pcan_usb_pro.h" #define PCAN_USBPRO_CHANNEL_COUNT 2 /* PCAN-USB Pro adapter internal clock (MHz) */ #define PCAN_USBPRO_CRYSTAL_HZ 56000000 /* PCAN-USB Pro command timeout (ms.) */ #define PCAN_USBPRO_COMMAND_TIMEOUT 1000 /* PCAN-USB Pro rx/tx buffers size */ #define PCAN_USBPRO_RX_BUFFER_SIZE 1024 #define PCAN_USBPRO_TX_BUFFER_SIZE 64 #define PCAN_USBPRO_MSG_HEADER_LEN 4 /* some commands responses need to be re-submitted */ #define PCAN_USBPRO_RSP_SUBMIT_MAX 2 #define PCAN_USBPRO_RTR 0x01 #define PCAN_USBPRO_EXT 0x02 #define PCAN_USBPRO_SS 0x08 #define PCAN_USBPRO_CMD_BUFFER_SIZE 512 /* handle device specific info used by the netdevices */ struct pcan_usb_pro_interface { struct peak_usb_device *dev[PCAN_USBPRO_CHANNEL_COUNT]; struct peak_time_ref time_ref; int cm_ignore_count; int dev_opened_count; }; /* device information */ struct pcan_usb_pro_device { struct peak_usb_device dev; struct pcan_usb_pro_interface *usb_if; u32 cached_ccbt; }; /* internal structure used to handle messages sent to bulk urb */ struct pcan_usb_pro_msg { u8 *rec_ptr; int rec_buffer_size; int rec_buffer_len; union { __le16 *rec_cnt_rd; __le32 *rec_cnt; u8 *rec_buffer; } u; }; /* records sizes table indexed on message id. (8-bits value) */ static u16 pcan_usb_pro_sizeof_rec[256] = { [PCAN_USBPRO_SETBTR] = sizeof(struct pcan_usb_pro_btr), [PCAN_USBPRO_SETBUSACT] = sizeof(struct pcan_usb_pro_busact), [PCAN_USBPRO_SETSILENT] = sizeof(struct pcan_usb_pro_silent), [PCAN_USBPRO_SETFILTR] = sizeof(struct pcan_usb_pro_filter), [PCAN_USBPRO_SETTS] = sizeof(struct pcan_usb_pro_setts), [PCAN_USBPRO_GETDEVID] = sizeof(struct pcan_usb_pro_devid), [PCAN_USBPRO_SETDEVID] = sizeof(struct pcan_usb_pro_devid), [PCAN_USBPRO_SETLED] = sizeof(struct pcan_usb_pro_setled), [PCAN_USBPRO_RXMSG8] = sizeof(struct pcan_usb_pro_rxmsg), [PCAN_USBPRO_RXMSG4] = sizeof(struct pcan_usb_pro_rxmsg) - 4, [PCAN_USBPRO_RXMSG0] = sizeof(struct pcan_usb_pro_rxmsg) - 8, [PCAN_USBPRO_RXRTR] = sizeof(struct pcan_usb_pro_rxmsg) - 8, [PCAN_USBPRO_RXSTATUS] = sizeof(struct pcan_usb_pro_rxstatus), [PCAN_USBPRO_RXTS] = sizeof(struct pcan_usb_pro_rxts), [PCAN_USBPRO_TXMSG8] = sizeof(struct pcan_usb_pro_txmsg), [PCAN_USBPRO_TXMSG4] = sizeof(struct pcan_usb_pro_txmsg) - 4, [PCAN_USBPRO_TXMSG0] = sizeof(struct pcan_usb_pro_txmsg) - 8, }; /* * initialize PCAN-USB Pro message data structure */ static u8 *pcan_msg_init(struct pcan_usb_pro_msg *pm, void *buffer_addr, int buffer_size) { if (buffer_size < PCAN_USBPRO_MSG_HEADER_LEN) return NULL; pm->u.rec_buffer = (u8 *)buffer_addr; pm->rec_buffer_size = pm->rec_buffer_len = buffer_size; pm->rec_ptr = pm->u.rec_buffer + PCAN_USBPRO_MSG_HEADER_LEN; return pm->rec_ptr; } static u8 *pcan_msg_init_empty(struct pcan_usb_pro_msg *pm, void *buffer_addr, int buffer_size) { u8 *pr = pcan_msg_init(pm, buffer_addr, buffer_size); if (pr) { pm->rec_buffer_len = PCAN_USBPRO_MSG_HEADER_LEN; *pm->u.rec_cnt = 0; } return pr; } /* * add one record to a message being built */ static int pcan_msg_add_rec(struct pcan_usb_pro_msg *pm, int id, ...) 
{ int len, i; u8 *pc; va_list ap; va_start(ap, id); pc = pm->rec_ptr + 1; i = 0; switch (id) { case PCAN_USBPRO_TXMSG8: i += 4; fallthrough; case PCAN_USBPRO_TXMSG4: i += 4; fallthrough; case PCAN_USBPRO_TXMSG0: *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); *pc++ = va_arg(ap, int); *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; memcpy(pc, va_arg(ap, int *), i); pc += i; break; case PCAN_USBPRO_SETBTR: case PCAN_USBPRO_GETDEVID: case PCAN_USBPRO_SETDEVID: *pc++ = va_arg(ap, int); pc += 2; *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; break; case PCAN_USBPRO_SETFILTR: case PCAN_USBPRO_SETBUSACT: case PCAN_USBPRO_SETSILENT: *pc++ = va_arg(ap, int); *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; break; case PCAN_USBPRO_SETLED: *pc++ = va_arg(ap, int); *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; *(__le32 *)pc = cpu_to_le32(va_arg(ap, u32)); pc += 4; break; case PCAN_USBPRO_SETTS: pc++; *(__le16 *)pc = cpu_to_le16(va_arg(ap, int)); pc += 2; break; default: pr_err("%s: %s(): unknown data type %02Xh (%d)\n", PCAN_USB_DRIVER_NAME, __func__, id, id); pc--; break; } len = pc - pm->rec_ptr; if (len > 0) { le32_add_cpu(pm->u.rec_cnt, 1); *pm->rec_ptr = id; pm->rec_ptr = pc; pm->rec_buffer_len += len; } va_end(ap); return len; } /* * send PCAN-USB Pro command synchronously */ static int pcan_usb_pro_send_cmd(struct peak_usb_device *dev, struct pcan_usb_pro_msg *pum) { int actual_length; int err; /* usb device unregistered? */ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; err = usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT), pum->u.rec_buffer, pum->rec_buffer_len, &actual_length, PCAN_USBPRO_COMMAND_TIMEOUT); if (err) netdev_err(dev->netdev, "sending command failure: %d\n", err); return err; } /* * wait for PCAN-USB Pro command response */ static int pcan_usb_pro_wait_rsp(struct peak_usb_device *dev, struct pcan_usb_pro_msg *pum) { u8 req_data_type, req_channel; int actual_length; int i, err = 0; /* usb device unregistered? 
*/ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; req_data_type = pum->u.rec_buffer[4]; req_channel = pum->u.rec_buffer[5]; *pum->u.rec_cnt = 0; for (i = 0; !err && i < PCAN_USBPRO_RSP_SUBMIT_MAX; i++) { struct pcan_usb_pro_msg rsp; union pcan_usb_pro_rec *pr; u32 r, rec_cnt; u16 rec_len; u8 *pc; err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDIN), pum->u.rec_buffer, pum->rec_buffer_len, &actual_length, PCAN_USBPRO_COMMAND_TIMEOUT); if (err) { netdev_err(dev->netdev, "waiting rsp error %d\n", err); break; } if (actual_length == 0) continue; err = -EBADMSG; if (actual_length < PCAN_USBPRO_MSG_HEADER_LEN) { netdev_err(dev->netdev, "got abnormal too small rsp (len=%d)\n", actual_length); break; } pc = pcan_msg_init(&rsp, pum->u.rec_buffer, actual_length); rec_cnt = le32_to_cpu(*rsp.u.rec_cnt); /* loop on records stored into message */ for (r = 0; r < rec_cnt; r++) { pr = (union pcan_usb_pro_rec *)pc; rec_len = pcan_usb_pro_sizeof_rec[pr->data_type]; if (!rec_len) { netdev_err(dev->netdev, "got unprocessed record in msg\n"); pcan_dump_mem("rcvd rsp msg", pum->u.rec_buffer, actual_length); break; } /* check if response corresponds to request */ if (pr->data_type != req_data_type) netdev_err(dev->netdev, "got unwanted rsp %xh: ignored\n", pr->data_type); /* check if channel in response corresponds too */ else if ((req_channel != 0xff) && (pr->bus_act.channel != req_channel)) netdev_err(dev->netdev, "got rsp %xh but on chan%u: ignored\n", req_data_type, pr->bus_act.channel); /* got the response */ else return 0; /* otherwise, go on with next record in message */ pc += rec_len; } } return (i >= PCAN_USBPRO_RSP_SUBMIT_MAX) ? -ERANGE : err; } int pcan_usb_pro_send_req(struct peak_usb_device *dev, int req_id, int req_value, void *req_addr, int req_size) { int err; u8 req_type; unsigned int p; /* usb device unregistered? */ if (!(dev->state & PCAN_USB_STATE_CONNECTED)) return 0; req_type = USB_TYPE_VENDOR | USB_RECIP_OTHER; switch (req_id) { case PCAN_USBPRO_REQ_FCT: p = usb_sndctrlpipe(dev->udev, 0); break; default: p = usb_rcvctrlpipe(dev->udev, 0); req_type |= USB_DIR_IN; memset(req_addr, '\0', req_size); break; } err = usb_control_msg(dev->udev, p, req_id, req_type, req_value, 0, req_addr, req_size, 2 * USB_CTRL_GET_TIMEOUT); if (err < 0) { netdev_info(dev->netdev, "unable to request usb[type=%d value=%d] err=%d\n", req_id, req_value, err); return err; } return 0; } static int pcan_usb_pro_set_ts(struct peak_usb_device *dev, u16 onoff) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETTS, onoff); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_bitrate(struct peak_usb_device *dev, u32 ccbt) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETBTR, dev->ctrl_idx, ccbt); /* cache the CCBT value to reuse it before next buson */ pdev->cached_ccbt = ccbt; return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_bus(struct peak_usb_device *dev, u8 onoff) { struct pcan_usb_pro_msg um; /* if bus=on, be sure the bitrate being set before! 
*/ if (onoff) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); pcan_usb_pro_set_bitrate(dev, pdev->cached_ccbt); } pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETBUSACT, dev->ctrl_idx, onoff); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_silent(struct peak_usb_device *dev, u8 onoff) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETSILENT, dev->ctrl_idx, onoff); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_filter(struct peak_usb_device *dev, u16 filter_mode) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETFILTR, dev->ctrl_idx, filter_mode); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_led(struct peak_usb_device *dev, u8 mode, u32 timeout) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETLED, dev->ctrl_idx, mode, timeout); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_get_can_channel_id(struct peak_usb_device *dev, u32 *can_ch_id) { struct pcan_usb_pro_devid *pdn; struct pcan_usb_pro_msg um; int err; u8 *pc; pc = pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_GETDEVID, dev->ctrl_idx); err = pcan_usb_pro_send_cmd(dev, &um); if (err) return err; err = pcan_usb_pro_wait_rsp(dev, &um); if (err) return err; pdn = (struct pcan_usb_pro_devid *)pc; *can_ch_id = le32_to_cpu(pdn->dev_num); return err; } static int pcan_usb_pro_set_can_channel_id(struct peak_usb_device *dev, u32 can_ch_id) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, dev->cmd_buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETDEVID, dev->ctrl_idx, can_ch_id); return pcan_usb_pro_send_cmd(dev, &um); } static int pcan_usb_pro_set_bittiming(struct peak_usb_device *dev, struct can_bittiming *bt) { u32 ccbt; ccbt = (dev->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
0x00800000 : 0; ccbt |= (bt->sjw - 1) << 24; ccbt |= (bt->phase_seg2 - 1) << 20; ccbt |= (bt->prop_seg + bt->phase_seg1 - 1) << 16; /* = tseg1 */ ccbt |= bt->brp - 1; netdev_info(dev->netdev, "setting ccbt=0x%08x\n", ccbt); return pcan_usb_pro_set_bitrate(dev, ccbt); } void pcan_usb_pro_restart_complete(struct urb *urb) { /* can delete usb resources */ peak_usb_async_complete(urb); /* notify candev and netdev */ peak_usb_restart_complete(urb->context); } /* * handle restart but in asynchronously way */ static int pcan_usb_pro_restart_async(struct peak_usb_device *dev, struct urb *urb, u8 *buf) { struct pcan_usb_pro_msg um; pcan_msg_init_empty(&um, buf, PCAN_USB_MAX_CMD_LEN); pcan_msg_add_rec(&um, PCAN_USBPRO_SETBUSACT, dev->ctrl_idx, 1); usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, PCAN_USBPRO_EP_CMDOUT), buf, PCAN_USB_MAX_CMD_LEN, pcan_usb_pro_restart_complete, dev); return usb_submit_urb(urb, GFP_ATOMIC); } static int pcan_usb_pro_drv_loaded(struct peak_usb_device *dev, int loaded) { u8 *buffer; int err; buffer = kzalloc(PCAN_USBPRO_FCT_DRVLD_REQ_LEN, GFP_KERNEL); if (!buffer) return -ENOMEM; buffer[0] = 0; buffer[1] = !!loaded; err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_FCT, PCAN_USBPRO_FCT_DRVLD, buffer, PCAN_USBPRO_FCT_DRVLD_REQ_LEN); kfree(buffer); return err; } static inline struct pcan_usb_pro_interface *pcan_usb_pro_dev_if(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); return pdev->usb_if; } static int pcan_usb_pro_handle_canmsg(struct pcan_usb_pro_interface *usb_if, struct pcan_usb_pro_rxmsg *rx) { const unsigned int ctrl_idx = (rx->len >> 4) & 0x0f; struct peak_usb_device *dev = usb_if->dev[ctrl_idx]; struct net_device *netdev = dev->netdev; struct can_frame *can_frame; struct sk_buff *skb; struct skb_shared_hwtstamps *hwts; skb = alloc_can_skb(netdev, &can_frame); if (!skb) return -ENOMEM; can_frame->can_id = le32_to_cpu(rx->id); can_frame->len = rx->len & 0x0f; if (rx->flags & PCAN_USBPRO_EXT) can_frame->can_id |= CAN_EFF_FLAG; if (rx->flags & PCAN_USBPRO_RTR) { can_frame->can_id |= CAN_RTR_FLAG; } else { memcpy(can_frame->data, rx->data, can_frame->len); netdev->stats.rx_bytes += can_frame->len; } netdev->stats.rx_packets++; hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(rx->ts32), &hwts->hwtstamp); netif_rx(skb); return 0; } static int pcan_usb_pro_handle_error(struct pcan_usb_pro_interface *usb_if, struct pcan_usb_pro_rxstatus *er) { const u16 raw_status = le16_to_cpu(er->status); const unsigned int ctrl_idx = (er->channel >> 4) & 0x0f; struct peak_usb_device *dev = usb_if->dev[ctrl_idx]; struct net_device *netdev = dev->netdev; struct can_frame *can_frame; enum can_state new_state = CAN_STATE_ERROR_ACTIVE; u8 err_mask = 0; struct sk_buff *skb; struct skb_shared_hwtstamps *hwts; /* nothing should be sent while in BUS_OFF state */ if (dev->can.state == CAN_STATE_BUS_OFF) return 0; if (!raw_status) { /* no error bit (back to active state) */ dev->can.state = CAN_STATE_ERROR_ACTIVE; return 0; } if (raw_status & (PCAN_USBPRO_STATUS_OVERRUN | PCAN_USBPRO_STATUS_QOVERRUN)) { /* trick to bypass next comparison and process other errors */ new_state = CAN_STATE_MAX; } if (raw_status & PCAN_USBPRO_STATUS_BUS) { new_state = CAN_STATE_BUS_OFF; } else if (raw_status & PCAN_USBPRO_STATUS_ERROR) { u32 rx_err_cnt = (le32_to_cpu(er->err_frm) & 0x00ff0000) >> 16; u32 tx_err_cnt = (le32_to_cpu(er->err_frm) & 0xff000000) >> 24; if (rx_err_cnt > 127) err_mask |= 
CAN_ERR_CRTL_RX_PASSIVE; else if (rx_err_cnt > 96) err_mask |= CAN_ERR_CRTL_RX_WARNING; if (tx_err_cnt > 127) err_mask |= CAN_ERR_CRTL_TX_PASSIVE; else if (tx_err_cnt > 96) err_mask |= CAN_ERR_CRTL_TX_WARNING; if (err_mask & (CAN_ERR_CRTL_RX_WARNING | CAN_ERR_CRTL_TX_WARNING)) new_state = CAN_STATE_ERROR_WARNING; else if (err_mask & (CAN_ERR_CRTL_RX_PASSIVE | CAN_ERR_CRTL_TX_PASSIVE)) new_state = CAN_STATE_ERROR_PASSIVE; } /* do not post any error if the current state didn't change */ if (dev->can.state == new_state) return 0; /* allocate an skb to store the error frame */ skb = alloc_can_err_skb(netdev, &can_frame); if (!skb) return -ENOMEM; switch (new_state) { case CAN_STATE_BUS_OFF: can_frame->can_id |= CAN_ERR_BUSOFF; dev->can.can_stats.bus_off++; can_bus_off(netdev); break; case CAN_STATE_ERROR_PASSIVE: can_frame->can_id |= CAN_ERR_CRTL; can_frame->data[1] |= err_mask; dev->can.can_stats.error_passive++; break; case CAN_STATE_ERROR_WARNING: can_frame->can_id |= CAN_ERR_CRTL; can_frame->data[1] |= err_mask; dev->can.can_stats.error_warning++; break; case CAN_STATE_ERROR_ACTIVE: break; default: /* CAN_STATE_MAX (trick to handle other errors) */ if (raw_status & PCAN_USBPRO_STATUS_OVERRUN) { can_frame->can_id |= CAN_ERR_PROT; can_frame->data[2] |= CAN_ERR_PROT_OVERLOAD; netdev->stats.rx_over_errors++; netdev->stats.rx_errors++; } if (raw_status & PCAN_USBPRO_STATUS_QOVERRUN) { can_frame->can_id |= CAN_ERR_CRTL; can_frame->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW; netdev->stats.rx_over_errors++; netdev->stats.rx_errors++; } new_state = CAN_STATE_ERROR_ACTIVE; break; } dev->can.state = new_state; hwts = skb_hwtstamps(skb); peak_usb_get_ts_time(&usb_if->time_ref, le32_to_cpu(er->ts32), &hwts->hwtstamp); netif_rx(skb); return 0; } static void pcan_usb_pro_handle_ts(struct pcan_usb_pro_interface *usb_if, struct pcan_usb_pro_rxts *ts) { /* should wait until clock is stabilized */ if (usb_if->cm_ignore_count > 0) usb_if->cm_ignore_count--; else peak_usb_set_ts_now(&usb_if->time_ref, le32_to_cpu(ts->ts64[1])); } /* * callback for bulk IN urb */ static int pcan_usb_pro_decode_buf(struct peak_usb_device *dev, struct urb *urb) { struct pcan_usb_pro_interface *usb_if = pcan_usb_pro_dev_if(dev); struct net_device *netdev = dev->netdev; struct pcan_usb_pro_msg usb_msg; u8 *rec_ptr, *msg_end; u16 rec_cnt; int err = 0; rec_ptr = pcan_msg_init(&usb_msg, urb->transfer_buffer, urb->actual_length); if (!rec_ptr) { netdev_err(netdev, "bad msg hdr len %d\n", urb->actual_length); return -EINVAL; } /* loop reading all the records from the incoming message */ msg_end = urb->transfer_buffer + urb->actual_length; rec_cnt = le16_to_cpu(*usb_msg.u.rec_cnt_rd); for (; rec_cnt > 0; rec_cnt--) { union pcan_usb_pro_rec *pr = (union pcan_usb_pro_rec *)rec_ptr; u16 sizeof_rec = pcan_usb_pro_sizeof_rec[pr->data_type]; if (!sizeof_rec) { netdev_err(netdev, "got unsupported rec in usb msg:\n"); err = -ENOTSUPP; break; } /* check if the record goes out of current packet */ if (rec_ptr + sizeof_rec > msg_end) { netdev_err(netdev, "got frag rec: should inc usb rx buf size\n"); err = -EBADMSG; break; } switch (pr->data_type) { case PCAN_USBPRO_RXMSG8: case PCAN_USBPRO_RXMSG4: case PCAN_USBPRO_RXMSG0: case PCAN_USBPRO_RXRTR: err = pcan_usb_pro_handle_canmsg(usb_if, &pr->rx_msg); if (err < 0) goto fail; break; case PCAN_USBPRO_RXSTATUS: err = pcan_usb_pro_handle_error(usb_if, &pr->rx_status); if (err < 0) goto fail; break; case PCAN_USBPRO_RXTS: pcan_usb_pro_handle_ts(usb_if, &pr->rx_ts); break; default: netdev_err(netdev, "unhandled rec 
type 0x%02x (%d): ignored\n", pr->data_type, pr->data_type); break; } rec_ptr += sizeof_rec; } fail: if (err) pcan_dump_mem("received msg", urb->transfer_buffer, urb->actual_length); return err; } static int pcan_usb_pro_encode_msg(struct peak_usb_device *dev, struct sk_buff *skb, u8 *obuf, size_t *size) { struct can_frame *cf = (struct can_frame *)skb->data; u8 data_type, len, flags; struct pcan_usb_pro_msg usb_msg; pcan_msg_init_empty(&usb_msg, obuf, *size); if ((cf->can_id & CAN_RTR_FLAG) || (cf->len == 0)) data_type = PCAN_USBPRO_TXMSG0; else if (cf->len <= 4) data_type = PCAN_USBPRO_TXMSG4; else data_type = PCAN_USBPRO_TXMSG8; len = (dev->ctrl_idx << 4) | (cf->len & 0x0f); flags = 0; if (cf->can_id & CAN_EFF_FLAG) flags |= PCAN_USBPRO_EXT; if (cf->can_id & CAN_RTR_FLAG) flags |= PCAN_USBPRO_RTR; /* Single-Shot frame */ if (dev->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) flags |= PCAN_USBPRO_SS; pcan_msg_add_rec(&usb_msg, data_type, 0, flags, len, cf->can_id, cf->data); *size = usb_msg.rec_buffer_len; return 0; } static int pcan_usb_pro_start(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); int err; err = pcan_usb_pro_set_silent(dev, dev->can.ctrlmode & CAN_CTRLMODE_LISTENONLY); if (err) return err; /* filter mode: 0-> All OFF; 1->bypass */ err = pcan_usb_pro_set_filter(dev, 1); if (err) return err; /* opening first device: */ if (pdev->usb_if->dev_opened_count == 0) { /* reset time_ref */ peak_usb_init_time_ref(&pdev->usb_if->time_ref, &pcan_usb_pro); /* ask device to send ts messages */ err = pcan_usb_pro_set_ts(dev, 1); } pdev->usb_if->dev_opened_count++; return err; } /* * stop interface * (last chance before set bus off) */ static int pcan_usb_pro_stop(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); /* turn off ts msgs for that interface if no other dev opened */ if (pdev->usb_if->dev_opened_count == 1) pcan_usb_pro_set_ts(dev, 0); pdev->usb_if->dev_opened_count--; return 0; } /* * called when probing to initialize a device object. */ static int pcan_usb_pro_init(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); struct pcan_usb_pro_interface *usb_if = NULL; struct pcan_usb_pro_fwinfo *fi = NULL; struct pcan_usb_pro_blinfo *bi = NULL; int err; /* do this for 1st channel only */ if (!dev->prev_siblings) { /* allocate netdevices common structure attached to first one */ usb_if = kzalloc(sizeof(struct pcan_usb_pro_interface), GFP_KERNEL); fi = kmalloc(sizeof(struct pcan_usb_pro_fwinfo), GFP_KERNEL); bi = kmalloc(sizeof(struct pcan_usb_pro_blinfo), GFP_KERNEL); if (!usb_if || !fi || !bi) { err = -ENOMEM; goto err_out; } /* number of ts msgs to ignore before taking one into account */ usb_if->cm_ignore_count = 5; /* * explicit use of dev_xxx() instead of netdev_xxx() here: * information displayed is related to the device itself, not * to the canx netdevices. 
*/ err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_FW, fi, sizeof(*fi)); if (err) { dev_err(dev->netdev->dev.parent, "unable to read %s firmware info (err %d)\n", pcan_usb_pro.name, err); goto err_out; } err = pcan_usb_pro_send_req(dev, PCAN_USBPRO_REQ_INFO, PCAN_USBPRO_INFO_BL, bi, sizeof(*bi)); if (err) { dev_err(dev->netdev->dev.parent, "unable to read %s bootloader info (err %d)\n", pcan_usb_pro.name, err); goto err_out; } /* tell the device the can driver is running */ err = pcan_usb_pro_drv_loaded(dev, 1); if (err) goto err_out; dev_info(dev->netdev->dev.parent, "PEAK-System %s hwrev %u serial %08X.%08X (%u channels)\n", pcan_usb_pro.name, bi->hw_rev, bi->serial_num_hi, bi->serial_num_lo, pcan_usb_pro.ctrl_count); } else { usb_if = pcan_usb_pro_dev_if(dev->prev_siblings); } pdev->usb_if = usb_if; usb_if->dev[dev->ctrl_idx] = dev; /* set LED in default state (end of init phase) */ pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_DEVICE, 1); kfree(bi); kfree(fi); return 0; err_out: kfree(bi); kfree(fi); kfree(usb_if); return err; } static void pcan_usb_pro_exit(struct peak_usb_device *dev) { struct pcan_usb_pro_device *pdev = container_of(dev, struct pcan_usb_pro_device, dev); /* * when rmmod is called before unplug, and if down, should reset things * before leaving */ if (dev->can.state != CAN_STATE_STOPPED) { /* set bus off on the corresponding channel */ pcan_usb_pro_set_bus(dev, 0); } /* if channel #0 (only) */ if (dev->ctrl_idx == 0) { /* turn off calibration messages if any device was opened */ if (pdev->usb_if->dev_opened_count > 0) pcan_usb_pro_set_ts(dev, 0); /* tell the PCAN-USB Pro device the driver is being unloaded */ pcan_usb_pro_drv_loaded(dev, 0); } } /* * called when PCAN-USB Pro adapter is unplugged */ static void pcan_usb_pro_free(struct peak_usb_device *dev) { /* last device: can free pcan_usb_pro_interface object now */ if (!dev->prev_siblings && !dev->next_siblings) kfree(pcan_usb_pro_dev_if(dev)); } /* * probe function for new PCAN-USB Pro usb interface */ int pcan_usb_pro_probe(struct usb_interface *intf) { struct usb_host_interface *if_desc; int i; if_desc = intf->altsetting; /* check interface endpoint addresses */ for (i = 0; i < if_desc->desc.bNumEndpoints; i++) { struct usb_endpoint_descriptor *ep = &if_desc->endpoint[i].desc; /* * below is the list of valid ep addresses. 
Any other ep address * is considered a non-CAN interface address => no dev created */ switch (ep->bEndpointAddress) { case PCAN_USBPRO_EP_CMDOUT: case PCAN_USBPRO_EP_CMDIN: case PCAN_USBPRO_EP_MSGOUT_0: case PCAN_USBPRO_EP_MSGOUT_1: case PCAN_USBPRO_EP_MSGIN: case PCAN_USBPRO_EP_UNUSED: break; default: return -ENODEV; } } return 0; } static int pcan_usb_pro_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct peak_usb_device *dev = netdev_priv(netdev); int err = 0; switch (state) { case ETHTOOL_ID_ACTIVE: /* fast blinking forever */ err = pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_BLINK_FAST, 0xffffffff); break; case ETHTOOL_ID_INACTIVE: /* restore LED default */ err = pcan_usb_pro_set_led(dev, PCAN_USBPRO_LED_DEVICE, 1); break; default: break; } return err; } static const struct ethtool_ops pcan_usb_pro_ethtool_ops = { .set_phys_id = pcan_usb_pro_set_phys_id, .get_ts_info = pcan_get_ts_info, .get_eeprom_len = peak_usb_get_eeprom_len, .get_eeprom = peak_usb_get_eeprom, .set_eeprom = peak_usb_set_eeprom, }; /* * describe the PCAN-USB Pro adapter */ static const struct can_bittiming_const pcan_usb_pro_const = { .name = "pcan_usb_pro", .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; const struct peak_usb_adapter pcan_usb_pro = { .name = "PCAN-USB Pro", .device_id = PCAN_USBPRO_PRODUCT_ID, .ctrl_count = PCAN_USBPRO_CHANNEL_COUNT, .ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_ONE_SHOT, .clock = { .freq = PCAN_USBPRO_CRYSTAL_HZ, }, .bittiming_const = &pcan_usb_pro_const, /* size of device private data */ .sizeof_dev_private = sizeof(struct pcan_usb_pro_device), .ethtool_ops = &pcan_usb_pro_ethtool_ops, /* timestamps usage */ .ts_used_bits = 32, .us_per_ts_scale = 1, /* us = (ts * scale) >> shift */ .us_per_ts_shift = 0, /* in/out endpoints for CAN messages */ .ep_msg_in = PCAN_USBPRO_EP_MSGIN, .ep_msg_out = {PCAN_USBPRO_EP_MSGOUT_0, PCAN_USBPRO_EP_MSGOUT_1}, /* size of rx/tx usb buffers */ .rx_buffer_size = PCAN_USBPRO_RX_BUFFER_SIZE, .tx_buffer_size = PCAN_USBPRO_TX_BUFFER_SIZE, /* device callbacks */ .intf_probe = pcan_usb_pro_probe, .dev_init = pcan_usb_pro_init, .dev_exit = pcan_usb_pro_exit, .dev_free = pcan_usb_pro_free, .dev_set_bus = pcan_usb_pro_set_bus, .dev_set_bittiming = pcan_usb_pro_set_bittiming, .dev_get_can_channel_id = pcan_usb_pro_get_can_channel_id, .dev_set_can_channel_id = pcan_usb_pro_set_can_channel_id, .dev_decode_buf = pcan_usb_pro_decode_buf, .dev_encode_msg = pcan_usb_pro_encode_msg, .dev_start = pcan_usb_pro_start, .dev_stop = pcan_usb_pro_stop, .dev_restart_async = pcan_usb_pro_restart_async, };
linux-master
drivers/net/can/usb/peak_usb/pcan_usb_pro.c
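/*
 * Editor's illustration for pcan_usb_pro.c above -- not part of the driver.
 * A minimal user-space sketch of the CCBT word packing done in
 * pcan_usb_pro_set_bittiming(): the field layout is read off the shifts in
 * that function, and struct sample_bittiming plus the example timing values
 * (brp=4, prop_seg=1, phase_seg1=7, phase_seg2=7, sjw=1) are hypothetical
 * stand-ins, not taken from the kernel or from real hardware.
 */
#include <stdio.h>
#include <stdint.h>

/* hypothetical subset of struct can_bittiming used by the packing */
struct sample_bittiming {
	uint32_t brp, prop_seg, phase_seg1, phase_seg2, sjw;
};

static uint32_t pack_ccbt(const struct sample_bittiming *bt, int three_samples)
{
	/* bit 23 selects triple sampling, as in the driver */
	uint32_t ccbt = three_samples ? 0x00800000 : 0;

	ccbt |= (bt->sjw - 1) << 24;			/* sync jump width */
	ccbt |= (bt->phase_seg2 - 1) << 20;		/* tseg2 */
	ccbt |= (bt->prop_seg + bt->phase_seg1 - 1) << 16; /* tseg1 */
	ccbt |= bt->brp - 1;				/* bitrate prescaler */

	return ccbt;
}

int main(void)
{
	struct sample_bittiming bt = {
		.brp = 4, .prop_seg = 1, .phase_seg1 = 7,
		.phase_seg2 = 7, .sjw = 1,
	};

	/* prints ccbt=0x00670003, mirroring the driver's netdev_info() trace */
	printf("ccbt=0x%08x\n", pack_ccbt(&bt, 0));
	return 0;
}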
// SPDX-License-Identifier: GPL-2.0-only /* * CAN driver for PEAK System USB adapters * Derived from the PCAN project file driver/src/pcan_usb_core.c * * Copyright (C) 2003-2010 PEAK System-Technik GmbH * Copyright (C) 2010-2012 Stephane Grosjean <[email protected]> * * Many thanks to Klaus Hitschler <[email protected]> */ #include <linux/device.h> #include <linux/ethtool.h> #include <linux/init.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/signal.h> #include <linux/slab.h> #include <linux/sysfs.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include "pcan_usb_core.h" MODULE_AUTHOR("Stephane Grosjean <[email protected]>"); MODULE_DESCRIPTION("CAN driver for PEAK-System USB adapters"); MODULE_LICENSE("GPL v2"); /* Table of devices that work with this driver */ static const struct usb_device_id peak_usb_table[] = { { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USB_PRODUCT_ID), .driver_info = (kernel_ulong_t)&pcan_usb, }, { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPRO_PRODUCT_ID), .driver_info = (kernel_ulong_t)&pcan_usb_pro, }, { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBFD_PRODUCT_ID), .driver_info = (kernel_ulong_t)&pcan_usb_fd, }, { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBPROFD_PRODUCT_ID), .driver_info = (kernel_ulong_t)&pcan_usb_pro_fd, }, { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBCHIP_PRODUCT_ID), .driver_info = (kernel_ulong_t)&pcan_usb_chip, }, { USB_DEVICE(PCAN_USB_VENDOR_ID, PCAN_USBX6_PRODUCT_ID), .driver_info = (kernel_ulong_t)&pcan_usb_x6, }, { /* Terminating entry */ } }; MODULE_DEVICE_TABLE(usb, peak_usb_table); static ssize_t can_channel_id_show(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *netdev = to_net_dev(dev); struct peak_usb_device *peak_dev = netdev_priv(netdev); return sysfs_emit(buf, "%08X\n", peak_dev->can_channel_id); } static DEVICE_ATTR_RO(can_channel_id); /* mutable to avoid cast in attribute_group */ static struct attribute *peak_usb_sysfs_attrs[] = { &dev_attr_can_channel_id.attr, NULL, }; static const struct attribute_group peak_usb_sysfs_group = { .name = "peak_usb", .attrs = peak_usb_sysfs_attrs, }; /* * dump memory */ #define DUMP_WIDTH 16 void pcan_dump_mem(const char *prompt, const void *p, int l) { pr_info("%s dumping %s (%d bytes):\n", PCAN_USB_DRIVER_NAME, prompt ? prompt : "memory", l); print_hex_dump(KERN_INFO, PCAN_USB_DRIVER_NAME " ", DUMP_PREFIX_NONE, DUMP_WIDTH, 1, p, l, false); } /* * initialize a time_ref object with usb adapter own settings */ void peak_usb_init_time_ref(struct peak_time_ref *time_ref, const struct peak_usb_adapter *adapter) { if (time_ref) { memset(time_ref, 0, sizeof(struct peak_time_ref)); time_ref->adapter = adapter; } } /* * sometimes, another now may be more recent than current one... 
*/ void peak_usb_update_ts_now(struct peak_time_ref *time_ref, u32 ts_now) { time_ref->ts_dev_2 = ts_now; /* should wait at least two passes before computing */ if (ktime_to_ns(time_ref->tv_host) > 0) { u32 delta_ts = time_ref->ts_dev_2 - time_ref->ts_dev_1; if (time_ref->ts_dev_2 < time_ref->ts_dev_1) delta_ts &= (1 << time_ref->adapter->ts_used_bits) - 1; time_ref->ts_total += delta_ts; } } /* * register device timestamp as now */ void peak_usb_set_ts_now(struct peak_time_ref *time_ref, u32 ts_now) { if (ktime_to_ns(time_ref->tv_host_0) == 0) { /* use monotonic clock to correctly compute further deltas */ time_ref->tv_host_0 = ktime_get(); time_ref->tv_host = ktime_set(0, 0); } else { /* * delta_us should not be >= 2^32 => delta should be < 4294s * handle 32-bits wrapping here: if count of s. reaches 4200, * reset counters and change time base */ if (ktime_to_ns(time_ref->tv_host)) { ktime_t delta = ktime_sub(time_ref->tv_host, time_ref->tv_host_0); if (ktime_to_ns(delta) > (4200ull * NSEC_PER_SEC)) { time_ref->tv_host_0 = time_ref->tv_host; time_ref->ts_total = 0; } } time_ref->tv_host = ktime_get(); time_ref->tick_count++; } time_ref->ts_dev_1 = time_ref->ts_dev_2; peak_usb_update_ts_now(time_ref, ts_now); } /* * compute time according to current ts and time_ref data */ void peak_usb_get_ts_time(struct peak_time_ref *time_ref, u32 ts, ktime_t *time) { /* protect from getting time before setting now */ if (ktime_to_ns(time_ref->tv_host)) { u64 delta_us; s64 delta_ts = 0; /* General case: dev_ts_1 < dev_ts_2 < ts, with: * * - dev_ts_1 = previous sync timestamp * - dev_ts_2 = last sync timestamp * - ts = event timestamp * - ts_period = known sync period (theoretical) * ~ dev_ts2 - dev_ts1 * *but*: * * - time counters wrap (see adapter->ts_used_bits) * - sometimes, dev_ts_1 < ts < dev_ts2 * * "normal" case (sync time counters increase): * must take into account case when ts wraps (tsw) * * < ts_period > < > * | | | * ---+--------+----+-------0-+--+--> * ts_dev_1 | ts_dev_2 | * ts tsw */ if (time_ref->ts_dev_1 < time_ref->ts_dev_2) { /* case when event time (tsw) wraps */ if (ts < time_ref->ts_dev_1) delta_ts = BIT_ULL(time_ref->adapter->ts_used_bits); /* Otherwise, sync time counter (ts_dev_2) has wrapped: * handle case when event time (tsn) hasn't. 
* * < ts_period > < > * | | | * ---+--------+--0-+---------+--+--> * ts_dev_1 | ts_dev_2 | * tsn ts */ } else if (time_ref->ts_dev_1 < ts) { delta_ts = -BIT_ULL(time_ref->adapter->ts_used_bits); } /* add delay between last sync and event timestamps */ delta_ts += (signed int)(ts - time_ref->ts_dev_2); /* add time from beginning to last sync */ delta_ts += time_ref->ts_total; /* convert ticks number into microseconds */ delta_us = delta_ts * time_ref->adapter->us_per_ts_scale; delta_us >>= time_ref->adapter->us_per_ts_shift; *time = ktime_add_us(time_ref->tv_host_0, delta_us); } else { *time = ktime_get(); } } /* post received skb with native 64-bit hw timestamp */ int peak_usb_netif_rx_64(struct sk_buff *skb, u32 ts_low, u32 ts_high) { struct skb_shared_hwtstamps *hwts = skb_hwtstamps(skb); u64 ns_ts; ns_ts = (u64)ts_high << 32 | ts_low; ns_ts *= NSEC_PER_USEC; hwts->hwtstamp = ns_to_ktime(ns_ts); return netif_rx(skb); } /* * callback for bulk Rx urb */ static void peak_usb_read_bulk_callback(struct urb *urb) { struct peak_usb_device *dev = urb->context; struct net_device *netdev; int err; netdev = dev->netdev; if (!netif_device_present(netdev)) return; /* check reception status */ switch (urb->status) { case 0: /* success */ break; case -EILSEQ: case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: return; default: if (net_ratelimit()) netdev_err(netdev, "Rx urb aborted (%d)\n", urb->status); goto resubmit_urb; } /* protect from any incoming empty msgs */ if ((urb->actual_length > 0) && (dev->adapter->dev_decode_buf)) { /* handle these kinds of msgs only if _start callback called */ if (dev->state & PCAN_USB_STATE_STARTED) { err = dev->adapter->dev_decode_buf(dev, urb); if (err) pcan_dump_mem("received usb message", urb->transfer_buffer, urb->transfer_buffer_length); } } resubmit_urb: usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep_msg_in), urb->transfer_buffer, dev->adapter->rx_buffer_size, peak_usb_read_bulk_callback, dev); usb_anchor_urb(urb, &dev->rx_submitted); err = usb_submit_urb(urb, GFP_ATOMIC); if (!err) return; usb_unanchor_urb(urb); if (err == -ENODEV) netif_device_detach(netdev); else netdev_err(netdev, "failed resubmitting read bulk urb: %d\n", err); } /* * callback for bulk Tx urb */ static void peak_usb_write_bulk_callback(struct urb *urb) { struct peak_tx_urb_context *context = urb->context; struct peak_usb_device *dev; struct net_device *netdev; int tx_bytes; BUG_ON(!context); dev = context->dev; netdev = dev->netdev; atomic_dec(&dev->active_tx_urbs); if (!netif_device_present(netdev)) return; /* check tx status */ switch (urb->status) { case 0: /* prevent tx timeout */ netif_trans_update(netdev); break; case -EPROTO: case -ENOENT: case -ECONNRESET: case -ESHUTDOWN: break; default: if (net_ratelimit()) netdev_err(netdev, "Tx urb aborted (%d)\n", urb->status); break; } /* should always release echo skb and corresponding context */ tx_bytes = can_get_echo_skb(netdev, context->echo_index, NULL); context->echo_index = PCAN_USB_MAX_TX_URBS; if (!urb->status) { /* transmission complete */ netdev->stats.tx_packets++; netdev->stats.tx_bytes += tx_bytes; /* do wakeup tx queue in case of success only */ netif_wake_queue(netdev); } } /* * called by netdev to send one skb on the CAN interface. 
*/ static netdev_tx_t peak_usb_ndo_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); struct peak_tx_urb_context *context = NULL; struct net_device_stats *stats = &netdev->stats; struct urb *urb; u8 *obuf; int i, err; size_t size = dev->adapter->tx_buffer_size; if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) if (dev->tx_contexts[i].echo_index == PCAN_USB_MAX_TX_URBS) { context = dev->tx_contexts + i; break; } if (!context) { /* should not occur except during restart */ return NETDEV_TX_BUSY; } urb = context->urb; obuf = urb->transfer_buffer; err = dev->adapter->dev_encode_msg(dev, skb, obuf, &size); if (err) { if (net_ratelimit()) netdev_err(netdev, "packet dropped\n"); dev_kfree_skb(skb); stats->tx_dropped++; return NETDEV_TX_OK; } context->echo_index = i; usb_anchor_urb(urb, &dev->tx_submitted); can_put_echo_skb(skb, netdev, context->echo_index, 0); atomic_inc(&dev->active_tx_urbs); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { can_free_echo_skb(netdev, context->echo_index, NULL); usb_unanchor_urb(urb); /* this context is not used in fact */ context->echo_index = PCAN_USB_MAX_TX_URBS; atomic_dec(&dev->active_tx_urbs); switch (err) { case -ENODEV: netif_device_detach(netdev); break; default: netdev_warn(netdev, "tx urb submitting failed err=%d\n", err); fallthrough; case -ENOENT: /* cable unplugged */ stats->tx_dropped++; } } else { netif_trans_update(netdev); /* slow down tx path */ if (atomic_read(&dev->active_tx_urbs) >= PCAN_USB_MAX_TX_URBS) netif_stop_queue(netdev); } return NETDEV_TX_OK; } /* * start the CAN interface. * Rx and Tx urbs are allocated here. Rx urbs are submitted here. */ static int peak_usb_start(struct peak_usb_device *dev) { struct net_device *netdev = dev->netdev; int err, i; for (i = 0; i < PCAN_USB_MAX_RX_URBS; i++) { struct urb *urb; u8 *buf; /* create a URB, and a buffer for it, to receive usb messages */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { err = -ENOMEM; break; } buf = kmalloc(dev->adapter->rx_buffer_size, GFP_KERNEL); if (!buf) { usb_free_urb(urb); err = -ENOMEM; break; } usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep_msg_in), buf, dev->adapter->rx_buffer_size, peak_usb_read_bulk_callback, dev); /* ask last usb_free_urb() to also kfree() transfer_buffer */ urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &dev->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { if (err == -ENODEV) netif_device_detach(dev->netdev); usb_unanchor_urb(urb); kfree(buf); usb_free_urb(urb); break; } /* drop reference, USB core will take care of freeing it */ usb_free_urb(urb); } /* did we submit any URBs? 
Warn if we were not able to submit all urbs */ if (i < PCAN_USB_MAX_RX_URBS) { if (i == 0) { netdev_err(netdev, "couldn't setup any rx URB\n"); return err; } netdev_warn(netdev, "rx performance may be slow\n"); } /* pre-alloc tx buffers and corresponding urbs */ for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) { struct peak_tx_urb_context *context; struct urb *urb; u8 *buf; /* create a URB and a buffer for it, to transmit usb messages */ urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { err = -ENOMEM; break; } buf = kmalloc(dev->adapter->tx_buffer_size, GFP_KERNEL); if (!buf) { usb_free_urb(urb); err = -ENOMEM; break; } context = dev->tx_contexts + i; context->dev = dev; context->urb = urb; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, dev->ep_msg_out), buf, dev->adapter->tx_buffer_size, peak_usb_write_bulk_callback, context); /* ask last usb_free_urb() to also kfree() transfer_buffer */ urb->transfer_flags |= URB_FREE_BUFFER; } /* warn if we were not able to allocate enough tx contexts */ if (i < PCAN_USB_MAX_TX_URBS) { if (i == 0) { netdev_err(netdev, "couldn't setup any tx URB\n"); goto err_tx; } netdev_warn(netdev, "tx performance may be slow\n"); } if (dev->adapter->dev_start) { err = dev->adapter->dev_start(dev); if (err) goto err_adapter; } dev->state |= PCAN_USB_STATE_STARTED; /* can set bus on now */ if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 1); if (err) goto err_adapter; } dev->can.state = CAN_STATE_ERROR_ACTIVE; return 0; err_adapter: if (err == -ENODEV) netif_device_detach(dev->netdev); netdev_warn(netdev, "couldn't submit control: %d\n", err); for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) { usb_free_urb(dev->tx_contexts[i].urb); dev->tx_contexts[i].urb = NULL; } err_tx: usb_kill_anchored_urbs(&dev->rx_submitted); return err; } /* * called by netdev to open the corresponding CAN interface. */ static int peak_usb_ndo_open(struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); int err; /* common open */ err = open_candev(netdev); if (err) return err; /* finally start device */ err = peak_usb_start(dev); if (err) { netdev_err(netdev, "couldn't start device: %d\n", err); close_candev(netdev); return err; } netif_start_queue(netdev); return 0; } /* * unlink in-flight Rx and Tx urbs and free their memory. */ static void peak_usb_unlink_all_urbs(struct peak_usb_device *dev) { int i; /* free all Rx (submitted) urbs */ usb_kill_anchored_urbs(&dev->rx_submitted); /* free unsubmitted Tx urbs first */ for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) { struct urb *urb = dev->tx_contexts[i].urb; if (!urb || dev->tx_contexts[i].echo_index != PCAN_USB_MAX_TX_URBS) { /* * this urb is already released or still submitted, * let the usb core free it by itself */ continue; } usb_free_urb(urb); dev->tx_contexts[i].urb = NULL; } /* then free all submitted Tx urbs */ usb_kill_anchored_urbs(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); } /* * called by netdev to close the corresponding CAN interface. 
*/ static int peak_usb_ndo_stop(struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); dev->state &= ~PCAN_USB_STATE_STARTED; netif_stop_queue(netdev); close_candev(netdev); dev->can.state = CAN_STATE_STOPPED; /* unlink all pending urbs and free used memory */ peak_usb_unlink_all_urbs(dev); if (dev->adapter->dev_stop) dev->adapter->dev_stop(dev); /* can set bus off now */ if (dev->adapter->dev_set_bus) { int err = dev->adapter->dev_set_bus(dev, 0); if (err) return err; } return 0; } /* * handle end of waiting for the device to reset */ void peak_usb_restart_complete(struct peak_usb_device *dev) { /* finally MUST update can state */ dev->can.state = CAN_STATE_ERROR_ACTIVE; /* netdev queue can be woken up now */ netif_wake_queue(dev->netdev); } void peak_usb_async_complete(struct urb *urb) { kfree(urb->transfer_buffer); usb_free_urb(urb); } /* * device (auto-)restart mechanism runs in a timer context => * MUST handle restart with asynchronous usb transfers */ static int peak_usb_restart(struct peak_usb_device *dev) { struct urb *urb; int err; u8 *buf; /* * if device doesn't define any asynchronous restart handler, simply * wake the netdev queue up */ if (!dev->adapter->dev_restart_async) { peak_usb_restart_complete(dev); return 0; } /* first allocate a urb to handle the asynchronous steps */ urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; /* also allocate enough space for the commands to send */ buf = kmalloc(PCAN_USB_MAX_CMD_LEN, GFP_ATOMIC); if (!buf) { usb_free_urb(urb); return -ENOMEM; } /* call the device specific handler for the restart */ err = dev->adapter->dev_restart_async(dev, urb, buf); if (!err) return 0; kfree(buf); usb_free_urb(urb); return err; } /* * candev callback used to change CAN mode. * Warning: this is called from a timer context! */ static int peak_usb_set_mode(struct net_device *netdev, enum can_mode mode) { struct peak_usb_device *dev = netdev_priv(netdev); int err = 0; switch (mode) { case CAN_MODE_START: err = peak_usb_restart(dev); if (err) netdev_err(netdev, "couldn't start device (err %d)\n", err); break; default: return -EOPNOTSUPP; } return err; } /* * candev callback used to set device nominal/arbitration bitrate. */ static int peak_usb_set_bittiming(struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); const struct peak_usb_adapter *pa = dev->adapter; if (pa->dev_set_bittiming) { struct can_bittiming *bt = &dev->can.bittiming; int err = pa->dev_set_bittiming(dev, bt); if (err) netdev_info(netdev, "couldn't set bitrate (err %d)\n", err); return err; } return 0; } /* * candev callback used to set device data bitrate. 
*/ static int peak_usb_set_data_bittiming(struct net_device *netdev) { struct peak_usb_device *dev = netdev_priv(netdev); const struct peak_usb_adapter *pa = dev->adapter; if (pa->dev_set_data_bittiming) { struct can_bittiming *bt = &dev->can.data_bittiming; int err = pa->dev_set_data_bittiming(dev, bt); if (err) netdev_info(netdev, "couldn't set data bitrate (err %d)\n", err); return err; } return 0; } static int peak_eth_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) { struct hwtstamp_config hwts_cfg = { 0 }; switch (cmd) { case SIOCSHWTSTAMP: /* set */ if (copy_from_user(&hwts_cfg, ifr->ifr_data, sizeof(hwts_cfg))) return -EFAULT; if (hwts_cfg.tx_type == HWTSTAMP_TX_OFF && hwts_cfg.rx_filter == HWTSTAMP_FILTER_ALL) return 0; return -ERANGE; case SIOCGHWTSTAMP: /* get */ hwts_cfg.tx_type = HWTSTAMP_TX_OFF; hwts_cfg.rx_filter = HWTSTAMP_FILTER_ALL; if (copy_to_user(ifr->ifr_data, &hwts_cfg, sizeof(hwts_cfg))) return -EFAULT; return 0; default: return -EOPNOTSUPP; } } static const struct net_device_ops peak_usb_netdev_ops = { .ndo_open = peak_usb_ndo_open, .ndo_stop = peak_usb_ndo_stop, .ndo_eth_ioctl = peak_eth_ioctl, .ndo_start_xmit = peak_usb_ndo_start_xmit, .ndo_change_mtu = can_change_mtu, }; /* CAN-USB devices generally handle 32-bit CAN channel IDs. * In case one doesn't, it has to override this function. */ int peak_usb_get_eeprom_len(struct net_device *netdev) { return sizeof(u32); } /* Every CAN-USB device exports the dev_get_can_channel_id() operation. It is used * here to fill the data buffer with the user-defined CAN channel ID. */ int peak_usb_get_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *data) { struct peak_usb_device *dev = netdev_priv(netdev); u32 ch_id; __le32 ch_id_le; int err; err = dev->adapter->dev_get_can_channel_id(dev, &ch_id); if (err) return err; /* ethtool operates on individual bytes. The byte order of the CAN * channel id in memory depends on the kernel architecture. We * convert the CAN channel id back to the native byte order of the PEAK * device itself to ensure that the order is consistent for all * host architectures. */ ch_id_le = cpu_to_le32(ch_id); memcpy(data, (u8 *)&ch_id_le + eeprom->offset, eeprom->len); /* update cached value */ dev->can_channel_id = ch_id; return err; } /* Every CAN-USB device exports the dev_get_can_channel_id()/dev_set_can_channel_id() * operations. They are used here to set the new user-defined CAN channel ID. */ int peak_usb_set_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, u8 *data) { struct peak_usb_device *dev = netdev_priv(netdev); u32 ch_id; __le32 ch_id_le; int err; /* first, read the current user-defined CAN channel ID */ err = dev->adapter->dev_get_can_channel_id(dev, &ch_id); if (err) { netdev_err(netdev, "Failed to init CAN channel id (err %d)\n", err); return err; } /* update the value with the user-given bytes. * ethtool operates on individual bytes. The byte order of the CAN * channel ID in memory depends on the kernel architecture. We * convert the CAN channel ID back to the native byte order of the PEAK * device itself to ensure that the order is consistent for all * host architectures. 
*/ ch_id_le = cpu_to_le32(ch_id); memcpy((u8 *)&ch_id_le + eeprom->offset, data, eeprom->len); ch_id = le32_to_cpu(ch_id_le); /* flash the new value now */ err = dev->adapter->dev_set_can_channel_id(dev, ch_id); if (err) { netdev_err(netdev, "Failed to write new CAN channel id (err %d)\n", err); return err; } /* update cached value with the new one */ dev->can_channel_id = ch_id; return 0; } int pcan_get_ts_info(struct net_device *dev, struct ethtool_ts_info *info) { info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE | SOF_TIMESTAMPING_RX_SOFTWARE | SOF_TIMESTAMPING_SOFTWARE | SOF_TIMESTAMPING_RX_HARDWARE | SOF_TIMESTAMPING_RAW_HARDWARE; info->phc_index = -1; info->tx_types = BIT(HWTSTAMP_TX_OFF); info->rx_filters = BIT(HWTSTAMP_FILTER_ALL); return 0; } /* * create one device which is attached to CAN controller #ctrl_idx of the * usb adapter. */ static int peak_usb_create_dev(const struct peak_usb_adapter *peak_usb_adapter, struct usb_interface *intf, int ctrl_idx) { struct usb_device *usb_dev = interface_to_usbdev(intf); int sizeof_candev = peak_usb_adapter->sizeof_dev_private; struct peak_usb_device *dev; struct net_device *netdev; int i, err; u16 tmp16; if (sizeof_candev < sizeof(struct peak_usb_device)) sizeof_candev = sizeof(struct peak_usb_device); netdev = alloc_candev(sizeof_candev, PCAN_USB_MAX_TX_URBS); if (!netdev) { dev_err(&intf->dev, "%s: couldn't alloc candev\n", PCAN_USB_DRIVER_NAME); return -ENOMEM; } dev = netdev_priv(netdev); /* allocate a buffer large enough to send commands */ dev->cmd_buf = kzalloc(PCAN_USB_MAX_CMD_LEN, GFP_KERNEL); if (!dev->cmd_buf) { err = -ENOMEM; goto lbl_free_candev; } dev->udev = usb_dev; dev->netdev = netdev; dev->adapter = peak_usb_adapter; dev->ctrl_idx = ctrl_idx; dev->state = PCAN_USB_STATE_CONNECTED; dev->ep_msg_in = peak_usb_adapter->ep_msg_in; dev->ep_msg_out = peak_usb_adapter->ep_msg_out[ctrl_idx]; dev->can.clock = peak_usb_adapter->clock; dev->can.bittiming_const = peak_usb_adapter->bittiming_const; dev->can.do_set_bittiming = peak_usb_set_bittiming; dev->can.data_bittiming_const = peak_usb_adapter->data_bittiming_const; dev->can.do_set_data_bittiming = peak_usb_set_data_bittiming; dev->can.do_set_mode = peak_usb_set_mode; dev->can.do_get_berr_counter = peak_usb_adapter->do_get_berr_counter; dev->can.ctrlmode_supported = peak_usb_adapter->ctrlmode_supported; netdev->netdev_ops = &peak_usb_netdev_ops; netdev->flags |= IFF_ECHO; /* we support local echo */ /* add ethtool support */ netdev->ethtool_ops = peak_usb_adapter->ethtool_ops; /* register peak_usb sysfs files */ netdev->sysfs_groups[0] = &peak_usb_sysfs_group; init_usb_anchor(&dev->rx_submitted); init_usb_anchor(&dev->tx_submitted); atomic_set(&dev->active_tx_urbs, 0); for (i = 0; i < PCAN_USB_MAX_TX_URBS; i++) dev->tx_contexts[i].echo_index = PCAN_USB_MAX_TX_URBS; dev->prev_siblings = usb_get_intfdata(intf); usb_set_intfdata(intf, dev); SET_NETDEV_DEV(netdev, &intf->dev); netdev->dev_id = ctrl_idx; err = register_candev(netdev); if (err) { dev_err(&intf->dev, "couldn't register CAN device: %d\n", err); goto lbl_restore_intf_data; } if (dev->prev_siblings) (dev->prev_siblings)->next_siblings = dev; /* keep the hw revision in the netdevice */ tmp16 = le16_to_cpu(usb_dev->descriptor.bcdDevice); dev->device_rev = tmp16 >> 8; if (dev->adapter->dev_init) { err = dev->adapter->dev_init(dev); if (err) goto lbl_unregister_candev; } /* set bus off */ if (dev->adapter->dev_set_bus) { err = dev->adapter->dev_set_bus(dev, 0); if (err) goto adap_dev_free; } /* get CAN channel id early */ 
dev->adapter->dev_get_can_channel_id(dev, &dev->can_channel_id); netdev_info(netdev, "attached to %s channel %u (device 0x%08X)\n", peak_usb_adapter->name, ctrl_idx, dev->can_channel_id); return 0; adap_dev_free: if (dev->adapter->dev_free) dev->adapter->dev_free(dev); lbl_unregister_candev: unregister_candev(netdev); lbl_restore_intf_data: usb_set_intfdata(intf, dev->prev_siblings); kfree(dev->cmd_buf); lbl_free_candev: free_candev(netdev); return err; } /* * called by the usb core when the device is unplugged from the system */ static void peak_usb_disconnect(struct usb_interface *intf) { struct peak_usb_device *dev; struct peak_usb_device *dev_prev_siblings; /* unregister as many netdev devices as siblings */ for (dev = usb_get_intfdata(intf); dev; dev = dev_prev_siblings) { struct net_device *netdev = dev->netdev; char name[IFNAMSIZ]; dev_prev_siblings = dev->prev_siblings; dev->state &= ~PCAN_USB_STATE_CONNECTED; strscpy(name, netdev->name, IFNAMSIZ); unregister_candev(netdev); kfree(dev->cmd_buf); dev->next_siblings = NULL; if (dev->adapter->dev_free) dev->adapter->dev_free(dev); free_candev(netdev); dev_info(&intf->dev, "%s removed\n", name); } usb_set_intfdata(intf, NULL); } /* * probe function for new PEAK-System devices */ static int peak_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct peak_usb_adapter *peak_usb_adapter; int i, err = -ENOMEM; /* get corresponding PCAN-USB adapter */ peak_usb_adapter = (const struct peak_usb_adapter *)id->driver_info; /* got corresponding adapter: check if it handles current interface */ if (peak_usb_adapter->intf_probe) { err = peak_usb_adapter->intf_probe(intf); if (err) return err; } for (i = 0; i < peak_usb_adapter->ctrl_count; i++) { err = peak_usb_create_dev(peak_usb_adapter, intf, i); if (err) { /* deregister already created devices */ peak_usb_disconnect(intf); break; } } return err; } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver peak_usb_driver = { .name = PCAN_USB_DRIVER_NAME, .disconnect = peak_usb_disconnect, .probe = peak_usb_probe, .id_table = peak_usb_table, }; static int __init peak_usb_init(void) { int err; /* register this driver with the USB subsystem */ err = usb_register(&peak_usb_driver); if (err) pr_err("%s: usb_register failed (err %d)\n", PCAN_USB_DRIVER_NAME, err); return err; } static int peak_usb_do_device_exit(struct device *d, void *arg) { struct usb_interface *intf = to_usb_interface(d); struct peak_usb_device *dev; /* stop as many netdev devices as siblings */ for (dev = usb_get_intfdata(intf); dev; dev = dev->prev_siblings) { struct net_device *netdev = dev->netdev; if (netif_device_present(netdev)) if (dev->adapter->dev_exit) dev->adapter->dev_exit(dev); } return 0; } static void __exit peak_usb_exit(void) { int err; /* last chance to send any synchronous commands here */ err = driver_for_each_device(&peak_usb_driver.drvwrap.driver, NULL, NULL, peak_usb_do_device_exit); if (err) pr_err("%s: failed to stop all can devices (err %d)\n", PCAN_USB_DRIVER_NAME, err); /* deregister this driver with the USB subsystem */ usb_deregister(&peak_usb_driver); pr_info("%s: PCAN-USB interfaces driver unloaded\n", PCAN_USB_DRIVER_NAME); } module_init(peak_usb_init); module_exit(peak_usb_exit);
linux-master
drivers/net/can/usb/peak_usb/pcan_usb_core.c
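/*
 * Editor's illustration for pcan_usb_core.c above -- not part of the driver.
 * A standalone sketch of the wrap-aware tick accumulation performed by
 * peak_usb_update_ts_now(): TS_USED_BITS mirrors adapter->ts_used_bits, and
 * the 24-bit width plus the sample sync values are hypothetical, chosen only
 * to make the wrap visible (pcan_usb_pro itself uses 32 bits).
 */
#include <stdio.h>
#include <stdint.h>

#define TS_USED_BITS 24

/* ticks elapsed between two device sync timestamps; the counter may wrap */
static uint32_t sync_delta(uint32_t ts_dev_1, uint32_t ts_dev_2)
{
	uint32_t delta_ts = ts_dev_2 - ts_dev_1;

	/* counter wrapped between the two syncs: keep only the counter bits */
	if (ts_dev_2 < ts_dev_1)
		delta_ts &= (1u << TS_USED_BITS) - 1;

	return delta_ts;
}

int main(void)
{
	uint64_t ts_total = 0;

	/* two syncs before the wrap, one just after it: 0xfffe00 -> 0xffff00
	 * adds 256 ticks, 0xffff00 -> 0x000100 adds 512 ticks across the wrap
	 */
	ts_total += sync_delta(0xfffe00, 0xffff00);
	ts_total += sync_delta(0xffff00, 0x000100);

	/* prints "accumulated ticks: 768" */
	printf("accumulated ticks: %llu\n", (unsigned long long)ts_total);
	return 0;
}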
// SPDX-License-Identifier: GPL-2.0 /* Parts of this driver are based on the following: * - Kvaser linux leaf driver (version 4.78) * - CAN driver for esd CAN-USB/2 * - Kvaser linux usbcanII driver (version 5.3) * - Kvaser linux mhydra driver (version 5.24) * * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved. * Copyright (C) 2010 Matthias Fuchs <[email protected]>, esd gmbh * Copyright (C) 2012 Olivier Sobrie <[email protected]> * Copyright (C) 2015 Valeo S.A. */ #include <linux/completion.h> #include <linux/device.h> #include <linux/ethtool.h> #include <linux/gfp.h> #include <linux/if.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/netlink.h> #include "kvaser_usb.h" /* Kvaser USB vendor id. */ #define KVASER_VENDOR_ID 0x0bfd /* Kvaser Leaf USB devices product ids */ #define USB_LEAF_DEVEL_PRODUCT_ID 0x000a #define USB_LEAF_LITE_PRODUCT_ID 0x000b #define USB_LEAF_PRO_PRODUCT_ID 0x000c #define USB_LEAF_SPRO_PRODUCT_ID 0x000e #define USB_LEAF_PRO_LS_PRODUCT_ID 0x000f #define USB_LEAF_PRO_SWC_PRODUCT_ID 0x0010 #define USB_LEAF_PRO_LIN_PRODUCT_ID 0x0011 #define USB_LEAF_SPRO_LS_PRODUCT_ID 0x0012 #define USB_LEAF_SPRO_SWC_PRODUCT_ID 0x0013 #define USB_MEMO2_DEVEL_PRODUCT_ID 0x0016 #define USB_MEMO2_HSHS_PRODUCT_ID 0x0017 #define USB_UPRO_HSHS_PRODUCT_ID 0x0018 #define USB_LEAF_LITE_GI_PRODUCT_ID 0x0019 #define USB_LEAF_PRO_OBDII_PRODUCT_ID 0x001a #define USB_MEMO2_HSLS_PRODUCT_ID 0x001b #define USB_LEAF_LITE_CH_PRODUCT_ID 0x001c #define USB_BLACKBIRD_SPRO_PRODUCT_ID 0x001d #define USB_OEM_MERCURY_PRODUCT_ID 0x0022 #define USB_OEM_LEAF_PRODUCT_ID 0x0023 #define USB_CAN_R_PRODUCT_ID 0x0027 #define USB_LEAF_LITE_V2_PRODUCT_ID 0x0120 #define USB_MINI_PCIE_HS_PRODUCT_ID 0x0121 #define USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID 0x0122 #define USB_USBCAN_LIGHT_2HS_PRODUCT_ID 0x0123 #define USB_MINI_PCIE_2HS_PRODUCT_ID 0x0124 #define USB_USBCAN_R_V2_PRODUCT_ID 0x0126 #define USB_LEAF_LIGHT_R_V2_PRODUCT_ID 0x0127 #define USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID 0x0128 /* Kvaser USBCan-II devices product ids */ #define USB_USBCAN_REVB_PRODUCT_ID 0x0002 #define USB_VCI2_PRODUCT_ID 0x0003 #define USB_USBCAN2_PRODUCT_ID 0x0004 #define USB_MEMORATOR_PRODUCT_ID 0x0005 /* Kvaser Minihydra USB devices product ids */ #define USB_BLACKBIRD_V2_PRODUCT_ID 0x0102 #define USB_MEMO_PRO_5HS_PRODUCT_ID 0x0104 #define USB_USBCAN_PRO_5HS_PRODUCT_ID 0x0105 #define USB_USBCAN_LIGHT_4HS_PRODUCT_ID 0x0106 #define USB_LEAF_PRO_HS_V2_PRODUCT_ID 0x0107 #define USB_USBCAN_PRO_2HS_V2_PRODUCT_ID 0x0108 #define USB_MEMO_2HS_PRODUCT_ID 0x0109 #define USB_MEMO_PRO_2HS_V2_PRODUCT_ID 0x010a #define USB_HYBRID_2CANLIN_PRODUCT_ID 0x010b #define USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID 0x010c #define USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID 0x010d #define USB_HYBRID_PRO_2CANLIN_PRODUCT_ID 0x010e #define USB_U100_PRODUCT_ID 0x0111 #define USB_U100P_PRODUCT_ID 0x0112 #define USB_U100S_PRODUCT_ID 0x0113 #define USB_USBCAN_PRO_4HS_PRODUCT_ID 0x0114 #define USB_HYBRID_CANLIN_PRODUCT_ID 0x0115 #define USB_HYBRID_PRO_CANLIN_PRODUCT_ID 0x0116 static const struct kvaser_usb_driver_info kvaser_usb_driver_info_hydra = { .quirks = KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP, .ops = &kvaser_usb_hydra_dev_ops, }; static const struct kvaser_usb_driver_info kvaser_usb_driver_info_usbcan = { .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | 
KVASER_USB_QUIRK_HAS_SILENT_MODE, .family = KVASER_USBCAN, .ops = &kvaser_usb_leaf_dev_ops, }; static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf = { .quirks = KVASER_USB_QUIRK_IGNORE_CLK_FREQ, .family = KVASER_LEAF, .ops = &kvaser_usb_leaf_dev_ops, }; static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err = { .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | KVASER_USB_QUIRK_IGNORE_CLK_FREQ, .family = KVASER_LEAF, .ops = &kvaser_usb_leaf_dev_ops, }; static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leaf_err_listen = { .quirks = KVASER_USB_QUIRK_HAS_TXRX_ERRORS | KVASER_USB_QUIRK_HAS_SILENT_MODE | KVASER_USB_QUIRK_IGNORE_CLK_FREQ, .family = KVASER_LEAF, .ops = &kvaser_usb_leaf_dev_ops, }; static const struct kvaser_usb_driver_info kvaser_usb_driver_info_leafimx = { .quirks = 0, .ops = &kvaser_usb_leaf_dev_ops, }; static const struct usb_device_id kvaser_usb_table[] = { /* Leaf M32C USB product IDs */ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_DEVEL_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_SWC_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_LIN_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_LS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_SPRO_SWC_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_DEVEL_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSHS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_UPRO_HSHS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_GI_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_OBDII_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err_listen }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO2_HSLS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_CH_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_SPRO_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_MERCURY_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_OEM_LEAF_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, { USB_DEVICE(KVASER_VENDOR_ID, USB_CAN_R_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leaf_err }, 
/* Leaf i.MX28 USB product IDs */ { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LITE_V2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_HS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_2HS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MINI_PCIE_2HS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_R_V2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_R_V2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_LIGHT_HS_V2_OEM2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_leafimx }, /* USBCANII USB product IDs */ { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_REVB_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMORATOR_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, { USB_DEVICE(KVASER_VENDOR_ID, USB_VCI2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_usbcan }, /* Minihydra USB product IDs */ { USB_DEVICE(KVASER_VENDOR_ID, USB_BLACKBIRD_V2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_5HS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_5HS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_LIGHT_4HS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_LEAF_PRO_HS_V2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_USBCAN_PRO_2HS_V2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_2HS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_MEMO_PRO_2HS_V2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_2CANLIN_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_USBCAN_PRO_2HS_V2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_ATI_MEMO_PRO_2HS_V2_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_2CANLIN_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_U100_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_U100P_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_U100S_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, 
USB_USBCAN_PRO_4HS_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_CANLIN_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { USB_DEVICE(KVASER_VENDOR_ID, USB_HYBRID_PRO_CANLIN_PRODUCT_ID), .driver_info = (kernel_ulong_t)&kvaser_usb_driver_info_hydra }, { } }; MODULE_DEVICE_TABLE(usb, kvaser_usb_table); int kvaser_usb_send_cmd(const struct kvaser_usb *dev, void *cmd, int len) { return usb_bulk_msg(dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), cmd, len, NULL, KVASER_USB_TIMEOUT); } int kvaser_usb_recv_cmd(const struct kvaser_usb *dev, void *cmd, int len, int *actual_len) { return usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->bulk_in->bEndpointAddress), cmd, len, actual_len, KVASER_USB_TIMEOUT); } static void kvaser_usb_send_cmd_callback(struct urb *urb) { struct net_device *netdev = urb->context; kfree(urb->transfer_buffer); if (urb->status) netdev_warn(netdev, "urb status received: %d\n", urb->status); } int kvaser_usb_send_cmd_async(struct kvaser_usb_net_priv *priv, void *cmd, int len) { struct kvaser_usb *dev = priv->dev; struct net_device *netdev = priv->netdev; struct urb *urb; int err; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), cmd, len, kvaser_usb_send_cmd_callback, netdev); usb_anchor_urb(urb, &priv->tx_submitted); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { netdev_err(netdev, "Error transmitting URB\n"); usb_unanchor_urb(urb); } usb_free_urb(urb); return 0; } int kvaser_usb_can_rx_over_error(struct net_device *netdev) { struct net_device_stats *stats = &netdev->stats; struct can_frame *cf; struct sk_buff *skb; stats->rx_over_errors++; stats->rx_errors++; skb = alloc_can_err_skb(netdev, &cf); if (!skb) { stats->rx_dropped++; netdev_warn(netdev, "No memory left for err_skb\n"); return -ENOMEM; } cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; netif_rx(skb); return 0; } static void kvaser_usb_read_bulk_callback(struct urb *urb) { struct kvaser_usb *dev = urb->context; const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; unsigned int i; switch (urb->status) { case 0: break; case -ENOENT: case -EPIPE: case -EPROTO: case -ESHUTDOWN: return; default: dev_info(&dev->intf->dev, "Rx URB aborted (%d)\n", urb->status); goto resubmit_urb; } ops->dev_read_bulk_callback(dev, urb->transfer_buffer, urb->actual_length); resubmit_urb: usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe(dev->udev, dev->bulk_in->bEndpointAddress), urb->transfer_buffer, KVASER_USB_RX_BUFFER_SIZE, kvaser_usb_read_bulk_callback, dev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err == -ENODEV) { for (i = 0; i < dev->nchannels; i++) { if (!dev->nets[i]) continue; netif_device_detach(dev->nets[i]->netdev); } } else if (err) { dev_err(&dev->intf->dev, "Failed resubmitting read bulk urb: %d\n", err); } } static int kvaser_usb_setup_rx_urbs(struct kvaser_usb *dev) { int i, err = 0; if (dev->rxinitdone) return 0; for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++) { struct urb *urb = NULL; u8 *buf = NULL; dma_addr_t buf_dma; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) { err = -ENOMEM; break; } buf = usb_alloc_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL, &buf_dma); if (!buf) { dev_warn(&dev->intf->dev, "No memory left for USB buffer\n"); usb_free_urb(urb); err = -ENOMEM; break; } usb_fill_bulk_urb(urb, dev->udev, usb_rcvbulkpipe 
(dev->udev, dev->bulk_in->bEndpointAddress), buf, KVASER_USB_RX_BUFFER_SIZE, kvaser_usb_read_bulk_callback, dev); urb->transfer_dma = buf_dma; urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; usb_anchor_urb(urb, &dev->rx_submitted); err = usb_submit_urb(urb, GFP_KERNEL); if (err) { usb_unanchor_urb(urb); usb_free_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE, buf, buf_dma); usb_free_urb(urb); break; } dev->rxbuf[i] = buf; dev->rxbuf_dma[i] = buf_dma; usb_free_urb(urb); } if (i == 0) { dev_warn(&dev->intf->dev, "Cannot setup read URBs, error %d\n", err); return err; } else if (i < KVASER_USB_MAX_RX_URBS) { dev_warn(&dev->intf->dev, "RX performances may be slow\n"); } dev->rxinitdone = true; return 0; } static int kvaser_usb_open(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; err = open_candev(netdev); if (err) return err; err = ops->dev_set_opt_mode(priv); if (err) goto error; err = ops->dev_start_chip(priv); if (err) { netdev_warn(netdev, "Cannot start device, error %d\n", err); goto error; } priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; error: close_candev(netdev); return err; } static void kvaser_usb_reset_tx_urb_contexts(struct kvaser_usb_net_priv *priv) { int i, max_tx_urbs; max_tx_urbs = priv->dev->max_tx_urbs; priv->active_tx_contexts = 0; for (i = 0; i < max_tx_urbs; i++) priv->tx_contexts[i].echo_index = max_tx_urbs; } /* This method might sleep. Do not call it in the atomic context * of URB completions. */ void kvaser_usb_unlink_tx_urbs(struct kvaser_usb_net_priv *priv) { usb_kill_anchored_urbs(&priv->tx_submitted); kvaser_usb_reset_tx_urb_contexts(priv); } static void kvaser_usb_unlink_all_urbs(struct kvaser_usb *dev) { int i; usb_kill_anchored_urbs(&dev->rx_submitted); for (i = 0; i < KVASER_USB_MAX_RX_URBS; i++) usb_free_coherent(dev->udev, KVASER_USB_RX_BUFFER_SIZE, dev->rxbuf[i], dev->rxbuf_dma[i]); for (i = 0; i < dev->nchannels; i++) { struct kvaser_usb_net_priv *priv = dev->nets[i]; if (priv) kvaser_usb_unlink_tx_urbs(priv); } } static int kvaser_usb_close(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int err; netif_stop_queue(netdev); err = ops->dev_flush_queue(priv); if (err) netdev_warn(netdev, "Cannot flush queue, error %d\n", err); if (ops->dev_reset_chip) { err = ops->dev_reset_chip(dev, priv->channel); if (err) netdev_warn(netdev, "Cannot reset card, error %d\n", err); } err = ops->dev_stop_chip(priv); if (err) netdev_warn(netdev, "Cannot stop device, error %d\n", err); /* reset tx contexts */ kvaser_usb_unlink_tx_urbs(priv); priv->can.state = CAN_STATE_STOPPED; close_candev(priv->netdev); return 0; } static int kvaser_usb_set_bittiming(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; struct can_bittiming *bt = &priv->can.bittiming; struct kvaser_usb_busparams busparams; int tseg1 = bt->prop_seg + bt->phase_seg1; int tseg2 = bt->phase_seg2; int sjw = bt->sjw; int err; busparams.bitrate = cpu_to_le32(bt->bitrate); busparams.sjw = (u8)sjw; busparams.tseg1 = (u8)tseg1; busparams.tseg2 = (u8)tseg2; if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) busparams.nsamples = 3; else busparams.nsamples = 1; err = ops->dev_set_bittiming(netdev, &busparams); if (err) 
return err; err = kvaser_usb_setup_rx_urbs(priv->dev); if (err) return err; err = ops->dev_get_busparams(priv); if (err) { /* Treat EOPNOTSUPP as success */ if (err == -EOPNOTSUPP) err = 0; return err; } if (memcmp(&busparams, &priv->busparams_nominal, sizeof(priv->busparams_nominal)) != 0) err = -EINVAL; return err; } static int kvaser_usb_set_data_bittiming(struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; struct can_bittiming *dbt = &priv->can.data_bittiming; struct kvaser_usb_busparams busparams; int tseg1 = dbt->prop_seg + dbt->phase_seg1; int tseg2 = dbt->phase_seg2; int sjw = dbt->sjw; int err; if (!ops->dev_set_data_bittiming || !ops->dev_get_data_busparams) return -EOPNOTSUPP; busparams.bitrate = cpu_to_le32(dbt->bitrate); busparams.sjw = (u8)sjw; busparams.tseg1 = (u8)tseg1; busparams.tseg2 = (u8)tseg2; busparams.nsamples = 1; err = ops->dev_set_data_bittiming(netdev, &busparams); if (err) return err; err = kvaser_usb_setup_rx_urbs(priv->dev); if (err) return err; err = ops->dev_get_data_busparams(priv); if (err) return err; if (memcmp(&busparams, &priv->busparams_data, sizeof(priv->busparams_data)) != 0) err = -EINVAL; return err; } static void kvaser_usb_write_bulk_callback(struct urb *urb) { struct kvaser_usb_tx_urb_context *context = urb->context; struct kvaser_usb_net_priv *priv; struct net_device *netdev; if (WARN_ON(!context)) return; priv = context->priv; netdev = priv->netdev; kfree(urb->transfer_buffer); if (!netif_device_present(netdev)) return; if (urb->status) netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status); } static netdev_tx_t kvaser_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; struct net_device_stats *stats = &netdev->stats; struct kvaser_usb_tx_urb_context *context = NULL; struct urb *urb; void *buf; int cmd_len = 0; int err, ret = NETDEV_TX_OK; unsigned int i; unsigned long flags; if (can_dev_dropped_skb(netdev, skb)) return NETDEV_TX_OK; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { stats->tx_dropped++; dev_kfree_skb(skb); return NETDEV_TX_OK; } spin_lock_irqsave(&priv->tx_contexts_lock, flags); for (i = 0; i < dev->max_tx_urbs; i++) { if (priv->tx_contexts[i].echo_index == dev->max_tx_urbs) { context = &priv->tx_contexts[i]; context->echo_index = i; ++priv->active_tx_contexts; if (priv->active_tx_contexts >= (int)dev->max_tx_urbs) netif_stop_queue(netdev); break; } } spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); /* This should never happen; it implies a flow control bug */ if (!context) { netdev_warn(netdev, "cannot find free context\n"); ret = NETDEV_TX_BUSY; goto freeurb; } buf = ops->dev_frame_to_cmd(priv, skb, &cmd_len, context->echo_index); if (!buf) { stats->tx_dropped++; dev_kfree_skb(skb); spin_lock_irqsave(&priv->tx_contexts_lock, flags); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(netdev); spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); goto freeurb; } context->priv = priv; can_put_echo_skb(skb, netdev, context->echo_index, 0); usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, dev->bulk_out->bEndpointAddress), buf, cmd_len, kvaser_usb_write_bulk_callback, context); usb_anchor_urb(urb, &priv->tx_submitted); err = usb_submit_urb(urb, GFP_ATOMIC); if (unlikely(err)) { 
spin_lock_irqsave(&priv->tx_contexts_lock, flags); can_free_echo_skb(netdev, context->echo_index, NULL); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(netdev); spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); usb_unanchor_urb(urb); kfree(buf); stats->tx_dropped++; if (err == -ENODEV) netif_device_detach(netdev); else netdev_warn(netdev, "Failed tx_urb %d\n", err); goto freeurb; } ret = NETDEV_TX_OK; freeurb: usb_free_urb(urb); return ret; } static const struct net_device_ops kvaser_usb_netdev_ops = { .ndo_open = kvaser_usb_open, .ndo_stop = kvaser_usb_close, .ndo_start_xmit = kvaser_usb_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct net_device_ops kvaser_usb_netdev_ops_hwts = { .ndo_open = kvaser_usb_open, .ndo_stop = kvaser_usb_close, .ndo_eth_ioctl = can_eth_ioctl_hwts, .ndo_start_xmit = kvaser_usb_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops kvaser_usb_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const struct ethtool_ops kvaser_usb_ethtool_ops_hwts = { .get_ts_info = can_ethtool_op_get_ts_info_hwts, }; static void kvaser_usb_remove_interfaces(struct kvaser_usb *dev) { const struct kvaser_usb_dev_ops *ops = dev->driver_info->ops; int i; for (i = 0; i < dev->nchannels; i++) { if (!dev->nets[i]) continue; unregister_candev(dev->nets[i]->netdev); } kvaser_usb_unlink_all_urbs(dev); for (i = 0; i < dev->nchannels; i++) { if (!dev->nets[i]) continue; if (ops->dev_remove_channel) ops->dev_remove_channel(dev->nets[i]); free_candev(dev->nets[i]->netdev); } } static int kvaser_usb_init_one(struct kvaser_usb *dev, int channel) { struct net_device *netdev; struct kvaser_usb_net_priv *priv; const struct kvaser_usb_driver_info *driver_info = dev->driver_info; const struct kvaser_usb_dev_ops *ops = driver_info->ops; int err; if (ops->dev_reset_chip) { err = ops->dev_reset_chip(dev, channel); if (err) return err; } netdev = alloc_candev(struct_size(priv, tx_contexts, dev->max_tx_urbs), dev->max_tx_urbs); if (!netdev) { dev_err(&dev->intf->dev, "Cannot alloc candev\n"); return -ENOMEM; } priv = netdev_priv(netdev); init_usb_anchor(&priv->tx_submitted); init_completion(&priv->start_comp); init_completion(&priv->stop_comp); init_completion(&priv->flush_comp); init_completion(&priv->get_busparams_comp); priv->can.ctrlmode_supported = CAN_CTRLMODE_CC_LEN8_DLC; priv->dev = dev; priv->netdev = netdev; priv->channel = channel; spin_lock_init(&priv->tx_contexts_lock); kvaser_usb_reset_tx_urb_contexts(priv); priv->can.state = CAN_STATE_STOPPED; priv->can.clock.freq = dev->cfg->clock.freq; priv->can.bittiming_const = dev->cfg->bittiming_const; priv->can.do_set_bittiming = kvaser_usb_set_bittiming; priv->can.do_set_mode = ops->dev_set_mode; if ((driver_info->quirks & KVASER_USB_QUIRK_HAS_TXRX_ERRORS) || (priv->dev->card_data.capabilities & KVASER_USB_CAP_BERR_CAP)) priv->can.do_get_berr_counter = ops->dev_get_berr_counter; if (driver_info->quirks & KVASER_USB_QUIRK_HAS_SILENT_MODE) priv->can.ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; priv->can.ctrlmode_supported |= dev->card_data.ctrlmode_supported; if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { priv->can.data_bittiming_const = dev->cfg->data_bittiming_const; priv->can.do_set_data_bittiming = kvaser_usb_set_data_bittiming; } netdev->flags |= IFF_ECHO; netdev->netdev_ops = &kvaser_usb_netdev_ops; if (driver_info->quirks & KVASER_USB_QUIRK_HAS_HARDWARE_TIMESTAMP) { netdev->netdev_ops = &kvaser_usb_netdev_ops_hwts; netdev->ethtool_ops 
= &kvaser_usb_ethtool_ops_hwts; } else { netdev->netdev_ops = &kvaser_usb_netdev_ops; netdev->ethtool_ops = &kvaser_usb_ethtool_ops; } SET_NETDEV_DEV(netdev, &dev->intf->dev); netdev->dev_id = channel; dev->nets[channel] = priv; if (ops->dev_init_channel) { err = ops->dev_init_channel(priv); if (err) goto err; } err = register_candev(netdev); if (err) { dev_err(&dev->intf->dev, "Failed to register CAN device\n"); goto err; } netdev_dbg(netdev, "device registered\n"); return 0; err: free_candev(netdev); dev->nets[channel] = NULL; return err; } static int kvaser_usb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct kvaser_usb *dev; int err; int i; const struct kvaser_usb_driver_info *driver_info; const struct kvaser_usb_dev_ops *ops; driver_info = (const struct kvaser_usb_driver_info *)id->driver_info; if (!driver_info) return -ENODEV; dev = devm_kzalloc(&intf->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->intf = intf; dev->driver_info = driver_info; ops = driver_info->ops; err = ops->dev_setup_endpoints(dev); if (err) { dev_err(&intf->dev, "Cannot get usb endpoint(s)"); return err; } dev->udev = interface_to_usbdev(intf); init_usb_anchor(&dev->rx_submitted); usb_set_intfdata(intf, dev); dev->card_data.ctrlmode_supported = 0; dev->card_data.capabilities = 0; err = ops->dev_init_card(dev); if (err) { dev_err(&intf->dev, "Failed to initialize card, error %d\n", err); return err; } err = ops->dev_get_software_info(dev); if (err) { dev_err(&intf->dev, "Cannot get software info, error %d\n", err); return err; } if (ops->dev_get_software_details) { err = ops->dev_get_software_details(dev); if (err) { dev_err(&intf->dev, "Cannot get software details, error %d\n", err); return err; } } if (WARN_ON(!dev->cfg)) return -ENODEV; dev_dbg(&intf->dev, "Firmware version: %d.%d.%d\n", ((dev->fw_version >> 24) & 0xff), ((dev->fw_version >> 16) & 0xff), (dev->fw_version & 0xffff)); dev_dbg(&intf->dev, "Max outstanding tx = %d URBs\n", dev->max_tx_urbs); err = ops->dev_get_card_info(dev); if (err) { dev_err(&intf->dev, "Cannot get card info, error %d\n", err); return err; } if (ops->dev_get_capabilities) { err = ops->dev_get_capabilities(dev); if (err) { dev_err(&intf->dev, "Cannot get capabilities, error %d\n", err); kvaser_usb_remove_interfaces(dev); return err; } } for (i = 0; i < dev->nchannels; i++) { err = kvaser_usb_init_one(dev, i); if (err) { kvaser_usb_remove_interfaces(dev); return err; } } return 0; } static void kvaser_usb_disconnect(struct usb_interface *intf) { struct kvaser_usb *dev = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (!dev) return; kvaser_usb_remove_interfaces(dev); } static struct usb_driver kvaser_usb_driver = { .name = KBUILD_MODNAME, .probe = kvaser_usb_probe, .disconnect = kvaser_usb_disconnect, .id_table = kvaser_usb_table, }; module_usb_driver(kvaser_usb_driver); MODULE_AUTHOR("Olivier Sobrie <[email protected]>"); MODULE_AUTHOR("Kvaser AB <[email protected]>"); MODULE_DESCRIPTION("CAN driver for Kvaser CAN/USB devices"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/can/usb/kvaser_usb/kvaser_usb_core.c
// SPDX-License-Identifier: GPL-2.0 /* Parts of this driver are based on the following: * - Kvaser linux leaf driver (version 4.78) * - CAN driver for esd CAN-USB/2 * - Kvaser linux usbcanII driver (version 5.3) * * Copyright (C) 2002-2018 KVASER AB, Sweden. All rights reserved. * Copyright (C) 2010 Matthias Fuchs <[email protected]>, esd gmbh * Copyright (C) 2012 Olivier Sobrie <[email protected]> * Copyright (C) 2015 Valeo S.A. */ #include <linux/completion.h> #include <linux/device.h> #include <linux/gfp.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/units.h> #include <linux/usb.h> #include <linux/workqueue.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/netlink.h> #include "kvaser_usb.h" #define MAX_USBCAN_NET_DEVICES 2 /* Command header size */ #define CMD_HEADER_LEN 2 /* Kvaser CAN message flags */ #define MSG_FLAG_ERROR_FRAME BIT(0) #define MSG_FLAG_OVERRUN BIT(1) #define MSG_FLAG_NERR BIT(2) #define MSG_FLAG_WAKEUP BIT(3) #define MSG_FLAG_REMOTE_FRAME BIT(4) #define MSG_FLAG_RESERVED BIT(5) #define MSG_FLAG_TX_ACK BIT(6) #define MSG_FLAG_TX_REQUEST BIT(7) /* CAN states (M16C CxSTRH register) */ #define M16C_STATE_BUS_RESET BIT(0) #define M16C_STATE_BUS_ERROR BIT(4) #define M16C_STATE_BUS_PASSIVE BIT(5) #define M16C_STATE_BUS_OFF BIT(6) /* Leaf/usbcan command ids */ #define CMD_RX_STD_MESSAGE 12 #define CMD_TX_STD_MESSAGE 13 #define CMD_RX_EXT_MESSAGE 14 #define CMD_TX_EXT_MESSAGE 15 #define CMD_SET_BUS_PARAMS 16 #define CMD_GET_BUS_PARAMS 17 #define CMD_GET_BUS_PARAMS_REPLY 18 #define CMD_GET_CHIP_STATE 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_CTRL_MODE 21 #define CMD_RESET_CHIP 24 #define CMD_START_CHIP 26 #define CMD_START_CHIP_REPLY 27 #define CMD_STOP_CHIP 28 #define CMD_STOP_CHIP_REPLY 29 #define CMD_USBCAN_CLOCK_OVERFLOW_EVENT 33 #define CMD_GET_CARD_INFO 34 #define CMD_GET_CARD_INFO_REPLY 35 #define CMD_GET_SOFTWARE_INFO 38 #define CMD_GET_SOFTWARE_INFO_REPLY 39 #define CMD_ERROR_EVENT 45 #define CMD_FLUSH_QUEUE 48 #define CMD_TX_ACKNOWLEDGE 50 #define CMD_CAN_ERROR_EVENT 51 #define CMD_FLUSH_QUEUE_REPLY 68 #define CMD_GET_CAPABILITIES_REQ 95 #define CMD_GET_CAPABILITIES_RESP 96 #define CMD_LEAF_LOG_MESSAGE 106 /* Leaf frequency options */ #define KVASER_USB_LEAF_SWOPTION_FREQ_MASK 0x60 #define KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK 0 #define KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK BIT(5) #define KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK BIT(6) #define KVASER_USB_LEAF_SWOPTION_EXT_CAP BIT(12) /* error factors */ #define M16C_EF_ACKE BIT(0) #define M16C_EF_CRCE BIT(1) #define M16C_EF_FORME BIT(2) #define M16C_EF_STFE BIT(3) #define M16C_EF_BITE0 BIT(4) #define M16C_EF_BITE1 BIT(5) #define M16C_EF_RCVE BIT(6) #define M16C_EF_TRE BIT(7) /* Only Leaf-based devices can report M16C error factors, * thus define our own error status flags for USBCANII */ #define USBCAN_ERROR_STATE_NONE 0 #define USBCAN_ERROR_STATE_TX_ERROR BIT(0) #define USBCAN_ERROR_STATE_RX_ERROR BIT(1) #define USBCAN_ERROR_STATE_BUSERROR BIT(2) /* ctrl modes */ #define KVASER_CTRL_MODE_NORMAL 1 #define KVASER_CTRL_MODE_SILENT 2 #define KVASER_CTRL_MODE_SELFRECEPTION 3 #define KVASER_CTRL_MODE_OFF 4 /* Extended CAN identifier flag */ #define KVASER_EXTENDED_FRAME BIT(31) struct kvaser_cmd_simple { u8 tid; u8 channel; } __packed; struct kvaser_cmd_cardinfo { u8 tid; u8 nchannels; __le32 serial_number; __le32 padding0; 
__le32 clock_resolution; __le32 mfgdate; u8 ean[8]; u8 hw_revision; union { struct { u8 usb_hs_mode; } __packed leaf1; struct { u8 padding; } __packed usbcan1; } __packed; __le16 padding1; } __packed; struct leaf_cmd_softinfo { u8 tid; u8 padding0; __le32 sw_options; __le32 fw_version; __le16 max_outstanding_tx; __le16 padding1[9]; } __packed; struct usbcan_cmd_softinfo { u8 tid; u8 fw_name[5]; __le16 max_outstanding_tx; u8 padding[6]; __le32 fw_version; __le16 checksum; __le16 sw_options; } __packed; struct kvaser_cmd_busparams { u8 tid; u8 channel; struct kvaser_usb_busparams busparams; } __packed; struct kvaser_cmd_tx_can { u8 channel; u8 tid; u8 data[14]; union { struct { u8 padding; u8 flags; } __packed leaf; struct { u8 flags; u8 padding; } __packed usbcan; } __packed; } __packed; struct kvaser_cmd_rx_can_header { u8 channel; u8 flag; } __packed; struct leaf_cmd_rx_can { u8 channel; u8 flag; __le16 time[3]; u8 data[14]; } __packed; struct usbcan_cmd_rx_can { u8 channel; u8 flag; u8 data[14]; __le16 time; } __packed; struct leaf_cmd_chip_state_event { u8 tid; u8 channel; __le16 time[3]; u8 tx_errors_count; u8 rx_errors_count; u8 status; u8 padding[3]; } __packed; struct usbcan_cmd_chip_state_event { u8 tid; u8 channel; u8 tx_errors_count; u8 rx_errors_count; __le16 time; u8 status; u8 padding[3]; } __packed; struct kvaser_cmd_tx_acknowledge_header { u8 channel; u8 tid; } __packed; struct leaf_cmd_can_error_event { u8 tid; u8 flags; __le16 time[3]; u8 channel; u8 padding; u8 tx_errors_count; u8 rx_errors_count; u8 status; u8 error_factor; } __packed; struct usbcan_cmd_can_error_event { u8 tid; u8 padding; u8 tx_errors_count_ch0; u8 rx_errors_count_ch0; u8 tx_errors_count_ch1; u8 rx_errors_count_ch1; u8 status_ch0; u8 status_ch1; __le16 time; } __packed; /* CMD_ERROR_EVENT error codes */ #define KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL 0x8 #define KVASER_USB_LEAF_ERROR_EVENT_PARAM 0x9 struct leaf_cmd_error_event { u8 tid; u8 error_code; __le16 timestamp[3]; __le16 padding; __le16 info1; __le16 info2; } __packed; struct usbcan_cmd_error_event { u8 tid; u8 error_code; __le16 info1; __le16 info2; __le16 timestamp; __le16 padding; } __packed; struct kvaser_cmd_ctrl_mode { u8 tid; u8 channel; u8 ctrl_mode; u8 padding[3]; } __packed; struct kvaser_cmd_flush_queue { u8 tid; u8 channel; u8 flags; u8 padding[3]; } __packed; struct leaf_cmd_log_message { u8 channel; u8 flags; __le16 time[3]; u8 dlc; u8 time_offset; __le32 id; u8 data[8]; } __packed; /* Sub commands for cap_req and cap_res */ #define KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE 0x02 #define KVASER_USB_LEAF_CAP_CMD_ERR_REPORT 0x05 struct kvaser_cmd_cap_req { __le16 padding0; __le16 cap_cmd; __le16 padding1; __le16 channel; } __packed; /* Status codes for cap_res */ #define KVASER_USB_LEAF_CAP_STAT_OK 0x00 #define KVASER_USB_LEAF_CAP_STAT_NOT_IMPL 0x01 #define KVASER_USB_LEAF_CAP_STAT_UNAVAIL 0x02 struct kvaser_cmd_cap_res { __le16 padding; __le16 cap_cmd; __le16 status; __le32 mask; __le32 value; } __packed; struct kvaser_cmd { u8 len; u8 id; union { struct kvaser_cmd_simple simple; struct kvaser_cmd_cardinfo cardinfo; struct kvaser_cmd_busparams busparams; struct kvaser_cmd_rx_can_header rx_can_header; struct kvaser_cmd_tx_acknowledge_header tx_acknowledge_header; union { struct leaf_cmd_softinfo softinfo; struct leaf_cmd_rx_can rx_can; struct leaf_cmd_chip_state_event chip_state_event; struct leaf_cmd_can_error_event can_error_event; struct leaf_cmd_log_message log_message; struct leaf_cmd_error_event error_event; struct 
kvaser_cmd_cap_req cap_req; struct kvaser_cmd_cap_res cap_res; } __packed leaf; union { struct usbcan_cmd_softinfo softinfo; struct usbcan_cmd_rx_can rx_can; struct usbcan_cmd_chip_state_event chip_state_event; struct usbcan_cmd_can_error_event can_error_event; struct usbcan_cmd_error_event error_event; } __packed usbcan; struct kvaser_cmd_tx_can tx_can; struct kvaser_cmd_ctrl_mode ctrl_mode; struct kvaser_cmd_flush_queue flush_queue; } u; } __packed; #define CMD_SIZE_ANY 0xff #define kvaser_fsize(field) sizeof_field(struct kvaser_cmd, field) static const u8 kvaser_usb_leaf_cmd_sizes_leaf[] = { [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header), [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.leaf.softinfo), [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.leaf.rx_can), [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.leaf.rx_can), [CMD_LEAF_LOG_MESSAGE] = kvaser_fsize(u.leaf.log_message), [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.leaf.chip_state_event), [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.leaf.can_error_event), [CMD_GET_CAPABILITIES_RESP] = kvaser_fsize(u.leaf.cap_res), [CMD_GET_BUS_PARAMS_REPLY] = kvaser_fsize(u.busparams), [CMD_ERROR_EVENT] = kvaser_fsize(u.leaf.error_event), /* ignored events: */ [CMD_FLUSH_QUEUE_REPLY] = CMD_SIZE_ANY, }; static const u8 kvaser_usb_leaf_cmd_sizes_usbcan[] = { [CMD_START_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_STOP_CHIP_REPLY] = kvaser_fsize(u.simple), [CMD_GET_CARD_INFO_REPLY] = kvaser_fsize(u.cardinfo), [CMD_TX_ACKNOWLEDGE] = kvaser_fsize(u.tx_acknowledge_header), [CMD_GET_SOFTWARE_INFO_REPLY] = kvaser_fsize(u.usbcan.softinfo), [CMD_RX_STD_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), [CMD_RX_EXT_MESSAGE] = kvaser_fsize(u.usbcan.rx_can), [CMD_CHIP_STATE_EVENT] = kvaser_fsize(u.usbcan.chip_state_event), [CMD_CAN_ERROR_EVENT] = kvaser_fsize(u.usbcan.can_error_event), [CMD_ERROR_EVENT] = kvaser_fsize(u.usbcan.error_event), /* ignored events: */ [CMD_USBCAN_CLOCK_OVERFLOW_EVENT] = CMD_SIZE_ANY, }; /* Summary of a kvaser error event, for a unified Leaf/Usbcan error * handling. Some discrepancies between the two families exist: * * - USBCAN firmware does not report M16C "error factors" * - USBCAN controllers has difficulties reporting if the raised error * event is for ch0 or ch1. They leave such arbitration to the OS * driver by letting it compare error counters with previous values * and decide the error event's channel. Thus for USBCAN, the channel * field is only advisory. 
*/ struct kvaser_usb_err_summary { u8 channel, status, txerr, rxerr; union { struct { u8 error_factor; } leaf; struct { u8 other_ch_status; u8 error_state; } usbcan; }; }; struct kvaser_usb_net_leaf_priv { struct kvaser_usb_net_priv *net; struct delayed_work chip_state_req_work; /* started but not reported as bus-on yet */ bool joining_bus; }; static const struct can_bittiming_const kvaser_usb_leaf_m16c_bittiming_const = { .name = "kvaser_usb_ucii", .tseg1_min = 4, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 16, .brp_inc = 1, }; static const struct can_bittiming_const kvaser_usb_leaf_m32c_bittiming_const = { .name = "kvaser_usb_leaf", .tseg1_min = 3, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 2, .brp_max = 128, .brp_inc = 2, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_usbcan_dev_cfg = { .clock = { .freq = 8 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_leaf_m16c_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_m32c_dev_cfg = { .clock = { .freq = 16 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_leaf_m32c_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_16mhz = { .clock = { .freq = 16 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_24mhz = { .clock = { .freq = 24 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_leaf_imx_dev_cfg_32mhz = { .clock = { .freq = 32 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; static int kvaser_usb_leaf_verify_size(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { /* buffer size >= cmd->len ensured by caller */ u8 min_size = 0; switch (dev->driver_info->family) { case KVASER_LEAF: if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_leaf)) min_size = kvaser_usb_leaf_cmd_sizes_leaf[cmd->id]; break; case KVASER_USBCAN: if (cmd->id < ARRAY_SIZE(kvaser_usb_leaf_cmd_sizes_usbcan)) min_size = kvaser_usb_leaf_cmd_sizes_usbcan[cmd->id]; break; } if (min_size == CMD_SIZE_ANY) return 0; if (min_size) { min_size += CMD_HEADER_LEN; if (cmd->len >= min_size) return 0; dev_err_ratelimited(&dev->intf->dev, "Received command %u too short (size %u, needed %u)", cmd->id, cmd->len, min_size); return -EIO; } dev_warn_ratelimited(&dev->intf->dev, "Unhandled command (%d, size %d)\n", cmd->id, cmd->len); return -EINVAL; } static void * kvaser_usb_leaf_frame_to_cmd(const struct kvaser_usb_net_priv *priv, const struct sk_buff *skb, int *cmd_len, u16 transid) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; u8 *cmd_tx_can_flags = NULL; /* GCC */ struct can_frame *cf = (struct can_frame *)skb->data; cmd = kmalloc(sizeof(*cmd), GFP_ATOMIC); if (cmd) { cmd->u.tx_can.tid = transid & 0xff; cmd->len = *cmd_len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_tx_can); cmd->u.tx_can.channel = priv->channel; switch (dev->driver_info->family) { case KVASER_LEAF: cmd_tx_can_flags = &cmd->u.tx_can.leaf.flags; break; case KVASER_USBCAN: cmd_tx_can_flags = &cmd->u.tx_can.usbcan.flags; break; } *cmd_tx_can_flags = 0; if (cf->can_id & CAN_EFF_FLAG) { cmd->id = CMD_TX_EXT_MESSAGE; cmd->u.tx_can.data[0] = (cf->can_id >> 24) & 0x1f; cmd->u.tx_can.data[1] = (cf->can_id >> 18) & 0x3f; cmd->u.tx_can.data[2] = (cf->can_id >> 14) & 
0x0f; cmd->u.tx_can.data[3] = (cf->can_id >> 6) & 0xff; cmd->u.tx_can.data[4] = cf->can_id & 0x3f; } else { cmd->id = CMD_TX_STD_MESSAGE; cmd->u.tx_can.data[0] = (cf->can_id >> 6) & 0x1f; cmd->u.tx_can.data[1] = cf->can_id & 0x3f; } cmd->u.tx_can.data[5] = can_get_cc_dlc(cf, priv->can.ctrlmode); memcpy(&cmd->u.tx_can.data[6], cf->data, cf->len); if (cf->can_id & CAN_RTR_FLAG) *cmd_tx_can_flags |= MSG_FLAG_REMOTE_FRAME; } return cmd; } static int kvaser_usb_leaf_wait_cmd(const struct kvaser_usb *dev, u8 id, struct kvaser_cmd *cmd) { struct kvaser_cmd *tmp; void *buf; int actual_len; int err; int pos; unsigned long to = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT); buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; do { err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE, &actual_len); if (err < 0) goto end; pos = 0; while (pos <= actual_len - CMD_HEADER_LEN) { tmp = buf + pos; /* Handle commands crossing the USB endpoint max packet * size boundary. Check kvaser_usb_read_bulk_callback() * for further details. */ if (tmp->len == 0) { pos = round_up(pos, le16_to_cpu (dev->bulk_in->wMaxPacketSize)); continue; } if (pos + tmp->len > actual_len) { dev_err_ratelimited(&dev->intf->dev, "Format error\n"); break; } if (tmp->id == id) { memcpy(cmd, tmp, tmp->len); goto end; } pos += tmp->len; } } while (time_before(jiffies, to)); err = -EINVAL; end: kfree(buf); if (err == 0) err = kvaser_usb_leaf_verify_size(dev, cmd); return err; } static int kvaser_usb_leaf_send_simple_cmd(const struct kvaser_usb *dev, u8 cmd_id, int channel) { struct kvaser_cmd *cmd; int rc; cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->id = cmd_id; cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); cmd->u.simple.channel = channel; cmd->u.simple.tid = 0xff; rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); kfree(cmd); return rc; } static void kvaser_usb_leaf_get_software_info_leaf(struct kvaser_usb *dev, const struct leaf_cmd_softinfo *softinfo) { u32 sw_options = le32_to_cpu(softinfo->sw_options); dev->fw_version = le32_to_cpu(softinfo->fw_version); dev->max_tx_urbs = le16_to_cpu(softinfo->max_outstanding_tx); if (sw_options & KVASER_USB_LEAF_SWOPTION_EXT_CAP) dev->card_data.capabilities |= KVASER_USB_CAP_EXT_CAP; if (dev->driver_info->quirks & KVASER_USB_QUIRK_IGNORE_CLK_FREQ) { /* Firmware expects bittiming parameters calculated for 16MHz * clock, regardless of the actual clock */ dev->cfg = &kvaser_usb_leaf_m32c_dev_cfg; } else { switch (sw_options & KVASER_USB_LEAF_SWOPTION_FREQ_MASK) { case KVASER_USB_LEAF_SWOPTION_FREQ_16_MHZ_CLK: dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_16mhz; break; case KVASER_USB_LEAF_SWOPTION_FREQ_24_MHZ_CLK: dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_24mhz; break; case KVASER_USB_LEAF_SWOPTION_FREQ_32_MHZ_CLK: dev->cfg = &kvaser_usb_leaf_imx_dev_cfg_32mhz; break; } } } static int kvaser_usb_leaf_get_software_info_inner(struct kvaser_usb *dev) { struct kvaser_cmd cmd; int err; err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO, 0); if (err) return err; err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_REPLY, &cmd); if (err) return err; switch (dev->driver_info->family) { case KVASER_LEAF: kvaser_usb_leaf_get_software_info_leaf(dev, &cmd.u.leaf.softinfo); break; case KVASER_USBCAN: dev->fw_version = le32_to_cpu(cmd.u.usbcan.softinfo.fw_version); dev->max_tx_urbs = le16_to_cpu(cmd.u.usbcan.softinfo.max_outstanding_tx); dev->cfg = &kvaser_usb_leaf_usbcan_dev_cfg; break; } return 0; } static int 
kvaser_usb_leaf_get_software_info(struct kvaser_usb *dev) { int err; int retry = 3; /* On some x86 laptops, plugging a Kvaser device again after * an unplug makes the firmware always ignore the very first * command. For such a case, provide some room for retries * instead of completely exiting the driver. */ do { err = kvaser_usb_leaf_get_software_info_inner(dev); } while (--retry && err == -ETIMEDOUT); return err; } static int kvaser_usb_leaf_get_card_info(struct kvaser_usb *dev) { struct kvaser_cmd cmd; int err; err = kvaser_usb_leaf_send_simple_cmd(dev, CMD_GET_CARD_INFO, 0); if (err) return err; err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CARD_INFO_REPLY, &cmd); if (err) return err; dev->nchannels = cmd.u.cardinfo.nchannels; if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES || (dev->driver_info->family == KVASER_USBCAN && dev->nchannels > MAX_USBCAN_NET_DEVICES)) return -EINVAL; return 0; } static int kvaser_usb_leaf_get_single_capability(struct kvaser_usb *dev, u16 cap_cmd_req, u16 *status) { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; struct kvaser_cmd *cmd; u32 value = 0; u32 mask = 0; u16 cap_cmd_res; int err; int i; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->id = CMD_GET_CAPABILITIES_REQ; cmd->u.leaf.cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_cap_req); err = kvaser_usb_send_cmd(dev, cmd, cmd->len); if (err) goto end; err = kvaser_usb_leaf_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd); if (err) goto end; *status = le16_to_cpu(cmd->u.leaf.cap_res.status); if (*status != KVASER_USB_LEAF_CAP_STAT_OK) goto end; cap_cmd_res = le16_to_cpu(cmd->u.leaf.cap_res.cap_cmd); switch (cap_cmd_res) { case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: value = le32_to_cpu(cmd->u.leaf.cap_res.value); mask = le32_to_cpu(cmd->u.leaf.cap_res.mask); break; default: dev_warn(&dev->intf->dev, "Unknown capability command %u\n", cap_cmd_res); break; } for (i = 0; i < dev->nchannels; i++) { if (BIT(i) & (value & mask)) { switch (cap_cmd_res) { case KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE: card_data->ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; break; case KVASER_USB_LEAF_CAP_CMD_ERR_REPORT: card_data->capabilities |= KVASER_USB_CAP_BERR_CAP; break; } } } end: kfree(cmd); return err; } static int kvaser_usb_leaf_get_capabilities_leaf(struct kvaser_usb *dev) { int err; u16 status; if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) { dev_info(&dev->intf->dev, "No extended capability support. 
Upgrade device firmware.\n"); return 0; } err = kvaser_usb_leaf_get_single_capability(dev, KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE, &status); if (err) return err; if (status) dev_info(&dev->intf->dev, "KVASER_USB_LEAF_CAP_CMD_LISTEN_MODE failed %u\n", status); err = kvaser_usb_leaf_get_single_capability(dev, KVASER_USB_LEAF_CAP_CMD_ERR_REPORT, &status); if (err) return err; if (status) dev_info(&dev->intf->dev, "KVASER_USB_LEAF_CAP_CMD_ERR_REPORT failed %u\n", status); return 0; } static int kvaser_usb_leaf_get_capabilities(struct kvaser_usb *dev) { int err = 0; if (dev->driver_info->family == KVASER_LEAF) err = kvaser_usb_leaf_get_capabilities_leaf(dev); return err; } static void kvaser_usb_leaf_tx_acknowledge(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct net_device_stats *stats; struct kvaser_usb_tx_urb_context *context; struct kvaser_usb_net_priv *priv; unsigned long flags; u8 channel, tid; channel = cmd->u.tx_acknowledge_header.channel; tid = cmd->u.tx_acknowledge_header.tid; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; if (!netif_device_present(priv->netdev)) return; stats = &priv->netdev->stats; context = &priv->tx_contexts[tid % dev->max_tx_urbs]; /* Sometimes the state change doesn't come after a bus-off event */ if (priv->can.restart_ms && priv->can.state == CAN_STATE_BUS_OFF) { struct sk_buff *skb; struct can_frame *cf; skb = alloc_can_err_skb(priv->netdev, &cf); if (skb) { cf->can_id |= CAN_ERR_RESTARTED; netif_rx(skb); } else { netdev_err(priv->netdev, "No memory left for err_skb\n"); } priv->can.can_stats.restarts++; netif_carrier_on(priv->netdev); priv->can.state = CAN_STATE_ERROR_ACTIVE; } spin_lock_irqsave(&priv->tx_contexts_lock, flags); stats->tx_packets++; stats->tx_bytes += can_get_echo_skb(priv->netdev, context->echo_index, NULL); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(priv->netdev); spin_unlock_irqrestore(&priv->tx_contexts_lock, flags); } static int kvaser_usb_leaf_simple_cmd_async(struct kvaser_usb_net_priv *priv, u8 cmd_id) { struct kvaser_cmd *cmd; int err; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_simple); cmd->id = cmd_id; cmd->u.simple.channel = priv->channel; err = kvaser_usb_send_cmd_async(priv, cmd, cmd->len); if (err) kfree(cmd); return err; } static void kvaser_usb_leaf_chip_state_req_work(struct work_struct *work) { struct kvaser_usb_net_leaf_priv *leaf = container_of(work, struct kvaser_usb_net_leaf_priv, chip_state_req_work.work); struct kvaser_usb_net_priv *priv = leaf->net; kvaser_usb_leaf_simple_cmd_async(priv, CMD_GET_CHIP_STATE); } static void kvaser_usb_leaf_rx_error_update_can_state(struct kvaser_usb_net_priv *priv, const struct kvaser_usb_err_summary *es, struct can_frame *cf) { struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; struct kvaser_usb *dev = priv->dev; struct net_device_stats *stats = &priv->netdev->stats; enum can_state cur_state, new_state, tx_state, rx_state; netdev_dbg(priv->netdev, "Error status: 0x%02x\n", es->status); new_state = priv->can.state; cur_state = priv->can.state; if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { new_state = CAN_STATE_BUS_OFF; } else if (es->status & M16C_STATE_BUS_PASSIVE) { new_state = CAN_STATE_ERROR_PASSIVE; } else if ((es->status & M16C_STATE_BUS_ERROR) && cur_state >= CAN_STATE_BUS_OFF) { /* Guard against spurious error events after a busoff 
*/ } else if (es->txerr >= 128 || es->rxerr >= 128) { new_state = CAN_STATE_ERROR_PASSIVE; } else if (es->txerr >= 96 || es->rxerr >= 96) { new_state = CAN_STATE_ERROR_WARNING; } else { new_state = CAN_STATE_ERROR_ACTIVE; } /* 0bfd:0124 FW 4.18.778 was observed to send the initial * CMD_CHIP_STATE_EVENT after CMD_START_CHIP with M16C_STATE_BUS_OFF * bit set if the channel was bus-off when it was last stopped (even * across chip resets). This bit will clear shortly afterwards, without * triggering a second unsolicited chip state event. * Ignore this initial bus-off. */ if (leaf->joining_bus) { if (new_state == CAN_STATE_BUS_OFF) { netdev_dbg(priv->netdev, "ignoring bus-off during startup"); new_state = cur_state; } else { leaf->joining_bus = false; } } if (new_state != cur_state) { tx_state = (es->txerr >= es->rxerr) ? new_state : 0; rx_state = (es->txerr <= es->rxerr) ? new_state : 0; can_change_state(priv->netdev, cf, tx_state, rx_state); } if (priv->can.restart_ms && cur_state == CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) priv->can.can_stats.restarts++; switch (dev->driver_info->family) { case KVASER_LEAF: if (es->leaf.error_factor) { priv->can.can_stats.bus_error++; stats->rx_errors++; } break; case KVASER_USBCAN: if (es->usbcan.error_state & USBCAN_ERROR_STATE_TX_ERROR) stats->tx_errors++; if (es->usbcan.error_state & USBCAN_ERROR_STATE_RX_ERROR) stats->rx_errors++; if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) priv->can.can_stats.bus_error++; break; } priv->bec.txerr = es->txerr; priv->bec.rxerr = es->rxerr; } static void kvaser_usb_leaf_rx_error(const struct kvaser_usb *dev, const struct kvaser_usb_err_summary *es) { struct can_frame *cf; struct can_frame tmp_cf = { .can_id = CAN_ERR_FLAG, .len = CAN_ERR_DLC }; struct sk_buff *skb; struct net_device_stats *stats; struct kvaser_usb_net_priv *priv; struct kvaser_usb_net_leaf_priv *leaf; enum can_state old_state, new_state; if (es->channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", es->channel); return; } priv = dev->nets[es->channel]; leaf = priv->sub_priv; stats = &priv->netdev->stats; /* Ignore e.g. state change to bus-off reported just after stopping */ if (!netif_running(priv->netdev)) return; /* Update all of the CAN interface's state and error counters before * trying any memory allocation that can actually fail with -ENOMEM. * * We send a temporary stack-allocated error CAN frame to * can_change_state() for the very same reason. * * TODO: Split can_change_state() responsibility between updating the * CAN interface's state and counters, and the setting up of CAN error * frame ID and data to userspace. Remove stack allocation afterwards. */ old_state = priv->can.state; kvaser_usb_leaf_rx_error_update_can_state(priv, es, &tmp_cf); new_state = priv->can.state; /* If there are errors, request status updates periodically as we do * not get automatic notifications of improved state. * Also request updates if we saw a stale BUS_OFF during startup * (joining_bus). 
*/ if (new_state < CAN_STATE_BUS_OFF && (es->rxerr || es->txerr || new_state == CAN_STATE_ERROR_PASSIVE || leaf->joining_bus)) schedule_delayed_work(&leaf->chip_state_req_work, msecs_to_jiffies(500)); skb = alloc_can_err_skb(priv->netdev, &cf); if (!skb) { stats->rx_dropped++; return; } memcpy(cf, &tmp_cf, sizeof(*cf)); if (new_state != old_state) { if (es->status & (M16C_STATE_BUS_OFF | M16C_STATE_BUS_RESET)) { if (!priv->can.restart_ms) kvaser_usb_leaf_simple_cmd_async(priv, CMD_STOP_CHIP); netif_carrier_off(priv->netdev); } if (priv->can.restart_ms && old_state == CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_RESTARTED; netif_carrier_on(priv->netdev); } } switch (dev->driver_info->family) { case KVASER_LEAF: if (es->leaf.error_factor) { cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT; if (es->leaf.error_factor & M16C_EF_ACKE) cf->data[3] = CAN_ERR_PROT_LOC_ACK; if (es->leaf.error_factor & M16C_EF_CRCE) cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; if (es->leaf.error_factor & M16C_EF_FORME) cf->data[2] |= CAN_ERR_PROT_FORM; if (es->leaf.error_factor & M16C_EF_STFE) cf->data[2] |= CAN_ERR_PROT_STUFF; if (es->leaf.error_factor & M16C_EF_BITE0) cf->data[2] |= CAN_ERR_PROT_BIT0; if (es->leaf.error_factor & M16C_EF_BITE1) cf->data[2] |= CAN_ERR_PROT_BIT1; if (es->leaf.error_factor & M16C_EF_TRE) cf->data[2] |= CAN_ERR_PROT_TX; } break; case KVASER_USBCAN: if (es->usbcan.error_state & USBCAN_ERROR_STATE_BUSERROR) cf->can_id |= CAN_ERR_BUSERROR; break; } if (new_state != CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = es->txerr; cf->data[7] = es->rxerr; } netif_rx(skb); } /* For USBCAN, report error to userspace if the channels's errors counter * has changed, or we're the only channel seeing a bus error state. */ static void kvaser_usb_leaf_usbcan_conditionally_rx_error(const struct kvaser_usb *dev, struct kvaser_usb_err_summary *es) { struct kvaser_usb_net_priv *priv; unsigned int channel; bool report_error; channel = es->channel; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; report_error = false; if (es->txerr != priv->bec.txerr) { es->usbcan.error_state |= USBCAN_ERROR_STATE_TX_ERROR; report_error = true; } if (es->rxerr != priv->bec.rxerr) { es->usbcan.error_state |= USBCAN_ERROR_STATE_RX_ERROR; report_error = true; } if ((es->status & M16C_STATE_BUS_ERROR) && !(es->usbcan.other_ch_status & M16C_STATE_BUS_ERROR)) { es->usbcan.error_state |= USBCAN_ERROR_STATE_BUSERROR; report_error = true; } if (report_error) kvaser_usb_leaf_rx_error(dev, es); } static void kvaser_usb_leaf_usbcan_rx_error(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_err_summary es = { }; switch (cmd->id) { /* Sometimes errors are sent as unsolicited chip state events */ case CMD_CHIP_STATE_EVENT: es.channel = cmd->u.usbcan.chip_state_event.channel; es.status = cmd->u.usbcan.chip_state_event.status; es.txerr = cmd->u.usbcan.chip_state_event.tx_errors_count; es.rxerr = cmd->u.usbcan.chip_state_event.rx_errors_count; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); break; case CMD_CAN_ERROR_EVENT: es.channel = 0; es.status = cmd->u.usbcan.can_error_event.status_ch0; es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch0; es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch0; es.usbcan.other_ch_status = cmd->u.usbcan.can_error_event.status_ch1; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); /* The USBCAN firmware supports up to 2 
channels. * Now that ch0 was checked, check if ch1 has any errors. */ if (dev->nchannels == MAX_USBCAN_NET_DEVICES) { es.channel = 1; es.status = cmd->u.usbcan.can_error_event.status_ch1; es.txerr = cmd->u.usbcan.can_error_event.tx_errors_count_ch1; es.rxerr = cmd->u.usbcan.can_error_event.rx_errors_count_ch1; es.usbcan.other_ch_status = cmd->u.usbcan.can_error_event.status_ch0; kvaser_usb_leaf_usbcan_conditionally_rx_error(dev, &es); } break; default: dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); } } static void kvaser_usb_leaf_leaf_rx_error(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_err_summary es = { }; switch (cmd->id) { case CMD_CAN_ERROR_EVENT: es.channel = cmd->u.leaf.can_error_event.channel; es.status = cmd->u.leaf.can_error_event.status; es.txerr = cmd->u.leaf.can_error_event.tx_errors_count; es.rxerr = cmd->u.leaf.can_error_event.rx_errors_count; es.leaf.error_factor = cmd->u.leaf.can_error_event.error_factor; break; case CMD_LEAF_LOG_MESSAGE: es.channel = cmd->u.leaf.log_message.channel; es.status = cmd->u.leaf.log_message.data[0]; es.txerr = cmd->u.leaf.log_message.data[2]; es.rxerr = cmd->u.leaf.log_message.data[3]; es.leaf.error_factor = cmd->u.leaf.log_message.data[1]; break; case CMD_CHIP_STATE_EVENT: es.channel = cmd->u.leaf.chip_state_event.channel; es.status = cmd->u.leaf.chip_state_event.status; es.txerr = cmd->u.leaf.chip_state_event.tx_errors_count; es.rxerr = cmd->u.leaf.chip_state_event.rx_errors_count; es.leaf.error_factor = 0; break; default: dev_err(&dev->intf->dev, "Invalid cmd id (%d)\n", cmd->id); return; } kvaser_usb_leaf_rx_error(dev, &es); } static void kvaser_usb_leaf_rx_can_err(const struct kvaser_usb_net_priv *priv, const struct kvaser_cmd *cmd) { if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR)) { struct net_device_stats *stats = &priv->netdev->stats; netdev_err(priv->netdev, "Unknown error (flags: 0x%02x)\n", cmd->u.rx_can_header.flag); stats->rx_errors++; return; } if (cmd->u.rx_can_header.flag & MSG_FLAG_OVERRUN) kvaser_usb_can_rx_over_error(priv->netdev); } static void kvaser_usb_leaf_rx_can_msg(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; struct can_frame *cf; struct sk_buff *skb; struct net_device_stats *stats; u8 channel = cmd->u.rx_can_header.channel; const u8 *rx_data = NULL; /* GCC */ if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; stats = &priv->netdev->stats; if ((cmd->u.rx_can_header.flag & MSG_FLAG_ERROR_FRAME) && (dev->driver_info->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE)) { kvaser_usb_leaf_leaf_rx_error(dev, cmd); return; } else if (cmd->u.rx_can_header.flag & (MSG_FLAG_ERROR_FRAME | MSG_FLAG_NERR | MSG_FLAG_OVERRUN)) { kvaser_usb_leaf_rx_can_err(priv, cmd); return; } else if (cmd->u.rx_can_header.flag & ~MSG_FLAG_REMOTE_FRAME) { netdev_warn(priv->netdev, "Unhandled frame (flags: 0x%02x)\n", cmd->u.rx_can_header.flag); return; } switch (dev->driver_info->family) { case KVASER_LEAF: rx_data = cmd->u.leaf.rx_can.data; break; case KVASER_USBCAN: rx_data = cmd->u.usbcan.rx_can.data; break; } skb = alloc_can_skb(priv->netdev, &cf); if (!skb) { stats->rx_dropped++; return; } if (dev->driver_info->family == KVASER_LEAF && cmd->id == CMD_LEAF_LOG_MESSAGE) { cf->can_id = le32_to_cpu(cmd->u.leaf.log_message.id); if (cf->can_id & KVASER_EXTENDED_FRAME) cf->can_id &= CAN_EFF_MASK | CAN_EFF_FLAG; else cf->can_id &= 
CAN_SFF_MASK; can_frame_set_cc_len(cf, cmd->u.leaf.log_message.dlc & 0xF, priv->can.ctrlmode); if (cmd->u.leaf.log_message.flags & MSG_FLAG_REMOTE_FRAME) cf->can_id |= CAN_RTR_FLAG; else memcpy(cf->data, &cmd->u.leaf.log_message.data, cf->len); } else { cf->can_id = ((rx_data[0] & 0x1f) << 6) | (rx_data[1] & 0x3f); if (cmd->id == CMD_RX_EXT_MESSAGE) { cf->can_id <<= 18; cf->can_id |= ((rx_data[2] & 0x0f) << 14) | ((rx_data[3] & 0xff) << 6) | (rx_data[4] & 0x3f); cf->can_id |= CAN_EFF_FLAG; } can_frame_set_cc_len(cf, rx_data[5] & 0xF, priv->can.ctrlmode); if (cmd->u.rx_can_header.flag & MSG_FLAG_REMOTE_FRAME) cf->can_id |= CAN_RTR_FLAG; else memcpy(cf->data, &rx_data[6], cf->len); } stats->rx_packets++; if (!(cf->can_id & CAN_RTR_FLAG)) stats->rx_bytes += cf->len; netif_rx(skb); } static void kvaser_usb_leaf_error_event_parameter(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { u16 info1 = 0; switch (dev->driver_info->family) { case KVASER_LEAF: info1 = le16_to_cpu(cmd->u.leaf.error_event.info1); break; case KVASER_USBCAN: info1 = le16_to_cpu(cmd->u.usbcan.error_event.info1); break; } /* info1 will contain the offending cmd_no */ switch (info1) { case CMD_SET_CTRL_MODE: dev_warn(&dev->intf->dev, "CMD_SET_CTRL_MODE error in parameter\n"); break; case CMD_SET_BUS_PARAMS: dev_warn(&dev->intf->dev, "CMD_SET_BUS_PARAMS error in parameter\n"); break; default: dev_warn(&dev->intf->dev, "Unhandled parameter error event cmd_no (%u)\n", info1); break; } } static void kvaser_usb_leaf_error_event(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { u8 error_code = 0; switch (dev->driver_info->family) { case KVASER_LEAF: error_code = cmd->u.leaf.error_event.error_code; break; case KVASER_USBCAN: error_code = cmd->u.usbcan.error_event.error_code; break; } switch (error_code) { case KVASER_USB_LEAF_ERROR_EVENT_TX_QUEUE_FULL: /* Received additional CAN message, when firmware TX queue is * already full. Something is wrong with the driver. * This should never happen! 
*/ dev_err(&dev->intf->dev, "Received error event TX_QUEUE_FULL\n"); break; case KVASER_USB_LEAF_ERROR_EVENT_PARAM: kvaser_usb_leaf_error_event_parameter(dev, cmd); break; default: dev_warn(&dev->intf->dev, "Unhandled error event (%d)\n", error_code); break; } } static void kvaser_usb_leaf_start_chip_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; u8 channel = cmd->u.simple.channel; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; if (completion_done(&priv->start_comp) && netif_queue_stopped(priv->netdev)) { netif_wake_queue(priv->netdev); } else { netif_start_queue(priv->netdev); complete(&priv->start_comp); } } static void kvaser_usb_leaf_stop_chip_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; u8 channel = cmd->u.simple.channel; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; complete(&priv->stop_comp); } static void kvaser_usb_leaf_get_busparams_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; u8 channel = cmd->u.busparams.channel; if (channel >= dev->nchannels) { dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); return; } priv = dev->nets[channel]; memcpy(&priv->busparams_nominal, &cmd->u.busparams.busparams, sizeof(priv->busparams_nominal)); complete(&priv->get_busparams_comp); } static void kvaser_usb_leaf_handle_command(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { if (kvaser_usb_leaf_verify_size(dev, cmd) < 0) return; switch (cmd->id) { case CMD_START_CHIP_REPLY: kvaser_usb_leaf_start_chip_reply(dev, cmd); break; case CMD_STOP_CHIP_REPLY: kvaser_usb_leaf_stop_chip_reply(dev, cmd); break; case CMD_RX_STD_MESSAGE: case CMD_RX_EXT_MESSAGE: kvaser_usb_leaf_rx_can_msg(dev, cmd); break; case CMD_LEAF_LOG_MESSAGE: if (dev->driver_info->family != KVASER_LEAF) goto warn; kvaser_usb_leaf_rx_can_msg(dev, cmd); break; case CMD_CHIP_STATE_EVENT: case CMD_CAN_ERROR_EVENT: if (dev->driver_info->family == KVASER_LEAF) kvaser_usb_leaf_leaf_rx_error(dev, cmd); else kvaser_usb_leaf_usbcan_rx_error(dev, cmd); break; case CMD_TX_ACKNOWLEDGE: kvaser_usb_leaf_tx_acknowledge(dev, cmd); break; case CMD_ERROR_EVENT: kvaser_usb_leaf_error_event(dev, cmd); break; case CMD_GET_BUS_PARAMS_REPLY: kvaser_usb_leaf_get_busparams_reply(dev, cmd); break; /* Ignored commands */ case CMD_USBCAN_CLOCK_OVERFLOW_EVENT: if (dev->driver_info->family != KVASER_USBCAN) goto warn; break; case CMD_FLUSH_QUEUE_REPLY: if (dev->driver_info->family != KVASER_LEAF) goto warn; break; default: warn: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->id); break; } } static void kvaser_usb_leaf_read_bulk_callback(struct kvaser_usb *dev, void *buf, int len) { struct kvaser_cmd *cmd; int pos = 0; while (pos <= len - CMD_HEADER_LEN) { cmd = buf + pos; /* The Kvaser firmware can only read and write commands that * does not cross the USB's endpoint wMaxPacketSize boundary. * If a follow-up command crosses such boundary, firmware puts * a placeholder zero-length command in its place then aligns * the real command to the next max packet size. * * Handle such cases or we're going to miss a significant * number of events in case of a heavy rx load on the bus. 
*/ if (cmd->len == 0) { pos = round_up(pos, le16_to_cpu (dev->bulk_in->wMaxPacketSize)); continue; } if (pos + cmd->len > len) { dev_err_ratelimited(&dev->intf->dev, "Format error\n"); break; } kvaser_usb_leaf_handle_command(dev, cmd); pos += cmd->len; } } static int kvaser_usb_leaf_set_opt_mode(const struct kvaser_usb_net_priv *priv) { struct kvaser_cmd *cmd; int rc; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->id = CMD_SET_CTRL_MODE; cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_ctrl_mode); cmd->u.ctrl_mode.tid = 0xff; cmd->u.ctrl_mode.channel = priv->channel; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_SILENT; else cmd->u.ctrl_mode.ctrl_mode = KVASER_CTRL_MODE_NORMAL; rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len); kfree(cmd); return rc; } static int kvaser_usb_leaf_start_chip(struct kvaser_usb_net_priv *priv) { struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; int err; leaf->joining_bus = true; reinit_completion(&priv->start_comp); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_START_CHIP, priv->channel); if (err) return err; if (!wait_for_completion_timeout(&priv->start_comp, msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return 0; } static int kvaser_usb_leaf_stop_chip(struct kvaser_usb_net_priv *priv) { struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; int err; reinit_completion(&priv->stop_comp); cancel_delayed_work(&leaf->chip_state_req_work); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_STOP_CHIP, priv->channel); if (err) return err; if (!wait_for_completion_timeout(&priv->stop_comp, msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return 0; } static int kvaser_usb_leaf_reset_chip(struct kvaser_usb *dev, int channel) { return kvaser_usb_leaf_send_simple_cmd(dev, CMD_RESET_CHIP, channel); } static int kvaser_usb_leaf_flush_queue(struct kvaser_usb_net_priv *priv) { struct kvaser_cmd *cmd; int rc; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->id = CMD_FLUSH_QUEUE; cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_flush_queue); cmd->u.flush_queue.channel = priv->channel; cmd->u.flush_queue.flags = 0x00; rc = kvaser_usb_send_cmd(priv->dev, cmd, cmd->len); kfree(cmd); return rc; } static int kvaser_usb_leaf_init_card(struct kvaser_usb *dev) { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; card_data->ctrlmode_supported |= CAN_CTRLMODE_3_SAMPLES; return 0; } static int kvaser_usb_leaf_init_channel(struct kvaser_usb_net_priv *priv) { struct kvaser_usb_net_leaf_priv *leaf; leaf = devm_kzalloc(&priv->dev->intf->dev, sizeof(*leaf), GFP_KERNEL); if (!leaf) return -ENOMEM; leaf->net = priv; INIT_DELAYED_WORK(&leaf->chip_state_req_work, kvaser_usb_leaf_chip_state_req_work); priv->sub_priv = leaf; return 0; } static void kvaser_usb_leaf_remove_channel(struct kvaser_usb_net_priv *priv) { struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; if (leaf) cancel_delayed_work_sync(&leaf->chip_state_req_work); } static int kvaser_usb_leaf_set_bittiming(const struct net_device *netdev, const struct kvaser_usb_busparams *busparams) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; int rc; cmd = kmalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->id = CMD_SET_BUS_PARAMS; cmd->len = CMD_HEADER_LEN + sizeof(struct kvaser_cmd_busparams); cmd->u.busparams.channel = priv->channel; cmd->u.busparams.tid = 0xff; memcpy(&cmd->u.busparams.busparams, 
busparams, sizeof(cmd->u.busparams.busparams)); rc = kvaser_usb_send_cmd(dev, cmd, cmd->len); kfree(cmd); return rc; } static int kvaser_usb_leaf_get_busparams(struct kvaser_usb_net_priv *priv) { int err; if (priv->dev->driver_info->family == KVASER_USBCAN) return -EOPNOTSUPP; reinit_completion(&priv->get_busparams_comp); err = kvaser_usb_leaf_send_simple_cmd(priv->dev, CMD_GET_BUS_PARAMS, priv->channel); if (err) return err; if (!wait_for_completion_timeout(&priv->get_busparams_comp, msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return 0; } static int kvaser_usb_leaf_set_mode(struct net_device *netdev, enum can_mode mode) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb_net_leaf_priv *leaf = priv->sub_priv; int err; switch (mode) { case CAN_MODE_START: kvaser_usb_unlink_tx_urbs(priv); leaf->joining_bus = true; err = kvaser_usb_leaf_simple_cmd_async(priv, CMD_START_CHIP); if (err) return err; priv->can.state = CAN_STATE_ERROR_ACTIVE; break; default: return -EOPNOTSUPP; } return 0; } static int kvaser_usb_leaf_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); *bec = priv->bec; return 0; } static int kvaser_usb_leaf_setup_endpoints(struct kvaser_usb *dev) { const struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int i; iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (!dev->bulk_in && usb_endpoint_is_bulk_in(endpoint)) dev->bulk_in = endpoint; if (!dev->bulk_out && usb_endpoint_is_bulk_out(endpoint)) dev->bulk_out = endpoint; /* use first bulk endpoint for in and out */ if (dev->bulk_in && dev->bulk_out) return 0; } return -ENODEV; } const struct kvaser_usb_dev_ops kvaser_usb_leaf_dev_ops = { .dev_set_mode = kvaser_usb_leaf_set_mode, .dev_set_bittiming = kvaser_usb_leaf_set_bittiming, .dev_get_busparams = kvaser_usb_leaf_get_busparams, .dev_set_data_bittiming = NULL, .dev_get_data_busparams = NULL, .dev_get_berr_counter = kvaser_usb_leaf_get_berr_counter, .dev_setup_endpoints = kvaser_usb_leaf_setup_endpoints, .dev_init_card = kvaser_usb_leaf_init_card, .dev_init_channel = kvaser_usb_leaf_init_channel, .dev_remove_channel = kvaser_usb_leaf_remove_channel, .dev_get_software_info = kvaser_usb_leaf_get_software_info, .dev_get_software_details = NULL, .dev_get_card_info = kvaser_usb_leaf_get_card_info, .dev_get_capabilities = kvaser_usb_leaf_get_capabilities, .dev_set_opt_mode = kvaser_usb_leaf_set_opt_mode, .dev_start_chip = kvaser_usb_leaf_start_chip, .dev_stop_chip = kvaser_usb_leaf_stop_chip, .dev_reset_chip = kvaser_usb_leaf_reset_chip, .dev_flush_queue = kvaser_usb_leaf_flush_queue, .dev_read_bulk_callback = kvaser_usb_leaf_read_bulk_callback, .dev_frame_to_cmd = kvaser_usb_leaf_frame_to_cmd, };
linux-master
drivers/net/can/usb/kvaser_usb/kvaser_usb_leaf.c
// SPDX-License-Identifier: GPL-2.0 /* Parts of this driver are based on the following: * - Kvaser linux mhydra driver (version 5.24) * - CAN driver for esd CAN-USB/2 * * Copyright (C) 2018 KVASER AB, Sweden. All rights reserved. * Copyright (C) 2010 Matthias Fuchs <[email protected]>, esd gmbh * * Known issues: * - Transition from CAN_STATE_ERROR_WARNING to CAN_STATE_ERROR_ACTIVE is only * reported after a call to do_get_berr_counter(), since firmware does not * distinguish between ERROR_WARNING and ERROR_ACTIVE. * - Hardware timestamps are not set for CAN Tx frames. */ #include <linux/completion.h> #include <linux/device.h> #include <linux/gfp.h> #include <linux/jiffies.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/spinlock.h> #include <linux/string.h> #include <linux/types.h> #include <linux/units.h> #include <linux/usb.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/can/netlink.h> #include "kvaser_usb.h" /* Forward declarations */ static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan; static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc; static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt; #define KVASER_USB_HYDRA_BULK_EP_IN_ADDR 0x82 #define KVASER_USB_HYDRA_BULK_EP_OUT_ADDR 0x02 #define KVASER_USB_HYDRA_MAX_TRANSID 0xff #define KVASER_USB_HYDRA_MIN_TRANSID 0x01 /* Minihydra command IDs */ #define CMD_SET_BUSPARAMS_REQ 16 #define CMD_GET_BUSPARAMS_REQ 17 #define CMD_GET_BUSPARAMS_RESP 18 #define CMD_GET_CHIP_STATE_REQ 19 #define CMD_CHIP_STATE_EVENT 20 #define CMD_SET_DRIVERMODE_REQ 21 #define CMD_START_CHIP_REQ 26 #define CMD_START_CHIP_RESP 27 #define CMD_STOP_CHIP_REQ 28 #define CMD_STOP_CHIP_RESP 29 #define CMD_TX_CAN_MESSAGE 33 #define CMD_GET_CARD_INFO_REQ 34 #define CMD_GET_CARD_INFO_RESP 35 #define CMD_GET_SOFTWARE_INFO_REQ 38 #define CMD_GET_SOFTWARE_INFO_RESP 39 #define CMD_ERROR_EVENT 45 #define CMD_FLUSH_QUEUE 48 #define CMD_TX_ACKNOWLEDGE 50 #define CMD_FLUSH_QUEUE_RESP 66 #define CMD_SET_BUSPARAMS_FD_REQ 69 #define CMD_SET_BUSPARAMS_FD_RESP 70 #define CMD_SET_BUSPARAMS_RESP 85 #define CMD_GET_CAPABILITIES_REQ 95 #define CMD_GET_CAPABILITIES_RESP 96 #define CMD_RX_MESSAGE 106 #define CMD_MAP_CHANNEL_REQ 200 #define CMD_MAP_CHANNEL_RESP 201 #define CMD_GET_SOFTWARE_DETAILS_REQ 202 #define CMD_GET_SOFTWARE_DETAILS_RESP 203 #define CMD_EXTENDED 255 /* Minihydra extended command IDs */ #define CMD_TX_CAN_MESSAGE_FD 224 #define CMD_TX_ACKNOWLEDGE_FD 225 #define CMD_RX_MESSAGE_FD 226 /* Hydra commands are handled by different threads in firmware. * The threads are denoted hydra entity (HE). Each HE got a unique 6-bit * address. The address is used in hydra commands to get/set source and * destination HE. There are two predefined HE addresses, the remaining * addresses are different between devices and firmware versions. Hence, we need * to enumerate the addresses (see kvaser_usb_hydra_map_channel()). 
*/ /* Well-known HE addresses */ #define KVASER_USB_HYDRA_HE_ADDRESS_ROUTER 0x00 #define KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL 0x3e #define KVASER_USB_HYDRA_TRANSID_CANHE 0x40 #define KVASER_USB_HYDRA_TRANSID_SYSDBG 0x61 struct kvaser_cmd_map_ch_req { char name[16]; u8 channel; u8 reserved[11]; } __packed; struct kvaser_cmd_map_ch_res { u8 he_addr; u8 channel; u8 reserved[26]; } __packed; struct kvaser_cmd_card_info { __le32 serial_number; __le32 clock_res; __le32 mfg_date; __le32 ean[2]; u8 hw_version; u8 usb_mode; u8 hw_type; u8 reserved0; u8 nchannels; u8 reserved1[3]; } __packed; struct kvaser_cmd_sw_info { u8 reserved0[8]; __le16 max_outstanding_tx; u8 reserved1[18]; } __packed; struct kvaser_cmd_sw_detail_req { u8 use_ext_cmd; u8 reserved[27]; } __packed; /* Software detail flags */ #define KVASER_USB_HYDRA_SW_FLAG_FW_BETA BIT(2) #define KVASER_USB_HYDRA_SW_FLAG_FW_BAD BIT(4) #define KVASER_USB_HYDRA_SW_FLAG_FREQ_80M BIT(5) #define KVASER_USB_HYDRA_SW_FLAG_EXT_CMD BIT(9) #define KVASER_USB_HYDRA_SW_FLAG_CANFD BIT(10) #define KVASER_USB_HYDRA_SW_FLAG_NONISO BIT(11) #define KVASER_USB_HYDRA_SW_FLAG_EXT_CAP BIT(12) #define KVASER_USB_HYDRA_SW_FLAG_CAN_FREQ_80M BIT(13) struct kvaser_cmd_sw_detail_res { __le32 sw_flags; __le32 sw_version; __le32 sw_name; __le32 ean[2]; __le32 max_bitrate; u8 reserved[4]; } __packed; /* Sub commands for cap_req and cap_res */ #define KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE 0x02 #define KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT 0x05 #define KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT 0x06 struct kvaser_cmd_cap_req { __le16 cap_cmd; u8 reserved[26]; } __packed; /* Status codes for cap_res */ #define KVASER_USB_HYDRA_CAP_STAT_OK 0x00 #define KVASER_USB_HYDRA_CAP_STAT_NOT_IMPL 0x01 #define KVASER_USB_HYDRA_CAP_STAT_UNAVAIL 0x02 struct kvaser_cmd_cap_res { __le16 cap_cmd; __le16 status; __le32 mask; __le32 value; u8 reserved[16]; } __packed; /* CMD_ERROR_EVENT error codes */ #define KVASER_USB_HYDRA_ERROR_EVENT_CAN 0x01 #define KVASER_USB_HYDRA_ERROR_EVENT_PARAM 0x09 struct kvaser_cmd_error_event { __le16 timestamp[3]; u8 reserved; u8 error_code; __le16 info1; __le16 info2; } __packed; /* Chip state status flags. Used for chip_state_event and err_frame_data. 
*/ #define KVASER_USB_HYDRA_BUS_ERR_ACT 0x00 #define KVASER_USB_HYDRA_BUS_ERR_PASS BIT(5) #define KVASER_USB_HYDRA_BUS_BUS_OFF BIT(6) struct kvaser_cmd_chip_state_event { __le16 timestamp[3]; u8 tx_err_counter; u8 rx_err_counter; u8 bus_status; u8 reserved[19]; } __packed; /* Busparam modes */ #define KVASER_USB_HYDRA_BUS_MODE_CAN 0x00 #define KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO 0x01 #define KVASER_USB_HYDRA_BUS_MODE_NONISO 0x02 struct kvaser_cmd_set_busparams { struct kvaser_usb_busparams busparams_nominal; u8 reserved0[4]; struct kvaser_usb_busparams busparams_data; u8 canfd_mode; u8 reserved1[7]; } __packed; /* Busparam type */ #define KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN 0x00 #define KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD 0x01 struct kvaser_cmd_get_busparams_req { u8 type; u8 reserved[27]; } __packed; struct kvaser_cmd_get_busparams_res { struct kvaser_usb_busparams busparams; u8 reserved[20]; } __packed; /* Ctrl modes */ #define KVASER_USB_HYDRA_CTRLMODE_NORMAL 0x01 #define KVASER_USB_HYDRA_CTRLMODE_LISTEN 0x02 struct kvaser_cmd_set_ctrlmode { u8 mode; u8 reserved[27]; } __packed; struct kvaser_err_frame_data { u8 bus_status; u8 reserved0; u8 tx_err_counter; u8 rx_err_counter; u8 reserved1[4]; } __packed; struct kvaser_cmd_rx_can { u8 cmd_len; u8 cmd_no; u8 channel; u8 flags; __le16 timestamp[3]; u8 dlc; u8 padding; __le32 id; union { u8 data[8]; struct kvaser_err_frame_data err_frame_data; }; } __packed; /* Extended CAN ID flag. Used in rx_can and tx_can */ #define KVASER_USB_HYDRA_EXTENDED_FRAME_ID BIT(31) struct kvaser_cmd_tx_can { __le32 id; u8 data[8]; u8 dlc; u8 flags; __le16 transid; u8 channel; u8 reserved[11]; } __packed; struct kvaser_cmd_header { u8 cmd_no; /* The destination HE address is stored in 0..5 of he_addr. * The upper part of source HE address is stored in 6..7 of he_addr, and * the lower part is stored in 12..15 of transid. */ u8 he_addr; __le16 transid; } __packed; struct kvaser_cmd { struct kvaser_cmd_header header; union { struct kvaser_cmd_map_ch_req map_ch_req; struct kvaser_cmd_map_ch_res map_ch_res; struct kvaser_cmd_card_info card_info; struct kvaser_cmd_sw_info sw_info; struct kvaser_cmd_sw_detail_req sw_detail_req; struct kvaser_cmd_sw_detail_res sw_detail_res; struct kvaser_cmd_cap_req cap_req; struct kvaser_cmd_cap_res cap_res; struct kvaser_cmd_error_event error_event; struct kvaser_cmd_set_busparams set_busparams_req; struct kvaser_cmd_get_busparams_req get_busparams_req; struct kvaser_cmd_get_busparams_res get_busparams_res; struct kvaser_cmd_chip_state_event chip_state_event; struct kvaser_cmd_set_ctrlmode set_ctrlmode; struct kvaser_cmd_rx_can rx_can; struct kvaser_cmd_tx_can tx_can; } __packed; } __packed; /* CAN frame flags. Used in rx_can, ext_rx_can, tx_can and ext_tx_can */ #define KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME BIT(0) #define KVASER_USB_HYDRA_CF_FLAG_OVERRUN BIT(1) #define KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME BIT(4) #define KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID BIT(5) #define KVASER_USB_HYDRA_CF_FLAG_TX_ACK BIT(6) /* CAN frame flags. Used in ext_rx_can and ext_tx_can */ #define KVASER_USB_HYDRA_CF_FLAG_OSM_NACK BIT(12) #define KVASER_USB_HYDRA_CF_FLAG_ABL BIT(13) #define KVASER_USB_HYDRA_CF_FLAG_FDF BIT(16) #define KVASER_USB_HYDRA_CF_FLAG_BRS BIT(17) #define KVASER_USB_HYDRA_CF_FLAG_ESI BIT(18) /* KCAN packet header macros. 
Used in ext_rx_can and ext_tx_can */ #define KVASER_USB_KCAN_DATA_DLC_BITS 4 #define KVASER_USB_KCAN_DATA_DLC_SHIFT 8 #define KVASER_USB_KCAN_DATA_DLC_MASK \ GENMASK(KVASER_USB_KCAN_DATA_DLC_BITS - 1 + \ KVASER_USB_KCAN_DATA_DLC_SHIFT, \ KVASER_USB_KCAN_DATA_DLC_SHIFT) #define KVASER_USB_KCAN_DATA_BRS BIT(14) #define KVASER_USB_KCAN_DATA_FDF BIT(15) #define KVASER_USB_KCAN_DATA_OSM BIT(16) #define KVASER_USB_KCAN_DATA_AREQ BIT(31) #define KVASER_USB_KCAN_DATA_SRR BIT(31) #define KVASER_USB_KCAN_DATA_RTR BIT(29) #define KVASER_USB_KCAN_DATA_IDE BIT(30) struct kvaser_cmd_ext_rx_can { __le32 flags; __le32 id; __le32 kcan_id; __le32 kcan_header; __le64 timestamp; union { u8 kcan_payload[64]; struct kvaser_err_frame_data err_frame_data; }; } __packed; struct kvaser_cmd_ext_tx_can { __le32 flags; __le32 id; __le32 kcan_id; __le32 kcan_header; u8 databytes; u8 dlc; u8 reserved[6]; u8 kcan_payload[64]; } __packed; struct kvaser_cmd_ext_tx_ack { __le32 flags; u8 reserved0[4]; __le64 timestamp; u8 reserved1[8]; } __packed; /* struct for extended commands (CMD_EXTENDED) */ struct kvaser_cmd_ext { struct kvaser_cmd_header header; __le16 len; u8 cmd_no_ext; u8 reserved; union { struct kvaser_cmd_ext_rx_can rx_can; struct kvaser_cmd_ext_tx_can tx_can; struct kvaser_cmd_ext_tx_ack tx_ack; } __packed; } __packed; struct kvaser_usb_net_hydra_priv { int pending_get_busparams_type; }; static const struct can_bittiming_const kvaser_usb_hydra_kcan_bittiming_c = { .name = "kvaser_usb_kcan", .tseg1_min = 1, .tseg1_max = 255, .tseg2_min = 1, .tseg2_max = 32, .sjw_max = 16, .brp_min = 1, .brp_max = 8192, .brp_inc = 1, }; const struct can_bittiming_const kvaser_usb_flexc_bittiming_const = { .name = "kvaser_usb_flex", .tseg1_min = 4, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; static const struct can_bittiming_const kvaser_usb_hydra_rt_bittiming_c = { .name = "kvaser_usb_rt", .tseg1_min = 2, .tseg1_max = 96, .tseg2_min = 2, .tseg2_max = 32, .sjw_max = 32, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; static const struct can_bittiming_const kvaser_usb_hydra_rtd_bittiming_c = { .name = "kvaser_usb_rt", .tseg1_min = 2, .tseg1_max = 39, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 8, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; #define KVASER_USB_HYDRA_TRANSID_BITS 12 #define KVASER_USB_HYDRA_TRANSID_MASK \ GENMASK(KVASER_USB_HYDRA_TRANSID_BITS - 1, 0) #define KVASER_USB_HYDRA_HE_ADDR_SRC_MASK GENMASK(7, 6) #define KVASER_USB_HYDRA_HE_ADDR_DEST_MASK GENMASK(5, 0) #define KVASER_USB_HYDRA_HE_ADDR_SRC_BITS 2 static inline u16 kvaser_usb_hydra_get_cmd_transid(const struct kvaser_cmd *cmd) { return le16_to_cpu(cmd->header.transid) & KVASER_USB_HYDRA_TRANSID_MASK; } static inline void kvaser_usb_hydra_set_cmd_transid(struct kvaser_cmd *cmd, u16 transid) { cmd->header.transid = cpu_to_le16(transid & KVASER_USB_HYDRA_TRANSID_MASK); } static inline u8 kvaser_usb_hydra_get_cmd_src_he(const struct kvaser_cmd *cmd) { return (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) >> KVASER_USB_HYDRA_HE_ADDR_SRC_BITS | le16_to_cpu(cmd->header.transid) >> KVASER_USB_HYDRA_TRANSID_BITS; } static inline void kvaser_usb_hydra_set_cmd_dest_he(struct kvaser_cmd *cmd, u8 dest_he) { cmd->header.he_addr = (cmd->header.he_addr & KVASER_USB_HYDRA_HE_ADDR_SRC_MASK) | (dest_he & KVASER_USB_HYDRA_HE_ADDR_DEST_MASK); } static u8 kvaser_usb_hydra_channel_from_cmd(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { int i; u8 channel = 0xff; u8 src_he = 
kvaser_usb_hydra_get_cmd_src_he(cmd); for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) { if (dev->card_data.hydra.channel_to_he[i] == src_he) { channel = i; break; } } return channel; } static u16 kvaser_usb_hydra_get_next_transid(struct kvaser_usb *dev) { unsigned long flags; u16 transid; struct kvaser_usb_dev_card_data_hydra *card_data = &dev->card_data.hydra; spin_lock_irqsave(&card_data->transid_lock, flags); transid = card_data->transid; if (transid >= KVASER_USB_HYDRA_MAX_TRANSID) transid = KVASER_USB_HYDRA_MIN_TRANSID; else transid++; card_data->transid = transid; spin_unlock_irqrestore(&card_data->transid_lock, flags); return transid; } static size_t kvaser_usb_hydra_cmd_size(struct kvaser_cmd *cmd) { size_t ret; if (cmd->header.cmd_no == CMD_EXTENDED) ret = le16_to_cpu(((struct kvaser_cmd_ext *)cmd)->len); else ret = sizeof(struct kvaser_cmd); return ret; } static struct kvaser_usb_net_priv * kvaser_usb_hydra_net_priv_from_cmd(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv = NULL; u8 channel = kvaser_usb_hydra_channel_from_cmd(dev, cmd); if (channel >= dev->nchannels) dev_err(&dev->intf->dev, "Invalid channel number (%d)\n", channel); else priv = dev->nets[channel]; return priv; } static ktime_t kvaser_usb_hydra_ktime_from_rx_cmd(const struct kvaser_usb_dev_cfg *cfg, const struct kvaser_cmd *cmd) { u64 ticks; if (cmd->header.cmd_no == CMD_EXTENDED) { struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd; ticks = le64_to_cpu(cmd_ext->rx_can.timestamp); } else { ticks = le16_to_cpu(cmd->rx_can.timestamp[0]); ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[1])) << 16; ticks += (u64)(le16_to_cpu(cmd->rx_can.timestamp[2])) << 32; } return ns_to_ktime(div_u64(ticks * 1000, cfg->timestamp_freq)); } static int kvaser_usb_hydra_send_simple_cmd(struct kvaser_usb *dev, u8 cmd_no, int channel) { struct kvaser_cmd *cmd; size_t cmd_len; int err; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = cmd_no; cmd_len = kvaser_usb_hydra_cmd_size(cmd); if (channel < 0) { kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); } else { if (channel >= KVASER_USB_MAX_NET_DEVICES) { dev_err(&dev->intf->dev, "channel (%d) out of range.\n", channel); err = -EINVAL; goto end; } kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[channel]); } kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; end: kfree(cmd); return err; } static int kvaser_usb_hydra_send_simple_cmd_async(struct kvaser_usb_net_priv *priv, u8 cmd_no) { struct kvaser_cmd *cmd; struct kvaser_usb *dev = priv->dev; size_t cmd_len; int err; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return -ENOMEM; cmd->header.cmd_no = cmd_no; cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); err = kvaser_usb_send_cmd_async(priv, cmd, cmd_len); if (err) kfree(cmd); return err; } /* This function is used for synchronously waiting on hydra control commands. * Note: Compared to kvaser_usb_hydra_read_bulk_callback(), we never need to * handle partial hydra commands. Since hydra control commands are always * non-extended commands. 
*/ static int kvaser_usb_hydra_wait_cmd(const struct kvaser_usb *dev, u8 cmd_no, struct kvaser_cmd *cmd) { void *buf; int err; unsigned long timeout = jiffies + msecs_to_jiffies(KVASER_USB_TIMEOUT); if (cmd->header.cmd_no == CMD_EXTENDED) { dev_err(&dev->intf->dev, "Wait for CMD_EXTENDED not allowed\n"); return -EINVAL; } buf = kzalloc(KVASER_USB_RX_BUFFER_SIZE, GFP_KERNEL); if (!buf) return -ENOMEM; do { int actual_len = 0; int pos = 0; err = kvaser_usb_recv_cmd(dev, buf, KVASER_USB_RX_BUFFER_SIZE, &actual_len); if (err < 0) goto end; while (pos < actual_len) { struct kvaser_cmd *tmp_cmd; size_t cmd_len; tmp_cmd = buf + pos; cmd_len = kvaser_usb_hydra_cmd_size(tmp_cmd); if (pos + cmd_len > actual_len) { dev_err_ratelimited(&dev->intf->dev, "Format error\n"); break; } if (tmp_cmd->header.cmd_no == cmd_no) { memcpy(cmd, tmp_cmd, cmd_len); goto end; } pos += cmd_len; } } while (time_before(jiffies, timeout)); err = -EINVAL; end: kfree(buf); return err; } static int kvaser_usb_hydra_map_channel_resp(struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { u8 he, channel; u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); struct kvaser_usb_dev_card_data_hydra *card_data = &dev->card_data.hydra; if (transid > 0x007f || transid < 0x0040) { dev_err(&dev->intf->dev, "CMD_MAP_CHANNEL_RESP, invalid transid: 0x%x\n", transid); return -EINVAL; } switch (transid) { case KVASER_USB_HYDRA_TRANSID_CANHE: case KVASER_USB_HYDRA_TRANSID_CANHE + 1: case KVASER_USB_HYDRA_TRANSID_CANHE + 2: case KVASER_USB_HYDRA_TRANSID_CANHE + 3: case KVASER_USB_HYDRA_TRANSID_CANHE + 4: channel = transid & 0x000f; he = cmd->map_ch_res.he_addr; card_data->channel_to_he[channel] = he; break; case KVASER_USB_HYDRA_TRANSID_SYSDBG: card_data->sysdbg_he = cmd->map_ch_res.he_addr; break; default: dev_warn(&dev->intf->dev, "Unknown CMD_MAP_CHANNEL_RESP transid=0x%x\n", transid); break; } return 0; } static int kvaser_usb_hydra_map_channel(struct kvaser_usb *dev, u16 transid, u8 channel, const char *name) { struct kvaser_cmd *cmd; int err; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; strcpy(cmd->map_ch_req.name, name); cmd->header.cmd_no = CMD_MAP_CHANNEL_REQ; kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ROUTER); cmd->map_ch_req.channel = channel; kvaser_usb_hydra_set_cmd_transid(cmd, transid); err = kvaser_usb_send_cmd(dev, cmd, kvaser_usb_hydra_cmd_size(cmd)); if (err) goto end; err = kvaser_usb_hydra_wait_cmd(dev, CMD_MAP_CHANNEL_RESP, cmd); if (err) goto end; err = kvaser_usb_hydra_map_channel_resp(dev, cmd); if (err) goto end; end: kfree(cmd); return err; } static int kvaser_usb_hydra_get_single_capability(struct kvaser_usb *dev, u16 cap_cmd_req, u16 *status) { struct kvaser_usb_dev_card_data *card_data = &dev->card_data; struct kvaser_cmd *cmd; size_t cmd_len; u32 value = 0; u32 mask = 0; u16 cap_cmd_res; int err; int i; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = CMD_GET_CAPABILITIES_REQ; cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->cap_req.cap_cmd = cpu_to_le16(cap_cmd_req); kvaser_usb_hydra_set_cmd_dest_he(cmd, card_data->hydra.sysdbg_he); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CAPABILITIES_RESP, cmd); if (err) goto end; *status = le16_to_cpu(cmd->cap_res.status); if (*status != KVASER_USB_HYDRA_CAP_STAT_OK) goto end; cap_cmd_res = le16_to_cpu(cmd->cap_res.cap_cmd); switch (cap_cmd_res) { 
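	/* Each capability response carries two per-channel bitmasks: 'mask'
	 * says which channel bits are valid and 'value' says which of those
	 * channels actually have the capability, so channel i supports it
	 * iff BIT(i) is set in (value & mask) -- exactly what the loop below
	 * tests. Illustrative sketch with hypothetical values (not part of
	 * the driver): mask = 0x0f, value = 0x05 means channels 0 and 2 only:
	 *
	 *	for (i = 0; i < 4; i++)
	 *		supported[i] = !!(BIT(i) & (0x05 & 0x0f));
	 */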
case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE: case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT: case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT: value = le32_to_cpu(cmd->cap_res.value); mask = le32_to_cpu(cmd->cap_res.mask); break; default: dev_warn(&dev->intf->dev, "Unknown capability command %u\n", cap_cmd_res); break; } for (i = 0; i < dev->nchannels; i++) { if (BIT(i) & (value & mask)) { switch (cap_cmd_res) { case KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE: card_data->ctrlmode_supported |= CAN_CTRLMODE_LISTENONLY; break; case KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT: card_data->capabilities |= KVASER_USB_CAP_BERR_CAP; break; case KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT: card_data->ctrlmode_supported |= CAN_CTRLMODE_ONE_SHOT; break; } } } end: kfree(cmd); return err; } static void kvaser_usb_hydra_start_chip_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); if (!priv) return; if (completion_done(&priv->start_comp) && netif_queue_stopped(priv->netdev)) { netif_wake_queue(priv->netdev); } else { netif_start_queue(priv->netdev); complete(&priv->start_comp); } } static void kvaser_usb_hydra_stop_chip_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); if (!priv) return; complete(&priv->stop_comp); } static void kvaser_usb_hydra_flush_queue_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); if (!priv) return; complete(&priv->flush_comp); } static void kvaser_usb_hydra_get_busparams_reply(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; struct kvaser_usb_net_hydra_priv *hydra; priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); if (!priv) return; hydra = priv->sub_priv; if (!hydra) return; switch (hydra->pending_get_busparams_type) { case KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN: memcpy(&priv->busparams_nominal, &cmd->get_busparams_res.busparams, sizeof(priv->busparams_nominal)); break; case KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD: memcpy(&priv->busparams_data, &cmd->get_busparams_res.busparams, sizeof(priv->busparams_nominal)); break; default: dev_warn(&dev->intf->dev, "Unknown get_busparams_type %d\n", hydra->pending_get_busparams_type); break; } hydra->pending_get_busparams_type = -1; complete(&priv->get_busparams_comp); } static void kvaser_usb_hydra_bus_status_to_can_state(const struct kvaser_usb_net_priv *priv, u8 bus_status, const struct can_berr_counter *bec, enum can_state *new_state) { if (bus_status & KVASER_USB_HYDRA_BUS_BUS_OFF) { *new_state = CAN_STATE_BUS_OFF; } else if (bus_status & KVASER_USB_HYDRA_BUS_ERR_PASS) { *new_state = CAN_STATE_ERROR_PASSIVE; } else if (bus_status == KVASER_USB_HYDRA_BUS_ERR_ACT) { if (bec->txerr >= 128 || bec->rxerr >= 128) { netdev_warn(priv->netdev, "ERR_ACTIVE but err tx=%u or rx=%u >=128\n", bec->txerr, bec->rxerr); *new_state = CAN_STATE_ERROR_PASSIVE; } else if (bec->txerr >= 96 || bec->rxerr >= 96) { *new_state = CAN_STATE_ERROR_WARNING; } else { *new_state = CAN_STATE_ERROR_ACTIVE; } } } static void kvaser_usb_hydra_update_state(struct kvaser_usb_net_priv *priv, u8 bus_status, const struct can_berr_counter *bec) { struct net_device *netdev = priv->netdev; struct can_frame *cf; struct sk_buff *skb; enum can_state new_state, old_state; old_state = priv->can.state; kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, bec, 
&new_state); if (new_state == old_state) return; /* Ignore state change if previous state was STOPPED and the new state * is BUS_OFF. Firmware always report this as BUS_OFF, since firmware * does not distinguish between BUS_OFF and STOPPED. */ if (old_state == CAN_STATE_STOPPED && new_state == CAN_STATE_BUS_OFF) return; skb = alloc_can_err_skb(netdev, &cf); if (skb) { enum can_state tx_state, rx_state; tx_state = (bec->txerr >= bec->rxerr) ? new_state : CAN_STATE_ERROR_ACTIVE; rx_state = (bec->txerr <= bec->rxerr) ? new_state : CAN_STATE_ERROR_ACTIVE; can_change_state(netdev, cf, tx_state, rx_state); } if (new_state == CAN_STATE_BUS_OFF && old_state < CAN_STATE_BUS_OFF) { if (!priv->can.restart_ms) kvaser_usb_hydra_send_simple_cmd_async (priv, CMD_STOP_CHIP_REQ); can_bus_off(netdev); } if (!skb) { netdev_warn(netdev, "No memory left for err_skb\n"); return; } if (priv->can.restart_ms && old_state >= CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) priv->can.can_stats.restarts++; if (new_state != CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = bec->txerr; cf->data[7] = bec->rxerr; } netif_rx(skb); } static void kvaser_usb_hydra_state_event(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv; struct can_berr_counter bec; u8 bus_status; priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); if (!priv) return; bus_status = cmd->chip_state_event.bus_status; bec.txerr = cmd->chip_state_event.tx_err_counter; bec.rxerr = cmd->chip_state_event.rx_err_counter; kvaser_usb_hydra_update_state(priv, bus_status, &bec); priv->bec.txerr = bec.txerr; priv->bec.rxerr = bec.rxerr; } static void kvaser_usb_hydra_error_event_parameter(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { /* info1 will contain the offending cmd_no */ switch (le16_to_cpu(cmd->error_event.info1)) { case CMD_START_CHIP_REQ: dev_warn(&dev->intf->dev, "CMD_START_CHIP_REQ error in parameter\n"); break; case CMD_STOP_CHIP_REQ: dev_warn(&dev->intf->dev, "CMD_STOP_CHIP_REQ error in parameter\n"); break; case CMD_FLUSH_QUEUE: dev_warn(&dev->intf->dev, "CMD_FLUSH_QUEUE error in parameter\n"); break; case CMD_SET_BUSPARAMS_REQ: dev_warn(&dev->intf->dev, "Set bittiming failed. Error in parameter\n"); break; case CMD_SET_BUSPARAMS_FD_REQ: dev_warn(&dev->intf->dev, "Set data bittiming failed. Error in parameter\n"); break; default: dev_warn(&dev->intf->dev, "Unhandled parameter error event cmd_no (%u)\n", le16_to_cpu(cmd->error_event.info1)); break; } } static void kvaser_usb_hydra_error_event(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { switch (cmd->error_event.error_code) { case KVASER_USB_HYDRA_ERROR_EVENT_PARAM: kvaser_usb_hydra_error_event_parameter(dev, cmd); break; case KVASER_USB_HYDRA_ERROR_EVENT_CAN: /* Wrong channel mapping?! This should never happen! 
* info1 will contain the offending cmd_no */ dev_err(&dev->intf->dev, "Received CAN error event for cmd_no (%u)\n", le16_to_cpu(cmd->error_event.info1)); break; default: dev_warn(&dev->intf->dev, "Unhandled error event (%d)\n", cmd->error_event.error_code); break; } } static void kvaser_usb_hydra_error_frame(struct kvaser_usb_net_priv *priv, const struct kvaser_err_frame_data *err_frame_data, ktime_t hwtstamp) { struct net_device *netdev = priv->netdev; struct net_device_stats *stats = &netdev->stats; struct can_frame *cf; struct sk_buff *skb; struct skb_shared_hwtstamps *shhwtstamps; struct can_berr_counter bec; enum can_state new_state, old_state; u8 bus_status; priv->can.can_stats.bus_error++; stats->rx_errors++; bus_status = err_frame_data->bus_status; bec.txerr = err_frame_data->tx_err_counter; bec.rxerr = err_frame_data->rx_err_counter; old_state = priv->can.state; kvaser_usb_hydra_bus_status_to_can_state(priv, bus_status, &bec, &new_state); skb = alloc_can_err_skb(netdev, &cf); if (new_state != old_state) { if (skb) { enum can_state tx_state, rx_state; tx_state = (bec.txerr >= bec.rxerr) ? new_state : CAN_STATE_ERROR_ACTIVE; rx_state = (bec.txerr <= bec.rxerr) ? new_state : CAN_STATE_ERROR_ACTIVE; can_change_state(netdev, cf, tx_state, rx_state); if (priv->can.restart_ms && old_state >= CAN_STATE_BUS_OFF && new_state < CAN_STATE_BUS_OFF) cf->can_id |= CAN_ERR_RESTARTED; } if (new_state == CAN_STATE_BUS_OFF) { if (!priv->can.restart_ms) kvaser_usb_hydra_send_simple_cmd_async (priv, CMD_STOP_CHIP_REQ); can_bus_off(netdev); } } if (!skb) { stats->rx_dropped++; netdev_warn(netdev, "No memory left for err_skb\n"); return; } shhwtstamps = skb_hwtstamps(skb); shhwtstamps->hwtstamp = hwtstamp; cf->can_id |= CAN_ERR_BUSERROR; if (new_state != CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; } netif_rx(skb); priv->bec.txerr = bec.txerr; priv->bec.rxerr = bec.rxerr; } static void kvaser_usb_hydra_one_shot_fail(struct kvaser_usb_net_priv *priv, const struct kvaser_cmd_ext *cmd) { struct net_device *netdev = priv->netdev; struct net_device_stats *stats = &netdev->stats; struct can_frame *cf; struct sk_buff *skb; u32 flags; skb = alloc_can_err_skb(netdev, &cf); if (!skb) { stats->rx_dropped++; netdev_warn(netdev, "No memory left for err_skb\n"); return; } cf->can_id |= CAN_ERR_BUSERROR; flags = le32_to_cpu(cmd->tx_ack.flags); if (flags & KVASER_USB_HYDRA_CF_FLAG_OSM_NACK) cf->can_id |= CAN_ERR_ACK; if (flags & KVASER_USB_HYDRA_CF_FLAG_ABL) { cf->can_id |= CAN_ERR_LOSTARB; priv->can.can_stats.arbitration_lost++; } stats->tx_errors++; netif_rx(skb); } static void kvaser_usb_hydra_tx_acknowledge(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_tx_urb_context *context; struct kvaser_usb_net_priv *priv; unsigned long irq_flags; unsigned int len; bool one_shot_fail = false; bool is_err_frame = false; u16 transid = kvaser_usb_hydra_get_cmd_transid(cmd); priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); if (!priv) return; if (!netif_device_present(priv->netdev)) return; if (cmd->header.cmd_no == CMD_EXTENDED) { struct kvaser_cmd_ext *cmd_ext = (struct kvaser_cmd_ext *)cmd; u32 flags = le32_to_cpu(cmd_ext->tx_ack.flags); if (flags & (KVASER_USB_HYDRA_CF_FLAG_OSM_NACK | KVASER_USB_HYDRA_CF_FLAG_ABL)) { kvaser_usb_hydra_one_shot_fail(priv, cmd_ext); one_shot_fail = true; } is_err_frame = flags & KVASER_USB_HYDRA_CF_FLAG_TX_ACK && flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME; } context = &priv->tx_contexts[transid % 
dev->max_tx_urbs]; spin_lock_irqsave(&priv->tx_contexts_lock, irq_flags); len = can_get_echo_skb(priv->netdev, context->echo_index, NULL); context->echo_index = dev->max_tx_urbs; --priv->active_tx_contexts; netif_wake_queue(priv->netdev); spin_unlock_irqrestore(&priv->tx_contexts_lock, irq_flags); if (!one_shot_fail && !is_err_frame) { struct net_device_stats *stats = &priv->netdev->stats; stats->tx_packets++; stats->tx_bytes += len; } } static void kvaser_usb_hydra_rx_msg_std(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { struct kvaser_usb_net_priv *priv = NULL; struct can_frame *cf; struct sk_buff *skb; struct skb_shared_hwtstamps *shhwtstamps; struct net_device_stats *stats; u8 flags; ktime_t hwtstamp; priv = kvaser_usb_hydra_net_priv_from_cmd(dev, cmd); if (!priv) return; stats = &priv->netdev->stats; flags = cmd->rx_can.flags; hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, cmd); if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) { kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data, hwtstamp); return; } skb = alloc_can_skb(priv->netdev, &cf); if (!skb) { stats->rx_dropped++; return; } shhwtstamps = skb_hwtstamps(skb); shhwtstamps->hwtstamp = hwtstamp; cf->can_id = le32_to_cpu(cmd->rx_can.id); if (cf->can_id & KVASER_USB_HYDRA_EXTENDED_FRAME_ID) { cf->can_id &= CAN_EFF_MASK; cf->can_id |= CAN_EFF_FLAG; } else { cf->can_id &= CAN_SFF_MASK; } if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN) kvaser_usb_can_rx_over_error(priv->netdev); can_frame_set_cc_len((struct can_frame *)cf, cmd->rx_can.dlc, priv->can.ctrlmode); if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) { cf->can_id |= CAN_RTR_FLAG; } else { memcpy(cf->data, cmd->rx_can.data, cf->len); stats->rx_bytes += cf->len; } stats->rx_packets++; netif_rx(skb); } static void kvaser_usb_hydra_rx_msg_ext(const struct kvaser_usb *dev, const struct kvaser_cmd_ext *cmd) { struct kvaser_cmd *std_cmd = (struct kvaser_cmd *)cmd; struct kvaser_usb_net_priv *priv; struct canfd_frame *cf; struct sk_buff *skb; struct skb_shared_hwtstamps *shhwtstamps; struct net_device_stats *stats; u32 flags; u8 dlc; u32 kcan_header; ktime_t hwtstamp; priv = kvaser_usb_hydra_net_priv_from_cmd(dev, std_cmd); if (!priv) return; stats = &priv->netdev->stats; kcan_header = le32_to_cpu(cmd->rx_can.kcan_header); dlc = (kcan_header & KVASER_USB_KCAN_DATA_DLC_MASK) >> KVASER_USB_KCAN_DATA_DLC_SHIFT; flags = le32_to_cpu(cmd->rx_can.flags); hwtstamp = kvaser_usb_hydra_ktime_from_rx_cmd(dev->cfg, std_cmd); if (flags & KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME) { kvaser_usb_hydra_error_frame(priv, &cmd->rx_can.err_frame_data, hwtstamp); return; } if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) skb = alloc_canfd_skb(priv->netdev, &cf); else skb = alloc_can_skb(priv->netdev, (struct can_frame **)&cf); if (!skb) { stats->rx_dropped++; return; } shhwtstamps = skb_hwtstamps(skb); shhwtstamps->hwtstamp = hwtstamp; cf->can_id = le32_to_cpu(cmd->rx_can.id); if (flags & KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID) { cf->can_id &= CAN_EFF_MASK; cf->can_id |= CAN_EFF_FLAG; } else { cf->can_id &= CAN_SFF_MASK; } if (flags & KVASER_USB_HYDRA_CF_FLAG_OVERRUN) kvaser_usb_can_rx_over_error(priv->netdev); if (flags & KVASER_USB_HYDRA_CF_FLAG_FDF) { cf->len = can_fd_dlc2len(dlc); if (flags & KVASER_USB_HYDRA_CF_FLAG_BRS) cf->flags |= CANFD_BRS; if (flags & KVASER_USB_HYDRA_CF_FLAG_ESI) cf->flags |= CANFD_ESI; } else { can_frame_set_cc_len((struct can_frame *)cf, dlc, priv->can.ctrlmode); } if (flags & KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME) { cf->can_id |= CAN_RTR_FLAG; } else { 
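		/* Remote frames carry no payload, so the data bytes are only
		 * copied (and only counted in rx_bytes) for data frames.
		 */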
memcpy(cf->data, cmd->rx_can.kcan_payload, cf->len); stats->rx_bytes += cf->len; } stats->rx_packets++; netif_rx(skb); } static void kvaser_usb_hydra_handle_cmd_std(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { switch (cmd->header.cmd_no) { case CMD_START_CHIP_RESP: kvaser_usb_hydra_start_chip_reply(dev, cmd); break; case CMD_STOP_CHIP_RESP: kvaser_usb_hydra_stop_chip_reply(dev, cmd); break; case CMD_FLUSH_QUEUE_RESP: kvaser_usb_hydra_flush_queue_reply(dev, cmd); break; case CMD_CHIP_STATE_EVENT: kvaser_usb_hydra_state_event(dev, cmd); break; case CMD_GET_BUSPARAMS_RESP: kvaser_usb_hydra_get_busparams_reply(dev, cmd); break; case CMD_ERROR_EVENT: kvaser_usb_hydra_error_event(dev, cmd); break; case CMD_TX_ACKNOWLEDGE: kvaser_usb_hydra_tx_acknowledge(dev, cmd); break; case CMD_RX_MESSAGE: kvaser_usb_hydra_rx_msg_std(dev, cmd); break; /* Ignored commands */ case CMD_SET_BUSPARAMS_RESP: case CMD_SET_BUSPARAMS_FD_RESP: break; default: dev_warn(&dev->intf->dev, "Unhandled command (%d)\n", cmd->header.cmd_no); break; } } static void kvaser_usb_hydra_handle_cmd_ext(const struct kvaser_usb *dev, const struct kvaser_cmd_ext *cmd) { switch (cmd->cmd_no_ext) { case CMD_TX_ACKNOWLEDGE_FD: kvaser_usb_hydra_tx_acknowledge(dev, (struct kvaser_cmd *)cmd); break; case CMD_RX_MESSAGE_FD: kvaser_usb_hydra_rx_msg_ext(dev, cmd); break; default: dev_warn(&dev->intf->dev, "Unhandled extended command (%d)\n", cmd->header.cmd_no); break; } } static void kvaser_usb_hydra_handle_cmd(const struct kvaser_usb *dev, const struct kvaser_cmd *cmd) { if (cmd->header.cmd_no == CMD_EXTENDED) kvaser_usb_hydra_handle_cmd_ext (dev, (struct kvaser_cmd_ext *)cmd); else kvaser_usb_hydra_handle_cmd_std(dev, cmd); } static void * kvaser_usb_hydra_frame_to_cmd_ext(const struct kvaser_usb_net_priv *priv, const struct sk_buff *skb, int *cmd_len, u16 transid) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd_ext *cmd; struct canfd_frame *cf = (struct canfd_frame *)skb->data; u8 dlc; u8 nbr_of_bytes = cf->len; u32 flags; u32 id; u32 kcan_id; u32 kcan_header; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return NULL; kvaser_usb_hydra_set_cmd_dest_he ((struct kvaser_cmd *)cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid((struct kvaser_cmd *)cmd, transid); cmd->header.cmd_no = CMD_EXTENDED; cmd->cmd_no_ext = CMD_TX_CAN_MESSAGE_FD; *cmd_len = ALIGN(sizeof(struct kvaser_cmd_ext) - sizeof(cmd->tx_can.kcan_payload) + nbr_of_bytes, 8); cmd->len = cpu_to_le16(*cmd_len); if (can_is_canfd_skb(skb)) dlc = can_fd_len2dlc(cf->len); else dlc = can_get_cc_dlc((struct can_frame *)cf, priv->can.ctrlmode); cmd->tx_can.databytes = nbr_of_bytes; cmd->tx_can.dlc = dlc; if (cf->can_id & CAN_EFF_FLAG) { id = cf->can_id & CAN_EFF_MASK; flags = KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID; kcan_id = (cf->can_id & CAN_EFF_MASK) | KVASER_USB_KCAN_DATA_IDE | KVASER_USB_KCAN_DATA_SRR; } else { id = cf->can_id & CAN_SFF_MASK; flags = 0; kcan_id = cf->can_id & CAN_SFF_MASK; } if (cf->can_id & CAN_ERR_FLAG) flags |= KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME; kcan_header = ((dlc << KVASER_USB_KCAN_DATA_DLC_SHIFT) & KVASER_USB_KCAN_DATA_DLC_MASK) | KVASER_USB_KCAN_DATA_AREQ | (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT ? KVASER_USB_KCAN_DATA_OSM : 0); if (can_is_canfd_skb(skb)) { kcan_header |= KVASER_USB_KCAN_DATA_FDF | (cf->flags & CANFD_BRS ? 
KVASER_USB_KCAN_DATA_BRS : 0); } else { if (cf->can_id & CAN_RTR_FLAG) { kcan_id |= KVASER_USB_KCAN_DATA_RTR; cmd->tx_can.databytes = 0; flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME; } } cmd->tx_can.kcan_id = cpu_to_le32(kcan_id); cmd->tx_can.id = cpu_to_le32(id); cmd->tx_can.flags = cpu_to_le32(flags); cmd->tx_can.kcan_header = cpu_to_le32(kcan_header); memcpy(cmd->tx_can.kcan_payload, cf->data, nbr_of_bytes); return cmd; } static void * kvaser_usb_hydra_frame_to_cmd_std(const struct kvaser_usb_net_priv *priv, const struct sk_buff *skb, int *cmd_len, u16 transid) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; struct can_frame *cf = (struct can_frame *)skb->data; u32 flags; u32 id; cmd = kzalloc(sizeof(*cmd), GFP_ATOMIC); if (!cmd) return NULL; kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid(cmd, transid); cmd->header.cmd_no = CMD_TX_CAN_MESSAGE; *cmd_len = ALIGN(sizeof(struct kvaser_cmd), 8); if (cf->can_id & CAN_EFF_FLAG) { id = (cf->can_id & CAN_EFF_MASK); id |= KVASER_USB_HYDRA_EXTENDED_FRAME_ID; } else { id = cf->can_id & CAN_SFF_MASK; } cmd->tx_can.dlc = can_get_cc_dlc(cf, priv->can.ctrlmode); flags = (cf->can_id & CAN_EFF_FLAG ? KVASER_USB_HYDRA_CF_FLAG_EXTENDED_ID : 0); if (cf->can_id & CAN_RTR_FLAG) flags |= KVASER_USB_HYDRA_CF_FLAG_REMOTE_FRAME; flags |= (cf->can_id & CAN_ERR_FLAG ? KVASER_USB_HYDRA_CF_FLAG_ERROR_FRAME : 0); cmd->tx_can.id = cpu_to_le32(id); cmd->tx_can.flags = flags; memcpy(cmd->tx_can.data, cf->data, cf->len); return cmd; } static int kvaser_usb_hydra_set_mode(struct net_device *netdev, enum can_mode mode) { int err = 0; switch (mode) { case CAN_MODE_START: /* CAN controller automatically recovers from BUS_OFF */ break; default: err = -EOPNOTSUPP; } return err; } static int kvaser_usb_hydra_get_busparams(struct kvaser_usb_net_priv *priv, int busparams_type) { struct kvaser_usb *dev = priv->dev; struct kvaser_usb_net_hydra_priv *hydra = priv->sub_priv; struct kvaser_cmd *cmd; size_t cmd_len; int err; if (!hydra) return -EINVAL; cmd = kcalloc(1, sizeof(struct kvaser_cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = CMD_GET_BUSPARAMS_REQ; cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); cmd->get_busparams_req.type = busparams_type; hydra->pending_get_busparams_type = busparams_type; reinit_completion(&priv->get_busparams_comp); err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) return err; if (!wait_for_completion_timeout(&priv->get_busparams_comp, msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return err; } static int kvaser_usb_hydra_get_nominal_busparams(struct kvaser_usb_net_priv *priv) { return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CAN); } static int kvaser_usb_hydra_get_data_busparams(struct kvaser_usb_net_priv *priv) { return kvaser_usb_hydra_get_busparams(priv, KVASER_USB_HYDRA_BUSPARAM_TYPE_CANFD); } static int kvaser_usb_hydra_set_bittiming(const struct net_device *netdev, const struct kvaser_usb_busparams *busparams) { struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; size_t cmd_len; int err; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_REQ; cmd_len = kvaser_usb_hydra_cmd_size(cmd); 
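	/* CMD_SET_BUSPARAMS_REQ uses the struct kvaser_cmd_set_busparams
	 * layout defined above; only the nominal (arbitration phase) part is
	 * filled in here, while the CAN FD data phase parameters are sent
	 * separately by kvaser_usb_hydra_set_data_bittiming().
	 */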
memcpy(&cmd->set_busparams_req.busparams_nominal, busparams, sizeof(cmd->set_busparams_req.busparams_nominal)); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); return err; } static int kvaser_usb_hydra_set_data_bittiming(const struct net_device *netdev, const struct kvaser_usb_busparams *busparams) { struct kvaser_cmd *cmd; struct kvaser_usb_net_priv *priv = netdev_priv(netdev); struct kvaser_usb *dev = priv->dev; size_t cmd_len; int err; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = CMD_SET_BUSPARAMS_FD_REQ; cmd_len = kvaser_usb_hydra_cmd_size(cmd); memcpy(&cmd->set_busparams_req.busparams_data, busparams, sizeof(cmd->set_busparams_req.busparams_data)); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO) cmd->set_busparams_req.canfd_mode = KVASER_USB_HYDRA_BUS_MODE_NONISO; else cmd->set_busparams_req.canfd_mode = KVASER_USB_HYDRA_BUS_MODE_CANFD_ISO; } kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); return err; } static int kvaser_usb_hydra_get_berr_counter(const struct net_device *netdev, struct can_berr_counter *bec) { struct kvaser_usb_net_priv *priv = netdev_priv(netdev); int err; err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_GET_CHIP_STATE_REQ, priv->channel); if (err) return err; *bec = priv->bec; return 0; } static int kvaser_usb_hydra_setup_endpoints(struct kvaser_usb *dev) { const struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *ep; int i; iface_desc = dev->intf->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { ep = &iface_desc->endpoint[i].desc; if (!dev->bulk_in && usb_endpoint_is_bulk_in(ep) && ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_IN_ADDR) dev->bulk_in = ep; if (!dev->bulk_out && usb_endpoint_is_bulk_out(ep) && ep->bEndpointAddress == KVASER_USB_HYDRA_BULK_EP_OUT_ADDR) dev->bulk_out = ep; if (dev->bulk_in && dev->bulk_out) return 0; } return -ENODEV; } static int kvaser_usb_hydra_init_card(struct kvaser_usb *dev) { int err; unsigned int i; struct kvaser_usb_dev_card_data_hydra *card_data = &dev->card_data.hydra; card_data->transid = KVASER_USB_HYDRA_MIN_TRANSID; spin_lock_init(&card_data->transid_lock); memset(card_data->usb_rx_leftover, 0, KVASER_USB_HYDRA_MAX_CMD_LEN); card_data->usb_rx_leftover_len = 0; spin_lock_init(&card_data->usb_rx_leftover_lock); memset(card_data->channel_to_he, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL, sizeof(card_data->channel_to_he)); card_data->sysdbg_he = 0; for (i = 0; i < KVASER_USB_MAX_NET_DEVICES; i++) { err = kvaser_usb_hydra_map_channel (dev, (KVASER_USB_HYDRA_TRANSID_CANHE | i), i, "CAN"); if (err) { dev_err(&dev->intf->dev, "CMD_MAP_CHANNEL_REQ failed for CAN%u\n", i); return err; } } err = kvaser_usb_hydra_map_channel(dev, KVASER_USB_HYDRA_TRANSID_SYSDBG, 0, "SYSDBG"); if (err) { dev_err(&dev->intf->dev, "CMD_MAP_CHANNEL_REQ failed for SYSDBG\n"); return err; } return 0; } static int kvaser_usb_hydra_init_channel(struct kvaser_usb_net_priv *priv) { struct kvaser_usb_net_hydra_priv *hydra; hydra = devm_kzalloc(&priv->dev->intf->dev, sizeof(*hydra), GFP_KERNEL); if (!hydra) return -ENOMEM; priv->sub_priv = hydra; return 0; } static int 
kvaser_usb_hydra_get_software_info(struct kvaser_usb *dev) { struct kvaser_cmd cmd; int err; err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_SOFTWARE_INFO_REQ, -1); if (err) return err; memset(&cmd, 0, sizeof(struct kvaser_cmd)); err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_INFO_RESP, &cmd); if (err) return err; dev->max_tx_urbs = min_t(unsigned int, KVASER_USB_MAX_TX_URBS, le16_to_cpu(cmd.sw_info.max_outstanding_tx)); return 0; } static int kvaser_usb_hydra_get_software_details(struct kvaser_usb *dev) { struct kvaser_cmd *cmd; size_t cmd_len; int err; u32 flags; struct kvaser_usb_dev_card_data *card_data = &dev->card_data; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = CMD_GET_SOFTWARE_DETAILS_REQ; cmd_len = kvaser_usb_hydra_cmd_size(cmd); cmd->sw_detail_req.use_ext_cmd = 1; kvaser_usb_hydra_set_cmd_dest_he (cmd, KVASER_USB_HYDRA_HE_ADDRESS_ILLEGAL); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); err = kvaser_usb_send_cmd(dev, cmd, cmd_len); if (err) goto end; err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_SOFTWARE_DETAILS_RESP, cmd); if (err) goto end; dev->fw_version = le32_to_cpu(cmd->sw_detail_res.sw_version); flags = le32_to_cpu(cmd->sw_detail_res.sw_flags); if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BAD) { dev_err(&dev->intf->dev, "Bad firmware, device refuse to run!\n"); err = -EINVAL; goto end; } if (flags & KVASER_USB_HYDRA_SW_FLAG_FW_BETA) dev_info(&dev->intf->dev, "Beta firmware in use\n"); if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CAP) card_data->capabilities |= KVASER_USB_CAP_EXT_CAP; if (flags & KVASER_USB_HYDRA_SW_FLAG_EXT_CMD) card_data->capabilities |= KVASER_USB_HYDRA_CAP_EXT_CMD; if (flags & KVASER_USB_HYDRA_SW_FLAG_CANFD) card_data->ctrlmode_supported |= CAN_CTRLMODE_FD; if (flags & KVASER_USB_HYDRA_SW_FLAG_NONISO) card_data->ctrlmode_supported |= CAN_CTRLMODE_FD_NON_ISO; if (flags & KVASER_USB_HYDRA_SW_FLAG_FREQ_80M) dev->cfg = &kvaser_usb_hydra_dev_cfg_kcan; else if (flags & KVASER_USB_HYDRA_SW_FLAG_CAN_FREQ_80M) dev->cfg = &kvaser_usb_hydra_dev_cfg_rt; else dev->cfg = &kvaser_usb_hydra_dev_cfg_flexc; end: kfree(cmd); return err; } static int kvaser_usb_hydra_get_card_info(struct kvaser_usb *dev) { struct kvaser_cmd cmd; int err; err = kvaser_usb_hydra_send_simple_cmd(dev, CMD_GET_CARD_INFO_REQ, -1); if (err) return err; memset(&cmd, 0, sizeof(struct kvaser_cmd)); err = kvaser_usb_hydra_wait_cmd(dev, CMD_GET_CARD_INFO_RESP, &cmd); if (err) return err; dev->nchannels = cmd.card_info.nchannels; if (dev->nchannels > KVASER_USB_MAX_NET_DEVICES) return -EINVAL; return 0; } static int kvaser_usb_hydra_get_capabilities(struct kvaser_usb *dev) { int err; u16 status; if (!(dev->card_data.capabilities & KVASER_USB_CAP_EXT_CAP)) { dev_info(&dev->intf->dev, "No extended capability support. 
Upgrade your device.\n"); return 0; } err = kvaser_usb_hydra_get_single_capability (dev, KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE, &status); if (err) return err; if (status) dev_info(&dev->intf->dev, "KVASER_USB_HYDRA_CAP_CMD_LISTEN_MODE failed %u\n", status); err = kvaser_usb_hydra_get_single_capability (dev, KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT, &status); if (err) return err; if (status) dev_info(&dev->intf->dev, "KVASER_USB_HYDRA_CAP_CMD_ERR_REPORT failed %u\n", status); err = kvaser_usb_hydra_get_single_capability (dev, KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT, &status); if (err) return err; if (status) dev_info(&dev->intf->dev, "KVASER_USB_HYDRA_CAP_CMD_ONE_SHOT failed %u\n", status); return 0; } static int kvaser_usb_hydra_set_opt_mode(const struct kvaser_usb_net_priv *priv) { struct kvaser_usb *dev = priv->dev; struct kvaser_cmd *cmd; size_t cmd_len; int err; if ((priv->can.ctrlmode & (CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO)) == CAN_CTRLMODE_FD_NON_ISO) { netdev_warn(priv->netdev, "CTRLMODE_FD shall be on if CTRLMODE_FD_NON_ISO is on\n"); return -EINVAL; } cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->header.cmd_no = CMD_SET_DRIVERMODE_REQ; cmd_len = kvaser_usb_hydra_cmd_size(cmd); kvaser_usb_hydra_set_cmd_dest_he (cmd, dev->card_data.hydra.channel_to_he[priv->channel]); kvaser_usb_hydra_set_cmd_transid (cmd, kvaser_usb_hydra_get_next_transid(dev)); if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_LISTEN; else cmd->set_ctrlmode.mode = KVASER_USB_HYDRA_CTRLMODE_NORMAL; err = kvaser_usb_send_cmd(dev, cmd, cmd_len); kfree(cmd); return err; } static int kvaser_usb_hydra_start_chip(struct kvaser_usb_net_priv *priv) { int err; reinit_completion(&priv->start_comp); err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_START_CHIP_REQ, priv->channel); if (err) return err; if (!wait_for_completion_timeout(&priv->start_comp, msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return 0; } static int kvaser_usb_hydra_stop_chip(struct kvaser_usb_net_priv *priv) { int err; reinit_completion(&priv->stop_comp); /* Make sure we do not report invalid BUS_OFF from CMD_CHIP_STATE_EVENT * see comment in kvaser_usb_hydra_update_state() */ priv->can.state = CAN_STATE_STOPPED; err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_STOP_CHIP_REQ, priv->channel); if (err) return err; if (!wait_for_completion_timeout(&priv->stop_comp, msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return 0; } static int kvaser_usb_hydra_flush_queue(struct kvaser_usb_net_priv *priv) { int err; reinit_completion(&priv->flush_comp); err = kvaser_usb_hydra_send_simple_cmd(priv->dev, CMD_FLUSH_QUEUE, priv->channel); if (err) return err; if (!wait_for_completion_timeout(&priv->flush_comp, msecs_to_jiffies(KVASER_USB_TIMEOUT))) return -ETIMEDOUT; return 0; } /* A single extended hydra command can be transmitted in multiple transfers * We have to buffer partial hydra commands, and handle them on next callback. 
*/ static void kvaser_usb_hydra_read_bulk_callback(struct kvaser_usb *dev, void *buf, int len) { unsigned long irq_flags; struct kvaser_cmd *cmd; int pos = 0; size_t cmd_len; struct kvaser_usb_dev_card_data_hydra *card_data = &dev->card_data.hydra; int usb_rx_leftover_len; spinlock_t *usb_rx_leftover_lock = &card_data->usb_rx_leftover_lock; spin_lock_irqsave(usb_rx_leftover_lock, irq_flags); usb_rx_leftover_len = card_data->usb_rx_leftover_len; if (usb_rx_leftover_len) { int remaining_bytes; cmd = (struct kvaser_cmd *)card_data->usb_rx_leftover; cmd_len = kvaser_usb_hydra_cmd_size(cmd); remaining_bytes = min_t(unsigned int, len, cmd_len - usb_rx_leftover_len); /* Make sure we do not overflow usb_rx_leftover */ if (remaining_bytes + usb_rx_leftover_len > KVASER_USB_HYDRA_MAX_CMD_LEN) { dev_err(&dev->intf->dev, "Format error\n"); spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags); return; } memcpy(card_data->usb_rx_leftover + usb_rx_leftover_len, buf, remaining_bytes); pos += remaining_bytes; if (remaining_bytes + usb_rx_leftover_len == cmd_len) { kvaser_usb_hydra_handle_cmd(dev, cmd); usb_rx_leftover_len = 0; } else { /* Command still not complete */ usb_rx_leftover_len += remaining_bytes; } card_data->usb_rx_leftover_len = usb_rx_leftover_len; } spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags); while (pos < len) { cmd = buf + pos; cmd_len = kvaser_usb_hydra_cmd_size(cmd); if (pos + cmd_len > len) { /* We got first part of a command */ int leftover_bytes; leftover_bytes = len - pos; /* Make sure we do not overflow usb_rx_leftover */ if (leftover_bytes > KVASER_USB_HYDRA_MAX_CMD_LEN) { dev_err(&dev->intf->dev, "Format error\n"); return; } spin_lock_irqsave(usb_rx_leftover_lock, irq_flags); memcpy(card_data->usb_rx_leftover, buf + pos, leftover_bytes); card_data->usb_rx_leftover_len = leftover_bytes; spin_unlock_irqrestore(usb_rx_leftover_lock, irq_flags); break; } kvaser_usb_hydra_handle_cmd(dev, cmd); pos += cmd_len; } } static void * kvaser_usb_hydra_frame_to_cmd(const struct kvaser_usb_net_priv *priv, const struct sk_buff *skb, int *cmd_len, u16 transid) { void *buf; if (priv->dev->card_data.capabilities & KVASER_USB_HYDRA_CAP_EXT_CMD) buf = kvaser_usb_hydra_frame_to_cmd_ext(priv, skb, cmd_len, transid); else buf = kvaser_usb_hydra_frame_to_cmd_std(priv, skb, cmd_len, transid); return buf; } const struct kvaser_usb_dev_ops kvaser_usb_hydra_dev_ops = { .dev_set_mode = kvaser_usb_hydra_set_mode, .dev_set_bittiming = kvaser_usb_hydra_set_bittiming, .dev_get_busparams = kvaser_usb_hydra_get_nominal_busparams, .dev_set_data_bittiming = kvaser_usb_hydra_set_data_bittiming, .dev_get_data_busparams = kvaser_usb_hydra_get_data_busparams, .dev_get_berr_counter = kvaser_usb_hydra_get_berr_counter, .dev_setup_endpoints = kvaser_usb_hydra_setup_endpoints, .dev_init_card = kvaser_usb_hydra_init_card, .dev_init_channel = kvaser_usb_hydra_init_channel, .dev_get_software_info = kvaser_usb_hydra_get_software_info, .dev_get_software_details = kvaser_usb_hydra_get_software_details, .dev_get_card_info = kvaser_usb_hydra_get_card_info, .dev_get_capabilities = kvaser_usb_hydra_get_capabilities, .dev_set_opt_mode = kvaser_usb_hydra_set_opt_mode, .dev_start_chip = kvaser_usb_hydra_start_chip, .dev_stop_chip = kvaser_usb_hydra_stop_chip, .dev_reset_chip = NULL, .dev_flush_queue = kvaser_usb_hydra_flush_queue, .dev_read_bulk_callback = kvaser_usb_hydra_read_bulk_callback, .dev_frame_to_cmd = kvaser_usb_hydra_frame_to_cmd, }; static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_kcan = { .clock 
= { .freq = 80 * MEGA /* Hz */, }, .timestamp_freq = 80, .bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c, .data_bittiming_const = &kvaser_usb_hydra_kcan_bittiming_c, }; static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_flexc = { .clock = { .freq = 24 * MEGA /* Hz */, }, .timestamp_freq = 1, .bittiming_const = &kvaser_usb_flexc_bittiming_const, }; static const struct kvaser_usb_dev_cfg kvaser_usb_hydra_dev_cfg_rt = { .clock = { .freq = 80 * MEGA /* Hz */, }, .timestamp_freq = 24, .bittiming_const = &kvaser_usb_hydra_rt_bittiming_c, .data_bittiming_const = &kvaser_usb_hydra_rtd_bittiming_c, };
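/* Illustrative, self-contained sketch (not from the original source): how the
 * 6-bit source HE address is split across the command header, matching
 * kvaser_usb_hydra_get_cmd_src_he() above. Bits 5..4 of the source HE live in
 * bits 7..6 of he_addr, bits 3..0 live in bits 15..12 of transid, and the low
 * 12 bits of transid carry the transaction id itself. The helper and the
 * example values below are hypothetical, but any hosted C compiler can check
 * the round trip:
 *
 *	#include <assert.h>
 *	#include <stdint.h>
 *
 *	static uint8_t unpack_src_he(uint8_t he_addr, uint16_t transid)
 *	{
 *		// mirrors the driver: (he_addr & GENMASK(7, 6)) >> 2 |
 *		//                     transid >> KVASER_USB_HYDRA_TRANSID_BITS
 *		return (uint8_t)(((he_addr & 0xc0) >> 2) | (transid >> 12));
 *	}
 *
 *	int main(void)
 *	{
 *		uint8_t src_he = 0x25;                  // any 6-bit HE address
 *		uint8_t he_addr = (src_he & 0x30) << 2; // top two bits
 *		uint16_t transid = (uint16_t)((src_he & 0x0f) << 12) | 0x123;
 *
 *		assert(unpack_src_he(he_addr, transid) == src_he);
 *		return 0;
 *	}
 */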
linux-master
drivers/net/can/usb/kvaser_usb/kvaser_usb_hydra.c
// SPDX-License-Identifier: GPL-2.0-only /* CAN bus driver for Holt HI3110 CAN Controller with SPI Interface * * Copyright(C) Timesys Corporation 2016 * * Based on Microchip 251x CAN Controller (mcp251x) Linux kernel driver * Copyright 2009 Christian Pellegrin EVOL S.r.l. * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved. * Copyright 2006 Arcom Control Systems Ltd. * * Based on CAN bus driver for the CCAN controller written by * - Sascha Hauer, Marc Kleine-Budde, Pengutronix * - Simon Kallweit, intefo AG * Copyright 2007 */ #include <linux/can/core.h> #include <linux/can/dev.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/ethtool.h> #include <linux/freezer.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/uaccess.h> #define HI3110_MASTER_RESET 0x56 #define HI3110_READ_CTRL0 0xD2 #define HI3110_READ_CTRL1 0xD4 #define HI3110_READ_STATF 0xE2 #define HI3110_WRITE_CTRL0 0x14 #define HI3110_WRITE_CTRL1 0x16 #define HI3110_WRITE_INTE 0x1C #define HI3110_WRITE_BTR0 0x18 #define HI3110_WRITE_BTR1 0x1A #define HI3110_READ_BTR0 0xD6 #define HI3110_READ_BTR1 0xD8 #define HI3110_READ_INTF 0xDE #define HI3110_READ_ERR 0xDC #define HI3110_READ_FIFO_WOTIME 0x48 #define HI3110_WRITE_FIFO 0x12 #define HI3110_READ_MESSTAT 0xDA #define HI3110_READ_REC 0xEA #define HI3110_READ_TEC 0xEC #define HI3110_CTRL0_MODE_MASK (7 << 5) #define HI3110_CTRL0_NORMAL_MODE (0 << 5) #define HI3110_CTRL0_LOOPBACK_MODE (1 << 5) #define HI3110_CTRL0_MONITOR_MODE (2 << 5) #define HI3110_CTRL0_SLEEP_MODE (3 << 5) #define HI3110_CTRL0_INIT_MODE (4 << 5) #define HI3110_CTRL1_TXEN BIT(7) #define HI3110_INT_RXTMP BIT(7) #define HI3110_INT_RXFIFO BIT(6) #define HI3110_INT_TXCPLT BIT(5) #define HI3110_INT_BUSERR BIT(4) #define HI3110_INT_MCHG BIT(3) #define HI3110_INT_WAKEUP BIT(2) #define HI3110_INT_F1MESS BIT(1) #define HI3110_INT_F0MESS BIT(0) #define HI3110_ERR_BUSOFF BIT(7) #define HI3110_ERR_TXERRP BIT(6) #define HI3110_ERR_RXERRP BIT(5) #define HI3110_ERR_BITERR BIT(4) #define HI3110_ERR_FRMERR BIT(3) #define HI3110_ERR_CRCERR BIT(2) #define HI3110_ERR_ACKERR BIT(1) #define HI3110_ERR_STUFERR BIT(0) #define HI3110_ERR_PROTOCOL_MASK (0x1F) #define HI3110_ERR_PASSIVE_MASK (0x60) #define HI3110_STAT_RXFMTY BIT(1) #define HI3110_STAT_BUSOFF BIT(2) #define HI3110_STAT_ERRP BIT(3) #define HI3110_STAT_ERRW BIT(4) #define HI3110_STAT_TXMTY BIT(7) #define HI3110_BTR0_SJW_SHIFT 6 #define HI3110_BTR0_BRP_SHIFT 0 #define HI3110_BTR1_SAMP_3PERBIT (1 << 7) #define HI3110_BTR1_SAMP_1PERBIT (0 << 7) #define HI3110_BTR1_TSEG2_SHIFT 4 #define HI3110_BTR1_TSEG1_SHIFT 0 #define HI3110_FIFO_WOTIME_TAG_OFF 0 #define HI3110_FIFO_WOTIME_ID_OFF 1 #define HI3110_FIFO_WOTIME_DLC_OFF 5 #define HI3110_FIFO_WOTIME_DAT_OFF 6 #define HI3110_FIFO_WOTIME_TAG_IDE BIT(7) #define HI3110_FIFO_WOTIME_ID_RTR BIT(0) #define HI3110_FIFO_TAG_OFF 0 #define HI3110_FIFO_ID_OFF 1 #define HI3110_FIFO_STD_DLC_OFF 3 #define HI3110_FIFO_STD_DATA_OFF 4 #define HI3110_FIFO_EXT_DLC_OFF 5 #define HI3110_FIFO_EXT_DATA_OFF 6 #define HI3110_CAN_MAX_DATA_LEN 8 #define HI3110_RX_BUF_LEN 15 #define HI3110_TX_STD_BUF_LEN 12 #define HI3110_TX_EXT_BUF_LEN 14 #define HI3110_CAN_FRAME_MAX_BITS 128 #define HI3110_EFF_FLAGS 0x18 /* 
IDE + SRR */ #define HI3110_TX_ECHO_SKB_MAX 1 #define HI3110_OST_DELAY_MS (10) #define DEVICE_NAME "hi3110" static const struct can_bittiming_const hi3110_bittiming_const = { .name = DEVICE_NAME, .tseg1_min = 2, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; enum hi3110_model { CAN_HI3110_HI3110 = 0x3110, }; struct hi3110_priv { struct can_priv can; struct net_device *net; struct spi_device *spi; enum hi3110_model model; struct mutex hi3110_lock; /* SPI device lock */ u8 *spi_tx_buf; u8 *spi_rx_buf; struct sk_buff *tx_skb; struct workqueue_struct *wq; struct work_struct tx_work; struct work_struct restart_work; int force_quit; int after_suspend; #define HI3110_AFTER_SUSPEND_UP 1 #define HI3110_AFTER_SUSPEND_DOWN 2 #define HI3110_AFTER_SUSPEND_POWER 4 #define HI3110_AFTER_SUSPEND_RESTART 8 int restart_tx; bool tx_busy; struct regulator *power; struct regulator *transceiver; struct clk *clk; }; static void hi3110_clean(struct net_device *net) { struct hi3110_priv *priv = netdev_priv(net); if (priv->tx_skb || priv->tx_busy) net->stats.tx_errors++; dev_kfree_skb(priv->tx_skb); if (priv->tx_busy) can_free_echo_skb(priv->net, 0, NULL); priv->tx_skb = NULL; priv->tx_busy = false; } /* Note about handling of error return of hi3110_spi_trans: accessing * registers via SPI is not really different conceptually than using * normal I/O assembler instructions, although it's much more * complicated from a practical POV. So it's not advisable to always * check the return value of this function. Imagine that every * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0) * error();", it would be a great mess (well there are some situation * when exception handling C++ like could be useful after all). So we * just check that transfers are OK at the beginning of our * conversation with the chip and to avoid doing really nasty things * (like injecting bogus packets in the network stack). 
*/ static int hi3110_spi_trans(struct spi_device *spi, int len) { struct hi3110_priv *priv = spi_get_drvdata(spi); struct spi_transfer t = { .tx_buf = priv->spi_tx_buf, .rx_buf = priv->spi_rx_buf, .len = len, .cs_change = 0, }; struct spi_message m; int ret; spi_message_init(&m); spi_message_add_tail(&t, &m); ret = spi_sync(spi, &m); if (ret) dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret); return ret; } static int hi3110_cmd(struct spi_device *spi, u8 command) { struct hi3110_priv *priv = spi_get_drvdata(spi); priv->spi_tx_buf[0] = command; dev_dbg(&spi->dev, "hi3110_cmd: %02X\n", command); return hi3110_spi_trans(spi, 1); } static u8 hi3110_read(struct spi_device *spi, u8 command) { struct hi3110_priv *priv = spi_get_drvdata(spi); u8 val = 0; priv->spi_tx_buf[0] = command; hi3110_spi_trans(spi, 2); val = priv->spi_rx_buf[1]; return val; } static void hi3110_write(struct spi_device *spi, u8 reg, u8 val) { struct hi3110_priv *priv = spi_get_drvdata(spi); priv->spi_tx_buf[0] = reg; priv->spi_tx_buf[1] = val; hi3110_spi_trans(spi, 2); } static void hi3110_hw_tx_frame(struct spi_device *spi, u8 *buf, int len) { struct hi3110_priv *priv = spi_get_drvdata(spi); priv->spi_tx_buf[0] = HI3110_WRITE_FIFO; memcpy(priv->spi_tx_buf + 1, buf, len); hi3110_spi_trans(spi, len + 1); } static void hi3110_hw_tx(struct spi_device *spi, struct can_frame *frame) { u8 buf[HI3110_TX_EXT_BUF_LEN]; buf[HI3110_FIFO_TAG_OFF] = 0; if (frame->can_id & CAN_EFF_FLAG) { /* Extended frame */ buf[HI3110_FIFO_ID_OFF] = (frame->can_id & CAN_EFF_MASK) >> 21; buf[HI3110_FIFO_ID_OFF + 1] = (((frame->can_id & CAN_EFF_MASK) >> 13) & 0xe0) | HI3110_EFF_FLAGS | (((frame->can_id & CAN_EFF_MASK) >> 15) & 0x07); buf[HI3110_FIFO_ID_OFF + 2] = (frame->can_id & CAN_EFF_MASK) >> 7; buf[HI3110_FIFO_ID_OFF + 3] = ((frame->can_id & CAN_EFF_MASK) << 1) | ((frame->can_id & CAN_RTR_FLAG) ? 1 : 0); buf[HI3110_FIFO_EXT_DLC_OFF] = frame->len; memcpy(buf + HI3110_FIFO_EXT_DATA_OFF, frame->data, frame->len); hi3110_hw_tx_frame(spi, buf, HI3110_TX_EXT_BUF_LEN - (HI3110_CAN_MAX_DATA_LEN - frame->len)); } else { /* Standard frame */ buf[HI3110_FIFO_ID_OFF] = (frame->can_id & CAN_SFF_MASK) >> 3; buf[HI3110_FIFO_ID_OFF + 1] = ((frame->can_id & CAN_SFF_MASK) << 5) | ((frame->can_id & CAN_RTR_FLAG) ? 
(1 << 4) : 0); buf[HI3110_FIFO_STD_DLC_OFF] = frame->len; memcpy(buf + HI3110_FIFO_STD_DATA_OFF, frame->data, frame->len); hi3110_hw_tx_frame(spi, buf, HI3110_TX_STD_BUF_LEN - (HI3110_CAN_MAX_DATA_LEN - frame->len)); } } static void hi3110_hw_rx_frame(struct spi_device *spi, u8 *buf) { struct hi3110_priv *priv = spi_get_drvdata(spi); priv->spi_tx_buf[0] = HI3110_READ_FIFO_WOTIME; hi3110_spi_trans(spi, HI3110_RX_BUF_LEN); memcpy(buf, priv->spi_rx_buf + 1, HI3110_RX_BUF_LEN - 1); } static void hi3110_hw_rx(struct spi_device *spi) { struct hi3110_priv *priv = spi_get_drvdata(spi); struct sk_buff *skb; struct can_frame *frame; u8 buf[HI3110_RX_BUF_LEN - 1]; skb = alloc_can_skb(priv->net, &frame); if (!skb) { priv->net->stats.rx_dropped++; return; } hi3110_hw_rx_frame(spi, buf); if (buf[HI3110_FIFO_WOTIME_TAG_OFF] & HI3110_FIFO_WOTIME_TAG_IDE) { /* IDE is recessive (1), indicating extended 29-bit frame */ frame->can_id = CAN_EFF_FLAG; frame->can_id |= (buf[HI3110_FIFO_WOTIME_ID_OFF] << 21) | (((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0xE0) >> 5) << 18) | ((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0x07) << 15) | (buf[HI3110_FIFO_WOTIME_ID_OFF + 2] << 7) | (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] >> 1); } else { /* IDE is dominant (0), frame indicating standard 11-bit */ frame->can_id = (buf[HI3110_FIFO_WOTIME_ID_OFF] << 3) | ((buf[HI3110_FIFO_WOTIME_ID_OFF + 1] & 0xE0) >> 5); } /* Data length */ frame->len = can_cc_dlc2len(buf[HI3110_FIFO_WOTIME_DLC_OFF] & 0x0F); if (buf[HI3110_FIFO_WOTIME_ID_OFF + 3] & HI3110_FIFO_WOTIME_ID_RTR) { frame->can_id |= CAN_RTR_FLAG; } else { memcpy(frame->data, buf + HI3110_FIFO_WOTIME_DAT_OFF, frame->len); priv->net->stats.rx_bytes += frame->len; } priv->net->stats.rx_packets++; netif_rx(skb); } static void hi3110_hw_sleep(struct spi_device *spi) { hi3110_write(spi, HI3110_WRITE_CTRL0, HI3110_CTRL0_SLEEP_MODE); } static netdev_tx_t hi3110_hard_start_xmit(struct sk_buff *skb, struct net_device *net) { struct hi3110_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; if (priv->tx_skb || priv->tx_busy) { dev_err(&spi->dev, "hard_xmit called while tx busy\n"); return NETDEV_TX_BUSY; } if (can_dev_dropped_skb(net, skb)) return NETDEV_TX_OK; netif_stop_queue(net); priv->tx_skb = skb; queue_work(priv->wq, &priv->tx_work); return NETDEV_TX_OK; } static int hi3110_do_set_mode(struct net_device *net, enum can_mode mode) { struct hi3110_priv *priv = netdev_priv(net); switch (mode) { case CAN_MODE_START: hi3110_clean(net); /* We have to delay work since SPI I/O may sleep */ priv->can.state = CAN_STATE_ERROR_ACTIVE; priv->restart_tx = 1; if (priv->can.restart_ms == 0) priv->after_suspend = HI3110_AFTER_SUSPEND_RESTART; queue_work(priv->wq, &priv->restart_work); break; default: return -EOPNOTSUPP; } return 0; } static int hi3110_get_berr_counter(const struct net_device *net, struct can_berr_counter *bec) { struct hi3110_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; mutex_lock(&priv->hi3110_lock); bec->txerr = hi3110_read(spi, HI3110_READ_TEC); bec->rxerr = hi3110_read(spi, HI3110_READ_REC); mutex_unlock(&priv->hi3110_lock); return 0; } static int hi3110_set_normal_mode(struct spi_device *spi) { struct hi3110_priv *priv = spi_get_drvdata(spi); u8 reg = 0; hi3110_write(spi, HI3110_WRITE_INTE, HI3110_INT_BUSERR | HI3110_INT_RXFIFO | HI3110_INT_TXCPLT); /* Enable TX */ hi3110_write(spi, HI3110_WRITE_CTRL1, HI3110_CTRL1_TXEN); if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) reg = HI3110_CTRL0_LOOPBACK_MODE; else if (priv->can.ctrlmode & 
CAN_CTRLMODE_LISTENONLY) reg = HI3110_CTRL0_MONITOR_MODE; else reg = HI3110_CTRL0_NORMAL_MODE; hi3110_write(spi, HI3110_WRITE_CTRL0, reg); /* Wait for the device to enter the mode */ mdelay(HI3110_OST_DELAY_MS); reg = hi3110_read(spi, HI3110_READ_CTRL0); if ((reg & HI3110_CTRL0_MODE_MASK) != reg) return -EBUSY; priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; } static int hi3110_do_set_bittiming(struct net_device *net) { struct hi3110_priv *priv = netdev_priv(net); struct can_bittiming *bt = &priv->can.bittiming; struct spi_device *spi = priv->spi; hi3110_write(spi, HI3110_WRITE_BTR0, ((bt->sjw - 1) << HI3110_BTR0_SJW_SHIFT) | ((bt->brp - 1) << HI3110_BTR0_BRP_SHIFT)); hi3110_write(spi, HI3110_WRITE_BTR1, (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ? HI3110_BTR1_SAMP_3PERBIT : HI3110_BTR1_SAMP_1PERBIT) | ((bt->phase_seg1 + bt->prop_seg - 1) << HI3110_BTR1_TSEG1_SHIFT) | ((bt->phase_seg2 - 1) << HI3110_BTR1_TSEG2_SHIFT)); dev_dbg(&spi->dev, "BT: 0x%02x 0x%02x\n", hi3110_read(spi, HI3110_READ_BTR0), hi3110_read(spi, HI3110_READ_BTR1)); return 0; } static int hi3110_setup(struct net_device *net) { hi3110_do_set_bittiming(net); return 0; } static int hi3110_hw_reset(struct spi_device *spi) { u8 reg; int ret; /* Wait for oscillator startup timer after power up */ mdelay(HI3110_OST_DELAY_MS); ret = hi3110_cmd(spi, HI3110_MASTER_RESET); if (ret) return ret; /* Wait for oscillator startup timer after reset */ mdelay(HI3110_OST_DELAY_MS); reg = hi3110_read(spi, HI3110_READ_CTRL0); if ((reg & HI3110_CTRL0_MODE_MASK) != HI3110_CTRL0_INIT_MODE) return -ENODEV; /* As per the datasheet it appears the error flags are * not cleared on reset. Explicitly clear them by performing a read */ hi3110_read(spi, HI3110_READ_ERR); return 0; } static int hi3110_hw_probe(struct spi_device *spi) { u8 statf; hi3110_hw_reset(spi); /* Confirm correct operation by checking against reset values * in datasheet */ statf = hi3110_read(spi, HI3110_READ_STATF); dev_dbg(&spi->dev, "statf: %02X\n", statf); if (statf != 0x82) return -ENODEV; return 0; } static int hi3110_power_enable(struct regulator *reg, int enable) { if (IS_ERR_OR_NULL(reg)) return 0; if (enable) return regulator_enable(reg); else return regulator_disable(reg); } static int hi3110_stop(struct net_device *net) { struct hi3110_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; close_candev(net); priv->force_quit = 1; free_irq(spi->irq, priv); destroy_workqueue(priv->wq); priv->wq = NULL; mutex_lock(&priv->hi3110_lock); /* Disable transmit, interrupts and clear flags */ hi3110_write(spi, HI3110_WRITE_CTRL1, 0x0); hi3110_write(spi, HI3110_WRITE_INTE, 0x0); hi3110_read(spi, HI3110_READ_INTF); hi3110_clean(net); hi3110_hw_sleep(spi); hi3110_power_enable(priv->transceiver, 0); priv->can.state = CAN_STATE_STOPPED; mutex_unlock(&priv->hi3110_lock); return 0; } static void hi3110_tx_work_handler(struct work_struct *ws) { struct hi3110_priv *priv = container_of(ws, struct hi3110_priv, tx_work); struct spi_device *spi = priv->spi; struct net_device *net = priv->net; struct can_frame *frame; mutex_lock(&priv->hi3110_lock); if (priv->tx_skb) { if (priv->can.state == CAN_STATE_BUS_OFF) { hi3110_clean(net); } else { frame = (struct can_frame *)priv->tx_skb->data; hi3110_hw_tx(spi, frame); priv->tx_busy = true; can_put_echo_skb(priv->tx_skb, net, 0, 0); priv->tx_skb = NULL; } } mutex_unlock(&priv->hi3110_lock); } static void hi3110_restart_work_handler(struct work_struct *ws) { struct hi3110_priv *priv = container_of(ws, struct hi3110_priv, restart_work); 
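	/* Runs from the freezable workqueue: both the post-suspend re-init
	 * and a bus-off restart need SPI I/O, which may sleep and therefore
	 * cannot be done directly from hi3110_do_set_mode() or the IRQ path.
	 */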
struct spi_device *spi = priv->spi; struct net_device *net = priv->net; mutex_lock(&priv->hi3110_lock); if (priv->after_suspend) { hi3110_hw_reset(spi); hi3110_setup(net); if (priv->after_suspend & HI3110_AFTER_SUSPEND_RESTART) { hi3110_set_normal_mode(spi); } else if (priv->after_suspend & HI3110_AFTER_SUSPEND_UP) { netif_device_attach(net); hi3110_clean(net); hi3110_set_normal_mode(spi); netif_wake_queue(net); } else { hi3110_hw_sleep(spi); } priv->after_suspend = 0; priv->force_quit = 0; } if (priv->restart_tx) { priv->restart_tx = 0; hi3110_hw_reset(spi); hi3110_setup(net); hi3110_clean(net); hi3110_set_normal_mode(spi); netif_wake_queue(net); } mutex_unlock(&priv->hi3110_lock); } static irqreturn_t hi3110_can_ist(int irq, void *dev_id) { struct hi3110_priv *priv = dev_id; struct spi_device *spi = priv->spi; struct net_device *net = priv->net; mutex_lock(&priv->hi3110_lock); while (!priv->force_quit) { enum can_state new_state; u8 intf, eflag, statf; while (!(HI3110_STAT_RXFMTY & (statf = hi3110_read(spi, HI3110_READ_STATF)))) { hi3110_hw_rx(spi); } intf = hi3110_read(spi, HI3110_READ_INTF); eflag = hi3110_read(spi, HI3110_READ_ERR); /* Update can state */ if (eflag & HI3110_ERR_BUSOFF) new_state = CAN_STATE_BUS_OFF; else if (eflag & HI3110_ERR_PASSIVE_MASK) new_state = CAN_STATE_ERROR_PASSIVE; else if (statf & HI3110_STAT_ERRW) new_state = CAN_STATE_ERROR_WARNING; else new_state = CAN_STATE_ERROR_ACTIVE; if (new_state != priv->can.state) { struct can_frame *cf; struct sk_buff *skb; enum can_state rx_state, tx_state; u8 rxerr, txerr; skb = alloc_can_err_skb(net, &cf); if (!skb) break; txerr = hi3110_read(spi, HI3110_READ_TEC); rxerr = hi3110_read(spi, HI3110_READ_REC); tx_state = txerr >= rxerr ? new_state : 0; rx_state = txerr <= rxerr ? new_state : 0; can_change_state(net, cf, tx_state, rx_state); netif_rx(skb); if (new_state == CAN_STATE_BUS_OFF) { can_bus_off(net); if (priv->can.restart_ms == 0) { priv->force_quit = 1; hi3110_hw_sleep(spi); break; } } else { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; } } /* Update bus errors */ if ((intf & HI3110_INT_BUSERR) && (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) { struct can_frame *cf; struct sk_buff *skb; /* Check for protocol errors */ if (eflag & HI3110_ERR_PROTOCOL_MASK) { skb = alloc_can_err_skb(net, &cf); if (!skb) break; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; priv->can.can_stats.bus_error++; priv->net->stats.rx_errors++; if (eflag & HI3110_ERR_BITERR) cf->data[2] |= CAN_ERR_PROT_BIT; else if (eflag & HI3110_ERR_FRMERR) cf->data[2] |= CAN_ERR_PROT_FORM; else if (eflag & HI3110_ERR_STUFERR) cf->data[2] |= CAN_ERR_PROT_STUFF; else if (eflag & HI3110_ERR_CRCERR) cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; else if (eflag & HI3110_ERR_ACKERR) cf->data[3] |= CAN_ERR_PROT_LOC_ACK; cf->data[6] = hi3110_read(spi, HI3110_READ_TEC); cf->data[7] = hi3110_read(spi, HI3110_READ_REC); netdev_dbg(priv->net, "Bus Error\n"); netif_rx(skb); } } if (priv->tx_busy && statf & HI3110_STAT_TXMTY) { net->stats.tx_packets++; net->stats.tx_bytes += can_get_echo_skb(net, 0, NULL); priv->tx_busy = false; netif_wake_queue(net); } if (intf == 0) break; } mutex_unlock(&priv->hi3110_lock); return IRQ_HANDLED; } static int hi3110_open(struct net_device *net) { struct hi3110_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; unsigned long flags = IRQF_ONESHOT | IRQF_TRIGGER_HIGH; int ret; ret = open_candev(net); if (ret) return ret; mutex_lock(&priv->hi3110_lock); hi3110_power_enable(priv->transceiver, 1); 
priv->force_quit = 0; priv->tx_skb = NULL; priv->tx_busy = false; ret = request_threaded_irq(spi->irq, NULL, hi3110_can_ist, flags, DEVICE_NAME, priv); if (ret) { dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); goto out_close; } priv->wq = alloc_workqueue("hi3110_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0); if (!priv->wq) { ret = -ENOMEM; goto out_free_irq; } INIT_WORK(&priv->tx_work, hi3110_tx_work_handler); INIT_WORK(&priv->restart_work, hi3110_restart_work_handler); ret = hi3110_hw_reset(spi); if (ret) goto out_free_wq; ret = hi3110_setup(net); if (ret) goto out_free_wq; ret = hi3110_set_normal_mode(spi); if (ret) goto out_free_wq; netif_wake_queue(net); mutex_unlock(&priv->hi3110_lock); return 0; out_free_wq: destroy_workqueue(priv->wq); out_free_irq: free_irq(spi->irq, priv); hi3110_hw_sleep(spi); out_close: hi3110_power_enable(priv->transceiver, 0); close_candev(net); mutex_unlock(&priv->hi3110_lock); return ret; } static const struct net_device_ops hi3110_netdev_ops = { .ndo_open = hi3110_open, .ndo_stop = hi3110_stop, .ndo_start_xmit = hi3110_hard_start_xmit, }; static const struct ethtool_ops hi3110_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const struct of_device_id hi3110_of_match[] = { { .compatible = "holt,hi3110", .data = (void *)CAN_HI3110_HI3110, }, { } }; MODULE_DEVICE_TABLE(of, hi3110_of_match); static const struct spi_device_id hi3110_id_table[] = { { .name = "hi3110", .driver_data = (kernel_ulong_t)CAN_HI3110_HI3110, }, { } }; MODULE_DEVICE_TABLE(spi, hi3110_id_table); static int hi3110_can_probe(struct spi_device *spi) { struct device *dev = &spi->dev; struct net_device *net; struct hi3110_priv *priv; const void *match; struct clk *clk; u32 freq; int ret; clk = devm_clk_get_optional(&spi->dev, NULL); if (IS_ERR(clk)) return dev_err_probe(dev, PTR_ERR(clk), "no CAN clock source defined\n"); if (clk) { freq = clk_get_rate(clk); } else { ret = device_property_read_u32(dev, "clock-frequency", &freq); if (ret) return dev_err_probe(dev, ret, "Failed to get clock-frequency!\n"); } /* Sanity check */ if (freq > 40000000) return -ERANGE; /* Allocate can/net device */ net = alloc_candev(sizeof(struct hi3110_priv), HI3110_TX_ECHO_SKB_MAX); if (!net) return -ENOMEM; ret = clk_prepare_enable(clk); if (ret) goto out_free; net->netdev_ops = &hi3110_netdev_ops; net->ethtool_ops = &hi3110_ethtool_ops; net->flags |= IFF_ECHO; priv = netdev_priv(net); priv->can.bittiming_const = &hi3110_bittiming_const; priv->can.do_set_mode = hi3110_do_set_mode; priv->can.do_get_berr_counter = hi3110_get_berr_counter; priv->can.clock.freq = freq / 2; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING; match = device_get_match_data(dev); if (match) priv->model = (enum hi3110_model)(uintptr_t)match; else priv->model = spi_get_device_id(spi)->driver_data; priv->net = net; priv->clk = clk; spi_set_drvdata(spi, priv); /* Configure the SPI bus */ spi->bits_per_word = 8; ret = spi_setup(spi); if (ret) goto out_clk; priv->power = devm_regulator_get_optional(&spi->dev, "vdd"); priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { ret = -EPROBE_DEFER; goto out_clk; } ret = hi3110_power_enable(priv->power, 1); if (ret) goto out_clk; priv->spi = spi; mutex_init(&priv->hi3110_lock); priv->spi_tx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, GFP_KERNEL); if (!priv->spi_tx_buf) { ret = -ENOMEM; 
goto error_probe; } priv->spi_rx_buf = devm_kzalloc(&spi->dev, HI3110_RX_BUF_LEN, GFP_KERNEL); if (!priv->spi_rx_buf) { ret = -ENOMEM; goto error_probe; } SET_NETDEV_DEV(net, &spi->dev); ret = hi3110_hw_probe(spi); if (ret) { dev_err_probe(dev, ret, "Cannot initialize %x. Wrong wiring?\n", priv->model); goto error_probe; } hi3110_hw_sleep(spi); ret = register_candev(net); if (ret) goto error_probe; netdev_info(net, "%x successfully initialized.\n", priv->model); return 0; error_probe: hi3110_power_enable(priv->power, 0); out_clk: clk_disable_unprepare(clk); out_free: free_candev(net); return dev_err_probe(dev, ret, "Probe failed\n"); } static void hi3110_can_remove(struct spi_device *spi) { struct hi3110_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net; unregister_candev(net); hi3110_power_enable(priv->power, 0); clk_disable_unprepare(priv->clk); free_candev(net); } static int __maybe_unused hi3110_can_suspend(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct hi3110_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net; priv->force_quit = 1; disable_irq(spi->irq); /* Note: at this point neither IST nor workqueues are running. * open/stop cannot be called anyway so locking is not needed */ if (netif_running(net)) { netif_device_detach(net); hi3110_hw_sleep(spi); hi3110_power_enable(priv->transceiver, 0); priv->after_suspend = HI3110_AFTER_SUSPEND_UP; } else { priv->after_suspend = HI3110_AFTER_SUSPEND_DOWN; } if (!IS_ERR_OR_NULL(priv->power)) { regulator_disable(priv->power); priv->after_suspend |= HI3110_AFTER_SUSPEND_POWER; } return 0; } static int __maybe_unused hi3110_can_resume(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct hi3110_priv *priv = spi_get_drvdata(spi); if (priv->after_suspend & HI3110_AFTER_SUSPEND_POWER) hi3110_power_enable(priv->power, 1); if (priv->after_suspend & HI3110_AFTER_SUSPEND_UP) { hi3110_power_enable(priv->transceiver, 1); queue_work(priv->wq, &priv->restart_work); } else { priv->after_suspend = 0; } priv->force_quit = 0; enable_irq(spi->irq); return 0; } static SIMPLE_DEV_PM_OPS(hi3110_can_pm_ops, hi3110_can_suspend, hi3110_can_resume); static struct spi_driver hi3110_can_driver = { .driver = { .name = DEVICE_NAME, .of_match_table = hi3110_of_match, .pm = &hi3110_can_pm_ops, }, .id_table = hi3110_id_table, .probe = hi3110_can_probe, .remove = hi3110_can_remove, }; module_spi_driver(hi3110_can_driver); MODULE_AUTHOR("Akshay Bhat <[email protected]>"); MODULE_AUTHOR("Casey Fitzpatrick <[email protected]>"); MODULE_DESCRIPTION("Holt HI-3110 CAN driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/can/spi/hi311x.c
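/*
 * Editor's note: a minimal, standalone userspace sketch of the 29-bit
 * extended-ID reassembly that hi3110_hw_rx() above performs on the four
 * FIFO ID bytes. The decode expression is copied from the driver; the
 * pack helper, its name, and the test ID in main() are hypothetical and
 * exist only to show that the bit layout round-trips. Bits 4:3 of the
 * second ID byte (SRR/IDE on the wire) are deliberately ignored by the
 * decoder, exactly as in the driver. Builds with any C compiler; no
 * kernel headers needed.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical inverse of the driver's decode: pack ID[28:0] plus RTR. */
static void hi3110_pack_eff_id(uint32_t id, unsigned int rtr, uint8_t b[4])
{
	b[0] = (id >> 21) & 0xFF;			/* ID[28:21] */
	b[1] = (((id >> 18) & 0x07) << 5) |		/* ID[20:18] */
	       ((id >> 15) & 0x07);			/* ID[17:15] */
	b[2] = (id >> 7) & 0xFF;			/* ID[14:7]  */
	b[3] = ((id & 0x7F) << 1) | (rtr & 1);		/* ID[6:0], RTR */
}

/* Same expression as the extended-ID branch of hi3110_hw_rx(). */
static uint32_t hi3110_unpack_eff_id(const uint8_t b[4])
{
	return (b[0] << 21) |
	       (((b[1] & 0xE0) >> 5) << 18) |
	       ((b[1] & 0x07) << 15) |
	       (b[2] << 7) |
	       (b[3] >> 1);
}

int main(void)
{
	uint32_t id = 0x1ABCDEF5;	/* arbitrary 29-bit test ID */
	uint8_t buf[4];

	hi3110_pack_eff_id(id, 0, buf);
	assert(hi3110_unpack_eff_id(buf) == id);

	hi3110_pack_eff_id(id, 1, buf);	/* RTR bit must not disturb the ID */
	assert(hi3110_unpack_eff_id(buf) == id);

	printf("0x%08x round-trips\n", id);
	return 0;
}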
// SPDX-License-Identifier: GPL-2.0-only /* CAN bus driver for Microchip 251x/25625 CAN Controller with SPI Interface * * MCP2510 support and bug fixes by Christian Pellegrin * <[email protected]> * * Copyright 2009 Christian Pellegrin EVOL S.r.l. * * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved. * Written under contract by: * Chris Elston, Katalix Systems, Ltd. * * Based on Microchip MCP251x CAN controller driver written by * David Vrabel, Copyright 2006 Arcom Control Systems Ltd. * * Based on CAN bus driver for the CCAN controller written by * - Sascha Hauer, Marc Kleine-Budde, Pengutronix * - Simon Kallweit, intefo AG * Copyright 2007 */ #include <linux/bitfield.h> #include <linux/can/core.h> #include <linux/can/dev.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/ethtool.h> #include <linux/freezer.h> #include <linux/gpio.h> #include <linux/gpio/driver.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/uaccess.h> /* SPI interface instruction set */ #define INSTRUCTION_WRITE 0x02 #define INSTRUCTION_READ 0x03 #define INSTRUCTION_BIT_MODIFY 0x05 #define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n)) #define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 0x90 : 0x94) #define INSTRUCTION_RESET 0xC0 #define RTS_TXB0 0x01 #define RTS_TXB1 0x02 #define RTS_TXB2 0x04 #define INSTRUCTION_RTS(n) (0x80 | ((n) & 0x07)) /* MPC251x registers */ #define BFPCTRL 0x0c # define BFPCTRL_B0BFM BIT(0) # define BFPCTRL_B1BFM BIT(1) # define BFPCTRL_BFM(n) (BFPCTRL_B0BFM << (n)) # define BFPCTRL_BFM_MASK GENMASK(1, 0) # define BFPCTRL_B0BFE BIT(2) # define BFPCTRL_B1BFE BIT(3) # define BFPCTRL_BFE(n) (BFPCTRL_B0BFE << (n)) # define BFPCTRL_BFE_MASK GENMASK(3, 2) # define BFPCTRL_B0BFS BIT(4) # define BFPCTRL_B1BFS BIT(5) # define BFPCTRL_BFS(n) (BFPCTRL_B0BFS << (n)) # define BFPCTRL_BFS_MASK GENMASK(5, 4) #define TXRTSCTRL 0x0d # define TXRTSCTRL_B0RTSM BIT(0) # define TXRTSCTRL_B1RTSM BIT(1) # define TXRTSCTRL_B2RTSM BIT(2) # define TXRTSCTRL_RTSM(n) (TXRTSCTRL_B0RTSM << (n)) # define TXRTSCTRL_RTSM_MASK GENMASK(2, 0) # define TXRTSCTRL_B0RTS BIT(3) # define TXRTSCTRL_B1RTS BIT(4) # define TXRTSCTRL_B2RTS BIT(5) # define TXRTSCTRL_RTS(n) (TXRTSCTRL_B0RTS << (n)) # define TXRTSCTRL_RTS_MASK GENMASK(5, 3) #define CANSTAT 0x0e #define CANCTRL 0x0f # define CANCTRL_REQOP_MASK 0xe0 # define CANCTRL_REQOP_CONF 0x80 # define CANCTRL_REQOP_LISTEN_ONLY 0x60 # define CANCTRL_REQOP_LOOPBACK 0x40 # define CANCTRL_REQOP_SLEEP 0x20 # define CANCTRL_REQOP_NORMAL 0x00 # define CANCTRL_OSM 0x08 # define CANCTRL_ABAT 0x10 #define TEC 0x1c #define REC 0x1d #define CNF1 0x2a # define CNF1_SJW_SHIFT 6 #define CNF2 0x29 # define CNF2_BTLMODE 0x80 # define CNF2_SAM 0x40 # define CNF2_PS1_SHIFT 3 #define CNF3 0x28 # define CNF3_SOF 0x08 # define CNF3_WAKFIL 0x04 # define CNF3_PHSEG2_MASK 0x07 #define CANINTE 0x2b # define CANINTE_MERRE 0x80 # define CANINTE_WAKIE 0x40 # define CANINTE_ERRIE 0x20 # define CANINTE_TX2IE 0x10 # define CANINTE_TX1IE 0x08 # define CANINTE_TX0IE 0x04 # define CANINTE_RX1IE 0x02 # define CANINTE_RX0IE 0x01 #define CANINTF 0x2c # define CANINTF_MERRF 0x80 # define CANINTF_WAKIF 0x40 # define CANINTF_ERRIF 0x20 # define CANINTF_TX2IF 0x10 # define CANINTF_TX1IF 0x08 
# define CANINTF_TX0IF 0x04 # define CANINTF_RX1IF 0x02 # define CANINTF_RX0IF 0x01 # define CANINTF_RX (CANINTF_RX0IF | CANINTF_RX1IF) # define CANINTF_TX (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF) # define CANINTF_ERR (CANINTF_ERRIF) #define EFLG 0x2d # define EFLG_EWARN 0x01 # define EFLG_RXWAR 0x02 # define EFLG_TXWAR 0x04 # define EFLG_RXEP 0x08 # define EFLG_TXEP 0x10 # define EFLG_TXBO 0x20 # define EFLG_RX0OVR 0x40 # define EFLG_RX1OVR 0x80 #define TXBCTRL(n) (((n) * 0x10) + 0x30 + TXBCTRL_OFF) # define TXBCTRL_ABTF 0x40 # define TXBCTRL_MLOA 0x20 # define TXBCTRL_TXERR 0x10 # define TXBCTRL_TXREQ 0x08 #define TXBSIDH(n) (((n) * 0x10) + 0x30 + TXBSIDH_OFF) # define SIDH_SHIFT 3 #define TXBSIDL(n) (((n) * 0x10) + 0x30 + TXBSIDL_OFF) # define SIDL_SID_MASK 7 # define SIDL_SID_SHIFT 5 # define SIDL_EXIDE_SHIFT 3 # define SIDL_EID_SHIFT 16 # define SIDL_EID_MASK 3 #define TXBEID8(n) (((n) * 0x10) + 0x30 + TXBEID8_OFF) #define TXBEID0(n) (((n) * 0x10) + 0x30 + TXBEID0_OFF) #define TXBDLC(n) (((n) * 0x10) + 0x30 + TXBDLC_OFF) # define DLC_RTR_SHIFT 6 #define TXBCTRL_OFF 0 #define TXBSIDH_OFF 1 #define TXBSIDL_OFF 2 #define TXBEID8_OFF 3 #define TXBEID0_OFF 4 #define TXBDLC_OFF 5 #define TXBDAT_OFF 6 #define RXBCTRL(n) (((n) * 0x10) + 0x60 + RXBCTRL_OFF) # define RXBCTRL_BUKT 0x04 # define RXBCTRL_RXM0 0x20 # define RXBCTRL_RXM1 0x40 #define RXBSIDH(n) (((n) * 0x10) + 0x60 + RXBSIDH_OFF) # define RXBSIDH_SHIFT 3 #define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF) # define RXBSIDL_IDE 0x08 # define RXBSIDL_SRR 0x10 # define RXBSIDL_EID 3 # define RXBSIDL_SHIFT 5 #define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF) #define RXBEID0(n) (((n) * 0x10) + 0x60 + RXBEID0_OFF) #define RXBDLC(n) (((n) * 0x10) + 0x60 + RXBDLC_OFF) # define RXBDLC_LEN_MASK 0x0f # define RXBDLC_RTR 0x40 #define RXBCTRL_OFF 0 #define RXBSIDH_OFF 1 #define RXBSIDL_OFF 2 #define RXBEID8_OFF 3 #define RXBEID0_OFF 4 #define RXBDLC_OFF 5 #define RXBDAT_OFF 6 #define RXFSID(n) ((n < 3) ? 
0 : 4) #define RXFSIDH(n) ((n) * 4 + RXFSID(n)) #define RXFSIDL(n) ((n) * 4 + 1 + RXFSID(n)) #define RXFEID8(n) ((n) * 4 + 2 + RXFSID(n)) #define RXFEID0(n) ((n) * 4 + 3 + RXFSID(n)) #define RXMSIDH(n) ((n) * 4 + 0x20) #define RXMSIDL(n) ((n) * 4 + 0x21) #define RXMEID8(n) ((n) * 4 + 0x22) #define RXMEID0(n) ((n) * 4 + 0x23) #define GET_BYTE(val, byte) \ (((val) >> ((byte) * 8)) & 0xff) #define SET_BYTE(val, byte) \ (((val) & 0xff) << ((byte) * 8)) /* Buffer size required for the largest SPI transfer (i.e., reading a * frame) */ #define CAN_FRAME_MAX_DATA_LEN 8 #define SPI_TRANSFER_BUF_LEN (6 + CAN_FRAME_MAX_DATA_LEN) #define CAN_FRAME_MAX_BITS 128 #define TX_ECHO_SKB_MAX 1 #define MCP251X_OST_DELAY_MS (5) #define DEVICE_NAME "mcp251x" static const struct can_bittiming_const mcp251x_bittiming_const = { .name = DEVICE_NAME, .tseg1_min = 3, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; enum mcp251x_model { CAN_MCP251X_MCP2510 = 0x2510, CAN_MCP251X_MCP2515 = 0x2515, CAN_MCP251X_MCP25625 = 0x25625, }; struct mcp251x_priv { struct can_priv can; struct net_device *net; struct spi_device *spi; enum mcp251x_model model; struct mutex mcp_lock; /* SPI device lock */ u8 *spi_tx_buf; u8 *spi_rx_buf; struct sk_buff *tx_skb; struct workqueue_struct *wq; struct work_struct tx_work; struct work_struct restart_work; int force_quit; int after_suspend; #define AFTER_SUSPEND_UP 1 #define AFTER_SUSPEND_DOWN 2 #define AFTER_SUSPEND_POWER 4 #define AFTER_SUSPEND_RESTART 8 int restart_tx; bool tx_busy; struct regulator *power; struct regulator *transceiver; struct clk *clk; #ifdef CONFIG_GPIOLIB struct gpio_chip gpio; u8 reg_bfpctrl; #endif }; #define MCP251X_IS(_model) \ static inline int mcp251x_is_##_model(struct spi_device *spi) \ { \ struct mcp251x_priv *priv = spi_get_drvdata(spi); \ return priv->model == CAN_MCP251X_MCP##_model; \ } MCP251X_IS(2510); static void mcp251x_clean(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); if (priv->tx_skb || priv->tx_busy) net->stats.tx_errors++; dev_kfree_skb(priv->tx_skb); if (priv->tx_busy) can_free_echo_skb(priv->net, 0, NULL); priv->tx_skb = NULL; priv->tx_busy = false; } /* Note about handling of error return of mcp251x_spi_trans: accessing * registers via SPI is not really conceptually different from using * normal I/O assembler instructions, although it's much more * complicated from a practical POV. So it's not advisable to always * check the return value of this function. Imagine that every * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0) * error();", it would be a great mess (well, there are some situations * when C++-like exception handling could be useful after all). So we * just check that transfers are OK at the beginning of our * conversation with the chip, to avoid doing really nasty things * (like injecting bogus packets in the network stack).
*/ static int mcp251x_spi_trans(struct spi_device *spi, int len) { struct mcp251x_priv *priv = spi_get_drvdata(spi); struct spi_transfer t = { .tx_buf = priv->spi_tx_buf, .rx_buf = priv->spi_rx_buf, .len = len, .cs_change = 0, }; struct spi_message m; int ret; spi_message_init(&m); spi_message_add_tail(&t, &m); ret = spi_sync(spi, &m); if (ret) dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret); return ret; } static int mcp251x_spi_write(struct spi_device *spi, int len) { struct mcp251x_priv *priv = spi_get_drvdata(spi); int ret; ret = spi_write(spi, priv->spi_tx_buf, len); if (ret) dev_err(&spi->dev, "spi write failed: ret = %d\n", ret); return ret; } static u8 mcp251x_read_reg(struct spi_device *spi, u8 reg) { struct mcp251x_priv *priv = spi_get_drvdata(spi); u8 val = 0; priv->spi_tx_buf[0] = INSTRUCTION_READ; priv->spi_tx_buf[1] = reg; if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) { spi_write_then_read(spi, priv->spi_tx_buf, 2, &val, 1); } else { mcp251x_spi_trans(spi, 3); val = priv->spi_rx_buf[2]; } return val; } static void mcp251x_read_2regs(struct spi_device *spi, u8 reg, u8 *v1, u8 *v2) { struct mcp251x_priv *priv = spi_get_drvdata(spi); priv->spi_tx_buf[0] = INSTRUCTION_READ; priv->spi_tx_buf[1] = reg; if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) { u8 val[2] = { 0 }; spi_write_then_read(spi, priv->spi_tx_buf, 2, val, 2); *v1 = val[0]; *v2 = val[1]; } else { mcp251x_spi_trans(spi, 4); *v1 = priv->spi_rx_buf[2]; *v2 = priv->spi_rx_buf[3]; } } static void mcp251x_write_reg(struct spi_device *spi, u8 reg, u8 val) { struct mcp251x_priv *priv = spi_get_drvdata(spi); priv->spi_tx_buf[0] = INSTRUCTION_WRITE; priv->spi_tx_buf[1] = reg; priv->spi_tx_buf[2] = val; mcp251x_spi_write(spi, 3); } static void mcp251x_write_2regs(struct spi_device *spi, u8 reg, u8 v1, u8 v2) { struct mcp251x_priv *priv = spi_get_drvdata(spi); priv->spi_tx_buf[0] = INSTRUCTION_WRITE; priv->spi_tx_buf[1] = reg; priv->spi_tx_buf[2] = v1; priv->spi_tx_buf[3] = v2; mcp251x_spi_write(spi, 4); } static void mcp251x_write_bits(struct spi_device *spi, u8 reg, u8 mask, u8 val) { struct mcp251x_priv *priv = spi_get_drvdata(spi); priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY; priv->spi_tx_buf[1] = reg; priv->spi_tx_buf[2] = mask; priv->spi_tx_buf[3] = val; mcp251x_spi_write(spi, 4); } static u8 mcp251x_read_stat(struct spi_device *spi) { return mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK; } #define mcp251x_read_stat_poll_timeout(addr, val, cond, delay_us, timeout_us) \ readx_poll_timeout(mcp251x_read_stat, addr, val, cond, \ delay_us, timeout_us) #ifdef CONFIG_GPIOLIB enum { MCP251X_GPIO_TX0RTS = 0, /* inputs */ MCP251X_GPIO_TX1RTS, MCP251X_GPIO_TX2RTS, MCP251X_GPIO_RX0BF, /* outputs */ MCP251X_GPIO_RX1BF, }; #define MCP251X_GPIO_INPUT_MASK \ GENMASK(MCP251X_GPIO_TX2RTS, MCP251X_GPIO_TX0RTS) #define MCP251X_GPIO_OUTPUT_MASK \ GENMASK(MCP251X_GPIO_RX1BF, MCP251X_GPIO_RX0BF) static const char * const mcp251x_gpio_names[] = { [MCP251X_GPIO_TX0RTS] = "TX0RTS", /* inputs */ [MCP251X_GPIO_TX1RTS] = "TX1RTS", [MCP251X_GPIO_TX2RTS] = "TX2RTS", [MCP251X_GPIO_RX0BF] = "RX0BF", /* outputs */ [MCP251X_GPIO_RX1BF] = "RX1BF", }; static inline bool mcp251x_gpio_is_input(unsigned int offset) { return offset <= MCP251X_GPIO_TX2RTS; } static int mcp251x_gpio_request(struct gpio_chip *chip, unsigned int offset) { struct mcp251x_priv *priv = gpiochip_get_data(chip); u8 val; /* nothing to be done for inputs */ if (mcp251x_gpio_is_input(offset)) return 0; val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF); 
mutex_lock(&priv->mcp_lock); mcp251x_write_bits(priv->spi, BFPCTRL, val, val); mutex_unlock(&priv->mcp_lock); priv->reg_bfpctrl |= val; return 0; } static void mcp251x_gpio_free(struct gpio_chip *chip, unsigned int offset) { struct mcp251x_priv *priv = gpiochip_get_data(chip); u8 val; /* nothing to be done for inputs */ if (mcp251x_gpio_is_input(offset)) return; val = BFPCTRL_BFE(offset - MCP251X_GPIO_RX0BF); mutex_lock(&priv->mcp_lock); mcp251x_write_bits(priv->spi, BFPCTRL, val, 0); mutex_unlock(&priv->mcp_lock); priv->reg_bfpctrl &= ~val; } static int mcp251x_gpio_get_direction(struct gpio_chip *chip, unsigned int offset) { if (mcp251x_gpio_is_input(offset)) return GPIOF_DIR_IN; return GPIOF_DIR_OUT; } static int mcp251x_gpio_get(struct gpio_chip *chip, unsigned int offset) { struct mcp251x_priv *priv = gpiochip_get_data(chip); u8 reg, mask, val; if (mcp251x_gpio_is_input(offset)) { reg = TXRTSCTRL; mask = TXRTSCTRL_RTS(offset); } else { reg = BFPCTRL; mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF); } mutex_lock(&priv->mcp_lock); val = mcp251x_read_reg(priv->spi, reg); mutex_unlock(&priv->mcp_lock); return !!(val & mask); } static int mcp251x_gpio_get_multiple(struct gpio_chip *chip, unsigned long *maskp, unsigned long *bitsp) { struct mcp251x_priv *priv = gpiochip_get_data(chip); unsigned long bits = 0; u8 val; mutex_lock(&priv->mcp_lock); if (maskp[0] & MCP251X_GPIO_INPUT_MASK) { val = mcp251x_read_reg(priv->spi, TXRTSCTRL); val = FIELD_GET(TXRTSCTRL_RTS_MASK, val); bits |= FIELD_PREP(MCP251X_GPIO_INPUT_MASK, val); } if (maskp[0] & MCP251X_GPIO_OUTPUT_MASK) { val = mcp251x_read_reg(priv->spi, BFPCTRL); val = FIELD_GET(BFPCTRL_BFS_MASK, val); bits |= FIELD_PREP(MCP251X_GPIO_OUTPUT_MASK, val); } mutex_unlock(&priv->mcp_lock); bitsp[0] = bits; return 0; } static void mcp251x_gpio_set(struct gpio_chip *chip, unsigned int offset, int value) { struct mcp251x_priv *priv = gpiochip_get_data(chip); u8 mask, val; mask = BFPCTRL_BFS(offset - MCP251X_GPIO_RX0BF); val = value ? 
mask : 0; mutex_lock(&priv->mcp_lock); mcp251x_write_bits(priv->spi, BFPCTRL, mask, val); mutex_unlock(&priv->mcp_lock); priv->reg_bfpctrl &= ~mask; priv->reg_bfpctrl |= val; } static void mcp251x_gpio_set_multiple(struct gpio_chip *chip, unsigned long *maskp, unsigned long *bitsp) { struct mcp251x_priv *priv = gpiochip_get_data(chip); u8 mask, val; mask = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, maskp[0]); mask = FIELD_PREP(BFPCTRL_BFS_MASK, mask); val = FIELD_GET(MCP251X_GPIO_OUTPUT_MASK, bitsp[0]); val = FIELD_PREP(BFPCTRL_BFS_MASK, val); if (!mask) return; mutex_lock(&priv->mcp_lock); mcp251x_write_bits(priv->spi, BFPCTRL, mask, val); mutex_unlock(&priv->mcp_lock); priv->reg_bfpctrl &= ~mask; priv->reg_bfpctrl |= val; } static void mcp251x_gpio_restore(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); mcp251x_write_reg(spi, BFPCTRL, priv->reg_bfpctrl); } static int mcp251x_gpio_setup(struct mcp251x_priv *priv) { struct gpio_chip *gpio = &priv->gpio; if (!device_property_present(&priv->spi->dev, "gpio-controller")) return 0; /* gpiochip handles TX[0..2]RTS and RX[0..1]BF */ gpio->label = priv->spi->modalias; gpio->parent = &priv->spi->dev; gpio->owner = THIS_MODULE; gpio->request = mcp251x_gpio_request; gpio->free = mcp251x_gpio_free; gpio->get_direction = mcp251x_gpio_get_direction; gpio->get = mcp251x_gpio_get; gpio->get_multiple = mcp251x_gpio_get_multiple; gpio->set = mcp251x_gpio_set; gpio->set_multiple = mcp251x_gpio_set_multiple; gpio->base = -1; gpio->ngpio = ARRAY_SIZE(mcp251x_gpio_names); gpio->names = mcp251x_gpio_names; gpio->can_sleep = true; return devm_gpiochip_add_data(&priv->spi->dev, gpio, priv); } #else static inline void mcp251x_gpio_restore(struct spi_device *spi) { } static inline int mcp251x_gpio_setup(struct mcp251x_priv *priv) { return 0; } #endif static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf, int len, int tx_buf_idx) { struct mcp251x_priv *priv = spi_get_drvdata(spi); if (mcp251x_is_2510(spi)) { int i; for (i = 1; i < TXBDAT_OFF + len; i++) mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i, buf[i]); } else { memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len); mcp251x_spi_write(spi, TXBDAT_OFF + len); } } static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame, int tx_buf_idx) { struct mcp251x_priv *priv = spi_get_drvdata(spi); u32 sid, eid, exide, rtr; u8 buf[SPI_TRANSFER_BUF_LEN]; exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */ if (exide) sid = (frame->can_id & CAN_EFF_MASK) >> 18; else sid = frame->can_id & CAN_SFF_MASK; /* Standard ID */ eid = frame->can_id & CAN_EFF_MASK; /* Extended ID */ rtr = (frame->can_id & CAN_RTR_FLAG) ? 
1 : 0; /* Remote transmission */ buf[TXBCTRL_OFF] = INSTRUCTION_LOAD_TXB(tx_buf_idx); buf[TXBSIDH_OFF] = sid >> SIDH_SHIFT; buf[TXBSIDL_OFF] = ((sid & SIDL_SID_MASK) << SIDL_SID_SHIFT) | (exide << SIDL_EXIDE_SHIFT) | ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK); buf[TXBEID8_OFF] = GET_BYTE(eid, 1); buf[TXBEID0_OFF] = GET_BYTE(eid, 0); buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->len; memcpy(buf + TXBDAT_OFF, frame->data, frame->len); mcp251x_hw_tx_frame(spi, buf, frame->len, tx_buf_idx); /* use INSTRUCTION_RTS, to avoid "repeated frame problem" */ priv->spi_tx_buf[0] = INSTRUCTION_RTS(1 << tx_buf_idx); mcp251x_spi_write(priv->spi, 1); } static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf, int buf_idx) { struct mcp251x_priv *priv = spi_get_drvdata(spi); if (mcp251x_is_2510(spi)) { int i, len; for (i = 1; i < RXBDAT_OFF; i++) buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); for (; i < (RXBDAT_OFF + len); i++) buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i); } else { priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx); if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) { spi_write_then_read(spi, priv->spi_tx_buf, 1, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN); memcpy(buf + 1, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN - 1); } else { mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN); memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN); } } } static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx) { struct mcp251x_priv *priv = spi_get_drvdata(spi); struct sk_buff *skb; struct can_frame *frame; u8 buf[SPI_TRANSFER_BUF_LEN]; skb = alloc_can_skb(priv->net, &frame); if (!skb) { dev_err(&spi->dev, "cannot allocate RX skb\n"); priv->net->stats.rx_dropped++; return; } mcp251x_hw_rx_frame(spi, buf, buf_idx); if (buf[RXBSIDL_OFF] & RXBSIDL_IDE) { /* Extended ID format */ frame->can_id = CAN_EFF_FLAG; frame->can_id |= /* Extended ID part */ SET_BYTE(buf[RXBSIDL_OFF] & RXBSIDL_EID, 2) | SET_BYTE(buf[RXBEID8_OFF], 1) | SET_BYTE(buf[RXBEID0_OFF], 0) | /* Standard ID part */ (((buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) | (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT)) << 18); /* Remote transmission request */ if (buf[RXBDLC_OFF] & RXBDLC_RTR) frame->can_id |= CAN_RTR_FLAG; } else { /* Standard ID format */ frame->can_id = (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) | (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT); if (buf[RXBSIDL_OFF] & RXBSIDL_SRR) frame->can_id |= CAN_RTR_FLAG; } /* Data length */ frame->len = can_cc_dlc2len(buf[RXBDLC_OFF] & RXBDLC_LEN_MASK); if (!(frame->can_id & CAN_RTR_FLAG)) { memcpy(frame->data, buf + RXBDAT_OFF, frame->len); priv->net->stats.rx_bytes += frame->len; } priv->net->stats.rx_packets++; netif_rx(skb); } static void mcp251x_hw_sleep(struct spi_device *spi) { mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP); } /* May only be called when device is sleeping! 
*/ static int mcp251x_hw_wake(struct spi_device *spi) { u8 value; int ret; /* Force wakeup interrupt to wake device, but don't execute IST */ disable_irq(spi->irq); mcp251x_write_2regs(spi, CANINTE, CANINTE_WAKIE, CANINTF_WAKIF); /* Wait for oscillator startup timer after wake up */ mdelay(MCP251X_OST_DELAY_MS); /* Put device into config mode */ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_CONF); /* Wait for the device to enter config mode */ ret = mcp251x_read_stat_poll_timeout(spi, value, value == CANCTRL_REQOP_CONF, MCP251X_OST_DELAY_MS * 1000, USEC_PER_SEC); if (ret) { dev_err(&spi->dev, "MCP251x didn't enter config mode\n"); return ret; } /* Disable and clear pending interrupts */ mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00); enable_irq(spi->irq); return 0; } static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb, struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; if (priv->tx_skb || priv->tx_busy) { dev_warn(&spi->dev, "hard_xmit called while tx busy\n"); return NETDEV_TX_BUSY; } if (can_dev_dropped_skb(net, skb)) return NETDEV_TX_OK; netif_stop_queue(net); priv->tx_skb = skb; queue_work(priv->wq, &priv->tx_work); return NETDEV_TX_OK; } static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode) { struct mcp251x_priv *priv = netdev_priv(net); switch (mode) { case CAN_MODE_START: mcp251x_clean(net); /* We have to delay work since SPI I/O may sleep */ priv->can.state = CAN_STATE_ERROR_ACTIVE; priv->restart_tx = 1; if (priv->can.restart_ms == 0) priv->after_suspend = AFTER_SUSPEND_RESTART; queue_work(priv->wq, &priv->restart_work); break; default: return -EOPNOTSUPP; } return 0; } static int mcp251x_set_normal_mode(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); u8 value; int ret; /* Enable interrupts */ mcp251x_write_reg(spi, CANINTE, CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE | CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE); if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { /* Put device into loopback mode */ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK); } else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) { /* Put device into listen-only mode */ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LISTEN_ONLY); } else { /* Put device into normal mode */ mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL); /* Wait for the device to enter normal mode */ ret = mcp251x_read_stat_poll_timeout(spi, value, value == 0, MCP251X_OST_DELAY_MS * 1000, USEC_PER_SEC); if (ret) { dev_err(&spi->dev, "MCP251x didn't enter normal mode\n"); return ret; } } priv->can.state = CAN_STATE_ERROR_ACTIVE; return 0; } static int mcp251x_do_set_bittiming(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); struct can_bittiming *bt = &priv->can.bittiming; struct spi_device *spi = priv->spi; mcp251x_write_reg(spi, CNF1, ((bt->sjw - 1) << CNF1_SJW_SHIFT) | (bt->brp - 1)); mcp251x_write_reg(spi, CNF2, CNF2_BTLMODE | (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
CNF2_SAM : 0) | ((bt->phase_seg1 - 1) << CNF2_PS1_SHIFT) | (bt->prop_seg - 1)); mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK, (bt->phase_seg2 - 1)); dev_dbg(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n", mcp251x_read_reg(spi, CNF1), mcp251x_read_reg(spi, CNF2), mcp251x_read_reg(spi, CNF3)); return 0; } static int mcp251x_setup(struct net_device *net, struct spi_device *spi) { mcp251x_do_set_bittiming(net); mcp251x_write_reg(spi, RXBCTRL(0), RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1); mcp251x_write_reg(spi, RXBCTRL(1), RXBCTRL_RXM0 | RXBCTRL_RXM1); return 0; } static int mcp251x_hw_reset(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); u8 value; int ret; /* Wait for oscillator startup timer after power up */ mdelay(MCP251X_OST_DELAY_MS); priv->spi_tx_buf[0] = INSTRUCTION_RESET; ret = mcp251x_spi_write(spi, 1); if (ret) return ret; /* Wait for oscillator startup timer after reset */ mdelay(MCP251X_OST_DELAY_MS); /* Wait for reset to finish */ ret = mcp251x_read_stat_poll_timeout(spi, value, value == CANCTRL_REQOP_CONF, MCP251X_OST_DELAY_MS * 1000, USEC_PER_SEC); if (ret) dev_err(&spi->dev, "MCP251x didn't enter config mode after reset\n"); return ret; } static int mcp251x_hw_probe(struct spi_device *spi) { u8 ctrl; int ret; ret = mcp251x_hw_reset(spi); if (ret) return ret; ctrl = mcp251x_read_reg(spi, CANCTRL); dev_dbg(&spi->dev, "CANCTRL 0x%02x\n", ctrl); /* Check for power up default value */ if ((ctrl & 0x17) != 0x07) return -ENODEV; return 0; } static int mcp251x_power_enable(struct regulator *reg, int enable) { if (IS_ERR_OR_NULL(reg)) return 0; if (enable) return regulator_enable(reg); else return regulator_disable(reg); } static int mcp251x_stop(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; close_candev(net); priv->force_quit = 1; free_irq(spi->irq, priv); mutex_lock(&priv->mcp_lock); /* Disable and clear pending interrupts */ mcp251x_write_2regs(spi, CANINTE, 0x00, 0x00); mcp251x_write_reg(spi, TXBCTRL(0), 0); mcp251x_clean(net); mcp251x_hw_sleep(spi); mcp251x_power_enable(priv->transceiver, 0); priv->can.state = CAN_STATE_STOPPED; mutex_unlock(&priv->mcp_lock); return 0; } static void mcp251x_error_skb(struct net_device *net, int can_id, int data1) { struct sk_buff *skb; struct can_frame *frame; skb = alloc_can_err_skb(net, &frame); if (skb) { frame->can_id |= can_id; frame->data[1] = data1; netif_rx(skb); } else { netdev_err(net, "cannot allocate error skb\n"); } } static void mcp251x_tx_work_handler(struct work_struct *ws) { struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv, tx_work); struct spi_device *spi = priv->spi; struct net_device *net = priv->net; struct can_frame *frame; mutex_lock(&priv->mcp_lock); if (priv->tx_skb) { if (priv->can.state == CAN_STATE_BUS_OFF) { mcp251x_clean(net); } else { frame = (struct can_frame *)priv->tx_skb->data; if (frame->len > CAN_FRAME_MAX_DATA_LEN) frame->len = CAN_FRAME_MAX_DATA_LEN; mcp251x_hw_tx(spi, frame, 0); priv->tx_busy = true; can_put_echo_skb(priv->tx_skb, net, 0, 0); priv->tx_skb = NULL; } } mutex_unlock(&priv->mcp_lock); } static void mcp251x_restart_work_handler(struct work_struct *ws) { struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv, restart_work); struct spi_device *spi = priv->spi; struct net_device *net = priv->net; mutex_lock(&priv->mcp_lock); if (priv->after_suspend) { if (priv->after_suspend & AFTER_SUSPEND_POWER) { mcp251x_hw_reset(spi); mcp251x_setup(net, spi); mcp251x_gpio_restore(spi); } else {
mcp251x_hw_wake(spi); } priv->force_quit = 0; if (priv->after_suspend & AFTER_SUSPEND_RESTART) { mcp251x_set_normal_mode(spi); } else if (priv->after_suspend & AFTER_SUSPEND_UP) { netif_device_attach(net); mcp251x_clean(net); mcp251x_set_normal_mode(spi); netif_wake_queue(net); } else { mcp251x_hw_sleep(spi); } priv->after_suspend = 0; } if (priv->restart_tx) { priv->restart_tx = 0; mcp251x_write_reg(spi, TXBCTRL(0), 0); mcp251x_clean(net); netif_wake_queue(net); mcp251x_error_skb(net, CAN_ERR_RESTARTED, 0); } mutex_unlock(&priv->mcp_lock); } static irqreturn_t mcp251x_can_ist(int irq, void *dev_id) { struct mcp251x_priv *priv = dev_id; struct spi_device *spi = priv->spi; struct net_device *net = priv->net; mutex_lock(&priv->mcp_lock); while (!priv->force_quit) { enum can_state new_state; u8 intf, eflag; u8 clear_intf = 0; int can_id = 0, data1 = 0; mcp251x_read_2regs(spi, CANINTF, &intf, &eflag); /* receive buffer 0 */ if (intf & CANINTF_RX0IF) { mcp251x_hw_rx(spi, 0); /* Free one buffer ASAP * (The MCP2515/25625 does this automatically.) */ if (mcp251x_is_2510(spi)) mcp251x_write_bits(spi, CANINTF, CANINTF_RX0IF, 0x00); /* check if buffer 1 is already known to be full, no need to re-read */ if (!(intf & CANINTF_RX1IF)) { u8 intf1, eflag1; /* intf needs to be read again to avoid a race condition */ mcp251x_read_2regs(spi, CANINTF, &intf1, &eflag1); /* combine flags from both operations for error handling */ intf |= intf1; eflag |= eflag1; } } /* receive buffer 1 */ if (intf & CANINTF_RX1IF) { mcp251x_hw_rx(spi, 1); /* The MCP2515/25625 does this automatically. */ if (mcp251x_is_2510(spi)) clear_intf |= CANINTF_RX1IF; } /* mask out flags we don't care about */ intf &= CANINTF_RX | CANINTF_TX | CANINTF_ERR; /* any error or tx interrupt we need to clear? 
*/ if (intf & (CANINTF_ERR | CANINTF_TX)) clear_intf |= intf & (CANINTF_ERR | CANINTF_TX); if (clear_intf) mcp251x_write_bits(spi, CANINTF, clear_intf, 0x00); if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) mcp251x_write_bits(spi, EFLG, eflag, 0x00); /* Update can state */ if (eflag & EFLG_TXBO) { new_state = CAN_STATE_BUS_OFF; can_id |= CAN_ERR_BUSOFF; } else if (eflag & EFLG_TXEP) { new_state = CAN_STATE_ERROR_PASSIVE; can_id |= CAN_ERR_CRTL; data1 |= CAN_ERR_CRTL_TX_PASSIVE; } else if (eflag & EFLG_RXEP) { new_state = CAN_STATE_ERROR_PASSIVE; can_id |= CAN_ERR_CRTL; data1 |= CAN_ERR_CRTL_RX_PASSIVE; } else if (eflag & EFLG_TXWAR) { new_state = CAN_STATE_ERROR_WARNING; can_id |= CAN_ERR_CRTL; data1 |= CAN_ERR_CRTL_TX_WARNING; } else if (eflag & EFLG_RXWAR) { new_state = CAN_STATE_ERROR_WARNING; can_id |= CAN_ERR_CRTL; data1 |= CAN_ERR_CRTL_RX_WARNING; } else { new_state = CAN_STATE_ERROR_ACTIVE; } /* Update can state statistics */ switch (priv->can.state) { case CAN_STATE_ERROR_ACTIVE: if (new_state >= CAN_STATE_ERROR_WARNING && new_state <= CAN_STATE_BUS_OFF) priv->can.can_stats.error_warning++; fallthrough; case CAN_STATE_ERROR_WARNING: if (new_state >= CAN_STATE_ERROR_PASSIVE && new_state <= CAN_STATE_BUS_OFF) priv->can.can_stats.error_passive++; break; default: break; } priv->can.state = new_state; if (intf & CANINTF_ERRIF) { /* Handle overflow counters */ if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) { if (eflag & EFLG_RX0OVR) { net->stats.rx_over_errors++; net->stats.rx_errors++; } if (eflag & EFLG_RX1OVR) { net->stats.rx_over_errors++; net->stats.rx_errors++; } can_id |= CAN_ERR_CRTL; data1 |= CAN_ERR_CRTL_RX_OVERFLOW; } mcp251x_error_skb(net, can_id, data1); } if (priv->can.state == CAN_STATE_BUS_OFF) { if (priv->can.restart_ms == 0) { priv->force_quit = 1; priv->can.can_stats.bus_off++; can_bus_off(net); mcp251x_hw_sleep(spi); break; } } if (intf == 0) break; if (intf & CANINTF_TX) { if (priv->tx_busy) { net->stats.tx_packets++; net->stats.tx_bytes += can_get_echo_skb(net, 0, NULL); priv->tx_busy = false; } netif_wake_queue(net); } } mutex_unlock(&priv->mcp_lock); return IRQ_HANDLED; } static int mcp251x_open(struct net_device *net) { struct mcp251x_priv *priv = netdev_priv(net); struct spi_device *spi = priv->spi; unsigned long flags = 0; int ret; ret = open_candev(net); if (ret) { dev_err(&spi->dev, "unable to set initial baudrate!\n"); return ret; } mutex_lock(&priv->mcp_lock); mcp251x_power_enable(priv->transceiver, 1); priv->force_quit = 0; priv->tx_skb = NULL; priv->tx_busy = false; if (!dev_fwnode(&spi->dev)) flags = IRQF_TRIGGER_FALLING; ret = request_threaded_irq(spi->irq, NULL, mcp251x_can_ist, flags | IRQF_ONESHOT, dev_name(&spi->dev), priv); if (ret) { dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq); goto out_close; } ret = mcp251x_hw_wake(spi); if (ret) goto out_free_irq; ret = mcp251x_setup(net, spi); if (ret) goto out_free_irq; ret = mcp251x_set_normal_mode(spi); if (ret) goto out_free_irq; netif_wake_queue(net); mutex_unlock(&priv->mcp_lock); return 0; out_free_irq: free_irq(spi->irq, priv); mcp251x_hw_sleep(spi); out_close: mcp251x_power_enable(priv->transceiver, 0); close_candev(net); mutex_unlock(&priv->mcp_lock); return ret; } static const struct net_device_ops mcp251x_netdev_ops = { .ndo_open = mcp251x_open, .ndo_stop = mcp251x_stop, .ndo_start_xmit = mcp251x_hard_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops mcp251x_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const struct of_device_id mcp251x_of_match[] 
= { { .compatible = "microchip,mcp2510", .data = (void *)CAN_MCP251X_MCP2510, }, { .compatible = "microchip,mcp2515", .data = (void *)CAN_MCP251X_MCP2515, }, { .compatible = "microchip,mcp25625", .data = (void *)CAN_MCP251X_MCP25625, }, { } }; MODULE_DEVICE_TABLE(of, mcp251x_of_match); static const struct spi_device_id mcp251x_id_table[] = { { .name = "mcp2510", .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2510, }, { .name = "mcp2515", .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP2515, }, { .name = "mcp25625", .driver_data = (kernel_ulong_t)CAN_MCP251X_MCP25625, }, { } }; MODULE_DEVICE_TABLE(spi, mcp251x_id_table); static int mcp251x_can_probe(struct spi_device *spi) { const void *match = device_get_match_data(&spi->dev); struct net_device *net; struct mcp251x_priv *priv; struct clk *clk; u32 freq; int ret; clk = devm_clk_get_optional(&spi->dev, NULL); if (IS_ERR(clk)) return PTR_ERR(clk); freq = clk_get_rate(clk); if (freq == 0) device_property_read_u32(&spi->dev, "clock-frequency", &freq); /* Sanity check */ if (freq < 1000000 || freq > 25000000) return -ERANGE; /* Allocate can/net device */ net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX); if (!net) return -ENOMEM; ret = clk_prepare_enable(clk); if (ret) goto out_free; net->netdev_ops = &mcp251x_netdev_ops; net->ethtool_ops = &mcp251x_ethtool_ops; net->flags |= IFF_ECHO; priv = netdev_priv(net); priv->can.bittiming_const = &mcp251x_bittiming_const; priv->can.do_set_mode = mcp251x_do_set_mode; priv->can.clock.freq = freq / 2; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY; if (match) priv->model = (enum mcp251x_model)(uintptr_t)match; else priv->model = spi_get_device_id(spi)->driver_data; priv->net = net; priv->clk = clk; spi_set_drvdata(spi, priv); /* Configure the SPI bus */ spi->bits_per_word = 8; if (mcp251x_is_2510(spi)) spi->max_speed_hz = spi->max_speed_hz ? : 5 * 1000 * 1000; else spi->max_speed_hz = spi->max_speed_hz ? : 10 * 1000 * 1000; ret = spi_setup(spi); if (ret) goto out_clk; priv->power = devm_regulator_get_optional(&spi->dev, "vdd"); priv->transceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); if ((PTR_ERR(priv->power) == -EPROBE_DEFER) || (PTR_ERR(priv->transceiver) == -EPROBE_DEFER)) { ret = -EPROBE_DEFER; goto out_clk; } ret = mcp251x_power_enable(priv->power, 1); if (ret) goto out_clk; priv->wq = alloc_workqueue("mcp251x_wq", WQ_FREEZABLE | WQ_MEM_RECLAIM, 0); if (!priv->wq) { ret = -ENOMEM; goto out_clk; } INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler); INIT_WORK(&priv->restart_work, mcp251x_restart_work_handler); priv->spi = spi; mutex_init(&priv->mcp_lock); priv->spi_tx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN, GFP_KERNEL); if (!priv->spi_tx_buf) { ret = -ENOMEM; goto error_probe; } priv->spi_rx_buf = devm_kzalloc(&spi->dev, SPI_TRANSFER_BUF_LEN, GFP_KERNEL); if (!priv->spi_rx_buf) { ret = -ENOMEM; goto error_probe; } SET_NETDEV_DEV(net, &spi->dev); /* Here is OK to not lock the MCP, no one knows about it yet */ ret = mcp251x_hw_probe(spi); if (ret) { if (ret == -ENODEV) dev_err(&spi->dev, "Cannot initialize MCP%x. 
Wrong wiring?\n", priv->model); goto error_probe; } mcp251x_hw_sleep(spi); ret = register_candev(net); if (ret) goto error_probe; ret = mcp251x_gpio_setup(priv); if (ret) goto out_unregister_candev; netdev_info(net, "MCP%x successfully initialized.\n", priv->model); return 0; out_unregister_candev: unregister_candev(net); error_probe: destroy_workqueue(priv->wq); priv->wq = NULL; mcp251x_power_enable(priv->power, 0); out_clk: clk_disable_unprepare(clk); out_free: free_candev(net); dev_err(&spi->dev, "Probe failed, err=%d\n", -ret); return ret; } static void mcp251x_can_remove(struct spi_device *spi) { struct mcp251x_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net; unregister_candev(net); mcp251x_power_enable(priv->power, 0); destroy_workqueue(priv->wq); priv->wq = NULL; clk_disable_unprepare(priv->clk); free_candev(net); } static int __maybe_unused mcp251x_can_suspend(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct mcp251x_priv *priv = spi_get_drvdata(spi); struct net_device *net = priv->net; priv->force_quit = 1; disable_irq(spi->irq); /* Note: at this point neither IST nor workqueues are running. * open/stop cannot be called anyway so locking is not needed */ if (netif_running(net)) { netif_device_detach(net); mcp251x_hw_sleep(spi); mcp251x_power_enable(priv->transceiver, 0); priv->after_suspend = AFTER_SUSPEND_UP; } else { priv->after_suspend = AFTER_SUSPEND_DOWN; } mcp251x_power_enable(priv->power, 0); priv->after_suspend |= AFTER_SUSPEND_POWER; return 0; } static int __maybe_unused mcp251x_can_resume(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct mcp251x_priv *priv = spi_get_drvdata(spi); if (priv->after_suspend & AFTER_SUSPEND_POWER) mcp251x_power_enable(priv->power, 1); if (priv->after_suspend & AFTER_SUSPEND_UP) mcp251x_power_enable(priv->transceiver, 1); if (priv->after_suspend & (AFTER_SUSPEND_POWER | AFTER_SUSPEND_UP)) queue_work(priv->wq, &priv->restart_work); else priv->after_suspend = 0; priv->force_quit = 0; enable_irq(spi->irq); return 0; } static SIMPLE_DEV_PM_OPS(mcp251x_can_pm_ops, mcp251x_can_suspend, mcp251x_can_resume); static struct spi_driver mcp251x_can_driver = { .driver = { .name = DEVICE_NAME, .of_match_table = mcp251x_of_match, .pm = &mcp251x_can_pm_ops, }, .id_table = mcp251x_id_table, .probe = mcp251x_can_probe, .remove = mcp251x_can_remove, }; module_spi_driver(mcp251x_can_driver); MODULE_AUTHOR("Chris Elston <[email protected]>, " "Christian Pellegrin <[email protected]>"); MODULE_DESCRIPTION("Microchip 251x/25625 CAN driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/net/can/spi/mcp251x.c
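/*
 * Editor's note: a standalone userspace sketch of the TX/RX buffer ID
 * packing used by mcp251x_hw_tx() and mcp251x_hw_rx() above. The
 * shifts and masks are inlined from the driver's #defines (SIDH_SHIFT,
 * SIDL_*, RXBSIDL_IDE, ...); the helper names and the test IDs in
 * main() are hypothetical. The SRR-based RTR handling for standard RX
 * frames is omitted to keep the sketch focused on the ID layout.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CAN_EFF_MASK 0x1FFFFFFFU
#define CAN_SFF_MASK 0x000007FFU

/* Pack as in mcp251x_hw_tx(): buf[0..3] = SIDH, SIDL, EID8, EID0. */
static void mcp251x_pack_id(uint32_t can_id, int eff, uint8_t buf[4])
{
	uint32_t sid = eff ? (can_id & CAN_EFF_MASK) >> 18
			   : can_id & CAN_SFF_MASK;
	uint32_t eid = can_id & CAN_EFF_MASK;

	buf[0] = sid >> 3;				/* SIDH */
	buf[1] = ((sid & 7) << 5) | (!!eff << 3) |	/* SIDL */
		 ((eid >> 16) & 3);
	buf[2] = (eid >> 8) & 0xFF;			/* EID8 */
	buf[3] = eid & 0xFF;				/* EID0 */
}

/* Unpack as in mcp251x_hw_rx(). */
static uint32_t mcp251x_unpack_id(const uint8_t buf[4])
{
	uint32_t sid = (buf[0] << 3) | (buf[1] >> 5);

	if (!(buf[1] & 0x08))		/* RXBSIDL_IDE clear: standard */
		return sid;

	return ((uint32_t)(buf[1] & 3) << 16) |	/* EID[17:16] */
	       ((uint32_t)buf[2] << 8) |	/* EID[15:8]  */
	       buf[3] |				/* EID[7:0]   */
	       (sid << 18);			/* SID = ID[28:18] */
}

int main(void)
{
	uint8_t buf[4];

	mcp251x_pack_id(0x123, 0, buf);			/* standard frame */
	assert(mcp251x_unpack_id(buf) == 0x123);

	mcp251x_pack_id(0x1ABCDEF5, 1, buf);		/* extended frame */
	assert(mcp251x_unpack_id(buf) == 0x1ABCDEF5);

	printf("standard and extended IDs round-trip\n");
	return 0;
}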
// SPDX-License-Identifier: GPL-2.0 // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // // Copyright (c) 2020 Pengutronix, // Marc Kleine-Budde <[email protected]> // // Based on: // // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface // // Copyright (c) 2019 Martin Sperl <[email protected]> // #include "mcp251xfd.h" /* The standard crc16 in linux/crc16.h is unfortunately not computing * the correct results (left shift vs. right shift). So here an * implementation with a table generated with the help of: * * http://lkml.iu.edu/hypermail/linux/kernel/0508.1/1085.html */ static const u16 mcp251xfd_crc16_table[] = { 0x0000, 0x8005, 0x800f, 0x000a, 0x801b, 0x001e, 0x0014, 0x8011, 0x8033, 0x0036, 0x003c, 0x8039, 0x0028, 0x802d, 0x8027, 0x0022, 0x8063, 0x0066, 0x006c, 0x8069, 0x0078, 0x807d, 0x8077, 0x0072, 0x0050, 0x8055, 0x805f, 0x005a, 0x804b, 0x004e, 0x0044, 0x8041, 0x80c3, 0x00c6, 0x00cc, 0x80c9, 0x00d8, 0x80dd, 0x80d7, 0x00d2, 0x00f0, 0x80f5, 0x80ff, 0x00fa, 0x80eb, 0x00ee, 0x00e4, 0x80e1, 0x00a0, 0x80a5, 0x80af, 0x00aa, 0x80bb, 0x00be, 0x00b4, 0x80b1, 0x8093, 0x0096, 0x009c, 0x8099, 0x0088, 0x808d, 0x8087, 0x0082, 0x8183, 0x0186, 0x018c, 0x8189, 0x0198, 0x819d, 0x8197, 0x0192, 0x01b0, 0x81b5, 0x81bf, 0x01ba, 0x81ab, 0x01ae, 0x01a4, 0x81a1, 0x01e0, 0x81e5, 0x81ef, 0x01ea, 0x81fb, 0x01fe, 0x01f4, 0x81f1, 0x81d3, 0x01d6, 0x01dc, 0x81d9, 0x01c8, 0x81cd, 0x81c7, 0x01c2, 0x0140, 0x8145, 0x814f, 0x014a, 0x815b, 0x015e, 0x0154, 0x8151, 0x8173, 0x0176, 0x017c, 0x8179, 0x0168, 0x816d, 0x8167, 0x0162, 0x8123, 0x0126, 0x012c, 0x8129, 0x0138, 0x813d, 0x8137, 0x0132, 0x0110, 0x8115, 0x811f, 0x011a, 0x810b, 0x010e, 0x0104, 0x8101, 0x8303, 0x0306, 0x030c, 0x8309, 0x0318, 0x831d, 0x8317, 0x0312, 0x0330, 0x8335, 0x833f, 0x033a, 0x832b, 0x032e, 0x0324, 0x8321, 0x0360, 0x8365, 0x836f, 0x036a, 0x837b, 0x037e, 0x0374, 0x8371, 0x8353, 0x0356, 0x035c, 0x8359, 0x0348, 0x834d, 0x8347, 0x0342, 0x03c0, 0x83c5, 0x83cf, 0x03ca, 0x83db, 0x03de, 0x03d4, 0x83d1, 0x83f3, 0x03f6, 0x03fc, 0x83f9, 0x03e8, 0x83ed, 0x83e7, 0x03e2, 0x83a3, 0x03a6, 0x03ac, 0x83a9, 0x03b8, 0x83bd, 0x83b7, 0x03b2, 0x0390, 0x8395, 0x839f, 0x039a, 0x838b, 0x038e, 0x0384, 0x8381, 0x0280, 0x8285, 0x828f, 0x028a, 0x829b, 0x029e, 0x0294, 0x8291, 0x82b3, 0x02b6, 0x02bc, 0x82b9, 0x02a8, 0x82ad, 0x82a7, 0x02a2, 0x82e3, 0x02e6, 0x02ec, 0x82e9, 0x02f8, 0x82fd, 0x82f7, 0x02f2, 0x02d0, 0x82d5, 0x82df, 0x02da, 0x82cb, 0x02ce, 0x02c4, 0x82c1, 0x8243, 0x0246, 0x024c, 0x8249, 0x0258, 0x825d, 0x8257, 0x0252, 0x0270, 0x8275, 0x827f, 0x027a, 0x826b, 0x026e, 0x0264, 0x8261, 0x0220, 0x8225, 0x822f, 0x022a, 0x823b, 0x023e, 0x0234, 0x8231, 0x8213, 0x0216, 0x021c, 0x8219, 0x0208, 0x820d, 0x8207, 0x0202 }; static inline u16 mcp251xfd_crc16_byte(u16 crc, const u8 data) { u8 index = (crc >> 8) ^ data; return (crc << 8) ^ mcp251xfd_crc16_table[index]; } static u16 mcp251xfd_crc16(u16 crc, u8 const *buffer, size_t len) { while (len--) crc = mcp251xfd_crc16_byte(crc, *buffer++); return crc; } u16 mcp251xfd_crc16_compute(const void *data, size_t data_size) { u16 crc = 0xffff; return mcp251xfd_crc16(crc, data, data_size); } u16 mcp251xfd_crc16_compute2(const void *cmd, size_t cmd_size, const void *data, size_t data_size) { u16 crc; crc = mcp251xfd_crc16_compute(cmd, cmd_size); crc = mcp251xfd_crc16(crc, data, data_size); return crc; }
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-crc16.c
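/*
 * Editor's note: a userspace sketch showing where the table above comes
 * from and why linux/crc16.h cannot be used. As the file's own comment
 * says, the kernel helper is the reflected (right-shift) CRC-16, while
 * the controller needs the non-reflected (left-shift) variant of
 * polynomial 0x8005 with init value 0xffff. The bitwise reference below
 * regenerates the lookup table, checks its first entries against the
 * driver's table, and verifies the byte-wise step against the bitwise
 * computation. The message bytes in main() are hypothetical.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define CRC16_POLY 0x8005

/* Bitwise reference: MSB-first, non-reflected ("left shift" variant). */
static uint16_t crc16_bitwise_byte(uint16_t crc, uint8_t data)
{
	int i;

	crc ^= (uint16_t)data << 8;
	for (i = 0; i < 8; i++)
		crc = (crc & 0x8000) ? (uint16_t)((crc << 1) ^ CRC16_POLY)
				     : (uint16_t)(crc << 1);
	return crc;
}

static uint16_t crc16_table[256];

/* Regenerate the lookup table; entry i is the CRC step for byte i. */
static void crc16_build_table(void)
{
	int i;

	for (i = 0; i < 256; i++)
		crc16_table[i] = crc16_bitwise_byte(0, (uint8_t)i);
}

/* Byte-wise step, structured like mcp251xfd_crc16_byte() above. */
static uint16_t crc16_table_byte(uint16_t crc, uint8_t data)
{
	uint8_t index = (crc >> 8) ^ data;

	return (uint16_t)((crc << 8) ^ crc16_table[index]);
}

int main(void)
{
	const uint8_t msg[] = { 0x12, 0x34, 0x56, 0x78 };
	uint16_t a = 0xffff, b = 0xffff;	/* init as in _compute() */
	size_t i;

	crc16_build_table();
	/* first entries must match the driver's static table */
	assert(crc16_table[0] == 0x0000 && crc16_table[1] == 0x8005);

	for (i = 0; i < sizeof(msg); i++) {
		a = crc16_bitwise_byte(a, msg[i]);
		b = crc16_table_byte(b, msg[i]);
	}
	assert(a == b);
	printf("crc = 0x%04x\n", a);
	return 0;
}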
// SPDX-License-Identifier: GPL-2.0 // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // // Copyright (c) 2021, 2022 Pengutronix, // Marc Kleine-Budde <[email protected]> // #include "mcp251xfd-ram.h" static inline u8 can_ram_clamp(const struct can_ram_config *config, const struct can_ram_obj_config *obj, u8 val) { u8 max; max = min_t(u8, obj->max, obj->fifo_num * config->fifo_depth); return clamp(val, obj->min, max); } static u8 can_ram_rounddown_pow_of_two(const struct can_ram_config *config, const struct can_ram_obj_config *obj, const u8 coalesce, u8 val) { u8 fifo_num = obj->fifo_num; u8 ret = 0, i; val = can_ram_clamp(config, obj, val); if (coalesce) { /* Use 1st FIFO for coalescing, if requested. * * Either use complete FIFO (and FIFO Full IRQ) for * coalescing or only half of FIFO (FIFO Half Full * IRQ) and use remaining half for normal objects. */ ret = min_t(u8, coalesce * 2, config->fifo_depth); val -= ret; fifo_num--; } for (i = 0; i < fifo_num && val; i++) { u8 n; n = min_t(u8, rounddown_pow_of_two(val), config->fifo_depth); /* skip small FIFOs */ if (n < obj->fifo_depth_min) return ret; ret += n; val -= n; } return ret; } void can_ram_get_layout(struct can_ram_layout *layout, const struct can_ram_config *config, const struct ethtool_ringparam *ring, const struct ethtool_coalesce *ec, const bool fd_mode) { u8 num_rx, num_tx; u16 ram_free; /* default CAN */ num_tx = config->tx.def[fd_mode]; num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx); ram_free = config->size; ram_free -= config->tx.size[fd_mode] * num_tx; num_rx = ram_free / config->rx.size[fd_mode]; layout->default_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx); layout->default_tx = num_tx; /* MAX CAN */ ram_free = config->size; ram_free -= config->tx.size[fd_mode] * config->tx.min; num_rx = ram_free / config->rx.size[fd_mode]; ram_free = config->size; ram_free -= config->rx.size[fd_mode] * config->rx.min; num_tx = ram_free / config->tx.size[fd_mode]; layout->max_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, num_rx); layout->max_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx); /* cur CAN */ if (ring) { u8 num_rx_coalesce = 0, num_tx_coalesce = 0; num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, 0, ring->rx_pending); /* The ethtool doc says: * To disable coalescing, set usecs = 0 and max_frames = 1. */ if (ec && !(ec->rx_coalesce_usecs_irq == 0 && ec->rx_max_coalesced_frames_irq == 1)) { u8 max; /* use only max half of available objects for coalescing */ max = min_t(u8, num_rx / 2, config->fifo_depth); num_rx_coalesce = clamp(ec->rx_max_coalesced_frames_irq, (u32)config->rx.fifo_depth_coalesce_min, (u32)max); num_rx_coalesce = rounddown_pow_of_two(num_rx_coalesce); num_rx = can_ram_rounddown_pow_of_two(config, &config->rx, num_rx_coalesce, num_rx); } ram_free = config->size - config->rx.size[fd_mode] * num_rx; num_tx = ram_free / config->tx.size[fd_mode]; num_tx = min_t(u8, ring->tx_pending, num_tx); num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, 0, num_tx); /* The ethtool doc says: * To disable coalescing, set usecs = 0 and max_frames = 1. 
*/ if (ec && !(ec->tx_coalesce_usecs_irq == 0 && ec->tx_max_coalesced_frames_irq == 1)) { u8 max; /* use only max half of available objects for coalescing */ max = min_t(u8, num_tx / 2, config->fifo_depth); num_tx_coalesce = clamp(ec->tx_max_coalesced_frames_irq, (u32)config->tx.fifo_depth_coalesce_min, (u32)max); num_tx_coalesce = rounddown_pow_of_two(num_tx_coalesce); num_tx = can_ram_rounddown_pow_of_two(config, &config->tx, num_tx_coalesce, num_tx); } layout->cur_rx = num_rx; layout->cur_tx = num_tx; layout->rx_coalesce = num_rx_coalesce; layout->tx_coalesce = num_tx_coalesce; } else { layout->cur_rx = layout->default_rx; layout->cur_tx = layout->default_tx; layout->rx_coalesce = 0; layout->tx_coalesce = 0; } }
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-ram.c
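/*
 * Editor's note: a userspace sketch of the FIFO-carving loop inside
 * can_ram_rounddown_pow_of_two() above (the coalescing carve-out is
 * left aside). Each hardware FIFO must hold a power-of-two number of
 * objects, capped at the controller's fifo_depth, so the loop greedily
 * peels off the largest fitting power-of-two chunk per FIFO and skips
 * FIFOs that would fall below the minimum depth. Helper names and the
 * parameter values in main() are hypothetical, chosen only to show
 * the behaviour.
 */
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's rounddown_pow_of_two(). */
static uint8_t rounddown_pow_of_two_u8(uint8_t n)
{
	uint8_t p = 1;

	while ((uint8_t)(p << 1) && (uint8_t)(p << 1) <= n)
		p <<= 1;
	return n ? p : 0;
}

/* Mirrors the per-FIFO loop body of can_ram_rounddown_pow_of_two(). */
static uint8_t carve_fifos(uint8_t val, uint8_t fifo_num,
			   uint8_t fifo_depth, uint8_t depth_min)
{
	uint8_t ret = 0, i;

	for (i = 0; i < fifo_num && val; i++) {
		uint8_t n = rounddown_pow_of_two_u8(val);

		if (n > fifo_depth)
			n = fifo_depth;
		if (n < depth_min)	/* skip small FIFOs */
			return ret;
		ret += n;
		val -= n;
	}
	return ret;
}

int main(void)
{
	/* 23 requested objects, 3 FIFOs of depth <= 8, minimum depth 2:
	 * the FIFOs end up holding 8 + 8 + 4 = 20 usable objects.
	 */
	printf("usable objects: %u\n", (unsigned)carve_fifos(23, 3, 8, 2));
	return 0;
}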
// SPDX-License-Identifier: GPL-2.0
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
// Copyright (c) 2019, 2020, 2021 Pengutronix,
//               Marc Kleine-Budde <[email protected]>
//
// Based on:
//
// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
//
// Copyright (c) 2019 Martin Sperl <[email protected]>
//

#include <asm/unaligned.h>
#include <linux/bitfield.h>

#include "mcp251xfd.h"

static inline struct
mcp251xfd_tx_obj *mcp251xfd_get_tx_obj_next(struct mcp251xfd_tx_ring *tx_ring)
{
	u8 tx_head;

	tx_head = mcp251xfd_get_tx_head(tx_ring);

	return &tx_ring->obj[tx_head];
}

static void
mcp251xfd_tx_obj_from_skb(const struct mcp251xfd_priv *priv,
			  struct mcp251xfd_tx_obj *tx_obj,
			  const struct sk_buff *skb,
			  unsigned int seq)
{
	const struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	struct mcp251xfd_hw_tx_obj_raw *hw_tx_obj;
	union mcp251xfd_tx_obj_load_buf *load_buf;
	u8 dlc;
	u32 id, flags;
	int len_sanitized = 0, len;

	if (cfd->can_id & CAN_EFF_FLAG) {
		u32 sid, eid;

		sid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_SID_MASK, cfd->can_id);
		eid = FIELD_GET(MCP251XFD_REG_FRAME_EFF_EID_MASK, cfd->can_id);

		id = FIELD_PREP(MCP251XFD_OBJ_ID_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, sid);

		flags = MCP251XFD_OBJ_FLAGS_IDE;
	} else {
		id = FIELD_PREP(MCP251XFD_OBJ_ID_SID_MASK, cfd->can_id);
		flags = 0;
	}

	/* Use the MCP2518FD mask even on the MCP2517FD. It doesn't
	 * harm, only the lower 7 bits will be transferred into the
	 * TEF object.
	 */
	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, seq);

	if (cfd->can_id & CAN_RTR_FLAG)
		flags |= MCP251XFD_OBJ_FLAGS_RTR;
	else
		len_sanitized = canfd_sanitize_len(cfd->len);

	/* CANFD */
	if (can_is_canfd_skb(skb)) {
		if (cfd->flags & CANFD_ESI)
			flags |= MCP251XFD_OBJ_FLAGS_ESI;

		flags |= MCP251XFD_OBJ_FLAGS_FDF;

		if (cfd->flags & CANFD_BRS)
			flags |= MCP251XFD_OBJ_FLAGS_BRS;

		dlc = can_fd_len2dlc(cfd->len);
	} else {
		dlc = can_get_cc_dlc((struct can_frame *)cfd,
				     priv->can.ctrlmode);
	}

	flags |= FIELD_PREP(MCP251XFD_OBJ_FLAGS_DLC_MASK, dlc);

	load_buf = &tx_obj->buf;
	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX)
		hw_tx_obj = &load_buf->crc.hw_tx_obj;
	else
		hw_tx_obj = &load_buf->nocrc.hw_tx_obj;

	put_unaligned_le32(id, &hw_tx_obj->id);
	put_unaligned_le32(flags, &hw_tx_obj->flags);

	/* Copy data */
	memcpy(hw_tx_obj->data, cfd->data, cfd->len);

	/* Clear unused data at end of CAN frame */
	if (MCP251XFD_SANITIZE_CAN && len_sanitized) {
		int pad_len;

		pad_len = len_sanitized - cfd->len;
		if (pad_len)
			memset(hw_tx_obj->data + cfd->len, 0x0, pad_len);
	}

	/* Number of bytes to be written into the RAM of the controller */
	len = sizeof(hw_tx_obj->id) + sizeof(hw_tx_obj->flags);
	if (MCP251XFD_SANITIZE_CAN)
		len += round_up(len_sanitized, sizeof(u32));
	else
		len += round_up(cfd->len, sizeof(u32));

	if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) {
		u16 crc;

		mcp251xfd_spi_cmd_crc_set_len_in_ram(&load_buf->crc.cmd,
						     len);
		/* CRC */
		len += sizeof(load_buf->crc.cmd);
		crc = mcp251xfd_crc16_compute(&load_buf->crc, len);
		put_unaligned_be16(crc, (void *)load_buf + len);

		/* Total length */
		len += sizeof(load_buf->crc.crc);
	} else {
		len += sizeof(load_buf->nocrc.cmd);
	}

	tx_obj->xfer[0].len = len;
}

static int mcp251xfd_tx_obj_write(const struct mcp251xfd_priv *priv,
				  struct mcp251xfd_tx_obj *tx_obj)
{
	return spi_async(priv->spi, &tx_obj->msg);
}

static bool mcp251xfd_tx_busy(const struct mcp251xfd_priv *priv,
			      struct mcp251xfd_tx_ring *tx_ring)
{
	if (mcp251xfd_get_tx_free(tx_ring) > 0)
		return false;

	netif_stop_queue(priv->ndev);

	/* Memory barrier before checking tx_free (head and tail) */
	smp_mb();

	if (mcp251xfd_get_tx_free(tx_ring) == 0) {
		netdev_dbg(priv->ndev,
			   "Stopping tx-queue (tx_head=0x%08x, tx_tail=0x%08x, len=%d).\n",
			   tx_ring->head, tx_ring->tail,
			   tx_ring->head - tx_ring->tail);

		return true;
	}

	netif_start_queue(priv->ndev);

	return false;
}

netdev_tx_t mcp251xfd_start_xmit(struct sk_buff *skb,
				 struct net_device *ndev)
{
	struct mcp251xfd_priv *priv = netdev_priv(ndev);
	struct mcp251xfd_tx_ring *tx_ring = priv->tx;
	struct mcp251xfd_tx_obj *tx_obj;
	unsigned int frame_len;
	u8 tx_head;
	int err;

	if (can_dev_dropped_skb(ndev, skb))
		return NETDEV_TX_OK;

	if (mcp251xfd_tx_busy(priv, tx_ring))
		return NETDEV_TX_BUSY;

	tx_obj = mcp251xfd_get_tx_obj_next(tx_ring);
	mcp251xfd_tx_obj_from_skb(priv, tx_obj, skb, tx_ring->head);

	/* Stop queue if we occupy the complete TX FIFO */
	tx_head = mcp251xfd_get_tx_head(tx_ring);
	tx_ring->head++;
	if (mcp251xfd_get_tx_free(tx_ring) == 0)
		netif_stop_queue(ndev);

	frame_len = can_skb_get_frame_len(skb);
	err = can_put_echo_skb(skb, ndev, tx_head, frame_len);
	if (!err)
		netdev_sent_queue(priv->ndev, frame_len);

	err = mcp251xfd_tx_obj_write(priv, tx_obj);
	if (err)
		goto out_err;

	return NETDEV_TX_OK;

 out_err:
	netdev_err(priv->ndev, "ERROR in %s: %d\n", __func__, err);

	return NETDEV_TX_OK;
}
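The ring helpers used above -- mcp251xfd_get_tx_free() and mcp251xfd_get_tx_head() -- live in mcp251xfd.h, which is not part of this dump. The sketch below is a minimal, self-contained model of the free-running head/tail arithmetic they are assumed to implement: head and tail only ever increment, the object count is a power of two, and the array index is recovered by masking. All names in it are illustrative stand-ins, not the driver's.

/* Standalone sketch (not the driver's header): models the free-running
 * head/tail ring arithmetic assumed behind mcp251xfd_get_tx_free() and
 * mcp251xfd_get_tx_head(). obj_num must be a power of two so the index
 * can be derived by masking.
 */
#include <assert.h>
#include <stdio.h>

struct tx_ring {
	unsigned int head;	/* free-running, only ever incremented */
	unsigned int tail;	/* free-running, only ever incremented */
	unsigned int obj_num;	/* power of two */
};

/* Number of free TX objects: head and tail may wrap around, but
 * unsigned subtraction still yields the in-flight count.
 */
static unsigned int get_tx_free(const struct tx_ring *ring)
{
	return ring->obj_num - (ring->head - ring->tail);
}

/* Index of the next object to fill, derived by masking the head. */
static unsigned int get_tx_head(const struct tx_ring *ring)
{
	return ring->head & (ring->obj_num - 1);
}

int main(void)
{
	struct tx_ring ring = { .head = 254, .tail = 252, .obj_num = 8 };

	assert(get_tx_free(&ring) == 6);
	assert(get_tx_head(&ring) == 6);	/* 254 % 8 */

	ring.head += 6;				/* fill the FIFO ... */
	assert(get_tx_free(&ring) == 0);	/* ... queue must stop */

	printf("free=%u head=%u\n", get_tx_free(&ring), get_tx_head(&ring));
	return 0;
}

Letting the counters run free and wrap naturally avoids a separate "full" flag: head == tail means empty, and head - tail == obj_num means full.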
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-tx.c
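mcp251xfd_tx_busy() in the file above uses the classic stop-queue / barrier / re-check idiom: stop the queue, smp_mb(), then re-read the free count, so a completion that raced in between cannot leave the queue stopped forever. Below is a compilable user-space analogue of the two cooperating sides, using C11 fences in place of smp_mb(). It assumes the TEF completion path (not shown in this dump) advances the tail and wakes the queue; it is a model of the idiom, not driver code.

/* Standalone analogue of the stop-then-recheck pattern in
 * mcp251xfd_tx_busy(). The xmit-side fence pairs with one on the
 * completion side; without them, each side could miss the other's
 * update and the queue would stay stopped forever.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_uint head, tail;
static atomic_bool queue_stopped;

#define OBJ_NUM 8u

static unsigned int tx_free(void)
{
	return OBJ_NUM - (atomic_load(&head) - atomic_load(&tail));
}

/* xmit path: called when tx_free() just read as 0 */
static bool tx_busy(void)
{
	atomic_store(&queue_stopped, true);
	atomic_thread_fence(memory_order_seq_cst);	/* like smp_mb() */
	if (tx_free() == 0)
		return true;		/* really full, keep stopped */
	atomic_store(&queue_stopped, false);
	return false;			/* raced with a completion, go on */
}

/* completion path: one TX object finished */
static void tx_complete(void)
{
	atomic_fetch_add(&tail, 1);
	atomic_thread_fence(memory_order_seq_cst);
	if (atomic_load(&queue_stopped) && tx_free() > 0)
		atomic_store(&queue_stopped, false);	/* wake queue */
}

int main(void)
{
	atomic_store(&head, OBJ_NUM);	/* ring full: 8 objects in flight */
	assert(tx_busy());		/* xmit path must keep it stopped */
	tx_complete();			/* one object finishes ... */
	assert(!atomic_load(&queue_stopped));	/* ... and wakes the queue */
	return 0;
}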
// SPDX-License-Identifier: GPL-2.0 // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // // Copyright (c) 2019, 2020, 2021 Pengutronix, // Marc Kleine-Budde <[email protected]> // // Based on: // // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface // // Copyright (c) 2019 Martin Sperl <[email protected]> // #include <asm/unaligned.h> #include <linux/bitfield.h> #include <linux/clk.h> #include <linux/device.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/pm_runtime.h> #include <linux/property.h> #include "mcp251xfd.h" #define DEVICE_NAME "mcp251xfd" static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2517fd = { .quirks = MCP251XFD_QUIRK_MAB_NO_WARN | MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, .model = MCP251XFD_MODEL_MCP2517FD, }; static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp2518fd = { .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, .model = MCP251XFD_MODEL_MCP2518FD, }; static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251863 = { .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, .model = MCP251XFD_MODEL_MCP251863, }; /* Autodetect model, start with CRC enabled. */ static const struct mcp251xfd_devtype_data mcp251xfd_devtype_data_mcp251xfd = { .quirks = MCP251XFD_QUIRK_CRC_REG | MCP251XFD_QUIRK_CRC_RX | MCP251XFD_QUIRK_CRC_TX | MCP251XFD_QUIRK_ECC, .model = MCP251XFD_MODEL_MCP251XFD, }; static const struct can_bittiming_const mcp251xfd_bittiming_const = { .name = DEVICE_NAME, .tseg1_min = 2, .tseg1_max = 256, .tseg2_min = 1, .tseg2_max = 128, .sjw_max = 128, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; static const struct can_bittiming_const mcp251xfd_data_bittiming_const = { .name = DEVICE_NAME, .tseg1_min = 1, .tseg1_max = 32, .tseg2_min = 1, .tseg2_max = 16, .sjw_max = 16, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; static const char *__mcp251xfd_get_model_str(enum mcp251xfd_model model) { switch (model) { case MCP251XFD_MODEL_MCP2517FD: return "MCP2517FD"; case MCP251XFD_MODEL_MCP2518FD: return "MCP2518FD"; case MCP251XFD_MODEL_MCP251863: return "MCP251863"; case MCP251XFD_MODEL_MCP251XFD: return "MCP251xFD"; } return "<unknown>"; } static inline const char * mcp251xfd_get_model_str(const struct mcp251xfd_priv *priv) { return __mcp251xfd_get_model_str(priv->devtype_data.model); } static const char *mcp251xfd_get_mode_str(const u8 mode) { switch (mode) { case MCP251XFD_REG_CON_MODE_MIXED: return "Mixed (CAN FD/CAN 2.0)"; case MCP251XFD_REG_CON_MODE_SLEEP: return "Sleep"; case MCP251XFD_REG_CON_MODE_INT_LOOPBACK: return "Internal Loopback"; case MCP251XFD_REG_CON_MODE_LISTENONLY: return "Listen Only"; case MCP251XFD_REG_CON_MODE_CONFIG: return "Configuration"; case MCP251XFD_REG_CON_MODE_EXT_LOOPBACK: return "External Loopback"; case MCP251XFD_REG_CON_MODE_CAN2_0: return "CAN 2.0"; case MCP251XFD_REG_CON_MODE_RESTRICTED: return "Restricted Operation"; } return "<unknown>"; } static const char * mcp251xfd_get_osc_str(const u32 osc, const u32 osc_reference) { switch (~osc & osc_reference & (MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY)) { case MCP251XFD_REG_OSC_PLLRDY: return "PLL"; case MCP251XFD_REG_OSC_OSCRDY: return "Oscillator"; case MCP251XFD_REG_OSC_PLLRDY | MCP251XFD_REG_OSC_OSCRDY: return "Oscillator/PLL"; } return "<unknown>"; } static inline int mcp251xfd_vdd_enable(const 
struct mcp251xfd_priv *priv) { if (!priv->reg_vdd) return 0; return regulator_enable(priv->reg_vdd); } static inline int mcp251xfd_vdd_disable(const struct mcp251xfd_priv *priv) { if (!priv->reg_vdd) return 0; return regulator_disable(priv->reg_vdd); } static inline int mcp251xfd_transceiver_enable(const struct mcp251xfd_priv *priv) { if (!priv->reg_xceiver) return 0; return regulator_enable(priv->reg_xceiver); } static inline int mcp251xfd_transceiver_disable(const struct mcp251xfd_priv *priv) { if (!priv->reg_xceiver) return 0; return regulator_disable(priv->reg_xceiver); } static int mcp251xfd_clks_and_vdd_enable(const struct mcp251xfd_priv *priv) { int err; err = clk_prepare_enable(priv->clk); if (err) return err; err = mcp251xfd_vdd_enable(priv); if (err) clk_disable_unprepare(priv->clk); /* Wait for oscillator stabilisation time after power up */ usleep_range(MCP251XFD_OSC_STAB_SLEEP_US, 2 * MCP251XFD_OSC_STAB_SLEEP_US); return err; } static int mcp251xfd_clks_and_vdd_disable(const struct mcp251xfd_priv *priv) { int err; err = mcp251xfd_vdd_disable(priv); if (err) return err; clk_disable_unprepare(priv->clk); return 0; } static inline bool mcp251xfd_reg_invalid(u32 reg) { return reg == 0x0 || reg == 0xffffffff; } static inline int mcp251xfd_chip_get_mode(const struct mcp251xfd_priv *priv, u8 *mode) { u32 val; int err; err = regmap_read(priv->map_reg, MCP251XFD_REG_CON, &val); if (err) return err; *mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, val); return 0; } static int __mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv, const u8 mode_req, bool nowait) { const struct can_bittiming *bt = &priv->can.bittiming; unsigned long timeout_us = MCP251XFD_POLL_TIMEOUT_US; u32 con = 0, con_reqop, osc = 0; u8 mode; int err; con_reqop = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, mode_req); err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CON, MCP251XFD_REG_CON_REQOP_MASK, con_reqop); if (err == -EBADMSG) { netdev_err(priv->ndev, "Failed to set Requested Operation Mode.\n"); return -ENODEV; } else if (err) { return err; } if (mode_req == MCP251XFD_REG_CON_MODE_SLEEP || nowait) return 0; if (bt->bitrate) timeout_us = max_t(unsigned long, timeout_us, MCP251XFD_FRAME_LEN_MAX_BITS * USEC_PER_SEC / bt->bitrate); err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_CON, con, !mcp251xfd_reg_invalid(con) && FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con) == mode_req, MCP251XFD_POLL_SLEEP_US, timeout_us); if (err != -ETIMEDOUT && err != -EBADMSG) return err; /* Ignore return value. * Print below error messages, even if this fails. 
*/ regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc); if (mcp251xfd_reg_invalid(con)) { netdev_err(priv->ndev, "Failed to read CAN Control Register (con=0x%08x, osc=0x%08x).\n", con, osc); return -ENODEV; } mode = FIELD_GET(MCP251XFD_REG_CON_OPMOD_MASK, con); netdev_err(priv->ndev, "Controller failed to enter mode %s Mode (%u) and stays in %s Mode (%u) (con=0x%08x, osc=0x%08x).\n", mcp251xfd_get_mode_str(mode_req), mode_req, mcp251xfd_get_mode_str(mode), mode, con, osc); return -ETIMEDOUT; } static inline int mcp251xfd_chip_set_mode(const struct mcp251xfd_priv *priv, const u8 mode_req) { return __mcp251xfd_chip_set_mode(priv, mode_req, false); } static inline int __maybe_unused mcp251xfd_chip_set_mode_nowait(const struct mcp251xfd_priv *priv, const u8 mode_req) { return __mcp251xfd_chip_set_mode(priv, mode_req, true); } static int mcp251xfd_chip_wait_for_osc_ready(const struct mcp251xfd_priv *priv, u32 osc_reference, u32 osc_mask) { u32 osc; int err; err = regmap_read_poll_timeout(priv->map_reg, MCP251XFD_REG_OSC, osc, !mcp251xfd_reg_invalid(osc) && (osc & osc_mask) == osc_reference, MCP251XFD_OSC_STAB_SLEEP_US, MCP251XFD_OSC_STAB_TIMEOUT_US); if (err != -ETIMEDOUT) return err; if (mcp251xfd_reg_invalid(osc)) { netdev_err(priv->ndev, "Failed to read Oscillator Configuration Register (osc=0x%08x).\n", osc); return -ENODEV; } netdev_err(priv->ndev, "Timeout waiting for %s ready (osc=0x%08x, osc_reference=0x%08x, osc_mask=0x%08x).\n", mcp251xfd_get_osc_str(osc, osc_reference), osc, osc_reference, osc_mask); return -ETIMEDOUT; } static int mcp251xfd_chip_wake(const struct mcp251xfd_priv *priv) { u32 osc, osc_reference, osc_mask; int err; /* For normal sleep on MCP2517FD and MCP2518FD, clearing * "Oscillator Disable" will wake the chip. For low power mode * on MCP2518FD, asserting the chip select will wake the * chip. Writing to the Oscillator register will wake it in * both cases. */ osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, MCP251XFD_REG_OSC_CLKODIV_10); /* We cannot check for the PLL ready bit (either set or * unset), as the PLL might be enabled. This can happen if the * system reboots, while the mcp251xfd stays powered. */ osc_reference = MCP251XFD_REG_OSC_OSCRDY; osc_mask = MCP251XFD_REG_OSC_OSCRDY; /* If the controller is in Sleep Mode the following write only * removes the "Oscillator Disable" bit and powers it up. All * other bits are unaffected. */ err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); if (err) return err; /* Sometimes the PLL is stuck enabled, the controller never * sets the OSC Ready bit, and we get an -ETIMEDOUT. Our * caller takes care of retry. */ return mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask); } static inline int mcp251xfd_chip_sleep(const struct mcp251xfd_priv *priv) { if (priv->pll_enable) { u32 osc; int err; /* Turn off PLL */ osc = FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, MCP251XFD_REG_OSC_CLKODIV_10); err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); if (err) netdev_err(priv->ndev, "Failed to disable PLL.\n"); priv->spi->max_speed_hz = priv->spi_max_speed_hz_slow; } return mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_SLEEP); } static int mcp251xfd_chip_softreset_do(const struct mcp251xfd_priv *priv) { const __be16 cmd = mcp251xfd_cmd_reset(); int err; /* The Set Mode and SPI Reset command only works if the * controller is not in Sleep Mode. 
*/ err = mcp251xfd_chip_wake(priv); if (err) return err; err = mcp251xfd_chip_set_mode(priv, MCP251XFD_REG_CON_MODE_CONFIG); if (err) return err; /* spi_write_then_read() works with non DMA-safe buffers */ return spi_write_then_read(priv->spi, &cmd, sizeof(cmd), NULL, 0); } static int mcp251xfd_chip_softreset_check(const struct mcp251xfd_priv *priv) { u32 osc_reference, osc_mask; u8 mode; int err; /* Check for reset defaults of OSC reg. * This will take care of stabilization period. */ osc_reference = MCP251XFD_REG_OSC_OSCRDY | FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, MCP251XFD_REG_OSC_CLKODIV_10); osc_mask = osc_reference | MCP251XFD_REG_OSC_PLLRDY; err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask); if (err) return err; err = mcp251xfd_chip_get_mode(priv, &mode); if (err) return err; if (mode != MCP251XFD_REG_CON_MODE_CONFIG) { netdev_info(priv->ndev, "Controller not in Config Mode after reset, but in %s Mode (%u).\n", mcp251xfd_get_mode_str(mode), mode); return -ETIMEDOUT; } return 0; } static int mcp251xfd_chip_softreset(const struct mcp251xfd_priv *priv) { int err, i; for (i = 0; i < MCP251XFD_SOFTRESET_RETRIES_MAX; i++) { if (i) netdev_info(priv->ndev, "Retrying to reset controller.\n"); err = mcp251xfd_chip_softreset_do(priv); if (err == -ETIMEDOUT) continue; if (err) return err; err = mcp251xfd_chip_softreset_check(priv); if (err == -ETIMEDOUT) continue; if (err) return err; return 0; } return err; } static int mcp251xfd_chip_clock_init(const struct mcp251xfd_priv *priv) { u32 osc, osc_reference, osc_mask; int err; /* Activate Low Power Mode on Oscillator Disable. This only * works on the MCP2518FD. The MCP2517FD will go into normal * Sleep Mode instead. */ osc = MCP251XFD_REG_OSC_LPMEN | FIELD_PREP(MCP251XFD_REG_OSC_CLKODIV_MASK, MCP251XFD_REG_OSC_CLKODIV_10); osc_reference = MCP251XFD_REG_OSC_OSCRDY; osc_mask = MCP251XFD_REG_OSC_OSCRDY | MCP251XFD_REG_OSC_PLLRDY; if (priv->pll_enable) { osc |= MCP251XFD_REG_OSC_PLLEN; osc_reference |= MCP251XFD_REG_OSC_PLLRDY; } err = regmap_write(priv->map_reg, MCP251XFD_REG_OSC, osc); if (err) return err; err = mcp251xfd_chip_wait_for_osc_ready(priv, osc_reference, osc_mask); if (err) return err; priv->spi->max_speed_hz = priv->spi_max_speed_hz_fast; return 0; } static int mcp251xfd_chip_timestamp_init(const struct mcp251xfd_priv *priv) { /* Set Time Base Counter Prescaler to 1. * * This means an overflow of the 32 bit Time Base Counter * register at 40 MHz every 107 seconds. 
*/ return regmap_write(priv->map_reg, MCP251XFD_REG_TSCON, MCP251XFD_REG_TSCON_TBCEN); } static int mcp251xfd_set_bittiming(const struct mcp251xfd_priv *priv) { const struct can_bittiming *bt = &priv->can.bittiming; const struct can_bittiming *dbt = &priv->can.data_bittiming; u32 val = 0; s8 tdco; int err; /* CAN Control Register * * - no transmit bandwidth sharing * - config mode * - disable transmit queue * - store in transmit FIFO event * - transition to restricted operation mode on system error * - ESI is transmitted recessive when ESI of message is high or * CAN controller error passive * - restricted retransmission attempts, * use TQXCON_TXAT and FIFOCON_TXAT * - wake-up filter bits T11FILTER * - use CAN bus line filter for wakeup * - protocol exception is treated as a form error * - Do not compare data bytes */ val = FIELD_PREP(MCP251XFD_REG_CON_REQOP_MASK, MCP251XFD_REG_CON_MODE_CONFIG) | MCP251XFD_REG_CON_STEF | MCP251XFD_REG_CON_ESIGM | MCP251XFD_REG_CON_RTXAT | FIELD_PREP(MCP251XFD_REG_CON_WFT_MASK, MCP251XFD_REG_CON_WFT_T11FILTER) | MCP251XFD_REG_CON_WAKFIL | MCP251XFD_REG_CON_PXEDIS; if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) val |= MCP251XFD_REG_CON_ISOCRCEN; err = regmap_write(priv->map_reg, MCP251XFD_REG_CON, val); if (err) return err; /* Nominal Bit Time */ val = FIELD_PREP(MCP251XFD_REG_NBTCFG_BRP_MASK, bt->brp - 1) | FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG1_MASK, bt->prop_seg + bt->phase_seg1 - 1) | FIELD_PREP(MCP251XFD_REG_NBTCFG_TSEG2_MASK, bt->phase_seg2 - 1) | FIELD_PREP(MCP251XFD_REG_NBTCFG_SJW_MASK, bt->sjw - 1); err = regmap_write(priv->map_reg, MCP251XFD_REG_NBTCFG, val); if (err) return err; if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD)) return 0; /* Data Bit Time */ val = FIELD_PREP(MCP251XFD_REG_DBTCFG_BRP_MASK, dbt->brp - 1) | FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG1_MASK, dbt->prop_seg + dbt->phase_seg1 - 1) | FIELD_PREP(MCP251XFD_REG_DBTCFG_TSEG2_MASK, dbt->phase_seg2 - 1) | FIELD_PREP(MCP251XFD_REG_DBTCFG_SJW_MASK, dbt->sjw - 1); err = regmap_write(priv->map_reg, MCP251XFD_REG_DBTCFG, val); if (err) return err; /* Transmitter Delay Compensation */ tdco = clamp_t(int, dbt->brp * (dbt->prop_seg + dbt->phase_seg1), -64, 63); val = FIELD_PREP(MCP251XFD_REG_TDC_TDCMOD_MASK, MCP251XFD_REG_TDC_TDCMOD_AUTO) | FIELD_PREP(MCP251XFD_REG_TDC_TDCO_MASK, tdco); return regmap_write(priv->map_reg, MCP251XFD_REG_TDC, val); } static int mcp251xfd_chip_rx_int_enable(const struct mcp251xfd_priv *priv) { u32 val; if (!priv->rx_int) return 0; /* Configure GPIOs: * - PIN0: GPIO Input * - PIN1: GPIO Input/RX Interrupt * * PIN1 must be Input, otherwise there is a glitch on the * rx-INT line. It happens between setting the PIN as output * (in the first byte of the SPI transfer) and configuring the * PIN as interrupt (in the last byte of the SPI transfer). 
*/ val = MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0; return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val); } static int mcp251xfd_chip_rx_int_disable(const struct mcp251xfd_priv *priv) { u32 val; if (!priv->rx_int) return 0; /* Configure GPIOs: * - PIN0: GPIO Input * - PIN1: GPIO Input */ val = MCP251XFD_REG_IOCON_PM1 | MCP251XFD_REG_IOCON_PM0 | MCP251XFD_REG_IOCON_TRIS1 | MCP251XFD_REG_IOCON_TRIS0; return regmap_write(priv->map_reg, MCP251XFD_REG_IOCON, val); } static int mcp251xfd_chip_ecc_init(struct mcp251xfd_priv *priv) { struct mcp251xfd_ecc *ecc = &priv->ecc; void *ram; u32 val = 0; int err; ecc->ecc_stat = 0; if (priv->devtype_data.quirks & MCP251XFD_QUIRK_ECC) val = MCP251XFD_REG_ECCCON_ECCEN; err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, MCP251XFD_REG_ECCCON_ECCEN, val); if (err) return err; ram = kzalloc(MCP251XFD_RAM_SIZE, GFP_KERNEL); if (!ram) return -ENOMEM; err = regmap_raw_write(priv->map_reg, MCP251XFD_RAM_START, ram, MCP251XFD_RAM_SIZE); kfree(ram); return err; } static u8 mcp251xfd_get_normal_mode(const struct mcp251xfd_priv *priv) { u8 mode; if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) mode = MCP251XFD_REG_CON_MODE_INT_LOOPBACK; else if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) mode = MCP251XFD_REG_CON_MODE_LISTENONLY; else if (priv->can.ctrlmode & CAN_CTRLMODE_FD) mode = MCP251XFD_REG_CON_MODE_MIXED; else mode = MCP251XFD_REG_CON_MODE_CAN2_0; return mode; } static int __mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv, bool nowait) { u8 mode; mode = mcp251xfd_get_normal_mode(priv); return __mcp251xfd_chip_set_mode(priv, mode, nowait); } static inline int mcp251xfd_chip_set_normal_mode(const struct mcp251xfd_priv *priv) { return __mcp251xfd_chip_set_normal_mode(priv, false); } static inline int mcp251xfd_chip_set_normal_mode_nowait(const struct mcp251xfd_priv *priv) { return __mcp251xfd_chip_set_normal_mode(priv, true); } static int mcp251xfd_chip_interrupts_enable(const struct mcp251xfd_priv *priv) { u32 val; int err; val = MCP251XFD_REG_CRC_FERRIE | MCP251XFD_REG_CRC_CRCERRIE; err = regmap_write(priv->map_reg, MCP251XFD_REG_CRC, val); if (err) return err; val = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE; err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, val, val); if (err) return err; val = MCP251XFD_REG_INT_CERRIE | MCP251XFD_REG_INT_SERRIE | MCP251XFD_REG_INT_RXOVIE | MCP251XFD_REG_INT_TXATIE | MCP251XFD_REG_INT_SPICRCIE | MCP251XFD_REG_INT_ECCIE | MCP251XFD_REG_INT_TEFIE | MCP251XFD_REG_INT_MODIE | MCP251XFD_REG_INT_RXIE; if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) val |= MCP251XFD_REG_INT_IVMIE; return regmap_write(priv->map_reg, MCP251XFD_REG_INT, val); } static int mcp251xfd_chip_interrupts_disable(const struct mcp251xfd_priv *priv) { int err; u32 mask; err = regmap_write(priv->map_reg, MCP251XFD_REG_INT, 0); if (err) return err; mask = MCP251XFD_REG_ECCCON_DEDIE | MCP251XFD_REG_ECCCON_SECIE; err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCCON, mask, 0x0); if (err) return err; return regmap_write(priv->map_reg, MCP251XFD_REG_CRC, 0); } static void mcp251xfd_chip_stop(struct mcp251xfd_priv *priv, const enum can_state state) { priv->can.state = state; mcp251xfd_chip_interrupts_disable(priv); mcp251xfd_chip_rx_int_disable(priv); mcp251xfd_chip_sleep(priv); } static int mcp251xfd_chip_start(struct mcp251xfd_priv *priv) { int err; err = mcp251xfd_chip_softreset(priv); if (err) goto out_chip_stop; err = mcp251xfd_chip_clock_init(priv); if 
(err) goto out_chip_stop; err = mcp251xfd_chip_timestamp_init(priv); if (err) goto out_chip_stop; err = mcp251xfd_set_bittiming(priv); if (err) goto out_chip_stop; err = mcp251xfd_chip_rx_int_enable(priv); if (err) goto out_chip_stop; err = mcp251xfd_chip_ecc_init(priv); if (err) goto out_chip_stop; err = mcp251xfd_ring_init(priv); if (err) goto out_chip_stop; err = mcp251xfd_chip_fifo_init(priv); if (err) goto out_chip_stop; priv->can.state = CAN_STATE_ERROR_ACTIVE; err = mcp251xfd_chip_set_normal_mode(priv); if (err) goto out_chip_stop; return 0; out_chip_stop: mcp251xfd_dump(priv); mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); return err; } static int mcp251xfd_set_mode(struct net_device *ndev, enum can_mode mode) { struct mcp251xfd_priv *priv = netdev_priv(ndev); int err; switch (mode) { case CAN_MODE_START: err = mcp251xfd_chip_start(priv); if (err) return err; err = mcp251xfd_chip_interrupts_enable(priv); if (err) { mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); return err; } netif_wake_queue(ndev); break; default: return -EOPNOTSUPP; } return 0; } static int __mcp251xfd_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { const struct mcp251xfd_priv *priv = netdev_priv(ndev); u32 trec; int err; err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec); if (err) return err; if (trec & MCP251XFD_REG_TREC_TXBO) bec->txerr = 256; else bec->txerr = FIELD_GET(MCP251XFD_REG_TREC_TEC_MASK, trec); bec->rxerr = FIELD_GET(MCP251XFD_REG_TREC_REC_MASK, trec); return 0; } static int mcp251xfd_get_berr_counter(const struct net_device *ndev, struct can_berr_counter *bec) { const struct mcp251xfd_priv *priv = netdev_priv(ndev); /* Avoid waking up the controller if the interface is down */ if (!(ndev->flags & IFF_UP)) return 0; /* The controller is powered down during Bus Off, use saved * bec values. */ if (priv->can.state == CAN_STATE_BUS_OFF) { *bec = priv->bec; return 0; } return __mcp251xfd_get_berr_counter(ndev, bec); } static struct sk_buff * mcp251xfd_alloc_can_err_skb(struct mcp251xfd_priv *priv, struct can_frame **cf, u32 *timestamp) { struct sk_buff *skb; int err; err = mcp251xfd_get_timestamp(priv, timestamp); if (err) return NULL; skb = alloc_can_err_skb(priv->ndev, cf); if (skb) mcp251xfd_skb_set_timestamp(priv, skb, *timestamp); return skb; } static int mcp251xfd_handle_rxovif(struct mcp251xfd_priv *priv) { struct net_device_stats *stats = &priv->ndev->stats; struct mcp251xfd_rx_ring *ring; struct sk_buff *skb; struct can_frame *cf; u32 timestamp, rxovif; int err, i; stats->rx_over_errors++; stats->rx_errors++; err = regmap_read(priv->map_reg, MCP251XFD_REG_RXOVIF, &rxovif); if (err) return err; mcp251xfd_for_each_rx_ring(priv, ring, i) { if (!(rxovif & BIT(ring->fifo_nr))) continue; /* If SERRIF is active, there was a RX MAB overflow. 
*/ if (priv->regs_status.intf & MCP251XFD_REG_INT_SERRIF) { if (net_ratelimit()) netdev_dbg(priv->ndev, "RX-%d: MAB overflow detected.\n", ring->nr); } else { if (net_ratelimit()) netdev_dbg(priv->ndev, "RX-%d: FIFO overflow.\n", ring->nr); } err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr), MCP251XFD_REG_FIFOSTA_RXOVIF, 0x0); if (err) return err; } skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp); if (!skb) return 0; cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; return 0; } static int mcp251xfd_handle_txatif(struct mcp251xfd_priv *priv) { netdev_info(priv->ndev, "%s\n", __func__); return 0; } static int mcp251xfd_handle_ivmif(struct mcp251xfd_priv *priv) { struct net_device_stats *stats = &priv->ndev->stats; u32 bdiag1, timestamp; struct sk_buff *skb; struct can_frame *cf = NULL; int err; err = mcp251xfd_get_timestamp(priv, &timestamp); if (err) return err; err = regmap_read(priv->map_reg, MCP251XFD_REG_BDIAG1, &bdiag1); if (err) return err; /* Write 0s to clear error bits, don't write 1s to non active * bits, as they will be set. */ err = regmap_write(priv->map_reg, MCP251XFD_REG_BDIAG1, 0x0); if (err) return err; priv->can.can_stats.bus_error++; skb = alloc_can_err_skb(priv->ndev, &cf); if (cf) cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; /* Controller misconfiguration */ if (WARN_ON(bdiag1 & MCP251XFD_REG_BDIAG1_DLCMM)) netdev_err(priv->ndev, "recv'd DLC is larger than PLSIZE of FIFO element."); /* RX errors */ if (bdiag1 & (MCP251XFD_REG_BDIAG1_DCRCERR | MCP251XFD_REG_BDIAG1_NCRCERR)) { netdev_dbg(priv->ndev, "CRC error\n"); stats->rx_errors++; if (cf) cf->data[3] |= CAN_ERR_PROT_LOC_CRC_SEQ; } if (bdiag1 & (MCP251XFD_REG_BDIAG1_DSTUFERR | MCP251XFD_REG_BDIAG1_NSTUFERR)) { netdev_dbg(priv->ndev, "Stuff error\n"); stats->rx_errors++; if (cf) cf->data[2] |= CAN_ERR_PROT_STUFF; } if (bdiag1 & (MCP251XFD_REG_BDIAG1_DFORMERR | MCP251XFD_REG_BDIAG1_NFORMERR)) { netdev_dbg(priv->ndev, "Format error\n"); stats->rx_errors++; if (cf) cf->data[2] |= CAN_ERR_PROT_FORM; } /* TX errors */ if (bdiag1 & MCP251XFD_REG_BDIAG1_NACKERR) { netdev_dbg(priv->ndev, "NACK error\n"); stats->tx_errors++; if (cf) { cf->can_id |= CAN_ERR_ACK; cf->data[2] |= CAN_ERR_PROT_TX; } } if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT1ERR | MCP251XFD_REG_BDIAG1_NBIT1ERR)) { netdev_dbg(priv->ndev, "Bit1 error\n"); stats->tx_errors++; if (cf) cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT1; } if (bdiag1 & (MCP251XFD_REG_BDIAG1_DBIT0ERR | MCP251XFD_REG_BDIAG1_NBIT0ERR)) { netdev_dbg(priv->ndev, "Bit0 error\n"); stats->tx_errors++; if (cf) cf->data[2] |= CAN_ERR_PROT_TX | CAN_ERR_PROT_BIT0; } if (!cf) return 0; mcp251xfd_skb_set_timestamp(priv, skb, timestamp); err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; return 0; } static int mcp251xfd_handle_cerrif(struct mcp251xfd_priv *priv) { struct net_device_stats *stats = &priv->ndev->stats; struct sk_buff *skb; struct can_frame *cf = NULL; enum can_state new_state, rx_state, tx_state; u32 trec, timestamp; int err; err = regmap_read(priv->map_reg, MCP251XFD_REG_TREC, &trec); if (err) return err; if (trec & MCP251XFD_REG_TREC_TXBO) tx_state = CAN_STATE_BUS_OFF; else if (trec & MCP251XFD_REG_TREC_TXBP) tx_state = CAN_STATE_ERROR_PASSIVE; else if (trec & MCP251XFD_REG_TREC_TXWARN) tx_state = CAN_STATE_ERROR_WARNING; else tx_state = CAN_STATE_ERROR_ACTIVE; if (trec & 
MCP251XFD_REG_TREC_RXBP) rx_state = CAN_STATE_ERROR_PASSIVE; else if (trec & MCP251XFD_REG_TREC_RXWARN) rx_state = CAN_STATE_ERROR_WARNING; else rx_state = CAN_STATE_ERROR_ACTIVE; new_state = max(tx_state, rx_state); if (new_state == priv->can.state) return 0; /* The skb allocation might fail, but can_change_state() * handles cf == NULL. */ skb = mcp251xfd_alloc_can_err_skb(priv, &cf, &timestamp); can_change_state(priv->ndev, cf, tx_state, rx_state); if (new_state == CAN_STATE_BUS_OFF) { /* As we're going to switch off the chip now, let's * save the error counters and return them to * userspace, if do_get_berr_counter() is called while * the chip is in Bus Off. */ err = __mcp251xfd_get_berr_counter(priv->ndev, &priv->bec); if (err) return err; mcp251xfd_chip_stop(priv, CAN_STATE_BUS_OFF); can_bus_off(priv->ndev); } if (!skb) return 0; if (new_state != CAN_STATE_BUS_OFF) { struct can_berr_counter bec; err = mcp251xfd_get_berr_counter(priv->ndev, &bec); if (err) return err; cf->can_id |= CAN_ERR_CNT; cf->data[6] = bec.txerr; cf->data[7] = bec.rxerr; } err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) stats->rx_fifo_errors++; return 0; } static int mcp251xfd_handle_modif(const struct mcp251xfd_priv *priv, bool *set_normal_mode) { const u8 mode_reference = mcp251xfd_get_normal_mode(priv); u8 mode; int err; err = mcp251xfd_chip_get_mode(priv, &mode); if (err) return err; if (mode == mode_reference) { netdev_dbg(priv->ndev, "Controller changed into %s Mode (%u).\n", mcp251xfd_get_mode_str(mode), mode); return 0; } /* According to MCP2517FD errata DS80000792B 1., during a TX * MAB underflow, the controller will transition to Restricted * Operation Mode or Listen Only Mode (depending on SERR2LOM). * * However this is not always the case. If SERR2LOM is * configured for Restricted Operation Mode (SERR2LOM not set) * the MCP2517FD will sometimes transition to Listen Only Mode * first. When polling this bit we see that it will transition * to Restricted Operation Mode shortly after. */ if ((priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) && (mode == MCP251XFD_REG_CON_MODE_RESTRICTED || mode == MCP251XFD_REG_CON_MODE_LISTENONLY)) netdev_dbg(priv->ndev, "Controller changed into %s Mode (%u).\n", mcp251xfd_get_mode_str(mode), mode); else netdev_err(priv->ndev, "Controller changed into %s Mode (%u).\n", mcp251xfd_get_mode_str(mode), mode); /* After the application requests Normal mode, the controller * will automatically attempt to retransmit the message that * caused the TX MAB underflow. * * However, if there is an ECC error in the TX-RAM, we first * have to reload the tx-object before requesting Normal * mode. This is done later in mcp251xfd_handle_eccif(). */ if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF) { *set_normal_mode = true; return 0; } return mcp251xfd_chip_set_normal_mode_nowait(priv); } static int mcp251xfd_handle_serrif(struct mcp251xfd_priv *priv) { struct mcp251xfd_ecc *ecc = &priv->ecc; struct net_device_stats *stats = &priv->ndev->stats; bool handled = false; /* TX MAB underflow * * According to MCP2517FD Errata DS80000792B 1. a TX MAB * underflow is indicated by SERRIF and MODIF. * * In addition to the effects mentioned in the Errata, there * are Bus Errors due to the aborted CAN frame, so a IVMIF * will be seen as well. * * Sometimes there is an ECC error in the TX-RAM, which leads * to a TX MAB underflow. * * However, probably due to a race condition, there is no * associated MODIF pending. 
* * Further, there are situations, where the SERRIF is caused * by an ECC error in the TX-RAM, but not even the ECCIF is * set. This only seems to happen _after_ the first occurrence * of a ECCIF (which is tracked in ecc->cnt). * * Treat all as a known system errors.. */ if ((priv->regs_status.intf & MCP251XFD_REG_INT_MODIF && priv->regs_status.intf & MCP251XFD_REG_INT_IVMIF) || priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF || ecc->cnt) { const char *msg; if (priv->regs_status.intf & MCP251XFD_REG_INT_ECCIF || ecc->cnt) msg = "TX MAB underflow due to ECC error detected."; else msg = "TX MAB underflow detected."; if (priv->devtype_data.quirks & MCP251XFD_QUIRK_MAB_NO_WARN) netdev_dbg(priv->ndev, "%s\n", msg); else netdev_info(priv->ndev, "%s\n", msg); stats->tx_aborted_errors++; stats->tx_errors++; handled = true; } /* RX MAB overflow * * According to MCP2517FD Errata DS80000792B 1. a RX MAB * overflow is indicated by SERRIF. * * In addition to the effects mentioned in the Errata, (most * of the times) a RXOVIF is raised, if the FIFO that is being * received into has the RXOVIE activated (and we have enabled * RXOVIE on all FIFOs). * * Sometimes there is no RXOVIF just a RXIF is pending. * * Treat all as a known system errors.. */ if (priv->regs_status.intf & MCP251XFD_REG_INT_RXOVIF || priv->regs_status.intf & MCP251XFD_REG_INT_RXIF) { stats->rx_dropped++; handled = true; } if (!handled) netdev_err(priv->ndev, "Unhandled System Error Interrupt (intf=0x%08x)!\n", priv->regs_status.intf); return 0; } static int mcp251xfd_handle_eccif_recover(struct mcp251xfd_priv *priv, u8 nr) { struct mcp251xfd_tx_ring *tx_ring = priv->tx; struct mcp251xfd_ecc *ecc = &priv->ecc; struct mcp251xfd_tx_obj *tx_obj; u8 chip_tx_tail, tx_tail, offset; u16 addr; int err; addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc->ecc_stat); err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail); if (err) return err; tx_tail = mcp251xfd_get_tx_tail(tx_ring); offset = (nr - chip_tx_tail) & (tx_ring->obj_num - 1); /* Bail out if one of the following is met: * - tx_tail information is inconsistent * - for mcp2517fd: offset not 0 * - for mcp2518fd: offset not 0 or 1 */ if (chip_tx_tail != tx_tail || !(offset == 0 || (offset == 1 && (mcp251xfd_is_2518FD(priv) || mcp251xfd_is_251863(priv))))) { netdev_err(priv->ndev, "ECC Error information inconsistent (addr=0x%04x, nr=%d, tx_tail=0x%08x(%d), chip_tx_tail=%d, offset=%d).\n", addr, nr, tx_ring->tail, tx_tail, chip_tx_tail, offset); return -EINVAL; } netdev_info(priv->ndev, "Recovering %s ECC Error at address 0x%04x (in TX-RAM, tx_obj=%d, tx_tail=0x%08x(%d), offset=%d).\n", ecc->ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF ? "Single" : "Double", addr, nr, tx_ring->tail, tx_tail, offset); /* reload tx_obj into controller RAM ... */ tx_obj = &tx_ring->obj[nr]; err = spi_sync_transfer(priv->spi, tx_obj->xfer, 1); if (err) return err; /* ... 
and trigger retransmit */ return mcp251xfd_chip_set_normal_mode(priv); } static int mcp251xfd_handle_eccif(struct mcp251xfd_priv *priv, bool set_normal_mode) { struct mcp251xfd_ecc *ecc = &priv->ecc; const char *msg; bool in_tx_ram; u32 ecc_stat; u16 addr; u8 nr; int err; err = regmap_read(priv->map_reg, MCP251XFD_REG_ECCSTAT, &ecc_stat); if (err) return err; err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_ECCSTAT, MCP251XFD_REG_ECCSTAT_IF_MASK, ~ecc_stat); if (err) return err; /* Check if ECC error occurred in TX-RAM */ addr = FIELD_GET(MCP251XFD_REG_ECCSTAT_ERRADDR_MASK, ecc_stat); err = mcp251xfd_get_tx_nr_by_addr(priv->tx, &nr, addr); if (!err) in_tx_ram = true; else if (err == -ENOENT) in_tx_ram = false; else return err; /* Errata Reference: * mcp2517fd: DS80000789B, mcp2518fd: DS80000792C 2. * * ECC single error correction does not work in all cases: * * Fix/Work Around: * Enable single error correction and double error detection * interrupts by setting SECIE and DEDIE. Handle SECIF as a * detection interrupt and do not rely on the error * correction. Instead, handle both interrupts as a * notification that the RAM word at ERRADDR was corrupted. */ if (ecc_stat & MCP251XFD_REG_ECCSTAT_SECIF) msg = "Single ECC Error detected at address"; else if (ecc_stat & MCP251XFD_REG_ECCSTAT_DEDIF) msg = "Double ECC Error detected at address"; else return -EINVAL; if (!in_tx_ram) { ecc->ecc_stat = 0; netdev_notice(priv->ndev, "%s 0x%04x.\n", msg, addr); } else { /* Re-occurring error? */ if (ecc->ecc_stat == ecc_stat) { ecc->cnt++; } else { ecc->ecc_stat = ecc_stat; ecc->cnt = 1; } netdev_info(priv->ndev, "%s 0x%04x (in TX-RAM, tx_obj=%d), occurred %d time%s.\n", msg, addr, nr, ecc->cnt, ecc->cnt > 1 ? "s" : ""); if (ecc->cnt >= MCP251XFD_ECC_CNT_MAX) return mcp251xfd_handle_eccif_recover(priv, nr); } if (set_normal_mode) return mcp251xfd_chip_set_normal_mode_nowait(priv); return 0; } static int mcp251xfd_handle_spicrcif(struct mcp251xfd_priv *priv) { int err; u32 crc; err = regmap_read(priv->map_reg, MCP251XFD_REG_CRC, &crc); if (err) return err; err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_CRC, MCP251XFD_REG_CRC_IF_MASK, ~crc); if (err) return err; if (crc & MCP251XFD_REG_CRC_FERRIF) netdev_notice(priv->ndev, "CRC write command format error.\n"); else if (crc & MCP251XFD_REG_CRC_CRCERRIF) netdev_notice(priv->ndev, "CRC write error detected. CRC=0x%04lx.\n", FIELD_GET(MCP251XFD_REG_CRC_MASK, crc)); return 0; } static int mcp251xfd_read_regs_status(struct mcp251xfd_priv *priv) { const int val_bytes = regmap_get_val_bytes(priv->map_reg); size_t len; if (priv->rx_ring_num == 1) len = sizeof(priv->regs_status.intf); else len = sizeof(priv->regs_status); return regmap_bulk_read(priv->map_reg, MCP251XFD_REG_INT, &priv->regs_status, len / val_bytes); } #define mcp251xfd_handle(priv, irq, ...) \ ({ \ struct mcp251xfd_priv *_priv = (priv); \ int err; \ \ err = mcp251xfd_handle_##irq(_priv, ## __VA_ARGS__); \ if (err) \ netdev_err(_priv->ndev, \ "IRQ handler mcp251xfd_handle_%s() returned %d.\n", \ __stringify(irq), err); \ err; \ }) static irqreturn_t mcp251xfd_irq(int irq, void *dev_id) { struct mcp251xfd_priv *priv = dev_id; irqreturn_t handled = IRQ_NONE; int err; if (priv->rx_int) do { int rx_pending; rx_pending = gpiod_get_value_cansleep(priv->rx_int); if (!rx_pending) break; /* Assume 1st RX-FIFO pending, if other FIFOs * are pending the main IRQ handler will take * care. 
*/ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr); err = mcp251xfd_handle(priv, rxif); if (err) goto out_fail; handled = IRQ_HANDLED; /* We don't know which RX-FIFO is pending, but only * handle the 1st RX-FIFO. Leave loop here if we have * more than 1 RX-FIFO to avoid starvation. */ } while (priv->rx_ring_num == 1); do { u32 intf_pending, intf_pending_clearable; bool set_normal_mode = false; err = mcp251xfd_read_regs_status(priv); if (err) goto out_fail; intf_pending = FIELD_GET(MCP251XFD_REG_INT_IF_MASK, priv->regs_status.intf) & FIELD_GET(MCP251XFD_REG_INT_IE_MASK, priv->regs_status.intf); if (!(intf_pending)) { can_rx_offload_threaded_irq_finish(&priv->offload); return handled; } /* Some interrupts must be ACKed in the * MCP251XFD_REG_INT register. * - First ACK then handle, to avoid lost-IRQ race * condition on fast re-occurring interrupts. * - Write "0" to clear active IRQs, "1" to all other, * to avoid r/m/w race condition on the * MCP251XFD_REG_INT register. */ intf_pending_clearable = intf_pending & MCP251XFD_REG_INT_IF_CLEARABLE_MASK; if (intf_pending_clearable) { err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_INT, MCP251XFD_REG_INT_IF_MASK, ~intf_pending_clearable); if (err) goto out_fail; } if (intf_pending & MCP251XFD_REG_INT_MODIF) { err = mcp251xfd_handle(priv, modif, &set_normal_mode); if (err) goto out_fail; } if (intf_pending & MCP251XFD_REG_INT_RXIF) { err = mcp251xfd_handle(priv, rxif); if (err) goto out_fail; } if (intf_pending & MCP251XFD_REG_INT_TEFIF) { err = mcp251xfd_handle(priv, tefif); if (err) goto out_fail; } if (intf_pending & MCP251XFD_REG_INT_RXOVIF) { err = mcp251xfd_handle(priv, rxovif); if (err) goto out_fail; } if (intf_pending & MCP251XFD_REG_INT_TXATIF) { err = mcp251xfd_handle(priv, txatif); if (err) goto out_fail; } if (intf_pending & MCP251XFD_REG_INT_IVMIF) { err = mcp251xfd_handle(priv, ivmif); if (err) goto out_fail; } if (intf_pending & MCP251XFD_REG_INT_SERRIF) { err = mcp251xfd_handle(priv, serrif); if (err) goto out_fail; } if (intf_pending & MCP251XFD_REG_INT_ECCIF) { err = mcp251xfd_handle(priv, eccif, set_normal_mode); if (err) goto out_fail; } if (intf_pending & MCP251XFD_REG_INT_SPICRCIF) { err = mcp251xfd_handle(priv, spicrcif); if (err) goto out_fail; } /* On the MCP2527FD and MCP2518FD, we don't get a * CERRIF IRQ on the transition TX ERROR_WARNING -> TX * ERROR_ACTIVE. */ if (intf_pending & MCP251XFD_REG_INT_CERRIF || priv->can.state > CAN_STATE_ERROR_ACTIVE) { err = mcp251xfd_handle(priv, cerrif); if (err) goto out_fail; /* In Bus Off we completely shut down the * controller. Every subsequent register read * will read bogus data, and if * MCP251XFD_QUIRK_CRC_REG is enabled the CRC * check will fail, too. So leave IRQ handler * directly. 
*/ if (priv->can.state == CAN_STATE_BUS_OFF) { can_rx_offload_threaded_irq_finish(&priv->offload); return IRQ_HANDLED; } } handled = IRQ_HANDLED; } while (1); out_fail: can_rx_offload_threaded_irq_finish(&priv->offload); netdev_err(priv->ndev, "IRQ handler returned %d (intf=0x%08x).\n", err, priv->regs_status.intf); mcp251xfd_dump(priv); mcp251xfd_chip_interrupts_disable(priv); mcp251xfd_timestamp_stop(priv); return handled; } static int mcp251xfd_open(struct net_device *ndev) { struct mcp251xfd_priv *priv = netdev_priv(ndev); const struct spi_device *spi = priv->spi; int err; err = open_candev(ndev); if (err) return err; err = pm_runtime_resume_and_get(ndev->dev.parent); if (err) goto out_close_candev; err = mcp251xfd_ring_alloc(priv); if (err) goto out_pm_runtime_put; err = mcp251xfd_transceiver_enable(priv); if (err) goto out_mcp251xfd_ring_free; err = mcp251xfd_chip_start(priv); if (err) goto out_transceiver_disable; mcp251xfd_timestamp_init(priv); clear_bit(MCP251XFD_FLAGS_DOWN, priv->flags); can_rx_offload_enable(&priv->offload); err = request_threaded_irq(spi->irq, NULL, mcp251xfd_irq, IRQF_SHARED | IRQF_ONESHOT, dev_name(&spi->dev), priv); if (err) goto out_can_rx_offload_disable; err = mcp251xfd_chip_interrupts_enable(priv); if (err) goto out_free_irq; netif_start_queue(ndev); return 0; out_free_irq: free_irq(spi->irq, priv); out_can_rx_offload_disable: can_rx_offload_disable(&priv->offload); set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); mcp251xfd_timestamp_stop(priv); out_transceiver_disable: mcp251xfd_transceiver_disable(priv); out_mcp251xfd_ring_free: mcp251xfd_ring_free(priv); out_pm_runtime_put: mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); pm_runtime_put(ndev->dev.parent); out_close_candev: close_candev(ndev); return err; } static int mcp251xfd_stop(struct net_device *ndev) { struct mcp251xfd_priv *priv = netdev_priv(ndev); netif_stop_queue(ndev); set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); hrtimer_cancel(&priv->rx_irq_timer); hrtimer_cancel(&priv->tx_irq_timer); mcp251xfd_chip_interrupts_disable(priv); free_irq(ndev->irq, priv); can_rx_offload_disable(&priv->offload); mcp251xfd_timestamp_stop(priv); mcp251xfd_chip_stop(priv, CAN_STATE_STOPPED); mcp251xfd_transceiver_disable(priv); mcp251xfd_ring_free(priv); close_candev(ndev); pm_runtime_put(ndev->dev.parent); return 0; } static const struct net_device_ops mcp251xfd_netdev_ops = { .ndo_open = mcp251xfd_open, .ndo_stop = mcp251xfd_stop, .ndo_start_xmit = mcp251xfd_start_xmit, .ndo_eth_ioctl = can_eth_ioctl_hwts, .ndo_change_mtu = can_change_mtu, }; static void mcp251xfd_register_quirks(struct mcp251xfd_priv *priv) { const struct spi_device *spi = priv->spi; const struct spi_controller *ctlr = spi->controller; if (ctlr->flags & SPI_CONTROLLER_HALF_DUPLEX) priv->devtype_data.quirks |= MCP251XFD_QUIRK_HALF_DUPLEX; } static int mcp251xfd_register_chip_detect(struct mcp251xfd_priv *priv) { const struct net_device *ndev = priv->ndev; const struct mcp251xfd_devtype_data *devtype_data; u32 osc; int err; /* The OSC_LPMEN is only supported on MCP2518FD and MCP251863, * so use it to autodetect the model. */ err = regmap_update_bits(priv->map_reg, MCP251XFD_REG_OSC, MCP251XFD_REG_OSC_LPMEN, MCP251XFD_REG_OSC_LPMEN); if (err) return err; err = regmap_read(priv->map_reg, MCP251XFD_REG_OSC, &osc); if (err) return err; if (osc & MCP251XFD_REG_OSC_LPMEN) { /* We cannot distinguish between MCP2518FD and * MCP251863. If firmware specifies MCP251863, keep * it, otherwise set to MCP2518FD. 
*/ if (mcp251xfd_is_251863(priv)) devtype_data = &mcp251xfd_devtype_data_mcp251863; else devtype_data = &mcp251xfd_devtype_data_mcp2518fd; } else { devtype_data = &mcp251xfd_devtype_data_mcp2517fd; } if (!mcp251xfd_is_251XFD(priv) && priv->devtype_data.model != devtype_data->model) { netdev_info(ndev, "Detected %s, but firmware specifies a %s. Fixing up.\n", __mcp251xfd_get_model_str(devtype_data->model), mcp251xfd_get_model_str(priv)); } priv->devtype_data = *devtype_data; /* We need to preserve the Half Duplex Quirk. */ mcp251xfd_register_quirks(priv); /* Re-init regmap with quirks of detected model. */ return mcp251xfd_regmap_init(priv); } static int mcp251xfd_register_check_rx_int(struct mcp251xfd_priv *priv) { int err, rx_pending; if (!priv->rx_int) return 0; err = mcp251xfd_chip_rx_int_enable(priv); if (err) return err; /* Check if RX_INT is properly working. The RX_INT should not * be active after a softreset. */ rx_pending = gpiod_get_value_cansleep(priv->rx_int); err = mcp251xfd_chip_rx_int_disable(priv); if (err) return err; if (!rx_pending) return 0; netdev_info(priv->ndev, "RX_INT active after softreset, disabling RX_INT support.\n"); devm_gpiod_put(&priv->spi->dev, priv->rx_int); priv->rx_int = NULL; return 0; } static int mcp251xfd_register_get_dev_id(const struct mcp251xfd_priv *priv, u32 *dev_id, u32 *effective_speed_hz_slow, u32 *effective_speed_hz_fast) { struct mcp251xfd_map_buf_nocrc *buf_rx; struct mcp251xfd_map_buf_nocrc *buf_tx; struct spi_transfer xfer[2] = { }; int err; buf_rx = kzalloc(sizeof(*buf_rx), GFP_KERNEL); if (!buf_rx) return -ENOMEM; buf_tx = kzalloc(sizeof(*buf_tx), GFP_KERNEL); if (!buf_tx) { err = -ENOMEM; goto out_kfree_buf_rx; } xfer[0].tx_buf = buf_tx; xfer[0].len = sizeof(buf_tx->cmd); xfer[0].speed_hz = priv->spi_max_speed_hz_slow; xfer[1].rx_buf = buf_rx->data; xfer[1].len = sizeof(*dev_id); xfer[1].speed_hz = priv->spi_max_speed_hz_fast; mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, MCP251XFD_REG_DEVID); err = spi_sync_transfer(priv->spi, xfer, ARRAY_SIZE(xfer)); if (err) goto out_kfree_buf_tx; *dev_id = get_unaligned_le32(buf_rx->data); *effective_speed_hz_slow = xfer[0].effective_speed_hz; *effective_speed_hz_fast = xfer[1].effective_speed_hz; out_kfree_buf_tx: kfree(buf_tx); out_kfree_buf_rx: kfree(buf_rx); return err; } #define MCP251XFD_QUIRK_ACTIVE(quirk) \ (priv->devtype_data.quirks & MCP251XFD_QUIRK_##quirk ? '+' : '-') static int mcp251xfd_register_done(const struct mcp251xfd_priv *priv) { u32 dev_id, effective_speed_hz_slow, effective_speed_hz_fast; unsigned long clk_rate; int err; err = mcp251xfd_register_get_dev_id(priv, &dev_id, &effective_speed_hz_slow, &effective_speed_hz_fast); if (err) return err; clk_rate = clk_get_rate(priv->clk); netdev_info(priv->ndev, "%s rev%lu.%lu (%cRX_INT %cPLL %cMAB_NO_WARN %cCRC_REG %cCRC_RX %cCRC_TX %cECC %cHD o:%lu.%02luMHz c:%u.%02uMHz m:%u.%02uMHz rs:%u.%02uMHz es:%u.%02uMHz rf:%u.%02uMHz ef:%u.%02uMHz) successfully initialized.\n", mcp251xfd_get_model_str(priv), FIELD_GET(MCP251XFD_REG_DEVID_ID_MASK, dev_id), FIELD_GET(MCP251XFD_REG_DEVID_REV_MASK, dev_id), priv->rx_int ? '+' : '-', priv->pll_enable ? 
'+' : '-', MCP251XFD_QUIRK_ACTIVE(MAB_NO_WARN), MCP251XFD_QUIRK_ACTIVE(CRC_REG), MCP251XFD_QUIRK_ACTIVE(CRC_RX), MCP251XFD_QUIRK_ACTIVE(CRC_TX), MCP251XFD_QUIRK_ACTIVE(ECC), MCP251XFD_QUIRK_ACTIVE(HALF_DUPLEX), clk_rate / 1000000, clk_rate % 1000000 / 1000 / 10, priv->can.clock.freq / 1000000, priv->can.clock.freq % 1000000 / 1000 / 10, priv->spi_max_speed_hz_orig / 1000000, priv->spi_max_speed_hz_orig % 1000000 / 1000 / 10, priv->spi_max_speed_hz_slow / 1000000, priv->spi_max_speed_hz_slow % 1000000 / 1000 / 10, effective_speed_hz_slow / 1000000, effective_speed_hz_slow % 1000000 / 1000 / 10, priv->spi_max_speed_hz_fast / 1000000, priv->spi_max_speed_hz_fast % 1000000 / 1000 / 10, effective_speed_hz_fast / 1000000, effective_speed_hz_fast % 1000000 / 1000 / 10); return 0; } static int mcp251xfd_register(struct mcp251xfd_priv *priv) { struct net_device *ndev = priv->ndev; int err; err = mcp251xfd_clks_and_vdd_enable(priv); if (err) return err; pm_runtime_get_noresume(ndev->dev.parent); err = pm_runtime_set_active(ndev->dev.parent); if (err) goto out_runtime_put_noidle; pm_runtime_enable(ndev->dev.parent); mcp251xfd_register_quirks(priv); err = mcp251xfd_chip_softreset(priv); if (err == -ENODEV) goto out_runtime_disable; if (err) goto out_chip_sleep; err = mcp251xfd_chip_clock_init(priv); if (err == -ENODEV) goto out_runtime_disable; if (err) goto out_chip_sleep; err = mcp251xfd_register_chip_detect(priv); if (err) goto out_chip_sleep; err = mcp251xfd_register_check_rx_int(priv); if (err) goto out_chip_sleep; mcp251xfd_ethtool_init(priv); err = register_candev(ndev); if (err) goto out_chip_sleep; err = mcp251xfd_register_done(priv); if (err) goto out_unregister_candev; /* Put controller into sleep mode and let pm_runtime_put() * disable the clocks and vdd. If CONFIG_PM is not enabled, * the clocks and vdd will stay powered. 
*/ err = mcp251xfd_chip_sleep(priv); if (err) goto out_unregister_candev; pm_runtime_put(ndev->dev.parent); return 0; out_unregister_candev: unregister_candev(ndev); out_chip_sleep: mcp251xfd_chip_sleep(priv); out_runtime_disable: pm_runtime_disable(ndev->dev.parent); out_runtime_put_noidle: pm_runtime_put_noidle(ndev->dev.parent); mcp251xfd_clks_and_vdd_disable(priv); return err; } static inline void mcp251xfd_unregister(struct mcp251xfd_priv *priv) { struct net_device *ndev = priv->ndev; unregister_candev(ndev); if (pm_runtime_enabled(ndev->dev.parent)) pm_runtime_disable(ndev->dev.parent); else mcp251xfd_clks_and_vdd_disable(priv); } static const struct of_device_id mcp251xfd_of_match[] = { { .compatible = "microchip,mcp2517fd", .data = &mcp251xfd_devtype_data_mcp2517fd, }, { .compatible = "microchip,mcp2518fd", .data = &mcp251xfd_devtype_data_mcp2518fd, }, { .compatible = "microchip,mcp251863", .data = &mcp251xfd_devtype_data_mcp251863, }, { .compatible = "microchip,mcp251xfd", .data = &mcp251xfd_devtype_data_mcp251xfd, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, mcp251xfd_of_match); static const struct spi_device_id mcp251xfd_id_table[] = { { .name = "mcp2517fd", .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2517fd, }, { .name = "mcp2518fd", .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp2518fd, }, { .name = "mcp251863", .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251863, }, { .name = "mcp251xfd", .driver_data = (kernel_ulong_t)&mcp251xfd_devtype_data_mcp251xfd, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(spi, mcp251xfd_id_table); static int mcp251xfd_probe(struct spi_device *spi) { const void *match; struct net_device *ndev; struct mcp251xfd_priv *priv; struct gpio_desc *rx_int; struct regulator *reg_vdd, *reg_xceiver; struct clk *clk; bool pll_enable = false; u32 freq = 0; int err; if (!spi->irq) return dev_err_probe(&spi->dev, -ENXIO, "No IRQ specified (maybe node \"interrupts-extended\" in DT missing)!\n"); rx_int = devm_gpiod_get_optional(&spi->dev, "microchip,rx-int", GPIOD_IN); if (IS_ERR(rx_int)) return dev_err_probe(&spi->dev, PTR_ERR(rx_int), "Failed to get RX-INT!\n"); reg_vdd = devm_regulator_get_optional(&spi->dev, "vdd"); if (PTR_ERR(reg_vdd) == -ENODEV) reg_vdd = NULL; else if (IS_ERR(reg_vdd)) return dev_err_probe(&spi->dev, PTR_ERR(reg_vdd), "Failed to get VDD regulator!\n"); reg_xceiver = devm_regulator_get_optional(&spi->dev, "xceiver"); if (PTR_ERR(reg_xceiver) == -ENODEV) reg_xceiver = NULL; else if (IS_ERR(reg_xceiver)) return dev_err_probe(&spi->dev, PTR_ERR(reg_xceiver), "Failed to get Transceiver regulator!\n"); clk = devm_clk_get_optional(&spi->dev, NULL); if (IS_ERR(clk)) return dev_err_probe(&spi->dev, PTR_ERR(clk), "Failed to get Oscillator (clock)!\n"); if (clk) { freq = clk_get_rate(clk); } else { err = device_property_read_u32(&spi->dev, "clock-frequency", &freq); if (err) return dev_err_probe(&spi->dev, err, "Failed to get clock-frequency!\n"); } /* Sanity check */ if (freq < MCP251XFD_SYSCLOCK_HZ_MIN || freq > MCP251XFD_SYSCLOCK_HZ_MAX) { dev_err(&spi->dev, "Oscillator frequency (%u Hz) is too low or high.\n", freq); return -ERANGE; } if (freq <= MCP251XFD_SYSCLOCK_HZ_MAX / MCP251XFD_OSC_PLL_MULTIPLIER) pll_enable = true; ndev = alloc_candev(sizeof(struct mcp251xfd_priv), MCP251XFD_TX_OBJ_NUM_MAX); if (!ndev) return -ENOMEM; SET_NETDEV_DEV(ndev, &spi->dev); ndev->netdev_ops = &mcp251xfd_netdev_ops; ndev->irq = spi->irq; ndev->flags |= IFF_ECHO; priv = netdev_priv(ndev); spi_set_drvdata(spi, priv); 
priv->can.clock.freq = freq; if (pll_enable) priv->can.clock.freq *= MCP251XFD_OSC_PLL_MULTIPLIER; priv->can.do_set_mode = mcp251xfd_set_mode; priv->can.do_get_berr_counter = mcp251xfd_get_berr_counter; priv->can.bittiming_const = &mcp251xfd_bittiming_const; priv->can.data_bittiming_const = &mcp251xfd_data_bittiming_const; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO | CAN_CTRLMODE_CC_LEN8_DLC; set_bit(MCP251XFD_FLAGS_DOWN, priv->flags); priv->ndev = ndev; priv->spi = spi; priv->rx_int = rx_int; priv->clk = clk; priv->pll_enable = pll_enable; priv->reg_vdd = reg_vdd; priv->reg_xceiver = reg_xceiver; match = device_get_match_data(&spi->dev); if (match) priv->devtype_data = *(struct mcp251xfd_devtype_data *)match; else priv->devtype_data = *(struct mcp251xfd_devtype_data *) spi_get_device_id(spi)->driver_data; /* Errata Reference: * mcp2517fd: DS80000792C 5., mcp2518fd: DS80000789C 4. * * The SPI can write corrupted data to the RAM at fast SPI * speeds: * * Simultaneous activity on the CAN bus while writing data to * RAM via the SPI interface, with high SCK frequency, can * lead to corrupted data being written to RAM. * * Fix/Work Around: * Ensure that FSCK is less than or equal to 0.85 * * (FSYSCLK/2). * * Known good combinations are: * * MCP ext-clk SoC SPI SPI-clk max-clk parent-clk config * * 2518 20 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 8333333 Hz 83.33% 600000000 Hz assigned-clocks = <&ccu CLK_SPIx> * 2518 40 MHz allwinner,sun8i-h3 allwinner,sun8i-h3-spi 16666667 Hz 83.33% 600000000 Hz assigned-clocks = <&ccu CLK_SPIx> * 2517 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz default * 2518 40 MHz atmel,sama5d27 atmel,at91rm9200-spi 16400000 Hz 82.00% 82000000 Hz default * 2518 40 MHz fsl,imx6dl fsl,imx51-ecspi 15000000 Hz 75.00% 30000000 Hz default * 2517 20 MHz fsl,imx8mm fsl,imx51-ecspi 8333333 Hz 83.33% 16666667 Hz assigned-clocks = <&clk IMX8MM_CLK_ECSPIx_ROOT> * */ priv->spi_max_speed_hz_orig = spi->max_speed_hz; priv->spi_max_speed_hz_slow = min(spi->max_speed_hz, freq / 2 / 1000 * 850); if (priv->pll_enable) priv->spi_max_speed_hz_fast = min(spi->max_speed_hz, freq * MCP251XFD_OSC_PLL_MULTIPLIER / 2 / 1000 * 850); else priv->spi_max_speed_hz_fast = priv->spi_max_speed_hz_slow; spi->max_speed_hz = priv->spi_max_speed_hz_slow; spi->bits_per_word = 8; spi->rt = true; err = spi_setup(spi); if (err) goto out_free_candev; err = mcp251xfd_regmap_init(priv); if (err) goto out_free_candev; err = can_rx_offload_add_manual(ndev, &priv->offload, MCP251XFD_NAPI_WEIGHT); if (err) goto out_free_candev; err = mcp251xfd_register(priv); if (err) { dev_err_probe(&spi->dev, err, "Failed to detect %s.\n", mcp251xfd_get_model_str(priv)); goto out_can_rx_offload_del; } return 0; out_can_rx_offload_del: can_rx_offload_del(&priv->offload); out_free_candev: spi->max_speed_hz = priv->spi_max_speed_hz_orig; free_candev(ndev); return err; } static void mcp251xfd_remove(struct spi_device *spi) { struct mcp251xfd_priv *priv = spi_get_drvdata(spi); struct net_device *ndev = priv->ndev; can_rx_offload_del(&priv->offload); mcp251xfd_unregister(priv); spi->max_speed_hz = priv->spi_max_speed_hz_orig; free_candev(ndev); } static int __maybe_unused mcp251xfd_runtime_suspend(struct device *device) { const struct mcp251xfd_priv *priv = dev_get_drvdata(device); return mcp251xfd_clks_and_vdd_disable(priv); } static int __maybe_unused mcp251xfd_runtime_resume(struct device *device) { 
const struct mcp251xfd_priv *priv = dev_get_drvdata(device); return mcp251xfd_clks_and_vdd_enable(priv); } static const struct dev_pm_ops mcp251xfd_pm_ops = { SET_RUNTIME_PM_OPS(mcp251xfd_runtime_suspend, mcp251xfd_runtime_resume, NULL) }; static struct spi_driver mcp251xfd_driver = { .driver = { .name = DEVICE_NAME, .pm = &mcp251xfd_pm_ops, .of_match_table = mcp251xfd_of_match, }, .probe = mcp251xfd_probe, .remove = mcp251xfd_remove, .id_table = mcp251xfd_id_table, }; module_spi_driver(mcp251xfd_driver); MODULE_AUTHOR("Marc Kleine-Budde <[email protected]>"); MODULE_DESCRIPTION("Microchip MCP251xFD Family CAN controller driver"); MODULE_LICENSE("GPL v2");
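The probe path above derives spi_max_speed_hz_slow/_fast from the errata bound FSCK <= 0.85 * (FSYSCLK / 2) using the integer expression freq / 2 / 1000 * 850. A small stand-alone check of that arithmetic follows; the oscillator frequencies are examples, not values taken from a particular board.

/* Standalone sketch of the SPI clock cap used in probe: computing the
 * bound as freq / 2 / 1000 * 850 keeps the intermediate value small
 * (no 32-bit overflow) at the cost of rounding down to the nearest
 * multiple of 850 Hz -- the safe direction to round for a maximum.
 */
#include <stdio.h>

static unsigned int mcp251xfd_spi_cap_hz(unsigned int sysclk_hz)
{
	return sysclk_hz / 2 / 1000 * 850;
}

int main(void)
{
	/* 40 MHz oscillator: cap = 17000000 Hz, i.e. 0.85 * 20 MHz.
	 * The "known good" table in the probe comment stays below it,
	 * e.g. 16666667 Hz = 83.33 %.
	 */
	printf("40 MHz -> %u Hz\n", mcp251xfd_spi_cap_hz(40000000));
	/* 20 MHz oscillator: cap = 8500000 Hz */
	printf("20 MHz -> %u Hz\n", mcp251xfd_spi_cap_hz(20000000));
	return 0;
}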
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-core.c
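The mcp251xfd_handle() macro in the core file above combines token pasting (mcp251xfd_handle_##irq) with __stringify() so that every interrupt source gets one uniform call-and-report line in the threaded IRQ handler. The stand-alone sketch below reproduces just that pattern with made-up handlers -- handle_foo and handle_bar are not driver functions -- and, like the kernel macro, relies on GCC statement expressions.

/* Standalone sketch of the dispatch pattern behind mcp251xfd_handle():
 * token pasting selects the handler, #irq names it in the error path.
 * Requires GCC/Clang (statement expressions).
 */
#include <stdio.h>

static int handle_foo(void) { return 0; }
static int handle_bar(void) { return -5; /* pretend -EIO */ }

#define handle(irq, ...)						\
({									\
	int _err = handle_##irq(__VA_ARGS__);				\
	if (_err)							\
		fprintf(stderr,						\
			"IRQ handler handle_%s() returned %d.\n",	\
			#irq, _err);					\
	_err;								\
})

int main(void)
{
	if (handle(foo))	/* succeeds, prints nothing */
		return 1;
	if (handle(bar))	/* prints: IRQ handler handle_bar() returned -5. */
		return 1;
	return 0;
}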
// SPDX-License-Identifier: GPL-2.0
//
// mcp251xfd - Microchip MCP251xFD Family CAN controller driver
//
// Copyright (c) 2019, 2020, 2021 Pengutronix,
//               Marc Kleine-Budde <[email protected]>
//
// Based on:
//
// CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface
//
// Copyright (c) 2019 Martin Sperl <[email protected]>
//

#include <linux/bitfield.h>

#include "mcp251xfd.h"

static inline int
mcp251xfd_rx_head_get_from_chip(const struct mcp251xfd_priv *priv,
				const struct mcp251xfd_rx_ring *ring,
				u8 *rx_head, bool *fifo_empty)
{
	u32 fifo_sta;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOSTA(ring->fifo_nr),
			  &fifo_sta);
	if (err)
		return err;

	*rx_head = FIELD_GET(MCP251XFD_REG_FIFOSTA_FIFOCI_MASK, fifo_sta);
	*fifo_empty = !(fifo_sta & MCP251XFD_REG_FIFOSTA_TFNRFNIF);

	return 0;
}

static inline int
mcp251xfd_rx_tail_get_from_chip(const struct mcp251xfd_priv *priv,
				const struct mcp251xfd_rx_ring *ring,
				u8 *rx_tail)
{
	u32 fifo_ua;
	int err;

	err = regmap_read(priv->map_reg, MCP251XFD_REG_FIFOUA(ring->fifo_nr),
			  &fifo_ua);
	if (err)
		return err;

	fifo_ua -= ring->base - MCP251XFD_RAM_START;
	*rx_tail = fifo_ua / ring->obj_size;

	return 0;
}

static int
mcp251xfd_check_rx_tail(const struct mcp251xfd_priv *priv,
			const struct mcp251xfd_rx_ring *ring)
{
	u8 rx_tail_chip, rx_tail;
	int err;

	if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY))
		return 0;

	err = mcp251xfd_rx_tail_get_from_chip(priv, ring, &rx_tail_chip);
	if (err)
		return err;

	rx_tail = mcp251xfd_get_rx_tail(ring);
	if (rx_tail_chip != rx_tail) {
		netdev_err(priv->ndev,
			   "RX tail of chip (%d) and ours (%d) inconsistent.\n",
			   rx_tail_chip, rx_tail);
		return -EILSEQ;
	}

	return 0;
}

static int
mcp251xfd_rx_ring_update(const struct mcp251xfd_priv *priv,
			 struct mcp251xfd_rx_ring *ring)
{
	u32 new_head;
	u8 chip_rx_head;
	bool fifo_empty;
	int err;

	err = mcp251xfd_rx_head_get_from_chip(priv, ring, &chip_rx_head,
					      &fifo_empty);
	if (err || fifo_empty)
		return err;

	/* chip_rx_head is the next RX-Object filled by the HW.
	 * The new RX head must be >= the old head.
	 */
	new_head = round_down(ring->head, ring->obj_num) + chip_rx_head;
	if (new_head <= ring->head)
		new_head += ring->obj_num;

	ring->head = new_head;

	return mcp251xfd_check_rx_tail(priv, ring);
}

static void
mcp251xfd_hw_rx_obj_to_skb(const struct mcp251xfd_priv *priv,
			   const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
			   struct sk_buff *skb)
{
	struct canfd_frame *cfd = (struct canfd_frame *)skb->data;
	u8 dlc;

	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_IDE) {
		u32 sid, eid;

		eid = FIELD_GET(MCP251XFD_OBJ_ID_EID_MASK, hw_rx_obj->id);
		sid = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK, hw_rx_obj->id);

		cfd->can_id = CAN_EFF_FLAG |
			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_EID_MASK, eid) |
			FIELD_PREP(MCP251XFD_REG_FRAME_EFF_SID_MASK, sid);
	} else {
		cfd->can_id = FIELD_GET(MCP251XFD_OBJ_ID_SID_MASK,
					hw_rx_obj->id);
	}

	dlc = FIELD_GET(MCP251XFD_OBJ_FLAGS_DLC_MASK, hw_rx_obj->flags);

	/* CANFD */
	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF) {
		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_ESI)
			cfd->flags |= CANFD_ESI;

		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_BRS)
			cfd->flags |= CANFD_BRS;

		cfd->len = can_fd_dlc2len(dlc);
	} else {
		if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR)
			cfd->can_id |= CAN_RTR_FLAG;

		can_frame_set_cc_len((struct can_frame *)cfd, dlc,
				     priv->can.ctrlmode);
	}

	if (!(hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_RTR))
		memcpy(cfd->data, hw_rx_obj->data, cfd->len);

	mcp251xfd_skb_set_timestamp(priv, skb, hw_rx_obj->ts);
}

static int
mcp251xfd_handle_rxif_one(struct mcp251xfd_priv *priv,
			  struct mcp251xfd_rx_ring *ring,
			  const struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct sk_buff *skb;
	struct canfd_frame *cfd;
	int err;

	if (hw_rx_obj->flags & MCP251XFD_OBJ_FLAGS_FDF)
		skb = alloc_canfd_skb(priv->ndev, &cfd);
	else
		skb = alloc_can_skb(priv->ndev, (struct can_frame **)&cfd);

	if (!skb) {
		stats->rx_dropped++;
		return 0;
	}

	mcp251xfd_hw_rx_obj_to_skb(priv, hw_rx_obj, skb);
	err = can_rx_offload_queue_timestamp(&priv->offload, skb,
					     hw_rx_obj->ts);
	if (err)
		stats->rx_fifo_errors++;

	return 0;
}

static inline int
mcp251xfd_rx_obj_read(const struct mcp251xfd_priv *priv,
		      const struct mcp251xfd_rx_ring *ring,
		      struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj,
		      const u8 offset, const u8 len)
{
	const int val_bytes = regmap_get_val_bytes(priv->map_rx);
	int err;

	err = regmap_bulk_read(priv->map_rx,
			       mcp251xfd_get_rx_obj_addr(ring, offset),
			       hw_rx_obj,
			       len * ring->obj_size / val_bytes);

	return err;
}

static int
mcp251xfd_handle_rxif_ring(struct mcp251xfd_priv *priv,
			   struct mcp251xfd_rx_ring *ring)
{
	struct mcp251xfd_hw_rx_obj_canfd *hw_rx_obj = ring->obj;
	u8 rx_tail, len;
	int err, i;

	err = mcp251xfd_rx_ring_update(priv, ring);
	if (err)
		return err;

	while ((len = mcp251xfd_get_rx_linear_len(ring))) {
		int offset;

		rx_tail = mcp251xfd_get_rx_tail(ring);

		err = mcp251xfd_rx_obj_read(priv, ring, hw_rx_obj,
					    rx_tail, len);
		if (err)
			return err;

		for (i = 0; i < len; i++) {
			err = mcp251xfd_handle_rxif_one(priv, ring,
							(void *)hw_rx_obj +
							i * ring->obj_size);
			if (err)
				return err;
		}

		/* Increment the RX FIFO tail pointer 'len' times in a
		 * single SPI message.
		 *
		 * Note:
		 * Calculate offset, so that the SPI transfer ends on
		 * the last message of the uinc_xfer array, which has
		 * "cs_change == 0", to properly deactivate the chip
		 * select.
		 */
		offset = ARRAY_SIZE(ring->uinc_xfer) - len;
		err = spi_sync_transfer(priv->spi,
					ring->uinc_xfer + offset, len);
		if (err)
			return err;

		ring->tail += len;
	}

	return 0;
}

int mcp251xfd_handle_rxif(struct mcp251xfd_priv *priv)
{
	struct mcp251xfd_rx_ring *ring;
	int err, n;

	mcp251xfd_for_each_rx_ring(priv, ring, n) {
		/* - if RX IRQ coalescing is active always handle ring 0
		 * - only handle rings if RX IRQ is active
		 */
		if ((ring->nr > 0 || !priv->rx_obj_num_coalesce_irq) &&
		    !(priv->regs_status.rxif & BIT(ring->fifo_nr)))
			continue;

		err = mcp251xfd_handle_rxif_ring(priv, ring);
		if (err)
			return err;
	}

	if (priv->rx_coalesce_usecs_irq)
		hrtimer_start(&priv->rx_irq_timer,
			      ns_to_ktime(priv->rx_coalesce_usecs_irq *
					  NSEC_PER_USEC),
			      HRTIMER_MODE_REL);

	return 0;
}
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-rx.c
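Editor's note: mcp251xfd_rx_ring_update() above reconstructs a free-running head counter from the chip's modulo-obj_num FIFO index. A self-contained sketch of that wrap-around logic (the helper name is illustrative; round_down() is open-coded):

#include <assert.h>

static unsigned int update_head(unsigned int head, unsigned int obj_num,
				unsigned int chip_head)
{
	/* open-coded round_down(head, obj_num) + chip index */
	unsigned int new_head = head / obj_num * obj_num + chip_head;

	/* the new head must be greater than the old one */
	if (new_head <= head)
		new_head += obj_num;
	return new_head;
}

int main(void)
{
	/* 16-object FIFO, head at 30 (index 14); the chip now reports
	 * index 2, i.e. the hardware index wrapped: the new head must
	 * become 34, not 18.
	 */
	assert(update_head(30, 16, 2) == 34);
	return 0;
}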
// SPDX-License-Identifier: GPL-2.0 // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // // Copyright (c) 2019, 2020, 2021 Pengutronix, // Marc Kleine-Budde <[email protected]> // #include "mcp251xfd.h" #include <asm/unaligned.h> static const struct regmap_config mcp251xfd_regmap_crc; static int mcp251xfd_regmap_nocrc_write(void *context, const void *data, size_t count) { struct spi_device *spi = context; return spi_write(spi, data, count); } static int mcp251xfd_regmap_nocrc_gather_write(void *context, const void *reg, size_t reg_len, const void *val, size_t val_len) { struct spi_device *spi = context; struct mcp251xfd_priv *priv = spi_get_drvdata(spi); struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx; struct spi_transfer xfer[] = { { .tx_buf = buf_tx, .len = sizeof(buf_tx->cmd) + val_len, }, }; BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16)); if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && reg_len != sizeof(buf_tx->cmd.cmd)) return -EINVAL; memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd)); memcpy(buf_tx->data, val, val_len); return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); } static inline bool mcp251xfd_update_bits_read_reg(const struct mcp251xfd_priv *priv, unsigned int reg) { struct mcp251xfd_rx_ring *ring; int n; switch (reg) { case MCP251XFD_REG_INT: case MCP251XFD_REG_TEFCON: case MCP251XFD_REG_FLTCON(0): case MCP251XFD_REG_ECCSTAT: case MCP251XFD_REG_CRC: return false; case MCP251XFD_REG_CON: case MCP251XFD_REG_OSC: case MCP251XFD_REG_ECCCON: return true; default: mcp251xfd_for_each_rx_ring(priv, ring, n) { if (reg == MCP251XFD_REG_FIFOCON(ring->fifo_nr)) return false; if (reg == MCP251XFD_REG_FIFOSTA(ring->fifo_nr)) return true; } WARN(1, "Status of reg 0x%04x unknown.\n", reg); } return true; } static int mcp251xfd_regmap_nocrc_update_bits(void *context, unsigned int reg, unsigned int mask, unsigned int val) { struct spi_device *spi = context; struct mcp251xfd_priv *priv = spi_get_drvdata(spi); struct mcp251xfd_map_buf_nocrc *buf_rx = priv->map_buf_nocrc_rx; struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx; __le32 orig_le32 = 0, mask_le32, val_le32, tmp_le32; u8 first_byte, last_byte, len; int err; BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16)); BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16)); if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && mask == 0) return -EINVAL; first_byte = mcp251xfd_first_byte_set(mask); last_byte = mcp251xfd_last_byte_set(mask); len = last_byte - first_byte + 1; if (mcp251xfd_update_bits_read_reg(priv, reg)) { struct spi_transfer xfer[2] = { }; struct spi_message msg; spi_message_init(&msg); spi_message_add_tail(&xfer[0], &msg); if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) { xfer[0].tx_buf = buf_tx; xfer[0].len = sizeof(buf_tx->cmd); xfer[1].rx_buf = buf_rx->data; xfer[1].len = len; spi_message_add_tail(&xfer[1], &msg); } else { xfer[0].tx_buf = buf_tx; xfer[0].rx_buf = buf_rx; xfer[0].len = sizeof(buf_tx->cmd) + len; if (MCP251XFD_SANITIZE_SPI) memset(buf_tx->data, 0x0, len); } mcp251xfd_spi_cmd_read_nocrc(&buf_tx->cmd, reg + first_byte); err = spi_sync(spi, &msg); if (err) return err; memcpy(&orig_le32, buf_rx->data, len); } mask_le32 = cpu_to_le32(mask >> BITS_PER_BYTE * first_byte); val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte); tmp_le32 = orig_le32 & ~mask_le32; tmp_le32 |= val_le32 & mask_le32; mcp251xfd_spi_cmd_write_nocrc(&buf_tx->cmd, reg + first_byte); memcpy(buf_tx->data, &tmp_le32, len); return spi_write(spi, buf_tx, sizeof(buf_tx->cmd) + len); } static 
int mcp251xfd_regmap_nocrc_read(void *context, const void *reg, size_t reg_len, void *val_buf, size_t val_len) { struct spi_device *spi = context; struct mcp251xfd_priv *priv = spi_get_drvdata(spi); struct mcp251xfd_map_buf_nocrc *buf_rx = priv->map_buf_nocrc_rx; struct mcp251xfd_map_buf_nocrc *buf_tx = priv->map_buf_nocrc_tx; struct spi_transfer xfer[2] = { }; struct spi_message msg; int err; BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16)); BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16)); if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && reg_len != sizeof(buf_tx->cmd.cmd)) return -EINVAL; spi_message_init(&msg); spi_message_add_tail(&xfer[0], &msg); if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) { xfer[0].tx_buf = reg; xfer[0].len = sizeof(buf_tx->cmd); xfer[1].rx_buf = val_buf; xfer[1].len = val_len; spi_message_add_tail(&xfer[1], &msg); } else { xfer[0].tx_buf = buf_tx; xfer[0].rx_buf = buf_rx; xfer[0].len = sizeof(buf_tx->cmd) + val_len; memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd)); if (MCP251XFD_SANITIZE_SPI) memset(buf_tx->data, 0x0, val_len); } err = spi_sync(spi, &msg); if (err) return err; if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX)) memcpy(val_buf, buf_rx->data, val_len); return 0; } static int mcp251xfd_regmap_crc_gather_write(void *context, const void *reg_p, size_t reg_len, const void *val, size_t val_len) { struct spi_device *spi = context; struct mcp251xfd_priv *priv = spi_get_drvdata(spi); struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx; struct spi_transfer xfer[] = { { .tx_buf = buf_tx, .len = sizeof(buf_tx->cmd) + val_len + sizeof(buf_tx->crc), }, }; u16 reg = *(u16 *)reg_p; u16 crc; BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8)); if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && reg_len != sizeof(buf_tx->cmd.cmd) + mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE) return -EINVAL; mcp251xfd_spi_cmd_write_crc(&buf_tx->cmd, reg, val_len); memcpy(buf_tx->data, val, val_len); crc = mcp251xfd_crc16_compute(buf_tx, sizeof(buf_tx->cmd) + val_len); put_unaligned_be16(crc, buf_tx->data + val_len); return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer)); } static int mcp251xfd_regmap_crc_write(void *context, const void *data, size_t count) { const size_t data_offset = sizeof(__be16) + mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE; return mcp251xfd_regmap_crc_gather_write(context, data, data_offset, data + data_offset, count - data_offset); } static int mcp251xfd_regmap_crc_read_check_crc(const struct mcp251xfd_map_buf_crc * const buf_rx, const struct mcp251xfd_map_buf_crc * const buf_tx, unsigned int data_len) { u16 crc_received, crc_calculated; crc_received = get_unaligned_be16(buf_rx->data + data_len); crc_calculated = mcp251xfd_crc16_compute2(&buf_tx->cmd, sizeof(buf_tx->cmd), buf_rx->data, data_len); if (crc_received != crc_calculated) return -EBADMSG; return 0; } static int mcp251xfd_regmap_crc_read_one(struct mcp251xfd_priv *priv, struct spi_message *msg, unsigned int data_len) { const struct mcp251xfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx; const struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx; int err; BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8)); BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8)); err = spi_sync(priv->spi, msg); if (err) return err; return mcp251xfd_regmap_crc_read_check_crc(buf_rx, buf_tx, data_len); } static int mcp251xfd_regmap_crc_read(void *context, const void *reg_p, size_t reg_len, void *val_buf, size_t val_len) { struct spi_device *spi = 
context; struct mcp251xfd_priv *priv = spi_get_drvdata(spi); struct mcp251xfd_map_buf_crc *buf_rx = priv->map_buf_crc_rx; struct mcp251xfd_map_buf_crc *buf_tx = priv->map_buf_crc_tx; struct spi_transfer xfer[2] = { }; struct spi_message msg; u16 reg = *(u16 *)reg_p; int i, err; BUILD_BUG_ON(sizeof(buf_rx->cmd) != sizeof(__be16) + sizeof(u8)); BUILD_BUG_ON(sizeof(buf_tx->cmd) != sizeof(__be16) + sizeof(u8)); if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && reg_len != sizeof(buf_tx->cmd.cmd) + mcp251xfd_regmap_crc.pad_bits / BITS_PER_BYTE) return -EINVAL; spi_message_init(&msg); spi_message_add_tail(&xfer[0], &msg); if (priv->devtype_data.quirks & MCP251XFD_QUIRK_HALF_DUPLEX) { xfer[0].tx_buf = buf_tx; xfer[0].len = sizeof(buf_tx->cmd); xfer[1].rx_buf = buf_rx->data; xfer[1].len = val_len + sizeof(buf_tx->crc); spi_message_add_tail(&xfer[1], &msg); } else { xfer[0].tx_buf = buf_tx; xfer[0].rx_buf = buf_rx; xfer[0].len = sizeof(buf_tx->cmd) + val_len + sizeof(buf_tx->crc); if (MCP251XFD_SANITIZE_SPI) memset(buf_tx->data, 0x0, val_len + sizeof(buf_tx->crc)); } mcp251xfd_spi_cmd_read_crc(&buf_tx->cmd, reg, val_len); for (i = 0; i < MCP251XFD_READ_CRC_RETRIES_MAX; i++) { err = mcp251xfd_regmap_crc_read_one(priv, &msg, val_len); if (!err) goto out; if (err != -EBADMSG) return err; /* MCP251XFD_REG_TBC is the time base counter * register. It increments once per SYS clock tick, * which is 20 or 40 MHz. * * Observation on the mcp2518fd shows that if the * lowest byte (which is transferred first on the SPI * bus) of that register is 0x00 or 0x80 the * calculated CRC doesn't always match the transferred * one. On the mcp2517fd this problem is not limited * to the first byte being 0x00 or 0x80. * * If the highest bit in the lowest byte is flipped * the transferred CRC matches the calculated one. We * assume for now the CRC operates on the correct * data. */ if (reg == MCP251XFD_REG_TBC && ((buf_rx->data[0] & 0xf8) == 0x0 || (buf_rx->data[0] & 0xf8) == 0x80)) { /* Flip highest bit in lowest byte of le32 */ buf_rx->data[0] ^= 0x80; /* re-check CRC */ err = mcp251xfd_regmap_crc_read_check_crc(buf_rx, buf_tx, val_len); if (!err) { /* If CRC is now correct, assume * flipped data is OK. */ goto out; } } /* MCP251XFD_REG_OSC is the first ever reg we read from. * * The chip may be in deep sleep and this SPI transfer * (i.e. the assertion of the CS) will wake the chip * up. This takes about 3ms. The CRC of this transfer * is wrong. * * Or there isn't a chip at all, in this case the CRC * will be wrong, too. * * In both cases ignore the CRC and copy the read data * to the caller. It will take care of both cases. 
* */ if (reg == MCP251XFD_REG_OSC && val_len == sizeof(__le32)) { err = 0; goto out; } netdev_info(priv->ndev, "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x) retrying.\n", reg, val_len, (int)val_len, buf_rx->data, get_unaligned_be16(buf_rx->data + val_len)); } if (err) { netdev_err(priv->ndev, "CRC read error at address 0x%04x (length=%zd, data=%*ph, CRC=0x%04x).\n", reg, val_len, (int)val_len, buf_rx->data, get_unaligned_be16(buf_rx->data + val_len)); return err; } out: memcpy(val_buf, buf_rx->data, val_len); return 0; } static const struct regmap_range mcp251xfd_reg_table_yes_range[] = { regmap_reg_range(0x000, 0x2ec), /* CAN FD Controller Module SFR */ regmap_reg_range(0x400, 0xbfc), /* RAM */ regmap_reg_range(0xe00, 0xe14), /* MCP2517/18FD SFR */ }; static const struct regmap_access_table mcp251xfd_reg_table = { .yes_ranges = mcp251xfd_reg_table_yes_range, .n_yes_ranges = ARRAY_SIZE(mcp251xfd_reg_table_yes_range), }; static const struct regmap_config mcp251xfd_regmap_nocrc = { .name = "nocrc", .reg_bits = 16, .reg_stride = 4, .pad_bits = 0, .val_bits = 32, .max_register = 0xffc, .wr_table = &mcp251xfd_reg_table, .rd_table = &mcp251xfd_reg_table, .cache_type = REGCACHE_NONE, .read_flag_mask = (__force unsigned long) cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_READ), .write_flag_mask = (__force unsigned long) cpu_to_be16(MCP251XFD_SPI_INSTRUCTION_WRITE), }; static const struct regmap_bus mcp251xfd_bus_nocrc = { .write = mcp251xfd_regmap_nocrc_write, .gather_write = mcp251xfd_regmap_nocrc_gather_write, .reg_update_bits = mcp251xfd_regmap_nocrc_update_bits, .read = mcp251xfd_regmap_nocrc_read, .reg_format_endian_default = REGMAP_ENDIAN_BIG, .val_format_endian_default = REGMAP_ENDIAN_LITTLE, .max_raw_read = sizeof_field(struct mcp251xfd_map_buf_nocrc, data), .max_raw_write = sizeof_field(struct mcp251xfd_map_buf_nocrc, data), }; static const struct regmap_config mcp251xfd_regmap_crc = { .name = "crc", .reg_bits = 16, .reg_stride = 4, .pad_bits = 16, /* keep data bits aligned */ .val_bits = 32, .max_register = 0xffc, .wr_table = &mcp251xfd_reg_table, .rd_table = &mcp251xfd_reg_table, .cache_type = REGCACHE_NONE, }; static const struct regmap_bus mcp251xfd_bus_crc = { .write = mcp251xfd_regmap_crc_write, .gather_write = mcp251xfd_regmap_crc_gather_write, .read = mcp251xfd_regmap_crc_read, .reg_format_endian_default = REGMAP_ENDIAN_NATIVE, .val_format_endian_default = REGMAP_ENDIAN_LITTLE, .max_raw_read = sizeof_field(struct mcp251xfd_map_buf_crc, data), .max_raw_write = sizeof_field(struct mcp251xfd_map_buf_crc, data), }; static inline bool mcp251xfd_regmap_use_nocrc(struct mcp251xfd_priv *priv) { return (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) || (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX)); } static inline bool mcp251xfd_regmap_use_crc(struct mcp251xfd_priv *priv) { return (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) || (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX); } static int mcp251xfd_regmap_init_nocrc(struct mcp251xfd_priv *priv) { if (!priv->map_nocrc) { struct regmap *map; map = devm_regmap_init(&priv->spi->dev, &mcp251xfd_bus_nocrc, priv->spi, &mcp251xfd_regmap_nocrc); if (IS_ERR(map)) return PTR_ERR(map); priv->map_nocrc = map; } if (!priv->map_buf_nocrc_rx) { priv->map_buf_nocrc_rx = devm_kzalloc(&priv->spi->dev, sizeof(*priv->map_buf_nocrc_rx), GFP_KERNEL); if (!priv->map_buf_nocrc_rx) return -ENOMEM; } if (!priv->map_buf_nocrc_tx) { priv->map_buf_nocrc_tx = devm_kzalloc(&priv->spi->dev, sizeof(*priv->map_buf_nocrc_tx), 
GFP_KERNEL); if (!priv->map_buf_nocrc_tx) return -ENOMEM; } if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) priv->map_reg = priv->map_nocrc; if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX)) priv->map_rx = priv->map_nocrc; return 0; } static void mcp251xfd_regmap_destroy_nocrc(struct mcp251xfd_priv *priv) { if (priv->map_buf_nocrc_rx) { devm_kfree(&priv->spi->dev, priv->map_buf_nocrc_rx); priv->map_buf_nocrc_rx = NULL; } if (priv->map_buf_nocrc_tx) { devm_kfree(&priv->spi->dev, priv->map_buf_nocrc_tx); priv->map_buf_nocrc_tx = NULL; } } static int mcp251xfd_regmap_init_crc(struct mcp251xfd_priv *priv) { if (!priv->map_crc) { struct regmap *map; map = devm_regmap_init(&priv->spi->dev, &mcp251xfd_bus_crc, priv->spi, &mcp251xfd_regmap_crc); if (IS_ERR(map)) return PTR_ERR(map); priv->map_crc = map; } if (!priv->map_buf_crc_rx) { priv->map_buf_crc_rx = devm_kzalloc(&priv->spi->dev, sizeof(*priv->map_buf_crc_rx), GFP_KERNEL); if (!priv->map_buf_crc_rx) return -ENOMEM; } if (!priv->map_buf_crc_tx) { priv->map_buf_crc_tx = devm_kzalloc(&priv->spi->dev, sizeof(*priv->map_buf_crc_tx), GFP_KERNEL); if (!priv->map_buf_crc_tx) return -ENOMEM; } if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG) priv->map_reg = priv->map_crc; if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_RX) priv->map_rx = priv->map_crc; return 0; } static void mcp251xfd_regmap_destroy_crc(struct mcp251xfd_priv *priv) { if (priv->map_buf_crc_rx) { devm_kfree(&priv->spi->dev, priv->map_buf_crc_rx); priv->map_buf_crc_rx = NULL; } if (priv->map_buf_crc_tx) { devm_kfree(&priv->spi->dev, priv->map_buf_crc_tx); priv->map_buf_crc_tx = NULL; } } int mcp251xfd_regmap_init(struct mcp251xfd_priv *priv) { int err; if (mcp251xfd_regmap_use_nocrc(priv)) { err = mcp251xfd_regmap_init_nocrc(priv); if (err) return err; } else { mcp251xfd_regmap_destroy_nocrc(priv); } if (mcp251xfd_regmap_use_crc(priv)) { err = mcp251xfd_regmap_init_crc(priv); if (err) return err; } else { mcp251xfd_regmap_destroy_crc(priv); } return 0; }
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-regmap.c
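Editor's note: the nocrc update_bits implementation above narrows the SPI transfer to only the bytes covered by the mask, via mcp251xfd_first_byte_set()/mcp251xfd_last_byte_set(). A sketch of that byte-range computation, with assumed semantics (these are not the driver's helpers, and a zero mask is rejected by the sanity check above):

#include <stdio.h>
#include <stdint.h>

static unsigned int first_byte_set(uint32_t mask)
{
	unsigned int i;

	for (i = 0; i < 3; i++)
		if (mask & (0xffu << (i * 8)))
			break;
	return i;
}

static unsigned int last_byte_set(uint32_t mask)
{
	unsigned int i;

	for (i = 3; i > 0; i--)
		if (mask & (0xffu << (i * 8)))
			break;
	return i;
}

int main(void)
{
	uint32_t mask = 0x00ffff00;	/* only the two middle bytes set */
	unsigned int first = first_byte_set(mask);
	unsigned int last = last_byte_set(mask);

	/* transfer len = 2 bytes, starting at register offset 1 */
	printf("first=%u last=%u len=%u\n", first, last, last - first + 1);
	return 0;
}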
// SPDX-License-Identifier: GPL-2.0 // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // // Copyright (c) 2020, 2021 Pengutronix, // Marc Kleine-Budde <[email protected]> // Copyright (C) 2015-2018 Etnaviv Project // #include <linux/devcoredump.h> #include "mcp251xfd.h" #include "mcp251xfd-dump.h" struct mcp251xfd_dump_iter { void *start; struct mcp251xfd_dump_object_header *hdr; void *data; }; struct mcp251xfd_dump_reg_space { u16 base; u16 size; }; struct mcp251xfd_dump_ring { enum mcp251xfd_dump_object_ring_key key; u32 val; }; static const struct mcp251xfd_dump_reg_space mcp251xfd_dump_reg_space[] = { { .base = MCP251XFD_REG_CON, .size = MCP251XFD_REG_FLTOBJ(32) - MCP251XFD_REG_CON, }, { .base = MCP251XFD_RAM_START, .size = MCP251XFD_RAM_SIZE, }, { .base = MCP251XFD_REG_OSC, .size = MCP251XFD_REG_DEVID - MCP251XFD_REG_OSC, }, }; static void mcp251xfd_dump_header(struct mcp251xfd_dump_iter *iter, enum mcp251xfd_dump_object_type object_type, const void *data_end) { struct mcp251xfd_dump_object_header *hdr = iter->hdr; unsigned int len; len = data_end - iter->data; if (!len) return; hdr->magic = cpu_to_le32(MCP251XFD_DUMP_MAGIC); hdr->type = cpu_to_le32(object_type); hdr->offset = cpu_to_le32(iter->data - iter->start); hdr->len = cpu_to_le32(len); iter->hdr++; iter->data += len; } static void mcp251xfd_dump_registers(const struct mcp251xfd_priv *priv, struct mcp251xfd_dump_iter *iter) { const int val_bytes = regmap_get_val_bytes(priv->map_rx); struct mcp251xfd_dump_object_reg *reg = iter->data; unsigned int i, j; int err; for (i = 0; i < ARRAY_SIZE(mcp251xfd_dump_reg_space); i++) { const struct mcp251xfd_dump_reg_space *reg_space; void *buf; reg_space = &mcp251xfd_dump_reg_space[i]; buf = kmalloc(reg_space->size, GFP_KERNEL); if (!buf) goto out; err = regmap_bulk_read(priv->map_reg, reg_space->base, buf, reg_space->size / val_bytes); if (err) { kfree(buf); continue; } for (j = 0; j < reg_space->size; j += sizeof(u32), reg++) { reg->reg = cpu_to_le32(reg_space->base + j); reg->val = cpu_to_le32p(buf + j); } kfree(buf); } out: mcp251xfd_dump_header(iter, MCP251XFD_DUMP_OBJECT_TYPE_REG, reg); } static void mcp251xfd_dump_ring(struct mcp251xfd_dump_iter *iter, enum mcp251xfd_dump_object_type object_type, const struct mcp251xfd_dump_ring *dump_ring, unsigned int len) { struct mcp251xfd_dump_object_reg *reg = iter->data; unsigned int i; for (i = 0; i < len; i++, reg++) { reg->reg = cpu_to_le32(dump_ring[i].key); reg->val = cpu_to_le32(dump_ring[i].val); } mcp251xfd_dump_header(iter, object_type, reg); } static void mcp251xfd_dump_tef_ring(const struct mcp251xfd_priv *priv, struct mcp251xfd_dump_iter *iter) { const struct mcp251xfd_tef_ring *tef = priv->tef; const struct mcp251xfd_tx_ring *tx = priv->tx; const struct mcp251xfd_dump_ring dump_ring[] = { { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, .val = tef->head, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, .val = tef->tail, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, .val = 0, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, .val = 0, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, .val = 0, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, .val = tx->obj_num, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, .val = sizeof(struct mcp251xfd_hw_tef_obj), }, }; mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TEF, dump_ring, ARRAY_SIZE(dump_ring)); } static void mcp251xfd_dump_rx_ring_one(const struct mcp251xfd_priv *priv, struct mcp251xfd_dump_iter *iter, const struct mcp251xfd_rx_ring *rx) { const struct 
mcp251xfd_dump_ring dump_ring[] = { { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, .val = rx->head, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, .val = rx->tail, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, .val = rx->base, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, .val = rx->nr, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, .val = rx->fifo_nr, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, .val = rx->obj_num, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, .val = rx->obj_size, }, }; mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_RX, dump_ring, ARRAY_SIZE(dump_ring)); } static void mcp251xfd_dump_rx_ring(const struct mcp251xfd_priv *priv, struct mcp251xfd_dump_iter *iter) { struct mcp251xfd_rx_ring *rx_ring; unsigned int i; mcp251xfd_for_each_rx_ring(priv, rx_ring, i) mcp251xfd_dump_rx_ring_one(priv, iter, rx_ring); } static void mcp251xfd_dump_tx_ring(const struct mcp251xfd_priv *priv, struct mcp251xfd_dump_iter *iter) { const struct mcp251xfd_tx_ring *tx = priv->tx; const struct mcp251xfd_dump_ring dump_ring[] = { { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_HEAD, .val = tx->head, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_TAIL, .val = tx->tail, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_BASE, .val = tx->base, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_NR, .val = tx->nr, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_FIFO_NR, .val = tx->fifo_nr, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_NUM, .val = tx->obj_num, }, { .key = MCP251XFD_DUMP_OBJECT_RING_KEY_OBJ_SIZE, .val = tx->obj_size, }, }; mcp251xfd_dump_ring(iter, MCP251XFD_DUMP_OBJECT_TYPE_TX, dump_ring, ARRAY_SIZE(dump_ring)); } static void mcp251xfd_dump_end(const struct mcp251xfd_priv *priv, struct mcp251xfd_dump_iter *iter) { struct mcp251xfd_dump_object_header *hdr = iter->hdr; hdr->magic = cpu_to_le32(MCP251XFD_DUMP_MAGIC); hdr->type = cpu_to_le32(MCP251XFD_DUMP_OBJECT_TYPE_END); hdr->offset = cpu_to_le32(0); hdr->len = cpu_to_le32(0); /* provoke NULL pointer access, if used after END object */ iter->hdr = NULL; } void mcp251xfd_dump(const struct mcp251xfd_priv *priv) { struct mcp251xfd_dump_iter iter; unsigned int rings_num, obj_num; unsigned int file_size = 0; unsigned int i; /* register space + end marker */ obj_num = 2; /* register space */ for (i = 0; i < ARRAY_SIZE(mcp251xfd_dump_reg_space); i++) file_size += mcp251xfd_dump_reg_space[i].size / sizeof(u32) * sizeof(struct mcp251xfd_dump_object_reg); /* TEF ring, RX rings, TX ring */ rings_num = 1 + priv->rx_ring_num + 1; obj_num += rings_num; file_size += rings_num * __MCP251XFD_DUMP_OBJECT_RING_KEY_MAX * sizeof(struct mcp251xfd_dump_object_reg); /* size of the headers */ file_size += sizeof(*iter.hdr) * obj_num; /* allocate the file in vmalloc memory, it's likely to be big */ iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO | __GFP_NORETRY); if (!iter.start) { netdev_warn(priv->ndev, "Failed to allocate devcoredump file.\n"); return; } /* point the data member after the headers */ iter.hdr = iter.start; iter.data = &iter.hdr[obj_num]; mcp251xfd_dump_registers(priv, &iter); mcp251xfd_dump_tef_ring(priv, &iter); mcp251xfd_dump_rx_ring(priv, &iter); mcp251xfd_dump_tx_ring(priv, &iter); mcp251xfd_dump_end(priv, &iter); dev_coredumpv(&priv->spi->dev, iter.start, iter.data - iter.start, GFP_KERNEL); }
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-dump.c
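Editor's note: mcp251xfd_dump() above lays the devcoredump file out as an array of object headers at the start of the buffer, with the per-object payloads packed behind it (iter.data is pointed just past &iter.hdr[obj_num]). A simplified standalone sketch of that two-region layout; the struct is a reduced stand-in for the driver's header type:

#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct hdr {
	uint32_t magic, type, offset, len;
};

int main(void)
{
	unsigned int obj_num = 3, payload_len = 64, i;
	size_t size = obj_num * (sizeof(struct hdr) + payload_len);
	char *start = calloc(1, size);
	struct hdr *hdr = (struct hdr *)start;
	char *data = (char *)&hdr[obj_num];	/* data begins after headers */

	for (i = 0; i < obj_num; i++, hdr++) {
		hdr->offset = (uint32_t)(data - start); /* like iter.data - iter.start */
		hdr->len = payload_len;
		data += payload_len;
	}
	printf("payload 0 at offset %u\n", ((struct hdr *)start)->offset); /* 48 */
	free(start);
	return 0;
}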
// SPDX-License-Identifier: GPL-2.0 // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // // Copyright (c) 2019, 2020, 2021 Pengutronix, // Marc Kleine-Budde <[email protected]> // // Based on: // // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface // // Copyright (c) 2019 Martin Sperl <[email protected]> // #include <asm/unaligned.h> #include "mcp251xfd.h" #include "mcp251xfd-ram.h" static inline u8 mcp251xfd_cmd_prepare_write_reg(const struct mcp251xfd_priv *priv, union mcp251xfd_write_reg_buf *write_reg_buf, const u16 reg, const u32 mask, const u32 val) { u8 first_byte, last_byte, len; u8 *data; __le32 val_le32; first_byte = mcp251xfd_first_byte_set(mask); last_byte = mcp251xfd_last_byte_set(mask); len = last_byte - first_byte + 1; data = mcp251xfd_spi_cmd_write(priv, write_reg_buf, reg + first_byte, len); val_le32 = cpu_to_le32(val >> BITS_PER_BYTE * first_byte); memcpy(data, &val_le32, len); if (!(priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_REG)) { len += sizeof(write_reg_buf->nocrc.cmd); } else if (len == 1) { u16 crc; /* CRC */ len += sizeof(write_reg_buf->safe.cmd); crc = mcp251xfd_crc16_compute(&write_reg_buf->safe, len); put_unaligned_be16(crc, (void *)write_reg_buf + len); /* Total length */ len += sizeof(write_reg_buf->safe.crc); } else { u16 crc; mcp251xfd_spi_cmd_crc_set_len_in_reg(&write_reg_buf->crc.cmd, len); /* CRC */ len += sizeof(write_reg_buf->crc.cmd); crc = mcp251xfd_crc16_compute(&write_reg_buf->crc, len); put_unaligned_be16(crc, (void *)write_reg_buf + len); /* Total length */ len += sizeof(write_reg_buf->crc.crc); } return len; } static void mcp251xfd_ring_init_tef(struct mcp251xfd_priv *priv, u16 *base) { struct mcp251xfd_tef_ring *tef_ring; struct spi_transfer *xfer; u32 val; u16 addr; u8 len; int i; /* TEF */ tef_ring = priv->tef; tef_ring->head = 0; tef_ring->tail = 0; /* TEF- and TX-FIFO have same number of objects */ *base = mcp251xfd_get_tef_obj_addr(priv->tx->obj_num); /* FIFO IRQ enable */ addr = MCP251XFD_REG_TEFCON; val = MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE; len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->irq_enable_buf, addr, val, val); tef_ring->irq_enable_xfer.tx_buf = &tef_ring->irq_enable_buf; tef_ring->irq_enable_xfer.len = len; spi_message_init_with_transfers(&tef_ring->irq_enable_msg, &tef_ring->irq_enable_xfer, 1); /* FIFO increment TEF tail pointer */ addr = MCP251XFD_REG_TEFCON; val = MCP251XFD_REG_TEFCON_UINC; len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_buf, addr, val, val); for (i = 0; i < ARRAY_SIZE(tef_ring->uinc_xfer); i++) { xfer = &tef_ring->uinc_xfer[i]; xfer->tx_buf = &tef_ring->uinc_buf; xfer->len = len; xfer->cs_change = 1; xfer->cs_change_delay.value = 0; xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; } /* "cs_change == 1" on the last transfer results in an active * chip select after the complete SPI message. This causes the * controller to interpret the next register access as * data. Set "cs_change" of the last transfer to "0" to * properly deactivate the chip select at the end of the * message. 
*/ xfer->cs_change = 0; if (priv->tx_coalesce_usecs_irq || priv->tx_obj_num_coalesce_irq) { val = MCP251XFD_REG_TEFCON_UINC | MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFHIE; len = mcp251xfd_cmd_prepare_write_reg(priv, &tef_ring->uinc_irq_disable_buf, addr, val, val); xfer->tx_buf = &tef_ring->uinc_irq_disable_buf; xfer->len = len; } } static void mcp251xfd_tx_ring_init_tx_obj(const struct mcp251xfd_priv *priv, const struct mcp251xfd_tx_ring *ring, struct mcp251xfd_tx_obj *tx_obj, const u8 rts_buf_len, const u8 n) { struct spi_transfer *xfer; u16 addr; /* FIFO load */ addr = mcp251xfd_get_tx_obj_addr(ring, n); if (priv->devtype_data.quirks & MCP251XFD_QUIRK_CRC_TX) mcp251xfd_spi_cmd_write_crc_set_addr(&tx_obj->buf.crc.cmd, addr); else mcp251xfd_spi_cmd_write_nocrc(&tx_obj->buf.nocrc.cmd, addr); xfer = &tx_obj->xfer[0]; xfer->tx_buf = &tx_obj->buf; xfer->len = 0; /* actual len is assigned on the fly */ xfer->cs_change = 1; xfer->cs_change_delay.value = 0; xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; /* FIFO request to send */ xfer = &tx_obj->xfer[1]; xfer->tx_buf = &ring->rts_buf; xfer->len = rts_buf_len; /* SPI message */ spi_message_init_with_transfers(&tx_obj->msg, tx_obj->xfer, ARRAY_SIZE(tx_obj->xfer)); } static void mcp251xfd_ring_init_tx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr) { struct mcp251xfd_tx_ring *tx_ring; struct mcp251xfd_tx_obj *tx_obj; u32 val; u16 addr; u8 len; int i; tx_ring = priv->tx; tx_ring->head = 0; tx_ring->tail = 0; tx_ring->base = *base; tx_ring->nr = 0; tx_ring->fifo_nr = *fifo_nr; *base = mcp251xfd_get_tx_obj_addr(tx_ring, tx_ring->obj_num); *fifo_nr += 1; /* FIFO request to send */ addr = MCP251XFD_REG_FIFOCON(tx_ring->fifo_nr); val = MCP251XFD_REG_FIFOCON_TXREQ | MCP251XFD_REG_FIFOCON_UINC; len = mcp251xfd_cmd_prepare_write_reg(priv, &tx_ring->rts_buf, addr, val, val); mcp251xfd_for_each_tx_obj(tx_ring, tx_obj, i) mcp251xfd_tx_ring_init_tx_obj(priv, tx_ring, tx_obj, len, i); } static void mcp251xfd_ring_init_rx(struct mcp251xfd_priv *priv, u16 *base, u8 *fifo_nr) { struct mcp251xfd_rx_ring *rx_ring; struct spi_transfer *xfer; u32 val; u16 addr; u8 len; int i, j; mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { rx_ring->head = 0; rx_ring->tail = 0; rx_ring->base = *base; rx_ring->nr = i; rx_ring->fifo_nr = *fifo_nr; *base = mcp251xfd_get_rx_obj_addr(rx_ring, rx_ring->obj_num); *fifo_nr += 1; /* FIFO IRQ enable */ addr = MCP251XFD_REG_FIFOCON(rx_ring->fifo_nr); val = MCP251XFD_REG_FIFOCON_RXOVIE | MCP251XFD_REG_FIFOCON_TFNRFNIE; len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->irq_enable_buf, addr, val, val); rx_ring->irq_enable_xfer.tx_buf = &rx_ring->irq_enable_buf; rx_ring->irq_enable_xfer.len = len; spi_message_init_with_transfers(&rx_ring->irq_enable_msg, &rx_ring->irq_enable_xfer, 1); /* FIFO increment RX tail pointer */ val = MCP251XFD_REG_FIFOCON_UINC; len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_buf, addr, val, val); for (j = 0; j < ARRAY_SIZE(rx_ring->uinc_xfer); j++) { xfer = &rx_ring->uinc_xfer[j]; xfer->tx_buf = &rx_ring->uinc_buf; xfer->len = len; xfer->cs_change = 1; xfer->cs_change_delay.value = 0; xfer->cs_change_delay.unit = SPI_DELAY_UNIT_NSECS; } /* "cs_change == 1" on the last transfer results in an * active chip select after the complete SPI * message. This causes the controller to interpret * the next register access as data. Set "cs_change" * of the last transfer to "0" to properly deactivate * the chip select at the end of the message. 
*/ xfer->cs_change = 0; /* Use 1st RX-FIFO for IRQ coalescing. If enabled * (rx_coalesce_usecs_irq or rx_max_coalesce_frames_irq * is activated), use the last transfer to disable: * * - TFNRFNIE (Receive FIFO Not Empty Interrupt) * * and enable: * * - TFHRFHIE (Receive FIFO Half Full Interrupt) * - or - * - TFERFFIE (Receive FIFO Full Interrupt) * * depending on rx_max_coalesce_frames_irq. * * The RXOVIE (Overflow Interrupt) is always enabled. */ if (rx_ring->nr == 0 && (priv->rx_coalesce_usecs_irq || priv->rx_obj_num_coalesce_irq)) { val = MCP251XFD_REG_FIFOCON_UINC | MCP251XFD_REG_FIFOCON_RXOVIE; if (priv->rx_obj_num_coalesce_irq == rx_ring->obj_num) val |= MCP251XFD_REG_FIFOCON_TFERFFIE; else if (priv->rx_obj_num_coalesce_irq) val |= MCP251XFD_REG_FIFOCON_TFHRFHIE; len = mcp251xfd_cmd_prepare_write_reg(priv, &rx_ring->uinc_irq_disable_buf, addr, val, val); xfer->tx_buf = &rx_ring->uinc_irq_disable_buf; xfer->len = len; } } } int mcp251xfd_ring_init(struct mcp251xfd_priv *priv) { const struct mcp251xfd_rx_ring *rx_ring; u16 base = 0, ram_used; u8 fifo_nr = 1; int i; netdev_reset_queue(priv->ndev); mcp251xfd_ring_init_tef(priv, &base); mcp251xfd_ring_init_rx(priv, &base, &fifo_nr); mcp251xfd_ring_init_tx(priv, &base, &fifo_nr); /* mcp251xfd_handle_rxif() will iterate over all RX rings. * Rings with their corresponding bit set in * priv->regs_status.rxif are read out. * * If the chip is configured for only 1 RX-FIFO, and if there * is an RX interrupt pending (RXIF in INT register is set), * it must be the 1st RX-FIFO. * * We mark the RXIF of the 1st FIFO as pending here, so that * we can skip the read of the RXIF register in * mcp251xfd_read_regs_status() for the 1 RX-FIFO only case. * * If we use more than 1 RX-FIFO, this value gets overwritten * in mcp251xfd_read_regs_status(), so set it unconditionally * here. 
*/ priv->regs_status.rxif = BIT(priv->rx[0]->fifo_nr); if (priv->tx_obj_num_coalesce_irq) { netdev_dbg(priv->ndev, "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes (coalesce)\n", mcp251xfd_get_tef_obj_addr(0), priv->tx_obj_num_coalesce_irq, sizeof(struct mcp251xfd_hw_tef_obj), priv->tx_obj_num_coalesce_irq * sizeof(struct mcp251xfd_hw_tef_obj)); netdev_dbg(priv->ndev, " 0x%03x: %2d*%zu bytes = %4zu bytes\n", mcp251xfd_get_tef_obj_addr(priv->tx_obj_num_coalesce_irq), priv->tx->obj_num - priv->tx_obj_num_coalesce_irq, sizeof(struct mcp251xfd_hw_tef_obj), (priv->tx->obj_num - priv->tx_obj_num_coalesce_irq) * sizeof(struct mcp251xfd_hw_tef_obj)); } else { netdev_dbg(priv->ndev, "FIFO setup: TEF: 0x%03x: %2d*%zu bytes = %4zu bytes\n", mcp251xfd_get_tef_obj_addr(0), priv->tx->obj_num, sizeof(struct mcp251xfd_hw_tef_obj), priv->tx->obj_num * sizeof(struct mcp251xfd_hw_tef_obj)); } mcp251xfd_for_each_rx_ring(priv, rx_ring, i) { if (rx_ring->nr == 0 && priv->rx_obj_num_coalesce_irq) { netdev_dbg(priv->ndev, "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes (coalesce)\n", rx_ring->nr, rx_ring->fifo_nr, mcp251xfd_get_rx_obj_addr(rx_ring, 0), priv->rx_obj_num_coalesce_irq, rx_ring->obj_size, priv->rx_obj_num_coalesce_irq * rx_ring->obj_size); if (priv->rx_obj_num_coalesce_irq == MCP251XFD_FIFO_DEPTH) continue; netdev_dbg(priv->ndev, " 0x%03x: %2u*%u bytes = %4u bytes\n", mcp251xfd_get_rx_obj_addr(rx_ring, priv->rx_obj_num_coalesce_irq), rx_ring->obj_num - priv->rx_obj_num_coalesce_irq, rx_ring->obj_size, (rx_ring->obj_num - priv->rx_obj_num_coalesce_irq) * rx_ring->obj_size); } else { netdev_dbg(priv->ndev, "FIFO setup: RX-%u: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n", rx_ring->nr, rx_ring->fifo_nr, mcp251xfd_get_rx_obj_addr(rx_ring, 0), rx_ring->obj_num, rx_ring->obj_size, rx_ring->obj_num * rx_ring->obj_size); } } netdev_dbg(priv->ndev, "FIFO setup: TX: FIFO %u/0x%03x: %2u*%u bytes = %4u bytes\n", priv->tx->fifo_nr, mcp251xfd_get_tx_obj_addr(priv->tx, 0), priv->tx->obj_num, priv->tx->obj_size, priv->tx->obj_num * priv->tx->obj_size); netdev_dbg(priv->ndev, "FIFO setup: free: %4d bytes\n", MCP251XFD_RAM_SIZE - (base - MCP251XFD_RAM_START)); ram_used = base - MCP251XFD_RAM_START; if (ram_used > MCP251XFD_RAM_SIZE) { netdev_err(priv->ndev, "Error during ring configuration, using more RAM (%u bytes) than available (%u bytes).\n", ram_used, MCP251XFD_RAM_SIZE); return -ENOMEM; } return 0; } void mcp251xfd_ring_free(struct mcp251xfd_priv *priv) { int i; for (i = ARRAY_SIZE(priv->rx) - 1; i >= 0; i--) { kfree(priv->rx[i]); priv->rx[i] = NULL; } } static enum hrtimer_restart mcp251xfd_rx_irq_timer(struct hrtimer *t) { struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv, rx_irq_timer); struct mcp251xfd_rx_ring *ring = priv->rx[0]; if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags)) return HRTIMER_NORESTART; spi_async(priv->spi, &ring->irq_enable_msg); return HRTIMER_NORESTART; } static enum hrtimer_restart mcp251xfd_tx_irq_timer(struct hrtimer *t) { struct mcp251xfd_priv *priv = container_of(t, struct mcp251xfd_priv, tx_irq_timer); struct mcp251xfd_tef_ring *ring = priv->tef; if (test_bit(MCP251XFD_FLAGS_DOWN, priv->flags)) return HRTIMER_NORESTART; spi_async(priv->spi, &ring->irq_enable_msg); return HRTIMER_NORESTART; } const struct can_ram_config mcp251xfd_ram_config = { .rx = { .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_rx_obj_can), .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_rx_obj_canfd), .min = MCP251XFD_RX_OBJ_NUM_MIN, .max = MCP251XFD_RX_OBJ_NUM_MAX, 
.def[CAN_RAM_MODE_CAN] = CAN_RAM_NUM_MAX, .def[CAN_RAM_MODE_CANFD] = CAN_RAM_NUM_MAX, .fifo_num = MCP251XFD_FIFO_RX_NUM, .fifo_depth_min = MCP251XFD_RX_FIFO_DEPTH_MIN, .fifo_depth_coalesce_min = MCP251XFD_RX_FIFO_DEPTH_COALESCE_MIN, }, .tx = { .size[CAN_RAM_MODE_CAN] = sizeof(struct mcp251xfd_hw_tef_obj) + sizeof(struct mcp251xfd_hw_tx_obj_can), .size[CAN_RAM_MODE_CANFD] = sizeof(struct mcp251xfd_hw_tef_obj) + sizeof(struct mcp251xfd_hw_tx_obj_canfd), .min = MCP251XFD_TX_OBJ_NUM_MIN, .max = MCP251XFD_TX_OBJ_NUM_MAX, .def[CAN_RAM_MODE_CAN] = MCP251XFD_TX_OBJ_NUM_CAN_DEFAULT, .def[CAN_RAM_MODE_CANFD] = MCP251XFD_TX_OBJ_NUM_CANFD_DEFAULT, .fifo_num = MCP251XFD_FIFO_TX_NUM, .fifo_depth_min = MCP251XFD_TX_FIFO_DEPTH_MIN, .fifo_depth_coalesce_min = MCP251XFD_TX_FIFO_DEPTH_COALESCE_MIN, }, .size = MCP251XFD_RAM_SIZE, .fifo_depth = MCP251XFD_FIFO_DEPTH, }; int mcp251xfd_ring_alloc(struct mcp251xfd_priv *priv) { const bool fd_mode = mcp251xfd_is_fd_mode(priv); struct mcp251xfd_tx_ring *tx_ring = priv->tx; struct mcp251xfd_rx_ring *rx_ring; u8 tx_obj_size, rx_obj_size; u8 rem, i; /* switching from CAN-2.0 to CAN-FD mode or vice versa */ if (fd_mode != test_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags)) { struct can_ram_layout layout; can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode); priv->rx_obj_num = layout.default_rx; tx_ring->obj_num = layout.default_tx; } if (fd_mode) { tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_canfd); rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_canfd); set_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags); } else { tx_obj_size = sizeof(struct mcp251xfd_hw_tx_obj_can); rx_obj_size = sizeof(struct mcp251xfd_hw_rx_obj_can); clear_bit(MCP251XFD_FLAGS_FD_MODE, priv->flags); } tx_ring->obj_size = tx_obj_size; rem = priv->rx_obj_num; for (i = 0; i < ARRAY_SIZE(priv->rx) && rem; i++) { u8 rx_obj_num; if (i == 0 && priv->rx_obj_num_coalesce_irq) rx_obj_num = min_t(u8, priv->rx_obj_num_coalesce_irq * 2, MCP251XFD_FIFO_DEPTH); else rx_obj_num = min_t(u8, rounddown_pow_of_two(rem), MCP251XFD_FIFO_DEPTH); rem -= rx_obj_num; rx_ring = kzalloc(sizeof(*rx_ring) + rx_obj_size * rx_obj_num, GFP_KERNEL); if (!rx_ring) { mcp251xfd_ring_free(priv); return -ENOMEM; } rx_ring->obj_num = rx_obj_num; rx_ring->obj_size = rx_obj_size; priv->rx[i] = rx_ring; } priv->rx_ring_num = i; hrtimer_init(&priv->rx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); priv->rx_irq_timer.function = mcp251xfd_rx_irq_timer; hrtimer_init(&priv->tx_irq_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); priv->tx_irq_timer.function = mcp251xfd_tx_irq_timer; return 0; }
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-ring.c
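Editor's note: mcp251xfd_ring_alloc() above distributes the requested number of RX objects over the hardware FIFOs by carving off power-of-two chunks, capped at the FIFO depth (the real code additionally special-cases the first FIFO when RX IRQ coalescing is enabled). A standalone sketch of the split; the depth value is an assumption mirroring MCP251XFD_FIFO_DEPTH:

#include <stdio.h>

#define FIFO_DEPTH 32	/* assumption: MCP251XFD_FIFO_DEPTH */

static unsigned int rounddown_pow_of_two(unsigned int n)
{
	unsigned int p = 1;

	while (p * 2 <= n)
		p *= 2;
	return p;
}

int main(void)
{
	unsigned int rem = 50, n;

	while (rem) {
		n = rounddown_pow_of_two(rem);
		if (n > FIFO_DEPTH)
			n = FIFO_DEPTH;
		printf("FIFO gets %2u objects\n", n);	/* 32, 16, 2 */
		rem -= n;
	}
	return 0;
}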
// SPDX-License-Identifier: GPL-2.0 // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // // Copyright (c) 2019, 2020, 2021 Pengutronix, // Marc Kleine-Budde <[email protected]> // // Based on: // // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface // // Copyright (c) 2019 Martin Sperl <[email protected]> // #include <linux/bitfield.h> #include "mcp251xfd.h" static int mcp251xfd_chip_rx_fifo_init_one(const struct mcp251xfd_priv *priv, const struct mcp251xfd_rx_ring *ring) { u32 fifo_con; /* Enable RXOVIE on _all_ RX FIFOs, not just the last one. * * FIFOs hit by a RX MAB overflow and RXOVIE enabled will * generate a RXOVIF, use this to properly detect RX MAB * overflows. */ fifo_con = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, ring->obj_num - 1) | MCP251XFD_REG_FIFOCON_RXTSEN | MCP251XFD_REG_FIFOCON_RXOVIE | MCP251XFD_REG_FIFOCON_TFNRFNIE; if (mcp251xfd_is_fd_mode(priv)) fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, MCP251XFD_REG_FIFOCON_PLSIZE_64); else fifo_con |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, MCP251XFD_REG_FIFOCON_PLSIZE_8); return regmap_write(priv->map_reg, MCP251XFD_REG_FIFOCON(ring->fifo_nr), fifo_con); } static int mcp251xfd_chip_rx_filter_init_one(const struct mcp251xfd_priv *priv, const struct mcp251xfd_rx_ring *ring) { u32 fltcon; fltcon = MCP251XFD_REG_FLTCON_FLTEN(ring->nr) | MCP251XFD_REG_FLTCON_FBP(ring->nr, ring->fifo_nr); return regmap_update_bits(priv->map_reg, MCP251XFD_REG_FLTCON(ring->nr >> 2), MCP251XFD_REG_FLTCON_FLT_MASK(ring->nr), fltcon); } int mcp251xfd_chip_fifo_init(const struct mcp251xfd_priv *priv) { const struct mcp251xfd_tx_ring *tx_ring = priv->tx; const struct mcp251xfd_rx_ring *rx_ring; u32 val; int err, n; /* TEF */ val = FIELD_PREP(MCP251XFD_REG_TEFCON_FSIZE_MASK, tx_ring->obj_num - 1) | MCP251XFD_REG_TEFCON_TEFTSEN | MCP251XFD_REG_TEFCON_TEFOVIE | MCP251XFD_REG_TEFCON_TEFNEIE; err = regmap_write(priv->map_reg, MCP251XFD_REG_TEFCON, val); if (err) return err; /* TX FIFO */ val = FIELD_PREP(MCP251XFD_REG_FIFOCON_FSIZE_MASK, tx_ring->obj_num - 1) | MCP251XFD_REG_FIFOCON_TXEN | MCP251XFD_REG_FIFOCON_TXATIE; if (mcp251xfd_is_fd_mode(priv)) val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, MCP251XFD_REG_FIFOCON_PLSIZE_64); else val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_PLSIZE_MASK, MCP251XFD_REG_FIFOCON_PLSIZE_8); if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, MCP251XFD_REG_FIFOCON_TXAT_ONE_SHOT); else val |= FIELD_PREP(MCP251XFD_REG_FIFOCON_TXAT_MASK, MCP251XFD_REG_FIFOCON_TXAT_UNLIMITED); err = regmap_write(priv->map_reg, MCP251XFD_REG_FIFOCON(priv->tx->fifo_nr), val); if (err) return err; /* RX FIFOs */ mcp251xfd_for_each_rx_ring(priv, rx_ring, n) { err = mcp251xfd_chip_rx_fifo_init_one(priv, rx_ring); if (err) return err; err = mcp251xfd_chip_rx_filter_init_one(priv, rx_ring); if (err) return err; } return 0; }
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-chip-fifo.c
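Editor's note: the FIFO setup above composes FIFOCON/TEFCON values with the kernel's FIELD_PREP(), which shifts a value into the position of a contiguous bit-field mask. A minimal reimplementation for illustration only (__builtin_ctz is a GCC/Clang builtin; the mask value is an assumption matching MCP251XFD_REG_FIFOCON_FSIZE_MASK, bits 28:24):

#include <stdio.h>
#include <stdint.h>

/* shift val up to the field's lowest set bit, then clamp to the mask */
#define FIELD_PREP(mask, val) \
	((uint32_t)((val) << __builtin_ctz(mask)) & (mask))

#define FSIZE_MASK 0x1f000000u	/* assumption: FIFO size field, bits 28:24 */

int main(void)
{
	/* a 16-deep FIFO is programmed as obj_num - 1 = 15 */
	printf("FIFOCON = 0x%08x\n",
	       (unsigned int)FIELD_PREP(FSIZE_MASK, 16 - 1)); /* 0x0f000000 */
	return 0;
}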
// SPDX-License-Identifier: GPL-2.0 // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // // Copyright (c) 2021, 2022 Pengutronix, // Marc Kleine-Budde <[email protected]> // #include <linux/ethtool.h> #include "mcp251xfd.h" #include "mcp251xfd-ram.h" static void mcp251xfd_ring_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { const struct mcp251xfd_priv *priv = netdev_priv(ndev); const bool fd_mode = mcp251xfd_is_fd_mode(priv); struct can_ram_layout layout; can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, fd_mode); ring->rx_max_pending = layout.max_rx; ring->tx_max_pending = layout.max_tx; ring->rx_pending = priv->rx_obj_num; ring->tx_pending = priv->tx->obj_num; } static int mcp251xfd_ring_set_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *extack) { struct mcp251xfd_priv *priv = netdev_priv(ndev); const bool fd_mode = mcp251xfd_is_fd_mode(priv); struct can_ram_layout layout; can_ram_get_layout(&layout, &mcp251xfd_ram_config, ring, NULL, fd_mode); if ((layout.cur_rx != priv->rx_obj_num || layout.cur_tx != priv->tx->obj_num) && netif_running(ndev)) return -EBUSY; priv->rx_obj_num = layout.cur_rx; priv->rx_obj_num_coalesce_irq = layout.rx_coalesce; priv->tx->obj_num = layout.cur_tx; priv->tx_obj_num_coalesce_irq = layout.tx_coalesce; return 0; } static int mcp251xfd_ring_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kec, struct netlink_ext_ack *ext_ack) { struct mcp251xfd_priv *priv = netdev_priv(ndev); u32 rx_max_frames, tx_max_frames; /* The ethtool doc says: * To disable coalescing, set usecs = 0 and max_frames = 1. 
*/ if (priv->rx_obj_num_coalesce_irq == 0) rx_max_frames = 1; else rx_max_frames = priv->rx_obj_num_coalesce_irq; ec->rx_max_coalesced_frames_irq = rx_max_frames; ec->rx_coalesce_usecs_irq = priv->rx_coalesce_usecs_irq; if (priv->tx_obj_num_coalesce_irq == 0) tx_max_frames = 1; else tx_max_frames = priv->tx_obj_num_coalesce_irq; ec->tx_max_coalesced_frames_irq = tx_max_frames; ec->tx_coalesce_usecs_irq = priv->tx_coalesce_usecs_irq; return 0; } static int mcp251xfd_ring_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec, struct kernel_ethtool_coalesce *kec, struct netlink_ext_ack *ext_ack) { struct mcp251xfd_priv *priv = netdev_priv(ndev); const bool fd_mode = mcp251xfd_is_fd_mode(priv); const struct ethtool_ringparam ring = { .rx_pending = priv->rx_obj_num, .tx_pending = priv->tx->obj_num, }; struct can_ram_layout layout; can_ram_get_layout(&layout, &mcp251xfd_ram_config, &ring, ec, fd_mode); if ((layout.rx_coalesce != priv->rx_obj_num_coalesce_irq || ec->rx_coalesce_usecs_irq != priv->rx_coalesce_usecs_irq || layout.tx_coalesce != priv->tx_obj_num_coalesce_irq || ec->tx_coalesce_usecs_irq != priv->tx_coalesce_usecs_irq) && netif_running(ndev)) return -EBUSY; priv->rx_obj_num = layout.cur_rx; priv->rx_obj_num_coalesce_irq = layout.rx_coalesce; priv->rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq; priv->tx->obj_num = layout.cur_tx; priv->tx_obj_num_coalesce_irq = layout.tx_coalesce; priv->tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq; return 0; } static const struct ethtool_ops mcp251xfd_ethtool_ops = { .supported_coalesce_params = ETHTOOL_COALESCE_RX_USECS_IRQ | ETHTOOL_COALESCE_RX_MAX_FRAMES_IRQ | ETHTOOL_COALESCE_TX_USECS_IRQ | ETHTOOL_COALESCE_TX_MAX_FRAMES_IRQ, .get_ringparam = mcp251xfd_ring_get_ringparam, .set_ringparam = mcp251xfd_ring_set_ringparam, .get_coalesce = mcp251xfd_ring_get_coalesce, .set_coalesce = mcp251xfd_ring_set_coalesce, .get_ts_info = can_ethtool_op_get_ts_info_hwts, }; void mcp251xfd_ethtool_init(struct mcp251xfd_priv *priv) { struct can_ram_layout layout; priv->ndev->ethtool_ops = &mcp251xfd_ethtool_ops; can_ram_get_layout(&layout, &mcp251xfd_ram_config, NULL, NULL, false); priv->rx_obj_num = layout.default_rx; priv->tx->obj_num = layout.default_tx; priv->rx_obj_num_coalesce_irq = 0; priv->tx_obj_num_coalesce_irq = 0; priv->rx_coalesce_usecs_irq = 0; priv->tx_coalesce_usecs_irq = 0; }
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-ethtool.c
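Editor's note: get_coalesce above maps an internal coalesce count of 0 to max_frames = 1, following the ethtool convention quoted in the code ("To disable coalescing, set usecs = 0 and max_frames = 1"). A tiny sketch of that mapping:

#include <stdio.h>

static unsigned int to_max_frames(unsigned int obj_num_coalesce_irq)
{
	/* 0 internally means "off", which ethtool encodes as 1 frame */
	return obj_num_coalesce_irq ? obj_num_coalesce_irq : 1;
}

int main(void)
{
	printf("%u\n", to_max_frames(0));	/* 1: coalescing disabled */
	printf("%u\n", to_max_frames(8));	/* 8: coalesce 8 frames */
	return 0;
}

These are the parameters surfaced to userspace through ethtool's coalescing interface (ethtool -c / -C) on the CAN interface.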
// SPDX-License-Identifier: GPL-2.0 // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // // Copyright (c) 2019, 2020, 2021 Pengutronix, // Marc Kleine-Budde <[email protected]> // // Based on: // // CAN bus driver for Microchip 25XXFD CAN Controller with SPI Interface // // Copyright (c) 2019 Martin Sperl <[email protected]> // #include <linux/bitfield.h> #include "mcp251xfd.h" static inline int mcp251xfd_tef_tail_get_from_chip(const struct mcp251xfd_priv *priv, u8 *tef_tail) { u32 tef_ua; int err; err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFUA, &tef_ua); if (err) return err; *tef_tail = tef_ua / sizeof(struct mcp251xfd_hw_tef_obj); return 0; } static int mcp251xfd_check_tef_tail(const struct mcp251xfd_priv *priv) { u8 tef_tail_chip, tef_tail; int err; if (!IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY)) return 0; err = mcp251xfd_tef_tail_get_from_chip(priv, &tef_tail_chip); if (err) return err; tef_tail = mcp251xfd_get_tef_tail(priv); if (tef_tail_chip != tef_tail) { netdev_err(priv->ndev, "TEF tail of chip (0x%02x) and ours (0x%08x) inconsistent.\n", tef_tail_chip, tef_tail); return -EILSEQ; } return 0; } static int mcp251xfd_handle_tefif_recover(const struct mcp251xfd_priv *priv, const u32 seq) { const struct mcp251xfd_tx_ring *tx_ring = priv->tx; u32 tef_sta; int err; err = regmap_read(priv->map_reg, MCP251XFD_REG_TEFSTA, &tef_sta); if (err) return err; if (tef_sta & MCP251XFD_REG_TEFSTA_TEFOVIF) { netdev_err(priv->ndev, "Transmit Event FIFO buffer overflow.\n"); return -ENOBUFS; } netdev_info(priv->ndev, "Transmit Event FIFO buffer %s. (seq=0x%08x, tef_tail=0x%08x, tef_head=0x%08x, tx_head=0x%08x).\n", tef_sta & MCP251XFD_REG_TEFSTA_TEFFIF ? "full" : tef_sta & MCP251XFD_REG_TEFSTA_TEFNEIF ? "not empty" : "empty", seq, priv->tef->tail, priv->tef->head, tx_ring->head); /* The Sequence Number in the TEF doesn't match our tef_tail. */ return -EAGAIN; } static int mcp251xfd_handle_tefif_one(struct mcp251xfd_priv *priv, const struct mcp251xfd_hw_tef_obj *hw_tef_obj, unsigned int *frame_len_ptr) { struct net_device_stats *stats = &priv->ndev->stats; struct sk_buff *skb; u32 seq, seq_masked, tef_tail_masked, tef_tail; seq = FIELD_GET(MCP251XFD_OBJ_FLAGS_SEQ_MCP2518FD_MASK, hw_tef_obj->flags); /* Use the MCP2517FD mask on the MCP2518FD, too. We only * compare 7 bits; this should be enough to detect * not-yet-completed, i.e. old TEF objects. */ seq_masked = seq & field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); tef_tail_masked = priv->tef->tail & field_mask(MCP251XFD_OBJ_FLAGS_SEQ_MCP2517FD_MASK); if (seq_masked != tef_tail_masked) return mcp251xfd_handle_tefif_recover(priv, seq); tef_tail = mcp251xfd_get_tef_tail(priv); skb = priv->can.echo_skb[tef_tail]; if (skb) mcp251xfd_skb_set_timestamp(priv, skb, hw_tef_obj->ts); stats->tx_bytes += can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, tef_tail, hw_tef_obj->ts, frame_len_ptr); stats->tx_packets++; priv->tef->tail++; return 0; } static int mcp251xfd_tef_ring_update(struct mcp251xfd_priv *priv) { const struct mcp251xfd_tx_ring *tx_ring = priv->tx; unsigned int new_head; u8 chip_tx_tail; int err; err = mcp251xfd_tx_tail_get_from_chip(priv, &chip_tx_tail); if (err) return err; /* chip_tx_tail is the next TX-Object sent by the HW. * The new TEF head must be >= the old head, ... */ new_head = round_down(priv->tef->head, tx_ring->obj_num) + chip_tx_tail; if (new_head <= priv->tef->head) new_head += tx_ring->obj_num; /* ... but it cannot exceed the TX head.
*/ priv->tef->head = min(new_head, tx_ring->head); return mcp251xfd_check_tef_tail(priv); } static inline int mcp251xfd_tef_obj_read(const struct mcp251xfd_priv *priv, struct mcp251xfd_hw_tef_obj *hw_tef_obj, const u8 offset, const u8 len) { const struct mcp251xfd_tx_ring *tx_ring = priv->tx; const int val_bytes = regmap_get_val_bytes(priv->map_rx); if (IS_ENABLED(CONFIG_CAN_MCP251XFD_SANITY) && (offset > tx_ring->obj_num || len > tx_ring->obj_num || offset + len > tx_ring->obj_num)) { netdev_err(priv->ndev, "Trying to read too many TEF objects (max=%d, offset=%d, len=%d).\n", tx_ring->obj_num, offset, len); return -ERANGE; } return regmap_bulk_read(priv->map_rx, mcp251xfd_get_tef_obj_addr(offset), hw_tef_obj, sizeof(*hw_tef_obj) / val_bytes * len); } static inline void mcp251xfd_ecc_tefif_successful(struct mcp251xfd_priv *priv) { struct mcp251xfd_ecc *ecc = &priv->ecc; ecc->ecc_stat = 0; } int mcp251xfd_handle_tefif(struct mcp251xfd_priv *priv) { struct mcp251xfd_hw_tef_obj hw_tef_obj[MCP251XFD_TX_OBJ_NUM_MAX]; unsigned int total_frame_len = 0; u8 tef_tail, len, l; int err, i; err = mcp251xfd_tef_ring_update(priv); if (err) return err; tef_tail = mcp251xfd_get_tef_tail(priv); len = mcp251xfd_get_tef_len(priv); l = mcp251xfd_get_tef_linear_len(priv); err = mcp251xfd_tef_obj_read(priv, hw_tef_obj, tef_tail, l); if (err) return err; if (l < len) { err = mcp251xfd_tef_obj_read(priv, &hw_tef_obj[l], 0, len - l); if (err) return err; } for (i = 0; i < len; i++) { unsigned int frame_len = 0; err = mcp251xfd_handle_tefif_one(priv, &hw_tef_obj[i], &frame_len); /* -EAGAIN means the Sequence Number in the TEF * doesn't match our tef_tail. This can happen if we * read the TEF objects too early. Leave the loop and * let the interrupt handler call us again. */ if (err == -EAGAIN) goto out_netif_wake_queue; if (err) return err; total_frame_len += frame_len; } out_netif_wake_queue: len = i; /* number of good TEFs handled */ if (len) { struct mcp251xfd_tef_ring *ring = priv->tef; struct mcp251xfd_tx_ring *tx_ring = priv->tx; int offset; /* Increment the TEF FIFO tail pointer 'len' times in * a single SPI message. * * Note: * Calculate offset, so that the SPI transfer ends on * the last message of the uinc_xfer array, which has * "cs_change == 0", to properly deactivate the chip * select. */ offset = ARRAY_SIZE(ring->uinc_xfer) - len; err = spi_sync_transfer(priv->spi, ring->uinc_xfer + offset, len); if (err) return err; tx_ring->tail += len; netdev_completed_queue(priv->ndev, len, total_frame_len); err = mcp251xfd_check_tef_tail(priv); if (err) return err; } mcp251xfd_ecc_tefif_successful(priv); if (mcp251xfd_get_tx_free(priv->tx)) { /* Make sure that anybody stopping the queue after * this sees the new tx_ring->tail. */ smp_mb(); netif_wake_queue(priv->ndev); } if (priv->tx_coalesce_usecs_irq) hrtimer_start(&priv->tx_irq_timer, ns_to_ktime(priv->tx_coalesce_usecs_irq * NSEC_PER_USEC), HRTIMER_MODE_REL); return 0; }
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-tef.c
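Editor's note: mcp251xfd_handle_tefif_one() above compares only the sequence bits common to both chip variants to spot stale TEF objects. A sketch of the masked comparison; the 7-bit width is taken from the comment in the code, and the macro and function names are illustrative:

#include <stdio.h>
#include <stdint.h>

#define SEQ_MASK 0x7fu	/* 7 bits, as compared by the driver */

static int tef_obj_is_stale(uint32_t seq, uint32_t tef_tail)
{
	return (seq & SEQ_MASK) != (tef_tail & SEQ_MASK);
}

int main(void)
{
	/* tail 0x80 and seq 0x00 agree in the low 7 bits: current object */
	printf("%d\n", tef_obj_is_stale(0x00, 0x80));	/* 0 */
	/* a mismatch triggers the -EAGAIN recovery path above */
	printf("%d\n", tef_obj_is_stale(0x05, 0x80));	/* 1 */
	return 0;
}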
// SPDX-License-Identifier: GPL-2.0 // // mcp251xfd - Microchip MCP251xFD Family CAN controller driver // // Copyright (c) 2021 Pengutronix, // Marc Kleine-Budde <[email protected]> // #include <linux/clocksource.h> #include <linux/workqueue.h> #include "mcp251xfd.h" static u64 mcp251xfd_timestamp_read(const struct cyclecounter *cc) { const struct mcp251xfd_priv *priv; u32 timestamp = 0; int err; priv = container_of(cc, struct mcp251xfd_priv, cc); err = mcp251xfd_get_timestamp(priv, &timestamp); if (err) netdev_err(priv->ndev, "Error %d while reading timestamp. HW timestamps may be inaccurate.", err); return timestamp; } static void mcp251xfd_timestamp_work(struct work_struct *work) { struct delayed_work *delayed_work = to_delayed_work(work); struct mcp251xfd_priv *priv; priv = container_of(delayed_work, struct mcp251xfd_priv, timestamp); timecounter_read(&priv->tc); schedule_delayed_work(&priv->timestamp, MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); } void mcp251xfd_skb_set_timestamp(const struct mcp251xfd_priv *priv, struct sk_buff *skb, u32 timestamp) { struct skb_shared_hwtstamps *hwtstamps = skb_hwtstamps(skb); u64 ns; ns = timecounter_cyc2time(&priv->tc, timestamp); hwtstamps->hwtstamp = ns_to_ktime(ns); } void mcp251xfd_timestamp_init(struct mcp251xfd_priv *priv) { struct cyclecounter *cc = &priv->cc; cc->read = mcp251xfd_timestamp_read; cc->mask = CYCLECOUNTER_MASK(32); cc->shift = 1; cc->mult = clocksource_hz2mult(priv->can.clock.freq, cc->shift); timecounter_init(&priv->tc, &priv->cc, ktime_get_real_ns()); INIT_DELAYED_WORK(&priv->timestamp, mcp251xfd_timestamp_work); schedule_delayed_work(&priv->timestamp, MCP251XFD_TIMESTAMP_WORK_DELAY_SEC * HZ); } void mcp251xfd_timestamp_stop(struct mcp251xfd_priv *priv) { cancel_delayed_work_sync(&priv->timestamp); }
linux-master
drivers/net/can/spi/mcp251xfd/mcp251xfd-timestamp.c
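Editor's note: mcp251xfd_timestamp_init() above programs the cyclecounter with shift = 1 and mult = clocksource_hz2mult(freq, shift); the timecounter then converts raw counter deltas as ns = (delta * mult) >> shift. A worked example for a 40 MHz timebase (rounding in clocksource_hz2mult() is ignored; exact for this frequency):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t freq = 40000000;	/* SYSCLK feeding the TBC register */
	uint32_t shift = 1;
	/* clocksource_hz2mult(): mult ~= (NSEC_PER_SEC << shift) / freq */
	uint32_t mult = (uint32_t)((1000000000ull << shift) / freq); /* 50 */
	uint32_t delta = 40;	/* 40 ticks at 40 MHz = 1 us */

	printf("ns = %llu\n",
	       (unsigned long long)(((uint64_t)delta * mult) >> shift)); /* 1000 */
	return 0;
}

The periodic delayed work exists because the hardware counter is only 32 bits wide (cc->mask = CYCLECOUNTER_MASK(32)); at 40 MHz it wraps roughly every 107 seconds, so the timecounter must be sampled before a wrap goes unnoticed.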
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2010 * * - Kurt Van Dijck, EIA Electronics */ #include <linux/ethtool.h> #include <linux/module.h> #include <linux/interrupt.h> #include <asm/io.h> #include "softing.h" #define TX_ECHO_SKB_MAX (((TXMAX+1)/2)-1) /* * test if a specific CAN netdev * is online (i.e. up and running, not sleeping, not bus-off) */ static inline int canif_is_active(struct net_device *netdev) { struct can_priv *can = netdev_priv(netdev); if (!netif_running(netdev)) return 0; return (can->state <= CAN_STATE_ERROR_PASSIVE); } /* reset DPRAM */ static inline void softing_set_reset_dpram(struct softing *card) { if (card->pdat->generation >= 2) { spin_lock_bh(&card->spin); iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) & ~1, &card->dpram[DPRAM_V2_RESET]); spin_unlock_bh(&card->spin); } } static inline void softing_clr_reset_dpram(struct softing *card) { if (card->pdat->generation >= 2) { spin_lock_bh(&card->spin); iowrite8(ioread8(&card->dpram[DPRAM_V2_RESET]) | 1, &card->dpram[DPRAM_V2_RESET]); spin_unlock_bh(&card->spin); } } /* trigger the tx queueing */ static netdev_tx_t softing_netdev_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct softing_priv *priv = netdev_priv(dev); struct softing *card = priv->card; int ret; uint8_t *ptr; uint8_t fifo_wr, fifo_rd; struct can_frame *cf = (struct can_frame *)skb->data; uint8_t buf[DPRAM_TX_SIZE]; if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; spin_lock(&card->spin); ret = NETDEV_TX_BUSY; if (!card->fw.up || (card->tx.pending >= TXMAX) || (priv->tx.pending >= TX_ECHO_SKB_MAX)) goto xmit_done; fifo_wr = ioread8(&card->dpram[DPRAM_TX_WR]); fifo_rd = ioread8(&card->dpram[DPRAM_TX_RD]); if (fifo_wr == fifo_rd) /* fifo full */ goto xmit_done; memset(buf, 0, sizeof(buf)); ptr = buf; *ptr = CMD_TX; if (cf->can_id & CAN_RTR_FLAG) *ptr |= CMD_RTR; if (cf->can_id & CAN_EFF_FLAG) *ptr |= CMD_XTD; if (priv->index) *ptr |= CMD_BUS2; ++ptr; *ptr++ = cf->len; *ptr++ = (cf->can_id >> 0); *ptr++ = (cf->can_id >> 8); if (cf->can_id & CAN_EFF_FLAG) { *ptr++ = (cf->can_id >> 16); *ptr++ = (cf->can_id >> 24); } else { /* increment 1, not 2 as you might think */ ptr += 1; } if (!(cf->can_id & CAN_RTR_FLAG)) memcpy(ptr, &cf->data[0], cf->len); memcpy_toio(&card->dpram[DPRAM_TX + DPRAM_TX_SIZE * fifo_wr], buf, DPRAM_TX_SIZE); if (++fifo_wr >= DPRAM_TX_CNT) fifo_wr = 0; iowrite8(fifo_wr, &card->dpram[DPRAM_TX_WR]); card->tx.last_bus = priv->index; ++card->tx.pending; ++priv->tx.pending; can_put_echo_skb(skb, dev, priv->tx.echo_put, 0); ++priv->tx.echo_put; if (priv->tx.echo_put >= TX_ECHO_SKB_MAX) priv->tx.echo_put = 0; /* can_put_echo_skb() saves the skb, safe to return TX_OK */ ret = NETDEV_TX_OK; xmit_done: spin_unlock(&card->spin); if (card->tx.pending >= TXMAX) { int j; for (j = 0; j < ARRAY_SIZE(card->net); ++j) { if (card->net[j]) netif_stop_queue(card->net[j]); } } if (ret != NETDEV_TX_OK) netif_stop_queue(dev); return ret; } /* * shortcut for skb delivery */ int softing_netdev_rx(struct net_device *netdev, const struct can_frame *msg, ktime_t ktime) { struct sk_buff *skb; struct can_frame *cf; skb = alloc_can_skb(netdev, &cf); if (!skb) return -ENOMEM; memcpy(cf, msg, sizeof(*msg)); skb->tstamp = ktime; return netif_rx(skb); } /* * softing_handle_1 * pop 1 entry from the DPRAM queue, and process it */ static int softing_handle_1(struct softing *card) { struct net_device *netdev; struct softing_priv *priv; ktime_t ktime; struct can_frame msg; int cnt = 0, lost_msg; uint8_t fifo_rd, fifo_wr, cmd; uint8_t *ptr;
uint32_t tmp_u32; uint8_t buf[DPRAM_RX_SIZE]; memset(&msg, 0, sizeof(msg)); /* test for lost msgs */ lost_msg = ioread8(&card->dpram[DPRAM_RX_LOST]); if (lost_msg) { int j; /* reset condition */ iowrite8(0, &card->dpram[DPRAM_RX_LOST]); /* prepare msg */ msg.can_id = CAN_ERR_FLAG | CAN_ERR_CRTL; msg.len = CAN_ERR_DLC; msg.data[1] = CAN_ERR_CRTL_RX_OVERFLOW; /* * service to all buses, we don't know which it was applicable * but only service buses that are online */ for (j = 0; j < ARRAY_SIZE(card->net); ++j) { netdev = card->net[j]; if (!netdev) continue; if (!canif_is_active(netdev)) /* a dead bus has no overflows */ continue; ++netdev->stats.rx_over_errors; softing_netdev_rx(netdev, &msg, 0); } /* prepare for other use */ memset(&msg, 0, sizeof(msg)); ++cnt; } fifo_rd = ioread8(&card->dpram[DPRAM_RX_RD]); fifo_wr = ioread8(&card->dpram[DPRAM_RX_WR]); if (++fifo_rd >= DPRAM_RX_CNT) fifo_rd = 0; if (fifo_wr == fifo_rd) return cnt; memcpy_fromio(buf, &card->dpram[DPRAM_RX + DPRAM_RX_SIZE*fifo_rd], DPRAM_RX_SIZE); mb(); /* trigger dual port RAM */ iowrite8(fifo_rd, &card->dpram[DPRAM_RX_RD]); ptr = buf; cmd = *ptr++; if (cmd == 0xff) /* not quite useful, probably the card has got out */ return 0; netdev = card->net[0]; if (cmd & CMD_BUS2) netdev = card->net[1]; priv = netdev_priv(netdev); if (cmd & CMD_ERR) { uint8_t can_state, state; state = *ptr++; msg.can_id = CAN_ERR_FLAG; msg.len = CAN_ERR_DLC; if (state & SF_MASK_BUSOFF) { can_state = CAN_STATE_BUS_OFF; msg.can_id |= CAN_ERR_BUSOFF; state = STATE_BUSOFF; } else if (state & SF_MASK_EPASSIVE) { can_state = CAN_STATE_ERROR_PASSIVE; msg.can_id |= CAN_ERR_CRTL; msg.data[1] = CAN_ERR_CRTL_TX_PASSIVE; state = STATE_EPASSIVE; } else { can_state = CAN_STATE_ERROR_ACTIVE; msg.can_id |= CAN_ERR_CRTL; state = STATE_EACTIVE; } /* update DPRAM */ iowrite8(state, &card->dpram[priv->index ? 
DPRAM_INFO_BUSSTATE2 : DPRAM_INFO_BUSSTATE]); /* timestamp */ tmp_u32 = le32_to_cpup((void *)ptr); ktime = softing_raw2ktime(card, tmp_u32); ++netdev->stats.rx_errors; /* update internal status */ if (can_state != priv->can.state) { priv->can.state = can_state; if (can_state == CAN_STATE_ERROR_PASSIVE) ++priv->can.can_stats.error_passive; else if (can_state == CAN_STATE_BUS_OFF) { /* this calls can_close_cleanup() */ ++priv->can.can_stats.bus_off; can_bus_off(netdev); netif_stop_queue(netdev); } /* trigger socketcan */ softing_netdev_rx(netdev, &msg, ktime); } } else { if (cmd & CMD_RTR) msg.can_id |= CAN_RTR_FLAG; msg.len = can_cc_dlc2len(*ptr++); if (cmd & CMD_XTD) { msg.can_id |= CAN_EFF_FLAG; msg.can_id |= le32_to_cpup((void *)ptr); ptr += 4; } else { msg.can_id |= le16_to_cpup((void *)ptr); ptr += 2; } /* timestamp */ tmp_u32 = le32_to_cpup((void *)ptr); ptr += 4; ktime = softing_raw2ktime(card, tmp_u32); if (!(msg.can_id & CAN_RTR_FLAG)) memcpy(&msg.data[0], ptr, 8); /* update socket */ if (cmd & CMD_ACK) { /* acknowledge, was tx msg */ struct sk_buff *skb; skb = priv->can.echo_skb[priv->tx.echo_get]; if (skb) skb->tstamp = ktime; ++netdev->stats.tx_packets; netdev->stats.tx_bytes += can_get_echo_skb(netdev, priv->tx.echo_get, NULL); ++priv->tx.echo_get; if (priv->tx.echo_get >= TX_ECHO_SKB_MAX) priv->tx.echo_get = 0; if (priv->tx.pending) --priv->tx.pending; if (card->tx.pending) --card->tx.pending; } else { int ret; ret = softing_netdev_rx(netdev, &msg, ktime); if (ret == NET_RX_SUCCESS) { ++netdev->stats.rx_packets; if (!(msg.can_id & CAN_RTR_FLAG)) netdev->stats.rx_bytes += msg.len; } else { ++netdev->stats.rx_dropped; } } } ++cnt; return cnt; } /* * real interrupt handler */ static irqreturn_t softing_irq_thread(int irq, void *dev_id) { struct softing *card = (struct softing *)dev_id; struct net_device *netdev; struct softing_priv *priv; int j, offset, work_done; work_done = 0; spin_lock_bh(&card->spin); while (softing_handle_1(card) > 0) { ++card->irq.svc_count; ++work_done; } spin_unlock_bh(&card->spin); /* resume tx queue's */ offset = card->tx.last_bus; for (j = 0; j < ARRAY_SIZE(card->net); ++j) { if (card->tx.pending >= TXMAX) break; netdev = card->net[(j + offset + 1) % card->pdat->nbus]; if (!netdev) continue; priv = netdev_priv(netdev); if (!canif_is_active(netdev)) /* it makes no sense to wake dead buses */ continue; if (priv->tx.pending >= TX_ECHO_SKB_MAX) continue; ++work_done; netif_wake_queue(netdev); } return work_done ? IRQ_HANDLED : IRQ_NONE; } /* * interrupt routines: * schedule the 'real interrupt handler' */ static irqreturn_t softing_irq_v2(int irq, void *dev_id) { struct softing *card = (struct softing *)dev_id; uint8_t ir; ir = ioread8(&card->dpram[DPRAM_V2_IRQ_TOHOST]); iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]); return (1 == ir) ? IRQ_WAKE_THREAD : IRQ_NONE; } static irqreturn_t softing_irq_v1(int irq, void *dev_id) { struct softing *card = (struct softing *)dev_id; uint8_t ir; ir = ioread8(&card->dpram[DPRAM_IRQ_TOHOST]); iowrite8(0, &card->dpram[DPRAM_IRQ_TOHOST]); return ir ? 
IRQ_WAKE_THREAD : IRQ_NONE; } /* * netdev/candev interoperability */ static int softing_netdev_open(struct net_device *ndev) { int ret; /* check or determine and set bittime */ ret = open_candev(ndev); if (ret) return ret; ret = softing_startstop(ndev, 1); if (ret < 0) close_candev(ndev); return ret; } static int softing_netdev_stop(struct net_device *ndev) { netif_stop_queue(ndev); /* softing cycle does close_candev() */ return softing_startstop(ndev, 0); } static int softing_candev_set_mode(struct net_device *ndev, enum can_mode mode) { int ret; switch (mode) { case CAN_MODE_START: /* softing_startstop does close_candev() */ ret = softing_startstop(ndev, 1); return ret; case CAN_MODE_STOP: case CAN_MODE_SLEEP: return -EOPNOTSUPP; } return 0; } /* * Softing device management helpers */ int softing_enable_irq(struct softing *card, int enable) { int ret; if (!card->irq.nr) { return 0; } else if (card->irq.requested && !enable) { free_irq(card->irq.nr, card); card->irq.requested = 0; } else if (!card->irq.requested && enable) { ret = request_threaded_irq(card->irq.nr, (card->pdat->generation >= 2) ? softing_irq_v2 : softing_irq_v1, softing_irq_thread, IRQF_SHARED, dev_name(&card->pdev->dev), card); if (ret) { dev_alert(&card->pdev->dev, "request_threaded_irq(%u) failed\n", card->irq.nr); return ret; } card->irq.requested = 1; } return 0; } static void softing_card_shutdown(struct softing *card) { int fw_up = 0; if (mutex_lock_interruptible(&card->fw.lock)) { /* return -ERESTARTSYS */; } fw_up = card->fw.up; card->fw.up = 0; if (card->irq.requested && card->irq.nr) { free_irq(card->irq.nr, card); card->irq.requested = 0; } if (fw_up) { if (card->pdat->enable_irq) card->pdat->enable_irq(card->pdev, 0); softing_set_reset_dpram(card); if (card->pdat->reset) card->pdat->reset(card->pdev, 1); } mutex_unlock(&card->fw.lock); } static int softing_card_boot(struct softing *card) { int ret, j; static const uint8_t stream[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, }; unsigned char back[sizeof(stream)]; if (mutex_lock_interruptible(&card->fw.lock)) return -ERESTARTSYS; if (card->fw.up) { mutex_unlock(&card->fw.lock); return 0; } /* reset board */ if (card->pdat->enable_irq) card->pdat->enable_irq(card->pdev, 1); /* boot card */ softing_set_reset_dpram(card); if (card->pdat->reset) card->pdat->reset(card->pdev, 1); for (j = 0; (j + sizeof(stream)) < card->dpram_size; j += sizeof(stream)) { memcpy_toio(&card->dpram[j], stream, sizeof(stream)); /* flush IO cache */ mb(); memcpy_fromio(back, &card->dpram[j], sizeof(stream)); if (!memcmp(back, stream, sizeof(stream))) continue; /* memory is not equal */ dev_alert(&card->pdev->dev, "dpram failed at 0x%04x\n", j); ret = -EIO; goto failed; } wmb(); /* load boot firmware */ ret = softing_load_fw(card->pdat->boot.fw, card, card->dpram, card->dpram_size, card->pdat->boot.offs - card->pdat->boot.addr); if (ret < 0) goto failed; /* load loader firmware */ ret = softing_load_fw(card->pdat->load.fw, card, card->dpram, card->dpram_size, card->pdat->load.offs - card->pdat->load.addr); if (ret < 0) goto failed; if (card->pdat->reset) card->pdat->reset(card->pdev, 0); softing_clr_reset_dpram(card); ret = softing_bootloader_command(card, 0, "card boot"); if (ret < 0) goto failed; ret = softing_load_app_fw(card->pdat->app.fw, card); if (ret < 0) goto failed; ret = softing_chip_poweron(card); if (ret < 0) goto failed; card->fw.up = 1; mutex_unlock(&card->fw.lock); return 0; failed: card->fw.up = 0; if (card->pdat->enable_irq) 
card->pdat->enable_irq(card->pdev, 0); softing_set_reset_dpram(card); if (card->pdat->reset) card->pdat->reset(card->pdev, 1); mutex_unlock(&card->fw.lock); return ret; } /* * netdev sysfs */ static ssize_t show_chip(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = to_net_dev(dev); struct softing_priv *priv = netdev2softing(ndev); return sprintf(buf, "%i\n", priv->chip); } static ssize_t show_output(struct device *dev, struct device_attribute *attr, char *buf) { struct net_device *ndev = to_net_dev(dev); struct softing_priv *priv = netdev2softing(ndev); return sprintf(buf, "0x%02x\n", priv->output); } static ssize_t store_output(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct net_device *ndev = to_net_dev(dev); struct softing_priv *priv = netdev2softing(ndev); struct softing *card = priv->card; unsigned long val; int ret; ret = kstrtoul(buf, 0, &val); if (ret < 0) return ret; val &= 0xFF; ret = mutex_lock_interruptible(&card->fw.lock); if (ret) return -ERESTARTSYS; if (netif_running(ndev)) { mutex_unlock(&card->fw.lock); return -EBUSY; } priv->output = val; mutex_unlock(&card->fw.lock); return count; } static const DEVICE_ATTR(chip, 0444, show_chip, NULL); static const DEVICE_ATTR(output, 0644, show_output, store_output); static const struct attribute *const netdev_sysfs_attrs[] = { &dev_attr_chip.attr, &dev_attr_output.attr, NULL, }; static const struct attribute_group netdev_sysfs_group = { .name = NULL, .attrs = (struct attribute **)netdev_sysfs_attrs, }; static const struct net_device_ops softing_netdev_ops = { .ndo_open = softing_netdev_open, .ndo_stop = softing_netdev_stop, .ndo_start_xmit = softing_netdev_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops softing_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; static const struct can_bittiming_const softing_btr_const = { .name = KBUILD_MODNAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, /* overruled */ .brp_min = 1, .brp_max = 32, /* overruled */ .brp_inc = 1, }; static struct net_device *softing_netdev_create(struct softing *card, uint16_t chip_id) { struct net_device *netdev; struct softing_priv *priv; netdev = alloc_candev(sizeof(*priv), TX_ECHO_SKB_MAX); if (!netdev) { dev_alert(&card->pdev->dev, "alloc_candev failed\n"); return NULL; } priv = netdev_priv(netdev); priv->netdev = netdev; priv->card = card; memcpy(&priv->btr_const, &softing_btr_const, sizeof(priv->btr_const)); priv->btr_const.brp_max = card->pdat->max_brp; priv->btr_const.sjw_max = card->pdat->max_sjw; priv->can.bittiming_const = &priv->btr_const; priv->can.clock.freq = 8000000; priv->chip = chip_id; priv->output = softing_default_output(netdev); SET_NETDEV_DEV(netdev, &card->pdev->dev); netdev->flags |= IFF_ECHO; netdev->netdev_ops = &softing_netdev_ops; netdev->ethtool_ops = &softing_ethtool_ops; priv->can.do_set_mode = softing_candev_set_mode; priv->can.ctrlmode_supported = CAN_CTRLMODE_3_SAMPLES; return netdev; } static int softing_netdev_register(struct net_device *netdev) { int ret; ret = register_candev(netdev); if (ret) { dev_alert(&netdev->dev, "register failed\n"); return ret; } if (sysfs_create_group(&netdev->dev.kobj, &netdev_sysfs_group) < 0) netdev_alert(netdev, "sysfs group failed\n"); return 0; } static void softing_netdev_cleanup(struct net_device *netdev) { sysfs_remove_group(&netdev->dev.kobj, &netdev_sysfs_group); unregister_candev(netdev); free_candev(netdev); } /* * sysfs for Platform 
device */ #define DEV_ATTR_RO(name, member) \ static ssize_t show_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct softing *card = dev_get_drvdata(dev); \ return sprintf(buf, "%u\n", card->member); \ } \ static DEVICE_ATTR(name, 0444, show_##name, NULL) #define DEV_ATTR_RO_STR(name, member) \ static ssize_t show_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct softing *card = dev_get_drvdata(dev); \ return sprintf(buf, "%s\n", card->member); \ } \ static DEVICE_ATTR(name, 0444, show_##name, NULL) DEV_ATTR_RO(serial, id.serial); DEV_ATTR_RO_STR(firmware, pdat->app.fw); DEV_ATTR_RO(firmware_version, id.fw_version); DEV_ATTR_RO_STR(hardware, pdat->name); DEV_ATTR_RO(hardware_version, id.hw_version); DEV_ATTR_RO(license, id.license); static struct attribute *softing_pdev_attrs[] = { &dev_attr_serial.attr, &dev_attr_firmware.attr, &dev_attr_firmware_version.attr, &dev_attr_hardware.attr, &dev_attr_hardware_version.attr, &dev_attr_license.attr, NULL, }; static const struct attribute_group softing_pdev_group = { .name = NULL, .attrs = softing_pdev_attrs, }; /* * platform driver */ static void softing_pdev_remove(struct platform_device *pdev) { struct softing *card = platform_get_drvdata(pdev); int j; /* first, disable card*/ softing_card_shutdown(card); for (j = 0; j < ARRAY_SIZE(card->net); ++j) { if (!card->net[j]) continue; softing_netdev_cleanup(card->net[j]); card->net[j] = NULL; } sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group); iounmap(card->dpram); kfree(card); } static int softing_pdev_probe(struct platform_device *pdev) { const struct softing_platform_data *pdat = dev_get_platdata(&pdev->dev); struct softing *card; struct net_device *netdev; struct softing_priv *priv; struct resource *pres; int ret; int j; if (!pdat) { dev_warn(&pdev->dev, "no platform data\n"); return -EINVAL; } if (pdat->nbus > ARRAY_SIZE(card->net)) { dev_warn(&pdev->dev, "%u nets??\n", pdat->nbus); return -EINVAL; } card = kzalloc(sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->pdat = pdat; card->pdev = pdev; platform_set_drvdata(pdev, card); mutex_init(&card->fw.lock); spin_lock_init(&card->spin); ret = -EINVAL; pres = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!pres) goto platform_resource_failed; card->dpram_phys = pres->start; card->dpram_size = resource_size(pres); card->dpram = ioremap(card->dpram_phys, card->dpram_size); if (!card->dpram) { dev_alert(&card->pdev->dev, "dpram ioremap failed\n"); goto ioremap_failed; } pres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (pres) card->irq.nr = pres->start; /* reset card */ ret = softing_card_boot(card); if (ret < 0) { dev_alert(&pdev->dev, "failed to boot\n"); goto boot_failed; } /* only now, the chip's are known */ card->id.freq = card->pdat->freq; ret = sysfs_create_group(&pdev->dev.kobj, &softing_pdev_group); if (ret < 0) { dev_alert(&card->pdev->dev, "sysfs failed\n"); goto sysfs_failed; } for (j = 0; j < ARRAY_SIZE(card->net); ++j) { card->net[j] = netdev = softing_netdev_create(card, card->id.chip[j]); if (!netdev) { dev_alert(&pdev->dev, "failed to make can[%i]", j); ret = -ENOMEM; goto netdev_failed; } netdev->dev_id = j; priv = netdev_priv(card->net[j]); priv->index = j; ret = softing_netdev_register(netdev); if (ret) { free_candev(netdev); card->net[j] = NULL; dev_alert(&card->pdev->dev, "failed to register can[%i]\n", j); goto netdev_failed; } } dev_info(&card->pdev->dev, "%s ready.\n", card->pdat->name); return 0; netdev_failed: for (j = 0; j < 
ARRAY_SIZE(card->net); ++j) {
		if (!card->net[j])
			continue;
		softing_netdev_cleanup(card->net[j]);
	}
	sysfs_remove_group(&pdev->dev.kobj, &softing_pdev_group);
sysfs_failed:
	softing_card_shutdown(card);
boot_failed:
	iounmap(card->dpram);
ioremap_failed:
platform_resource_failed:
	kfree(card);
	return ret;
}

static struct platform_driver softing_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
	},
	.probe = softing_pdev_probe,
	.remove_new = softing_pdev_remove,
};

module_platform_driver(softing_driver);

MODULE_ALIAS("platform:softing");
MODULE_DESCRIPTION("Softing DPRAM CAN driver");
MODULE_AUTHOR("Kurt Van Dijck <[email protected]>");
MODULE_LICENSE("GPL v2");
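/*
 * Note on the interfaces registered above: each CAN netdev carries the
 * sysfs attributes "chip" (read-only) and "output" (writable only while
 * the interface is down, otherwise store_output() returns -EBUSY), and
 * the platform device exposes "serial", "firmware", "firmware_version",
 * "hardware", "hardware_version" and "license".
 */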
linux-master
drivers/net/can/softing/softing_main.c
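A stand-alone, user-space sketch of the DPRAM TX record layout that softing_netdev_start_xmit() builds in the file above. Only the byte layout is taken from the driver; the SKETCH_CMD_* values are invented placeholders for the driver's real CMD_TX/CMD_RTR/CMD_XTD/CMD_BUS2 constants (defined in softing.h, not shown here), and the driver itself always copies a full fixed-size DPRAM_TX_SIZE record rather than the used length returned here.

#include <stdint.h>
#include <string.h>
#include <stdio.h>

#define SKETCH_CMD_TX	0x01	/* placeholder, see softing.h */
#define SKETCH_CMD_RTR	0x02	/* placeholder */
#define SKETCH_CMD_XTD	0x04	/* placeholder */
#define SKETCH_CMD_BUS2	0x08	/* placeholder */

/* Pack one CAN frame the way the driver lays it out in DPRAM:
 * [0] command/flag byte, [1] dlc, [2..] the id in little endian
 * (4 id bytes for extended frames; standard frames write 2 id bytes
 * but still advance the cursor by 3), then the data bytes. */
static size_t pack_tx_record(uint8_t *buf, size_t bufsize,
			     uint32_t can_id, int extended, int rtr,
			     int bus2, const uint8_t *data, uint8_t len)
{
	uint8_t *ptr = buf;

	memset(buf, 0, bufsize);
	*ptr = SKETCH_CMD_TX;
	if (rtr)
		*ptr |= SKETCH_CMD_RTR;
	if (extended)
		*ptr |= SKETCH_CMD_XTD;
	if (bus2)
		*ptr |= SKETCH_CMD_BUS2;
	++ptr;
	*ptr++ = len;
	*ptr++ = can_id >> 0;
	*ptr++ = can_id >> 8;
	if (extended) {
		*ptr++ = can_id >> 16;
		*ptr++ = can_id >> 24;
	} else {
		/* same quirk as the driver: skip 1 byte, not 2 */
		ptr += 1;
	}
	if (!rtr)
		memcpy(ptr, data, len);
	return (size_t)(ptr - buf) + (rtr ? 0 : len);
}

int main(void)
{
	uint8_t rec[16];
	const uint8_t payload[2] = { 0xde, 0xad };
	size_t i, n;

	/* standard frame, id 0x123, 2 data bytes, bus 1 */
	n = pack_tx_record(rec, sizeof(rec), 0x123, 0, 0, 0,
			   payload, sizeof(payload));
	for (i = 0; i < n; i++)
		printf("%02x ", rec[i]);
	printf("\n");
	return 0;
}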
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2010 * * - Kurt Van Dijck, EIA Electronics */ #include <linux/firmware.h> #include <linux/sched/signal.h> #include <asm/div64.h> #include <asm/io.h> #include "softing.h" /* * low level DPRAM command. * Make sure that card->dpram[DPRAM_FCT_HOST] is preset */ static int _softing_fct_cmd(struct softing *card, int16_t cmd, uint16_t vector, const char *msg) { int ret; unsigned long stamp; iowrite16(cmd, &card->dpram[DPRAM_FCT_PARAM]); iowrite8(vector >> 8, &card->dpram[DPRAM_FCT_HOST + 1]); iowrite8(vector, &card->dpram[DPRAM_FCT_HOST]); /* be sure to flush this to the card */ wmb(); stamp = jiffies + 1 * HZ; /* wait for card */ do { /* DPRAM_FCT_HOST is _not_ aligned */ ret = ioread8(&card->dpram[DPRAM_FCT_HOST]) + (ioread8(&card->dpram[DPRAM_FCT_HOST + 1]) << 8); /* don't have any cached variables */ rmb(); if (ret == RES_OK) /* read return-value now */ return ioread16(&card->dpram[DPRAM_FCT_RESULT]); if ((ret != vector) || time_after(jiffies, stamp)) break; /* process context => relax */ usleep_range(500, 10000); } while (1); ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED; dev_alert(&card->pdev->dev, "firmware %s failed (%i)\n", msg, ret); return ret; } static int softing_fct_cmd(struct softing *card, int16_t cmd, const char *msg) { int ret; ret = _softing_fct_cmd(card, cmd, 0, msg); if (ret > 0) { dev_alert(&card->pdev->dev, "%s returned %u\n", msg, ret); ret = -EIO; } return ret; } int softing_bootloader_command(struct softing *card, int16_t cmd, const char *msg) { int ret; unsigned long stamp; iowrite16(RES_NONE, &card->dpram[DPRAM_RECEIPT]); iowrite16(cmd, &card->dpram[DPRAM_COMMAND]); /* be sure to flush this to the card */ wmb(); stamp = jiffies + 3 * HZ; /* wait for card */ do { ret = ioread16(&card->dpram[DPRAM_RECEIPT]); /* don't have any cached variables */ rmb(); if (ret == RES_OK) return 0; if (time_after(jiffies, stamp)) break; /* process context => relax */ usleep_range(500, 10000); } while (!signal_pending(current)); ret = (ret == RES_NONE) ? -ETIMEDOUT : -ECANCELED; dev_alert(&card->pdev->dev, "bootloader %s failed (%i)\n", msg, ret); return ret; } static int fw_parse(const uint8_t **pmem, uint16_t *ptype, uint32_t *paddr, uint16_t *plen, const uint8_t **pdat) { uint16_t checksum[2]; const uint8_t *mem; const uint8_t *end; /* * firmware records are a binary, unaligned stream composed of: * uint16_t type; * uint32_t addr; * uint16_t len; * uint8_t dat[len]; * uint16_t checksum; * all values in little endian. * We could define a struct for this, with __attribute__((packed)), * but would that solve the alignment in _all_ cases (cfr. the * struct itself may be an odd address)? * * I chose to use leXX_to_cpup() since this solves both * endianness & alignment. 
*/ mem = *pmem; *ptype = le16_to_cpup((void *)&mem[0]); *paddr = le32_to_cpup((void *)&mem[2]); *plen = le16_to_cpup((void *)&mem[6]); *pdat = &mem[8]; /* verify checksum */ end = &mem[8 + *plen]; checksum[0] = le16_to_cpup((void *)end); for (checksum[1] = 0; mem < end; ++mem) checksum[1] += *mem; if (checksum[0] != checksum[1]) return -EINVAL; /* increment */ *pmem += 10 + *plen; return 0; } int softing_load_fw(const char *file, struct softing *card, __iomem uint8_t *dpram, unsigned int size, int offset) { const struct firmware *fw; int ret; const uint8_t *mem, *end, *dat; uint16_t type, len; uint32_t addr; uint8_t *buf = NULL, *new_buf; int buflen = 0; int8_t type_end = 0; ret = request_firmware(&fw, file, &card->pdev->dev); if (ret < 0) return ret; dev_dbg(&card->pdev->dev, "%s, firmware(%s) got %u bytes" ", offset %c0x%04x\n", card->pdat->name, file, (unsigned int)fw->size, (offset >= 0) ? '+' : '-', (unsigned int)abs(offset)); /* parse the firmware */ mem = fw->data; end = &mem[fw->size]; /* look for header record */ ret = fw_parse(&mem, &type, &addr, &len, &dat); if (ret < 0) goto failed; if (type != 0xffff) goto failed; if (strncmp("Structured Binary Format, Softing GmbH" , dat, len)) { ret = -EINVAL; goto failed; } /* ok, we had a header */ while (mem < end) { ret = fw_parse(&mem, &type, &addr, &len, &dat); if (ret < 0) goto failed; if (type == 3) { /* start address, not used here */ continue; } else if (type == 1) { /* eof */ type_end = 1; break; } else if (type != 0) { ret = -EINVAL; goto failed; } if ((addr + len + offset) > size) goto failed; memcpy_toio(&dpram[addr + offset], dat, len); /* be sure to flush caches from IO space */ mb(); if (len > buflen) { /* align buflen */ buflen = (len + (1024-1)) & ~(1024-1); new_buf = krealloc(buf, buflen, GFP_KERNEL); if (!new_buf) { ret = -ENOMEM; goto failed; } buf = new_buf; } /* verify record data */ memcpy_fromio(buf, &dpram[addr + offset], len); if (memcmp(buf, dat, len)) { /* is not ok */ dev_alert(&card->pdev->dev, "DPRAM readback failed\n"); ret = -EIO; goto failed; } } if (!type_end) /* no end record seen */ goto failed; ret = 0; failed: kfree(buf); release_firmware(fw); if (ret < 0) dev_info(&card->pdev->dev, "firmware %s failed\n", file); return ret; } int softing_load_app_fw(const char *file, struct softing *card) { const struct firmware *fw; const uint8_t *mem, *end, *dat; int ret, j; uint16_t type, len; uint32_t addr, start_addr = 0; unsigned int sum, rx_sum; int8_t type_end = 0, type_entrypoint = 0; ret = request_firmware(&fw, file, &card->pdev->dev); if (ret) { dev_alert(&card->pdev->dev, "request_firmware(%s) got %i\n", file, ret); return ret; } dev_dbg(&card->pdev->dev, "firmware(%s) got %lu bytes\n", file, (unsigned long)fw->size); /* parse the firmware */ mem = fw->data; end = &mem[fw->size]; /* look for header record */ ret = fw_parse(&mem, &type, &addr, &len, &dat); if (ret) goto failed; ret = -EINVAL; if (type != 0xffff) { dev_alert(&card->pdev->dev, "firmware starts with type 0x%x\n", type); goto failed; } if (strncmp("Structured Binary Format, Softing GmbH", dat, len)) { dev_alert(&card->pdev->dev, "firmware string '%.*s' fault\n", len, dat); goto failed; } /* ok, we had a header */ while (mem < end) { ret = fw_parse(&mem, &type, &addr, &len, &dat); if (ret) goto failed; if (type == 3) { /* start address */ start_addr = addr; type_entrypoint = 1; continue; } else if (type == 1) { /* eof */ type_end = 1; break; } else if (type != 0) { dev_alert(&card->pdev->dev, "unknown record type 0x%04x\n", type); ret = 
-EINVAL; goto failed; } /* regular data */ for (sum = 0, j = 0; j < len; ++j) sum += dat[j]; /* work in 16bit (target) */ sum &= 0xffff; memcpy_toio(&card->dpram[card->pdat->app.offs], dat, len); iowrite32(card->pdat->app.offs + card->pdat->app.addr, &card->dpram[DPRAM_COMMAND + 2]); iowrite32(addr, &card->dpram[DPRAM_COMMAND + 6]); iowrite16(len, &card->dpram[DPRAM_COMMAND + 10]); iowrite8(1, &card->dpram[DPRAM_COMMAND + 12]); ret = softing_bootloader_command(card, 1, "loading app."); if (ret < 0) goto failed; /* verify checksum */ rx_sum = ioread16(&card->dpram[DPRAM_RECEIPT + 2]); if (rx_sum != sum) { dev_alert(&card->pdev->dev, "SRAM seems to be damaged" ", wanted 0x%04x, got 0x%04x\n", sum, rx_sum); ret = -EIO; goto failed; } } if (!type_end || !type_entrypoint) goto failed; /* start application in card */ iowrite32(start_addr, &card->dpram[DPRAM_COMMAND + 2]); iowrite8(1, &card->dpram[DPRAM_COMMAND + 6]); ret = softing_bootloader_command(card, 3, "start app."); if (ret < 0) goto failed; ret = 0; failed: release_firmware(fw); if (ret < 0) dev_info(&card->pdev->dev, "firmware %s failed\n", file); return ret; } static int softing_reset_chip(struct softing *card) { int ret; do { /* reset chip */ iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO]); iowrite8(0, &card->dpram[DPRAM_RESET_RX_FIFO+1]); iowrite8(1, &card->dpram[DPRAM_RESET]); iowrite8(0, &card->dpram[DPRAM_RESET+1]); ret = softing_fct_cmd(card, 0, "reset_can"); if (!ret) break; if (signal_pending(current)) /* don't wait any longer */ break; } while (1); card->tx.pending = 0; return ret; } int softing_chip_poweron(struct softing *card) { int ret; /* sync */ ret = _softing_fct_cmd(card, 99, 0x55, "sync-a"); if (ret < 0) goto failed; ret = _softing_fct_cmd(card, 99, 0xaa, "sync-b"); if (ret < 0) goto failed; ret = softing_reset_chip(card); if (ret < 0) goto failed; /* get_serial */ ret = softing_fct_cmd(card, 43, "get_serial_number"); if (ret < 0) goto failed; card->id.serial = ioread32(&card->dpram[DPRAM_FCT_PARAM]); /* get_version */ ret = softing_fct_cmd(card, 12, "get_version"); if (ret < 0) goto failed; card->id.fw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 2]); card->id.hw_version = ioread16(&card->dpram[DPRAM_FCT_PARAM + 4]); card->id.license = ioread16(&card->dpram[DPRAM_FCT_PARAM + 6]); card->id.chip[0] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 8]); card->id.chip[1] = ioread16(&card->dpram[DPRAM_FCT_PARAM + 10]); return 0; failed: return ret; } static void softing_initialize_timestamp(struct softing *card) { uint64_t ovf; card->ts_ref = ktime_get(); /* 16MHz is the reference */ ovf = 0x100000000ULL * 16; do_div(ovf, card->pdat->freq ?: 16); card->ts_overflow = ktime_add_us(0, ovf); } ktime_t softing_raw2ktime(struct softing *card, u32 raw) { uint64_t rawl; ktime_t now, real_offset; ktime_t target; ktime_t tmp; now = ktime_get(); real_offset = ktime_sub(ktime_get_real(), now); /* find nsec from card */ rawl = raw * 16; do_div(rawl, card->pdat->freq ?: 16); target = ktime_add_us(card->ts_ref, rawl); /* test for overflows */ tmp = ktime_add(target, card->ts_overflow); while (unlikely(ktime_to_ns(tmp) > ktime_to_ns(now))) { card->ts_ref = ktime_add(card->ts_ref, card->ts_overflow); target = tmp; tmp = ktime_add(target, card->ts_overflow); } return ktime_add(target, real_offset); } static inline int softing_error_reporting(struct net_device *netdev) { struct softing_priv *priv = netdev_priv(netdev); return (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) ? 
1 : 0; } int softing_startstop(struct net_device *dev, int up) { int ret; struct softing *card; struct softing_priv *priv; struct net_device *netdev; int bus_bitmask_start; int j, error_reporting; struct can_frame msg; const struct can_bittiming *bt; priv = netdev_priv(dev); card = priv->card; if (!card->fw.up) return -EIO; ret = mutex_lock_interruptible(&card->fw.lock); if (ret) return ret; bus_bitmask_start = 0; if (dev && up) /* prepare to start this bus as well */ bus_bitmask_start |= (1 << priv->index); /* bring netdevs down */ for (j = 0; j < ARRAY_SIZE(card->net); ++j) { netdev = card->net[j]; if (!netdev) continue; priv = netdev_priv(netdev); if (dev != netdev) netif_stop_queue(netdev); if (netif_running(netdev)) { if (dev != netdev) bus_bitmask_start |= (1 << j); priv->tx.pending = 0; priv->tx.echo_put = 0; priv->tx.echo_get = 0; /* * this bus' may just have called open_candev() * which is rather stupid to call close_candev() * already * but we may come here from busoff recovery too * in which case the echo_skb _needs_ flushing too. * just be sure to call open_candev() again */ close_candev(netdev); } priv->can.state = CAN_STATE_STOPPED; } card->tx.pending = 0; softing_enable_irq(card, 0); ret = softing_reset_chip(card); if (ret) goto failed; if (!bus_bitmask_start) /* no buses to be brought up */ goto card_done; if ((bus_bitmask_start & 1) && (bus_bitmask_start & 2) && (softing_error_reporting(card->net[0]) != softing_error_reporting(card->net[1]))) { dev_alert(&card->pdev->dev, "err_reporting flag differs for buses\n"); goto invalid; } error_reporting = 0; if (bus_bitmask_start & 1) { netdev = card->net[0]; priv = netdev_priv(netdev); error_reporting += softing_error_reporting(netdev); /* init chip 1 */ bt = &priv->can.bittiming; iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]); iowrite16(bt->phase_seg1 + bt->prop_seg, &card->dpram[DPRAM_FCT_PARAM + 6]); iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]); iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 1 : 0, &card->dpram[DPRAM_FCT_PARAM + 10]); ret = softing_fct_cmd(card, 1, "initialize_chip[0]"); if (ret < 0) goto failed; /* set mode */ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]); ret = softing_fct_cmd(card, 3, "set_mode[0]"); if (ret < 0) goto failed; /* set filter */ /* 11bit id & mask */ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]); /* 29bit id.lo & mask.lo & id.hi & mask.hi */ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]); iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]); iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]); iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]); ret = softing_fct_cmd(card, 7, "set_filter[0]"); if (ret < 0) goto failed; /* set output control */ iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]); ret = softing_fct_cmd(card, 5, "set_output[0]"); if (ret < 0) goto failed; } if (bus_bitmask_start & 2) { netdev = card->net[1]; priv = netdev_priv(netdev); error_reporting += softing_error_reporting(netdev); /* init chip2 */ bt = &priv->can.bittiming; iowrite16(bt->brp, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(bt->sjw, &card->dpram[DPRAM_FCT_PARAM + 4]); iowrite16(bt->phase_seg1 + bt->prop_seg, &card->dpram[DPRAM_FCT_PARAM + 6]); iowrite16(bt->phase_seg2, &card->dpram[DPRAM_FCT_PARAM + 8]); iowrite16((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) ? 
1 : 0, &card->dpram[DPRAM_FCT_PARAM + 10]); ret = softing_fct_cmd(card, 2, "initialize_chip[1]"); if (ret < 0) goto failed; /* set mode2 */ iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(0, &card->dpram[DPRAM_FCT_PARAM + 4]); ret = softing_fct_cmd(card, 4, "set_mode[1]"); if (ret < 0) goto failed; /* set filter2 */ /* 11bit id & mask */ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(0x07ff, &card->dpram[DPRAM_FCT_PARAM + 4]); /* 29bit id.lo & mask.lo & id.hi & mask.hi */ iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 6]); iowrite16(0xffff, &card->dpram[DPRAM_FCT_PARAM + 8]); iowrite16(0x0000, &card->dpram[DPRAM_FCT_PARAM + 10]); iowrite16(0x1fff, &card->dpram[DPRAM_FCT_PARAM + 12]); ret = softing_fct_cmd(card, 8, "set_filter[1]"); if (ret < 0) goto failed; /* set output control2 */ iowrite16(priv->output, &card->dpram[DPRAM_FCT_PARAM + 2]); ret = softing_fct_cmd(card, 6, "set_output[1]"); if (ret < 0) goto failed; } /* enable_error_frame * * Error reporting is switched off at the moment since * the receiving of them is not yet 100% verified * This should be enabled sooner or later */ if (0 && error_reporting) { ret = softing_fct_cmd(card, 51, "enable_error_frame"); if (ret < 0) goto failed; } /* initialize interface */ iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 2]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 4]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 6]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 8]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 10]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 12]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 14]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 16]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 18]); iowrite16(1, &card->dpram[DPRAM_FCT_PARAM + 20]); ret = softing_fct_cmd(card, 17, "initialize_interface"); if (ret < 0) goto failed; /* enable_fifo */ ret = softing_fct_cmd(card, 36, "enable_fifo"); if (ret < 0) goto failed; /* enable fifo tx ack */ ret = softing_fct_cmd(card, 13, "fifo_tx_ack[0]"); if (ret < 0) goto failed; /* enable fifo tx ack2 */ ret = softing_fct_cmd(card, 14, "fifo_tx_ack[1]"); if (ret < 0) goto failed; /* start_chip */ ret = softing_fct_cmd(card, 11, "start_chip"); if (ret < 0) goto failed; iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE]); iowrite8(0, &card->dpram[DPRAM_INFO_BUSSTATE2]); if (card->pdat->generation < 2) { iowrite8(0, &card->dpram[DPRAM_V2_IRQ_TOHOST]); /* flush the DPRAM caches */ wmb(); } softing_initialize_timestamp(card); /* * do socketcan notifications/status changes * from here, no errors should occur, or the failed: part * must be reviewed */ memset(&msg, 0, sizeof(msg)); msg.can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED; msg.len = CAN_ERR_DLC; for (j = 0; j < ARRAY_SIZE(card->net); ++j) { if (!(bus_bitmask_start & (1 << j))) continue; netdev = card->net[j]; if (!netdev) continue; priv = netdev_priv(netdev); priv->can.state = CAN_STATE_ERROR_ACTIVE; open_candev(netdev); if (dev != netdev) { /* notify other buses on the restart */ softing_netdev_rx(netdev, &msg, 0); ++priv->can.can_stats.restarts; } netif_wake_queue(netdev); } /* enable interrupts */ ret = softing_enable_irq(card, 1); if (ret) goto failed; card_done: mutex_unlock(&card->fw.lock); return 0; invalid: ret = -EINVAL; failed: softing_enable_irq(card, 0); softing_reset_chip(card); mutex_unlock(&card->fw.lock); /* bring all other interfaces down */ for (j = 0; j < ARRAY_SIZE(card->net); ++j) { netdev = card->net[j]; if (!netdev) continue; dev_close(netdev); } return ret; } int softing_default_output(struct 
net_device *netdev)
{
	struct softing_priv *priv = netdev_priv(netdev);
	struct softing *card = priv->card;

	switch (priv->chip) {
	case 1000:
		return (card->pdat->generation < 2) ? 0xfb : 0xfa;
	case 5:
		return 0x60;
	default:
		return 0x40;
	}
}
linux-master
drivers/net/can/softing/softing_fw.c
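The firmware images loaded by the file above are parsed by fw_parse() as a stream of unaligned little-endian records, exactly as its leading comment describes: { u16 type; u32 addr; u16 len; u8 dat[len]; u16 checksum; }, where the checksum is the 16-bit sum of the 8 header bytes plus the payload. The following stand-alone sketch mirrors that walk; the helper names (get_le16, parse_record, ...) are invented for the example.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

static uint32_t get_le32(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

struct fw_record {
	uint16_t type;
	uint32_t addr;
	uint16_t len;
	const uint8_t *dat;
};

/* Returns 0 and advances *pmem past the record on success,
 * -1 on checksum mismatch (like fw_parse() returning -EINVAL). */
static int parse_record(const uint8_t **pmem, struct fw_record *rec)
{
	const uint8_t *mem = *pmem;
	uint16_t sum = 0;
	size_t i;

	rec->type = get_le16(&mem[0]);
	rec->addr = get_le32(&mem[2]);
	rec->len = get_le16(&mem[6]);
	rec->dat = &mem[8];

	/* 16-bit sum over header + data, wrapping like the driver's */
	for (i = 0; i < 8u + rec->len; i++)
		sum += mem[i];
	if (sum != get_le16(&mem[8 + rec->len]))
		return -1;

	*pmem += 10 + rec->len;	/* header + data + checksum */
	return 0;
}

int main(void)
{
	/* type=0, addr=0x10, len=2, data={0xaa,0x55}, checksum=0x0111 */
	const uint8_t stream[] = { 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
				   0x02, 0x00, 0xaa, 0x55, 0x11, 0x01 };
	const uint8_t *p = stream;
	struct fw_record rec;

	if (!parse_record(&p, &rec))
		printf("type %u, addr 0x%x, len %u\n",
		       (unsigned)rec.type, (unsigned)rec.addr,
		       (unsigned)rec.len);
	return 0;
}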
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2010 * * - Kurt Van Dijck, EIA Electronics */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include "softing_platform.h" static int softingcs_index; static DEFINE_SPINLOCK(softingcs_index_lock); static int softingcs_reset(struct platform_device *pdev, int v); static int softingcs_enable_irq(struct platform_device *pdev, int v); /* * platform_data descriptions */ #define MHZ (1000*1000) static const struct softing_platform_data softingcs_platform_data[] = { { .name = "CANcard", .manf = 0x0168, .prod = 0x001, .generation = 1, .nbus = 2, .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "CANcard-NEC", .manf = 0x0168, .prod = 0x002, .generation = 1, .nbus = 2, .freq = 16 * MHZ, .max_brp = 32, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "CANcard-SJA", .manf = 0x0168, .prod = 0x004, .generation = 1, .nbus = 2, .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "CANcard-2", .manf = 0x0168, .prod = 0x005, .generation = 2, .nbus = 2, .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, .dpram_size = 0x1000, .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",}, .reset = softingcs_reset, .enable_irq = NULL, }, { .name = "Vector-CANcard", .manf = 0x0168, .prod = 0x081, .generation = 1, .nbus = 2, .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "Vector-CANcard-SJA", .manf = 0x0168, .prod = 0x084, .generation = 1, .nbus = 2, .freq = 20 * MHZ, .max_brp = 32, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cansja.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "Vector-CANcard-2", .manf = 0x0168, .prod = 0x085, .generation = 2, .nbus = 2, .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, .dpram_size = 0x1000, .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",}, .reset = softingcs_reset, .enable_irq = NULL, }, { .name = "EDICcard-NEC", .manf = 0x0168, .prod = 0x102, .generation = 1, .nbus = 2, .freq = 16 * MHZ, .max_brp = 64, .max_sjw = 4, .dpram_size = 0x0800, .boot = {0x0000, 0x000000, fw_dir "bcard.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancard.bin",}, .reset = softingcs_reset, .enable_irq = softingcs_enable_irq, }, { .name = "EDICcard-2", .manf = 
0x0168, .prod = 0x105, .generation = 2, .nbus = 2, .freq = 24 * MHZ, .max_brp = 64, .max_sjw = 4, .dpram_size = 0x1000, .boot = {0x0000, 0x000000, fw_dir "bcard2.bin",}, .load = {0x0120, 0x00f600, fw_dir "ldcard2.bin",}, .app = {0x0010, 0x0d0000, fw_dir "cancrd2.bin",}, .reset = softingcs_reset, .enable_irq = NULL, }, { 0, 0, }, }; MODULE_FIRMWARE(fw_dir "bcard.bin"); MODULE_FIRMWARE(fw_dir "ldcard.bin"); MODULE_FIRMWARE(fw_dir "cancard.bin"); MODULE_FIRMWARE(fw_dir "cansja.bin"); MODULE_FIRMWARE(fw_dir "bcard2.bin"); MODULE_FIRMWARE(fw_dir "ldcard2.bin"); MODULE_FIRMWARE(fw_dir "cancrd2.bin"); static const struct softing_platform_data *softingcs_find_platform_data(unsigned int manf, unsigned int prod) { const struct softing_platform_data *lp; for (lp = softingcs_platform_data; lp->manf; ++lp) { if ((lp->manf == manf) && (lp->prod == prod)) return lp; } return NULL; } /* * platformdata callbacks */ static int softingcs_reset(struct platform_device *pdev, int v) { struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent); dev_dbg(&pdev->dev, "pcmcia config [2] %02x\n", v ? 0 : 0x20); return pcmcia_write_config_byte(pcmcia, 2, v ? 0 : 0x20); } static int softingcs_enable_irq(struct platform_device *pdev, int v) { struct pcmcia_device *pcmcia = to_pcmcia_dev(pdev->dev.parent); dev_dbg(&pdev->dev, "pcmcia config [0] %02x\n", v ? 0x60 : 0); return pcmcia_write_config_byte(pcmcia, 0, v ? 0x60 : 0); } /* * pcmcia check */ static int softingcs_probe_config(struct pcmcia_device *pcmcia, void *priv_data) { struct softing_platform_data *pdat = priv_data; struct resource *pres; int memspeed = 0; WARN_ON(!pdat); pres = pcmcia->resource[PCMCIA_IOMEM_0]; if (resource_size(pres) < 0x1000) return -ERANGE; pres->flags |= WIN_MEMORY_TYPE_CM | WIN_ENABLE; if (pdat->generation < 2) { pres->flags |= WIN_USE_WAIT | WIN_DATA_WIDTH_8; memspeed = 3; } else { pres->flags |= WIN_DATA_WIDTH_16; } return pcmcia_request_window(pcmcia, pres, memspeed); } static void softingcs_remove(struct pcmcia_device *pcmcia) { struct platform_device *pdev = pcmcia->priv; /* free bits */ platform_device_unregister(pdev); /* release pcmcia stuff */ pcmcia_disable_device(pcmcia); } /* * platform_device wrapper * pdev->resource has 2 entries: io & irq */ static void softingcs_pdev_release(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); kfree(pdev); } static int softingcs_probe(struct pcmcia_device *pcmcia) { int ret; struct platform_device *pdev; const struct softing_platform_data *pdat; struct resource *pres; struct dev { struct platform_device pdev; struct resource res[2]; } *dev; /* find matching platform_data */ pdat = softingcs_find_platform_data(pcmcia->manf_id, pcmcia->card_id); if (!pdat) return -ENOTTY; /* setup pcmcia device */ pcmcia->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IOMEM | CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC; ret = pcmcia_loop_config(pcmcia, softingcs_probe_config, (void *)pdat); if (ret) goto pcmcia_failed; ret = pcmcia_enable_device(pcmcia); if (ret < 0) goto pcmcia_failed; pres = pcmcia->resource[PCMCIA_IOMEM_0]; if (!pres) { ret = -EBADF; goto pcmcia_bad; } /* create softing platform device */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { ret = -ENOMEM; goto mem_failed; } dev->pdev.resource = dev->res; dev->pdev.num_resources = ARRAY_SIZE(dev->res); dev->pdev.dev.release = softingcs_pdev_release; pdev = &dev->pdev; pdev->dev.platform_data = (void *)pdat; pdev->dev.parent = &pcmcia->dev; pcmcia->priv = pdev; /* platform device resources */ 
pdev->resource[0].flags = IORESOURCE_MEM;
	pdev->resource[0].start = pres->start;
	pdev->resource[0].end = pres->end;
	pdev->resource[1].flags = IORESOURCE_IRQ;
	pdev->resource[1].start = pcmcia->irq;
	pdev->resource[1].end = pdev->resource[1].start;
	/* platform device setup */
	spin_lock(&softingcs_index_lock);
	pdev->id = softingcs_index++;
	spin_unlock(&softingcs_index_lock);
	pdev->name = "softing";
	dev_set_name(&pdev->dev, "softingcs.%i", pdev->id);
	ret = platform_device_register(pdev);
	if (ret < 0)
		goto platform_failed;

	dev_info(&pcmcia->dev, "created %s\n", dev_name(&pdev->dev));
	return 0;

platform_failed:
	platform_device_put(pdev);
mem_failed:
pcmcia_bad:
pcmcia_failed:
	pcmcia_disable_device(pcmcia);
	pcmcia->priv = NULL;
	return ret;
}

static const struct pcmcia_device_id softingcs_ids[] = {
	/* softing */
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0001),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0002),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0004),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0005),
	/* vector, manufacturer? */
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0081),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0084),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0085),
	/* EDIC */
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0102),
	PCMCIA_DEVICE_MANF_CARD(0x0168, 0x0105),
	PCMCIA_DEVICE_NULL,
};
MODULE_DEVICE_TABLE(pcmcia, softingcs_ids);

static struct pcmcia_driver softingcs_driver = {
	.owner = THIS_MODULE,
	.name = "softingcs",
	.id_table = softingcs_ids,
	.probe = softingcs_probe,
	.remove = softingcs_remove,
};

module_pcmcia_driver(softingcs_driver);

MODULE_DESCRIPTION("softing CANcard driver"
	", links PCMCIA card to softing driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/can/softing/softing_cs.c
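Device matching in softing_cs.c above is a linear scan of the platform-data table keyed by PCMCIA (manf, prod) IDs and terminated by a zeroed sentinel entry. A minimal stand-alone sketch of that lookup, with an abbreviated, made-up struct and a two-entry excerpt of the real table:

#include <stddef.h>
#include <stdio.h>

struct pdata_sketch {
	unsigned int manf;
	unsigned int prod;
	const char *name;
};

static const struct pdata_sketch table[] = {
	{ 0x0168, 0x001, "CANcard" },
	{ 0x0168, 0x005, "CANcard-2" },
	{ 0, 0, NULL },	/* sentinel, like the driver's { 0, 0, } */
};

/* Mirrors softingcs_find_platform_data(): stop at the first entry
 * whose manf/prod pair matches, or at the zeroed sentinel. */
static const struct pdata_sketch *find_pdata(unsigned int manf,
					     unsigned int prod)
{
	const struct pdata_sketch *lp;

	for (lp = table; lp->manf; ++lp)
		if (lp->manf == manf && lp->prod == prod)
			return lp;
	return NULL;
}

int main(void)
{
	const struct pdata_sketch *p = find_pdata(0x0168, 0x005);

	printf("%s\n", p ? p->name : "unknown card");
	return 0;
}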
// SPDX-License-Identifier: GPL-2.0 // // flexcan.c - FLEXCAN CAN controller driver // // Copyright (c) 2005-2006 Varma Electronics Oy // Copyright (c) 2009 Sascha Hauer, Pengutronix // Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <[email protected]> // Copyright (c) 2014 David Jander, Protonic Holland // // Based on code originally by Andrey Volkov <[email protected]> #include <dt-bindings/firmware/imx/rsrc.h> #include <linux/bitfield.h> #include <linux/can.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/firmware/imx/sci.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/mfd/syscon.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> #include <linux/can/platform/flexcan.h> #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/regulator/consumer.h> #include "flexcan.h" #define DRV_NAME "flexcan" /* 8 for RX fifo and 2 error handling */ #define FLEXCAN_NAPI_WEIGHT (8 + 2) /* FLEXCAN module configuration register (CANMCR) bits */ #define FLEXCAN_MCR_MDIS BIT(31) #define FLEXCAN_MCR_FRZ BIT(30) #define FLEXCAN_MCR_FEN BIT(29) #define FLEXCAN_MCR_HALT BIT(28) #define FLEXCAN_MCR_NOT_RDY BIT(27) #define FLEXCAN_MCR_WAK_MSK BIT(26) #define FLEXCAN_MCR_SOFTRST BIT(25) #define FLEXCAN_MCR_FRZ_ACK BIT(24) #define FLEXCAN_MCR_SUPV BIT(23) #define FLEXCAN_MCR_SLF_WAK BIT(22) #define FLEXCAN_MCR_WRN_EN BIT(21) #define FLEXCAN_MCR_LPM_ACK BIT(20) #define FLEXCAN_MCR_WAK_SRC BIT(19) #define FLEXCAN_MCR_DOZE BIT(18) #define FLEXCAN_MCR_SRX_DIS BIT(17) #define FLEXCAN_MCR_IRMQ BIT(16) #define FLEXCAN_MCR_LPRIO_EN BIT(13) #define FLEXCAN_MCR_AEN BIT(12) #define FLEXCAN_MCR_FDEN BIT(11) /* MCR_MAXMB: maximum used MBs is MAXMB + 1 */ #define FLEXCAN_MCR_MAXMB(x) ((x) & 0x7f) #define FLEXCAN_MCR_IDAM_A (0x0 << 8) #define FLEXCAN_MCR_IDAM_B (0x1 << 8) #define FLEXCAN_MCR_IDAM_C (0x2 << 8) #define FLEXCAN_MCR_IDAM_D (0x3 << 8) /* FLEXCAN control register (CANCTRL) bits */ #define FLEXCAN_CTRL_PRESDIV(x) (((x) & 0xff) << 24) #define FLEXCAN_CTRL_RJW(x) (((x) & 0x03) << 22) #define FLEXCAN_CTRL_PSEG1(x) (((x) & 0x07) << 19) #define FLEXCAN_CTRL_PSEG2(x) (((x) & 0x07) << 16) #define FLEXCAN_CTRL_BOFF_MSK BIT(15) #define FLEXCAN_CTRL_ERR_MSK BIT(14) #define FLEXCAN_CTRL_CLK_SRC BIT(13) #define FLEXCAN_CTRL_LPB BIT(12) #define FLEXCAN_CTRL_TWRN_MSK BIT(11) #define FLEXCAN_CTRL_RWRN_MSK BIT(10) #define FLEXCAN_CTRL_SMP BIT(7) #define FLEXCAN_CTRL_BOFF_REC BIT(6) #define FLEXCAN_CTRL_TSYN BIT(5) #define FLEXCAN_CTRL_LBUF BIT(4) #define FLEXCAN_CTRL_LOM BIT(3) #define FLEXCAN_CTRL_PROPSEG(x) ((x) & 0x07) #define FLEXCAN_CTRL_ERR_BUS (FLEXCAN_CTRL_ERR_MSK) #define FLEXCAN_CTRL_ERR_STATE \ (FLEXCAN_CTRL_TWRN_MSK | FLEXCAN_CTRL_RWRN_MSK | \ FLEXCAN_CTRL_BOFF_MSK) #define FLEXCAN_CTRL_ERR_ALL \ (FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE) /* FLEXCAN control register 2 (CTRL2) bits */ #define FLEXCAN_CTRL2_ECRWRE BIT(29) #define FLEXCAN_CTRL2_WRMFRZ BIT(28) #define FLEXCAN_CTRL2_RFFN(x) (((x) & 0x0f) << 24) #define FLEXCAN_CTRL2_TASD(x) (((x) & 0x1f) << 19) #define FLEXCAN_CTRL2_MRP BIT(18) #define FLEXCAN_CTRL2_RRS BIT(17) #define FLEXCAN_CTRL2_EACEN BIT(16) #define FLEXCAN_CTRL2_ISOCANFDEN BIT(12) /* FLEXCAN memory error control register (MECR) bits */ #define FLEXCAN_MECR_ECRWRDIS BIT(31) #define FLEXCAN_MECR_HANCEI_MSK BIT(19) #define FLEXCAN_MECR_FANCEI_MSK BIT(18) 
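/*
 * Illustrative sketch, assuming the usual FlexCAN programming model (not
 * quoted verbatim from this file): classic-CAN bit timing is programmed
 * by composing the CTRL field helpers defined above, with each register
 * field holding "value - 1":
 *
 *	reg_ctrl = FLEXCAN_CTRL_PRESDIV(bt->brp - 1) |
 *		   FLEXCAN_CTRL_RJW(bt->sjw - 1) |
 *		   FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) |
 *		   FLEXCAN_CTRL_PSEG2(bt->phase_seg2 - 1) |
 *		   FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1);
 */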
#define FLEXCAN_MECR_CEI_MSK BIT(16) #define FLEXCAN_MECR_HAERRIE BIT(15) #define FLEXCAN_MECR_FAERRIE BIT(14) #define FLEXCAN_MECR_EXTERRIE BIT(13) #define FLEXCAN_MECR_RERRDIS BIT(9) #define FLEXCAN_MECR_ECCDIS BIT(8) #define FLEXCAN_MECR_NCEFAFRZ BIT(7) /* FLEXCAN error and status register (ESR) bits */ #define FLEXCAN_ESR_TWRN_INT BIT(17) #define FLEXCAN_ESR_RWRN_INT BIT(16) #define FLEXCAN_ESR_BIT1_ERR BIT(15) #define FLEXCAN_ESR_BIT0_ERR BIT(14) #define FLEXCAN_ESR_ACK_ERR BIT(13) #define FLEXCAN_ESR_CRC_ERR BIT(12) #define FLEXCAN_ESR_FRM_ERR BIT(11) #define FLEXCAN_ESR_STF_ERR BIT(10) #define FLEXCAN_ESR_TX_WRN BIT(9) #define FLEXCAN_ESR_RX_WRN BIT(8) #define FLEXCAN_ESR_IDLE BIT(7) #define FLEXCAN_ESR_TXRX BIT(6) #define FLEXCAN_EST_FLT_CONF_SHIFT (4) #define FLEXCAN_ESR_FLT_CONF_MASK (0x3 << FLEXCAN_EST_FLT_CONF_SHIFT) #define FLEXCAN_ESR_FLT_CONF_ACTIVE (0x0 << FLEXCAN_EST_FLT_CONF_SHIFT) #define FLEXCAN_ESR_FLT_CONF_PASSIVE (0x1 << FLEXCAN_EST_FLT_CONF_SHIFT) #define FLEXCAN_ESR_BOFF_INT BIT(2) #define FLEXCAN_ESR_ERR_INT BIT(1) #define FLEXCAN_ESR_WAK_INT BIT(0) #define FLEXCAN_ESR_ERR_BUS \ (FLEXCAN_ESR_BIT1_ERR | FLEXCAN_ESR_BIT0_ERR | \ FLEXCAN_ESR_ACK_ERR | FLEXCAN_ESR_CRC_ERR | \ FLEXCAN_ESR_FRM_ERR | FLEXCAN_ESR_STF_ERR) #define FLEXCAN_ESR_ERR_STATE \ (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT) #define FLEXCAN_ESR_ERR_ALL \ (FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE) #define FLEXCAN_ESR_ALL_INT \ (FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \ FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT) /* FLEXCAN Bit Timing register (CBT) bits */ #define FLEXCAN_CBT_BTF BIT(31) #define FLEXCAN_CBT_EPRESDIV_MASK GENMASK(30, 21) #define FLEXCAN_CBT_ERJW_MASK GENMASK(20, 16) #define FLEXCAN_CBT_EPROPSEG_MASK GENMASK(15, 10) #define FLEXCAN_CBT_EPSEG1_MASK GENMASK(9, 5) #define FLEXCAN_CBT_EPSEG2_MASK GENMASK(4, 0) /* FLEXCAN FD control register (FDCTRL) bits */ #define FLEXCAN_FDCTRL_FDRATE BIT(31) #define FLEXCAN_FDCTRL_MBDSR1 GENMASK(20, 19) #define FLEXCAN_FDCTRL_MBDSR0 GENMASK(17, 16) #define FLEXCAN_FDCTRL_MBDSR_8 0x0 #define FLEXCAN_FDCTRL_MBDSR_12 0x1 #define FLEXCAN_FDCTRL_MBDSR_32 0x2 #define FLEXCAN_FDCTRL_MBDSR_64 0x3 #define FLEXCAN_FDCTRL_TDCEN BIT(15) #define FLEXCAN_FDCTRL_TDCFAIL BIT(14) #define FLEXCAN_FDCTRL_TDCOFF GENMASK(12, 8) #define FLEXCAN_FDCTRL_TDCVAL GENMASK(5, 0) /* FLEXCAN FD Bit Timing register (FDCBT) bits */ #define FLEXCAN_FDCBT_FPRESDIV_MASK GENMASK(29, 20) #define FLEXCAN_FDCBT_FRJW_MASK GENMASK(18, 16) #define FLEXCAN_FDCBT_FPROPSEG_MASK GENMASK(14, 10) #define FLEXCAN_FDCBT_FPSEG1_MASK GENMASK(7, 5) #define FLEXCAN_FDCBT_FPSEG2_MASK GENMASK(2, 0) /* FLEXCAN interrupt flag register (IFLAG) bits */ /* Errata ERR005829 step7: Reserve first valid MB */ #define FLEXCAN_TX_MB_RESERVED_RX_FIFO 8 #define FLEXCAN_TX_MB_RESERVED_RX_MAILBOX 0 #define FLEXCAN_RX_MB_RX_MAILBOX_FIRST (FLEXCAN_TX_MB_RESERVED_RX_MAILBOX + 1) #define FLEXCAN_IFLAG_MB(x) BIT_ULL(x) #define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW BIT(7) #define FLEXCAN_IFLAG_RX_FIFO_WARN BIT(6) #define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE BIT(5) /* FLEXCAN message buffers */ #define FLEXCAN_MB_CODE_MASK (0xf << 24) #define FLEXCAN_MB_CODE_RX_BUSY_BIT (0x1 << 24) #define FLEXCAN_MB_CODE_RX_INACTIVE (0x0 << 24) #define FLEXCAN_MB_CODE_RX_EMPTY (0x4 << 24) #define FLEXCAN_MB_CODE_RX_FULL (0x2 << 24) #define FLEXCAN_MB_CODE_RX_OVERRUN (0x6 << 24) #define FLEXCAN_MB_CODE_RX_RANSWER (0xa << 24) #define FLEXCAN_MB_CODE_TX_INACTIVE (0x8 << 24) #define FLEXCAN_MB_CODE_TX_ABORT (0x9 << 24) #define 
FLEXCAN_MB_CODE_TX_DATA (0xc << 24) #define FLEXCAN_MB_CODE_TX_TANSWER (0xe << 24) #define FLEXCAN_MB_CNT_EDL BIT(31) #define FLEXCAN_MB_CNT_BRS BIT(30) #define FLEXCAN_MB_CNT_ESI BIT(29) #define FLEXCAN_MB_CNT_SRR BIT(22) #define FLEXCAN_MB_CNT_IDE BIT(21) #define FLEXCAN_MB_CNT_RTR BIT(20) #define FLEXCAN_MB_CNT_LENGTH(x) (((x) & 0xf) << 16) #define FLEXCAN_MB_CNT_TIMESTAMP(x) ((x) & 0xffff) #define FLEXCAN_TIMEOUT_US (250) /* Structure of the message buffer */ struct flexcan_mb { u32 can_ctrl; u32 can_id; u32 data[]; }; /* Structure of the hardware registers */ struct flexcan_regs { u32 mcr; /* 0x00 */ u32 ctrl; /* 0x04 - Not affected by Soft Reset */ u32 timer; /* 0x08 */ u32 tcr; /* 0x0c */ u32 rxgmask; /* 0x10 - Not affected by Soft Reset */ u32 rx14mask; /* 0x14 - Not affected by Soft Reset */ u32 rx15mask; /* 0x18 - Not affected by Soft Reset */ u32 ecr; /* 0x1c */ u32 esr; /* 0x20 */ u32 imask2; /* 0x24 */ u32 imask1; /* 0x28 */ u32 iflag2; /* 0x2c */ u32 iflag1; /* 0x30 */ union { /* 0x34 */ u32 gfwr_mx28; /* MX28, MX53 */ u32 ctrl2; /* MX6, VF610 - Not affected by Soft Reset */ }; u32 esr2; /* 0x38 */ u32 imeur; /* 0x3c */ u32 lrfr; /* 0x40 */ u32 crcr; /* 0x44 */ u32 rxfgmask; /* 0x48 */ u32 rxfir; /* 0x4c - Not affected by Soft Reset */ u32 cbt; /* 0x50 - Not affected by Soft Reset */ u32 _reserved2; /* 0x54 */ u32 dbg1; /* 0x58 */ u32 dbg2; /* 0x5c */ u32 _reserved3[8]; /* 0x60 */ struct_group(init, u8 mb[2][512]; /* 0x80 - Not affected by Soft Reset */ /* FIFO-mode: * MB * 0x080...0x08f 0 RX message buffer * 0x090...0x0df 1-5 reserved * 0x0e0...0x0ff 6-7 8 entry ID table * (mx25, mx28, mx35, mx53) * 0x0e0...0x2df 6-7..37 8..128 entry ID table * size conf'ed via ctrl2::RFFN * (mx6, vf610) */ u32 _reserved4[256]; /* 0x480 */ u32 rximr[64]; /* 0x880 - Not affected by Soft Reset */ u32 _reserved5[24]; /* 0x980 */ u32 gfwr_mx6; /* 0x9e0 - MX6 */ u32 _reserved6[39]; /* 0x9e4 */ u32 _rxfir[6]; /* 0xa80 */ u32 _reserved8[2]; /* 0xa98 */ u32 _rxmgmask; /* 0xaa0 */ u32 _rxfgmask; /* 0xaa4 */ u32 _rx14mask; /* 0xaa8 */ u32 _rx15mask; /* 0xaac */ u32 tx_smb[4]; /* 0xab0 */ u32 rx_smb0[4]; /* 0xac0 */ u32 rx_smb1[4]; /* 0xad0 */ ); u32 mecr; /* 0xae0 */ u32 erriar; /* 0xae4 */ u32 erridpr; /* 0xae8 */ u32 errippr; /* 0xaec */ u32 rerrar; /* 0xaf0 */ u32 rerrdr; /* 0xaf4 */ u32 rerrsynr; /* 0xaf8 */ u32 errsr; /* 0xafc */ u32 _reserved7[64]; /* 0xb00 */ u32 fdctrl; /* 0xc00 - Not affected by Soft Reset */ u32 fdcbt; /* 0xc04 - Not affected by Soft Reset */ u32 fdcrc; /* 0xc08 */ u32 _reserved9[199]; /* 0xc0c */ struct_group(init_fd, u32 tx_smb_fd[18]; /* 0xf28 */ u32 rx_smb0_fd[18]; /* 0xf70 */ u32 rx_smb1_fd[18]; /* 0xfb8 */ ); }; static_assert(sizeof(struct flexcan_regs) == 0x4 * 18 + 0xfb8); static const struct flexcan_devtype_data fsl_mcf5441x_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_NR_IRQ_3 | FLEXCAN_QUIRK_NR_MB_16 | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_p1010_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_imx25_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_imx28_devtype_data = { .quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE 
| FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_FIFO, }; static const struct flexcan_devtype_data fsl_imx6q_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, }; static const struct flexcan_devtype_data fsl_imx8qm_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, }; static struct flexcan_devtype_data fsl_imx8mp_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR | FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, }; static struct flexcan_devtype_data fsl_imx93_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_AUTO_STOP_MODE | FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, }; static const struct flexcan_devtype_data fsl_vf610_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, }; static const struct flexcan_devtype_data fsl_ls1021a_r2_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, }; static const struct flexcan_devtype_data fsl_lx2160a_r1_devtype_data = { .quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS | FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_BROKEN_PERR_STATE | FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_FD | FLEXCAN_QUIRK_SUPPORT_ECC | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR, }; static const struct can_bittiming_const flexcan_bittiming_const = { .name = DRV_NAME, .tseg1_min = 4, .tseg1_max = 16, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 256, .brp_inc = 1, }; static const struct can_bittiming_const flexcan_fd_bittiming_const = { .name = DRV_NAME, .tseg1_min = 2, .tseg1_max = 96, .tseg2_min = 2, .tseg2_max = 32, .sjw_max = 16, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; static const struct can_bittiming_const flexcan_fd_data_bittiming_const = { .name = DRV_NAME, .tseg1_min = 2, .tseg1_max = 39, .tseg2_min = 2, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 1024, .brp_inc = 1, }; /* FlexCAN module is essentially modelled as a little-endian IP in most * SoCs, i.e the registers as well as the message buffer areas are * implemented in a little-endian fashion. * * However there are some SoCs (e.g. LS1021A) which implement the FlexCAN * module in a big-endian fashion (i.e the registers as well as the * message buffer areas are implemented in a big-endian way). 
* * In addition, the FlexCAN module can be found on SoCs having ARM or * PPC cores. So, we need to abstract off the register read/write * functions, ensuring that these cater to all the combinations of module * endianness and underlying CPU endianness. */ static inline u32 flexcan_read_be(void __iomem *addr) { return ioread32be(addr); } static inline void flexcan_write_be(u32 val, void __iomem *addr) { iowrite32be(val, addr); } static inline u32 flexcan_read_le(void __iomem *addr) { return ioread32(addr); } static inline void flexcan_write_le(u32 val, void __iomem *addr) { iowrite32(val, addr); } static struct flexcan_mb __iomem *flexcan_get_mb(const struct flexcan_priv *priv, u8 mb_index) { u8 bank_size; bool bank; if (WARN_ON(mb_index >= priv->mb_count)) return NULL; bank_size = sizeof(priv->regs->mb[0]) / priv->mb_size; bank = mb_index >= bank_size; if (bank) mb_index -= bank_size; return (struct flexcan_mb __iomem *) (&priv->regs->mb[bank][priv->mb_size * mb_index]); } static int flexcan_low_power_enter_ack(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) udelay(10); if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) return -ETIMEDOUT; return 0; } static int flexcan_low_power_exit_ack(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)) udelay(10); if (priv->read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK) return -ETIMEDOUT; return 0; } static void flexcan_enable_wakeup_irq(struct flexcan_priv *priv, bool enable) { struct flexcan_regs __iomem *regs = priv->regs; u32 reg_mcr; reg_mcr = priv->read(&regs->mcr); if (enable) reg_mcr |= FLEXCAN_MCR_WAK_MSK; else reg_mcr &= ~FLEXCAN_MCR_WAK_MSK; priv->write(reg_mcr, &regs->mcr); } static int flexcan_stop_mode_enable_scfw(struct flexcan_priv *priv, bool enabled) { u8 idx = priv->scu_idx; u32 rsrc_id, val; rsrc_id = IMX_SC_R_CAN(idx); if (enabled) val = 1; else val = 0; /* stop mode request via scu firmware */ return imx_sc_misc_set_control(priv->sc_ipc_handle, rsrc_id, IMX_SC_C_IPG_STOP, val); } static inline int flexcan_enter_stop_mode(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; u32 reg_mcr; int ret; reg_mcr = priv->read(&regs->mcr); reg_mcr |= FLEXCAN_MCR_SLF_WAK; priv->write(reg_mcr, &regs->mcr); /* enable stop request */ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) { ret = flexcan_stop_mode_enable_scfw(priv, true); if (ret < 0) return ret; } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) { regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 1 << priv->stm.req_bit, 1 << priv->stm.req_bit); } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) { /* For the auto stop mode, software do nothing, hardware will cover * all the operation automatically after system go into low power mode. 
*/ return 0; } return flexcan_low_power_enter_ack(priv); } static inline int flexcan_exit_stop_mode(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; u32 reg_mcr; int ret; /* remove stop request */ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) { ret = flexcan_stop_mode_enable_scfw(priv, false); if (ret < 0) return ret; } else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) { regmap_update_bits(priv->stm.gpr, priv->stm.req_gpr, 1 << priv->stm.req_bit, 0); } reg_mcr = priv->read(&regs->mcr); reg_mcr &= ~FLEXCAN_MCR_SLF_WAK; priv->write(reg_mcr, &regs->mcr); /* For the auto stop mode, hardware will exist stop mode * automatically after system go out of low power mode. */ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) return 0; return flexcan_low_power_exit_ack(priv); } static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK); priv->write(reg_ctrl, &regs->ctrl); } static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK); priv->write(reg_ctrl, &regs->ctrl); } static int flexcan_clks_enable(const struct flexcan_priv *priv) { int err = 0; if (priv->clk_ipg) { err = clk_prepare_enable(priv->clk_ipg); if (err) return err; } if (priv->clk_per) { err = clk_prepare_enable(priv->clk_per); if (err) clk_disable_unprepare(priv->clk_ipg); } return err; } static void flexcan_clks_disable(const struct flexcan_priv *priv) { clk_disable_unprepare(priv->clk_per); clk_disable_unprepare(priv->clk_ipg); } static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv) { if (!priv->reg_xceiver) return 0; return regulator_enable(priv->reg_xceiver); } static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv) { if (!priv->reg_xceiver) return 0; return regulator_disable(priv->reg_xceiver); } static int flexcan_chip_enable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; u32 reg; reg = priv->read(&regs->mcr); reg &= ~FLEXCAN_MCR_MDIS; priv->write(reg, &regs->mcr); return flexcan_low_power_exit_ack(priv); } static int flexcan_chip_disable(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; u32 reg; reg = priv->read(&regs->mcr); reg |= FLEXCAN_MCR_MDIS; priv->write(reg, &regs->mcr); return flexcan_low_power_enter_ack(priv); } static int flexcan_chip_freeze(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; unsigned int timeout; u32 bitrate = priv->can.bittiming.bitrate; u32 reg; if (bitrate) timeout = 1000 * 1000 * 10 / bitrate; else timeout = FLEXCAN_TIMEOUT_US / 10; reg = priv->read(&regs->mcr); reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT; priv->write(reg, &regs->mcr); while (timeout-- && !(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) udelay(100); if (!(priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) return -ETIMEDOUT; return 0; } static int flexcan_chip_unfreeze(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; u32 reg; reg = priv->read(&regs->mcr); reg &= ~FLEXCAN_MCR_HALT; priv->write(reg, &regs->mcr); while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)) udelay(10); if (priv->read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK) return -ETIMEDOUT; return 0; } static int 
flexcan_chip_softreset(struct flexcan_priv *priv) { struct flexcan_regs __iomem *regs = priv->regs; unsigned int timeout = FLEXCAN_TIMEOUT_US / 10; priv->write(FLEXCAN_MCR_SOFTRST, &regs->mcr); while (timeout-- && (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)) udelay(10); if (priv->read(&regs->mcr) & FLEXCAN_MCR_SOFTRST) return -ETIMEDOUT; return 0; } static int __flexcan_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; u32 reg = priv->read(&regs->ecr); bec->txerr = (reg >> 0) & 0xff; bec->rxerr = (reg >> 8) & 0xff; return 0; } static int flexcan_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { const struct flexcan_priv *priv = netdev_priv(dev); int err; err = pm_runtime_resume_and_get(priv->dev); if (err < 0) return err; err = __flexcan_get_berr_counter(dev, bec); pm_runtime_put(priv->dev); return err; } static netdev_tx_t flexcan_start_xmit(struct sk_buff *skb, struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); struct canfd_frame *cfd = (struct canfd_frame *)skb->data; u32 can_id; u32 data; u32 ctrl = FLEXCAN_MB_CODE_TX_DATA | ((can_fd_len2dlc(cfd->len)) << 16); int i; if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); if (cfd->can_id & CAN_EFF_FLAG) { can_id = cfd->can_id & CAN_EFF_MASK; ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR; } else { can_id = (cfd->can_id & CAN_SFF_MASK) << 18; } if (cfd->can_id & CAN_RTR_FLAG) ctrl |= FLEXCAN_MB_CNT_RTR; if (can_is_canfd_skb(skb)) { ctrl |= FLEXCAN_MB_CNT_EDL; if (cfd->flags & CANFD_BRS) ctrl |= FLEXCAN_MB_CNT_BRS; } for (i = 0; i < cfd->len; i += sizeof(u32)) { data = be32_to_cpup((__be32 *)&cfd->data[i]); priv->write(data, &priv->tx_mb->data[i / sizeof(u32)]); } can_put_echo_skb(skb, dev, 0, 0); priv->write(can_id, &priv->tx_mb->can_id); priv->write(ctrl, &priv->tx_mb->can_ctrl); /* Errata ERR005829 step8: * Write twice INACTIVE(0x8) code to first MB. 
*/ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, &priv->tx_mb_reserved->can_ctrl); priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, &priv->tx_mb_reserved->can_ctrl); return NETDEV_TX_OK; } static void flexcan_irq_bus_err(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; struct sk_buff *skb; struct can_frame *cf; bool rx_errors = false, tx_errors = false; u32 timestamp; int err; timestamp = priv->read(&regs->timer) << 16; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return; cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; if (reg_esr & FLEXCAN_ESR_BIT1_ERR) { netdev_dbg(dev, "BIT1_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT1; tx_errors = true; } if (reg_esr & FLEXCAN_ESR_BIT0_ERR) { netdev_dbg(dev, "BIT0_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT0; tx_errors = true; } if (reg_esr & FLEXCAN_ESR_ACK_ERR) { netdev_dbg(dev, "ACK_ERR irq\n"); cf->can_id |= CAN_ERR_ACK; cf->data[3] = CAN_ERR_PROT_LOC_ACK; tx_errors = true; } if (reg_esr & FLEXCAN_ESR_CRC_ERR) { netdev_dbg(dev, "CRC_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_BIT; cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ; rx_errors = true; } if (reg_esr & FLEXCAN_ESR_FRM_ERR) { netdev_dbg(dev, "FRM_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_FORM; rx_errors = true; } if (reg_esr & FLEXCAN_ESR_STF_ERR) { netdev_dbg(dev, "STF_ERR irq\n"); cf->data[2] |= CAN_ERR_PROT_STUFF; rx_errors = true; } priv->can.can_stats.bus_error++; if (rx_errors) dev->stats.rx_errors++; if (tx_errors) dev->stats.tx_errors++; err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) dev->stats.rx_fifo_errors++; } static void flexcan_irq_state(struct net_device *dev, u32 reg_esr) { struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; struct sk_buff *skb; struct can_frame *cf; enum can_state new_state, rx_state, tx_state; int flt; struct can_berr_counter bec; u32 timestamp; int err; flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK; if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) { tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ? CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ? CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE; new_state = max(tx_state, rx_state); } else { __flexcan_get_berr_counter(dev, &bec); new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ? CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF; rx_state = bec.rxerr >= bec.txerr ? new_state : 0; tx_state = bec.rxerr <= bec.txerr ? 
new_state : 0; } /* state hasn't changed */ if (likely(new_state == priv->can.state)) return; timestamp = priv->read(&regs->timer) << 16; skb = alloc_can_err_skb(dev, &cf); if (unlikely(!skb)) return; can_change_state(dev, cf, tx_state, rx_state); if (unlikely(new_state == CAN_STATE_BUS_OFF)) can_bus_off(dev); err = can_rx_offload_queue_timestamp(&priv->offload, skb, timestamp); if (err) dev->stats.rx_fifo_errors++; } static inline u64 flexcan_read64_mask(struct flexcan_priv *priv, void __iomem *addr, u64 mask) { u64 reg = 0; if (upper_32_bits(mask)) reg = (u64)priv->read(addr - 4) << 32; if (lower_32_bits(mask)) reg |= priv->read(addr); return reg & mask; } static inline void flexcan_write64(struct flexcan_priv *priv, u64 val, void __iomem *addr) { if (upper_32_bits(val)) priv->write(upper_32_bits(val), addr - 4); if (lower_32_bits(val)) priv->write(lower_32_bits(val), addr); } static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv) { return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->rx_mask); } static inline u64 flexcan_read_reg_iflag_tx(struct flexcan_priv *priv) { return flexcan_read64_mask(priv, &priv->regs->iflag1, priv->tx_mask); } static inline struct flexcan_priv *rx_offload_to_priv(struct can_rx_offload *offload) { return container_of(offload, struct flexcan_priv, offload); } static struct sk_buff *flexcan_mailbox_read(struct can_rx_offload *offload, unsigned int n, u32 *timestamp, bool drop) { struct flexcan_priv *priv = rx_offload_to_priv(offload); struct flexcan_regs __iomem *regs = priv->regs; struct flexcan_mb __iomem *mb; struct sk_buff *skb; struct canfd_frame *cfd; u32 reg_ctrl, reg_id, reg_iflag1; int i; mb = flexcan_get_mb(priv, n); if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { u32 code; do { reg_ctrl = priv->read(&mb->can_ctrl); } while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT); /* is this MB empty? 
*/ code = reg_ctrl & FLEXCAN_MB_CODE_MASK; if ((code != FLEXCAN_MB_CODE_RX_FULL) && (code != FLEXCAN_MB_CODE_RX_OVERRUN)) return NULL; if (code == FLEXCAN_MB_CODE_RX_OVERRUN) { /* This MB was overrun, we lost data */ offload->dev->stats.rx_over_errors++; offload->dev->stats.rx_errors++; } } else { reg_iflag1 = priv->read(&regs->iflag1); if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE)) return NULL; reg_ctrl = priv->read(&mb->can_ctrl); } if (unlikely(drop)) { skb = ERR_PTR(-ENOBUFS); goto mark_as_read; } if (reg_ctrl & FLEXCAN_MB_CNT_EDL) skb = alloc_canfd_skb(offload->dev, &cfd); else skb = alloc_can_skb(offload->dev, (struct can_frame **)&cfd); if (unlikely(!skb)) { skb = ERR_PTR(-ENOMEM); goto mark_as_read; } /* increase timstamp to full 32 bit */ *timestamp = reg_ctrl << 16; reg_id = priv->read(&mb->can_id); if (reg_ctrl & FLEXCAN_MB_CNT_IDE) cfd->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG; else cfd->can_id = (reg_id >> 18) & CAN_SFF_MASK; if (reg_ctrl & FLEXCAN_MB_CNT_EDL) { cfd->len = can_fd_dlc2len((reg_ctrl >> 16) & 0xf); if (reg_ctrl & FLEXCAN_MB_CNT_BRS) cfd->flags |= CANFD_BRS; } else { cfd->len = can_cc_dlc2len((reg_ctrl >> 16) & 0xf); if (reg_ctrl & FLEXCAN_MB_CNT_RTR) cfd->can_id |= CAN_RTR_FLAG; } if (reg_ctrl & FLEXCAN_MB_CNT_ESI) cfd->flags |= CANFD_ESI; for (i = 0; i < cfd->len; i += sizeof(u32)) { __be32 data = cpu_to_be32(priv->read(&mb->data[i / sizeof(u32)])); *(__be32 *)(cfd->data + i) = data; } mark_as_read: if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) flexcan_write64(priv, FLEXCAN_IFLAG_MB(n), &regs->iflag1); else priv->write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1); /* Read the Free Running Timer. It is optional but recommended * to unlock Mailbox as soon as possible and make it available * for reception. 
*/ priv->read(&regs->timer); return skb; } static irqreturn_t flexcan_irq(int irq, void *dev_id) { struct net_device *dev = dev_id; struct net_device_stats *stats = &dev->stats; struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; irqreturn_t handled = IRQ_NONE; u64 reg_iflag_tx; u32 reg_esr; enum can_state last_state = priv->can.state; /* reception interrupt */ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { u64 reg_iflag_rx; int ret; while ((reg_iflag_rx = flexcan_read_reg_iflag_rx(priv))) { handled = IRQ_HANDLED; ret = can_rx_offload_irq_offload_timestamp(&priv->offload, reg_iflag_rx); if (!ret) break; } } else { u32 reg_iflag1; reg_iflag1 = priv->read(&regs->iflag1); if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) { handled = IRQ_HANDLED; can_rx_offload_irq_offload_fifo(&priv->offload); } /* FIFO overflow interrupt */ if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) { handled = IRQ_HANDLED; priv->write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1); dev->stats.rx_over_errors++; dev->stats.rx_errors++; } } reg_iflag_tx = flexcan_read_reg_iflag_tx(priv); /* transmission complete interrupt */ if (reg_iflag_tx & priv->tx_mask) { u32 reg_ctrl = priv->read(&priv->tx_mb->can_ctrl); handled = IRQ_HANDLED; stats->tx_bytes += can_rx_offload_get_echo_skb_queue_timestamp(&priv->offload, 0, reg_ctrl << 16, NULL); stats->tx_packets++; /* after sending a RTR frame MB is in RX mode */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, &priv->tx_mb->can_ctrl); flexcan_write64(priv, priv->tx_mask, &regs->iflag1); netif_wake_queue(dev); } reg_esr = priv->read(&regs->esr); /* ACK all bus error, state change and wake IRQ sources */ if (reg_esr & (FLEXCAN_ESR_ALL_INT | FLEXCAN_ESR_WAK_INT)) { handled = IRQ_HANDLED; priv->write(reg_esr & (FLEXCAN_ESR_ALL_INT | FLEXCAN_ESR_WAK_INT), &regs->esr); } /* state change interrupt or broken error state quirk fix is enabled */ if ((reg_esr & FLEXCAN_ESR_ERR_STATE) || (priv->devtype_data.quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE | FLEXCAN_QUIRK_BROKEN_PERR_STATE))) flexcan_irq_state(dev, reg_esr); /* bus error IRQ - handle if bus error reporting is activated */ if ((reg_esr & FLEXCAN_ESR_ERR_BUS) && (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) flexcan_irq_bus_err(dev, reg_esr); /* availability of error interrupt among state transitions in case * bus error reporting is de-activated and * FLEXCAN_QUIRK_BROKEN_PERR_STATE is enabled: * +--------------------------------------------------------------+ * | +----------------------------------------------+ [stopped / | * | | | sleeping] -+ * +-+-> active <-> warning <-> passive -> bus off -+ * ___________^^^^^^^^^^^^_______________________________ * disabled(1) enabled disabled * * (1): enabled if FLEXCAN_QUIRK_BROKEN_WERR_STATE is enabled */ if ((last_state != priv->can.state) && (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_PERR_STATE) && !(priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING)) { switch (priv->can.state) { case CAN_STATE_ERROR_ACTIVE: if (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE) flexcan_error_irq_enable(priv); else flexcan_error_irq_disable(priv); break; case CAN_STATE_ERROR_WARNING: flexcan_error_irq_enable(priv); break; case CAN_STATE_ERROR_PASSIVE: case CAN_STATE_BUS_OFF: flexcan_error_irq_disable(priv); break; default: break; } } if (handled) can_rx_offload_irq_finish(&priv->offload); return handled; } static void flexcan_set_bittiming_ctrl(const struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); const 
struct can_bittiming *bt = &priv->can.bittiming; struct flexcan_regs __iomem *regs = priv->regs; u32 reg; reg = priv->read(&regs->ctrl); reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) | FLEXCAN_CTRL_RJW(0x3) | FLEXCAN_CTRL_PSEG1(0x7) | FLEXCAN_CTRL_PSEG2(0x7) | FLEXCAN_CTRL_PROPSEG(0x7)); reg |= FLEXCAN_CTRL_PRESDIV(bt->brp - 1) | FLEXCAN_CTRL_PSEG1(bt->phase_seg1 - 1) | FLEXCAN_CTRL_PSEG2(bt->phase_seg2 - 1) | FLEXCAN_CTRL_RJW(bt->sjw - 1) | FLEXCAN_CTRL_PROPSEG(bt->prop_seg - 1); netdev_dbg(dev, "writing ctrl=0x%08x\n", reg); priv->write(reg, &regs->ctrl); /* print chip status */ netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__, priv->read(&regs->mcr), priv->read(&regs->ctrl)); } static void flexcan_set_bittiming_cbt(const struct net_device *dev) { struct flexcan_priv *priv = netdev_priv(dev); struct can_bittiming *bt = &priv->can.bittiming; struct can_bittiming *dbt = &priv->can.data_bittiming; struct flexcan_regs __iomem *regs = priv->regs; u32 reg_cbt, reg_fdctrl; /* CBT */ /* CBT[EPSEG1] is 5 bit long and CBT[EPROPSEG] is 6 bit * long. The can_calc_bittiming() tries to divide the tseg1 * equally between phase_seg1 and prop_seg, which may not fit * in CBT register. Therefore, if phase_seg1 is more than * possible value, increase prop_seg and decrease phase_seg1. */ if (bt->phase_seg1 > 0x20) { bt->prop_seg += (bt->phase_seg1 - 0x20); bt->phase_seg1 = 0x20; } reg_cbt = FLEXCAN_CBT_BTF | FIELD_PREP(FLEXCAN_CBT_EPRESDIV_MASK, bt->brp - 1) | FIELD_PREP(FLEXCAN_CBT_ERJW_MASK, bt->sjw - 1) | FIELD_PREP(FLEXCAN_CBT_EPROPSEG_MASK, bt->prop_seg - 1) | FIELD_PREP(FLEXCAN_CBT_EPSEG1_MASK, bt->phase_seg1 - 1) | FIELD_PREP(FLEXCAN_CBT_EPSEG2_MASK, bt->phase_seg2 - 1); netdev_dbg(dev, "writing cbt=0x%08x\n", reg_cbt); priv->write(reg_cbt, &regs->cbt); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { u32 reg_fdcbt, reg_ctrl2; if (bt->brp != dbt->brp) netdev_warn(dev, "Data brp=%d and brp=%d don't match, this may result in a phase error. Consider using different bitrate and/or data bitrate.\n", dbt->brp, bt->brp); /* FDCBT */ /* FDCBT[FPSEG1] is 3 bit long and FDCBT[FPROPSEG] is * 5 bit long. The can_calc_bittiming tries to divide * the tseg1 equally between phase_seg1 and prop_seg, * which may not fit in FDCBT register. 
Therefore, if * phase_seg1 is more than possible value, increase * prop_seg and decrease phase_seg1 */ if (dbt->phase_seg1 > 0x8) { dbt->prop_seg += (dbt->phase_seg1 - 0x8); dbt->phase_seg1 = 0x8; } reg_fdcbt = priv->read(&regs->fdcbt); reg_fdcbt &= ~(FIELD_PREP(FLEXCAN_FDCBT_FPRESDIV_MASK, 0x3ff) | FIELD_PREP(FLEXCAN_FDCBT_FRJW_MASK, 0x7) | FIELD_PREP(FLEXCAN_FDCBT_FPROPSEG_MASK, 0x1f) | FIELD_PREP(FLEXCAN_FDCBT_FPSEG1_MASK, 0x7) | FIELD_PREP(FLEXCAN_FDCBT_FPSEG2_MASK, 0x7)); reg_fdcbt |= FIELD_PREP(FLEXCAN_FDCBT_FPRESDIV_MASK, dbt->brp - 1) | FIELD_PREP(FLEXCAN_FDCBT_FRJW_MASK, dbt->sjw - 1) | FIELD_PREP(FLEXCAN_FDCBT_FPROPSEG_MASK, dbt->prop_seg) | FIELD_PREP(FLEXCAN_FDCBT_FPSEG1_MASK, dbt->phase_seg1 - 1) | FIELD_PREP(FLEXCAN_FDCBT_FPSEG2_MASK, dbt->phase_seg2 - 1); netdev_dbg(dev, "writing fdcbt=0x%08x\n", reg_fdcbt); priv->write(reg_fdcbt, &regs->fdcbt); /* CTRL2 */ reg_ctrl2 = priv->read(&regs->ctrl2); reg_ctrl2 &= ~FLEXCAN_CTRL2_ISOCANFDEN; if (!(priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)) reg_ctrl2 |= FLEXCAN_CTRL2_ISOCANFDEN; netdev_dbg(dev, "writing ctrl2=0x%08x\n", reg_ctrl2); priv->write(reg_ctrl2, &regs->ctrl2); } /* FDCTRL */ reg_fdctrl = priv->read(&regs->fdctrl); reg_fdctrl &= ~(FLEXCAN_FDCTRL_FDRATE | FIELD_PREP(FLEXCAN_FDCTRL_TDCOFF, 0x1f)); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { reg_fdctrl |= FLEXCAN_FDCTRL_FDRATE; if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) { /* TDC must be disabled for Loop Back mode */ reg_fdctrl &= ~FLEXCAN_FDCTRL_TDCEN; } else { reg_fdctrl |= FLEXCAN_FDCTRL_TDCEN | FIELD_PREP(FLEXCAN_FDCTRL_TDCOFF, ((dbt->phase_seg1 - 1) + dbt->prop_seg + 2) * ((dbt->brp - 1 ) + 1)); } } netdev_dbg(dev, "writing fdctrl=0x%08x\n", reg_fdctrl); priv->write(reg_fdctrl, &regs->fdctrl); netdev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x ctrl2=0x%08x fdctrl=0x%08x cbt=0x%08x fdcbt=0x%08x\n", __func__, priv->read(&regs->mcr), priv->read(&regs->ctrl), priv->read(&regs->ctrl2), priv->read(&regs->fdctrl), priv->read(&regs->cbt), priv->read(&regs->fdcbt)); } static void flexcan_set_bittiming(struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; u32 reg; reg = priv->read(&regs->ctrl); reg &= ~(FLEXCAN_CTRL_LPB | FLEXCAN_CTRL_SMP | FLEXCAN_CTRL_LOM); if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) reg |= FLEXCAN_CTRL_LPB; if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) reg |= FLEXCAN_CTRL_LOM; if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) reg |= FLEXCAN_CTRL_SMP; netdev_dbg(dev, "writing ctrl=0x%08x\n", reg); priv->write(reg, &regs->ctrl); if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) return flexcan_set_bittiming_cbt(dev); else return flexcan_set_bittiming_ctrl(dev); } static void flexcan_ram_init(struct net_device *dev) { struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; u32 reg_ctrl2; /* 11.8.3.13 Detection and correction of memory errors: * CTRL2[WRMFRZ] grants write access to all memory positions * that require initialization, ranging from 0x080 to 0xADF * and from 0xF28 to 0xFFF when the CAN FD feature is enabled. * The RXMGMASK, RX14MASK, RX15MASK, and RXFGMASK registers * need to be initialized as well. MCR[RFEN] must not be set * during memory initialization. 
*/ reg_ctrl2 = priv->read(&regs->ctrl2); reg_ctrl2 |= FLEXCAN_CTRL2_WRMFRZ; priv->write(reg_ctrl2, &regs->ctrl2); memset_io(&regs->init, 0, sizeof(regs->init)); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) memset_io(&regs->init_fd, 0, sizeof(regs->init_fd)); reg_ctrl2 &= ~FLEXCAN_CTRL2_WRMFRZ; priv->write(reg_ctrl2, &regs->ctrl2); } static int flexcan_rx_offload_setup(struct net_device *dev) { struct flexcan_priv *priv = netdev_priv(dev); int err; if (priv->can.ctrlmode & CAN_CTRLMODE_FD) priv->mb_size = sizeof(struct flexcan_mb) + CANFD_MAX_DLEN; else priv->mb_size = sizeof(struct flexcan_mb) + CAN_MAX_DLEN; if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_MB_16) priv->mb_count = 16; else priv->mb_count = (sizeof(priv->regs->mb[0]) / priv->mb_size) + (sizeof(priv->regs->mb[1]) / priv->mb_size); if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) priv->tx_mb_reserved = flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_MAILBOX); else priv->tx_mb_reserved = flexcan_get_mb(priv, FLEXCAN_TX_MB_RESERVED_RX_FIFO); priv->tx_mb_idx = priv->mb_count - 1; priv->tx_mb = flexcan_get_mb(priv, priv->tx_mb_idx); priv->tx_mask = FLEXCAN_IFLAG_MB(priv->tx_mb_idx); priv->offload.mailbox_read = flexcan_mailbox_read; if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { priv->offload.mb_first = FLEXCAN_RX_MB_RX_MAILBOX_FIRST; priv->offload.mb_last = priv->mb_count - 2; priv->rx_mask = GENMASK_ULL(priv->offload.mb_last, priv->offload.mb_first); err = can_rx_offload_add_timestamp(dev, &priv->offload); } else { priv->rx_mask = FLEXCAN_IFLAG_RX_FIFO_OVERFLOW | FLEXCAN_IFLAG_RX_FIFO_AVAILABLE; err = can_rx_offload_add_fifo(dev, &priv->offload, FLEXCAN_NAPI_WEIGHT); } return err; } static void flexcan_chip_interrupts_enable(const struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; u64 reg_imask; disable_irq(dev->irq); priv->write(priv->reg_ctrl_default, &regs->ctrl); reg_imask = priv->rx_mask | priv->tx_mask; priv->write(upper_32_bits(reg_imask), &regs->imask2); priv->write(lower_32_bits(reg_imask), &regs->imask1); enable_irq(dev->irq); } static void flexcan_chip_interrupts_disable(const struct net_device *dev) { const struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; priv->write(0, &regs->imask2); priv->write(0, &regs->imask1); priv->write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL, &regs->ctrl); } /* flexcan_chip_start * * this functions is entered with clocks enabled * */ static int flexcan_chip_start(struct net_device *dev) { struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr; int err, i; struct flexcan_mb __iomem *mb; /* enable module */ err = flexcan_chip_enable(priv); if (err) return err; /* soft reset */ err = flexcan_chip_softreset(priv); if (err) goto out_chip_disable; if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_ECC) flexcan_ram_init(dev); flexcan_set_bittiming(dev); /* set freeze, halt */ err = flexcan_chip_freeze(priv); if (err) goto out_chip_disable; /* MCR * * only supervisor access * enable warning int * enable individual RX masking * choose format C * set max mailbox number */ reg_mcr = priv->read(&regs->mcr); reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff); reg_mcr |= FLEXCAN_MCR_SUPV | FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_IRMQ | FLEXCAN_MCR_IDAM_C | FLEXCAN_MCR_MAXMB(priv->tx_mb_idx); /* MCR * * FIFO: * - disable for mailbox mode * - enable for FIFO mode */ if 
(priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) reg_mcr &= ~FLEXCAN_MCR_FEN; else reg_mcr |= FLEXCAN_MCR_FEN; /* MCR * * NOTE: In loopback mode, the CAN_MCR[SRXDIS] cannot be * asserted because this will impede the self reception * of a transmitted message. This is not documented in * earlier versions of flexcan block guide. * * Self Reception: * - enable Self Reception for loopback mode * (by clearing "Self Reception Disable" bit) * - disable for normal operation */ if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) reg_mcr &= ~FLEXCAN_MCR_SRX_DIS; else reg_mcr |= FLEXCAN_MCR_SRX_DIS; /* MCR - CAN-FD */ if (priv->can.ctrlmode & CAN_CTRLMODE_FD) reg_mcr |= FLEXCAN_MCR_FDEN; else reg_mcr &= ~FLEXCAN_MCR_FDEN; netdev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr); priv->write(reg_mcr, &regs->mcr); /* CTRL * * disable timer sync feature * * disable auto busoff recovery * transmit lowest buffer first * * enable tx and rx warning interrupt * enable bus off interrupt * (== FLEXCAN_CTRL_ERR_STATE) */ reg_ctrl = priv->read(&regs->ctrl); reg_ctrl &= ~FLEXCAN_CTRL_TSYN; reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF | FLEXCAN_CTRL_ERR_STATE; /* enable the "error interrupt" (FLEXCAN_CTRL_ERR_MSK), * on most Flexcan cores, too. Otherwise we don't get * any error warning or passive interrupts. */ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE || priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) reg_ctrl |= FLEXCAN_CTRL_ERR_MSK; else reg_ctrl &= ~FLEXCAN_CTRL_ERR_MSK; /* save for later use */ priv->reg_ctrl_default = reg_ctrl; /* leave interrupts disabled for now */ reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL; netdev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl); priv->write(reg_ctrl, &regs->ctrl); if ((priv->devtype_data.quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) { reg_ctrl2 = priv->read(&regs->ctrl2); reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS; priv->write(reg_ctrl2, &regs->ctrl2); } if (priv->can.ctrlmode_supported & CAN_CTRLMODE_FD) { u32 reg_fdctrl; reg_fdctrl = priv->read(&regs->fdctrl); reg_fdctrl &= ~(FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1, 0x3) | FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0, 0x3)); if (priv->can.ctrlmode & CAN_CTRLMODE_FD) { reg_fdctrl |= FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1, FLEXCAN_FDCTRL_MBDSR_64) | FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0, FLEXCAN_FDCTRL_MBDSR_64); } else { reg_fdctrl |= FIELD_PREP(FLEXCAN_FDCTRL_MBDSR1, FLEXCAN_FDCTRL_MBDSR_8) | FIELD_PREP(FLEXCAN_FDCTRL_MBDSR0, FLEXCAN_FDCTRL_MBDSR_8); } netdev_dbg(dev, "%s: writing fdctrl=0x%08x", __func__, reg_fdctrl); priv->write(reg_fdctrl, &regs->fdctrl); } if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) { for (i = priv->offload.mb_first; i <= priv->offload.mb_last; i++) { mb = flexcan_get_mb(priv, i); priv->write(FLEXCAN_MB_CODE_RX_EMPTY, &mb->can_ctrl); } } else { /* clear and invalidate unused mailboxes first */ for (i = FLEXCAN_TX_MB_RESERVED_RX_FIFO; i < priv->mb_count; i++) { mb = flexcan_get_mb(priv, i); priv->write(FLEXCAN_MB_CODE_RX_INACTIVE, &mb->can_ctrl); } } /* Errata ERR005829: mark first TX mailbox as INACTIVE */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, &priv->tx_mb_reserved->can_ctrl); /* mark TX mailbox as INACTIVE */ priv->write(FLEXCAN_MB_CODE_TX_INACTIVE, &priv->tx_mb->can_ctrl); /* acceptance mask/acceptance code (accept everything) */ priv->write(0x0, &regs->rxgmask); priv->write(0x0, &regs->rx14mask); priv->write(0x0, &regs->rx15mask); if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_RXFG) priv->write(0x0, &regs->rxfgmask); /* clear acceptance filters */ for 
(i = 0; i < priv->mb_count; i++) priv->write(0, &regs->rximr[i]); /* On Vybrid, disable non-correctable errors interrupt and * freeze mode. It still can correct the correctable errors * when HW supports ECC. * * This also works around errata e5295 which generates false * positive memory errors and put the device in freeze mode. */ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_DISABLE_MECR) { /* Follow the protocol as described in "Detection * and Correction of Memory Errors" to write to * MECR register (step 1 - 5) * * 1. By default, CTRL2[ECRWRE] = 0, MECR[ECRWRDIS] = 1 * 2. set CTRL2[ECRWRE] */ reg_ctrl2 = priv->read(&regs->ctrl2); reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE; priv->write(reg_ctrl2, &regs->ctrl2); /* 3. clear MECR[ECRWRDIS] */ reg_mecr = priv->read(&regs->mecr); reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS; priv->write(reg_mecr, &regs->mecr); /* 4. all writes to MECR must keep MECR[ECRWRDIS] cleared */ reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK | FLEXCAN_MECR_FANCEI_MSK); priv->write(reg_mecr, &regs->mecr); /* 5. after configuration done, lock MECR by either * setting MECR[ECRWRDIS] or clearing CTRL2[ECRWRE] */ reg_mecr |= FLEXCAN_MECR_ECRWRDIS; priv->write(reg_mecr, &regs->mecr); reg_ctrl2 &= ~FLEXCAN_CTRL2_ECRWRE; priv->write(reg_ctrl2, &regs->ctrl2); } /* synchronize with the can bus */ err = flexcan_chip_unfreeze(priv); if (err) goto out_chip_disable; priv->can.state = CAN_STATE_ERROR_ACTIVE; /* print chip status */ netdev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__, priv->read(&regs->mcr), priv->read(&regs->ctrl)); return 0; out_chip_disable: flexcan_chip_disable(priv); return err; } /* __flexcan_chip_stop * * this function is entered with clocks enabled */ static int __flexcan_chip_stop(struct net_device *dev, bool disable_on_error) { struct flexcan_priv *priv = netdev_priv(dev); int err; /* freeze + disable module */ err = flexcan_chip_freeze(priv); if (err && !disable_on_error) return err; err = flexcan_chip_disable(priv); if (err && !disable_on_error) goto out_chip_unfreeze; priv->can.state = CAN_STATE_STOPPED; return 0; out_chip_unfreeze: flexcan_chip_unfreeze(priv); return err; } static inline int flexcan_chip_stop_disable_on_error(struct net_device *dev) { return __flexcan_chip_stop(dev, true); } static inline int flexcan_chip_stop(struct net_device *dev) { return __flexcan_chip_stop(dev, false); } static int flexcan_open(struct net_device *dev) { struct flexcan_priv *priv = netdev_priv(dev); int err; if ((priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) && (priv->can.ctrlmode & CAN_CTRLMODE_FD)) { netdev_err(dev, "Three Samples mode and CAN-FD mode can't be used together\n"); return -EINVAL; } err = pm_runtime_resume_and_get(priv->dev); if (err < 0) return err; err = open_candev(dev); if (err) goto out_runtime_put; err = flexcan_transceiver_enable(priv); if (err) goto out_close; err = flexcan_rx_offload_setup(dev); if (err) goto out_transceiver_disable; err = flexcan_chip_start(dev); if (err) goto out_can_rx_offload_del; can_rx_offload_enable(&priv->offload); err = request_irq(dev->irq, flexcan_irq, IRQF_SHARED, dev->name, dev); if (err) goto out_can_rx_offload_disable; if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { err = request_irq(priv->irq_boff, flexcan_irq, IRQF_SHARED, dev->name, dev); if (err) goto out_free_irq; err = request_irq(priv->irq_err, flexcan_irq, IRQF_SHARED, dev->name, dev); if (err) goto out_free_irq_boff; } flexcan_chip_interrupts_enable(dev); netif_start_queue(dev); return 0; out_free_irq_boff: 
free_irq(priv->irq_boff, dev); out_free_irq: free_irq(dev->irq, dev); out_can_rx_offload_disable: can_rx_offload_disable(&priv->offload); flexcan_chip_stop(dev); out_can_rx_offload_del: can_rx_offload_del(&priv->offload); out_transceiver_disable: flexcan_transceiver_disable(priv); out_close: close_candev(dev); out_runtime_put: pm_runtime_put(priv->dev); return err; } static int flexcan_close(struct net_device *dev) { struct flexcan_priv *priv = netdev_priv(dev); netif_stop_queue(dev); flexcan_chip_interrupts_disable(dev); if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { free_irq(priv->irq_err, dev); free_irq(priv->irq_boff, dev); } free_irq(dev->irq, dev); can_rx_offload_disable(&priv->offload); flexcan_chip_stop_disable_on_error(dev); can_rx_offload_del(&priv->offload); flexcan_transceiver_disable(priv); close_candev(dev); pm_runtime_put(priv->dev); return 0; } static int flexcan_set_mode(struct net_device *dev, enum can_mode mode) { int err; switch (mode) { case CAN_MODE_START: err = flexcan_chip_start(dev); if (err) return err; flexcan_chip_interrupts_enable(dev); netif_wake_queue(dev); break; default: return -EOPNOTSUPP; } return 0; } static const struct net_device_ops flexcan_netdev_ops = { .ndo_open = flexcan_open, .ndo_stop = flexcan_close, .ndo_start_xmit = flexcan_start_xmit, .ndo_change_mtu = can_change_mtu, }; static int register_flexcandev(struct net_device *dev) { struct flexcan_priv *priv = netdev_priv(dev); struct flexcan_regs __iomem *regs = priv->regs; u32 reg, err; err = flexcan_clks_enable(priv); if (err) return err; /* select "bus clock", chip must be disabled */ err = flexcan_chip_disable(priv); if (err) goto out_clks_disable; reg = priv->read(&regs->ctrl); if (priv->clk_src) reg |= FLEXCAN_CTRL_CLK_SRC; else reg &= ~FLEXCAN_CTRL_CLK_SRC; priv->write(reg, &regs->ctrl); err = flexcan_chip_enable(priv); if (err) goto out_chip_disable; /* set freeze, halt */ err = flexcan_chip_freeze(priv); if (err) goto out_chip_disable; /* activate FIFO, restrict register access */ reg = priv->read(&regs->mcr); reg |= FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV; priv->write(reg, &regs->mcr); /* Currently we only support newer versions of this core * featuring a RX hardware FIFO (although this driver doesn't * make use of it on some cores). Older cores, found on some * Coldfire derivates are not tested. */ reg = priv->read(&regs->mcr); if (!(reg & FLEXCAN_MCR_FEN)) { netdev_err(dev, "Could not enable RX FIFO, unsupported core\n"); err = -ENODEV; goto out_chip_disable; } err = register_candev(dev); if (err) goto out_chip_disable; /* Disable core and let pm_runtime_put() disable the clocks. * If CONFIG_PM is not enabled, the clocks will stay powered. */ flexcan_chip_disable(priv); pm_runtime_put(priv->dev); return 0; out_chip_disable: flexcan_chip_disable(priv); out_clks_disable: flexcan_clks_disable(priv); return err; } static void unregister_flexcandev(struct net_device *dev) { unregister_candev(dev); } static int flexcan_setup_stop_mode_gpr(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct device_node *np = pdev->dev.of_node; struct device_node *gpr_np; struct flexcan_priv *priv; phandle phandle; u32 out_val[3]; int ret; if (!np) return -EINVAL; /* stop mode property format is: * <&gpr req_gpr req_bit>. 
*/ ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val, ARRAY_SIZE(out_val)); if (ret) { dev_dbg(&pdev->dev, "no stop-mode property\n"); return ret; } phandle = *out_val; gpr_np = of_find_node_by_phandle(phandle); if (!gpr_np) { dev_dbg(&pdev->dev, "could not find gpr node by phandle\n"); return -ENODEV; } priv = netdev_priv(dev); priv->stm.gpr = syscon_node_to_regmap(gpr_np); if (IS_ERR(priv->stm.gpr)) { dev_dbg(&pdev->dev, "could not find gpr regmap\n"); ret = PTR_ERR(priv->stm.gpr); goto out_put_node; } priv->stm.req_gpr = out_val[1]; priv->stm.req_bit = out_val[2]; dev_dbg(&pdev->dev, "gpr %s req_gpr=0x02%x req_bit=%u\n", gpr_np->full_name, priv->stm.req_gpr, priv->stm.req_bit); return 0; out_put_node: of_node_put(gpr_np); return ret; } static int flexcan_setup_stop_mode_scfw(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct flexcan_priv *priv; u8 scu_idx; int ret; ret = of_property_read_u8(pdev->dev.of_node, "fsl,scu-index", &scu_idx); if (ret < 0) { dev_dbg(&pdev->dev, "failed to get scu index\n"); return ret; } priv = netdev_priv(dev); priv->scu_idx = scu_idx; /* this function could be deferred probe, return -EPROBE_DEFER */ return imx_scu_get_handle(&priv->sc_ipc_handle); } /* flexcan_setup_stop_mode - Setup stop mode for wakeup * * Return: = 0 setup stop mode successfully or doesn't support this feature * < 0 fail to setup stop mode (could be deferred probe) */ static int flexcan_setup_stop_mode(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct flexcan_priv *priv; int ret; priv = netdev_priv(dev); if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_SCFW) ret = flexcan_setup_stop_mode_scfw(pdev); else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SETUP_STOP_MODE_GPR) ret = flexcan_setup_stop_mode_gpr(pdev); else if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) ret = 0; else /* return 0 directly if doesn't support stop mode feature */ return 0; if (ret) return ret; device_set_wakeup_capable(&pdev->dev, true); if (of_property_read_bool(pdev->dev.of_node, "wakeup-source")) device_set_wakeup_enable(&pdev->dev, true); return 0; } static const struct of_device_id flexcan_of_match[] = { { .compatible = "fsl,imx8qm-flexcan", .data = &fsl_imx8qm_devtype_data, }, { .compatible = "fsl,imx8mp-flexcan", .data = &fsl_imx8mp_devtype_data, }, { .compatible = "fsl,imx93-flexcan", .data = &fsl_imx93_devtype_data, }, { .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, }, { .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, }, { .compatible = "fsl,imx53-flexcan", .data = &fsl_imx25_devtype_data, }, { .compatible = "fsl,imx35-flexcan", .data = &fsl_imx25_devtype_data, }, { .compatible = "fsl,imx25-flexcan", .data = &fsl_imx25_devtype_data, }, { .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, }, { .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, }, { .compatible = "fsl,ls1021ar2-flexcan", .data = &fsl_ls1021a_r2_devtype_data, }, { .compatible = "fsl,lx2160ar1-flexcan", .data = &fsl_lx2160a_r1_devtype_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, flexcan_of_match); static const struct platform_device_id flexcan_id_table[] = { { .name = "flexcan-mcf5441x", .driver_data = (kernel_ulong_t)&fsl_mcf5441x_devtype_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(platform, flexcan_id_table); static int flexcan_probe(struct platform_device *pdev) { const struct of_device_id *of_id; const struct 
flexcan_devtype_data *devtype_data; struct net_device *dev; struct flexcan_priv *priv; struct regulator *reg_xceiver; struct clk *clk_ipg = NULL, *clk_per = NULL; struct flexcan_regs __iomem *regs; struct flexcan_platform_data *pdata; int err, irq; u8 clk_src = 1; u32 clock_freq = 0; reg_xceiver = devm_regulator_get_optional(&pdev->dev, "xceiver"); if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER) return -EPROBE_DEFER; else if (PTR_ERR(reg_xceiver) == -ENODEV) reg_xceiver = NULL; else if (IS_ERR(reg_xceiver)) return PTR_ERR(reg_xceiver); if (pdev->dev.of_node) { of_property_read_u32(pdev->dev.of_node, "clock-frequency", &clock_freq); of_property_read_u8(pdev->dev.of_node, "fsl,clk-source", &clk_src); } else { pdata = dev_get_platdata(&pdev->dev); if (pdata) { clock_freq = pdata->clock_frequency; clk_src = pdata->clk_src; } } if (!clock_freq) { clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(clk_ipg)) { dev_err(&pdev->dev, "no ipg clock defined\n"); return PTR_ERR(clk_ipg); } clk_per = devm_clk_get(&pdev->dev, "per"); if (IS_ERR(clk_per)) { dev_err(&pdev->dev, "no per clock defined\n"); return PTR_ERR(clk_per); } clock_freq = clk_get_rate(clk_per); } irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; regs = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(regs)) return PTR_ERR(regs); of_id = of_match_device(flexcan_of_match, &pdev->dev); if (of_id) devtype_data = of_id->data; else if (platform_get_device_id(pdev)->driver_data) devtype_data = (struct flexcan_devtype_data *) platform_get_device_id(pdev)->driver_data; else return -ENODEV; if ((devtype_data->quirks & FLEXCAN_QUIRK_SUPPORT_FD) && !((devtype_data->quirks & (FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR | FLEXCAN_QUIRK_SUPPORT_RX_FIFO)) == (FLEXCAN_QUIRK_USE_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR))) { dev_err(&pdev->dev, "CAN-FD mode doesn't work in RX-FIFO mode!\n"); return -EINVAL; } if ((devtype_data->quirks & (FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX | FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR)) == FLEXCAN_QUIRK_SUPPORT_RX_MAILBOX_RTR) { dev_err(&pdev->dev, "Quirks (0x%08x) inconsistent: RX_MAILBOX_RX supported but not RX_MAILBOX\n", devtype_data->quirks); return -EINVAL; } dev = alloc_candev(sizeof(struct flexcan_priv), 1); if (!dev) return -ENOMEM; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); dev->netdev_ops = &flexcan_netdev_ops; dev->ethtool_ops = &flexcan_ethtool_ops; dev->irq = irq; dev->flags |= IFF_ECHO; priv = netdev_priv(dev); priv->devtype_data = *devtype_data; if (of_property_read_bool(pdev->dev.of_node, "big-endian") || priv->devtype_data.quirks & FLEXCAN_QUIRK_DEFAULT_BIG_ENDIAN) { priv->read = flexcan_read_be; priv->write = flexcan_write_be; } else { priv->read = flexcan_read_le; priv->write = flexcan_write_le; } priv->dev = &pdev->dev; priv->can.clock.freq = clock_freq; priv->can.do_set_mode = flexcan_set_mode; priv->can.do_get_berr_counter = flexcan_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_BERR_REPORTING; priv->regs = regs; priv->clk_ipg = clk_ipg; priv->clk_per = clk_per; priv->clk_src = clk_src; priv->reg_xceiver = reg_xceiver; if (priv->devtype_data.quirks & FLEXCAN_QUIRK_NR_IRQ_3) { priv->irq_boff = platform_get_irq(pdev, 1); if (priv->irq_boff < 0) { err = priv->irq_boff; goto failed_platform_get_irq; } priv->irq_err = platform_get_irq(pdev, 2); if (priv->irq_err < 0) { err = priv->irq_err; 
goto failed_platform_get_irq; } } if (priv->devtype_data.quirks & FLEXCAN_QUIRK_SUPPORT_FD) { priv->can.ctrlmode_supported |= CAN_CTRLMODE_FD | CAN_CTRLMODE_FD_NON_ISO; priv->can.bittiming_const = &flexcan_fd_bittiming_const; priv->can.data_bittiming_const = &flexcan_fd_data_bittiming_const; } else { priv->can.bittiming_const = &flexcan_bittiming_const; } pm_runtime_get_noresume(&pdev->dev); pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); err = register_flexcandev(dev); if (err) { dev_err(&pdev->dev, "registering netdev failed\n"); goto failed_register; } err = flexcan_setup_stop_mode(pdev); if (err < 0) { dev_err_probe(&pdev->dev, err, "setup stop mode failed\n"); goto failed_setup_stop_mode; } of_can_transceiver(dev); return 0; failed_setup_stop_mode: unregister_flexcandev(dev); failed_register: pm_runtime_put_noidle(&pdev->dev); pm_runtime_disable(&pdev->dev); failed_platform_get_irq: free_candev(dev); return err; } static void flexcan_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); device_set_wakeup_enable(&pdev->dev, false); device_set_wakeup_capable(&pdev->dev, false); unregister_flexcandev(dev); pm_runtime_disable(&pdev->dev); free_candev(dev); } static int __maybe_unused flexcan_suspend(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); int err; if (netif_running(dev)) { /* if wakeup is enabled, enter stop mode * else enter disabled mode. */ if (device_may_wakeup(device)) { enable_irq_wake(dev->irq); err = flexcan_enter_stop_mode(priv); if (err) return err; } else { err = flexcan_chip_stop(dev); if (err) return err; flexcan_chip_interrupts_disable(dev); err = pinctrl_pm_select_sleep_state(device); if (err) return err; } netif_stop_queue(dev); netif_device_detach(dev); } priv->can.state = CAN_STATE_SLEEPING; return 0; } static int __maybe_unused flexcan_resume(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); int err; priv->can.state = CAN_STATE_ERROR_ACTIVE; if (netif_running(dev)) { netif_device_attach(dev); netif_start_queue(dev); if (device_may_wakeup(device)) { disable_irq_wake(dev->irq); err = flexcan_exit_stop_mode(priv); if (err) return err; } else { err = pinctrl_pm_select_default_state(device); if (err) return err; err = flexcan_chip_start(dev); if (err) return err; flexcan_chip_interrupts_enable(dev); } } return 0; } static int __maybe_unused flexcan_runtime_suspend(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); flexcan_clks_disable(priv); return 0; } static int __maybe_unused flexcan_runtime_resume(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); return flexcan_clks_enable(priv); } static int __maybe_unused flexcan_noirq_suspend(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); if (netif_running(dev)) { int err; if (device_may_wakeup(device)) { flexcan_enable_wakeup_irq(priv, true); /* For auto stop mode, need to keep the clock on before * system go into low power mode. After system go into * low power mode, hardware will config the flexcan into * stop mode, and gate off the clock automatically. 
*/ if (priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE) return 0; } err = pm_runtime_force_suspend(device); if (err) return err; } return 0; } static int __maybe_unused flexcan_noirq_resume(struct device *device) { struct net_device *dev = dev_get_drvdata(device); struct flexcan_priv *priv = netdev_priv(dev); if (netif_running(dev)) { int err; /* For the wakeup in auto stop mode, no need to gate on the * clock here, hardware will do this automatically. */ if (!(device_may_wakeup(device) && priv->devtype_data.quirks & FLEXCAN_QUIRK_AUTO_STOP_MODE)) { err = pm_runtime_force_resume(device); if (err) return err; } if (device_may_wakeup(device)) flexcan_enable_wakeup_irq(priv, false); } return 0; } static const struct dev_pm_ops flexcan_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(flexcan_suspend, flexcan_resume) SET_RUNTIME_PM_OPS(flexcan_runtime_suspend, flexcan_runtime_resume, NULL) SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(flexcan_noirq_suspend, flexcan_noirq_resume) }; static struct platform_driver flexcan_driver = { .driver = { .name = DRV_NAME, .pm = &flexcan_pm_ops, .of_match_table = flexcan_of_match, }, .probe = flexcan_probe, .remove_new = flexcan_remove, .id_table = flexcan_id_table, }; module_platform_driver(flexcan_driver); MODULE_AUTHOR("Sascha Hauer <[email protected]>, " "Marc Kleine-Budde <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CAN port driver for flexcan based chip");
linux-master
drivers/net/can/flexcan/flexcan-core.c
// SPDX-License-Identifier: GPL-2.0+ /* Copyright (c) 2022 Amarula Solutions, Dario Binacchi <[email protected]> * Copyright (c) 2022 Pengutronix, Marc Kleine-Budde <[email protected]> * */ #include <linux/can/dev.h> #include <linux/ethtool.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/platform_device.h> #include "flexcan.h" static const char flexcan_priv_flags_strings[][ETH_GSTRING_LEN] = { #define FLEXCAN_PRIV_FLAGS_RX_RTR BIT(0) "rx-rtr", }; static void flexcan_get_ringparam(struct net_device *ndev, struct ethtool_ringparam *ring, struct kernel_ethtool_ringparam *kernel_ring, struct netlink_ext_ack *ext_ack) { const struct flexcan_priv *priv = netdev_priv(ndev); ring->rx_max_pending = priv->mb_count; ring->tx_max_pending = priv->mb_count; if (priv->devtype_data.quirks & FLEXCAN_QUIRK_USE_RX_MAILBOX) ring->rx_pending = priv->offload.mb_last - priv->offload.mb_first + 1; else ring->rx_pending = 6; /* RX-FIFO depth is fixed */ /* the drive currently supports only on TX buffer */ ring->tx_pending = 1; } static void flexcan_get_strings(struct net_device *ndev, u32 stringset, u8 *data) { switch (stringset) { case ETH_SS_PRIV_FLAGS: memcpy(data, flexcan_priv_flags_strings, sizeof(flexcan_priv_flags_strings)); } } static u32 flexcan_get_priv_flags(struct net_device *ndev) { const struct flexcan_priv *priv = netdev_priv(ndev); u32 priv_flags = 0; if (flexcan_active_rx_rtr(priv)) priv_flags |= FLEXCAN_PRIV_FLAGS_RX_RTR; return priv_flags; } static int flexcan_set_priv_flags(struct net_device *ndev, u32 priv_flags) { struct flexcan_priv *priv = netdev_priv(ndev); u32 quirks = priv->devtype_data.quirks; if (priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) { if (flexcan_supports_rx_mailbox_rtr(priv)) quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX; else if (flexcan_supports_rx_fifo(priv)) quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX; else quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX; } else { if (flexcan_supports_rx_mailbox(priv)) quirks |= FLEXCAN_QUIRK_USE_RX_MAILBOX; else quirks &= ~FLEXCAN_QUIRK_USE_RX_MAILBOX; } if (quirks != priv->devtype_data.quirks && netif_running(ndev)) return -EBUSY; priv->devtype_data.quirks = quirks; if (!(priv_flags & FLEXCAN_PRIV_FLAGS_RX_RTR) && !flexcan_active_rx_rtr(priv)) netdev_info(ndev, "Activating RX mailbox mode, cannot receive RTR frames.\n"); return 0; } static int flexcan_get_sset_count(struct net_device *netdev, int sset) { switch (sset) { case ETH_SS_PRIV_FLAGS: return ARRAY_SIZE(flexcan_priv_flags_strings); default: return -EOPNOTSUPP; } } const struct ethtool_ops flexcan_ethtool_ops = { .get_ringparam = flexcan_get_ringparam, .get_strings = flexcan_get_strings, .get_priv_flags = flexcan_get_priv_flags, .set_priv_flags = flexcan_set_priv_flags, .get_sset_count = flexcan_get_sset_count, .get_ts_info = ethtool_op_get_ts_info, };
linux-master
drivers/net/can/flexcan/flexcan-ethtool.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * tscan1.c: driver for Technologic Systems TS-CAN1 PC104 boards * * Copyright 2010 Andre B. Oliveira */ /* References: * - Getting started with TS-CAN1, Technologic Systems, Feb 2022 * https://docs.embeddedts.com/TS-CAN1 */ #include <linux/init.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/isa.h> #include <linux/module.h> #include <linux/netdevice.h> #include "sja1000.h" MODULE_DESCRIPTION("Driver for Technologic Systems TS-CAN1 PC104 boards"); MODULE_AUTHOR("Andre B. Oliveira <[email protected]>"); MODULE_LICENSE("GPL"); /* Maximum number of boards (one in each JP1:JP2 setting of IO address) */ #define TSCAN1_MAXDEV 4 /* PLD registers address offsets */ #define TSCAN1_ID1 0 #define TSCAN1_ID2 1 #define TSCAN1_VERSION 2 #define TSCAN1_LED 3 #define TSCAN1_PAGE 4 #define TSCAN1_MODE 5 #define TSCAN1_JUMPERS 6 /* PLD board identifier registers magic values */ #define TSCAN1_ID1_VALUE 0xf6 #define TSCAN1_ID2_VALUE 0xb9 /* PLD mode register SJA1000 IO enable bit */ #define TSCAN1_MODE_ENABLE 0x40 /* PLD jumpers register bits */ #define TSCAN1_JP4 0x10 #define TSCAN1_JP5 0x20 /* PLD IO base addresses start */ #define TSCAN1_PLD_ADDRESS 0x150 /* PLD register space size */ #define TSCAN1_PLD_SIZE 8 /* SJA1000 register space size */ #define TSCAN1_SJA1000_SIZE 32 /* SJA1000 crystal frequency (16MHz) */ #define TSCAN1_SJA1000_XTAL 16000000 /* SJA1000 IO base addresses */ static const unsigned short tscan1_sja1000_addresses[] = { 0x100, 0x120, 0x180, 0x1a0, 0x200, 0x240, 0x280, 0x320 }; /* Read SJA1000 register */ static u8 tscan1_read(const struct sja1000_priv *priv, int reg) { return inb((unsigned long)priv->reg_base + reg); } /* Write SJA1000 register */ static void tscan1_write(const struct sja1000_priv *priv, int reg, u8 val) { outb(val, (unsigned long)priv->reg_base + reg); } /* Probe for a TS-CAN1 board with JP2:JP1 jumper setting ID */ static int tscan1_probe(struct device *dev, unsigned id) { struct net_device *netdev; struct sja1000_priv *priv; unsigned long pld_base, sja1000_base; int irq, i; pld_base = TSCAN1_PLD_ADDRESS + id * TSCAN1_PLD_SIZE; if (!request_region(pld_base, TSCAN1_PLD_SIZE, dev_name(dev))) return -EBUSY; if (inb(pld_base + TSCAN1_ID1) != TSCAN1_ID1_VALUE || inb(pld_base + TSCAN1_ID2) != TSCAN1_ID2_VALUE) { release_region(pld_base, TSCAN1_PLD_SIZE); return -ENODEV; } switch (inb(pld_base + TSCAN1_JUMPERS) & (TSCAN1_JP4 | TSCAN1_JP5)) { case TSCAN1_JP4: irq = 6; break; case TSCAN1_JP5: irq = 7; break; case TSCAN1_JP4 | TSCAN1_JP5: irq = 5; break; default: dev_err(dev, "invalid JP4:JP5 setting (no IRQ)\n"); release_region(pld_base, TSCAN1_PLD_SIZE); return -EINVAL; } netdev = alloc_sja1000dev(0); if (!netdev) { release_region(pld_base, TSCAN1_PLD_SIZE); return -ENOMEM; } dev_set_drvdata(dev, netdev); SET_NETDEV_DEV(netdev, dev); netdev->base_addr = pld_base; netdev->irq = irq; priv = netdev_priv(netdev); priv->read_reg = tscan1_read; priv->write_reg = tscan1_write; priv->can.clock.freq = TSCAN1_SJA1000_XTAL / 2; priv->cdr = CDR_CBP | CDR_CLK_OFF; priv->ocr = OCR_TX0_PUSHPULL; /* Select the first SJA1000 IO address that is free and that works */ for (i = 0; i < ARRAY_SIZE(tscan1_sja1000_addresses); i++) { sja1000_base = tscan1_sja1000_addresses[i]; if (!request_region(sja1000_base, TSCAN1_SJA1000_SIZE, dev_name(dev))) continue; /* Set SJA1000 IO base address and enable it */ outb(TSCAN1_MODE_ENABLE | i, pld_base + TSCAN1_MODE); priv->reg_base = (void __iomem *)sja1000_base; if (!register_sja1000dev(netdev)) 
{ /* SJA1000 probe succeeded; turn LED off and return */ outb(0, pld_base + TSCAN1_LED); netdev_info(netdev, "TS-CAN1 at 0x%lx 0x%lx irq %d\n", pld_base, sja1000_base, irq); return 0; } /* SJA1000 probe failed; release and try next address */ outb(0, pld_base + TSCAN1_MODE); release_region(sja1000_base, TSCAN1_SJA1000_SIZE); } dev_err(dev, "failed to assign SJA1000 IO address\n"); dev_set_drvdata(dev, NULL); free_sja1000dev(netdev); release_region(pld_base, TSCAN1_PLD_SIZE); return -ENXIO; } static void tscan1_remove(struct device *dev, unsigned id /*unused*/) { struct net_device *netdev; struct sja1000_priv *priv; unsigned long pld_base, sja1000_base; netdev = dev_get_drvdata(dev); unregister_sja1000dev(netdev); dev_set_drvdata(dev, NULL); priv = netdev_priv(netdev); pld_base = netdev->base_addr; sja1000_base = (unsigned long)priv->reg_base; outb(0, pld_base + TSCAN1_MODE); /* disable SJA1000 IO space */ release_region(sja1000_base, TSCAN1_SJA1000_SIZE); release_region(pld_base, TSCAN1_PLD_SIZE); free_sja1000dev(netdev); } static struct isa_driver tscan1_isa_driver = { .probe = tscan1_probe, .remove = tscan1_remove, .driver = { .name = "tscan1", }, }; module_isa_driver(tscan1_isa_driver, TSCAN1_MAXDEV);
linux-master
drivers/net/can/sja1000/tscan1.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008 Sebastian Haas (initial chardev implementation) * Copyright (C) 2010 Markus Plessing <[email protected]> * Rework for mainline by Oliver Hartkopp <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <linux/can.h> #include <linux/can/dev.h> #include "sja1000.h" #define DRV_NAME "ems_pcmcia" MODULE_AUTHOR("Markus Plessing <[email protected]>"); MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-CARD cards"); MODULE_LICENSE("GPL v2"); #define EMS_PCMCIA_MAX_CHAN 2 struct ems_pcmcia_card { int channels; struct pcmcia_device *pcmcia_dev; struct net_device *net_dev[EMS_PCMCIA_MAX_CHAN]; void __iomem *base_addr; }; #define EMS_PCMCIA_CAN_CLOCK (16000000 / 2) /* * The board configuration is probably following: * RX1 is connected to ground. * TX1 is not connected. * CLKO is not connected. * Setting the OCR register to 0xDA is a good idea. * This means normal output mode , push-pull and the correct polarity. */ #define EMS_PCMCIA_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL) /* * In the CDR register, you should set CBP to 1. * You will probably also want to set the clock divider value to 7 * (meaning direct oscillator output) because the second SJA1000 chip * is driven by the first one CLKOUT output. */ #define EMS_PCMCIA_CDR (CDR_CBP | CDR_CLKOUT_MASK) #define EMS_PCMCIA_MEM_SIZE 4096 /* Size of the remapped io-memory */ #define EMS_PCMCIA_CAN_BASE_OFFSET 0x100 /* Offset where controllers starts */ #define EMS_PCMCIA_CAN_CTRL_SIZE 0x80 /* Memory size for each controller */ #define EMS_CMD_RESET 0x00 /* Perform a reset of the card */ #define EMS_CMD_MAP 0x03 /* Map CAN controllers into card' memory */ #define EMS_CMD_UMAP 0x02 /* Unmap CAN controllers from card' memory */ static struct pcmcia_device_id ems_pcmcia_tbl[] = { PCMCIA_DEVICE_PROD_ID123("EMS_T_W", "CPC-Card", "V2.0", 0xeab1ea23, 0xa338573f, 0xe4575800), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, ems_pcmcia_tbl); static u8 ems_pcmcia_read_reg(const struct sja1000_priv *priv, int port) { return readb(priv->reg_base + port); } static void ems_pcmcia_write_reg(const struct sja1000_priv *priv, int port, u8 val) { writeb(val, priv->reg_base + port); } static irqreturn_t ems_pcmcia_interrupt(int irq, void *dev_id) { struct ems_pcmcia_card *card = dev_id; struct net_device *dev; irqreturn_t retval = IRQ_NONE; int i, again; /* Card not present */ if (readw(card->base_addr) != 0xAA55) return IRQ_HANDLED; do { again = 0; /* Check interrupt for each channel */ for (i = 0; i < card->channels; i++) { dev = card->net_dev[i]; if (!dev) continue; if (sja1000_interrupt(irq, dev) == IRQ_HANDLED) again = 1; } /* At least one channel handled the interrupt */ if (again) retval = IRQ_HANDLED; } while (again); return retval; } /* * Check if a CAN controller is present at the specified location * by trying to set 'em into the PeliCAN mode */ static inline int ems_pcmcia_check_chan(struct sja1000_priv *priv) { /* Make sure SJA1000 is in reset mode */ ems_pcmcia_write_reg(priv, SJA1000_MOD, 1); ems_pcmcia_write_reg(priv, SJA1000_CDR, CDR_PELICAN); /* read reset-values */ if (ems_pcmcia_read_reg(priv, SJA1000_CDR) == CDR_PELICAN) return 1; return 0; } static void ems_pcmcia_del_card(struct pcmcia_device *pdev) { struct ems_pcmcia_card *card = pdev->priv; struct net_device *dev; int i; free_irq(pdev->irq, card); for 
(i = 0; i < card->channels; i++) { dev = card->net_dev[i]; if (!dev) continue; printk(KERN_INFO "%s: removing %s on channel #%d\n", DRV_NAME, dev->name, i); unregister_sja1000dev(dev); free_sja1000dev(dev); } writeb(EMS_CMD_UMAP, card->base_addr); iounmap(card->base_addr); kfree(card); pdev->priv = NULL; } /* * Probe PCI device for EMS CAN signature and register each available * CAN channel to SJA1000 Socket-CAN subsystem. */ static int ems_pcmcia_add_card(struct pcmcia_device *pdev, unsigned long base) { struct sja1000_priv *priv; struct net_device *dev; struct ems_pcmcia_card *card; int err, i; /* Allocating card structures to hold addresses, ... */ card = kzalloc(sizeof(struct ems_pcmcia_card), GFP_KERNEL); if (!card) return -ENOMEM; pdev->priv = card; card->channels = 0; card->base_addr = ioremap(base, EMS_PCMCIA_MEM_SIZE); if (!card->base_addr) { err = -ENOMEM; goto failure_cleanup; } /* Check for unique EMS CAN signature */ if (readw(card->base_addr) != 0xAA55) { err = -ENODEV; goto failure_cleanup; } /* Request board reset */ writeb(EMS_CMD_RESET, card->base_addr); /* Make sure CAN controllers are mapped into card's memory space */ writeb(EMS_CMD_MAP, card->base_addr); /* Detect available channels */ for (i = 0; i < EMS_PCMCIA_MAX_CHAN; i++) { dev = alloc_sja1000dev(0); if (!dev) { err = -ENOMEM; goto failure_cleanup; } card->net_dev[i] = dev; priv = netdev_priv(dev); priv->priv = card; SET_NETDEV_DEV(dev, &pdev->dev); dev->dev_id = i; priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; priv->reg_base = card->base_addr + EMS_PCMCIA_CAN_BASE_OFFSET + (i * EMS_PCMCIA_CAN_CTRL_SIZE); /* Check if channel is present */ if (ems_pcmcia_check_chan(priv)) { priv->read_reg = ems_pcmcia_read_reg; priv->write_reg = ems_pcmcia_write_reg; priv->can.clock.freq = EMS_PCMCIA_CAN_CLOCK; priv->ocr = EMS_PCMCIA_OCR; priv->cdr = EMS_PCMCIA_CDR; priv->flags |= SJA1000_CUSTOM_IRQ_HANDLER; /* Register SJA1000 device */ err = register_sja1000dev(dev); if (err) { free_sja1000dev(dev); goto failure_cleanup; } card->channels++; printk(KERN_INFO "%s: registered %s on channel " "#%d at 0x%p, irq %d\n", DRV_NAME, dev->name, i, priv->reg_base, dev->irq); } else free_sja1000dev(dev); } if (!card->channels) { err = -ENODEV; goto failure_cleanup; } err = request_irq(pdev->irq, &ems_pcmcia_interrupt, IRQF_SHARED, DRV_NAME, card); if (!err) return 0; failure_cleanup: ems_pcmcia_del_card(pdev); return err; } /* * Setup PCMCIA socket and probe for EMS CPC-CARD */ static int ems_pcmcia_probe(struct pcmcia_device *dev) { int csval; /* General socket configuration */ dev->config_flags |= CONF_ENABLE_IRQ; dev->config_index = 1; dev->config_regs = PRESENT_OPTION; /* The io structure describes IO port mapping */ dev->resource[0]->end = 16; dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; dev->resource[1]->end = 16; dev->resource[1]->flags |= IO_DATA_PATH_WIDTH_16; dev->io_lines = 5; /* Allocate a memory window */ dev->resource[2]->flags = (WIN_DATA_WIDTH_8 | WIN_MEMORY_TYPE_CM | WIN_ENABLE); dev->resource[2]->start = dev->resource[2]->end = 0; csval = pcmcia_request_window(dev, dev->resource[2], 0); if (csval) { dev_err(&dev->dev, "pcmcia_request_window failed (err=%d)\n", csval); return 0; } csval = pcmcia_map_mem_page(dev, dev->resource[2], dev->config_base); if (csval) { dev_err(&dev->dev, "pcmcia_map_mem_page failed (err=%d)\n", csval); return 0; } csval = pcmcia_enable_device(dev); if (csval) { dev_err(&dev->dev, "pcmcia_enable_device failed (err=%d)\n", csval); return 0; } ems_pcmcia_add_card(dev, 
dev->resource[2]->start); return 0; } /* * Release claimed resources */ static void ems_pcmcia_remove(struct pcmcia_device *dev) { ems_pcmcia_del_card(dev); pcmcia_disable_device(dev); } static struct pcmcia_driver ems_pcmcia_driver = { .name = DRV_NAME, .probe = ems_pcmcia_probe, .remove = ems_pcmcia_remove, .id_table = ems_pcmcia_tbl, }; module_pcmcia_driver(ems_pcmcia_driver);
linux-master
drivers/net/can/sja1000/ems_pcmcia.c
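A minimal standalone sketch (plain C, not kernel code) of the rescan pattern used in ems_pcmcia_interrupt() above: on a level-triggered line shared by two SJA1000s, every channel is polled again until none of them claims the interrupt, so the line is never left asserted. chan_irq() and pending[] are hypothetical stand-ins for sja1000_interrupt() and the hardware state.

#include <stdbool.h>
#include <stdio.h>

#define NCHAN 2

static bool pending[NCHAN] = { true, false }; /* fake per-channel IRQ state */

/* hypothetical stand-in for sja1000_interrupt(): reports and clears work */
static bool chan_irq(int i)
{
	bool had_work = pending[i];

	pending[i] = false;
	return had_work;
}

int main(void)
{
	bool handled = false;
	bool again;

	do {
		again = false;
		/* poll every channel; any claimed IRQ forces another pass */
		for (int i = 0; i < NCHAN; i++)
			if (chan_irq(i))
				again = true;
		if (again)
			handled = true;
	} while (again);

	printf("%s\n", handled ? "IRQ_HANDLED" : "IRQ_NONE");
	return 0;
}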
/* * sja1000.c - Philips SJA1000 network device driver * * Copyright (c) 2003 Matthias Brukner, Trajet Gmbh, Rebenring 33, * 38106 Braunschweig, GERMANY * * Copyright (c) 2002-2007 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Volkswagen nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * */ #include <linux/module.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/ethtool.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/if_ether.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/can/dev.h> #include <linux/can/error.h> #include "sja1000.h" #define DRV_NAME "sja1000" MODULE_AUTHOR("Oliver Hartkopp <[email protected]>"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_DESCRIPTION(DRV_NAME "CAN netdevice driver"); static const struct can_bittiming_const sja1000_bittiming_const = { .name = DRV_NAME, .tseg1_min = 1, .tseg1_max = 16, .tseg2_min = 1, .tseg2_max = 8, .sjw_max = 4, .brp_min = 1, .brp_max = 64, .brp_inc = 1, }; static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val) { unsigned long flags; /* * The command register needs some locking and time to settle * the write_reg() operation - especially on SMP systems. 
*/ spin_lock_irqsave(&priv->cmdreg_lock, flags); priv->write_reg(priv, SJA1000_CMR, val); priv->read_reg(priv, SJA1000_SR); spin_unlock_irqrestore(&priv->cmdreg_lock, flags); } static int sja1000_is_absent(struct sja1000_priv *priv) { return (priv->read_reg(priv, SJA1000_MOD) == 0xFF); } static int sja1000_probe_chip(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); if (priv->reg_base && sja1000_is_absent(priv)) { netdev_err(dev, "probing failed\n"); return 0; } return -1; } static void set_reset_mode(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); unsigned char status = priv->read_reg(priv, SJA1000_MOD); int i; /* disable interrupts */ priv->write_reg(priv, SJA1000_IER, IRQ_OFF); for (i = 0; i < 100; i++) { /* check reset bit */ if (status & MOD_RM) { priv->can.state = CAN_STATE_STOPPED; return; } /* reset chip */ priv->write_reg(priv, SJA1000_MOD, MOD_RM); udelay(10); status = priv->read_reg(priv, SJA1000_MOD); } netdev_err(dev, "setting SJA1000 into reset mode failed!\n"); } static void set_normal_mode(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); unsigned char status = priv->read_reg(priv, SJA1000_MOD); u8 mod_reg_val = 0x00; int i; for (i = 0; i < 100; i++) { /* check reset bit */ if ((status & MOD_RM) == 0) { priv->can.state = CAN_STATE_ERROR_ACTIVE; /* enable interrupts */ if (priv->can.ctrlmode & CAN_CTRLMODE_BERR_REPORTING) priv->write_reg(priv, SJA1000_IER, IRQ_ALL); else priv->write_reg(priv, SJA1000_IER, IRQ_ALL & ~IRQ_BEI); return; } /* set chip to normal mode */ if (priv->can.ctrlmode & CAN_CTRLMODE_LISTENONLY) mod_reg_val |= MOD_LOM; if (priv->can.ctrlmode & CAN_CTRLMODE_PRESUME_ACK) mod_reg_val |= MOD_STM; priv->write_reg(priv, SJA1000_MOD, mod_reg_val); udelay(10); status = priv->read_reg(priv, SJA1000_MOD); } netdev_err(dev, "setting SJA1000 into normal mode failed!\n"); } /* * initialize SJA1000 chip: * - reset chip * - set output mode * - set baudrate * - enable interrupts * - start operating mode */ static void chipset_init(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG)) /* set clock divider and output control register */ priv->write_reg(priv, SJA1000_CDR, priv->cdr | CDR_PELICAN); /* set acceptance filter (accept all) */ priv->write_reg(priv, SJA1000_ACCC0, 0x00); priv->write_reg(priv, SJA1000_ACCC1, 0x00); priv->write_reg(priv, SJA1000_ACCC2, 0x00); priv->write_reg(priv, SJA1000_ACCC3, 0x00); priv->write_reg(priv, SJA1000_ACCM0, 0xFF); priv->write_reg(priv, SJA1000_ACCM1, 0xFF); priv->write_reg(priv, SJA1000_ACCM2, 0xFF); priv->write_reg(priv, SJA1000_ACCM3, 0xFF); priv->write_reg(priv, SJA1000_OCR, priv->ocr | OCR_MODE_NORMAL); } static void sja1000_start(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); /* leave reset mode */ if (priv->can.state != CAN_STATE_STOPPED) set_reset_mode(dev); /* Initialize chip if uninitialized at this stage */ if (!(priv->flags & SJA1000_QUIRK_NO_CDR_REG || priv->read_reg(priv, SJA1000_CDR) & CDR_PELICAN)) chipset_init(dev); /* Clear error counters and error code capture */ priv->write_reg(priv, SJA1000_TXERR, 0x0); priv->write_reg(priv, SJA1000_RXERR, 0x0); priv->read_reg(priv, SJA1000_ECC); /* clear interrupt flags */ priv->read_reg(priv, SJA1000_IR); /* leave reset mode */ set_normal_mode(dev); } static int sja1000_set_mode(struct net_device *dev, enum can_mode mode) { switch (mode) { case CAN_MODE_START: sja1000_start(dev); if (netif_queue_stopped(dev)) 
netif_wake_queue(dev); break; default: return -EOPNOTSUPP; } return 0; } static int sja1000_set_bittiming(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); struct can_bittiming *bt = &priv->can.bittiming; u8 btr0, btr1; btr0 = ((bt->brp - 1) & 0x3f) | (((bt->sjw - 1) & 0x3) << 6); btr1 = ((bt->prop_seg + bt->phase_seg1 - 1) & 0xf) | (((bt->phase_seg2 - 1) & 0x7) << 4); if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) btr1 |= 0x80; netdev_info(dev, "setting BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); priv->write_reg(priv, SJA1000_BTR0, btr0); priv->write_reg(priv, SJA1000_BTR1, btr1); return 0; } static int sja1000_get_berr_counter(const struct net_device *dev, struct can_berr_counter *bec) { struct sja1000_priv *priv = netdev_priv(dev); bec->txerr = priv->read_reg(priv, SJA1000_TXERR); bec->rxerr = priv->read_reg(priv, SJA1000_RXERR); return 0; } /* * transmit a CAN message * message layout in the sk_buff should be like this: * xx xx xx xx ff ll 00 11 22 33 44 55 66 77 * [ can-id ] [flags] [len] [can data (up to 8 bytes] */ static netdev_tx_t sja1000_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); struct can_frame *cf = (struct can_frame *)skb->data; uint8_t fi; canid_t id; uint8_t dreg; u8 cmd_reg_val = 0x00; int i; if (can_dev_dropped_skb(dev, skb)) return NETDEV_TX_OK; netif_stop_queue(dev); fi = can_get_cc_dlc(cf, priv->can.ctrlmode); id = cf->can_id; if (id & CAN_RTR_FLAG) fi |= SJA1000_FI_RTR; if (id & CAN_EFF_FLAG) { fi |= SJA1000_FI_FF; dreg = SJA1000_EFF_BUF; priv->write_reg(priv, SJA1000_FI, fi); priv->write_reg(priv, SJA1000_ID1, (id & 0x1fe00000) >> 21); priv->write_reg(priv, SJA1000_ID2, (id & 0x001fe000) >> 13); priv->write_reg(priv, SJA1000_ID3, (id & 0x00001fe0) >> 5); priv->write_reg(priv, SJA1000_ID4, (id & 0x0000001f) << 3); } else { dreg = SJA1000_SFF_BUF; priv->write_reg(priv, SJA1000_FI, fi); priv->write_reg(priv, SJA1000_ID1, (id & 0x000007f8) >> 3); priv->write_reg(priv, SJA1000_ID2, (id & 0x00000007) << 5); } for (i = 0; i < cf->len; i++) priv->write_reg(priv, dreg++, cf->data[i]); can_put_echo_skb(skb, dev, 0, 0); if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT) cmd_reg_val |= CMD_AT; if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) cmd_reg_val |= CMD_SRR; else cmd_reg_val |= CMD_TR; sja1000_write_cmdreg(priv, cmd_reg_val); return NETDEV_TX_OK; } static void sja1000_rx(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; uint8_t fi; uint8_t dreg; canid_t id; int i; /* create zero'ed CAN frame buffer */ skb = alloc_can_skb(dev, &cf); if (skb == NULL) return; fi = priv->read_reg(priv, SJA1000_FI); if (fi & SJA1000_FI_FF) { /* extended frame format (EFF) */ dreg = SJA1000_EFF_BUF; id = (priv->read_reg(priv, SJA1000_ID1) << 21) | (priv->read_reg(priv, SJA1000_ID2) << 13) | (priv->read_reg(priv, SJA1000_ID3) << 5) | (priv->read_reg(priv, SJA1000_ID4) >> 3); id |= CAN_EFF_FLAG; } else { /* standard frame format (SFF) */ dreg = SJA1000_SFF_BUF; id = (priv->read_reg(priv, SJA1000_ID1) << 3) | (priv->read_reg(priv, SJA1000_ID2) >> 5); } can_frame_set_cc_len(cf, fi & 0x0F, priv->can.ctrlmode); if (fi & SJA1000_FI_RTR) { id |= CAN_RTR_FLAG; } else { for (i = 0; i < cf->len; i++) cf->data[i] = priv->read_reg(priv, dreg++); stats->rx_bytes += cf->len; } stats->rx_packets++; cf->can_id = id; /* release receive buffer */ sja1000_write_cmdreg(priv, CMD_RRB); netif_rx(skb); } static irqreturn_t 
sja1000_reset_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; netdev_dbg(dev, "performing a soft reset upon overrun\n"); sja1000_start(dev); return IRQ_HANDLED; } static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status) { struct sja1000_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; struct can_frame *cf; struct sk_buff *skb; enum can_state state = priv->can.state; enum can_state rx_state, tx_state; unsigned int rxerr, txerr; uint8_t ecc, alc; int ret = 0; skb = alloc_can_err_skb(dev, &cf); if (skb == NULL) return -ENOMEM; txerr = priv->read_reg(priv, SJA1000_TXERR); rxerr = priv->read_reg(priv, SJA1000_RXERR); if (isrc & IRQ_DOI) { /* data overrun interrupt */ netdev_dbg(dev, "data overrun interrupt\n"); cf->can_id |= CAN_ERR_CRTL; cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW; stats->rx_over_errors++; stats->rx_errors++; sja1000_write_cmdreg(priv, CMD_CDO); /* clear bit */ /* Some controllers needs additional handling upon overrun * condition: the controller may sometimes be totally confused * and refuse any new frame while its buffer is empty. The only * way to re-sync the read vs. write buffer offsets is to * stop any current handling and perform a reset. */ if (priv->flags & SJA1000_QUIRK_RESET_ON_OVERRUN) ret = IRQ_WAKE_THREAD; } if (isrc & IRQ_EI) { /* error warning interrupt */ netdev_dbg(dev, "error warning interrupt\n"); if (status & SR_BS) state = CAN_STATE_BUS_OFF; else if (status & SR_ES) state = CAN_STATE_ERROR_WARNING; else state = CAN_STATE_ERROR_ACTIVE; } if (state != CAN_STATE_BUS_OFF) { cf->can_id |= CAN_ERR_CNT; cf->data[6] = txerr; cf->data[7] = rxerr; } if (isrc & IRQ_BEI) { /* bus error interrupt */ priv->can.can_stats.bus_error++; stats->rx_errors++; ecc = priv->read_reg(priv, SJA1000_ECC); cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR; /* set error type */ switch (ecc & ECC_MASK) { case ECC_BIT: cf->data[2] |= CAN_ERR_PROT_BIT; break; case ECC_FORM: cf->data[2] |= CAN_ERR_PROT_FORM; break; case ECC_STUFF: cf->data[2] |= CAN_ERR_PROT_STUFF; break; default: break; } /* set error location */ cf->data[3] = ecc & ECC_SEG; /* Error occurred during transmission? */ if ((ecc & ECC_DIR) == 0) cf->data[2] |= CAN_ERR_PROT_TX; } if (isrc & IRQ_EPI) { /* error passive interrupt */ netdev_dbg(dev, "error passive interrupt\n"); if (state == CAN_STATE_ERROR_PASSIVE) state = CAN_STATE_ERROR_WARNING; else state = CAN_STATE_ERROR_PASSIVE; } if (isrc & IRQ_ALI) { /* arbitration lost interrupt */ netdev_dbg(dev, "arbitration lost interrupt\n"); alc = priv->read_reg(priv, SJA1000_ALC); priv->can.can_stats.arbitration_lost++; cf->can_id |= CAN_ERR_LOSTARB; cf->data[0] = alc & 0x1f; } if (state != priv->can.state) { tx_state = txerr >= rxerr ? state : 0; rx_state = txerr <= rxerr ? state : 0; can_change_state(dev, cf, tx_state, rx_state); if(state == CAN_STATE_BUS_OFF) can_bus_off(dev); } netif_rx(skb); return ret; } irqreturn_t sja1000_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; struct sja1000_priv *priv = netdev_priv(dev); struct net_device_stats *stats = &dev->stats; uint8_t isrc, status; irqreturn_t ret = 0; int n = 0, err; if (priv->pre_irq) priv->pre_irq(priv); /* Shared interrupts and IRQ off? 
*/ if (priv->read_reg(priv, SJA1000_IER) == IRQ_OFF) goto out; while ((isrc = priv->read_reg(priv, SJA1000_IR)) && (n < SJA1000_MAX_IRQ)) { status = priv->read_reg(priv, SJA1000_SR); /* check for absent controller due to hw unplug */ if (status == 0xFF && sja1000_is_absent(priv)) goto out; if (isrc & IRQ_WUI) netdev_warn(dev, "wakeup interrupt\n"); if (isrc & IRQ_TI) { /* transmission buffer released */ if (priv->can.ctrlmode & CAN_CTRLMODE_ONE_SHOT && !(status & SR_TCS)) { stats->tx_errors++; can_free_echo_skb(dev, 0, NULL); } else { /* transmission complete */ stats->tx_bytes += can_get_echo_skb(dev, 0, NULL); stats->tx_packets++; } netif_wake_queue(dev); } if (isrc & IRQ_RI) { /* receive interrupt */ while (status & SR_RBS) { sja1000_rx(dev); status = priv->read_reg(priv, SJA1000_SR); /* check for absent controller */ if (status == 0xFF && sja1000_is_absent(priv)) goto out; } } if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) { /* error interrupt */ err = sja1000_err(dev, isrc, status); if (err == IRQ_WAKE_THREAD) ret = err; if (err) break; } n++; } out: if (!ret) ret = (n) ? IRQ_HANDLED : IRQ_NONE; if (priv->post_irq) priv->post_irq(priv); if (n >= SJA1000_MAX_IRQ) netdev_dbg(dev, "%d messages handled in ISR", n); return ret; } EXPORT_SYMBOL_GPL(sja1000_interrupt); static int sja1000_open(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); int err; /* set chip into reset mode */ set_reset_mode(dev); /* common open */ err = open_candev(dev); if (err) return err; /* register interrupt handler, if not done by the device driver */ if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) { err = request_threaded_irq(dev->irq, sja1000_interrupt, sja1000_reset_interrupt, priv->irq_flags, dev->name, (void *)dev); if (err) { close_candev(dev); return -EAGAIN; } } /* init and start chip */ sja1000_start(dev); netif_start_queue(dev); return 0; } static int sja1000_close(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); netif_stop_queue(dev); set_reset_mode(dev); if (!(priv->flags & SJA1000_CUSTOM_IRQ_HANDLER)) free_irq(dev->irq, (void *)dev); close_candev(dev); return 0; } struct net_device *alloc_sja1000dev(int sizeof_priv) { struct net_device *dev; struct sja1000_priv *priv; dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv, SJA1000_ECHO_SKB_MAX); if (!dev) return NULL; priv = netdev_priv(dev); priv->dev = dev; priv->can.bittiming_const = &sja1000_bittiming_const; priv->can.do_set_bittiming = sja1000_set_bittiming; priv->can.do_set_mode = sja1000_set_mode; priv->can.do_get_berr_counter = sja1000_get_berr_counter; priv->can.ctrlmode_supported = CAN_CTRLMODE_LOOPBACK | CAN_CTRLMODE_LISTENONLY | CAN_CTRLMODE_3_SAMPLES | CAN_CTRLMODE_ONE_SHOT | CAN_CTRLMODE_BERR_REPORTING | CAN_CTRLMODE_PRESUME_ACK | CAN_CTRLMODE_CC_LEN8_DLC; spin_lock_init(&priv->cmdreg_lock); if (sizeof_priv) priv->priv = (void *)priv + sizeof(struct sja1000_priv); return dev; } EXPORT_SYMBOL_GPL(alloc_sja1000dev); void free_sja1000dev(struct net_device *dev) { free_candev(dev); } EXPORT_SYMBOL_GPL(free_sja1000dev); static const struct net_device_ops sja1000_netdev_ops = { .ndo_open = sja1000_open, .ndo_stop = sja1000_close, .ndo_start_xmit = sja1000_start_xmit, .ndo_change_mtu = can_change_mtu, }; static const struct ethtool_ops sja1000_ethtool_ops = { .get_ts_info = ethtool_op_get_ts_info, }; int register_sja1000dev(struct net_device *dev) { if (!sja1000_probe_chip(dev)) return -ENODEV; dev->flags |= IFF_ECHO; /* we support local echo */ dev->netdev_ops = 
&sja1000_netdev_ops; dev->ethtool_ops = &sja1000_ethtool_ops; set_reset_mode(dev); chipset_init(dev); return register_candev(dev); } EXPORT_SYMBOL_GPL(register_sja1000dev); void unregister_sja1000dev(struct net_device *dev) { set_reset_mode(dev); unregister_candev(dev); } EXPORT_SYMBOL_GPL(unregister_sja1000dev); static __init int sja1000_init(void) { printk(KERN_INFO "%s CAN netdevice driver\n", DRV_NAME); return 0; } module_init(sja1000_init); static __exit void sja1000_exit(void) { printk(KERN_INFO "%s: driver removed\n", DRV_NAME); } module_exit(sja1000_exit);
linux-master
drivers/net/can/sja1000/sja1000.c
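The BTR0/BTR1 packing in sja1000_set_bittiming() above is easy to verify in isolation. Below is a standalone sketch (plain C, not kernel code) with illustrative timing values; brp, sjw and the segment lengths mirror the struct can_bittiming fields the driver reads.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* illustrative 500 kbit/s setup on an 8 MHz CAN clock: brp = 1 gives
	 * 8 MHz time quanta, and 1 + prop_seg + phase_seg1 + phase_seg2 = 16
	 * quanta per bit */
	unsigned brp = 1, sjw = 1;
	unsigned prop_seg = 5, phase_seg1 = 6, phase_seg2 = 4;
	int triple_sampling = 0; /* CAN_CTRLMODE_3_SAMPLES */
	uint8_t btr0, btr1;

	/* same bit packing as sja1000_set_bittiming() */
	btr0 = ((brp - 1) & 0x3f) | (((sjw - 1) & 0x3) << 6);
	btr1 = ((prop_seg + phase_seg1 - 1) & 0xf) |
	       (((phase_seg2 - 1) & 0x7) << 4);
	if (triple_sampling)
		btr1 |= 0x80; /* SAM bit: sample the bus three times */

	printf("BTR0=0x%02x BTR1=0x%02x\n", btr0, btr1); /* 0x00 / 0x3a */
	return 0;
}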
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008 Per Dalen <[email protected]> * * Parts of this software are based on (derived from) the following: * * - Kvaser linux driver, version 4.72 BETA * Copyright (C) 2002-2007 KVASER AB * * - Lincan driver, version 0.3.3, OCERA project * Copyright (C) 2004 Pavel Pisa * Copyright (C) 2001 Arnaud Westenberg * * - Socketcan SJA1000 drivers * Copyright (C) 2007 Wolfgang Grandegger <[email protected]> * Copyright (c) 2002-2007 Volkswagen Group Electronic Research * Copyright (c) 2003 Matthias Brukner, Trajet Gmbh, Rebenring 33, * 38106 Braunschweig, GERMANY */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/can/dev.h> #include <linux/io.h> #include "sja1000.h" #define DRV_NAME "kvaser_pci" MODULE_AUTHOR("Per Dalen <[email protected]>"); MODULE_DESCRIPTION("Socket-CAN driver for KVASER PCAN PCI cards"); MODULE_LICENSE("GPL v2"); #define MAX_NO_OF_CHANNELS 4 /* max no of channels on a single card */ struct kvaser_pci { int channel; struct pci_dev *pci_dev; struct net_device *slave_dev[MAX_NO_OF_CHANNELS-1]; void __iomem *conf_addr; void __iomem *res_addr; int no_channels; u8 xilinx_ver; }; #define KVASER_PCI_CAN_CLOCK (16000000 / 2) /* * The board configuration is probably as follows: * RX1 is connected to ground. * TX1 is not connected. * CLKO is not connected. * Setting the OCR register to 0xDA is a good idea. * This means normal output mode, push-pull and the correct polarity. */ #define KVASER_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL) /* * In the CDR register, you should set CBP to 1. * You will probably also want to set the clock divider value to 0 * (meaning divide-by-2), the Pelican bit, and the clock-off bit * (you will have no need for CLKOUT anyway). */ #define KVASER_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK) /* * These register values are valid for revision 14 of the Xilinx logic. */ #define XILINX_VERINT 7 /* Lower nibble simulates interrupts, high nibble holds the version number. 
*/ #define XILINX_PRESUMED_VERSION 14 /* * Important S5920 registers */ #define S5920_INTCSR 0x38 #define S5920_PTCR 0x60 #define INTCSR_ADDON_INTENABLE_M 0x2000 #define KVASER_PCI_PORT_BYTES 0x20 #define PCI_CONFIG_PORT_SIZE 0x80 /* size of the config io-memory */ #define PCI_PORT_SIZE 0x80 /* size of a channel io-memory */ #define PCI_PORT_XILINX_SIZE 0x08 /* size of a xilinx io-memory */ #define KVASER_PCI_VENDOR_ID1 0x10e8 /* the PCI device and vendor IDs */ #define KVASER_PCI_DEVICE_ID1 0x8406 #define KVASER_PCI_VENDOR_ID2 0x1a07 /* the PCI device and vendor IDs */ #define KVASER_PCI_DEVICE_ID2 0x0008 static const struct pci_device_id kvaser_pci_tbl[] = { {KVASER_PCI_VENDOR_ID1, KVASER_PCI_DEVICE_ID1, PCI_ANY_ID, PCI_ANY_ID,}, {KVASER_PCI_VENDOR_ID2, KVASER_PCI_DEVICE_ID2, PCI_ANY_ID, PCI_ANY_ID,}, { 0,} }; MODULE_DEVICE_TABLE(pci, kvaser_pci_tbl); static u8 kvaser_pci_read_reg(const struct sja1000_priv *priv, int port) { return ioread8(priv->reg_base + port); } static void kvaser_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val) { iowrite8(val, priv->reg_base + port); } static void kvaser_pci_disable_irq(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); struct kvaser_pci *board = priv->priv; u32 intcsr; /* Disable interrupts from card */ intcsr = ioread32(board->conf_addr + S5920_INTCSR); intcsr &= ~INTCSR_ADDON_INTENABLE_M; iowrite32(intcsr, board->conf_addr + S5920_INTCSR); } static void kvaser_pci_enable_irq(struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); struct kvaser_pci *board = priv->priv; u32 tmp_en_io; /* Enable interrupts from card */ tmp_en_io = ioread32(board->conf_addr + S5920_INTCSR); tmp_en_io |= INTCSR_ADDON_INTENABLE_M; iowrite32(tmp_en_io, board->conf_addr + S5920_INTCSR); } static int number_of_sja1000_chip(void __iomem *base_addr) { u8 status; int i; for (i = 0; i < MAX_NO_OF_CHANNELS; i++) { /* reset chip */ iowrite8(MOD_RM, base_addr + (i * KVASER_PCI_PORT_BYTES) + SJA1000_MOD); status = ioread8(base_addr + (i * KVASER_PCI_PORT_BYTES) + SJA1000_MOD); /* check reset bit */ if (!(status & MOD_RM)) break; } return i; } static void kvaser_pci_del_chan(struct net_device *dev) { struct sja1000_priv *priv; struct kvaser_pci *board; int i; if (!dev) return; priv = netdev_priv(dev); board = priv->priv; if (!board) return; dev_info(&board->pci_dev->dev, "Removing device %s\n", dev->name); /* Disable PCI interrupts */ kvaser_pci_disable_irq(dev); for (i = 0; i < board->no_channels - 1; i++) { if (board->slave_dev[i]) { dev_info(&board->pci_dev->dev, "Removing device %s\n", board->slave_dev[i]->name); unregister_sja1000dev(board->slave_dev[i]); free_sja1000dev(board->slave_dev[i]); } } unregister_sja1000dev(dev); pci_iounmap(board->pci_dev, priv->reg_base); pci_iounmap(board->pci_dev, board->conf_addr); pci_iounmap(board->pci_dev, board->res_addr); free_sja1000dev(dev); } static int kvaser_pci_add_chan(struct pci_dev *pdev, int channel, struct net_device **master_dev, void __iomem *conf_addr, void __iomem *res_addr, void __iomem *base_addr) { struct net_device *dev; struct sja1000_priv *priv; struct kvaser_pci *board; int err; dev = alloc_sja1000dev(sizeof(struct kvaser_pci)); if (dev == NULL) return -ENOMEM; priv = netdev_priv(dev); board = priv->priv; board->pci_dev = pdev; board->channel = channel; /* S5920 */ board->conf_addr = conf_addr; /* XILINX board wide address */ board->res_addr = res_addr; if (channel == 0) { board->xilinx_ver = ioread8(board->res_addr + XILINX_VERINT) >> 4; /* Assert PTADR# - we're in 
passive mode so the other bits are not important */ iowrite32(0x80808080UL, board->conf_addr + S5920_PTCR); /* Enable interrupts from card */ kvaser_pci_enable_irq(dev); } else { struct sja1000_priv *master_priv = netdev_priv(*master_dev); struct kvaser_pci *master_board = master_priv->priv; master_board->slave_dev[channel - 1] = dev; master_board->no_channels = channel + 1; board->xilinx_ver = master_board->xilinx_ver; } priv->reg_base = base_addr + channel * KVASER_PCI_PORT_BYTES; priv->read_reg = kvaser_pci_read_reg; priv->write_reg = kvaser_pci_write_reg; priv->can.clock.freq = KVASER_PCI_CAN_CLOCK; priv->ocr = KVASER_PCI_OCR; priv->cdr = KVASER_PCI_CDR; priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; dev_info(&pdev->dev, "reg_base=%p conf_addr=%p irq=%d\n", priv->reg_base, board->conf_addr, dev->irq); SET_NETDEV_DEV(dev, &pdev->dev); dev->dev_id = channel; /* Register SJA1000 device */ err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "Registering device failed (err=%d)\n", err); goto failure; } if (channel == 0) *master_dev = dev; return 0; failure: kvaser_pci_del_chan(dev); return err; } static int kvaser_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; struct net_device *master_dev = NULL; struct sja1000_priv *priv; struct kvaser_pci *board; int no_channels; void __iomem *base_addr = NULL; void __iomem *conf_addr = NULL; void __iomem *res_addr = NULL; int i; dev_info(&pdev->dev, "initializing device %04x:%04x\n", pdev->vendor, pdev->device); err = pci_enable_device(pdev); if (err) goto failure; err = pci_request_regions(pdev, DRV_NAME); if (err) goto failure_release_pci; /* S5920 */ conf_addr = pci_iomap(pdev, 0, PCI_CONFIG_PORT_SIZE); if (conf_addr == NULL) { err = -ENODEV; goto failure_release_regions; } /* XILINX board wide address */ res_addr = pci_iomap(pdev, 2, PCI_PORT_XILINX_SIZE); if (res_addr == NULL) { err = -ENOMEM; goto failure_iounmap; } base_addr = pci_iomap(pdev, 1, PCI_PORT_SIZE); if (base_addr == NULL) { err = -ENOMEM; goto failure_iounmap; } no_channels = number_of_sja1000_chip(base_addr); if (no_channels == 0) { err = -ENOMEM; goto failure_iounmap; } for (i = 0; i < no_channels; i++) { err = kvaser_pci_add_chan(pdev, i, &master_dev, conf_addr, res_addr, base_addr); if (err) goto failure_cleanup; } priv = netdev_priv(master_dev); board = priv->priv; dev_info(&pdev->dev, "xilinx version=%d number of channels=%d\n", board->xilinx_ver, board->no_channels); pci_set_drvdata(pdev, master_dev); return 0; failure_cleanup: kvaser_pci_del_chan(master_dev); failure_iounmap: if (conf_addr != NULL) pci_iounmap(pdev, conf_addr); if (res_addr != NULL) pci_iounmap(pdev, res_addr); if (base_addr != NULL) pci_iounmap(pdev, base_addr); failure_release_regions: pci_release_regions(pdev); failure_release_pci: pci_disable_device(pdev); failure: return err; } static void kvaser_pci_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); kvaser_pci_del_chan(dev); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_driver kvaser_pci_driver = { .name = DRV_NAME, .id_table = kvaser_pci_tbl, .probe = kvaser_pci_init_one, .remove = kvaser_pci_remove_one, }; module_pci_driver(kvaser_pci_driver);
linux-master
drivers/net/can/sja1000/kvaser_pci.c
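number_of_sja1000_chip() above counts controllers by writing the reset-mode bit into each candidate register window and checking whether a chip latched it. A standalone sketch of that probe idea (plain C, not kernel code); fake_bar[] is a hypothetical stand-in for the iomapped BAR, with bit 0 playing MOD_RM.

#include <stdio.h>
#include <stdint.h>

#define MOD_RM 0x01
#define MAX_NO_OF_CHANNELS 4
#define PORT_BYTES 0x20

static uint8_t fake_bar[MAX_NO_OF_CHANNELS * PORT_BYTES];

int main(void)
{
	int present = 2; /* pretend two windows have a chip behind them */
	int i;

	for (i = 0; i < MAX_NO_OF_CHANNELS; i++) {
		if (i < present)
			fake_bar[i * PORT_BYTES] = MOD_RM; /* chip latches it */
		if (!(fake_bar[i * PORT_BYTES] & MOD_RM))
			break; /* read-back failed: no controller here */
	}
	printf("%d channel(s) detected\n", i);
	return 0;
}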
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2010-2012 Stephane Grosjean <[email protected]> * * CAN driver for PEAK-System PCAN-PC Card * Derived from the PCAN project file driver/src/pcan_pccard.c * Copyright (C) 2006-2010 PEAK System-Technik GmbH */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/ds.h> #include <linux/can.h> #include <linux/can/dev.h> #include "sja1000.h" MODULE_AUTHOR("Stephane Grosjean <[email protected]>"); MODULE_DESCRIPTION("CAN driver for PEAK-System PCAN-PC Cards"); MODULE_LICENSE("GPL v2"); /* PEAK-System PCMCIA driver name */ #define PCC_NAME "peak_pcmcia" #define PCC_CHAN_MAX 2 #define PCC_CAN_CLOCK (16000000 / 2) #define PCC_MANF_ID 0x0377 #define PCC_CARD_ID 0x0001 #define PCC_CHAN_SIZE 0x20 #define PCC_CHAN_OFF(c) ((c) * PCC_CHAN_SIZE) #define PCC_COMN_OFF (PCC_CHAN_OFF(PCC_CHAN_MAX)) #define PCC_COMN_SIZE 0x40 /* common area registers */ #define PCC_CCR 0x00 #define PCC_CSR 0x02 #define PCC_CPR 0x04 #define PCC_SPI_DIR 0x06 #define PCC_SPI_DOR 0x08 #define PCC_SPI_ADR 0x0a #define PCC_SPI_IR 0x0c #define PCC_FW_MAJOR 0x10 #define PCC_FW_MINOR 0x12 /* CCR bits */ #define PCC_CCR_CLK_16 0x00 #define PCC_CCR_CLK_10 0x01 #define PCC_CCR_CLK_21 0x02 #define PCC_CCR_CLK_8 0x03 #define PCC_CCR_CLK_MASK PCC_CCR_CLK_8 #define PCC_CCR_RST_CHAN(c) (0x01 << ((c) + 2)) #define PCC_CCR_RST_ALL (PCC_CCR_RST_CHAN(0) | PCC_CCR_RST_CHAN(1)) #define PCC_CCR_RST_MASK PCC_CCR_RST_ALL /* led selection bits */ #define PCC_LED(c) (1 << (c)) #define PCC_LED_ALL (PCC_LED(0) | PCC_LED(1)) /* led state value */ #define PCC_LED_ON 0x00 #define PCC_LED_FAST 0x01 #define PCC_LED_SLOW 0x02 #define PCC_LED_OFF 0x03 #define PCC_CCR_LED_CHAN(s, c) ((s) << (((c) + 2) << 1)) #define PCC_CCR_LED_ON_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_ON, c) #define PCC_CCR_LED_FAST_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_FAST, c) #define PCC_CCR_LED_SLOW_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_SLOW, c) #define PCC_CCR_LED_OFF_CHAN(c) PCC_CCR_LED_CHAN(PCC_LED_OFF, c) #define PCC_CCR_LED_MASK_CHAN(c) PCC_CCR_LED_OFF_CHAN(c) #define PCC_CCR_LED_OFF_ALL (PCC_CCR_LED_OFF_CHAN(0) | \ PCC_CCR_LED_OFF_CHAN(1)) #define PCC_CCR_LED_MASK PCC_CCR_LED_OFF_ALL #define PCC_CCR_INIT (PCC_CCR_CLK_16 | PCC_CCR_RST_ALL | PCC_CCR_LED_OFF_ALL) /* CSR bits */ #define PCC_CSR_SPI_BUSY 0x04 /* max time to wait while the SPI engine is busy (to avoid an infinite loop) */ #define PCC_SPI_MAX_BUSY_WAIT_MS 3 /* max count of reads of the SPI status register while waiting for a change */ /* (to avoid an infinite loop) */ #define PCC_WRITE_MAX_LOOP 1000 /* max number of interrupts handled by the ISR in one shot (to avoid an infinite loop) */ #define PCC_ISR_MAX_LOOP 10 /* EEPROM chip instruction set */ /* note: EEPROM Read/Write instructions include A8 bit */ #define PCC_EEP_WRITE(a) (0x02 | (((a) & 0x100) >> 5)) #define PCC_EEP_READ(a) (0x03 | (((a) & 0x100) >> 5)) #define PCC_EEP_WRDI 0x04 /* EEPROM Write Disable */ #define PCC_EEP_RDSR 0x05 /* EEPROM Read Status Register */ #define PCC_EEP_WREN 0x06 /* EEPROM Write Enable */ /* EEPROM Status Register bits */ #define PCC_EEP_SR_WEN 0x02 /* EEPROM SR Write Enable bit */ #define PCC_EEP_SR_WIP 0x01 /* EEPROM SR Write In Progress bit */ /* * The board configuration is probably as follows: * RX1 is connected to ground. * TX1 is not connected. * CLKO is not connected. * Setting the OCR register to 0xDA is a good idea. 
* This means normal output mode, push-pull and the correct polarity. */ #define PCC_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL) /* * In the CDR register, you should set CBP to 1. * You will probably also want to set the clock divider value to 7 * (meaning direct oscillator output) because the second SJA1000 chip * is driven by the first one CLKOUT output. */ #define PCC_CDR (CDR_CBP | CDR_CLKOUT_MASK) struct pcan_channel { struct net_device *netdev; unsigned long prev_rx_bytes; unsigned long prev_tx_bytes; }; /* PCAN-PC Card private structure */ struct pcan_pccard { struct pcmcia_device *pdev; int chan_count; struct pcan_channel channel[PCC_CHAN_MAX]; u8 ccr; u8 fw_major; u8 fw_minor; void __iomem *ioport_addr; struct timer_list led_timer; }; static struct pcmcia_device_id pcan_table[] = { PCMCIA_DEVICE_MANF_CARD(PCC_MANF_ID, PCC_CARD_ID), PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, pcan_table); static void pcan_set_leds(struct pcan_pccard *card, u8 mask, u8 state); /* * start timer which controls leds state */ static void pcan_start_led_timer(struct pcan_pccard *card) { if (!timer_pending(&card->led_timer)) mod_timer(&card->led_timer, jiffies + HZ); } /* * stop the timer which controls leds state */ static void pcan_stop_led_timer(struct pcan_pccard *card) { del_timer_sync(&card->led_timer); } /* * read a sja1000 register */ static u8 pcan_read_canreg(const struct sja1000_priv *priv, int port) { return ioread8(priv->reg_base + port); } /* * write a sja1000 register */ static void pcan_write_canreg(const struct sja1000_priv *priv, int port, u8 v) { struct pcan_pccard *card = priv->priv; int c = (priv->reg_base - card->ioport_addr) / PCC_CHAN_SIZE; /* sja1000 register changes control the leds state */ if (port == SJA1000_MOD) switch (v) { case MOD_RM: /* Reset Mode: set led on */ pcan_set_leds(card, PCC_LED(c), PCC_LED_ON); break; case 0x00: /* Normal Mode: led slow blinking and start led timer */ pcan_set_leds(card, PCC_LED(c), PCC_LED_SLOW); pcan_start_led_timer(card); break; default: break; } iowrite8(v, priv->reg_base + port); } /* * read a register from the common area */ static u8 pcan_read_reg(struct pcan_pccard *card, int port) { return ioread8(card->ioport_addr + PCC_COMN_OFF + port); } /* * write a register into the common area */ static void pcan_write_reg(struct pcan_pccard *card, int port, u8 v) { /* cache ccr value */ if (port == PCC_CCR) { if (card->ccr == v) return; card->ccr = v; } iowrite8(v, card->ioport_addr + PCC_COMN_OFF + port); } /* * check whether the card is present by checking its fw version numbers * against values read at probing time. 
*/ static inline int pcan_pccard_present(struct pcan_pccard *card) { return ((pcan_read_reg(card, PCC_FW_MAJOR) == card->fw_major) && (pcan_read_reg(card, PCC_FW_MINOR) == card->fw_minor)); } /* * wait for SPI engine while it is busy */ static int pcan_wait_spi_busy(struct pcan_pccard *card) { unsigned long timeout = jiffies + msecs_to_jiffies(PCC_SPI_MAX_BUSY_WAIT_MS) + 1; /* be sure to read status at least once after sleeping */ while (pcan_read_reg(card, PCC_CSR) & PCC_CSR_SPI_BUSY) { if (time_after(jiffies, timeout)) return -EBUSY; schedule(); } return 0; } /* * write data in device eeprom */ static int pcan_write_eeprom(struct pcan_pccard *card, u16 addr, u8 v) { u8 status; int err, i; /* write instruction enabling write */ pcan_write_reg(card, PCC_SPI_IR, PCC_EEP_WREN); err = pcan_wait_spi_busy(card); if (err) goto we_spi_err; /* wait until write enabled */ for (i = 0; i < PCC_WRITE_MAX_LOOP; i++) { /* write instruction reading the status register */ pcan_write_reg(card, PCC_SPI_IR, PCC_EEP_RDSR); err = pcan_wait_spi_busy(card); if (err) goto we_spi_err; /* get status register value and check write enable bit */ status = pcan_read_reg(card, PCC_SPI_DIR); if (status & PCC_EEP_SR_WEN) break; } if (i >= PCC_WRITE_MAX_LOOP) { dev_err(&card->pdev->dev, "stop waiting to be allowed to write in eeprom\n"); return -EIO; } /* set address and data */ pcan_write_reg(card, PCC_SPI_ADR, addr & 0xff); pcan_write_reg(card, PCC_SPI_DOR, v); /* * write instruction with bit[3] set according to address value: * if addr refers to upper half of the memory array: bit[3] = 1 */ pcan_write_reg(card, PCC_SPI_IR, PCC_EEP_WRITE(addr)); err = pcan_wait_spi_busy(card); if (err) goto we_spi_err; /* wait while write in progress */ for (i = 0; i < PCC_WRITE_MAX_LOOP; i++) { /* write instruction reading the status register */ pcan_write_reg(card, PCC_SPI_IR, PCC_EEP_RDSR); err = pcan_wait_spi_busy(card); if (err) goto we_spi_err; /* get status register value and check write in progress bit */ status = pcan_read_reg(card, PCC_SPI_DIR); if (!(status & PCC_EEP_SR_WIP)) break; } if (i >= PCC_WRITE_MAX_LOOP) { dev_err(&card->pdev->dev, "stop waiting for write in eeprom to complete\n"); return -EIO; } /* write instruction disabling write */ pcan_write_reg(card, PCC_SPI_IR, PCC_EEP_WRDI); err = pcan_wait_spi_busy(card); if (err) goto we_spi_err; return 0; we_spi_err: dev_err(&card->pdev->dev, "stop waiting (spi engine always busy) err %d\n", err); return err; } static void pcan_set_leds(struct pcan_pccard *card, u8 led_mask, u8 state) { u8 ccr = card->ccr; int i; for (i = 0; i < card->chan_count; i++) if (led_mask & PCC_LED(i)) { /* clear corresponding led bits in ccr */ ccr &= ~PCC_CCR_LED_MASK_CHAN(i); /* then set new bits */ ccr |= PCC_CCR_LED_CHAN(state, i); } /* real write only if something has changed in ccr */ pcan_write_reg(card, PCC_CCR, ccr); } /* * enable/disable CAN connectors power */ static inline void pcan_set_can_power(struct pcan_pccard *card, int onoff) { int err; err = pcan_write_eeprom(card, 0, !!onoff); if (err) dev_err(&card->pdev->dev, "failed setting power %s to can connectors (err %d)\n", (onoff) ? 
"on" : "off", err); } /* * set leds state according to channel activity */ static void pcan_led_timer(struct timer_list *t) { struct pcan_pccard *card = from_timer(card, t, led_timer); struct net_device *netdev; int i, up_count = 0; u8 ccr; ccr = card->ccr; for (i = 0; i < card->chan_count; i++) { /* default is: not configured */ ccr &= ~PCC_CCR_LED_MASK_CHAN(i); ccr |= PCC_CCR_LED_ON_CHAN(i); netdev = card->channel[i].netdev; if (!netdev || !(netdev->flags & IFF_UP)) continue; up_count++; /* no activity (but configured) */ ccr &= ~PCC_CCR_LED_MASK_CHAN(i); ccr |= PCC_CCR_LED_SLOW_CHAN(i); /* if bytes counters changed, set fast blinking led */ if (netdev->stats.rx_bytes != card->channel[i].prev_rx_bytes) { card->channel[i].prev_rx_bytes = netdev->stats.rx_bytes; ccr &= ~PCC_CCR_LED_MASK_CHAN(i); ccr |= PCC_CCR_LED_FAST_CHAN(i); } if (netdev->stats.tx_bytes != card->channel[i].prev_tx_bytes) { card->channel[i].prev_tx_bytes = netdev->stats.tx_bytes; ccr &= ~PCC_CCR_LED_MASK_CHAN(i); ccr |= PCC_CCR_LED_FAST_CHAN(i); } } /* write the new leds state */ pcan_write_reg(card, PCC_CCR, ccr); /* restart timer (except if no more configured channels) */ if (up_count) mod_timer(&card->led_timer, jiffies + HZ); } /* * interrupt service routine */ static irqreturn_t pcan_isr(int irq, void *dev_id) { struct pcan_pccard *card = dev_id; int irq_handled; /* prevent from infinite loop */ for (irq_handled = 0; irq_handled < PCC_ISR_MAX_LOOP; irq_handled++) { /* handle shared interrupt and next loop */ int nothing_to_handle = 1; int i; /* check interrupt for each channel */ for (i = 0; i < card->chan_count; i++) { struct net_device *netdev; /* * check whether the card is present before calling * sja1000_interrupt() to speed up hotplug detection */ if (!pcan_pccard_present(card)) { /* card unplugged during isr */ return IRQ_NONE; } /* * should check whether all or SJA1000_MAX_IRQ * interrupts have been handled: loop again to be sure. */ netdev = card->channel[i].netdev; if (netdev && sja1000_interrupt(irq, netdev) == IRQ_HANDLED) nothing_to_handle = 0; } if (nothing_to_handle) break; } return (irq_handled) ? 
IRQ_HANDLED : IRQ_NONE; } /* * free all resources used by the channels and switch off leds and can power */ static void pcan_free_channels(struct pcan_pccard *card) { int i; u8 led_mask = 0; for (i = 0; i < card->chan_count; i++) { struct net_device *netdev; char name[IFNAMSIZ]; led_mask |= PCC_LED(i); netdev = card->channel[i].netdev; if (!netdev) continue; strscpy(name, netdev->name, IFNAMSIZ); unregister_sja1000dev(netdev); free_sja1000dev(netdev); dev_info(&card->pdev->dev, "%s removed\n", name); } /* do it only if device not removed */ if (pcan_pccard_present(card)) { pcan_set_leds(card, led_mask, PCC_LED_OFF); pcan_set_can_power(card, 0); } } /* * check if a CAN controller is present at the specified location */ static inline int pcan_channel_present(struct sja1000_priv *priv) { /* make sure SJA1000 is in reset mode */ pcan_write_canreg(priv, SJA1000_MOD, 1); pcan_write_canreg(priv, SJA1000_CDR, CDR_PELICAN); /* read reset-values */ if (pcan_read_canreg(priv, SJA1000_CDR) == CDR_PELICAN) return 1; return 0; } static int pcan_add_channels(struct pcan_pccard *card) { struct pcmcia_device *pdev = card->pdev; int i, err = 0; u8 ccr = PCC_CCR_INIT; /* init common registers (reset channels and leds off) */ card->ccr = ~ccr; pcan_write_reg(card, PCC_CCR, ccr); /* wait 2ms before unresetting channels */ usleep_range(2000, 3000); ccr &= ~PCC_CCR_RST_ALL; pcan_write_reg(card, PCC_CCR, ccr); /* create one network device per channel detected */ for (i = 0; i < ARRAY_SIZE(card->channel); i++) { struct net_device *netdev; struct sja1000_priv *priv; netdev = alloc_sja1000dev(0); if (!netdev) { err = -ENOMEM; break; } /* update linkages */ priv = netdev_priv(netdev); priv->priv = card; SET_NETDEV_DEV(netdev, &pdev->dev); netdev->dev_id = i; priv->irq_flags = IRQF_SHARED; netdev->irq = pdev->irq; priv->reg_base = card->ioport_addr + PCC_CHAN_OFF(i); /* check if channel is present */ if (!pcan_channel_present(priv)) { dev_err(&pdev->dev, "channel %d not present\n", i); free_sja1000dev(netdev); continue; } priv->read_reg = pcan_read_canreg; priv->write_reg = pcan_write_canreg; priv->can.clock.freq = PCC_CAN_CLOCK; priv->ocr = PCC_OCR; priv->cdr = PCC_CDR; /* Neither a slave device distributes the clock */ if (i > 0) priv->cdr |= CDR_CLK_OFF; priv->flags |= SJA1000_CUSTOM_IRQ_HANDLER; /* register SJA1000 device */ err = register_sja1000dev(netdev); if (err) { free_sja1000dev(netdev); continue; } card->channel[i].netdev = netdev; card->chan_count++; /* set corresponding led on in the new ccr */ ccr &= ~PCC_CCR_LED_OFF_CHAN(i); dev_info(&pdev->dev, "%s on channel %d at 0x%p irq %d\n", netdev->name, i, priv->reg_base, pdev->irq); } /* write new ccr (change leds state) */ pcan_write_reg(card, PCC_CCR, ccr); return err; } static int pcan_conf_check(struct pcmcia_device *pdev, void *priv_data) { pdev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH; pdev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; /* only */ pdev->io_lines = 10; /* This reserves IO space but doesn't actually enable it */ return pcmcia_request_io(pdev); } /* * free all resources used by the device */ static void pcan_free(struct pcmcia_device *pdev) { struct pcan_pccard *card = pdev->priv; if (!card) return; free_irq(pdev->irq, card); pcan_stop_led_timer(card); pcan_free_channels(card); ioport_unmap(card->ioport_addr); kfree(card); pdev->priv = NULL; } /* * setup PCMCIA socket and probe for PEAK-System PC-CARD */ static int pcan_probe(struct pcmcia_device *pdev) { struct pcan_pccard *card; int err; pdev->config_flags |= CONF_ENABLE_IRQ | 
CONF_AUTO_SET_IO; err = pcmcia_loop_config(pdev, pcan_conf_check, NULL); if (err) { dev_err(&pdev->dev, "pcmcia_loop_config() error %d\n", err); goto probe_err_1; } if (!pdev->irq) { dev_err(&pdev->dev, "no irq assigned\n"); err = -ENODEV; goto probe_err_1; } err = pcmcia_enable_device(pdev); if (err) { dev_err(&pdev->dev, "pcmcia_enable_device failed err=%d\n", err); goto probe_err_1; } card = kzalloc(sizeof(struct pcan_pccard), GFP_KERNEL); if (!card) { err = -ENOMEM; goto probe_err_2; } card->pdev = pdev; pdev->priv = card; /* sja1000 api uses iomem */ card->ioport_addr = ioport_map(pdev->resource[0]->start, resource_size(pdev->resource[0])); if (!card->ioport_addr) { dev_err(&pdev->dev, "couldn't map io port into io memory\n"); err = -ENOMEM; goto probe_err_3; } card->fw_major = pcan_read_reg(card, PCC_FW_MAJOR); card->fw_minor = pcan_read_reg(card, PCC_FW_MINOR); /* display board name and firmware version */ dev_info(&pdev->dev, "PEAK-System pcmcia card %s fw %d.%d\n", pdev->prod_id[1] ? pdev->prod_id[1] : "PCAN-PC Card", card->fw_major, card->fw_minor); /* detect available channels */ pcan_add_channels(card); if (!card->chan_count) { err = -ENOMEM; goto probe_err_4; } /* init the timer which controls the leds */ timer_setup(&card->led_timer, pcan_led_timer, 0); /* request the given irq */ err = request_irq(pdev->irq, &pcan_isr, IRQF_SHARED, PCC_NAME, card); if (err) { dev_err(&pdev->dev, "couldn't request irq%d\n", pdev->irq); goto probe_err_5; } /* power on the connectors */ pcan_set_can_power(card, 1); return 0; probe_err_5: /* unregister can devices from network */ pcan_free_channels(card); probe_err_4: ioport_unmap(card->ioport_addr); probe_err_3: kfree(card); pdev->priv = NULL; probe_err_2: pcmcia_disable_device(pdev); probe_err_1: return err; } /* * release claimed resources */ static void pcan_remove(struct pcmcia_device *pdev) { pcan_free(pdev); pcmcia_disable_device(pdev); } static struct pcmcia_driver pcan_driver = { .name = PCC_NAME, .probe = pcan_probe, .remove = pcan_remove, .id_table = pcan_table, }; module_pcmcia_driver(pcan_driver);
linux-master
drivers/net/can/sja1000/peak_pcmcia.c
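pcan_write_eeprom() above bounds every status poll with PCC_WRITE_MAX_LOOP rather than waiting forever on the SPI engine. Here is the core wait-until-WIP-clears loop as a standalone sketch (plain C, not kernel code); read_status() is a hypothetical stand-in for issuing PCC_EEP_RDSR and reading back PCC_SPI_DIR.

#include <stdio.h>

#define PCC_EEP_SR_WIP 0x01
#define PCC_WRITE_MAX_LOOP 1000

static int polls; /* fake hardware: WIP stays set for the first two reads */

/* hypothetical stand-in for the RDSR instruction + data-in register read */
static int read_status(void)
{
	return (++polls < 3) ? PCC_EEP_SR_WIP : 0;
}

int main(void)
{
	int i;

	/* ...WREN sent, address/data loaded, WRITE instruction issued... */
	for (i = 0; i < PCC_WRITE_MAX_LOOP; i++)
		if (!(read_status() & PCC_EEP_SR_WIP))
			break; /* write cycle finished */

	if (i >= PCC_WRITE_MAX_LOOP)
		printf("timeout: WIP never cleared\n");
	else
		printf("write completed after %d status read(s)\n", i + 1);
	return 0;
}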
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2005 Sascha Hauer, Pengutronix * Copyright (C) 2007 Wolfgang Grandegger <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/platform_device.h> #include <linux/irq.h> #include <linux/can/dev.h> #include <linux/can/platform/sja1000.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/of.h> #include "sja1000.h" #define DRV_NAME "sja1000_platform" #define SP_CAN_CLOCK (16000000 / 2) MODULE_AUTHOR("Sascha Hauer <[email protected]>"); MODULE_AUTHOR("Wolfgang Grandegger <[email protected]>"); MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the platform bus"); MODULE_ALIAS("platform:" DRV_NAME); MODULE_LICENSE("GPL v2"); struct sja1000_of_data { size_t priv_sz; void (*init)(struct sja1000_priv *priv, struct device_node *of); }; struct technologic_priv { spinlock_t io_lock; }; static u8 sp_read_reg8(const struct sja1000_priv *priv, int reg) { return ioread8(priv->reg_base + reg); } static void sp_write_reg8(const struct sja1000_priv *priv, int reg, u8 val) { iowrite8(val, priv->reg_base + reg); } static u8 sp_read_reg16(const struct sja1000_priv *priv, int reg) { return ioread8(priv->reg_base + reg * 2); } static void sp_write_reg16(const struct sja1000_priv *priv, int reg, u8 val) { iowrite8(val, priv->reg_base + reg * 2); } static u8 sp_read_reg32(const struct sja1000_priv *priv, int reg) { return ioread8(priv->reg_base + reg * 4); } static void sp_write_reg32(const struct sja1000_priv *priv, int reg, u8 val) { iowrite8(val, priv->reg_base + reg * 4); } static u8 sp_technologic_read_reg16(const struct sja1000_priv *priv, int reg) { struct technologic_priv *tp = priv->priv; unsigned long flags; u8 val; spin_lock_irqsave(&tp->io_lock, flags); iowrite16(reg, priv->reg_base + 0); val = ioread16(priv->reg_base + 2); spin_unlock_irqrestore(&tp->io_lock, flags); return val; } static void sp_technologic_write_reg16(const struct sja1000_priv *priv, int reg, u8 val) { struct technologic_priv *tp = priv->priv; unsigned long flags; spin_lock_irqsave(&tp->io_lock, flags); iowrite16(reg, priv->reg_base + 0); iowrite16(val, priv->reg_base + 2); spin_unlock_irqrestore(&tp->io_lock, flags); } static void sp_technologic_init(struct sja1000_priv *priv, struct device_node *of) { struct technologic_priv *tp = priv->priv; priv->read_reg = sp_technologic_read_reg16; priv->write_reg = sp_technologic_write_reg16; spin_lock_init(&tp->io_lock); } static void sp_rzn1_init(struct sja1000_priv *priv, struct device_node *of) { priv->flags = SJA1000_QUIRK_NO_CDR_REG | SJA1000_QUIRK_RESET_ON_OVERRUN; } static void sp_populate(struct sja1000_priv *priv, struct sja1000_platform_data *pdata, unsigned long resource_mem_flags) { /* The CAN clock frequency is half the oscillator clock frequency */ priv->can.clock.freq = pdata->osc_freq / 2; priv->ocr = pdata->ocr; priv->cdr = pdata->cdr; switch (resource_mem_flags & IORESOURCE_MEM_TYPE_MASK) { case IORESOURCE_MEM_32BIT: priv->read_reg = sp_read_reg32; priv->write_reg = sp_write_reg32; break; case IORESOURCE_MEM_16BIT: priv->read_reg = sp_read_reg16; priv->write_reg = sp_write_reg16; break; case IORESOURCE_MEM_8BIT: default: priv->read_reg = sp_read_reg8; priv->write_reg = sp_write_reg8; break; } } static void sp_populate_of(struct sja1000_priv *priv, struct device_node *of) { int err; u32 prop; err = of_property_read_u32(of, "reg-io-width", &prop); if (err) prop = 1; 
/* 8 bit is default */ switch (prop) { case 4: priv->read_reg = sp_read_reg32; priv->write_reg = sp_write_reg32; break; case 2: priv->read_reg = sp_read_reg16; priv->write_reg = sp_write_reg16; break; case 1: default: priv->read_reg = sp_read_reg8; priv->write_reg = sp_write_reg8; } if (!priv->can.clock.freq) { err = of_property_read_u32(of, "nxp,external-clock-frequency", &prop); if (!err) priv->can.clock.freq = prop / 2; else priv->can.clock.freq = SP_CAN_CLOCK; /* default */ } err = of_property_read_u32(of, "nxp,tx-output-mode", &prop); if (!err) priv->ocr |= prop & OCR_MODE_MASK; else priv->ocr |= OCR_MODE_NORMAL; /* default */ err = of_property_read_u32(of, "nxp,tx-output-config", &prop); if (!err) priv->ocr |= (prop << OCR_TX_SHIFT) & OCR_TX_MASK; else priv->ocr |= OCR_TX0_PULLDOWN; /* default */ err = of_property_read_u32(of, "nxp,clock-out-frequency", &prop); if (!err && prop) { u32 divider = priv->can.clock.freq * 2 / prop; if (divider > 1) priv->cdr |= divider / 2 - 1; else priv->cdr |= CDR_CLKOUT_MASK; } else { priv->cdr |= CDR_CLK_OFF; /* default */ } if (!of_property_read_bool(of, "nxp,no-comparator-bypass")) priv->cdr |= CDR_CBP; /* default */ } static struct sja1000_of_data technologic_data = { .priv_sz = sizeof(struct technologic_priv), .init = sp_technologic_init, }; static struct sja1000_of_data renesas_data = { .init = sp_rzn1_init, }; static const struct of_device_id sp_of_table[] = { { .compatible = "nxp,sja1000", .data = NULL, }, { .compatible = "renesas,rzn1-sja1000", .data = &renesas_data, }, { .compatible = "technologic,sja1000", .data = &technologic_data, }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(of, sp_of_table); static int sp_probe(struct platform_device *pdev) { int err, irq = 0; void __iomem *addr; struct net_device *dev; struct sja1000_priv *priv; struct resource *res_mem, *res_irq = NULL; struct sja1000_platform_data *pdata; struct device_node *of = pdev->dev.of_node; const struct sja1000_of_data *of_data = NULL; size_t priv_sz = 0; struct clk *clk; pdata = dev_get_platdata(&pdev->dev); if (!pdata && !of) { dev_err(&pdev->dev, "No platform data provided!\n"); return -ENODEV; } res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res_mem) return -ENODEV; if (!devm_request_mem_region(&pdev->dev, res_mem->start, resource_size(res_mem), DRV_NAME)) return -EBUSY; addr = devm_ioremap(&pdev->dev, res_mem->start, resource_size(res_mem)); if (!addr) return -ENOMEM; if (of) { irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; clk = devm_clk_get_optional_enabled(&pdev->dev, NULL); if (IS_ERR(clk)) return dev_err_probe(&pdev->dev, PTR_ERR(clk), "CAN clk operation failed"); } else { res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!res_irq) return -ENODEV; } of_data = device_get_match_data(&pdev->dev); if (of_data) priv_sz = of_data->priv_sz; dev = alloc_sja1000dev(priv_sz); if (!dev) return -ENOMEM; priv = netdev_priv(dev); if (res_irq) { irq = res_irq->start; priv->irq_flags = res_irq->flags & IRQF_TRIGGER_MASK; if (res_irq->flags & IORESOURCE_IRQ_SHAREABLE) priv->irq_flags |= IRQF_SHARED; } else { priv->irq_flags = IRQF_SHARED; } if (priv->flags & SJA1000_QUIRK_RESET_ON_OVERRUN) priv->irq_flags |= IRQF_ONESHOT; dev->irq = irq; priv->reg_base = addr; if (of) { if (clk) { priv->can.clock.freq = clk_get_rate(clk) / 2; if (!priv->can.clock.freq) { err = -EINVAL; dev_err(&pdev->dev, "Zero CAN clk rate"); goto exit_free; } } sp_populate_of(priv, of); if (of_data && of_data->init) of_data->init(priv, of); } else { sp_populate(priv, pdata, 
res_mem->flags); } platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); goto exit_free; } dev_info(&pdev->dev, "%s device registered (reg_base=%p, irq=%d)\n", DRV_NAME, priv->reg_base, dev->irq); return 0; exit_free: free_sja1000dev(dev); return err; } static void sp_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); unregister_sja1000dev(dev); free_sja1000dev(dev); } static struct platform_driver sp_driver = { .probe = sp_probe, .remove_new = sp_remove, .driver = { .name = DRV_NAME, .of_match_table = sp_of_table, }, }; module_platform_driver(sp_driver);
linux-master
drivers/net/can/sja1000/sja1000_platform.c
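The nxp,clock-out-frequency handling in sp_populate_of() above derives the CDR clock-divider field from the oscillator frequency (twice the CAN clock) and the requested CLKOUT rate. A worked standalone example (plain C, not kernel code); the 16 MHz oscillator and 2 MHz CLKOUT figures are illustrative.

#include <stdio.h>

#define CDR_CLKOUT_MASK 0x07 /* CD field; 0x07 = direct oscillator output */

int main(void)
{
	unsigned long can_clk = 8000000; /* 16 MHz oscillator / 2 */
	unsigned long clkout = 2000000;  /* requested CLKOUT rate */
	unsigned char cdr = 0;
	unsigned long divider = can_clk * 2 / clkout; /* osc / clkout = 8 */

	if (divider > 1)
		cdr |= divider / 2 - 1; /* CD encodes osc / (2 * (CD + 1)) */
	else
		cdr |= CDR_CLKOUT_MASK; /* divider 1: CLKOUT = oscillator */

	/* prints 0x03: 16 MHz / (2 * (3 + 1)) = 2 MHz */
	printf("CDR divider bits = 0x%02x\n", cdr & CDR_CLKOUT_MASK);
	return 0;
}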
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007, 2011 Wolfgang Grandegger <[email protected]> * Copyright (C) 2012 Stephane Grosjean <[email protected]> * * Derived from the PCAN project file driver/src/pcan_pci.c: * * Copyright (C) 2001-2006 PEAK System-Technik GmbH */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include <linux/can.h> #include <linux/can/dev.h> #include "sja1000.h" MODULE_AUTHOR("Stephane Grosjean <[email protected]>"); MODULE_DESCRIPTION("Socket-CAN driver for PEAK PCAN PCI family cards"); MODULE_LICENSE("GPL v2"); #define DRV_NAME "peak_pci" /* FPGA cards FW version registers */ #define PEAK_VER_REG1 0x40 #define PEAK_VER_REG2 0x44 struct peak_pciec_card; struct peak_pci_chan { void __iomem *cfg_base; /* Common for all channels */ struct net_device *prev_dev; /* Chain of network devices */ u16 icr_mask; /* Interrupt mask for fast ack */ struct peak_pciec_card *pciec_card; /* only for PCIeC LEDs */ }; #define PEAK_PCI_CAN_CLOCK (16000000 / 2) #define PEAK_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK) #define PEAK_PCI_OCR OCR_TX0_PUSHPULL /* Important PITA registers */ #define PITA_ICR 0x00 /* Interrupt control register */ #define PITA_GPIOICR 0x18 /* GPIO interface control register */ #define PITA_MISC 0x1C /* Miscellaneous register */ #define PEAK_PCI_CFG_SIZE 0x1000 /* Size of the config PCI bar */ #define PEAK_PCI_CHAN_SIZE 0x0400 /* Size used by the channel */ #define PEAK_PCI_VENDOR_ID 0x001C /* The PCI device and vendor IDs */ #define PEAK_PCI_DEVICE_ID 0x0001 /* for PCI/PCIe slot cards */ #define PEAK_PCIEC_DEVICE_ID 0x0002 /* for ExpressCard slot cards */ #define PEAK_PCIE_DEVICE_ID 0x0003 /* for nextgen PCIe slot cards */ #define PEAK_CPCI_DEVICE_ID 0x0004 /* for nextgen cPCI slot cards */ #define PEAK_MPCI_DEVICE_ID 0x0005 /* for nextgen miniPCI slot cards */ #define PEAK_PC_104P_DEVICE_ID 0x0006 /* PCAN-PC/104+ cards */ #define PEAK_PCI_104E_DEVICE_ID 0x0007 /* PCAN-PCI/104 Express cards */ #define PEAK_MPCIE_DEVICE_ID 0x0008 /* The miniPCIe slot cards */ #define PEAK_PCIE_OEM_ID 0x0009 /* PCAN-PCI Express OEM */ #define PEAK_PCIEC34_DEVICE_ID 0x000A /* PCAN-PCI Express 34 (one channel) */ #define PEAK_PCI_CHAN_MAX 4 static const u16 peak_pci_icr_masks[PEAK_PCI_CHAN_MAX] = { 0x02, 0x01, 0x40, 0x80 }; static const struct pci_device_id peak_pci_tbl[] = { { PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, .driver_data = (kernel_ulong_t)"PCAN-PCI", }, { PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, .driver_data = (kernel_ulong_t)"PCAN-PCI Express", }, { PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, .driver_data = (kernel_ulong_t)"PCAN-miniPCI", }, { PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, .driver_data = (kernel_ulong_t)"PCAN-miniPCIe", }, { PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, .driver_data = (kernel_ulong_t)"PCAN-PC/104-Plus Quad", }, { PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, .driver_data = (kernel_ulong_t)"PCAN-PCI/104-Express", }, { PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, .driver_data = (kernel_ulong_t)"PCAN-cPCI", }, { PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID, .driver_data = (kernel_ulong_t)"PCAN-Chip PCIe", }, #ifdef CONFIG_CAN_PEAK_PCIEC { PEAK_PCI_VENDOR_ID, PEAK_PCIEC_DEVICE_ID, 
PCI_ANY_ID, PCI_ANY_ID, .driver_data = (kernel_ulong_t)"PCAN-ExpressCard", }, { PEAK_PCI_VENDOR_ID, PEAK_PCIEC34_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, .driver_data = (kernel_ulong_t)"PCAN-ExpressCard 34", }, #endif { /* sentinel */ } }; MODULE_DEVICE_TABLE(pci, peak_pci_tbl); #ifdef CONFIG_CAN_PEAK_PCIEC /* PCAN-ExpressCard needs I2C bit-banging configuration option. */ /* GPIOICR byte access offsets */ #define PITA_GPOUT 0x18 /* GPx output value */ #define PITA_GPIN 0x19 /* GPx input value */ #define PITA_GPOEN 0x1A /* configure GPx as output pin */ /* I2C GP bits */ #define PITA_GPIN_SCL 0x01 /* Serial Clock Line */ #define PITA_GPIN_SDA 0x04 /* Serial DAta line */ #define PCA9553_1_SLAVEADDR (0xC4 >> 1) /* PCA9553 LS0 fields values */ enum { PCA9553_LOW, PCA9553_HIGHZ, PCA9553_PWM0, PCA9553_PWM1 }; /* LEDs control */ #define PCA9553_ON PCA9553_LOW #define PCA9553_OFF PCA9553_HIGHZ #define PCA9553_SLOW PCA9553_PWM0 #define PCA9553_FAST PCA9553_PWM1 #define PCA9553_LED(c) (1 << (c)) #define PCA9553_LED_STATE(s, c) ((s) << ((c) << 1)) #define PCA9553_LED_ON(c) PCA9553_LED_STATE(PCA9553_ON, c) #define PCA9553_LED_OFF(c) PCA9553_LED_STATE(PCA9553_OFF, c) #define PCA9553_LED_SLOW(c) PCA9553_LED_STATE(PCA9553_SLOW, c) #define PCA9553_LED_FAST(c) PCA9553_LED_STATE(PCA9553_FAST, c) #define PCA9553_LED_MASK(c) PCA9553_LED_STATE(0x03, c) #define PCA9553_LED_OFF_ALL (PCA9553_LED_OFF(0) | PCA9553_LED_OFF(1)) #define PCA9553_LS0_INIT 0x40 /* initial value (!= from 0x00) */ struct peak_pciec_chan { struct net_device *netdev; unsigned long prev_rx_bytes; unsigned long prev_tx_bytes; }; struct peak_pciec_card { void __iomem *cfg_base; /* Common for all channels */ void __iomem *reg_base; /* first channel base address */ u8 led_cache; /* leds state cache */ /* PCIExpressCard i2c data */ struct i2c_algo_bit_data i2c_bit; struct i2c_adapter led_chip; struct delayed_work led_work; /* led delayed work */ int chan_count; struct peak_pciec_chan channel[PEAK_PCI_CHAN_MAX]; }; /* "normal" pci register write callback is overloaded for leds control */ static void peak_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val); static inline void pita_set_scl_highz(struct peak_pciec_card *card) { u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SCL; writeb(gp_outen, card->cfg_base + PITA_GPOEN); } static inline void pita_set_sda_highz(struct peak_pciec_card *card) { u8 gp_outen = readb(card->cfg_base + PITA_GPOEN) & ~PITA_GPIN_SDA; writeb(gp_outen, card->cfg_base + PITA_GPOEN); } static void peak_pciec_init_pita_gpio(struct peak_pciec_card *card) { /* raise SCL & SDA GPIOs to high-Z */ pita_set_scl_highz(card); pita_set_sda_highz(card); } static void pita_setsda(void *data, int state) { struct peak_pciec_card *card = (struct peak_pciec_card *)data; u8 gp_out, gp_outen; /* set output sda always to 0 */ gp_out = readb(card->cfg_base + PITA_GPOUT) & ~PITA_GPIN_SDA; writeb(gp_out, card->cfg_base + PITA_GPOUT); /* control output sda with GPOEN */ gp_outen = readb(card->cfg_base + PITA_GPOEN); if (state) gp_outen &= ~PITA_GPIN_SDA; else gp_outen |= PITA_GPIN_SDA; writeb(gp_outen, card->cfg_base + PITA_GPOEN); } static void pita_setscl(void *data, int state) { struct peak_pciec_card *card = (struct peak_pciec_card *)data; u8 gp_out, gp_outen; /* set output scl always to 0 */ gp_out = readb(card->cfg_base + PITA_GPOUT) & ~PITA_GPIN_SCL; writeb(gp_out, card->cfg_base + PITA_GPOUT); /* control output scl with GPOEN */ gp_outen = readb(card->cfg_base + PITA_GPOEN); if (state) gp_outen &= ~PITA_GPIN_SCL; else 
gp_outen |= PITA_GPIN_SCL; writeb(gp_outen, card->cfg_base + PITA_GPOEN); } static int pita_getsda(void *data) { struct peak_pciec_card *card = (struct peak_pciec_card *)data; /* set tristate */ pita_set_sda_highz(card); return (readb(card->cfg_base + PITA_GPIN) & PITA_GPIN_SDA) ? 1 : 0; } static int pita_getscl(void *data) { struct peak_pciec_card *card = (struct peak_pciec_card *)data; /* set tristate */ pita_set_scl_highz(card); return (readb(card->cfg_base + PITA_GPIN) & PITA_GPIN_SCL) ? 1 : 0; } /* write commands to the LED chip through the I2C-bus of the PCAN-PCIeC */ static int peak_pciec_write_pca9553(struct peak_pciec_card *card, u8 offset, u8 data) { u8 buffer[2] = { offset, data }; struct i2c_msg msg = { .addr = PCA9553_1_SLAVEADDR, .len = 2, .buf = buffer, }; int ret; /* cache led mask */ if (offset == 5 && data == card->led_cache) return 0; ret = i2c_transfer(&card->led_chip, &msg, 1); if (ret < 0) return ret; if (offset == 5) card->led_cache = data; return 0; } /* delayed work callback used to control the LEDs */ static void peak_pciec_led_work(struct work_struct *work) { struct peak_pciec_card *card = container_of(work, struct peak_pciec_card, led_work.work); struct net_device *netdev; u8 new_led = card->led_cache; int i, up_count = 0; /* first check what needs to be done */ for (i = 0; i < card->chan_count; i++) { /* default is: not configured */ new_led &= ~PCA9553_LED_MASK(i); new_led |= PCA9553_LED_ON(i); netdev = card->channel[i].netdev; if (!netdev || !(netdev->flags & IFF_UP)) continue; up_count++; /* no activity (but configured) */ new_led &= ~PCA9553_LED_MASK(i); new_led |= PCA9553_LED_SLOW(i); /* if the byte counters changed, set fast blinking led */ if (netdev->stats.rx_bytes != card->channel[i].prev_rx_bytes) { card->channel[i].prev_rx_bytes = netdev->stats.rx_bytes; new_led &= ~PCA9553_LED_MASK(i); new_led |= PCA9553_LED_FAST(i); } if (netdev->stats.tx_bytes != card->channel[i].prev_tx_bytes) { card->channel[i].prev_tx_bytes = netdev->stats.tx_bytes; new_led &= ~PCA9553_LED_MASK(i); new_led |= PCA9553_LED_FAST(i); } } /* check if LS0 settings changed, only update i2c if so */ peak_pciec_write_pca9553(card, 5, new_led); /* restart timer (except if no more configured channels) */ if (up_count) schedule_delayed_work(&card->led_work, HZ); } /* set LEDs blinking state */ static void peak_pciec_set_leds(struct peak_pciec_card *card, u8 led_mask, u8 s) { u8 new_led = card->led_cache; int i; /* first check what needs to be done */ for (i = 0; i < card->chan_count; i++) if (led_mask & PCA9553_LED(i)) { new_led &= ~PCA9553_LED_MASK(i); new_led |= PCA9553_LED_STATE(s, i); } /* check if LS0 settings changed, only update i2c if so */ peak_pciec_write_pca9553(card, 5, new_led); } /* start one second delayed work to control LEDs */ static void peak_pciec_start_led_work(struct peak_pciec_card *card) { schedule_delayed_work(&card->led_work, HZ); } /* stop LEDs delayed work */ static void peak_pciec_stop_led_work(struct peak_pciec_card *card) { cancel_delayed_work_sync(&card->led_work); } /* initialize the PCA9553 4-bit I2C-bus LED chip */ static int peak_pciec_init_leds(struct peak_pciec_card *card) { int err; /* prescaler for frequency 0: "SLOW" = 1 Hz = "44" */ err = peak_pciec_write_pca9553(card, 1, 44 / 1); if (err) return err; /* duty cycle 0: 50% */ err = peak_pciec_write_pca9553(card, 2, 0x80); if (err) return err; /* prescaler for frequency 1: "FAST" = 5 Hz */ err = peak_pciec_write_pca9553(card, 3, 44 / 5); if (err) return err; /* duty cycle 1: 50% */ err = peak_pciec_write_pca9553(card, 
4, 0x80); if (err) return err; /* switch LEDs to initial state */ return peak_pciec_write_pca9553(card, 5, PCA9553_LS0_INIT); } /* restore LEDs state to off */ static void peak_pciec_leds_exit(struct peak_pciec_card *card) { /* switch LEDs to off */ peak_pciec_write_pca9553(card, 5, PCA9553_LED_OFF_ALL); } /* normal sja1000 register write method, overloaded to catch when the * controller is started or stopped, in order to control the leds */ static void peak_pciec_write_reg(const struct sja1000_priv *priv, int port, u8 val) { struct peak_pci_chan *chan = priv->priv; struct peak_pciec_card *card = chan->pciec_card; int c = (priv->reg_base - card->reg_base) / PEAK_PCI_CHAN_SIZE; /* sja1000 register changes control the leds state */ if (port == SJA1000_MOD) switch (val) { case MOD_RM: /* Reset Mode: set led on */ peak_pciec_set_leds(card, PCA9553_LED(c), PCA9553_ON); break; case 0x00: /* Normal Mode: led slow blinking and start led timer */ peak_pciec_set_leds(card, PCA9553_LED(c), PCA9553_SLOW); peak_pciec_start_led_work(card); break; default: break; } /* call base function */ peak_pci_write_reg(priv, port, val); } static const struct i2c_algo_bit_data peak_pciec_i2c_bit_ops = { .setsda = pita_setsda, .setscl = pita_setscl, .getsda = pita_getsda, .getscl = pita_getscl, .udelay = 10, .timeout = HZ, }; static int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) { struct sja1000_priv *priv = netdev_priv(dev); struct peak_pci_chan *chan = priv->priv; struct peak_pciec_card *card; int err; /* copy i2c object address from 1st channel */ if (chan->prev_dev) { struct sja1000_priv *prev_priv = netdev_priv(chan->prev_dev); struct peak_pci_chan *prev_chan = prev_priv->priv; card = prev_chan->pciec_card; if (!card) return -ENODEV; /* channel is the first one: do the init part */ } else { /* create the bit banging I2C adapter structure */ card = kzalloc(sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->cfg_base = chan->cfg_base; card->reg_base = priv->reg_base; card->led_chip.owner = THIS_MODULE; card->led_chip.dev.parent = &pdev->dev; card->led_chip.algo_data = &card->i2c_bit; strncpy(card->led_chip.name, "peak_i2c", sizeof(card->led_chip.name)); card->i2c_bit = peak_pciec_i2c_bit_ops; card->i2c_bit.udelay = 10; card->i2c_bit.timeout = HZ; card->i2c_bit.data = card; peak_pciec_init_pita_gpio(card); err = i2c_bit_add_bus(&card->led_chip); if (err) { dev_err(&pdev->dev, "i2c init failed\n"); goto pciec_init_err_1; } err = peak_pciec_init_leds(card); if (err) { dev_err(&pdev->dev, "leds hardware init failed\n"); goto pciec_init_err_2; } INIT_DELAYED_WORK(&card->led_work, peak_pciec_led_work); /* PCAN-ExpressCard needs its own callback for leds */ priv->write_reg = peak_pciec_write_reg; } chan->pciec_card = card; card->channel[card->chan_count++].netdev = dev; return 0; pciec_init_err_2: i2c_del_adapter(&card->led_chip); pciec_init_err_1: peak_pciec_init_pita_gpio(card); kfree(card); return err; } static void peak_pciec_remove(struct peak_pciec_card *card) { peak_pciec_stop_led_work(card); peak_pciec_leds_exit(card); i2c_del_adapter(&card->led_chip); peak_pciec_init_pita_gpio(card); kfree(card); } #else /* CONFIG_CAN_PEAK_PCIEC */ /* Placebo functions when PCAN-ExpressCard support is not selected */ static inline int peak_pciec_probe(struct pci_dev *pdev, struct net_device *dev) { return -ENODEV; } static inline void peak_pciec_remove(struct peak_pciec_card *card) { } #endif /* CONFIG_CAN_PEAK_PCIEC */ static u8 peak_pci_read_reg(const struct sja1000_priv *priv, int port) { 
return readb(priv->reg_base + (port << 2)); } static void peak_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val) { writeb(val, priv->reg_base + (port << 2)); } static void peak_pci_post_irq(const struct sja1000_priv *priv) { struct peak_pci_chan *chan = priv->priv; u16 icr; /* Select and clear the interrupt stored in the PITA */ icr = readw(chan->cfg_base + PITA_ICR); if (icr & chan->icr_mask) writew(chan->icr_mask, chan->cfg_base + PITA_ICR); } static int peak_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sja1000_priv *priv; struct peak_pci_chan *chan; struct net_device *dev, *prev_dev; void __iomem *cfg_base, *reg_base; u16 sub_sys_id, icr; int i, err, channels; char fw_str[14] = ""; err = pci_enable_device(pdev); if (err) return err; err = pci_request_regions(pdev, DRV_NAME); if (err) goto failure_disable_pci; err = pci_read_config_word(pdev, 0x2e, &sub_sys_id); if (err) goto failure_release_regions; dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n", pdev->vendor, pdev->device, sub_sys_id); err = pci_write_config_word(pdev, 0x44, 0); if (err) goto failure_release_regions; if (sub_sys_id >= 12) channels = 4; else if (sub_sys_id >= 10) channels = 3; else if (sub_sys_id >= 4) channels = 2; else channels = 1; cfg_base = pci_iomap(pdev, 0, PEAK_PCI_CFG_SIZE); if (!cfg_base) { dev_err(&pdev->dev, "failed to map PCI resource #0\n"); err = -ENOMEM; goto failure_release_regions; } reg_base = pci_iomap(pdev, 1, PEAK_PCI_CHAN_SIZE * channels); if (!reg_base) { dev_err(&pdev->dev, "failed to map PCI resource #1\n"); err = -ENOMEM; goto failure_unmap_cfg_base; } /* Set GPIO control register */ writew(0x0005, cfg_base + PITA_GPIOICR + 2); /* Enable all channels of this card */ writeb(0x00, cfg_base + PITA_GPIOICR); /* Toggle reset */ writeb(0x05, cfg_base + PITA_MISC + 3); usleep_range(5000, 6000); /* Leave parport mux mode */ writeb(0x04, cfg_base + PITA_MISC + 3); /* FPGA equipped card if not 0 */ if (readl(cfg_base + PEAK_VER_REG1)) { /* FPGA card: display version of the running firmware */ u32 fw_ver = readl(cfg_base + PEAK_VER_REG2); snprintf(fw_str, sizeof(fw_str), " FW v%u.%u.%u", (fw_ver >> 12) & 0xf, (fw_ver >> 8) & 0xf, (fw_ver >> 4) & 0xf); } /* Display commercial name (and, if available, FW version) of the card */ dev_info(&pdev->dev, "%ux CAN %s%s\n", channels, (const char *)ent->driver_data, fw_str); icr = readw(cfg_base + PITA_ICR + 2); for (i = 0; i < channels; i++) { dev = alloc_sja1000dev(sizeof(struct peak_pci_chan)); if (!dev) { err = -ENOMEM; goto failure_remove_channels; } priv = netdev_priv(dev); chan = priv->priv; chan->cfg_base = cfg_base; priv->reg_base = reg_base + i * PEAK_PCI_CHAN_SIZE; priv->read_reg = peak_pci_read_reg; priv->write_reg = peak_pci_write_reg; priv->post_irq = peak_pci_post_irq; priv->can.clock.freq = PEAK_PCI_CAN_CLOCK; priv->ocr = PEAK_PCI_OCR; priv->cdr = PEAK_PCI_CDR; /* Neither a slave nor a single device distributes the clock */ if (channels == 1 || i > 0) priv->cdr |= CDR_CLK_OFF; /* Setup interrupt handling */ priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; chan->icr_mask = peak_pci_icr_masks[i]; icr |= chan->icr_mask; SET_NETDEV_DEV(dev, &pdev->dev); dev->dev_id = i; /* Create chain of SJA1000 devices */ chan->prev_dev = pci_get_drvdata(pdev); pci_set_drvdata(pdev, dev); /* PCAN-ExpressCard needs some additional i2c init. 
* This must be done *before* register_sja1000dev() but * *after* devices linkage */ if (pdev->device == PEAK_PCIEC_DEVICE_ID || pdev->device == PEAK_PCIEC34_DEVICE_ID) { err = peak_pciec_probe(pdev, dev); if (err) { dev_err(&pdev->dev, "failed to probe device (err %d)\n", err); goto failure_free_dev; } } err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "failed to register device\n"); goto failure_free_dev; } dev_info(&pdev->dev, "%s at reg_base=0x%p cfg_base=0x%p irq=%d\n", dev->name, priv->reg_base, chan->cfg_base, dev->irq); } /* Enable interrupts */ writew(icr, cfg_base + PITA_ICR + 2); return 0; failure_free_dev: pci_set_drvdata(pdev, chan->prev_dev); free_sja1000dev(dev); failure_remove_channels: /* Disable interrupts */ writew(0x0, cfg_base + PITA_ICR + 2); chan = NULL; for (dev = pci_get_drvdata(pdev); dev; dev = prev_dev) { priv = netdev_priv(dev); chan = priv->priv; prev_dev = chan->prev_dev; unregister_sja1000dev(dev); free_sja1000dev(dev); } /* free any PCIeC resources too */ if (chan && chan->pciec_card) peak_pciec_remove(chan->pciec_card); pci_iounmap(pdev, reg_base); failure_unmap_cfg_base: pci_iounmap(pdev, cfg_base); failure_release_regions: pci_release_regions(pdev); failure_disable_pci: pci_disable_device(pdev); /* pci_xxx_config_word() return positive PCIBIOS_xxx error codes while * the probe() function must return a negative errno in case of failure * (err is unchanged if negative) */ return pcibios_err_to_errno(err); } static void peak_pci_remove(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); /* Last device */ struct sja1000_priv *priv = netdev_priv(dev); struct peak_pci_chan *chan = priv->priv; void __iomem *cfg_base = chan->cfg_base; void __iomem *reg_base = priv->reg_base; /* Disable interrupts */ writew(0x0, cfg_base + PITA_ICR + 2); /* Loop over all registered devices */ while (1) { struct net_device *prev_dev = chan->prev_dev; dev_info(&pdev->dev, "removing device %s\n", dev->name); /* do that only for first channel */ if (!prev_dev && chan->pciec_card) peak_pciec_remove(chan->pciec_card); unregister_sja1000dev(dev); free_sja1000dev(dev); dev = prev_dev; if (!dev) break; priv = netdev_priv(dev); chan = priv->priv; } pci_iounmap(pdev, reg_base); pci_iounmap(pdev, cfg_base); pci_release_regions(pdev); pci_disable_device(pdev); } static struct pci_driver peak_pci_driver = { .name = DRV_NAME, .id_table = peak_pci_tbl, .probe = peak_pci_probe, .remove = peak_pci_remove, }; module_pci_driver(peak_pci_driver);
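/*
 * Editor's note -- illustrative sketch, not part of the driver above.
 * The PCA9553 LS0 byte programmed via peak_pciec_write_pca9553(card, 5, ...)
 * packs one 2-bit state per LED: channel c occupies bits [2c+1:2c], which is
 * exactly what PCA9553_LED_STATE(s, c) computes. The hypothetical helper
 * below (assuming CONFIG_CAN_PEAK_PCIEC, so the macros exist) shows the
 * encoding for "channel 0 fast-blink, channel 1 off":
 */
#if 0	/* example only, never compiled */
static u8 demo_pca9553_ls0(void)
{
	u8 ls0 = 0;

	ls0 |= PCA9553_LED_FAST(0);	/* PWM1 (0x03) << 0 -> 0x03 */
	ls0 |= PCA9553_LED_OFF(1);	/* high-Z (0x01) << 2 -> 0x04 */

	return ls0;			/* 0x07 */
}
#endif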
linux-master
drivers/net/can/sja1000/peak_pci.c
// SPDX-License-Identifier: GPL-2.0 /* Fintek F81601 PCIE to 2 CAN controller driver * * Copyright (C) 2019 Peter Hong <[email protected]> * Copyright (C) 2019 Linux Foundation */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/can/dev.h> #include <linux/io.h> #include "sja1000.h" #define F81601_PCI_MAX_CHAN 2 #define F81601_DECODE_REG 0x209 #define F81601_IO_MODE BIT(7) #define F81601_MEM_MODE BIT(6) #define F81601_CFG_MODE BIT(5) #define F81601_CAN2_INTERNAL_CLK BIT(3) #define F81601_CAN1_INTERNAL_CLK BIT(2) #define F81601_CAN2_EN BIT(1) #define F81601_CAN1_EN BIT(0) #define F81601_TRAP_REG 0x20a #define F81601_CAN2_HAS_EN BIT(4) struct f81601_pci_card { void __iomem *addr; spinlock_t lock; /* use this spin lock only for write access */ struct pci_dev *dev; struct net_device *net_dev[F81601_PCI_MAX_CHAN]; }; static const struct pci_device_id f81601_pci_tbl[] = { { PCI_DEVICE(0x1c29, 0x1703) }, { /* sentinel */ }, }; MODULE_DEVICE_TABLE(pci, f81601_pci_tbl); static bool internal_clk = true; module_param(internal_clk, bool, 0444); MODULE_PARM_DESC(internal_clk, "Use internal clock, default true (24MHz)"); static unsigned int external_clk; module_param(external_clk, uint, 0444); MODULE_PARM_DESC(external_clk, "External clock when internal_clk disabled"); static u8 f81601_pci_read_reg(const struct sja1000_priv *priv, int port) { return readb(priv->reg_base + port); } static void f81601_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val) { struct f81601_pci_card *card = priv->priv; unsigned long flags; spin_lock_irqsave(&card->lock, flags); writeb(val, priv->reg_base + port); readb(priv->reg_base); spin_unlock_irqrestore(&card->lock, flags); } static void f81601_pci_remove(struct pci_dev *pdev) { struct f81601_pci_card *card = pci_get_drvdata(pdev); struct net_device *dev; int i; for (i = 0; i < ARRAY_SIZE(card->net_dev); i++) { dev = card->net_dev[i]; if (!dev) continue; dev_info(&pdev->dev, "%s: Removing %s\n", __func__, dev->name); unregister_sja1000dev(dev); free_sja1000dev(dev); } } /* Probe F81601 based device for the SJA1000 chips and register each * available CAN channel to SJA1000 Socket-CAN subsystem. 
*/ static int f81601_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sja1000_priv *priv; struct net_device *dev; struct f81601_pci_card *card; int err, i, count; u8 tmp; if (pcim_enable_device(pdev) < 0) { dev_err(&pdev->dev, "Failed to enable PCI device\n"); return -ENODEV; } dev_info(&pdev->dev, "Detected card at slot #%i\n", PCI_SLOT(pdev->devfn)); card = devm_kzalloc(&pdev->dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->dev = pdev; spin_lock_init(&card->lock); pci_set_drvdata(pdev, card); tmp = F81601_IO_MODE | F81601_MEM_MODE | F81601_CFG_MODE | F81601_CAN2_EN | F81601_CAN1_EN; if (internal_clk) { tmp |= F81601_CAN2_INTERNAL_CLK | F81601_CAN1_INTERNAL_CLK; dev_info(&pdev->dev, "F81601 running with internal clock: 24 MHz\n"); } else { dev_info(&pdev->dev, "F81601 running with external clock: %d MHz\n", external_clk / 1000000); } pci_write_config_byte(pdev, F81601_DECODE_REG, tmp); card->addr = pcim_iomap(pdev, 0, pci_resource_len(pdev, 0)); if (!card->addr) { err = -ENOMEM; dev_err(&pdev->dev, "%s: Failed to remap BAR\n", __func__); goto failure_cleanup; } /* read the CAN2_HW_EN strap pin to detect how many CAN buses we have */ count = ARRAY_SIZE(card->net_dev); pci_read_config_byte(pdev, F81601_TRAP_REG, &tmp); if (!(tmp & F81601_CAN2_HAS_EN)) count = 1; for (i = 0; i < count; i++) { dev = alloc_sja1000dev(0); if (!dev) { err = -ENOMEM; goto failure_cleanup; } priv = netdev_priv(dev); priv->priv = card; priv->irq_flags = IRQF_SHARED; priv->reg_base = card->addr + 0x80 * i; priv->read_reg = f81601_pci_read_reg; priv->write_reg = f81601_pci_write_reg; if (internal_clk) priv->can.clock.freq = 24000000 / 2; else priv->can.clock.freq = external_clk / 2; priv->ocr = OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL; priv->cdr = CDR_CBP; SET_NETDEV_DEV(dev, &pdev->dev); dev->dev_id = i; dev->irq = pdev->irq; /* Register SJA1000 device */ err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "%s: Registering device failed: %x\n", __func__, err); free_sja1000dev(dev); goto failure_cleanup; } card->net_dev[i] = dev; dev_info(&pdev->dev, "Channel #%d, %s at 0x%p, irq %d\n", i, dev->name, priv->reg_base, dev->irq); } return 0; failure_cleanup: dev_err(&pdev->dev, "%s: failed: %d. Cleaning Up.\n", __func__, err); f81601_pci_remove(pdev); return err; } static struct pci_driver f81601_pci_driver = { .name = "f81601", .id_table = f81601_pci_tbl, .probe = f81601_pci_probe, .remove = f81601_pci_remove, }; MODULE_DESCRIPTION("Fintek F81601 PCIE to 2 CANBUS adaptor driver"); MODULE_AUTHOR("Peter Hong <[email protected]>"); MODULE_LICENSE("GPL v2"); module_pci_driver(f81601_pci_driver);
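/*
 * Editor's note -- sketch, not part of the driver. f81601_pci_write_reg()
 * above follows each writeb() with a dummy readb() while holding the
 * spinlock: PCI memory writes are posted and may be buffered by bridges,
 * and a read from the same device forces the preceding write to actually
 * reach the chip before the lock is released. The hypothetical helper
 * below isolates that idiom:
 */
#if 0	/* example only, never compiled */
static void demo_writeb_flushed(void __iomem *base, int port, u8 val)
{
	writeb(val, base + port);	/* posted write, may be buffered */
	readb(base);			/* read-back flushes it to the device */
}
#endif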
linux-master
drivers/net/can/sja1000/f81601.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2007 Wolfgang Grandegger <[email protected]> * Copyright (C) 2008 Markus Plessing <[email protected]> * Copyright (C) 2008 Sebastian Haas <[email protected]> * Copyright (C) 2023 EMS Dr. Thomas Wuensche */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/can/dev.h> #include <linux/io.h> #include "sja1000.h" #define DRV_NAME "ems_pci" MODULE_AUTHOR("Sebastian Haas <[email protected]>"); MODULE_AUTHOR("Gerhard Uttenthaler <[email protected]>"); MODULE_DESCRIPTION("Socket-CAN driver for EMS CPC-PCI/PCIe/104P CAN cards"); MODULE_LICENSE("GPL v2"); #define EMS_PCI_V1_MAX_CHAN 2 #define EMS_PCI_V2_MAX_CHAN 4 #define EMS_PCI_V3_MAX_CHAN 4 #define EMS_PCI_MAX_CHAN EMS_PCI_V2_MAX_CHAN struct ems_pci_card { int version; int channels; struct pci_dev *pci_dev; struct net_device *net_dev[EMS_PCI_MAX_CHAN]; void __iomem *conf_addr; void __iomem *base_addr; }; #define EMS_PCI_CAN_CLOCK (16000000 / 2) /* Register definitions and descriptions are from LinCAN 0.3.3. * * PSB4610 PITA-2 bridge control registers */ #define PITA2_ICR 0x00 /* Interrupt Control Register */ #define PITA2_ICR_INT0 0x00000002 /* [RC] INT0 Active/Clear */ #define PITA2_ICR_INT0_EN 0x00020000 /* [RW] Enable INT0 */ #define PITA2_MISC 0x1c /* Miscellaneous Register */ #define PITA2_MISC_CONFIG 0x04000000 /* Multiplexed parallel interface */ /* Register definitions for the PLX 9030 */ #define PLX_ICSR 0x4c /* Interrupt Control/Status register */ #define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */ #define PLX_ICSR_PCIINT_ENA 0x0040 /* PCI Interrupt Enable */ #define PLX_ICSR_LINTI1_CLR 0x0400 /* Local Edge Triggerable Interrupt Clear */ #define PLX_ICSR_ENA_CLR (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \ PLX_ICSR_LINTI1_CLR) /* Register definitions for the ASIX99100 */ #define ASIX_LINTSR 0x28 /* Interrupt Control/Status register */ #define ASIX_LINTSR_INT0AC BIT(0) /* Writing 1 enables or clears interrupt */ #define ASIX_LIEMR 0x24 /* Local Interrupt Enable / Miscellaneous Register */ #define ASIX_LIEMR_L0EINTEN BIT(16) /* Local INT0 input assertion enable */ #define ASIX_LIEMR_LRST BIT(14) /* Local Reset assert */ /* The board configuration is probably the following: * RX1 is connected to ground. * TX1 is not connected. * CLKO is not connected. * Setting the OCR register to 0xDA is a good idea. * This means normal output mode, push-pull and the correct polarity. */ #define EMS_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL) /* In the CDR register, you should set CBP to 1. * You will probably also want to set the clock divider value to 7 * (meaning direct oscillator output) because the second SJA1000 chip * is driven by the first one's CLKOUT output. 
*/ #define EMS_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK) #define EMS_PCI_V1_BASE_BAR 1 #define EMS_PCI_V1_CONF_BAR 0 #define EMS_PCI_V1_CONF_SIZE 4096 /* size of PITA control area */ #define EMS_PCI_V1_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */ #define EMS_PCI_V1_CAN_CTRL_SIZE 0x200 /* memory size for each controller */ #define EMS_PCI_V2_BASE_BAR 2 #define EMS_PCI_V2_CONF_BAR 0 #define EMS_PCI_V2_CONF_SIZE 128 /* size of PLX control area */ #define EMS_PCI_V2_CAN_BASE_OFFSET 0x400 /* offset where the controllers start */ #define EMS_PCI_V2_CAN_CTRL_SIZE 0x200 /* memory size for each controller */ #define EMS_PCI_V3_BASE_BAR 0 #define EMS_PCI_V3_CONF_BAR 5 #define EMS_PCI_V3_CONF_SIZE 128 /* size of ASIX control area */ #define EMS_PCI_V3_CAN_BASE_OFFSET 0x00 /* offset where the controllers start */ #define EMS_PCI_V3_CAN_CTRL_SIZE 0x100 /* memory size for each controller */ #define EMS_PCI_BASE_SIZE 4096 /* size of controller area */ #define PCI_SUBDEVICE_ID_EMS 0x4010 static const struct pci_device_id ems_pci_tbl[] = { /* CPC-PCI v1 */ {PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,}, /* CPC-PCI v2 */ {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000}, /* CPC-104P v2 */ {PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002}, /* CPC-PCIe v3 */ {PCI_VENDOR_ID_ASIX, PCI_DEVICE_ID_ASIX_AX99100_LB, 0xa000, PCI_SUBDEVICE_ID_EMS}, {0,} }; MODULE_DEVICE_TABLE(pci, ems_pci_tbl); /* Helper to read internal registers from card logic (not CAN) */ static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port) { return readb(card->base_addr + (port * 4)); } static u8 ems_pci_v1_read_reg(const struct sja1000_priv *priv, int port) { return readb(priv->reg_base + (port * 4)); } static void ems_pci_v1_write_reg(const struct sja1000_priv *priv, int port, u8 val) { writeb(val, priv->reg_base + (port * 4)); } static void ems_pci_v1_post_irq(const struct sja1000_priv *priv) { struct ems_pci_card *card = priv->priv; /* reset int flag of pita */ writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0, card->conf_addr + PITA2_ICR); } static u8 ems_pci_v2_read_reg(const struct sja1000_priv *priv, int port) { return readb(priv->reg_base + port); } static void ems_pci_v2_write_reg(const struct sja1000_priv *priv, int port, u8 val) { writeb(val, priv->reg_base + port); } static void ems_pci_v2_post_irq(const struct sja1000_priv *priv) { struct ems_pci_card *card = priv->priv; writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR); } static u8 ems_pci_v3_read_reg(const struct sja1000_priv *priv, int port) { return readb(priv->reg_base + port); } static void ems_pci_v3_write_reg(const struct sja1000_priv *priv, int port, u8 val) { writeb(val, priv->reg_base + port); } static void ems_pci_v3_post_irq(const struct sja1000_priv *priv) { struct ems_pci_card *card = priv->priv; writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR); } /* Check if a CAN controller is present at the specified location * by trying to switch it into the PeliCAN mode */ static inline int ems_pci_check_chan(const struct sja1000_priv *priv) { unsigned char res; /* Make sure SJA1000 is in reset mode */ priv->write_reg(priv, SJA1000_MOD, 1); priv->write_reg(priv, SJA1000_CDR, CDR_PELICAN); /* read reset-values */ res = priv->read_reg(priv, SJA1000_CDR); if (res == CDR_PELICAN) return 1; return 0; } static void ems_pci_del_card(struct pci_dev *pdev) { struct ems_pci_card *card = pci_get_drvdata(pdev); struct net_device *dev; int i = 0; for (i = 0; i < card->channels; i++) { dev = 
card->net_dev[i]; if (!dev) continue; dev_info(&pdev->dev, "Removing %s.\n", dev->name); unregister_sja1000dev(dev); free_sja1000dev(dev); } if (card->base_addr) pci_iounmap(card->pci_dev, card->base_addr); if (card->conf_addr) pci_iounmap(card->pci_dev, card->conf_addr); kfree(card); pci_disable_device(pdev); } static void ems_pci_card_reset(struct ems_pci_card *card) { /* Request board reset */ writeb(0, card->base_addr); } /* Probe PCI device for EMS CAN signature and register each available * CAN channel to SJA1000 Socket-CAN subsystem. */ static int ems_pci_add_card(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sja1000_priv *priv; struct net_device *dev; struct ems_pci_card *card; int max_chan, conf_size, base_bar, conf_bar; int err, i; /* Enabling PCI device */ if (pci_enable_device(pdev) < 0) { dev_err(&pdev->dev, "Enabling PCI device failed\n"); return -ENODEV; } /* Allocating card structures to hold addresses, ... */ card = kzalloc(sizeof(*card), GFP_KERNEL); if (!card) { pci_disable_device(pdev); return -ENOMEM; } pci_set_drvdata(pdev, card); card->pci_dev = pdev; card->channels = 0; if (pdev->vendor == PCI_VENDOR_ID_ASIX) { card->version = 3; /* CPC-PCI v3 */ max_chan = EMS_PCI_V3_MAX_CHAN; base_bar = EMS_PCI_V3_BASE_BAR; conf_bar = EMS_PCI_V3_CONF_BAR; conf_size = EMS_PCI_V3_CONF_SIZE; } else if (pdev->vendor == PCI_VENDOR_ID_PLX) { card->version = 2; /* CPC-PCI v2 */ max_chan = EMS_PCI_V2_MAX_CHAN; base_bar = EMS_PCI_V2_BASE_BAR; conf_bar = EMS_PCI_V2_CONF_BAR; conf_size = EMS_PCI_V2_CONF_SIZE; } else { card->version = 1; /* CPC-PCI v1 */ max_chan = EMS_PCI_V1_MAX_CHAN; base_bar = EMS_PCI_V1_BASE_BAR; conf_bar = EMS_PCI_V1_CONF_BAR; conf_size = EMS_PCI_V1_CONF_SIZE; } /* Remap configuration space and controller memory area */ card->conf_addr = pci_iomap(pdev, conf_bar, conf_size); if (!card->conf_addr) { err = -ENOMEM; goto failure_cleanup; } card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE); if (!card->base_addr) { err = -ENOMEM; goto failure_cleanup; } if (card->version == 1) { /* Configure PITA-2 parallel interface (enable MUX) */ writel(PITA2_MISC_CONFIG, card->conf_addr + PITA2_MISC); /* Check for unique EMS CAN signature */ if (ems_pci_v1_readb(card, 0) != 0x55 || ems_pci_v1_readb(card, 1) != 0xAA || ems_pci_v1_readb(card, 2) != 0x01 || ems_pci_v1_readb(card, 3) != 0xCB || ems_pci_v1_readb(card, 4) != 0x11) { dev_err(&pdev->dev, "Not EMS Dr. 
Thomas Wuensche interface\n"); err = -ENODEV; goto failure_cleanup; } } if (card->version == 3) { /* ASIX chip asserts local reset to CAN controllers * after bootup until it is deasserted */ writel(readl(card->conf_addr + ASIX_LIEMR) & ~ASIX_LIEMR_LRST, card->conf_addr + ASIX_LIEMR); } ems_pci_card_reset(card); /* Detect available channels */ for (i = 0; i < max_chan; i++) { dev = alloc_sja1000dev(0); if (!dev) { err = -ENOMEM; goto failure_cleanup; } card->net_dev[i] = dev; priv = netdev_priv(dev); priv->priv = card; priv->irq_flags = IRQF_SHARED; dev->irq = pdev->irq; if (card->version == 1) { priv->read_reg = ems_pci_v1_read_reg; priv->write_reg = ems_pci_v1_write_reg; priv->post_irq = ems_pci_v1_post_irq; priv->reg_base = card->base_addr + EMS_PCI_V1_CAN_BASE_OFFSET + (i * EMS_PCI_V1_CAN_CTRL_SIZE); } else if (card->version == 2) { priv->read_reg = ems_pci_v2_read_reg; priv->write_reg = ems_pci_v2_write_reg; priv->post_irq = ems_pci_v2_post_irq; priv->reg_base = card->base_addr + EMS_PCI_V2_CAN_BASE_OFFSET + (i * EMS_PCI_V2_CAN_CTRL_SIZE); } else { priv->read_reg = ems_pci_v3_read_reg; priv->write_reg = ems_pci_v3_write_reg; priv->post_irq = ems_pci_v3_post_irq; priv->reg_base = card->base_addr + EMS_PCI_V3_CAN_BASE_OFFSET + (i * EMS_PCI_V3_CAN_CTRL_SIZE); } /* Check if channel is present */ if (ems_pci_check_chan(priv)) { priv->can.clock.freq = EMS_PCI_CAN_CLOCK; priv->ocr = EMS_PCI_OCR; priv->cdr = EMS_PCI_CDR; SET_NETDEV_DEV(dev, &pdev->dev); dev->dev_id = i; if (card->version == 1) { /* reset int flag of pita */ writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0, card->conf_addr + PITA2_ICR); } else if (card->version == 2) { /* enable IRQ in PLX 9030 */ writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR); } else { /* Enable IRQ in AX99100 */ writel(ASIX_LINTSR_INT0AC, card->conf_addr + ASIX_LINTSR); /* Enable local INT0 input enable */ writel(readl(card->conf_addr + ASIX_LIEMR) | ASIX_LIEMR_L0EINTEN, card->conf_addr + ASIX_LIEMR); } /* Register SJA1000 device */ err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "Registering device failed: %pe\n", ERR_PTR(err)); free_sja1000dev(dev); goto failure_cleanup; } card->channels++; dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d\n", i + 1, priv->reg_base, dev->irq); } else { free_sja1000dev(dev); } } return 0; failure_cleanup: dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err); ems_pci_del_card(pdev); return err; } static struct pci_driver ems_pci_driver = { .name = DRV_NAME, .id_table = ems_pci_tbl, .probe = ems_pci_add_card, .remove = ems_pci_del_card, }; module_pci_driver(ems_pci_driver);
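/*
 * Editor's note -- sketch, not part of the driver. The v1 probe above
 * expects the byte sequence 0x55 0xAA 0x01 0xCB 0x11 at offsets 0..4 of the
 * card logic (read via ems_pci_v1_readb()). A hypothetical table-driven
 * form of the same check, shown only to make the signature explicit:
 */
#if 0	/* example only, never compiled */
static bool demo_ems_v1_signature_ok(struct ems_pci_card *card)
{
	static const u8 sig[] = { 0x55, 0xAA, 0x01, 0xCB, 0x11 };
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sig); i++)
		if (ems_pci_v1_readb(card, i) != sig[i])
			return false;
	return true;
}
#endif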
linux-master
drivers/net/can/sja1000/ems_pci.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2009 Wolfgang Grandegger <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/can/dev.h> #include <linux/can/platform/sja1000.h> #include "sja1000.h" #define DRV_NAME "sja1000_isa" #define MAXDEV 8 MODULE_AUTHOR("Wolfgang Grandegger <[email protected]>"); MODULE_DESCRIPTION("Socket-CAN driver for SJA1000 on the ISA bus"); MODULE_LICENSE("GPL v2"); #define CLK_DEFAULT 16000000 /* 16 MHz */ #define CDR_DEFAULT (CDR_CBP | CDR_CLK_OFF) #define OCR_DEFAULT OCR_TX0_PUSHPULL static unsigned long port[MAXDEV]; static unsigned long mem[MAXDEV]; static int irq[MAXDEV]; static int clk[MAXDEV]; static unsigned char cdr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static unsigned char ocr[MAXDEV] = {[0 ... (MAXDEV - 1)] = 0xff}; static int indirect[MAXDEV] = {[0 ... (MAXDEV - 1)] = -1}; static spinlock_t indirect_lock[MAXDEV]; /* lock for indirect access mode */ module_param_hw_array(port, ulong, ioport, NULL, 0444); MODULE_PARM_DESC(port, "I/O port number"); module_param_hw_array(mem, ulong, iomem, NULL, 0444); MODULE_PARM_DESC(mem, "I/O memory address"); module_param_hw_array(indirect, int, ioport, NULL, 0444); MODULE_PARM_DESC(indirect, "Indirect access via address and data port"); module_param_hw_array(irq, int, irq, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ number"); module_param_array(clk, int, NULL, 0444); MODULE_PARM_DESC(clk, "External oscillator clock frequency " "(default=16000000 [16 MHz])"); module_param_array(cdr, byte, NULL, 0444); MODULE_PARM_DESC(cdr, "Clock divider register " "(default=0x48 [CDR_CBP | CDR_CLK_OFF])"); module_param_array(ocr, byte, NULL, 0444); MODULE_PARM_DESC(ocr, "Output control register " "(default=0x18 [OCR_TX0_PUSHPULL])"); #define SJA1000_IOSIZE 0x20 #define SJA1000_IOSIZE_INDIRECT 0x02 static struct platform_device *sja1000_isa_devs[MAXDEV]; static u8 sja1000_isa_mem_read_reg(const struct sja1000_priv *priv, int reg) { return readb(priv->reg_base + reg); } static void sja1000_isa_mem_write_reg(const struct sja1000_priv *priv, int reg, u8 val) { writeb(val, priv->reg_base + reg); } static u8 sja1000_isa_port_read_reg(const struct sja1000_priv *priv, int reg) { return inb((unsigned long)priv->reg_base + reg); } static void sja1000_isa_port_write_reg(const struct sja1000_priv *priv, int reg, u8 val) { outb(val, (unsigned long)priv->reg_base + reg); } static u8 sja1000_isa_port_read_reg_indirect(const struct sja1000_priv *priv, int reg) { unsigned long flags, base = (unsigned long)priv->reg_base; u8 readval; spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags); outb(reg, base); readval = inb(base + 1); spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags); return readval; } static void sja1000_isa_port_write_reg_indirect(const struct sja1000_priv *priv, int reg, u8 val) { unsigned long flags, base = (unsigned long)priv->reg_base; spin_lock_irqsave(&indirect_lock[priv->dev->dev_id], flags); outb(reg, base); outb(val, base + 1); spin_unlock_irqrestore(&indirect_lock[priv->dev->dev_id], flags); } static int sja1000_isa_probe(struct platform_device *pdev) { struct net_device *dev; struct sja1000_priv *priv; void __iomem *base = NULL; int iosize = SJA1000_IOSIZE; int idx = pdev->id; int err; dev_dbg(&pdev->dev, "probing idx=%d: port=%#lx, mem=%#lx, irq=%d\n", idx, port[idx], mem[idx], irq[idx]); if 
(mem[idx]) { if (!request_mem_region(mem[idx], iosize, DRV_NAME)) { err = -EBUSY; goto exit; } base = ioremap(mem[idx], iosize); if (!base) { err = -ENOMEM; goto exit_release; } } else { if (indirect[idx] > 0 || (indirect[idx] == -1 && indirect[0] > 0)) iosize = SJA1000_IOSIZE_INDIRECT; if (!request_region(port[idx], iosize, DRV_NAME)) { err = -EBUSY; goto exit; } } dev = alloc_sja1000dev(0); if (!dev) { err = -ENOMEM; goto exit_unmap; } priv = netdev_priv(dev); dev->irq = irq[idx]; priv->irq_flags = IRQF_SHARED; if (mem[idx]) { priv->reg_base = base; dev->base_addr = mem[idx]; priv->read_reg = sja1000_isa_mem_read_reg; priv->write_reg = sja1000_isa_mem_write_reg; } else { priv->reg_base = (void __iomem *)port[idx]; dev->base_addr = port[idx]; if (iosize == SJA1000_IOSIZE_INDIRECT) { priv->read_reg = sja1000_isa_port_read_reg_indirect; priv->write_reg = sja1000_isa_port_write_reg_indirect; spin_lock_init(&indirect_lock[idx]); } else { priv->read_reg = sja1000_isa_port_read_reg; priv->write_reg = sja1000_isa_port_write_reg; } } if (clk[idx]) priv->can.clock.freq = clk[idx] / 2; else if (clk[0]) priv->can.clock.freq = clk[0] / 2; else priv->can.clock.freq = CLK_DEFAULT / 2; if (ocr[idx] != 0xff) priv->ocr = ocr[idx]; else if (ocr[0] != 0xff) priv->ocr = ocr[0]; else priv->ocr = OCR_DEFAULT; if (cdr[idx] != 0xff) priv->cdr = cdr[idx]; else if (cdr[0] != 0xff) priv->cdr = cdr[0]; else priv->cdr = CDR_DEFAULT; platform_set_drvdata(pdev, dev); SET_NETDEV_DEV(dev, &pdev->dev); dev->dev_id = idx; err = register_sja1000dev(dev); if (err) { dev_err(&pdev->dev, "registering %s failed (err=%d)\n", DRV_NAME, err); goto exit_free; } dev_info(&pdev->dev, "%s device registered (reg_base=0x%p, irq=%d)\n", DRV_NAME, priv->reg_base, dev->irq); return 0; exit_free: free_sja1000dev(dev); exit_unmap: if (mem[idx]) iounmap(base); exit_release: if (mem[idx]) release_mem_region(mem[idx], iosize); else release_region(port[idx], iosize); exit: return err; } static void sja1000_isa_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct sja1000_priv *priv = netdev_priv(dev); int idx = pdev->id; unregister_sja1000dev(dev); if (mem[idx]) { iounmap(priv->reg_base); release_mem_region(mem[idx], SJA1000_IOSIZE); } else { if (priv->read_reg == sja1000_isa_port_read_reg_indirect) release_region(port[idx], SJA1000_IOSIZE_INDIRECT); else release_region(port[idx], SJA1000_IOSIZE); } free_sja1000dev(dev); } static struct platform_driver sja1000_isa_driver = { .probe = sja1000_isa_probe, .remove_new = sja1000_isa_remove, .driver = { .name = DRV_NAME, }, }; static int __init sja1000_isa_init(void) { int idx, err; for (idx = 0; idx < MAXDEV; idx++) { if ((port[idx] || mem[idx]) && irq[idx]) { sja1000_isa_devs[idx] = platform_device_alloc(DRV_NAME, idx); if (!sja1000_isa_devs[idx]) { err = -ENOMEM; goto exit_free_devices; } err = platform_device_add(sja1000_isa_devs[idx]); if (err) { platform_device_put(sja1000_isa_devs[idx]); goto exit_free_devices; } pr_debug("%s: platform device %d: port=%#lx, mem=%#lx, " "irq=%d\n", DRV_NAME, idx, port[idx], mem[idx], irq[idx]); } else if (idx == 0 || port[idx] || mem[idx]) { pr_err("%s: insufficient parameters supplied\n", DRV_NAME); err = -EINVAL; goto exit_free_devices; } } err = platform_driver_register(&sja1000_isa_driver); if (err) goto exit_free_devices; pr_info("Legacy %s driver for max. 
%d devices registered\n", DRV_NAME, MAXDEV); return 0; exit_free_devices: while (--idx >= 0) { if (sja1000_isa_devs[idx]) platform_device_unregister(sja1000_isa_devs[idx]); } return err; } static void __exit sja1000_isa_exit(void) { int idx; platform_driver_unregister(&sja1000_isa_driver); for (idx = 0; idx < MAXDEV; idx++) { if (sja1000_isa_devs[idx]) platform_device_unregister(sja1000_isa_devs[idx]); } } module_init(sja1000_isa_init); module_exit(sja1000_isa_exit);
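/*
 * Editor's note -- usage example, not part of the source. With the module
 * parameters above, two boards could be configured like this (the port
 * addresses and IRQ numbers are made up):
 *
 *   modprobe sja1000_isa port=0x2c0,0x2c8 irq=10,11 indirect=0,1
 *
 * Per-index values fall back to index 0 and then to the built-in defaults
 * (16 MHz clock, cdr=0x48, ocr=0x18), as implemented in sja1000_isa_probe().
 */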
linux-master
drivers/net/can/sja1000/sja1000_isa.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008-2010 Pavel Cheblakov <[email protected]> * * Derived from the ems_pci.c driver: * Copyright (C) 2007 Wolfgang Grandegger <[email protected]> * Copyright (C) 2008 Markus Plessing <[email protected]> * Copyright (C) 2008 Sebastian Haas <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/can/dev.h> #include <linux/io.h> #include "sja1000.h" #define DRV_NAME "sja1000_plx_pci" MODULE_AUTHOR("Pavel Cheblakov <[email protected]>"); MODULE_DESCRIPTION("Socket-CAN driver for PLX90xx PCI-bridge cards with " "the SJA1000 chips"); MODULE_LICENSE("GPL v2"); #define PLX_PCI_MAX_CHAN 2 struct plx_pci_card { int channels; /* detected channels count */ struct net_device *net_dev[PLX_PCI_MAX_CHAN]; void __iomem *conf_addr; /* Pointer to device-dependent reset function */ void (*reset_func)(struct pci_dev *pdev); }; #define PLX_PCI_CAN_CLOCK (16000000 / 2) /* PLX9030/9050/9052 registers */ #define PLX_INTCSR 0x4c /* Interrupt Control/Status */ #define PLX_CNTRL 0x50 /* User I/O, Direct Slave Response, * Serial EEPROM, and Initialization * Control register */ #define PLX_LINT1_EN 0x1 /* Local interrupt 1 enable */ #define PLX_LINT1_POL (1 << 1) /* Local interrupt 1 polarity */ #define PLX_LINT2_EN (1 << 3) /* Local interrupt 2 enable */ #define PLX_LINT2_POL (1 << 4) /* Local interrupt 2 polarity */ #define PLX_PCI_INT_EN (1 << 6) /* PCI Interrupt Enable */ #define PLX_PCI_RESET (1 << 30) /* PCI Adapter Software Reset */ /* PLX9056 registers */ #define PLX9056_INTCSR 0x68 /* Interrupt Control/Status */ #define PLX9056_CNTRL 0x6c /* Control / Software Reset */ #define PLX9056_LINTI (1 << 11) #define PLX9056_PCI_INT_EN (1 << 8) #define PLX9056_PCI_RCR (1 << 29) /* Read Configuration Registers */ /* * The board configuration is probably the following: * RX1 is connected to ground. * TX1 is not connected. * CLKO is not connected. * Setting the OCR register to 0xDA is a good idea. * This means normal output mode, push-pull and the correct polarity. */ #define PLX_PCI_OCR (OCR_TX0_PUSHPULL | OCR_TX1_PUSHPULL) /* OCR setting for ASEM Dual CAN raw */ #define ASEM_PCI_OCR 0xfe /* * In the CDR register, you should set CBP to 1. * You will probably also want to set the clock divider value to 7 * (meaning direct oscillator output) because the second SJA1000 chip * is driven by the first one's CLKOUT output. 
*/ #define PLX_PCI_CDR (CDR_CBP | CDR_CLKOUT_MASK) /* SJA1000 Control Register in the BasicCAN Mode */ #define REG_CR 0x00 /* States of some SJA1000 registers after hardware reset in the BasicCAN mode*/ #define REG_CR_BASICCAN_INITIAL 0x21 #define REG_CR_BASICCAN_INITIAL_MASK 0xa1 #define REG_SR_BASICCAN_INITIAL 0x0c #define REG_IR_BASICCAN_INITIAL 0xe0 /* States of some SJA1000 registers after hardware reset in the PeliCAN mode*/ #define REG_MOD_PELICAN_INITIAL 0x01 #define REG_SR_PELICAN_INITIAL 0x3c #define REG_IR_PELICAN_INITIAL 0x00 #define ADLINK_PCI_VENDOR_ID 0x144A #define ADLINK_PCI_DEVICE_ID 0x7841 #define ESD_PCI_SUB_SYS_ID_PCI200 0x0004 #define ESD_PCI_SUB_SYS_ID_PCI266 0x0009 #define ESD_PCI_SUB_SYS_ID_PMC266 0x000e #define ESD_PCI_SUB_SYS_ID_CPCI200 0x010b #define ESD_PCI_SUB_SYS_ID_PCIE2000 0x0200 #define ESD_PCI_SUB_SYS_ID_PCI104200 0x0501 #define CAN200PCI_DEVICE_ID 0x9030 #define CAN200PCI_VENDOR_ID 0x10b5 #define CAN200PCI_SUB_DEVICE_ID 0x0301 #define CAN200PCI_SUB_VENDOR_ID 0xe1c5 #define IXXAT_PCI_VENDOR_ID 0x10b5 #define IXXAT_PCI_DEVICE_ID 0x9050 #define IXXAT_PCI_SUB_SYS_ID 0x2540 #define MARATHON_PCI_DEVICE_ID 0x2715 #define MARATHON_PCIE_DEVICE_ID 0x3432 #define TEWS_PCI_VENDOR_ID 0x1498 #define TEWS_PCI_DEVICE_ID_TMPC810 0x032A #define CTI_PCI_VENDOR_ID 0x12c4 #define CTI_PCI_DEVICE_ID_CRG001 0x0900 #define MOXA_PCI_VENDOR_ID 0x1393 #define MOXA_PCI_DEVICE_ID 0x0100 #define ASEM_RAW_CAN_VENDOR_ID 0x10b5 #define ASEM_RAW_CAN_DEVICE_ID 0x9030 #define ASEM_RAW_CAN_SUB_VENDOR_ID 0x3000 #define ASEM_RAW_CAN_SUB_DEVICE_ID 0x1001 #define ASEM_RAW_CAN_SUB_DEVICE_ID_BIS 0x1002 #define ASEM_RAW_CAN_RST_REGISTER 0x54 #define ASEM_RAW_CAN_RST_MASK_CAN1 0x20 #define ASEM_RAW_CAN_RST_MASK_CAN2 0x04 static void plx_pci_reset_common(struct pci_dev *pdev); static void plx9056_pci_reset_common(struct pci_dev *pdev); static void plx_pci_reset_marathon_pci(struct pci_dev *pdev); static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev); static void plx_pci_reset_asem_dual_can_raw(struct pci_dev *pdev); struct plx_pci_channel_map { u32 bar; u32 offset; u32 size; /* 0x00 - auto, e.g. 
length of entire bar */ }; struct plx_pci_card_info { const char *name; int channel_count; u32 can_clock; u8 ocr; /* output control register */ u8 cdr; /* clock divider register */ /* Parameters for mapping local configuration space */ struct plx_pci_channel_map conf_map; /* Parameters for mapping the SJA1000 chips */ struct plx_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CHAN]; /* Pointer to device-dependent reset function */ void (*reset_func)(struct pci_dev *pdev); }; static struct plx_pci_card_info plx_pci_card_info_adlink = { "Adlink PCI-7841/cPCI-7841", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} }, &plx_pci_reset_common /* based on PLX9052 */ }; static struct plx_pci_card_info plx_pci_card_info_adlink_se = { "Adlink PCI-7841/cPCI-7841 SE", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} }, &plx_pci_reset_common /* based on PLX9052 */ }; static struct plx_pci_card_info plx_pci_card_info_esd200 = { "esd CAN-PCI/CPCI/PCI104/200", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, &plx_pci_reset_common /* based on PLX9030/9050 */ }; static struct plx_pci_card_info plx_pci_card_info_esd266 = { "esd CAN-PCI/PMC/266", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, &plx9056_pci_reset_common /* based on PLX9056 */ }; static struct plx_pci_card_info plx_pci_card_info_esd2000 = { "esd CAN-PCIe/2000", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} }, &plx9056_pci_reset_common /* based on PEX8311 */ }; static struct plx_pci_card_info plx_pci_card_info_ixxat = { "IXXAT PC-I 04/PCI", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x200, 0x80} }, &plx_pci_reset_common /* based on PLX9050 */ }; static struct plx_pci_card_info plx_pci_card_info_marathon_pci = { "Marathon CAN-bus-PCI", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} }, &plx_pci_reset_marathon_pci /* based on PLX9052 */ }; static struct plx_pci_card_info plx_pci_card_info_marathon_pcie = { "Marathon CAN-bus-PCIe", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {3, 0x80, 0x00} }, &plx_pci_reset_marathon_pcie /* based on PEX8311 */ }; static struct plx_pci_card_info plx_pci_card_info_tews = { "TEWS TECHNOLOGIES TPMC810", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} }, &plx_pci_reset_common /* based on PLX9030 */ }; static struct plx_pci_card_info plx_pci_card_info_cti = { "Connect Tech Inc. 
CANpro/104-Plus Opto (CRG001)", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} }, &plx_pci_reset_common /* based on PLX9030 */ }; static struct plx_pci_card_info plx_pci_card_info_elcus = { "Eclus CAN-200-PCI", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {1, 0x00, 0x00}, { {2, 0x00, 0x80}, {3, 0x00, 0x80} }, &plx_pci_reset_common /* based on PLX9030 */ }; static struct plx_pci_card_info plx_pci_card_info_moxa = { "MOXA", 2, PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {0, 0x00, 0x80}, {1, 0x00, 0x80} }, &plx_pci_reset_common /* based on PLX9052 */ }; static struct plx_pci_card_info plx_pci_card_info_asem_dual_can = { "ASEM Dual CAN raw PCI", 2, PLX_PCI_CAN_CLOCK, ASEM_PCI_OCR, PLX_PCI_CDR, {0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} }, &plx_pci_reset_asem_dual_can_raw /* based on PLX9030 */ }; static const struct pci_device_id plx_pci_tbl[] = { { /* Adlink PCI-7841/cPCI-7841 */ ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, ~0, (kernel_ulong_t)&plx_pci_card_info_adlink }, { /* Adlink PCI-7841/cPCI-7841 SE */ ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_COMMUNICATION_OTHER << 8, ~0, (kernel_ulong_t)&plx_pci_card_info_adlink_se }, { /* esd CAN-PCI/200 */ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050, PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200, 0, 0, (kernel_ulong_t)&plx_pci_card_info_esd200 }, { /* esd CAN-CPCI/200 */ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200, 0, 0, (kernel_ulong_t)&plx_pci_card_info_esd200 }, { /* esd CAN-PCI104/200 */ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200, 0, 0, (kernel_ulong_t)&plx_pci_card_info_esd200 }, { /* esd CAN-PCI/266 */ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056, PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266, 0, 0, (kernel_ulong_t)&plx_pci_card_info_esd266 }, { /* esd CAN-PMC/266 */ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056, PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266, 0, 0, (kernel_ulong_t)&plx_pci_card_info_esd266 }, { /* esd CAN-PCIE/2000 */ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056, PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000, 0, 0, (kernel_ulong_t)&plx_pci_card_info_esd2000 }, { /* IXXAT PC-I 04/PCI card */ IXXAT_PCI_VENDOR_ID, IXXAT_PCI_DEVICE_ID, PCI_ANY_ID, IXXAT_PCI_SUB_SYS_ID, 0, 0, (kernel_ulong_t)&plx_pci_card_info_ixxat }, { /* Marathon CAN-bus-PCI card */ PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&plx_pci_card_info_marathon_pci }, { /* Marathon CAN-bus-PCIe card */ PCI_VENDOR_ID_PLX, MARATHON_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&plx_pci_card_info_marathon_pcie }, { /* TEWS TECHNOLOGIES TPMC810 card */ TEWS_PCI_VENDOR_ID, TEWS_PCI_DEVICE_ID_TMPC810, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&plx_pci_card_info_tews }, { /* Connect Tech Inc. 
CANpro/104-Plus Opto (CRG001) card */ PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, CTI_PCI_VENDOR_ID, CTI_PCI_DEVICE_ID_CRG001, 0, 0, (kernel_ulong_t)&plx_pci_card_info_cti }, { /* Elcus CAN-200-PCI */ CAN200PCI_VENDOR_ID, CAN200PCI_DEVICE_ID, CAN200PCI_SUB_VENDOR_ID, CAN200PCI_SUB_DEVICE_ID, 0, 0, (kernel_ulong_t)&plx_pci_card_info_elcus }, { /* moxa */ MOXA_PCI_VENDOR_ID, MOXA_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID, 0, 0, (kernel_ulong_t)&plx_pci_card_info_moxa }, { /* ASEM Dual CAN raw */ ASEM_RAW_CAN_VENDOR_ID, ASEM_RAW_CAN_DEVICE_ID, ASEM_RAW_CAN_SUB_VENDOR_ID, ASEM_RAW_CAN_SUB_DEVICE_ID, 0, 0, (kernel_ulong_t)&plx_pci_card_info_asem_dual_can }, { /* ASEM Dual CAN raw -new model */ ASEM_RAW_CAN_VENDOR_ID, ASEM_RAW_CAN_DEVICE_ID, ASEM_RAW_CAN_SUB_VENDOR_ID, ASEM_RAW_CAN_SUB_DEVICE_ID_BIS, 0, 0, (kernel_ulong_t)&plx_pci_card_info_asem_dual_can }, { 0,} }; MODULE_DEVICE_TABLE(pci, plx_pci_tbl); static u8 plx_pci_read_reg(const struct sja1000_priv *priv, int port) { return ioread8(priv->reg_base + port); } static void plx_pci_write_reg(const struct sja1000_priv *priv, int port, u8 val) { iowrite8(val, priv->reg_base + port); } /* * Check if a CAN controller is present at the specified location * by trying to switch it from the BasicCAN mode into the PeliCAN mode. * Also check the states of some registers in reset mode. */ static inline int plx_pci_check_sja1000(const struct sja1000_priv *priv) { int flag = 0; /* * Check registers after hardware reset (the BasicCAN mode) * See states on p. 10 of the Datasheet. */ if ((priv->read_reg(priv, REG_CR) & REG_CR_BASICCAN_INITIAL_MASK) == REG_CR_BASICCAN_INITIAL && (priv->read_reg(priv, SJA1000_SR) == REG_SR_BASICCAN_INITIAL) && (priv->read_reg(priv, SJA1000_IR) == REG_IR_BASICCAN_INITIAL)) flag = 1; /* Bring the SJA1000 into the PeliCAN mode */ priv->write_reg(priv, SJA1000_CDR, CDR_PELICAN); /* * Check registers after reset in the PeliCAN mode. * See states on p. 23 of the Datasheet. */ if (priv->read_reg(priv, SJA1000_MOD) == REG_MOD_PELICAN_INITIAL && priv->read_reg(priv, SJA1000_SR) == REG_SR_PELICAN_INITIAL && priv->read_reg(priv, SJA1000_IR) == REG_IR_PELICAN_INITIAL) return flag; return 0; } /* * PLX9030/50/52 software reset * This also asserts LRESET# and resets devices on the Local Bus (if wired). * For most cards this is enough to reset the SJA1000 chips. */ static void plx_pci_reset_common(struct pci_dev *pdev) { struct plx_pci_card *card = pci_get_drvdata(pdev); u32 cntrl; cntrl = ioread32(card->conf_addr + PLX_CNTRL); cntrl |= PLX_PCI_RESET; iowrite32(cntrl, card->conf_addr + PLX_CNTRL); udelay(100); cntrl ^= PLX_PCI_RESET; iowrite32(cntrl, card->conf_addr + PLX_CNTRL); }; /* * PLX9056 software reset * Assert LRESET# and reset device(s) on the Local Bus (if wired). */ static void plx9056_pci_reset_common(struct pci_dev *pdev) { struct plx_pci_card *card = pci_get_drvdata(pdev); u32 cntrl; /* issue a local bus reset */ cntrl = ioread32(card->conf_addr + PLX9056_CNTRL); cntrl |= PLX_PCI_RESET; iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); udelay(100); cntrl ^= PLX_PCI_RESET; iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); /* reload local configuration from EEPROM */ cntrl |= PLX9056_PCI_RCR; iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); /* * There is no safe way to poll for the end * of the reconfiguration process. Waiting for 10 ms * is safe. 
*/ mdelay(10); cntrl ^= PLX9056_PCI_RCR; iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL); }; /* Special reset function for Marathon CAN-bus-PCI card */ static void plx_pci_reset_marathon_pci(struct pci_dev *pdev) { void __iomem *reset_addr; int i; static const int reset_bar[2] = {3, 5}; plx_pci_reset_common(pdev); for (i = 0; i < 2; i++) { reset_addr = pci_iomap(pdev, reset_bar[i], 0); if (!reset_addr) { dev_err(&pdev->dev, "Failed to remap reset " "space %d (BAR%d)\n", i, reset_bar[i]); } else { /* reset the SJA1000 chip */ iowrite8(0x1, reset_addr); udelay(100); pci_iounmap(pdev, reset_addr); } } } /* Special reset function for Marathon CAN-bus-PCIe card */ static void plx_pci_reset_marathon_pcie(struct pci_dev *pdev) { void __iomem *addr; void __iomem *reset_addr; int i; plx9056_pci_reset_common(pdev); for (i = 0; i < 2; i++) { struct plx_pci_channel_map *chan_map = &plx_pci_card_info_marathon_pcie.chan_map_tbl[i]; addr = pci_iomap(pdev, chan_map->bar, chan_map->size); if (!addr) { dev_err(&pdev->dev, "Failed to remap reset " "space %d (BAR%d)\n", i, chan_map->bar); } else { /* reset the SJA1000 chip */ #define MARATHON_PCIE_RESET_OFFSET 32 reset_addr = addr + chan_map->offset + MARATHON_PCIE_RESET_OFFSET; iowrite8(0x1, reset_addr); udelay(100); pci_iounmap(pdev, addr); } } } /* Special reset function for ASEM Dual CAN raw card */ static void plx_pci_reset_asem_dual_can_raw(struct pci_dev *pdev) { void __iomem *bar0_addr; u8 tmpval; plx_pci_reset_common(pdev); bar0_addr = pci_iomap(pdev, 0, 0); if (!bar0_addr) { dev_err(&pdev->dev, "Failed to remap reset space 0 (BAR0)\n"); return; } /* reset the two SJA1000 chips */ tmpval = ioread8(bar0_addr + ASEM_RAW_CAN_RST_REGISTER); tmpval &= ~(ASEM_RAW_CAN_RST_MASK_CAN1 | ASEM_RAW_CAN_RST_MASK_CAN2); iowrite8(tmpval, bar0_addr + ASEM_RAW_CAN_RST_REGISTER); usleep_range(300, 400); tmpval |= ASEM_RAW_CAN_RST_MASK_CAN1 | ASEM_RAW_CAN_RST_MASK_CAN2; iowrite8(tmpval, bar0_addr + ASEM_RAW_CAN_RST_REGISTER); usleep_range(300, 400); pci_iounmap(pdev, bar0_addr); } static void plx_pci_del_card(struct pci_dev *pdev) { struct plx_pci_card *card = pci_get_drvdata(pdev); struct net_device *dev; struct sja1000_priv *priv; int i = 0; for (i = 0; i < PLX_PCI_MAX_CHAN; i++) { dev = card->net_dev[i]; if (!dev) continue; dev_info(&pdev->dev, "Removing %s\n", dev->name); unregister_sja1000dev(dev); priv = netdev_priv(dev); if (priv->reg_base) pci_iounmap(pdev, priv->reg_base); free_sja1000dev(dev); } card->reset_func(pdev); /* * Disable interrupts from PCI-card and disable local * interrupts */ if (pdev->device != PCI_DEVICE_ID_PLX_9056 && pdev->device != MARATHON_PCIE_DEVICE_ID) iowrite32(0x0, card->conf_addr + PLX_INTCSR); else iowrite32(0x0, card->conf_addr + PLX9056_INTCSR); if (card->conf_addr) pci_iounmap(pdev, card->conf_addr); kfree(card); pci_disable_device(pdev); } /* * Probe PLX90xx based device for the SJA1000 chips and register each * available CAN channel to SJA1000 Socket-CAN subsystem. */ static int plx_pci_add_card(struct pci_dev *pdev, const struct pci_device_id *ent) { struct sja1000_priv *priv; struct net_device *dev; struct plx_pci_card *card; struct plx_pci_card_info *ci; int err, i; u32 val; void __iomem *addr; ci = (struct plx_pci_card_info *)ent->driver_data; if (pci_enable_device(pdev) < 0) { dev_err(&pdev->dev, "Failed to enable PCI device\n"); return -ENODEV; } dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n", ci->name, PCI_SLOT(pdev->devfn)); /* Allocate card structures to hold addresses, ... 
	/* Allocate card structures to hold addresses, ... */
	card = kzalloc(sizeof(*card), GFP_KERNEL);
	if (!card) {
		pci_disable_device(pdev);
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, card);

	card->channels = 0;

	/* Remap PLX90xx configuration space */
	addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size);
	if (!addr) {
		err = -ENOMEM;
		dev_err(&pdev->dev, "Failed to remap configuration space (BAR%d)\n",
			ci->conf_map.bar);
		goto failure_cleanup;
	}
	card->conf_addr = addr + ci->conf_map.offset;

	ci->reset_func(pdev);
	card->reset_func = ci->reset_func;

	/* Detect available channels */
	for (i = 0; i < ci->channel_count; i++) {
		struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i];

		dev = alloc_sja1000dev(0);
		if (!dev) {
			err = -ENOMEM;
			goto failure_cleanup;
		}

		card->net_dev[i] = dev;
		priv = netdev_priv(dev);
		priv->priv = card;
		priv->irq_flags = IRQF_SHARED;

		dev->irq = pdev->irq;

		/*
		 * Remap IO space of the SJA1000 chips
		 * This is device-dependent mapping
		 */
		addr = pci_iomap(pdev, cm->bar, cm->size);
		if (!addr) {
			err = -ENOMEM;
			dev_err(&pdev->dev, "Failed to remap BAR%d\n", cm->bar);
			goto failure_cleanup;
		}

		priv->reg_base = addr + cm->offset;
		priv->read_reg = plx_pci_read_reg;
		priv->write_reg = plx_pci_write_reg;

		/* Check if channel is present */
		if (plx_pci_check_sja1000(priv)) {
			priv->can.clock.freq = ci->can_clock;
			priv->ocr = ci->ocr;
			priv->cdr = ci->cdr;

			SET_NETDEV_DEV(dev, &pdev->dev);
			dev->dev_id = i;

			/* Register SJA1000 device */
			err = register_sja1000dev(dev);
			if (err) {
				dev_err(&pdev->dev, "Registering device failed (err=%d)\n",
					err);
				goto failure_cleanup;
			}

			card->channels++;

			dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d registered as %s\n",
				 i + 1, priv->reg_base, dev->irq, dev->name);
		} else {
			dev_err(&pdev->dev, "Channel #%d not detected\n",
				i + 1);
			free_sja1000dev(dev);
			card->net_dev[i] = NULL;
		}
	}

	if (!card->channels) {
		err = -ENODEV;
		goto failure_cleanup;
	}

	/*
	 * Enable interrupts from the PCI card (PLX90xx) and enable Local_1,
	 * Local_2 interrupts from the SJA1000 chips
	 */
	if (pdev->device != PCI_DEVICE_ID_PLX_9056 &&
	    pdev->device != MARATHON_PCIE_DEVICE_ID) {
		val = ioread32(card->conf_addr + PLX_INTCSR);
		if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
			val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
		else
			val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
		iowrite32(val, card->conf_addr + PLX_INTCSR);
	} else {
		iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN,
			  card->conf_addr + PLX9056_INTCSR);
	}

	return 0;

failure_cleanup:
	dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
	plx_pci_del_card(pdev);

	return err;
}

static struct pci_driver plx_pci_driver = {
	.name = DRV_NAME,
	.id_table = plx_pci_tbl,
	.probe = plx_pci_add_card,
	.remove = plx_pci_del_card,
};

module_pci_driver(plx_pci_driver);
linux-master
drivers/net/can/sja1000/plx_pci.c
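The set-then-XOR pulse that plx_pci_reset_common() applies to the PLX CNTRL register is a common MMIO idiom; the user-space sketch below isolates it. The mock_reg variable and pulse_bit() helper are hypothetical stand-ins for the ioread32()/iowrite32() accesses and the PLX_PCI_RESET bit; only the bit manipulation mirrors the driver.

/*
 * Standalone sketch of the reset-pulse pattern used above. Assumes the
 * pulsed bit is clear beforehand, as it is for the PLX reset bit; if it
 * were already set, the final XOR would leave it cleared rather than
 * restoring the original value.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t mock_reg = 0x00000500;	/* arbitrary pre-reset contents */

static void pulse_bit(uint32_t bit)
{
	uint32_t cntrl = mock_reg;	/* ioread32() in the driver */

	cntrl |= bit;			/* assert the reset bit */
	mock_reg = cntrl;		/* iowrite32() */
	/* the driver waits here: udelay(100) */
	cntrl ^= bit;			/* XOR clears the bit just set */
	mock_reg = cntrl;		/* iowrite32() */
}

int main(void)
{
	uint32_t before = mock_reg;

	pulse_bit(1u << 30);		/* placeholder for PLX_PCI_RESET */
	printf("before 0x%08x after 0x%08x\n", before, mock_reg);
	return mock_reg == before ? 0 : 1;
}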
// SPDX-License-Identifier: GPL-2.0
// SPI to CAN driver for the Texas Instruments TCAN4x5x
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/

#include "tcan4x5x.h"

#define TCAN4X5X_EXT_CLK_DEF 40000000

#define TCAN4X5X_DEV_ID1 0x00
#define TCAN4X5X_DEV_ID1_TCAN 0x4e414354 /* ASCII TCAN */
#define TCAN4X5X_DEV_ID2 0x04
#define TCAN4X5X_REV 0x08
#define TCAN4X5X_STATUS 0x0C
#define TCAN4X5X_ERROR_STATUS_MASK 0x10
#define TCAN4X5X_CONTROL 0x14

#define TCAN4X5X_CONFIG 0x800
#define TCAN4X5X_TS_PRESCALE 0x804
#define TCAN4X5X_TEST_REG 0x808
#define TCAN4X5X_INT_FLAGS 0x820
#define TCAN4X5X_MCAN_INT_REG 0x824
#define TCAN4X5X_INT_EN 0x830

/* Interrupt bits */
#define TCAN4X5X_CANBUSTERMOPEN_INT_EN BIT(30)
#define TCAN4X5X_CANHCANL_INT_EN BIT(29)
#define TCAN4X5X_CANHBAT_INT_EN BIT(28)
#define TCAN4X5X_CANLGND_INT_EN BIT(27)
#define TCAN4X5X_CANBUSOPEN_INT_EN BIT(26)
#define TCAN4X5X_CANBUSGND_INT_EN BIT(25)
#define TCAN4X5X_CANBUSBAT_INT_EN BIT(24)
#define TCAN4X5X_UVSUP_INT_EN BIT(22)
#define TCAN4X5X_UVIO_INT_EN BIT(21)
#define TCAN4X5X_TSD_INT_EN BIT(19)
#define TCAN4X5X_ECCERR_INT_EN BIT(16)
#define TCAN4X5X_CANINT_INT_EN BIT(15)
#define TCAN4X5X_LWU_INT_EN BIT(14)
#define TCAN4X5X_CANSLNT_INT_EN BIT(10)
#define TCAN4X5X_CANDOM_INT_EN BIT(8)
#define TCAN4X5X_CANBUS_ERR_INT_EN BIT(5)
#define TCAN4X5X_BUS_FAULT BIT(4)
#define TCAN4X5X_MCAN_INT BIT(1)
#define TCAN4X5X_ENABLE_TCAN_INT \
	(TCAN4X5X_MCAN_INT | TCAN4X5X_BUS_FAULT | \
	 TCAN4X5X_CANBUS_ERR_INT_EN | TCAN4X5X_CANINT_INT_EN)

/* MCAN Interrupt bits */
#define TCAN4X5X_MCAN_IR_ARA BIT(29)
#define TCAN4X5X_MCAN_IR_PED BIT(28)
#define TCAN4X5X_MCAN_IR_PEA BIT(27)
#define TCAN4X5X_MCAN_IR_WD BIT(26)
#define TCAN4X5X_MCAN_IR_BO BIT(25)
#define TCAN4X5X_MCAN_IR_EW BIT(24)
#define TCAN4X5X_MCAN_IR_EP BIT(23)
#define TCAN4X5X_MCAN_IR_ELO BIT(22)
#define TCAN4X5X_MCAN_IR_BEU BIT(21)
#define TCAN4X5X_MCAN_IR_BEC BIT(20)
#define TCAN4X5X_MCAN_IR_DRX BIT(19)
#define TCAN4X5X_MCAN_IR_TOO BIT(18)
#define TCAN4X5X_MCAN_IR_MRAF BIT(17)
#define TCAN4X5X_MCAN_IR_TSW BIT(16)
#define TCAN4X5X_MCAN_IR_TEFL BIT(15)
#define TCAN4X5X_MCAN_IR_TEFF BIT(14)
#define TCAN4X5X_MCAN_IR_TEFW BIT(13)
#define TCAN4X5X_MCAN_IR_TEFN BIT(12)
#define TCAN4X5X_MCAN_IR_TFE BIT(11)
#define TCAN4X5X_MCAN_IR_TCF BIT(10)
#define TCAN4X5X_MCAN_IR_TC BIT(9)
#define TCAN4X5X_MCAN_IR_HPM BIT(8)
#define TCAN4X5X_MCAN_IR_RF1L BIT(7)
#define TCAN4X5X_MCAN_IR_RF1F BIT(6)
#define TCAN4X5X_MCAN_IR_RF1W BIT(5)
#define TCAN4X5X_MCAN_IR_RF1N BIT(4)
#define TCAN4X5X_MCAN_IR_RF0L BIT(3)
#define TCAN4X5X_MCAN_IR_RF0F BIT(2)
#define TCAN4X5X_MCAN_IR_RF0W BIT(1)
#define TCAN4X5X_MCAN_IR_RF0N BIT(0)
#define TCAN4X5X_ENABLE_MCAN_INT \
	(TCAN4X5X_MCAN_IR_TC | TCAN4X5X_MCAN_IR_RF0N | \
	 TCAN4X5X_MCAN_IR_RF1N | TCAN4X5X_MCAN_IR_RF0F | \
	 TCAN4X5X_MCAN_IR_RF1F)

#define TCAN4X5X_MRAM_START 0x8000
#define TCAN4X5X_MRAM_SIZE 0x800
#define TCAN4X5X_MCAN_OFFSET 0x1000

#define TCAN4X5X_CLEAR_ALL_INT 0xffffffff
#define TCAN4X5X_SET_ALL_INT 0xffffffff

#define TCAN4X5X_MODE_SEL_MASK (BIT(7) | BIT(6))
#define TCAN4X5X_MODE_SLEEP 0x00
#define TCAN4X5X_MODE_STANDBY BIT(6)
#define TCAN4X5X_MODE_NORMAL BIT(7)

#define TCAN4X5X_DISABLE_WAKE_MSK (BIT(31) | BIT(30))
#define TCAN4X5X_DISABLE_INH_MSK BIT(9)

#define TCAN4X5X_SW_RESET BIT(2)

#define TCAN4X5X_MCAN_CONFIGURED BIT(5)
#define TCAN4X5X_WATCHDOG_EN BIT(3)
#define TCAN4X5X_WD_60_MS_TIMER 0
#define TCAN4X5X_WD_600_MS_TIMER BIT(28)
#define TCAN4X5X_WD_3_S_TIMER BIT(29)
#define TCAN4X5X_WD_6_S_TIMER (BIT(28) | BIT(29))

struct tcan4x5x_version_info {
	const char *name;
	u32 id2_register;

	bool has_wake_pin;
	bool has_state_pin;
};

enum {
	TCAN4552 = 0,
	TCAN4553,
	TCAN4X5X,
};

static const struct tcan4x5x_version_info tcan4x5x_versions[] = {
	[TCAN4552] = {
		.name = "4552",
		.id2_register = 0x32353534,
	},
	[TCAN4553] = {
		.name = "4553",
		.id2_register = 0x33353534,
	},
	/* generic version with no id2_register at the end */
	[TCAN4X5X] = {
		.name = "generic",
		.has_wake_pin = true,
		.has_state_pin = true,
	},
};

static inline struct tcan4x5x_priv *cdev_to_priv(struct m_can_classdev *cdev)
{
	return container_of(cdev, struct tcan4x5x_priv, cdev);
}

static void tcan4x5x_check_wake(struct tcan4x5x_priv *priv)
{
	int wake_state = 0;

	if (priv->device_state_gpio)
		wake_state = gpiod_get_value(priv->device_state_gpio);

	if (priv->device_wake_gpio && wake_state) {
		gpiod_set_value(priv->device_wake_gpio, 0);
		usleep_range(5, 50);
		gpiod_set_value(priv->device_wake_gpio, 1);
	}
}

static int tcan4x5x_reset(struct tcan4x5x_priv *priv)
{
	int ret = 0;

	if (priv->reset_gpio) {
		gpiod_set_value(priv->reset_gpio, 1);

		/* tpulse_width minimum 30us */
		usleep_range(30, 100);
		gpiod_set_value(priv->reset_gpio, 0);
	} else {
		ret = regmap_write(priv->regmap, TCAN4X5X_CONFIG,
				   TCAN4X5X_SW_RESET);
		if (ret)
			return ret;
	}

	usleep_range(700, 1000);

	return ret;
}

static u32 tcan4x5x_read_reg(struct m_can_classdev *cdev, int reg)
{
	struct tcan4x5x_priv *priv = cdev_to_priv(cdev);
	u32 val;

	regmap_read(priv->regmap, TCAN4X5X_MCAN_OFFSET + reg, &val);

	return val;
}

static int tcan4x5x_read_fifo(struct m_can_classdev *cdev, int addr_offset,
			      void *val, size_t val_count)
{
	struct tcan4x5x_priv *priv = cdev_to_priv(cdev);

	return regmap_bulk_read(priv->regmap,
				TCAN4X5X_MRAM_START + addr_offset,
				val, val_count);
}

static int tcan4x5x_write_reg(struct m_can_classdev *cdev, int reg, int val)
{
	struct tcan4x5x_priv *priv = cdev_to_priv(cdev);

	return regmap_write(priv->regmap, TCAN4X5X_MCAN_OFFSET + reg, val);
}

static int tcan4x5x_write_fifo(struct m_can_classdev *cdev, int addr_offset,
			       const void *val, size_t val_count)
{
	struct tcan4x5x_priv *priv = cdev_to_priv(cdev);

	return regmap_bulk_write(priv->regmap,
				 TCAN4X5X_MRAM_START + addr_offset,
				 val, val_count);
}

static int tcan4x5x_power_enable(struct regulator *reg, int enable)
{
	if (IS_ERR_OR_NULL(reg))
		return 0;

	if (enable)
		return regulator_enable(reg);
	else
		return regulator_disable(reg);
}

static int tcan4x5x_write_tcan_reg(struct m_can_classdev *cdev,
				   int reg, int val)
{
	struct tcan4x5x_priv *priv = cdev_to_priv(cdev);

	return regmap_write(priv->regmap, reg, val);
}

static int tcan4x5x_clear_interrupts(struct m_can_classdev *cdev)
{
	int ret;

	ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_STATUS,
				      TCAN4X5X_CLEAR_ALL_INT);
	if (ret)
		return ret;

	return tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_FLAGS,
				       TCAN4X5X_CLEAR_ALL_INT);
}

static int tcan4x5x_init(struct m_can_classdev *cdev)
{
	struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
	int ret;

	tcan4x5x_check_wake(tcan4x5x);

	ret = tcan4x5x_clear_interrupts(cdev);
	if (ret)
		return ret;

	ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_INT_EN,
				      TCAN4X5X_ENABLE_TCAN_INT);
	if (ret)
		return ret;

	ret = tcan4x5x_write_tcan_reg(cdev, TCAN4X5X_ERROR_STATUS_MASK,
				      TCAN4X5X_CLEAR_ALL_INT);
	if (ret)
		return ret;

	/* Set the device to normal mode */
	return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
				  TCAN4X5X_MODE_SEL_MASK,
				  TCAN4X5X_MODE_NORMAL);
}

static int tcan4x5x_disable_wake(struct m_can_classdev *cdev)
{
	struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);

	return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
				  TCAN4X5X_DISABLE_WAKE_MSK, 0x00);
}
static int tcan4x5x_disable_state(struct m_can_classdev *cdev)
{
	struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);

	return regmap_update_bits(tcan4x5x->regmap, TCAN4X5X_CONFIG,
				  TCAN4X5X_DISABLE_INH_MSK, 0x01);
}

static const struct tcan4x5x_version_info
*tcan4x5x_find_version(struct tcan4x5x_priv *priv)
{
	u32 val;
	int ret;

	ret = regmap_read(priv->regmap, TCAN4X5X_DEV_ID1, &val);
	if (ret)
		return ERR_PTR(ret);

	if (val != TCAN4X5X_DEV_ID1_TCAN) {
		dev_err(&priv->spi->dev, "Not a tcan device %x\n", val);
		return ERR_PTR(-ENODEV);
	}

	ret = regmap_read(priv->regmap, TCAN4X5X_DEV_ID2, &val);
	if (ret)
		return ERR_PTR(ret);

	for (int i = 0; i != ARRAY_SIZE(tcan4x5x_versions); ++i) {
		const struct tcan4x5x_version_info *vinfo = &tcan4x5x_versions[i];

		if (!vinfo->id2_register || val == vinfo->id2_register) {
			dev_info(&priv->spi->dev, "Detected TCAN device version %s\n",
				 vinfo->name);
			return vinfo;
		}
	}

	return &tcan4x5x_versions[TCAN4X5X];
}

static int tcan4x5x_get_gpios(struct m_can_classdev *cdev,
			      const struct tcan4x5x_version_info *version_info)
{
	struct tcan4x5x_priv *tcan4x5x = cdev_to_priv(cdev);
	int ret;

	if (version_info->has_wake_pin) {
		tcan4x5x->device_wake_gpio = devm_gpiod_get(cdev->dev,
							    "device-wake",
							    GPIOD_OUT_HIGH);
		if (IS_ERR(tcan4x5x->device_wake_gpio)) {
			if (PTR_ERR(tcan4x5x->device_wake_gpio) == -EPROBE_DEFER)
				return -EPROBE_DEFER;

			tcan4x5x_disable_wake(cdev);
		}
	}

	tcan4x5x->reset_gpio = devm_gpiod_get_optional(cdev->dev, "reset",
						       GPIOD_OUT_LOW);
	if (IS_ERR(tcan4x5x->reset_gpio))
		tcan4x5x->reset_gpio = NULL;

	ret = tcan4x5x_reset(tcan4x5x);
	if (ret)
		return ret;

	if (version_info->has_state_pin) {
		tcan4x5x->device_state_gpio = devm_gpiod_get_optional(cdev->dev,
								      "device-state",
								      GPIOD_IN);
		if (IS_ERR(tcan4x5x->device_state_gpio)) {
			tcan4x5x->device_state_gpio = NULL;
			tcan4x5x_disable_state(cdev);
		}
	}

	return 0;
}

static struct m_can_ops tcan4x5x_ops = {
	.init = tcan4x5x_init,
	.read_reg = tcan4x5x_read_reg,
	.write_reg = tcan4x5x_write_reg,
	.write_fifo = tcan4x5x_write_fifo,
	.read_fifo = tcan4x5x_read_fifo,
	.clear_interrupts = tcan4x5x_clear_interrupts,
};

static int tcan4x5x_can_probe(struct spi_device *spi)
{
	const struct tcan4x5x_version_info *version_info;
	struct tcan4x5x_priv *priv;
	struct m_can_classdev *mcan_class;
	int freq, ret;

	mcan_class = m_can_class_allocate_dev(&spi->dev,
					      sizeof(struct tcan4x5x_priv));
	if (!mcan_class)
		return -ENOMEM;

	ret = m_can_check_mram_cfg(mcan_class, TCAN4X5X_MRAM_SIZE);
	if (ret)
		goto out_m_can_class_free_dev;

	priv = cdev_to_priv(mcan_class);

	priv->power = devm_regulator_get_optional(&spi->dev, "vsup");
	if (PTR_ERR(priv->power) == -EPROBE_DEFER) {
		ret = -EPROBE_DEFER;
		goto out_m_can_class_free_dev;
	} else if (IS_ERR(priv->power)) {
		/* regulator is optional: fall back to no supply handling */
		priv->power = NULL;
	}

	m_can_class_get_clocks(mcan_class);
	if (IS_ERR(mcan_class->cclk)) {
		dev_err(&spi->dev, "no CAN clock source defined\n");
		freq = TCAN4X5X_EXT_CLK_DEF;
	} else {
		freq = clk_get_rate(mcan_class->cclk);
	}

	/* Sanity check */
	if (freq < 20000000 || freq > TCAN4X5X_EXT_CLK_DEF) {
		dev_err(&spi->dev, "Clock frequency is out of supported range %d\n",
			freq);
		ret = -ERANGE;
		goto out_m_can_class_free_dev;
	}

	priv->spi = spi;

	mcan_class->pm_clock_support = 0;
	mcan_class->can.clock.freq = freq;
	mcan_class->dev = &spi->dev;
	mcan_class->ops = &tcan4x5x_ops;
	mcan_class->is_peripheral = true;
	mcan_class->net->irq = spi->irq;

	spi_set_drvdata(spi, priv);

	/* Configure the SPI bus */
	spi->bits_per_word = 8;
	ret = spi_setup(spi);
	if (ret) {
		dev_err(&spi->dev, "SPI setup failed %pe\n", ERR_PTR(ret));
		goto out_m_can_class_free_dev;
	}
	ret = tcan4x5x_regmap_init(priv);
	if (ret) {
		dev_err(&spi->dev, "regmap init failed %pe\n", ERR_PTR(ret));
		goto out_m_can_class_free_dev;
	}

	ret = tcan4x5x_power_enable(priv->power, 1);
	if (ret) {
		dev_err(&spi->dev, "Enabling regulator failed %pe\n",
			ERR_PTR(ret));
		goto out_m_can_class_free_dev;
	}

	version_info = tcan4x5x_find_version(priv);
	if (IS_ERR(version_info)) {
		ret = PTR_ERR(version_info);
		goto out_power;
	}

	ret = tcan4x5x_get_gpios(mcan_class, version_info);
	if (ret) {
		dev_err(&spi->dev, "Getting gpios failed %pe\n", ERR_PTR(ret));
		goto out_power;
	}

	ret = tcan4x5x_init(mcan_class);
	if (ret) {
		dev_err(&spi->dev, "tcan initialization failed %pe\n",
			ERR_PTR(ret));
		goto out_power;
	}

	ret = m_can_class_register(mcan_class);
	if (ret) {
		dev_err(&spi->dev, "Failed registering m_can device %pe\n",
			ERR_PTR(ret));
		goto out_power;
	}

	netdev_info(mcan_class->net, "TCAN4X5X successfully initialized.\n");
	return 0;

out_power:
	tcan4x5x_power_enable(priv->power, 0);
out_m_can_class_free_dev:
	m_can_class_free_dev(mcan_class->net);
	return ret;
}

static void tcan4x5x_can_remove(struct spi_device *spi)
{
	struct tcan4x5x_priv *priv = spi_get_drvdata(spi);

	m_can_class_unregister(&priv->cdev);

	tcan4x5x_power_enable(priv->power, 0);

	m_can_class_free_dev(priv->cdev.net);
}

static const struct of_device_id tcan4x5x_of_match[] = {
	{
		.compatible = "ti,tcan4x5x",
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(of, tcan4x5x_of_match);

static const struct spi_device_id tcan4x5x_id_table[] = {
	{
		.name = "tcan4x5x",
	}, {
		/* sentinel */
	},
};
MODULE_DEVICE_TABLE(spi, tcan4x5x_id_table);

static struct spi_driver tcan4x5x_can_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = tcan4x5x_of_match,
		.pm = NULL,
	},
	.id_table = tcan4x5x_id_table,
	.probe = tcan4x5x_can_probe,
	.remove = tcan4x5x_can_remove,
};
module_spi_driver(tcan4x5x_can_driver);

MODULE_AUTHOR("Dan Murphy <[email protected]>");
MODULE_DESCRIPTION("Texas Instruments TCAN4x5x CAN driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/net/can/m_can/tcan4x5x-core.c
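tcan4x5x_find_version() works because the device ID registers hold ASCII characters packed little-endian into 32-bit words ("TCAN" reads back as 0x4e414354), and an id2_register of 0 acts as a wildcard, so the generic entry must come last. The standalone sketch below reproduces just that lookup; the pack() helper and the trimmed table are illustrative, not part of the driver.

/*
 * Sketch of the ID2-based version match, assuming the little-endian
 * ASCII packing described above.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct version_info {
	const char *name;
	uint32_t id2;		/* 0 = wildcard, must come last */
};

static const struct version_info versions[] = {
	{ "4552", 0x32353534 },
	{ "4553", 0x33353534 },
	{ "generic", 0 },
};

/* pack four ASCII bytes the way the chip presents them over SPI */
static uint32_t pack(const char *s)
{
	return (uint32_t)s[0] | (uint32_t)s[1] << 8 |
	       (uint32_t)s[2] << 16 | (uint32_t)s[3] << 24;
}

int main(void)
{
	uint32_t id2 = pack("4553");	/* pretend readback of DEV_ID2 */

	for (size_t i = 0; i < sizeof(versions) / sizeof(versions[0]); i++) {
		if (!versions[i].id2 || id2 == versions[i].id2) {
			printf("matched version %s\n", versions[i].name);
			break;
		}
	}
	return 0;
}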
// SPDX-License-Identifier: GPL-2.0
// IOMapped CAN bus driver for Bosch M_CAN controller
// Copyright (C) 2014 Freescale Semiconductor, Inc.
//	Dong Aisheng <[email protected]>
//
// Copyright (C) 2018-19 Texas Instruments Incorporated - http://www.ti.com/

#include <linux/hrtimer.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

#include "m_can.h"

struct m_can_plat_priv {
	struct m_can_classdev cdev;

	void __iomem *base;
	void __iomem *mram_base;
};

static inline struct m_can_plat_priv *cdev_to_priv(struct m_can_classdev *cdev)
{
	return container_of(cdev, struct m_can_plat_priv, cdev);
}

static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
{
	struct m_can_plat_priv *priv = cdev_to_priv(cdev);

	return readl(priv->base + reg);
}

static int iomap_read_fifo(struct m_can_classdev *cdev, int offset,
			   void *val, size_t val_count)
{
	struct m_can_plat_priv *priv = cdev_to_priv(cdev);
	void __iomem *src = priv->mram_base + offset;

	while (val_count--) {
		*(unsigned int *)val = ioread32(src);
		val += 4;
		src += 4;
	}

	return 0;
}

static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val)
{
	struct m_can_plat_priv *priv = cdev_to_priv(cdev);

	writel(val, priv->base + reg);

	return 0;
}

static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
			    const void *val, size_t val_count)
{
	struct m_can_plat_priv *priv = cdev_to_priv(cdev);
	void __iomem *dst = priv->mram_base + offset;

	while (val_count--) {
		iowrite32(*(unsigned int *)val, dst);
		val += 4;
		dst += 4;
	}

	return 0;
}

static struct m_can_ops m_can_plat_ops = {
	.read_reg = iomap_read_reg,
	.write_reg = iomap_write_reg,
	.write_fifo = iomap_write_fifo,
	.read_fifo = iomap_read_fifo,
};

static int m_can_plat_probe(struct platform_device *pdev)
{
	struct m_can_classdev *mcan_class;
	struct m_can_plat_priv *priv;
	struct resource *res;
	void __iomem *addr;
	void __iomem *mram_addr;
	struct phy *transceiver;
	int irq = 0, ret = 0;

	mcan_class = m_can_class_allocate_dev(&pdev->dev,
					      sizeof(struct m_can_plat_priv));
	if (!mcan_class)
		return -ENOMEM;

	priv = cdev_to_priv(mcan_class);

	ret = m_can_class_get_clocks(mcan_class);
	if (ret)
		goto probe_fail;

	addr = devm_platform_ioremap_resource_byname(pdev, "m_can");
	if (IS_ERR(addr)) {
		ret = PTR_ERR(addr);
		goto probe_fail;
	}

	if (device_property_present(mcan_class->dev, "interrupts") ||
	    device_property_present(mcan_class->dev, "interrupt-names")) {
		irq = platform_get_irq_byname(pdev, "int0");
		if (irq < 0) {
			ret = irq;
			goto probe_fail;
		}
	} else {
		dev_dbg(mcan_class->dev, "Polling enabled, initialize hrtimer");
		hrtimer_init(&mcan_class->hrtimer, CLOCK_MONOTONIC,
			     HRTIMER_MODE_REL_PINNED);
	}

	/* message ram could be shared */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "message_ram");
	if (!res) {
		ret = -ENODEV;
		goto probe_fail;
	}

	mram_addr = devm_ioremap(&pdev->dev, res->start, resource_size(res));
	if (!mram_addr) {
		ret = -ENOMEM;
		goto probe_fail;
	}

	transceiver = devm_phy_optional_get(&pdev->dev, NULL);
	if (IS_ERR(transceiver)) {
		ret = PTR_ERR(transceiver);
		dev_err_probe(&pdev->dev, ret, "failed to get phy\n");
		goto probe_fail;
	}

	if (transceiver)
		mcan_class->can.bitrate_max = transceiver->attrs.max_link_rate;

	priv->base = addr;
	priv->mram_base = mram_addr;

	mcan_class->net->irq = irq;
	mcan_class->pm_clock_support = 1;
	mcan_class->can.clock.freq = clk_get_rate(mcan_class->cclk);
	mcan_class->dev = &pdev->dev;
	mcan_class->transceiver = transceiver;

	mcan_class->ops = &m_can_plat_ops;

	mcan_class->is_peripheral = false;

	platform_set_drvdata(pdev, mcan_class);

	pm_runtime_enable(mcan_class->dev);
	ret = m_can_class_register(mcan_class);
	if (ret)
		goto out_runtime_disable;

	return ret;

out_runtime_disable:
	pm_runtime_disable(mcan_class->dev);
probe_fail:
	m_can_class_free_dev(mcan_class->net);
	return ret;
}

static __maybe_unused int m_can_suspend(struct device *dev)
{
	return m_can_class_suspend(dev);
}

static __maybe_unused int m_can_resume(struct device *dev)
{
	return m_can_class_resume(dev);
}

static void m_can_plat_remove(struct platform_device *pdev)
{
	struct m_can_plat_priv *priv = platform_get_drvdata(pdev);
	struct m_can_classdev *mcan_class = &priv->cdev;

	m_can_class_unregister(mcan_class);

	m_can_class_free_dev(mcan_class->net);
}

static int __maybe_unused m_can_runtime_suspend(struct device *dev)
{
	struct m_can_plat_priv *priv = dev_get_drvdata(dev);
	struct m_can_classdev *mcan_class = &priv->cdev;

	clk_disable_unprepare(mcan_class->cclk);
	clk_disable_unprepare(mcan_class->hclk);

	return 0;
}

static int __maybe_unused m_can_runtime_resume(struct device *dev)
{
	struct m_can_plat_priv *priv = dev_get_drvdata(dev);
	struct m_can_classdev *mcan_class = &priv->cdev;
	int err;

	err = clk_prepare_enable(mcan_class->hclk);
	if (err)
		return err;

	err = clk_prepare_enable(mcan_class->cclk);
	if (err)
		clk_disable_unprepare(mcan_class->hclk);

	return err;
}

static const struct dev_pm_ops m_can_pmops = {
	SET_RUNTIME_PM_OPS(m_can_runtime_suspend,
			   m_can_runtime_resume, NULL)
	SET_SYSTEM_SLEEP_PM_OPS(m_can_suspend, m_can_resume)
};

static const struct of_device_id m_can_of_table[] = {
	{ .compatible = "bosch,m_can", .data = NULL },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, m_can_of_table);

static struct platform_driver m_can_plat_driver = {
	.driver = {
		.name = KBUILD_MODNAME,
		.of_match_table = m_can_of_table,
		.pm = &m_can_pmops,
	},
	.probe = m_can_plat_probe,
	.remove_new = m_can_plat_remove,
};

module_platform_driver(m_can_plat_driver);

MODULE_AUTHOR("Dong Aisheng <[email protected]>");
MODULE_AUTHOR("Dan Murphy <[email protected]>");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("M_CAN driver for IO Mapped Bosch controllers");
linux-master
drivers/net/can/m_can/m_can_platform.c
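The iomap_read_fifo()/iomap_write_fifo() loops above copy message RAM one 32-bit word at a time, so val_count counts words rather than bytes and both pointers advance by 4. The user-space sketch below mirrors that loop with plain memory in place of the ioread32()/iowrite32() MMIO accessors; the mram[] array and the offsets are made up.

/*
 * User-space sketch of the word-at-a-time FIFO copy, assuming a mock
 * memory array instead of device MMIO.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void fifo_read(const uint32_t *mram, size_t offset_bytes,
		      void *val, size_t val_count)
{
	const uint32_t *src = mram + offset_bytes / 4;

	while (val_count--) {
		uint32_t word = *src++;	/* ioread32() in the driver */

		memcpy(val, &word, 4);	/* tolerate an unaligned val */
		val = (char *)val + 4;
	}
}

int main(void)
{
	uint32_t mram[8] = { 0, 0, 0x11111111, 0x22222222, 0x33333333 };
	uint32_t out[3];

	fifo_read(mram, 8, out, 3);	/* read 3 words at byte offset 8 */
	printf("0x%08x 0x%08x 0x%08x\n", out[0], out[1], out[2]);
	return 0;
}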
// SPDX-License-Identifier: GPL-2.0
/*
 * PCI Specific M_CAN Glue
 *
 * Copyright (C) 2018-2020 Intel Corporation
 * Author: Felipe Balbi (Intel)
 * Author: Jarkko Nikula <[email protected]>
 * Author: Raymond Tan <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>

#include "m_can.h"

#define M_CAN_PCI_MMIO_BAR	0

#define M_CAN_CLOCK_FREQ_EHL	200000000
#define CTL_CSR_INT_CTL_OFFSET	0x508

struct m_can_pci_priv {
	struct m_can_classdev cdev;

	void __iomem *base;
};

static inline struct m_can_pci_priv *cdev_to_priv(struct m_can_classdev *cdev)
{
	return container_of(cdev, struct m_can_pci_priv, cdev);
}

static u32 iomap_read_reg(struct m_can_classdev *cdev, int reg)
{
	struct m_can_pci_priv *priv = cdev_to_priv(cdev);

	return readl(priv->base + reg);
}

static int iomap_read_fifo(struct m_can_classdev *cdev, int offset,
			   void *val, size_t val_count)
{
	struct m_can_pci_priv *priv = cdev_to_priv(cdev);
	void __iomem *src = priv->base + offset;

	while (val_count--) {
		*(unsigned int *)val = ioread32(src);
		val += 4;
		src += 4;
	}

	return 0;
}

static int iomap_write_reg(struct m_can_classdev *cdev, int reg, int val)
{
	struct m_can_pci_priv *priv = cdev_to_priv(cdev);

	writel(val, priv->base + reg);

	return 0;
}

static int iomap_write_fifo(struct m_can_classdev *cdev, int offset,
			    const void *val, size_t val_count)
{
	struct m_can_pci_priv *priv = cdev_to_priv(cdev);
	void __iomem *dst = priv->base + offset;

	while (val_count--) {
		iowrite32(*(unsigned int *)val, dst);
		val += 4;
		dst += 4;
	}

	return 0;
}

static struct m_can_ops m_can_pci_ops = {
	.read_reg = iomap_read_reg,
	.write_reg = iomap_write_reg,
	.write_fifo = iomap_write_fifo,
	.read_fifo = iomap_read_fifo,
};

static int m_can_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
	struct device *dev = &pci->dev;
	struct m_can_classdev *mcan_class;
	struct m_can_pci_priv *priv;
	void __iomem *base;
	int ret;

	ret = pcim_enable_device(pci);
	if (ret)
		return ret;

	pci_set_master(pci);

	ret = pcim_iomap_regions(pci, BIT(M_CAN_PCI_MMIO_BAR), pci_name(pci));
	if (ret)
		return ret;

	base = pcim_iomap_table(pci)[M_CAN_PCI_MMIO_BAR];
	if (!base) {
		dev_err(dev, "failed to map BARs\n");
		return -ENOMEM;
	}

	mcan_class = m_can_class_allocate_dev(&pci->dev,
					      sizeof(struct m_can_pci_priv));
	if (!mcan_class)
		return -ENOMEM;

	priv = cdev_to_priv(mcan_class);

	priv->base = base;

	ret = pci_alloc_irq_vectors(pci, 1, 1, PCI_IRQ_ALL_TYPES);
	if (ret < 0)
		goto err_free_dev;

	mcan_class->dev = &pci->dev;
	mcan_class->net->irq = pci_irq_vector(pci, 0);
	mcan_class->pm_clock_support = 1;
	mcan_class->can.clock.freq = id->driver_data;
	mcan_class->ops = &m_can_pci_ops;

	pci_set_drvdata(pci, mcan_class);

	ret = m_can_class_register(mcan_class);
	if (ret)
		goto err_free_irq;

	/* Enable interrupt control at CAN wrapper IP */
	writel(0x1, base + CTL_CSR_INT_CTL_OFFSET);

	pm_runtime_set_autosuspend_delay(dev, 1000);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_put_noidle(dev);
	pm_runtime_allow(dev);

	return 0;

err_free_irq:
	pci_free_irq_vectors(pci);
err_free_dev:
	m_can_class_free_dev(mcan_class->net);
	return ret;
}

static void m_can_pci_remove(struct pci_dev *pci)
{
	struct m_can_classdev *mcan_class = pci_get_drvdata(pci);
	struct m_can_pci_priv *priv = cdev_to_priv(mcan_class);

	pm_runtime_forbid(&pci->dev);
	pm_runtime_get_noresume(&pci->dev);

	/* Disable interrupt control at CAN wrapper IP */
	writel(0x0, priv->base + CTL_CSR_INT_CTL_OFFSET);

	m_can_class_unregister(mcan_class);
	m_can_class_free_dev(mcan_class->net);
	pci_free_irq_vectors(pci);
}

static __maybe_unused int m_can_pci_suspend(struct device *dev)
{
	return m_can_class_suspend(dev);
}

static __maybe_unused int m_can_pci_resume(struct device *dev)
{
	return m_can_class_resume(dev);
}

static SIMPLE_DEV_PM_OPS(m_can_pci_pm_ops,
			 m_can_pci_suspend, m_can_pci_resume);

static const struct pci_device_id m_can_pci_id_table[] = {
	{ PCI_VDEVICE(INTEL, 0x4bc1), M_CAN_CLOCK_FREQ_EHL, },
	{ PCI_VDEVICE(INTEL, 0x4bc2), M_CAN_CLOCK_FREQ_EHL, },
	{ }	/* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, m_can_pci_id_table);

static struct pci_driver m_can_pci_driver = {
	.name = "m_can_pci",
	.probe = m_can_pci_probe,
	.remove = m_can_pci_remove,
	.id_table = m_can_pci_id_table,
	.driver = {
		.pm = &m_can_pci_pm_ops,
	},
};

module_pci_driver(m_can_pci_driver);

MODULE_AUTHOR("Felipe Balbi (Intel)");
MODULE_AUTHOR("Jarkko Nikula <[email protected]>");
MODULE_AUTHOR("Raymond Tan <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("CAN bus driver for Bosch M_CAN controller on PCI bus");
linux-master
drivers/net/can/m_can/m_can_pci.c
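The ID table above carries the per-device CAN clock frequency in the driver_data field, which is why a single probe routine can assign mcan_class->can.clock.freq straight from id->driver_data. The sketch below reproduces that lookup pattern; the id_entry struct and table are simplified stand-ins for struct pci_device_id and m_can_pci_id_table.

/*
 * Sketch of driver_data carrying a per-device parameter; values mirror
 * the table above (Intel vendor 0x8086, the "EHL" 200 MHz clock).
 */
#include <stdint.h>
#include <stdio.h>

struct id_entry {
	uint16_t vendor, device;
	unsigned long driver_data;	/* clock frequency in Hz */
};

static const struct id_entry id_table[] = {
	{ 0x8086, 0x4bc1, 200000000 },
	{ 0x8086, 0x4bc2, 200000000 },
	{ 0, 0, 0 },			/* terminating entry */
};

static void probe(const struct id_entry *ent)
{
	/* the real probe stores this in mcan_class->can.clock.freq */
	printf("device %04x:%04x, CAN clock %lu Hz\n",
	       ent->vendor, ent->device, ent->driver_data);
}

int main(void)
{
	for (const struct id_entry *ent = id_table; ent->vendor; ent++)
		probe(ent);
	return 0;
}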
// SPDX-License-Identifier: GPL-2.0
//
// tcan4x5x - Texas Instruments TCAN4x5x Family CAN controller driver
//
// Copyright (c) 2020 Pengutronix,
//                    Marc Kleine-Budde <[email protected]>
// Copyright (c) 2018-2019 Texas Instruments Incorporated
//                    http://www.ti.com/

#include "tcan4x5x.h"

#define TCAN4X5X_SPI_INSTRUCTION_WRITE (0x61 << 24)
#define TCAN4X5X_SPI_INSTRUCTION_READ (0x41 << 24)

#define TCAN4X5X_MAX_REGISTER 0x87fc

static int tcan4x5x_regmap_gather_write(void *context,
					const void *reg, size_t reg_len,
					const void *val, size_t val_len)
{
	struct spi_device *spi = context;
	struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
	struct tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx;
	struct spi_transfer xfer[] = {
		{
			.tx_buf = buf_tx,
			.len = sizeof(buf_tx->cmd) + val_len,
		},
	};

	memcpy(&buf_tx->cmd, reg, sizeof(buf_tx->cmd.cmd) +
	       sizeof(buf_tx->cmd.addr));
	tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len);
	memcpy(buf_tx->data, val, val_len);

	return spi_sync_transfer(spi, xfer, ARRAY_SIZE(xfer));
}

static int tcan4x5x_regmap_write(void *context, const void *data, size_t count)
{
	return tcan4x5x_regmap_gather_write(context, data, sizeof(__be32),
					    data + sizeof(__be32),
					    count - sizeof(__be32));
}

static int tcan4x5x_regmap_read(void *context,
				const void *reg_buf, size_t reg_len,
				void *val_buf, size_t val_len)
{
	struct spi_device *spi = context;
	struct tcan4x5x_priv *priv = spi_get_drvdata(spi);
	struct tcan4x5x_map_buf *buf_rx = &priv->map_buf_rx;
	struct tcan4x5x_map_buf *buf_tx = &priv->map_buf_tx;
	struct spi_transfer xfer[2] = {
		{
			.tx_buf = buf_tx,
		}
	};
	struct spi_message msg;
	int err;

	spi_message_init(&msg);
	spi_message_add_tail(&xfer[0], &msg);

	memcpy(&buf_tx->cmd, reg_buf, sizeof(buf_tx->cmd.cmd) +
	       sizeof(buf_tx->cmd.addr));
	tcan4x5x_spi_cmd_set_len(&buf_tx->cmd, val_len);

	if (spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX) {
		xfer[0].len = sizeof(buf_tx->cmd);

		xfer[1].rx_buf = val_buf;
		xfer[1].len = val_len;
		spi_message_add_tail(&xfer[1], &msg);
	} else {
		xfer[0].rx_buf = buf_rx;
		xfer[0].len = sizeof(buf_tx->cmd) + val_len;

		if (TCAN4X5X_SANITIZE_SPI)
			memset(buf_tx->data, 0x0, val_len);
	}

	err = spi_sync(spi, &msg);
	if (err)
		return err;

	if (!(spi->controller->flags & SPI_CONTROLLER_HALF_DUPLEX))
		memcpy(val_buf, buf_rx->data, val_len);

	return 0;
}

static const struct regmap_range tcan4x5x_reg_table_wr_range[] = {
	/* Device ID and SPI Registers */
	regmap_reg_range(0x000c, 0x0010),
	/* Device configuration registers and Interrupt Flags */
	regmap_reg_range(0x0800, 0x080c),
	regmap_reg_range(0x0820, 0x0820),
	regmap_reg_range(0x0830, 0x0830),
	/* M_CAN */
	regmap_reg_range(0x100c, 0x102c),
	regmap_reg_range(0x1048, 0x1048),
	regmap_reg_range(0x1050, 0x105c),
	regmap_reg_range(0x1080, 0x1088),
	regmap_reg_range(0x1090, 0x1090),
	regmap_reg_range(0x1098, 0x10a0),
	regmap_reg_range(0x10a8, 0x10b0),
	regmap_reg_range(0x10b8, 0x10c0),
	regmap_reg_range(0x10c8, 0x10c8),
	regmap_reg_range(0x10d0, 0x10d4),
	regmap_reg_range(0x10e0, 0x10e4),
	regmap_reg_range(0x10f0, 0x10f0),
	regmap_reg_range(0x10f8, 0x10f8),
	/* MRAM */
	regmap_reg_range(0x8000, 0x87fc),
};

static const struct regmap_range tcan4x5x_reg_table_rd_range[] = {
	regmap_reg_range(0x0000, 0x0010),	/* Device ID and SPI Registers */
	regmap_reg_range(0x0800, 0x0830),	/* Device configuration registers and Interrupt Flags */
	regmap_reg_range(0x1000, 0x10fc),	/* M_CAN */
	regmap_reg_range(0x8000, 0x87fc),	/* MRAM */
};

static const struct regmap_access_table tcan4x5x_reg_table_wr = {
	.yes_ranges = tcan4x5x_reg_table_wr_range,
	.n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_wr_range),
};

static const struct regmap_access_table tcan4x5x_reg_table_rd = {
	.yes_ranges = tcan4x5x_reg_table_rd_range,
	.n_yes_ranges = ARRAY_SIZE(tcan4x5x_reg_table_rd_range),
};

static const struct regmap_config tcan4x5x_regmap = {
	.reg_bits = 24,
	.reg_stride = 4,
	.pad_bits = 8,
	.val_bits = 32,
	.wr_table = &tcan4x5x_reg_table_wr,
	.rd_table = &tcan4x5x_reg_table_rd,
	.max_register = TCAN4X5X_MAX_REGISTER,
	.cache_type = REGCACHE_NONE,
	.read_flag_mask = (__force unsigned long)
		cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_READ),
	.write_flag_mask = (__force unsigned long)
		cpu_to_be32(TCAN4X5X_SPI_INSTRUCTION_WRITE),
};

static const struct regmap_bus tcan4x5x_bus = {
	.write = tcan4x5x_regmap_write,
	.gather_write = tcan4x5x_regmap_gather_write,
	.read = tcan4x5x_regmap_read,
	.reg_format_endian_default = REGMAP_ENDIAN_BIG,
	.val_format_endian_default = REGMAP_ENDIAN_BIG,
	.max_raw_read = 256,
	.max_raw_write = 256,
};

int tcan4x5x_regmap_init(struct tcan4x5x_priv *priv)
{
	priv->regmap = devm_regmap_init(&priv->spi->dev, &tcan4x5x_bus,
					priv->spi, &tcan4x5x_regmap);
	return PTR_ERR_OR_ZERO(priv->regmap);
}
linux-master
drivers/net/can/m_can/tcan4x5x-regmap.c
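The regmap configuration above (24 reg_bits plus 8 pad_bits, with the 0x61/0x41 instruction OR'd into the top byte as a flag mask) implies a 32-bit SPI command header: instruction byte, 16-bit register address, and a length byte in 32-bit words. The sketch below builds such a header; the build_cmd() helper and exact field order are assumptions for illustration, since the real layout lives in the tcan4x5x_map_buf/cmd structures of tcan4x5x.h, which is not shown here.

/*
 * Hypothetical reconstruction of the SPI command header; byte order
 * assumed big-endian on the wire per the regmap endian settings above.
 */
#include <stdint.h>
#include <stdio.h>

#define SPI_WRITE 0x61
#define SPI_READ  0x41

static void build_cmd(uint8_t buf[4], uint8_t insn, uint16_t addr,
		      uint8_t len_words)
{
	buf[0] = insn;			/* instruction byte, sent first */
	buf[1] = addr >> 8;		/* register address, big-endian */
	buf[2] = addr & 0xff;
	buf[3] = len_words;		/* transfer length in 32-bit words */
}

int main(void)
{
	uint8_t cmd[4];

	/* read one word from the M_CAN region at 0x1000 */
	build_cmd(cmd, SPI_READ, 0x1000, 1);
	printf("%02x %02x %02x %02x\n", cmd[0], cmd[1], cmd[2], cmd[3]);
	return 0;
}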