// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test for the FPGA Region
 *
 * Copyright (C) 2023 Red Hat, Inc.
 *
 * Author: Marco Pagani <[email protected]>
 */

#include <kunit/test.h>
#include <linux/fpga/fpga-bridge.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/fpga/fpga-region.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct mgr_stats {
	u32 write_count;
};

struct bridge_stats {
	bool enable;
	u32 cycles_count;
};

struct test_ctx {
	struct fpga_manager *mgr;
	struct platform_device *mgr_pdev;
	struct fpga_bridge *bridge;
	struct platform_device *bridge_pdev;
	struct fpga_region *region;
	struct platform_device *region_pdev;
	struct bridge_stats bridge_stats;
	struct mgr_stats mgr_stats;
};

static int op_write(struct fpga_manager *mgr, const char *buf, size_t count)
{
	struct mgr_stats *stats = mgr->priv;

	stats->write_count++;

	return 0;
}

/*
 * Fake FPGA manager that implements only the write op to count the number
 * of programming cycles. The internals of the programming sequence are
 * tested in the Manager suite since they are outside the responsibility
 * of the Region.
 */
static const struct fpga_manager_ops fake_mgr_ops = {
	.write = op_write,
};

static int op_enable_set(struct fpga_bridge *bridge, bool enable)
{
	struct bridge_stats *stats = bridge->priv;

	if (!stats->enable && enable)
		stats->cycles_count++;

	stats->enable = enable;

	return 0;
}

/*
 * Fake FPGA bridge that implements only enable_set op to count the number
 * of activation cycles.
 */
static const struct fpga_bridge_ops fake_bridge_ops = {
	.enable_set = op_enable_set,
};

static int fake_region_get_bridges(struct fpga_region *region)
{
	struct fpga_bridge *bridge = region->priv;

	return fpga_bridge_get_to_list(bridge->dev.parent, region->info,
				       &region->bridge_list);
}

static int fake_region_match(struct device *dev, const void *data)
{
	return dev->parent == data;
}

static void fpga_region_test_class_find(struct kunit *test)
{
	struct test_ctx *ctx = test->priv;
	struct fpga_region *region;

	region = fpga_region_class_find(NULL, &ctx->region_pdev->dev,
					fake_region_match);
	KUNIT_EXPECT_PTR_EQ(test, region, ctx->region);
}

/*
 * FPGA Region programming test. The Region must call get_bridges() to get
 * and control the bridges, and then the Manager for the actual programming.
 */
static void fpga_region_test_program_fpga(struct kunit *test)
{
	struct test_ctx *ctx = test->priv;
	struct fpga_image_info *img_info;
	char img_buf[4];
	int ret;

	img_info = fpga_image_info_alloc(&ctx->mgr_pdev->dev);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, img_info);

	img_info->buf = img_buf;
	img_info->count = sizeof(img_buf);

	ctx->region->info = img_info;
	ret = fpga_region_program_fpga(ctx->region);
	KUNIT_ASSERT_EQ(test, ret, 0);

	KUNIT_EXPECT_EQ(test, 1, ctx->mgr_stats.write_count);
	KUNIT_EXPECT_EQ(test, 1, ctx->bridge_stats.cycles_count);

	fpga_bridges_put(&ctx->region->bridge_list);

	ret = fpga_region_program_fpga(ctx->region);
	KUNIT_ASSERT_EQ(test, ret, 0);

	KUNIT_EXPECT_EQ(test, 2, ctx->mgr_stats.write_count);
	KUNIT_EXPECT_EQ(test, 2, ctx->bridge_stats.cycles_count);

	fpga_bridges_put(&ctx->region->bridge_list);

	fpga_image_info_free(img_info);
}

/*
 * The configuration used in this test suite uses a single bridge to
 * limit the code under test to a single unit. The functions used by the
 * Region for getting and controlling bridges are tested (with a list of
 * multiple bridges) in the Bridge suite.
 */
static int fpga_region_test_init(struct kunit *test)
{
	struct test_ctx *ctx;
	struct fpga_region_info region_info = { 0 };

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);

	ctx->mgr_pdev = platform_device_register_simple("mgr_pdev",
							PLATFORM_DEVID_AUTO,
							NULL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->mgr_pdev);

	ctx->mgr = devm_fpga_mgr_register(&ctx->mgr_pdev->dev,
					  "Fake FPGA Manager",
					  &fake_mgr_ops, &ctx->mgr_stats);
	KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->mgr));

	ctx->bridge_pdev = platform_device_register_simple("bridge_pdev",
							   PLATFORM_DEVID_AUTO,
							   NULL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->bridge_pdev);

	ctx->bridge = fpga_bridge_register(&ctx->bridge_pdev->dev,
					   "Fake FPGA Bridge",
					   &fake_bridge_ops,
					   &ctx->bridge_stats);
	KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge));

	ctx->bridge_stats.enable = true;

	ctx->region_pdev = platform_device_register_simple("region_pdev",
							   PLATFORM_DEVID_AUTO,
							   NULL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->region_pdev);

	region_info.mgr = ctx->mgr;
	region_info.priv = ctx->bridge;
	region_info.get_bridges = fake_region_get_bridges;

	ctx->region = fpga_region_register_full(&ctx->region_pdev->dev,
						&region_info);
	KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->region));

	test->priv = ctx;

	return 0;
}

static void fpga_region_test_exit(struct kunit *test)
{
	struct test_ctx *ctx = test->priv;

	fpga_region_unregister(ctx->region);
	platform_device_unregister(ctx->region_pdev);

	fpga_bridge_unregister(ctx->bridge);
	platform_device_unregister(ctx->bridge_pdev);

	platform_device_unregister(ctx->mgr_pdev);
}

static struct kunit_case fpga_region_test_cases[] = {
	KUNIT_CASE(fpga_region_test_class_find),
	KUNIT_CASE(fpga_region_test_program_fpga),
	{}
};

static struct kunit_suite fpga_region_suite = {
	.name = "fpga_region",
	.init = fpga_region_test_init,
	.exit = fpga_region_test_exit,
	.test_cases = fpga_region_test_cases,
};

kunit_test_suite(fpga_region_suite);

MODULE_LICENSE("GPL");
linux-master
drivers/fpga/tests/fpga-region-test.c
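As a usage illustration (not part of the kernel sources above): a platform driver could register a region through the same fpga_region_register_full() path the test fixture uses. This is a minimal sketch; the my_region_* names and the assumption that the manager hangs off the parent device are hypothetical, while the fpga_region_info fields (mgr, priv, get_bridges) mirror fpga_region_test_init().

/* Hypothetical get_bridges() callback: collect this region's bridge(s). */
static int my_region_get_bridges(struct fpga_region *region)
{
	struct device *bridge_dev = region->priv;	/* set in probe below */

	return fpga_bridge_get_to_list(bridge_dev, region->info,
				       &region->bridge_list);
}

static int my_region_probe(struct platform_device *pdev)
{
	struct fpga_region_info info = { 0 };
	struct fpga_region *region;
	struct fpga_manager *mgr;

	/* Assumption for this sketch: the manager device is our parent. */
	mgr = fpga_mgr_get(pdev->dev.parent);
	if (IS_ERR(mgr))
		return PTR_ERR(mgr);

	info.mgr = mgr;
	info.priv = pdev->dev.parent;	/* consumed by the callback above */
	info.get_bridges = my_region_get_bridges;

	region = fpga_region_register_full(&pdev->dev, &info);
	if (IS_ERR(region)) {
		fpga_mgr_put(mgr);
		return PTR_ERR(region);
	}

	platform_set_drvdata(pdev, region);
	return 0;
}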
// SPDX-License-Identifier: GPL-2.0
/*
 * KUnit test for the FPGA Bridge
 *
 * Copyright (C) 2023 Red Hat, Inc.
 *
 * Author: Marco Pagani <[email protected]>
 */

#include <kunit/test.h>
#include <linux/device.h>
#include <linux/fpga/fpga-bridge.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/types.h>

struct bridge_stats {
	bool enable;
};

struct bridge_ctx {
	struct fpga_bridge *bridge;
	struct platform_device *pdev;
	struct bridge_stats stats;
};

static int op_enable_set(struct fpga_bridge *bridge, bool enable)
{
	struct bridge_stats *stats = bridge->priv;

	stats->enable = enable;

	return 0;
}

/*
 * Fake FPGA bridge that implements only the enable_set op to track
 * the state.
 */
static const struct fpga_bridge_ops fake_bridge_ops = {
	.enable_set = op_enable_set,
};

/**
 * register_test_bridge() - Register a fake FPGA bridge for testing.
 * @test: KUnit test context object.
 *
 * Return: Context of the newly registered FPGA bridge.
 */
static struct bridge_ctx *register_test_bridge(struct kunit *test)
{
	struct bridge_ctx *ctx;

	ctx = kunit_kzalloc(test, sizeof(*ctx), GFP_KERNEL);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx);

	ctx->pdev = platform_device_register_simple("bridge_pdev",
						    PLATFORM_DEVID_AUTO,
						    NULL, 0);
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, ctx->pdev);

	ctx->bridge = fpga_bridge_register(&ctx->pdev->dev,
					   "Fake FPGA bridge",
					   &fake_bridge_ops, &ctx->stats);
	KUNIT_ASSERT_FALSE(test, IS_ERR_OR_NULL(ctx->bridge));

	return ctx;
}

static void unregister_test_bridge(struct bridge_ctx *ctx)
{
	fpga_bridge_unregister(ctx->bridge);
	platform_device_unregister(ctx->pdev);
}

static void fpga_bridge_test_get(struct kunit *test)
{
	struct bridge_ctx *ctx = test->priv;
	struct fpga_bridge *bridge;

	bridge = fpga_bridge_get(&ctx->pdev->dev, NULL);
	KUNIT_EXPECT_PTR_EQ(test, bridge, ctx->bridge);

	bridge = fpga_bridge_get(&ctx->pdev->dev, NULL);
	KUNIT_EXPECT_EQ(test, PTR_ERR(bridge), -EBUSY);

	fpga_bridge_put(ctx->bridge);
}

static void fpga_bridge_test_toggle(struct kunit *test)
{
	struct bridge_ctx *ctx = test->priv;
	int ret;

	ret = fpga_bridge_disable(ctx->bridge);
	KUNIT_EXPECT_EQ(test, ret, 0);
	KUNIT_EXPECT_FALSE(test, ctx->stats.enable);

	ret = fpga_bridge_enable(ctx->bridge);
	KUNIT_EXPECT_EQ(test, ret, 0);
	KUNIT_EXPECT_TRUE(test, ctx->stats.enable);
}

/* Test the functions for getting and controlling a list of bridges */
static void fpga_bridge_test_get_put_list(struct kunit *test)
{
	struct list_head bridge_list;
	struct bridge_ctx *ctx_0, *ctx_1;
	int ret;

	ctx_0 = test->priv;
	ctx_1 = register_test_bridge(test);

	INIT_LIST_HEAD(&bridge_list);

	/* Get bridge 0 and add it to the list */
	ret = fpga_bridge_get_to_list(&ctx_0->pdev->dev, NULL, &bridge_list);
	KUNIT_EXPECT_EQ(test, ret, 0);

	KUNIT_EXPECT_PTR_EQ(test, ctx_0->bridge,
			    list_first_entry_or_null(&bridge_list,
						     struct fpga_bridge, node));

	/* Get bridge 1 and add it to the list */
	ret = fpga_bridge_get_to_list(&ctx_1->pdev->dev, NULL, &bridge_list);
	KUNIT_EXPECT_EQ(test, ret, 0);

	KUNIT_EXPECT_PTR_EQ(test, ctx_1->bridge,
			    list_first_entry_or_null(&bridge_list,
						     struct fpga_bridge, node));

	/* Disable and then enable both bridges from the list */
	ret = fpga_bridges_disable(&bridge_list);
	KUNIT_EXPECT_EQ(test, ret, 0);
	KUNIT_EXPECT_FALSE(test, ctx_0->stats.enable);
	KUNIT_EXPECT_FALSE(test, ctx_1->stats.enable);

	ret = fpga_bridges_enable(&bridge_list);
	KUNIT_EXPECT_EQ(test, ret, 0);
	KUNIT_EXPECT_TRUE(test, ctx_0->stats.enable);
	KUNIT_EXPECT_TRUE(test, ctx_1->stats.enable);

	/* Put and remove both bridges from the list */
	fpga_bridges_put(&bridge_list);

	KUNIT_EXPECT_TRUE(test, list_empty(&bridge_list));

	unregister_test_bridge(ctx_1);
}

static int fpga_bridge_test_init(struct kunit *test)
{
	test->priv = register_test_bridge(test);

	return 0;
}

static void fpga_bridge_test_exit(struct kunit *test)
{
	unregister_test_bridge(test->priv);
}

static struct kunit_case fpga_bridge_test_cases[] = {
	KUNIT_CASE(fpga_bridge_test_get),
	KUNIT_CASE(fpga_bridge_test_toggle),
	KUNIT_CASE(fpga_bridge_test_get_put_list),
	{}
};

static struct kunit_suite fpga_bridge_suite = {
	.name = "fpga_bridge",
	.init = fpga_bridge_test_init,
	.exit = fpga_bridge_test_exit,
	.test_cases = fpga_bridge_test_cases,
};

kunit_test_suite(fpga_bridge_suite);

MODULE_LICENSE("GPL");
linux-master
drivers/fpga/tests/fpga-bridge-test.c
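The list helpers exercised in fpga_bridge_test_get_put_list() are the same ones a region drives around a reprogramming cycle. A minimal sketch of that sequence follows; reprogram_with_bridges() and bridge_dev are hypothetical stand-ins, while the fpga_bridge_* calls are exactly those tested above.

static int reprogram_with_bridges(struct device *bridge_dev,
				  struct fpga_image_info *info)
{
	LIST_HEAD(bridge_list);
	int ret;

	/* Take a reference on the bridge and link it into the list. */
	ret = fpga_bridge_get_to_list(bridge_dev, info, &bridge_list);
	if (ret)
		return ret;

	/* Gate off the FPGA before touching its configuration. */
	ret = fpga_bridges_disable(&bridge_list);
	if (ret)
		goto out;

	/* ... the manager would reprogram the device here ... */

	/* Let traffic flow again once the new image is live. */
	ret = fpga_bridges_enable(&bridge_list);
out:
	fpga_bridges_put(&bridge_list);	/* drops references and empties the list */
	return ret;
}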
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2013, Intel Corporation. * * MEI Library for mei bus nfc device access */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/nfc.h> #include "mei_phy.h" struct mei_nfc_hdr { u8 cmd; u8 status; u16 req_id; u32 reserved; u16 data_size; } __packed; struct mei_nfc_cmd { struct mei_nfc_hdr hdr; u8 sub_command; u8 data[]; } __packed; struct mei_nfc_reply { struct mei_nfc_hdr hdr; u8 sub_command; u8 reply_status; u8 data[]; } __packed; struct mei_nfc_if_version { u8 radio_version_sw[3]; u8 reserved[3]; u8 radio_version_hw[3]; u8 i2c_addr; u8 fw_ivn; u8 vendor_id; u8 radio_type; } __packed; struct mei_nfc_connect { u8 fw_ivn; u8 vendor_id; } __packed; struct mei_nfc_connect_resp { u8 fw_ivn; u8 vendor_id; u16 me_major; u16 me_minor; u16 me_hotfix; u16 me_build; } __packed; #define MEI_NFC_CMD_MAINTENANCE 0x00 #define MEI_NFC_CMD_HCI_SEND 0x01 #define MEI_NFC_CMD_HCI_RECV 0x02 #define MEI_NFC_SUBCMD_CONNECT 0x00 #define MEI_NFC_SUBCMD_IF_VERSION 0x01 #define MEI_NFC_MAX_READ (MEI_NFC_HEADER_SIZE + MEI_NFC_MAX_HCI_PAYLOAD) #define MEI_DUMP_SKB_IN(info, skb) \ do { \ pr_debug("%s:\n", info); \ print_hex_dump_debug("mei in : ", DUMP_PREFIX_OFFSET, \ 16, 1, (skb)->data, (skb)->len, false); \ } while (0) #define MEI_DUMP_SKB_OUT(info, skb) \ do { \ pr_debug("%s:\n", info); \ print_hex_dump_debug("mei out: ", DUMP_PREFIX_OFFSET, \ 16, 1, (skb)->data, (skb)->len, false); \ } while (0) #define MEI_DUMP_NFC_HDR(info, _hdr) \ do { \ pr_debug("%s:\n", info); \ pr_debug("cmd=%02d status=%d req_id=%d rsvd=%d size=%d\n", \ (_hdr)->cmd, (_hdr)->status, (_hdr)->req_id, \ (_hdr)->reserved, (_hdr)->data_size); \ } while (0) static int mei_nfc_if_version(struct nfc_mei_phy *phy) { struct mei_nfc_cmd cmd; struct mei_nfc_reply *reply = NULL; struct mei_nfc_if_version *version; size_t if_version_length; int bytes_recv, r; memset(&cmd, 0, sizeof(struct mei_nfc_cmd)); cmd.hdr.cmd = MEI_NFC_CMD_MAINTENANCE; cmd.hdr.data_size = 1; cmd.sub_command = MEI_NFC_SUBCMD_IF_VERSION; MEI_DUMP_NFC_HDR("version", &cmd.hdr); r = mei_cldev_send(phy->cldev, (u8 *)&cmd, sizeof(struct mei_nfc_cmd)); if (r < 0) { pr_err("Could not send IF version cmd\n"); return r; } /* allocate the reply buffer instead of using a large buffer on the stack */ if_version_length = sizeof(struct mei_nfc_reply) + sizeof(struct mei_nfc_if_version); reply = kzalloc(if_version_length, GFP_KERNEL); if (!reply) return -ENOMEM; bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, if_version_length); if (bytes_recv < 0 || bytes_recv < if_version_length) { pr_err("Could not read IF version\n"); r = -EIO; goto err; } version = (struct mei_nfc_if_version *)reply->data; phy->fw_ivn = version->fw_ivn; phy->vendor_id = version->vendor_id; phy->radio_type = version->radio_type; err: kfree(reply); return r; } static int mei_nfc_connect(struct nfc_mei_phy *phy) { struct mei_nfc_cmd *cmd, *reply; struct mei_nfc_connect *connect; struct mei_nfc_connect_resp *connect_resp; size_t connect_length, connect_resp_length; int bytes_recv, r; connect_length = sizeof(struct mei_nfc_cmd) + sizeof(struct mei_nfc_connect); connect_resp_length = sizeof(struct mei_nfc_cmd) + sizeof(struct mei_nfc_connect_resp); cmd = kzalloc(connect_length, GFP_KERNEL); if (!cmd) return -ENOMEM; connect = (struct mei_nfc_connect *)cmd->data; reply = kzalloc(connect_resp_length, GFP_KERNEL); if (!reply) { kfree(cmd); return -ENOMEM; } connect_resp = (struct mei_nfc_connect_resp *)reply->data; cmd->hdr.cmd =
MEI_NFC_CMD_MAINTENANCE; cmd->hdr.data_size = 3; cmd->sub_command = MEI_NFC_SUBCMD_CONNECT; connect->fw_ivn = phy->fw_ivn; connect->vendor_id = phy->vendor_id; MEI_DUMP_NFC_HDR("connect request", &cmd->hdr); r = mei_cldev_send(phy->cldev, (u8 *)cmd, connect_length); if (r < 0) { pr_err("Could not send connect cmd %d\n", r); goto err; } bytes_recv = mei_cldev_recv(phy->cldev, (u8 *)reply, connect_resp_length); if (bytes_recv < 0) { r = bytes_recv; pr_err("Could not read connect response %d\n", r); goto err; } MEI_DUMP_NFC_HDR("connect reply", &reply->hdr); pr_info("IVN 0x%x Vendor ID 0x%x\n", connect_resp->fw_ivn, connect_resp->vendor_id); pr_info("ME FW %d.%d.%d.%d\n", connect_resp->me_major, connect_resp->me_minor, connect_resp->me_hotfix, connect_resp->me_build); r = 0; err: kfree(reply); kfree(cmd); return r; } static int mei_nfc_send(struct nfc_mei_phy *phy, const u8 *buf, size_t length) { struct mei_nfc_hdr *hdr; u8 *mei_buf; int err; err = -ENOMEM; mei_buf = kzalloc(length + MEI_NFC_HEADER_SIZE, GFP_KERNEL); if (!mei_buf) goto out; hdr = (struct mei_nfc_hdr *)mei_buf; hdr->cmd = MEI_NFC_CMD_HCI_SEND; hdr->status = 0; hdr->req_id = phy->req_id; hdr->reserved = 0; hdr->data_size = length; MEI_DUMP_NFC_HDR("send", hdr); memcpy(mei_buf + MEI_NFC_HEADER_SIZE, buf, length); err = mei_cldev_send(phy->cldev, mei_buf, length + MEI_NFC_HEADER_SIZE); if (err < 0) goto out; if (!wait_event_interruptible_timeout(phy->send_wq, phy->recv_req_id == phy->req_id, HZ)) { pr_err("NFC MEI command timeout\n"); err = -ETIME; } else { phy->req_id++; } out: kfree(mei_buf); return err; } /* * Writing a frame must not return the number of written bytes. * It must return either zero for success, or <0 for error. * In addition, it must not alter the skb */ static int nfc_mei_phy_write(void *phy_id, struct sk_buff *skb) { struct nfc_mei_phy *phy = phy_id; int r; MEI_DUMP_SKB_OUT("mei frame sent", skb); r = mei_nfc_send(phy, skb->data, skb->len); if (r > 0) r = 0; return r; } static int mei_nfc_recv(struct nfc_mei_phy *phy, u8 *buf, size_t length) { struct mei_nfc_hdr *hdr; int received_length; received_length = mei_cldev_recv(phy->cldev, buf, length); if (received_length < 0) return received_length; hdr = (struct mei_nfc_hdr *) buf; MEI_DUMP_NFC_HDR("receive", hdr); if (hdr->cmd == MEI_NFC_CMD_HCI_SEND) { phy->recv_req_id = hdr->req_id; wake_up(&phy->send_wq); return 0; } return received_length; } static void nfc_mei_rx_cb(struct mei_cl_device *cldev) { struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev); struct sk_buff *skb; int reply_size; if (!phy) return; if (phy->hard_fault != 0) return; skb = alloc_skb(MEI_NFC_MAX_READ, GFP_KERNEL); if (!skb) return; reply_size = mei_nfc_recv(phy, skb->data, MEI_NFC_MAX_READ); if (reply_size < MEI_NFC_HEADER_SIZE) { kfree_skb(skb); return; } skb_put(skb, reply_size); skb_pull(skb, MEI_NFC_HEADER_SIZE); MEI_DUMP_SKB_IN("mei frame read", skb); nfc_hci_recv_frame(phy->hdev, skb); } static int nfc_mei_phy_enable(void *phy_id) { int r; struct nfc_mei_phy *phy = phy_id; if (phy->powered == 1) return 0; r = mei_cldev_enable(phy->cldev); if (r < 0) { pr_err("Could not enable device %d\n", r); return r; } r = mei_nfc_if_version(phy); if (r < 0) { pr_err("Could not enable device %d\n", r); goto err; } r = mei_nfc_connect(phy); if (r < 0) { pr_err("Could not connect to device %d\n", r); goto err; } r = mei_cldev_register_rx_cb(phy->cldev, nfc_mei_rx_cb); if (r) { pr_err("Event cb registration failed %d\n", r); goto err; } phy->powered = 1; return 0; err: phy->powered = 0; 
mei_cldev_disable(phy->cldev); return r; } static void nfc_mei_phy_disable(void *phy_id) { struct nfc_mei_phy *phy = phy_id; mei_cldev_disable(phy->cldev); phy->powered = 0; } const struct nfc_phy_ops mei_phy_ops = { .write = nfc_mei_phy_write, .enable = nfc_mei_phy_enable, .disable = nfc_mei_phy_disable, }; EXPORT_SYMBOL_GPL(mei_phy_ops); struct nfc_mei_phy *nfc_mei_phy_alloc(struct mei_cl_device *cldev) { struct nfc_mei_phy *phy; phy = kzalloc(sizeof(struct nfc_mei_phy), GFP_KERNEL); if (!phy) return NULL; phy->cldev = cldev; init_waitqueue_head(&phy->send_wq); mei_cldev_set_drvdata(cldev, phy); return phy; } EXPORT_SYMBOL_GPL(nfc_mei_phy_alloc); void nfc_mei_phy_free(struct nfc_mei_phy *phy) { mei_cldev_disable(phy->cldev); kfree(phy); } EXPORT_SYMBOL_GPL(nfc_mei_phy_free); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("mei bus NFC device interface");
linux-master
drivers/nfc/mei_phy.c
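Since the framing is easy to miss inside mei_nfc_send() above, here is a minimal sketch of the frame layout it builds: a mei_nfc_hdr immediately followed by the HCI payload. The helper name is hypothetical, and it assumes MEI_NFC_HEADER_SIZE (defined in mei_phy.h, not shown here) equals sizeof(struct mei_nfc_hdr).

static u8 *mei_nfc_frame_alloc(const u8 *payload, size_t len, u16 req_id)
{
	struct mei_nfc_hdr *hdr;
	u8 *frame;

	frame = kzalloc(sizeof(*hdr) + len, GFP_KERNEL);
	if (!frame)
		return NULL;

	/* Header fields mirror what mei_nfc_send() fills in. */
	hdr = (struct mei_nfc_hdr *)frame;
	hdr->cmd = MEI_NFC_CMD_HCI_SEND;
	hdr->status = 0;
	hdr->req_id = req_id;	/* matched against recv_req_id on completion */
	hdr->reserved = 0;
	hdr->data_size = len;

	/* Payload sits directly after the packed header. */
	memcpy(frame + sizeof(*hdr), payload, len);
	return frame;
}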
// SPDX-License-Identifier: GPL-2.0-only /* * TI TRF7970a RFID/NFC Transceiver Driver * * Copyright (C) 2013 Texas Instruments Incorporated - http://www.ti.com * * Author: Erick Macias <[email protected]> * Author: Felipe Balbi <[email protected]> * Author: Mark A. Greer <[email protected]> */ #include <linux/module.h> #include <linux/device.h> #include <linux/netdevice.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> #include <linux/nfc.h> #include <linux/skbuff.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/of.h> #include <linux/spi/spi.h> #include <linux/regulator/consumer.h> #include <net/nfc/nfc.h> #include <net/nfc/digital.h> /* There are 3 ways the host can communicate with the trf7970a: * parallel mode, SPI with Slave Select (SS) mode, and SPI without * SS mode. The driver only supports the two SPI modes. * * The trf7970a is very timing sensitive and the VIN, EN2, and EN * pins must be asserted in that order and with specific delays in between. * The delays used in the driver were provided by TI and have been * confirmed to work with this driver. There is a bug with the current * version of the trf7970a that requires that EN2 remain low no matter * what. If it goes high, it will generate an RF field even when in * passive target mode. TI has indicated that the chip will work okay * when EN2 is left low. The 'en2-rf-quirk' device tree property * indicates that the trf7970a currently being used has the erratum and * that EN2 must be kept low. * * Timeouts are implemented using the delayed workqueue kernel facility. * Timeouts are required so things don't hang when there is no response * from the trf7970a (or tag). Using this mechanism creates a race with * interrupts, however. That is, an interrupt and a timeout could occur * closely enough together that one is blocked by the mutex while the other * executes. When the timeout handler executes first and blocks the * interrupt handler, it will eventually set the state to IDLE so the * interrupt handler will check the state and exit with no harm done. * When the interrupt handler executes first and blocks the timeout handler, * the cancel_delayed_work() call will know that it didn't cancel the * work item (i.e., timeout) and will return zero. That return code is * used by the timer handler to indicate that it should ignore the timeout * once it's unblocked. * * Aborting an active command isn't as simple as it seems because the only * way to abort a command that's already been sent to the tag is to turn * off power to the tag. If we do that, though, we'd have to go through * the entire anticollision procedure again but the digital layer doesn't * support that. So, if an abort is received before trf7970a_send_cmd() * has sent the command to the tag, it simply returns -ECANCELED. If the * command has already been sent to the tag, then the driver continues * normally and receives the response data (or error) but just before * sending the data upstream, it frees the rx_skb and sends -ECANCELED * upstream instead. If the command failed, that error will be sent * upstream. * * When receiving data from a tag and the interrupt status register has * only the SRX bit set, it means that all of the data has been received * (once what's in the fifo has been read). However, depending on timing * an interrupt status with only the SRX bit set may not be received. In * those cases, the timeout mechanism is used to wait 20 ms in case more * data arrives.
After 20 ms, it is assumed that all of the data has been * received and the accumulated rx data is sent upstream. The * 'TRF7970A_ST_WAIT_FOR_RX_DATA_CONT' state is used for this purpose * (i.e., it indicates that some data has been received but we're not sure * if there is more coming so a timeout in this state means all data has * been received and there isn't an error). The delay is 20 ms since delays * of ~16 ms have been observed during testing. * * When transmitting a frame larger than the FIFO size (127 bytes), the * driver will wait 20 ms for the FIFO to drain past the low-watermark * and generate an interrupt. The low-watermark is set to 32 bytes so the * interrupt should fire after 127 - 32 = 95 bytes have been sent. At * the lowest possible bit rate (6.62 kbps for 15693), it will take up * to ~14.35 ms so 20 ms is used for the timeout. * * Type 2 write and sector select commands respond with a 4-bit ACK or NACK. * Having only 4 bits in the FIFO won't normally generate an interrupt so * the driver enables the '4_bit_RX' bit of the Special Functions register 1 * to cause an interrupt in that case. Leaving that bit set for a read command * messes up the data returned so it is only enabled when the framing is * 'NFC_DIGITAL_FRAMING_NFCA_T2T' and the command is not a read command. * Unfortunately, that means that the driver has to peek into tx frames * when the framing is 'NFC_DIGITAL_FRAMING_NFCA_T2T'. This is done by * the trf7970a_per_cmd_config() routine. * * ISO/IEC 15693 frames specify whether to use single or double sub-carrier * frequencies and whether to use low or high data rates in the flags byte * of the frame. This means that the driver has to peek at all 15693 frames * to determine what speed to set the communication to. In addition, write * and lock commands use the OPTION flag to indicate that an EOF must be * sent to the tag before it will send its response. So the driver has to * examine all frames for that reason too. * * It is unclear how long to wait before sending the EOF. According to the * Note under Table 1-1 in section 1.6 of * http://www.ti.com/lit/ug/scbu011/scbu011.pdf, that wait should be at least * 10 ms for TI Tag-it HF-I tags; however, testing has shown that is not long * enough so 20 ms is used. So the timer is set to 40 ms: 20 ms to drain * up to 127 bytes in the FIFO at the lowest bit rate plus another 20 ms to * ensure the wait is long enough before sending the EOF. This seems to work * reliably. */ #define TRF7970A_SUPPORTED_PROTOCOLS \ (NFC_PROTO_MIFARE_MASK | NFC_PROTO_ISO14443_MASK | \ NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_FELICA_MASK | \ NFC_PROTO_ISO15693_MASK | NFC_PROTO_NFC_DEP_MASK) #define TRF7970A_AUTOSUSPEND_DELAY 30000 /* 30 seconds */ #define TRF7970A_13MHZ_CLOCK_FREQUENCY 13560000 #define TRF7970A_27MHZ_CLOCK_FREQUENCY 27120000 #define TRF7970A_RX_SKB_ALLOC_SIZE 256 #define TRF7970A_FIFO_SIZE 127 /* TX length is 3 nibbles long ==> 4KB - 1 bytes max */ #define TRF7970A_TX_MAX (4096 - 1) #define TRF7970A_WAIT_FOR_TX_IRQ 20 #define TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT 20 #define TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT 20 #define TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF 40 /* Guard times for various RF technologies (in us) */ #define TRF7970A_GUARD_TIME_NFCA 5000 #define TRF7970A_GUARD_TIME_NFCB 5000 #define TRF7970A_GUARD_TIME_NFCF 20000 #define TRF7970A_GUARD_TIME_15693 1000 /* Quirks */ /* Erratum: When reading the IRQ Status register on the trf7970a, we must issue a * read continuous command for IRQ Status and Collision Position registers.
*/ #define TRF7970A_QUIRK_IRQ_STATUS_READ BIT(0) #define TRF7970A_QUIRK_EN2_MUST_STAY_LOW BIT(1) /* Direct commands */ #define TRF7970A_CMD_IDLE 0x00 #define TRF7970A_CMD_SOFT_INIT 0x03 #define TRF7970A_CMD_RF_COLLISION 0x04 #define TRF7970A_CMD_RF_COLLISION_RESPONSE_N 0x05 #define TRF7970A_CMD_RF_COLLISION_RESPONSE_0 0x06 #define TRF7970A_CMD_FIFO_RESET 0x0f #define TRF7970A_CMD_TRANSMIT_NO_CRC 0x10 #define TRF7970A_CMD_TRANSMIT 0x11 #define TRF7970A_CMD_DELAY_TRANSMIT_NO_CRC 0x12 #define TRF7970A_CMD_DELAY_TRANSMIT 0x13 #define TRF7970A_CMD_EOF 0x14 #define TRF7970A_CMD_CLOSE_SLOT 0x15 #define TRF7970A_CMD_BLOCK_RX 0x16 #define TRF7970A_CMD_ENABLE_RX 0x17 #define TRF7970A_CMD_TEST_INT_RF 0x18 #define TRF7970A_CMD_TEST_EXT_RF 0x19 #define TRF7970A_CMD_RX_GAIN_ADJUST 0x1a /* Bits determining whether it's a direct command or register R/W, * whether to use a continuous SPI transaction or not, and the actual * direct cmd opcode or register address. */ #define TRF7970A_CMD_BIT_CTRL BIT(7) #define TRF7970A_CMD_BIT_RW BIT(6) #define TRF7970A_CMD_BIT_CONTINUOUS BIT(5) #define TRF7970A_CMD_BIT_OPCODE(opcode) ((opcode) & 0x1f) /* Register addresses */ #define TRF7970A_CHIP_STATUS_CTRL 0x00 #define TRF7970A_ISO_CTRL 0x01 #define TRF7970A_ISO14443B_TX_OPTIONS 0x02 #define TRF7970A_ISO14443A_HIGH_BITRATE_OPTIONS 0x03 #define TRF7970A_TX_TIMER_SETTING_H_BYTE 0x04 #define TRF7970A_TX_TIMER_SETTING_L_BYTE 0x05 #define TRF7970A_TX_PULSE_LENGTH_CTRL 0x06 #define TRF7970A_RX_NO_RESPONSE_WAIT 0x07 #define TRF7970A_RX_WAIT_TIME 0x08 #define TRF7970A_MODULATOR_SYS_CLK_CTRL 0x09 #define TRF7970A_RX_SPECIAL_SETTINGS 0x0a #define TRF7970A_REG_IO_CTRL 0x0b #define TRF7970A_IRQ_STATUS 0x0c #define TRF7970A_COLLISION_IRQ_MASK 0x0d #define TRF7970A_COLLISION_POSITION 0x0e #define TRF7970A_RSSI_OSC_STATUS 0x0f #define TRF7970A_SPECIAL_FCN_REG1 0x10 #define TRF7970A_SPECIAL_FCN_REG2 0x11 #define TRF7970A_RAM1 0x12 #define TRF7970A_RAM2 0x13 #define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS 0x14 #define TRF7970A_NFC_LOW_FIELD_LEVEL 0x16 #define TRF7970A_NFCID1 0x17 #define TRF7970A_NFC_TARGET_LEVEL 0x18 #define TRF79070A_NFC_TARGET_PROTOCOL 0x19 #define TRF7970A_TEST_REGISTER1 0x1a #define TRF7970A_TEST_REGISTER2 0x1b #define TRF7970A_FIFO_STATUS 0x1c #define TRF7970A_TX_LENGTH_BYTE1 0x1d #define TRF7970A_TX_LENGTH_BYTE2 0x1e #define TRF7970A_FIFO_IO_REGISTER 0x1f /* Chip Status Control Register Bits */ #define TRF7970A_CHIP_STATUS_VRS5_3 BIT(0) #define TRF7970A_CHIP_STATUS_REC_ON BIT(1) #define TRF7970A_CHIP_STATUS_AGC_ON BIT(2) #define TRF7970A_CHIP_STATUS_PM_ON BIT(3) #define TRF7970A_CHIP_STATUS_RF_PWR BIT(4) #define TRF7970A_CHIP_STATUS_RF_ON BIT(5) #define TRF7970A_CHIP_STATUS_DIRECT BIT(6) #define TRF7970A_CHIP_STATUS_STBY BIT(7) /* ISO Control Register Bits */ #define TRF7970A_ISO_CTRL_15693_SGL_1OF4_662 0x00 #define TRF7970A_ISO_CTRL_15693_SGL_1OF256_662 0x01 #define TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648 0x02 #define TRF7970A_ISO_CTRL_15693_SGL_1OF256_2648 0x03 #define TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a 0x04 #define TRF7970A_ISO_CTRL_15693_DBL_1OF256_667 0x05 #define TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669 0x06 #define TRF7970A_ISO_CTRL_15693_DBL_1OF256_2669 0x07 #define TRF7970A_ISO_CTRL_14443A_106 0x08 #define TRF7970A_ISO_CTRL_14443A_212 0x09 #define TRF7970A_ISO_CTRL_14443A_424 0x0a #define TRF7970A_ISO_CTRL_14443A_848 0x0b #define TRF7970A_ISO_CTRL_14443B_106 0x0c #define TRF7970A_ISO_CTRL_14443B_212 0x0d #define TRF7970A_ISO_CTRL_14443B_424 0x0e #define TRF7970A_ISO_CTRL_14443B_848 0x0f #define
TRF7970A_ISO_CTRL_FELICA_212 0x1a #define TRF7970A_ISO_CTRL_FELICA_424 0x1b #define TRF7970A_ISO_CTRL_NFC_NFCA_106 0x01 #define TRF7970A_ISO_CTRL_NFC_NFCF_212 0x02 #define TRF7970A_ISO_CTRL_NFC_NFCF_424 0x03 #define TRF7970A_ISO_CTRL_NFC_CE_14443A 0x00 #define TRF7970A_ISO_CTRL_NFC_CE_14443B 0x01 #define TRF7970A_ISO_CTRL_NFC_CE BIT(2) #define TRF7970A_ISO_CTRL_NFC_ACTIVE BIT(3) #define TRF7970A_ISO_CTRL_NFC_INITIATOR BIT(4) #define TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE BIT(5) #define TRF7970A_ISO_CTRL_RFID BIT(5) #define TRF7970A_ISO_CTRL_DIR_MODE BIT(6) #define TRF7970A_ISO_CTRL_RX_CRC_N BIT(7) /* true == No CRC */ #define TRF7970A_ISO_CTRL_RFID_SPEED_MASK 0x1f /* Modulator and SYS_CLK Control Register Bits */ #define TRF7970A_MODULATOR_DEPTH(n) ((n) & 0x7) #define TRF7970A_MODULATOR_DEPTH_ASK10 (TRF7970A_MODULATOR_DEPTH(0)) #define TRF7970A_MODULATOR_DEPTH_OOK (TRF7970A_MODULATOR_DEPTH(1)) #define TRF7970A_MODULATOR_DEPTH_ASK7 (TRF7970A_MODULATOR_DEPTH(2)) #define TRF7970A_MODULATOR_DEPTH_ASK8_5 (TRF7970A_MODULATOR_DEPTH(3)) #define TRF7970A_MODULATOR_DEPTH_ASK13 (TRF7970A_MODULATOR_DEPTH(4)) #define TRF7970A_MODULATOR_DEPTH_ASK16 (TRF7970A_MODULATOR_DEPTH(5)) #define TRF7970A_MODULATOR_DEPTH_ASK22 (TRF7970A_MODULATOR_DEPTH(6)) #define TRF7970A_MODULATOR_DEPTH_ASK30 (TRF7970A_MODULATOR_DEPTH(7)) #define TRF7970A_MODULATOR_EN_ANA BIT(3) #define TRF7970A_MODULATOR_CLK(n) (((n) & 0x3) << 4) #define TRF7970A_MODULATOR_CLK_DISABLED (TRF7970A_MODULATOR_CLK(0)) #define TRF7970A_MODULATOR_CLK_3_6 (TRF7970A_MODULATOR_CLK(1)) #define TRF7970A_MODULATOR_CLK_6_13 (TRF7970A_MODULATOR_CLK(2)) #define TRF7970A_MODULATOR_CLK_13_27 (TRF7970A_MODULATOR_CLK(3)) #define TRF7970A_MODULATOR_EN_OOK BIT(6) #define TRF7970A_MODULATOR_27MHZ BIT(7) #define TRF7970A_RX_SPECIAL_SETTINGS_NO_LIM BIT(0) #define TRF7970A_RX_SPECIAL_SETTINGS_AGCR BIT(1) #define TRF7970A_RX_SPECIAL_SETTINGS_GD_0DB (0x0 << 2) #define TRF7970A_RX_SPECIAL_SETTINGS_GD_5DB (0x1 << 2) #define TRF7970A_RX_SPECIAL_SETTINGS_GD_10DB (0x2 << 2) #define TRF7970A_RX_SPECIAL_SETTINGS_GD_15DB (0x3 << 2) #define TRF7970A_RX_SPECIAL_SETTINGS_HBT BIT(4) #define TRF7970A_RX_SPECIAL_SETTINGS_M848 BIT(5) #define TRF7970A_RX_SPECIAL_SETTINGS_C424 BIT(6) #define TRF7970A_RX_SPECIAL_SETTINGS_C212 BIT(7) #define TRF7970A_REG_IO_CTRL_VRS(v) ((v) & 0x07) #define TRF7970A_REG_IO_CTRL_IO_LOW BIT(5) #define TRF7970A_REG_IO_CTRL_EN_EXT_PA BIT(6) #define TRF7970A_REG_IO_CTRL_AUTO_REG BIT(7) /* IRQ Status Register Bits */ #define TRF7970A_IRQ_STATUS_NORESP BIT(0) /* ISO15693 only */ #define TRF7970A_IRQ_STATUS_NFC_COL_ERROR BIT(0) #define TRF7970A_IRQ_STATUS_COL BIT(1) #define TRF7970A_IRQ_STATUS_FRAMING_EOF_ERROR BIT(2) #define TRF7970A_IRQ_STATUS_NFC_RF BIT(2) #define TRF7970A_IRQ_STATUS_PARITY_ERROR BIT(3) #define TRF7970A_IRQ_STATUS_NFC_SDD BIT(3) #define TRF7970A_IRQ_STATUS_CRC_ERROR BIT(4) #define TRF7970A_IRQ_STATUS_NFC_PROTO_ERROR BIT(4) #define TRF7970A_IRQ_STATUS_FIFO BIT(5) #define TRF7970A_IRQ_STATUS_SRX BIT(6) #define TRF7970A_IRQ_STATUS_TX BIT(7) #define TRF7970A_IRQ_STATUS_ERROR \ (TRF7970A_IRQ_STATUS_COL | \ TRF7970A_IRQ_STATUS_FRAMING_EOF_ERROR | \ TRF7970A_IRQ_STATUS_PARITY_ERROR | \ TRF7970A_IRQ_STATUS_CRC_ERROR) #define TRF7970A_RSSI_OSC_STATUS_RSSI_MASK (BIT(2) | BIT(1) | BIT(0)) #define TRF7970A_RSSI_OSC_STATUS_RSSI_X_MASK (BIT(5) | BIT(4) | BIT(3)) #define TRF7970A_RSSI_OSC_STATUS_RSSI_OSC_OK BIT(6) #define TRF7970A_SPECIAL_FCN_REG1_COL_7_6 BIT(0) #define TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL BIT(1) #define TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX BIT(2) 
#define TRF7970A_SPECIAL_FCN_REG1_SP_DIR_MODE BIT(3) #define TRF7970A_SPECIAL_FCN_REG1_NEXT_SLOT_37US BIT(4) #define TRF7970A_SPECIAL_FCN_REG1_PAR43 BIT(5) #define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_124 (0x0 << 2) #define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_120 (0x1 << 2) #define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_112 (0x2 << 2) #define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96 (0x3 << 2) #define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_4 0x0 #define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_8 0x1 #define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_16 0x2 #define TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32 0x3 #define TRF7970A_NFC_LOW_FIELD_LEVEL_RFDET(v) ((v) & 0x07) #define TRF7970A_NFC_LOW_FIELD_LEVEL_CLEX_DIS BIT(7) #define TRF7970A_NFC_TARGET_LEVEL_RFDET(v) ((v) & 0x07) #define TRF7970A_NFC_TARGET_LEVEL_HI_RF BIT(3) #define TRF7970A_NFC_TARGET_LEVEL_SDD_EN BIT(5) #define TRF7970A_NFC_TARGET_LEVEL_LD_S_4BYTES (0x0 << 6) #define TRF7970A_NFC_TARGET_LEVEL_LD_S_7BYTES (0x1 << 6) #define TRF7970A_NFC_TARGET_LEVEL_LD_S_10BYTES (0x2 << 6) #define TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_106 BIT(0) #define TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_212 BIT(1) #define TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_424 (BIT(0) | BIT(1)) #define TRF79070A_NFC_TARGET_PROTOCOL_PAS_14443B BIT(2) #define TRF79070A_NFC_TARGET_PROTOCOL_PAS_106 BIT(3) #define TRF79070A_NFC_TARGET_PROTOCOL_FELICA BIT(4) #define TRF79070A_NFC_TARGET_PROTOCOL_RF_L BIT(6) #define TRF79070A_NFC_TARGET_PROTOCOL_RF_H BIT(7) #define TRF79070A_NFC_TARGET_PROTOCOL_106A \ (TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \ TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \ TRF79070A_NFC_TARGET_PROTOCOL_PAS_106 | \ TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_106) #define TRF79070A_NFC_TARGET_PROTOCOL_106B \ (TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \ TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \ TRF79070A_NFC_TARGET_PROTOCOL_PAS_14443B | \ TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_106) #define TRF79070A_NFC_TARGET_PROTOCOL_212F \ (TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \ TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \ TRF79070A_NFC_TARGET_PROTOCOL_FELICA | \ TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_212) #define TRF79070A_NFC_TARGET_PROTOCOL_424F \ (TRF79070A_NFC_TARGET_PROTOCOL_RF_H | \ TRF79070A_NFC_TARGET_PROTOCOL_RF_L | \ TRF79070A_NFC_TARGET_PROTOCOL_FELICA | \ TRF79070A_NFC_TARGET_PROTOCOL_NFCBR_424) #define TRF7970A_FIFO_STATUS_OVERFLOW BIT(7) /* NFC (ISO/IEC 14443A) Type 2 Tag commands */ #define NFC_T2T_CMD_READ 0x30 /* ISO 15693 commands codes */ #define ISO15693_CMD_INVENTORY 0x01 #define ISO15693_CMD_READ_SINGLE_BLOCK 0x20 #define ISO15693_CMD_WRITE_SINGLE_BLOCK 0x21 #define ISO15693_CMD_LOCK_BLOCK 0x22 #define ISO15693_CMD_READ_MULTIPLE_BLOCK 0x23 #define ISO15693_CMD_WRITE_MULTIPLE_BLOCK 0x24 #define ISO15693_CMD_SELECT 0x25 #define ISO15693_CMD_RESET_TO_READY 0x26 #define ISO15693_CMD_WRITE_AFI 0x27 #define ISO15693_CMD_LOCK_AFI 0x28 #define ISO15693_CMD_WRITE_DSFID 0x29 #define ISO15693_CMD_LOCK_DSFID 0x2a #define ISO15693_CMD_GET_SYSTEM_INFO 0x2b #define ISO15693_CMD_GET_MULTIPLE_BLOCK_SECURITY_STATUS 0x2c /* ISO 15693 request and response flags */ #define ISO15693_REQ_FLAG_SUB_CARRIER BIT(0) #define ISO15693_REQ_FLAG_DATA_RATE BIT(1) #define ISO15693_REQ_FLAG_INVENTORY BIT(2) #define ISO15693_REQ_FLAG_PROTOCOL_EXT BIT(3) #define ISO15693_REQ_FLAG_SELECT BIT(4) #define ISO15693_REQ_FLAG_AFI BIT(4) #define ISO15693_REQ_FLAG_ADDRESS BIT(5) #define ISO15693_REQ_FLAG_NB_SLOTS BIT(5) #define ISO15693_REQ_FLAG_OPTION BIT(6) #define ISO15693_REQ_FLAG_SPEED_MASK \ (ISO15693_REQ_FLAG_SUB_CARRIER | 
ISO15693_REQ_FLAG_DATA_RATE) enum trf7970a_state { TRF7970A_ST_PWR_OFF, TRF7970A_ST_RF_OFF, TRF7970A_ST_IDLE, TRF7970A_ST_IDLE_RX_BLOCKED, TRF7970A_ST_WAIT_FOR_TX_FIFO, TRF7970A_ST_WAIT_FOR_RX_DATA, TRF7970A_ST_WAIT_FOR_RX_DATA_CONT, TRF7970A_ST_WAIT_TO_ISSUE_EOF, TRF7970A_ST_LISTENING, TRF7970A_ST_LISTENING_MD, TRF7970A_ST_MAX }; struct trf7970a { enum trf7970a_state state; struct device *dev; struct spi_device *spi; struct regulator *regulator; struct nfc_digital_dev *ddev; u32 quirks; bool is_initiator; bool aborting; struct sk_buff *tx_skb; struct sk_buff *rx_skb; nfc_digital_cmd_complete_t cb; void *cb_arg; u8 chip_status_ctrl; u8 iso_ctrl; u8 iso_ctrl_tech; u8 modulator_sys_clk_ctrl; u8 special_fcn_reg1; u8 io_ctrl; unsigned int guard_time; int technology; int framing; u8 md_rf_tech; u8 tx_cmd; bool issue_eof; struct gpio_desc *en_gpiod; struct gpio_desc *en2_gpiod; struct mutex lock; unsigned int timeout; bool ignore_timeout; struct delayed_work timeout_work; }; static int trf7970a_cmd(struct trf7970a *trf, u8 opcode) { u8 cmd = TRF7970A_CMD_BIT_CTRL | TRF7970A_CMD_BIT_OPCODE(opcode); int ret; dev_dbg(trf->dev, "cmd: 0x%x\n", cmd); ret = spi_write(trf->spi, &cmd, 1); if (ret) dev_err(trf->dev, "%s - cmd: 0x%x, ret: %d\n", __func__, cmd, ret); return ret; } static int trf7970a_read(struct trf7970a *trf, u8 reg, u8 *val) { u8 addr = TRF7970A_CMD_BIT_RW | reg; int ret; ret = spi_write_then_read(trf->spi, &addr, 1, val, 1); if (ret) dev_err(trf->dev, "%s - addr: 0x%x, ret: %d\n", __func__, addr, ret); dev_dbg(trf->dev, "read(0x%x): 0x%x\n", addr, *val); return ret; } static int trf7970a_read_cont(struct trf7970a *trf, u8 reg, u8 *buf, size_t len) { u8 addr = reg | TRF7970A_CMD_BIT_RW | TRF7970A_CMD_BIT_CONTINUOUS; struct spi_transfer t[2]; struct spi_message m; int ret; dev_dbg(trf->dev, "read_cont(0x%x, %zd)\n", addr, len); spi_message_init(&m); memset(&t, 0, sizeof(t)); t[0].tx_buf = &addr; t[0].len = sizeof(addr); spi_message_add_tail(&t[0], &m); t[1].rx_buf = buf; t[1].len = len; spi_message_add_tail(&t[1], &m); ret = spi_sync(trf->spi, &m); if (ret) dev_err(trf->dev, "%s - addr: 0x%x, ret: %d\n", __func__, addr, ret); return ret; } static int trf7970a_write(struct trf7970a *trf, u8 reg, u8 val) { u8 buf[2] = { reg, val }; int ret; dev_dbg(trf->dev, "write(0x%x): 0x%x\n", reg, val); ret = spi_write(trf->spi, buf, 2); if (ret) dev_err(trf->dev, "%s - write: 0x%x 0x%x, ret: %d\n", __func__, buf[0], buf[1], ret); return ret; } static int trf7970a_read_irqstatus(struct trf7970a *trf, u8 *status) { int ret; u8 buf[2]; u8 addr; addr = TRF7970A_IRQ_STATUS | TRF7970A_CMD_BIT_RW; if (trf->quirks & TRF7970A_QUIRK_IRQ_STATUS_READ) { addr |= TRF7970A_CMD_BIT_CONTINUOUS; ret = spi_write_then_read(trf->spi, &addr, 1, buf, 2); } else { ret = spi_write_then_read(trf->spi, &addr, 1, buf, 1); } if (ret) dev_err(trf->dev, "%s - irqstatus: Status read failed: %d\n", __func__, ret); else *status = buf[0]; return ret; } static int trf7970a_read_target_proto(struct trf7970a *trf, u8 *target_proto) { int ret; u8 buf[2]; u8 addr; addr = TRF79070A_NFC_TARGET_PROTOCOL | TRF7970A_CMD_BIT_RW | TRF7970A_CMD_BIT_CONTINUOUS; ret = spi_write_then_read(trf->spi, &addr, 1, buf, 2); if (ret) dev_err(trf->dev, "%s - target_proto: Read failed: %d\n", __func__, ret); else *target_proto = buf[0]; return ret; } static int trf7970a_mode_detect(struct trf7970a *trf, u8 *rf_tech) { int ret; u8 target_proto, tech; ret = trf7970a_read_target_proto(trf, &target_proto); if (ret) return ret; switch (target_proto) { case 
TRF79070A_NFC_TARGET_PROTOCOL_106A: tech = NFC_DIGITAL_RF_TECH_106A; break; case TRF79070A_NFC_TARGET_PROTOCOL_106B: tech = NFC_DIGITAL_RF_TECH_106B; break; case TRF79070A_NFC_TARGET_PROTOCOL_212F: tech = NFC_DIGITAL_RF_TECH_212F; break; case TRF79070A_NFC_TARGET_PROTOCOL_424F: tech = NFC_DIGITAL_RF_TECH_424F; break; default: dev_dbg(trf->dev, "%s - mode_detect: target_proto: 0x%x\n", __func__, target_proto); return -EIO; } *rf_tech = tech; return ret; } static void trf7970a_send_upstream(struct trf7970a *trf) { dev_kfree_skb_any(trf->tx_skb); trf->tx_skb = NULL; if (trf->rx_skb && !IS_ERR(trf->rx_skb) && !trf->aborting) print_hex_dump_debug("trf7970a rx data: ", DUMP_PREFIX_NONE, 16, 1, trf->rx_skb->data, trf->rx_skb->len, false); trf->state = TRF7970A_ST_IDLE; if (trf->aborting) { dev_dbg(trf->dev, "Abort process complete\n"); if (!IS_ERR(trf->rx_skb)) { kfree_skb(trf->rx_skb); trf->rx_skb = ERR_PTR(-ECANCELED); } trf->aborting = false; } trf->cb(trf->ddev, trf->cb_arg, trf->rx_skb); trf->rx_skb = NULL; } static void trf7970a_send_err_upstream(struct trf7970a *trf, int errno) { dev_dbg(trf->dev, "Error - state: %d, errno: %d\n", trf->state, errno); cancel_delayed_work(&trf->timeout_work); kfree_skb(trf->rx_skb); trf->rx_skb = ERR_PTR(errno); trf7970a_send_upstream(trf); } static int trf7970a_transmit(struct trf7970a *trf, struct sk_buff *skb, unsigned int len, const u8 *prefix, unsigned int prefix_len) { struct spi_transfer t[2]; struct spi_message m; unsigned int timeout; int ret; print_hex_dump_debug("trf7970a tx data: ", DUMP_PREFIX_NONE, 16, 1, skb->data, len, false); spi_message_init(&m); memset(&t, 0, sizeof(t)); t[0].tx_buf = prefix; t[0].len = prefix_len; spi_message_add_tail(&t[0], &m); t[1].tx_buf = skb->data; t[1].len = len; spi_message_add_tail(&t[1], &m); ret = spi_sync(trf->spi, &m); if (ret) { dev_err(trf->dev, "%s - Can't send tx data: %d\n", __func__, ret); return ret; } skb_pull(skb, len); if (skb->len > 0) { trf->state = TRF7970A_ST_WAIT_FOR_TX_FIFO; timeout = TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT; } else { if (trf->issue_eof) { trf->state = TRF7970A_ST_WAIT_TO_ISSUE_EOF; timeout = TRF7970A_WAIT_TO_ISSUE_ISO15693_EOF; } else { trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA; if (!trf->timeout) timeout = TRF7970A_WAIT_FOR_TX_IRQ; else timeout = trf->timeout; } } dev_dbg(trf->dev, "Setting timeout for %d ms, state: %d\n", timeout, trf->state); schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(timeout)); return 0; } static void trf7970a_fill_fifo(struct trf7970a *trf) { struct sk_buff *skb = trf->tx_skb; unsigned int len; int ret; u8 fifo_bytes; u8 prefix; ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes); if (ret) { trf7970a_send_err_upstream(trf, ret); return; } dev_dbg(trf->dev, "Filling FIFO - fifo_bytes: 0x%x\n", fifo_bytes); fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW; /* Calculate how much more data can be written to the fifo */ len = TRF7970A_FIFO_SIZE - fifo_bytes; if (!len) { schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(TRF7970A_WAIT_FOR_FIFO_DRAIN_TIMEOUT)); return; } len = min(skb->len, len); prefix = TRF7970A_CMD_BIT_CONTINUOUS | TRF7970A_FIFO_IO_REGISTER; ret = trf7970a_transmit(trf, skb, len, &prefix, sizeof(prefix)); if (ret) trf7970a_send_err_upstream(trf, ret); } static void trf7970a_drain_fifo(struct trf7970a *trf, u8 status) { struct sk_buff *skb = trf->rx_skb; int ret; u8 fifo_bytes; if (status & TRF7970A_IRQ_STATUS_ERROR) { trf7970a_send_err_upstream(trf, -EIO); return; } ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, 
&fifo_bytes); if (ret) { trf7970a_send_err_upstream(trf, ret); return; } dev_dbg(trf->dev, "Draining FIFO - fifo_bytes: 0x%x\n", fifo_bytes); fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW; if (!fifo_bytes) goto no_rx_data; if (fifo_bytes > skb_tailroom(skb)) { skb = skb_copy_expand(skb, skb_headroom(skb), max_t(int, fifo_bytes, TRF7970A_RX_SKB_ALLOC_SIZE), GFP_KERNEL); if (!skb) { trf7970a_send_err_upstream(trf, -ENOMEM); return; } kfree_skb(trf->rx_skb); trf->rx_skb = skb; } ret = trf7970a_read_cont(trf, TRF7970A_FIFO_IO_REGISTER, skb_put(skb, fifo_bytes), fifo_bytes); if (ret) { trf7970a_send_err_upstream(trf, ret); return; } /* If received Type 2 ACK/NACK, shift right 4 bits and pass up */ if ((trf->framing == NFC_DIGITAL_FRAMING_NFCA_T2T) && (skb->len == 1) && (trf->special_fcn_reg1 == TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX)) { skb->data[0] >>= 4; status = TRF7970A_IRQ_STATUS_SRX; } else { trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA_CONT; ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes); if (ret) { trf7970a_send_err_upstream(trf, ret); return; } fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW; /* If there are bytes in the FIFO, set status to '0' so * the if stmt below doesn't fire and the driver will wait * for the trf7970a to generate another RX interrupt. */ if (fifo_bytes) status = 0; } no_rx_data: if (status == TRF7970A_IRQ_STATUS_SRX) { /* Receive complete */ trf7970a_send_upstream(trf); return; } dev_dbg(trf->dev, "Setting timeout for %d ms\n", TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT); schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(TRF7970A_WAIT_FOR_RX_DATA_TIMEOUT)); } static irqreturn_t trf7970a_irq(int irq, void *dev_id) { struct trf7970a *trf = dev_id; int ret; u8 status, fifo_bytes, iso_ctrl; mutex_lock(&trf->lock); if (trf->state == TRF7970A_ST_RF_OFF) { mutex_unlock(&trf->lock); return IRQ_NONE; } ret = trf7970a_read_irqstatus(trf, &status); if (ret) { mutex_unlock(&trf->lock); return IRQ_NONE; } dev_dbg(trf->dev, "IRQ - state: %d, status: 0x%x\n", trf->state, status); if (!status) { mutex_unlock(&trf->lock); return IRQ_NONE; } switch (trf->state) { case TRF7970A_ST_IDLE: case TRF7970A_ST_IDLE_RX_BLOCKED: /* If initiator and getting interrupts caused by RF noise, * turn off the receiver to avoid unnecessary interrupts. * It will be turned back on in trf7970a_send_cmd() when * the next command is issued. 
*/ if (trf->is_initiator && (status & TRF7970A_IRQ_STATUS_ERROR)) { trf7970a_cmd(trf, TRF7970A_CMD_BLOCK_RX); trf->state = TRF7970A_ST_IDLE_RX_BLOCKED; } trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET); break; case TRF7970A_ST_WAIT_FOR_TX_FIFO: if (status & TRF7970A_IRQ_STATUS_TX) { trf->ignore_timeout = !cancel_delayed_work(&trf->timeout_work); trf7970a_fill_fifo(trf); } else { trf7970a_send_err_upstream(trf, -EIO); } break; case TRF7970A_ST_WAIT_FOR_RX_DATA: case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT: if (status & TRF7970A_IRQ_STATUS_SRX) { trf->ignore_timeout = !cancel_delayed_work(&trf->timeout_work); trf7970a_drain_fifo(trf, status); } else if (status & TRF7970A_IRQ_STATUS_FIFO) { ret = trf7970a_read(trf, TRF7970A_FIFO_STATUS, &fifo_bytes); fifo_bytes &= ~TRF7970A_FIFO_STATUS_OVERFLOW; if (ret) trf7970a_send_err_upstream(trf, ret); else if (!fifo_bytes) trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET); } else if ((status == TRF7970A_IRQ_STATUS_TX) || (!trf->is_initiator && (status == (TRF7970A_IRQ_STATUS_TX | TRF7970A_IRQ_STATUS_NFC_RF)))) { trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET); if (!trf->timeout) { trf->ignore_timeout = !cancel_delayed_work(&trf->timeout_work); trf->rx_skb = ERR_PTR(0); trf7970a_send_upstream(trf); break; } if (trf->is_initiator) break; iso_ctrl = trf->iso_ctrl; switch (trf->framing) { case NFC_DIGITAL_FRAMING_NFCA_STANDARD: trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC; iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N; trf->iso_ctrl = 0xff; /* Force ISO_CTRL write */ break; case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A: trf->tx_cmd = TRF7970A_CMD_TRANSMIT; iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N; trf->iso_ctrl = 0xff; /* Force ISO_CTRL write */ break; case NFC_DIGITAL_FRAMING_NFCA_ANTICOL_COMPLETE: ret = trf7970a_write(trf, TRF7970A_SPECIAL_FCN_REG1, TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL); if (ret) goto err_unlock_exit; trf->special_fcn_reg1 = TRF7970A_SPECIAL_FCN_REG1_14_ANTICOLL; break; default: break; } if (iso_ctrl != trf->iso_ctrl) { ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl); if (ret) goto err_unlock_exit; trf->iso_ctrl = iso_ctrl; } } else { trf7970a_send_err_upstream(trf, -EIO); } break; case TRF7970A_ST_WAIT_TO_ISSUE_EOF: if (status != TRF7970A_IRQ_STATUS_TX) trf7970a_send_err_upstream(trf, -EIO); break; case TRF7970A_ST_LISTENING: if (status & TRF7970A_IRQ_STATUS_SRX) { trf->ignore_timeout = !cancel_delayed_work(&trf->timeout_work); trf7970a_drain_fifo(trf, status); } else if (!(status & TRF7970A_IRQ_STATUS_NFC_RF)) { trf7970a_send_err_upstream(trf, -EIO); } break; case TRF7970A_ST_LISTENING_MD: if (status & TRF7970A_IRQ_STATUS_SRX) { trf->ignore_timeout = !cancel_delayed_work(&trf->timeout_work); ret = trf7970a_mode_detect(trf, &trf->md_rf_tech); if (ret) { trf7970a_send_err_upstream(trf, ret); } else { trf->state = TRF7970A_ST_LISTENING; trf7970a_drain_fifo(trf, status); } } else if (!(status & TRF7970A_IRQ_STATUS_NFC_RF)) { trf7970a_send_err_upstream(trf, -EIO); } break; default: dev_err(trf->dev, "%s - Driver in invalid state: %d\n", __func__, trf->state); } err_unlock_exit: mutex_unlock(&trf->lock); return IRQ_HANDLED; } static void trf7970a_issue_eof(struct trf7970a *trf) { int ret; dev_dbg(trf->dev, "Issuing EOF\n"); ret = trf7970a_cmd(trf, TRF7970A_CMD_FIFO_RESET); if (ret) trf7970a_send_err_upstream(trf, ret); ret = trf7970a_cmd(trf, TRF7970A_CMD_EOF); if (ret) trf7970a_send_err_upstream(trf, ret); trf->state = TRF7970A_ST_WAIT_FOR_RX_DATA; dev_dbg(trf->dev, "Setting timeout for %d ms, state: %d\n", trf->timeout, trf->state); 
schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(trf->timeout)); } static void trf7970a_timeout_work_handler(struct work_struct *work) { struct trf7970a *trf = container_of(work, struct trf7970a, timeout_work.work); dev_dbg(trf->dev, "Timeout - state: %d, ignore_timeout: %d\n", trf->state, trf->ignore_timeout); mutex_lock(&trf->lock); if (trf->ignore_timeout) trf->ignore_timeout = false; else if (trf->state == TRF7970A_ST_WAIT_FOR_RX_DATA_CONT) trf7970a_drain_fifo(trf, TRF7970A_IRQ_STATUS_SRX); else if (trf->state == TRF7970A_ST_WAIT_TO_ISSUE_EOF) trf7970a_issue_eof(trf); else trf7970a_send_err_upstream(trf, -ETIMEDOUT); mutex_unlock(&trf->lock); } static int trf7970a_init(struct trf7970a *trf) { int ret; dev_dbg(trf->dev, "Initializing device - state: %d\n", trf->state); ret = trf7970a_cmd(trf, TRF7970A_CMD_SOFT_INIT); if (ret) goto err_out; ret = trf7970a_cmd(trf, TRF7970A_CMD_IDLE); if (ret) goto err_out; ret = trf7970a_write(trf, TRF7970A_REG_IO_CTRL, trf->io_ctrl | TRF7970A_REG_IO_CTRL_VRS(0x1)); if (ret) goto err_out; ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, 0); if (ret) goto err_out; usleep_range(1000, 2000); trf->chip_status_ctrl &= ~TRF7970A_CHIP_STATUS_RF_ON; ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL, trf->modulator_sys_clk_ctrl); if (ret) goto err_out; ret = trf7970a_write(trf, TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS, TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLH_96 | TRF7970A_ADJUTABLE_FIFO_IRQ_LEVELS_WLL_32); if (ret) goto err_out; ret = trf7970a_write(trf, TRF7970A_SPECIAL_FCN_REG1, 0); if (ret) goto err_out; trf->special_fcn_reg1 = 0; trf->iso_ctrl = 0xff; return 0; err_out: dev_dbg(trf->dev, "Couldn't init device: %d\n", ret); return ret; } static void trf7970a_switch_rf_off(struct trf7970a *trf) { if ((trf->state == TRF7970A_ST_PWR_OFF) || (trf->state == TRF7970A_ST_RF_OFF)) return; dev_dbg(trf->dev, "Switching rf off\n"); trf->chip_status_ctrl &= ~TRF7970A_CHIP_STATUS_RF_ON; trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, trf->chip_status_ctrl); trf->aborting = false; trf->state = TRF7970A_ST_RF_OFF; pm_runtime_mark_last_busy(trf->dev); pm_runtime_put_autosuspend(trf->dev); } static int trf7970a_switch_rf_on(struct trf7970a *trf) { int ret; dev_dbg(trf->dev, "Switching rf on\n"); pm_runtime_get_sync(trf->dev); if (trf->state != TRF7970A_ST_RF_OFF) { /* Power on, RF off */ dev_err(trf->dev, "%s - Incorrect state: %d\n", __func__, trf->state); return -EINVAL; } ret = trf7970a_init(trf); if (ret) { dev_err(trf->dev, "%s - Can't initialize: %d\n", __func__, ret); return ret; } trf->state = TRF7970A_ST_IDLE; return 0; } static int trf7970a_switch_rf(struct nfc_digital_dev *ddev, bool on) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); int ret = 0; dev_dbg(trf->dev, "Switching RF - state: %d, on: %d\n", trf->state, on); mutex_lock(&trf->lock); if (on) { switch (trf->state) { case TRF7970A_ST_PWR_OFF: case TRF7970A_ST_RF_OFF: ret = trf7970a_switch_rf_on(trf); break; case TRF7970A_ST_IDLE: case TRF7970A_ST_IDLE_RX_BLOCKED: break; default: dev_err(trf->dev, "%s - Invalid request: %d %d\n", __func__, trf->state, on); trf7970a_switch_rf_off(trf); ret = -EINVAL; } } else { switch (trf->state) { case TRF7970A_ST_PWR_OFF: case TRF7970A_ST_RF_OFF: break; default: dev_err(trf->dev, "%s - Invalid request: %d %d\n", __func__, trf->state, on); ret = -EINVAL; fallthrough; case TRF7970A_ST_IDLE: case TRF7970A_ST_IDLE_RX_BLOCKED: case TRF7970A_ST_WAIT_FOR_RX_DATA: case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT: trf7970a_switch_rf_off(trf); } } mutex_unlock(&trf->lock); 
return ret; } static int trf7970a_in_config_rf_tech(struct trf7970a *trf, int tech) { int ret = 0; dev_dbg(trf->dev, "rf technology: %d\n", tech); switch (tech) { case NFC_DIGITAL_RF_TECH_106A: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443A_106; trf->modulator_sys_clk_ctrl = (trf->modulator_sys_clk_ctrl & 0xf8) | TRF7970A_MODULATOR_DEPTH_OOK; trf->guard_time = TRF7970A_GUARD_TIME_NFCA; break; case NFC_DIGITAL_RF_TECH_106B: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_14443B_106; trf->modulator_sys_clk_ctrl = (trf->modulator_sys_clk_ctrl & 0xf8) | TRF7970A_MODULATOR_DEPTH_ASK10; trf->guard_time = TRF7970A_GUARD_TIME_NFCB; break; case NFC_DIGITAL_RF_TECH_212F: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_212; trf->modulator_sys_clk_ctrl = (trf->modulator_sys_clk_ctrl & 0xf8) | TRF7970A_MODULATOR_DEPTH_ASK10; trf->guard_time = TRF7970A_GUARD_TIME_NFCF; break; case NFC_DIGITAL_RF_TECH_424F: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_FELICA_424; trf->modulator_sys_clk_ctrl = (trf->modulator_sys_clk_ctrl & 0xf8) | TRF7970A_MODULATOR_DEPTH_ASK10; trf->guard_time = TRF7970A_GUARD_TIME_NFCF; break; case NFC_DIGITAL_RF_TECH_ISO15693: trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648; trf->modulator_sys_clk_ctrl = (trf->modulator_sys_clk_ctrl & 0xf8) | TRF7970A_MODULATOR_DEPTH_OOK; trf->guard_time = TRF7970A_GUARD_TIME_15693; break; default: dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech); return -EINVAL; } trf->technology = tech; /* If in initiator mode and not changing the RF tech due to a * PSL sequence (indicated by 'trf->iso_ctrl == 0xff' from * trf7970a_init()), clear the NFC Target Detection Level register * due to erratum. */ if (trf->iso_ctrl == 0xff) ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, 0); return ret; } static int trf7970a_is_rf_field(struct trf7970a *trf, bool *is_rf_field) { int ret; u8 rssi; ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, trf->chip_status_ctrl | TRF7970A_CHIP_STATUS_REC_ON); if (ret) return ret; ret = trf7970a_cmd(trf, TRF7970A_CMD_TEST_EXT_RF); if (ret) return ret; usleep_range(50, 60); ret = trf7970a_read(trf, TRF7970A_RSSI_OSC_STATUS, &rssi); if (ret) return ret; ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, trf->chip_status_ctrl); if (ret) return ret; if (rssi & TRF7970A_RSSI_OSC_STATUS_RSSI_MASK) *is_rf_field = true; else *is_rf_field = false; return 0; } static int trf7970a_in_config_framing(struct trf7970a *trf, int framing) { u8 iso_ctrl = trf->iso_ctrl_tech; bool is_rf_field = false; int ret; dev_dbg(trf->dev, "framing: %d\n", framing); switch (framing) { case NFC_DIGITAL_FRAMING_NFCA_SHORT: case NFC_DIGITAL_FRAMING_NFCA_STANDARD: trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC; iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N; break; case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A: case NFC_DIGITAL_FRAMING_NFCA_T4T: case NFC_DIGITAL_FRAMING_NFCB: case NFC_DIGITAL_FRAMING_NFCB_T4T: case NFC_DIGITAL_FRAMING_NFCF: case NFC_DIGITAL_FRAMING_NFCF_T3T: case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY: case NFC_DIGITAL_FRAMING_ISO15693_T5T: case NFC_DIGITAL_FRAMING_NFCA_NFC_DEP: case NFC_DIGITAL_FRAMING_NFCF_NFC_DEP: trf->tx_cmd = TRF7970A_CMD_TRANSMIT; iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N; break; case NFC_DIGITAL_FRAMING_NFCA_T2T: trf->tx_cmd = TRF7970A_CMD_TRANSMIT; iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N; break; default: dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing); return -EINVAL; } trf->framing = framing; if (!(trf->chip_status_ctrl & TRF7970A_CHIP_STATUS_RF_ON)) { ret = trf7970a_is_rf_field(trf, &is_rf_field); if (ret) return 
ret; if (is_rf_field) return -EBUSY; } if (iso_ctrl != trf->iso_ctrl) { ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl); if (ret) return ret; trf->iso_ctrl = iso_ctrl; ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL, trf->modulator_sys_clk_ctrl); if (ret) return ret; } if (!(trf->chip_status_ctrl & TRF7970A_CHIP_STATUS_RF_ON)) { ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, trf->chip_status_ctrl | TRF7970A_CHIP_STATUS_RF_ON); if (ret) return ret; trf->chip_status_ctrl |= TRF7970A_CHIP_STATUS_RF_ON; usleep_range(trf->guard_time, trf->guard_time + 1000); } return 0; } static int trf7970a_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); int ret; dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param); mutex_lock(&trf->lock); trf->is_initiator = true; if ((trf->state == TRF7970A_ST_PWR_OFF) || (trf->state == TRF7970A_ST_RF_OFF)) { ret = trf7970a_switch_rf_on(trf); if (ret) goto err_unlock; } switch (type) { case NFC_DIGITAL_CONFIG_RF_TECH: ret = trf7970a_in_config_rf_tech(trf, param); break; case NFC_DIGITAL_CONFIG_FRAMING: ret = trf7970a_in_config_framing(trf, param); break; default: dev_dbg(trf->dev, "Unknown type: %d\n", type); ret = -EINVAL; } err_unlock: mutex_unlock(&trf->lock); return ret; } static int trf7970a_is_iso15693_write_or_lock(u8 cmd) { switch (cmd) { case ISO15693_CMD_WRITE_SINGLE_BLOCK: case ISO15693_CMD_LOCK_BLOCK: case ISO15693_CMD_WRITE_MULTIPLE_BLOCK: case ISO15693_CMD_WRITE_AFI: case ISO15693_CMD_LOCK_AFI: case ISO15693_CMD_WRITE_DSFID: case ISO15693_CMD_LOCK_DSFID: return 1; default: return 0; } } static int trf7970a_per_cmd_config(struct trf7970a *trf, const struct sk_buff *skb) { const u8 *req = skb->data; u8 special_fcn_reg1, iso_ctrl; int ret; trf->issue_eof = false; /* When issuing a Type 2 read command, make sure the '4_bit_RX' bit in * special functions register 1 is cleared; otherwise, it's a write or * sector select command and '4_bit_RX' must be set. * * When issuing an ISO 15693 command, inspect the flags byte to see * what speed to use. Also, remember if the OPTION flag is set on * a Type 5 write or lock command so the driver will know that it * has to send an EOF in order to get a response.
*/ if ((trf->technology == NFC_DIGITAL_RF_TECH_106A) && (trf->framing == NFC_DIGITAL_FRAMING_NFCA_T2T)) { if (req[0] == NFC_T2T_CMD_READ) special_fcn_reg1 = 0; else special_fcn_reg1 = TRF7970A_SPECIAL_FCN_REG1_4_BIT_RX; if (special_fcn_reg1 != trf->special_fcn_reg1) { ret = trf7970a_write(trf, TRF7970A_SPECIAL_FCN_REG1, special_fcn_reg1); if (ret) return ret; trf->special_fcn_reg1 = special_fcn_reg1; } } else if (trf->technology == NFC_DIGITAL_RF_TECH_ISO15693) { iso_ctrl = trf->iso_ctrl & ~TRF7970A_ISO_CTRL_RFID_SPEED_MASK; switch (req[0] & ISO15693_REQ_FLAG_SPEED_MASK) { case 0x00: iso_ctrl |= TRF7970A_ISO_CTRL_15693_SGL_1OF4_662; break; case ISO15693_REQ_FLAG_SUB_CARRIER: iso_ctrl |= TRF7970A_ISO_CTRL_15693_DBL_1OF4_667a; break; case ISO15693_REQ_FLAG_DATA_RATE: iso_ctrl |= TRF7970A_ISO_CTRL_15693_SGL_1OF4_2648; break; case (ISO15693_REQ_FLAG_SUB_CARRIER | ISO15693_REQ_FLAG_DATA_RATE): iso_ctrl |= TRF7970A_ISO_CTRL_15693_DBL_1OF4_2669; break; } if (iso_ctrl != trf->iso_ctrl) { ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl); if (ret) return ret; trf->iso_ctrl = iso_ctrl; } if ((trf->framing == NFC_DIGITAL_FRAMING_ISO15693_T5T) && trf7970a_is_iso15693_write_or_lock(req[1]) && (req[0] & ISO15693_REQ_FLAG_OPTION)) trf->issue_eof = true; } return 0; } static int trf7970a_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); u8 prefix[5]; unsigned int len; int ret; u8 status; dev_dbg(trf->dev, "New request - state: %d, timeout: %d ms, len: %d\n", trf->state, timeout, skb->len); if (skb->len > TRF7970A_TX_MAX) return -EINVAL; mutex_lock(&trf->lock); if ((trf->state != TRF7970A_ST_IDLE) && (trf->state != TRF7970A_ST_IDLE_RX_BLOCKED)) { dev_err(trf->dev, "%s - Bogus state: %d\n", __func__, trf->state); ret = -EIO; goto out_err; } if (trf->aborting) { dev_dbg(trf->dev, "Abort process complete\n"); trf->aborting = false; ret = -ECANCELED; goto out_err; } if (timeout) { trf->rx_skb = nfc_alloc_recv_skb(TRF7970A_RX_SKB_ALLOC_SIZE, GFP_KERNEL); if (!trf->rx_skb) { dev_dbg(trf->dev, "Can't alloc rx_skb\n"); ret = -ENOMEM; goto out_err; } } if (trf->state == TRF7970A_ST_IDLE_RX_BLOCKED) { ret = trf7970a_cmd(trf, TRF7970A_CMD_ENABLE_RX); if (ret) goto out_err; trf->state = TRF7970A_ST_IDLE; } if (trf->is_initiator) { ret = trf7970a_per_cmd_config(trf, skb); if (ret) goto out_err; } trf->ddev = ddev; trf->tx_skb = skb; trf->cb = cb; trf->cb_arg = arg; trf->timeout = timeout; trf->ignore_timeout = false; len = skb->len; /* TX data must be prefixed with a FIFO reset cmd, a cmd that depends * on what the current framing is, the address of the TX length byte 1 * register (0x1d), and the 2 byte length of the data to be transmitted. * That totals 5 bytes. 
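	 *
	 * Worked example (illustrative): for a 0x123 byte payload, the code
	 * below yields prefix[3] = 0x12 (length bits 11-4) and
	 * prefix[4] = 0x30 (length bits 3-0 in the upper nibble), i.e. the
	 * 12-bit length spans TX length byte 1 and the top nibble of TX
	 * length byte 2.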
	 */
	prefix[0] = TRF7970A_CMD_BIT_CTRL |
	    TRF7970A_CMD_BIT_OPCODE(TRF7970A_CMD_FIFO_RESET);
	prefix[1] = TRF7970A_CMD_BIT_CTRL |
	    TRF7970A_CMD_BIT_OPCODE(trf->tx_cmd);
	prefix[2] = TRF7970A_CMD_BIT_CONTINUOUS | TRF7970A_TX_LENGTH_BYTE1;

	if (trf->framing == NFC_DIGITAL_FRAMING_NFCA_SHORT) {
		prefix[3] = 0x00;
		prefix[4] = 0x0f;	/* 7 bits */
	} else {
		prefix[3] = (len & 0xf00) >> 4;
		prefix[3] |= ((len & 0xf0) >> 4);
		prefix[4] = ((len & 0x0f) << 4);
	}

	len = min_t(int, skb->len, TRF7970A_FIFO_SIZE);

	/* Clear possible spurious interrupt */
	ret = trf7970a_read_irqstatus(trf, &status);
	if (ret)
		goto out_err;

	ret = trf7970a_transmit(trf, skb, len, prefix, sizeof(prefix));
	if (ret) {
		kfree_skb(trf->rx_skb);
		trf->rx_skb = NULL;
	}

out_err:
	mutex_unlock(&trf->lock);
	return ret;
}

static int trf7970a_tg_config_rf_tech(struct trf7970a *trf, int tech)
{
	int ret = 0;

	dev_dbg(trf->dev, "rf technology: %d\n", tech);

	switch (tech) {
	case NFC_DIGITAL_RF_TECH_106A:
		trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE |
		    TRF7970A_ISO_CTRL_NFC_CE |
		    TRF7970A_ISO_CTRL_NFC_CE_14443A;
		trf->modulator_sys_clk_ctrl =
		    (trf->modulator_sys_clk_ctrl & 0xf8) |
		    TRF7970A_MODULATOR_DEPTH_OOK;
		break;
	case NFC_DIGITAL_RF_TECH_212F:
		trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE |
		    TRF7970A_ISO_CTRL_NFC_NFCF_212;
		trf->modulator_sys_clk_ctrl =
		    (trf->modulator_sys_clk_ctrl & 0xf8) |
		    TRF7970A_MODULATOR_DEPTH_ASK10;
		break;
	case NFC_DIGITAL_RF_TECH_424F:
		trf->iso_ctrl_tech = TRF7970A_ISO_CTRL_NFC_NFC_CE_MODE |
		    TRF7970A_ISO_CTRL_NFC_NFCF_424;
		trf->modulator_sys_clk_ctrl =
		    (trf->modulator_sys_clk_ctrl & 0xf8) |
		    TRF7970A_MODULATOR_DEPTH_ASK10;
		break;
	default:
		dev_dbg(trf->dev, "Unsupported rf technology: %d\n", tech);
		return -EINVAL;
	}

	trf->technology = tech;

	/* Normally we write the ISO_CTRL register in
	 * trf7970a_tg_config_framing() because the framing can change
	 * the value written. However, when sending a PSL RES,
	 * digital_tg_send_psl_res_complete() doesn't call
	 * trf7970a_tg_config_framing() so we must write the register
	 * here.
	 */
	if ((trf->framing == NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED) &&
	    (trf->iso_ctrl_tech != trf->iso_ctrl)) {
		ret = trf7970a_write(trf, TRF7970A_ISO_CTRL,
				     trf->iso_ctrl_tech);

		trf->iso_ctrl = trf->iso_ctrl_tech;
	}

	return ret;
}

/* Since this is a target routine, several of the framing calls are
 * made between receiving the request and sending the response, so they
 * shouldn't take effect until after the response is sent. This is
 * accomplished by skipping the ISO_CTRL register write here and doing
 * it in the interrupt handler.
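 *
 * For example, the NFC_DIGITAL_FRAMING_NFCA_STANDARD* cases below leave
 * iso_ctrl at its current value on purpose, so the new framing is only
 * applied (by the interrupt handler) once the response frame has gone
 * out.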
*/ static int trf7970a_tg_config_framing(struct trf7970a *trf, int framing) { u8 iso_ctrl = trf->iso_ctrl_tech; int ret; dev_dbg(trf->dev, "framing: %d\n", framing); switch (framing) { case NFC_DIGITAL_FRAMING_NFCA_NFC_DEP: trf->tx_cmd = TRF7970A_CMD_TRANSMIT_NO_CRC; iso_ctrl |= TRF7970A_ISO_CTRL_RX_CRC_N; break; case NFC_DIGITAL_FRAMING_NFCA_STANDARD: case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A: case NFC_DIGITAL_FRAMING_NFCA_ANTICOL_COMPLETE: /* These ones are applied in the interrupt handler */ iso_ctrl = trf->iso_ctrl; /* Don't write to ISO_CTRL yet */ break; case NFC_DIGITAL_FRAMING_NFCF_NFC_DEP: trf->tx_cmd = TRF7970A_CMD_TRANSMIT; iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N; break; case NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED: trf->tx_cmd = TRF7970A_CMD_TRANSMIT; iso_ctrl &= ~TRF7970A_ISO_CTRL_RX_CRC_N; break; default: dev_dbg(trf->dev, "Unsupported Framing: %d\n", framing); return -EINVAL; } trf->framing = framing; if (iso_ctrl != trf->iso_ctrl) { ret = trf7970a_write(trf, TRF7970A_ISO_CTRL, iso_ctrl); if (ret) return ret; trf->iso_ctrl = iso_ctrl; ret = trf7970a_write(trf, TRF7970A_MODULATOR_SYS_CLK_CTRL, trf->modulator_sys_clk_ctrl); if (ret) return ret; } if (!(trf->chip_status_ctrl & TRF7970A_CHIP_STATUS_RF_ON)) { ret = trf7970a_write(trf, TRF7970A_CHIP_STATUS_CTRL, trf->chip_status_ctrl | TRF7970A_CHIP_STATUS_RF_ON); if (ret) return ret; trf->chip_status_ctrl |= TRF7970A_CHIP_STATUS_RF_ON; } return 0; } static int trf7970a_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); int ret; dev_dbg(trf->dev, "Configure hw - type: %d, param: %d\n", type, param); mutex_lock(&trf->lock); trf->is_initiator = false; if ((trf->state == TRF7970A_ST_PWR_OFF) || (trf->state == TRF7970A_ST_RF_OFF)) { ret = trf7970a_switch_rf_on(trf); if (ret) goto err_unlock; } switch (type) { case NFC_DIGITAL_CONFIG_RF_TECH: ret = trf7970a_tg_config_rf_tech(trf, param); break; case NFC_DIGITAL_CONFIG_FRAMING: ret = trf7970a_tg_config_framing(trf, param); break; default: dev_dbg(trf->dev, "Unknown type: %d\n", type); ret = -EINVAL; } err_unlock: mutex_unlock(&trf->lock); return ret; } static int _trf7970a_tg_listen(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg, bool mode_detect) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); int ret; mutex_lock(&trf->lock); if ((trf->state != TRF7970A_ST_IDLE) && (trf->state != TRF7970A_ST_IDLE_RX_BLOCKED)) { dev_err(trf->dev, "%s - Bogus state: %d\n", __func__, trf->state); ret = -EIO; goto out_err; } if (trf->aborting) { dev_dbg(trf->dev, "Abort process complete\n"); trf->aborting = false; ret = -ECANCELED; goto out_err; } trf->rx_skb = nfc_alloc_recv_skb(TRF7970A_RX_SKB_ALLOC_SIZE, GFP_KERNEL); if (!trf->rx_skb) { dev_dbg(trf->dev, "Can't alloc rx_skb\n"); ret = -ENOMEM; goto out_err; } ret = trf7970a_write(trf, TRF7970A_RX_SPECIAL_SETTINGS, TRF7970A_RX_SPECIAL_SETTINGS_HBT | TRF7970A_RX_SPECIAL_SETTINGS_M848 | TRF7970A_RX_SPECIAL_SETTINGS_C424 | TRF7970A_RX_SPECIAL_SETTINGS_C212); if (ret) goto out_err; ret = trf7970a_write(trf, TRF7970A_REG_IO_CTRL, trf->io_ctrl | TRF7970A_REG_IO_CTRL_VRS(0x1)); if (ret) goto out_err; ret = trf7970a_write(trf, TRF7970A_NFC_LOW_FIELD_LEVEL, TRF7970A_NFC_LOW_FIELD_LEVEL_RFDET(0x3)); if (ret) goto out_err; ret = trf7970a_write(trf, TRF7970A_NFC_TARGET_LEVEL, TRF7970A_NFC_TARGET_LEVEL_RFDET(0x7)); if (ret) goto out_err; trf->ddev = ddev; trf->cb = cb; trf->cb_arg = arg; trf->timeout = timeout; trf->ignore_timeout = false; ret = 
trf7970a_cmd(trf, TRF7970A_CMD_ENABLE_RX); if (ret) goto out_err; trf->state = mode_detect ? TRF7970A_ST_LISTENING_MD : TRF7970A_ST_LISTENING; schedule_delayed_work(&trf->timeout_work, msecs_to_jiffies(timeout)); out_err: mutex_unlock(&trf->lock); return ret; } static int trf7970a_tg_listen(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { const struct trf7970a *trf = nfc_digital_get_drvdata(ddev); dev_dbg(trf->dev, "Listen - state: %d, timeout: %d ms\n", trf->state, timeout); return _trf7970a_tg_listen(ddev, timeout, cb, arg, false); } static int trf7970a_tg_listen_md(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { const struct trf7970a *trf = nfc_digital_get_drvdata(ddev); int ret; dev_dbg(trf->dev, "Listen MD - state: %d, timeout: %d ms\n", trf->state, timeout); ret = trf7970a_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, NFC_DIGITAL_RF_TECH_106A); if (ret) return ret; ret = trf7970a_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCA_NFC_DEP); if (ret) return ret; return _trf7970a_tg_listen(ddev, timeout, cb, arg, true); } static int trf7970a_tg_get_rf_tech(struct nfc_digital_dev *ddev, u8 *rf_tech) { const struct trf7970a *trf = nfc_digital_get_drvdata(ddev); dev_dbg(trf->dev, "Get RF Tech - state: %d, rf_tech: %d\n", trf->state, trf->md_rf_tech); *rf_tech = trf->md_rf_tech; return 0; } static void trf7970a_abort_cmd(struct nfc_digital_dev *ddev) { struct trf7970a *trf = nfc_digital_get_drvdata(ddev); dev_dbg(trf->dev, "Abort process initiated\n"); mutex_lock(&trf->lock); switch (trf->state) { case TRF7970A_ST_WAIT_FOR_TX_FIFO: case TRF7970A_ST_WAIT_FOR_RX_DATA: case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT: case TRF7970A_ST_WAIT_TO_ISSUE_EOF: trf->aborting = true; break; case TRF7970A_ST_LISTENING: trf->ignore_timeout = !cancel_delayed_work(&trf->timeout_work); trf7970a_send_err_upstream(trf, -ECANCELED); dev_dbg(trf->dev, "Abort process complete\n"); break; default: break; } mutex_unlock(&trf->lock); } static const struct nfc_digital_ops trf7970a_nfc_ops = { .in_configure_hw = trf7970a_in_configure_hw, .in_send_cmd = trf7970a_send_cmd, .tg_configure_hw = trf7970a_tg_configure_hw, .tg_send_cmd = trf7970a_send_cmd, .tg_listen = trf7970a_tg_listen, .tg_listen_md = trf7970a_tg_listen_md, .tg_get_rf_tech = trf7970a_tg_get_rf_tech, .switch_rf = trf7970a_switch_rf, .abort_cmd = trf7970a_abort_cmd, }; static int trf7970a_power_up(struct trf7970a *trf) { int ret; dev_dbg(trf->dev, "Powering up - state: %d\n", trf->state); if (trf->state != TRF7970A_ST_PWR_OFF) return 0; ret = regulator_enable(trf->regulator); if (ret) { dev_err(trf->dev, "%s - Can't enable VIN: %d\n", __func__, ret); return ret; } usleep_range(5000, 6000); if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW)) { gpiod_set_value_cansleep(trf->en2_gpiod, 1); usleep_range(1000, 2000); } gpiod_set_value_cansleep(trf->en_gpiod, 1); usleep_range(20000, 21000); trf->state = TRF7970A_ST_RF_OFF; return 0; } static int trf7970a_power_down(struct trf7970a *trf) { int ret; dev_dbg(trf->dev, "Powering down - state: %d\n", trf->state); if (trf->state == TRF7970A_ST_PWR_OFF) return 0; if (trf->state != TRF7970A_ST_RF_OFF) { dev_dbg(trf->dev, "Can't power down - not RF_OFF state (%d)\n", trf->state); return -EBUSY; } gpiod_set_value_cansleep(trf->en_gpiod, 0); if (trf->en2_gpiod && !(trf->quirks & TRF7970A_QUIRK_EN2_MUST_STAY_LOW)) gpiod_set_value_cansleep(trf->en2_gpiod, 0); ret = regulator_disable(trf->regulator); if (ret) 
dev_err(trf->dev, "%s - Can't disable VIN: %d\n", __func__, ret); trf->state = TRF7970A_ST_PWR_OFF; return ret; } static int trf7970a_startup(struct trf7970a *trf) { int ret; ret = trf7970a_power_up(trf); if (ret) return ret; pm_runtime_set_active(trf->dev); pm_runtime_enable(trf->dev); pm_runtime_mark_last_busy(trf->dev); return 0; } static void trf7970a_shutdown(struct trf7970a *trf) { switch (trf->state) { case TRF7970A_ST_WAIT_FOR_TX_FIFO: case TRF7970A_ST_WAIT_FOR_RX_DATA: case TRF7970A_ST_WAIT_FOR_RX_DATA_CONT: case TRF7970A_ST_WAIT_TO_ISSUE_EOF: case TRF7970A_ST_LISTENING: trf7970a_send_err_upstream(trf, -ECANCELED); fallthrough; case TRF7970A_ST_IDLE: case TRF7970A_ST_IDLE_RX_BLOCKED: trf7970a_switch_rf_off(trf); break; default: break; } pm_runtime_disable(trf->dev); pm_runtime_set_suspended(trf->dev); trf7970a_power_down(trf); } static int trf7970a_get_autosuspend_delay(const struct device_node *np) { int autosuspend_delay, ret; ret = of_property_read_u32(np, "autosuspend-delay", &autosuspend_delay); if (ret) autosuspend_delay = TRF7970A_AUTOSUSPEND_DELAY; return autosuspend_delay; } static int trf7970a_probe(struct spi_device *spi) { const struct device_node *np = spi->dev.of_node; struct trf7970a *trf; int uvolts, autosuspend_delay, ret; u32 clk_freq = TRF7970A_13MHZ_CLOCK_FREQUENCY; if (!np) { dev_err(&spi->dev, "No Device Tree entry\n"); return -EINVAL; } trf = devm_kzalloc(&spi->dev, sizeof(*trf), GFP_KERNEL); if (!trf) return -ENOMEM; trf->state = TRF7970A_ST_PWR_OFF; trf->dev = &spi->dev; trf->spi = spi; spi->mode = SPI_MODE_1; spi->bits_per_word = 8; ret = spi_setup(spi); if (ret < 0) { dev_err(trf->dev, "Can't set up SPI Communication\n"); return ret; } if (of_property_read_bool(np, "irq-status-read-quirk")) trf->quirks |= TRF7970A_QUIRK_IRQ_STATUS_READ; /* There are two enable pins - only EN must be present in the DT */ trf->en_gpiod = devm_gpiod_get_index(trf->dev, "ti,enable", 0, GPIOD_OUT_LOW); if (IS_ERR(trf->en_gpiod)) { dev_err(trf->dev, "No EN GPIO property\n"); return PTR_ERR(trf->en_gpiod); } trf->en2_gpiod = devm_gpiod_get_index_optional(trf->dev, "ti,enable", 1, GPIOD_OUT_LOW); if (!trf->en2_gpiod) { dev_info(trf->dev, "No EN2 GPIO property\n"); } else if (IS_ERR(trf->en2_gpiod)) { dev_err(trf->dev, "Error getting EN2 GPIO property: %ld\n", PTR_ERR(trf->en2_gpiod)); return PTR_ERR(trf->en2_gpiod); } else if (of_property_read_bool(np, "en2-rf-quirk")) { trf->quirks |= TRF7970A_QUIRK_EN2_MUST_STAY_LOW; } of_property_read_u32(np, "clock-frequency", &clk_freq); if ((clk_freq != TRF7970A_27MHZ_CLOCK_FREQUENCY) && (clk_freq != TRF7970A_13MHZ_CLOCK_FREQUENCY)) { dev_err(trf->dev, "clock-frequency (%u Hz) unsupported\n", clk_freq); return -EINVAL; } if (clk_freq == TRF7970A_27MHZ_CLOCK_FREQUENCY) { trf->modulator_sys_clk_ctrl = TRF7970A_MODULATOR_27MHZ; dev_dbg(trf->dev, "trf7970a configured for 27MHz crystal\n"); } else { trf->modulator_sys_clk_ctrl = 0; } ret = devm_request_threaded_irq(trf->dev, spi->irq, NULL, trf7970a_irq, IRQF_TRIGGER_RISING | IRQF_ONESHOT, "trf7970a", trf); if (ret) { dev_err(trf->dev, "Can't request IRQ#%d: %d\n", spi->irq, ret); return ret; } mutex_init(&trf->lock); INIT_DELAYED_WORK(&trf->timeout_work, trf7970a_timeout_work_handler); trf->regulator = devm_regulator_get(&spi->dev, "vin"); if (IS_ERR(trf->regulator)) { ret = PTR_ERR(trf->regulator); dev_err(trf->dev, "Can't get VIN regulator: %d\n", ret); goto err_destroy_lock; } ret = regulator_enable(trf->regulator); if (ret) { dev_err(trf->dev, "Can't enable VIN: %d\n", ret); goto 
err_destroy_lock; } uvolts = regulator_get_voltage(trf->regulator); if (uvolts > 4000000) trf->chip_status_ctrl = TRF7970A_CHIP_STATUS_VRS5_3; trf->regulator = devm_regulator_get(&spi->dev, "vdd-io"); if (IS_ERR(trf->regulator)) { ret = PTR_ERR(trf->regulator); dev_err(trf->dev, "Can't get VDD_IO regulator: %d\n", ret); goto err_destroy_lock; } ret = regulator_enable(trf->regulator); if (ret) { dev_err(trf->dev, "Can't enable VDD_IO: %d\n", ret); goto err_destroy_lock; } if (regulator_get_voltage(trf->regulator) == 1800000) { trf->io_ctrl = TRF7970A_REG_IO_CTRL_IO_LOW; dev_dbg(trf->dev, "trf7970a config vdd_io to 1.8V\n"); } trf->ddev = nfc_digital_allocate_device(&trf7970a_nfc_ops, TRF7970A_SUPPORTED_PROTOCOLS, NFC_DIGITAL_DRV_CAPS_IN_CRC | NFC_DIGITAL_DRV_CAPS_TG_CRC, 0, 0); if (!trf->ddev) { dev_err(trf->dev, "Can't allocate NFC digital device\n"); ret = -ENOMEM; goto err_disable_regulator; } nfc_digital_set_parent_dev(trf->ddev, trf->dev); nfc_digital_set_drvdata(trf->ddev, trf); spi_set_drvdata(spi, trf); autosuspend_delay = trf7970a_get_autosuspend_delay(np); pm_runtime_set_autosuspend_delay(trf->dev, autosuspend_delay); pm_runtime_use_autosuspend(trf->dev); ret = trf7970a_startup(trf); if (ret) goto err_free_ddev; ret = nfc_digital_register_device(trf->ddev); if (ret) { dev_err(trf->dev, "Can't register NFC digital device: %d\n", ret); goto err_shutdown; } return 0; err_shutdown: trf7970a_shutdown(trf); err_free_ddev: nfc_digital_free_device(trf->ddev); err_disable_regulator: regulator_disable(trf->regulator); err_destroy_lock: mutex_destroy(&trf->lock); return ret; } static void trf7970a_remove(struct spi_device *spi) { struct trf7970a *trf = spi_get_drvdata(spi); mutex_lock(&trf->lock); trf7970a_shutdown(trf); mutex_unlock(&trf->lock); nfc_digital_unregister_device(trf->ddev); nfc_digital_free_device(trf->ddev); regulator_disable(trf->regulator); mutex_destroy(&trf->lock); } #ifdef CONFIG_PM_SLEEP static int trf7970a_suspend(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct trf7970a *trf = spi_get_drvdata(spi); mutex_lock(&trf->lock); trf7970a_shutdown(trf); mutex_unlock(&trf->lock); return 0; } static int trf7970a_resume(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct trf7970a *trf = spi_get_drvdata(spi); int ret; mutex_lock(&trf->lock); ret = trf7970a_startup(trf); mutex_unlock(&trf->lock); return ret; } #endif #ifdef CONFIG_PM static int trf7970a_pm_runtime_suspend(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct trf7970a *trf = spi_get_drvdata(spi); int ret; mutex_lock(&trf->lock); ret = trf7970a_power_down(trf); mutex_unlock(&trf->lock); return ret; } static int trf7970a_pm_runtime_resume(struct device *dev) { struct spi_device *spi = to_spi_device(dev); struct trf7970a *trf = spi_get_drvdata(spi); int ret; ret = trf7970a_power_up(trf); if (!ret) pm_runtime_mark_last_busy(dev); return ret; } #endif static const struct dev_pm_ops trf7970a_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(trf7970a_suspend, trf7970a_resume) SET_RUNTIME_PM_OPS(trf7970a_pm_runtime_suspend, trf7970a_pm_runtime_resume, NULL) }; static const struct of_device_id trf7970a_of_match[] __maybe_unused = { {.compatible = "ti,trf7970a",}, {}, }; MODULE_DEVICE_TABLE(of, trf7970a_of_match); static const struct spi_device_id trf7970a_id_table[] = { {"trf7970a", 0}, {} }; MODULE_DEVICE_TABLE(spi, trf7970a_id_table); static struct spi_driver trf7970a_spi_driver = { .probe = trf7970a_probe, .remove = trf7970a_remove, .id_table = trf7970a_id_table, .driver 
= { .name = "trf7970a", .of_match_table = of_match_ptr(trf7970a_of_match), .pm = &trf7970a_pm_ops, }, }; module_spi_driver(trf7970a_spi_driver); MODULE_AUTHOR("Mark A. Greer <[email protected]>"); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("TI trf7970a RFID/NFC Transceiver Driver");
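/*
 * Editor's note -- illustrative sketch, not part of the driver: every
 * rf-tech switch above updates MODULATOR_SYS_CLK_CTRL with the pattern
 * (reg & 0xf8) | depth, i.e. only the low three modulator-depth bits
 * change while the SYS_CLK bits chosen at probe time (e.g. the 27 MHz
 * crystal setting) are preserved. A standalone, userspace-compilable
 * check of that masking follows; the 0x80 value is an assumed
 * placeholder for a clock bit, not a datasheet constant.
 */
#if 0	/* example only, never compiled into the driver */
#include <assert.h>
#include <stdint.h>

static uint8_t set_mod_depth(uint8_t reg, uint8_t depth)
{
	/* keep bits 7-3 (clock config), replace bits 2-0 (depth) */
	return (reg & 0xf8) | (depth & 0x07);
}

int main(void)
{
	/* a set clock bit (0x80, assumed) survives a depth change */
	assert(set_mod_depth(0x80 | 0x01, 0x07) == 0x87);
	return 0;
}
#endif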
linux-master
drivers/nfc/trf7970a.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Sony NFC Port-100 Series driver
 * Copyright (c) 2013, Intel Corporation.
 *
 * Partly based/Inspired by Stephen Tiedemann's nfcpy
 */

#include <linux/module.h>
#include <linux/usb.h>
#include <net/nfc/digital.h>

#define VERSION "0.1"

#define SONY_VENDOR_ID		0x054c
#define RCS380S_PRODUCT_ID	0x06c1
#define RCS380P_PRODUCT_ID	0x06c3

#define PORT100_PROTOCOLS (NFC_PROTO_JEWEL_MASK    | \
			   NFC_PROTO_MIFARE_MASK   | \
			   NFC_PROTO_FELICA_MASK   | \
			   NFC_PROTO_NFC_DEP_MASK  | \
			   NFC_PROTO_ISO14443_MASK | \
			   NFC_PROTO_ISO14443_B_MASK)

#define PORT100_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \
			      NFC_DIGITAL_DRV_CAPS_TG_CRC)

/* Standard port100 frame definitions */
#define PORT100_FRAME_HEADER_LEN (sizeof(struct port100_frame) \
				  + 2) /* data[0] CC, data[1] SCC */
#define PORT100_FRAME_TAIL_LEN 2 /* data[len] DCS, data[len + 1] postamble */

#define PORT100_COMM_RF_HEAD_MAX_LEN (sizeof(struct port100_tg_comm_rf_cmd))

/*
 * Max extended frame payload len, excluding CC and SCC
 * which are already in PORT100_FRAME_HEADER_LEN.
 */
#define PORT100_FRAME_MAX_PAYLOAD_LEN 1001

#define PORT100_FRAME_ACK_SIZE 6 /* Preamble (1), SoPC (2), ACK Code (2), Postamble (1) */
static u8 ack_frame[PORT100_FRAME_ACK_SIZE] = {
	0x00, 0x00, 0xff, 0x00, 0xff, 0x00
};

#define PORT100_FRAME_CHECKSUM(f) (f->data[le16_to_cpu(f->datalen)])
#define PORT100_FRAME_POSTAMBLE(f) (f->data[le16_to_cpu(f->datalen) + 1])

/* start of frame */
#define PORT100_FRAME_SOF	0x00FF
#define PORT100_FRAME_EXT	0xFFFF
#define PORT100_FRAME_ACK	0x00FF

/* Port-100 command: in or out */
#define PORT100_FRAME_DIRECTION(f) (f->data[0]) /* CC */
#define PORT100_FRAME_DIR_OUT 0xD6
#define PORT100_FRAME_DIR_IN  0xD7

/* Port-100 sub-command */
#define PORT100_FRAME_CMD(f) (f->data[1]) /* SCC */

#define PORT100_CMD_GET_FIRMWARE_VERSION 0x20
#define PORT100_CMD_GET_COMMAND_TYPE     0x28
#define PORT100_CMD_SET_COMMAND_TYPE     0x2A

#define PORT100_CMD_IN_SET_RF       0x00
#define PORT100_CMD_IN_SET_PROTOCOL 0x02
#define PORT100_CMD_IN_COMM_RF      0x04

#define PORT100_CMD_TG_SET_RF       0x40
#define PORT100_CMD_TG_SET_PROTOCOL 0x42
#define PORT100_CMD_TG_SET_RF_OFF   0x46
#define PORT100_CMD_TG_COMM_RF      0x48

#define PORT100_CMD_SWITCH_RF       0x06

#define PORT100_CMD_RESPONSE(cmd) (cmd + 1)

#define PORT100_CMD_TYPE_IS_SUPPORTED(mask, cmd_type) \
	((mask) & (0x01 << (cmd_type)))
#define PORT100_CMD_TYPE_0	0
#define PORT100_CMD_TYPE_1	1

#define PORT100_CMD_STATUS_OK      0x00
#define PORT100_CMD_STATUS_TIMEOUT 0x80

#define PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK 0x01
#define PORT100_MDAA_TGT_WAS_ACTIVATED_MASK      0x02

struct port100;

typedef void (*port100_send_async_complete_t)(struct port100 *dev, void *arg,
					      struct sk_buff *resp);

/*
 * Setting sets structure for in_set_rf command
 *
 * @in_*_set_number: Represent the entry indexes in the port-100 RF Base Table.
 *                   This table contains multiple RF setting sets required for
 *                   RF communication.
 *
 * @in_*_comm_type: These fields set the communication type to be used.
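 *
 * For example (values from the table below), the NFC_DIGITAL_RF_TECH_106A
 * entry pairs send set number 2 with receive set number 15, both using
 * the PORT100_COMM_TYPE_IN_106A communication type.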
*/ struct port100_in_rf_setting { u8 in_send_set_number; u8 in_send_comm_type; u8 in_recv_set_number; u8 in_recv_comm_type; } __packed; #define PORT100_COMM_TYPE_IN_212F 0x01 #define PORT100_COMM_TYPE_IN_424F 0x02 #define PORT100_COMM_TYPE_IN_106A 0x03 #define PORT100_COMM_TYPE_IN_106B 0x07 static const struct port100_in_rf_setting in_rf_settings[] = { [NFC_DIGITAL_RF_TECH_212F] = { .in_send_set_number = 1, .in_send_comm_type = PORT100_COMM_TYPE_IN_212F, .in_recv_set_number = 15, .in_recv_comm_type = PORT100_COMM_TYPE_IN_212F, }, [NFC_DIGITAL_RF_TECH_424F] = { .in_send_set_number = 1, .in_send_comm_type = PORT100_COMM_TYPE_IN_424F, .in_recv_set_number = 15, .in_recv_comm_type = PORT100_COMM_TYPE_IN_424F, }, [NFC_DIGITAL_RF_TECH_106A] = { .in_send_set_number = 2, .in_send_comm_type = PORT100_COMM_TYPE_IN_106A, .in_recv_set_number = 15, .in_recv_comm_type = PORT100_COMM_TYPE_IN_106A, }, [NFC_DIGITAL_RF_TECH_106B] = { .in_send_set_number = 3, .in_send_comm_type = PORT100_COMM_TYPE_IN_106B, .in_recv_set_number = 15, .in_recv_comm_type = PORT100_COMM_TYPE_IN_106B, }, /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */ [NFC_DIGITAL_RF_TECH_LAST] = { 0 }, }; /** * struct port100_tg_rf_setting - Setting sets structure for tg_set_rf command * * @tg_set_number: Represents the entry index in the port-100 RF Base Table. * This table contains multiple RF setting sets required for RF * communication. this field is used for both send and receive * settings. * * @tg_comm_type: Sets the communication type to be used to send and receive * data. */ struct port100_tg_rf_setting { u8 tg_set_number; u8 tg_comm_type; } __packed; #define PORT100_COMM_TYPE_TG_106A 0x0B #define PORT100_COMM_TYPE_TG_212F 0x0C #define PORT100_COMM_TYPE_TG_424F 0x0D static const struct port100_tg_rf_setting tg_rf_settings[] = { [NFC_DIGITAL_RF_TECH_106A] = { .tg_set_number = 8, .tg_comm_type = PORT100_COMM_TYPE_TG_106A, }, [NFC_DIGITAL_RF_TECH_212F] = { .tg_set_number = 8, .tg_comm_type = PORT100_COMM_TYPE_TG_212F, }, [NFC_DIGITAL_RF_TECH_424F] = { .tg_set_number = 8, .tg_comm_type = PORT100_COMM_TYPE_TG_424F, }, /* Ensures the array has NFC_DIGITAL_RF_TECH_LAST elements */ [NFC_DIGITAL_RF_TECH_LAST] = { 0 }, }; #define PORT100_IN_PROT_INITIAL_GUARD_TIME 0x00 #define PORT100_IN_PROT_ADD_CRC 0x01 #define PORT100_IN_PROT_CHECK_CRC 0x02 #define PORT100_IN_PROT_MULTI_CARD 0x03 #define PORT100_IN_PROT_ADD_PARITY 0x04 #define PORT100_IN_PROT_CHECK_PARITY 0x05 #define PORT100_IN_PROT_BITWISE_AC_RECV_MODE 0x06 #define PORT100_IN_PROT_VALID_BIT_NUMBER 0x07 #define PORT100_IN_PROT_CRYPTO1 0x08 #define PORT100_IN_PROT_ADD_SOF 0x09 #define PORT100_IN_PROT_CHECK_SOF 0x0A #define PORT100_IN_PROT_ADD_EOF 0x0B #define PORT100_IN_PROT_CHECK_EOF 0x0C #define PORT100_IN_PROT_DEAF_TIME 0x0E #define PORT100_IN_PROT_CRM 0x0F #define PORT100_IN_PROT_CRM_MIN_LEN 0x10 #define PORT100_IN_PROT_T1_TAG_FRAME 0x11 #define PORT100_IN_PROT_RFCA 0x12 #define PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR 0x13 #define PORT100_IN_PROT_END 0x14 #define PORT100_IN_MAX_NUM_PROTOCOLS 19 #define PORT100_TG_PROT_TU 0x00 #define PORT100_TG_PROT_RF_OFF 0x01 #define PORT100_TG_PROT_CRM 0x02 #define PORT100_TG_PROT_END 0x03 #define PORT100_TG_MAX_NUM_PROTOCOLS 3 struct port100_protocol { u8 number; u8 value; } __packed; static const struct port100_protocol in_protocols[][PORT100_IN_MAX_NUM_PROTOCOLS + 1] = { [NFC_DIGITAL_FRAMING_NFCA_SHORT] = { { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 }, { PORT100_IN_PROT_ADD_CRC, 0 }, { PORT100_IN_PROT_CHECK_CRC, 0 }, { 
PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 0 }, { PORT100_IN_PROT_CHECK_PARITY, 1 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 7 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 0 }, { PORT100_IN_PROT_CHECK_SOF, 0 }, { PORT100_IN_PROT_ADD_EOF, 0 }, { PORT100_IN_PROT_CHECK_EOF, 0 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_STANDARD] = { { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 }, { PORT100_IN_PROT_ADD_CRC, 0 }, { PORT100_IN_PROT_CHECK_CRC, 0 }, { PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 1 }, { PORT100_IN_PROT_CHECK_PARITY, 1 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 0 }, { PORT100_IN_PROT_CHECK_SOF, 0 }, { PORT100_IN_PROT_ADD_EOF, 0 }, { PORT100_IN_PROT_CHECK_EOF, 0 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = { { PORT100_IN_PROT_INITIAL_GUARD_TIME, 6 }, { PORT100_IN_PROT_ADD_CRC, 1 }, { PORT100_IN_PROT_CHECK_CRC, 1 }, { PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 1 }, { PORT100_IN_PROT_CHECK_PARITY, 1 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 0 }, { PORT100_IN_PROT_CHECK_SOF, 0 }, { PORT100_IN_PROT_ADD_EOF, 0 }, { PORT100_IN_PROT_CHECK_EOF, 0 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_T1T] = { /* nfc_digital_framing_nfca_short */ { PORT100_IN_PROT_ADD_CRC, 2 }, { PORT100_IN_PROT_CHECK_CRC, 2 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_T1_TAG_FRAME, 2 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_T2T] = { /* nfc_digital_framing_nfca_standard */ { PORT100_IN_PROT_ADD_CRC, 1 }, { PORT100_IN_PROT_CHECK_CRC, 0 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_T4T] = { /* nfc_digital_framing_nfca_standard_with_crc_a */ { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = { /* nfc_digital_framing_nfca_standard */ { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF] = { { PORT100_IN_PROT_INITIAL_GUARD_TIME, 18 }, { PORT100_IN_PROT_ADD_CRC, 1 }, { PORT100_IN_PROT_CHECK_CRC, 1 }, { PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 0 }, { PORT100_IN_PROT_CHECK_PARITY, 0 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 0 }, { PORT100_IN_PROT_CHECK_SOF, 0 }, { PORT100_IN_PROT_ADD_EOF, 0 }, { PORT100_IN_PROT_CHECK_EOF, 0 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF_T3T] = { /* 
nfc_digital_framing_nfcf */ { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = { /* nfc_digital_framing_nfcf */ { PORT100_IN_PROT_INITIAL_GUARD_TIME, 18 }, { PORT100_IN_PROT_ADD_CRC, 1 }, { PORT100_IN_PROT_CHECK_CRC, 1 }, { PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 0 }, { PORT100_IN_PROT_CHECK_PARITY, 0 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 0 }, { PORT100_IN_PROT_CHECK_SOF, 0 }, { PORT100_IN_PROT_ADD_EOF, 0 }, { PORT100_IN_PROT_CHECK_EOF, 0 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = { { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCB] = { { PORT100_IN_PROT_INITIAL_GUARD_TIME, 20 }, { PORT100_IN_PROT_ADD_CRC, 1 }, { PORT100_IN_PROT_CHECK_CRC, 1 }, { PORT100_IN_PROT_MULTI_CARD, 0 }, { PORT100_IN_PROT_ADD_PARITY, 0 }, { PORT100_IN_PROT_CHECK_PARITY, 0 }, { PORT100_IN_PROT_BITWISE_AC_RECV_MODE, 0 }, { PORT100_IN_PROT_VALID_BIT_NUMBER, 8 }, { PORT100_IN_PROT_CRYPTO1, 0 }, { PORT100_IN_PROT_ADD_SOF, 1 }, { PORT100_IN_PROT_CHECK_SOF, 1 }, { PORT100_IN_PROT_ADD_EOF, 1 }, { PORT100_IN_PROT_CHECK_EOF, 1 }, { PORT100_IN_PROT_DEAF_TIME, 4 }, { PORT100_IN_PROT_CRM, 0 }, { PORT100_IN_PROT_CRM_MIN_LEN, 0 }, { PORT100_IN_PROT_T1_TAG_FRAME, 0 }, { PORT100_IN_PROT_RFCA, 0 }, { PORT100_IN_PROT_GUARD_TIME_AT_INITIATOR, 6 }, { PORT100_IN_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCB_T4T] = { /* nfc_digital_framing_nfcb */ { PORT100_IN_PROT_END, 0 }, }, /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */ [NFC_DIGITAL_FRAMING_LAST] = { { PORT100_IN_PROT_END, 0 }, }, }; static const struct port100_protocol tg_protocols[][PORT100_TG_MAX_NUM_PROTOCOLS + 1] = { [NFC_DIGITAL_FRAMING_NFCA_SHORT] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_STANDARD] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_T1T] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_T2T] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCA_NFC_DEP] = { { PORT100_TG_PROT_TU, 1 }, { PORT100_TG_PROT_RF_OFF, 0 }, { PORT100_TG_PROT_CRM, 7 }, { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF_T3T] = { { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFCF_NFC_DEP] = { { PORT100_TG_PROT_TU, 1 }, { PORT100_TG_PROT_RF_OFF, 0 }, { PORT100_TG_PROT_CRM, 7 }, { PORT100_TG_PROT_END, 0 }, }, [NFC_DIGITAL_FRAMING_NFC_DEP_ACTIVATED] = { { PORT100_TG_PROT_RF_OFF, 1 }, { PORT100_TG_PROT_END, 0 }, }, /* Ensures the array has NFC_DIGITAL_FRAMING_LAST elements */ [NFC_DIGITAL_FRAMING_LAST] = { { PORT100_TG_PROT_END, 0 }, }, }; struct port100 { struct nfc_digital_dev *nfc_digital_dev; int skb_headroom; int skb_tailroom; struct usb_device *udev; struct usb_interface *interface; struct urb *out_urb; struct urb *in_urb; /* This mutex protects the out_urb and avoids to submit a new command * through port100_send_frame_async() while the previous one is being * canceled through port100_abort_cmd(). */ struct mutex out_urb_lock; struct work_struct cmd_complete_work; u8 cmd_type; /* The digital stack serializes commands to be sent. 
There is no need * for any queuing/locking mechanism at driver level. */ struct port100_cmd *cmd; bool cmd_cancel; struct completion cmd_cancel_done; }; struct port100_cmd { u8 code; int status; struct sk_buff *req; struct sk_buff *resp; int resp_len; port100_send_async_complete_t complete_cb; void *complete_cb_context; }; struct port100_frame { u8 preamble; __be16 start_frame; __be16 extended_frame; __le16 datalen; u8 datalen_checksum; u8 data[]; } __packed; struct port100_ack_frame { u8 preamble; __be16 start_frame; __be16 ack_frame; u8 postambule; } __packed; struct port100_cb_arg { nfc_digital_cmd_complete_t complete_cb; void *complete_arg; u8 mdaa; }; struct port100_tg_comm_rf_cmd { __le16 guard_time; __le16 send_timeout; u8 mdaa; u8 nfca_param[6]; u8 nfcf_param[18]; u8 mf_halted; u8 arae_flag; __le16 recv_timeout; u8 data[]; } __packed; struct port100_tg_comm_rf_res { u8 comm_type; u8 ar_status; u8 target_activated; __le32 status; u8 data[]; } __packed; /* The rule: value + checksum = 0 */ static inline u8 port100_checksum(u16 value) { return ~(((u8 *)&value)[0] + ((u8 *)&value)[1]) + 1; } /* The rule: sum(data elements) + checksum = 0 */ static u8 port100_data_checksum(const u8 *data, int datalen) { u8 sum = 0; int i; for (i = 0; i < datalen; i++) sum += data[i]; return port100_checksum(sum); } static void port100_tx_frame_init(void *_frame, u8 cmd_code) { struct port100_frame *frame = _frame; frame->preamble = 0; frame->start_frame = cpu_to_be16(PORT100_FRAME_SOF); frame->extended_frame = cpu_to_be16(PORT100_FRAME_EXT); PORT100_FRAME_DIRECTION(frame) = PORT100_FRAME_DIR_OUT; PORT100_FRAME_CMD(frame) = cmd_code; frame->datalen = cpu_to_le16(2); } static void port100_tx_frame_finish(void *_frame) { struct port100_frame *frame = _frame; frame->datalen_checksum = port100_checksum(le16_to_cpu(frame->datalen)); PORT100_FRAME_CHECKSUM(frame) = port100_data_checksum(frame->data, le16_to_cpu(frame->datalen)); PORT100_FRAME_POSTAMBLE(frame) = 0; } static void port100_tx_update_payload_len(void *_frame, int len) { struct port100_frame *frame = _frame; le16_add_cpu(&frame->datalen, len); } static bool port100_rx_frame_is_valid(const void *_frame) { u8 checksum; const struct port100_frame *frame = _frame; if (frame->start_frame != cpu_to_be16(PORT100_FRAME_SOF) || frame->extended_frame != cpu_to_be16(PORT100_FRAME_EXT)) return false; checksum = port100_checksum(le16_to_cpu(frame->datalen)); if (checksum != frame->datalen_checksum) return false; checksum = port100_data_checksum(frame->data, le16_to_cpu(frame->datalen)); if (checksum != PORT100_FRAME_CHECKSUM(frame)) return false; return true; } static bool port100_rx_frame_is_ack(const struct port100_ack_frame *frame) { return (frame->start_frame == cpu_to_be16(PORT100_FRAME_SOF) && frame->ack_frame == cpu_to_be16(PORT100_FRAME_ACK)); } static inline int port100_rx_frame_size(const void *frame) { const struct port100_frame *f = frame; return sizeof(struct port100_frame) + le16_to_cpu(f->datalen) + PORT100_FRAME_TAIL_LEN; } static bool port100_rx_frame_is_cmd_response(const struct port100 *dev, const void *frame) { const struct port100_frame *f = frame; return (PORT100_FRAME_CMD(f) == PORT100_CMD_RESPONSE(dev->cmd->code)); } static void port100_recv_response(struct urb *urb) { struct port100 *dev = urb->context; struct port100_cmd *cmd = dev->cmd; u8 *in_frame; cmd->status = urb->status; switch (urb->status) { case 0: break; /* success */ case -ECONNRESET: case -ENOENT: nfc_dbg(&dev->interface->dev, "The urb has been canceled (status %d)\n", 
urb->status);
		goto sched_wq;
	case -ESHUTDOWN:
	default:
		nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
			urb->status);
		goto sched_wq;
	}

	in_frame = dev->in_urb->transfer_buffer;

	if (!port100_rx_frame_is_valid(in_frame)) {
		nfc_err(&dev->interface->dev, "Received an invalid frame\n");
		cmd->status = -EIO;
		goto sched_wq;
	}

	print_hex_dump_debug("PORT100 RX: ", DUMP_PREFIX_NONE, 16, 1, in_frame,
			     port100_rx_frame_size(in_frame), false);

	if (!port100_rx_frame_is_cmd_response(dev, in_frame)) {
		nfc_err(&dev->interface->dev,
			"It's not the response to the last command\n");
		cmd->status = -EIO;
		goto sched_wq;
	}

sched_wq:
	schedule_work(&dev->cmd_complete_work);
}

static int port100_submit_urb_for_response(const struct port100 *dev,
					   gfp_t flags)
{
	dev->in_urb->complete = port100_recv_response;

	return usb_submit_urb(dev->in_urb, flags);
}

static void port100_recv_ack(struct urb *urb)
{
	struct port100 *dev = urb->context;
	struct port100_cmd *cmd = dev->cmd;
	const struct port100_ack_frame *in_frame;
	int rc;

	cmd->status = urb->status;

	switch (urb->status) {
	case 0:
		break;		/* success */
	case -ECONNRESET:
	case -ENOENT:
		nfc_dbg(&dev->interface->dev,
			"The urb has been stopped (status %d)\n", urb->status);
		goto sched_wq;
	case -ESHUTDOWN:
	default:
		nfc_err(&dev->interface->dev, "Urb failure (status %d)\n",
			urb->status);
		goto sched_wq;
	}

	in_frame = dev->in_urb->transfer_buffer;

	if (!port100_rx_frame_is_ack(in_frame)) {
		nfc_err(&dev->interface->dev, "Received an invalid ack\n");
		cmd->status = -EIO;
		goto sched_wq;
	}

	rc = port100_submit_urb_for_response(dev, GFP_ATOMIC);
	if (rc) {
		nfc_err(&dev->interface->dev,
			"usb_submit_urb failed with result %d\n", rc);
		cmd->status = rc;
		goto sched_wq;
	}

	return;

sched_wq:
	schedule_work(&dev->cmd_complete_work);
}

static int port100_submit_urb_for_ack(const struct port100 *dev, gfp_t flags)
{
	dev->in_urb->complete = port100_recv_ack;

	return usb_submit_urb(dev->in_urb, flags);
}

static int port100_send_ack(struct port100 *dev)
{
	int rc = 0;

	mutex_lock(&dev->out_urb_lock);

	/*
	 * If a prior cancel is already in flight (dev->cmd_cancel == true),
	 * there is no need to submit another cancel frame. This call then
	 * simply waits for the prior cancel to complete (or, rarely, for a
	 * later one if a new cancel was started before that wait finished).
	 * Either way, the completion below is signaled sooner or later.
	 */
	if (!dev->cmd_cancel) {
		reinit_completion(&dev->cmd_cancel_done);

		usb_kill_urb(dev->out_urb);

		dev->out_urb->transfer_buffer = ack_frame;
		dev->out_urb->transfer_buffer_length = sizeof(ack_frame);
		rc = usb_submit_urb(dev->out_urb, GFP_KERNEL);

		/*
		 * Set the cmd_cancel flag only if the URB has been
		 * successfully submitted. It will be reset by the out
		 * URB completion callback port100_send_complete().
		 */
		dev->cmd_cancel = !rc;
	}

	mutex_unlock(&dev->out_urb_lock);

	if (!rc)
		wait_for_completion(&dev->cmd_cancel_done);

	return rc;
}

static int port100_send_frame_async(struct port100 *dev,
				    const struct sk_buff *out,
				    const struct sk_buff *in, int in_len)
{
	int rc;

	mutex_lock(&dev->out_urb_lock);

	/* A command cancel frame has been sent through dev->out_urb. Don't
	 * try to submit a new one.
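	 *
	 * Note that out_urb_lock, taken above, is what makes this check
	 * race-free against the cancel path in port100_send_ack().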
*/ if (dev->cmd_cancel) { rc = -EAGAIN; goto exit; } dev->out_urb->transfer_buffer = out->data; dev->out_urb->transfer_buffer_length = out->len; dev->in_urb->transfer_buffer = in->data; dev->in_urb->transfer_buffer_length = in_len; print_hex_dump_debug("PORT100 TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); rc = usb_submit_urb(dev->out_urb, GFP_KERNEL); if (rc) goto exit; rc = port100_submit_urb_for_ack(dev, GFP_KERNEL); if (rc) usb_kill_urb(dev->out_urb); exit: mutex_unlock(&dev->out_urb_lock); return rc; } static void port100_build_cmd_frame(struct port100 *dev, u8 cmd_code, struct sk_buff *skb) { /* payload is already there, just update datalen */ int payload_len = skb->len; skb_push(skb, PORT100_FRAME_HEADER_LEN); skb_put(skb, PORT100_FRAME_TAIL_LEN); port100_tx_frame_init(skb->data, cmd_code); port100_tx_update_payload_len(skb->data, payload_len); port100_tx_frame_finish(skb->data); } static void port100_send_async_complete(struct port100 *dev) { struct port100_cmd *cmd = dev->cmd; int status = cmd->status; struct sk_buff *req = cmd->req; struct sk_buff *resp = cmd->resp; dev_kfree_skb(req); dev->cmd = NULL; if (status < 0) { cmd->complete_cb(dev, cmd->complete_cb_context, ERR_PTR(status)); dev_kfree_skb(resp); goto done; } skb_put(resp, port100_rx_frame_size(resp->data)); skb_pull(resp, PORT100_FRAME_HEADER_LEN); skb_trim(resp, resp->len - PORT100_FRAME_TAIL_LEN); cmd->complete_cb(dev, cmd->complete_cb_context, resp); done: kfree(cmd); } static int port100_send_cmd_async(struct port100 *dev, u8 cmd_code, struct sk_buff *req, port100_send_async_complete_t complete_cb, void *complete_cb_context) { struct port100_cmd *cmd; struct sk_buff *resp; int rc; int resp_len = PORT100_FRAME_HEADER_LEN + PORT100_FRAME_MAX_PAYLOAD_LEN + PORT100_FRAME_TAIL_LEN; if (dev->cmd) { nfc_err(&dev->interface->dev, "A command is still in process\n"); return -EBUSY; } resp = alloc_skb(resp_len, GFP_KERNEL); if (!resp) return -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) { dev_kfree_skb(resp); return -ENOMEM; } cmd->code = cmd_code; cmd->req = req; cmd->resp = resp; cmd->resp_len = resp_len; cmd->complete_cb = complete_cb; cmd->complete_cb_context = complete_cb_context; port100_build_cmd_frame(dev, cmd_code, req); dev->cmd = cmd; rc = port100_send_frame_async(dev, req, resp, resp_len); if (rc) { kfree(cmd); dev_kfree_skb(resp); dev->cmd = NULL; } return rc; } struct port100_sync_cmd_response { struct sk_buff *resp; struct completion done; }; static void port100_wq_cmd_complete(struct work_struct *work) { struct port100 *dev = container_of(work, struct port100, cmd_complete_work); port100_send_async_complete(dev); } static void port100_send_sync_complete(struct port100 *dev, void *_arg, struct sk_buff *resp) { struct port100_sync_cmd_response *arg = _arg; arg->resp = resp; complete(&arg->done); } static struct sk_buff *port100_send_cmd_sync(struct port100 *dev, u8 cmd_code, struct sk_buff *req) { int rc; struct port100_sync_cmd_response arg; init_completion(&arg.done); rc = port100_send_cmd_async(dev, cmd_code, req, port100_send_sync_complete, &arg); if (rc) { dev_kfree_skb(req); return ERR_PTR(rc); } wait_for_completion(&arg.done); return arg.resp; } static void port100_send_complete(struct urb *urb) { struct port100 *dev = urb->context; if (dev->cmd_cancel) { complete_all(&dev->cmd_cancel_done); dev->cmd_cancel = false; } switch (urb->status) { case 0: break; /* success */ case -ECONNRESET: case -ENOENT: nfc_dbg(&dev->interface->dev, "The urb has been stopped (status %d)\n", 
urb->status); break; case -ESHUTDOWN: default: nfc_err(&dev->interface->dev, "Urb failure (status %d)\n", urb->status); } } static void port100_abort_cmd(struct nfc_digital_dev *ddev) { struct port100 *dev = nfc_digital_get_drvdata(ddev); /* An ack will cancel the last issued command */ port100_send_ack(dev); /* cancel the urb request */ usb_kill_urb(dev->in_urb); } static struct sk_buff *port100_alloc_skb(const struct port100 *dev, unsigned int size) { struct sk_buff *skb; skb = alloc_skb(dev->skb_headroom + dev->skb_tailroom + size, GFP_KERNEL); if (skb) skb_reserve(skb, dev->skb_headroom); return skb; } static int port100_set_command_type(struct port100 *dev, u8 command_type) { struct sk_buff *skb; struct sk_buff *resp; int rc; skb = port100_alloc_skb(dev, 1); if (!skb) return -ENOMEM; skb_put_u8(skb, command_type); resp = port100_send_cmd_sync(dev, PORT100_CMD_SET_COMMAND_TYPE, skb); if (IS_ERR(resp)) return PTR_ERR(resp); rc = resp->data[0]; dev_kfree_skb(resp); return rc; } static u64 port100_get_command_type_mask(struct port100 *dev) { struct sk_buff *skb; struct sk_buff *resp; u64 mask; skb = port100_alloc_skb(dev, 0); if (!skb) return 0; resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_COMMAND_TYPE, skb); if (IS_ERR(resp)) return 0; if (resp->len < 8) mask = 0; else mask = be64_to_cpu(*(__be64 *)resp->data); dev_kfree_skb(resp); return mask; } static u16 port100_get_firmware_version(struct port100 *dev) { struct sk_buff *skb; struct sk_buff *resp; u16 fw_ver; skb = port100_alloc_skb(dev, 0); if (!skb) return 0; resp = port100_send_cmd_sync(dev, PORT100_CMD_GET_FIRMWARE_VERSION, skb); if (IS_ERR(resp)) return 0; fw_ver = le16_to_cpu(*(__le16 *)resp->data); dev_kfree_skb(resp); return fw_ver; } static int port100_switch_rf(struct nfc_digital_dev *ddev, bool on) { struct port100 *dev = nfc_digital_get_drvdata(ddev); struct sk_buff *skb, *resp; skb = port100_alloc_skb(dev, 1); if (!skb) return -ENOMEM; skb_put_u8(skb, on ? 
1 : 0); /* Cancel the last command if the device is being switched off */ if (!on) port100_abort_cmd(ddev); resp = port100_send_cmd_sync(dev, PORT100_CMD_SWITCH_RF, skb); if (IS_ERR(resp)) return PTR_ERR(resp); dev_kfree_skb(resp); return 0; } static int port100_in_set_rf(struct nfc_digital_dev *ddev, u8 rf) { struct port100 *dev = nfc_digital_get_drvdata(ddev); struct sk_buff *skb; struct sk_buff *resp; int rc; if (rf >= NFC_DIGITAL_RF_TECH_LAST) return -EINVAL; skb = port100_alloc_skb(dev, sizeof(struct port100_in_rf_setting)); if (!skb) return -ENOMEM; skb_put_data(skb, &in_rf_settings[rf], sizeof(struct port100_in_rf_setting)); resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_RF, skb); if (IS_ERR(resp)) return PTR_ERR(resp); rc = resp->data[0]; dev_kfree_skb(resp); return rc; } static int port100_in_set_framing(struct nfc_digital_dev *ddev, int param) { struct port100 *dev = nfc_digital_get_drvdata(ddev); const struct port100_protocol *protocols; struct sk_buff *skb; struct sk_buff *resp; int num_protocols; size_t size; int rc; if (param >= NFC_DIGITAL_FRAMING_LAST) return -EINVAL; protocols = in_protocols[param]; num_protocols = 0; while (protocols[num_protocols].number != PORT100_IN_PROT_END) num_protocols++; if (!num_protocols) return 0; size = sizeof(struct port100_protocol) * num_protocols; skb = port100_alloc_skb(dev, size); if (!skb) return -ENOMEM; skb_put_data(skb, protocols, size); resp = port100_send_cmd_sync(dev, PORT100_CMD_IN_SET_PROTOCOL, skb); if (IS_ERR(resp)) return PTR_ERR(resp); rc = resp->data[0]; dev_kfree_skb(resp); return rc; } static int port100_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { if (type == NFC_DIGITAL_CONFIG_RF_TECH) return port100_in_set_rf(ddev, param); if (type == NFC_DIGITAL_CONFIG_FRAMING) return port100_in_set_framing(ddev, param); return -EINVAL; } static void port100_in_comm_rf_complete(struct port100 *dev, void *arg, struct sk_buff *resp) { const struct port100_cb_arg *cb_arg = arg; nfc_digital_cmd_complete_t cb = cb_arg->complete_cb; u32 status; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); goto exit; } if (resp->len < 4) { nfc_err(&dev->interface->dev, "Invalid packet length received\n"); rc = -EIO; goto error; } status = le32_to_cpu(*(__le32 *)resp->data); skb_pull(resp, sizeof(u32)); if (status == PORT100_CMD_STATUS_TIMEOUT) { rc = -ETIMEDOUT; goto error; } if (status != PORT100_CMD_STATUS_OK) { nfc_err(&dev->interface->dev, "in_comm_rf failed with status 0x%08x\n", status); rc = -EIO; goto error; } /* Remove collision bits byte */ skb_pull(resp, 1); goto exit; error: kfree_skb(resp); resp = ERR_PTR(rc); exit: cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp); kfree(cb_arg); } static int port100_in_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 _timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct port100 *dev = nfc_digital_get_drvdata(ddev); struct port100_cb_arg *cb_arg; __le16 timeout; cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL); if (!cb_arg) return -ENOMEM; cb_arg->complete_cb = cb; cb_arg->complete_arg = arg; timeout = cpu_to_le16(_timeout * 10); memcpy(skb_push(skb, sizeof(__le16)), &timeout, sizeof(__le16)); return port100_send_cmd_async(dev, PORT100_CMD_IN_COMM_RF, skb, port100_in_comm_rf_complete, cb_arg); } static int port100_tg_set_rf(struct nfc_digital_dev *ddev, u8 rf) { struct port100 *dev = nfc_digital_get_drvdata(ddev); struct sk_buff *skb; struct sk_buff *resp; int rc; if (rf >= NFC_DIGITAL_RF_TECH_LAST) return -EINVAL; skb = port100_alloc_skb(dev, 
sizeof(struct port100_tg_rf_setting)); if (!skb) return -ENOMEM; skb_put_data(skb, &tg_rf_settings[rf], sizeof(struct port100_tg_rf_setting)); resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_RF, skb); if (IS_ERR(resp)) return PTR_ERR(resp); rc = resp->data[0]; dev_kfree_skb(resp); return rc; } static int port100_tg_set_framing(struct nfc_digital_dev *ddev, int param) { struct port100 *dev = nfc_digital_get_drvdata(ddev); const struct port100_protocol *protocols; struct sk_buff *skb; struct sk_buff *resp; int rc; int num_protocols; size_t size; if (param >= NFC_DIGITAL_FRAMING_LAST) return -EINVAL; protocols = tg_protocols[param]; num_protocols = 0; while (protocols[num_protocols].number != PORT100_TG_PROT_END) num_protocols++; if (!num_protocols) return 0; size = sizeof(struct port100_protocol) * num_protocols; skb = port100_alloc_skb(dev, size); if (!skb) return -ENOMEM; skb_put_data(skb, protocols, size); resp = port100_send_cmd_sync(dev, PORT100_CMD_TG_SET_PROTOCOL, skb); if (IS_ERR(resp)) return PTR_ERR(resp); rc = resp->data[0]; dev_kfree_skb(resp); return rc; } static int port100_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { if (type == NFC_DIGITAL_CONFIG_RF_TECH) return port100_tg_set_rf(ddev, param); if (type == NFC_DIGITAL_CONFIG_FRAMING) return port100_tg_set_framing(ddev, param); return -EINVAL; } static bool port100_tg_target_activated(struct port100 *dev, u8 tgt_activated) { u8 mask; switch (dev->cmd_type) { case PORT100_CMD_TYPE_0: mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK; break; case PORT100_CMD_TYPE_1: mask = PORT100_MDAA_TGT_HAS_BEEN_ACTIVATED_MASK | PORT100_MDAA_TGT_WAS_ACTIVATED_MASK; break; default: nfc_err(&dev->interface->dev, "Unknown command type\n"); return false; } return ((tgt_activated & mask) == mask); } static void port100_tg_comm_rf_complete(struct port100 *dev, void *arg, struct sk_buff *resp) { u32 status; const struct port100_cb_arg *cb_arg = arg; nfc_digital_cmd_complete_t cb = cb_arg->complete_cb; struct port100_tg_comm_rf_res *hdr; if (IS_ERR(resp)) goto exit; hdr = (struct port100_tg_comm_rf_res *)resp->data; status = le32_to_cpu(hdr->status); if (cb_arg->mdaa && !port100_tg_target_activated(dev, hdr->target_activated)) { kfree_skb(resp); resp = ERR_PTR(-ETIMEDOUT); goto exit; } skb_pull(resp, sizeof(struct port100_tg_comm_rf_res)); if (status != PORT100_CMD_STATUS_OK) { kfree_skb(resp); if (status == PORT100_CMD_STATUS_TIMEOUT) resp = ERR_PTR(-ETIMEDOUT); else resp = ERR_PTR(-EIO); } exit: cb(dev->nfc_digital_dev, cb_arg->complete_arg, resp); kfree(cb_arg); } static int port100_tg_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct port100 *dev = nfc_digital_get_drvdata(ddev); struct port100_tg_comm_rf_cmd *hdr; struct port100_cb_arg *cb_arg; cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL); if (!cb_arg) return -ENOMEM; cb_arg->complete_cb = cb; cb_arg->complete_arg = arg; skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd)); hdr = (struct port100_tg_comm_rf_cmd *)skb->data; memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd)); hdr->guard_time = cpu_to_le16(500); hdr->send_timeout = cpu_to_le16(0xFFFF); hdr->recv_timeout = cpu_to_le16(timeout); return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb, port100_tg_comm_rf_complete, cb_arg); } static int port100_listen_mdaa(struct nfc_digital_dev *ddev, struct digital_tg_mdaa_params *params, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct port100 *dev = 
nfc_digital_get_drvdata(ddev); struct port100_tg_comm_rf_cmd *hdr; struct port100_cb_arg *cb_arg; struct sk_buff *skb; int rc; rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_RF_TECH, NFC_DIGITAL_RF_TECH_106A); if (rc) return rc; rc = port100_tg_configure_hw(ddev, NFC_DIGITAL_CONFIG_FRAMING, NFC_DIGITAL_FRAMING_NFCA_NFC_DEP); if (rc) return rc; cb_arg = kzalloc(sizeof(struct port100_cb_arg), GFP_KERNEL); if (!cb_arg) return -ENOMEM; cb_arg->complete_cb = cb; cb_arg->complete_arg = arg; cb_arg->mdaa = 1; skb = port100_alloc_skb(dev, 0); if (!skb) { kfree(cb_arg); return -ENOMEM; } skb_push(skb, sizeof(struct port100_tg_comm_rf_cmd)); hdr = (struct port100_tg_comm_rf_cmd *)skb->data; memset(hdr, 0, sizeof(struct port100_tg_comm_rf_cmd)); hdr->guard_time = 0; hdr->send_timeout = cpu_to_le16(0xFFFF); hdr->mdaa = 1; hdr->nfca_param[0] = (params->sens_res >> 8) & 0xFF; hdr->nfca_param[1] = params->sens_res & 0xFF; memcpy(hdr->nfca_param + 2, params->nfcid1, 3); hdr->nfca_param[5] = params->sel_res; memcpy(hdr->nfcf_param, params->nfcid2, 8); hdr->nfcf_param[16] = (params->sc >> 8) & 0xFF; hdr->nfcf_param[17] = params->sc & 0xFF; hdr->recv_timeout = cpu_to_le16(timeout); return port100_send_cmd_async(dev, PORT100_CMD_TG_COMM_RF, skb, port100_tg_comm_rf_complete, cb_arg); } static int port100_listen(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { const struct port100 *dev = nfc_digital_get_drvdata(ddev); struct sk_buff *skb; skb = port100_alloc_skb(dev, 0); if (!skb) return -ENOMEM; return port100_tg_send_cmd(ddev, skb, timeout, cb, arg); } static const struct nfc_digital_ops port100_digital_ops = { .in_configure_hw = port100_in_configure_hw, .in_send_cmd = port100_in_send_cmd, .tg_listen_mdaa = port100_listen_mdaa, .tg_listen = port100_listen, .tg_configure_hw = port100_tg_configure_hw, .tg_send_cmd = port100_tg_send_cmd, .switch_rf = port100_switch_rf, .abort_cmd = port100_abort_cmd, }; static const struct usb_device_id port100_table[] = { { USB_DEVICE(SONY_VENDOR_ID, RCS380S_PRODUCT_ID), }, { USB_DEVICE(SONY_VENDOR_ID, RCS380P_PRODUCT_ID), }, { } }; MODULE_DEVICE_TABLE(usb, port100_table); static int port100_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct port100 *dev; int rc; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int in_endpoint; int out_endpoint; u16 fw_version; u64 cmd_type_mask; int i; dev = devm_kzalloc(&interface->dev, sizeof(struct port100), GFP_KERNEL); if (!dev) return -ENOMEM; mutex_init(&dev->out_urb_lock); dev->udev = usb_get_dev(interface_to_usbdev(interface)); dev->interface = interface; usb_set_intfdata(interface, dev); in_endpoint = out_endpoint = 0; iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint)) in_endpoint = endpoint->bEndpointAddress; if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint)) out_endpoint = endpoint->bEndpointAddress; } if (!in_endpoint || !out_endpoint) { nfc_err(&interface->dev, "Could not find bulk-in or bulk-out endpoint\n"); rc = -ENODEV; goto error; } dev->in_urb = usb_alloc_urb(0, GFP_KERNEL); dev->out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!dev->in_urb || !dev->out_urb) { nfc_err(&interface->dev, "Could not allocate USB URBs\n"); rc = -ENOMEM; goto error; } usb_fill_bulk_urb(dev->in_urb, dev->udev, usb_rcvbulkpipe(dev->udev, in_endpoint), NULL, 0, NULL, dev); usb_fill_bulk_urb(dev->out_urb, 
dev->udev, usb_sndbulkpipe(dev->udev, out_endpoint), NULL, 0, port100_send_complete, dev); dev->out_urb->transfer_flags = URB_ZERO_PACKET; dev->skb_headroom = PORT100_FRAME_HEADER_LEN + PORT100_COMM_RF_HEAD_MAX_LEN; dev->skb_tailroom = PORT100_FRAME_TAIL_LEN; init_completion(&dev->cmd_cancel_done); INIT_WORK(&dev->cmd_complete_work, port100_wq_cmd_complete); /* The first thing to do with the Port-100 is to set the command type * to be used. If supported we use command type 1. 0 otherwise. */ cmd_type_mask = port100_get_command_type_mask(dev); if (!cmd_type_mask) { nfc_err(&interface->dev, "Could not get supported command types\n"); rc = -ENODEV; goto error; } if (PORT100_CMD_TYPE_IS_SUPPORTED(cmd_type_mask, PORT100_CMD_TYPE_1)) dev->cmd_type = PORT100_CMD_TYPE_1; else dev->cmd_type = PORT100_CMD_TYPE_0; rc = port100_set_command_type(dev, dev->cmd_type); if (rc) { nfc_err(&interface->dev, "The device does not support command type %u\n", dev->cmd_type); goto error; } fw_version = port100_get_firmware_version(dev); if (!fw_version) nfc_err(&interface->dev, "Could not get device firmware version\n"); nfc_info(&interface->dev, "Sony NFC Port-100 Series attached (firmware v%x.%02x)\n", (fw_version & 0xFF00) >> 8, fw_version & 0xFF); dev->nfc_digital_dev = nfc_digital_allocate_device(&port100_digital_ops, PORT100_PROTOCOLS, PORT100_CAPABILITIES, dev->skb_headroom, dev->skb_tailroom); if (!dev->nfc_digital_dev) { nfc_err(&interface->dev, "Could not allocate nfc_digital_dev\n"); rc = -ENOMEM; goto error; } nfc_digital_set_parent_dev(dev->nfc_digital_dev, &interface->dev); nfc_digital_set_drvdata(dev->nfc_digital_dev, dev); rc = nfc_digital_register_device(dev->nfc_digital_dev); if (rc) { nfc_err(&interface->dev, "Could not register digital device\n"); goto free_nfc_dev; } return 0; free_nfc_dev: nfc_digital_free_device(dev->nfc_digital_dev); error: usb_kill_urb(dev->in_urb); usb_free_urb(dev->in_urb); usb_kill_urb(dev->out_urb); usb_free_urb(dev->out_urb); usb_put_dev(dev->udev); return rc; } static void port100_disconnect(struct usb_interface *interface) { struct port100 *dev; dev = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); nfc_digital_unregister_device(dev->nfc_digital_dev); nfc_digital_free_device(dev->nfc_digital_dev); usb_kill_urb(dev->in_urb); usb_kill_urb(dev->out_urb); usb_free_urb(dev->in_urb); usb_free_urb(dev->out_urb); usb_put_dev(dev->udev); kfree(dev->cmd); nfc_info(&interface->dev, "Sony Port-100 NFC device disconnected\n"); } static struct usb_driver port100_driver = { .name = "port100", .probe = port100_probe, .disconnect = port100_disconnect, .id_table = port100_table, }; module_usb_driver(port100_driver); MODULE_DESCRIPTION("NFC Port-100 series usb driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
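/*
 * Editor's note -- illustrative sketch, not part of the driver: the
 * Port-100 checksum rule used by port100_checksum() and
 * port100_data_checksum() above is "bytes + checksum == 0 (mod 256)".
 * A standalone, userspace-compilable demonstration of that rule:
 */
#if 0	/* example only, never compiled into the driver */
#include <assert.h>
#include <stdint.h>

static uint8_t checksum16(uint16_t value)
{
	/* negate the sum of both bytes, mod 256 (same as ~sum + 1) */
	return (uint8_t)-(uint8_t)((value & 0xff) + (value >> 8));
}

int main(void)
{
	/* datalen = 2 (CC + SCC only): 0x02 + 0x00 + 0xfe wraps to 0 */
	assert((uint8_t)(checksum16(2) + 0x02 + 0x00) == 0);
	return 0;
}
#endif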
linux-master
drivers/nfc/port100.c
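The tg_listen_mdaa handler in port100.c above packs the NFC-A and NFC-F listen parameters into fixed offsets of the command header. The following is a standalone host-C sketch of that packing only; all parameter values here are made-up examples, not values mandated by the driver.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	uint16_t sens_res = 0x0101;  /* assumed example SENS_RES */
	uint8_t sel_res = 0x40;      /* assumed example SEL_RES */
	uint8_t nfcid1[3] = { 0x08, 0x12, 0x34 };
	uint8_t nfcid2[8] = { 0x01, 0xfe, 0, 0, 0, 0, 0, 0 };
	uint16_t sc = 0xffff;        /* NFC-F system code */
	uint8_t nfca_param[6], nfcf_param[18];

	/* big-endian split of SENS_RES, as in the hdr->nfca_param fill above */
	nfca_param[0] = (sens_res >> 8) & 0xFF;
	nfca_param[1] = sens_res & 0xFF;
	memcpy(nfca_param + 2, nfcid1, 3);
	nfca_param[5] = sel_res;

	/* NFCID2 at offset 0, system code split at offsets 16..17 */
	memset(nfcf_param, 0, sizeof(nfcf_param));
	memcpy(nfcf_param, nfcid2, 8);
	nfcf_param[16] = (sc >> 8) & 0xFF;
	nfcf_param[17] = sc & 0xFF;

	for (size_t i = 0; i < sizeof(nfca_param); i++)
		printf("%02x ", nfca_param[i]);
	printf("\n");
	return 0;
}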
// SPDX-License-Identifier: GPL-2.0-only /* * NFC hardware simulation driver * Copyright (c) 2013, Intel Corporation. */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/ctype.h> #include <linux/debugfs.h> #include <linux/nfc.h> #include <net/nfc/nfc.h> #include <net/nfc/digital.h> #define NFCSIM_ERR(d, fmt, args...) nfc_err(&d->nfc_digital_dev->nfc_dev->dev, \ "%s: " fmt, __func__, ## args) #define NFCSIM_DBG(d, fmt, args...) dev_dbg(&d->nfc_digital_dev->nfc_dev->dev, \ "%s: " fmt, __func__, ## args) #define NFCSIM_VERSION "0.2" #define NFCSIM_MODE_NONE 0 #define NFCSIM_MODE_INITIATOR 1 #define NFCSIM_MODE_TARGET 2 #define NFCSIM_CAPABILITIES (NFC_DIGITAL_DRV_CAPS_IN_CRC | \ NFC_DIGITAL_DRV_CAPS_TG_CRC) struct nfcsim { struct nfc_digital_dev *nfc_digital_dev; struct work_struct recv_work; struct delayed_work send_work; struct nfcsim_link *link_in; struct nfcsim_link *link_out; bool up; u8 mode; u8 rf_tech; u16 recv_timeout; nfc_digital_cmd_complete_t cb; void *arg; u8 dropframe; }; struct nfcsim_link { struct mutex lock; u8 rf_tech; u8 mode; u8 shutdown; struct sk_buff *skb; wait_queue_head_t recv_wait; u8 cond; }; static struct nfcsim_link *nfcsim_link_new(void) { struct nfcsim_link *link; link = kzalloc(sizeof(struct nfcsim_link), GFP_KERNEL); if (!link) return NULL; mutex_init(&link->lock); init_waitqueue_head(&link->recv_wait); return link; } static void nfcsim_link_free(struct nfcsim_link *link) { dev_kfree_skb(link->skb); kfree(link); } static void nfcsim_link_recv_wake(struct nfcsim_link *link) { link->cond = 1; wake_up_interruptible(&link->recv_wait); } static void nfcsim_link_set_skb(struct nfcsim_link *link, struct sk_buff *skb, u8 rf_tech, u8 mode) { mutex_lock(&link->lock); dev_kfree_skb(link->skb); link->skb = skb; link->rf_tech = rf_tech; link->mode = mode; mutex_unlock(&link->lock); } static void nfcsim_link_recv_cancel(struct nfcsim_link *link) { mutex_lock(&link->lock); link->mode = NFCSIM_MODE_NONE; mutex_unlock(&link->lock); nfcsim_link_recv_wake(link); } static void nfcsim_link_shutdown(struct nfcsim_link *link) { mutex_lock(&link->lock); link->shutdown = 1; link->mode = NFCSIM_MODE_NONE; mutex_unlock(&link->lock); nfcsim_link_recv_wake(link); } static struct sk_buff *nfcsim_link_recv_skb(struct nfcsim_link *link, int timeout, u8 rf_tech, u8 mode) { int rc; struct sk_buff *skb; rc = wait_event_interruptible_timeout(link->recv_wait, link->cond, msecs_to_jiffies(timeout)); mutex_lock(&link->lock); skb = link->skb; link->skb = NULL; if (!rc) { rc = -ETIMEDOUT; goto done; } if (!skb || link->rf_tech != rf_tech || link->mode == mode) { rc = -EINVAL; goto done; } if (link->shutdown) { rc = -ENODEV; goto done; } done: mutex_unlock(&link->lock); if (rc < 0) { dev_kfree_skb(skb); skb = ERR_PTR(rc); } link->cond = 0; return skb; } static void nfcsim_send_wq(struct work_struct *work) { struct nfcsim *dev = container_of(work, struct nfcsim, send_work.work); /* * To effectively send data, the device just wake up its link_out which * is the link_in of the peer device. The exchanged skb has already been * stored in the dev->link_out through nfcsim_link_set_skb(). 
*/ nfcsim_link_recv_wake(dev->link_out); } static void nfcsim_recv_wq(struct work_struct *work) { struct nfcsim *dev = container_of(work, struct nfcsim, recv_work); struct sk_buff *skb; skb = nfcsim_link_recv_skb(dev->link_in, dev->recv_timeout, dev->rf_tech, dev->mode); if (!dev->up) { NFCSIM_ERR(dev, "Device is down\n"); if (!IS_ERR(skb)) dev_kfree_skb(skb); return; } dev->cb(dev->nfc_digital_dev, dev->arg, skb); } static int nfcsim_send(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); u8 delay; if (!dev->up) { NFCSIM_ERR(dev, "Device is down\n"); return -ENODEV; } dev->recv_timeout = timeout; dev->cb = cb; dev->arg = arg; schedule_work(&dev->recv_work); if (dev->dropframe) { NFCSIM_DBG(dev, "dropping frame (out of %d)\n", dev->dropframe); dev_kfree_skb(skb); dev->dropframe--; return 0; } if (skb) { nfcsim_link_set_skb(dev->link_out, skb, dev->rf_tech, dev->mode); /* Add random delay (between 3 and 10 ms) before sending data */ get_random_bytes(&delay, 1); delay = 3 + (delay & 0x07); schedule_delayed_work(&dev->send_work, msecs_to_jiffies(delay)); } return 0; } static void nfcsim_abort_cmd(struct nfc_digital_dev *ddev) { const struct nfcsim *dev = nfc_digital_get_drvdata(ddev); nfcsim_link_recv_cancel(dev->link_in); } static int nfcsim_switch_rf(struct nfc_digital_dev *ddev, bool on) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); dev->up = on; return 0; } static int nfcsim_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); switch (type) { case NFC_DIGITAL_CONFIG_RF_TECH: dev->up = true; dev->mode = NFCSIM_MODE_INITIATOR; dev->rf_tech = param; break; case NFC_DIGITAL_CONFIG_FRAMING: break; default: NFCSIM_ERR(dev, "Invalid configuration type: %d\n", type); return -EINVAL; } return 0; } static int nfcsim_in_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, skb, timeout, cb, arg); } static int nfcsim_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { struct nfcsim *dev = nfc_digital_get_drvdata(ddev); switch (type) { case NFC_DIGITAL_CONFIG_RF_TECH: dev->up = true; dev->mode = NFCSIM_MODE_TARGET; dev->rf_tech = param; break; case NFC_DIGITAL_CONFIG_FRAMING: break; default: NFCSIM_ERR(dev, "Invalid configuration type: %d\n", type); return -EINVAL; } return 0; } static int nfcsim_tg_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, skb, timeout, cb, arg); } static int nfcsim_tg_listen(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return nfcsim_send(ddev, NULL, timeout, cb, arg); } static const struct nfc_digital_ops nfcsim_digital_ops = { .in_configure_hw = nfcsim_in_configure_hw, .in_send_cmd = nfcsim_in_send_cmd, .tg_listen = nfcsim_tg_listen, .tg_configure_hw = nfcsim_tg_configure_hw, .tg_send_cmd = nfcsim_tg_send_cmd, .abort_cmd = nfcsim_abort_cmd, .switch_rf = nfcsim_switch_rf, }; static struct dentry *nfcsim_debugfs_root; static void nfcsim_debugfs_init(void) { nfcsim_debugfs_root = debugfs_create_dir("nfcsim", NULL); } static void nfcsim_debugfs_remove(void) { debugfs_remove_recursive(nfcsim_debugfs_root); } static void nfcsim_debugfs_init_dev(struct nfcsim *dev) { struct dentry *dev_dir; char devname[5]; /* nfcX\0 */ u32 idx; int n; if (!nfcsim_debugfs_root) { 
NFCSIM_ERR(dev, "nfcsim debugfs not initialized\n"); return; } idx = dev->nfc_digital_dev->nfc_dev->idx; n = snprintf(devname, sizeof(devname), "nfc%d", idx); if (n >= sizeof(devname)) { NFCSIM_ERR(dev, "Could not compute dev name for dev %d\n", idx); return; } dev_dir = debugfs_create_dir(devname, nfcsim_debugfs_root); debugfs_create_u8("dropframe", 0664, dev_dir, &dev->dropframe); } static struct nfcsim *nfcsim_device_new(struct nfcsim_link *link_in, struct nfcsim_link *link_out) { struct nfcsim *dev; int rc; dev = kzalloc(sizeof(struct nfcsim), GFP_KERNEL); if (!dev) return ERR_PTR(-ENOMEM); INIT_DELAYED_WORK(&dev->send_work, nfcsim_send_wq); INIT_WORK(&dev->recv_work, nfcsim_recv_wq); dev->nfc_digital_dev = nfc_digital_allocate_device(&nfcsim_digital_ops, NFC_PROTO_NFC_DEP_MASK, NFCSIM_CAPABILITIES, 0, 0); if (!dev->nfc_digital_dev) { kfree(dev); return ERR_PTR(-ENOMEM); } nfc_digital_set_drvdata(dev->nfc_digital_dev, dev); dev->link_in = link_in; dev->link_out = link_out; rc = nfc_digital_register_device(dev->nfc_digital_dev); if (rc) { pr_err("Could not register digital device (%d)\n", rc); nfc_digital_free_device(dev->nfc_digital_dev); kfree(dev); return ERR_PTR(rc); } nfcsim_debugfs_init_dev(dev); return dev; } static void nfcsim_device_free(struct nfcsim *dev) { nfc_digital_unregister_device(dev->nfc_digital_dev); dev->up = false; nfcsim_link_shutdown(dev->link_in); cancel_delayed_work_sync(&dev->send_work); cancel_work_sync(&dev->recv_work); nfc_digital_free_device(dev->nfc_digital_dev); kfree(dev); } static struct nfcsim *dev0; static struct nfcsim *dev1; static int __init nfcsim_init(void) { struct nfcsim_link *link0, *link1; int rc; link0 = nfcsim_link_new(); link1 = nfcsim_link_new(); if (!link0 || !link1) { rc = -ENOMEM; goto exit_err; } nfcsim_debugfs_init(); dev0 = nfcsim_device_new(link0, link1); if (IS_ERR(dev0)) { rc = PTR_ERR(dev0); goto exit_err; } dev1 = nfcsim_device_new(link1, link0); if (IS_ERR(dev1)) { nfcsim_device_free(dev0); rc = PTR_ERR(dev1); goto exit_err; } pr_info("nfcsim " NFCSIM_VERSION " initialized\n"); return 0; exit_err: pr_err("Failed to initialize nfcsim driver (%d)\n", rc); if (link0) nfcsim_link_free(link0); if (link1) nfcsim_link_free(link1); return rc; } static void __exit nfcsim_exit(void) { struct nfcsim_link *link0, *link1; link0 = dev0->link_in; link1 = dev0->link_out; nfcsim_device_free(dev0); nfcsim_device_free(dev1); nfcsim_link_free(link0); nfcsim_link_free(link1); nfcsim_debugfs_remove(); } module_init(nfcsim_init); module_exit(nfcsim_exit); MODULE_DESCRIPTION("NFCSim driver ver " NFCSIM_VERSION); MODULE_VERSION(NFCSIM_VERSION); MODULE_LICENSE("GPL");
linux-master
drivers/nfc/nfcsim.c
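nfcsim models the RF link as a pair of nfcsim_link structs: a sender stores an skb in the peer's link and wakes its receive wait queue. Below is a host-C sketch of the same handshake using pthreads instead of kernel wait queues; every name in it is hypothetical and it only mirrors the set-then-wake pattern of nfcsim_link_set_skb() and nfcsim_link_recv_wake().

#include <pthread.h>
#include <stdio.h>

struct sim_link {
	pthread_mutex_t lock;
	pthread_cond_t wait;
	int cond;           /* like link->cond */
	char buf[64];       /* stand-in for the exchanged skb */
};

static void link_set(struct sim_link *l, const char *data)
{
	pthread_mutex_lock(&l->lock);
	snprintf(l->buf, sizeof(l->buf), "%s", data);
	l->cond = 1;                   /* like link->cond = 1 */
	pthread_cond_signal(&l->wait); /* like wake_up_interruptible() */
	pthread_mutex_unlock(&l->lock);
}

static void *receiver(void *arg)
{
	struct sim_link *l = arg;

	pthread_mutex_lock(&l->lock);
	while (!l->cond)               /* like wait_event_interruptible_timeout() */
		pthread_cond_wait(&l->wait, &l->lock);
	printf("received: %s\n", l->buf);
	l->cond = 0;
	pthread_mutex_unlock(&l->lock);
	return NULL;
}

int main(void)
{
	struct sim_link link = { PTHREAD_MUTEX_INITIALIZER,
				 PTHREAD_COND_INITIALIZER, 0, "" };
	pthread_t t;

	pthread_create(&t, NULL, receiver, &link);
	link_set(&link, "ATR_REQ");    /* hypothetical frame */
	pthread_join(t, NULL);
	return 0;
}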
// SPDX-License-Identifier: GPL-2.0-or-later /* * Virtual NCI device simulation driver * * Copyright (C) 2020 Samsung Electrnoics * Bongsu Jeon <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/mutex.h> #include <linux/wait.h> #include <net/nfc/nci_core.h> #define IOCTL_GET_NCIDEV_IDX 0 #define VIRTUAL_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \ NFC_PROTO_MIFARE_MASK | \ NFC_PROTO_FELICA_MASK | \ NFC_PROTO_ISO14443_MASK | \ NFC_PROTO_ISO14443_B_MASK | \ NFC_PROTO_ISO15693_MASK) struct virtual_nci_dev { struct nci_dev *ndev; struct mutex mtx; struct sk_buff *send_buff; struct wait_queue_head wq; }; static int virtual_nci_open(struct nci_dev *ndev) { return 0; } static int virtual_nci_close(struct nci_dev *ndev) { struct virtual_nci_dev *vdev = nci_get_drvdata(ndev); mutex_lock(&vdev->mtx); kfree_skb(vdev->send_buff); vdev->send_buff = NULL; mutex_unlock(&vdev->mtx); return 0; } static int virtual_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct virtual_nci_dev *vdev = nci_get_drvdata(ndev); mutex_lock(&vdev->mtx); if (vdev->send_buff) { mutex_unlock(&vdev->mtx); kfree_skb(skb); return -1; } vdev->send_buff = skb_copy(skb, GFP_KERNEL); if (!vdev->send_buff) { mutex_unlock(&vdev->mtx); kfree_skb(skb); return -1; } mutex_unlock(&vdev->mtx); wake_up_interruptible(&vdev->wq); consume_skb(skb); return 0; } static const struct nci_ops virtual_nci_ops = { .open = virtual_nci_open, .close = virtual_nci_close, .send = virtual_nci_send }; static ssize_t virtual_ncidev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct virtual_nci_dev *vdev = file->private_data; size_t actual_len; mutex_lock(&vdev->mtx); while (!vdev->send_buff) { mutex_unlock(&vdev->mtx); if (wait_event_interruptible(vdev->wq, vdev->send_buff)) return -EFAULT; mutex_lock(&vdev->mtx); } actual_len = min_t(size_t, count, vdev->send_buff->len); if (copy_to_user(buf, vdev->send_buff->data, actual_len)) { mutex_unlock(&vdev->mtx); return -EFAULT; } skb_pull(vdev->send_buff, actual_len); if (vdev->send_buff->len == 0) { consume_skb(vdev->send_buff); vdev->send_buff = NULL; } mutex_unlock(&vdev->mtx); return actual_len; } static ssize_t virtual_ncidev_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct virtual_nci_dev *vdev = file->private_data; struct sk_buff *skb; skb = alloc_skb(count, GFP_KERNEL); if (!skb) return -ENOMEM; if (copy_from_user(skb_put(skb, count), buf, count)) { kfree_skb(skb); return -EFAULT; } nci_recv_frame(vdev->ndev, skb); return count; } static int virtual_ncidev_open(struct inode *inode, struct file *file) { int ret = 0; struct virtual_nci_dev *vdev; vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); if (!vdev) return -ENOMEM; vdev->ndev = nci_allocate_device(&virtual_nci_ops, VIRTUAL_NFC_PROTOCOLS, 0, 0); if (!vdev->ndev) { kfree(vdev); return -ENOMEM; } mutex_init(&vdev->mtx); init_waitqueue_head(&vdev->wq); file->private_data = vdev; nci_set_drvdata(vdev->ndev, vdev); ret = nci_register_device(vdev->ndev); if (ret < 0) { nci_free_device(vdev->ndev); mutex_destroy(&vdev->mtx); kfree(vdev); return ret; } return 0; } static int virtual_ncidev_close(struct inode *inode, struct file *file) { struct virtual_nci_dev *vdev = file->private_data; nci_unregister_device(vdev->ndev); nci_free_device(vdev->ndev); mutex_destroy(&vdev->mtx); kfree(vdev); return 0; } static long virtual_ncidev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct virtual_nci_dev *vdev = 
file->private_data; const struct nfc_dev *nfc_dev = vdev->ndev->nfc_dev; void __user *p = (void __user *)arg; if (cmd != IOCTL_GET_NCIDEV_IDX) return -ENOTTY; if (copy_to_user(p, &nfc_dev->idx, sizeof(nfc_dev->idx))) return -EFAULT; return 0; } static const struct file_operations virtual_ncidev_fops = { .owner = THIS_MODULE, .read = virtual_ncidev_read, .write = virtual_ncidev_write, .open = virtual_ncidev_open, .release = virtual_ncidev_close, .unlocked_ioctl = virtual_ncidev_ioctl }; static struct miscdevice miscdev = { .minor = MISC_DYNAMIC_MINOR, .name = "virtual_nci", .fops = &virtual_ncidev_fops, .mode = 0600, }; module_misc_device(miscdev); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Virtual NCI device simulation driver"); MODULE_AUTHOR("Bongsu Jeon <[email protected]>");
linux-master
drivers/nfc/virtual_ncidev.c
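From user space the driver above appears as a misc device: reading returns host-to-controller NCI frames, writing injects controller-to-host frames, and the single ioctl reports which nfcX device the fd is bound to. A minimal usage sketch follows, assuming the module is loaded and /dev/virtual_nci is accessible; the response bytes are illustrative only, and read() blocks until the NCI core actually emits a command.

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define IOCTL_GET_NCIDEV_IDX 0  /* matches the driver above */

int main(void)
{
	int idx = 0;
	/* illustrative CORE_RESET_RSP-style frame, not a verified trace */
	unsigned char rsp[] = { 0x40, 0x00, 0x03, 0x00, 0x10, 0x01 };
	char buf[260];
	ssize_t n;

	int fd = open("/dev/virtual_nci", O_RDWR);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, IOCTL_GET_NCIDEV_IDX, &idx) == 0)
		printf("bound to nfc%d\n", idx);

	n = read(fd, buf, sizeof(buf));  /* host-to-controller frame */
	if (n > 0)
		write(fd, rsp, sizeof(rsp)); /* inject a controller response */

	close(fd);
	return 0;
}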
// SPDX-License-Identifier: GPL-2.0-only /* * Generic driver for NXP NCI NFC chips * * Copyright (C) 2014 NXP Semiconductors All rights reserved. * * Author: Clément Perrochaud <[email protected]> * * Derived from PN544 device driver: * Copyright (C) 2012 Intel Corporation. All rights reserved. */ #include <linux/completion.h> #include <linux/firmware.h> #include <linux/nfc.h> #include <asm/unaligned.h> #include "nxp-nci.h" /* Crypto operations can take up to 30 seconds */ #define NXP_NCI_FW_ANSWER_TIMEOUT msecs_to_jiffies(30000) #define NXP_NCI_FW_CMD_RESET 0xF0 #define NXP_NCI_FW_CMD_GETVERSION 0xF1 #define NXP_NCI_FW_CMD_CHECKINTEGRITY 0xE0 #define NXP_NCI_FW_CMD_WRITE 0xC0 #define NXP_NCI_FW_CMD_READ 0xA2 #define NXP_NCI_FW_CMD_GETSESSIONSTATE 0xF2 #define NXP_NCI_FW_CMD_LOG 0xA7 #define NXP_NCI_FW_CMD_FORCE 0xD0 #define NXP_NCI_FW_CMD_GET_DIE_ID 0xF4 #define NXP_NCI_FW_CHUNK_FLAG 0x0400 #define NXP_NCI_FW_RESULT_OK 0x00 #define NXP_NCI_FW_RESULT_INVALID_ADDR 0x01 #define NXP_NCI_FW_RESULT_GENERIC_ERROR 0x02 #define NXP_NCI_FW_RESULT_UNKNOWN_CMD 0x0B #define NXP_NCI_FW_RESULT_ABORTED_CMD 0x0C #define NXP_NCI_FW_RESULT_PLL_ERROR 0x0D #define NXP_NCI_FW_RESULT_ADDR_RANGE_OFL_ERROR 0x1E #define NXP_NCI_FW_RESULT_BUFFER_OFL_ERROR 0x1F #define NXP_NCI_FW_RESULT_MEM_BSY 0x20 #define NXP_NCI_FW_RESULT_SIGNATURE_ERROR 0x21 #define NXP_NCI_FW_RESULT_FIRMWARE_VERSION_ERROR 0x24 #define NXP_NCI_FW_RESULT_PROTOCOL_ERROR 0x28 #define NXP_NCI_FW_RESULT_SFWU_DEGRADED 0x2A #define NXP_NCI_FW_RESULT_PH_STATUS_FIRST_CHUNK 0x2D #define NXP_NCI_FW_RESULT_PH_STATUS_NEXT_CHUNK 0x2E #define NXP_NCI_FW_RESULT_PH_STATUS_INTERNAL_ERROR_5 0xC5 void nxp_nci_fw_work_complete(struct nxp_nci_info *info, int result) { struct nxp_nci_fw_info *fw_info = &info->fw_info; int r; if (info->phy_ops->set_mode) { r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD); if (r < 0 && result == 0) result = -r; } info->mode = NXP_NCI_MODE_COLD; if (fw_info->fw) { release_firmware(fw_info->fw); fw_info->fw = NULL; } nfc_fw_download_done(info->ndev->nfc_dev, fw_info->name, (u32) -result); } /* crc_ccitt cannot be used since it is computed MSB first and not LSB first */ static u16 nxp_nci_fw_crc(u8 const *buffer, size_t len) { u16 crc = 0xffff; while (len--) { crc = ((crc >> 8) | (crc << 8)) ^ *buffer++; crc ^= (crc & 0xff) >> 4; crc ^= (crc & 0xff) << 12; crc ^= (crc & 0xff) << 5; } return crc; } static int nxp_nci_fw_send_chunk(struct nxp_nci_info *info) { struct nxp_nci_fw_info *fw_info = &info->fw_info; u16 header, crc; struct sk_buff *skb; size_t chunk_len; size_t remaining_len; int r; skb = nci_skb_alloc(info->ndev, info->max_payload, GFP_KERNEL); if (!skb) return -ENOMEM; chunk_len = info->max_payload - NXP_NCI_FW_HDR_LEN - NXP_NCI_FW_CRC_LEN; remaining_len = fw_info->frame_size - fw_info->written; if (remaining_len > chunk_len) { header = NXP_NCI_FW_CHUNK_FLAG; } else { chunk_len = remaining_len; header = 0x0000; } header |= chunk_len & NXP_NCI_FW_FRAME_LEN_MASK; put_unaligned_be16(header, skb_put(skb, NXP_NCI_FW_HDR_LEN)); skb_put_data(skb, fw_info->data + fw_info->written, chunk_len); crc = nxp_nci_fw_crc(skb->data, chunk_len + NXP_NCI_FW_HDR_LEN); put_unaligned_be16(crc, skb_put(skb, NXP_NCI_FW_CRC_LEN)); r = info->phy_ops->write(info->phy_id, skb); if (r >= 0) r = chunk_len; kfree_skb(skb); return r; } static int nxp_nci_fw_send(struct nxp_nci_info *info) { struct nxp_nci_fw_info *fw_info = &info->fw_info; long completion_rc; int r; reinit_completion(&fw_info->cmd_completion); if (fw_info->written == 0) { 
fw_info->frame_size = get_unaligned_be16(fw_info->data) & NXP_NCI_FW_FRAME_LEN_MASK; fw_info->data += NXP_NCI_FW_HDR_LEN; fw_info->size -= NXP_NCI_FW_HDR_LEN; } if (fw_info->frame_size > fw_info->size) return -EMSGSIZE; r = nxp_nci_fw_send_chunk(info); if (r < 0) return r; fw_info->written += r; if (*fw_info->data == NXP_NCI_FW_CMD_RESET) { fw_info->cmd_result = 0; if (fw_info->fw) schedule_work(&fw_info->work); } else { completion_rc = wait_for_completion_interruptible_timeout( &fw_info->cmd_completion, NXP_NCI_FW_ANSWER_TIMEOUT); if (completion_rc == 0) return -ETIMEDOUT; } return 0; } void nxp_nci_fw_work(struct work_struct *work) { struct nxp_nci_info *info; struct nxp_nci_fw_info *fw_info; int r; fw_info = container_of(work, struct nxp_nci_fw_info, work); info = container_of(fw_info, struct nxp_nci_info, fw_info); mutex_lock(&info->info_lock); r = fw_info->cmd_result; if (r < 0) goto exit_work; if (fw_info->written == fw_info->frame_size) { fw_info->data += fw_info->frame_size; fw_info->size -= fw_info->frame_size; fw_info->written = 0; } if (fw_info->size > 0) r = nxp_nci_fw_send(info); exit_work: if (r < 0 || fw_info->size == 0) nxp_nci_fw_work_complete(info, r); mutex_unlock(&info->info_lock); } int nxp_nci_fw_download(struct nci_dev *ndev, const char *firmware_name) { struct nxp_nci_info *info = nci_get_drvdata(ndev); struct nxp_nci_fw_info *fw_info = &info->fw_info; int r; mutex_lock(&info->info_lock); if (!info->phy_ops->set_mode || !info->phy_ops->write) { r = -ENOTSUPP; goto fw_download_exit; } if (!firmware_name || firmware_name[0] == '\0') { r = -EINVAL; goto fw_download_exit; } strcpy(fw_info->name, firmware_name); r = request_firmware(&fw_info->fw, firmware_name, ndev->nfc_dev->dev.parent); if (r < 0) goto fw_download_exit; r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_FW); if (r < 0) { release_firmware(fw_info->fw); goto fw_download_exit; } info->mode = NXP_NCI_MODE_FW; fw_info->data = fw_info->fw->data; fw_info->size = fw_info->fw->size; fw_info->written = 0; fw_info->frame_size = 0; fw_info->cmd_result = 0; schedule_work(&fw_info->work); fw_download_exit: mutex_unlock(&info->info_lock); return r; } static int nxp_nci_fw_read_status(u8 stat) { switch (stat) { case NXP_NCI_FW_RESULT_OK: return 0; case NXP_NCI_FW_RESULT_INVALID_ADDR: return -EINVAL; case NXP_NCI_FW_RESULT_UNKNOWN_CMD: return -EINVAL; case NXP_NCI_FW_RESULT_ABORTED_CMD: return -EMSGSIZE; case NXP_NCI_FW_RESULT_ADDR_RANGE_OFL_ERROR: return -EADDRNOTAVAIL; case NXP_NCI_FW_RESULT_BUFFER_OFL_ERROR: return -ENOBUFS; case NXP_NCI_FW_RESULT_MEM_BSY: return -ENOKEY; case NXP_NCI_FW_RESULT_SIGNATURE_ERROR: return -EKEYREJECTED; case NXP_NCI_FW_RESULT_FIRMWARE_VERSION_ERROR: return -EALREADY; case NXP_NCI_FW_RESULT_PROTOCOL_ERROR: return -EPROTO; case NXP_NCI_FW_RESULT_SFWU_DEGRADED: return -EHWPOISON; case NXP_NCI_FW_RESULT_PH_STATUS_FIRST_CHUNK: return 0; case NXP_NCI_FW_RESULT_PH_STATUS_NEXT_CHUNK: return 0; case NXP_NCI_FW_RESULT_PH_STATUS_INTERNAL_ERROR_5: return -EINVAL; default: return -EIO; } } static u16 nxp_nci_fw_check_crc(struct sk_buff *skb) { u16 crc, frame_crc; size_t len = skb->len - NXP_NCI_FW_CRC_LEN; crc = nxp_nci_fw_crc(skb->data, len); frame_crc = get_unaligned_be16(skb->data + len); return (crc ^ frame_crc); } void nxp_nci_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb) { struct nxp_nci_info *info = nci_get_drvdata(ndev); struct nxp_nci_fw_info *fw_info = &info->fw_info; complete(&fw_info->cmd_completion); if (skb) { if (nxp_nci_fw_check_crc(skb) != 0x00) fw_info->cmd_result = 
-EBADMSG; else fw_info->cmd_result = nxp_nci_fw_read_status(*(u8 *)skb_pull(skb, NXP_NCI_FW_HDR_LEN)); kfree_skb(skb); } else { fw_info->cmd_result = -EIO; } if (fw_info->fw) schedule_work(&fw_info->work); } EXPORT_SYMBOL(nxp_nci_fw_recv_frame);
linux-master
drivers/nfc/nxp-nci/firmware.c
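The firmware protocol above checksums each frame with a byte-swapped CCITT-style CRC computed LSB first, which is why the driver cannot reuse crc_ccitt(). Here is a standalone host-C copy of that routine with a made-up frame, showing how a receiver would recompute and compare, as nxp_nci_fw_check_crc() does.

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* same arithmetic as nxp_nci_fw_crc() above */
static uint16_t nxp_fw_crc(const uint8_t *buffer, size_t len)
{
	uint16_t crc = 0xffff;

	while (len--) {
		crc = ((crc >> 8) | (crc << 8)) ^ *buffer++;
		crc ^= (crc & 0xff) >> 4;
		crc ^= (crc & 0xff) << 12;
		crc ^= (crc & 0xff) << 5;
	}
	return crc;
}

int main(void)
{
	/* hypothetical 2-byte header + 4-byte chunk */
	uint8_t frame[] = { 0x00, 0x04, 0xde, 0xad, 0xbe, 0xef };
	uint16_t crc = nxp_fw_crc(frame, sizeof(frame));

	/* sender appends crc big-endian; receiver recomputes and XORs */
	printf("crc = 0x%04x\n", (unsigned)crc);
	return 0;
}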
// SPDX-License-Identifier: GPL-2.0-only /* * I2C link layer for the NXP NCI driver * * Copyright (C) 2014 NXP Semiconductors All rights reserved. * Copyright (C) 2012-2015 Intel Corporation. All rights reserved. * * Authors: Clément Perrochaud <[email protected]> * Authors: Oleg Zhurakivskyy <[email protected]> * * Derived from PN544 device driver: * Copyright (C) 2012 Intel Corporation. All rights reserved. */ #include <linux/acpi.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/nfc.h> #include <linux/gpio/consumer.h> #include <asm/unaligned.h> #include <net/nfc/nfc.h> #include "nxp-nci.h" #define NXP_NCI_I2C_DRIVER_NAME "nxp-nci_i2c" #define NXP_NCI_I2C_MAX_PAYLOAD 32 struct nxp_nci_i2c_phy { struct i2c_client *i2c_dev; struct nci_dev *ndev; struct gpio_desc *gpiod_en; struct gpio_desc *gpiod_fw; int hard_fault; /* * < 0 if hardware error occurred (e.g. i2c err) * and prevents normal operation. */ }; static int nxp_nci_i2c_set_mode(void *phy_id, enum nxp_nci_mode mode) { struct nxp_nci_i2c_phy *phy = (struct nxp_nci_i2c_phy *) phy_id; gpiod_set_value(phy->gpiod_fw, (mode == NXP_NCI_MODE_FW) ? 1 : 0); gpiod_set_value(phy->gpiod_en, (mode != NXP_NCI_MODE_COLD) ? 1 : 0); usleep_range(10000, 15000); if (mode == NXP_NCI_MODE_COLD) phy->hard_fault = 0; return 0; } static int nxp_nci_i2c_write(void *phy_id, struct sk_buff *skb) { int r; struct nxp_nci_i2c_phy *phy = phy_id; struct i2c_client *client = phy->i2c_dev; if (phy->hard_fault != 0) return phy->hard_fault; r = i2c_master_send(client, skb->data, skb->len); if (r < 0) { /* Retry, chip was in standby */ msleep(110); r = i2c_master_send(client, skb->data, skb->len); } if (r < 0) { nfc_err(&client->dev, "Error %d on I2C send\n", r); } else if (r != skb->len) { nfc_err(&client->dev, "Invalid length sent: %u (expected %u)\n", r, skb->len); r = -EREMOTEIO; } else { /* Success but return 0 and not number of bytes */ r = 0; } return r; } static const struct nxp_nci_phy_ops i2c_phy_ops = { .set_mode = nxp_nci_i2c_set_mode, .write = nxp_nci_i2c_write, }; static int nxp_nci_i2c_fw_read(struct nxp_nci_i2c_phy *phy, struct sk_buff **skb) { struct i2c_client *client = phy->i2c_dev; size_t frame_len; __be16 header; int r; r = i2c_master_recv(client, (u8 *) &header, NXP_NCI_FW_HDR_LEN); if (r < 0) { goto fw_read_exit; } else if (r != NXP_NCI_FW_HDR_LEN) { nfc_err(&client->dev, "Incorrect header length: %u\n", r); r = -EBADMSG; goto fw_read_exit; } frame_len = (be16_to_cpu(header) & NXP_NCI_FW_FRAME_LEN_MASK) + NXP_NCI_FW_CRC_LEN; *skb = alloc_skb(NXP_NCI_FW_HDR_LEN + frame_len, GFP_KERNEL); if (*skb == NULL) { r = -ENOMEM; goto fw_read_exit; } skb_put_data(*skb, &header, NXP_NCI_FW_HDR_LEN); r = i2c_master_recv(client, skb_put(*skb, frame_len), frame_len); if (r < 0) { goto fw_read_exit_free_skb; } else if (r != frame_len) { nfc_err(&client->dev, "Invalid frame length: %u (expected %zu)\n", r, frame_len); r = -EBADMSG; goto fw_read_exit_free_skb; } return 0; fw_read_exit_free_skb: kfree_skb(*skb); fw_read_exit: return r; } static int nxp_nci_i2c_nci_read(struct nxp_nci_i2c_phy *phy, struct sk_buff **skb) { struct nci_ctrl_hdr header; /* May actually be a data header */ struct i2c_client *client = phy->i2c_dev; int r; r = i2c_master_recv(client, (u8 *) &header, NCI_CTRL_HDR_SIZE); if (r < 0) { goto nci_read_exit; } else if (r != NCI_CTRL_HDR_SIZE) { nfc_err(&client->dev, "Incorrect header length: %u\n", r); r = -EBADMSG; goto nci_read_exit; } *skb = alloc_skb(NCI_CTRL_HDR_SIZE + 
header.plen, GFP_KERNEL); if (*skb == NULL) { r = -ENOMEM; goto nci_read_exit; } skb_put_data(*skb, (void *)&header, NCI_CTRL_HDR_SIZE); if (!header.plen) return 0; r = i2c_master_recv(client, skb_put(*skb, header.plen), header.plen); if (r < 0) { goto nci_read_exit_free_skb; } else if (r != header.plen) { nfc_err(&client->dev, "Invalid frame payload length: %u (expected %u)\n", r, header.plen); r = -EBADMSG; goto nci_read_exit_free_skb; } return 0; nci_read_exit_free_skb: kfree_skb(*skb); nci_read_exit: return r; } static irqreturn_t nxp_nci_i2c_irq_thread_fn(int irq, void *phy_id) { struct nxp_nci_i2c_phy *phy = phy_id; struct i2c_client *client; struct nxp_nci_info *info; struct sk_buff *skb = NULL; int r = 0; if (!phy || !phy->ndev) goto exit_irq_none; client = phy->i2c_dev; if (!client || irq != client->irq) goto exit_irq_none; info = nci_get_drvdata(phy->ndev); if (!info) goto exit_irq_none; mutex_lock(&info->info_lock); if (phy->hard_fault != 0) goto exit_irq_handled; switch (info->mode) { case NXP_NCI_MODE_NCI: r = nxp_nci_i2c_nci_read(phy, &skb); break; case NXP_NCI_MODE_FW: r = nxp_nci_i2c_fw_read(phy, &skb); break; case NXP_NCI_MODE_COLD: r = -EREMOTEIO; break; } if (r == -EREMOTEIO) { phy->hard_fault = r; if (info->mode == NXP_NCI_MODE_FW) nxp_nci_fw_recv_frame(phy->ndev, NULL); } if (r < 0) { nfc_err(&client->dev, "Read failed with error %d\n", r); goto exit_irq_handled; } switch (info->mode) { case NXP_NCI_MODE_NCI: nci_recv_frame(phy->ndev, skb); break; case NXP_NCI_MODE_FW: nxp_nci_fw_recv_frame(phy->ndev, skb); break; case NXP_NCI_MODE_COLD: break; } exit_irq_handled: mutex_unlock(&info->info_lock); return IRQ_HANDLED; exit_irq_none: WARN_ON_ONCE(1); return IRQ_NONE; } static const struct acpi_gpio_params firmware_gpios = { 1, 0, false }; static const struct acpi_gpio_params enable_gpios = { 2, 0, false }; static const struct acpi_gpio_mapping acpi_nxp_nci_gpios[] = { { "enable-gpios", &enable_gpios, 1 }, { "firmware-gpios", &firmware_gpios, 1 }, { } }; static int nxp_nci_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct nxp_nci_i2c_phy *phy; int r; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { nfc_err(&client->dev, "Need I2C_FUNC_I2C\n"); return -ENODEV; } phy = devm_kzalloc(&client->dev, sizeof(struct nxp_nci_i2c_phy), GFP_KERNEL); if (!phy) return -ENOMEM; phy->i2c_dev = client; i2c_set_clientdata(client, phy); r = devm_acpi_dev_add_driver_gpios(dev, acpi_nxp_nci_gpios); if (r) dev_dbg(dev, "Unable to add GPIO mapping table\n"); phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(phy->gpiod_en)) { nfc_err(dev, "Failed to get EN gpio\n"); return PTR_ERR(phy->gpiod_en); } phy->gpiod_fw = devm_gpiod_get_optional(dev, "firmware", GPIOD_OUT_LOW); if (IS_ERR(phy->gpiod_fw)) { nfc_err(dev, "Failed to get FW gpio\n"); return PTR_ERR(phy->gpiod_fw); } r = nxp_nci_probe(phy, &client->dev, &i2c_phy_ops, NXP_NCI_I2C_MAX_PAYLOAD, &phy->ndev); if (r < 0) return r; r = request_threaded_irq(client->irq, NULL, nxp_nci_i2c_irq_thread_fn, IRQF_TRIGGER_RISING | IRQF_ONESHOT, NXP_NCI_I2C_DRIVER_NAME, phy); if (r < 0) nfc_err(&client->dev, "Unable to register IRQ handler\n"); return r; } static void nxp_nci_i2c_remove(struct i2c_client *client) { struct nxp_nci_i2c_phy *phy = i2c_get_clientdata(client); nxp_nci_remove(phy->ndev); free_irq(client->irq, phy); } static const struct i2c_device_id nxp_nci_i2c_id_table[] = { {"nxp-nci_i2c", 0}, {} }; MODULE_DEVICE_TABLE(i2c, nxp_nci_i2c_id_table); static const struct of_device_id 
of_nxp_nci_i2c_match[] = { { .compatible = "nxp,nxp-nci-i2c", }, {} }; MODULE_DEVICE_TABLE(of, of_nxp_nci_i2c_match); #ifdef CONFIG_ACPI static const struct acpi_device_id acpi_id[] = { { "NXP1001" }, { "NXP1002" }, { "NXP7471" }, { } }; MODULE_DEVICE_TABLE(acpi, acpi_id); #endif static struct i2c_driver nxp_nci_i2c_driver = { .driver = { .name = NXP_NCI_I2C_DRIVER_NAME, .acpi_match_table = ACPI_PTR(acpi_id), .of_match_table = of_nxp_nci_i2c_match, }, .probe = nxp_nci_i2c_probe, .id_table = nxp_nci_i2c_id_table, .remove = nxp_nci_i2c_remove, }; module_i2c_driver(nxp_nci_i2c_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("I2C driver for NXP NCI NFC controllers"); MODULE_AUTHOR("Clément Perrochaud <[email protected]>"); MODULE_AUTHOR("Oleg Zhurakivskyy <[email protected]>");
linux-master
drivers/nfc/nxp-nci/i2c.c
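nxp_nci_i2c_fw_read() above shows the usual two-phase read: fetch a fixed-size big-endian header, derive the payload length from it, then read exactly that many bytes plus the CRC. A host-C sketch of the length derivation follows; the 0x03ff mask is an assumption about NXP_NCI_FW_FRAME_LEN_MASK, which lives in nxp-nci.h and is not part of this listing.

#include <stdint.h>
#include <stdio.h>

#define FW_FRAME_LEN_MASK 0x03ff /* assumed NXP_NCI_FW_FRAME_LEN_MASK */
#define FW_CRC_LEN 2             /* assumed NXP_NCI_FW_CRC_LEN */

/* portable equivalent of be16_to_cpu() on a byte buffer */
static uint16_t get_be16(const uint8_t *p)
{
	return (uint16_t)(p[0] << 8 | p[1]);
}

int main(void)
{
	uint8_t header[2] = { 0x04, 0x10 }; /* chunk flag set, len 0x10 */
	size_t frame_len;

	/* mask off the chunk flag, keep length, account for trailing CRC */
	frame_len = (get_be16(header) & FW_FRAME_LEN_MASK) + FW_CRC_LEN;
	printf("read %zu more bytes after the header\n", frame_len);
	return 0;
}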
// SPDX-License-Identifier: GPL-2.0-only /* * Generic driver for NXP NCI NFC chips * * Copyright (C) 2014 NXP Semiconductors All rights reserved. * * Authors: Clément Perrochaud <[email protected]> * * Derived from PN544 device driver: * Copyright (C) 2012 Intel Corporation. All rights reserved. */ #include <linux/delay.h> #include <linux/module.h> #include <linux/nfc.h> #include <net/nfc/nci_core.h> #include "nxp-nci.h" #define NXP_NCI_HDR_LEN 4 #define NXP_NCI_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \ NFC_PROTO_MIFARE_MASK | \ NFC_PROTO_FELICA_MASK | \ NFC_PROTO_ISO14443_MASK | \ NFC_PROTO_ISO14443_B_MASK | \ NFC_PROTO_NFC_DEP_MASK) #define NXP_NCI_RF_PLL_UNLOCKED_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x21) #define NXP_NCI_RF_TXLDO_ERROR_NTF nci_opcode_pack(NCI_GID_RF_MGMT, 0x23) static int nxp_nci_open(struct nci_dev *ndev) { struct nxp_nci_info *info = nci_get_drvdata(ndev); int r = 0; mutex_lock(&info->info_lock); if (info->mode != NXP_NCI_MODE_COLD) { r = -EBUSY; goto open_exit; } if (info->phy_ops->set_mode) r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_NCI); info->mode = NXP_NCI_MODE_NCI; open_exit: mutex_unlock(&info->info_lock); return r; } static int nxp_nci_close(struct nci_dev *ndev) { struct nxp_nci_info *info = nci_get_drvdata(ndev); int r = 0; mutex_lock(&info->info_lock); if (info->phy_ops->set_mode) r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD); info->mode = NXP_NCI_MODE_COLD; mutex_unlock(&info->info_lock); return r; } static int nxp_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct nxp_nci_info *info = nci_get_drvdata(ndev); int r; if (!info->phy_ops->write) { kfree_skb(skb); return -EOPNOTSUPP; } if (info->mode != NXP_NCI_MODE_NCI) { kfree_skb(skb); return -EINVAL; } r = info->phy_ops->write(info->phy_id, skb); if (r < 0) { kfree_skb(skb); return r; } consume_skb(skb); return 0; } static int nxp_nci_rf_pll_unlocked_ntf(struct nci_dev *ndev, struct sk_buff *skb) { nfc_err(&ndev->nfc_dev->dev, "PLL didn't lock. Missing or unstable clock?\n"); return 0; } static int nxp_nci_rf_txldo_error_ntf(struct nci_dev *ndev, struct sk_buff *skb) { nfc_err(&ndev->nfc_dev->dev, "RF transmitter couldn't start. Bad power and/or configuration?\n"); return 0; }
static const struct nci_driver_ops nxp_nci_core_ops[] = { { .opcode = NXP_NCI_RF_PLL_UNLOCKED_NTF, .ntf = nxp_nci_rf_pll_unlocked_ntf, }, { .opcode = NXP_NCI_RF_TXLDO_ERROR_NTF, .ntf = nxp_nci_rf_txldo_error_ntf, }, }; static const struct nci_ops nxp_nci_ops = { .open = nxp_nci_open, .close = nxp_nci_close, .send = nxp_nci_send, .fw_download = nxp_nci_fw_download, .core_ops = nxp_nci_core_ops, .n_core_ops = ARRAY_SIZE(nxp_nci_core_ops), }; int nxp_nci_probe(void *phy_id, struct device *pdev, const struct nxp_nci_phy_ops *phy_ops, unsigned int max_payload, struct nci_dev **ndev) { struct nxp_nci_info *info; int r; info = devm_kzalloc(pdev, sizeof(struct nxp_nci_info), GFP_KERNEL); if (!info) return -ENOMEM; info->phy_id = phy_id; info->pdev = pdev; info->phy_ops = phy_ops; info->max_payload = max_payload; INIT_WORK(&info->fw_info.work, nxp_nci_fw_work); init_completion(&info->fw_info.cmd_completion); mutex_init(&info->info_lock); if (info->phy_ops->set_mode) { r = info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD); if (r < 0) return r; } info->mode = NXP_NCI_MODE_COLD; info->ndev = nci_allocate_device(&nxp_nci_ops, NXP_NCI_NFC_PROTOCOLS, NXP_NCI_HDR_LEN, 0); if (!info->ndev) return -ENOMEM; nci_set_parent_dev(info->ndev, pdev); nci_set_drvdata(info->ndev, info); r = nci_register_device(info->ndev); if (r < 0) { nci_free_device(info->ndev); return r; } *ndev = info->ndev; return r; } EXPORT_SYMBOL(nxp_nci_probe); void nxp_nci_remove(struct nci_dev *ndev) { struct nxp_nci_info *info = nci_get_drvdata(ndev); if (info->mode == NXP_NCI_MODE_FW) nxp_nci_fw_work_complete(info, -ESHUTDOWN); cancel_work_sync(&info->fw_info.work); mutex_lock(&info->info_lock); if (info->phy_ops->set_mode) info->phy_ops->set_mode(info->phy_id, NXP_NCI_MODE_COLD); nci_unregister_device(ndev); nci_free_device(ndev); mutex_unlock(&info->info_lock); } EXPORT_SYMBOL(nxp_nci_remove); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("NXP NCI NFC driver"); MODULE_AUTHOR("Clément Perrochaud <[email protected]>");
linux-master
drivers/nfc/nxp-nci/core.c
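The core above never touches a bus directly; every hardware access goes through nxp_nci_phy_ops function pointers, which is what lets the I2C and SPI transports share it. A toy host-C model of that indirection follows; all names in it are hypothetical.

#include <stdio.h>

struct demo_phy_ops {
	int (*write)(void *phy_id, const char *frame);
	int (*set_mode)(void *phy_id, int mode);
};

static int demo_i2c_write(void *phy_id, const char *frame)
{
	printf("i2c write: %s\n", frame);
	return 0;
}

static int demo_i2c_set_mode(void *phy_id, int mode)
{
	printf("i2c set_mode: %d\n", mode);
	return 0;
}

static const struct demo_phy_ops demo_i2c_ops = {
	.write = demo_i2c_write,
	.set_mode = demo_i2c_set_mode,
};

int main(void)
{
	const struct demo_phy_ops *ops = &demo_i2c_ops;

	/* the core checks optional ops before calling, as nxp_nci_open() does */
	if (ops->set_mode)
		ops->set_mode(NULL, 1);
	return ops->write ? ops->write(NULL, "nci frame") : -1;
}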
// SPDX-License-Identifier: GPL-2.0-only /* * Low Level Transport (NDLC) Driver for STMicroelectronics NFC Chip * * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. */ #include <linux/sched.h> #include <net/nfc/nci_core.h> #include "st-nci.h" #define NDLC_TIMER_T1 100 #define NDLC_TIMER_T1_WAIT 400 #define NDLC_TIMER_T2 1200 #define PCB_TYPE_DATAFRAME 0x80 #define PCB_TYPE_SUPERVISOR 0xc0 #define PCB_TYPE_MASK PCB_TYPE_SUPERVISOR #define PCB_SYNC_ACK 0x20 #define PCB_SYNC_NACK 0x10 #define PCB_SYNC_WAIT 0x30 #define PCB_SYNC_NOINFO 0x00 #define PCB_SYNC_MASK PCB_SYNC_WAIT #define PCB_DATAFRAME_RETRANSMIT_YES 0x00 #define PCB_DATAFRAME_RETRANSMIT_NO 0x04 #define PCB_DATAFRAME_RETRANSMIT_MASK PCB_DATAFRAME_RETRANSMIT_NO #define PCB_SUPERVISOR_RETRANSMIT_YES 0x00 #define PCB_SUPERVISOR_RETRANSMIT_NO 0x02 #define PCB_SUPERVISOR_RETRANSMIT_MASK PCB_SUPERVISOR_RETRANSMIT_NO #define PCB_FRAME_CRC_INFO_PRESENT 0x08 #define PCB_FRAME_CRC_INFO_NOTPRESENT 0x00 #define PCB_FRAME_CRC_INFO_MASK PCB_FRAME_CRC_INFO_PRESENT #define NDLC_DUMP_SKB(info, skb) \ do { \ pr_debug("%s:\n", info); \ print_hex_dump(KERN_DEBUG, "ndlc: ", DUMP_PREFIX_OFFSET, \ 16, 1, skb->data, skb->len, 0); \ } while (0) int ndlc_open(struct llt_ndlc *ndlc) { /* toggle reset pin */ ndlc->ops->enable(ndlc->phy_id); ndlc->powered = 1; return 0; } EXPORT_SYMBOL(ndlc_open); void ndlc_close(struct llt_ndlc *ndlc) { struct nci_mode_set_cmd cmd; cmd.cmd_type = ST_NCI_SET_NFC_MODE; cmd.mode = 0; /* toggle reset pin */ ndlc->ops->enable(ndlc->phy_id); nci_prop_cmd(ndlc->ndev, ST_NCI_CORE_PROP, sizeof(struct nci_mode_set_cmd), (__u8 *)&cmd); ndlc->powered = 0; ndlc->ops->disable(ndlc->phy_id); } EXPORT_SYMBOL(ndlc_close); int ndlc_send(struct llt_ndlc *ndlc, struct sk_buff *skb) { /* add ndlc header */ u8 pcb = PCB_TYPE_DATAFRAME | PCB_DATAFRAME_RETRANSMIT_NO | PCB_FRAME_CRC_INFO_NOTPRESENT; *(u8 *)skb_push(skb, 1) = pcb; skb_queue_tail(&ndlc->send_q, skb); schedule_work(&ndlc->sm_work); return 0; } EXPORT_SYMBOL(ndlc_send); static void llt_ndlc_send_queue(struct llt_ndlc *ndlc) { struct sk_buff *skb; int r; unsigned long time_sent; if (ndlc->send_q.qlen) pr_debug("sendQlen=%d unackQlen=%d\n", ndlc->send_q.qlen, ndlc->ack_pending_q.qlen); while (ndlc->send_q.qlen) { skb = skb_dequeue(&ndlc->send_q); NDLC_DUMP_SKB("ndlc frame written", skb); r = ndlc->ops->write(ndlc->phy_id, skb); if (r < 0) { ndlc->hard_fault = r; break; } time_sent = jiffies; *(unsigned long *)skb->cb = time_sent; skb_queue_tail(&ndlc->ack_pending_q, skb); /* start timer t1 for ndlc aknowledge */ ndlc->t1_active = true; mod_timer(&ndlc->t1_timer, time_sent + msecs_to_jiffies(NDLC_TIMER_T1)); /* start timer t2 for chip availability */ ndlc->t2_active = true; mod_timer(&ndlc->t2_timer, time_sent + msecs_to_jiffies(NDLC_TIMER_T2)); } } static void llt_ndlc_requeue_data_pending(struct llt_ndlc *ndlc) { struct sk_buff *skb; u8 pcb; while ((skb = skb_dequeue_tail(&ndlc->ack_pending_q))) { pcb = skb->data[0]; switch (pcb & PCB_TYPE_MASK) { case PCB_TYPE_SUPERVISOR: skb->data[0] = (pcb & ~PCB_SUPERVISOR_RETRANSMIT_MASK) | PCB_SUPERVISOR_RETRANSMIT_YES; break; case PCB_TYPE_DATAFRAME: skb->data[0] = (pcb & ~PCB_DATAFRAME_RETRANSMIT_MASK) | PCB_DATAFRAME_RETRANSMIT_YES; break; default: pr_err("UNKNOWN Packet Control Byte=%d\n", pcb); kfree_skb(skb); continue; } skb_queue_head(&ndlc->send_q, skb); } } static void llt_ndlc_rcv_queue(struct llt_ndlc *ndlc) { struct sk_buff *skb; u8 pcb; unsigned long time_sent; if (ndlc->rcv_q.qlen) pr_debug("rcvQlen=%d\n", 
ndlc->rcv_q.qlen); while ((skb = skb_dequeue(&ndlc->rcv_q)) != NULL) { pcb = skb->data[0]; skb_pull(skb, 1); if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_SUPERVISOR) { switch (pcb & PCB_SYNC_MASK) { case PCB_SYNC_ACK: skb = skb_dequeue(&ndlc->ack_pending_q); kfree_skb(skb); del_timer_sync(&ndlc->t1_timer); del_timer_sync(&ndlc->t2_timer); ndlc->t2_active = false; ndlc->t1_active = false; break; case PCB_SYNC_NACK: llt_ndlc_requeue_data_pending(ndlc); llt_ndlc_send_queue(ndlc); /* start timer t1 for ndlc aknowledge */ time_sent = jiffies; ndlc->t1_active = true; mod_timer(&ndlc->t1_timer, time_sent + msecs_to_jiffies(NDLC_TIMER_T1)); break; case PCB_SYNC_WAIT: time_sent = jiffies; ndlc->t1_active = true; mod_timer(&ndlc->t1_timer, time_sent + msecs_to_jiffies(NDLC_TIMER_T1_WAIT)); break; default: kfree_skb(skb); break; } } else if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_DATAFRAME) { nci_recv_frame(ndlc->ndev, skb); } else { kfree_skb(skb); } } } static void llt_ndlc_sm_work(struct work_struct *work) { struct llt_ndlc *ndlc = container_of(work, struct llt_ndlc, sm_work); llt_ndlc_send_queue(ndlc); llt_ndlc_rcv_queue(ndlc); if (ndlc->t1_active && timer_pending(&ndlc->t1_timer) == 0) { pr_debug ("Handle T1(recv SUPERVISOR) elapsed (T1 now inactive)\n"); ndlc->t1_active = false; llt_ndlc_requeue_data_pending(ndlc); llt_ndlc_send_queue(ndlc); } if (ndlc->t2_active && timer_pending(&ndlc->t2_timer) == 0) { pr_debug("Handle T2(recv DATA) elapsed (T2 now inactive)\n"); ndlc->t2_active = false; ndlc->t1_active = false; del_timer_sync(&ndlc->t1_timer); del_timer_sync(&ndlc->t2_timer); ndlc_close(ndlc); ndlc->hard_fault = -EREMOTEIO; } } void ndlc_recv(struct llt_ndlc *ndlc, struct sk_buff *skb) { if (skb == NULL) { pr_err("NULL Frame -> link is dead\n"); ndlc->hard_fault = -EREMOTEIO; ndlc_close(ndlc); } else { NDLC_DUMP_SKB("incoming frame", skb); skb_queue_tail(&ndlc->rcv_q, skb); } schedule_work(&ndlc->sm_work); } EXPORT_SYMBOL(ndlc_recv); static void ndlc_t1_timeout(struct timer_list *t) { struct llt_ndlc *ndlc = from_timer(ndlc, t, t1_timer); schedule_work(&ndlc->sm_work); } static void ndlc_t2_timeout(struct timer_list *t) { struct llt_ndlc *ndlc = from_timer(ndlc, t, t2_timer); schedule_work(&ndlc->sm_work); } int ndlc_probe(void *phy_id, const struct nfc_phy_ops *phy_ops, struct device *dev, int phy_headroom, int phy_tailroom, struct llt_ndlc **ndlc_id, struct st_nci_se_status *se_status) { struct llt_ndlc *ndlc; ndlc = devm_kzalloc(dev, sizeof(struct llt_ndlc), GFP_KERNEL); if (!ndlc) return -ENOMEM; ndlc->ops = phy_ops; ndlc->phy_id = phy_id; ndlc->dev = dev; ndlc->powered = 0; *ndlc_id = ndlc; /* initialize timers */ timer_setup(&ndlc->t1_timer, ndlc_t1_timeout, 0); timer_setup(&ndlc->t2_timer, ndlc_t2_timeout, 0); skb_queue_head_init(&ndlc->rcv_q); skb_queue_head_init(&ndlc->send_q); skb_queue_head_init(&ndlc->ack_pending_q); INIT_WORK(&ndlc->sm_work, llt_ndlc_sm_work); return st_nci_probe(ndlc, phy_headroom, phy_tailroom, se_status); } EXPORT_SYMBOL(ndlc_probe); void ndlc_remove(struct llt_ndlc *ndlc) { /* cancel timers */ del_timer_sync(&ndlc->t1_timer); del_timer_sync(&ndlc->t2_timer); ndlc->t2_active = false; ndlc->t1_active = false; /* cancel work */ cancel_work_sync(&ndlc->sm_work); st_nci_remove(ndlc->ndev); skb_queue_purge(&ndlc->rcv_q); skb_queue_purge(&ndlc->send_q); } EXPORT_SYMBOL(ndlc_remove);
linux-master
drivers/nfc/st-nci/ndlc.c
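Every NDLC frame starts with a PCB byte whose type, retransmit, and CRC bits are built and rewritten with the masks above. The standalone sketch below reproduces the dataframe case of the retransmit rewrite performed by llt_ndlc_requeue_data_pending(); the defines are copied from the file above.

#include <stdint.h>
#include <stdio.h>

#define PCB_TYPE_DATAFRAME 0x80
#define PCB_TYPE_SUPERVISOR 0xc0
#define PCB_TYPE_MASK PCB_TYPE_SUPERVISOR
#define PCB_DATAFRAME_RETRANSMIT_YES 0x00
#define PCB_DATAFRAME_RETRANSMIT_NO 0x04
#define PCB_DATAFRAME_RETRANSMIT_MASK PCB_DATAFRAME_RETRANSMIT_NO
#define PCB_FRAME_CRC_INFO_NOTPRESENT 0x00

int main(void)
{
	/* first transmission, as built in ndlc_send() */
	uint8_t pcb = PCB_TYPE_DATAFRAME | PCB_DATAFRAME_RETRANSMIT_NO |
		      PCB_FRAME_CRC_INFO_NOTPRESENT;

	printf("first send: 0x%02x\n", pcb);   /* 0x84 */

	/* requeue path: flip the retransmit bit to YES */
	if ((pcb & PCB_TYPE_MASK) == PCB_TYPE_DATAFRAME)
		pcb = (pcb & ~PCB_DATAFRAME_RETRANSMIT_MASK) |
		      PCB_DATAFRAME_RETRANSMIT_YES;

	printf("retransmit: 0x%02x\n", pcb);   /* 0x80 */
	return 0;
}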
// SPDX-License-Identifier: GPL-2.0-only /* * SPI Link Layer for ST NCI based Driver * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/spi/spi.h> #include <linux/gpio/consumer.h> #include <linux/acpi.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/nfc.h> #include <linux/of.h> #include <net/nfc/nci.h> #include "st-nci.h" #define DRIVER_DESC "NCI NFC driver for ST_NCI" /* ndlc header */ #define ST_NCI_FRAME_HEADROOM 1 #define ST_NCI_FRAME_TAILROOM 0 #define ST_NCI_SPI_MIN_SIZE 4 /* PCB(1) + NCI Packet header(3) */ #define ST_NCI_SPI_MAX_SIZE 250 /* req 4.2.1 */ #define ST_NCI_DRIVER_NAME "st_nci" #define ST_NCI_SPI_DRIVER_NAME "st_nci_spi" struct st_nci_spi_phy { struct spi_device *spi_dev; struct llt_ndlc *ndlc; bool irq_active; struct gpio_desc *gpiod_reset; struct st_nci_se_status se_status; }; static int st_nci_spi_enable(void *phy_id) { struct st_nci_spi_phy *phy = phy_id; gpiod_set_value(phy->gpiod_reset, 0); usleep_range(10000, 15000); gpiod_set_value(phy->gpiod_reset, 1); usleep_range(80000, 85000); if (phy->ndlc->powered == 0 && phy->irq_active == 0) { enable_irq(phy->spi_dev->irq); phy->irq_active = true; } return 0; } static void st_nci_spi_disable(void *phy_id) { struct st_nci_spi_phy *phy = phy_id; disable_irq_nosync(phy->spi_dev->irq); phy->irq_active = false; } /* * Writing a frame must not return the number of written bytes. * It must return either zero for success, or <0 for error. * In addition, it must not alter the skb */ static int st_nci_spi_write(void *phy_id, struct sk_buff *skb) { int r; struct st_nci_spi_phy *phy = phy_id; struct spi_device *dev = phy->spi_dev; struct sk_buff *skb_rx; u8 buf[ST_NCI_SPI_MAX_SIZE + NCI_DATA_HDR_SIZE + ST_NCI_FRAME_HEADROOM + ST_NCI_FRAME_TAILROOM]; struct spi_transfer spi_xfer = { .tx_buf = skb->data, .rx_buf = buf, .len = skb->len, }; if (phy->ndlc->hard_fault != 0) return phy->ndlc->hard_fault; r = spi_sync_transfer(dev, &spi_xfer, 1); /* * We may have received some valuable data on miso line. * Send them back in the ndlc state machine. */ if (!r) { skb_rx = alloc_skb(skb->len, GFP_KERNEL); if (!skb_rx) return -ENOMEM; skb_put(skb_rx, skb->len); memcpy(skb_rx->data, buf, skb->len); ndlc_recv(phy->ndlc, skb_rx); } return r; } /* * Reads an ndlc frame and returns it in a newly allocated sk_buff. 
* returns: * 0 : if received frame is complete * -EREMOTEIO : spi read error (fatal) * -EBADMSG : frame was incorrect and discarded * -ENOMEM : cannot allocate skb, frame dropped */ static int st_nci_spi_read(struct st_nci_spi_phy *phy, struct sk_buff **skb) { int r; u8 len; u8 buf[ST_NCI_SPI_MAX_SIZE]; struct spi_device *dev = phy->spi_dev; struct spi_transfer spi_xfer = { .rx_buf = buf, .len = ST_NCI_SPI_MIN_SIZE, }; r = spi_sync_transfer(dev, &spi_xfer, 1); if (r < 0) return -EREMOTEIO; len = be16_to_cpu(*(__be16 *) (buf + 2)); if (len > ST_NCI_SPI_MAX_SIZE) { nfc_err(&dev->dev, "invalid frame len\n"); phy->ndlc->hard_fault = 1; return -EBADMSG; } *skb = alloc_skb(ST_NCI_SPI_MIN_SIZE + len, GFP_KERNEL); if (*skb == NULL) return -ENOMEM; skb_reserve(*skb, ST_NCI_SPI_MIN_SIZE); skb_put(*skb, ST_NCI_SPI_MIN_SIZE); memcpy((*skb)->data, buf, ST_NCI_SPI_MIN_SIZE); if (!len) return 0; spi_xfer.len = len; r = spi_sync_transfer(dev, &spi_xfer, 1); if (r < 0) { kfree_skb(*skb); return -EREMOTEIO; } skb_put(*skb, len); memcpy((*skb)->data + ST_NCI_SPI_MIN_SIZE, buf, len); return 0; } /* * Reads an ndlc frame from the chip. * * On ST21NFCB, IRQ goes in idle state when read starts. */ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id) { struct st_nci_spi_phy *phy = phy_id; struct sk_buff *skb = NULL; int r; if (!phy || !phy->ndlc || irq != phy->spi_dev->irq) { WARN_ON_ONCE(1); return IRQ_NONE; } if (phy->ndlc->hard_fault) return IRQ_HANDLED; if (!phy->ndlc->powered) { st_nci_spi_disable(phy); return IRQ_HANDLED; } r = st_nci_spi_read(phy, &skb); if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG) return IRQ_HANDLED; ndlc_recv(phy->ndlc, skb); return IRQ_HANDLED; } static const struct nfc_phy_ops spi_phy_ops = { .write = st_nci_spi_write, .enable = st_nci_spi_enable, .disable = st_nci_spi_disable, }; static const struct acpi_gpio_params reset_gpios = { 1, 0, false }; static const struct acpi_gpio_mapping acpi_st_nci_gpios[] = { { "reset-gpios", &reset_gpios, 1 }, {}, }; static int st_nci_spi_probe(struct spi_device *dev) { struct st_nci_spi_phy *phy; int r; /* Check SPI platform functionalities */ if (!dev) { pr_debug("%s: dev is NULL. Device is not accessible.\n", __func__); return -ENODEV; }
phy = devm_kzalloc(&dev->dev, sizeof(struct st_nci_spi_phy), GFP_KERNEL); if (!phy) return -ENOMEM; phy->spi_dev = dev; spi_set_drvdata(dev, phy); r = devm_acpi_dev_add_driver_gpios(&dev->dev, acpi_st_nci_gpios); if (r) dev_dbg(&dev->dev, "Unable to add GPIO mapping table\n"); /* Get RESET GPIO */ phy->gpiod_reset = devm_gpiod_get(&dev->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(phy->gpiod_reset)) { nfc_err(&dev->dev, "Unable to get RESET GPIO\n"); return PTR_ERR(phy->gpiod_reset); } phy->se_status.is_ese_present = device_property_read_bool(&dev->dev, "ese-present"); phy->se_status.is_uicc_present = device_property_read_bool(&dev->dev, "uicc-present"); r = ndlc_probe(phy, &spi_phy_ops, &dev->dev, ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM, &phy->ndlc, &phy->se_status); if (r < 0) { nfc_err(&dev->dev, "Unable to register ndlc layer\n"); return r; } phy->irq_active = true; r = devm_request_threaded_irq(&dev->dev, dev->irq, NULL, st_nci_irq_thread_fn, IRQF_ONESHOT, ST_NCI_SPI_DRIVER_NAME, phy); if (r < 0) nfc_err(&dev->dev, "Unable to register IRQ handler\n"); return r; } static void st_nci_spi_remove(struct spi_device *dev) { struct st_nci_spi_phy *phy = spi_get_drvdata(dev); ndlc_remove(phy->ndlc); } static struct spi_device_id st_nci_spi_id_table[] = { {ST_NCI_SPI_DRIVER_NAME, 0}, {"st21nfcb-spi", 0}, {} }; MODULE_DEVICE_TABLE(spi, st_nci_spi_id_table); static const struct acpi_device_id st_nci_spi_acpi_match[] __maybe_unused = { {"SMO2101", 0}, {} }; MODULE_DEVICE_TABLE(acpi, st_nci_spi_acpi_match); static const struct of_device_id of_st_nci_spi_match[] __maybe_unused = { { .compatible = "st,st21nfcb-spi", }, {} }; MODULE_DEVICE_TABLE(of, of_st_nci_spi_match); static struct spi_driver st_nci_spi_driver = { .driver = { .name = ST_NCI_SPI_DRIVER_NAME, .of_match_table = of_match_ptr(of_st_nci_spi_match), .acpi_match_table = ACPI_PTR(st_nci_spi_acpi_match), }, .probe = st_nci_spi_probe, .id_table = st_nci_spi_id_table, .remove = st_nci_spi_remove, }; module_spi_driver(st_nci_spi_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/nfc/st-nci/spi.c
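Unlike I2C, SPI clocks data in both directions at once, which is why st_nci_spi_write() above hands whatever arrived on MISO to ndlc_recv() instead of discarding it. Below is a toy host-C model of that full-duplex detail; fake_spi_xfer() is a hypothetical stand-in for spi_sync_transfer().

#include <stdio.h>
#include <string.h>

/* stand-in for spi_sync_transfer(): rx is filled while tx is sent */
static int fake_spi_xfer(const unsigned char *tx, unsigned char *rx,
			 size_t len)
{
	(void)tx;
	memset(rx, 0xa5, len); /* pretend the chip answered on MISO */
	return 0;
}

int main(void)
{
	unsigned char tx[4] = { 0x80, 0x20, 0x00, 0x00 }; /* illustrative */
	unsigned char rx[4];

	/* a write is also a read: the rx bytes go back into the NDLC
	 * state machine rather than being dropped */
	if (fake_spi_xfer(tx, rx, sizeof(tx)) == 0)
		printf("rx[0]=0x%02x would be fed to ndlc_recv()\n", rx[0]);
	return 0;
}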
// SPDX-License-Identifier: GPL-2.0-only /* * I2C Link Layer for ST NCI NFC controller familly based Driver * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/i2c.h> #include <linux/gpio/consumer.h> #include <linux/acpi.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/nfc.h> #include <linux/of.h> #include "st-nci.h" #define DRIVER_DESC "NCI NFC driver for ST_NCI" /* ndlc header */ #define ST_NCI_FRAME_HEADROOM 1 #define ST_NCI_FRAME_TAILROOM 0 #define ST_NCI_I2C_MIN_SIZE 4 /* PCB(1) + NCI Packet header(3) */ #define ST_NCI_I2C_MAX_SIZE 250 /* req 4.2.1 */ #define ST_NCI_DRIVER_NAME "st_nci" #define ST_NCI_I2C_DRIVER_NAME "st_nci_i2c" struct st_nci_i2c_phy { struct i2c_client *i2c_dev; struct llt_ndlc *ndlc; bool irq_active; struct gpio_desc *gpiod_reset; struct st_nci_se_status se_status; }; static int st_nci_i2c_enable(void *phy_id) { struct st_nci_i2c_phy *phy = phy_id; gpiod_set_value(phy->gpiod_reset, 0); usleep_range(10000, 15000); gpiod_set_value(phy->gpiod_reset, 1); usleep_range(80000, 85000); if (phy->ndlc->powered == 0 && phy->irq_active == 0) { enable_irq(phy->i2c_dev->irq); phy->irq_active = true; } return 0; } static void st_nci_i2c_disable(void *phy_id) { struct st_nci_i2c_phy *phy = phy_id; disable_irq_nosync(phy->i2c_dev->irq); phy->irq_active = false; } /* * Writing a frame must not return the number of written bytes. * It must return either zero for success, or <0 for error. * In addition, it must not alter the skb */ static int st_nci_i2c_write(void *phy_id, struct sk_buff *skb) { int r; struct st_nci_i2c_phy *phy = phy_id; struct i2c_client *client = phy->i2c_dev; if (phy->ndlc->hard_fault != 0) return phy->ndlc->hard_fault; r = i2c_master_send(client, skb->data, skb->len); if (r < 0) { /* Retry, chip was in standby */ usleep_range(1000, 4000); r = i2c_master_send(client, skb->data, skb->len); } if (r >= 0) { if (r != skb->len) r = -EREMOTEIO; else r = 0; } return r; } /* * Reads an ndlc frame and returns it in a newly allocated sk_buff. * returns: * 0 : if received frame is complete * -EREMOTEIO : i2c read error (fatal) * -EBADMSG : frame was incorrect and discarded * -ENOMEM : cannot allocate skb, frame dropped */ static int st_nci_i2c_read(struct st_nci_i2c_phy *phy, struct sk_buff **skb) { int r; u8 len; u8 buf[ST_NCI_I2C_MAX_SIZE]; struct i2c_client *client = phy->i2c_dev; r = i2c_master_recv(client, buf, ST_NCI_I2C_MIN_SIZE); if (r < 0) { /* Retry, chip was in standby */ usleep_range(1000, 4000); r = i2c_master_recv(client, buf, ST_NCI_I2C_MIN_SIZE); } if (r != ST_NCI_I2C_MIN_SIZE) return -EREMOTEIO; len = be16_to_cpu(*(__be16 *) (buf + 2)); if (len > ST_NCI_I2C_MAX_SIZE) { nfc_err(&client->dev, "invalid frame len\n"); return -EBADMSG; } *skb = alloc_skb(ST_NCI_I2C_MIN_SIZE + len, GFP_KERNEL); if (*skb == NULL) return -ENOMEM; skb_reserve(*skb, ST_NCI_I2C_MIN_SIZE); skb_put(*skb, ST_NCI_I2C_MIN_SIZE); memcpy((*skb)->data, buf, ST_NCI_I2C_MIN_SIZE); if (!len) return 0; r = i2c_master_recv(client, buf, len); if (r != len) { kfree_skb(*skb); return -EREMOTEIO; } skb_put(*skb, len); memcpy((*skb)->data + ST_NCI_I2C_MIN_SIZE, buf, len); return 0; } /* * Reads an ndlc frame from the chip. * * On ST_NCI, IRQ goes in idle state when read starts. 
*/ static irqreturn_t st_nci_irq_thread_fn(int irq, void *phy_id) { struct st_nci_i2c_phy *phy = phy_id; struct sk_buff *skb = NULL; int r; if (!phy || !phy->ndlc || irq != phy->i2c_dev->irq) { WARN_ON_ONCE(1); return IRQ_NONE; } if (phy->ndlc->hard_fault) return IRQ_HANDLED; if (!phy->ndlc->powered) { st_nci_i2c_disable(phy); return IRQ_HANDLED; } r = st_nci_i2c_read(phy, &skb); if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG) return IRQ_HANDLED; ndlc_recv(phy->ndlc, skb); return IRQ_HANDLED; } static const struct nfc_phy_ops i2c_phy_ops = { .write = st_nci_i2c_write, .enable = st_nci_i2c_enable, .disable = st_nci_i2c_disable, }; static const struct acpi_gpio_params reset_gpios = { 1, 0, false }; static const struct acpi_gpio_mapping acpi_st_nci_gpios[] = { { "reset-gpios", &reset_gpios, 1 }, {}, }; static int st_nci_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct st_nci_i2c_phy *phy; int r; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { nfc_err(&client->dev, "Need I2C_FUNC_I2C\n"); return -ENODEV; } phy = devm_kzalloc(dev, sizeof(struct st_nci_i2c_phy), GFP_KERNEL); if (!phy) return -ENOMEM; phy->i2c_dev = client; i2c_set_clientdata(client, phy); r = devm_acpi_dev_add_driver_gpios(dev, acpi_st_nci_gpios); if (r) dev_dbg(dev, "Unable to add GPIO mapping table\n"); /* Get RESET GPIO */ phy->gpiod_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(phy->gpiod_reset)) { nfc_err(dev, "Unable to get RESET GPIO\n"); return -ENODEV; } phy->se_status.is_ese_present = device_property_read_bool(dev, "ese-present"); phy->se_status.is_uicc_present = device_property_read_bool(dev, "uicc-present"); r = ndlc_probe(phy, &i2c_phy_ops, &client->dev, ST_NCI_FRAME_HEADROOM, ST_NCI_FRAME_TAILROOM, &phy->ndlc, &phy->se_status); if (r < 0) { nfc_err(&client->dev, "Unable to register ndlc layer\n"); return r; } phy->irq_active = true; r = devm_request_threaded_irq(&client->dev, client->irq, NULL, st_nci_irq_thread_fn, IRQF_ONESHOT, ST_NCI_DRIVER_NAME, phy); if (r < 0) nfc_err(&client->dev, "Unable to register IRQ handler\n"); return r; } static void st_nci_i2c_remove(struct i2c_client *client) { struct st_nci_i2c_phy *phy = i2c_get_clientdata(client); ndlc_remove(phy->ndlc); } static const struct i2c_device_id st_nci_i2c_id_table[] = { {ST_NCI_DRIVER_NAME, 0}, {} }; MODULE_DEVICE_TABLE(i2c, st_nci_i2c_id_table); static const struct acpi_device_id st_nci_i2c_acpi_match[] __maybe_unused = { {"SMO2101"}, {"SMO2102"}, {} }; MODULE_DEVICE_TABLE(acpi, st_nci_i2c_acpi_match); static const struct of_device_id of_st_nci_i2c_match[] __maybe_unused = { { .compatible = "st,st21nfcb-i2c", }, { .compatible = "st,st21nfcb_i2c", }, { .compatible = "st,st21nfcc-i2c", }, {} }; MODULE_DEVICE_TABLE(of, of_st_nci_i2c_match); static struct i2c_driver st_nci_i2c_driver = { .driver = { .name = ST_NCI_I2C_DRIVER_NAME, .of_match_table = of_match_ptr(of_st_nci_i2c_match), .acpi_match_table = ACPI_PTR(st_nci_i2c_acpi_match), }, .probe = st_nci_i2c_probe, .id_table = st_nci_i2c_id_table, .remove = st_nci_i2c_remove, }; module_i2c_driver(st_nci_i2c_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/nfc/st-nci/i2c.c
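Both ST phys above retry one transfer after a short sleep before declaring a fault, because the chip may be in standby between frames. A host-C sketch of that retry idiom follows; flaky_send() is a hypothetical stand-in for i2c_master_send().

#include <errno.h>
#include <stdio.h>
#include <unistd.h>

/* stand-in transfer that fails on the first attempt only */
static int flaky_send(int attempt)
{
	return attempt == 0 ? -EREMOTEIO : 0;
}

int main(void)
{
	int r = flaky_send(0);

	if (r < 0) {
		usleep(2000);      /* like usleep_range(1000, 4000) above */
		r = flaky_send(1); /* retry once: chip was in standby */
	}
	printf("result: %d\n", r); /* only now is a failure a hard fault */
	return 0;
}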
// SPDX-License-Identifier: GPL-2.0-only /* * Proprietary commands extension for STMicroelectronics NFC NCI Chip * * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. */ #include <net/genetlink.h> #include <linux/module.h> #include <linux/nfc.h> #include <linux/delay.h> #include <net/nfc/nci_core.h> #include "st-nci.h" #define ST_NCI_HCI_DM_GETDATA 0x10 #define ST_NCI_HCI_DM_PUTDATA 0x11 #define ST_NCI_HCI_DM_LOAD 0x12 #define ST_NCI_HCI_DM_GETINFO 0x13 #define ST_NCI_HCI_DM_FWUPD_START 0x14 #define ST_NCI_HCI_DM_FWUPD_STOP 0x15 #define ST_NCI_HCI_DM_UPDATE_AID 0x20 #define ST_NCI_HCI_DM_RESET 0x3e #define ST_NCI_HCI_DM_FIELD_GENERATOR 0x32 #define ST_NCI_HCI_DM_VDC_MEASUREMENT_VALUE 0x33 #define ST_NCI_HCI_DM_VDC_VALUE_COMPARISON 0x34 #define ST_NCI_FACTORY_MODE_ON 1 #define ST_NCI_FACTORY_MODE_OFF 0 #define ST_NCI_EVT_POST_DATA 0x02 struct get_param_data { u8 gate; u8 data; } __packed; static int st_nci_factory_mode(struct nfc_dev *dev, void *data, size_t data_len) { struct nci_dev *ndev = nfc_get_drvdata(dev); struct st_nci_info *info = nci_get_drvdata(ndev); if (data_len != 1) return -EINVAL; pr_debug("factory mode: %x\n", ((u8 *)data)[0]); switch (((u8 *)data)[0]) { case ST_NCI_FACTORY_MODE_ON: test_and_set_bit(ST_NCI_FACTORY_MODE, &info->flags); break; case ST_NCI_FACTORY_MODE_OFF: clear_bit(ST_NCI_FACTORY_MODE, &info->flags); break; default: return -EINVAL; } return 0; } static int st_nci_hci_clear_all_pipes(struct nfc_dev *dev, void *data, size_t data_len) { struct nci_dev *ndev = nfc_get_drvdata(dev); return nci_hci_clear_all_pipes(ndev); } static int st_nci_hci_dm_put_data(struct nfc_dev *dev, void *data, size_t data_len) { struct nci_dev *ndev = nfc_get_drvdata(dev); return nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_PUTDATA, data, data_len, NULL); } static int st_nci_hci_dm_update_aid(struct nfc_dev *dev, void *data, size_t data_len) { struct nci_dev *ndev = nfc_get_drvdata(dev); return nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_UPDATE_AID, data, data_len, NULL); } static int st_nci_hci_dm_get_info(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct sk_buff *msg, *skb; struct nci_dev *ndev = nfc_get_drvdata(dev); r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_GETINFO, data, data_len, &skb); if (r) return r; msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI, HCI_DM_GET_INFO, skb->len); if (!msg) { r = -ENOMEM; goto free_skb; } if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) { kfree_skb(msg); r = -ENOBUFS; goto free_skb; } r = nfc_vendor_cmd_reply(msg); free_skb: kfree_skb(skb); return r; } static int st_nci_hci_dm_get_data(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct sk_buff *msg, *skb; struct nci_dev *ndev = nfc_get_drvdata(dev); r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_GETDATA, data, data_len, &skb); if (r) return r; msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI, HCI_DM_GET_DATA, skb->len); if (!msg) { r = -ENOMEM; goto free_skb; } if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) { kfree_skb(msg); r = -ENOBUFS; goto free_skb; } r = nfc_vendor_cmd_reply(msg); free_skb: kfree_skb(skb); return r; } static int st_nci_hci_dm_fwupd_start(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct nci_dev *ndev = nfc_get_drvdata(dev); dev->fw_download_in_progress = true; r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_FWUPD_START, data, data_len, NULL); if (r) 
dev->fw_download_in_progress = false; return r; } static int st_nci_hci_dm_fwupd_end(struct nfc_dev *dev, void *data, size_t data_len) { struct nci_dev *ndev = nfc_get_drvdata(dev); return nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_FWUPD_STOP, data, data_len, NULL); } static int st_nci_hci_dm_direct_load(struct nfc_dev *dev, void *data, size_t data_len) { struct nci_dev *ndev = nfc_get_drvdata(dev); if (dev->fw_download_in_progress) { dev->fw_download_in_progress = false; return nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_LOAD, data, data_len, NULL); } return -EPROTO; } static int st_nci_hci_dm_reset(struct nfc_dev *dev, void *data, size_t data_len) { struct nci_dev *ndev = nfc_get_drvdata(dev); nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_RESET, data, data_len, NULL); msleep(200); return 0; } static int st_nci_hci_get_param(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct sk_buff *msg, *skb; struct nci_dev *ndev = nfc_get_drvdata(dev); struct get_param_data *param = (struct get_param_data *)data; if (data_len < sizeof(struct get_param_data)) return -EPROTO; r = nci_hci_get_param(ndev, param->gate, param->data, &skb); if (r) return r; msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI, HCI_GET_PARAM, skb->len); if (!msg) { r = -ENOMEM; goto free_skb; } if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) { kfree_skb(msg); r = -ENOBUFS; goto free_skb; } r = nfc_vendor_cmd_reply(msg); free_skb: kfree_skb(skb); return r; } static int st_nci_hci_dm_field_generator(struct nfc_dev *dev, void *data, size_t data_len) { struct nci_dev *ndev = nfc_get_drvdata(dev); return nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_FIELD_GENERATOR, data, data_len, NULL); } static int st_nci_hci_dm_vdc_measurement_value(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct sk_buff *msg, *skb; struct nci_dev *ndev = nfc_get_drvdata(dev); if (data_len != 4) return -EPROTO; r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_VDC_MEASUREMENT_VALUE, data, data_len, &skb); if (r) return r; msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI, HCI_DM_VDC_MEASUREMENT_VALUE, skb->len); if (!msg) { r = -ENOMEM; goto free_skb; } if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) { kfree_skb(msg); r = -ENOBUFS; goto free_skb; } r = nfc_vendor_cmd_reply(msg); free_skb: kfree_skb(skb); return r; } static int st_nci_hci_dm_vdc_value_comparison(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct sk_buff *msg, *skb; struct nci_dev *ndev = nfc_get_drvdata(dev); if (data_len != 2) return -EPROTO; r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_HCI_DM_VDC_VALUE_COMPARISON, data, data_len, &skb); if (r) return r; msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI, HCI_DM_VDC_VALUE_COMPARISON, skb->len); if (!msg) { r = -ENOMEM; goto free_skb; } if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) { kfree_skb(msg); r = -ENOBUFS; goto free_skb; } r = nfc_vendor_cmd_reply(msg); free_skb: kfree_skb(skb); return r; } static int st_nci_loopback(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct sk_buff *msg, *skb; struct nci_dev *ndev = nfc_get_drvdata(dev); if (data_len <= 0) return -EPROTO; r = nci_nfcc_loopback(ndev, data, data_len, &skb); if (r < 0) return r; msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI, LOOPBACK, skb->len); if (!msg) { r = -ENOMEM; goto free_skb; } if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, 
skb->data)) { kfree_skb(msg); r = -ENOBUFS; goto free_skb; } r = nfc_vendor_cmd_reply(msg); free_skb: kfree_skb(skb); return r; } static int st_nci_manufacturer_specific(struct nfc_dev *dev, void *data, size_t data_len) { struct sk_buff *msg; struct nci_dev *ndev = nfc_get_drvdata(dev); msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST_NCI_VENDOR_OUI, MANUFACTURER_SPECIFIC, sizeof(ndev->manufact_specific_info)); if (!msg) return -ENOMEM; if (nla_put(msg, NFC_ATTR_VENDOR_DATA, sizeof(ndev->manufact_specific_info), &ndev->manufact_specific_info)) { kfree_skb(msg); return -ENOBUFS; } return nfc_vendor_cmd_reply(msg); } static const struct nfc_vendor_cmd st_nci_vendor_cmds[] = { { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = FACTORY_MODE, .doit = st_nci_factory_mode, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_CLEAR_ALL_PIPES, .doit = st_nci_hci_clear_all_pipes, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_DM_PUT_DATA, .doit = st_nci_hci_dm_put_data, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_DM_UPDATE_AID, .doit = st_nci_hci_dm_update_aid, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_DM_GET_INFO, .doit = st_nci_hci_dm_get_info, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_DM_GET_DATA, .doit = st_nci_hci_dm_get_data, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_DM_DIRECT_LOAD, .doit = st_nci_hci_dm_direct_load, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_DM_RESET, .doit = st_nci_hci_dm_reset, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_GET_PARAM, .doit = st_nci_hci_get_param, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_DM_FIELD_GENERATOR, .doit = st_nci_hci_dm_field_generator, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_DM_FWUPD_START, .doit = st_nci_hci_dm_fwupd_start, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_DM_FWUPD_END, .doit = st_nci_hci_dm_fwupd_end, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = LOOPBACK, .doit = st_nci_loopback, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_DM_VDC_MEASUREMENT_VALUE, .doit = st_nci_hci_dm_vdc_measurement_value, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = HCI_DM_VDC_VALUE_COMPARISON, .doit = st_nci_hci_dm_vdc_value_comparison, }, { .vendor_id = ST_NCI_VENDOR_OUI, .subcmd = MANUFACTURER_SPECIFIC, .doit = st_nci_manufacturer_specific, }, }; int st_nci_vendor_cmds_init(struct nci_dev *ndev) { return nci_set_vendor_cmds(ndev, st_nci_vendor_cmds, sizeof(st_nci_vendor_cmds)); } EXPORT_SYMBOL(st_nci_vendor_cmds_init);
linux-master
drivers/nfc/st-nci/vendor_cmds.c
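The HCI_GET_PARAM handler in the record above reads a packed two-byte request, struct get_param_data { gate, data }, and rejects anything shorter with -EPROTO before touching the fields. A minimal userspace sketch of that same length check follows; the function name, the demo gate value 0x41, and the standalone framing are illustrative, not kernel API.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Mirrors the driver's packed two-byte request: HCI gate + parameter id. */
struct get_param_data {
	uint8_t gate;
	uint8_t data;
} __attribute__((packed));

/* Reject payloads shorter than the header, as st_nci_hci_get_param() does. */
static int parse_get_param(const void *data, size_t data_len,
			   struct get_param_data *out)
{
	if (data_len < sizeof(struct get_param_data))
		return -EPROTO;
	memcpy(out, data, sizeof(*out));
	return 0;
}

int main(void)
{
	uint8_t payload[] = { 0x41, 0x01 };	/* arbitrary gate and param */
	struct get_param_data req;

	if (parse_get_param(payload, sizeof(payload), &req) == 0)
		printf("gate %#x param %#x\n", req.gate, req.data);
	return 0;
}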
// SPDX-License-Identifier: GPL-2.0-only /* * NCI based Driver for STMicroelectronics NFC Chip * * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. */ #include <linux/module.h> #include <linux/nfc.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include "st-nci.h" #define DRIVER_DESC "NCI NFC driver for ST_NCI" #define ST_NCI1_X_PROPRIETARY_ISO15693 0x83 static int st_nci_init(struct nci_dev *ndev) { struct nci_mode_set_cmd cmd; cmd.cmd_type = ST_NCI_SET_NFC_MODE; cmd.mode = 1; return nci_prop_cmd(ndev, ST_NCI_CORE_PROP, sizeof(struct nci_mode_set_cmd), (__u8 *)&cmd); } static int st_nci_open(struct nci_dev *ndev) { struct st_nci_info *info = nci_get_drvdata(ndev); int r; if (test_and_set_bit(ST_NCI_RUNNING, &info->flags)) return 0; r = ndlc_open(info->ndlc); if (r) clear_bit(ST_NCI_RUNNING, &info->flags); return r; } static int st_nci_close(struct nci_dev *ndev) { struct st_nci_info *info = nci_get_drvdata(ndev); if (!test_bit(ST_NCI_RUNNING, &info->flags)) return 0; ndlc_close(info->ndlc); clear_bit(ST_NCI_RUNNING, &info->flags); return 0; } static int st_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct st_nci_info *info = nci_get_drvdata(ndev); skb->dev = (void *)ndev; if (!test_bit(ST_NCI_RUNNING, &info->flags)) return -EBUSY; return ndlc_send(info->ndlc, skb); } static __u32 st_nci_get_rfprotocol(struct nci_dev *ndev, __u8 rf_protocol) { return rf_protocol == ST_NCI1_X_PROPRIETARY_ISO15693 ? NFC_PROTO_ISO15693_MASK : 0; } static int st_nci_prop_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { __u8 status = skb->data[0]; nci_req_complete(ndev, status); return 0; } static const struct nci_driver_ops st_nci_prop_ops[] = { { .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY, ST_NCI_CORE_PROP), .rsp = st_nci_prop_rsp_packet, }, }; static const struct nci_ops st_nci_ops = { .init = st_nci_init, .open = st_nci_open, .close = st_nci_close, .send = st_nci_send, .get_rfprotocol = st_nci_get_rfprotocol, .discover_se = st_nci_discover_se, .enable_se = st_nci_enable_se, .disable_se = st_nci_disable_se, .se_io = st_nci_se_io, .hci_load_session = st_nci_hci_load_session, .hci_event_received = st_nci_hci_event_received, .hci_cmd_received = st_nci_hci_cmd_received, .prop_ops = st_nci_prop_ops, .n_prop_ops = ARRAY_SIZE(st_nci_prop_ops), }; int st_nci_probe(struct llt_ndlc *ndlc, int phy_headroom, int phy_tailroom, struct st_nci_se_status *se_status) { struct st_nci_info *info; int r; u32 protocols; info = devm_kzalloc(ndlc->dev, sizeof(struct st_nci_info), GFP_KERNEL); if (!info) return -ENOMEM; protocols = NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK | NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_ISO15693_MASK | NFC_PROTO_NFC_DEP_MASK; BUILD_BUG_ON(ARRAY_SIZE(st_nci_prop_ops) > NCI_MAX_PROPRIETARY_CMD); ndlc->ndev = nci_allocate_device(&st_nci_ops, protocols, phy_headroom, phy_tailroom); if (!ndlc->ndev) { pr_err("Cannot allocate nfc ndev\n"); return -ENOMEM; } info->ndlc = ndlc; nci_set_drvdata(ndlc->ndev, info); r = st_nci_vendor_cmds_init(ndlc->ndev); if (r) { pr_err("Cannot register proprietary vendor cmds\n"); goto err_reg_dev; } r = nci_register_device(ndlc->ndev); if (r) { pr_err("Cannot register nfc device to nci core\n"); goto err_reg_dev; } return st_nci_se_init(ndlc->ndev, se_status); err_reg_dev: nci_free_device(ndlc->ndev); return r; } EXPORT_SYMBOL_GPL(st_nci_probe); void st_nci_remove(struct nci_dev *ndev) { struct st_nci_info *info = nci_get_drvdata(ndev); ndlc_close(info->ndlc); 
	nci_unregister_device(ndev);
	nci_free_device(ndev);
}
EXPORT_SYMBOL_GPL(st_nci_remove);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/nfc/st-nci/core.c
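st_nci_get_rfprotocol() in the record above maps the chip's proprietary RF protocol byte 0x83 to NFC_PROTO_ISO15693_MASK and everything else to 0. A standalone sketch of that mapping, assuming nothing beyond the 0x83 constant copied from the driver; DEMO_PROTO_ISO15693_MASK is a placeholder value for the demo, not the kernel's definition.

#include <stdint.h>
#include <stdio.h>

#define ST_NCI1_X_PROPRIETARY_ISO15693	0x83	/* from the driver */
#define DEMO_PROTO_ISO15693_MASK	(1u << 7) /* assumed value for the sketch */

/* Same shape as the driver's get_rfprotocol hook: proprietary RF
 * protocol 0x83 means ISO15693, anything else is unsupported here. */
static uint32_t demo_get_rfprotocol(uint8_t rf_protocol)
{
	return rf_protocol == ST_NCI1_X_PROPRIETARY_ISO15693 ?
		DEMO_PROTO_ISO15693_MASK : 0;
}

int main(void)
{
	printf("0x83 -> %#x, 0x04 -> %#x\n",
	       demo_get_rfprotocol(0x83), demo_get_rfprotocol(0x04));
	return 0;
}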
// SPDX-License-Identifier: GPL-2.0-only /* * Secure Element driver for STMicroelectronics NFC NCI chip * * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. */ #include <linux/module.h> #include <linux/nfc.h> #include <linux/delay.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include "st-nci.h" struct st_nci_pipe_info { u8 pipe_state; u8 src_host_id; u8 src_gate_id; u8 dst_host_id; u8 dst_gate_id; } __packed; /* Hosts */ #define ST_NCI_HOST_CONTROLLER_ID 0x00 #define ST_NCI_TERMINAL_HOST_ID 0x01 #define ST_NCI_UICC_HOST_ID 0x02 #define ST_NCI_ESE_HOST_ID 0xc0 /* Gates */ #define ST_NCI_APDU_READER_GATE 0xf0 #define ST_NCI_CONNECTIVITY_GATE 0x41 /* Pipes */ #define ST_NCI_DEVICE_MGNT_PIPE 0x02 /* Connectivity pipe only */ #define ST_NCI_SE_COUNT_PIPE_UICC 0x01 /* Connectivity + APDU Reader pipe */ #define ST_NCI_SE_COUNT_PIPE_EMBEDDED 0x02 #define ST_NCI_SE_TO_HOT_PLUG 1000 /* msecs */ #define ST_NCI_SE_TO_PIPES 2000 #define ST_NCI_EVT_HOT_PLUG_IS_INHIBITED(x) (x->data[0] & 0x80) #define NCI_HCI_APDU_PARAM_ATR 0x01 #define NCI_HCI_ADMIN_PARAM_SESSION_IDENTITY 0x01 #define NCI_HCI_ADMIN_PARAM_WHITELIST 0x03 #define NCI_HCI_ADMIN_PARAM_HOST_LIST 0x04 #define ST_NCI_EVT_SE_HARD_RESET 0x20 #define ST_NCI_EVT_TRANSMIT_DATA 0x10 #define ST_NCI_EVT_WTX_REQUEST 0x11 #define ST_NCI_EVT_SE_SOFT_RESET 0x11 #define ST_NCI_EVT_SE_END_OF_APDU_TRANSFER 0x21 #define ST_NCI_EVT_HOT_PLUG 0x03 #define ST_NCI_SE_MODE_OFF 0x00 #define ST_NCI_SE_MODE_ON 0x01 #define ST_NCI_EVT_CONNECTIVITY 0x10 #define ST_NCI_EVT_TRANSACTION 0x12 #define ST_NCI_DM_GETINFO 0x13 #define ST_NCI_DM_GETINFO_PIPE_LIST 0x02 #define ST_NCI_DM_GETINFO_PIPE_INFO 0x01 #define ST_NCI_DM_PIPE_CREATED 0x02 #define ST_NCI_DM_PIPE_OPEN 0x04 #define ST_NCI_DM_RF_ACTIVE 0x80 #define ST_NCI_DM_DISCONNECT 0x30 #define ST_NCI_DM_IS_PIPE_OPEN(p) \ ((p & 0x0f) == (ST_NCI_DM_PIPE_CREATED | ST_NCI_DM_PIPE_OPEN)) #define ST_NCI_ATR_DEFAULT_BWI 0x04 /* * WT = 2^BWI/10[s], convert into msecs and add a secure * room by increasing by 2 this timeout */ #define ST_NCI_BWI_TO_TIMEOUT(x) ((1 << x) * 200) #define ST_NCI_ATR_GET_Y_FROM_TD(x) (x >> 4) /* If TA is present bit 0 is set */ #define ST_NCI_ATR_TA_PRESENT(x) (x & 0x01) /* If TB is present bit 1 is set */ #define ST_NCI_ATR_TB_PRESENT(x) (x & 0x02) #define ST_NCI_NUM_DEVICES 256 static DECLARE_BITMAP(dev_mask, ST_NCI_NUM_DEVICES); /* Here are the mandatory pipe for st_nci */ static struct nci_hci_gate st_nci_gates[] = { {NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PIPE, ST_NCI_HOST_CONTROLLER_ID}, {NCI_HCI_LINK_MGMT_GATE, NCI_HCI_LINK_MGMT_PIPE, ST_NCI_HOST_CONTROLLER_ID}, {ST_NCI_DEVICE_MGNT_GATE, ST_NCI_DEVICE_MGNT_PIPE, ST_NCI_HOST_CONTROLLER_ID}, {NCI_HCI_IDENTITY_MGMT_GATE, NCI_HCI_INVALID_PIPE, ST_NCI_HOST_CONTROLLER_ID}, /* Secure element pipes are created by secure element host */ {ST_NCI_CONNECTIVITY_GATE, NCI_HCI_DO_NOT_OPEN_PIPE, ST_NCI_HOST_CONTROLLER_ID}, {ST_NCI_APDU_READER_GATE, NCI_HCI_DO_NOT_OPEN_PIPE, ST_NCI_HOST_CONTROLLER_ID}, }; static u8 st_nci_se_get_bwi(struct nci_dev *ndev) { int i; u8 td; struct st_nci_info *info = nci_get_drvdata(ndev); /* Bits 8 to 5 of the first TB for T=1 encode BWI from zero to nine */ for (i = 1; i < ST_NCI_ESE_MAX_LENGTH; i++) { td = ST_NCI_ATR_GET_Y_FROM_TD(info->se_info.atr[i]); if (ST_NCI_ATR_TA_PRESENT(td)) i++; if (ST_NCI_ATR_TB_PRESENT(td)) { i++; return info->se_info.atr[i] >> 4; } } return ST_NCI_ATR_DEFAULT_BWI; } static void st_nci_se_get_atr(struct nci_dev *ndev) { struct st_nci_info *info = nci_get_drvdata(ndev); int r; 
struct sk_buff *skb; r = nci_hci_get_param(ndev, ST_NCI_APDU_READER_GATE, NCI_HCI_APDU_PARAM_ATR, &skb); if (r < 0) return; if (skb->len <= ST_NCI_ESE_MAX_LENGTH) { memcpy(info->se_info.atr, skb->data, skb->len); info->se_info.wt_timeout = ST_NCI_BWI_TO_TIMEOUT(st_nci_se_get_bwi(ndev)); } kfree_skb(skb); } int st_nci_hci_load_session(struct nci_dev *ndev) { int i, j, r; struct sk_buff *skb_pipe_list, *skb_pipe_info; struct st_nci_pipe_info *dm_pipe_info; u8 pipe_list[] = { ST_NCI_DM_GETINFO_PIPE_LIST, ST_NCI_TERMINAL_HOST_ID}; u8 pipe_info[] = { ST_NCI_DM_GETINFO_PIPE_INFO, ST_NCI_TERMINAL_HOST_ID, 0}; /* On ST_NCI device pipes number are dynamics * If pipes are already created, hci_dev_up will fail. * Doing a clear all pipe is a bad idea because: * - It does useless EEPROM cycling * - It might cause issue for secure elements support * (such as removing connectivity or APDU reader pipe) * A better approach on ST_NCI is to: * - get a pipe list for each host. * (eg: ST_NCI_HOST_CONTROLLER_ID for now). * (TODO Later on UICC HOST and eSE HOST) * - get pipe information * - match retrieved pipe list in st_nci_gates * ST_NCI_DEVICE_MGNT_GATE is a proprietary gate * with ST_NCI_DEVICE_MGNT_PIPE. * Pipe can be closed and need to be open. */ r = nci_hci_connect_gate(ndev, ST_NCI_HOST_CONTROLLER_ID, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_DEVICE_MGNT_PIPE); if (r < 0) return r; /* Get pipe list */ r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_DM_GETINFO, pipe_list, sizeof(pipe_list), &skb_pipe_list); if (r < 0) return r; /* Complete the existing gate_pipe table */ for (i = 0; i < skb_pipe_list->len; i++) { pipe_info[2] = skb_pipe_list->data[i]; r = nci_hci_send_cmd(ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_DM_GETINFO, pipe_info, sizeof(pipe_info), &skb_pipe_info); if (r) continue; /* * Match pipe ID and gate ID * Output format from ST21NFC_DM_GETINFO is: * - pipe state (1byte) * - source hid (1byte) * - source gid (1byte) * - destination hid (1byte) * - destination gid (1byte) */ dm_pipe_info = (struct st_nci_pipe_info *)skb_pipe_info->data; if (dm_pipe_info->dst_gate_id == ST_NCI_APDU_READER_GATE && dm_pipe_info->src_host_id == ST_NCI_UICC_HOST_ID) { pr_err("Unexpected apdu_reader pipe on host %x\n", dm_pipe_info->src_host_id); kfree_skb(skb_pipe_info); continue; } for (j = 3; (j < ARRAY_SIZE(st_nci_gates)) && (st_nci_gates[j].gate != dm_pipe_info->dst_gate_id); j++) ; if (j < ARRAY_SIZE(st_nci_gates) && st_nci_gates[j].gate == dm_pipe_info->dst_gate_id && ST_NCI_DM_IS_PIPE_OPEN(dm_pipe_info->pipe_state)) { ndev->hci_dev->init_data.gates[j].pipe = pipe_info[2]; ndev->hci_dev->gate2pipe[st_nci_gates[j].gate] = pipe_info[2]; ndev->hci_dev->pipes[pipe_info[2]].gate = st_nci_gates[j].gate; ndev->hci_dev->pipes[pipe_info[2]].host = dm_pipe_info->src_host_id; } kfree_skb(skb_pipe_info); } /* * 3 gates have a well known pipe ID. Only NCI_HCI_LINK_MGMT_GATE * is not yet open at this stage. 
*/ r = nci_hci_connect_gate(ndev, ST_NCI_HOST_CONTROLLER_ID, NCI_HCI_LINK_MGMT_GATE, NCI_HCI_LINK_MGMT_PIPE); kfree_skb(skb_pipe_list); return r; } EXPORT_SYMBOL_GPL(st_nci_hci_load_session); static void st_nci_hci_admin_event_received(struct nci_dev *ndev, u8 event, struct sk_buff *skb) { struct st_nci_info *info = nci_get_drvdata(ndev); switch (event) { case ST_NCI_EVT_HOT_PLUG: if (info->se_info.se_active) { if (!ST_NCI_EVT_HOT_PLUG_IS_INHIBITED(skb)) { del_timer_sync(&info->se_info.se_active_timer); info->se_info.se_active = false; complete(&info->se_info.req_completion); } else { mod_timer(&info->se_info.se_active_timer, jiffies + msecs_to_jiffies(ST_NCI_SE_TO_PIPES)); } } break; default: nfc_err(&ndev->nfc_dev->dev, "Unexpected event on admin gate\n"); } } static int st_nci_hci_apdu_reader_event_received(struct nci_dev *ndev, u8 event, struct sk_buff *skb) { struct st_nci_info *info = nci_get_drvdata(ndev); pr_debug("apdu reader gate event: %x\n", event); switch (event) { case ST_NCI_EVT_TRANSMIT_DATA: del_timer_sync(&info->se_info.bwi_timer); info->se_info.bwi_active = false; info->se_info.cb(info->se_info.cb_context, skb->data, skb->len, 0); break; case ST_NCI_EVT_WTX_REQUEST: mod_timer(&info->se_info.bwi_timer, jiffies + msecs_to_jiffies(info->se_info.wt_timeout)); break; default: nfc_err(&ndev->nfc_dev->dev, "Unexpected event on apdu reader gate\n"); return 1; } kfree_skb(skb); return 0; } /* * Returns: * <= 0: driver handled the event, skb consumed * 1: driver does not handle the event, please do standard processing */ static int st_nci_hci_connectivity_event_received(struct nci_dev *ndev, u8 host, u8 event, struct sk_buff *skb) { int r = 0; struct device *dev = &ndev->nfc_dev->dev; struct nfc_evt_transaction *transaction; u32 aid_len; u8 params_len; pr_debug("connectivity gate event: %x\n", event); switch (event) { case ST_NCI_EVT_CONNECTIVITY: r = nfc_se_connectivity(ndev->nfc_dev, host); break; case ST_NCI_EVT_TRANSACTION: /* According to specification etsi 102 622 * 11.2.2.4 EVT_TRANSACTION Table 52 * Description Tag Length * AID 81 5 to 16 * PARAMETERS 82 0 to 255 * * The key differences are aid storage length is variably sized * in the packet, but fixed in nfc_evt_transaction, and that * the aid_len is u8 in the packet, but u32 in the structure, * and the tags in the packet are not included in * nfc_evt_transaction. * * size(b): 1 1 5-16 1 1 0-255 * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4 * mem name: aid_tag(M) aid_len aid params_tag(M) params_len params * example: 0x81 5-16 X 0x82 0-255 X */ if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; aid_len = skb->data[1]; if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid)) return -EPROTO; params_len = skb->data[aid_len + 3]; /* Verify PARAMETERS tag is (82), and final check that there is * enough space in the packet to read everything. 
*/ if (skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG || skb->len < aid_len + 4 + params_len) return -EPROTO; transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL); if (!transaction) return -ENOMEM; transaction->aid_len = aid_len; transaction->params_len = params_len; memcpy(transaction->aid, &skb->data[2], aid_len); memcpy(transaction->params, &skb->data[aid_len + 4], params_len); r = nfc_se_transaction(ndev->nfc_dev, host, transaction); break; default: nfc_err(&ndev->nfc_dev->dev, "Unexpected event on connectivity gate\n"); return 1; } kfree_skb(skb); return r; } void st_nci_hci_event_received(struct nci_dev *ndev, u8 pipe, u8 event, struct sk_buff *skb) { u8 gate = ndev->hci_dev->pipes[pipe].gate; u8 host = ndev->hci_dev->pipes[pipe].host; switch (gate) { case NCI_HCI_ADMIN_GATE: st_nci_hci_admin_event_received(ndev, event, skb); break; case ST_NCI_APDU_READER_GATE: st_nci_hci_apdu_reader_event_received(ndev, event, skb); break; case ST_NCI_CONNECTIVITY_GATE: st_nci_hci_connectivity_event_received(ndev, host, event, skb); break; } } EXPORT_SYMBOL_GPL(st_nci_hci_event_received); void st_nci_hci_cmd_received(struct nci_dev *ndev, u8 pipe, u8 cmd, struct sk_buff *skb) { struct st_nci_info *info = nci_get_drvdata(ndev); u8 gate = ndev->hci_dev->pipes[pipe].gate; pr_debug("cmd: %x\n", cmd); switch (cmd) { case NCI_HCI_ANY_OPEN_PIPE: if (gate != ST_NCI_APDU_READER_GATE && ndev->hci_dev->pipes[pipe].host != ST_NCI_UICC_HOST_ID) ndev->hci_dev->count_pipes++; if (ndev->hci_dev->count_pipes == ndev->hci_dev->expected_pipes) { del_timer_sync(&info->se_info.se_active_timer); info->se_info.se_active = false; ndev->hci_dev->count_pipes = 0; complete(&info->se_info.req_completion); } break; } } EXPORT_SYMBOL_GPL(st_nci_hci_cmd_received); static int st_nci_control_se(struct nci_dev *ndev, u8 se_idx, u8 state) { struct st_nci_info *info = nci_get_drvdata(ndev); int r, i; struct sk_buff *sk_host_list; u8 host_id; switch (se_idx) { case ST_NCI_UICC_HOST_ID: ndev->hci_dev->count_pipes = 0; ndev->hci_dev->expected_pipes = ST_NCI_SE_COUNT_PIPE_UICC; break; case ST_NCI_ESE_HOST_ID: ndev->hci_dev->count_pipes = 0; ndev->hci_dev->expected_pipes = ST_NCI_SE_COUNT_PIPE_EMBEDDED; break; default: return -EINVAL; } /* * Wait for an EVT_HOT_PLUG in order to * retrieve a relevant host list. */ reinit_completion(&info->se_info.req_completion); r = nci_nfcee_mode_set(ndev, se_idx, state); if (r != NCI_STATUS_OK) return r; mod_timer(&info->se_info.se_active_timer, jiffies + msecs_to_jiffies(ST_NCI_SE_TO_HOT_PLUG)); info->se_info.se_active = true; /* Ignore return value and check in any case the host_list */ wait_for_completion_interruptible(&info->se_info.req_completion); /* There might be some "collision" after receiving a HOT_PLUG event * This may cause the CLF to not answer to the next hci command. * There is no possible synchronization to prevent this. * Adding a small delay is the only way to solve the issue. 
*/ if (info->se_info.se_status->is_ese_present && info->se_info.se_status->is_uicc_present) usleep_range(15000, 20000); r = nci_hci_get_param(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PARAM_HOST_LIST, &sk_host_list); if (r != NCI_HCI_ANY_OK) return r; for (i = 0; i < sk_host_list->len && sk_host_list->data[i] != se_idx; i++) ; host_id = sk_host_list->data[i]; kfree_skb(sk_host_list); if (state == ST_NCI_SE_MODE_ON && host_id == se_idx) return se_idx; else if (state == ST_NCI_SE_MODE_OFF && host_id != se_idx) return se_idx; return -1; } int st_nci_disable_se(struct nci_dev *ndev, u32 se_idx) { int r; /* * According to upper layer, se_idx == NFC_SE_UICC when * info->se_info.se_status->is_uicc_enable is true should never happen * Same for eSE. */ r = st_nci_control_se(ndev, se_idx, ST_NCI_SE_MODE_OFF); if (r < 0) { /* Do best effort to release SWP */ if (se_idx == NFC_SE_EMBEDDED) { r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE, ST_NCI_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0); } return r; } return 0; } EXPORT_SYMBOL_GPL(st_nci_disable_se); int st_nci_enable_se(struct nci_dev *ndev, u32 se_idx) { int r; /* * According to upper layer, se_idx == NFC_SE_UICC when * info->se_info.se_status->is_uicc_enable is true should never happen. * Same for eSE. */ r = st_nci_control_se(ndev, se_idx, ST_NCI_SE_MODE_ON); if (r == ST_NCI_ESE_HOST_ID) { st_nci_se_get_atr(ndev); r = nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE, ST_NCI_EVT_SE_SOFT_RESET, NULL, 0); } if (r < 0) { /* * The activation procedure failed, the secure element * is not connected. Remove from the list. */ nfc_remove_se(ndev->nfc_dev, se_idx); return r; } return 0; } EXPORT_SYMBOL_GPL(st_nci_enable_se); static int st_nci_hci_network_init(struct nci_dev *ndev) { struct st_nci_info *info = nci_get_drvdata(ndev); struct core_conn_create_dest_spec_params *dest_params; struct dest_spec_params spec_params; struct nci_conn_info *conn_info; int r, dev_num; dest_params = kzalloc(sizeof(struct core_conn_create_dest_spec_params) + sizeof(struct dest_spec_params), GFP_KERNEL); if (dest_params == NULL) return -ENOMEM; dest_params->type = NCI_DESTINATION_SPECIFIC_PARAM_NFCEE_TYPE; dest_params->length = sizeof(struct dest_spec_params); spec_params.id = ndev->hci_dev->nfcee_id; spec_params.protocol = NCI_NFCEE_INTERFACE_HCI_ACCESS; memcpy(dest_params->value, &spec_params, sizeof(struct dest_spec_params)); r = nci_core_conn_create(ndev, NCI_DESTINATION_NFCEE, 1, sizeof(struct core_conn_create_dest_spec_params) + sizeof(struct dest_spec_params), dest_params); if (r != NCI_STATUS_OK) goto free_dest_params; conn_info = ndev->hci_dev->conn_info; if (!conn_info) goto free_dest_params; ndev->hci_dev->init_data.gate_count = ARRAY_SIZE(st_nci_gates); memcpy(ndev->hci_dev->init_data.gates, st_nci_gates, sizeof(st_nci_gates)); /* * Session id must include the driver name + i2c bus addr * persistent info to discriminate 2 identical chips */ dev_num = find_first_zero_bit(dev_mask, ST_NCI_NUM_DEVICES); if (dev_num >= ST_NCI_NUM_DEVICES) { r = -ENODEV; goto free_dest_params; } scnprintf(ndev->hci_dev->init_data.session_id, sizeof(ndev->hci_dev->init_data.session_id), "%s%2x", "ST21BH", dev_num); r = nci_hci_dev_session_init(ndev); if (r != NCI_HCI_ANY_OK) goto free_dest_params; /* * In factory mode, we prevent secure elements activation * by disabling nfcee on the current HCI connection id. * HCI will be used here only for proprietary commands. 
*/ if (test_bit(ST_NCI_FACTORY_MODE, &info->flags)) r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->dest_params->id, NCI_NFCEE_DISABLE); else r = nci_nfcee_mode_set(ndev, ndev->hci_dev->conn_info->dest_params->id, NCI_NFCEE_ENABLE); free_dest_params: kfree(dest_params); return r; } int st_nci_discover_se(struct nci_dev *ndev) { u8 white_list[2]; int r, wl_size = 0; int se_count = 0; struct st_nci_info *info = nci_get_drvdata(ndev); r = st_nci_hci_network_init(ndev); if (r != 0) return r; if (test_bit(ST_NCI_FACTORY_MODE, &info->flags)) return 0; if (info->se_info.se_status->is_uicc_present) white_list[wl_size++] = ST_NCI_UICC_HOST_ID; if (info->se_info.se_status->is_ese_present) white_list[wl_size++] = ST_NCI_ESE_HOST_ID; if (wl_size) { r = nci_hci_set_param(ndev, NCI_HCI_ADMIN_GATE, NCI_HCI_ADMIN_PARAM_WHITELIST, white_list, wl_size); if (r != NCI_HCI_ANY_OK) return r; } if (info->se_info.se_status->is_uicc_present) { nfc_add_se(ndev->nfc_dev, ST_NCI_UICC_HOST_ID, NFC_SE_UICC); se_count++; } if (info->se_info.se_status->is_ese_present) { nfc_add_se(ndev->nfc_dev, ST_NCI_ESE_HOST_ID, NFC_SE_EMBEDDED); se_count++; } return !se_count; } EXPORT_SYMBOL_GPL(st_nci_discover_se); int st_nci_se_io(struct nci_dev *ndev, u32 se_idx, u8 *apdu, size_t apdu_length, se_io_cb_t cb, void *cb_context) { struct st_nci_info *info = nci_get_drvdata(ndev); switch (se_idx) { case ST_NCI_ESE_HOST_ID: info->se_info.cb = cb; info->se_info.cb_context = cb_context; mod_timer(&info->se_info.bwi_timer, jiffies + msecs_to_jiffies(info->se_info.wt_timeout)); info->se_info.bwi_active = true; return nci_hci_send_event(ndev, ST_NCI_APDU_READER_GATE, ST_NCI_EVT_TRANSMIT_DATA, apdu, apdu_length); default: /* Need to free cb_context here as at the moment we can't * clearly indicate to the caller if the callback function * would be called (and free it) or not. In both cases a * negative value may be returned to the caller. */ kfree(cb_context); return -ENODEV; } } EXPORT_SYMBOL(st_nci_se_io); static void st_nci_se_wt_timeout(struct timer_list *t) { /* * No answer from the secure element * within the defined timeout. * Let's send a reset request as recovery procedure. * According to the situation, we first try to send a software reset * to the secure element. If the next command is still not * answering in time, we send to the CLF a secure element hardware * reset request. 
*/ /* hardware reset managed through VCC_UICC_OUT power supply */ u8 param = 0x01; struct st_nci_info *info = from_timer(info, t, se_info.bwi_timer); info->se_info.bwi_active = false; if (!info->se_info.xch_error) { info->se_info.xch_error = true; nci_hci_send_event(info->ndlc->ndev, ST_NCI_APDU_READER_GATE, ST_NCI_EVT_SE_SOFT_RESET, NULL, 0); } else { info->se_info.xch_error = false; nci_hci_send_event(info->ndlc->ndev, ST_NCI_DEVICE_MGNT_GATE, ST_NCI_EVT_SE_HARD_RESET, &param, 1); } info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME); } static void st_nci_se_activation_timeout(struct timer_list *t) { struct st_nci_info *info = from_timer(info, t, se_info.se_active_timer); info->se_info.se_active = false; complete(&info->se_info.req_completion); } int st_nci_se_init(struct nci_dev *ndev, struct st_nci_se_status *se_status) { struct st_nci_info *info = nci_get_drvdata(ndev); init_completion(&info->se_info.req_completion); /* initialize timers */ timer_setup(&info->se_info.bwi_timer, st_nci_se_wt_timeout, 0); info->se_info.bwi_active = false; timer_setup(&info->se_info.se_active_timer, st_nci_se_activation_timeout, 0); info->se_info.se_active = false; info->se_info.xch_error = false; info->se_info.wt_timeout = ST_NCI_BWI_TO_TIMEOUT(ST_NCI_ATR_DEFAULT_BWI); info->se_info.se_status = se_status; return 0; } EXPORT_SYMBOL(st_nci_se_init); void st_nci_se_deinit(struct nci_dev *ndev) { struct st_nci_info *info = nci_get_drvdata(ndev); if (info->se_info.bwi_active) del_timer_sync(&info->se_info.bwi_timer); if (info->se_info.se_active) del_timer_sync(&info->se_info.se_active_timer); info->se_info.se_active = false; info->se_info.bwi_active = false; } EXPORT_SYMBOL(st_nci_se_deinit);
linux-master
drivers/nfc/st-nci/se.c
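The connectivity-gate handler in the record above documents the ETSI 102 622 EVT_TRANSACTION layout byte by byte: AID tag 0x81 with a 5-16 byte value, then PARAMETERS tag 0x82 with 0-255 bytes. A hedged userspace restatement of exactly those bounds checks, using the offsets from the driver comment; the function and struct shapes are illustrative.

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define AID_TAG		0x81
#define PARAMS_TAG	0x82
#define MAX_AID_LEN	16

/* Offsets, as in the driver comment:
 * [0] aid_tag, [1] aid_len, [2..] aid,
 * [aid_len+2] params_tag, [aid_len+3] params_len, [aid_len+4..] params
 */
static int parse_evt_transaction(const uint8_t *buf, size_t len,
				 uint8_t aid[MAX_AID_LEN], uint8_t *aid_len,
				 const uint8_t **params, uint8_t *params_len)
{
	if (len < 2 || buf[0] != AID_TAG)
		return -EPROTO;
	*aid_len = buf[1];
	if (len < (size_t)*aid_len + 4 || *aid_len > MAX_AID_LEN)
		return -EPROTO;
	*params_len = buf[*aid_len + 3];
	if (buf[*aid_len + 2] != PARAMS_TAG ||
	    len < (size_t)*aid_len + 4 + *params_len)
		return -EPROTO;
	memcpy(aid, &buf[2], *aid_len);
	*params = &buf[*aid_len + 4];
	return 0;
}

int main(void)
{
	/* aid_tag, aid_len=5, 5-byte AID, params_tag, params_len=2, params */
	uint8_t evt[] = { 0x81, 5, 1, 2, 3, 4, 5, 0x82, 2, 0xaa, 0xbb };
	uint8_t aid[MAX_AID_LEN], aid_len, params_len;
	const uint8_t *params;

	if (!parse_evt_transaction(evt, sizeof(evt), aid, &aid_len,
				   &params, &params_len))
		printf("aid_len=%u params_len=%u params[0]=%#x\n",
		       aid_len, params_len, params[0]);
	return 0;
}

On the BWI arithmetic earlier in the same file: WT = 2^BWI / 10 seconds, so ST_NCI_BWI_TO_TIMEOUT's (1 << bwi) * 200 is that value in milliseconds doubled as the safety margin the comment describes; for the default BWI of 4, the nominal 1.6 s becomes a 3200 ms timeout.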
// SPDX-License-Identifier: GPL-2.0-only
/*
 * I2C Link Layer for ST21NFCA HCI based Driver
 * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/crc-ccitt.h>
#include <linux/module.h>
#include <linux/i2c.h>
#include <linux/gpio/consumer.h>
#include <linux/of_irq.h>
#include <linux/of_gpio.h>
#include <linux/acpi.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/nfc.h>
#include <linux/firmware.h>
#include <net/nfc/hci.h>
#include <net/nfc/llc.h>
#include <net/nfc/nfc.h>

#include "st21nfca.h"

/*
 * Every frame starts with ST21NFCA_SOF_EOF and ends with ST21NFCA_SOF_EOF.
 * Because ST21NFCA_SOF_EOF is a possible data value, a mechanism called
 * byte stuffing has been introduced.
 *
 * if byte == ST21NFCA_SOF_EOF or ST21NFCA_ESCAPE_BYTE_STUFFING
 * - insert ST21NFCA_ESCAPE_BYTE_STUFFING (escape byte)
 * - xor byte with ST21NFCA_BYTE_STUFFING_MASK
 */
#define ST21NFCA_SOF_EOF		0x7e
#define ST21NFCA_BYTE_STUFFING_MASK	0x20
#define ST21NFCA_ESCAPE_BYTE_STUFFING	0x7d

/* SOF + 00 */
#define ST21NFCA_FRAME_HEADROOM 2

/* 2 bytes crc + EOF */
#define ST21NFCA_FRAME_TAILROOM 3

#define IS_START_OF_FRAME(buf) (buf[0] == ST21NFCA_SOF_EOF && \
				buf[1] == 0)

#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci"
#define ST21NFCA_HCI_I2C_DRIVER_NAME "st21nfca_hci_i2c"

struct st21nfca_i2c_phy {
	struct i2c_client *i2c_dev;
	struct nfc_hci_dev *hdev;
	struct gpio_desc *gpiod_ena;
	struct st21nfca_se_status se_status;
	struct sk_buff *pending_skb;
	int current_read_len;
	/*
	 * crc might have failed because the i2c macro
	 * is disabled due to other interface activity
	 */
	int crc_trials;
	int powered;
	int run_mode;
	/*
	 * < 0 if hardware error occurred (e.g. i2c err)
	 * and prevents normal operation.
	 */
	int hard_fault;
	struct mutex phy_lock;
};

static const u8 len_seq[] = { 16, 24, 12, 29 };
static const u16 wait_tab[] = { 2, 3, 5, 15, 20, 40 };

#define I2C_DUMP_SKB(info, skb)					\
do {								\
	pr_debug("%s:\n", info);				\
	print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET,	\
		       16, 1, (skb)->data, (skb)->len, 0);	\
} while (0)

/*
 * In order to get the CLF in a known state we generate an internal reboot
 * using a proprietary command.
 * Once the reboot is completed, we expect to receive a buffer filled with
 * ST21NFCA_SOF_EOF.
*/ static int st21nfca_hci_platform_init(struct st21nfca_i2c_phy *phy) { u16 wait_reboot[] = { 50, 300, 1000 }; char reboot_cmd[] = { 0x7E, 0x66, 0x48, 0xF6, 0x7E }; u8 tmp[ST21NFCA_HCI_LLC_MAX_SIZE]; int i, r = -1; for (i = 0; i < ARRAY_SIZE(wait_reboot) && r < 0; i++) { r = i2c_master_send(phy->i2c_dev, reboot_cmd, sizeof(reboot_cmd)); if (r < 0) msleep(wait_reboot[i]); } if (r < 0) return r; /* CLF is spending about 20ms to do an internal reboot */ msleep(20); r = -1; for (i = 0; i < ARRAY_SIZE(wait_reboot) && r < 0; i++) { r = i2c_master_recv(phy->i2c_dev, tmp, ST21NFCA_HCI_LLC_MAX_SIZE); if (r < 0) msleep(wait_reboot[i]); } if (r < 0) return r; for (i = 0; i < ST21NFCA_HCI_LLC_MAX_SIZE && tmp[i] == ST21NFCA_SOF_EOF; i++) ; if (r != ST21NFCA_HCI_LLC_MAX_SIZE) return -ENODEV; usleep_range(1000, 1500); return 0; } static int st21nfca_hci_i2c_enable(void *phy_id) { struct st21nfca_i2c_phy *phy = phy_id; gpiod_set_value(phy->gpiod_ena, 1); phy->powered = 1; phy->run_mode = ST21NFCA_HCI_MODE; usleep_range(10000, 15000); return 0; } static void st21nfca_hci_i2c_disable(void *phy_id) { struct st21nfca_i2c_phy *phy = phy_id; gpiod_set_value(phy->gpiod_ena, 0); phy->powered = 0; } static void st21nfca_hci_add_len_crc(struct sk_buff *skb) { u16 crc; u8 tmp; *(u8 *)skb_push(skb, 1) = 0; crc = crc_ccitt(0xffff, skb->data, skb->len); crc = ~crc; tmp = crc & 0x00ff; skb_put_u8(skb, tmp); tmp = (crc >> 8) & 0x00ff; skb_put_u8(skb, tmp); } static void st21nfca_hci_remove_len_crc(struct sk_buff *skb) { skb_pull(skb, ST21NFCA_FRAME_HEADROOM); skb_trim(skb, skb->len - ST21NFCA_FRAME_TAILROOM); } /* * Writing a frame must not return the number of written bytes. * It must return either zero for success, or <0 for error. * In addition, it must not alter the skb */ static int st21nfca_hci_i2c_write(void *phy_id, struct sk_buff *skb) { int r = -1, i, j; struct st21nfca_i2c_phy *phy = phy_id; struct i2c_client *client = phy->i2c_dev; u8 tmp[ST21NFCA_HCI_LLC_MAX_SIZE * 2]; I2C_DUMP_SKB("st21nfca_hci_i2c_write", skb); if (phy->hard_fault != 0) return phy->hard_fault; /* * Compute CRC before byte stuffing computation on frame * Note st21nfca_hci_add_len_crc is doing a byte stuffing * on its own value */ st21nfca_hci_add_len_crc(skb); /* add ST21NFCA_SOF_EOF on tail */ skb_put_u8(skb, ST21NFCA_SOF_EOF); /* add ST21NFCA_SOF_EOF on head */ *(u8 *)skb_push(skb, 1) = ST21NFCA_SOF_EOF; /* * Compute byte stuffing * if byte == ST21NFCA_SOF_EOF or ST21NFCA_ESCAPE_BYTE_STUFFING * insert ST21NFCA_ESCAPE_BYTE_STUFFING (escape byte) * xor byte with ST21NFCA_BYTE_STUFFING_MASK */ tmp[0] = skb->data[0]; for (i = 1, j = 1; i < skb->len - 1; i++, j++) { if (skb->data[i] == ST21NFCA_SOF_EOF || skb->data[i] == ST21NFCA_ESCAPE_BYTE_STUFFING) { tmp[j] = ST21NFCA_ESCAPE_BYTE_STUFFING; j++; tmp[j] = skb->data[i] ^ ST21NFCA_BYTE_STUFFING_MASK; } else { tmp[j] = skb->data[i]; } } tmp[j] = skb->data[i]; j++; /* * Manage sleep mode * Try 3 times to send data with delay between each */ mutex_lock(&phy->phy_lock); for (i = 0; i < ARRAY_SIZE(wait_tab) && r < 0; i++) { r = i2c_master_send(client, tmp, j); if (r < 0) msleep(wait_tab[i]); } mutex_unlock(&phy->phy_lock); if (r >= 0) { if (r != j) r = -EREMOTEIO; else r = 0; } st21nfca_hci_remove_len_crc(skb); return r; } static int get_frame_size(u8 *buf, int buflen) { int len = 0; if (buf[len + 1] == ST21NFCA_SOF_EOF) return 0; for (len = 1; len < buflen && buf[len] != ST21NFCA_SOF_EOF; len++) ; return len; } static int check_crc(u8 *buf, int buflen) { u16 crc; crc = crc_ccitt(0xffff, buf, 
buflen - 2); crc = ~crc; if (buf[buflen - 2] != (crc & 0xff) || buf[buflen - 1] != (crc >> 8)) { pr_err(ST21NFCA_HCI_DRIVER_NAME ": CRC error 0x%x != 0x%x 0x%x\n", crc, buf[buflen - 1], buf[buflen - 2]); pr_info(DRIVER_DESC ": %s : BAD CRC\n", __func__); print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE, 16, 2, buf, buflen, false); return -EPERM; } return 0; } /* * Prepare received data for upper layer. * Received data include byte stuffing, crc and sof/eof * which is not usable by hci part. * returns: * frame size without sof/eof, header and byte stuffing * -EBADMSG : frame was incorrect and discarded */ static int st21nfca_hci_i2c_repack(struct sk_buff *skb) { int i, j, r, size; if (skb->len < 1 || (skb->len > 1 && skb->data[1] != 0)) return -EBADMSG; size = get_frame_size(skb->data, skb->len); if (size > 0) { skb_trim(skb, size); /* remove ST21NFCA byte stuffing for upper layer */ for (i = 1, j = 0; i < skb->len; i++) { if (skb->data[i + j] == (u8) ST21NFCA_ESCAPE_BYTE_STUFFING) { skb->data[i] = skb->data[i + j + 1] | ST21NFCA_BYTE_STUFFING_MASK; i++; j++; } skb->data[i] = skb->data[i + j]; } /* remove byte stuffing useless byte */ skb_trim(skb, i - j); /* remove ST21NFCA_SOF_EOF from head */ skb_pull(skb, 1); r = check_crc(skb->data, skb->len); if (r != 0) return -EBADMSG; /* remove headbyte */ skb_pull(skb, 1); /* remove crc. Byte Stuffing is already removed here */ skb_trim(skb, skb->len - 2); return skb->len; } return 0; } /* * Reads an shdlc frame and returns it in a newly allocated sk_buff. Guarantees * that i2c bus will be flushed and that next read will start on a new frame. * returned skb contains only LLC header and payload. * returns: * frame size : if received frame is complete (find ST21NFCA_SOF_EOF at * end of read) * -EAGAIN : if received frame is incomplete (not find ST21NFCA_SOF_EOF * at end of read) * -EREMOTEIO : i2c read error (fatal) * -EBADMSG : frame was incorrect and discarded * (value returned from st21nfca_hci_i2c_repack) * -EIO : if no ST21NFCA_SOF_EOF is found after reaching * the read length end sequence */ static int st21nfca_hci_i2c_read(struct st21nfca_i2c_phy *phy, struct sk_buff *skb) { int r, i; u8 len; u8 buf[ST21NFCA_HCI_LLC_MAX_PAYLOAD]; struct i2c_client *client = phy->i2c_dev; if (phy->current_read_len < ARRAY_SIZE(len_seq)) { len = len_seq[phy->current_read_len]; /* * Add retry mecanism * Operation on I2C interface may fail in case of operation on * RF or SWP interface */ r = 0; mutex_lock(&phy->phy_lock); for (i = 0; i < ARRAY_SIZE(wait_tab) && r <= 0; i++) { r = i2c_master_recv(client, buf, len); if (r < 0) msleep(wait_tab[i]); } mutex_unlock(&phy->phy_lock); if (r != len) { phy->current_read_len = 0; return -EREMOTEIO; } /* * The first read sequence does not start with SOF. * Data is corrupeted so we drop it. */ if (!phy->current_read_len && !IS_START_OF_FRAME(buf)) { skb_trim(skb, 0); phy->current_read_len = 0; return -EIO; } else if (phy->current_read_len && IS_START_OF_FRAME(buf)) { /* * Previous frame transmission was interrupted and * the frame got repeated. * Received frame start with ST21NFCA_SOF_EOF + 00. */ skb_trim(skb, 0); phy->current_read_len = 0; } skb_put_data(skb, buf, len); if (skb->data[skb->len - 1] == ST21NFCA_SOF_EOF) { phy->current_read_len = 0; return st21nfca_hci_i2c_repack(skb); } phy->current_read_len++; return -EAGAIN; } return -EIO; } /* * Reads an shdlc frame from the chip. This is not as straightforward as it * seems. 
The frame format is data-crc, and corruption can occur anywhere * while transiting on i2c bus, such that we could read an invalid data. * The tricky case is when we read a corrupted data or crc. We must detect * this here in order to determine that data can be transmitted to the hci * core. This is the reason why we check the crc here. * The CLF will repeat a frame until we send a RR on that frame. * * On ST21NFCA, IRQ goes in idle when read starts. As no size information are * available in the incoming data, other IRQ might come. Every IRQ will trigger * a read sequence with different length and will fill the current frame. * The reception is complete once we reach a ST21NFCA_SOF_EOF. */ static irqreturn_t st21nfca_hci_irq_thread_fn(int irq, void *phy_id) { struct st21nfca_i2c_phy *phy = phy_id; int r; if (!phy || irq != phy->i2c_dev->irq) { WARN_ON_ONCE(1); return IRQ_NONE; } if (phy->hard_fault != 0) return IRQ_HANDLED; r = st21nfca_hci_i2c_read(phy, phy->pending_skb); if (r == -EREMOTEIO) { phy->hard_fault = r; nfc_hci_recv_frame(phy->hdev, NULL); return IRQ_HANDLED; } else if (r == -EAGAIN || r == -EIO) { return IRQ_HANDLED; } else if (r == -EBADMSG && phy->crc_trials < ARRAY_SIZE(wait_tab)) { /* * With ST21NFCA, only one interface (I2C, RF or SWP) * may be active at a time. * Having incorrect crc is usually due to i2c macrocell * deactivation in the middle of a transmission. * It may generate corrupted data on i2c. * We give sometime to get i2c back. * The complete frame will be repeated. */ msleep(wait_tab[phy->crc_trials]); phy->crc_trials++; phy->current_read_len = 0; kfree_skb(phy->pending_skb); } else if (r > 0) { /* * We succeeded to read data from the CLF and * data is valid. * Reset counter. */ nfc_hci_recv_frame(phy->hdev, phy->pending_skb); phy->crc_trials = 0; } else { kfree_skb(phy->pending_skb); } phy->pending_skb = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE * 2, GFP_KERNEL); if (phy->pending_skb == NULL) { phy->hard_fault = -ENOMEM; nfc_hci_recv_frame(phy->hdev, NULL); } return IRQ_HANDLED; } static const struct nfc_phy_ops i2c_phy_ops = { .write = st21nfca_hci_i2c_write, .enable = st21nfca_hci_i2c_enable, .disable = st21nfca_hci_i2c_disable, }; static const struct acpi_gpio_params enable_gpios = { 1, 0, false }; static const struct acpi_gpio_mapping acpi_st21nfca_gpios[] = { { "enable-gpios", &enable_gpios, 1 }, {}, }; static int st21nfca_hci_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct st21nfca_i2c_phy *phy; int r; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { nfc_err(&client->dev, "Need I2C_FUNC_I2C\n"); return -ENODEV; } phy = devm_kzalloc(&client->dev, sizeof(struct st21nfca_i2c_phy), GFP_KERNEL); if (!phy) return -ENOMEM; phy->i2c_dev = client; phy->pending_skb = alloc_skb(ST21NFCA_HCI_LLC_MAX_SIZE * 2, GFP_KERNEL); if (phy->pending_skb == NULL) return -ENOMEM; phy->current_read_len = 0; phy->crc_trials = 0; mutex_init(&phy->phy_lock); i2c_set_clientdata(client, phy); r = devm_acpi_dev_add_driver_gpios(dev, acpi_st21nfca_gpios); if (r) dev_dbg(dev, "Unable to add GPIO mapping table\n"); /* Get EN GPIO from resource provider */ phy->gpiod_ena = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(phy->gpiod_ena)) { nfc_err(dev, "Unable to get ENABLE GPIO\n"); r = PTR_ERR(phy->gpiod_ena); goto out_free; } phy->se_status.is_ese_present = device_property_read_bool(&client->dev, "ese-present"); phy->se_status.is_uicc_present = device_property_read_bool(&client->dev, "uicc-present"); r = st21nfca_hci_platform_init(phy); 
if (r < 0) { nfc_err(&client->dev, "Unable to reboot st21nfca\n"); goto out_free; } r = devm_request_threaded_irq(&client->dev, client->irq, NULL, st21nfca_hci_irq_thread_fn, IRQF_ONESHOT, ST21NFCA_HCI_DRIVER_NAME, phy); if (r < 0) { nfc_err(&client->dev, "Unable to register IRQ handler\n"); goto out_free; } r = st21nfca_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, ST21NFCA_FRAME_HEADROOM, ST21NFCA_FRAME_TAILROOM, ST21NFCA_HCI_LLC_MAX_PAYLOAD, &phy->hdev, &phy->se_status); if (r) goto out_free; return 0; out_free: kfree_skb(phy->pending_skb); return r; } static void st21nfca_hci_i2c_remove(struct i2c_client *client) { struct st21nfca_i2c_phy *phy = i2c_get_clientdata(client); st21nfca_hci_remove(phy->hdev); if (phy->powered) st21nfca_hci_i2c_disable(phy); kfree_skb(phy->pending_skb); } static const struct i2c_device_id st21nfca_hci_i2c_id_table[] = { {ST21NFCA_HCI_DRIVER_NAME, 0}, {} }; MODULE_DEVICE_TABLE(i2c, st21nfca_hci_i2c_id_table); static const struct acpi_device_id st21nfca_hci_i2c_acpi_match[] __maybe_unused = { {"SMO2100", 0}, {} }; MODULE_DEVICE_TABLE(acpi, st21nfca_hci_i2c_acpi_match); static const struct of_device_id of_st21nfca_i2c_match[] __maybe_unused = { { .compatible = "st,st21nfca-i2c", }, { .compatible = "st,st21nfca_i2c", }, {} }; MODULE_DEVICE_TABLE(of, of_st21nfca_i2c_match); static struct i2c_driver st21nfca_hci_i2c_driver = { .driver = { .name = ST21NFCA_HCI_I2C_DRIVER_NAME, .of_match_table = of_match_ptr(of_st21nfca_i2c_match), .acpi_match_table = ACPI_PTR(st21nfca_hci_i2c_acpi_match), }, .probe = st21nfca_hci_i2c_probe, .id_table = st21nfca_hci_i2c_id_table, .remove = st21nfca_hci_i2c_remove, }; module_i2c_driver(st21nfca_hci_i2c_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/nfc/st21nfca/i2c.c
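The framing code in the record above escapes in-band 0x7e/0x7d bytes by inserting the escape byte and XOR-ing the original with 0x20, exactly as the header comment states. A self-contained encoder/decoder pair for that scheme; buffer sizes, function names, and the round-trip harness are illustrative.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define SOF_EOF		0x7e	/* ST21NFCA_SOF_EOF */
#define ESCAPE_BYTE	0x7d	/* ST21NFCA_ESCAPE_BYTE_STUFFING */
#define STUFF_MASK	0x20	/* ST21NFCA_BYTE_STUFFING_MASK */

/* Worst case doubles the payload, so out[] must hold 2 * len bytes. */
static size_t stuff(const uint8_t *in, size_t len, uint8_t *out)
{
	size_t j = 0;

	for (size_t i = 0; i < len; i++) {
		if (in[i] == SOF_EOF || in[i] == ESCAPE_BYTE) {
			out[j++] = ESCAPE_BYTE;
			out[j++] = in[i] ^ STUFF_MASK;
		} else {
			out[j++] = in[i];
		}
	}
	return j;
}

/* Inverse transform: an escape byte means "undo the mask on the next byte". */
static size_t unstuff(const uint8_t *in, size_t len, uint8_t *out)
{
	size_t j = 0;

	for (size_t i = 0; i < len; i++) {
		if (in[i] == ESCAPE_BYTE && i + 1 < len)
			out[j++] = in[++i] ^ STUFF_MASK;
		else
			out[j++] = in[i];
	}
	return j;
}

int main(void)
{
	uint8_t data[] = { 0x10, 0x7e, 0x7d, 0x42 };
	uint8_t enc[8], dec[4];
	size_t n = stuff(data, sizeof(data), enc);

	n = unstuff(enc, n, dec);
	printf("round trip ok: %d\n", n == sizeof(data) &&
	       dec[1] == 0x7e && dec[2] == 0x7d);
	return 0;
}

One design note: the driver's repack path ORs the mask back in rather than XOR-ing, which is equivalent here because both escapable values (0x7e, 0x7d) have bit 5 set, so the stuffing XOR always clears that bit.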
// SPDX-License-Identifier: GPL-2.0-only /* * Proprietary commands extension for STMicroelectronics NFC Chip * * Copyright (C) 2014-2015 STMicroelectronics SAS. All rights reserved. */ #include <net/genetlink.h> #include <linux/module.h> #include <linux/nfc.h> #include <net/nfc/hci.h> #include <net/nfc/llc.h> #include "st21nfca.h" #define ST21NFCA_HCI_DM_GETDATA 0x10 #define ST21NFCA_HCI_DM_PUTDATA 0x11 #define ST21NFCA_HCI_DM_LOAD 0x12 #define ST21NFCA_HCI_DM_GETINFO 0x13 #define ST21NFCA_HCI_DM_UPDATE_AID 0x20 #define ST21NFCA_HCI_DM_RESET 0x3e #define ST21NFCA_HCI_DM_FIELD_GENERATOR 0x32 #define ST21NFCA_FACTORY_MODE_ON 1 #define ST21NFCA_FACTORY_MODE_OFF 0 #define ST21NFCA_EVT_POST_DATA 0x02 struct get_param_data { u8 gate; u8 data; } __packed; static int st21nfca_factory_mode(struct nfc_dev *dev, void *data, size_t data_len) { struct nfc_hci_dev *hdev = nfc_get_drvdata(dev); if (data_len != 1) return -EINVAL; pr_debug("factory mode: %x\n", ((u8 *)data)[0]); switch (((u8 *)data)[0]) { case ST21NFCA_FACTORY_MODE_ON: test_and_set_bit(ST21NFCA_FACTORY_MODE, &hdev->quirks); break; case ST21NFCA_FACTORY_MODE_OFF: clear_bit(ST21NFCA_FACTORY_MODE, &hdev->quirks); break; default: return -EINVAL; } return 0; } static int st21nfca_hci_clear_all_pipes(struct nfc_dev *dev, void *data, size_t data_len) { struct nfc_hci_dev *hdev = nfc_get_drvdata(dev); return nfc_hci_disconnect_all_gates(hdev); } static int st21nfca_hci_dm_put_data(struct nfc_dev *dev, void *data, size_t data_len) { struct nfc_hci_dev *hdev = nfc_get_drvdata(dev); return nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_HCI_DM_PUTDATA, data, data_len, NULL); } static int st21nfca_hci_dm_update_aid(struct nfc_dev *dev, void *data, size_t data_len) { struct nfc_hci_dev *hdev = nfc_get_drvdata(dev); return nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_HCI_DM_UPDATE_AID, data, data_len, NULL); } static int st21nfca_hci_dm_get_info(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct sk_buff *msg, *skb; struct nfc_hci_dev *hdev = nfc_get_drvdata(dev); r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_HCI_DM_GETINFO, data, data_len, &skb); if (r) goto exit; msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST21NFCA_VENDOR_OUI, HCI_DM_GET_INFO, skb->len); if (!msg) { r = -ENOMEM; goto free_skb; } if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) { kfree_skb(msg); r = -ENOBUFS; goto free_skb; } r = nfc_vendor_cmd_reply(msg); free_skb: kfree_skb(skb); exit: return r; } static int st21nfca_hci_dm_get_data(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct sk_buff *msg, *skb; struct nfc_hci_dev *hdev = nfc_get_drvdata(dev); r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_HCI_DM_GETDATA, data, data_len, &skb); if (r) goto exit; msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST21NFCA_VENDOR_OUI, HCI_DM_GET_DATA, skb->len); if (!msg) { r = -ENOMEM; goto free_skb; } if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) { kfree_skb(msg); r = -ENOBUFS; goto free_skb; } r = nfc_vendor_cmd_reply(msg); free_skb: kfree_skb(skb); exit: return r; } static int st21nfca_hci_dm_load(struct nfc_dev *dev, void *data, size_t data_len) { struct nfc_hci_dev *hdev = nfc_get_drvdata(dev); return nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_HCI_DM_LOAD, data, data_len, NULL); } static int st21nfca_hci_dm_reset(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct nfc_hci_dev *hdev = nfc_get_drvdata(dev); r = nfc_hci_send_cmd_async(hdev, 
ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_HCI_DM_RESET, data, data_len, NULL, NULL); if (r < 0) return r; r = nfc_llc_stop(hdev->llc); if (r < 0) return r; return nfc_llc_start(hdev->llc); } static int st21nfca_hci_get_param(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct sk_buff *msg, *skb; struct nfc_hci_dev *hdev = nfc_get_drvdata(dev); struct get_param_data *param = (struct get_param_data *)data; if (data_len < sizeof(struct get_param_data)) return -EPROTO; r = nfc_hci_get_param(hdev, param->gate, param->data, &skb); if (r) goto exit; msg = nfc_vendor_cmd_alloc_reply_skb(dev, ST21NFCA_VENDOR_OUI, HCI_GET_PARAM, skb->len); if (!msg) { r = -ENOMEM; goto free_skb; } if (nla_put(msg, NFC_ATTR_VENDOR_DATA, skb->len, skb->data)) { kfree_skb(msg); r = -ENOBUFS; goto free_skb; } r = nfc_vendor_cmd_reply(msg); free_skb: kfree_skb(skb); exit: return r; } static int st21nfca_hci_dm_field_generator(struct nfc_dev *dev, void *data, size_t data_len) { struct nfc_hci_dev *hdev = nfc_get_drvdata(dev); return nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_HCI_DM_FIELD_GENERATOR, data, data_len, NULL); } int st21nfca_hci_loopback_event_received(struct nfc_hci_dev *hdev, u8 event, struct sk_buff *skb) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); switch (event) { case ST21NFCA_EVT_POST_DATA: info->vendor_info.rx_skb = skb; break; default: nfc_err(&hdev->ndev->dev, "Unexpected event on loopback gate\n"); } complete(&info->vendor_info.req_completion); return 0; } EXPORT_SYMBOL(st21nfca_hci_loopback_event_received); static int st21nfca_hci_loopback(struct nfc_dev *dev, void *data, size_t data_len) { int r; struct sk_buff *msg; struct nfc_hci_dev *hdev = nfc_get_drvdata(dev); struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); if (data_len <= 0) return -EPROTO; reinit_completion(&info->vendor_info.req_completion); info->vendor_info.rx_skb = NULL; r = nfc_hci_send_event(hdev, NFC_HCI_LOOPBACK_GATE, ST21NFCA_EVT_POST_DATA, data, data_len); if (r < 0) { r = -EPROTO; goto exit; } wait_for_completion_interruptible(&info->vendor_info.req_completion); if (!info->vendor_info.rx_skb || info->vendor_info.rx_skb->len != data_len) { r = -EPROTO; goto exit; } msg = nfc_vendor_cmd_alloc_reply_skb(hdev->ndev, ST21NFCA_VENDOR_OUI, HCI_LOOPBACK, info->vendor_info.rx_skb->len); if (!msg) { r = -ENOMEM; goto free_skb; } if (nla_put(msg, NFC_ATTR_VENDOR_DATA, info->vendor_info.rx_skb->len, info->vendor_info.rx_skb->data)) { kfree_skb(msg); r = -ENOBUFS; goto free_skb; } r = nfc_vendor_cmd_reply(msg); free_skb: kfree_skb(info->vendor_info.rx_skb); exit: return r; } static const struct nfc_vendor_cmd st21nfca_vendor_cmds[] = { { .vendor_id = ST21NFCA_VENDOR_OUI, .subcmd = FACTORY_MODE, .doit = st21nfca_factory_mode, }, { .vendor_id = ST21NFCA_VENDOR_OUI, .subcmd = HCI_CLEAR_ALL_PIPES, .doit = st21nfca_hci_clear_all_pipes, }, { .vendor_id = ST21NFCA_VENDOR_OUI, .subcmd = HCI_DM_PUT_DATA, .doit = st21nfca_hci_dm_put_data, }, { .vendor_id = ST21NFCA_VENDOR_OUI, .subcmd = HCI_DM_UPDATE_AID, .doit = st21nfca_hci_dm_update_aid, }, { .vendor_id = ST21NFCA_VENDOR_OUI, .subcmd = HCI_DM_GET_INFO, .doit = st21nfca_hci_dm_get_info, }, { .vendor_id = ST21NFCA_VENDOR_OUI, .subcmd = HCI_DM_GET_DATA, .doit = st21nfca_hci_dm_get_data, }, { .vendor_id = ST21NFCA_VENDOR_OUI, .subcmd = HCI_DM_LOAD, .doit = st21nfca_hci_dm_load, }, { .vendor_id = ST21NFCA_VENDOR_OUI, .subcmd = HCI_DM_RESET, .doit = st21nfca_hci_dm_reset, }, { .vendor_id = ST21NFCA_VENDOR_OUI, .subcmd = HCI_GET_PARAM, .doit = 
st21nfca_hci_get_param,
	},
	{
		.vendor_id = ST21NFCA_VENDOR_OUI,
		.subcmd = HCI_DM_FIELD_GENERATOR,
		.doit = st21nfca_hci_dm_field_generator,
	},
	{
		.vendor_id = ST21NFCA_VENDOR_OUI,
		.subcmd = HCI_LOOPBACK,
		.doit = st21nfca_hci_loopback,
	},
};

int st21nfca_vendor_cmds_init(struct nfc_hci_dev *hdev)
{
	struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev);

	init_completion(&info->vendor_info.req_completion);
	return nfc_hci_set_vendor_cmds(hdev, st21nfca_vendor_cmds,
				       sizeof(st21nfca_vendor_cmds));
}
EXPORT_SYMBOL(st21nfca_vendor_cmds_init);
linux-master
drivers/nfc/st21nfca/vendor_cmds.c
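st21nfca_hci_loopback() in the record above fires ST21NFCA_EVT_POST_DATA, waits on a completion, and only builds a reply when an echoed frame arrived and its length exactly matches what was sent; anything else becomes -EPROTO. A minimal sketch of that acceptance check, detached from the HCI plumbing; the rx_frame type and function name are simplified stand-ins.

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for the sk_buff the loopback event handler stores. */
struct rx_frame {
	const uint8_t *data;
	size_t len;
};

/* Accept the echo only if one arrived and it is exactly as long as the
 * request; the driver maps every other outcome to -EPROTO. */
static int check_loopback_echo(const struct rx_frame *rx, size_t sent_len)
{
	if (!rx || !rx->data || rx->len != sent_len)
		return -EPROTO;
	return 0;
}

int main(void)
{
	uint8_t echo[] = { 1, 2, 3 };
	struct rx_frame rx = { echo, sizeof(echo) };

	printf("match: %d, short: %d\n",
	       check_loopback_echo(&rx, 3), check_loopback_echo(&rx, 4));
	return 0;
}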
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. */ #include <net/nfc/hci.h> #include "st21nfca.h" #define ST21NFCA_NFCIP1_INITIATOR 0x00 #define ST21NFCA_NFCIP1_REQ 0xd4 #define ST21NFCA_NFCIP1_RES 0xd5 #define ST21NFCA_NFCIP1_ATR_REQ 0x00 #define ST21NFCA_NFCIP1_ATR_RES 0x01 #define ST21NFCA_NFCIP1_PSL_REQ 0x04 #define ST21NFCA_NFCIP1_PSL_RES 0x05 #define ST21NFCA_NFCIP1_DEP_REQ 0x06 #define ST21NFCA_NFCIP1_DEP_RES 0x07 #define ST21NFCA_NFC_DEP_PFB_PNI(pfb) ((pfb) & 0x03) #define ST21NFCA_NFC_DEP_PFB_TYPE(pfb) ((pfb) & 0xE0) #define ST21NFCA_NFC_DEP_PFB_IS_TIMEOUT(pfb) \ ((pfb) & ST21NFCA_NFC_DEP_PFB_TIMEOUT_BIT) #define ST21NFCA_NFC_DEP_DID_BIT_SET(pfb) ((pfb) & 0x04) #define ST21NFCA_NFC_DEP_NAD_BIT_SET(pfb) ((pfb) & 0x08) #define ST21NFCA_NFC_DEP_PFB_TIMEOUT_BIT 0x10 #define ST21NFCA_NFC_DEP_PFB_IS_TIMEOUT(pfb) \ ((pfb) & ST21NFCA_NFC_DEP_PFB_TIMEOUT_BIT) #define ST21NFCA_NFC_DEP_PFB_I_PDU 0x00 #define ST21NFCA_NFC_DEP_PFB_ACK_NACK_PDU 0x40 #define ST21NFCA_NFC_DEP_PFB_SUPERVISOR_PDU 0x80 #define ST21NFCA_ATR_REQ_MIN_SIZE 17 #define ST21NFCA_ATR_REQ_MAX_SIZE 65 #define ST21NFCA_LR_BITS_PAYLOAD_SIZE_254B 0x30 #define ST21NFCA_GB_BIT 0x02 #define ST21NFCA_EVT_SEND_DATA 0x10 #define ST21NFCA_EVT_FIELD_ON 0x11 #define ST21NFCA_EVT_CARD_DEACTIVATED 0x12 #define ST21NFCA_EVT_CARD_ACTIVATED 0x13 #define ST21NFCA_EVT_FIELD_OFF 0x14 #define ST21NFCA_EVT_CARD_F_BITRATE 0x16 #define ST21NFCA_EVT_READER_F_BITRATE 0x13 #define ST21NFCA_PSL_REQ_SEND_SPEED(brs) (brs & 0x38) #define ST21NFCA_PSL_REQ_RECV_SPEED(brs) (brs & 0x07) #define ST21NFCA_PP2LRI(pp) ((pp & 0x30) >> 4) #define ST21NFCA_CARD_BITRATE_212 0x01 #define ST21NFCA_CARD_BITRATE_424 0x02 #define ST21NFCA_DEFAULT_TIMEOUT 0x0a #define PROTOCOL_ERR(req) pr_err("%d: ST21NFCA Protocol error: %s\n", \ __LINE__, req) struct st21nfca_atr_req { u8 length; u8 cmd0; u8 cmd1; u8 nfcid3[NFC_NFCID3_MAXSIZE]; u8 did; u8 bsi; u8 bri; u8 ppi; u8 gbi[]; } __packed; struct st21nfca_atr_res { u8 length; u8 cmd0; u8 cmd1; u8 nfcid3[NFC_NFCID3_MAXSIZE]; u8 did; u8 bsi; u8 bri; u8 to; u8 ppi; u8 gbi[]; } __packed; struct st21nfca_psl_req { u8 length; u8 cmd0; u8 cmd1; u8 did; u8 brs; u8 fsl; } __packed; struct st21nfca_psl_res { u8 length; u8 cmd0; u8 cmd1; u8 did; } __packed; struct st21nfca_dep_req_res { u8 length; u8 cmd0; u8 cmd1; u8 pfb; u8 did; u8 nad; } __packed; static void st21nfca_tx_work(struct work_struct *work) { struct st21nfca_hci_info *info = container_of(work, struct st21nfca_hci_info, dep_info.tx_work); struct nfc_dev *dev; struct sk_buff *skb; if (info) { dev = info->hdev->ndev; skb = info->dep_info.tx_pending; device_lock(&dev->dev); nfc_hci_send_cmd_async(info->hdev, ST21NFCA_RF_READER_F_GATE, ST21NFCA_WR_XCHG_DATA, skb->data, skb->len, info->async_cb, info); device_unlock(&dev->dev); kfree_skb(skb); } } static void st21nfca_im_send_pdu(struct st21nfca_hci_info *info, struct sk_buff *skb) { info->dep_info.tx_pending = skb; schedule_work(&info->dep_info.tx_work); } static int st21nfca_tm_send_atr_res(struct nfc_hci_dev *hdev, struct st21nfca_atr_req *atr_req) { struct st21nfca_atr_res *atr_res; struct sk_buff *skb; size_t gb_len; int r; struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); gb_len = atr_req->length - sizeof(struct st21nfca_atr_req); skb = alloc_skb(atr_req->length + 1, GFP_KERNEL); if (!skb) return -ENOMEM; skb_put(skb, sizeof(struct st21nfca_atr_res)); atr_res = (struct st21nfca_atr_res *)skb->data; memset(atr_res, 0, sizeof(struct st21nfca_atr_res)); 
atr_res->length = atr_req->length + 1; atr_res->cmd0 = ST21NFCA_NFCIP1_RES; atr_res->cmd1 = ST21NFCA_NFCIP1_ATR_RES; memcpy(atr_res->nfcid3, atr_req->nfcid3, 6); atr_res->bsi = 0x00; atr_res->bri = 0x00; atr_res->to = ST21NFCA_DEFAULT_TIMEOUT; atr_res->ppi = ST21NFCA_LR_BITS_PAYLOAD_SIZE_254B; if (gb_len) { skb_put(skb, gb_len); atr_res->ppi |= ST21NFCA_GB_BIT; memcpy(atr_res->gbi, atr_req->gbi, gb_len); r = nfc_set_remote_general_bytes(hdev->ndev, atr_res->gbi, gb_len); if (r < 0) { kfree_skb(skb); return r; } } info->dep_info.curr_nfc_dep_pni = 0; r = nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_EVT_SEND_DATA, skb->data, skb->len); kfree_skb(skb); return r; } static int st21nfca_tm_recv_atr_req(struct nfc_hci_dev *hdev, struct sk_buff *skb) { struct st21nfca_atr_req *atr_req; size_t gb_len; int r; skb_trim(skb, skb->len - 1); if (!skb->len) return -EIO; if (skb->len < ST21NFCA_ATR_REQ_MIN_SIZE) return -EPROTO; atr_req = (struct st21nfca_atr_req *)skb->data; if (atr_req->length < sizeof(struct st21nfca_atr_req)) return -EPROTO; r = st21nfca_tm_send_atr_res(hdev, atr_req); if (r) return r; gb_len = skb->len - sizeof(struct st21nfca_atr_req); r = nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK, NFC_COMM_PASSIVE, atr_req->gbi, gb_len); if (r) return r; return 0; } static int st21nfca_tm_send_psl_res(struct nfc_hci_dev *hdev, struct st21nfca_psl_req *psl_req) { struct st21nfca_psl_res *psl_res; struct sk_buff *skb; u8 bitrate[2] = {0, 0}; int r; skb = alloc_skb(sizeof(struct st21nfca_psl_res), GFP_KERNEL); if (!skb) return -ENOMEM; skb_put(skb, sizeof(struct st21nfca_psl_res)); psl_res = (struct st21nfca_psl_res *)skb->data; psl_res->length = sizeof(struct st21nfca_psl_res); psl_res->cmd0 = ST21NFCA_NFCIP1_RES; psl_res->cmd1 = ST21NFCA_NFCIP1_PSL_RES; psl_res->did = psl_req->did; r = nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_EVT_SEND_DATA, skb->data, skb->len); if (r < 0) goto error; /* * ST21NFCA only support P2P passive. * PSL_REQ BRS value != 0 has only a meaning to * change technology to type F. * We change to BITRATE 424Kbits. * In other case switch to BITRATE 106Kbits. 
*/ if (ST21NFCA_PSL_REQ_SEND_SPEED(psl_req->brs) && ST21NFCA_PSL_REQ_RECV_SPEED(psl_req->brs)) { bitrate[0] = ST21NFCA_CARD_BITRATE_424; bitrate[1] = ST21NFCA_CARD_BITRATE_424; } /* Send an event to change bitrate change event to card f */ r = nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_EVT_CARD_F_BITRATE, bitrate, 2); error: kfree_skb(skb); return r; } static int st21nfca_tm_recv_psl_req(struct nfc_hci_dev *hdev, struct sk_buff *skb) { struct st21nfca_psl_req *psl_req; skb_trim(skb, skb->len - 1); if (!skb->len) return -EIO; psl_req = (struct st21nfca_psl_req *)skb->data; if (skb->len < sizeof(struct st21nfca_psl_req)) return -EIO; return st21nfca_tm_send_psl_res(hdev, psl_req); } int st21nfca_tm_send_dep_res(struct nfc_hci_dev *hdev, struct sk_buff *skb) { int r; struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); *(u8 *)skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni; *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_RES; *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_RES; *(u8 *)skb_push(skb, 1) = skb->len; r = nfc_hci_send_event(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_EVT_SEND_DATA, skb->data, skb->len); kfree_skb(skb); return r; } EXPORT_SYMBOL(st21nfca_tm_send_dep_res); static int st21nfca_tm_recv_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb) { struct st21nfca_dep_req_res *dep_req; u8 size; struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); skb_trim(skb, skb->len - 1); size = 4; dep_req = (struct st21nfca_dep_req_res *)skb->data; if (skb->len < size) return -EIO; if (ST21NFCA_NFC_DEP_DID_BIT_SET(dep_req->pfb)) size++; if (ST21NFCA_NFC_DEP_NAD_BIT_SET(dep_req->pfb)) size++; if (skb->len < size) return -EIO; /* Receiving DEP_REQ - Decoding */ switch (ST21NFCA_NFC_DEP_PFB_TYPE(dep_req->pfb)) { case ST21NFCA_NFC_DEP_PFB_I_PDU: info->dep_info.curr_nfc_dep_pni = ST21NFCA_NFC_DEP_PFB_PNI(dep_req->pfb); break; case ST21NFCA_NFC_DEP_PFB_ACK_NACK_PDU: pr_err("Received a ACK/NACK PDU\n"); break; case ST21NFCA_NFC_DEP_PFB_SUPERVISOR_PDU: pr_err("Received a SUPERVISOR PDU\n"); break; } skb_pull(skb, size); return nfc_tm_data_received(hdev->ndev, skb); } static int st21nfca_tm_event_send_data(struct nfc_hci_dev *hdev, struct sk_buff *skb) { u8 cmd0, cmd1; int r; cmd0 = skb->data[1]; switch (cmd0) { case ST21NFCA_NFCIP1_REQ: cmd1 = skb->data[2]; switch (cmd1) { case ST21NFCA_NFCIP1_ATR_REQ: r = st21nfca_tm_recv_atr_req(hdev, skb); break; case ST21NFCA_NFCIP1_PSL_REQ: r = st21nfca_tm_recv_psl_req(hdev, skb); break; case ST21NFCA_NFCIP1_DEP_REQ: r = st21nfca_tm_recv_dep_req(hdev, skb); break; default: return 1; } break; default: return 1; } return r; } /* * Returns: * <= 0: driver handled the event, skb consumed * 1: driver does not handle the event, please do standard processing */ int st21nfca_dep_event_received(struct nfc_hci_dev *hdev, u8 event, struct sk_buff *skb) { int r = 0; struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); pr_debug("dep event: %d\n", event); switch (event) { case ST21NFCA_EVT_CARD_ACTIVATED: info->dep_info.curr_nfc_dep_pni = 0; break; case ST21NFCA_EVT_CARD_DEACTIVATED: break; case ST21NFCA_EVT_FIELD_ON: break; case ST21NFCA_EVT_FIELD_OFF: break; case ST21NFCA_EVT_SEND_DATA: r = st21nfca_tm_event_send_data(hdev, skb); if (r < 0) return r; return 0; default: nfc_err(&hdev->ndev->dev, "Unexpected event on card f gate\n"); return 1; } kfree_skb(skb); return r; } EXPORT_SYMBOL(st21nfca_dep_event_received); static void st21nfca_im_send_psl_req(struct nfc_hci_dev *hdev, u8 did, u8 bsi, u8 bri, u8 lri) { struct sk_buff *skb; 
struct st21nfca_psl_req *psl_req; struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); skb = alloc_skb(sizeof(struct st21nfca_psl_req) + 1, GFP_KERNEL); if (!skb) return; skb_reserve(skb, 1); skb_put(skb, sizeof(struct st21nfca_psl_req)); psl_req = (struct st21nfca_psl_req *) skb->data; psl_req->length = sizeof(struct st21nfca_psl_req); psl_req->cmd0 = ST21NFCA_NFCIP1_REQ; psl_req->cmd1 = ST21NFCA_NFCIP1_PSL_REQ; psl_req->did = did; psl_req->brs = (0x30 & bsi << 4) | (bri & 0x03); psl_req->fsl = lri; *(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10; st21nfca_im_send_pdu(info, skb); } #define ST21NFCA_CB_TYPE_READER_F 1 static void st21nfca_im_recv_atr_res_cb(void *context, struct sk_buff *skb, int err) { struct st21nfca_hci_info *info = context; struct st21nfca_atr_res *atr_res; int r; if (err != 0) return; if (!skb) return; switch (info->async_cb_type) { case ST21NFCA_CB_TYPE_READER_F: skb_trim(skb, skb->len - 1); atr_res = (struct st21nfca_atr_res *)skb->data; r = nfc_set_remote_general_bytes(info->hdev->ndev, atr_res->gbi, skb->len - sizeof(struct st21nfca_atr_res)); if (r < 0) return; if (atr_res->to >= 0x0e) info->dep_info.to = 0x0e; else info->dep_info.to = atr_res->to + 1; info->dep_info.to |= 0x10; r = nfc_dep_link_is_up(info->hdev->ndev, info->dep_info.idx, NFC_COMM_PASSIVE, NFC_RF_INITIATOR); if (r < 0) return; info->dep_info.curr_nfc_dep_pni = 0; if (ST21NFCA_PP2LRI(atr_res->ppi) != info->dep_info.lri) st21nfca_im_send_psl_req(info->hdev, atr_res->did, atr_res->bsi, atr_res->bri, ST21NFCA_PP2LRI(atr_res->ppi)); break; default: kfree_skb(skb); break; } } int st21nfca_im_send_atr_req(struct nfc_hci_dev *hdev, u8 *gb, size_t gb_len) { struct sk_buff *skb; struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); struct st21nfca_atr_req *atr_req; struct nfc_target *target; uint size; info->dep_info.to = ST21NFCA_DEFAULT_TIMEOUT; size = ST21NFCA_ATR_REQ_MIN_SIZE + gb_len; if (size > ST21NFCA_ATR_REQ_MAX_SIZE) { PROTOCOL_ERR("14.6.1.1"); return -EINVAL; } skb = alloc_skb(sizeof(struct st21nfca_atr_req) + gb_len + 1, GFP_KERNEL); if (!skb) return -ENOMEM; skb_reserve(skb, 1); skb_put(skb, sizeof(struct st21nfca_atr_req)); atr_req = (struct st21nfca_atr_req *)skb->data; memset(atr_req, 0, sizeof(struct st21nfca_atr_req)); atr_req->cmd0 = ST21NFCA_NFCIP1_REQ; atr_req->cmd1 = ST21NFCA_NFCIP1_ATR_REQ; memset(atr_req->nfcid3, 0, NFC_NFCID3_MAXSIZE); target = hdev->ndev->targets; if (target->sensf_res_len > 0) memcpy(atr_req->nfcid3, target->sensf_res, target->sensf_res_len); else get_random_bytes(atr_req->nfcid3, NFC_NFCID3_MAXSIZE); atr_req->did = 0x0; atr_req->bsi = 0x00; atr_req->bri = 0x00; atr_req->ppi = ST21NFCA_LR_BITS_PAYLOAD_SIZE_254B; if (gb_len) { atr_req->ppi |= ST21NFCA_GB_BIT; skb_put_data(skb, gb, gb_len); } atr_req->length = sizeof(struct st21nfca_atr_req) + hdev->gb_len; *(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10; /* timeout */ info->async_cb_type = ST21NFCA_CB_TYPE_READER_F; info->async_cb_context = info; info->async_cb = st21nfca_im_recv_atr_res_cb; info->dep_info.bri = atr_req->bri; info->dep_info.bsi = atr_req->bsi; info->dep_info.lri = ST21NFCA_PP2LRI(atr_req->ppi); return nfc_hci_send_cmd_async(hdev, ST21NFCA_RF_READER_F_GATE, ST21NFCA_WR_XCHG_DATA, skb->data, skb->len, info->async_cb, info); } EXPORT_SYMBOL(st21nfca_im_send_atr_req); static void st21nfca_im_recv_dep_res_cb(void *context, struct sk_buff *skb, int err) { struct st21nfca_hci_info *info = context; struct st21nfca_dep_req_res *dep_res; int size; if (err != 0) return; if (!skb) 
return; switch (info->async_cb_type) { case ST21NFCA_CB_TYPE_READER_F: dep_res = (struct st21nfca_dep_req_res *)skb->data; size = 3; if (skb->len < size) goto exit; if (ST21NFCA_NFC_DEP_DID_BIT_SET(dep_res->pfb)) size++; if (ST21NFCA_NFC_DEP_NAD_BIT_SET(dep_res->pfb)) size++; if (skb->len < size) goto exit; skb_trim(skb, skb->len - 1); /* Receiving DEP_REQ - Decoding */ switch (ST21NFCA_NFC_DEP_PFB_TYPE(dep_res->pfb)) { case ST21NFCA_NFC_DEP_PFB_ACK_NACK_PDU: pr_err("Received a ACK/NACK PDU\n"); fallthrough; case ST21NFCA_NFC_DEP_PFB_I_PDU: info->dep_info.curr_nfc_dep_pni = ST21NFCA_NFC_DEP_PFB_PNI(dep_res->pfb + 1); size++; skb_pull(skb, size); nfc_tm_data_received(info->hdev->ndev, skb); break; case ST21NFCA_NFC_DEP_PFB_SUPERVISOR_PDU: pr_err("Received a SUPERVISOR PDU\n"); skb_pull(skb, size); *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ; *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ; *(u8 *)skb_push(skb, 1) = skb->len; *(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10; st21nfca_im_send_pdu(info, skb); break; } return; default: break; } exit: kfree_skb(skb); } int st21nfca_im_send_dep_req(struct nfc_hci_dev *hdev, struct sk_buff *skb) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); info->async_cb_type = ST21NFCA_CB_TYPE_READER_F; info->async_cb_context = info; info->async_cb = st21nfca_im_recv_dep_res_cb; *(u8 *)skb_push(skb, 1) = info->dep_info.curr_nfc_dep_pni; *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_DEP_REQ; *(u8 *)skb_push(skb, 1) = ST21NFCA_NFCIP1_REQ; *(u8 *)skb_push(skb, 1) = skb->len; *(u8 *)skb_push(skb, 1) = info->dep_info.to | 0x10; return nfc_hci_send_cmd_async(hdev, ST21NFCA_RF_READER_F_GATE, ST21NFCA_WR_XCHG_DATA, skb->data, skb->len, info->async_cb, info); } EXPORT_SYMBOL(st21nfca_im_send_dep_req); void st21nfca_dep_init(struct nfc_hci_dev *hdev) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); INIT_WORK(&info->dep_info.tx_work, st21nfca_tx_work); info->dep_info.curr_nfc_dep_pni = 0; info->dep_info.idx = 0; info->dep_info.to = ST21NFCA_DEFAULT_TIMEOUT; } EXPORT_SYMBOL(st21nfca_dep_init); void st21nfca_dep_deinit(struct nfc_hci_dev *hdev) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); cancel_work_sync(&info->dep_info.tx_work); } EXPORT_SYMBOL(st21nfca_dep_deinit);
linux-master
drivers/nfc/st21nfca/dep.c
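The DEP handlers in dep.c above dispatch entirely on the PFB byte: the low two bits carry the packet number, bits 2 and 3 flag optional DID and NAD bytes, and the top three bits select the PDU type. Below is a minimal user-space sketch of the same decoding; the mask values mirror the driver's macros, and the 4-byte base header (length, cmd0, cmd1, pfb) follows the size arithmetic in st21nfca_tm_recv_dep_req(). The example value is an illustrative assumption, not driver output.

/* Standalone sketch of NFC-DEP PFB decoding; illustrative only. */
#include <stdio.h>
#include <stdint.h>

#define PFB_PNI(pfb)       ((pfb) & 0x03) /* packet number information */
#define PFB_TYPE(pfb)      ((pfb) & 0xE0) /* PDU type bits */
#define PFB_DID_SET(pfb)   ((pfb) & 0x04) /* an optional DID byte follows */
#define PFB_NAD_SET(pfb)   ((pfb) & 0x08) /* an optional NAD byte follows */

#define PFB_I_PDU          0x00
#define PFB_ACK_NACK_PDU   0x40
#define PFB_SUPERVISOR_PDU 0x80

int main(void)
{
	uint8_t pfb = 0x06;    /* example: I-PDU with DID present, PNI = 2 */
	unsigned int hdr = 4;  /* length + cmd0 + cmd1 + pfb, as in the driver */

	if (PFB_DID_SET(pfb))
		hdr++;
	if (PFB_NAD_SET(pfb))
		hdr++;

	switch (PFB_TYPE(pfb)) {
	case PFB_I_PDU:
		printf("I-PDU, pni=%u, header=%u bytes\n",
		       (unsigned int)PFB_PNI(pfb), hdr);
		break;
	case PFB_ACK_NACK_PDU:
		printf("ACK/NACK PDU\n");
		break;
	case PFB_SUPERVISOR_PDU:
		printf("supervisor PDU\n");
		break;
	}
	return 0;
}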
// SPDX-License-Identifier: GPL-2.0-only /* * HCI based Driver for STMicroelectronics NFC Chip * * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. */ #include <linux/module.h> #include <linux/nfc.h> #include <net/nfc/hci.h> #include "st21nfca.h" #define DRIVER_DESC "HCI NFC driver for ST21NFCA" #define FULL_VERSION_LEN 3 /* Proprietary gates, events, commands and registers */ /* Commands that apply to all RF readers */ #define ST21NFCA_RF_READER_CMD_PRESENCE_CHECK 0x30 #define ST21NFCA_RF_READER_ISO15693_GATE 0x12 #define ST21NFCA_RF_READER_ISO15693_INVENTORY 0x01 /* * Reader gate for communication with contact-less cards using Type A * protocol ISO14443-3 but not compliant with ISO14443-4 */ #define ST21NFCA_RF_READER_14443_3_A_GATE 0x15 #define ST21NFCA_RF_READER_14443_3_A_UID 0x02 #define ST21NFCA_RF_READER_14443_3_A_ATQA 0x03 #define ST21NFCA_RF_READER_14443_3_A_SAK 0x04 #define ST21NFCA_RF_READER_F_DATARATE 0x01 #define ST21NFCA_RF_READER_F_DATARATE_106 0x01 #define ST21NFCA_RF_READER_F_DATARATE_212 0x02 #define ST21NFCA_RF_READER_F_DATARATE_424 0x04 #define ST21NFCA_RF_READER_F_POL_REQ 0x02 #define ST21NFCA_RF_READER_F_POL_REQ_DEFAULT 0xffff0000 #define ST21NFCA_RF_READER_F_NFCID2 0x03 #define ST21NFCA_RF_READER_F_NFCID1 0x04 #define ST21NFCA_RF_CARD_F_MODE 0x01 #define ST21NFCA_RF_CARD_F_NFCID2_LIST 0x04 #define ST21NFCA_RF_CARD_F_NFCID1 0x05 #define ST21NFCA_RF_CARD_F_SENS_RES 0x06 #define ST21NFCA_RF_CARD_F_SEL_RES 0x07 #define ST21NFCA_RF_CARD_F_DATARATE 0x08 #define ST21NFCA_RF_CARD_F_DATARATE_212_424 0x01 #define ST21NFCA_DEVICE_MGNT_PIPE 0x02 #define ST21NFCA_DM_GETINFO 0x13 #define ST21NFCA_DM_GETINFO_PIPE_LIST 0x02 #define ST21NFCA_DM_GETINFO_PIPE_INFO 0x01 #define ST21NFCA_DM_PIPE_CREATED 0x02 #define ST21NFCA_DM_PIPE_OPEN 0x04 #define ST21NFCA_DM_RF_ACTIVE 0x80 #define ST21NFCA_DM_DISCONNECT 0x30 #define ST21NFCA_DM_IS_PIPE_OPEN(p) \ ((p & 0x0f) == (ST21NFCA_DM_PIPE_CREATED | ST21NFCA_DM_PIPE_OPEN)) #define ST21NFCA_NFC_MODE 0x03 /* NFC_MODE parameter*/ #define ST21NFCA_EVT_HOT_PLUG 0x03 #define ST21NFCA_EVT_HOT_PLUG_IS_INHIBITED(x) (x->data[0] & 0x80) #define ST21NFCA_SE_TO_PIPES 2000 static DECLARE_BITMAP(dev_mask, ST21NFCA_NUM_DEVICES); static const struct nfc_hci_gate st21nfca_gates[] = { {NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_PIPE}, {NFC_HCI_LINK_MGMT_GATE, NFC_HCI_LINK_MGMT_PIPE}, {ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE}, {NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE}, {NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE}, {NFC_HCI_RF_READER_B_GATE, NFC_HCI_INVALID_PIPE}, {NFC_HCI_RF_READER_A_GATE, NFC_HCI_INVALID_PIPE}, {ST21NFCA_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE}, {ST21NFCA_RF_READER_14443_3_A_GATE, NFC_HCI_INVALID_PIPE}, {ST21NFCA_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE}, {ST21NFCA_RF_CARD_F_GATE, NFC_HCI_INVALID_PIPE}, /* Secure element pipes are created by secure element host */ {ST21NFCA_CONNECTIVITY_GATE, NFC_HCI_DO_NOT_CREATE_PIPE}, {ST21NFCA_APDU_READER_GATE, NFC_HCI_DO_NOT_CREATE_PIPE}, }; struct st21nfca_pipe_info { u8 pipe_state; u8 src_host_id; u8 src_gate_id; u8 dst_host_id; u8 dst_gate_id; } __packed; /* Largest headroom needed for outgoing custom commands */ #define ST21NFCA_CMDS_HEADROOM 7 static int st21nfca_hci_load_session(struct nfc_hci_dev *hdev) { int i, j, r; struct sk_buff *skb_pipe_list, *skb_pipe_info; struct st21nfca_pipe_info *info; u8 pipe_list[] = { ST21NFCA_DM_GETINFO_PIPE_LIST, NFC_HCI_TERMINAL_HOST_ID }; u8 pipe_info[] = { ST21NFCA_DM_GETINFO_PIPE_INFO, NFC_HCI_TERMINAL_HOST_ID, 0 }; /* On ST21NFCA device 
pipes number are dynamics * A maximum of 16 pipes can be created at the same time * If pipes are already created, hci_dev_up will fail. * Doing a clear all pipe is a bad idea because: * - It does useless EEPROM cycling * - It might cause issue for secure elements support * (such as removing connectivity or APDU reader pipe) * A better approach on ST21NFCA is to: * - get a pipe list for each host. * (eg: NFC_HCI_HOST_CONTROLLER_ID for now). * (TODO Later on UICC HOST and eSE HOST) * - get pipe information * - match retrieved pipe list in st21nfca_gates * ST21NFCA_DEVICE_MGNT_GATE is a proprietary gate * with ST21NFCA_DEVICE_MGNT_PIPE. * Pipe can be closed and need to be open. */ r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DEVICE_MGNT_PIPE); if (r < 0) return r; /* Get pipe list */ r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DM_GETINFO, pipe_list, sizeof(pipe_list), &skb_pipe_list); if (r < 0) return r; /* Complete the existing gate_pipe table */ for (i = 0; i < skb_pipe_list->len; i++) { pipe_info[2] = skb_pipe_list->data[i]; r = nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DM_GETINFO, pipe_info, sizeof(pipe_info), &skb_pipe_info); if (r) continue; /* * Match pipe ID and gate ID * Output format from ST21NFC_DM_GETINFO is: * - pipe state (1byte) * - source hid (1byte) * - source gid (1byte) * - destination hid (1byte) * - destination gid (1byte) */ info = (struct st21nfca_pipe_info *) skb_pipe_info->data; if (info->dst_gate_id == ST21NFCA_APDU_READER_GATE && info->src_host_id == NFC_HCI_UICC_HOST_ID) { pr_err("Unexpected apdu_reader pipe on host %x\n", info->src_host_id); kfree_skb(skb_pipe_info); continue; } for (j = 3; (j < ARRAY_SIZE(st21nfca_gates)) && (st21nfca_gates[j].gate != info->dst_gate_id) ; j++) ; if (j < ARRAY_SIZE(st21nfca_gates) && st21nfca_gates[j].gate == info->dst_gate_id && ST21NFCA_DM_IS_PIPE_OPEN(info->pipe_state)) { hdev->init_data.gates[j].pipe = pipe_info[2]; hdev->gate2pipe[st21nfca_gates[j].gate] = pipe_info[2]; hdev->pipes[pipe_info[2]].gate = st21nfca_gates[j].gate; hdev->pipes[pipe_info[2]].dest_host = info->src_host_id; } kfree_skb(skb_pipe_info); } /* * 3 gates have a well known pipe ID. Only NFC_HCI_LINK_MGMT_GATE * is not yet open at this stage. 
*/ r = nfc_hci_connect_gate(hdev, NFC_HCI_HOST_CONTROLLER_ID, NFC_HCI_LINK_MGMT_GATE, NFC_HCI_LINK_MGMT_PIPE); kfree_skb(skb_pipe_list); return r; } static int st21nfca_hci_open(struct nfc_hci_dev *hdev) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); int r; mutex_lock(&info->info_lock); if (info->state != ST21NFCA_ST_COLD) { r = -EBUSY; goto out; } r = info->phy_ops->enable(info->phy_id); if (r == 0) info->state = ST21NFCA_ST_READY; out: mutex_unlock(&info->info_lock); return r; } static void st21nfca_hci_close(struct nfc_hci_dev *hdev) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); mutex_lock(&info->info_lock); if (info->state == ST21NFCA_ST_COLD) goto out; info->phy_ops->disable(info->phy_id); info->state = ST21NFCA_ST_COLD; out: mutex_unlock(&info->info_lock); } static int st21nfca_hci_ready(struct nfc_hci_dev *hdev) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); struct sk_buff *skb; u8 param; u8 white_list[2]; int wl_size = 0; int r; if (info->se_status->is_uicc_present) white_list[wl_size++] = NFC_HCI_UICC_HOST_ID; if (info->se_status->is_ese_present) white_list[wl_size++] = ST21NFCA_ESE_HOST_ID; if (wl_size) { r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_WHITELIST, (u8 *) &white_list, wl_size); if (r < 0) return r; } /* Set NFC_MODE in device management gate to enable */ r = nfc_hci_get_param(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_NFC_MODE, &skb); if (r < 0) return r; param = skb->data[0]; kfree_skb(skb); if (param == 0) { param = 1; r = nfc_hci_set_param(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_NFC_MODE, &param, 1); if (r < 0) return r; } r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_EVT_END_OPERATION, NULL, 0); if (r < 0) return r; r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE, NFC_HCI_ID_MGMT_VERSION_SW, &skb); if (r < 0) return r; if (skb->len != FULL_VERSION_LEN) { kfree_skb(skb); return -EINVAL; } print_hex_dump(KERN_DEBUG, "FULL VERSION SOFTWARE INFO: ", DUMP_PREFIX_NONE, 16, 1, skb->data, FULL_VERSION_LEN, false); kfree_skb(skb); return 0; } static int st21nfca_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); return info->phy_ops->write(info->phy_id, skb); } static int st21nfca_hci_start_poll(struct nfc_hci_dev *hdev, u32 im_protocols, u32 tm_protocols) { int r; u32 pol_req; u8 param[19]; struct sk_buff *datarate_skb; pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n", __func__, im_protocols, tm_protocols); r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_EVT_END_OPERATION, NULL, 0); if (r < 0) return r; if (im_protocols) { /* * enable polling according to im_protocols & tm_protocols * - CLOSE pipe according to im_protocols & tm_protocols */ if ((NFC_HCI_RF_READER_B_GATE & im_protocols) == 0) { r = nfc_hci_disconnect_gate(hdev, NFC_HCI_RF_READER_B_GATE); if (r < 0) return r; } if ((NFC_HCI_RF_READER_A_GATE & im_protocols) == 0) { r = nfc_hci_disconnect_gate(hdev, NFC_HCI_RF_READER_A_GATE); if (r < 0) return r; } if ((ST21NFCA_RF_READER_F_GATE & im_protocols) == 0) { r = nfc_hci_disconnect_gate(hdev, ST21NFCA_RF_READER_F_GATE); if (r < 0) return r; } else { hdev->gb = nfc_get_local_general_bytes(hdev->ndev, &hdev->gb_len); if (hdev->gb == NULL || hdev->gb_len == 0) { im_protocols &= ~NFC_PROTO_NFC_DEP_MASK; tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK; } param[0] = ST21NFCA_RF_READER_F_DATARATE_106 | ST21NFCA_RF_READER_F_DATARATE_212 | ST21NFCA_RF_READER_F_DATARATE_424; r = nfc_hci_set_param(hdev, 
ST21NFCA_RF_READER_F_GATE, ST21NFCA_RF_READER_F_DATARATE, param, 1); if (r < 0) return r; pol_req = be32_to_cpu((__force __be32) ST21NFCA_RF_READER_F_POL_REQ_DEFAULT); r = nfc_hci_set_param(hdev, ST21NFCA_RF_READER_F_GATE, ST21NFCA_RF_READER_F_POL_REQ, (u8 *) &pol_req, 4); if (r < 0) return r; } if ((ST21NFCA_RF_READER_14443_3_A_GATE & im_protocols) == 0) { r = nfc_hci_disconnect_gate(hdev, ST21NFCA_RF_READER_14443_3_A_GATE); if (r < 0) return r; } if ((ST21NFCA_RF_READER_ISO15693_GATE & im_protocols) == 0) { r = nfc_hci_disconnect_gate(hdev, ST21NFCA_RF_READER_ISO15693_GATE); if (r < 0) return r; } r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_EVT_READER_REQUESTED, NULL, 0); if (r < 0) nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_EVT_END_OPERATION, NULL, 0); } if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) { r = nfc_hci_get_param(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_RF_CARD_F_DATARATE, &datarate_skb); if (r < 0) return r; /* Configure the maximum supported datarate to 424Kbps */ if (datarate_skb->len > 0 && datarate_skb->data[0] != ST21NFCA_RF_CARD_F_DATARATE_212_424) { param[0] = ST21NFCA_RF_CARD_F_DATARATE_212_424; r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_RF_CARD_F_DATARATE, param, 1); if (r < 0) { kfree_skb(datarate_skb); return r; } } kfree_skb(datarate_skb); /* * Configure sens_res * * NFC Forum Digital Spec Table 7: * NFCID1 size: triple (10 bytes) */ param[0] = 0x00; param[1] = 0x08; r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_RF_CARD_F_SENS_RES, param, 2); if (r < 0) return r; /* * Configure sel_res * * NFC Forum Digistal Spec Table 17: * b3 set to 0b (value b7-b6): * - 10b: Configured for NFC-DEP Protocol */ param[0] = 0x40; r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_RF_CARD_F_SEL_RES, param, 1); if (r < 0) return r; /* Configure NFCID1 Random uid */ r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_RF_CARD_F_NFCID1, NULL, 0); if (r < 0) return r; /* Configure NFCID2_LIST */ /* System Code */ param[0] = 0x00; param[1] = 0x00; /* NFCID2 */ param[2] = 0x01; param[3] = 0xfe; param[4] = 'S'; param[5] = 'T'; param[6] = 'M'; param[7] = 'i'; param[8] = 'c'; param[9] = 'r'; /* 8 byte Pad bytes used for polling respone frame */ /* * Configuration byte: * - bit 0: define the default NFCID2 entry used when the * system code is equal to 'FFFF' * - bit 1: use a random value for lowest 6 bytes of * NFCID2 value * - bit 2: ignore polling request frame if request code * is equal to '01' * - Other bits are RFU */ param[18] = 0x01; r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_RF_CARD_F_NFCID2_LIST, param, 19); if (r < 0) return r; param[0] = 0x02; r = nfc_hci_set_param(hdev, ST21NFCA_RF_CARD_F_GATE, ST21NFCA_RF_CARD_F_MODE, param, 1); } return r; } static void st21nfca_hci_stop_poll(struct nfc_hci_dev *hdev) { nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DM_DISCONNECT, NULL, 0, NULL); } static int st21nfca_get_iso14443_3_atqa(struct nfc_hci_dev *hdev, u16 *atqa) { int r; struct sk_buff *atqa_skb = NULL; r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE, ST21NFCA_RF_READER_14443_3_A_ATQA, &atqa_skb); if (r < 0) goto exit; if (atqa_skb->len != 2) { r = -EPROTO; goto exit; } *atqa = be16_to_cpu(*(__be16 *) atqa_skb->data); exit: kfree_skb(atqa_skb); return r; } static int st21nfca_get_iso14443_3_sak(struct nfc_hci_dev *hdev, u8 *sak) { int r; struct sk_buff *sak_skb = NULL; r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE, 
ST21NFCA_RF_READER_14443_3_A_SAK, &sak_skb); if (r < 0) goto exit; if (sak_skb->len != 1) { r = -EPROTO; goto exit; } *sak = sak_skb->data[0]; exit: kfree_skb(sak_skb); return r; } static int st21nfca_get_iso14443_3_uid(struct nfc_hci_dev *hdev, u8 *uid, int *len) { int r; struct sk_buff *uid_skb = NULL; r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_14443_3_A_GATE, ST21NFCA_RF_READER_14443_3_A_UID, &uid_skb); if (r < 0) goto exit; if (uid_skb->len == 0 || uid_skb->len > NFC_NFCID1_MAXSIZE) { r = -EPROTO; goto exit; } memcpy(uid, uid_skb->data, uid_skb->len); *len = uid_skb->len; exit: kfree_skb(uid_skb); return r; } static int st21nfca_get_iso15693_inventory(struct nfc_hci_dev *hdev, struct nfc_target *target) { int r; struct sk_buff *inventory_skb = NULL; r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_ISO15693_GATE, ST21NFCA_RF_READER_ISO15693_INVENTORY, &inventory_skb); if (r < 0) goto exit; skb_pull(inventory_skb, 2); if (inventory_skb->len == 0 || inventory_skb->len > NFC_ISO15693_UID_MAXSIZE) { r = -EPROTO; goto exit; } memcpy(target->iso15693_uid, inventory_skb->data, inventory_skb->len); target->iso15693_dsfid = inventory_skb->data[1]; target->is_iso15693 = 1; exit: kfree_skb(inventory_skb); return r; } static int st21nfca_hci_dep_link_up(struct nfc_hci_dev *hdev, struct nfc_target *target, u8 comm_mode, u8 *gb, size_t gb_len) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); info->dep_info.idx = target->idx; return st21nfca_im_send_atr_req(hdev, gb, gb_len); } static int st21nfca_hci_dep_link_down(struct nfc_hci_dev *hdev) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); info->state = ST21NFCA_ST_READY; return nfc_hci_send_cmd(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_DM_DISCONNECT, NULL, 0, NULL); } static int st21nfca_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) { int r, len; u16 atqa; u8 sak; u8 uid[NFC_NFCID1_MAXSIZE]; switch (gate) { case ST21NFCA_RF_READER_F_GATE: target->supported_protocols = NFC_PROTO_FELICA_MASK; break; case ST21NFCA_RF_READER_14443_3_A_GATE: /* ISO14443-3 type 1 or 2 tags */ r = st21nfca_get_iso14443_3_atqa(hdev, &atqa); if (r < 0) return r; if (atqa == 0x000c) { target->supported_protocols = NFC_PROTO_JEWEL_MASK; target->sens_res = 0x0c00; } else { r = st21nfca_get_iso14443_3_sak(hdev, &sak); if (r < 0) return r; r = st21nfca_get_iso14443_3_uid(hdev, uid, &len); if (r < 0) return r; target->supported_protocols = nfc_hci_sak_to_protocol(sak); if (target->supported_protocols == 0xffffffff) return -EPROTO; target->sens_res = atqa; target->sel_res = sak; memcpy(target->nfcid1, uid, len); target->nfcid1_len = len; } break; case ST21NFCA_RF_READER_ISO15693_GATE: target->supported_protocols = NFC_PROTO_ISO15693_MASK; r = st21nfca_get_iso15693_inventory(hdev, target); if (r < 0) return r; break; default: return -EPROTO; } return 0; } static int st21nfca_hci_complete_target_discovered(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) { int r; struct sk_buff *nfcid_skb = NULL; if (gate == ST21NFCA_RF_READER_F_GATE) { r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE, ST21NFCA_RF_READER_F_NFCID2, &nfcid_skb); if (r < 0) goto exit; if (nfcid_skb->len > NFC_SENSF_RES_MAXSIZE) { r = -EPROTO; goto exit; } /* * - After the recepton of polling response for type F frame * at 212 or 424 Kbit/s, NFCID2 registry parameters will be * updated. 
* - After the reception of SEL_RES with NFCIP-1 compliant bit * set for type A frame NFCID1 will be updated */ if (nfcid_skb->len > 0) { /* P2P in type F */ memcpy(target->sensf_res, nfcid_skb->data, nfcid_skb->len); target->sensf_res_len = nfcid_skb->len; /* NFC Forum Digital Protocol Table 44 */ if (target->sensf_res[0] == 0x01 && target->sensf_res[1] == 0xfe) target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; else target->supported_protocols = NFC_PROTO_FELICA_MASK; } else { kfree_skb(nfcid_skb); nfcid_skb = NULL; /* P2P in type A */ r = nfc_hci_get_param(hdev, ST21NFCA_RF_READER_F_GATE, ST21NFCA_RF_READER_F_NFCID1, &nfcid_skb); if (r < 0) goto exit; if (nfcid_skb->len > NFC_NFCID1_MAXSIZE) { r = -EPROTO; goto exit; } memcpy(target->sensf_res, nfcid_skb->data, nfcid_skb->len); target->sensf_res_len = nfcid_skb->len; target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; } target->hci_reader_gate = ST21NFCA_RF_READER_F_GATE; } r = 1; exit: kfree_skb(nfcid_skb); return r; } #define ST21NFCA_CB_TYPE_READER_ISO15693 1 static void st21nfca_hci_data_exchange_cb(void *context, struct sk_buff *skb, int err) { struct st21nfca_hci_info *info = context; switch (info->async_cb_type) { case ST21NFCA_CB_TYPE_READER_ISO15693: if (err == 0) skb_trim(skb, skb->len - 1); info->async_cb(info->async_cb_context, skb, err); break; default: if (err == 0) kfree_skb(skb); break; } } /* * Returns: * <= 0: driver handled the data exchange * 1: driver doesn't especially handle, please do standard processing */ static int st21nfca_hci_im_transceive(struct nfc_hci_dev *hdev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); pr_info(DRIVER_DESC ": %s for gate=%d len=%d\n", __func__, target->hci_reader_gate, skb->len); switch (target->hci_reader_gate) { case ST21NFCA_RF_READER_F_GATE: if (target->supported_protocols == NFC_PROTO_NFC_DEP_MASK) return st21nfca_im_send_dep_req(hdev, skb); *(u8 *)skb_push(skb, 1) = 0x1a; return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, ST21NFCA_WR_XCHG_DATA, skb->data, skb->len, cb, cb_context); case ST21NFCA_RF_READER_14443_3_A_GATE: *(u8 *)skb_push(skb, 1) = 0x1a; /* CTR, see spec:10.2.2.1 */ return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, ST21NFCA_WR_XCHG_DATA, skb->data, skb->len, cb, cb_context); case ST21NFCA_RF_READER_ISO15693_GATE: info->async_cb_type = ST21NFCA_CB_TYPE_READER_ISO15693; info->async_cb = cb; info->async_cb_context = cb_context; *(u8 *)skb_push(skb, 1) = 0x17; return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, ST21NFCA_WR_XCHG_DATA, skb->data, skb->len, st21nfca_hci_data_exchange_cb, info); default: return 1; } } static int st21nfca_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb) { return st21nfca_tm_send_dep_res(hdev, skb); } static int st21nfca_hci_check_presence(struct nfc_hci_dev *hdev, struct nfc_target *target) { u8 fwi = 0x11; switch (target->hci_reader_gate) { case NFC_HCI_RF_READER_A_GATE: case NFC_HCI_RF_READER_B_GATE: /* * PRESENCE_CHECK on those gates is available * However, the answer to this command is taking 3 * fwi * if the card is no present. * Instead, we send an empty I-Frame with a very short * configurable fwi ~604µs. 
*/ return nfc_hci_send_cmd(hdev, target->hci_reader_gate, ST21NFCA_WR_XCHG_DATA, &fwi, 1, NULL); case ST21NFCA_RF_READER_14443_3_A_GATE: return nfc_hci_send_cmd(hdev, target->hci_reader_gate, ST21NFCA_RF_READER_CMD_PRESENCE_CHECK, NULL, 0, NULL); default: return -EOPNOTSUPP; } } static void st21nfca_hci_cmd_received(struct nfc_hci_dev *hdev, u8 pipe, u8 cmd, struct sk_buff *skb) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); u8 gate = hdev->pipes[pipe].gate; pr_debug("cmd: %x\n", cmd); switch (cmd) { case NFC_HCI_ANY_OPEN_PIPE: if (gate != ST21NFCA_APDU_READER_GATE && hdev->pipes[pipe].dest_host != NFC_HCI_UICC_HOST_ID) info->se_info.count_pipes++; if (info->se_info.count_pipes == info->se_info.expected_pipes) { del_timer_sync(&info->se_info.se_active_timer); info->se_info.se_active = false; info->se_info.count_pipes = 0; complete(&info->se_info.req_completion); } break; } } static int st21nfca_admin_event_received(struct nfc_hci_dev *hdev, u8 event, struct sk_buff *skb) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); pr_debug("admin event: %x\n", event); switch (event) { case ST21NFCA_EVT_HOT_PLUG: if (info->se_info.se_active) { if (!ST21NFCA_EVT_HOT_PLUG_IS_INHIBITED(skb)) { del_timer_sync(&info->se_info.se_active_timer); info->se_info.se_active = false; complete(&info->se_info.req_completion); } else { mod_timer(&info->se_info.se_active_timer, jiffies + msecs_to_jiffies(ST21NFCA_SE_TO_PIPES)); } } break; default: nfc_err(&hdev->ndev->dev, "Unexpected event on admin gate\n"); } kfree_skb(skb); return 0; } /* * Returns: * <= 0: driver handled the event, skb consumed * 1: driver does not handle the event, please do standard processing */ static int st21nfca_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, struct sk_buff *skb) { u8 gate = hdev->pipes[pipe].gate; u8 host = hdev->pipes[pipe].dest_host; pr_debug("hci event: %d gate: %x\n", event, gate); switch (gate) { case NFC_HCI_ADMIN_GATE: return st21nfca_admin_event_received(hdev, event, skb); case ST21NFCA_RF_CARD_F_GATE: return st21nfca_dep_event_received(hdev, event, skb); case ST21NFCA_CONNECTIVITY_GATE: return st21nfca_connectivity_event_received(hdev, host, event, skb); case ST21NFCA_APDU_READER_GATE: return st21nfca_apdu_reader_event_received(hdev, event, skb); case NFC_HCI_LOOPBACK_GATE: return st21nfca_hci_loopback_event_received(hdev, event, skb); default: return 1; } } static const struct nfc_hci_ops st21nfca_hci_ops = { .open = st21nfca_hci_open, .close = st21nfca_hci_close, .load_session = st21nfca_hci_load_session, .hci_ready = st21nfca_hci_ready, .xmit = st21nfca_hci_xmit, .start_poll = st21nfca_hci_start_poll, .stop_poll = st21nfca_hci_stop_poll, .dep_link_up = st21nfca_hci_dep_link_up, .dep_link_down = st21nfca_hci_dep_link_down, .target_from_gate = st21nfca_hci_target_from_gate, .complete_target_discovered = st21nfca_hci_complete_target_discovered, .im_transceive = st21nfca_hci_im_transceive, .tm_send = st21nfca_hci_tm_send, .check_presence = st21nfca_hci_check_presence, .event_received = st21nfca_hci_event_received, .cmd_received = st21nfca_hci_cmd_received, .discover_se = st21nfca_hci_discover_se, .enable_se = st21nfca_hci_enable_se, .disable_se = st21nfca_hci_disable_se, .se_io = st21nfca_hci_se_io, }; int st21nfca_hci_probe(void *phy_id, const struct nfc_phy_ops *phy_ops, char *llc_name, int phy_headroom, int phy_tailroom, int phy_payload, struct nfc_hci_dev **hdev, struct st21nfca_se_status *se_status) { struct st21nfca_hci_info *info; int r = 0; int dev_num; u32 
protocols; struct nfc_hci_init_data init_data; unsigned long quirks = 0; info = kzalloc(sizeof(struct st21nfca_hci_info), GFP_KERNEL); if (!info) return -ENOMEM; info->phy_ops = phy_ops; info->phy_id = phy_id; info->state = ST21NFCA_ST_COLD; mutex_init(&info->info_lock); init_data.gate_count = ARRAY_SIZE(st21nfca_gates); memcpy(init_data.gates, st21nfca_gates, sizeof(st21nfca_gates)); /* * Session id must include the driver name + i2c bus addr * persistent info to discriminate 2 identical chips */ dev_num = find_first_zero_bit(dev_mask, ST21NFCA_NUM_DEVICES); if (dev_num >= ST21NFCA_NUM_DEVICES) { r = -ENODEV; goto err_alloc_hdev; } set_bit(dev_num, dev_mask); scnprintf(init_data.session_id, sizeof(init_data.session_id), "%s%2x", "ST21AH", dev_num); protocols = NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK | NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_ISO15693_MASK | NFC_PROTO_NFC_DEP_MASK; set_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &quirks); info->hdev = nfc_hci_allocate_device(&st21nfca_hci_ops, &init_data, quirks, protocols, llc_name, phy_headroom + ST21NFCA_CMDS_HEADROOM, phy_tailroom, phy_payload); if (!info->hdev) { pr_err("Cannot allocate nfc hdev.\n"); r = -ENOMEM; goto err_alloc_hdev; } info->se_status = se_status; nfc_hci_set_clientdata(info->hdev, info); r = nfc_hci_register_device(info->hdev); if (r) goto err_regdev; *hdev = info->hdev; st21nfca_dep_init(info->hdev); st21nfca_se_init(info->hdev); st21nfca_vendor_cmds_init(info->hdev); return 0; err_regdev: nfc_hci_free_device(info->hdev); err_alloc_hdev: kfree(info); return r; } EXPORT_SYMBOL(st21nfca_hci_probe); void st21nfca_hci_remove(struct nfc_hci_dev *hdev) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); st21nfca_dep_deinit(hdev); st21nfca_se_deinit(hdev); nfc_hci_unregister_device(hdev); nfc_hci_free_device(hdev); kfree(info); } EXPORT_SYMBOL(st21nfca_hci_remove); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/nfc/st21nfca/core.c
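When st21nfca_hci_load_session() in core.c rebuilds its gate/pipe table, a pipe counts as usable only if both the CREATED and OPEN flags are set in the low nibble of its state byte; the RF_ACTIVE flag lives in the high nibble and is deliberately masked off by ST21NFCA_DM_IS_PIPE_OPEN(). A standalone sketch of that test, with a mirrored struct and made-up field values:

/* Sketch of the pipe-state check used when rebuilding the gate/pipe table. */
#include <stdio.h>
#include <stdint.h>

#define DM_PIPE_CREATED 0x02
#define DM_PIPE_OPEN    0x04
#define DM_RF_ACTIVE    0x80
#define DM_IS_PIPE_OPEN(p) (((p) & 0x0f) == (DM_PIPE_CREATED | DM_PIPE_OPEN))

struct pipe_info {		/* mirrors st21nfca_pipe_info */
	uint8_t pipe_state;
	uint8_t src_host_id;
	uint8_t src_gate_id;
	uint8_t dst_host_id;
	uint8_t dst_gate_id;
};

int main(void)
{
	struct pipe_info info = {
		DM_PIPE_CREATED | DM_PIPE_OPEN | DM_RF_ACTIVE,
		0x01, 0x15, 0x00, 0x15	/* invented host/gate IDs */
	};

	/* RF_ACTIVE sits in the high nibble, so it must not defeat the test */
	printf("pipe open: %s\n",
	       DM_IS_PIPE_OPEN(info.pipe_state) ? "yes" : "no");
	return 0;
}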
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved. */ #include <net/nfc/hci.h> #include "st21nfca.h" #define ST21NFCA_EVT_UICC_ACTIVATE 0x10 #define ST21NFCA_EVT_UICC_DEACTIVATE 0x13 #define ST21NFCA_EVT_SE_HARD_RESET 0x20 #define ST21NFCA_EVT_SE_SOFT_RESET 0x11 #define ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER 0x21 #define ST21NFCA_EVT_SE_ACTIVATE 0x22 #define ST21NFCA_EVT_SE_DEACTIVATE 0x23 #define ST21NFCA_EVT_TRANSMIT_DATA 0x10 #define ST21NFCA_EVT_WTX_REQUEST 0x11 #define ST21NFCA_EVT_CONNECTIVITY 0x10 #define ST21NFCA_EVT_TRANSACTION 0x12 #define ST21NFCA_SE_TO_HOT_PLUG 1000 /* Connectivity pipe only */ #define ST21NFCA_SE_COUNT_PIPE_UICC 0x01 /* Connectivity + APDU Reader pipe */ #define ST21NFCA_SE_COUNT_PIPE_EMBEDDED 0x02 #define ST21NFCA_SE_MODE_OFF 0x00 #define ST21NFCA_SE_MODE_ON 0x01 #define ST21NFCA_PARAM_ATR 0x01 #define ST21NFCA_ATR_DEFAULT_BWI 0x04 /* * WT = 2^BWI/10[s], convert into msecs and add a secure * room by increasing by 2 this timeout */ #define ST21NFCA_BWI_TO_TIMEOUT(x) ((1 << x) * 200) #define ST21NFCA_ATR_GET_Y_FROM_TD(x) (x >> 4) /* If TA is present bit 0 is set */ #define ST21NFCA_ATR_TA_PRESENT(x) (x & 0x01) /* If TB is present bit 1 is set */ #define ST21NFCA_ATR_TB_PRESENT(x) (x & 0x02) static u8 st21nfca_se_get_bwi(struct nfc_hci_dev *hdev) { int i; u8 td; struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); /* Bits 8 to 5 of the first TB for T=1 encode BWI from zero to nine */ for (i = 1; i < ST21NFCA_ESE_MAX_LENGTH; i++) { td = ST21NFCA_ATR_GET_Y_FROM_TD(info->se_info.atr[i]); if (ST21NFCA_ATR_TA_PRESENT(td)) i++; if (ST21NFCA_ATR_TB_PRESENT(td)) { i++; return info->se_info.atr[i] >> 4; } } return ST21NFCA_ATR_DEFAULT_BWI; } static void st21nfca_se_get_atr(struct nfc_hci_dev *hdev) { int r; struct sk_buff *skb; struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); r = nfc_hci_get_param(hdev, ST21NFCA_APDU_READER_GATE, ST21NFCA_PARAM_ATR, &skb); if (r < 0) return; if (skb->len <= ST21NFCA_ESE_MAX_LENGTH) { memcpy(info->se_info.atr, skb->data, skb->len); info->se_info.wt_timeout = ST21NFCA_BWI_TO_TIMEOUT(st21nfca_se_get_bwi(hdev)); } kfree_skb(skb); } static int st21nfca_hci_control_se(struct nfc_hci_dev *hdev, u32 se_idx, u8 state) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); int r, i; struct sk_buff *sk_host_list; u8 se_event, host_id; switch (se_idx) { case NFC_HCI_UICC_HOST_ID: se_event = (state == ST21NFCA_SE_MODE_ON ? ST21NFCA_EVT_UICC_ACTIVATE : ST21NFCA_EVT_UICC_DEACTIVATE); info->se_info.count_pipes = 0; info->se_info.expected_pipes = ST21NFCA_SE_COUNT_PIPE_UICC; break; case ST21NFCA_ESE_HOST_ID: se_event = (state == ST21NFCA_SE_MODE_ON ? ST21NFCA_EVT_SE_ACTIVATE : ST21NFCA_EVT_SE_DEACTIVATE); info->se_info.count_pipes = 0; info->se_info.expected_pipes = ST21NFCA_SE_COUNT_PIPE_EMBEDDED; break; default: return -EINVAL; } /* * Wait for an EVT_HOT_PLUG in order to * retrieve a relevant host list. 
*/ reinit_completion(&info->se_info.req_completion); r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE, se_event, NULL, 0); if (r < 0) return r; mod_timer(&info->se_info.se_active_timer, jiffies + msecs_to_jiffies(ST21NFCA_SE_TO_HOT_PLUG)); info->se_info.se_active = true; /* Ignore return value and check in any case the host_list */ wait_for_completion_interruptible(&info->se_info.req_completion); r = nfc_hci_get_param(hdev, NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_HOST_LIST, &sk_host_list); if (r < 0) return r; for (i = 0; i < sk_host_list->len && sk_host_list->data[i] != se_idx; i++) ; host_id = sk_host_list->data[i]; kfree_skb(sk_host_list); if (state == ST21NFCA_SE_MODE_ON && host_id == se_idx) return se_idx; else if (state == ST21NFCA_SE_MODE_OFF && host_id != se_idx) return se_idx; return -1; } int st21nfca_hci_discover_se(struct nfc_hci_dev *hdev) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); int se_count = 0; if (test_bit(ST21NFCA_FACTORY_MODE, &hdev->quirks)) return 0; if (info->se_status->is_uicc_present) { nfc_add_se(hdev->ndev, NFC_HCI_UICC_HOST_ID, NFC_SE_UICC); se_count++; } if (info->se_status->is_ese_present) { nfc_add_se(hdev->ndev, ST21NFCA_ESE_HOST_ID, NFC_SE_EMBEDDED); se_count++; } return !se_count; } EXPORT_SYMBOL(st21nfca_hci_discover_se); int st21nfca_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx) { int r; /* * According to upper layer, se_idx == NFC_SE_UICC when * info->se_status->is_uicc_enable is true should never happen. * Same for eSE. */ r = st21nfca_hci_control_se(hdev, se_idx, ST21NFCA_SE_MODE_ON); if (r == ST21NFCA_ESE_HOST_ID) { st21nfca_se_get_atr(hdev); r = nfc_hci_send_event(hdev, ST21NFCA_APDU_READER_GATE, ST21NFCA_EVT_SE_SOFT_RESET, NULL, 0); if (r < 0) return r; } else if (r < 0) { /* * The activation tentative failed, the secure element * is not connected. Remove from the list. */ nfc_remove_se(hdev->ndev, se_idx); return r; } return 0; } EXPORT_SYMBOL(st21nfca_hci_enable_se); int st21nfca_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx) { int r; /* * According to upper layer, se_idx == NFC_SE_UICC when * info->se_status->is_uicc_enable is true should never happen * Same for eSE. */ r = st21nfca_hci_control_se(hdev, se_idx, ST21NFCA_SE_MODE_OFF); if (r < 0) return r; return 0; } EXPORT_SYMBOL(st21nfca_hci_disable_se); int st21nfca_hci_se_io(struct nfc_hci_dev *hdev, u32 se_idx, u8 *apdu, size_t apdu_length, se_io_cb_t cb, void *cb_context) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); pr_debug("se_io %x\n", se_idx); switch (se_idx) { case ST21NFCA_ESE_HOST_ID: info->se_info.cb = cb; info->se_info.cb_context = cb_context; mod_timer(&info->se_info.bwi_timer, jiffies + msecs_to_jiffies(info->se_info.wt_timeout)); info->se_info.bwi_active = true; return nfc_hci_send_event(hdev, ST21NFCA_APDU_READER_GATE, ST21NFCA_EVT_TRANSMIT_DATA, apdu, apdu_length); default: /* Need to free cb_context here as at the moment we can't * clearly indicate to the caller if the callback function * would be called (and free it) or not. In both cases a * negative value may be returned to the caller. */ kfree(cb_context); return -ENODEV; } } EXPORT_SYMBOL(st21nfca_hci_se_io); static void st21nfca_se_wt_work(struct work_struct *work) { /* * No answer from the secure element * within the defined timeout. * Let's send a reset request as recovery procedure. * According to the situation, we first try to send a software reset * to the secure element. 
If the next command is still not * answering in time, we send to the CLF a secure element hardware * reset request. */ /* hardware reset managed through VCC_UICC_OUT power supply */ u8 param = 0x01; struct st21nfca_hci_info *info = container_of(work, struct st21nfca_hci_info, se_info.timeout_work); info->se_info.bwi_active = false; if (!info->se_info.xch_error) { info->se_info.xch_error = true; nfc_hci_send_event(info->hdev, ST21NFCA_APDU_READER_GATE, ST21NFCA_EVT_SE_SOFT_RESET, NULL, 0); } else { info->se_info.xch_error = false; nfc_hci_send_event(info->hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_EVT_SE_HARD_RESET, &param, 1); } info->se_info.cb(info->se_info.cb_context, NULL, 0, -ETIME); } static void st21nfca_se_wt_timeout(struct timer_list *t) { struct st21nfca_hci_info *info = from_timer(info, t, se_info.bwi_timer); schedule_work(&info->se_info.timeout_work); } static void st21nfca_se_activation_timeout(struct timer_list *t) { struct st21nfca_hci_info *info = from_timer(info, t, se_info.se_active_timer); info->se_info.se_active = false; complete(&info->se_info.req_completion); } /* * Returns: * <= 0: driver handled the event, skb consumed * 1: driver does not handle the event, please do standard processing */ int st21nfca_connectivity_event_received(struct nfc_hci_dev *hdev, u8 host, u8 event, struct sk_buff *skb) { int r = 0; struct device *dev = &hdev->ndev->dev; struct nfc_evt_transaction *transaction; u32 aid_len; u8 params_len; pr_debug("connectivity gate event: %x\n", event); switch (event) { case ST21NFCA_EVT_CONNECTIVITY: r = nfc_se_connectivity(hdev->ndev, host); break; case ST21NFCA_EVT_TRANSACTION: /* According to specification etsi 102 622 * 11.2.2.4 EVT_TRANSACTION Table 52 * Description Tag Length * AID 81 5 to 16 * PARAMETERS 82 0 to 255 * * The key differences are aid storage length is variably sized * in the packet, but fixed in nfc_evt_transaction, and that the aid_len * is u8 in the packet, but u32 in the structure, and the tags in * the packet are not included in nfc_evt_transaction. * * size in bytes: 1 1 5-16 1 1 0-255 * offset: 0 1 2 aid_len + 2 aid_len + 3 aid_len + 4 * member name: aid_tag(M) aid_len aid params_tag(M) params_len params * example: 0x81 5-16 X 0x82 0-255 X */ if (skb->len < 2 || skb->data[0] != NFC_EVT_TRANSACTION_AID_TAG) return -EPROTO; aid_len = skb->data[1]; if (skb->len < aid_len + 4 || aid_len > sizeof(transaction->aid)) return -EPROTO; params_len = skb->data[aid_len + 3]; /* Verify PARAMETERS tag is (82), and final check that there is enough * space in the packet to read everything. 
*/ if ((skb->data[aid_len + 2] != NFC_EVT_TRANSACTION_PARAMS_TAG) || (skb->len < aid_len + 4 + params_len)) return -EPROTO; transaction = devm_kzalloc(dev, sizeof(*transaction) + params_len, GFP_KERNEL); if (!transaction) return -ENOMEM; transaction->aid_len = aid_len; transaction->params_len = params_len; memcpy(transaction->aid, &skb->data[2], aid_len); memcpy(transaction->params, &skb->data[aid_len + 4], params_len); r = nfc_se_transaction(hdev->ndev, host, transaction); break; default: nfc_err(&hdev->ndev->dev, "Unexpected event on connectivity gate\n"); return 1; } kfree_skb(skb); return r; } EXPORT_SYMBOL(st21nfca_connectivity_event_received); int st21nfca_apdu_reader_event_received(struct nfc_hci_dev *hdev, u8 event, struct sk_buff *skb) { int r = 0; struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); pr_debug("apdu reader gate event: %x\n", event); switch (event) { case ST21NFCA_EVT_TRANSMIT_DATA: del_timer_sync(&info->se_info.bwi_timer); cancel_work_sync(&info->se_info.timeout_work); info->se_info.bwi_active = false; r = nfc_hci_send_event(hdev, ST21NFCA_DEVICE_MGNT_GATE, ST21NFCA_EVT_SE_END_OF_APDU_TRANSFER, NULL, 0); if (r < 0) goto exit; info->se_info.cb(info->se_info.cb_context, skb->data, skb->len, 0); break; case ST21NFCA_EVT_WTX_REQUEST: mod_timer(&info->se_info.bwi_timer, jiffies + msecs_to_jiffies(info->se_info.wt_timeout)); break; default: nfc_err(&hdev->ndev->dev, "Unexpected event on apdu reader gate\n"); return 1; } exit: kfree_skb(skb); return r; } EXPORT_SYMBOL(st21nfca_apdu_reader_event_received); void st21nfca_se_init(struct nfc_hci_dev *hdev) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); init_completion(&info->se_info.req_completion); INIT_WORK(&info->se_info.timeout_work, st21nfca_se_wt_work); /* initialize timers */ timer_setup(&info->se_info.bwi_timer, st21nfca_se_wt_timeout, 0); info->se_info.bwi_active = false; timer_setup(&info->se_info.se_active_timer, st21nfca_se_activation_timeout, 0); info->se_info.se_active = false; info->se_info.count_pipes = 0; info->se_info.expected_pipes = 0; info->se_info.xch_error = false; info->se_info.wt_timeout = ST21NFCA_BWI_TO_TIMEOUT(ST21NFCA_ATR_DEFAULT_BWI); } EXPORT_SYMBOL(st21nfca_se_init); void st21nfca_se_deinit(struct nfc_hci_dev *hdev) { struct st21nfca_hci_info *info = nfc_hci_get_clientdata(hdev); if (info->se_info.bwi_active) del_timer_sync(&info->se_info.bwi_timer); if (info->se_info.se_active) del_timer_sync(&info->se_info.se_active_timer); cancel_work_sync(&info->se_info.timeout_work); info->se_info.bwi_active = false; info->se_info.se_active = false; } EXPORT_SYMBOL(st21nfca_se_deinit);
linux-master
drivers/nfc/st21nfca/se.c
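The EVT_TRANSACTION branch in se.c walks a small TLV: an AID tag, length and value, then a PARAMETERS tag, length and value, with every offset bounds-checked before anything is copied. The sketch below reproduces those checks in plain C; the tag values match the ETSI TS 102 622 layout quoted in the driver comment, the 16-byte AID cap stands in for sizeof(transaction->aid), and the sample buffer is invented:

/* User-space sketch of the EVT_TRANSACTION TLV validation. */
#include <stdio.h>
#include <stdint.h>

#define AID_TAG    0x81
#define PARAMS_TAG 0x82

/* Returns 0 on a well-formed event, -1 on malformed input. */
static int parse_transaction(const uint8_t *buf, size_t len)
{
	size_t aid_len, params_len;

	if (len < 2 || buf[0] != AID_TAG)
		return -1;
	aid_len = buf[1];
	if (len < aid_len + 4 || aid_len > 16)	/* 16 ~ sizeof(->aid) */
		return -1;
	if (buf[aid_len + 2] != PARAMS_TAG)
		return -1;
	params_len = buf[aid_len + 3];
	if (len < aid_len + 4 + params_len)
		return -1;

	printf("aid_len=%zu params_len=%zu\n", aid_len, params_len);
	return 0;
}

int main(void)
{
	/* tag 0x81, len 5, AID bytes, tag 0x82, len 2, parameter bytes */
	const uint8_t evt[] = { 0x81, 5, 1, 2, 3, 4, 5, 0x82, 2, 0xaa, 0xbb };

	return parse_transaction(evt, sizeof(evt));
}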
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 Intel Corporation. All rights reserved. * * HCI based Driver for NXP pn544 NFC Chip */ #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/nfc.h> #include <net/nfc/hci.h> #include <net/nfc/llc.h> #include "../mei_phy.h" #include "pn544.h" #define PN544_DRIVER_NAME "pn544" static int pn544_mei_probe(struct mei_cl_device *cldev, const struct mei_cl_device_id *id) { struct nfc_mei_phy *phy; int r; phy = nfc_mei_phy_alloc(cldev); if (!phy) return -ENOMEM; r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME, MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, NULL, &phy->hdev); if (r < 0) { nfc_mei_phy_free(phy); return r; } return 0; } static void pn544_mei_remove(struct mei_cl_device *cldev) { struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev); pn544_hci_remove(phy->hdev); nfc_mei_phy_free(phy); } static struct mei_cl_device_id pn544_mei_tbl[] = { { PN544_DRIVER_NAME, MEI_NFC_UUID, MEI_CL_VERSION_ANY}, /* required last entry */ { } }; MODULE_DEVICE_TABLE(mei, pn544_mei_tbl); static struct mei_cl_driver pn544_driver = { .id_table = pn544_mei_tbl, .name = PN544_DRIVER_NAME, .probe = pn544_mei_probe, .remove = pn544_mei_remove, }; module_mei_cl_driver(pn544_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/nfc/pn544/mei.c
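The MEI glue above passes pn544_hci_probe() a phy headroom of MEI_NFC_HEADER_SIZE so that later code can prepend protocol bytes with skb_push() without reallocating; st21nfca's DEP path leans on the same pattern when it pushes PNI, command, direction and length bytes in reverse order. Here is a toy sketch of that reserve-then-push framing with a stand-in buffer type (the real drivers use struct sk_buff, and st21nfca pushes one more timeout byte into the remaining headroom):

/* Toy stand-ins for skb_reserve()/skb_push(); illustrative only. */
#include <stdio.h>
#include <stdint.h>

struct buf {
	uint8_t storage[32];
	uint8_t *data;
	size_t len;
};

static void buf_reserve(struct buf *b, size_t headroom)
{
	b->data = b->storage + headroom;
	b->len = 0;
}

static uint8_t *buf_push(struct buf *b, size_t n)
{
	b->data -= n;		/* the frame grows toward the start of storage */
	b->len += n;
	return b->data;
}

int main(void)
{
	struct buf b;
	uint8_t *p;
	size_t i;

	buf_reserve(&b, 5);		/* headroom for a 5-byte prologue */
	b.data[0] = 0x01;		/* two payload bytes already in place */
	b.data[1] = 0x02;
	b.len = 2;

	*buf_push(&b, 1) = 0x00;	/* PNI */
	*buf_push(&b, 1) = 0x06;	/* DEP_REQ */
	*buf_push(&b, 1) = 0xd4;	/* NFCIP-1 request direction byte */
	p = buf_push(&b, 1);		/* length byte counts itself, */
	*p = (uint8_t)b.len;		/* as in st21nfca_im_send_dep_req() */

	for (i = 0; i < b.len; i++)
		printf("%02x ", (unsigned int)b.data[i]);
	printf("\n");			/* prints: 06 d4 06 00 01 02 */
	return 0;
}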
// SPDX-License-Identifier: GPL-2.0-only /* * HCI based Driver for NXP PN544 NFC Chip * * Copyright (C) 2012 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/nfc.h> #include <net/nfc/hci.h> #include "pn544.h" /* Timing restrictions (ms) */ #define PN544_HCI_RESETVEN_TIME 30 enum pn544_state { PN544_ST_COLD, PN544_ST_FW_READY, PN544_ST_READY, }; #define FULL_VERSION_LEN 11 /* Proprietary commands */ #define PN544_WRITE 0x3f #define PN544_TEST_SWP 0x21 /* Proprietary gates, events, commands and registers */ /* NFC_HCI_RF_READER_A_GATE additional registers and commands */ #define PN544_RF_READER_A_AUTO_ACTIVATION 0x10 #define PN544_RF_READER_A_CMD_CONTINUE_ACTIVATION 0x12 #define PN544_MIFARE_CMD 0x21 /* Commands that apply to all RF readers */ #define PN544_RF_READER_CMD_PRESENCE_CHECK 0x30 #define PN544_RF_READER_CMD_ACTIVATE_NEXT 0x32 /* NFC_HCI_ID_MGMT_GATE additional registers */ #define PN544_ID_MGMT_FULL_VERSION_SW 0x10 #define PN544_RF_READER_ISO15693_GATE 0x12 #define PN544_RF_READER_F_GATE 0x14 #define PN544_FELICA_ID 0x04 #define PN544_FELICA_RAW 0x20 #define PN544_RF_READER_JEWEL_GATE 0x15 #define PN544_JEWEL_RAW_CMD 0x23 #define PN544_RF_READER_NFCIP1_INITIATOR_GATE 0x30 #define PN544_RF_READER_NFCIP1_TARGET_GATE 0x31 #define PN544_SYS_MGMT_GATE 0x90 #define PN544_SYS_MGMT_INFO_NOTIFICATION 0x02 #define PN544_POLLING_LOOP_MGMT_GATE 0x94 #define PN544_DEP_MODE 0x01 #define PN544_DEP_ATR_REQ 0x02 #define PN544_DEP_ATR_RES 0x03 #define PN544_DEP_MERGE 0x0D #define PN544_PL_RDPHASES 0x06 #define PN544_PL_EMULATION 0x07 #define PN544_PL_NFCT_DEACTIVATED 0x09 #define PN544_SWP_MGMT_GATE 0xA0 #define PN544_SWP_DEFAULT_MODE 0x01 #define PN544_NFC_WI_MGMT_GATE 0xA1 #define PN544_NFC_ESE_DEFAULT_MODE 0x01 #define PN544_HCI_EVT_SND_DATA 0x01 #define PN544_HCI_EVT_ACTIVATED 0x02 #define PN544_HCI_EVT_DEACTIVATED 0x03 #define PN544_HCI_EVT_RCV_DATA 0x04 #define PN544_HCI_EVT_CONTINUE_MI 0x05 #define PN544_HCI_EVT_SWITCH_MODE 0x03 #define PN544_HCI_CMD_ATTREQUEST 0x12 #define PN544_HCI_CMD_CONTINUE_ACTIVATION 0x13 static const struct nfc_hci_gate pn544_gates[] = { {NFC_HCI_ADMIN_GATE, NFC_HCI_INVALID_PIPE}, {NFC_HCI_LOOPBACK_GATE, NFC_HCI_INVALID_PIPE}, {NFC_HCI_ID_MGMT_GATE, NFC_HCI_INVALID_PIPE}, {NFC_HCI_LINK_MGMT_GATE, NFC_HCI_INVALID_PIPE}, {NFC_HCI_RF_READER_B_GATE, NFC_HCI_INVALID_PIPE}, {NFC_HCI_RF_READER_A_GATE, NFC_HCI_INVALID_PIPE}, {PN544_SYS_MGMT_GATE, NFC_HCI_INVALID_PIPE}, {PN544_SWP_MGMT_GATE, NFC_HCI_INVALID_PIPE}, {PN544_POLLING_LOOP_MGMT_GATE, NFC_HCI_INVALID_PIPE}, {PN544_NFC_WI_MGMT_GATE, NFC_HCI_INVALID_PIPE}, {PN544_RF_READER_F_GATE, NFC_HCI_INVALID_PIPE}, {PN544_RF_READER_JEWEL_GATE, NFC_HCI_INVALID_PIPE}, {PN544_RF_READER_ISO15693_GATE, NFC_HCI_INVALID_PIPE}, {PN544_RF_READER_NFCIP1_INITIATOR_GATE, NFC_HCI_INVALID_PIPE}, {PN544_RF_READER_NFCIP1_TARGET_GATE, NFC_HCI_INVALID_PIPE} }; /* Largest headroom needed for outgoing custom commands */ #define PN544_CMDS_HEADROOM 2 struct pn544_hci_info { const struct nfc_phy_ops *phy_ops; void *phy_id; struct nfc_hci_dev *hdev; enum pn544_state state; struct mutex info_lock; int async_cb_type; data_exchange_cb_t async_cb; void *async_cb_context; fw_download_t fw_download; }; static int pn544_hci_open(struct nfc_hci_dev *hdev) { struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev); int r = 0; mutex_lock(&info->info_lock); if (info->state != PN544_ST_COLD) { r = -EBUSY; goto out; } r = 
info->phy_ops->enable(info->phy_id); if (r == 0) info->state = PN544_ST_READY; out: mutex_unlock(&info->info_lock); return r; } static void pn544_hci_close(struct nfc_hci_dev *hdev) { struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev); mutex_lock(&info->info_lock); if (info->state == PN544_ST_COLD) goto out; info->phy_ops->disable(info->phy_id); info->state = PN544_ST_COLD; out: mutex_unlock(&info->info_lock); } static int pn544_hci_ready(struct nfc_hci_dev *hdev) { struct sk_buff *skb; static struct hw_config { u8 adr[2]; u8 value; } hw_config[] = { {{0x9f, 0x9a}, 0x00}, {{0x98, 0x10}, 0xbc}, {{0x9e, 0x71}, 0x00}, {{0x98, 0x09}, 0x00}, {{0x9e, 0xb4}, 0x00}, {{0x9c, 0x01}, 0x08}, {{0x9e, 0xaa}, 0x01}, {{0x9b, 0xd1}, 0x17}, {{0x9b, 0xd2}, 0x58}, {{0x9b, 0xd3}, 0x10}, {{0x9b, 0xd4}, 0x47}, {{0x9b, 0xd5}, 0x0c}, {{0x9b, 0xd6}, 0x37}, {{0x9b, 0xdd}, 0x33}, {{0x9b, 0x84}, 0x00}, {{0x99, 0x81}, 0x79}, {{0x99, 0x31}, 0x79}, {{0x98, 0x00}, 0x3f}, {{0x9f, 0x09}, 0x02}, {{0x9f, 0x0a}, 0x05}, {{0x9e, 0xd1}, 0xa1}, {{0x99, 0x23}, 0x01}, {{0x9e, 0x74}, 0x00}, {{0x9e, 0x90}, 0x00}, {{0x9f, 0x28}, 0x10}, {{0x9f, 0x35}, 0x04}, {{0x9f, 0x36}, 0x11}, {{0x9c, 0x31}, 0x00}, {{0x9c, 0x32}, 0x00}, {{0x9c, 0x19}, 0x0a}, {{0x9c, 0x1a}, 0x0a}, {{0x9c, 0x0c}, 0x00}, {{0x9c, 0x0d}, 0x00}, {{0x9c, 0x12}, 0x00}, {{0x9c, 0x13}, 0x00}, {{0x98, 0xa2}, 0x09}, {{0x98, 0x93}, 0x00}, {{0x98, 0x7d}, 0x08}, {{0x98, 0x7e}, 0x00}, {{0x9f, 0xc8}, 0x00}, }; struct hw_config *p = hw_config; int count = ARRAY_SIZE(hw_config); struct sk_buff *res_skb; u8 param[4]; int r; param[0] = 0; while (count--) { param[1] = p->adr[0]; param[2] = p->adr[1]; param[3] = p->value; r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE, PN544_WRITE, param, 4, &res_skb); if (r < 0) return r; if (res_skb->len != 1) { kfree_skb(res_skb); return -EPROTO; } if (res_skb->data[0] != p->value) { kfree_skb(res_skb); return -EIO; } kfree_skb(res_skb); p++; } param[0] = NFC_HCI_UICC_HOST_ID; r = nfc_hci_set_param(hdev, NFC_HCI_ADMIN_GATE, NFC_HCI_ADMIN_WHITELIST, param, 1); if (r < 0) return r; param[0] = 0x3d; r = nfc_hci_set_param(hdev, PN544_SYS_MGMT_GATE, PN544_SYS_MGMT_INFO_NOTIFICATION, param, 1); if (r < 0) return r; param[0] = 0x0; r = nfc_hci_set_param(hdev, NFC_HCI_RF_READER_A_GATE, PN544_RF_READER_A_AUTO_ACTIVATION, param, 1); if (r < 0) return r; r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_EVT_END_OPERATION, NULL, 0); if (r < 0) return r; param[0] = 0x1; r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE, PN544_PL_NFCT_DEACTIVATED, param, 1); if (r < 0) return r; param[0] = 0x0; r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE, PN544_PL_RDPHASES, param, 1); if (r < 0) return r; r = nfc_hci_get_param(hdev, NFC_HCI_ID_MGMT_GATE, PN544_ID_MGMT_FULL_VERSION_SW, &skb); if (r < 0) return r; if (skb->len != FULL_VERSION_LEN) { kfree_skb(skb); return -EINVAL; } print_hex_dump(KERN_DEBUG, "FULL VERSION SOFTWARE INFO: ", DUMP_PREFIX_NONE, 16, 1, skb->data, FULL_VERSION_LEN, false); kfree_skb(skb); return 0; } static int pn544_hci_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb) { struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev); return info->phy_ops->write(info->phy_id, skb); } static int pn544_hci_start_poll(struct nfc_hci_dev *hdev, u32 im_protocols, u32 tm_protocols) { u8 phases = 0; int r; u8 duration[2]; u8 activated; u8 i_mode = 0x3f; /* Enable all supported modes */ u8 t_mode = 0x0f; u8 t_merge = 0x01; /* Enable merge by default */ pr_info(DRIVER_DESC ": %s protocols 0x%x 0x%x\n", __func__, im_protocols, 
tm_protocols); r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_EVT_END_OPERATION, NULL, 0); if (r < 0) return r; duration[0] = 0x18; duration[1] = 0x6a; r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE, PN544_PL_EMULATION, duration, 2); if (r < 0) return r; activated = 0; r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE, PN544_PL_NFCT_DEACTIVATED, &activated, 1); if (r < 0) return r; if (im_protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_JEWEL_MASK)) phases |= 1; /* Type A */ if (im_protocols & NFC_PROTO_FELICA_MASK) { phases |= (1 << 2); /* Type F 212 */ phases |= (1 << 3); /* Type F 424 */ } phases |= (1 << 5); /* NFC active */ r = nfc_hci_set_param(hdev, PN544_POLLING_LOOP_MGMT_GATE, PN544_PL_RDPHASES, &phases, 1); if (r < 0) return r; if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) { hdev->gb = nfc_get_local_general_bytes(hdev->ndev, &hdev->gb_len); pr_debug("generate local bytes %p\n", hdev->gb); if (hdev->gb == NULL || hdev->gb_len == 0) { im_protocols &= ~NFC_PROTO_NFC_DEP_MASK; tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK; } } if (im_protocols & NFC_PROTO_NFC_DEP_MASK) { r = nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE, NFC_HCI_EVT_END_OPERATION, NULL, 0); if (r < 0) return r; r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE, PN544_DEP_MODE, &i_mode, 1); if (r < 0) return r; r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE, PN544_DEP_ATR_REQ, hdev->gb, hdev->gb_len); if (r < 0) return r; r = nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE, NFC_HCI_EVT_READER_REQUESTED, NULL, 0); if (r < 0) nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE, NFC_HCI_EVT_END_OPERATION, NULL, 0); } if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) { r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE, PN544_DEP_MODE, &t_mode, 1); if (r < 0) return r; r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE, PN544_DEP_ATR_RES, hdev->gb, hdev->gb_len); if (r < 0) return r; r = nfc_hci_set_param(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE, PN544_DEP_MERGE, &t_merge, 1); if (r < 0) return r; } r = nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_EVT_READER_REQUESTED, NULL, 0); if (r < 0) nfc_hci_send_event(hdev, NFC_HCI_RF_READER_A_GATE, NFC_HCI_EVT_END_OPERATION, NULL, 0); return r; } static int pn544_hci_dep_link_up(struct nfc_hci_dev *hdev, struct nfc_target *target, u8 comm_mode, u8 *gb, size_t gb_len) { struct sk_buff *rgb_skb = NULL; int r; r = nfc_hci_get_param(hdev, target->hci_reader_gate, PN544_DEP_ATR_RES, &rgb_skb); if (r < 0) return r; if (rgb_skb->len == 0 || rgb_skb->len > NFC_GB_MAXSIZE) { r = -EPROTO; goto exit; } print_hex_dump(KERN_DEBUG, "remote gb: ", DUMP_PREFIX_OFFSET, 16, 1, rgb_skb->data, rgb_skb->len, true); r = nfc_set_remote_general_bytes(hdev->ndev, rgb_skb->data, rgb_skb->len); if (r == 0) r = nfc_dep_link_is_up(hdev->ndev, target->idx, comm_mode, NFC_RF_INITIATOR); exit: kfree_skb(rgb_skb); return r; } static int pn544_hci_dep_link_down(struct nfc_hci_dev *hdev) { return nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE, NFC_HCI_EVT_END_OPERATION, NULL, 0); } static int pn544_hci_target_from_gate(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) { switch (gate) { case PN544_RF_READER_F_GATE: target->supported_protocols = NFC_PROTO_FELICA_MASK; break; case PN544_RF_READER_JEWEL_GATE: target->supported_protocols = NFC_PROTO_JEWEL_MASK; target->sens_res = 0x0c00; break; case 
PN544_RF_READER_NFCIP1_INITIATOR_GATE: target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; break; default: return -EPROTO; } return 0; } static int pn544_hci_complete_target_discovered(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) { struct sk_buff *uid_skb; int r = 0; if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE) return r; if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) { r = nfc_hci_send_cmd(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE, PN544_HCI_CMD_CONTINUE_ACTIVATION, NULL, 0, NULL); if (r < 0) return r; target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE; } else if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) { if (target->nfcid1_len != 4 && target->nfcid1_len != 7 && target->nfcid1_len != 10) return -EPROTO; r = nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE, PN544_RF_READER_CMD_ACTIVATE_NEXT, target->nfcid1, target->nfcid1_len, NULL); } else if (target->supported_protocols & NFC_PROTO_FELICA_MASK) { r = nfc_hci_get_param(hdev, PN544_RF_READER_F_GATE, PN544_FELICA_ID, &uid_skb); if (r < 0) return r; if (uid_skb->len != 8) { kfree_skb(uid_skb); return -EPROTO; } /* Type F NFC-DEP IDm has prefix 0x01FE */ if ((uid_skb->data[0] == 0x01) && (uid_skb->data[1] == 0xfe)) { kfree_skb(uid_skb); r = nfc_hci_send_cmd(hdev, PN544_RF_READER_NFCIP1_INITIATOR_GATE, PN544_HCI_CMD_CONTINUE_ACTIVATION, NULL, 0, NULL); if (r < 0) return r; target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; target->hci_reader_gate = PN544_RF_READER_NFCIP1_INITIATOR_GATE; } else { r = nfc_hci_send_cmd(hdev, PN544_RF_READER_F_GATE, PN544_RF_READER_CMD_ACTIVATE_NEXT, uid_skb->data, uid_skb->len, NULL); kfree_skb(uid_skb); } } else if (target->supported_protocols & NFC_PROTO_ISO14443_MASK) { /* * TODO: maybe other ISO 14443 require some kind of continue * activation, but for now we've seen only this one below. */ if (target->sens_res == 0x4403) /* Type 4 Mifare DESFire */ r = nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE, PN544_RF_READER_A_CMD_CONTINUE_ACTIVATION, NULL, 0, NULL); } return r; } #define PN544_CB_TYPE_READER_F 1 static void pn544_hci_data_exchange_cb(void *context, struct sk_buff *skb, int err) { struct pn544_hci_info *info = context; switch (info->async_cb_type) { case PN544_CB_TYPE_READER_F: if (err == 0) skb_pull(skb, 1); info->async_cb(info->async_cb_context, skb, err); break; default: if (err == 0) kfree_skb(skb); break; } } #define MIFARE_CMD_AUTH_KEY_A 0x60 #define MIFARE_CMD_AUTH_KEY_B 0x61 #define MIFARE_CMD_HEADER 2 #define MIFARE_UID_LEN 4 #define MIFARE_KEY_LEN 6 #define MIFARE_CMD_LEN 12 /* * Returns: * <= 0: driver handled the data exchange * 1: driver doesn't especially handle, please do standard processing */ static int pn544_hci_im_transceive(struct nfc_hci_dev *hdev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) { struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev); pr_info(DRIVER_DESC ": %s for gate=%d\n", __func__, target->hci_reader_gate); switch (target->hci_reader_gate) { case NFC_HCI_RF_READER_A_GATE: if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) { /* * It seems that pn544 is inverting key and UID for * MIFARE authentication commands. 
*/ if (skb->len == MIFARE_CMD_LEN && (skb->data[0] == MIFARE_CMD_AUTH_KEY_A || skb->data[0] == MIFARE_CMD_AUTH_KEY_B)) { u8 uid[MIFARE_UID_LEN]; u8 *data = skb->data + MIFARE_CMD_HEADER; memcpy(uid, data + MIFARE_KEY_LEN, MIFARE_UID_LEN); memmove(data + MIFARE_UID_LEN, data, MIFARE_KEY_LEN); memcpy(data, uid, MIFARE_UID_LEN); } return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, PN544_MIFARE_CMD, skb->data, skb->len, cb, cb_context); } else return 1; case PN544_RF_READER_F_GATE: *(u8 *)skb_push(skb, 1) = 0; *(u8 *)skb_push(skb, 1) = 0; info->async_cb_type = PN544_CB_TYPE_READER_F; info->async_cb = cb; info->async_cb_context = cb_context; return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, PN544_FELICA_RAW, skb->data, skb->len, pn544_hci_data_exchange_cb, info); case PN544_RF_READER_JEWEL_GATE: return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, PN544_JEWEL_RAW_CMD, skb->data, skb->len, cb, cb_context); case PN544_RF_READER_NFCIP1_INITIATOR_GATE: *(u8 *)skb_push(skb, 1) = 0; return nfc_hci_send_event(hdev, target->hci_reader_gate, PN544_HCI_EVT_SND_DATA, skb->data, skb->len); default: return 1; } } static int pn544_hci_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb) { int r; /* Set default false for multiple information chaining */ *(u8 *)skb_push(skb, 1) = 0; r = nfc_hci_send_event(hdev, PN544_RF_READER_NFCIP1_TARGET_GATE, PN544_HCI_EVT_SND_DATA, skb->data, skb->len); kfree_skb(skb); return r; } static int pn544_hci_check_presence(struct nfc_hci_dev *hdev, struct nfc_target *target) { pr_debug("supported protocol %d\n", target->supported_protocols); if (target->supported_protocols & (NFC_PROTO_ISO14443_MASK | NFC_PROTO_ISO14443_B_MASK)) { return nfc_hci_send_cmd(hdev, target->hci_reader_gate, PN544_RF_READER_CMD_PRESENCE_CHECK, NULL, 0, NULL); } else if (target->supported_protocols & NFC_PROTO_MIFARE_MASK) { if (target->nfcid1_len != 4 && target->nfcid1_len != 7 && target->nfcid1_len != 10) return -EOPNOTSUPP; return nfc_hci_send_cmd(hdev, NFC_HCI_RF_READER_A_GATE, PN544_RF_READER_CMD_ACTIVATE_NEXT, target->nfcid1, target->nfcid1_len, NULL); } else if (target->supported_protocols & (NFC_PROTO_JEWEL_MASK | NFC_PROTO_FELICA_MASK)) { return -EOPNOTSUPP; } else if (target->supported_protocols & NFC_PROTO_NFC_DEP_MASK) { return nfc_hci_send_cmd(hdev, target->hci_reader_gate, PN544_HCI_CMD_ATTREQUEST, NULL, 0, NULL); } return 0; } /* * Returns: * <= 0: driver handled the event, skb consumed * 1: driver does not handle the event, please do standard processing */ static int pn544_hci_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, struct sk_buff *skb) { struct sk_buff *rgb_skb = NULL; u8 gate = hdev->pipes[pipe].gate; int r; pr_debug("hci event %d\n", event); switch (event) { case PN544_HCI_EVT_ACTIVATED: if (gate == PN544_RF_READER_NFCIP1_INITIATOR_GATE) { r = nfc_hci_target_discovered(hdev, gate); } else if (gate == PN544_RF_READER_NFCIP1_TARGET_GATE) { r = nfc_hci_get_param(hdev, gate, PN544_DEP_ATR_REQ, &rgb_skb); if (r < 0) goto exit; r = nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK, NFC_COMM_PASSIVE, rgb_skb->data, rgb_skb->len); kfree_skb(rgb_skb); } else { r = -EINVAL; } break; case PN544_HCI_EVT_DEACTIVATED: r = nfc_hci_send_event(hdev, gate, NFC_HCI_EVT_END_OPERATION, NULL, 0); break; case PN544_HCI_EVT_RCV_DATA: if (skb->len < 2) { r = -EPROTO; goto exit; } if (skb->data[0] != 0) { pr_debug("data0 %d\n", skb->data[0]); r = -EPROTO; goto exit; } skb_pull(skb, 2); return nfc_tm_data_received(hdev->ndev, skb); default: return 1; } exit: 
kfree_skb(skb); return r; } static int pn544_hci_fw_download(struct nfc_hci_dev *hdev, const char *firmware_name) { struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev); if (info->fw_download == NULL) return -ENOTSUPP; return info->fw_download(info->phy_id, firmware_name, hdev->sw_romlib); } static int pn544_hci_discover_se(struct nfc_hci_dev *hdev) { u32 se_idx = 0; u8 ese_mode = 0x01; /* Default mode */ struct sk_buff *res_skb; int r; r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE, PN544_TEST_SWP, NULL, 0, &res_skb); if (r == 0) { if (res_skb->len == 2 && res_skb->data[0] == 0x00) nfc_add_se(hdev->ndev, se_idx++, NFC_SE_UICC); kfree_skb(res_skb); } r = nfc_hci_send_event(hdev, PN544_NFC_WI_MGMT_GATE, PN544_HCI_EVT_SWITCH_MODE, &ese_mode, 1); if (r == 0) nfc_add_se(hdev->ndev, se_idx++, NFC_SE_EMBEDDED); return !se_idx; } #define PN544_SE_MODE_OFF 0x00 #define PN544_SE_MODE_ON 0x01 static int pn544_hci_enable_se(struct nfc_hci_dev *hdev, u32 se_idx) { const struct nfc_se *se; u8 enable = PN544_SE_MODE_ON; static struct uicc_gatelist { u8 head; u8 adr[2]; u8 value; } uicc_gatelist[] = { {0x00, {0x9e, 0xd9}, 0x23}, {0x00, {0x9e, 0xda}, 0x21}, {0x00, {0x9e, 0xdb}, 0x22}, {0x00, {0x9e, 0xdc}, 0x24}, }; struct uicc_gatelist *p = uicc_gatelist; int count = ARRAY_SIZE(uicc_gatelist); struct sk_buff *res_skb; int r; se = nfc_find_se(hdev->ndev, se_idx); switch (se->type) { case NFC_SE_UICC: while (count--) { r = nfc_hci_send_cmd(hdev, PN544_SYS_MGMT_GATE, PN544_WRITE, (u8 *)p, 4, &res_skb); if (r < 0) return r; if (res_skb->len != 1) { kfree_skb(res_skb); return -EPROTO; } if (res_skb->data[0] != p->value) { kfree_skb(res_skb); return -EIO; } kfree_skb(res_skb); p++; } return nfc_hci_set_param(hdev, PN544_SWP_MGMT_GATE, PN544_SWP_DEFAULT_MODE, &enable, 1); case NFC_SE_EMBEDDED: return nfc_hci_set_param(hdev, PN544_NFC_WI_MGMT_GATE, PN544_NFC_ESE_DEFAULT_MODE, &enable, 1); default: return -EINVAL; } } static int pn544_hci_disable_se(struct nfc_hci_dev *hdev, u32 se_idx) { const struct nfc_se *se; u8 disable = PN544_SE_MODE_OFF; se = nfc_find_se(hdev->ndev, se_idx); switch (se->type) { case NFC_SE_UICC: return nfc_hci_set_param(hdev, PN544_SWP_MGMT_GATE, PN544_SWP_DEFAULT_MODE, &disable, 1); case NFC_SE_EMBEDDED: return nfc_hci_set_param(hdev, PN544_NFC_WI_MGMT_GATE, PN544_NFC_ESE_DEFAULT_MODE, &disable, 1); default: return -EINVAL; } } static const struct nfc_hci_ops pn544_hci_ops = { .open = pn544_hci_open, .close = pn544_hci_close, .hci_ready = pn544_hci_ready, .xmit = pn544_hci_xmit, .start_poll = pn544_hci_start_poll, .dep_link_up = pn544_hci_dep_link_up, .dep_link_down = pn544_hci_dep_link_down, .target_from_gate = pn544_hci_target_from_gate, .complete_target_discovered = pn544_hci_complete_target_discovered, .im_transceive = pn544_hci_im_transceive, .tm_send = pn544_hci_tm_send, .check_presence = pn544_hci_check_presence, .event_received = pn544_hci_event_received, .fw_download = pn544_hci_fw_download, .discover_se = pn544_hci_discover_se, .enable_se = pn544_hci_enable_se, .disable_se = pn544_hci_disable_se, }; int pn544_hci_probe(void *phy_id, const struct nfc_phy_ops *phy_ops, char *llc_name, int phy_headroom, int phy_tailroom, int phy_payload, fw_download_t fw_download, struct nfc_hci_dev **hdev) { struct pn544_hci_info *info; u32 protocols; struct nfc_hci_init_data init_data; int r; info = kzalloc(sizeof(struct pn544_hci_info), GFP_KERNEL); if (!info) { r = -ENOMEM; goto err_info_alloc; } info->phy_ops = phy_ops; info->phy_id = phy_id; info->fw_download = fw_download; info->state = 
PN544_ST_COLD; mutex_init(&info->info_lock); init_data.gate_count = ARRAY_SIZE(pn544_gates); memcpy(init_data.gates, pn544_gates, sizeof(pn544_gates)); /* * TODO: Session id must include the driver name + some bus addr * persistent info to discriminate 2 identical chips */ strcpy(init_data.session_id, "ID544HCI"); protocols = NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK | NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_NFC_DEP_MASK; info->hdev = nfc_hci_allocate_device(&pn544_hci_ops, &init_data, 0, protocols, llc_name, phy_headroom + PN544_CMDS_HEADROOM, phy_tailroom, phy_payload); if (!info->hdev) { pr_err("Cannot allocate nfc hdev\n"); r = -ENOMEM; goto err_alloc_hdev; } nfc_hci_set_clientdata(info->hdev, info); r = nfc_hci_register_device(info->hdev); if (r) goto err_regdev; *hdev = info->hdev; return 0; err_regdev: nfc_hci_free_device(info->hdev); err_alloc_hdev: kfree(info); err_info_alloc: return r; } EXPORT_SYMBOL(pn544_hci_probe); void pn544_hci_remove(struct nfc_hci_dev *hdev) { struct pn544_hci_info *info = nfc_hci_get_clientdata(hdev); nfc_hci_unregister_device(hdev); nfc_hci_free_device(hdev); kfree(info); } EXPORT_SYMBOL(pn544_hci_remove); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/nfc/pn544/pn544.c
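The MIFARE branch of pn544_hci_im_transceive above rewrites an authentication command in place: the 12-byte frame arrives as cmd, block, key[6], uid[4], and the chip expects the UID ahead of the key. A minimal standalone sketch of that reordering, with hypothetical sample bytes (only the memcpy/memmove sequence is taken from the driver):

#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define MIFARE_CMD_HEADER 2
#define MIFARE_UID_LEN    4
#define MIFARE_KEY_LEN    6
#define MIFARE_CMD_LEN    12

/* Move the UID in front of the key, as the driver does before sending. */
static void mifare_auth_reorder(uint8_t *frame)
{
	uint8_t uid[MIFARE_UID_LEN];
	uint8_t *data = frame + MIFARE_CMD_HEADER;

	memcpy(uid, data + MIFARE_KEY_LEN, MIFARE_UID_LEN);   /* save trailing UID */
	memmove(data + MIFARE_UID_LEN, data, MIFARE_KEY_LEN); /* shift key right   */
	memcpy(data, uid, MIFARE_UID_LEN);                    /* UID precedes key  */
}

int main(void)
{
	/* cmd=AUTH_KEY_A, block=4, key a0..a5, uid de ad be ef (made-up values) */
	uint8_t frame[MIFARE_CMD_LEN] = {
		0x60, 0x04, 0xa0, 0xa1, 0xa2, 0xa3, 0xa4, 0xa5,
		0xde, 0xad, 0xbe, 0xef,
	};

	mifare_auth_reorder(frame);
	for (int i = 0; i < MIFARE_CMD_LEN; i++)
		printf("%02x ", frame[i]);
	printf("\n"); /* 60 04 de ad be ef a0 a1 a2 a3 a4 a5 */
	return 0;
}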
// SPDX-License-Identifier: GPL-2.0-only /* * I2C Link Layer for PN544 HCI based Driver * * Copyright (C) 2012 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/crc-ccitt.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/nfc.h> #include <linux/firmware.h> #include <linux/gpio/consumer.h> #include <asm/unaligned.h> #include <net/nfc/hci.h> #include <net/nfc/llc.h> #include <net/nfc/nfc.h> #include "pn544.h" #define PN544_I2C_FRAME_HEADROOM 1 #define PN544_I2C_FRAME_TAILROOM 2 /* GPIO names */ #define PN544_GPIO_NAME_IRQ "pn544_irq" #define PN544_GPIO_NAME_FW "pn544_fw" #define PN544_GPIO_NAME_EN "pn544_en" /* framing in HCI mode */ #define PN544_HCI_I2C_LLC_LEN 1 #define PN544_HCI_I2C_LLC_CRC 2 #define PN544_HCI_I2C_LLC_LEN_CRC (PN544_HCI_I2C_LLC_LEN + \ PN544_HCI_I2C_LLC_CRC) #define PN544_HCI_I2C_LLC_MIN_SIZE (1 + PN544_HCI_I2C_LLC_LEN_CRC) #define PN544_HCI_I2C_LLC_MAX_PAYLOAD 29 #define PN544_HCI_I2C_LLC_MAX_SIZE (PN544_HCI_I2C_LLC_LEN_CRC + 1 + \ PN544_HCI_I2C_LLC_MAX_PAYLOAD) static const struct i2c_device_id pn544_hci_i2c_id_table[] = { {"pn544", 0}, {} }; MODULE_DEVICE_TABLE(i2c, pn544_hci_i2c_id_table); static const struct acpi_device_id pn544_hci_i2c_acpi_match[] __maybe_unused = { {"NXP5440", 0}, {} }; MODULE_DEVICE_TABLE(acpi, pn544_hci_i2c_acpi_match); #define PN544_HCI_I2C_DRIVER_NAME "pn544_hci_i2c" /* * Exposed through the 4 most significant bits * from the HCI SW_VERSION first byte, a.k.a. * SW RomLib. */ #define PN544_HW_VARIANT_C2 0xa #define PN544_HW_VARIANT_C3 0xb #define PN544_FW_CMD_RESET 0x01 #define PN544_FW_CMD_WRITE 0x08 #define PN544_FW_CMD_CHECK 0x06 #define PN544_FW_CMD_SECURE_WRITE 0x0C #define PN544_FW_CMD_SECURE_CHUNK_WRITE 0x0D struct pn544_i2c_fw_frame_write { u8 cmd; u16 be_length; u8 be_dest_addr[3]; u16 be_datalen; u8 data[]; } __packed; struct pn544_i2c_fw_frame_check { u8 cmd; u16 be_length; u8 be_start_addr[3]; u16 be_datalen; u16 be_crc; } __packed; struct pn544_i2c_fw_frame_response { u8 status; u16 be_length; } __packed; struct pn544_i2c_fw_blob { u32 be_size; u32 be_destaddr; u8 data[]; }; struct pn544_i2c_fw_secure_frame { u8 cmd; u16 be_datalen; u8 data[]; } __packed; struct pn544_i2c_fw_secure_blob { u64 header; u8 data[]; }; #define PN544_FW_CMD_RESULT_TIMEOUT 0x01 #define PN544_FW_CMD_RESULT_BAD_CRC 0x02 #define PN544_FW_CMD_RESULT_ACCESS_DENIED 0x08 #define PN544_FW_CMD_RESULT_PROTOCOL_ERROR 0x0B #define PN544_FW_CMD_RESULT_INVALID_PARAMETER 0x11 #define PN544_FW_CMD_RESULT_UNSUPPORTED_COMMAND 0x13 #define PN544_FW_CMD_RESULT_INVALID_LENGTH 0x18 #define PN544_FW_CMD_RESULT_CRYPTOGRAPHIC_ERROR 0x19 #define PN544_FW_CMD_RESULT_VERSION_CONDITIONS_ERROR 0x1D #define PN544_FW_CMD_RESULT_MEMORY_ERROR 0x20 #define PN544_FW_CMD_RESULT_CHUNK_OK 0x21 #define PN544_FW_CMD_RESULT_WRITE_FAILED 0x74 #define PN544_FW_CMD_RESULT_COMMAND_REJECTED 0xE0 #define PN544_FW_CMD_RESULT_CHUNK_ERROR 0xE6 #define MIN(X, Y) ((X) < (Y) ?
(X) : (Y)) #define PN544_FW_WRITE_BUFFER_MAX_LEN 0x9f7 #define PN544_FW_I2C_MAX_PAYLOAD PN544_HCI_I2C_LLC_MAX_SIZE #define PN544_FW_I2C_WRITE_FRAME_HEADER_LEN 8 #define PN544_FW_I2C_WRITE_DATA_MAX_LEN MIN((PN544_FW_I2C_MAX_PAYLOAD -\ PN544_FW_I2C_WRITE_FRAME_HEADER_LEN),\ PN544_FW_WRITE_BUFFER_MAX_LEN) #define PN544_FW_SECURE_CHUNK_WRITE_HEADER_LEN 3 #define PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN (PN544_FW_I2C_MAX_PAYLOAD -\ PN544_FW_SECURE_CHUNK_WRITE_HEADER_LEN) #define PN544_FW_SECURE_FRAME_HEADER_LEN 3 #define PN544_FW_SECURE_BLOB_HEADER_LEN 8 #define FW_WORK_STATE_IDLE 1 #define FW_WORK_STATE_START 2 #define FW_WORK_STATE_WAIT_WRITE_ANSWER 3 #define FW_WORK_STATE_WAIT_CHECK_ANSWER 4 #define FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER 5 struct pn544_i2c_phy { struct i2c_client *i2c_dev; struct nfc_hci_dev *hdev; struct gpio_desc *gpiod_en; struct gpio_desc *gpiod_fw; unsigned int en_polarity; u8 hw_variant; struct work_struct fw_work; int fw_work_state; char firmware_name[NFC_FIRMWARE_NAME_MAXSIZE + 1]; const struct firmware *fw; u32 fw_blob_dest_addr; size_t fw_blob_size; const u8 *fw_blob_data; size_t fw_written; size_t fw_size; int fw_cmd_result; int powered; int run_mode; int hard_fault; /* * < 0 if hardware error occurred (e.g. i2c err) * and prevents normal operation. */ }; #define I2C_DUMP_SKB(info, skb) \ do { \ pr_debug("%s:\n", info); \ print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET, \ 16, 1, (skb)->data, (skb)->len, 0); \ } while (0) static void pn544_hci_i2c_platform_init(struct pn544_i2c_phy *phy) { int polarity, retry, ret; static const char rset_cmd[] = { 0x05, 0xF9, 0x04, 0x00, 0xC3, 0xE5 }; int count = sizeof(rset_cmd); nfc_info(&phy->i2c_dev->dev, "Detecting nfc_en polarity\n"); /* Disable fw download */ gpiod_set_value_cansleep(phy->gpiod_fw, 0); for (polarity = 0; polarity < 2; polarity++) { phy->en_polarity = polarity; retry = 3; while (retry--) { /* power off */ gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity); usleep_range(10000, 15000); /* power on */ gpiod_set_value_cansleep(phy->gpiod_en, phy->en_polarity); usleep_range(10000, 15000); /* send reset */ dev_dbg(&phy->i2c_dev->dev, "Sending reset cmd\n"); ret = i2c_master_send(phy->i2c_dev, rset_cmd, count); if (ret == count) { nfc_info(&phy->i2c_dev->dev, "nfc_en polarity : active %s\n", (polarity == 0 ? "low" : "high")); goto out; } } } nfc_err(&phy->i2c_dev->dev, "Could not detect nfc_en polarity, falling back to active high\n"); out: gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity); usleep_range(10000, 15000); } static void pn544_hci_i2c_enable_mode(struct pn544_i2c_phy *phy, int run_mode) { gpiod_set_value_cansleep(phy->gpiod_fw, run_mode == PN544_FW_MODE ?
1 : 0); gpiod_set_value_cansleep(phy->gpiod_en, phy->en_polarity); usleep_range(10000, 15000); phy->run_mode = run_mode; } static int pn544_hci_i2c_enable(void *phy_id) { struct pn544_i2c_phy *phy = phy_id; pn544_hci_i2c_enable_mode(phy, PN544_HCI_MODE); phy->powered = 1; return 0; } static void pn544_hci_i2c_disable(void *phy_id) { struct pn544_i2c_phy *phy = phy_id; gpiod_set_value_cansleep(phy->gpiod_fw, 0); gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity); usleep_range(10000, 15000); gpiod_set_value_cansleep(phy->gpiod_en, phy->en_polarity); usleep_range(10000, 15000); gpiod_set_value_cansleep(phy->gpiod_en, !phy->en_polarity); usleep_range(10000, 15000); phy->powered = 0; } static void pn544_hci_i2c_add_len_crc(struct sk_buff *skb) { u16 crc; int len; len = skb->len + 2; *(u8 *)skb_push(skb, 1) = len; crc = crc_ccitt(0xffff, skb->data, skb->len); crc = ~crc; skb_put_u8(skb, crc & 0xff); skb_put_u8(skb, crc >> 8); } static void pn544_hci_i2c_remove_len_crc(struct sk_buff *skb) { skb_pull(skb, PN544_I2C_FRAME_HEADROOM); skb_trim(skb, PN544_I2C_FRAME_TAILROOM); } /* * Writing a frame must not return the number of written bytes. * It must return either zero for success, or <0 for error. * In addition, it must not alter the skb. */ static int pn544_hci_i2c_write(void *phy_id, struct sk_buff *skb) { int r; struct pn544_i2c_phy *phy = phy_id; struct i2c_client *client = phy->i2c_dev; if (phy->hard_fault != 0) return phy->hard_fault; usleep_range(3000, 6000); pn544_hci_i2c_add_len_crc(skb); I2C_DUMP_SKB("i2c frame written", skb); r = i2c_master_send(client, skb->data, skb->len); if (r == -EREMOTEIO) { /* Retry, chip was in standby */ usleep_range(6000, 10000); r = i2c_master_send(client, skb->data, skb->len); } if (r >= 0) { if (r != skb->len) r = -EREMOTEIO; else r = 0; } pn544_hci_i2c_remove_len_crc(skb); return r; } static int check_crc(u8 *buf, int buflen) { int len; u16 crc; len = buf[0] + 1; crc = crc_ccitt(0xffff, buf, len - 2); crc = ~crc; if (buf[len - 2] != (crc & 0xff) || buf[len - 1] != (crc >> 8)) { pr_err("CRC error 0x%x != 0x%x 0x%x\n", crc, buf[len - 1], buf[len - 2]); pr_info("%s: BAD CRC\n", __func__); print_hex_dump(KERN_DEBUG, "crc: ", DUMP_PREFIX_NONE, 16, 2, buf, buflen, false); return -EPERM; } return 0; } /* * Reads an SHDLC frame and returns it in a newly allocated sk_buff. Guarantees * that the i2c bus will be flushed and that the next read will start on a new * frame. The returned skb contains only the LLC header and payload. 
* returns: * -EREMOTEIO : i2c read error (fatal) * -EBADMSG : frame was incorrect and discarded * -ENOMEM : cannot allocate skb, frame dropped */ static int pn544_hci_i2c_read(struct pn544_i2c_phy *phy, struct sk_buff **skb) { int r; u8 len; u8 tmp[PN544_HCI_I2C_LLC_MAX_SIZE - 1]; struct i2c_client *client = phy->i2c_dev; r = i2c_master_recv(client, &len, 1); if (r != 1) { nfc_err(&client->dev, "cannot read len byte\n"); return -EREMOTEIO; } if ((len < (PN544_HCI_I2C_LLC_MIN_SIZE - 1)) || (len > (PN544_HCI_I2C_LLC_MAX_SIZE - 1))) { nfc_err(&client->dev, "invalid len byte\n"); r = -EBADMSG; goto flush; } *skb = alloc_skb(1 + len, GFP_KERNEL); if (*skb == NULL) { r = -ENOMEM; goto flush; } skb_put_u8(*skb, len); r = i2c_master_recv(client, skb_put(*skb, len), len); if (r != len) { kfree_skb(*skb); return -EREMOTEIO; } I2C_DUMP_SKB("i2c frame read", *skb); r = check_crc((*skb)->data, (*skb)->len); if (r != 0) { kfree_skb(*skb); r = -EBADMSG; goto flush; } skb_pull(*skb, 1); skb_trim(*skb, (*skb)->len - 2); usleep_range(3000, 6000); return 0; flush: if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0) r = -EREMOTEIO; usleep_range(3000, 6000); return r; } static int pn544_hci_i2c_fw_read_status(struct pn544_i2c_phy *phy) { int r; struct pn544_i2c_fw_frame_response response; struct i2c_client *client = phy->i2c_dev; r = i2c_master_recv(client, (char *) &response, sizeof(response)); if (r != sizeof(response)) { nfc_err(&client->dev, "cannot read fw status\n"); return -EIO; } usleep_range(3000, 6000); switch (response.status) { case 0: return 0; case PN544_FW_CMD_RESULT_CHUNK_OK: return response.status; case PN544_FW_CMD_RESULT_TIMEOUT: return -ETIMEDOUT; case PN544_FW_CMD_RESULT_BAD_CRC: return -ENODATA; case PN544_FW_CMD_RESULT_ACCESS_DENIED: return -EACCES; case PN544_FW_CMD_RESULT_PROTOCOL_ERROR: return -EPROTO; case PN544_FW_CMD_RESULT_INVALID_PARAMETER: return -EINVAL; case PN544_FW_CMD_RESULT_UNSUPPORTED_COMMAND: return -ENOTSUPP; case PN544_FW_CMD_RESULT_INVALID_LENGTH: return -EBADMSG; case PN544_FW_CMD_RESULT_CRYPTOGRAPHIC_ERROR: return -ENOKEY; case PN544_FW_CMD_RESULT_VERSION_CONDITIONS_ERROR: return -EINVAL; case PN544_FW_CMD_RESULT_MEMORY_ERROR: return -ENOMEM; case PN544_FW_CMD_RESULT_COMMAND_REJECTED: return -EACCES; case PN544_FW_CMD_RESULT_WRITE_FAILED: case PN544_FW_CMD_RESULT_CHUNK_ERROR: return -EIO; default: return -EIO; } } /* * Reads an SHDLC frame from the chip. This is not as straightforward as it * seems. There are cases where we could lose the frame start synchronization. * The frame format is len-data-crc, and corruption can occur anywhere while * transiting on the i2c bus, such that we could read an invalid len. * In order to recover synchronization with the next frame, we must be sure * to read the real amount of data without using the len byte. We do this by * assuming the following: * - the chip will always present only one single complete frame on the bus * before triggering the interrupt * - the chip will not present a new frame until we have completely read * the previous one (or until we have handled the interrupt). * The tricky case is when we read a corrupted len that is less than the real * len. We must detect this here in order to determine that we need to flush * the bus. This is the reason why we check the crc here. 
*/ static irqreturn_t pn544_hci_i2c_irq_thread_fn(int irq, void *phy_id) { struct pn544_i2c_phy *phy = phy_id; struct i2c_client *client; struct sk_buff *skb = NULL; int r; if (!phy || irq != phy->i2c_dev->irq) { WARN_ON_ONCE(1); return IRQ_NONE; } client = phy->i2c_dev; dev_dbg(&client->dev, "IRQ\n"); if (phy->hard_fault != 0) return IRQ_HANDLED; if (phy->run_mode == PN544_FW_MODE) { phy->fw_cmd_result = pn544_hci_i2c_fw_read_status(phy); schedule_work(&phy->fw_work); } else { r = pn544_hci_i2c_read(phy, &skb); if (r == -EREMOTEIO) { phy->hard_fault = r; nfc_hci_recv_frame(phy->hdev, NULL); return IRQ_HANDLED; } else if ((r == -ENOMEM) || (r == -EBADMSG)) { return IRQ_HANDLED; } nfc_hci_recv_frame(phy->hdev, skb); } return IRQ_HANDLED; } static const struct nfc_phy_ops i2c_phy_ops = { .write = pn544_hci_i2c_write, .enable = pn544_hci_i2c_enable, .disable = pn544_hci_i2c_disable, }; static int pn544_hci_i2c_fw_download(void *phy_id, const char *firmware_name, u8 hw_variant) { struct pn544_i2c_phy *phy = phy_id; pr_info("Starting Firmware Download (%s)\n", firmware_name); strcpy(phy->firmware_name, firmware_name); phy->hw_variant = hw_variant; phy->fw_work_state = FW_WORK_STATE_START; schedule_work(&phy->fw_work); return 0; } static void pn544_hci_i2c_fw_work_complete(struct pn544_i2c_phy *phy, int result) { pr_info("Firmware Download Complete, result=%d\n", result); pn544_hci_i2c_disable(phy); phy->fw_work_state = FW_WORK_STATE_IDLE; if (phy->fw) { release_firmware(phy->fw); phy->fw = NULL; } nfc_fw_download_done(phy->hdev->ndev, phy->firmware_name, (u32) -result); } static int pn544_hci_i2c_fw_write_cmd(struct i2c_client *client, u32 dest_addr, const u8 *data, u16 datalen) { u8 frame[PN544_FW_I2C_MAX_PAYLOAD]; struct pn544_i2c_fw_frame_write *framep; u16 params_len; int framelen; int r; if (datalen > PN544_FW_I2C_WRITE_DATA_MAX_LEN) datalen = PN544_FW_I2C_WRITE_DATA_MAX_LEN; framep = (struct pn544_i2c_fw_frame_write *) frame; params_len = sizeof(framep->be_dest_addr) + sizeof(framep->be_datalen) + datalen; framelen = params_len + sizeof(framep->cmd) + sizeof(framep->be_length); framep->cmd = PN544_FW_CMD_WRITE; put_unaligned_be16(params_len, &framep->be_length); framep->be_dest_addr[0] = (dest_addr & 0xff0000) >> 16; framep->be_dest_addr[1] = (dest_addr & 0xff00) >> 8; framep->be_dest_addr[2] = dest_addr & 0xff; put_unaligned_be16(datalen, &framep->be_datalen); memcpy(framep->data, data, datalen); r = i2c_master_send(client, frame, framelen); if (r == framelen) return datalen; else if (r < 0) return r; else return -EIO; } static int pn544_hci_i2c_fw_check_cmd(struct i2c_client *client, u32 start_addr, const u8 *data, u16 datalen) { struct pn544_i2c_fw_frame_check frame; int r; u16 crc; /* calculate local crc for the data we want to check */ crc = crc_ccitt(0xffff, data, datalen); frame.cmd = PN544_FW_CMD_CHECK; put_unaligned_be16(sizeof(frame.be_start_addr) + sizeof(frame.be_datalen) + sizeof(frame.be_crc), &frame.be_length); /* tell the chip the memory region to which our crc applies */ frame.be_start_addr[0] = (start_addr & 0xff0000) >> 16; frame.be_start_addr[1] = (start_addr & 0xff00) >> 8; frame.be_start_addr[2] = start_addr & 0xff; put_unaligned_be16(datalen, &frame.be_datalen); /* * and give our local crc. Chip will calculate its own crc for the * region and compare with ours. 
*/ put_unaligned_be16(crc, &frame.be_crc); r = i2c_master_send(client, (const char *) &frame, sizeof(frame)); if (r == sizeof(frame)) return 0; else if (r < 0) return r; else return -EIO; } static int pn544_hci_i2c_fw_write_chunk(struct pn544_i2c_phy *phy) { int r; r = pn544_hci_i2c_fw_write_cmd(phy->i2c_dev, phy->fw_blob_dest_addr + phy->fw_written, phy->fw_blob_data + phy->fw_written, phy->fw_blob_size - phy->fw_written); if (r < 0) return r; phy->fw_written += r; phy->fw_work_state = FW_WORK_STATE_WAIT_WRITE_ANSWER; return 0; } static int pn544_hci_i2c_fw_secure_write_frame_cmd(struct pn544_i2c_phy *phy, const u8 *data, u16 datalen) { u8 buf[PN544_FW_I2C_MAX_PAYLOAD]; struct pn544_i2c_fw_secure_frame *chunk; int chunklen; int r; if (datalen > PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN) datalen = PN544_FW_SECURE_CHUNK_WRITE_DATA_MAX_LEN; chunk = (struct pn544_i2c_fw_secure_frame *) buf; chunk->cmd = PN544_FW_CMD_SECURE_CHUNK_WRITE; put_unaligned_be16(datalen, &chunk->be_datalen); memcpy(chunk->data, data, datalen); chunklen = sizeof(chunk->cmd) + sizeof(chunk->be_datalen) + datalen; r = i2c_master_send(phy->i2c_dev, buf, chunklen); if (r == chunklen) return datalen; else if (r < 0) return r; else return -EIO; } static int pn544_hci_i2c_fw_secure_write_frame(struct pn544_i2c_phy *phy) { struct pn544_i2c_fw_secure_frame *framep; int r; framep = (struct pn544_i2c_fw_secure_frame *) phy->fw_blob_data; if (phy->fw_written == 0) phy->fw_blob_size = get_unaligned_be16(&framep->be_datalen) + PN544_FW_SECURE_FRAME_HEADER_LEN; /* Only the secure write command can be chunked */ if (phy->fw_blob_size > PN544_FW_I2C_MAX_PAYLOAD && framep->cmd != PN544_FW_CMD_SECURE_WRITE) return -EINVAL; /* The firmware also has other commands; we just send them directly */ if (phy->fw_blob_size < PN544_FW_I2C_MAX_PAYLOAD) { r = i2c_master_send(phy->i2c_dev, (const char *) phy->fw_blob_data, phy->fw_blob_size); if (r == phy->fw_blob_size) goto exit; else if (r < 0) return r; else return -EIO; } r = pn544_hci_i2c_fw_secure_write_frame_cmd(phy, phy->fw_blob_data + phy->fw_written, phy->fw_blob_size - phy->fw_written); if (r < 0) return r; exit: phy->fw_written += r; phy->fw_work_state = FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER; /* The SW reset command will not trigger any response from the PN544 */ if (framep->cmd == PN544_FW_CMD_RESET) { pn544_hci_i2c_enable_mode(phy, PN544_FW_MODE); phy->fw_cmd_result = 0; schedule_work(&phy->fw_work); } return 0; } static void pn544_hci_i2c_fw_work(struct work_struct *work) { struct pn544_i2c_phy *phy = container_of(work, struct pn544_i2c_phy, fw_work); int r; struct pn544_i2c_fw_blob *blob; struct pn544_i2c_fw_secure_blob *secure_blob; switch (phy->fw_work_state) { case FW_WORK_STATE_START: pn544_hci_i2c_enable_mode(phy, PN544_FW_MODE); r = request_firmware(&phy->fw, phy->firmware_name, &phy->i2c_dev->dev); if (r < 0) goto exit_state_start; phy->fw_written = 0; switch (phy->hw_variant) { case PN544_HW_VARIANT_C2: blob = (struct pn544_i2c_fw_blob *) phy->fw->data; phy->fw_blob_size = get_unaligned_be32(&blob->be_size); phy->fw_blob_dest_addr = get_unaligned_be32( &blob->be_destaddr); phy->fw_blob_data = blob->data; r = pn544_hci_i2c_fw_write_chunk(phy); break; case PN544_HW_VARIANT_C3: secure_blob = (struct pn544_i2c_fw_secure_blob *) phy->fw->data; phy->fw_blob_data = secure_blob->data; phy->fw_size = phy->fw->size; r = pn544_hci_i2c_fw_secure_write_frame(phy); break; default: r = -ENOTSUPP; break; } exit_state_start: if (r < 0) pn544_hci_i2c_fw_work_complete(phy, r); break; case
FW_WORK_STATE_WAIT_WRITE_ANSWER: r = phy->fw_cmd_result; if (r < 0) goto exit_state_wait_write_answer; if (phy->fw_written == phy->fw_blob_size) { r = pn544_hci_i2c_fw_check_cmd(phy->i2c_dev, phy->fw_blob_dest_addr, phy->fw_blob_data, phy->fw_blob_size); if (r < 0) goto exit_state_wait_write_answer; phy->fw_work_state = FW_WORK_STATE_WAIT_CHECK_ANSWER; break; } r = pn544_hci_i2c_fw_write_chunk(phy); exit_state_wait_write_answer: if (r < 0) pn544_hci_i2c_fw_work_complete(phy, r); break; case FW_WORK_STATE_WAIT_CHECK_ANSWER: r = phy->fw_cmd_result; if (r < 0) goto exit_state_wait_check_answer; blob = (struct pn544_i2c_fw_blob *) (phy->fw_blob_data + phy->fw_blob_size); phy->fw_blob_size = get_unaligned_be32(&blob->be_size); if (phy->fw_blob_size != 0) { phy->fw_blob_dest_addr = get_unaligned_be32(&blob->be_destaddr); phy->fw_blob_data = blob->data; phy->fw_written = 0; r = pn544_hci_i2c_fw_write_chunk(phy); } exit_state_wait_check_answer: if (r < 0 || phy->fw_blob_size == 0) pn544_hci_i2c_fw_work_complete(phy, r); break; case FW_WORK_STATE_WAIT_SECURE_WRITE_ANSWER: r = phy->fw_cmd_result; if (r < 0) goto exit_state_wait_secure_write_answer; if (r == PN544_FW_CMD_RESULT_CHUNK_OK) { r = pn544_hci_i2c_fw_secure_write_frame(phy); goto exit_state_wait_secure_write_answer; } if (phy->fw_written == phy->fw_blob_size) { secure_blob = (struct pn544_i2c_fw_secure_blob *) (phy->fw_blob_data + phy->fw_blob_size); phy->fw_size -= phy->fw_blob_size + PN544_FW_SECURE_BLOB_HEADER_LEN; if (phy->fw_size >= PN544_FW_SECURE_BLOB_HEADER_LEN + PN544_FW_SECURE_FRAME_HEADER_LEN) { phy->fw_blob_data = secure_blob->data; phy->fw_written = 0; r = pn544_hci_i2c_fw_secure_write_frame(phy); } } exit_state_wait_secure_write_answer: if (r < 0 || phy->fw_size == 0) pn544_hci_i2c_fw_work_complete(phy, r); break; default: break; } } static const struct acpi_gpio_params enable_gpios = { 1, 0, false }; static const struct acpi_gpio_params firmware_gpios = { 2, 0, false }; static const struct acpi_gpio_mapping acpi_pn544_gpios[] = { { "enable-gpios", &enable_gpios, 1 }, { "firmware-gpios", &firmware_gpios, 1 }, { }, }; static int pn544_hci_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct pn544_i2c_phy *phy; int r = 0; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { nfc_err(&client->dev, "Need I2C_FUNC_I2C\n"); return -ENODEV; } phy = devm_kzalloc(&client->dev, sizeof(struct pn544_i2c_phy), GFP_KERNEL); if (!phy) return -ENOMEM; INIT_WORK(&phy->fw_work, pn544_hci_i2c_fw_work); phy->fw_work_state = FW_WORK_STATE_IDLE; phy->i2c_dev = client; i2c_set_clientdata(client, phy); r = devm_acpi_dev_add_driver_gpios(dev, acpi_pn544_gpios); if (r) dev_dbg(dev, "Unable to add GPIO mapping table\n"); /* Get EN GPIO */ phy->gpiod_en = devm_gpiod_get(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(phy->gpiod_en)) { nfc_err(dev, "Unable to get EN GPIO\n"); return PTR_ERR(phy->gpiod_en); } /* Get FW GPIO */ phy->gpiod_fw = devm_gpiod_get(dev, "firmware", GPIOD_OUT_LOW); if (IS_ERR(phy->gpiod_fw)) { nfc_err(dev, "Unable to get FW GPIO\n"); return PTR_ERR(phy->gpiod_fw); } pn544_hci_i2c_platform_init(phy); r = devm_request_threaded_irq(&client->dev, client->irq, NULL, pn544_hci_i2c_irq_thread_fn, IRQF_TRIGGER_RISING | IRQF_ONESHOT, PN544_HCI_I2C_DRIVER_NAME, phy); if (r < 0) { nfc_err(&client->dev, "Unable to register IRQ handler\n"); return r; } r = pn544_hci_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, PN544_I2C_FRAME_HEADROOM, PN544_I2C_FRAME_TAILROOM, PN544_HCI_I2C_LLC_MAX_PAYLOAD, pn544_hci_i2c_fw_download, 
&phy->hdev); if (r < 0) return r; return 0; } static void pn544_hci_i2c_remove(struct i2c_client *client) { struct pn544_i2c_phy *phy = i2c_get_clientdata(client); cancel_work_sync(&phy->fw_work); if (phy->fw_work_state != FW_WORK_STATE_IDLE) pn544_hci_i2c_fw_work_complete(phy, -ENODEV); pn544_hci_remove(phy->hdev); if (phy->powered) pn544_hci_i2c_disable(phy); } static const struct of_device_id of_pn544_i2c_match[] __maybe_unused = { { .compatible = "nxp,pn544-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, of_pn544_i2c_match); static struct i2c_driver pn544_hci_i2c_driver = { .driver = { .name = PN544_HCI_I2C_DRIVER_NAME, .of_match_table = of_match_ptr(of_pn544_i2c_match), .acpi_match_table = ACPI_PTR(pn544_hci_i2c_acpi_match), }, .probe = pn544_hci_i2c_probe, .id_table = pn544_hci_i2c_id_table, .remove = pn544_hci_i2c_remove, }; module_i2c_driver(pn544_hci_i2c_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/nfc/pn544/i2c.c
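pn544_hci_i2c_add_len_crc and check_crc above define the wire format: one length byte (payload length + 2 CRC bytes), the payload, then the bitwise inverse of a CRC-CCITT over the length byte and payload, appended LSB first. A self-contained sketch of that framing; the bitwise routine below stands in for the kernel's crc_ccitt() and assumes the reflected-0x8408 form with a caller-supplied seed:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Bitwise CRC-CCITT, reflected polynomial 0x8408, seed supplied by caller. */
static uint16_t crc_ccitt(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= *buf++;
		for (int i = 0; i < 8; i++)
			crc = (crc & 1) ? (crc >> 1) ^ 0x8408 : crc >> 1;
	}
	return crc;
}

/* Build len + payload + ~CRC (LSB first), mirroring the driver's framing. */
static size_t frame_payload(const uint8_t *payload, size_t plen, uint8_t *out)
{
	uint16_t crc;

	out[0] = plen + 2;                       /* len byte counts payload + CRC */
	memcpy(out + 1, payload, plen);
	crc = ~crc_ccitt(0xffff, out, plen + 1); /* CRC covers len byte + payload */
	out[1 + plen] = crc & 0xff;
	out[2 + plen] = crc >> 8;
	return plen + 3;
}

int main(void)
{
	const uint8_t payload[] = { 0x81, 0x03, 0x01 }; /* hypothetical HCI bytes */
	uint8_t frame[32];
	size_t n = frame_payload(payload, sizeof(payload), frame);

	for (size_t i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}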
// SPDX-License-Identifier: GPL-2.0-only /* * HCI based Driver for Inside Secure microread NFC Chip * * Copyright (C) 2013 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/crc-ccitt.h> #include <linux/nfc.h> #include <net/nfc/nfc.h> #include <net/nfc/hci.h> #include "microread.h" /* Proprietary gates, events, commands and registers */ /* Admin */ #define MICROREAD_GATE_ID_ADM NFC_HCI_ADMIN_GATE #define MICROREAD_GATE_ID_MGT 0x01 #define MICROREAD_GATE_ID_OS 0x02 #define MICROREAD_GATE_ID_TESTRF 0x03 #define MICROREAD_GATE_ID_LOOPBACK NFC_HCI_LOOPBACK_GATE #define MICROREAD_GATE_ID_IDT NFC_HCI_ID_MGMT_GATE #define MICROREAD_GATE_ID_LMS NFC_HCI_LINK_MGMT_GATE /* Reader */ #define MICROREAD_GATE_ID_MREAD_GEN 0x10 #define MICROREAD_GATE_ID_MREAD_ISO_B NFC_HCI_RF_READER_B_GATE #define MICROREAD_GATE_ID_MREAD_NFC_T1 0x12 #define MICROREAD_GATE_ID_MREAD_ISO_A NFC_HCI_RF_READER_A_GATE #define MICROREAD_GATE_ID_MREAD_NFC_T3 0x14 #define MICROREAD_GATE_ID_MREAD_ISO_15_3 0x15 #define MICROREAD_GATE_ID_MREAD_ISO_15_2 0x16 #define MICROREAD_GATE_ID_MREAD_ISO_B_3 0x17 #define MICROREAD_GATE_ID_MREAD_BPRIME 0x18 #define MICROREAD_GATE_ID_MREAD_ISO_A_3 0x19 /* Card */ #define MICROREAD_GATE_ID_MCARD_GEN 0x20 #define MICROREAD_GATE_ID_MCARD_ISO_B 0x21 #define MICROREAD_GATE_ID_MCARD_BPRIME 0x22 #define MICROREAD_GATE_ID_MCARD_ISO_A 0x23 #define MICROREAD_GATE_ID_MCARD_NFC_T3 0x24 #define MICROREAD_GATE_ID_MCARD_ISO_15_3 0x25 #define MICROREAD_GATE_ID_MCARD_ISO_15_2 0x26 #define MICROREAD_GATE_ID_MCARD_ISO_B_2 0x27 #define MICROREAD_GATE_ID_MCARD_ISO_CUSTOM 0x28 #define MICROREAD_GATE_ID_SECURE_ELEMENT 0x2F /* P2P */ #define MICROREAD_GATE_ID_P2P_GEN 0x30 #define MICROREAD_GATE_ID_P2P_TARGET 0x31 #define MICROREAD_PAR_P2P_TARGET_MODE 0x01 #define MICROREAD_PAR_P2P_TARGET_GT 0x04 #define MICROREAD_GATE_ID_P2P_INITIATOR 0x32 #define MICROREAD_PAR_P2P_INITIATOR_GI 0x01 #define MICROREAD_PAR_P2P_INITIATOR_GT 0x03 /* Those pipes are created/opened by default in the chip */ #define MICROREAD_PIPE_ID_LMS 0x00 #define MICROREAD_PIPE_ID_ADMIN 0x01 #define MICROREAD_PIPE_ID_MGT 0x02 #define MICROREAD_PIPE_ID_OS 0x03 #define MICROREAD_PIPE_ID_HDS_LOOPBACK 0x04 #define MICROREAD_PIPE_ID_HDS_IDT 0x05 #define MICROREAD_PIPE_ID_HDS_MCARD_ISO_B 0x08 #define MICROREAD_PIPE_ID_HDS_MCARD_ISO_BPRIME 0x09 #define MICROREAD_PIPE_ID_HDS_MCARD_ISO_A 0x0A #define MICROREAD_PIPE_ID_HDS_MCARD_ISO_15_3 0x0B #define MICROREAD_PIPE_ID_HDS_MCARD_ISO_15_2 0x0C #define MICROREAD_PIPE_ID_HDS_MCARD_NFC_T3 0x0D #define MICROREAD_PIPE_ID_HDS_MCARD_ISO_B_2 0x0E #define MICROREAD_PIPE_ID_HDS_MCARD_CUSTOM 0x0F #define MICROREAD_PIPE_ID_HDS_MREAD_ISO_B 0x10 #define MICROREAD_PIPE_ID_HDS_MREAD_NFC_T1 0x11 #define MICROREAD_PIPE_ID_HDS_MREAD_ISO_A 0x12 #define MICROREAD_PIPE_ID_HDS_MREAD_ISO_15_3 0x13 #define MICROREAD_PIPE_ID_HDS_MREAD_ISO_15_2 0x14 #define MICROREAD_PIPE_ID_HDS_MREAD_NFC_T3 0x15 #define MICROREAD_PIPE_ID_HDS_MREAD_ISO_B_3 0x16 #define MICROREAD_PIPE_ID_HDS_MREAD_BPRIME 0x17 #define MICROREAD_PIPE_ID_HDS_MREAD_ISO_A_3 0x18 #define MICROREAD_PIPE_ID_HDS_MREAD_GEN 0x1B #define MICROREAD_PIPE_ID_HDS_STACKED_ELEMENT 0x1C #define MICROREAD_PIPE_ID_HDS_INSTANCES 0x1D #define MICROREAD_PIPE_ID_HDS_TESTRF 0x1E #define MICROREAD_PIPE_ID_HDS_P2P_TARGET 0x1F #define MICROREAD_PIPE_ID_HDS_P2P_INITIATOR 0x20 /* Events */ #define MICROREAD_EVT_MREAD_DISCOVERY_OCCURED NFC_HCI_EVT_TARGET_DISCOVERED #define 
MICROREAD_EVT_MREAD_CARD_FOUND 0x3D #define MICROREAD_EMCF_A_ATQA 0 #define MICROREAD_EMCF_A_SAK 2 #define MICROREAD_EMCF_A_LEN 3 #define MICROREAD_EMCF_A_UID 4 #define MICROREAD_EMCF_A3_ATQA 0 #define MICROREAD_EMCF_A3_SAK 2 #define MICROREAD_EMCF_A3_LEN 3 #define MICROREAD_EMCF_A3_UID 4 #define MICROREAD_EMCF_B_UID 0 #define MICROREAD_EMCF_T1_ATQA 0 #define MICROREAD_EMCF_T1_UID 4 #define MICROREAD_EMCF_T3_UID 0 #define MICROREAD_EVT_MREAD_DISCOVERY_START NFC_HCI_EVT_READER_REQUESTED #define MICROREAD_EVT_MREAD_DISCOVERY_START_SOME 0x3E #define MICROREAD_EVT_MREAD_DISCOVERY_STOP NFC_HCI_EVT_END_OPERATION #define MICROREAD_EVT_MREAD_SIM_REQUESTS 0x3F #define MICROREAD_EVT_MCARD_EXCHANGE NFC_HCI_EVT_TARGET_DISCOVERED #define MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_TO_RF 0x20 #define MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_FROM_RF 0x21 #define MICROREAD_EVT_MCARD_FIELD_ON 0x11 #define MICROREAD_EVT_P2P_TARGET_ACTIVATED 0x13 #define MICROREAD_EVT_P2P_TARGET_DEACTIVATED 0x12 #define MICROREAD_EVT_MCARD_FIELD_OFF 0x14 /* Commands */ #define MICROREAD_CMD_MREAD_EXCHANGE 0x10 #define MICROREAD_CMD_MREAD_SUBSCRIBE 0x3F /* Hosts IDs */ #define MICROREAD_ELT_ID_HDS NFC_HCI_TERMINAL_HOST_ID #define MICROREAD_ELT_ID_SIM NFC_HCI_UICC_HOST_ID #define MICROREAD_ELT_ID_SE1 0x03 #define MICROREAD_ELT_ID_SE2 0x04 #define MICROREAD_ELT_ID_SE3 0x05 static const struct nfc_hci_gate microread_gates[] = { {MICROREAD_GATE_ID_ADM, MICROREAD_PIPE_ID_ADMIN}, {MICROREAD_GATE_ID_LOOPBACK, MICROREAD_PIPE_ID_HDS_LOOPBACK}, {MICROREAD_GATE_ID_IDT, MICROREAD_PIPE_ID_HDS_IDT}, {MICROREAD_GATE_ID_LMS, MICROREAD_PIPE_ID_LMS}, {MICROREAD_GATE_ID_MREAD_ISO_B, MICROREAD_PIPE_ID_HDS_MREAD_ISO_B}, {MICROREAD_GATE_ID_MREAD_ISO_A, MICROREAD_PIPE_ID_HDS_MREAD_ISO_A}, {MICROREAD_GATE_ID_MREAD_ISO_A_3, MICROREAD_PIPE_ID_HDS_MREAD_ISO_A_3}, {MICROREAD_GATE_ID_MGT, MICROREAD_PIPE_ID_MGT}, {MICROREAD_GATE_ID_OS, MICROREAD_PIPE_ID_OS}, {MICROREAD_GATE_ID_MREAD_NFC_T1, MICROREAD_PIPE_ID_HDS_MREAD_NFC_T1}, {MICROREAD_GATE_ID_MREAD_NFC_T3, MICROREAD_PIPE_ID_HDS_MREAD_NFC_T3}, {MICROREAD_GATE_ID_P2P_TARGET, MICROREAD_PIPE_ID_HDS_P2P_TARGET}, {MICROREAD_GATE_ID_P2P_INITIATOR, MICROREAD_PIPE_ID_HDS_P2P_INITIATOR} }; /* Largest headroom needed for outgoing custom commands */ #define MICROREAD_CMDS_HEADROOM 2 #define MICROREAD_CMD_TAILROOM 2 struct microread_info { const struct nfc_phy_ops *phy_ops; void *phy_id; struct nfc_hci_dev *hdev; int async_cb_type; data_exchange_cb_t async_cb; void *async_cb_context; }; static int microread_open(struct nfc_hci_dev *hdev) { struct microread_info *info = nfc_hci_get_clientdata(hdev); return info->phy_ops->enable(info->phy_id); } static void microread_close(struct nfc_hci_dev *hdev) { struct microread_info *info = nfc_hci_get_clientdata(hdev); info->phy_ops->disable(info->phy_id); } static int microread_hci_ready(struct nfc_hci_dev *hdev) { int r; u8 param[4]; param[0] = 0x03; r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_ISO_A, MICROREAD_CMD_MREAD_SUBSCRIBE, param, 1, NULL); if (r) return r; r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_ISO_A_3, MICROREAD_CMD_MREAD_SUBSCRIBE, NULL, 0, NULL); if (r) return r; param[0] = 0x00; param[1] = 0x03; param[2] = 0x00; r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_ISO_B, MICROREAD_CMD_MREAD_SUBSCRIBE, param, 3, NULL); if (r) return r; r = nfc_hci_send_cmd(hdev, MICROREAD_GATE_ID_MREAD_NFC_T1, MICROREAD_CMD_MREAD_SUBSCRIBE, NULL, 0, NULL); if (r) return r; param[0] = 0xFF; param[1] = 0xFF; param[2] = 0x00; param[3] = 0x00; r = nfc_hci_send_cmd(hdev, 
MICROREAD_GATE_ID_MREAD_NFC_T3, MICROREAD_CMD_MREAD_SUBSCRIBE, param, 4, NULL); return r; } static int microread_xmit(struct nfc_hci_dev *hdev, struct sk_buff *skb) { struct microread_info *info = nfc_hci_get_clientdata(hdev); return info->phy_ops->write(info->phy_id, skb); } static int microread_start_poll(struct nfc_hci_dev *hdev, u32 im_protocols, u32 tm_protocols) { int r; u8 param[2]; u8 mode; param[0] = 0x00; param[1] = 0x00; if (im_protocols & NFC_PROTO_ISO14443_MASK) param[0] |= (1 << 2); if (im_protocols & NFC_PROTO_ISO14443_B_MASK) param[0] |= 1; if (im_protocols & NFC_PROTO_MIFARE_MASK) param[1] |= 1; if (im_protocols & NFC_PROTO_JEWEL_MASK) param[0] |= (1 << 1); if (im_protocols & NFC_PROTO_FELICA_MASK) param[0] |= (1 << 5); if (im_protocols & NFC_PROTO_NFC_DEP_MASK) param[1] |= (1 << 1); if ((im_protocols | tm_protocols) & NFC_PROTO_NFC_DEP_MASK) { hdev->gb = nfc_get_local_general_bytes(hdev->ndev, &hdev->gb_len); if (hdev->gb == NULL || hdev->gb_len == 0) { im_protocols &= ~NFC_PROTO_NFC_DEP_MASK; tm_protocols &= ~NFC_PROTO_NFC_DEP_MASK; } } r = nfc_hci_send_event(hdev, MICROREAD_GATE_ID_MREAD_ISO_A, MICROREAD_EVT_MREAD_DISCOVERY_STOP, NULL, 0); if (r) return r; mode = 0xff; r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET, MICROREAD_PAR_P2P_TARGET_MODE, &mode, 1); if (r) return r; if (im_protocols & NFC_PROTO_NFC_DEP_MASK) { r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_INITIATOR, MICROREAD_PAR_P2P_INITIATOR_GI, hdev->gb, hdev->gb_len); if (r) return r; } if (tm_protocols & NFC_PROTO_NFC_DEP_MASK) { r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET, MICROREAD_PAR_P2P_TARGET_GT, hdev->gb, hdev->gb_len); if (r) return r; mode = 0x02; r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET, MICROREAD_PAR_P2P_TARGET_MODE, &mode, 1); if (r) return r; } return nfc_hci_send_event(hdev, MICROREAD_GATE_ID_MREAD_ISO_A, MICROREAD_EVT_MREAD_DISCOVERY_START_SOME, param, 2); } static int microread_dep_link_up(struct nfc_hci_dev *hdev, struct nfc_target *target, u8 comm_mode, u8 *gb, size_t gb_len) { struct sk_buff *rgb_skb = NULL; int r; r = nfc_hci_get_param(hdev, target->hci_reader_gate, MICROREAD_PAR_P2P_INITIATOR_GT, &rgb_skb); if (r < 0) return r; if (rgb_skb->len == 0 || rgb_skb->len > NFC_GB_MAXSIZE) { r = -EPROTO; goto exit; } r = nfc_set_remote_general_bytes(hdev->ndev, rgb_skb->data, rgb_skb->len); if (r == 0) r = nfc_dep_link_is_up(hdev->ndev, target->idx, comm_mode, NFC_RF_INITIATOR); exit: kfree_skb(rgb_skb); return r; } static int microread_dep_link_down(struct nfc_hci_dev *hdev) { return nfc_hci_send_event(hdev, MICROREAD_GATE_ID_P2P_INITIATOR, MICROREAD_EVT_MREAD_DISCOVERY_STOP, NULL, 0); } static int microread_target_from_gate(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) { switch (gate) { case MICROREAD_GATE_ID_P2P_INITIATOR: target->supported_protocols = NFC_PROTO_NFC_DEP_MASK; break; default: return -EPROTO; } return 0; } static int microread_complete_target_discovered(struct nfc_hci_dev *hdev, u8 gate, struct nfc_target *target) { return 0; } #define MICROREAD_CB_TYPE_READER_ALL 1 static void microread_im_transceive_cb(void *context, struct sk_buff *skb, int err) { const struct microread_info *info = context; switch (info->async_cb_type) { case MICROREAD_CB_TYPE_READER_ALL: if (err == 0) { if (skb->len == 0) { kfree_skb(skb); info->async_cb(info->async_cb_context, NULL, -EPROTO); return; } if (skb->data[skb->len - 1] != 0) { err = nfc_hci_result_to_errno( skb->data[skb->len - 1]); kfree_skb(skb); 
info->async_cb(info->async_cb_context, NULL, err); return; } skb_trim(skb, skb->len - 1); /* RF Error ind. */ } info->async_cb(info->async_cb_context, skb, err); break; default: if (err == 0) kfree_skb(skb); break; } } /* * Returns: * <= 0: driver handled the data exchange * 1: driver doesn't especially handle, please do standard processing */ static int microread_im_transceive(struct nfc_hci_dev *hdev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) { struct microread_info *info = nfc_hci_get_clientdata(hdev); u8 control_bits; u16 crc; pr_info("data exchange to gate 0x%x\n", target->hci_reader_gate); if (target->hci_reader_gate == MICROREAD_GATE_ID_P2P_INITIATOR) { *(u8 *)skb_push(skb, 1) = 0; return nfc_hci_send_event(hdev, target->hci_reader_gate, MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_TO_RF, skb->data, skb->len); } switch (target->hci_reader_gate) { case MICROREAD_GATE_ID_MREAD_ISO_A: control_bits = 0xCB; break; case MICROREAD_GATE_ID_MREAD_ISO_A_3: control_bits = 0xCB; break; case MICROREAD_GATE_ID_MREAD_ISO_B: control_bits = 0xCB; break; case MICROREAD_GATE_ID_MREAD_NFC_T1: control_bits = 0x1B; crc = crc_ccitt(0xffff, skb->data, skb->len); crc = ~crc; skb_put_u8(skb, crc & 0xff); skb_put_u8(skb, crc >> 8); break; case MICROREAD_GATE_ID_MREAD_NFC_T3: control_bits = 0xDB; break; default: pr_info("Abort im_transceive to invalid gate 0x%x\n", target->hci_reader_gate); return 1; } *(u8 *)skb_push(skb, 1) = control_bits; info->async_cb_type = MICROREAD_CB_TYPE_READER_ALL; info->async_cb = cb; info->async_cb_context = cb_context; return nfc_hci_send_cmd_async(hdev, target->hci_reader_gate, MICROREAD_CMD_MREAD_EXCHANGE, skb->data, skb->len, microread_im_transceive_cb, info); } static int microread_tm_send(struct nfc_hci_dev *hdev, struct sk_buff *skb) { int r; r = nfc_hci_send_event(hdev, MICROREAD_GATE_ID_P2P_TARGET, MICROREAD_EVT_MCARD_EXCHANGE, skb->data, skb->len); kfree_skb(skb); return r; } static void microread_target_discovered(struct nfc_hci_dev *hdev, u8 gate, struct sk_buff *skb) { struct nfc_target *targets; int r = 0; pr_info("target discovered to gate 0x%x\n", gate); targets = kzalloc(sizeof(struct nfc_target), GFP_KERNEL); if (targets == NULL) { r = -ENOMEM; goto exit; } targets->hci_reader_gate = gate; switch (gate) { case MICROREAD_GATE_ID_MREAD_ISO_A: targets->supported_protocols = nfc_hci_sak_to_protocol(skb->data[MICROREAD_EMCF_A_SAK]); targets->sens_res = be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A_ATQA]); targets->sel_res = skb->data[MICROREAD_EMCF_A_SAK]; targets->nfcid1_len = skb->data[MICROREAD_EMCF_A_LEN]; if (targets->nfcid1_len > sizeof(targets->nfcid1)) { r = -EINVAL; goto exit_free; } memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A_UID], targets->nfcid1_len); break; case MICROREAD_GATE_ID_MREAD_ISO_A_3: targets->supported_protocols = nfc_hci_sak_to_protocol(skb->data[MICROREAD_EMCF_A3_SAK]); targets->sens_res = be16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_A3_ATQA]); targets->sel_res = skb->data[MICROREAD_EMCF_A3_SAK]; targets->nfcid1_len = skb->data[MICROREAD_EMCF_A3_LEN]; if (targets->nfcid1_len > sizeof(targets->nfcid1)) { r = -EINVAL; goto exit_free; } memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_A3_UID], targets->nfcid1_len); break; case MICROREAD_GATE_ID_MREAD_ISO_B: targets->supported_protocols = NFC_PROTO_ISO14443_B_MASK; memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_B_UID], 4); targets->nfcid1_len = 4; break; case MICROREAD_GATE_ID_MREAD_NFC_T1: targets->supported_protocols = NFC_PROTO_JEWEL_MASK; 
targets->sens_res = le16_to_cpu(*(u16 *)&skb->data[MICROREAD_EMCF_T1_ATQA]); memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_T1_UID], 4); targets->nfcid1_len = 4; break; case MICROREAD_GATE_ID_MREAD_NFC_T3: targets->supported_protocols = NFC_PROTO_FELICA_MASK; memcpy(targets->nfcid1, &skb->data[MICROREAD_EMCF_T3_UID], 8); targets->nfcid1_len = 8; break; default: pr_info("discard target discovered to gate 0x%x\n", gate); goto exit_free; } r = nfc_targets_found(hdev->ndev, targets, 1); exit_free: kfree(targets); exit: kfree_skb(skb); if (r) pr_err("Failed to handle discovered target err=%d\n", r); } static int microread_event_received(struct nfc_hci_dev *hdev, u8 pipe, u8 event, struct sk_buff *skb) { int r; u8 gate = hdev->pipes[pipe].gate; u8 mode; pr_info("Microread received event 0x%x to gate 0x%x\n", event, gate); switch (event) { case MICROREAD_EVT_MREAD_CARD_FOUND: microread_target_discovered(hdev, gate, skb); return 0; case MICROREAD_EVT_P2P_INITIATOR_EXCHANGE_FROM_RF: if (skb->len < 1) { kfree_skb(skb); return -EPROTO; } if (skb->data[skb->len - 1]) { kfree_skb(skb); return -EIO; } skb_trim(skb, skb->len - 1); r = nfc_tm_data_received(hdev->ndev, skb); break; case MICROREAD_EVT_MCARD_FIELD_ON: case MICROREAD_EVT_MCARD_FIELD_OFF: kfree_skb(skb); return 0; case MICROREAD_EVT_P2P_TARGET_ACTIVATED: r = nfc_tm_activated(hdev->ndev, NFC_PROTO_NFC_DEP_MASK, NFC_COMM_PASSIVE, skb->data, skb->len); kfree_skb(skb); break; case MICROREAD_EVT_MCARD_EXCHANGE: if (skb->len < 1) { kfree_skb(skb); return -EPROTO; } if (skb->data[skb->len-1]) { kfree_skb(skb); return -EIO; } skb_trim(skb, skb->len - 1); r = nfc_tm_data_received(hdev->ndev, skb); break; case MICROREAD_EVT_P2P_TARGET_DEACTIVATED: kfree_skb(skb); mode = 0xff; r = nfc_hci_set_param(hdev, MICROREAD_GATE_ID_P2P_TARGET, MICROREAD_PAR_P2P_TARGET_MODE, &mode, 1); if (r) break; r = nfc_hci_send_event(hdev, gate, MICROREAD_EVT_MREAD_DISCOVERY_STOP, NULL, 0); break; default: return 1; } return r; } static const struct nfc_hci_ops microread_hci_ops = { .open = microread_open, .close = microread_close, .hci_ready = microread_hci_ready, .xmit = microread_xmit, .start_poll = microread_start_poll, .dep_link_up = microread_dep_link_up, .dep_link_down = microread_dep_link_down, .target_from_gate = microread_target_from_gate, .complete_target_discovered = microread_complete_target_discovered, .im_transceive = microread_im_transceive, .tm_send = microread_tm_send, .check_presence = NULL, .event_received = microread_event_received, }; int microread_probe(void *phy_id, const struct nfc_phy_ops *phy_ops, const char *llc_name, int phy_headroom, int phy_tailroom, int phy_payload, struct nfc_hci_dev **hdev) { struct microread_info *info; unsigned long quirks = 0; u32 protocols; struct nfc_hci_init_data init_data; int r; info = kzalloc(sizeof(struct microread_info), GFP_KERNEL); if (!info) { r = -ENOMEM; goto err_info_alloc; } info->phy_ops = phy_ops; info->phy_id = phy_id; init_data.gate_count = ARRAY_SIZE(microread_gates); memcpy(init_data.gates, microread_gates, sizeof(microread_gates)); strcpy(init_data.session_id, "MICROREA"); set_bit(NFC_HCI_QUIRK_SHORT_CLEAR, &quirks); protocols = NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK | NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_NFC_DEP_MASK; info->hdev = nfc_hci_allocate_device(&microread_hci_ops, &init_data, quirks, protocols, llc_name, phy_headroom + MICROREAD_CMDS_HEADROOM, phy_tailroom + MICROREAD_CMD_TAILROOM, phy_payload); if (!info->hdev) { pr_err("Cannot 
allocate nfc hdev\n"); r = -ENOMEM; goto err_alloc_hdev; } nfc_hci_set_clientdata(info->hdev, info); r = nfc_hci_register_device(info->hdev); if (r) goto err_regdev; *hdev = info->hdev; return 0; err_regdev: nfc_hci_free_device(info->hdev); err_alloc_hdev: kfree(info); err_info_alloc: return r; } EXPORT_SYMBOL(microread_probe); void microread_remove(struct nfc_hci_dev *hdev) { struct microread_info *info = nfc_hci_get_clientdata(hdev); nfc_hci_unregister_device(hdev); nfc_hci_free_device(hdev); kfree(info); } EXPORT_SYMBOL(microread_remove); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/nfc/microread/microread.c
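microread_start_poll above folds the requested initiator protocols into the two discovery parameter bytes that accompany MICROREAD_EVT_MREAD_DISCOVERY_START_SOME. A standalone sketch of that packing; the bit positions mirror the branches in the driver, while the PROTO_* mask values are illustrative stand-ins for the kernel's NFC_PROTO_*_MASK constants:

#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's NFC_PROTO_*_MASK values */
#define PROTO_JEWEL      (1 << 0)
#define PROTO_MIFARE     (1 << 1)
#define PROTO_FELICA     (1 << 2)
#define PROTO_ISO14443   (1 << 3)
#define PROTO_ISO14443_B (1 << 4)
#define PROTO_NFC_DEP    (1 << 5)

/* Pack initiator-mode protocols into the two discovery bytes. */
static void pack_discovery_params(uint32_t im_protocols, uint8_t param[2])
{
	param[0] = 0;
	param[1] = 0;
	if (im_protocols & PROTO_ISO14443)
		param[0] |= 1 << 2;
	if (im_protocols & PROTO_ISO14443_B)
		param[0] |= 1;
	if (im_protocols & PROTO_MIFARE)
		param[1] |= 1;
	if (im_protocols & PROTO_JEWEL)
		param[0] |= 1 << 1;
	if (im_protocols & PROTO_FELICA)
		param[0] |= 1 << 5;
	if (im_protocols & PROTO_NFC_DEP)
		param[1] |= 1 << 1;
}

int main(void)
{
	uint8_t param[2];

	pack_discovery_params(PROTO_ISO14443 | PROTO_MIFARE, param);
	printf("param = %02x %02x\n", param[0], param[1]); /* 04 01 */
	return 0;
}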
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (C) 2013 Intel Corporation. All rights reserved. * * HCI based Driver for Inside Secure microread NFC Chip */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/nfc.h> #include <net/nfc/llc.h> #include "../mei_phy.h" #include "microread.h" #define MICROREAD_DRIVER_NAME "microread" static int microread_mei_probe(struct mei_cl_device *cldev, const struct mei_cl_device_id *id) { struct nfc_mei_phy *phy; int r; phy = nfc_mei_phy_alloc(cldev); if (!phy) return -ENOMEM; r = microread_probe(phy, &mei_phy_ops, LLC_NOP_NAME, MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, &phy->hdev); if (r < 0) { nfc_mei_phy_free(phy); return r; } return 0; } static void microread_mei_remove(struct mei_cl_device *cldev) { struct nfc_mei_phy *phy = mei_cldev_get_drvdata(cldev); microread_remove(phy->hdev); nfc_mei_phy_free(phy); } static struct mei_cl_device_id microread_mei_tbl[] = { { MICROREAD_DRIVER_NAME, MEI_NFC_UUID, MEI_CL_VERSION_ANY}, /* required last entry */ { } }; MODULE_DEVICE_TABLE(mei, microread_mei_tbl); static struct mei_cl_driver microread_driver = { .id_table = microread_mei_tbl, .name = MICROREAD_DRIVER_NAME, .probe = microread_mei_probe, .remove = microread_mei_remove, }; module_mei_cl_driver(microread_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/nfc/microread/mei.c
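microread_mei_probe above is deliberately thin: it allocates a phy and hands the core driver a table of transport callbacks, so the same HCI core can sit on MEI here and on i2c in the next file without knowing which bus is underneath. A toy sketch of that ops-table pattern; the struct and function names are invented for illustration and are not the kernel's nfc_phy_ops:

#include <stdio.h>

/* Transport callbacks the core calls without knowing the bus underneath. */
struct phy_ops {
	int  (*enable)(void *phy_id);
	void (*disable)(void *phy_id);
	int  (*write)(void *phy_id, const char *buf, int len);
};

struct core_dev {
	const struct phy_ops *ops;
	void *phy_id; /* opaque per-transport state */
};

static int mei_like_enable(void *phy_id)
{
	printf("enable %s\n", (char *)phy_id);
	return 0;
}

static void mei_like_disable(void *phy_id)
{
	printf("disable %s\n", (char *)phy_id);
}

static int mei_like_write(void *phy_id, const char *buf, int len)
{
	printf("write %d bytes via %s\n", len, (char *)phy_id);
	return 0;
}

static const struct phy_ops mei_like_ops = {
	.enable  = mei_like_enable,
	.disable = mei_like_disable,
	.write   = mei_like_write,
};

int main(void)
{
	struct core_dev dev = { .ops = &mei_like_ops, .phy_id = "mei0" };

	dev.ops->enable(dev.phy_id);         /* core opens the transport  */
	dev.ops->write(dev.phy_id, "hi", 2); /* core sends a frame        */
	dev.ops->disable(dev.phy_id);        /* core closes the transport */
	return 0;
}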
// SPDX-License-Identifier: GPL-2.0-only /* * HCI based Driver for Inside Secure microread NFC Chip - i2c layer * * Copyright (C) 2013 Intel Corporation. All rights reserved. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/nfc.h> #include <net/nfc/hci.h> #include <net/nfc/llc.h> #include "microread.h" #define MICROREAD_I2C_DRIVER_NAME "microread" #define MICROREAD_I2C_FRAME_HEADROOM 1 #define MICROREAD_I2C_FRAME_TAILROOM 1 /* framing in HCI mode */ #define MICROREAD_I2C_LLC_LEN 1 #define MICROREAD_I2C_LLC_CRC 1 #define MICROREAD_I2C_LLC_LEN_CRC (MICROREAD_I2C_LLC_LEN + \ MICROREAD_I2C_LLC_CRC) #define MICROREAD_I2C_LLC_MIN_SIZE (1 + MICROREAD_I2C_LLC_LEN_CRC) #define MICROREAD_I2C_LLC_MAX_PAYLOAD 29 #define MICROREAD_I2C_LLC_MAX_SIZE (MICROREAD_I2C_LLC_LEN_CRC + 1 + \ MICROREAD_I2C_LLC_MAX_PAYLOAD) struct microread_i2c_phy { struct i2c_client *i2c_dev; struct nfc_hci_dev *hdev; int hard_fault; /* * < 0 if hardware error occurred (e.g. i2c err) * and prevents normal operation. */ }; #define I2C_DUMP_SKB(info, skb) \ do { \ pr_debug("%s:\n", info); \ print_hex_dump(KERN_DEBUG, "i2c: ", DUMP_PREFIX_OFFSET, \ 16, 1, (skb)->data, (skb)->len, 0); \ } while (0) static void microread_i2c_add_len_crc(struct sk_buff *skb) { int i; u8 crc = 0; int len; len = skb->len; *(u8 *)skb_push(skb, 1) = len; for (i = 0; i < skb->len; i++) crc = crc ^ skb->data[i]; skb_put_u8(skb, crc); } static void microread_i2c_remove_len_crc(struct sk_buff *skb) { skb_pull(skb, MICROREAD_I2C_FRAME_HEADROOM); skb_trim(skb, MICROREAD_I2C_FRAME_TAILROOM); } static int check_crc(const struct sk_buff *skb) { int i; u8 crc = 0; for (i = 0; i < skb->len - 1; i++) crc = crc ^ skb->data[i]; if (crc != skb->data[skb->len-1]) { pr_err("CRC error 0x%x != 0x%x\n", crc, skb->data[skb->len-1]); pr_info("%s: BAD CRC\n", __func__); return -EPERM; } return 0; } static int microread_i2c_enable(void *phy_id) { return 0; } static void microread_i2c_disable(void *phy_id) { return; } static int microread_i2c_write(void *phy_id, struct sk_buff *skb) { int r; struct microread_i2c_phy *phy = phy_id; struct i2c_client *client = phy->i2c_dev; if (phy->hard_fault != 0) return phy->hard_fault; usleep_range(3000, 6000); microread_i2c_add_len_crc(skb); I2C_DUMP_SKB("i2c frame written", skb); r = i2c_master_send(client, skb->data, skb->len); if (r == -EREMOTEIO) { /* Retry, chip was in standby */ usleep_range(6000, 10000); r = i2c_master_send(client, skb->data, skb->len); } if (r >= 0) { if (r != skb->len) r = -EREMOTEIO; else r = 0; } microread_i2c_remove_len_crc(skb); return r; } static int microread_i2c_read(struct microread_i2c_phy *phy, struct sk_buff **skb) { int r; u8 len; u8 tmp[MICROREAD_I2C_LLC_MAX_SIZE - 1]; struct i2c_client *client = phy->i2c_dev; r = i2c_master_recv(client, &len, 1); if (r != 1) { nfc_err(&client->dev, "cannot read len byte\n"); return -EREMOTEIO; } if ((len < MICROREAD_I2C_LLC_MIN_SIZE) || (len > MICROREAD_I2C_LLC_MAX_SIZE)) { nfc_err(&client->dev, "invalid len byte\n"); r = -EBADMSG; goto flush; } *skb = alloc_skb(1 + len, GFP_KERNEL); if (*skb == NULL) { r = -ENOMEM; goto flush; } skb_put_u8(*skb, len); r = i2c_master_recv(client, skb_put(*skb, len), len); if (r != len) { kfree_skb(*skb); return -EREMOTEIO; } I2C_DUMP_SKB("i2c frame read", *skb); r = check_crc(*skb); if (r != 0) { kfree_skb(*skb); r = -EBADMSG; goto flush; } skb_pull(*skb, 1); skb_trim(*skb,
(*skb)->len - MICROREAD_I2C_FRAME_TAILROOM); usleep_range(3000, 6000); return 0; flush: if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0) r = -EREMOTEIO; usleep_range(3000, 6000); return r; } static irqreturn_t microread_i2c_irq_thread_fn(int irq, void *phy_id) { struct microread_i2c_phy *phy = phy_id; struct sk_buff *skb = NULL; int r; if (!phy || irq != phy->i2c_dev->irq) { WARN_ON_ONCE(1); return IRQ_NONE; } if (phy->hard_fault != 0) return IRQ_HANDLED; r = microread_i2c_read(phy, &skb); if (r == -EREMOTEIO) { phy->hard_fault = r; nfc_hci_recv_frame(phy->hdev, NULL); return IRQ_HANDLED; } else if ((r == -ENOMEM) || (r == -EBADMSG)) { return IRQ_HANDLED; } nfc_hci_recv_frame(phy->hdev, skb); return IRQ_HANDLED; } static const struct nfc_phy_ops i2c_phy_ops = { .write = microread_i2c_write, .enable = microread_i2c_enable, .disable = microread_i2c_disable, }; static int microread_i2c_probe(struct i2c_client *client) { struct microread_i2c_phy *phy; int r; phy = devm_kzalloc(&client->dev, sizeof(struct microread_i2c_phy), GFP_KERNEL); if (!phy) return -ENOMEM; i2c_set_clientdata(client, phy); phy->i2c_dev = client; r = request_threaded_irq(client->irq, NULL, microread_i2c_irq_thread_fn, IRQF_TRIGGER_RISING | IRQF_ONESHOT, MICROREAD_I2C_DRIVER_NAME, phy); if (r) { nfc_err(&client->dev, "Unable to register IRQ handler\n"); return r; } r = microread_probe(phy, &i2c_phy_ops, LLC_SHDLC_NAME, MICROREAD_I2C_FRAME_HEADROOM, MICROREAD_I2C_FRAME_TAILROOM, MICROREAD_I2C_LLC_MAX_PAYLOAD, &phy->hdev); if (r < 0) goto err_irq; return 0; err_irq: free_irq(client->irq, phy); return r; } static void microread_i2c_remove(struct i2c_client *client) { struct microread_i2c_phy *phy = i2c_get_clientdata(client); microread_remove(phy->hdev); free_irq(client->irq, phy); } static const struct i2c_device_id microread_i2c_id[] = { { MICROREAD_I2C_DRIVER_NAME, 0}, { } }; MODULE_DEVICE_TABLE(i2c, microread_i2c_id); static struct i2c_driver microread_i2c_driver = { .driver = { .name = MICROREAD_I2C_DRIVER_NAME, }, .probe = microread_i2c_probe, .remove = microread_i2c_remove, .id_table = microread_i2c_id, }; module_i2c_driver(microread_i2c_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
linux-master
drivers/nfc/microread/i2c.c
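A minimal user-space sketch (an editorial aside, not part of the driver) of the LLC framing that microread_i2c_add_len_crc() and check_crc() implement above: a one-byte length header plus an XOR checksum trailer computed over the length byte and the payload. The helper names and the sample payload bytes are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Frame layout: [len][payload...][crc], where crc is the XOR of the len
 * byte and the payload, matching microread_i2c_add_len_crc()/check_crc(). */
static size_t llc_frame_build(const uint8_t *payload, uint8_t len, uint8_t *out)
{
	uint8_t crc = len;
	size_t i;

	out[0] = len;
	for (i = 0; i < len; i++) {
		out[1 + i] = payload[i];
		crc ^= payload[i];
	}
	out[1 + len] = crc;
	return (size_t)len + 2;
}

/* Returns 0 when the trailing checksum matches, -1 otherwise. */
static int llc_frame_check(const uint8_t *frame, size_t flen)
{
	uint8_t crc = 0;
	size_t i;

	for (i = 0; i < flen - 1; i++)
		crc ^= frame[i];
	return crc == frame[flen - 1] ? 0 : -1;
}

int main(void)
{
	uint8_t payload[] = { 0x81, 0x02, 0x03 };	/* hypothetical HCI bytes */
	uint8_t frame[32];
	size_t flen = llc_frame_build(payload, sizeof(payload), frame);

	printf("frame ok: %s\n", llc_frame_check(frame, flen) == 0 ? "yes" : "no");
	return 0;
}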
// SPDX-License-Identifier: GPL-2.0-or-later /* ------------------------------------------------------------------------- * Copyright (C) 2014-2016, Intel Corporation * * ------------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/nfc.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/firmware.h> #include <net/nfc/nci_core.h> #include "fdp.h" #define FDP_OTP_PATCH_NAME "otp.bin" #define FDP_RAM_PATCH_NAME "ram.bin" #define FDP_FW_HEADER_SIZE 576 #define FDP_FW_UPDATE_SLEEP 1000 #define NCI_GET_VERSION_TIMEOUT 8000 #define NCI_PATCH_REQUEST_TIMEOUT 8000 #define FDP_PATCH_CONN_DEST 0xC2 #define FDP_PATCH_CONN_PARAM_TYPE 0xA0 #define NCI_PATCH_TYPE_RAM 0x00 #define NCI_PATCH_TYPE_OTP 0x01 #define NCI_PATCH_TYPE_EOT 0xFF #define NCI_PARAM_ID_FW_RAM_VERSION 0xA0 #define NCI_PARAM_ID_FW_OTP_VERSION 0xA1 #define NCI_PARAM_ID_OTP_LIMITED_VERSION 0xC5 #define NCI_PARAM_ID_KEY_INDEX_ID 0xC6 #define NCI_GID_PROP 0x0F #define NCI_OP_PROP_PATCH_OID 0x08 #define NCI_OP_PROP_SET_PDATA_OID 0x23 struct fdp_nci_info { const struct nfc_phy_ops *phy_ops; struct fdp_i2c_phy *phy; struct nci_dev *ndev; const struct firmware *otp_patch; const struct firmware *ram_patch; u32 otp_patch_version; u32 ram_patch_version; u32 otp_version; u32 ram_version; u32 limited_otp_version; u8 key_index; const u8 *fw_vsc_cfg; u8 clock_type; u32 clock_freq; atomic_t data_pkt_counter; void (*data_pkt_counter_cb)(struct nci_dev *ndev); u8 setup_patch_sent; u8 setup_patch_ntf; u8 setup_patch_status; u8 setup_reset_ntf; wait_queue_head_t setup_wq; }; static const u8 nci_core_get_config_otp_ram_version[5] = { 0x04, NCI_PARAM_ID_FW_RAM_VERSION, NCI_PARAM_ID_FW_OTP_VERSION, NCI_PARAM_ID_OTP_LIMITED_VERSION, NCI_PARAM_ID_KEY_INDEX_ID }; struct nci_core_get_config_rsp { u8 status; u8 count; u8 data[]; }; static int fdp_nci_create_conn(struct nci_dev *ndev) { struct fdp_nci_info *info = nci_get_drvdata(ndev); struct core_conn_create_dest_spec_params param; int r; /* proprietary destination specific paramerer without value */ param.type = FDP_PATCH_CONN_PARAM_TYPE; param.length = 0x00; r = nci_core_conn_create(info->ndev, FDP_PATCH_CONN_DEST, 1, sizeof(param), &param); if (r) return r; return nci_get_conn_info_by_dest_type_params(ndev, FDP_PATCH_CONN_DEST, NULL); } static inline int fdp_nci_get_versions(struct nci_dev *ndev) { return nci_core_cmd(ndev, NCI_OP_CORE_GET_CONFIG_CMD, sizeof(nci_core_get_config_otp_ram_version), (__u8 *) &nci_core_get_config_otp_ram_version); } static inline int fdp_nci_patch_cmd(struct nci_dev *ndev, u8 type) { return nci_prop_cmd(ndev, NCI_OP_PROP_PATCH_OID, sizeof(type), &type); } static inline int fdp_nci_set_production_data(struct nci_dev *ndev, u8 len, const char *data) { return nci_prop_cmd(ndev, NCI_OP_PROP_SET_PDATA_OID, len, data); } static int fdp_nci_set_clock(struct nci_dev *ndev, u8 clock_type, u32 clock_freq) { u32 fc = 13560; u32 nd, num, delta; char data[9]; nd = (24 * fc) / clock_freq; delta = 24 * fc - nd * clock_freq; num = (32768 * delta) / clock_freq; data[0] = 0x00; data[1] = 0x00; data[2] = 0x00; data[3] = 0x10; data[4] = 0x04; data[5] = num & 0xFF; data[6] = (num >> 8) & 0xff; data[7] = nd; data[8] = clock_type; return fdp_nci_set_production_data(ndev, 9, data); } static void fdp_nci_send_patch_cb(struct nci_dev *ndev) { struct fdp_nci_info *info = nci_get_drvdata(ndev); info->setup_patch_sent = 1; wake_up(&info->setup_wq); } /* * Register a packet sent counter and a callback * * We have no other way of knowing 
when all firmware packets were sent out * on the i2c bus. We need to know that in order to close the connection and * send the patch end message. */ static void fdp_nci_set_data_pkt_counter(struct nci_dev *ndev, void (*cb)(struct nci_dev *ndev), int count) { struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; dev_dbg(dev, "NCI data pkt counter %d\n", count); atomic_set(&info->data_pkt_counter, count); info->data_pkt_counter_cb = cb; } /* * The device is expecting a stream of packets. All packets need to * have the PBF flag set to 0x0 (last packet) even if the firmware * file is segmented and there are multiple packets. If we give the * whole firmware to nci_send_data it will segment it and it will set * the PBF flag to 0x01 so we need to do the segmentation here. * * The firmware will be analyzed and applied when we send NCI_OP_PROP_PATCH_CMD * command with NCI_PATCH_TYPE_EOT parameter. The device will send a * NFCC_PATCH_NTF packet and a NCI_OP_CORE_RESET_NTF packet. */ static int fdp_nci_send_patch(struct nci_dev *ndev, u8 conn_id, u8 type) { struct fdp_nci_info *info = nci_get_drvdata(ndev); const struct firmware *fw; struct sk_buff *skb; unsigned long len; int max_size, payload_size; int rc = 0; if ((type == NCI_PATCH_TYPE_OTP && !info->otp_patch) || (type == NCI_PATCH_TYPE_RAM && !info->ram_patch)) return -EINVAL; if (type == NCI_PATCH_TYPE_OTP) fw = info->otp_patch; else fw = info->ram_patch; max_size = nci_conn_max_data_pkt_payload_size(ndev, conn_id); if (max_size <= 0) return -EINVAL; len = fw->size; fdp_nci_set_data_pkt_counter(ndev, fdp_nci_send_patch_cb, DIV_ROUND_UP(fw->size, max_size)); while (len) { payload_size = min_t(unsigned long, max_size, len); skb = nci_skb_alloc(ndev, (NCI_CTRL_HDR_SIZE + payload_size), GFP_KERNEL); if (!skb) { fdp_nci_set_data_pkt_counter(ndev, NULL, 0); return -ENOMEM; } skb_reserve(skb, NCI_CTRL_HDR_SIZE); skb_put_data(skb, fw->data + (fw->size - len), payload_size); rc = nci_send_data(ndev, conn_id, skb); if (rc) { fdp_nci_set_data_pkt_counter(ndev, NULL, 0); return rc; } len -= payload_size; } return rc; } static int fdp_nci_open(struct nci_dev *ndev) { const struct fdp_nci_info *info = nci_get_drvdata(ndev); return info->phy_ops->enable(info->phy); } static int fdp_nci_close(struct nci_dev *ndev) { return 0; } static int fdp_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); int ret; if (atomic_dec_and_test(&info->data_pkt_counter)) info->data_pkt_counter_cb(ndev); ret = info->phy_ops->write(info->phy, skb); if (ret < 0) { kfree_skb(skb); return ret; } consume_skb(skb); return 0; } static int fdp_nci_request_firmware(struct nci_dev *ndev) { struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; const u8 *data; int r; r = request_firmware(&info->ram_patch, FDP_RAM_PATCH_NAME, dev); if (r < 0) { nfc_err(dev, "RAM patch request error\n"); return r; } data = info->ram_patch->data; info->ram_patch_version = data[FDP_FW_HEADER_SIZE] | (data[FDP_FW_HEADER_SIZE + 1] << 8) | (data[FDP_FW_HEADER_SIZE + 2] << 16) | (data[FDP_FW_HEADER_SIZE + 3] << 24); dev_dbg(dev, "RAM patch version: %d, size: %zu\n", info->ram_patch_version, info->ram_patch->size); r = request_firmware(&info->otp_patch, FDP_OTP_PATCH_NAME, dev); if (r < 0) { nfc_err(dev, "OTP patch request error\n"); return 0; } data = (u8 *) info->otp_patch->data; info->otp_patch_version = data[FDP_FW_HEADER_SIZE] | (data[FDP_FW_HEADER_SIZE + 1] << 8) | 
(data[FDP_FW_HEADER_SIZE+2] << 16) | (data[FDP_FW_HEADER_SIZE+3] << 24); dev_dbg(dev, "OTP patch version: %d, size: %zu\n", info->otp_patch_version, info->otp_patch->size); return 0; } static void fdp_nci_release_firmware(struct nci_dev *ndev) { struct fdp_nci_info *info = nci_get_drvdata(ndev); if (info->otp_patch) { release_firmware(info->otp_patch); info->otp_patch = NULL; } if (info->ram_patch) { release_firmware(info->ram_patch); info->ram_patch = NULL; } } static int fdp_nci_patch_otp(struct nci_dev *ndev) { struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; int conn_id; int r = 0; if (info->otp_version >= info->otp_patch_version) return r; info->setup_patch_sent = 0; info->setup_reset_ntf = 0; info->setup_patch_ntf = 0; /* Patch init request */ r = fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_OTP); if (r) return r; /* Patch data connection creation */ conn_id = fdp_nci_create_conn(ndev); if (conn_id < 0) return conn_id; /* Send the patch over the data connection */ r = fdp_nci_send_patch(ndev, conn_id, NCI_PATCH_TYPE_OTP); if (r) return r; /* Wait for all the packets to be send over i2c */ wait_event_interruptible(info->setup_wq, info->setup_patch_sent == 1); /* make sure that the NFCC processed the last data packet */ msleep(FDP_FW_UPDATE_SLEEP); /* Close the data connection */ r = nci_core_conn_close(info->ndev, conn_id); if (r) return r; /* Patch finish message */ if (fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_EOT)) { nfc_err(dev, "OTP patch error 0x%x\n", r); return -EINVAL; } /* If the patch notification didn't arrive yet, wait for it */ wait_event_interruptible(info->setup_wq, info->setup_patch_ntf); /* Check if the patching was successful */ r = info->setup_patch_status; if (r) { nfc_err(dev, "OTP patch error 0x%x\n", r); return -EINVAL; } /* * We need to wait for the reset notification before we * can continue */ wait_event_interruptible(info->setup_wq, info->setup_reset_ntf); return r; } static int fdp_nci_patch_ram(struct nci_dev *ndev) { struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; int conn_id; int r = 0; if (info->ram_version >= info->ram_patch_version) return r; info->setup_patch_sent = 0; info->setup_reset_ntf = 0; info->setup_patch_ntf = 0; /* Patch init request */ r = fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_RAM); if (r) return r; /* Patch data connection creation */ conn_id = fdp_nci_create_conn(ndev); if (conn_id < 0) return conn_id; /* Send the patch over the data connection */ r = fdp_nci_send_patch(ndev, conn_id, NCI_PATCH_TYPE_RAM); if (r) return r; /* Wait for all the packets to be send over i2c */ wait_event_interruptible(info->setup_wq, info->setup_patch_sent == 1); /* make sure that the NFCC processed the last data packet */ msleep(FDP_FW_UPDATE_SLEEP); /* Close the data connection */ r = nci_core_conn_close(info->ndev, conn_id); if (r) return r; /* Patch finish message */ if (fdp_nci_patch_cmd(ndev, NCI_PATCH_TYPE_EOT)) { nfc_err(dev, "RAM patch error 0x%x\n", r); return -EINVAL; } /* If the patch notification didn't arrive yet, wait for it */ wait_event_interruptible(info->setup_wq, info->setup_patch_ntf); /* Check if the patching was successful */ r = info->setup_patch_status; if (r) { nfc_err(dev, "RAM patch error 0x%x\n", r); return -EINVAL; } /* * We need to wait for the reset notification before we * can continue */ wait_event_interruptible(info->setup_wq, info->setup_reset_ntf); return r; } static int fdp_nci_setup(struct nci_dev *ndev) { /* Format: total length 
followed by an NCI packet */ struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; int r; u8 patched = 0; r = nci_core_init(ndev); if (r) goto error; /* Get RAM and OTP version */ r = fdp_nci_get_versions(ndev); if (r) goto error; /* Load firmware from disk */ r = fdp_nci_request_firmware(ndev); if (r) goto error; /* Update OTP */ if (info->otp_version < info->otp_patch_version) { r = fdp_nci_patch_otp(ndev); if (r) goto error; patched = 1; } /* Update RAM */ if (info->ram_version < info->ram_patch_version) { r = fdp_nci_patch_ram(ndev); if (r) goto error; patched = 1; } /* Release the firmware buffers */ fdp_nci_release_firmware(ndev); /* If a patch was applied the new version is checked */ if (patched) { r = nci_core_init(ndev); if (r) goto error; r = fdp_nci_get_versions(ndev); if (r) goto error; if (info->otp_version != info->otp_patch_version || info->ram_version != info->ram_patch_version) { nfc_err(dev, "Firmware update failed"); r = -EINVAL; goto error; } } /* * We initialized the devices but the NFC subsystem expects * it to not be initialized. */ return nci_core_reset(ndev); error: fdp_nci_release_firmware(ndev); nfc_err(dev, "Setup error %d\n", r); return r; } static int fdp_nci_post_setup(struct nci_dev *ndev) { struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; int r; /* Check if the device has VSC */ if (info->fw_vsc_cfg && info->fw_vsc_cfg[0]) { /* Set the vendor specific configuration */ r = fdp_nci_set_production_data(ndev, info->fw_vsc_cfg[3], &info->fw_vsc_cfg[4]); if (r) { nfc_err(dev, "Vendor specific config set error %d\n", r); return r; } } /* Set clock type and frequency */ r = fdp_nci_set_clock(ndev, info->clock_type, info->clock_freq); if (r) { nfc_err(dev, "Clock set error %d\n", r); return r; } /* * In order to apply the VSC FDP needs a reset */ r = nci_core_reset(ndev); if (r) return r; /** * The nci core was initialized when post setup was called * so we leave it like that */ return nci_core_init(ndev); } static int fdp_nci_core_reset_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); info->setup_reset_ntf = 1; wake_up(&info->setup_wq); return 0; } static int fdp_nci_prop_patch_ntf_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); info->setup_patch_ntf = 1; info->setup_patch_status = skb->data[0]; wake_up(&info->setup_wq); return 0; } static int fdp_nci_prop_patch_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; u8 status = skb->data[0]; dev_dbg(dev, "%s: status 0x%x\n", __func__, status); nci_req_complete(ndev, status); return 0; } static int fdp_nci_prop_set_production_data_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; u8 status = skb->data[0]; dev_dbg(dev, "%s: status 0x%x\n", __func__, status); nci_req_complete(ndev, status); return 0; } static int fdp_nci_core_get_config_rsp_packet(struct nci_dev *ndev, struct sk_buff *skb) { struct fdp_nci_info *info = nci_get_drvdata(ndev); struct device *dev = &info->phy->i2c_dev->dev; const struct nci_core_get_config_rsp *rsp = (void *) skb->data; unsigned int i; const u8 *p; if (rsp->status == NCI_STATUS_OK) { p = rsp->data; for (i = 0; i < 4; i++) { switch (*p++) { case NCI_PARAM_ID_FW_RAM_VERSION: p++; 
info->ram_version = le32_to_cpup((__le32 *) p); p += 4; break; case NCI_PARAM_ID_FW_OTP_VERSION: p++; info->otp_version = le32_to_cpup((__le32 *) p); p += 4; break; case NCI_PARAM_ID_OTP_LIMITED_VERSION: p++; info->otp_version = le32_to_cpup((__le32 *) p); p += 4; break; case NCI_PARAM_ID_KEY_INDEX_ID: p++; info->key_index = *p++; } } } dev_dbg(dev, "OTP version %d\n", info->otp_version); dev_dbg(dev, "RAM version %d\n", info->ram_version); dev_dbg(dev, "key index %d\n", info->key_index); dev_dbg(dev, "%s: status 0x%x\n", __func__, rsp->status); nci_req_complete(ndev, rsp->status); return 0; } static const struct nci_driver_ops fdp_core_ops[] = { { .opcode = NCI_OP_CORE_GET_CONFIG_RSP, .rsp = fdp_nci_core_get_config_rsp_packet, }, { .opcode = NCI_OP_CORE_RESET_NTF, .ntf = fdp_nci_core_reset_ntf_packet, }, }; static const struct nci_driver_ops fdp_prop_ops[] = { { .opcode = nci_opcode_pack(NCI_GID_PROP, NCI_OP_PROP_PATCH_OID), .rsp = fdp_nci_prop_patch_rsp_packet, .ntf = fdp_nci_prop_patch_ntf_packet, }, { .opcode = nci_opcode_pack(NCI_GID_PROP, NCI_OP_PROP_SET_PDATA_OID), .rsp = fdp_nci_prop_set_production_data_rsp_packet, }, }; static const struct nci_ops nci_ops = { .open = fdp_nci_open, .close = fdp_nci_close, .send = fdp_nci_send, .setup = fdp_nci_setup, .post_setup = fdp_nci_post_setup, .prop_ops = fdp_prop_ops, .n_prop_ops = ARRAY_SIZE(fdp_prop_ops), .core_ops = fdp_core_ops, .n_core_ops = ARRAY_SIZE(fdp_core_ops), }; int fdp_nci_probe(struct fdp_i2c_phy *phy, const struct nfc_phy_ops *phy_ops, struct nci_dev **ndevp, int tx_headroom, int tx_tailroom, u8 clock_type, u32 clock_freq, const u8 *fw_vsc_cfg) { struct device *dev = &phy->i2c_dev->dev; struct fdp_nci_info *info; struct nci_dev *ndev; u32 protocols; int r; info = devm_kzalloc(dev, sizeof(struct fdp_nci_info), GFP_KERNEL); if (!info) return -ENOMEM; info->phy = phy; info->phy_ops = phy_ops; info->clock_type = clock_type; info->clock_freq = clock_freq; info->fw_vsc_cfg = fw_vsc_cfg; init_waitqueue_head(&info->setup_wq); protocols = NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK | NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_NFC_DEP_MASK | NFC_PROTO_ISO15693_MASK; BUILD_BUG_ON(ARRAY_SIZE(fdp_prop_ops) > NCI_MAX_PROPRIETARY_CMD); ndev = nci_allocate_device(&nci_ops, protocols, tx_headroom, tx_tailroom); if (!ndev) { nfc_err(dev, "Cannot allocate nfc ndev\n"); return -ENOMEM; } r = nci_register_device(ndev); if (r) goto err_regdev; *ndevp = ndev; info->ndev = ndev; nci_set_drvdata(ndev, info); return 0; err_regdev: nci_free_device(ndev); return r; } EXPORT_SYMBOL(fdp_nci_probe); void fdp_nci_remove(struct nci_dev *ndev) { nci_unregister_device(ndev); nci_free_device(ndev); } EXPORT_SYMBOL(fdp_nci_remove); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("NFC NCI driver for Intel Fields Peak NFC controller"); MODULE_AUTHOR("Robert Dolca <[email protected]>"); MODULE_FIRMWARE(FDP_OTP_PATCH_NAME); MODULE_FIRMWARE(FDP_RAM_PATCH_NAME);
linux-master
drivers/nfc/fdp/fdp.c
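A hedged sketch of the clock-divider arithmetic in fdp_nci_set_clock() above, recomputed in user space for the 26 MHz default that the i2c layer falls back to. Frequencies are in kHz, matching the driver's fc = 13560 carrier constant; variable names mirror the driver, but the program itself is illustrative only.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t fc = 13560;		/* 13.56 MHz NFC carrier, in kHz */
	uint32_t clock_freq = 26000;	/* default XTAL frequency, kHz */
	uint32_t nd, delta, num;

	nd = (24 * fc) / clock_freq;		/* integer divider */
	delta = 24 * fc - nd * clock_freq;	/* remainder of the division */
	num = (32768 * delta) / clock_freq;	/* fractional part, 15-bit scale */

	/* data[5..7] of the production-data payload in fdp_nci_set_clock() */
	printf("nd=%u num=0x%04x -> data[5]=0x%02x data[6]=0x%02x data[7]=0x%02x\n",
	       (unsigned)nd, (unsigned)num, (unsigned)(num & 0xff),
	       (unsigned)((num >> 8) & 0xff), (unsigned)nd);
	return 0;
}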
// SPDX-License-Identifier: GPL-2.0-or-later /* ------------------------------------------------------------------------- * Copyright (C) 2014-2016, Intel Corporation * * ------------------------------------------------------------------------- */ #include <linux/module.h> #include <linux/acpi.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/nfc.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <net/nfc/nfc.h> #include <net/nfc/nci_core.h> #include "fdp.h" #define FDP_I2C_DRIVER_NAME "fdp_nci_i2c" #define FDP_DP_CLOCK_TYPE_NAME "clock-type" #define FDP_DP_CLOCK_FREQ_NAME "clock-freq" #define FDP_DP_FW_VSC_CFG_NAME "fw-vsc-cfg" #define FDP_FRAME_HEADROOM 2 #define FDP_FRAME_TAILROOM 1 #define FDP_NCI_I2C_MIN_PAYLOAD 5 #define FDP_NCI_I2C_MAX_PAYLOAD 261 #define FDP_POWER_OFF 0 #define FDP_POWER_ON 1 #define fdp_nci_i2c_dump_skb(dev, prefix, skb) \ print_hex_dump(KERN_DEBUG, prefix": ", DUMP_PREFIX_OFFSET, \ 16, 1, (skb)->data, (skb)->len, 0) static void fdp_nci_i2c_reset(const struct fdp_i2c_phy *phy) { /* Reset RST/WakeUP for at least 100 micro-second */ gpiod_set_value_cansleep(phy->power_gpio, FDP_POWER_OFF); usleep_range(1000, 4000); gpiod_set_value_cansleep(phy->power_gpio, FDP_POWER_ON); usleep_range(10000, 14000); } static int fdp_nci_i2c_enable(void *phy_id) { const struct fdp_i2c_phy *phy = phy_id; fdp_nci_i2c_reset(phy); return 0; } static void fdp_nci_i2c_disable(void *phy_id) { const struct fdp_i2c_phy *phy = phy_id; fdp_nci_i2c_reset(phy); } static void fdp_nci_i2c_add_len_lrc(struct sk_buff *skb) { u8 lrc = 0; u16 len, i; /* Add length header */ len = skb->len; *(u8 *)skb_push(skb, 1) = len & 0xff; *(u8 *)skb_push(skb, 1) = len >> 8; /* Compute and add lrc */ for (i = 0; i < len + 2; i++) lrc ^= skb->data[i]; skb_put_u8(skb, lrc); } static void fdp_nci_i2c_remove_len_lrc(struct sk_buff *skb) { skb_pull(skb, FDP_FRAME_HEADROOM); skb_trim(skb, skb->len - FDP_FRAME_TAILROOM); } static int fdp_nci_i2c_write(void *phy_id, struct sk_buff *skb) { struct fdp_i2c_phy *phy = phy_id; struct i2c_client *client = phy->i2c_dev; int r; if (phy->hard_fault != 0) return phy->hard_fault; fdp_nci_i2c_add_len_lrc(skb); fdp_nci_i2c_dump_skb(&client->dev, "fdp_wr", skb); r = i2c_master_send(client, skb->data, skb->len); if (r == -EREMOTEIO) { /* Retry, chip was in standby */ usleep_range(1000, 4000); r = i2c_master_send(client, skb->data, skb->len); } if (r < 0 || r != skb->len) dev_dbg(&client->dev, "%s: error err=%d len=%d\n", __func__, r, skb->len); if (r >= 0) { if (r != skb->len) { phy->hard_fault = r; r = -EREMOTEIO; } else { r = 0; } } fdp_nci_i2c_remove_len_lrc(skb); return r; } static const struct nfc_phy_ops i2c_phy_ops = { .write = fdp_nci_i2c_write, .enable = fdp_nci_i2c_enable, .disable = fdp_nci_i2c_disable, }; static int fdp_nci_i2c_read(struct fdp_i2c_phy *phy, struct sk_buff **skb) { int r, len; u8 tmp[FDP_NCI_I2C_MAX_PAYLOAD], lrc, k; u16 i; struct i2c_client *client = phy->i2c_dev; *skb = NULL; /* Read the length packet and the data packet */ for (k = 0; k < 2; k++) { len = phy->next_read_size; r = i2c_master_recv(client, tmp, len); if (r != len) { dev_dbg(&client->dev, "%s: i2c recv err: %d\n", __func__, r); goto flush; } /* Check packet integruty */ for (lrc = i = 0; i < r; i++) lrc ^= tmp[i]; /* * LRC check failed. This may due to transmission error or * desynchronization between driver and FDP. 
Drop the packet * and force resynchronization */ if (lrc) { dev_dbg(&client->dev, "%s: corrupted packet\n", __func__); phy->next_read_size = 5; goto flush; } /* Packet that contains a length */ if (tmp[0] == 0 && tmp[1] == 0) { phy->next_read_size = (tmp[2] << 8) + tmp[3] + 3; } else { phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD; *skb = alloc_skb(len, GFP_KERNEL); if (*skb == NULL) { r = -ENOMEM; goto flush; } skb_put_data(*skb, tmp, len); fdp_nci_i2c_dump_skb(&client->dev, "fdp_rd", *skb); fdp_nci_i2c_remove_len_lrc(*skb); } } return 0; flush: /* Flush the remaining data */ if (i2c_master_recv(client, tmp, sizeof(tmp)) < 0) r = -EREMOTEIO; return r; } static irqreturn_t fdp_nci_i2c_irq_thread_fn(int irq, void *phy_id) { struct fdp_i2c_phy *phy = phy_id; struct sk_buff *skb; int r; if (!phy || irq != phy->i2c_dev->irq) { WARN_ON_ONCE(1); return IRQ_NONE; } r = fdp_nci_i2c_read(phy, &skb); if (r == -EREMOTEIO || r == -ENOMEM || r == -EBADMSG) return IRQ_HANDLED; if (skb != NULL) nci_recv_frame(phy->ndev, skb); return IRQ_HANDLED; } static void fdp_nci_i2c_read_device_properties(struct device *dev, u8 *clock_type, u32 *clock_freq, u8 **fw_vsc_cfg) { int r; u8 len; r = device_property_read_u8(dev, FDP_DP_CLOCK_TYPE_NAME, clock_type); if (r) { dev_dbg(dev, "Using default clock type\n"); *clock_type = 0; } r = device_property_read_u32(dev, FDP_DP_CLOCK_FREQ_NAME, clock_freq); if (r) { dev_dbg(dev, "Using default clock frequency\n"); *clock_freq = 26000; } if (device_property_present(dev, FDP_DP_FW_VSC_CFG_NAME)) { r = device_property_read_u8(dev, FDP_DP_FW_VSC_CFG_NAME, &len); if (r || len <= 0) goto vsc_read_err; /* Add 1 to the length to include the length byte itself */ len++; *fw_vsc_cfg = devm_kmalloc_array(dev, len, sizeof(**fw_vsc_cfg), GFP_KERNEL); if (!*fw_vsc_cfg) goto alloc_err; r = device_property_read_u8_array(dev, FDP_DP_FW_VSC_CFG_NAME, *fw_vsc_cfg, len); if (r) { devm_kfree(dev, *fw_vsc_cfg); goto vsc_read_err; } } else { vsc_read_err: dev_dbg(dev, "FW vendor specific commands not present\n"); *fw_vsc_cfg = NULL; } alloc_err: dev_dbg(dev, "Clock type: %d, clock frequency: %d, VSC: %s\n", *clock_type, *clock_freq, *fw_vsc_cfg != NULL ?
"yes" : "no"); } static const struct acpi_gpio_params power_gpios = { 0, 0, false }; static const struct acpi_gpio_mapping acpi_fdp_gpios[] = { { "power-gpios", &power_gpios, 1 }, {}, }; static int fdp_nci_i2c_probe(struct i2c_client *client) { struct fdp_i2c_phy *phy; struct device *dev = &client->dev; u8 *fw_vsc_cfg; u8 clock_type; u32 clock_freq; int r = 0; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { nfc_err(dev, "No I2C_FUNC_I2C support\n"); return -ENODEV; } /* Checking if we have an irq */ if (client->irq <= 0) { nfc_err(dev, "IRQ not present\n"); return -ENODEV; } phy = devm_kzalloc(dev, sizeof(struct fdp_i2c_phy), GFP_KERNEL); if (!phy) return -ENOMEM; phy->i2c_dev = client; phy->next_read_size = FDP_NCI_I2C_MIN_PAYLOAD; i2c_set_clientdata(client, phy); r = devm_request_threaded_irq(dev, client->irq, NULL, fdp_nci_i2c_irq_thread_fn, IRQF_TRIGGER_RISING | IRQF_ONESHOT, FDP_I2C_DRIVER_NAME, phy); if (r < 0) { nfc_err(&client->dev, "Unable to register IRQ handler\n"); return r; } r = devm_acpi_dev_add_driver_gpios(dev, acpi_fdp_gpios); if (r) dev_dbg(dev, "Unable to add GPIO mapping table\n"); /* Requesting the power gpio */ phy->power_gpio = devm_gpiod_get(dev, "power", GPIOD_OUT_LOW); if (IS_ERR(phy->power_gpio)) { nfc_err(dev, "Power GPIO request failed\n"); return PTR_ERR(phy->power_gpio); } /* read device properties to get the clock and production settings */ fdp_nci_i2c_read_device_properties(dev, &clock_type, &clock_freq, &fw_vsc_cfg); /* Call the NFC specific probe function */ r = fdp_nci_probe(phy, &i2c_phy_ops, &phy->ndev, FDP_FRAME_HEADROOM, FDP_FRAME_TAILROOM, clock_type, clock_freq, fw_vsc_cfg); if (r < 0) { nfc_err(dev, "NCI probing error\n"); return r; } return 0; } static void fdp_nci_i2c_remove(struct i2c_client *client) { struct fdp_i2c_phy *phy = i2c_get_clientdata(client); fdp_nci_remove(phy->ndev); fdp_nci_i2c_disable(phy); } static const struct acpi_device_id fdp_nci_i2c_acpi_match[] = { {"INT339A", 0}, {} }; MODULE_DEVICE_TABLE(acpi, fdp_nci_i2c_acpi_match); static struct i2c_driver fdp_nci_i2c_driver = { .driver = { .name = FDP_I2C_DRIVER_NAME, .acpi_match_table = fdp_nci_i2c_acpi_match, }, .probe = fdp_nci_i2c_probe, .remove = fdp_nci_i2c_remove, }; module_i2c_driver(fdp_nci_i2c_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("I2C driver for Intel Fields Peak NFC controller"); MODULE_AUTHOR("Robert Dolca <[email protected]>");
linux-master
drivers/nfc/fdp/i2c.c
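A sketch of the i2c framing that fdp_nci_i2c_add_len_lrc() builds above: a two-byte big-endian length header and a trailing LRC that XORs the header and the payload. The helper name and the sample command bytes are hypothetical.

#include <stdint.h>
#include <stdio.h>

/* Frame: [len_hi][len_lo][payload...][lrc]; the LRC covers the two length
 * bytes plus the payload, as in fdp_nci_i2c_add_len_lrc(). */
static size_t fdp_frame_build(const uint8_t *payload, uint16_t len, uint8_t *out)
{
	uint8_t lrc = 0;
	size_t i;

	out[0] = len >> 8;
	out[1] = len & 0xff;
	for (i = 0; i < len; i++)
		out[2 + i] = payload[i];
	for (i = 0; i < (size_t)len + 2; i++)
		lrc ^= out[i];
	out[2 + len] = lrc;
	return (size_t)len + 3;
}

int main(void)
{
	uint8_t nci[] = { 0x20, 0x00, 0x01, 0x00 };	/* hypothetical NCI cmd */
	uint8_t frame[16];
	size_t n = fdp_frame_build(nci, sizeof(nci), frame);

	/* On the read side, a "length packet" from the chip starts with
	 * 0x00 0x00 and announces the next read: (tmp[2] << 8) + tmp[3] + 3. */
	printf("framed %zu bytes, lrc=0x%02x\n", n, frame[n - 1]);
	return 0;
}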
// SPDX-License-Identifier: GPL-2.0-or-later /* * NCI based driver for Samsung S3FWRN5 NFC chip * * Copyright (C) 2015 Samsung Electrnoics * Robert Baldyga <[email protected]> */ #include <linux/completion.h> #include <linux/firmware.h> #include <crypto/hash.h> #include <crypto/sha1.h> #include "s3fwrn5.h" #include "firmware.h" struct s3fwrn5_fw_version { __u8 major; __u8 build1; __u8 build2; __u8 target; }; static int s3fwrn5_fw_send_msg(struct s3fwrn5_fw_info *fw_info, struct sk_buff *msg, struct sk_buff **rsp) { struct s3fwrn5_info *info = container_of(fw_info, struct s3fwrn5_info, fw_info); long ret; reinit_completion(&fw_info->completion); ret = s3fwrn5_write(info, msg); if (ret < 0) return ret; ret = wait_for_completion_interruptible_timeout( &fw_info->completion, msecs_to_jiffies(1000)); if (ret < 0) return ret; else if (ret == 0) return -ENXIO; if (!fw_info->rsp) return -EINVAL; *rsp = fw_info->rsp; fw_info->rsp = NULL; return 0; } static int s3fwrn5_fw_prep_msg(struct s3fwrn5_fw_info *fw_info, struct sk_buff **msg, u8 type, u8 code, const void *data, u16 len) { struct s3fwrn5_fw_header hdr; struct sk_buff *skb; hdr.type = type | fw_info->parity; fw_info->parity ^= 0x80; hdr.code = code; hdr.len = len; skb = alloc_skb(S3FWRN5_FW_HDR_SIZE + len, GFP_KERNEL); if (!skb) return -ENOMEM; skb_put_data(skb, &hdr, S3FWRN5_FW_HDR_SIZE); if (len) skb_put_data(skb, data, len); *msg = skb; return 0; } static int s3fwrn5_fw_get_bootinfo(struct s3fwrn5_fw_info *fw_info, struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo) { struct sk_buff *msg, *rsp = NULL; struct s3fwrn5_fw_header *hdr; int ret; /* Send GET_BOOTINFO command */ ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD, S3FWRN5_FW_CMD_GET_BOOTINFO, NULL, 0); if (ret < 0) return ret; ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp); kfree_skb(msg); if (ret < 0) return ret; hdr = (struct s3fwrn5_fw_header *) rsp->data; if (hdr->code != S3FWRN5_FW_RET_SUCCESS) { ret = -EINVAL; goto out; } memcpy(bootinfo, rsp->data + S3FWRN5_FW_HDR_SIZE, 10); out: kfree_skb(rsp); return ret; } static int s3fwrn5_fw_enter_update_mode(struct s3fwrn5_fw_info *fw_info, const void *hash_data, u16 hash_size, const void *sig_data, u16 sig_size) { struct s3fwrn5_fw_cmd_enter_updatemode args; struct sk_buff *msg, *rsp = NULL; struct s3fwrn5_fw_header *hdr; int ret; /* Send ENTER_UPDATE_MODE command */ args.hashcode_size = hash_size; args.signature_size = sig_size; ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD, S3FWRN5_FW_CMD_ENTER_UPDATE_MODE, &args, sizeof(args)); if (ret < 0) return ret; ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp); kfree_skb(msg); if (ret < 0) return ret; hdr = (struct s3fwrn5_fw_header *) rsp->data; if (hdr->code != S3FWRN5_FW_RET_SUCCESS) { ret = -EPROTO; goto out; } kfree_skb(rsp); /* Send hashcode data */ ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_DATA, 0, hash_data, hash_size); if (ret < 0) return ret; ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp); kfree_skb(msg); if (ret < 0) return ret; hdr = (struct s3fwrn5_fw_header *) rsp->data; if (hdr->code != S3FWRN5_FW_RET_SUCCESS) { ret = -EPROTO; goto out; } kfree_skb(rsp); /* Send signature data */ ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_DATA, 0, sig_data, sig_size); if (ret < 0) return ret; ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp); kfree_skb(msg); if (ret < 0) return ret; hdr = (struct s3fwrn5_fw_header *) rsp->data; if (hdr->code != S3FWRN5_FW_RET_SUCCESS) ret = -EPROTO; out: kfree_skb(rsp); return ret; } static int 
s3fwrn5_fw_update_sector(struct s3fwrn5_fw_info *fw_info, u32 base_addr, const void *data) { struct s3fwrn5_fw_cmd_update_sector args; struct sk_buff *msg, *rsp = NULL; struct s3fwrn5_fw_header *hdr; int ret, i; /* Send UPDATE_SECTOR command */ args.base_address = base_addr; ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD, S3FWRN5_FW_CMD_UPDATE_SECTOR, &args, sizeof(args)); if (ret < 0) return ret; ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp); kfree_skb(msg); if (ret < 0) return ret; hdr = (struct s3fwrn5_fw_header *) rsp->data; if (hdr->code != S3FWRN5_FW_RET_SUCCESS) { ret = -EPROTO; goto err; } kfree_skb(rsp); /* Send data split into 256-byte packets */ for (i = 0; i < 16; ++i) { ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_DATA, 0, data+256*i, 256); if (ret < 0) break; ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp); kfree_skb(msg); if (ret < 0) break; hdr = (struct s3fwrn5_fw_header *) rsp->data; if (hdr->code != S3FWRN5_FW_RET_SUCCESS) { ret = -EPROTO; goto err; } kfree_skb(rsp); } return ret; err: kfree_skb(rsp); return ret; } static int s3fwrn5_fw_complete_update_mode(struct s3fwrn5_fw_info *fw_info) { struct sk_buff *msg, *rsp = NULL; struct s3fwrn5_fw_header *hdr; int ret; /* Send COMPLETE_UPDATE_MODE command */ ret = s3fwrn5_fw_prep_msg(fw_info, &msg, S3FWRN5_FW_MSG_CMD, S3FWRN5_FW_CMD_COMPLETE_UPDATE_MODE, NULL, 0); if (ret < 0) return ret; ret = s3fwrn5_fw_send_msg(fw_info, msg, &rsp); kfree_skb(msg); if (ret < 0) return ret; hdr = (struct s3fwrn5_fw_header *) rsp->data; if (hdr->code != S3FWRN5_FW_RET_SUCCESS) ret = -EPROTO; kfree_skb(rsp); return ret; } /* * Firmware header structure: * * 0x00 - 0x0B : Date and time string (w/o NUL termination) * 0x10 - 0x13 : Firmware version * 0x14 - 0x17 : Signature address * 0x18 - 0x1B : Signature size * 0x1C - 0x1F : Firmware image address * 0x20 - 0x23 : Firmware sectors count * 0x24 - 0x27 : Custom signature address * 0x28 - 0x2B : Custom signature size */ #define S3FWRN5_FW_IMAGE_HEADER_SIZE 44 int s3fwrn5_fw_request_firmware(struct s3fwrn5_fw_info *fw_info) { struct s3fwrn5_fw_image *fw = &fw_info->fw; u32 sig_off; u32 image_off; u32 custom_sig_off; int ret; ret = request_firmware(&fw->fw, fw_info->fw_name, &fw_info->ndev->nfc_dev->dev); if (ret < 0) return ret; if (fw->fw->size < S3FWRN5_FW_IMAGE_HEADER_SIZE) { release_firmware(fw->fw); return -EINVAL; } memcpy(fw->date, fw->fw->data + 0x00, 12); fw->date[12] = '\0'; memcpy(&fw->version, fw->fw->data + 0x10, 4); memcpy(&sig_off, fw->fw->data + 0x14, 4); fw->sig = fw->fw->data + sig_off; memcpy(&fw->sig_size, fw->fw->data + 0x18, 4); memcpy(&image_off, fw->fw->data + 0x1C, 4); fw->image = fw->fw->data + image_off; memcpy(&fw->image_sectors, fw->fw->data + 0x20, 4); memcpy(&custom_sig_off, fw->fw->data + 0x24, 4); fw->custom_sig = fw->fw->data + custom_sig_off; memcpy(&fw->custom_sig_size, fw->fw->data + 0x28, 4); return 0; } static void s3fwrn5_fw_release_firmware(struct s3fwrn5_fw_info *fw_info) { release_firmware(fw_info->fw.fw); } static int s3fwrn5_fw_get_base_addr( struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo, u32 *base_addr) { int i; static const struct { u8 version[4]; u32 base_addr; } match[] = { {{0x05, 0x00, 0x00, 0x00}, 0x00005000}, {{0x05, 0x00, 0x00, 0x01}, 0x00003000}, {{0x05, 0x00, 0x00, 0x02}, 0x00003000}, {{0x05, 0x00, 0x00, 0x03}, 0x00003000}, {{0x05, 0x00, 0x00, 0x05}, 0x00003000} }; for (i = 0; i < ARRAY_SIZE(match); ++i) if (bootinfo->hw_version[0] == match[i].version[0] && bootinfo->hw_version[1] == match[i].version[1] && 
bootinfo->hw_version[3] == match[i].version[3]) { *base_addr = match[i].base_addr; return 0; } return -EINVAL; } static inline bool s3fwrn5_fw_is_custom(const struct s3fwrn5_fw_cmd_get_bootinfo_rsp *bootinfo) { return !!bootinfo->hw_version[2]; } int s3fwrn5_fw_setup(struct s3fwrn5_fw_info *fw_info) { struct device *dev = &fw_info->ndev->nfc_dev->dev; struct s3fwrn5_fw_cmd_get_bootinfo_rsp bootinfo; int ret; /* Get bootloader info */ ret = s3fwrn5_fw_get_bootinfo(fw_info, &bootinfo); if (ret < 0) { dev_err(dev, "Failed to get bootinfo, ret=%02x\n", ret); goto err; } /* Match hardware version to obtain firmware base address */ ret = s3fwrn5_fw_get_base_addr(&bootinfo, &fw_info->base_addr); if (ret < 0) { dev_err(dev, "Unknown hardware version\n"); goto err; } fw_info->sector_size = bootinfo.sector_size; fw_info->sig_size = s3fwrn5_fw_is_custom(&bootinfo) ? fw_info->fw.custom_sig_size : fw_info->fw.sig_size; fw_info->sig = s3fwrn5_fw_is_custom(&bootinfo) ? fw_info->fw.custom_sig : fw_info->fw.sig; return 0; err: s3fwrn5_fw_release_firmware(fw_info); return ret; } bool s3fwrn5_fw_check_version(const struct s3fwrn5_fw_info *fw_info, u32 version) { struct s3fwrn5_fw_version *new = (void *) &fw_info->fw.version; struct s3fwrn5_fw_version *old = (void *) &version; if (new->major > old->major) return true; if (new->build1 > old->build1) return true; if (new->build2 > old->build2) return true; return false; } int s3fwrn5_fw_download(struct s3fwrn5_fw_info *fw_info) { struct device *dev = &fw_info->ndev->nfc_dev->dev; struct s3fwrn5_fw_image *fw = &fw_info->fw; u8 hash_data[SHA1_DIGEST_SIZE]; struct crypto_shash *tfm; u32 image_size, off; int ret; image_size = fw_info->sector_size * fw->image_sectors; /* Compute SHA of firmware data */ tfm = crypto_alloc_shash("sha1", 0, 0); if (IS_ERR(tfm)) { dev_err(dev, "Cannot allocate shash (code=%pe)\n", tfm); return PTR_ERR(tfm); } ret = crypto_shash_tfm_digest(tfm, fw->image, image_size, hash_data); crypto_free_shash(tfm); if (ret) { dev_err(dev, "Cannot compute hash (code=%d)\n", ret); return ret; } /* Firmware update process */ dev_info(dev, "Firmware update: %s\n", fw_info->fw_name); ret = s3fwrn5_fw_enter_update_mode(fw_info, hash_data, SHA1_DIGEST_SIZE, fw_info->sig, fw_info->sig_size); if (ret < 0) { dev_err(dev, "Unable to enter update mode\n"); return ret; } for (off = 0; off < image_size; off += fw_info->sector_size) { ret = s3fwrn5_fw_update_sector(fw_info, fw_info->base_addr + off, fw->image + off); if (ret < 0) { dev_err(dev, "Firmware update error (code=%d)\n", ret); return ret; } } ret = s3fwrn5_fw_complete_update_mode(fw_info); if (ret < 0) { dev_err(dev, "Unable to complete update mode\n"); return ret; } dev_info(dev, "Firmware update: success\n"); return ret; } void s3fwrn5_fw_init(struct s3fwrn5_fw_info *fw_info, const char *fw_name) { fw_info->parity = 0x00; fw_info->rsp = NULL; fw_info->fw.fw = NULL; strcpy(fw_info->fw_name, fw_name); init_completion(&fw_info->completion); } void s3fwrn5_fw_cleanup(struct s3fwrn5_fw_info *fw_info) { s3fwrn5_fw_release_firmware(fw_info); } int s3fwrn5_fw_recv_frame(struct nci_dev *ndev, struct sk_buff *skb) { struct s3fwrn5_info *info = nci_get_drvdata(ndev); struct s3fwrn5_fw_info *fw_info = &info->fw_info; if (WARN_ON(fw_info->rsp)) { kfree_skb(skb); return -EINVAL; } fw_info->rsp = skb; complete(&fw_info->completion); return 0; }
linux-master
drivers/nfc/s3fwrn5/firmware.c
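The 44-byte image header parsed by s3fwrn5_fw_request_firmware() above holds fixed-offset fields (date string at 0x00, version at 0x10, signature offset and size at 0x14/0x18, image offset and sector count at 0x1C/0x20). A small sketch under the assumption that the fields are little-endian; the kernel driver memcpy()s them into host-order integers. The sample header bytes are fabricated for the demo.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Little-endian 32-bit read at a fixed header offset. */
static uint32_t get_le32(const uint8_t *buf, size_t off)
{
	return (uint32_t)buf[off] | ((uint32_t)buf[off + 1] << 8) |
	       ((uint32_t)buf[off + 2] << 16) | ((uint32_t)buf[off + 3] << 24);
}

int main(void)
{
	uint8_t hdr[44] = "160420150000";	/* fabricated date field; rest zero */
	char date[13];

	memcpy(date, hdr, 12);			/* 0x00-0x0B: date, no NUL on wire */
	date[12] = '\0';
	printf("date=%s version=0x%08x sig_off=%u image_off=%u sectors=%u\n",
	       date, (unsigned)get_le32(hdr, 0x10), (unsigned)get_le32(hdr, 0x14),
	       (unsigned)get_le32(hdr, 0x1c), (unsigned)get_le32(hdr, 0x20));
	return 0;
}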
// SPDX-License-Identifier: GPL-2.0-or-later /* * I2C Link Layer for Samsung S3FWRN5 NCI based Driver * * Copyright (C) 2015 Samsung Electrnoics * Robert Baldyga <[email protected]> */ #include <linux/clk.h> #include <linux/i2c.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/of_gpio.h> #include <linux/of_irq.h> #include <linux/module.h> #include <net/nfc/nfc.h> #include "phy_common.h" #define S3FWRN5_I2C_DRIVER_NAME "s3fwrn5_i2c" struct s3fwrn5_i2c_phy { struct phy_common common; struct i2c_client *i2c_dev; struct clk *clk; unsigned int irq_skip:1; }; static void s3fwrn5_i2c_set_mode(void *phy_id, enum s3fwrn5_mode mode) { struct s3fwrn5_i2c_phy *phy = phy_id; mutex_lock(&phy->common.mutex); if (s3fwrn5_phy_power_ctrl(&phy->common, mode) == false) goto out; phy->irq_skip = true; out: mutex_unlock(&phy->common.mutex); } static int s3fwrn5_i2c_write(void *phy_id, struct sk_buff *skb) { struct s3fwrn5_i2c_phy *phy = phy_id; int ret; mutex_lock(&phy->common.mutex); phy->irq_skip = false; ret = i2c_master_send(phy->i2c_dev, skb->data, skb->len); if (ret == -EREMOTEIO) { /* Retry, chip was in standby */ usleep_range(110000, 120000); ret = i2c_master_send(phy->i2c_dev, skb->data, skb->len); } mutex_unlock(&phy->common.mutex); if (ret < 0) return ret; if (ret != skb->len) return -EREMOTEIO; return 0; } static const struct s3fwrn5_phy_ops i2c_phy_ops = { .set_wake = s3fwrn5_phy_set_wake, .set_mode = s3fwrn5_i2c_set_mode, .get_mode = s3fwrn5_phy_get_mode, .write = s3fwrn5_i2c_write, }; static int s3fwrn5_i2c_read(struct s3fwrn5_i2c_phy *phy) { struct sk_buff *skb; size_t hdr_size; size_t data_len; char hdr[4]; int ret; hdr_size = (phy->common.mode == S3FWRN5_MODE_NCI) ? NCI_CTRL_HDR_SIZE : S3FWRN5_FW_HDR_SIZE; ret = i2c_master_recv(phy->i2c_dev, hdr, hdr_size); if (ret < 0) return ret; if (ret < hdr_size) return -EBADMSG; data_len = (phy->common.mode == S3FWRN5_MODE_NCI) ? 
((struct nci_ctrl_hdr *)hdr)->plen : ((struct s3fwrn5_fw_header *)hdr)->len; skb = alloc_skb(hdr_size + data_len, GFP_KERNEL); if (!skb) return -ENOMEM; skb_put_data(skb, hdr, hdr_size); if (data_len == 0) goto out; ret = i2c_master_recv(phy->i2c_dev, skb_put(skb, data_len), data_len); if (ret != data_len) { kfree_skb(skb); return -EBADMSG; } out: return s3fwrn5_recv_frame(phy->common.ndev, skb, phy->common.mode); } static irqreturn_t s3fwrn5_i2c_irq_thread_fn(int irq, void *phy_id) { struct s3fwrn5_i2c_phy *phy = phy_id; if (!phy || !phy->common.ndev) { WARN_ON_ONCE(1); return IRQ_NONE; } mutex_lock(&phy->common.mutex); if (phy->irq_skip) goto out; switch (phy->common.mode) { case S3FWRN5_MODE_NCI: case S3FWRN5_MODE_FW: s3fwrn5_i2c_read(phy); break; case S3FWRN5_MODE_COLD: break; } out: mutex_unlock(&phy->common.mutex); return IRQ_HANDLED; } static int s3fwrn5_i2c_parse_dt(struct i2c_client *client) { struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client); struct device_node *np = client->dev.of_node; if (!np) return -ENODEV; phy->common.gpio_en = of_get_named_gpio(np, "en-gpios", 0); if (!gpio_is_valid(phy->common.gpio_en)) { /* Support also deprecated property */ phy->common.gpio_en = of_get_named_gpio(np, "s3fwrn5,en-gpios", 0); if (!gpio_is_valid(phy->common.gpio_en)) return -ENODEV; } phy->common.gpio_fw_wake = of_get_named_gpio(np, "wake-gpios", 0); if (!gpio_is_valid(phy->common.gpio_fw_wake)) { /* Support also deprecated property */ phy->common.gpio_fw_wake = of_get_named_gpio(np, "s3fwrn5,fw-gpios", 0); if (!gpio_is_valid(phy->common.gpio_fw_wake)) return -ENODEV; } return 0; } static int s3fwrn5_i2c_probe(struct i2c_client *client) { struct s3fwrn5_i2c_phy *phy; int ret; phy = devm_kzalloc(&client->dev, sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; mutex_init(&phy->common.mutex); phy->common.mode = S3FWRN5_MODE_COLD; phy->irq_skip = true; phy->i2c_dev = client; i2c_set_clientdata(client, phy); ret = s3fwrn5_i2c_parse_dt(client); if (ret < 0) return ret; ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->common.gpio_en, GPIOF_OUT_INIT_HIGH, "s3fwrn5_en"); if (ret < 0) return ret; ret = devm_gpio_request_one(&phy->i2c_dev->dev, phy->common.gpio_fw_wake, GPIOF_OUT_INIT_LOW, "s3fwrn5_fw_wake"); if (ret < 0) return ret; /* * S3FWRN5 depends on a clock input ("XI" pin) to function properly. * Depending on the hardware configuration this could be an always-on * oscillator or some external clock that must be explicitly enabled. * Make sure the clock is running before starting S3FWRN5. 
*/ phy->clk = devm_clk_get_optional_enabled(&client->dev, NULL); if (IS_ERR(phy->clk)) return dev_err_probe(&client->dev, PTR_ERR(phy->clk), "failed to get clock\n"); ret = s3fwrn5_probe(&phy->common.ndev, phy, &phy->i2c_dev->dev, &i2c_phy_ops); if (ret < 0) return ret; ret = devm_request_threaded_irq(&client->dev, phy->i2c_dev->irq, NULL, s3fwrn5_i2c_irq_thread_fn, IRQF_ONESHOT, S3FWRN5_I2C_DRIVER_NAME, phy); if (ret) goto s3fwrn5_remove; return 0; s3fwrn5_remove: s3fwrn5_remove(phy->common.ndev); return ret; } static void s3fwrn5_i2c_remove(struct i2c_client *client) { struct s3fwrn5_i2c_phy *phy = i2c_get_clientdata(client); s3fwrn5_remove(phy->common.ndev); } static const struct i2c_device_id s3fwrn5_i2c_id_table[] = { {S3FWRN5_I2C_DRIVER_NAME, 0}, {} }; MODULE_DEVICE_TABLE(i2c, s3fwrn5_i2c_id_table); static const struct of_device_id of_s3fwrn5_i2c_match[] __maybe_unused = { { .compatible = "samsung,s3fwrn5-i2c", }, {} }; MODULE_DEVICE_TABLE(of, of_s3fwrn5_i2c_match); static struct i2c_driver s3fwrn5_i2c_driver = { .driver = { .name = S3FWRN5_I2C_DRIVER_NAME, .of_match_table = of_match_ptr(of_s3fwrn5_i2c_match), }, .probe = s3fwrn5_i2c_probe, .remove = s3fwrn5_i2c_remove, .id_table = s3fwrn5_i2c_id_table, }; module_i2c_driver(s3fwrn5_i2c_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("I2C driver for Samsung S3FWRN5"); MODULE_AUTHOR("Robert Baldyga <[email protected]>");
linux-master
drivers/nfc/s3fwrn5/i2c.c
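s3fwrn5_i2c_read() above pulls a frame in two transactions: a fixed-size header (NCI control header in NCI mode, firmware header in FW mode), then however many payload bytes the header announces. A user-space sketch of the NCI-mode case, with i2c_master_recv() stubbed by a canned buffer so it runs standalone; the wire bytes are fabricated.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NCI_CTRL_HDR_SIZE 3	/* MT/GID, OID, payload-length octets */

/* read_fn() stands in for i2c_master_recv(). */
static const uint8_t wire[] = { 0x40, 0x00, 0x02, 0xaa, 0xbb };
static size_t wire_pos;

static int read_fn(uint8_t *dst, size_t n)
{
	memcpy(dst, wire + wire_pos, n);
	wire_pos += n;
	return (int)n;
}

int main(void)
{
	uint8_t frame[260];

	read_fn(frame, NCI_CTRL_HDR_SIZE);		/* 1st xfer: the header */
	read_fn(frame + NCI_CTRL_HDR_SIZE, frame[2]);	/* 2nd xfer: plen bytes */
	printf("got %d-byte frame\n", NCI_CTRL_HDR_SIZE + frame[2]);
	return 0;
}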
// SPDX-License-Identifier: GPL-2.0-or-later /* * NCI based driver for Samsung S3FWRN5 NFC chip * * Copyright (C) 2015 Samsung Electrnoics * Robert Baldyga <[email protected]> */ #include <linux/module.h> #include <net/nfc/nci_core.h> #include "s3fwrn5.h" #include "firmware.h" #include "nci.h" #define S3FWRN5_NFC_PROTOCOLS (NFC_PROTO_JEWEL_MASK | \ NFC_PROTO_MIFARE_MASK | \ NFC_PROTO_FELICA_MASK | \ NFC_PROTO_ISO14443_MASK | \ NFC_PROTO_ISO14443_B_MASK | \ NFC_PROTO_ISO15693_MASK) static int s3fwrn5_firmware_init(struct s3fwrn5_info *info) { struct s3fwrn5_fw_info *fw_info = &info->fw_info; int ret; s3fwrn5_fw_init(fw_info, "sec_s3fwrn5_firmware.bin"); /* Get firmware data */ ret = s3fwrn5_fw_request_firmware(fw_info); if (ret < 0) dev_err(&fw_info->ndev->nfc_dev->dev, "Failed to get fw file, ret=%02x\n", ret); return ret; } static int s3fwrn5_firmware_update(struct s3fwrn5_info *info) { bool need_update; int ret; /* Update firmware */ s3fwrn5_set_wake(info, false); s3fwrn5_set_mode(info, S3FWRN5_MODE_FW); ret = s3fwrn5_fw_setup(&info->fw_info); if (ret < 0) return ret; need_update = s3fwrn5_fw_check_version(&info->fw_info, info->ndev->manufact_specific_info); if (!need_update) goto out; dev_info(&info->ndev->nfc_dev->dev, "Detected new firmware version\n"); ret = s3fwrn5_fw_download(&info->fw_info); if (ret < 0) goto out; /* Update RF configuration */ s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI); s3fwrn5_set_wake(info, true); ret = s3fwrn5_nci_rf_configure(info, "sec_s3fwrn5_rfreg.bin"); s3fwrn5_set_wake(info, false); out: s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD); s3fwrn5_fw_cleanup(&info->fw_info); return ret; } static int s3fwrn5_nci_open(struct nci_dev *ndev) { struct s3fwrn5_info *info = nci_get_drvdata(ndev); if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_COLD) return -EBUSY; s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI); s3fwrn5_set_wake(info, true); return 0; } static int s3fwrn5_nci_close(struct nci_dev *ndev) { struct s3fwrn5_info *info = nci_get_drvdata(ndev); s3fwrn5_set_wake(info, false); s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD); return 0; } static int s3fwrn5_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct s3fwrn5_info *info = nci_get_drvdata(ndev); int ret; mutex_lock(&info->mutex); if (s3fwrn5_get_mode(info) != S3FWRN5_MODE_NCI) { kfree_skb(skb); mutex_unlock(&info->mutex); return -EINVAL; } ret = s3fwrn5_write(info, skb); if (ret < 0) { kfree_skb(skb); mutex_unlock(&info->mutex); return ret; } consume_skb(skb); mutex_unlock(&info->mutex); return 0; } static int s3fwrn5_nci_post_setup(struct nci_dev *ndev) { struct s3fwrn5_info *info = nci_get_drvdata(ndev); int ret; if (s3fwrn5_firmware_init(info)) { //skip bootloader mode return 0; } ret = s3fwrn5_firmware_update(info); if (ret < 0) return ret; /* NCI core reset */ s3fwrn5_set_mode(info, S3FWRN5_MODE_NCI); s3fwrn5_set_wake(info, true); ret = nci_core_reset(info->ndev); if (ret < 0) return ret; return nci_core_init(info->ndev); } static const struct nci_ops s3fwrn5_nci_ops = { .open = s3fwrn5_nci_open, .close = s3fwrn5_nci_close, .send = s3fwrn5_nci_send, .post_setup = s3fwrn5_nci_post_setup, .prop_ops = s3fwrn5_nci_prop_ops, .n_prop_ops = ARRAY_SIZE(s3fwrn5_nci_prop_ops), }; int s3fwrn5_probe(struct nci_dev **ndev, void *phy_id, struct device *pdev, const struct s3fwrn5_phy_ops *phy_ops) { struct s3fwrn5_info *info; int ret; info = devm_kzalloc(pdev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->phy_id = phy_id; info->pdev = pdev; info->phy_ops = phy_ops; mutex_init(&info->mutex); s3fwrn5_set_mode(info, 
S3FWRN5_MODE_COLD); info->ndev = nci_allocate_device(&s3fwrn5_nci_ops, S3FWRN5_NFC_PROTOCOLS, 0, 0); if (!info->ndev) return -ENOMEM; nci_set_parent_dev(info->ndev, pdev); nci_set_drvdata(info->ndev, info); ret = nci_register_device(info->ndev); if (ret < 0) { nci_free_device(info->ndev); return ret; } info->fw_info.ndev = info->ndev; *ndev = info->ndev; return ret; } EXPORT_SYMBOL(s3fwrn5_probe); void s3fwrn5_remove(struct nci_dev *ndev) { struct s3fwrn5_info *info = nci_get_drvdata(ndev); s3fwrn5_set_mode(info, S3FWRN5_MODE_COLD); nci_unregister_device(ndev); nci_free_device(ndev); } EXPORT_SYMBOL(s3fwrn5_remove); int s3fwrn5_recv_frame(struct nci_dev *ndev, struct sk_buff *skb, enum s3fwrn5_mode mode) { switch (mode) { case S3FWRN5_MODE_NCI: return nci_recv_frame(ndev, skb); case S3FWRN5_MODE_FW: return s3fwrn5_fw_recv_frame(ndev, skb); default: kfree_skb(skb); return -ENODEV; } } EXPORT_SYMBOL(s3fwrn5_recv_frame); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Samsung S3FWRN5 NFC driver"); MODULE_AUTHOR("Robert Baldyga <[email protected]>");
linux-master
drivers/nfc/s3fwrn5/core.c
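The update decision in s3fwrn5_firmware_update() above relies on s3fwrn5_fw_check_version() (defined in firmware.c), which compares the version fields independently rather than lexicographically: any single field being newer requests an update. A sketch showing the consequence; whether this behavior is intentional is not stated in the source.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct fw_version { uint8_t major, build1, build2, target; };

/* Field-wise comparison mirroring s3fwrn5_fw_check_version(): not a
 * lexicographic compare, so a file that is older overall can still win
 * on one field. */
static bool fw_needs_update(struct fw_version file, struct fw_version chip)
{
	return file.major > chip.major || file.build1 > chip.build1 ||
	       file.build2 > chip.build2;
}

int main(void)
{
	struct fw_version newer = { 2, 0, 5, 0 }, older = { 1, 9, 7, 0 };

	/* expected case: newer major wins */
	printf("file 2.0.5 vs chip 1.9.7: %d\n", fw_needs_update(newer, older));
	/* quirk: 1.9.7 still "needs update" over 2.0.5 because 9 > 0 */
	printf("file 1.9.7 vs chip 2.0.5: %d\n", fw_needs_update(older, newer));
	return 0;
}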
// SPDX-License-Identifier: GPL-2.0-or-later /* * NCI based driver for Samsung S3FWRN5 NFC chip * * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <[email protected]> */ #include <linux/completion.h> #include <linux/firmware.h> #include "s3fwrn5.h" #include "nci.h" static int s3fwrn5_nci_prop_rsp(struct nci_dev *ndev, struct sk_buff *skb) { __u8 status = skb->data[0]; nci_req_complete(ndev, status); return 0; } const struct nci_driver_ops s3fwrn5_nci_prop_ops[4] = { { .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY, NCI_PROP_SET_RFREG), .rsp = s3fwrn5_nci_prop_rsp, }, { .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY, NCI_PROP_START_RFREG), .rsp = s3fwrn5_nci_prop_rsp, }, { .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY, NCI_PROP_STOP_RFREG), .rsp = s3fwrn5_nci_prop_rsp, }, { .opcode = nci_opcode_pack(NCI_GID_PROPRIETARY, NCI_PROP_FW_CFG), .rsp = s3fwrn5_nci_prop_rsp, }, }; #define S3FWRN5_RFREG_SECTION_SIZE 252 int s3fwrn5_nci_rf_configure(struct s3fwrn5_info *info, const char *fw_name) { struct device *dev = &info->ndev->nfc_dev->dev; const struct firmware *fw; struct nci_prop_fw_cfg_cmd fw_cfg; struct nci_prop_set_rfreg_cmd set_rfreg; struct nci_prop_stop_rfreg_cmd stop_rfreg; u32 checksum; int i, len; int ret; ret = request_firmware(&fw, fw_name, dev); if (ret < 0) return ret; /* Compute rfreg checksum */ checksum = 0; for (i = 0; i < fw->size; i += 4) checksum += *((u32 *)(fw->data+i)); /* Set default clock configuration for external crystal */ fw_cfg.clk_type = 0x01; fw_cfg.clk_speed = 0xff; fw_cfg.clk_req = 0xff; ret = nci_prop_cmd(info->ndev, NCI_PROP_FW_CFG, sizeof(fw_cfg), (__u8 *)&fw_cfg); if (ret < 0) goto out; /* Start rfreg configuration */ dev_info(dev, "rfreg configuration update: %s\n", fw_name); ret = nci_prop_cmd(info->ndev, NCI_PROP_START_RFREG, 0, NULL); if (ret < 0) { dev_err(dev, "Unable to start rfreg update\n"); goto out; } /* Update rfreg */ set_rfreg.index = 0; for (i = 0; i < fw->size; i += S3FWRN5_RFREG_SECTION_SIZE) { len = (fw->size - i < S3FWRN5_RFREG_SECTION_SIZE) ? (fw->size - i) : S3FWRN5_RFREG_SECTION_SIZE; memcpy(set_rfreg.data, fw->data+i, len); ret = nci_prop_cmd(info->ndev, NCI_PROP_SET_RFREG, len+1, (__u8 *)&set_rfreg); if (ret < 0) { dev_err(dev, "rfreg update error (code=%d)\n", ret); goto out; } set_rfreg.index++; } /* Finish rfreg configuration */ stop_rfreg.checksum = checksum & 0xffff; ret = nci_prop_cmd(info->ndev, NCI_PROP_STOP_RFREG, sizeof(stop_rfreg), (__u8 *)&stop_rfreg); if (ret < 0) { dev_err(dev, "Unable to stop rfreg update\n"); goto out; } dev_info(dev, "rfreg configuration update: success\n"); out: release_firmware(fw); return ret; }
linux-master
drivers/nfc/s3fwrn5/nci.c
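A sketch of the two mechanical parts of s3fwrn5_nci_rf_configure() above: the 32-bit word sum over the rfreg blob (only the low 16 bits are sent in stop_rfreg.checksum) and the 252-byte sectioning of the blob into NCI_PROP_SET_RFREG commands. The blob here is dummy data, and memcpy() replaces the driver's direct u32 cast to stay alignment-safe.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define SECTION_SIZE 252	/* S3FWRN5_RFREG_SECTION_SIZE */

int main(void)
{
	uint8_t blob[600];	/* dummy rfreg image, multiple of 4 bytes */
	uint32_t checksum = 0;
	size_t i, len;

	memset(blob, 0xab, sizeof(blob));

	/* word-wise sum, as in the driver's checksum loop */
	for (i = 0; i < sizeof(blob); i += 4) {
		uint32_t w;

		memcpy(&w, blob + i, 4);
		checksum += w;
	}

	/* the blob is streamed in 252-byte sections, last one shorter */
	for (i = 0; i < sizeof(blob); i += SECTION_SIZE) {
		len = sizeof(blob) - i < SECTION_SIZE ?
		      sizeof(blob) - i : SECTION_SIZE;
		printf("section at %zu, %zu bytes\n", i, len);
	}
	printf("checksum=0x%04x\n", (unsigned)(checksum & 0xffff));
	return 0;
}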
// SPDX-License-Identifier: GPL-2.0-or-later /* * Link Layer for Samsung S3FWRN5 NCI based Driver * * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <[email protected]> * Copyright (C) 2020 Samsung Electronics * Bongsu Jeon <[email protected]> */ #include <linux/gpio.h> #include <linux/delay.h> #include <linux/module.h> #include "phy_common.h" void s3fwrn5_phy_set_wake(void *phy_id, bool wake) { struct phy_common *phy = phy_id; mutex_lock(&phy->mutex); gpio_set_value(phy->gpio_fw_wake, wake); if (wake) msleep(S3FWRN5_EN_WAIT_TIME); mutex_unlock(&phy->mutex); } EXPORT_SYMBOL(s3fwrn5_phy_set_wake); bool s3fwrn5_phy_power_ctrl(struct phy_common *phy, enum s3fwrn5_mode mode) { if (phy->mode == mode) return false; phy->mode = mode; gpio_set_value(phy->gpio_en, 1); gpio_set_value(phy->gpio_fw_wake, 0); if (mode == S3FWRN5_MODE_FW) gpio_set_value(phy->gpio_fw_wake, 1); if (mode != S3FWRN5_MODE_COLD) { msleep(S3FWRN5_EN_WAIT_TIME); gpio_set_value(phy->gpio_en, 0); msleep(S3FWRN5_EN_WAIT_TIME); } return true; } EXPORT_SYMBOL(s3fwrn5_phy_power_ctrl); void s3fwrn5_phy_set_mode(void *phy_id, enum s3fwrn5_mode mode) { struct phy_common *phy = phy_id; mutex_lock(&phy->mutex); s3fwrn5_phy_power_ctrl(phy, mode); mutex_unlock(&phy->mutex); } EXPORT_SYMBOL(s3fwrn5_phy_set_mode); enum s3fwrn5_mode s3fwrn5_phy_get_mode(void *phy_id) { struct phy_common *phy = phy_id; enum s3fwrn5_mode mode; mutex_lock(&phy->mutex); mode = phy->mode; mutex_unlock(&phy->mutex); return mode; } EXPORT_SYMBOL(s3fwrn5_phy_get_mode);
linux-master
drivers/nfc/s3fwrn5/phy_common.c
// SPDX-License-Identifier: GPL-2.0+ /* * UART Link Layer for S3FWRN82 NCI based Driver * * Copyright (C) 2015 Samsung Electronics * Robert Baldyga <[email protected]> * Copyright (C) 2020 Samsung Electronics * Bongsu Jeon <[email protected]> */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/nfc.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/serdev.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include "phy_common.h" #define S3FWRN82_NCI_HEADER 3 #define S3FWRN82_NCI_IDX 2 #define NCI_SKB_BUFF_LEN 258 struct s3fwrn82_uart_phy { struct phy_common common; struct serdev_device *ser_dev; struct sk_buff *recv_skb; }; static int s3fwrn82_uart_write(void *phy_id, struct sk_buff *out) { struct s3fwrn82_uart_phy *phy = phy_id; int err; err = serdev_device_write(phy->ser_dev, out->data, out->len, MAX_SCHEDULE_TIMEOUT); if (err < 0) return err; return 0; } static const struct s3fwrn5_phy_ops uart_phy_ops = { .set_wake = s3fwrn5_phy_set_wake, .set_mode = s3fwrn5_phy_set_mode, .get_mode = s3fwrn5_phy_get_mode, .write = s3fwrn82_uart_write, }; static int s3fwrn82_uart_read(struct serdev_device *serdev, const unsigned char *data, size_t count) { struct s3fwrn82_uart_phy *phy = serdev_device_get_drvdata(serdev); size_t i; for (i = 0; i < count; i++) { skb_put_u8(phy->recv_skb, *data++); if (phy->recv_skb->len < S3FWRN82_NCI_HEADER) continue; if ((phy->recv_skb->len - S3FWRN82_NCI_HEADER) < phy->recv_skb->data[S3FWRN82_NCI_IDX]) continue; s3fwrn5_recv_frame(phy->common.ndev, phy->recv_skb, phy->common.mode); phy->recv_skb = alloc_skb(NCI_SKB_BUFF_LEN, GFP_KERNEL); if (!phy->recv_skb) return 0; } return i; } static const struct serdev_device_ops s3fwrn82_serdev_ops = { .receive_buf = s3fwrn82_uart_read, .write_wakeup = serdev_device_write_wakeup, }; static const struct of_device_id s3fwrn82_uart_of_match[] = { { .compatible = "samsung,s3fwrn82", }, {}, }; MODULE_DEVICE_TABLE(of, s3fwrn82_uart_of_match); static int s3fwrn82_uart_parse_dt(struct serdev_device *serdev) { struct s3fwrn82_uart_phy *phy = serdev_device_get_drvdata(serdev); struct device_node *np = serdev->dev.of_node; if (!np) return -ENODEV; phy->common.gpio_en = of_get_named_gpio(np, "en-gpios", 0); if (!gpio_is_valid(phy->common.gpio_en)) return -ENODEV; phy->common.gpio_fw_wake = of_get_named_gpio(np, "wake-gpios", 0); if (!gpio_is_valid(phy->common.gpio_fw_wake)) return -ENODEV; return 0; } static int s3fwrn82_uart_probe(struct serdev_device *serdev) { struct s3fwrn82_uart_phy *phy; int ret = -ENOMEM; phy = devm_kzalloc(&serdev->dev, sizeof(*phy), GFP_KERNEL); if (!phy) goto err_exit; phy->recv_skb = alloc_skb(NCI_SKB_BUFF_LEN, GFP_KERNEL); if (!phy->recv_skb) goto err_exit; mutex_init(&phy->common.mutex); phy->common.mode = S3FWRN5_MODE_COLD; phy->ser_dev = serdev; serdev_device_set_drvdata(serdev, phy); serdev_device_set_client_ops(serdev, &s3fwrn82_serdev_ops); ret = serdev_device_open(serdev); if (ret) { dev_err(&serdev->dev, "Unable to open device\n"); goto err_skb; } ret = serdev_device_set_baudrate(serdev, 115200); if (ret != 115200) { ret = -EINVAL; goto err_serdev; } serdev_device_set_flow_control(serdev, false); ret = s3fwrn82_uart_parse_dt(serdev); if (ret < 0) goto err_serdev; ret = devm_gpio_request_one(&phy->ser_dev->dev, phy->common.gpio_en, GPIOF_OUT_INIT_HIGH, "s3fwrn82_en"); if (ret < 0) goto err_serdev; ret = devm_gpio_request_one(&phy->ser_dev->dev, phy->common.gpio_fw_wake, GPIOF_OUT_INIT_LOW, "s3fwrn82_fw_wake"); if (ret < 0) goto err_serdev; 
ret = s3fwrn5_probe(&phy->common.ndev, phy, &phy->ser_dev->dev, &uart_phy_ops); if (ret < 0) goto err_serdev; return ret; err_serdev: serdev_device_close(serdev); err_skb: kfree_skb(phy->recv_skb); err_exit: return ret; } static void s3fwrn82_uart_remove(struct serdev_device *serdev) { struct s3fwrn82_uart_phy *phy = serdev_device_get_drvdata(serdev); s3fwrn5_remove(phy->common.ndev); serdev_device_close(serdev); kfree_skb(phy->recv_skb); } static struct serdev_device_driver s3fwrn82_uart_driver = { .probe = s3fwrn82_uart_probe, .remove = s3fwrn82_uart_remove, .driver = { .name = "s3fwrn82_uart", .of_match_table = s3fwrn82_uart_of_match, }, }; module_serdev_device_driver(s3fwrn82_uart_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("UART driver for Samsung NFC"); MODULE_AUTHOR("Bongsu Jeon <[email protected]>");
linux-master
drivers/nfc/s3fwrn5/uart.c
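The byte-at-a-time reassembly loop in s3fwrn82_uart_read() above implements a simple length-prefixed framing rule: an NCI packet carries a 3-byte header, and the third header byte announces how many payload bytes follow. A minimal standalone sketch of that rule, assuming plain C99; nci_frame_complete() is an illustrative name, not a kernel symbol:

#include <stdbool.h>
#include <stddef.h>

#define NCI_HEADER_LEN 3	/* mirrors S3FWRN82_NCI_HEADER */
#define NCI_LEN_IDX    2	/* mirrors S3FWRN82_NCI_IDX */

/* True once buf holds at least one complete NCI packet. */
static bool nci_frame_complete(const unsigned char *buf, size_t len)
{
	if (len < NCI_HEADER_LEN)
		return false;	/* still waiting for the header */
	/* the third header byte gives the payload length */
	return len - NCI_HEADER_LEN >= buf[NCI_LEN_IDX];
}

The two continue statements in the driver's receive loop encode exactly these two waits: first until the header is in, then until the announced payload has arrived.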
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for NXP PN533 NFC Chip - USB transport layer * * Copyright (C) 2011 Instituto Nokia de Tecnologia * Copyright (C) 2012-2013 Tieto Poland */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/nfc.h> #include <linux/netdevice.h> #include <net/nfc/nfc.h> #include "pn533.h" #define VERSION "0.1" #define PN533_VENDOR_ID 0x4CC #define PN533_PRODUCT_ID 0x2533 #define SCM_VENDOR_ID 0x4E6 #define SCL3711_PRODUCT_ID 0x5591 #define SONY_VENDOR_ID 0x054c #define PASORI_PRODUCT_ID 0x02e1 #define ACS_VENDOR_ID 0x072f #define ACR122U_PRODUCT_ID 0x2200 static const struct usb_device_id pn533_usb_table[] = { { USB_DEVICE(PN533_VENDOR_ID, PN533_PRODUCT_ID), .driver_info = PN533_DEVICE_STD }, { USB_DEVICE(SCM_VENDOR_ID, SCL3711_PRODUCT_ID), .driver_info = PN533_DEVICE_STD }, { USB_DEVICE(SONY_VENDOR_ID, PASORI_PRODUCT_ID), .driver_info = PN533_DEVICE_PASORI }, { USB_DEVICE(ACS_VENDOR_ID, ACR122U_PRODUCT_ID), .driver_info = PN533_DEVICE_ACR122U }, { } }; MODULE_DEVICE_TABLE(usb, pn533_usb_table); struct pn533_usb_phy { struct usb_device *udev; struct usb_interface *interface; struct urb *out_urb; struct urb *in_urb; struct urb *ack_urb; u8 *ack_buffer; struct pn533 *priv; }; static void pn533_recv_response(struct urb *urb) { struct pn533_usb_phy *phy = urb->context; struct sk_buff *skb = NULL; if (!urb->status) { skb = alloc_skb(urb->actual_length, GFP_ATOMIC); if (!skb) { nfc_err(&phy->udev->dev, "failed to alloc memory\n"); } else { skb_put_data(skb, urb->transfer_buffer, urb->actual_length); } } pn533_recv_frame(phy->priv, skb, urb->status); } static int pn533_submit_urb_for_response(struct pn533_usb_phy *phy, gfp_t flags) { phy->in_urb->complete = pn533_recv_response; return usb_submit_urb(phy->in_urb, flags); } static void pn533_recv_ack(struct urb *urb) { struct pn533_usb_phy *phy = urb->context; struct pn533 *priv = phy->priv; struct pn533_cmd *cmd = priv->cmd; struct pn533_std_frame *in_frame; int rc; cmd->status = urb->status; switch (urb->status) { case 0: break; /* success */ case -ECONNRESET: case -ENOENT: dev_dbg(&phy->udev->dev, "The urb has been stopped (status %d)\n", urb->status); goto sched_wq; case -ESHUTDOWN: default: nfc_err(&phy->udev->dev, "Urb failure (status %d)\n", urb->status); goto sched_wq; } in_frame = phy->in_urb->transfer_buffer; if (!pn533_rx_frame_is_ack(in_frame)) { nfc_err(&phy->udev->dev, "Received an invalid ack\n"); cmd->status = -EIO; goto sched_wq; } rc = pn533_submit_urb_for_response(phy, GFP_ATOMIC); if (rc) { nfc_err(&phy->udev->dev, "usb_submit_urb failed with result %d\n", rc); cmd->status = rc; goto sched_wq; } return; sched_wq: queue_work(priv->wq, &priv->cmd_complete_work); } static int pn533_submit_urb_for_ack(struct pn533_usb_phy *phy, gfp_t flags) { phy->in_urb->complete = pn533_recv_ack; return usb_submit_urb(phy->in_urb, flags); } static int pn533_usb_send_ack(struct pn533 *dev, gfp_t flags) { struct pn533_usb_phy *phy = dev->phy; static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ if (!phy->ack_buffer) { phy->ack_buffer = kmemdup(ack, sizeof(ack), flags); if (!phy->ack_buffer) return -ENOMEM; } phy->ack_urb->transfer_buffer = phy->ack_buffer; phy->ack_urb->transfer_buffer_length = sizeof(ack); return usb_submit_urb(phy->ack_urb, flags); } struct pn533_out_arg { struct pn533_usb_phy *phy; struct completion done; }; static int 
pn533_usb_send_frame(struct pn533 *dev, struct sk_buff *out) { struct pn533_usb_phy *phy = dev->phy; struct pn533_out_arg arg; void *cntx; int rc; if (phy->priv == NULL) phy->priv = dev; phy->out_urb->transfer_buffer = out->data; phy->out_urb->transfer_buffer_length = out->len; print_hex_dump_debug("PN533 TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); arg.phy = phy; init_completion(&arg.done); cntx = phy->out_urb->context; phy->out_urb->context = &arg; rc = usb_submit_urb(phy->out_urb, GFP_KERNEL); if (rc) return rc; wait_for_completion(&arg.done); phy->out_urb->context = cntx; if (dev->protocol_type == PN533_PROTO_REQ_RESP) { /* request the response for the sent packet directly */ rc = pn533_submit_urb_for_response(phy, GFP_KERNEL); if (rc) goto error; } else if (dev->protocol_type == PN533_PROTO_REQ_ACK_RESP) { /* request an ACK if that's the case */ rc = pn533_submit_urb_for_ack(phy, GFP_KERNEL); if (rc) goto error; } return 0; error: usb_unlink_urb(phy->out_urb); return rc; } static void pn533_usb_abort_cmd(struct pn533 *dev, gfp_t flags) { struct pn533_usb_phy *phy = dev->phy; /* ACR122U does not support any command which aborts the last * issued command (i.e. the ACK on a standard PN533). Additionally, * it behaves strangely, sending broken or incorrect responses, * when we cancel the urb before the chip has sent its response. */ if (dev->device_type == PN533_DEVICE_ACR122U) return; /* An ack will cancel the last issued command */ pn533_usb_send_ack(dev, flags); /* cancel the urb request */ usb_kill_urb(phy->in_urb); } /* ACR122 specific structs and functions */ /* ACS ACR122 pn533 frame definitions */ #define PN533_ACR122_TX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_tx_frame) \ + 2) #define PN533_ACR122_TX_FRAME_TAIL_LEN 0 #define PN533_ACR122_RX_FRAME_HEADER_LEN (sizeof(struct pn533_acr122_rx_frame) \ + 2) #define PN533_ACR122_RX_FRAME_TAIL_LEN 2 #define PN533_ACR122_FRAME_MAX_PAYLOAD_LEN PN533_STD_FRAME_MAX_PAYLOAD_LEN /* CCID message types */ #define PN533_ACR122_PC_TO_RDR_ICCPOWERON 0x62 #define PN533_ACR122_PC_TO_RDR_ESCAPE 0x6B #define PN533_ACR122_RDR_TO_PC_ESCAPE 0x83 struct pn533_acr122_ccid_hdr { u8 type; u32 datalen; u8 slot; u8 seq; /* * 3 msg specific bytes or status, error and 1 specific * byte for response msg */ u8 params[3]; u8 data[]; /* payload */ } __packed; struct pn533_acr122_apdu_hdr { u8 class; u8 ins; u8 p1; u8 p2; } __packed; struct pn533_acr122_tx_frame { struct pn533_acr122_ccid_hdr ccid; struct pn533_acr122_apdu_hdr apdu; u8 datalen; u8 data[]; /* pn533 frame: TFI ... */ } __packed; struct pn533_acr122_rx_frame { struct pn533_acr122_ccid_hdr ccid; u8 data[]; /* pn533 frame : TFI ... 
*/ } __packed; static void pn533_acr122_tx_frame_init(void *_frame, u8 cmd_code) { struct pn533_acr122_tx_frame *frame = _frame; frame->ccid.type = PN533_ACR122_PC_TO_RDR_ESCAPE; /* sizeof(apdu_hdr) + sizeof(datalen) */ frame->ccid.datalen = sizeof(frame->apdu) + 1; frame->ccid.slot = 0; frame->ccid.seq = 0; frame->ccid.params[0] = 0; frame->ccid.params[1] = 0; frame->ccid.params[2] = 0; frame->data[0] = PN533_STD_FRAME_DIR_OUT; frame->data[1] = cmd_code; frame->datalen = 2; /* data[0] + data[1] */ frame->apdu.class = 0xFF; frame->apdu.ins = 0; frame->apdu.p1 = 0; frame->apdu.p2 = 0; } static void pn533_acr122_tx_frame_finish(void *_frame) { struct pn533_acr122_tx_frame *frame = _frame; frame->ccid.datalen += frame->datalen; } static void pn533_acr122_tx_update_payload_len(void *_frame, int len) { struct pn533_acr122_tx_frame *frame = _frame; frame->datalen += len; } static bool pn533_acr122_is_rx_frame_valid(void *_frame, struct pn533 *dev) { struct pn533_acr122_rx_frame *frame = _frame; if (frame->ccid.type != 0x83) return false; if (!frame->ccid.datalen) return false; if (frame->data[frame->ccid.datalen - 2] == 0x63) return false; return true; } static int pn533_acr122_rx_frame_size(void *frame) { struct pn533_acr122_rx_frame *f = frame; /* f->ccid.datalen already includes tail length */ return sizeof(struct pn533_acr122_rx_frame) + f->ccid.datalen; } static u8 pn533_acr122_get_cmd_code(void *frame) { struct pn533_acr122_rx_frame *f = frame; return PN533_FRAME_CMD(f); } static struct pn533_frame_ops pn533_acr122_frame_ops = { .tx_frame_init = pn533_acr122_tx_frame_init, .tx_frame_finish = pn533_acr122_tx_frame_finish, .tx_update_payload_len = pn533_acr122_tx_update_payload_len, .tx_header_len = PN533_ACR122_TX_FRAME_HEADER_LEN, .tx_tail_len = PN533_ACR122_TX_FRAME_TAIL_LEN, .rx_is_frame_valid = pn533_acr122_is_rx_frame_valid, .rx_header_len = PN533_ACR122_RX_FRAME_HEADER_LEN, .rx_tail_len = PN533_ACR122_RX_FRAME_TAIL_LEN, .rx_frame_size = pn533_acr122_rx_frame_size, .max_payload_len = PN533_ACR122_FRAME_MAX_PAYLOAD_LEN, .get_cmd_code = pn533_acr122_get_cmd_code, }; struct pn533_acr122_poweron_rdr_arg { int rc; struct completion done; }; static void pn533_acr122_poweron_rdr_resp(struct urb *urb) { struct pn533_acr122_poweron_rdr_arg *arg = urb->context; print_hex_dump_debug("ACR122 RX: ", DUMP_PREFIX_NONE, 16, 1, urb->transfer_buffer, urb->transfer_buffer_length, false); arg->rc = urb->status; complete(&arg->done); } static int pn533_acr122_poweron_rdr(struct pn533_usb_phy *phy) { /* Power on the reader (CCID cmd) */ u8 cmd[10] = {PN533_ACR122_PC_TO_RDR_ICCPOWERON, 0, 0, 0, 0, 0, 0, 3, 0, 0}; char *buffer; int transferred; int rc; void *cntx; struct pn533_acr122_poweron_rdr_arg arg; buffer = kmemdup(cmd, sizeof(cmd), GFP_KERNEL); if (!buffer) return -ENOMEM; init_completion(&arg.done); cntx = phy->in_urb->context; /* backup context */ phy->in_urb->complete = pn533_acr122_poweron_rdr_resp; phy->in_urb->context = &arg; print_hex_dump_debug("ACR122 TX: ", DUMP_PREFIX_NONE, 16, 1, cmd, sizeof(cmd), false); rc = usb_bulk_msg(phy->udev, phy->out_urb->pipe, buffer, sizeof(cmd), &transferred, 5000); kfree(buffer); if (rc || (transferred != sizeof(cmd))) { nfc_err(&phy->udev->dev, "Reader power on cmd error %d\n", rc); return rc; } rc = usb_submit_urb(phy->in_urb, GFP_KERNEL); if (rc) { nfc_err(&phy->udev->dev, "Can't submit reader poweron cmd response %d\n", rc); return rc; } wait_for_completion(&arg.done); phy->in_urb->context = cntx; /* restore context */ return arg.rc; } static void 
pn533_out_complete(struct urb *urb) { struct pn533_out_arg *arg = urb->context; struct pn533_usb_phy *phy = arg->phy; switch (urb->status) { case 0: break; /* success */ case -ECONNRESET: case -ENOENT: dev_dbg(&phy->udev->dev, "The urb has been stopped (status %d)\n", urb->status); break; case -ESHUTDOWN: default: nfc_err(&phy->udev->dev, "Urb failure (status %d)\n", urb->status); } complete(&arg->done); } static void pn533_ack_complete(struct urb *urb) { struct pn533_usb_phy *phy = urb->context; switch (urb->status) { case 0: break; /* success */ case -ECONNRESET: case -ENOENT: dev_dbg(&phy->udev->dev, "The urb has been stopped (status %d)\n", urb->status); break; case -ESHUTDOWN: default: nfc_err(&phy->udev->dev, "Urb failure (status %d)\n", urb->status); } } static const struct pn533_phy_ops usb_phy_ops = { .send_frame = pn533_usb_send_frame, .send_ack = pn533_usb_send_ack, .abort_cmd = pn533_usb_abort_cmd, }; static int pn533_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct pn533 *priv; struct pn533_usb_phy *phy; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; int in_endpoint = 0; int out_endpoint = 0; int rc = -ENOMEM; int i; u32 protocols; enum pn533_protocol_type protocol_type = PN533_PROTO_REQ_ACK_RESP; struct pn533_frame_ops *fops = NULL; unsigned char *in_buf; int in_buf_len = PN533_EXT_FRAME_HEADER_LEN + PN533_STD_FRAME_MAX_PAYLOAD_LEN + PN533_STD_FRAME_TAIL_LEN; phy = devm_kzalloc(&interface->dev, sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; in_buf = kzalloc(in_buf_len, GFP_KERNEL); if (!in_buf) return -ENOMEM; phy->udev = usb_get_dev(interface_to_usbdev(interface)); phy->interface = interface; iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (!in_endpoint && usb_endpoint_is_bulk_in(endpoint)) in_endpoint = endpoint->bEndpointAddress; if (!out_endpoint && usb_endpoint_is_bulk_out(endpoint)) out_endpoint = endpoint->bEndpointAddress; } if (!in_endpoint || !out_endpoint) { nfc_err(&interface->dev, "Could not find bulk-in or bulk-out endpoint\n"); rc = -ENODEV; goto error; } phy->in_urb = usb_alloc_urb(0, GFP_KERNEL); phy->out_urb = usb_alloc_urb(0, GFP_KERNEL); phy->ack_urb = usb_alloc_urb(0, GFP_KERNEL); if (!phy->in_urb || !phy->out_urb || !phy->ack_urb) goto error; usb_fill_bulk_urb(phy->in_urb, phy->udev, usb_rcvbulkpipe(phy->udev, in_endpoint), in_buf, in_buf_len, NULL, phy); usb_fill_bulk_urb(phy->out_urb, phy->udev, usb_sndbulkpipe(phy->udev, out_endpoint), NULL, 0, pn533_out_complete, phy); usb_fill_bulk_urb(phy->ack_urb, phy->udev, usb_sndbulkpipe(phy->udev, out_endpoint), NULL, 0, pn533_ack_complete, phy); switch (id->driver_info) { case PN533_DEVICE_STD: protocols = PN533_ALL_PROTOCOLS; break; case PN533_DEVICE_PASORI: protocols = PN533_NO_TYPE_B_PROTOCOLS; break; case PN533_DEVICE_ACR122U: protocols = PN533_NO_TYPE_B_PROTOCOLS; fops = &pn533_acr122_frame_ops; protocol_type = PN533_PROTO_REQ_RESP; rc = pn533_acr122_poweron_rdr(phy); if (rc < 0) { nfc_err(&interface->dev, "Couldn't poweron the reader (error %d)\n", rc); goto error; } break; default: nfc_err(&interface->dev, "Unknown device type %lu\n", id->driver_info); rc = -EINVAL; goto error; } priv = pn53x_common_init(id->driver_info, protocol_type, phy, &usb_phy_ops, fops, &phy->udev->dev); if (IS_ERR(priv)) { rc = PTR_ERR(priv); goto error; } phy->priv = priv; rc = pn533_finalize_setup(priv); if (rc) goto err_clean; usb_set_intfdata(interface, phy); 
rc = pn53x_register_nfc(priv, protocols, &interface->dev); if (rc) goto err_clean; return 0; err_clean: pn53x_common_clean(priv); error: usb_kill_urb(phy->in_urb); usb_kill_urb(phy->out_urb); usb_kill_urb(phy->ack_urb); usb_free_urb(phy->in_urb); usb_free_urb(phy->out_urb); usb_free_urb(phy->ack_urb); usb_put_dev(phy->udev); kfree(in_buf); kfree(phy->ack_buffer); return rc; } static void pn533_usb_disconnect(struct usb_interface *interface) { struct pn533_usb_phy *phy = usb_get_intfdata(interface); if (!phy) return; pn53x_unregister_nfc(phy->priv); pn53x_common_clean(phy->priv); usb_set_intfdata(interface, NULL); usb_kill_urb(phy->in_urb); usb_kill_urb(phy->out_urb); usb_kill_urb(phy->ack_urb); kfree(phy->in_urb->transfer_buffer); usb_free_urb(phy->in_urb); usb_free_urb(phy->out_urb); usb_free_urb(phy->ack_urb); kfree(phy->ack_buffer); nfc_info(&interface->dev, "NXP PN533 NFC device disconnected\n"); } static struct usb_driver pn533_usb_driver = { .name = "pn533_usb", .probe = pn533_usb_probe, .disconnect = pn533_usb_disconnect, .id_table = pn533_usb_table, }; module_usb_driver(pn533_usb_driver); MODULE_AUTHOR("Lauro Ramos Venancio <[email protected]>"); MODULE_AUTHOR("Aloisio Almeida Jr <[email protected]>"); MODULE_AUTHOR("Waldemar Rymarkiewicz <[email protected]>"); MODULE_DESCRIPTION("PN533 USB driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
linux-master
drivers/nfc/pn533/usb.c
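pn533_usb_send_ack() above transmits the fixed six-byte PN533 ACK frame described by its spec comment: preamble, start-of-packet code (2 bytes), ACK code (2 bytes), postamble. A standalone sketch of the matching receive-side test, assuming a plain whole-buffer comparison (the core driver's pn533_rx_frame_is_ack() instead checks the SoF, a zero datalen, and a 0xFF length checksum); is_pn533_ack() is an illustrative name, not a driver API:

#include <stdbool.h>
#include <stddef.h>
#include <string.h>

/* Preamble, SoPC (2), ACK code (2), Postamble */
static const unsigned char pn533_ack_frame[6] = {
	0x00, 0x00, 0xff, 0x00, 0xff, 0x00
};

static bool is_pn533_ack(const unsigned char *buf, size_t len)
{
	return len == sizeof(pn533_ack_frame) &&
	       memcmp(buf, pn533_ack_frame, sizeof(pn533_ack_frame)) == 0;
}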
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for NXP PN533 NFC Chip - I2C transport layer * * Copyright (C) 2011 Instituto Nokia de Tecnologia * Copyright (C) 2012-2013 Tieto Poland * Copyright (C) 2016 HALE electronic */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/nfc.h> #include <linux/netdevice.h> #include <linux/interrupt.h> #include <net/nfc/nfc.h> #include "pn533.h" #define VERSION "0.1" #define PN533_I2C_DRIVER_NAME "pn533_i2c" struct pn533_i2c_phy { struct i2c_client *i2c_dev; struct pn533 *priv; bool aborted; int hard_fault; /* * < 0 if hardware error occurred (e.g. i2c err) * and prevents normal operation. */ }; static int pn533_i2c_send_ack(struct pn533 *dev, gfp_t flags) { struct pn533_i2c_phy *phy = dev->phy; struct i2c_client *client = phy->i2c_dev; static const u8 ack[6] = {0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; /* spec 6.2.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ return i2c_master_send(client, ack, 6); } static int pn533_i2c_send_frame(struct pn533 *dev, struct sk_buff *out) { struct pn533_i2c_phy *phy = dev->phy; struct i2c_client *client = phy->i2c_dev; int rc; if (phy->hard_fault != 0) return phy->hard_fault; if (phy->priv == NULL) phy->priv = dev; phy->aborted = false; print_hex_dump_debug("PN533_i2c TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); rc = i2c_master_send(client, out->data, out->len); if (rc == -EREMOTEIO) { /* Retry, chip was in power down */ usleep_range(6000, 10000); rc = i2c_master_send(client, out->data, out->len); } if (rc >= 0) { if (rc != out->len) rc = -EREMOTEIO; else rc = 0; } return rc; } static void pn533_i2c_abort_cmd(struct pn533 *dev, gfp_t flags) { struct pn533_i2c_phy *phy = dev->phy; phy->aborted = true; /* An ack will cancel the last issued command */ pn533_i2c_send_ack(dev, flags); /* schedule cmd_complete_work to finish current command execution */ pn533_recv_frame(phy->priv, NULL, -ENOENT); } static int pn533_i2c_read(struct pn533_i2c_phy *phy, struct sk_buff **skb) { struct i2c_client *client = phy->i2c_dev; int len = PN533_EXT_FRAME_HEADER_LEN + PN533_STD_FRAME_MAX_PAYLOAD_LEN + PN533_STD_FRAME_TAIL_LEN + 1; int r; *skb = alloc_skb(len, GFP_KERNEL); if (*skb == NULL) return -ENOMEM; r = i2c_master_recv(client, skb_put(*skb, len), len); if (r != len) { nfc_err(&client->dev, "cannot read. 
r=%d len=%d\n", r, len); kfree_skb(*skb); return -EREMOTEIO; } if (!((*skb)->data[0] & 0x01)) { nfc_err(&client->dev, "READY flag not set"); kfree_skb(*skb); return -EBUSY; } /* remove READY byte */ skb_pull(*skb, 1); /* trim to frame size */ skb_trim(*skb, phy->priv->ops->rx_frame_size((*skb)->data)); return 0; } static irqreturn_t pn533_i2c_irq_thread_fn(int irq, void *data) { struct pn533_i2c_phy *phy = data; struct sk_buff *skb = NULL; int r; if (!phy || irq != phy->i2c_dev->irq) { WARN_ON_ONCE(1); return IRQ_NONE; } if (phy->hard_fault != 0) return IRQ_HANDLED; r = pn533_i2c_read(phy, &skb); if (r == -EREMOTEIO) { phy->hard_fault = r; pn533_recv_frame(phy->priv, NULL, -EREMOTEIO); return IRQ_HANDLED; } else if ((r == -ENOMEM) || (r == -EBADMSG) || (r == -EBUSY)) { return IRQ_HANDLED; } if (!phy->aborted) pn533_recv_frame(phy->priv, skb, 0); return IRQ_HANDLED; } static const struct pn533_phy_ops i2c_phy_ops = { .send_frame = pn533_i2c_send_frame, .send_ack = pn533_i2c_send_ack, .abort_cmd = pn533_i2c_abort_cmd, }; static int pn533_i2c_probe(struct i2c_client *client) { struct pn533_i2c_phy *phy; struct pn533 *priv; int r = 0; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { nfc_err(&client->dev, "Need I2C_FUNC_I2C\n"); return -ENODEV; } phy = devm_kzalloc(&client->dev, sizeof(struct pn533_i2c_phy), GFP_KERNEL); if (!phy) return -ENOMEM; phy->i2c_dev = client; i2c_set_clientdata(client, phy); priv = pn53x_common_init(PN533_DEVICE_PN532, PN533_PROTO_REQ_ACK_RESP, phy, &i2c_phy_ops, NULL, &phy->i2c_dev->dev); if (IS_ERR(priv)) return PTR_ERR(priv); phy->priv = priv; r = pn532_i2c_nfc_alloc(priv, PN533_NO_TYPE_B_PROTOCOLS, &client->dev); if (r) goto nfc_alloc_err; r = request_threaded_irq(client->irq, NULL, pn533_i2c_irq_thread_fn, IRQF_TRIGGER_FALLING | IRQF_SHARED | IRQF_ONESHOT, PN533_I2C_DRIVER_NAME, phy); if (r < 0) { nfc_err(&client->dev, "Unable to register IRQ handler\n"); goto irq_rqst_err; } r = pn533_finalize_setup(priv); if (r) goto fn_setup_err; r = nfc_register_device(priv->nfc_dev); if (r) goto fn_setup_err; return r; fn_setup_err: free_irq(client->irq, phy); irq_rqst_err: nfc_free_device(priv->nfc_dev); nfc_alloc_err: pn53x_common_clean(phy->priv); return r; } static void pn533_i2c_remove(struct i2c_client *client) { struct pn533_i2c_phy *phy = i2c_get_clientdata(client); free_irq(client->irq, phy); pn53x_unregister_nfc(phy->priv); pn53x_common_clean(phy->priv); } static const struct of_device_id of_pn533_i2c_match[] __maybe_unused = { { .compatible = "nxp,pn532", }, /* * NOTE: The use of the compatibles with the trailing "...-i2c" is * deprecated and will be removed. */ { .compatible = "nxp,pn533-i2c", }, { .compatible = "nxp,pn532-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, of_pn533_i2c_match); static const struct i2c_device_id pn533_i2c_id_table[] = { { PN533_I2C_DRIVER_NAME, 0 }, {} }; MODULE_DEVICE_TABLE(i2c, pn533_i2c_id_table); static struct i2c_driver pn533_i2c_driver = { .driver = { .name = PN533_I2C_DRIVER_NAME, .of_match_table = of_match_ptr(of_pn533_i2c_match), }, .probe = pn533_i2c_probe, .id_table = pn533_i2c_id_table, .remove = pn533_i2c_remove, }; module_i2c_driver(pn533_i2c_driver); MODULE_AUTHOR("Michael Thalmeier <[email protected]>"); MODULE_DESCRIPTION("PN533 I2C driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
linux-master
drivers/nfc/pn533/i2c.c
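pn533_i2c_read() above depends on the PN532's I2C read convention: the chip prepends a status byte to every read, and bit 0 of that byte ("READY") must be set before the bytes that follow form a valid frame, which is why the driver tests (*skb)->data[0] & 0x01 and then pulls one byte off the skb. A minimal sketch of that convention in plain C; pn532_i2c_payload() is an illustrative name, not a driver symbol:

#include <stddef.h>

#define PN532_I2C_READY 0x01	/* bit 0 of the leading status byte */

/*
 * Return a pointer to the frame that follows the status byte, or NULL
 * when the chip reports not-ready and the caller should retry the read.
 */
static const unsigned char *pn532_i2c_payload(const unsigned char *buf,
					      size_t len)
{
	if (len < 1 || !(buf[0] & PN532_I2C_READY))
		return NULL;
	return buf + 1;
}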
// SPDX-License-Identifier: GPL-2.0-or-later /* * Driver for NXP PN533 NFC Chip - core functions * * Copyright (C) 2011 Instituto Nokia de Tecnologia * Copyright (C) 2012-2013 Tieto Poland */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/nfc.h> #include <linux/netdevice.h> #include <net/nfc/nfc.h> #include "pn533.h" #define VERSION "0.3" /* How much time we spend listening for initiators */ #define PN533_LISTEN_TIME 2 /* Delay between each poll frame (ms) */ #define PN533_POLL_INTERVAL 10 /* structs for pn533 commands */ /* PN533_CMD_GET_FIRMWARE_VERSION */ struct pn533_fw_version { u8 ic; u8 ver; u8 rev; u8 support; }; /* PN533_CMD_RF_CONFIGURATION */ #define PN533_CFGITEM_RF_FIELD 0x01 #define PN533_CFGITEM_TIMING 0x02 #define PN533_CFGITEM_MAX_RETRIES 0x05 #define PN533_CFGITEM_PASORI 0x82 #define PN533_CFGITEM_RF_FIELD_AUTO_RFCA 0x2 #define PN533_CFGITEM_RF_FIELD_ON 0x1 #define PN533_CFGITEM_RF_FIELD_OFF 0x0 #define PN533_CONFIG_TIMING_102 0xb #define PN533_CONFIG_TIMING_204 0xc #define PN533_CONFIG_TIMING_409 0xd #define PN533_CONFIG_TIMING_819 0xe #define PN533_CONFIG_MAX_RETRIES_NO_RETRY 0x00 #define PN533_CONFIG_MAX_RETRIES_ENDLESS 0xFF struct pn533_config_max_retries { u8 mx_rty_atr; u8 mx_rty_psl; u8 mx_rty_passive_act; } __packed; struct pn533_config_timing { u8 rfu; u8 atr_res_timeout; u8 dep_timeout; } __packed; /* PN533_CMD_IN_LIST_PASSIVE_TARGET */ /* felica commands opcode */ #define PN533_FELICA_OPC_SENSF_REQ 0 #define PN533_FELICA_OPC_SENSF_RES 1 /* felica SENSF_REQ parameters */ #define PN533_FELICA_SENSF_SC_ALL 0xFFFF #define PN533_FELICA_SENSF_RC_NO_SYSTEM_CODE 0 #define PN533_FELICA_SENSF_RC_SYSTEM_CODE 1 #define PN533_FELICA_SENSF_RC_ADVANCED_PROTOCOL 2 /* type B initiator_data values */ #define PN533_TYPE_B_AFI_ALL_FAMILIES 0 #define PN533_TYPE_B_POLL_METHOD_TIMESLOT 0 #define PN533_TYPE_B_POLL_METHOD_PROBABILISTIC 1 union pn533_cmd_poll_initdata { struct { u8 afi; u8 polling_method; } __packed type_b; struct { u8 opcode; __be16 sc; u8 rc; u8 tsn; } __packed felica; }; struct pn533_poll_modulations { struct { u8 maxtg; u8 brty; union pn533_cmd_poll_initdata initiator_data; } __packed data; u8 len; }; static const struct pn533_poll_modulations poll_mod[] = { [PN533_POLL_MOD_106KBPS_A] = { .data = { .maxtg = 1, .brty = 0, }, .len = 2, }, [PN533_POLL_MOD_212KBPS_FELICA] = { .data = { .maxtg = 1, .brty = 1, .initiator_data.felica = { .opcode = PN533_FELICA_OPC_SENSF_REQ, .sc = PN533_FELICA_SENSF_SC_ALL, .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE, .tsn = 0x03, }, }, .len = 7, }, [PN533_POLL_MOD_424KBPS_FELICA] = { .data = { .maxtg = 1, .brty = 2, .initiator_data.felica = { .opcode = PN533_FELICA_OPC_SENSF_REQ, .sc = PN533_FELICA_SENSF_SC_ALL, .rc = PN533_FELICA_SENSF_RC_SYSTEM_CODE, .tsn = 0x03, }, }, .len = 7, }, [PN533_POLL_MOD_106KBPS_JEWEL] = { .data = { .maxtg = 1, .brty = 4, }, .len = 2, }, [PN533_POLL_MOD_847KBPS_B] = { .data = { .maxtg = 1, .brty = 8, .initiator_data.type_b = { .afi = PN533_TYPE_B_AFI_ALL_FAMILIES, .polling_method = PN533_TYPE_B_POLL_METHOD_TIMESLOT, }, }, .len = 3, }, [PN533_LISTEN_MOD] = { .len = 0, }, }; /* PN533_CMD_IN_ATR */ struct pn533_cmd_activate_response { u8 status; u8 nfcid3t[10]; u8 didt; u8 bst; u8 brt; u8 to; u8 ppt; /* optional */ u8 gt[]; } __packed; struct pn533_cmd_jump_dep_response { u8 status; u8 tg; u8 nfcid3t[10]; u8 didt; u8 bst; u8 brt; u8 to; u8 ppt; /* optional */ u8 gt[]; } __packed; struct pn532_autopoll_resp { u8 type; u8 ln; u8 tg; u8 tgdata[]; }; 
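/*
 * Illustrative restatement (not an upstream symbol) of the checksum rule
 * the frame helpers below rely on: each covered region carries a checksum
 * byte chosen so that (sum of covered bytes + checksum) % 256 == 0, i.e.
 * the two's complement of the 8-bit sum. Worked example: for the bytes
 * { 0xd4, 0x4a, 0x01 } the 8-bit sum is 0x1f, so the checksum is 0xe1 and
 * 0x1f + 0xe1 wraps to zero. The driver's real helpers are
 * pn533_std_checksum() and pn533_std_data_checksum().
 */
static u8 checksum8(const u8 *data, size_t len)
{
	u8 sum = 0;
	size_t i;

	for (i = 0; i < len; i++)
		sum += data[i];
	return ~sum + 1;	/* two's complement of the running sum */
}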
/* PN532_CMD_IN_AUTOPOLL */ #define PN532_AUTOPOLL_POLLNR_INFINITE 0xff #define PN532_AUTOPOLL_PERIOD 0x03 /* in units of 150 ms */ #define PN532_AUTOPOLL_TYPE_GENERIC_106 0x00 #define PN532_AUTOPOLL_TYPE_GENERIC_212 0x01 #define PN532_AUTOPOLL_TYPE_GENERIC_424 0x02 #define PN532_AUTOPOLL_TYPE_JEWEL 0x04 #define PN532_AUTOPOLL_TYPE_MIFARE 0x10 #define PN532_AUTOPOLL_TYPE_FELICA212 0x11 #define PN532_AUTOPOLL_TYPE_FELICA424 0x12 #define PN532_AUTOPOLL_TYPE_ISOA 0x20 #define PN532_AUTOPOLL_TYPE_ISOB 0x23 #define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106 0x40 #define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212 0x41 #define PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424 0x42 #define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106 0x80 #define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_212 0x81 #define PN532_AUTOPOLL_TYPE_DEP_ACTIVE_424 0x82 /* PN533_TG_INIT_AS_TARGET */ #define PN533_INIT_TARGET_PASSIVE 0x1 #define PN533_INIT_TARGET_DEP 0x2 #define PN533_INIT_TARGET_RESP_FRAME_MASK 0x3 #define PN533_INIT_TARGET_RESP_ACTIVE 0x1 #define PN533_INIT_TARGET_RESP_DEP 0x4 /* The rule: value(high byte) + value(low byte) + checksum = 0 */ static inline u8 pn533_ext_checksum(u16 value) { return ~(u8)(((value & 0xFF00) >> 8) + (u8)(value & 0xFF)) + 1; } /* The rule: value + checksum = 0 */ static inline u8 pn533_std_checksum(u8 value) { return ~value + 1; } /* The rule: sum(data elements) + checksum = 0 */ static u8 pn533_std_data_checksum(u8 *data, int datalen) { u8 sum = 0; int i; for (i = 0; i < datalen; i++) sum += data[i]; return pn533_std_checksum(sum); } static void pn533_std_tx_frame_init(void *_frame, u8 cmd_code) { struct pn533_std_frame *frame = _frame; frame->preamble = 0; frame->start_frame = cpu_to_be16(PN533_STD_FRAME_SOF); PN533_STD_FRAME_IDENTIFIER(frame) = PN533_STD_FRAME_DIR_OUT; PN533_FRAME_CMD(frame) = cmd_code; frame->datalen = 2; } static void pn533_std_tx_frame_finish(void *_frame) { struct pn533_std_frame *frame = _frame; frame->datalen_checksum = pn533_std_checksum(frame->datalen); PN533_STD_FRAME_CHECKSUM(frame) = pn533_std_data_checksum(frame->data, frame->datalen); PN533_STD_FRAME_POSTAMBLE(frame) = 0; } static void pn533_std_tx_update_payload_len(void *_frame, int len) { struct pn533_std_frame *frame = _frame; frame->datalen += len; } static bool pn533_std_rx_frame_is_valid(void *_frame, struct pn533 *dev) { u8 checksum; struct pn533_std_frame *stdf = _frame; if (stdf->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF)) return false; if (likely(!PN533_STD_IS_EXTENDED(stdf))) { /* Standard frame code */ dev->ops->rx_header_len = PN533_STD_FRAME_HEADER_LEN; checksum = pn533_std_checksum(stdf->datalen); if (checksum != stdf->datalen_checksum) return false; checksum = pn533_std_data_checksum(stdf->data, stdf->datalen); if (checksum != PN533_STD_FRAME_CHECKSUM(stdf)) return false; } else { /* Extended */ struct pn533_ext_frame *eif = _frame; dev->ops->rx_header_len = PN533_EXT_FRAME_HEADER_LEN; checksum = pn533_ext_checksum(be16_to_cpu(eif->datalen)); if (checksum != eif->datalen_checksum) return false; /* check data checksum */ checksum = pn533_std_data_checksum(eif->data, be16_to_cpu(eif->datalen)); if (checksum != PN533_EXT_FRAME_CHECKSUM(eif)) return false; } return true; } bool pn533_rx_frame_is_ack(void *_frame) { struct pn533_std_frame *frame = _frame; if (frame->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF)) return false; if (frame->datalen != 0 || frame->datalen_checksum != 0xFF) return false; return true; } EXPORT_SYMBOL_GPL(pn533_rx_frame_is_ack); static inline int pn533_std_rx_frame_size(void *frame) { struct 
pn533_std_frame *f = frame; /* check for Extended Information frame */ if (PN533_STD_IS_EXTENDED(f)) { struct pn533_ext_frame *eif = frame; return sizeof(struct pn533_ext_frame) + be16_to_cpu(eif->datalen) + PN533_STD_FRAME_TAIL_LEN; } return sizeof(struct pn533_std_frame) + f->datalen + PN533_STD_FRAME_TAIL_LEN; } static u8 pn533_std_get_cmd_code(void *frame) { struct pn533_std_frame *f = frame; struct pn533_ext_frame *eif = frame; if (PN533_STD_IS_EXTENDED(f)) return PN533_FRAME_CMD(eif); else return PN533_FRAME_CMD(f); } bool pn533_rx_frame_is_cmd_response(struct pn533 *dev, void *frame) { return (dev->ops->get_cmd_code(frame) == PN533_CMD_RESPONSE(dev->cmd->code)); } EXPORT_SYMBOL_GPL(pn533_rx_frame_is_cmd_response); static struct pn533_frame_ops pn533_std_frame_ops = { .tx_frame_init = pn533_std_tx_frame_init, .tx_frame_finish = pn533_std_tx_frame_finish, .tx_update_payload_len = pn533_std_tx_update_payload_len, .tx_header_len = PN533_STD_FRAME_HEADER_LEN, .tx_tail_len = PN533_STD_FRAME_TAIL_LEN, .rx_is_frame_valid = pn533_std_rx_frame_is_valid, .rx_frame_size = pn533_std_rx_frame_size, .rx_header_len = PN533_STD_FRAME_HEADER_LEN, .rx_tail_len = PN533_STD_FRAME_TAIL_LEN, .max_payload_len = PN533_STD_FRAME_MAX_PAYLOAD_LEN, .get_cmd_code = pn533_std_get_cmd_code, }; static void pn533_build_cmd_frame(struct pn533 *dev, u8 cmd_code, struct sk_buff *skb) { /* payload is already there, just update datalen */ int payload_len = skb->len; struct pn533_frame_ops *ops = dev->ops; skb_push(skb, ops->tx_header_len); skb_put(skb, ops->tx_tail_len); ops->tx_frame_init(skb->data, cmd_code); ops->tx_update_payload_len(skb->data, payload_len); ops->tx_frame_finish(skb->data); } static int pn533_send_async_complete(struct pn533 *dev) { struct pn533_cmd *cmd = dev->cmd; struct sk_buff *resp; int status, rc = 0; if (!cmd) { dev_dbg(dev->dev, "%s: cmd not set\n", __func__); goto done; } dev_kfree_skb(cmd->req); status = cmd->status; resp = cmd->resp; if (status < 0) { rc = cmd->complete_cb(dev, cmd->complete_cb_context, ERR_PTR(status)); dev_kfree_skb(resp); goto done; } /* when no response is set we got interrupted */ if (!resp) resp = ERR_PTR(-EINTR); if (!IS_ERR(resp)) { skb_pull(resp, dev->ops->rx_header_len); skb_trim(resp, resp->len - dev->ops->rx_tail_len); } rc = cmd->complete_cb(dev, cmd->complete_cb_context, resp); done: kfree(cmd); dev->cmd = NULL; return rc; } static int __pn533_send_async(struct pn533 *dev, u8 cmd_code, struct sk_buff *req, pn533_send_async_complete_t complete_cb, void *complete_cb_context) { struct pn533_cmd *cmd; int rc = 0; dev_dbg(dev->dev, "Sending command 0x%x\n", cmd_code); cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->code = cmd_code; cmd->req = req; cmd->complete_cb = complete_cb; cmd->complete_cb_context = complete_cb_context; pn533_build_cmd_frame(dev, cmd_code, req); mutex_lock(&dev->cmd_lock); if (!dev->cmd_pending) { dev->cmd = cmd; rc = dev->phy_ops->send_frame(dev, req); if (rc) { dev->cmd = NULL; goto error; } dev->cmd_pending = 1; goto unlock; } dev_dbg(dev->dev, "%s Queueing command 0x%x\n", __func__, cmd_code); INIT_LIST_HEAD(&cmd->queue); list_add_tail(&cmd->queue, &dev->cmd_queue); goto unlock; error: kfree(cmd); unlock: mutex_unlock(&dev->cmd_lock); return rc; } static int pn533_send_data_async(struct pn533 *dev, u8 cmd_code, struct sk_buff *req, pn533_send_async_complete_t complete_cb, void *complete_cb_context) { return __pn533_send_async(dev, cmd_code, req, complete_cb, complete_cb_context); } static int 
pn533_send_cmd_async(struct pn533 *dev, u8 cmd_code, struct sk_buff *req, pn533_send_async_complete_t complete_cb, void *complete_cb_context) { return __pn533_send_async(dev, cmd_code, req, complete_cb, complete_cb_context); } /* * pn533_send_cmd_direct_async * * The function sends a priority cmd directly to the chip, bypassing the cmd * queue. It is intended to be used by the chaining mechanism for received * responses, where the host has to request every single chunk of data before * scheduling the next cmd from the queue. */ static int pn533_send_cmd_direct_async(struct pn533 *dev, u8 cmd_code, struct sk_buff *req, pn533_send_async_complete_t complete_cb, void *complete_cb_context) { struct pn533_cmd *cmd; int rc; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (!cmd) return -ENOMEM; cmd->code = cmd_code; cmd->req = req; cmd->complete_cb = complete_cb; cmd->complete_cb_context = complete_cb_context; pn533_build_cmd_frame(dev, cmd_code, req); dev->cmd = cmd; rc = dev->phy_ops->send_frame(dev, req); if (rc < 0) { dev->cmd = NULL; kfree(cmd); } return rc; } static void pn533_wq_cmd_complete(struct work_struct *work) { struct pn533 *dev = container_of(work, struct pn533, cmd_complete_work); int rc; rc = pn533_send_async_complete(dev); if (rc != -EINPROGRESS) queue_work(dev->wq, &dev->cmd_work); } static void pn533_wq_cmd(struct work_struct *work) { struct pn533 *dev = container_of(work, struct pn533, cmd_work); struct pn533_cmd *cmd; int rc; mutex_lock(&dev->cmd_lock); if (list_empty(&dev->cmd_queue)) { dev->cmd_pending = 0; mutex_unlock(&dev->cmd_lock); return; } cmd = list_first_entry(&dev->cmd_queue, struct pn533_cmd, queue); list_del(&cmd->queue); mutex_unlock(&dev->cmd_lock); dev->cmd = cmd; rc = dev->phy_ops->send_frame(dev, cmd->req); if (rc < 0) { dev->cmd = NULL; dev_kfree_skb(cmd->req); kfree(cmd); return; } } struct pn533_sync_cmd_response { struct sk_buff *resp; struct completion done; }; static int pn533_send_sync_complete(struct pn533 *dev, void *_arg, struct sk_buff *resp) { struct pn533_sync_cmd_response *arg = _arg; arg->resp = resp; complete(&arg->done); return 0; } /* pn533_send_cmd_sync * * Please note the req parameter is freed inside the function to * limit the number of return value interpretations by the caller. * * 1. negative in case of error during TX path -> req should be freed * * 2. negative in case of error during RX path -> req should not be freed * as it's been already freed at the beginning of RX path by * async_complete_cb. * * 3. valid pointer in case of successful RX path * * The caller has to check the return value with the IS_ERR macro. If the * test passes, the returned pointer is valid. 
* */ static struct sk_buff *pn533_send_cmd_sync(struct pn533 *dev, u8 cmd_code, struct sk_buff *req) { int rc; struct pn533_sync_cmd_response arg; init_completion(&arg.done); rc = pn533_send_cmd_async(dev, cmd_code, req, pn533_send_sync_complete, &arg); if (rc) { dev_kfree_skb(req); return ERR_PTR(rc); } wait_for_completion(&arg.done); return arg.resp; } static struct sk_buff *pn533_alloc_skb(struct pn533 *dev, unsigned int size) { struct sk_buff *skb; skb = alloc_skb(dev->ops->tx_header_len + size + dev->ops->tx_tail_len, GFP_KERNEL); if (skb) skb_reserve(skb, dev->ops->tx_header_len); return skb; } struct pn533_target_type_a { __be16 sens_res; u8 sel_res; u8 nfcid_len; u8 nfcid_data[]; } __packed; #define PN533_TYPE_A_SENS_RES_NFCID1(x) ((u8)((be16_to_cpu(x) & 0x00C0) >> 6)) #define PN533_TYPE_A_SENS_RES_SSD(x) ((u8)((be16_to_cpu(x) & 0x001F) >> 0)) #define PN533_TYPE_A_SENS_RES_PLATCONF(x) ((u8)((be16_to_cpu(x) & 0x0F00) >> 8)) #define PN533_TYPE_A_SENS_RES_SSD_JEWEL 0x00 #define PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL 0x0C #define PN533_TYPE_A_SEL_PROT(x) (((x) & 0x60) >> 5) #define PN533_TYPE_A_SEL_CASCADE(x) (((x) & 0x04) >> 2) #define PN533_TYPE_A_SEL_PROT_MIFARE 0 #define PN533_TYPE_A_SEL_PROT_ISO14443 1 #define PN533_TYPE_A_SEL_PROT_DEP 2 #define PN533_TYPE_A_SEL_PROT_ISO14443_DEP 3 static bool pn533_target_type_a_is_valid(struct pn533_target_type_a *type_a, int target_data_len) { u8 ssd; u8 platconf; if (target_data_len < sizeof(struct pn533_target_type_a)) return false; /* * The length checks of nfcid[] and ats[] are not performed because * the values are not used */ /* Requirement 4.6.3.3 from NFC Forum Digital Spec */ ssd = PN533_TYPE_A_SENS_RES_SSD(type_a->sens_res); platconf = PN533_TYPE_A_SENS_RES_PLATCONF(type_a->sens_res); if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL && platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) || (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL && platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL)) return false; /* Requirements 4.8.2.1, 4.8.2.3, 4.8.2.5 and 4.8.2.7 from NFC Forum */ if (PN533_TYPE_A_SEL_CASCADE(type_a->sel_res) != 0) return false; if (type_a->nfcid_len > NFC_NFCID1_MAXSIZE) return false; return true; } static int pn533_target_found_type_a(struct nfc_target *nfc_tgt, u8 *tgt_data, int tgt_data_len) { struct pn533_target_type_a *tgt_type_a; tgt_type_a = (struct pn533_target_type_a *)tgt_data; if (!pn533_target_type_a_is_valid(tgt_type_a, tgt_data_len)) return -EPROTO; switch (PN533_TYPE_A_SEL_PROT(tgt_type_a->sel_res)) { case PN533_TYPE_A_SEL_PROT_MIFARE: nfc_tgt->supported_protocols = NFC_PROTO_MIFARE_MASK; break; case PN533_TYPE_A_SEL_PROT_ISO14443: nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK; break; case PN533_TYPE_A_SEL_PROT_DEP: nfc_tgt->supported_protocols = NFC_PROTO_NFC_DEP_MASK; break; case PN533_TYPE_A_SEL_PROT_ISO14443_DEP: nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_MASK | NFC_PROTO_NFC_DEP_MASK; break; } nfc_tgt->sens_res = be16_to_cpu(tgt_type_a->sens_res); nfc_tgt->sel_res = tgt_type_a->sel_res; nfc_tgt->nfcid1_len = tgt_type_a->nfcid_len; memcpy(nfc_tgt->nfcid1, tgt_type_a->nfcid_data, nfc_tgt->nfcid1_len); return 0; } struct pn533_target_felica { u8 pol_res; u8 opcode; u8 nfcid2[NFC_NFCID2_MAXSIZE]; u8 pad[8]; /* optional */ u8 syst_code[]; } __packed; #define PN533_FELICA_SENSF_NFCID2_DEP_B1 0x01 #define PN533_FELICA_SENSF_NFCID2_DEP_B2 0xFE static bool pn533_target_felica_is_valid(struct pn533_target_felica *felica, int target_data_len) { if (target_data_len < sizeof(struct pn533_target_felica)) 
return false; if (felica->opcode != PN533_FELICA_OPC_SENSF_RES) return false; return true; } static int pn533_target_found_felica(struct nfc_target *nfc_tgt, u8 *tgt_data, int tgt_data_len) { struct pn533_target_felica *tgt_felica; tgt_felica = (struct pn533_target_felica *)tgt_data; if (!pn533_target_felica_is_valid(tgt_felica, tgt_data_len)) return -EPROTO; if ((tgt_felica->nfcid2[0] == PN533_FELICA_SENSF_NFCID2_DEP_B1) && (tgt_felica->nfcid2[1] == PN533_FELICA_SENSF_NFCID2_DEP_B2)) nfc_tgt->supported_protocols = NFC_PROTO_NFC_DEP_MASK; else nfc_tgt->supported_protocols = NFC_PROTO_FELICA_MASK; memcpy(nfc_tgt->sensf_res, &tgt_felica->opcode, 9); nfc_tgt->sensf_res_len = 9; memcpy(nfc_tgt->nfcid2, tgt_felica->nfcid2, NFC_NFCID2_MAXSIZE); nfc_tgt->nfcid2_len = NFC_NFCID2_MAXSIZE; return 0; } struct pn533_target_jewel { __be16 sens_res; u8 jewelid[4]; } __packed; static bool pn533_target_jewel_is_valid(struct pn533_target_jewel *jewel, int target_data_len) { u8 ssd; u8 platconf; if (target_data_len < sizeof(struct pn533_target_jewel)) return false; /* Requirement 4.6.3.3 from NFC Forum Digital Spec */ ssd = PN533_TYPE_A_SENS_RES_SSD(jewel->sens_res); platconf = PN533_TYPE_A_SENS_RES_PLATCONF(jewel->sens_res); if ((ssd == PN533_TYPE_A_SENS_RES_SSD_JEWEL && platconf != PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL) || (ssd != PN533_TYPE_A_SENS_RES_SSD_JEWEL && platconf == PN533_TYPE_A_SENS_RES_PLATCONF_JEWEL)) return false; return true; } static int pn533_target_found_jewel(struct nfc_target *nfc_tgt, u8 *tgt_data, int tgt_data_len) { struct pn533_target_jewel *tgt_jewel; tgt_jewel = (struct pn533_target_jewel *)tgt_data; if (!pn533_target_jewel_is_valid(tgt_jewel, tgt_data_len)) return -EPROTO; nfc_tgt->supported_protocols = NFC_PROTO_JEWEL_MASK; nfc_tgt->sens_res = be16_to_cpu(tgt_jewel->sens_res); nfc_tgt->nfcid1_len = 4; memcpy(nfc_tgt->nfcid1, tgt_jewel->jewelid, nfc_tgt->nfcid1_len); return 0; } struct pn533_type_b_prot_info { u8 bitrate; u8 fsci_type; u8 fwi_adc_fo; } __packed; #define PN533_TYPE_B_PROT_FCSI(x) (((x) & 0xF0) >> 4) #define PN533_TYPE_B_PROT_TYPE(x) (((x) & 0x0F) >> 0) #define PN533_TYPE_B_PROT_TYPE_RFU_MASK 0x8 struct pn533_type_b_sens_res { u8 opcode; u8 nfcid[4]; u8 appdata[4]; struct pn533_type_b_prot_info prot_info; } __packed; #define PN533_TYPE_B_OPC_SENSB_RES 0x50 struct pn533_target_type_b { struct pn533_type_b_sens_res sensb_res; u8 attrib_res_len; u8 attrib_res[]; } __packed; static bool pn533_target_type_b_is_valid(struct pn533_target_type_b *type_b, int target_data_len) { if (target_data_len < sizeof(struct pn533_target_type_b)) return false; if (type_b->sensb_res.opcode != PN533_TYPE_B_OPC_SENSB_RES) return false; if (PN533_TYPE_B_PROT_TYPE(type_b->sensb_res.prot_info.fsci_type) & PN533_TYPE_B_PROT_TYPE_RFU_MASK) return false; return true; } static int pn533_target_found_type_b(struct nfc_target *nfc_tgt, u8 *tgt_data, int tgt_data_len) { struct pn533_target_type_b *tgt_type_b; tgt_type_b = (struct pn533_target_type_b *)tgt_data; if (!pn533_target_type_b_is_valid(tgt_type_b, tgt_data_len)) return -EPROTO; nfc_tgt->supported_protocols = NFC_PROTO_ISO14443_B_MASK; return 0; } static void pn533_poll_reset_mod_list(struct pn533 *dev); static int pn533_target_found(struct pn533 *dev, u8 tg, u8 *tgdata, int tgdata_len) { struct nfc_target nfc_tgt; int rc; dev_dbg(dev->dev, "%s: modulation=%d\n", __func__, dev->poll_mod_curr); if (tg != 1) return -EPROTO; memset(&nfc_tgt, 0, sizeof(struct nfc_target)); switch (dev->poll_mod_curr) { case PN533_POLL_MOD_106KBPS_A: rc = 
pn533_target_found_type_a(&nfc_tgt, tgdata, tgdata_len); break; case PN533_POLL_MOD_212KBPS_FELICA: case PN533_POLL_MOD_424KBPS_FELICA: rc = pn533_target_found_felica(&nfc_tgt, tgdata, tgdata_len); break; case PN533_POLL_MOD_106KBPS_JEWEL: rc = pn533_target_found_jewel(&nfc_tgt, tgdata, tgdata_len); break; case PN533_POLL_MOD_847KBPS_B: rc = pn533_target_found_type_b(&nfc_tgt, tgdata, tgdata_len); break; default: nfc_err(dev->dev, "Unknown current poll modulation\n"); return -EPROTO; } if (rc) return rc; if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) { dev_dbg(dev->dev, "The Tg found doesn't have the desired protocol\n"); return -EAGAIN; } dev_dbg(dev->dev, "Target found - supported protocols: 0x%x\n", nfc_tgt.supported_protocols); dev->tgt_available_prots = nfc_tgt.supported_protocols; pn533_poll_reset_mod_list(dev); nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1); return 0; } static inline void pn533_poll_next_mod(struct pn533 *dev) { dev->poll_mod_curr = (dev->poll_mod_curr + 1) % dev->poll_mod_count; } static void pn533_poll_reset_mod_list(struct pn533 *dev) { dev->poll_mod_count = 0; } static void pn533_poll_add_mod(struct pn533 *dev, u8 mod_index) { dev->poll_mod_active[dev->poll_mod_count] = (struct pn533_poll_modulations *)&poll_mod[mod_index]; dev->poll_mod_count++; } static void pn533_poll_create_mod_list(struct pn533 *dev, u32 im_protocols, u32 tm_protocols) { pn533_poll_reset_mod_list(dev); if ((im_protocols & NFC_PROTO_MIFARE_MASK) || (im_protocols & NFC_PROTO_ISO14443_MASK) || (im_protocols & NFC_PROTO_NFC_DEP_MASK)) pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_A); if (im_protocols & NFC_PROTO_FELICA_MASK || im_protocols & NFC_PROTO_NFC_DEP_MASK) { pn533_poll_add_mod(dev, PN533_POLL_MOD_212KBPS_FELICA); pn533_poll_add_mod(dev, PN533_POLL_MOD_424KBPS_FELICA); } if (im_protocols & NFC_PROTO_JEWEL_MASK) pn533_poll_add_mod(dev, PN533_POLL_MOD_106KBPS_JEWEL); if (im_protocols & NFC_PROTO_ISO14443_B_MASK) pn533_poll_add_mod(dev, PN533_POLL_MOD_847KBPS_B); if (tm_protocols) pn533_poll_add_mod(dev, PN533_LISTEN_MOD); } static int pn533_start_poll_complete(struct pn533 *dev, struct sk_buff *resp) { u8 nbtg, tg, *tgdata; int rc, tgdata_len; /* Toggle the DEP polling */ if (dev->poll_protocols & NFC_PROTO_NFC_DEP_MASK) dev->poll_dep = 1; nbtg = resp->data[0]; tg = resp->data[1]; tgdata = &resp->data[2]; tgdata_len = resp->len - 2; /* nbtg + tg */ if (nbtg) { rc = pn533_target_found(dev, tg, tgdata, tgdata_len); /* We must stop the poll after a valid target found */ if (rc == 0) return 0; } return -EAGAIN; } static struct sk_buff *pn533_alloc_poll_tg_frame(struct pn533 *dev) { struct sk_buff *skb; u8 *felica, *nfcid3; u8 *gbytes = dev->gb; size_t gbytes_len = dev->gb_len; u8 felica_params[18] = {0x1, 0xfe, /* DEP */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, /* random */ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0xff, 0xff}; /* System code */ u8 mifare_params[6] = {0x1, 0x1, /* SENS_RES */ 0x0, 0x0, 0x0, 0x40}; /* SEL_RES for DEP */ unsigned int skb_len = 36 + /* * mode (1), mifare (6), * felica (18), nfcid3 (10), gb_len (1) */ gbytes_len + 1; /* len Tk*/ skb = pn533_alloc_skb(dev, skb_len); if (!skb) return NULL; /* DEP support only */ skb_put_u8(skb, PN533_INIT_TARGET_DEP); /* MIFARE params */ skb_put_data(skb, mifare_params, 6); /* Felica params */ felica = skb_put_data(skb, felica_params, 18); get_random_bytes(felica + 2, 6); /* NFCID3 */ nfcid3 = skb_put_zero(skb, 10); memcpy(nfcid3, felica, 8); /* General bytes */ skb_put_u8(skb, gbytes_len); skb_put_data(skb, gbytes, gbytes_len); /* 
Len Tk */ skb_put_u8(skb, 0); return skb; } static void pn533_wq_tm_mi_recv(struct work_struct *work); static struct sk_buff *pn533_build_response(struct pn533 *dev); static int pn533_tm_get_data_complete(struct pn533 *dev, void *arg, struct sk_buff *resp) { struct sk_buff *skb; u8 status, ret, mi; int rc; if (IS_ERR(resp)) { skb_queue_purge(&dev->resp_q); return PTR_ERR(resp); } status = resp->data[0]; ret = status & PN533_CMD_RET_MASK; mi = status & PN533_CMD_MI_MASK; skb_pull(resp, sizeof(status)); if (ret != PN533_CMD_RET_SUCCESS) { rc = -EIO; goto error; } skb_queue_tail(&dev->resp_q, resp); if (mi) { queue_work(dev->wq, &dev->mi_tm_rx_work); return -EINPROGRESS; } skb = pn533_build_response(dev); if (!skb) { rc = -EIO; goto error; } return nfc_tm_data_received(dev->nfc_dev, skb); error: nfc_tm_deactivated(dev->nfc_dev); dev->tgt_mode = 0; skb_queue_purge(&dev->resp_q); dev_kfree_skb(resp); return rc; } static void pn533_wq_tm_mi_recv(struct work_struct *work) { struct pn533 *dev = container_of(work, struct pn533, mi_tm_rx_work); struct sk_buff *skb; int rc; skb = pn533_alloc_skb(dev, 0); if (!skb) return; rc = pn533_send_cmd_direct_async(dev, PN533_CMD_TG_GET_DATA, skb, pn533_tm_get_data_complete, NULL); if (rc < 0) dev_kfree_skb(skb); } static int pn533_tm_send_complete(struct pn533 *dev, void *arg, struct sk_buff *resp); static void pn533_wq_tm_mi_send(struct work_struct *work) { struct pn533 *dev = container_of(work, struct pn533, mi_tm_tx_work); struct sk_buff *skb; int rc; /* Grab the first skb in the queue */ skb = skb_dequeue(&dev->fragment_skb); if (skb == NULL) { /* No more data */ /* Reset the queue for future use */ skb_queue_head_init(&dev->fragment_skb); goto error; } /* last entry - remove MI bit */ if (skb_queue_len(&dev->fragment_skb) == 0) { rc = pn533_send_cmd_direct_async(dev, PN533_CMD_TG_SET_DATA, skb, pn533_tm_send_complete, NULL); } else rc = pn533_send_cmd_direct_async(dev, PN533_CMD_TG_SET_META_DATA, skb, pn533_tm_send_complete, NULL); if (rc == 0) /* success */ return; dev_err(dev->dev, "Error %d when trying to perform set meta data_exchange", rc); dev_kfree_skb(skb); error: dev->phy_ops->send_ack(dev, GFP_KERNEL); queue_work(dev->wq, &dev->cmd_work); } static void pn533_wq_tg_get_data(struct work_struct *work) { struct pn533 *dev = container_of(work, struct pn533, tg_work); struct sk_buff *skb; int rc; skb = pn533_alloc_skb(dev, 0); if (!skb) return; rc = pn533_send_data_async(dev, PN533_CMD_TG_GET_DATA, skb, pn533_tm_get_data_complete, NULL); if (rc < 0) dev_kfree_skb(skb); } #define ATR_REQ_GB_OFFSET 17 static int pn533_init_target_complete(struct pn533 *dev, struct sk_buff *resp) { u8 mode, *cmd, comm_mode = NFC_COMM_PASSIVE, *gb; size_t gb_len; int rc; if (resp->len < ATR_REQ_GB_OFFSET + 1) return -EINVAL; mode = resp->data[0]; cmd = &resp->data[1]; dev_dbg(dev->dev, "Target mode 0x%x len %d\n", mode, resp->len); if ((mode & PN533_INIT_TARGET_RESP_FRAME_MASK) == PN533_INIT_TARGET_RESP_ACTIVE) comm_mode = NFC_COMM_ACTIVE; if ((mode & PN533_INIT_TARGET_RESP_DEP) == 0) /* Only DEP supported */ return -EOPNOTSUPP; gb = cmd + ATR_REQ_GB_OFFSET; gb_len = resp->len - (ATR_REQ_GB_OFFSET + 1); rc = nfc_tm_activated(dev->nfc_dev, NFC_PROTO_NFC_DEP_MASK, comm_mode, gb, gb_len); if (rc < 0) { nfc_err(dev->dev, "Error when signaling target activation\n"); return rc; } dev->tgt_mode = 1; queue_work(dev->wq, &dev->tg_work); return 0; } static void pn533_listen_mode_timer(struct timer_list *t) { struct pn533 *dev = from_timer(dev, t, listen_timer); dev->cancel_listen = 
1; pn533_poll_next_mod(dev); queue_delayed_work(dev->wq, &dev->poll_work, msecs_to_jiffies(PN533_POLL_INTERVAL)); } static int pn533_rf_complete(struct pn533 *dev, void *arg, struct sk_buff *resp) { int rc = 0; if (IS_ERR(resp)) { rc = PTR_ERR(resp); nfc_err(dev->dev, "RF setting error %d\n", rc); return rc; } queue_delayed_work(dev->wq, &dev->poll_work, msecs_to_jiffies(PN533_POLL_INTERVAL)); dev_kfree_skb(resp); return rc; } static void pn533_wq_rf(struct work_struct *work) { struct pn533 *dev = container_of(work, struct pn533, rf_work); struct sk_buff *skb; int rc; skb = pn533_alloc_skb(dev, 2); if (!skb) return; skb_put_u8(skb, PN533_CFGITEM_RF_FIELD); skb_put_u8(skb, PN533_CFGITEM_RF_FIELD_AUTO_RFCA); rc = pn533_send_cmd_async(dev, PN533_CMD_RF_CONFIGURATION, skb, pn533_rf_complete, NULL); if (rc < 0) { dev_kfree_skb(skb); nfc_err(dev->dev, "RF setting error %d\n", rc); } } static int pn533_poll_dep_complete(struct pn533 *dev, void *arg, struct sk_buff *resp) { struct pn533_cmd_jump_dep_response *rsp; struct nfc_target nfc_target; u8 target_gt_len; int rc; if (IS_ERR(resp)) return PTR_ERR(resp); memset(&nfc_target, 0, sizeof(struct nfc_target)); rsp = (struct pn533_cmd_jump_dep_response *)resp->data; rc = rsp->status & PN533_CMD_RET_MASK; if (rc != PN533_CMD_RET_SUCCESS) { /* No target found, turn radio off */ queue_work(dev->wq, &dev->rf_work); dev_kfree_skb(resp); return 0; } dev_dbg(dev->dev, "Creating new target\n"); nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK; nfc_target.nfcid1_len = 10; memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len); rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1); if (rc) goto error; dev->tgt_available_prots = 0; dev->tgt_active_prot = NFC_PROTO_NFC_DEP; /* ATR_RES general bytes are located at offset 17 */ target_gt_len = resp->len - 17; rc = nfc_set_remote_general_bytes(dev->nfc_dev, rsp->gt, target_gt_len); if (!rc) { rc = nfc_dep_link_is_up(dev->nfc_dev, dev->nfc_dev->targets[0].idx, 0, NFC_RF_INITIATOR); if (!rc) pn533_poll_reset_mod_list(dev); } error: dev_kfree_skb(resp); return rc; } #define PASSIVE_DATA_LEN 5 static int pn533_poll_dep(struct nfc_dev *nfc_dev) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); struct sk_buff *skb; int rc, skb_len; u8 *next, nfcid3[NFC_NFCID3_MAXSIZE]; u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3}; if (!dev->gb) { dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len); if (!dev->gb || !dev->gb_len) { dev->poll_dep = 0; queue_work(dev->wq, &dev->rf_work); } } skb_len = 3 + dev->gb_len; /* ActPass + BR + Next */ skb_len += PASSIVE_DATA_LEN; /* NFCID3 */ skb_len += NFC_NFCID3_MAXSIZE; nfcid3[0] = 0x1; nfcid3[1] = 0xfe; get_random_bytes(nfcid3 + 2, 6); skb = pn533_alloc_skb(dev, skb_len); if (!skb) return -ENOMEM; skb_put_u8(skb, 0x01); /* Active */ skb_put_u8(skb, 0x02); /* 424 kbps */ next = skb_put(skb, 1); /* Next */ *next = 0; /* Copy passive data */ skb_put_data(skb, passive_data, PASSIVE_DATA_LEN); *next |= 1; /* Copy NFCID3 (which is NFCID2 from SENSF_RES) */ skb_put_data(skb, nfcid3, NFC_NFCID3_MAXSIZE); *next |= 2; skb_put_data(skb, dev->gb, dev->gb_len); *next |= 4; /* We have some Gi */ rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb, pn533_poll_dep_complete, NULL); if (rc < 0) dev_kfree_skb(skb); return rc; } static int pn533_autopoll_complete(struct pn533 *dev, void *arg, struct sk_buff *resp) { struct pn532_autopoll_resp *apr; struct nfc_target nfc_tgt; u8 nbtg; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); nfc_err(dev->dev, "%s autopoll 
complete error %d\n", __func__, rc); if (rc == -ENOENT) { if (dev->poll_mod_count != 0) return rc; goto stop_poll; } else if (rc < 0) { nfc_err(dev->dev, "Error %d when running autopoll\n", rc); goto stop_poll; } } nbtg = resp->data[0]; if ((nbtg > 2) || (nbtg <= 0)) return -EAGAIN; apr = (struct pn532_autopoll_resp *)&resp->data[1]; while (nbtg--) { memset(&nfc_tgt, 0, sizeof(struct nfc_target)); switch (apr->type) { case PN532_AUTOPOLL_TYPE_ISOA: dev_dbg(dev->dev, "ISOA\n"); rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata, apr->ln - 1); break; case PN532_AUTOPOLL_TYPE_FELICA212: case PN532_AUTOPOLL_TYPE_FELICA424: dev_dbg(dev->dev, "FELICA\n"); rc = pn533_target_found_felica(&nfc_tgt, apr->tgdata, apr->ln - 1); break; case PN532_AUTOPOLL_TYPE_JEWEL: dev_dbg(dev->dev, "JEWEL\n"); rc = pn533_target_found_jewel(&nfc_tgt, apr->tgdata, apr->ln - 1); break; case PN532_AUTOPOLL_TYPE_ISOB: dev_dbg(dev->dev, "ISOB\n"); rc = pn533_target_found_type_b(&nfc_tgt, apr->tgdata, apr->ln - 1); break; case PN532_AUTOPOLL_TYPE_MIFARE: dev_dbg(dev->dev, "Mifare\n"); rc = pn533_target_found_type_a(&nfc_tgt, apr->tgdata, apr->ln - 1); break; default: nfc_err(dev->dev, "Unknown current poll modulation\n"); rc = -EPROTO; } if (rc) goto done; if (!(nfc_tgt.supported_protocols & dev->poll_protocols)) { nfc_err(dev->dev, "The Tg found doesn't have the desired protocol\n"); rc = -EAGAIN; goto done; } dev->tgt_available_prots = nfc_tgt.supported_protocols; apr = (struct pn532_autopoll_resp *) (apr->tgdata + (apr->ln - 1)); } pn533_poll_reset_mod_list(dev); nfc_targets_found(dev->nfc_dev, &nfc_tgt, 1); done: dev_kfree_skb(resp); return rc; stop_poll: nfc_err(dev->dev, "autopoll operation has been stopped\n"); pn533_poll_reset_mod_list(dev); dev->poll_protocols = 0; return rc; } static int pn533_poll_complete(struct pn533 *dev, void *arg, struct sk_buff *resp) { struct pn533_poll_modulations *cur_mod; int rc; if (IS_ERR(resp)) { rc = PTR_ERR(resp); nfc_err(dev->dev, "%s Poll complete error %d\n", __func__, rc); if (rc == -ENOENT) { if (dev->poll_mod_count != 0) return rc; goto stop_poll; } else if (rc < 0) { nfc_err(dev->dev, "Error %d when running poll\n", rc); goto stop_poll; } } cur_mod = dev->poll_mod_active[dev->poll_mod_curr]; if (cur_mod->len == 0) { /* Target mode */ del_timer(&dev->listen_timer); rc = pn533_init_target_complete(dev, resp); goto done; } /* Initiator mode */ rc = pn533_start_poll_complete(dev, resp); if (!rc) goto done; if (!dev->poll_mod_count) { dev_dbg(dev->dev, "Polling has been stopped\n"); goto done; } pn533_poll_next_mod(dev); /* No target found, turn radio off */ queue_work(dev->wq, &dev->rf_work); done: dev_kfree_skb(resp); return rc; stop_poll: nfc_err(dev->dev, "Polling operation has been stopped\n"); pn533_poll_reset_mod_list(dev); dev->poll_protocols = 0; return rc; } static struct sk_buff *pn533_alloc_poll_in_frame(struct pn533 *dev, struct pn533_poll_modulations *mod) { struct sk_buff *skb; skb = pn533_alloc_skb(dev, mod->len); if (!skb) return NULL; skb_put_data(skb, &mod->data, mod->len); return skb; } static int pn533_send_poll_frame(struct pn533 *dev) { struct pn533_poll_modulations *mod; struct sk_buff *skb; int rc; u8 cmd_code; mod = dev->poll_mod_active[dev->poll_mod_curr]; dev_dbg(dev->dev, "%s mod len %d\n", __func__, mod->len); if ((dev->poll_protocols & NFC_PROTO_NFC_DEP_MASK) && dev->poll_dep) { dev->poll_dep = 0; return pn533_poll_dep(dev->nfc_dev); } if (mod->len == 0) { /* Listen mode */ cmd_code = PN533_CMD_TG_INIT_AS_TARGET; skb = 
pn533_alloc_poll_tg_frame(dev); } else { /* Polling mode */ cmd_code = PN533_CMD_IN_LIST_PASSIVE_TARGET; skb = pn533_alloc_poll_in_frame(dev, mod); } if (!skb) { nfc_err(dev->dev, "Failed to allocate skb\n"); return -ENOMEM; } rc = pn533_send_cmd_async(dev, cmd_code, skb, pn533_poll_complete, NULL); if (rc < 0) { dev_kfree_skb(skb); nfc_err(dev->dev, "Polling loop error %d\n", rc); } return rc; } static void pn533_wq_poll(struct work_struct *work) { struct pn533 *dev = container_of(work, struct pn533, poll_work.work); struct pn533_poll_modulations *cur_mod; int rc; cur_mod = dev->poll_mod_active[dev->poll_mod_curr]; dev_dbg(dev->dev, "%s cancel_listen %d modulation len %d\n", __func__, dev->cancel_listen, cur_mod->len); if (dev->cancel_listen == 1) { dev->cancel_listen = 0; dev->phy_ops->abort_cmd(dev, GFP_ATOMIC); } rc = pn533_send_poll_frame(dev); if (rc) return; if (cur_mod->len == 0 && dev->poll_mod_count > 1) mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ); } static int pn533_start_poll(struct nfc_dev *nfc_dev, u32 im_protocols, u32 tm_protocols) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); struct pn533_poll_modulations *cur_mod; struct sk_buff *skb; u8 rand_mod; int rc; dev_dbg(dev->dev, "%s: im protocols 0x%x tm protocols 0x%x\n", __func__, im_protocols, tm_protocols); if (dev->tgt_active_prot) { nfc_err(dev->dev, "Cannot poll with a target already activated\n"); return -EBUSY; } if (dev->tgt_mode) { nfc_err(dev->dev, "Cannot poll while already being activated\n"); return -EBUSY; } if (tm_protocols) { dev->gb = nfc_get_local_general_bytes(nfc_dev, &dev->gb_len); if (dev->gb == NULL) tm_protocols = 0; } dev->poll_protocols = im_protocols; dev->listen_protocols = tm_protocols; if (dev->device_type == PN533_DEVICE_PN532_AUTOPOLL) { skb = pn533_alloc_skb(dev, 4 + 6); if (!skb) return -ENOMEM; *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_POLLNR_INFINITE; *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_PERIOD; if ((im_protocols & NFC_PROTO_MIFARE_MASK) && (im_protocols & NFC_PROTO_ISO14443_MASK) && (im_protocols & NFC_PROTO_NFC_DEP_MASK)) *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_TYPE_GENERIC_106; else { if (im_protocols & NFC_PROTO_MIFARE_MASK) *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_TYPE_MIFARE; if (im_protocols & NFC_PROTO_ISO14443_MASK) *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_TYPE_ISOA; if (im_protocols & NFC_PROTO_NFC_DEP_MASK) { *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_TYPE_DEP_PASSIVE_106; *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_TYPE_DEP_PASSIVE_212; *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_TYPE_DEP_PASSIVE_424; } } if (im_protocols & NFC_PROTO_FELICA_MASK || im_protocols & NFC_PROTO_NFC_DEP_MASK) { *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_TYPE_FELICA212; *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_TYPE_FELICA424; } if (im_protocols & NFC_PROTO_JEWEL_MASK) *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_TYPE_JEWEL; if (im_protocols & NFC_PROTO_ISO14443_B_MASK) *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_TYPE_ISOB; if (tm_protocols) *((u8 *)skb_put(skb, sizeof(u8))) = PN532_AUTOPOLL_TYPE_DEP_ACTIVE_106; rc = pn533_send_cmd_async(dev, PN533_CMD_IN_AUTOPOLL, skb, pn533_autopoll_complete, NULL); if (rc < 0) dev_kfree_skb(skb); else dev->poll_mod_count++; return rc; } pn533_poll_create_mod_list(dev, im_protocols, tm_protocols); /* Do not always start polling from the same modulation */ get_random_bytes(&rand_mod, sizeof(rand_mod)); rand_mod %= dev->poll_mod_count; 
dev->poll_mod_curr = rand_mod; cur_mod = dev->poll_mod_active[dev->poll_mod_curr]; rc = pn533_send_poll_frame(dev); /* Start listen timer */ if (!rc && cur_mod->len == 0 && dev->poll_mod_count > 1) mod_timer(&dev->listen_timer, jiffies + PN533_LISTEN_TIME * HZ); return rc; } static void pn533_stop_poll(struct nfc_dev *nfc_dev) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); del_timer(&dev->listen_timer); if (!dev->poll_mod_count) { dev_dbg(dev->dev, "Polling operation was not running\n"); return; } dev->phy_ops->abort_cmd(dev, GFP_KERNEL); flush_delayed_work(&dev->poll_work); pn533_poll_reset_mod_list(dev); } static int pn533_activate_target_nfcdep(struct pn533 *dev) { struct pn533_cmd_activate_response *rsp; u16 gt_len; int rc; struct sk_buff *skb; struct sk_buff *resp; skb = pn533_alloc_skb(dev, sizeof(u8) * 2); /*TG + Next*/ if (!skb) return -ENOMEM; skb_put_u8(skb, 1); /* TG */ skb_put_u8(skb, 0); /* Next */ resp = pn533_send_cmd_sync(dev, PN533_CMD_IN_ATR, skb); if (IS_ERR(resp)) return PTR_ERR(resp); rsp = (struct pn533_cmd_activate_response *)resp->data; rc = rsp->status & PN533_CMD_RET_MASK; if (rc != PN533_CMD_RET_SUCCESS) { nfc_err(dev->dev, "Target activation failed (error 0x%x)\n", rc); dev_kfree_skb(resp); return -EIO; } /* ATR_RES general bytes are located at offset 16 */ gt_len = resp->len - 16; rc = nfc_set_remote_general_bytes(dev->nfc_dev, rsp->gt, gt_len); dev_kfree_skb(resp); return rc; } static int pn533_activate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, u32 protocol) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); int rc; dev_dbg(dev->dev, "%s: protocol=%u\n", __func__, protocol); if (dev->poll_mod_count) { nfc_err(dev->dev, "Cannot activate while polling\n"); return -EBUSY; } if (dev->tgt_active_prot) { nfc_err(dev->dev, "There is already an active target\n"); return -EBUSY; } if (!dev->tgt_available_prots) { nfc_err(dev->dev, "There is no available target to activate\n"); return -EINVAL; } if (!(dev->tgt_available_prots & (1 << protocol))) { nfc_err(dev->dev, "Target doesn't support requested proto %u\n", protocol); return -EINVAL; } if (protocol == NFC_PROTO_NFC_DEP) { rc = pn533_activate_target_nfcdep(dev); if (rc) { nfc_err(dev->dev, "Activating target with DEP failed %d\n", rc); return rc; } } dev->tgt_active_prot = protocol; dev->tgt_available_prots = 0; return 0; } static int pn533_deactivate_target_complete(struct pn533 *dev, void *arg, struct sk_buff *resp) { int rc = 0; if (IS_ERR(resp)) { rc = PTR_ERR(resp); nfc_err(dev->dev, "Target release error %d\n", rc); return rc; } rc = resp->data[0] & PN533_CMD_RET_MASK; if (rc != PN533_CMD_RET_SUCCESS) nfc_err(dev->dev, "Error 0x%x when releasing the target\n", rc); dev_kfree_skb(resp); return rc; } static void pn533_deactivate_target(struct nfc_dev *nfc_dev, struct nfc_target *target, u8 mode) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); struct sk_buff *skb; int rc; if (!dev->tgt_active_prot) { nfc_err(dev->dev, "There is no active target\n"); return; } dev->tgt_active_prot = 0; skb_queue_purge(&dev->resp_q); skb = pn533_alloc_skb(dev, sizeof(u8)); if (!skb) return; skb_put_u8(skb, 1); /* TG*/ rc = pn533_send_cmd_async(dev, PN533_CMD_IN_RELEASE, skb, pn533_deactivate_target_complete, NULL); if (rc < 0) { dev_kfree_skb(skb); nfc_err(dev->dev, "Target release error %d\n", rc); } } static int pn533_in_dep_link_up_complete(struct pn533 *dev, void *arg, struct sk_buff *resp) { struct pn533_cmd_jump_dep_response *rsp; u8 target_gt_len; int rc; u8 active = *(u8 *)arg; kfree(arg); if (IS_ERR(resp)) 
return PTR_ERR(resp); if (dev->tgt_available_prots && !(dev->tgt_available_prots & (1 << NFC_PROTO_NFC_DEP))) { nfc_err(dev->dev, "The target does not support DEP\n"); rc = -EINVAL; goto error; } rsp = (struct pn533_cmd_jump_dep_response *)resp->data; rc = rsp->status & PN533_CMD_RET_MASK; if (rc != PN533_CMD_RET_SUCCESS) { nfc_err(dev->dev, "Bringing DEP link up failed (error 0x%x)\n", rc); goto error; } if (!dev->tgt_available_prots) { struct nfc_target nfc_target; dev_dbg(dev->dev, "Creating new target\n"); memset(&nfc_target, 0, sizeof(struct nfc_target)); nfc_target.supported_protocols = NFC_PROTO_NFC_DEP_MASK; nfc_target.nfcid1_len = 10; memcpy(nfc_target.nfcid1, rsp->nfcid3t, nfc_target.nfcid1_len); rc = nfc_targets_found(dev->nfc_dev, &nfc_target, 1); if (rc) goto error; dev->tgt_available_prots = 0; } dev->tgt_active_prot = NFC_PROTO_NFC_DEP; /* ATR_RES general bytes are located at offset 17 */ target_gt_len = resp->len - 17; rc = nfc_set_remote_general_bytes(dev->nfc_dev, rsp->gt, target_gt_len); if (rc == 0) rc = nfc_dep_link_is_up(dev->nfc_dev, dev->nfc_dev->targets[0].idx, !active, NFC_RF_INITIATOR); error: dev_kfree_skb(resp); return rc; } static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf); static int pn533_dep_link_up(struct nfc_dev *nfc_dev, struct nfc_target *target, u8 comm_mode, u8 *gb, size_t gb_len) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); struct sk_buff *skb; int rc, skb_len; u8 *next, *arg, nfcid3[NFC_NFCID3_MAXSIZE]; u8 passive_data[PASSIVE_DATA_LEN] = {0x00, 0xff, 0xff, 0x00, 0x3}; if (dev->poll_mod_count) { nfc_err(dev->dev, "Cannot bring the DEP link up while polling\n"); return -EBUSY; } if (dev->tgt_active_prot) { nfc_err(dev->dev, "There is already an active target\n"); return -EBUSY; } skb_len = 3 + gb_len; /* ActPass + BR + Next */ skb_len += PASSIVE_DATA_LEN; /* NFCID3 */ skb_len += NFC_NFCID3_MAXSIZE; if (target && !target->nfcid2_len) { nfcid3[0] = 0x1; nfcid3[1] = 0xfe; get_random_bytes(nfcid3 + 2, 6); } skb = pn533_alloc_skb(dev, skb_len); if (!skb) return -ENOMEM; skb_put_u8(skb, !comm_mode); /* ActPass */ skb_put_u8(skb, 0x02); /* 424 kbps */ next = skb_put(skb, 1); /* Next */ *next = 0; /* Copy passive data */ skb_put_data(skb, passive_data, PASSIVE_DATA_LEN); *next |= 1; /* Copy NFCID3 (which is NFCID2 from SENSF_RES) */ if (target && target->nfcid2_len) memcpy(skb_put(skb, NFC_NFCID3_MAXSIZE), target->nfcid2, target->nfcid2_len); else skb_put_data(skb, nfcid3, NFC_NFCID3_MAXSIZE); *next |= 2; if (gb != NULL && gb_len > 0) { skb_put_data(skb, gb, gb_len); *next |= 4; /* We have some Gi */ } else { *next = 0; } arg = kmalloc(sizeof(*arg), GFP_KERNEL); if (!arg) { dev_kfree_skb(skb); return -ENOMEM; } *arg = !comm_mode; pn533_rf_field(dev->nfc_dev, 0); rc = pn533_send_cmd_async(dev, PN533_CMD_IN_JUMP_FOR_DEP, skb, pn533_in_dep_link_up_complete, arg); if (rc < 0) { dev_kfree_skb(skb); kfree(arg); } return rc; } static int pn533_dep_link_down(struct nfc_dev *nfc_dev) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); pn533_poll_reset_mod_list(dev); if (dev->tgt_mode || dev->tgt_active_prot) dev->phy_ops->abort_cmd(dev, GFP_KERNEL); dev->tgt_active_prot = 0; dev->tgt_mode = 0; skb_queue_purge(&dev->resp_q); return 0; } struct pn533_data_exchange_arg { data_exchange_cb_t cb; void *cb_context; }; static struct sk_buff *pn533_build_response(struct pn533 *dev) { struct sk_buff *skb, *tmp, *t; unsigned int skb_len = 0, tmp_len = 0; if (skb_queue_empty(&dev->resp_q)) return NULL; if (skb_queue_len(&dev->resp_q) == 1) { skb = 
skb_dequeue(&dev->resp_q); goto out; } skb_queue_walk_safe(&dev->resp_q, tmp, t) skb_len += tmp->len; dev_dbg(dev->dev, "%s total length %d\n", __func__, skb_len); skb = alloc_skb(skb_len, GFP_KERNEL); if (skb == NULL) goto out; skb_put(skb, skb_len); skb_queue_walk_safe(&dev->resp_q, tmp, t) { memcpy(skb->data + tmp_len, tmp->data, tmp->len); tmp_len += tmp->len; } out: skb_queue_purge(&dev->resp_q); return skb; } static int pn533_data_exchange_complete(struct pn533 *dev, void *_arg, struct sk_buff *resp) { struct pn533_data_exchange_arg *arg = _arg; struct sk_buff *skb; int rc = 0; u8 status, ret, mi; if (IS_ERR(resp)) { rc = PTR_ERR(resp); goto _error; } status = resp->data[0]; ret = status & PN533_CMD_RET_MASK; mi = status & PN533_CMD_MI_MASK; skb_pull(resp, sizeof(status)); if (ret != PN533_CMD_RET_SUCCESS) { nfc_err(dev->dev, "Exchanging data failed (error 0x%x)\n", ret); rc = -EIO; goto error; } skb_queue_tail(&dev->resp_q, resp); if (mi) { dev->cmd_complete_mi_arg = arg; queue_work(dev->wq, &dev->mi_rx_work); return -EINPROGRESS; } /* Prepare for the next round */ if (skb_queue_len(&dev->fragment_skb) > 0) { dev->cmd_complete_dep_arg = arg; queue_work(dev->wq, &dev->mi_tx_work); return -EINPROGRESS; } skb = pn533_build_response(dev); if (!skb) { rc = -ENOMEM; goto error; } arg->cb(arg->cb_context, skb, 0); kfree(arg); return 0; error: dev_kfree_skb(resp); _error: skb_queue_purge(&dev->resp_q); arg->cb(arg->cb_context, NULL, rc); kfree(arg); return rc; } /* * Receive an incoming pn533 frame. skb contains only header and payload. * If skb == NULL, it is a notification that the link below is dead. */ void pn533_recv_frame(struct pn533 *dev, struct sk_buff *skb, int status) { if (!dev->cmd) goto sched_wq; dev->cmd->status = status; if (status != 0) { dev_dbg(dev->dev, "%s: Error received: %d\n", __func__, status); goto sched_wq; } if (skb == NULL) { dev_err(dev->dev, "NULL Frame -> link is dead\n"); goto sched_wq; } if (pn533_rx_frame_is_ack(skb->data)) { dev_dbg(dev->dev, "%s: Received ACK frame\n", __func__); dev_kfree_skb(skb); return; } print_hex_dump_debug("PN533 RX: ", DUMP_PREFIX_NONE, 16, 1, skb->data, dev->ops->rx_frame_size(skb->data), false); if (!dev->ops->rx_is_frame_valid(skb->data, dev)) { nfc_err(dev->dev, "Received an invalid frame\n"); dev->cmd->status = -EIO; } else if (!pn533_rx_frame_is_cmd_response(dev, skb->data)) { nfc_err(dev->dev, "It is not the response to the last command\n"); dev->cmd->status = -EIO; } dev->cmd->resp = skb; sched_wq: queue_work(dev->wq, &dev->cmd_complete_work); } EXPORT_SYMBOL(pn533_recv_frame); /* Split the Tx skb into small chunks */ static int pn533_fill_fragment_skbs(struct pn533 *dev, struct sk_buff *skb) { struct sk_buff *frag; int frag_size; do { /* Remaining size */ if (skb->len > PN533_CMD_DATAFRAME_MAXLEN) frag_size = PN533_CMD_DATAFRAME_MAXLEN; else frag_size = skb->len; /* Allocate and reserve */ frag = pn533_alloc_skb(dev, frag_size); if (!frag) { skb_queue_purge(&dev->fragment_skb); return -ENOMEM; } if (!dev->tgt_mode) { /* Reserve the TG/MI byte */ skb_reserve(frag, 1); /* MI + TG */ if (frag_size == PN533_CMD_DATAFRAME_MAXLEN) *(u8 *)skb_push(frag, sizeof(u8)) = (PN533_CMD_MI_MASK | 1); else *(u8 *)skb_push(frag, sizeof(u8)) = 1; /* TG */ } skb_put_data(frag, skb->data, frag_size); /* Reduce the size of the incoming buffer */ skb_pull(skb, frag_size); /* Add this to skb_queue */ skb_queue_tail(&dev->fragment_skb, frag); } while (skb->len > 0); dev_kfree_skb(skb); return skb_queue_len(&dev->fragment_skb); } static int
pn533_transceive(struct nfc_dev *nfc_dev, struct nfc_target *target, struct sk_buff *skb, data_exchange_cb_t cb, void *cb_context) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); struct pn533_data_exchange_arg *arg = NULL; int rc; if (!dev->tgt_active_prot) { nfc_err(dev->dev, "Can't exchange data if there is no active target\n"); rc = -EINVAL; goto error; } arg = kmalloc(sizeof(*arg), GFP_KERNEL); if (!arg) { rc = -ENOMEM; goto error; } arg->cb = cb; arg->cb_context = cb_context; switch (dev->device_type) { case PN533_DEVICE_PASORI: if (dev->tgt_active_prot == NFC_PROTO_FELICA) { rc = pn533_send_data_async(dev, PN533_CMD_IN_COMM_THRU, skb, pn533_data_exchange_complete, arg); break; } fallthrough; default: /* jumbo frame ? */ if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) { rc = pn533_fill_fragment_skbs(dev, skb); if (rc < 0) goto error; skb = skb_dequeue(&dev->fragment_skb); if (!skb) { rc = -EIO; goto error; } } else { *(u8 *)skb_push(skb, sizeof(u8)) = 1; /* TG */ } rc = pn533_send_data_async(dev, PN533_CMD_IN_DATA_EXCHANGE, skb, pn533_data_exchange_complete, arg); break; } if (rc < 0) /* rc from send_async */ goto error; return 0; error: kfree(arg); dev_kfree_skb(skb); return rc; } static int pn533_tm_send_complete(struct pn533 *dev, void *arg, struct sk_buff *resp) { u8 status; if (IS_ERR(resp)) return PTR_ERR(resp); status = resp->data[0]; /* Prepare for the next round */ if (skb_queue_len(&dev->fragment_skb) > 0) { queue_work(dev->wq, &dev->mi_tm_tx_work); return -EINPROGRESS; } dev_kfree_skb(resp); if (status != 0) { nfc_tm_deactivated(dev->nfc_dev); dev->tgt_mode = 0; return 0; } queue_work(dev->wq, &dev->tg_work); return 0; } static int pn533_tm_send(struct nfc_dev *nfc_dev, struct sk_buff *skb) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); int rc; /* let's split into multiple chunks if the size is too big */ if (skb->len > PN533_CMD_DATAEXCH_DATA_MAXLEN) { rc = pn533_fill_fragment_skbs(dev, skb); if (rc < 0) goto error; /* get the first skb */ skb = skb_dequeue(&dev->fragment_skb); if (!skb) { rc = -EIO; goto error; } rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_META_DATA, skb, pn533_tm_send_complete, NULL); } else { /* Send the skb */ rc = pn533_send_data_async(dev, PN533_CMD_TG_SET_DATA, skb, pn533_tm_send_complete, NULL); } error: if (rc < 0) { dev_kfree_skb(skb); skb_queue_purge(&dev->fragment_skb); } return rc; } static void pn533_wq_mi_recv(struct work_struct *work) { struct pn533 *dev = container_of(work, struct pn533, mi_rx_work); struct sk_buff *skb; int rc; skb = pn533_alloc_skb(dev, PN533_CMD_DATAEXCH_HEAD_LEN); if (!skb) goto error; switch (dev->device_type) { case PN533_DEVICE_PASORI: if (dev->tgt_active_prot == NFC_PROTO_FELICA) { rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_COMM_THRU, skb, pn533_data_exchange_complete, dev->cmd_complete_mi_arg); break; } fallthrough; default: skb_put_u8(skb, 1); /* TG */ rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_DATA_EXCHANGE, skb, pn533_data_exchange_complete, dev->cmd_complete_mi_arg); break; } if (rc == 0) /* success */ return; nfc_err(dev->dev, "Error %d when trying to perform data_exchange\n", rc); dev_kfree_skb(skb); kfree(dev->cmd_complete_mi_arg); error: dev->phy_ops->send_ack(dev, GFP_KERNEL); queue_work(dev->wq, &dev->cmd_work); } static void pn533_wq_mi_send(struct work_struct *work) { struct pn533 *dev = container_of(work, struct pn533, mi_tx_work); struct sk_buff *skb; int rc; /* Grab the first skb in the queue */ skb = skb_dequeue(&dev->fragment_skb); if (skb == NULL) { /* No more data */ /* Reset the
queue for future use */ skb_queue_head_init(&dev->fragment_skb); goto error; } switch (dev->device_type) { case PN533_DEVICE_PASORI: if (dev->tgt_active_prot != NFC_PROTO_FELICA) { rc = -EIO; break; } rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_COMM_THRU, skb, pn533_data_exchange_complete, dev->cmd_complete_dep_arg); break; default: /* Still some fragments? */ rc = pn533_send_cmd_direct_async(dev, PN533_CMD_IN_DATA_EXCHANGE, skb, pn533_data_exchange_complete, dev->cmd_complete_dep_arg); break; } if (rc == 0) /* success */ return; nfc_err(dev->dev, "Error %d when trying to perform data_exchange\n", rc); dev_kfree_skb(skb); kfree(dev->cmd_complete_dep_arg); error: dev->phy_ops->send_ack(dev, GFP_KERNEL); queue_work(dev->wq, &dev->cmd_work); } static int pn533_set_configuration(struct pn533 *dev, u8 cfgitem, u8 *cfgdata, u8 cfgdata_len) { struct sk_buff *skb; struct sk_buff *resp; int skb_len; skb_len = sizeof(cfgitem) + cfgdata_len; /* cfgitem + cfgdata */ skb = pn533_alloc_skb(dev, skb_len); if (!skb) return -ENOMEM; skb_put_u8(skb, cfgitem); skb_put_data(skb, cfgdata, cfgdata_len); resp = pn533_send_cmd_sync(dev, PN533_CMD_RF_CONFIGURATION, skb); if (IS_ERR(resp)) return PTR_ERR(resp); dev_kfree_skb(resp); return 0; } static int pn533_get_firmware_version(struct pn533 *dev, struct pn533_fw_version *fv) { struct sk_buff *skb; struct sk_buff *resp; skb = pn533_alloc_skb(dev, 0); if (!skb) return -ENOMEM; resp = pn533_send_cmd_sync(dev, PN533_CMD_GET_FIRMWARE_VERSION, skb); if (IS_ERR(resp)) return PTR_ERR(resp); fv->ic = resp->data[0]; fv->ver = resp->data[1]; fv->rev = resp->data[2]; fv->support = resp->data[3]; dev_kfree_skb(resp); return 0; } static int pn533_pasori_fw_reset(struct pn533 *dev) { struct sk_buff *skb; struct sk_buff *resp; skb = pn533_alloc_skb(dev, sizeof(u8)); if (!skb) return -ENOMEM; skb_put_u8(skb, 0x1); resp = pn533_send_cmd_sync(dev, 0x18, skb); if (IS_ERR(resp)) return PTR_ERR(resp); dev_kfree_skb(resp); return 0; } static int pn533_rf_field(struct nfc_dev *nfc_dev, u8 rf) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); u8 rf_field = !!rf; int rc; rf_field |= PN533_CFGITEM_RF_FIELD_AUTO_RFCA; rc = pn533_set_configuration(dev, PN533_CFGITEM_RF_FIELD, (u8 *)&rf_field, 1); if (rc) { nfc_err(dev->dev, "Error on setting RF field\n"); return rc; } return 0; } static int pn532_sam_configuration(struct nfc_dev *nfc_dev) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); struct sk_buff *skb; struct sk_buff *resp; skb = pn533_alloc_skb(dev, 1); if (!skb) return -ENOMEM; skb_put_u8(skb, 0x01); resp = pn533_send_cmd_sync(dev, PN533_CMD_SAM_CONFIGURATION, skb); if (IS_ERR(resp)) return PTR_ERR(resp); dev_kfree_skb(resp); return 0; } static int pn533_dev_up(struct nfc_dev *nfc_dev) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); int rc; if (dev->phy_ops->dev_up) { rc = dev->phy_ops->dev_up(dev); if (rc) return rc; } if ((dev->device_type == PN533_DEVICE_PN532) || (dev->device_type == PN533_DEVICE_PN532_AUTOPOLL)) { rc = pn532_sam_configuration(nfc_dev); if (rc) return rc; } return pn533_rf_field(nfc_dev, 1); } static int pn533_dev_down(struct nfc_dev *nfc_dev) { struct pn533 *dev = nfc_get_drvdata(nfc_dev); int ret; ret = pn533_rf_field(nfc_dev, 0); if (dev->phy_ops->dev_down && !ret) ret = dev->phy_ops->dev_down(dev); return ret; } static const struct nfc_ops pn533_nfc_ops = { .dev_up = pn533_dev_up, .dev_down = pn533_dev_down, .dep_link_up = pn533_dep_link_up, .dep_link_down = pn533_dep_link_down, .start_poll = pn533_start_poll, .stop_poll = pn533_stop_poll, 
.activate_target = pn533_activate_target, .deactivate_target = pn533_deactivate_target, .im_transceive = pn533_transceive, .tm_send = pn533_tm_send, }; static int pn533_setup(struct pn533 *dev) { struct pn533_config_max_retries max_retries; struct pn533_config_timing timing; u8 pasori_cfg[3] = {0x08, 0x01, 0x08}; int rc; switch (dev->device_type) { case PN533_DEVICE_STD: case PN533_DEVICE_PASORI: case PN533_DEVICE_ACR122U: case PN533_DEVICE_PN532: case PN533_DEVICE_PN532_AUTOPOLL: max_retries.mx_rty_atr = 0x2; max_retries.mx_rty_psl = 0x1; max_retries.mx_rty_passive_act = PN533_CONFIG_MAX_RETRIES_NO_RETRY; timing.rfu = PN533_CONFIG_TIMING_102; timing.atr_res_timeout = PN533_CONFIG_TIMING_102; timing.dep_timeout = PN533_CONFIG_TIMING_204; break; default: nfc_err(dev->dev, "Unknown device type %d\n", dev->device_type); return -EINVAL; } rc = pn533_set_configuration(dev, PN533_CFGITEM_MAX_RETRIES, (u8 *)&max_retries, sizeof(max_retries)); if (rc) { nfc_err(dev->dev, "Error on setting MAX_RETRIES config\n"); return rc; } rc = pn533_set_configuration(dev, PN533_CFGITEM_TIMING, (u8 *)&timing, sizeof(timing)); if (rc) { nfc_err(dev->dev, "Error on setting RF timings\n"); return rc; } switch (dev->device_type) { case PN533_DEVICE_STD: case PN533_DEVICE_PN532: case PN533_DEVICE_PN532_AUTOPOLL: break; case PN533_DEVICE_PASORI: pn533_pasori_fw_reset(dev); rc = pn533_set_configuration(dev, PN533_CFGITEM_PASORI, pasori_cfg, 3); if (rc) { nfc_err(dev->dev, "Error while setting PASORI config\n"); return rc; } pn533_pasori_fw_reset(dev); break; } return 0; } int pn533_finalize_setup(struct pn533 *dev) { struct pn533_fw_version fw_ver; int rc; memset(&fw_ver, 0, sizeof(fw_ver)); rc = pn533_get_firmware_version(dev, &fw_ver); if (rc) { nfc_err(dev->dev, "Unable to get FW version\n"); return rc; } nfc_info(dev->dev, "NXP PN5%02X firmware ver %d.%d now attached\n", fw_ver.ic, fw_ver.ver, fw_ver.rev); rc = pn533_setup(dev); if (rc) return rc; return 0; } EXPORT_SYMBOL_GPL(pn533_finalize_setup); struct pn533 *pn53x_common_init(u32 device_type, enum pn533_protocol_type protocol_type, void *phy, const struct pn533_phy_ops *phy_ops, struct pn533_frame_ops *fops, struct device *dev) { struct pn533 *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return ERR_PTR(-ENOMEM); priv->phy = phy; priv->phy_ops = phy_ops; priv->dev = dev; if (fops != NULL) priv->ops = fops; else priv->ops = &pn533_std_frame_ops; priv->protocol_type = protocol_type; priv->device_type = device_type; mutex_init(&priv->cmd_lock); INIT_WORK(&priv->cmd_work, pn533_wq_cmd); INIT_WORK(&priv->cmd_complete_work, pn533_wq_cmd_complete); INIT_WORK(&priv->mi_rx_work, pn533_wq_mi_recv); INIT_WORK(&priv->mi_tx_work, pn533_wq_mi_send); INIT_WORK(&priv->tg_work, pn533_wq_tg_get_data); INIT_WORK(&priv->mi_tm_rx_work, pn533_wq_tm_mi_recv); INIT_WORK(&priv->mi_tm_tx_work, pn533_wq_tm_mi_send); INIT_DELAYED_WORK(&priv->poll_work, pn533_wq_poll); INIT_WORK(&priv->rf_work, pn533_wq_rf); priv->wq = alloc_ordered_workqueue("pn533", 0); if (priv->wq == NULL) goto error; timer_setup(&priv->listen_timer, pn533_listen_mode_timer, 0); skb_queue_head_init(&priv->resp_q); skb_queue_head_init(&priv->fragment_skb); INIT_LIST_HEAD(&priv->cmd_queue); return priv; error: kfree(priv); return ERR_PTR(-ENOMEM); } EXPORT_SYMBOL_GPL(pn53x_common_init); void pn53x_common_clean(struct pn533 *priv) { struct pn533_cmd *cmd, *n; /* delete the timer before cleaning up the worker */ timer_shutdown_sync(&priv->listen_timer); flush_delayed_work(&priv->poll_work);
destroy_workqueue(priv->wq); skb_queue_purge(&priv->resp_q); list_for_each_entry_safe(cmd, n, &priv->cmd_queue, queue) { list_del(&cmd->queue); kfree(cmd); } kfree(priv); } EXPORT_SYMBOL_GPL(pn53x_common_clean); int pn532_i2c_nfc_alloc(struct pn533 *priv, u32 protocols, struct device *parent) { priv->nfc_dev = nfc_allocate_device(&pn533_nfc_ops, protocols, priv->ops->tx_header_len + PN533_CMD_DATAEXCH_HEAD_LEN, priv->ops->tx_tail_len); if (!priv->nfc_dev) return -ENOMEM; nfc_set_parent_dev(priv->nfc_dev, parent); nfc_set_drvdata(priv->nfc_dev, priv); return 0; } EXPORT_SYMBOL_GPL(pn532_i2c_nfc_alloc); int pn53x_register_nfc(struct pn533 *priv, u32 protocols, struct device *parent) { int rc; rc = pn532_i2c_nfc_alloc(priv, protocols, parent); if (rc) return rc; rc = nfc_register_device(priv->nfc_dev); if (rc) nfc_free_device(priv->nfc_dev); return rc; } EXPORT_SYMBOL_GPL(pn53x_register_nfc); void pn53x_unregister_nfc(struct pn533 *priv) { nfc_unregister_device(priv->nfc_dev); nfc_free_device(priv->nfc_dev); } EXPORT_SYMBOL_GPL(pn53x_unregister_nfc); MODULE_AUTHOR("Lauro Ramos Venancio <[email protected]>"); MODULE_AUTHOR("Aloisio Almeida Jr <[email protected]>"); MODULE_AUTHOR("Waldemar Rymarkiewicz <[email protected]>"); MODULE_DESCRIPTION("PN533 driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
linux-master
drivers/nfc/pn533/pn533.c
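The pn533 core above is transport-agnostic: pn53x_common_init() takes a struct pn533_phy_ops vtable plus opaque PHY data, pn533_finalize_setup() reads the firmware version and applies the RF configuration, and pn53x_register_nfc() exposes the device to the NFC core; received frames come back in through pn533_recv_frame(). A minimal sketch of how a hypothetical transport would attach, using only the entry points shown above (the demo_* names are illustrative, not part of the driver):

#include <linux/err.h>
#include "pn533.h"

/* Placeholder PHY callbacks a real transport would implement. */
static int demo_send_frame(struct pn533 *dev, struct sk_buff *out);
static int demo_send_ack(struct pn533 *dev, gfp_t flags);
static void demo_abort_cmd(struct pn533 *dev, gfp_t flags);

static const struct pn533_phy_ops demo_phy_ops = {
	.send_frame = demo_send_frame,	/* push one framed command to the bus */
	.send_ack = demo_send_ack,	/* 6-byte ACK, also cancels a command */
	.abort_cmd = demo_abort_cmd,
};

static int demo_attach(struct device *parent, void *bus_priv)
{
	struct pn533 *priv;
	int err;

	priv = pn53x_common_init(PN533_DEVICE_PN532, PN533_PROTO_REQ_ACK_RESP,
				 bus_priv, &demo_phy_ops, NULL /* std frames */,
				 parent);
	if (IS_ERR(priv))
		return PTR_ERR(priv);

	err = pn533_finalize_setup(priv);	/* FW version + RF config */
	if (!err)
		err = pn53x_register_nfc(priv, PN533_NO_TYPE_B_PROTOCOLS,
					 parent);
	if (err)
		pn53x_common_clean(priv);
	return err;
}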
// SPDX-License-Identifier: GPL-2.0+ /* * Driver for NXP PN532 NFC Chip - UART transport layer * * Copyright (C) 2018 Lemonage Software GmbH * Author: Lars Pöschel <[email protected]> * All rights reserved. */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/nfc.h> #include <linux/netdevice.h> #include <linux/of.h> #include <linux/serdev.h> #include "pn533.h" #define PN532_UART_SKB_BUFF_LEN (PN533_CMD_DATAEXCH_DATA_MAXLEN * 2) enum send_wakeup { PN532_SEND_NO_WAKEUP = 0, PN532_SEND_WAKEUP, PN532_SEND_LAST_WAKEUP, }; struct pn532_uart_phy { struct serdev_device *serdev; struct sk_buff *recv_skb; struct pn533 *priv; /* * The send_wakeup variable controls whether we need to send a wakeup * request to the pn532 chip prior to our actual command. There is a * small probability of a race condition. We decided not to protect * the variable with a mutex, as the worst that could happen is that * we send a wakeup to a chip that is already awake. This does not * hurt; it is a no-op to the chip. */ enum send_wakeup send_wakeup; struct timer_list cmd_timeout; struct sk_buff *cur_out_buf; }; static int pn532_uart_send_frame(struct pn533 *dev, struct sk_buff *out) { /* wakeup sequence and dummy bytes for waiting time */ static const u8 wakeup[] = { 0x55, 0x55, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}; struct pn532_uart_phy *pn532 = dev->phy; int err; print_hex_dump_debug("PN532_uart TX: ", DUMP_PREFIX_NONE, 16, 1, out->data, out->len, false); pn532->cur_out_buf = out; if (pn532->send_wakeup) { err = serdev_device_write(pn532->serdev, wakeup, sizeof(wakeup), MAX_SCHEDULE_TIMEOUT); if (err < 0) return err; } if (pn532->send_wakeup == PN532_SEND_LAST_WAKEUP) pn532->send_wakeup = PN532_SEND_NO_WAKEUP; err = serdev_device_write(pn532->serdev, out->data, out->len, MAX_SCHEDULE_TIMEOUT); if (err < 0) return err; mod_timer(&pn532->cmd_timeout, HZ / 40 + jiffies); return 0; } static int pn532_uart_send_ack(struct pn533 *dev, gfp_t flags) { /* spec 7.1.1.3: Preamble, SoPC (2), ACK Code (2), Postamble */ static const u8 ack[PN533_STD_FRAME_ACK_SIZE] = { 0x00, 0x00, 0xff, 0x00, 0xff, 0x00}; struct pn532_uart_phy *pn532 = dev->phy; int err; err = serdev_device_write(pn532->serdev, ack, sizeof(ack), MAX_SCHEDULE_TIMEOUT); if (err < 0) return err; return 0; } static void pn532_uart_abort_cmd(struct pn533 *dev, gfp_t flags) { /* An ack will cancel the last issued command */ pn532_uart_send_ack(dev, flags); /* schedule cmd_complete_work to finish current command execution */ pn533_recv_frame(dev, NULL, -ENOENT); } static int pn532_dev_up(struct pn533 *dev) { struct pn532_uart_phy *pn532 = dev->phy; int ret = 0; ret = serdev_device_open(pn532->serdev); if (ret) return ret; pn532->send_wakeup = PN532_SEND_LAST_WAKEUP; return ret; } static int pn532_dev_down(struct pn533 *dev) { struct pn532_uart_phy *pn532 = dev->phy; serdev_device_close(pn532->serdev); pn532->send_wakeup = PN532_SEND_WAKEUP; return 0; } static const struct pn533_phy_ops uart_phy_ops = { .send_frame = pn532_uart_send_frame, .send_ack = pn532_uart_send_ack, .abort_cmd = pn532_uart_abort_cmd, .dev_up = pn532_dev_up, .dev_down = pn532_dev_down, }; static void pn532_cmd_timeout(struct timer_list *t) { struct pn532_uart_phy *dev = from_timer(dev, t, cmd_timeout); pn532_uart_send_frame(dev->priv, dev->cur_out_buf); } /* * Scans the buffer for a pn532 frame. It is not checked here whether the * frame is really valid; that is done later with pn533_rx_frame_is_valid.
 * This is useful for malformed or erroneously transmitted frames. Adjusts * the buffer position to where the frame starts, since pn533_recv_frame * expects a well-formed frame. */ static int pn532_uart_rx_is_frame(struct sk_buff *skb) { struct pn533_std_frame *std; struct pn533_ext_frame *ext; u16 frame_len; int i; for (i = 0; i + PN533_STD_FRAME_ACK_SIZE <= skb->len; i++) { std = (struct pn533_std_frame *)&skb->data[i]; /* search start code */ if (std->start_frame != cpu_to_be16(PN533_STD_FRAME_SOF)) continue; /* frame type */ switch (std->datalen) { case PN533_FRAME_DATALEN_ACK: if (std->datalen_checksum == 0xff) { skb_pull(skb, i); return 1; } break; case PN533_FRAME_DATALEN_ERROR: if ((std->datalen_checksum == 0xff) && (skb->len >= PN533_STD_ERROR_FRAME_SIZE)) { skb_pull(skb, i); return 1; } break; case PN533_FRAME_DATALEN_EXTENDED: ext = (struct pn533_ext_frame *)&skb->data[i]; frame_len = be16_to_cpu(ext->datalen); if (skb->len >= frame_len + sizeof(struct pn533_ext_frame) + 2 /* CKS + Postamble */) { skb_pull(skb, i); return 1; } break; default: /* normal information frame */ frame_len = std->datalen; if (skb->len >= frame_len + sizeof(struct pn533_std_frame) + 2 /* CKS + Postamble */) { skb_pull(skb, i); return 1; } break; } } return 0; } static int pn532_receive_buf(struct serdev_device *serdev, const unsigned char *data, size_t count) { struct pn532_uart_phy *dev = serdev_device_get_drvdata(serdev); size_t i; del_timer(&dev->cmd_timeout); for (i = 0; i < count; i++) { skb_put_u8(dev->recv_skb, *data++); if (!pn532_uart_rx_is_frame(dev->recv_skb)) continue; pn533_recv_frame(dev->priv, dev->recv_skb, 0); dev->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, GFP_KERNEL); if (!dev->recv_skb) return 0; } return i; } static const struct serdev_device_ops pn532_serdev_ops = { .receive_buf = pn532_receive_buf, .write_wakeup = serdev_device_write_wakeup, }; static const struct of_device_id pn532_uart_of_match[] = { { .compatible = "nxp,pn532", }, {}, }; MODULE_DEVICE_TABLE(of, pn532_uart_of_match); static int pn532_uart_probe(struct serdev_device *serdev) { struct pn532_uart_phy *pn532; struct pn533 *priv; int err; err = -ENOMEM; pn532 = kzalloc(sizeof(*pn532), GFP_KERNEL); if (!pn532) goto err_exit; pn532->recv_skb = alloc_skb(PN532_UART_SKB_BUFF_LEN, GFP_KERNEL); if (!pn532->recv_skb) goto err_free; pn532->serdev = serdev; serdev_device_set_drvdata(serdev, pn532); serdev_device_set_client_ops(serdev, &pn532_serdev_ops); err = serdev_device_open(serdev); if (err) { dev_err(&serdev->dev, "Unable to open device\n"); goto err_skb; } err = serdev_device_set_baudrate(serdev, 115200); if (err != 115200) { err = -EINVAL; goto err_serdev; } serdev_device_set_flow_control(serdev, false); pn532->send_wakeup = PN532_SEND_WAKEUP; timer_setup(&pn532->cmd_timeout, pn532_cmd_timeout, 0); priv = pn53x_common_init(PN533_DEVICE_PN532_AUTOPOLL, PN533_PROTO_REQ_ACK_RESP, pn532, &uart_phy_ops, NULL, &pn532->serdev->dev); if (IS_ERR(priv)) { err = PTR_ERR(priv); goto err_serdev; } pn532->priv = priv; err = pn533_finalize_setup(pn532->priv); if (err) goto err_clean; serdev_device_close(serdev); err = pn53x_register_nfc(priv, PN533_NO_TYPE_B_PROTOCOLS, &serdev->dev); if (err) { pn53x_common_clean(pn532->priv); goto err_skb; } return err; err_clean: pn53x_common_clean(pn532->priv); err_serdev: serdev_device_close(serdev); err_skb: kfree_skb(pn532->recv_skb); err_free: kfree(pn532); err_exit: return err; } static void pn532_uart_remove(struct serdev_device *serdev) { struct pn532_uart_phy *pn532 =
serdev_device_get_drvdata(serdev); pn53x_unregister_nfc(pn532->priv); serdev_device_close(serdev); pn53x_common_clean(pn532->priv); timer_shutdown_sync(&pn532->cmd_timeout); kfree_skb(pn532->recv_skb); kfree(pn532); } static struct serdev_device_driver pn532_uart_driver = { .probe = pn532_uart_probe, .remove = pn532_uart_remove, .driver = { .name = "pn532_uart", .of_match_table = pn532_uart_of_match, }, }; module_serdev_device_driver(pn532_uart_driver); MODULE_AUTHOR("Lars Pöschel <[email protected]>"); MODULE_DESCRIPTION("PN532 UART driver"); MODULE_LICENSE("GPL");
linux-master
drivers/nfc/pn533/uart.c
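The UART driver accumulates bytes into recv_skb and re-runs pn532_uart_rx_is_frame() on every byte; for a normal information frame the completeness rule it applies is fixed header plus LEN payload bytes plus the trailing checksum and postamble. An illustrative restatement of that rule (a demo_ helper, not driver code; struct pn533_std_frame comes from pn533.h, and its flexible data[] member is excluded from sizeof):

/* Completeness check for a standard information frame. */
static bool demo_std_frame_complete(const struct pn533_std_frame *std,
				    size_t bytes_received)
{
	/*
	 * sizeof(*std) covers preamble, 2-byte start code, LEN and LCS;
	 * the +2 covers the data checksum (DCS) and the postamble byte.
	 */
	return bytes_received >= sizeof(*std) + std->datalen + 2;
}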
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell NFC-over-USB driver: USB interface related functions * * Copyright (C) 2014, Marvell International Ltd. */ #include <linux/module.h> #include <linux/usb.h> #include <linux/nfc.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include "nfcmrvl.h" static struct usb_device_id nfcmrvl_table[] = { { USB_DEVICE_AND_INTERFACE_INFO(0x1286, 0x2046, USB_CLASS_VENDOR_SPEC, 4, 1) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, nfcmrvl_table); #define NFCMRVL_USB_BULK_RUNNING 1 #define NFCMRVL_USB_SUSPENDING 2 struct nfcmrvl_usb_drv_data { struct usb_device *udev; struct usb_interface *intf; unsigned long flags; struct work_struct waker; struct usb_anchor tx_anchor; struct usb_anchor bulk_anchor; struct usb_anchor deferred; int tx_in_flight; /* protects tx_in_flight */ spinlock_t txlock; struct usb_endpoint_descriptor *bulk_tx_ep; struct usb_endpoint_descriptor *bulk_rx_ep; int suspend_count; struct nfcmrvl_private *priv; }; static int nfcmrvl_inc_tx(struct nfcmrvl_usb_drv_data *drv_data) { unsigned long flags; int rv; spin_lock_irqsave(&drv_data->txlock, flags); rv = test_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags); if (!rv) drv_data->tx_in_flight++; spin_unlock_irqrestore(&drv_data->txlock, flags); return rv; } static void nfcmrvl_bulk_complete(struct urb *urb) { struct nfcmrvl_usb_drv_data *drv_data = urb->context; int err; dev_dbg(&drv_data->udev->dev, "urb %p status %d count %d\n", urb, urb->status, urb->actual_length); if (!test_bit(NFCMRVL_NCI_RUNNING, &drv_data->flags)) return; if (!urb->status) { struct sk_buff *skb; skb = nci_skb_alloc(drv_data->priv->ndev, urb->actual_length, GFP_ATOMIC); if (!skb) { nfc_err(&drv_data->udev->dev, "failed to alloc mem\n"); } else { skb_put_data(skb, urb->transfer_buffer, urb->actual_length); if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0) nfc_err(&drv_data->udev->dev, "corrupted Rx packet\n"); } } if (!test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags)) return; usb_anchor_urb(urb, &drv_data->bulk_anchor); usb_mark_last_busy(drv_data->udev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { /* -EPERM: urb is being killed; * -ENODEV: device got disconnected */ if (err != -EPERM && err != -ENODEV) nfc_err(&drv_data->udev->dev, "urb %p failed to resubmit (%d)\n", urb, -err); usb_unanchor_urb(urb); } } static int nfcmrvl_submit_bulk_urb(struct nfcmrvl_usb_drv_data *drv_data, gfp_t mem_flags) { struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size = NFCMRVL_NCI_MAX_EVENT_SIZE; if (!drv_data->bulk_rx_ep) return -ENODEV; urb = usb_alloc_urb(0, mem_flags); if (!urb) return -ENOMEM; buf = kmalloc(size, mem_flags); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvbulkpipe(drv_data->udev, drv_data->bulk_rx_ep->bEndpointAddress); usb_fill_bulk_urb(urb, drv_data->udev, pipe, buf, size, nfcmrvl_bulk_complete, drv_data); urb->transfer_flags |= URB_FREE_BUFFER; usb_mark_last_busy(drv_data->udev); usb_anchor_urb(urb, &drv_data->bulk_anchor); err = usb_submit_urb(urb, mem_flags); if (err) { if (err != -EPERM && err != -ENODEV) nfc_err(&drv_data->udev->dev, "urb %p submission failed (%d)\n", urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static void nfcmrvl_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; struct nci_dev *ndev = (struct nci_dev *)skb->dev; struct nfcmrvl_private *priv = nci_get_drvdata(ndev); struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data; unsigned long flags; nfc_info(priv->dev, "urb %p status %d count 
%d\n", urb, urb->status, urb->actual_length); spin_lock_irqsave(&drv_data->txlock, flags); drv_data->tx_in_flight--; spin_unlock_irqrestore(&drv_data->txlock, flags); kfree(urb->setup_packet); kfree_skb(skb); } static int nfcmrvl_usb_nci_open(struct nfcmrvl_private *priv) { struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data; int err; err = usb_autopm_get_interface(drv_data->intf); if (err) return err; drv_data->intf->needs_remote_wakeup = 1; err = nfcmrvl_submit_bulk_urb(drv_data, GFP_KERNEL); if (err) goto failed; set_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags); nfcmrvl_submit_bulk_urb(drv_data, GFP_KERNEL); usb_autopm_put_interface(drv_data->intf); return 0; failed: usb_autopm_put_interface(drv_data->intf); return err; } static void nfcmrvl_usb_stop_traffic(struct nfcmrvl_usb_drv_data *drv_data) { usb_kill_anchored_urbs(&drv_data->bulk_anchor); } static int nfcmrvl_usb_nci_close(struct nfcmrvl_private *priv) { struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data; int err; cancel_work_sync(&drv_data->waker); clear_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags); nfcmrvl_usb_stop_traffic(drv_data); usb_kill_anchored_urbs(&drv_data->tx_anchor); err = usb_autopm_get_interface(drv_data->intf); if (err) goto failed; drv_data->intf->needs_remote_wakeup = 0; usb_autopm_put_interface(drv_data->intf); failed: usb_scuttle_anchored_urbs(&drv_data->deferred); return 0; } static int nfcmrvl_usb_nci_send(struct nfcmrvl_private *priv, struct sk_buff *skb) { struct nfcmrvl_usb_drv_data *drv_data = priv->drv_data; struct urb *urb; unsigned int pipe; int err; if (!drv_data->bulk_tx_ep) return -ENODEV; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; pipe = usb_sndbulkpipe(drv_data->udev, drv_data->bulk_tx_ep->bEndpointAddress); usb_fill_bulk_urb(urb, drv_data->udev, pipe, skb->data, skb->len, nfcmrvl_tx_complete, skb); err = nfcmrvl_inc_tx(drv_data); if (err) { usb_anchor_urb(urb, &drv_data->deferred); schedule_work(&drv_data->waker); err = 0; goto done; } usb_anchor_urb(urb, &drv_data->tx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { if (err != -EPERM && err != -ENODEV) nfc_err(&drv_data->udev->dev, "urb %p submission failed (%d)\n", urb, -err); kfree(urb->setup_packet); usb_unanchor_urb(urb); } else { usb_mark_last_busy(drv_data->udev); } done: usb_free_urb(urb); return err; } static const struct nfcmrvl_if_ops usb_ops = { .nci_open = nfcmrvl_usb_nci_open, .nci_close = nfcmrvl_usb_nci_close, .nci_send = nfcmrvl_usb_nci_send, }; static void nfcmrvl_waker(struct work_struct *work) { struct nfcmrvl_usb_drv_data *drv_data = container_of(work, struct nfcmrvl_usb_drv_data, waker); int err; err = usb_autopm_get_interface(drv_data->intf); if (err) return; usb_autopm_put_interface(drv_data->intf); } static int nfcmrvl_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct nfcmrvl_usb_drv_data *drv_data; struct nfcmrvl_private *priv; int i; struct usb_device *udev = interface_to_usbdev(intf); struct nfcmrvl_platform_data config; /* No configuration for USB */ memset(&config, 0, sizeof(config)); config.reset_n_io = -EINVAL; nfc_info(&udev->dev, "intf %p id %p\n", intf, id); drv_data = devm_kzalloc(&intf->dev, sizeof(*drv_data), GFP_KERNEL); if (!drv_data) return -ENOMEM; for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { struct usb_endpoint_descriptor *ep_desc; ep_desc = &intf->cur_altsetting->endpoint[i].desc; if (!drv_data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { drv_data->bulk_tx_ep = ep_desc; } else if (!drv_data->bulk_rx_ep && 
usb_endpoint_is_bulk_in(ep_desc)) { drv_data->bulk_rx_ep = ep_desc; } } if (!drv_data->bulk_tx_ep || !drv_data->bulk_rx_ep) return -ENODEV; drv_data->udev = udev; drv_data->intf = intf; INIT_WORK(&drv_data->waker, nfcmrvl_waker); spin_lock_init(&drv_data->txlock); init_usb_anchor(&drv_data->tx_anchor); init_usb_anchor(&drv_data->bulk_anchor); init_usb_anchor(&drv_data->deferred); priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_USB, drv_data, &usb_ops, &intf->dev, &config); if (IS_ERR(priv)) return PTR_ERR(priv); drv_data->priv = priv; drv_data->priv->support_fw_dnld = false; usb_set_intfdata(intf, drv_data); return 0; } static void nfcmrvl_disconnect(struct usb_interface *intf) { struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf); if (!drv_data) return; nfc_info(&drv_data->udev->dev, "intf %p\n", intf); nfcmrvl_nci_unregister_dev(drv_data->priv); usb_set_intfdata(drv_data->intf, NULL); } #ifdef CONFIG_PM static int nfcmrvl_suspend(struct usb_interface *intf, pm_message_t message) { struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf); nfc_info(&drv_data->udev->dev, "intf %p\n", intf); if (drv_data->suspend_count++) return 0; spin_lock_irq(&drv_data->txlock); if (!(PMSG_IS_AUTO(message) && drv_data->tx_in_flight)) { set_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags); spin_unlock_irq(&drv_data->txlock); } else { spin_unlock_irq(&drv_data->txlock); drv_data->suspend_count--; return -EBUSY; } nfcmrvl_usb_stop_traffic(drv_data); usb_kill_anchored_urbs(&drv_data->tx_anchor); return 0; } static void nfcmrvl_play_deferred(struct nfcmrvl_usb_drv_data *drv_data) { struct urb *urb; int err; while ((urb = usb_get_from_anchor(&drv_data->deferred))) { usb_anchor_urb(urb, &drv_data->tx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { kfree(urb->setup_packet); usb_unanchor_urb(urb); usb_free_urb(urb); break; } drv_data->tx_in_flight++; usb_free_urb(urb); } /* Clean up the remaining deferred urbs. */ while ((urb = usb_get_from_anchor(&drv_data->deferred))) { kfree(urb->setup_packet); usb_free_urb(urb); } } static int nfcmrvl_resume(struct usb_interface *intf) { struct nfcmrvl_usb_drv_data *drv_data = usb_get_intfdata(intf); int err = 0; nfc_info(&drv_data->udev->dev, "intf %p\n", intf); if (--drv_data->suspend_count) return 0; if (!test_bit(NFCMRVL_NCI_RUNNING, &drv_data->flags)) goto done; if (test_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags)) { err = nfcmrvl_submit_bulk_urb(drv_data, GFP_NOIO); if (err) { clear_bit(NFCMRVL_USB_BULK_RUNNING, &drv_data->flags); goto failed; } nfcmrvl_submit_bulk_urb(drv_data, GFP_NOIO); } spin_lock_irq(&drv_data->txlock); nfcmrvl_play_deferred(drv_data); clear_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags); spin_unlock_irq(&drv_data->txlock); return 0; failed: usb_scuttle_anchored_urbs(&drv_data->deferred); done: spin_lock_irq(&drv_data->txlock); clear_bit(NFCMRVL_USB_SUSPENDING, &drv_data->flags); spin_unlock_irq(&drv_data->txlock); return err; } #endif static struct usb_driver nfcmrvl_usb_driver = { .name = "nfcmrvl", .probe = nfcmrvl_probe, .disconnect = nfcmrvl_disconnect, #ifdef CONFIG_PM .suspend = nfcmrvl_suspend, .resume = nfcmrvl_resume, .reset_resume = nfcmrvl_resume, #endif .id_table = nfcmrvl_table, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, .soft_unbind = 1, }; module_usb_driver(nfcmrvl_usb_driver); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell NFC-over-USB driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/nfc/nfcmrvl/usb.c
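Throughout the USB driver above, URBs are anchored before submission so that usb_kill_anchored_urbs() can reach them if the device goes away, and unanchored again when submission fails. The recurring idiom, condensed into a hypothetical helper (a sketch, not driver code):

#include <linux/usb.h>

/* Anchor first, submit second, undo the anchor on failure. */
static int demo_submit_anchored(struct urb *urb, struct usb_anchor *anchor,
				gfp_t mem_flags)
{
	int err;

	usb_anchor_urb(urb, anchor);	/* visible to usb_kill_anchored_urbs() */
	err = usb_submit_urb(urb, mem_flags);
	if (err)
		usb_unanchor_urb(urb);	/* never submitted, take it back off */
	return err;
}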
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell NFC-over-SPI driver: SPI interface related functions * * Copyright (C) 2015, Marvell International Ltd. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/nfc.h> #include <linux/of_irq.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include <linux/spi/spi.h> #include "nfcmrvl.h" #define SPI_WAIT_HANDSHAKE 1 struct nfcmrvl_spi_drv_data { unsigned long flags; struct spi_device *spi; struct nci_spi *nci_spi; struct completion handshake_completion; struct nfcmrvl_private *priv; }; static irqreturn_t nfcmrvl_spi_int_irq_thread_fn(int irq, void *drv_data_ptr) { struct nfcmrvl_spi_drv_data *drv_data = drv_data_ptr; struct sk_buff *skb; /* * Special case where we are waiting for SPI_INT deassertion to start a * transfer. */ if (test_and_clear_bit(SPI_WAIT_HANDSHAKE, &drv_data->flags)) { complete(&drv_data->handshake_completion); return IRQ_HANDLED; } /* Normal case, SPI_INT deasserted by slave to trigger a master read */ skb = nci_spi_read(drv_data->nci_spi); if (!skb) { nfc_err(&drv_data->spi->dev, "failed to read spi packet"); return IRQ_HANDLED; } if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0) nfc_err(&drv_data->spi->dev, "corrupted RX packet"); return IRQ_HANDLED; } static int nfcmrvl_spi_nci_open(struct nfcmrvl_private *priv) { return 0; } static int nfcmrvl_spi_nci_close(struct nfcmrvl_private *priv) { return 0; } static int nfcmrvl_spi_nci_send(struct nfcmrvl_private *priv, struct sk_buff *skb) { struct nfcmrvl_spi_drv_data *drv_data = priv->drv_data; int err; /* Reinit completion for slave handshake */ reinit_completion(&drv_data->handshake_completion); set_bit(SPI_WAIT_HANDSHAKE, &drv_data->flags); /* * Append a dummy byte at the end of SPI frame. This is due to a * specific DMA implementation in the controller */ skb_put(skb, 1); /* Send the SPI packet */ err = nci_spi_send(drv_data->nci_spi, &drv_data->handshake_completion, skb); if (err) nfc_err(priv->dev, "spi_send failed %d", err); return err; } static void nfcmrvl_spi_nci_update_config(struct nfcmrvl_private *priv, const void *param) { struct nfcmrvl_spi_drv_data *drv_data = priv->drv_data; const struct nfcmrvl_fw_spi_config *config = param; drv_data->nci_spi->xfer_speed_hz = config->clk; } static const struct nfcmrvl_if_ops spi_ops = { .nci_open = nfcmrvl_spi_nci_open, .nci_close = nfcmrvl_spi_nci_close, .nci_send = nfcmrvl_spi_nci_send, .nci_update_config = nfcmrvl_spi_nci_update_config, }; static int nfcmrvl_spi_parse_dt(struct device_node *node, struct nfcmrvl_platform_data *pdata) { int ret; ret = nfcmrvl_parse_dt(node, pdata); if (ret < 0) { pr_err("Failed to get generic entries\n"); return ret; } ret = irq_of_parse_and_map(node, 0); if (!ret) { pr_err("Unable to get irq\n"); return -EINVAL; } pdata->irq = ret; return 0; } static int nfcmrvl_spi_probe(struct spi_device *spi) { const struct nfcmrvl_platform_data *pdata; struct nfcmrvl_platform_data config; struct nfcmrvl_spi_drv_data *drv_data; int ret = 0; drv_data = devm_kzalloc(&spi->dev, sizeof(*drv_data), GFP_KERNEL); if (!drv_data) return -ENOMEM; drv_data->spi = spi; drv_data->priv = NULL; spi_set_drvdata(spi, drv_data); pdata = spi->dev.platform_data; if (!pdata && spi->dev.of_node) if (nfcmrvl_spi_parse_dt(spi->dev.of_node, &config) == 0) pdata = &config; if (!pdata) return -EINVAL; ret = devm_request_threaded_irq(&drv_data->spi->dev, pdata->irq, NULL, nfcmrvl_spi_int_irq_thread_fn, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "nfcmrvl_spi_int", drv_data); if (ret < 0) { 
nfc_err(&drv_data->spi->dev, "Unable to register IRQ handler"); return -ENODEV; } drv_data->priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_SPI, drv_data, &spi_ops, &drv_data->spi->dev, pdata); if (IS_ERR(drv_data->priv)) return PTR_ERR(drv_data->priv); drv_data->priv->support_fw_dnld = true; drv_data->nci_spi = nci_spi_allocate_spi(drv_data->spi, 0, 10, drv_data->priv->ndev); /* Init completion for slave handshake */ init_completion(&drv_data->handshake_completion); return 0; } static void nfcmrvl_spi_remove(struct spi_device *spi) { struct nfcmrvl_spi_drv_data *drv_data = spi_get_drvdata(spi); nfcmrvl_nci_unregister_dev(drv_data->priv); } static const struct of_device_id of_nfcmrvl_spi_match[] __maybe_unused = { { .compatible = "marvell,nfc-spi", }, {}, }; MODULE_DEVICE_TABLE(of, of_nfcmrvl_spi_match); static const struct spi_device_id nfcmrvl_spi_id_table[] = { { "nfcmrvl_spi", 0 }, { } }; MODULE_DEVICE_TABLE(spi, nfcmrvl_spi_id_table); static struct spi_driver nfcmrvl_spi_driver = { .probe = nfcmrvl_spi_probe, .remove = nfcmrvl_spi_remove, .id_table = nfcmrvl_spi_id_table, .driver = { .name = "nfcmrvl_spi", .owner = THIS_MODULE, .of_match_table = of_match_ptr(of_nfcmrvl_spi_match), }, }; module_spi_driver(nfcmrvl_spi_driver); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell NFC-over-SPI driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/nfc/nfcmrvl/spi.c
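The nci_update_config hook above lets a caller retune the SPI link speed: it is handed a struct nfcmrvl_fw_spi_config whose clk field becomes nci_spi->xfer_speed_hz. A hedged sketch of driving that hook; demo_set_spi_clock is not a real function and the 4 MHz rate is an assumed example value:

/* Hypothetical caller of the nci_update_config hook shown above. */
static void demo_set_spi_clock(struct nfcmrvl_private *priv)
{
	struct nfcmrvl_fw_spi_config cfg = {
		.clk = 4000000,		/* assumed example rate, in Hz */
	};

	priv->if_ops->nci_update_config(priv, &cfg);
}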
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell NFC-over-I2C driver: I2C interface related functions * * Copyright (C) 2015, Marvell International Ltd. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/i2c.h> #include <linux/nfc.h> #include <linux/delay.h> #include <linux/of_irq.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include "nfcmrvl.h" struct nfcmrvl_i2c_drv_data { unsigned long flags; struct device *dev; struct i2c_client *i2c; struct nfcmrvl_private *priv; }; static int nfcmrvl_i2c_read(struct nfcmrvl_i2c_drv_data *drv_data, struct sk_buff **skb) { int ret; struct nci_ctrl_hdr nci_hdr; /* Read NCI header to know the payload size */ ret = i2c_master_recv(drv_data->i2c, (u8 *)&nci_hdr, NCI_CTRL_HDR_SIZE); if (ret != NCI_CTRL_HDR_SIZE) { nfc_err(&drv_data->i2c->dev, "cannot read NCI header\n"); return -EBADMSG; } *skb = nci_skb_alloc(drv_data->priv->ndev, nci_hdr.plen + NCI_CTRL_HDR_SIZE, GFP_KERNEL); if (!*skb) return -ENOMEM; /* Copy NCI header into the SKB */ skb_put_data(*skb, &nci_hdr, NCI_CTRL_HDR_SIZE); if (nci_hdr.plen) { /* Read the NCI payload */ ret = i2c_master_recv(drv_data->i2c, skb_put(*skb, nci_hdr.plen), nci_hdr.plen); if (ret != nci_hdr.plen) { nfc_err(&drv_data->i2c->dev, "Invalid frame payload length: %u (expected %u)\n", ret, nci_hdr.plen); kfree_skb(*skb); return -EBADMSG; } } return 0; } static irqreturn_t nfcmrvl_i2c_int_irq_thread_fn(int irq, void *drv_data_ptr) { struct nfcmrvl_i2c_drv_data *drv_data = drv_data_ptr; struct sk_buff *skb = NULL; int ret; if (!drv_data->priv) return IRQ_HANDLED; if (test_bit(NFCMRVL_PHY_ERROR, &drv_data->priv->flags)) return IRQ_HANDLED; ret = nfcmrvl_i2c_read(drv_data, &skb); switch (ret) { case -EREMOTEIO: set_bit(NFCMRVL_PHY_ERROR, &drv_data->priv->flags); break; case -ENOMEM: case -EBADMSG: nfc_err(&drv_data->i2c->dev, "read failed %d\n", ret); break; default: if (nfcmrvl_nci_recv_frame(drv_data->priv, skb) < 0) nfc_err(&drv_data->i2c->dev, "corrupted RX packet\n"); break; } return IRQ_HANDLED; } static int nfcmrvl_i2c_nci_open(struct nfcmrvl_private *priv) { struct nfcmrvl_i2c_drv_data *drv_data = priv->drv_data; if (!drv_data) return -ENODEV; return 0; } static int nfcmrvl_i2c_nci_close(struct nfcmrvl_private *priv) { return 0; } static int nfcmrvl_i2c_nci_send(struct nfcmrvl_private *priv, struct sk_buff *skb) { struct nfcmrvl_i2c_drv_data *drv_data = priv->drv_data; int ret; if (test_bit(NFCMRVL_PHY_ERROR, &priv->flags)) { kfree_skb(skb); return -EREMOTEIO; } ret = i2c_master_send(drv_data->i2c, skb->data, skb->len); /* Retry if chip was in standby */ if (ret == -EREMOTEIO) { nfc_info(drv_data->dev, "chip may sleep, retry\n"); usleep_range(6000, 10000); ret = i2c_master_send(drv_data->i2c, skb->data, skb->len); } if (ret >= 0) { if (ret != skb->len) { nfc_err(drv_data->dev, "Invalid length sent: %u (expected %u)\n", ret, skb->len); ret = -EREMOTEIO; } else ret = 0; } if (ret) { kfree_skb(skb); return ret; } consume_skb(skb); return 0; } static void nfcmrvl_i2c_nci_update_config(struct nfcmrvl_private *priv, const void *param) { } static const struct nfcmrvl_if_ops i2c_ops = { .nci_open = nfcmrvl_i2c_nci_open, .nci_close = nfcmrvl_i2c_nci_close, .nci_send = nfcmrvl_i2c_nci_send, .nci_update_config = nfcmrvl_i2c_nci_update_config, }; static int nfcmrvl_i2c_parse_dt(struct device_node *node, struct nfcmrvl_platform_data *pdata) { int ret; ret = nfcmrvl_parse_dt(node, pdata); if (ret < 0) { pr_err("Failed to get generic entries\n"); return ret; } if (of_property_read_bool(node, 
"i2c-int-falling")) pdata->irq_polarity = IRQF_TRIGGER_FALLING; else pdata->irq_polarity = IRQF_TRIGGER_RISING; ret = irq_of_parse_and_map(node, 0); if (!ret) { pr_err("Unable to get irq\n"); return -EINVAL; } pdata->irq = ret; return 0; } static int nfcmrvl_i2c_probe(struct i2c_client *client) { const struct nfcmrvl_platform_data *pdata; struct nfcmrvl_i2c_drv_data *drv_data; struct nfcmrvl_platform_data config; int ret; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { nfc_err(&client->dev, "Need I2C_FUNC_I2C\n"); return -ENODEV; } drv_data = devm_kzalloc(&client->dev, sizeof(*drv_data), GFP_KERNEL); if (!drv_data) return -ENOMEM; drv_data->i2c = client; drv_data->dev = &client->dev; drv_data->priv = NULL; i2c_set_clientdata(client, drv_data); pdata = client->dev.platform_data; if (!pdata && client->dev.of_node) if (nfcmrvl_i2c_parse_dt(client->dev.of_node, &config) == 0) pdata = &config; if (!pdata) return -EINVAL; /* Request the read IRQ */ ret = devm_request_threaded_irq(&drv_data->i2c->dev, pdata->irq, NULL, nfcmrvl_i2c_int_irq_thread_fn, pdata->irq_polarity | IRQF_ONESHOT, "nfcmrvl_i2c_int", drv_data); if (ret < 0) { nfc_err(&drv_data->i2c->dev, "Unable to register IRQ handler\n"); return ret; } drv_data->priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_I2C, drv_data, &i2c_ops, &drv_data->i2c->dev, pdata); if (IS_ERR(drv_data->priv)) return PTR_ERR(drv_data->priv); drv_data->priv->support_fw_dnld = true; return 0; } static void nfcmrvl_i2c_remove(struct i2c_client *client) { struct nfcmrvl_i2c_drv_data *drv_data = i2c_get_clientdata(client); nfcmrvl_nci_unregister_dev(drv_data->priv); } static const struct of_device_id of_nfcmrvl_i2c_match[] __maybe_unused = { { .compatible = "marvell,nfc-i2c", }, {}, }; MODULE_DEVICE_TABLE(of, of_nfcmrvl_i2c_match); static const struct i2c_device_id nfcmrvl_i2c_id_table[] = { { "nfcmrvl_i2c", 0 }, {} }; MODULE_DEVICE_TABLE(i2c, nfcmrvl_i2c_id_table); static struct i2c_driver nfcmrvl_i2c_driver = { .probe = nfcmrvl_i2c_probe, .id_table = nfcmrvl_i2c_id_table, .remove = nfcmrvl_i2c_remove, .driver = { .name = "nfcmrvl_i2c", .of_match_table = of_match_ptr(of_nfcmrvl_i2c_match), }, }; module_i2c_driver(nfcmrvl_i2c_driver); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell NFC-over-I2C driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/nfc/nfcmrvl/i2c.c
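nfcmrvl_i2c_nci_send() above retries exactly once on -EREMOTEIO, because a chip in standby NAKs the first I2C transfer and needs a few milliseconds to wake. The same idiom as a condensed, hypothetical helper (a sketch under the driver's 6-10 ms wake-up assumption, not driver code):

#include <linux/delay.h>
#include <linux/i2c.h>

/* Send len bytes, retrying one NAK that likely means "chip was asleep". */
static int demo_i2c_send_retry(struct i2c_client *i2c, const u8 *buf, int len)
{
	int ret = i2c_master_send(i2c, buf, len);

	if (ret == -EREMOTEIO) {	/* chip may have been in standby */
		usleep_range(6000, 10000);
		ret = i2c_master_send(i2c, buf, len);
	}
	if (ret < 0)
		return ret;
	return ret == len ? 0 : -EREMOTEIO;	/* short write -> error */
}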
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell NFC driver: major functions * * Copyright (C) 2014-2015 Marvell International Ltd. */ #include <linux/module.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/of_gpio.h> #include <linux/nfc.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include "nfcmrvl.h" static int nfcmrvl_nci_open(struct nci_dev *ndev) { struct nfcmrvl_private *priv = nci_get_drvdata(ndev); int err; if (test_and_set_bit(NFCMRVL_NCI_RUNNING, &priv->flags)) return 0; /* Reset possible fault of previous session */ clear_bit(NFCMRVL_PHY_ERROR, &priv->flags); err = priv->if_ops->nci_open(priv); if (err) clear_bit(NFCMRVL_NCI_RUNNING, &priv->flags); return err; } static int nfcmrvl_nci_close(struct nci_dev *ndev) { struct nfcmrvl_private *priv = nci_get_drvdata(ndev); if (!test_and_clear_bit(NFCMRVL_NCI_RUNNING, &priv->flags)) return 0; priv->if_ops->nci_close(priv); return 0; } static int nfcmrvl_nci_send(struct nci_dev *ndev, struct sk_buff *skb) { struct nfcmrvl_private *priv = nci_get_drvdata(ndev); nfc_info(priv->dev, "send entry, len %d\n", skb->len); skb->dev = (void *)ndev; if (priv->config.hci_muxed) { unsigned char *hdr; unsigned char len = skb->len; hdr = skb_push(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE); hdr[0] = NFCMRVL_HCI_COMMAND_CODE; hdr[1] = NFCMRVL_HCI_OGF; hdr[2] = NFCMRVL_HCI_OCF; hdr[3] = len; } return priv->if_ops->nci_send(priv, skb); } static int nfcmrvl_nci_setup(struct nci_dev *ndev) { __u8 val = 1; nci_set_config(ndev, NFCMRVL_PB_BAIL_OUT, 1, &val); return 0; } static int nfcmrvl_nci_fw_download(struct nci_dev *ndev, const char *firmware_name) { return nfcmrvl_fw_dnld_start(ndev, firmware_name); } static const struct nci_ops nfcmrvl_nci_ops = { .open = nfcmrvl_nci_open, .close = nfcmrvl_nci_close, .send = nfcmrvl_nci_send, .setup = nfcmrvl_nci_setup, .fw_download = nfcmrvl_nci_fw_download, }; struct nfcmrvl_private *nfcmrvl_nci_register_dev(enum nfcmrvl_phy phy, void *drv_data, const struct nfcmrvl_if_ops *ops, struct device *dev, const struct nfcmrvl_platform_data *pdata) { struct nfcmrvl_private *priv; int rc; int headroom; int tailroom; u32 protocols; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) return ERR_PTR(-ENOMEM); priv->drv_data = drv_data; priv->if_ops = ops; priv->dev = dev; priv->phy = phy; memcpy(&priv->config, pdata, sizeof(*pdata)); if (gpio_is_valid(priv->config.reset_n_io)) { rc = gpio_request_one(priv->config.reset_n_io, GPIOF_OUT_INIT_LOW, "nfcmrvl_reset_n"); if (rc < 0) { priv->config.reset_n_io = -EINVAL; nfc_err(dev, "failed to request reset_n io\n"); } } if (phy == NFCMRVL_PHY_SPI) { headroom = NCI_SPI_HDR_LEN; tailroom = 1; } else headroom = tailroom = 0; if (priv->config.hci_muxed) headroom += NFCMRVL_HCI_EVENT_HEADER_SIZE; protocols = NFC_PROTO_JEWEL_MASK | NFC_PROTO_MIFARE_MASK | NFC_PROTO_FELICA_MASK | NFC_PROTO_ISO14443_MASK | NFC_PROTO_ISO14443_B_MASK | NFC_PROTO_ISO15693_MASK | NFC_PROTO_NFC_DEP_MASK; priv->ndev = nci_allocate_device(&nfcmrvl_nci_ops, protocols, headroom, tailroom); if (!priv->ndev) { nfc_err(dev, "nci_allocate_device failed\n"); rc = -ENOMEM; goto error_free_gpio; } rc = nfcmrvl_fw_dnld_init(priv); if (rc) { nfc_err(dev, "failed to initialize FW download %d\n", rc); goto error_free_dev; } nci_set_drvdata(priv->ndev, priv); rc = nci_register_device(priv->ndev); if (rc) { nfc_err(dev, "nci_register_device failed %d\n", rc); goto error_fw_dnld_deinit; } /* Ensure that controller is powered off */ nfcmrvl_chip_halt(priv); nfc_info(dev, "registered with nci 
successfully\n"); return priv; error_fw_dnld_deinit: nfcmrvl_fw_dnld_deinit(priv); error_free_dev: nci_free_device(priv->ndev); error_free_gpio: if (gpio_is_valid(priv->config.reset_n_io)) gpio_free(priv->config.reset_n_io); kfree(priv); return ERR_PTR(rc); } EXPORT_SYMBOL_GPL(nfcmrvl_nci_register_dev); void nfcmrvl_nci_unregister_dev(struct nfcmrvl_private *priv) { struct nci_dev *ndev = priv->ndev; nci_unregister_device(ndev); if (priv->ndev->nfc_dev->fw_download_in_progress) nfcmrvl_fw_dnld_abort(priv); nfcmrvl_fw_dnld_deinit(priv); if (gpio_is_valid(priv->config.reset_n_io)) gpio_free(priv->config.reset_n_io); nci_free_device(ndev); kfree(priv); } EXPORT_SYMBOL_GPL(nfcmrvl_nci_unregister_dev); int nfcmrvl_nci_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb) { if (priv->config.hci_muxed) { if (skb->data[0] == NFCMRVL_HCI_EVENT_CODE && skb->data[1] == NFCMRVL_HCI_NFC_EVENT_CODE) { /* Data packet, let's extract NCI payload */ skb_pull(skb, NFCMRVL_HCI_EVENT_HEADER_SIZE); } else { /* Skip this packet */ kfree_skb(skb); return 0; } } if (priv->ndev->nfc_dev->fw_download_in_progress) { nfcmrvl_fw_dnld_recv_frame(priv, skb); return 0; } if (test_bit(NFCMRVL_NCI_RUNNING, &priv->flags)) nci_recv_frame(priv->ndev, skb); else { /* Drop this packet since nobody wants it */ kfree_skb(skb); return 0; } return 0; } EXPORT_SYMBOL_GPL(nfcmrvl_nci_recv_frame); void nfcmrvl_chip_reset(struct nfcmrvl_private *priv) { /* Reset possible fault of previous session */ clear_bit(NFCMRVL_PHY_ERROR, &priv->flags); if (gpio_is_valid(priv->config.reset_n_io)) { nfc_info(priv->dev, "reset the chip\n"); gpio_set_value(priv->config.reset_n_io, 0); usleep_range(5000, 10000); gpio_set_value(priv->config.reset_n_io, 1); } else nfc_info(priv->dev, "no reset available on this interface\n"); } void nfcmrvl_chip_halt(struct nfcmrvl_private *priv) { if (gpio_is_valid(priv->config.reset_n_io)) gpio_set_value(priv->config.reset_n_io, 0); } int nfcmrvl_parse_dt(struct device_node *node, struct nfcmrvl_platform_data *pdata) { int reset_n_io; reset_n_io = of_get_named_gpio(node, "reset-n-io", 0); if (reset_n_io < 0) { pr_info("no reset-n-io config\n"); } else if (!gpio_is_valid(reset_n_io)) { pr_err("invalid reset-n-io GPIO\n"); return reset_n_io; } pdata->reset_n_io = reset_n_io; pdata->hci_muxed = of_property_read_bool(node, "hci-muxed"); return 0; } EXPORT_SYMBOL_GPL(nfcmrvl_parse_dt); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell NFC driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/nfc/nfcmrvl/main.c
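Note on the HCI-muxed transport in main.c above: when config.hci_muxed is set, nfcmrvl_nci_send() pushes a 4-byte HCI command header (command code, OGF, OCF, payload length) in front of each NCI packet, and nfcmrvl_nci_recv_frame() strips the same header from incoming frames whose first two bytes match the event codes. Below is a minimal standalone sketch of the send-side wrap; the numeric header values are placeholders, since the real NFCMRVL_HCI_* constants are defined in nfcmrvl.h, which is not part of this dump.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MUX_HDR_SIZE 4		/* assumed NFCMRVL_HCI_EVENT_HEADER_SIZE */
#define MUX_CMD_CODE 0x01	/* placeholder for NFCMRVL_HCI_COMMAND_CODE */
#define MUX_OGF      0x81	/* placeholder for NFCMRVL_HCI_OGF */
#define MUX_OCF      0xFE	/* placeholder for NFCMRVL_HCI_OCF */

/* Prepend the HCI command header to an NCI payload, as nfcmrvl_nci_send()
 * does with skb_push() when hci_muxed is enabled. */
static size_t mux_wrap(uint8_t *out, const uint8_t *nci, size_t len)
{
	out[0] = MUX_CMD_CODE;
	out[1] = MUX_OGF;
	out[2] = MUX_OCF;
	out[3] = (uint8_t)len;	/* length of the NCI payload only */
	memcpy(out + MUX_HDR_SIZE, nci, len);
	return len + MUX_HDR_SIZE;
}

int main(void)
{
	const uint8_t nci_core_reset[] = { 0x20, 0x00, 0x01, 0x01 };
	uint8_t frame[64];
	size_t i, n = mux_wrap(frame, nci_core_reset, sizeof(nci_core_reset));

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("\n");
	return 0;
}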
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell NFC driver: Firmware downloader * * Copyright (C) 2015, Marvell International Ltd. */ #include <linux/module.h> #include <asm/unaligned.h> #include <linux/firmware.h> #include <linux/nfc.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include "nfcmrvl.h" #define FW_DNLD_TIMEOUT 15000 #define NCI_OP_PROPRIETARY_BOOT_CMD nci_opcode_pack(NCI_GID_PROPRIETARY, \ NCI_OP_PROP_BOOT_CMD) /* FW download states */ enum { STATE_RESET = 0, STATE_INIT, STATE_SET_REF_CLOCK, STATE_SET_HI_CONFIG, STATE_OPEN_LC, STATE_FW_DNLD, STATE_CLOSE_LC, STATE_BOOT }; enum { SUBSTATE_WAIT_COMMAND = 0, SUBSTATE_WAIT_ACK_CREDIT, SUBSTATE_WAIT_NACK_CREDIT, SUBSTATE_WAIT_DATA_CREDIT, }; /* * Patterns for responses */ static const uint8_t nci_pattern_core_reset_ntf[] = { 0x60, 0x00, 0x02, 0xA0, 0x01 }; static const uint8_t nci_pattern_core_init_rsp[] = { 0x40, 0x01, 0x11 }; static const uint8_t nci_pattern_core_set_config_rsp[] = { 0x40, 0x02, 0x02, 0x00, 0x00 }; static const uint8_t nci_pattern_core_conn_create_rsp[] = { 0x40, 0x04, 0x04, 0x00 }; static const uint8_t nci_pattern_core_conn_close_rsp[] = { 0x40, 0x05, 0x01, 0x00 }; static const uint8_t nci_pattern_core_conn_credits_ntf[] = { 0x60, 0x06, 0x03, 0x01, NCI_CORE_LC_CONNID_PROP_FW_DL, 0x01 }; static const uint8_t nci_pattern_proprietary_boot_rsp[] = { 0x4F, 0x3A, 0x01, 0x00 }; static struct sk_buff *alloc_lc_skb(struct nfcmrvl_private *priv, uint8_t plen) { struct sk_buff *skb; struct nci_data_hdr *hdr; skb = nci_skb_alloc(priv->ndev, (NCI_DATA_HDR_SIZE + plen), GFP_KERNEL); if (!skb) return NULL; hdr = skb_put(skb, NCI_DATA_HDR_SIZE); hdr->conn_id = NCI_CORE_LC_CONNID_PROP_FW_DL; hdr->rfu = 0; hdr->plen = plen; nci_mt_set((__u8 *)hdr, NCI_MT_DATA_PKT); nci_pbf_set((__u8 *)hdr, NCI_PBF_LAST); return skb; } static void fw_dnld_over(struct nfcmrvl_private *priv, u32 error) { if (priv->fw_dnld.fw) { release_firmware(priv->fw_dnld.fw); priv->fw_dnld.fw = NULL; priv->fw_dnld.header = NULL; priv->fw_dnld.binary_config = NULL; } atomic_set(&priv->ndev->cmd_cnt, 0); if (timer_pending(&priv->ndev->cmd_timer)) del_timer_sync(&priv->ndev->cmd_timer); if (timer_pending(&priv->fw_dnld.timer)) del_timer_sync(&priv->fw_dnld.timer); nfc_info(priv->dev, "FW loading over (%d)]\n", error); if (error != 0) { /* failed, halt the chip to avoid power consumption */ nfcmrvl_chip_halt(priv); } nfc_fw_download_done(priv->ndev->nfc_dev, priv->fw_dnld.name, error); } static void fw_dnld_timeout(struct timer_list *t) { struct nfcmrvl_private *priv = from_timer(priv, t, fw_dnld.timer); nfc_err(priv->dev, "FW loading timeout"); priv->fw_dnld.state = STATE_RESET; fw_dnld_over(priv, -ETIMEDOUT); } static int process_state_reset(struct nfcmrvl_private *priv, const struct sk_buff *skb) { if (sizeof(nci_pattern_core_reset_ntf) != skb->len || memcmp(skb->data, nci_pattern_core_reset_ntf, sizeof(nci_pattern_core_reset_ntf))) return -EINVAL; nfc_info(priv->dev, "BootROM reset, start fw download\n"); /* Start FW download state machine */ priv->fw_dnld.state = STATE_INIT; nci_send_cmd(priv->ndev, NCI_OP_CORE_INIT_CMD, 0, NULL); return 0; } static int process_state_init(struct nfcmrvl_private *priv, const struct sk_buff *skb) { struct nci_core_set_config_cmd cmd; if (sizeof(nci_pattern_core_init_rsp) >= skb->len || memcmp(skb->data, nci_pattern_core_init_rsp, sizeof(nci_pattern_core_init_rsp))) return -EINVAL; cmd.num_params = 1; cmd.param.id = NFCMRVL_PROP_REF_CLOCK; cmd.param.len = 4; memcpy(cmd.param.val, &priv->fw_dnld.header->ref_clock, 
4); nci_send_cmd(priv->ndev, NCI_OP_CORE_SET_CONFIG_CMD, 3 + cmd.param.len, &cmd); priv->fw_dnld.state = STATE_SET_REF_CLOCK; return 0; } static void create_lc(struct nfcmrvl_private *priv) { uint8_t param[2] = { NCI_CORE_LC_PROP_FW_DL, 0x0 }; priv->fw_dnld.state = STATE_OPEN_LC; nci_send_cmd(priv->ndev, NCI_OP_CORE_CONN_CREATE_CMD, 2, param); } static int process_state_set_ref_clock(struct nfcmrvl_private *priv, const struct sk_buff *skb) { struct nci_core_set_config_cmd cmd; if (sizeof(nci_pattern_core_set_config_rsp) != skb->len || memcmp(skb->data, nci_pattern_core_set_config_rsp, skb->len)) return -EINVAL; cmd.num_params = 1; cmd.param.id = NFCMRVL_PROP_SET_HI_CONFIG; switch (priv->phy) { case NFCMRVL_PHY_UART: cmd.param.len = 5; memcpy(cmd.param.val, &priv->fw_dnld.binary_config->uart.baudrate, 4); cmd.param.val[4] = priv->fw_dnld.binary_config->uart.flow_control; break; case NFCMRVL_PHY_I2C: cmd.param.len = 5; memcpy(cmd.param.val, &priv->fw_dnld.binary_config->i2c.clk, 4); cmd.param.val[4] = 0; break; case NFCMRVL_PHY_SPI: cmd.param.len = 5; memcpy(cmd.param.val, &priv->fw_dnld.binary_config->spi.clk, 4); cmd.param.val[4] = 0; break; default: create_lc(priv); return 0; } priv->fw_dnld.state = STATE_SET_HI_CONFIG; nci_send_cmd(priv->ndev, NCI_OP_CORE_SET_CONFIG_CMD, 3 + cmd.param.len, &cmd); return 0; } static int process_state_set_hi_config(struct nfcmrvl_private *priv, const struct sk_buff *skb) { if (sizeof(nci_pattern_core_set_config_rsp) != skb->len || memcmp(skb->data, nci_pattern_core_set_config_rsp, skb->len)) return -EINVAL; create_lc(priv); return 0; } static int process_state_open_lc(struct nfcmrvl_private *priv, const struct sk_buff *skb) { if (sizeof(nci_pattern_core_conn_create_rsp) >= skb->len || memcmp(skb->data, nci_pattern_core_conn_create_rsp, sizeof(nci_pattern_core_conn_create_rsp))) return -EINVAL; priv->fw_dnld.state = STATE_FW_DNLD; priv->fw_dnld.substate = SUBSTATE_WAIT_COMMAND; priv->fw_dnld.offset = priv->fw_dnld.binary_config->offset; return 0; } static int process_state_fw_dnld(struct nfcmrvl_private *priv, struct sk_buff *skb) { uint16_t len; uint16_t comp_len; struct sk_buff *out_skb; switch (priv->fw_dnld.substate) { case SUBSTATE_WAIT_COMMAND: /* * Command format: * B0..2: NCI header * B3 : Helper command (0xA5) * B4..5: le16 data size * B6..7: le16 data size complement (~) * B8..N: payload */ /* Remove NCI HDR */ skb_pull(skb, 3); if (skb->data[0] != HELPER_CMD_PACKET_FORMAT || skb->len != 5) { nfc_err(priv->dev, "bad command"); return -EINVAL; } skb_pull(skb, 1); len = get_unaligned_le16(skb->data); skb_pull(skb, 2); comp_len = get_unaligned_le16(skb->data); memcpy(&comp_len, skb->data, 2); skb_pull(skb, 2); if (((~len) & 0xFFFF) != comp_len) { nfc_err(priv->dev, "bad len complement: %x %x %x", len, comp_len, (~len & 0xFFFF)); out_skb = alloc_lc_skb(priv, 1); if (!out_skb) return -ENOMEM; skb_put_u8(out_skb, 0xBF); nci_send_frame(priv->ndev, out_skb); priv->fw_dnld.substate = SUBSTATE_WAIT_NACK_CREDIT; return 0; } priv->fw_dnld.chunk_len = len; out_skb = alloc_lc_skb(priv, 1); if (!out_skb) return -ENOMEM; skb_put_u8(out_skb, HELPER_ACK_PACKET_FORMAT); nci_send_frame(priv->ndev, out_skb); priv->fw_dnld.substate = SUBSTATE_WAIT_ACK_CREDIT; break; case SUBSTATE_WAIT_ACK_CREDIT: if (sizeof(nci_pattern_core_conn_credits_ntf) != skb->len || memcmp(nci_pattern_core_conn_credits_ntf, skb->data, skb->len)) { nfc_err(priv->dev, "bad packet: waiting for credit"); return -EINVAL; } if (priv->fw_dnld.chunk_len == 0) { /* FW Loading is done */ uint8_t conn_id = 
NCI_CORE_LC_CONNID_PROP_FW_DL; priv->fw_dnld.state = STATE_CLOSE_LC; nci_send_cmd(priv->ndev, NCI_OP_CORE_CONN_CLOSE_CMD, 1, &conn_id); } else { out_skb = alloc_lc_skb(priv, priv->fw_dnld.chunk_len); if (!out_skb) return -ENOMEM; skb_put_data(out_skb, ((uint8_t *)priv->fw_dnld.fw->data) + priv->fw_dnld.offset, priv->fw_dnld.chunk_len); nci_send_frame(priv->ndev, out_skb); priv->fw_dnld.substate = SUBSTATE_WAIT_DATA_CREDIT; } break; case SUBSTATE_WAIT_DATA_CREDIT: if (sizeof(nci_pattern_core_conn_credits_ntf) != skb->len || memcmp(nci_pattern_core_conn_credits_ntf, skb->data, skb->len)) { nfc_err(priv->dev, "bad packet: waiting for credit"); return -EINVAL; } priv->fw_dnld.offset += priv->fw_dnld.chunk_len; priv->fw_dnld.chunk_len = 0; priv->fw_dnld.substate = SUBSTATE_WAIT_COMMAND; break; case SUBSTATE_WAIT_NACK_CREDIT: if (sizeof(nci_pattern_core_conn_credits_ntf) != skb->len || memcmp(nci_pattern_core_conn_credits_ntf, skb->data, skb->len)) { nfc_err(priv->dev, "bad packet: waiting for credit"); return -EINVAL; } priv->fw_dnld.substate = SUBSTATE_WAIT_COMMAND; break; } return 0; } static int process_state_close_lc(struct nfcmrvl_private *priv, const struct sk_buff *skb) { if (sizeof(nci_pattern_core_conn_close_rsp) != skb->len || memcmp(skb->data, nci_pattern_core_conn_close_rsp, skb->len)) return -EINVAL; priv->fw_dnld.state = STATE_BOOT; nci_send_cmd(priv->ndev, NCI_OP_PROPRIETARY_BOOT_CMD, 0, NULL); return 0; } static int process_state_boot(struct nfcmrvl_private *priv, const struct sk_buff *skb) { if (sizeof(nci_pattern_proprietary_boot_rsp) != skb->len || memcmp(skb->data, nci_pattern_proprietary_boot_rsp, skb->len)) return -EINVAL; /* * Update HI config to use the right configuration for the next * data exchanges. */ priv->if_ops->nci_update_config(priv, &priv->fw_dnld.binary_config->config); if (priv->fw_dnld.binary_config == &priv->fw_dnld.header->helper) { /* * This is the case where a helper was needed and we have * uploaded it. Now we have to wait for the next RESET NTF to start * FW download.
*/ priv->fw_dnld.state = STATE_RESET; priv->fw_dnld.binary_config = &priv->fw_dnld.header->firmware; nfc_info(priv->dev, "FW loading: helper loaded"); } else { nfc_info(priv->dev, "FW loading: firmware loaded"); fw_dnld_over(priv, 0); } return 0; } static void fw_dnld_rx_work(struct work_struct *work) { int ret; struct sk_buff *skb; struct nfcmrvl_fw_dnld *fw_dnld = container_of(work, struct nfcmrvl_fw_dnld, rx_work); struct nfcmrvl_private *priv = container_of(fw_dnld, struct nfcmrvl_private, fw_dnld); while ((skb = skb_dequeue(&fw_dnld->rx_q))) { nfc_send_to_raw_sock(priv->ndev->nfc_dev, skb, RAW_PAYLOAD_NCI, NFC_DIRECTION_RX); switch (fw_dnld->state) { case STATE_RESET: ret = process_state_reset(priv, skb); break; case STATE_INIT: ret = process_state_init(priv, skb); break; case STATE_SET_REF_CLOCK: ret = process_state_set_ref_clock(priv, skb); break; case STATE_SET_HI_CONFIG: ret = process_state_set_hi_config(priv, skb); break; case STATE_OPEN_LC: ret = process_state_open_lc(priv, skb); break; case STATE_FW_DNLD: ret = process_state_fw_dnld(priv, skb); break; case STATE_CLOSE_LC: ret = process_state_close_lc(priv, skb); break; case STATE_BOOT: ret = process_state_boot(priv, skb); break; default: ret = -EFAULT; } kfree_skb(skb); if (ret != 0) { nfc_err(priv->dev, "FW loading error"); fw_dnld_over(priv, ret); break; } } } int nfcmrvl_fw_dnld_init(struct nfcmrvl_private *priv) { char name[32]; INIT_WORK(&priv->fw_dnld.rx_work, fw_dnld_rx_work); snprintf(name, sizeof(name), "%s_nfcmrvl_fw_dnld_rx_wq", dev_name(&priv->ndev->nfc_dev->dev)); priv->fw_dnld.rx_wq = create_singlethread_workqueue(name); if (!priv->fw_dnld.rx_wq) return -ENOMEM; skb_queue_head_init(&priv->fw_dnld.rx_q); return 0; } void nfcmrvl_fw_dnld_deinit(struct nfcmrvl_private *priv) { destroy_workqueue(priv->fw_dnld.rx_wq); } void nfcmrvl_fw_dnld_recv_frame(struct nfcmrvl_private *priv, struct sk_buff *skb) { /* Discard command timer */ if (timer_pending(&priv->ndev->cmd_timer)) del_timer_sync(&priv->ndev->cmd_timer); /* Allow next command */ atomic_set(&priv->ndev->cmd_cnt, 1); /* Queue and trigger rx work */ skb_queue_tail(&priv->fw_dnld.rx_q, skb); queue_work(priv->fw_dnld.rx_wq, &priv->fw_dnld.rx_work); } void nfcmrvl_fw_dnld_abort(struct nfcmrvl_private *priv) { fw_dnld_over(priv, -EHOSTDOWN); } int nfcmrvl_fw_dnld_start(struct nci_dev *ndev, const char *firmware_name) { struct nfcmrvl_private *priv = nci_get_drvdata(ndev); struct nfcmrvl_fw_dnld *fw_dnld = &priv->fw_dnld; int res; if (!priv->support_fw_dnld) return -ENOTSUPP; if (!firmware_name || !firmware_name[0]) return -EINVAL; strcpy(fw_dnld->name, firmware_name); /* * Retrieve FW binary file and parse it to initialize FW download * state machine. 
*/ /* Retrieve FW binary */ res = request_firmware(&fw_dnld->fw, firmware_name, &ndev->nfc_dev->dev); if (res < 0) { nfc_err(priv->dev, "failed to retrieve FW %s", firmware_name); return -ENOENT; } fw_dnld->header = (const struct nfcmrvl_fw *) priv->fw_dnld.fw->data; if (fw_dnld->header->magic != NFCMRVL_FW_MAGIC || fw_dnld->header->phy != priv->phy) { nfc_err(priv->dev, "bad firmware binary %s magic=0x%x phy=%d", firmware_name, fw_dnld->header->magic, fw_dnld->header->phy); release_firmware(fw_dnld->fw); fw_dnld->header = NULL; return -EINVAL; } if (fw_dnld->header->helper.offset != 0) { nfc_info(priv->dev, "loading helper"); fw_dnld->binary_config = &fw_dnld->header->helper; } else { nfc_info(priv->dev, "loading firmware"); fw_dnld->binary_config = &fw_dnld->header->firmware; } /* Configure a timer for timeout */ timer_setup(&priv->fw_dnld.timer, fw_dnld_timeout, 0); mod_timer(&priv->fw_dnld.timer, jiffies + msecs_to_jiffies(FW_DNLD_TIMEOUT)); /* Reconfigure HI to be sure that it uses the bootrom values */ priv->if_ops->nci_update_config(priv, &fw_dnld->header->bootrom.config); /* Allow first command */ atomic_set(&priv->ndev->cmd_cnt, 1); /* First, reset the chip */ priv->fw_dnld.state = STATE_RESET; nfcmrvl_chip_reset(priv); /* Now wait for CORE_RESET_NTF or timeout */ return 0; }
linux-master
drivers/nfc/nfcmrvl/fw_dnld.c
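Note on the helper protocol in fw_dnld.c above: in SUBSTATE_WAIT_COMMAND the chunk header carries a little-endian 16-bit length followed by its one's complement, and the driver only ACKs (HELPER_ACK_PACKET_FORMAT) when (~len & 0xFFFF) matches the complement field, otherwise it NACKs with 0xBF. A minimal standalone check of that rule, assuming the same little-endian layout that get_unaligned_le16() reads:

#include <stdint.h>
#include <stdio.h>

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/*
 * Returns the announced chunk length, or -1 when the complement check
 * fails and the driver would send a NACK instead of an ACK.
 */
static int helper_chunk_len(const uint8_t *hdr /* bytes B4..B7 of the frame */)
{
	uint16_t len = get_le16(hdr);
	uint16_t comp = get_le16(hdr + 2);

	if (((~len) & 0xFFFF) != comp)
		return -1;
	return len;
}

int main(void)
{
	const uint8_t good[4] = { 0x00, 0x01, 0xFF, 0xFE };	/* len 256, ~len */
	const uint8_t bad[4]  = { 0x00, 0x01, 0x00, 0x00 };

	printf("good: %d\n", helper_chunk_len(good));	/* 256 */
	printf("bad:  %d\n", helper_chunk_len(bad));	/* -1  */
	return 0;
}

The complement guards against corrupted chunk headers on the raw link before any firmware data is accepted.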
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell NFC-over-UART driver * * Copyright (C) 2015, Marvell International Ltd. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/of_gpio.h> #include <net/nfc/nci.h> #include <net/nfc/nci_core.h> #include "nfcmrvl.h" static unsigned int hci_muxed; static unsigned int flow_control; static unsigned int break_control; static int reset_n_io = -EINVAL; /* * NFCMRVL NCI OPS */ static int nfcmrvl_uart_nci_open(struct nfcmrvl_private *priv) { return 0; } static int nfcmrvl_uart_nci_close(struct nfcmrvl_private *priv) { return 0; } static int nfcmrvl_uart_nci_send(struct nfcmrvl_private *priv, struct sk_buff *skb) { struct nci_uart *nu = priv->drv_data; return nu->ops.send(nu, skb); } static void nfcmrvl_uart_nci_update_config(struct nfcmrvl_private *priv, const void *param) { struct nci_uart *nu = priv->drv_data; const struct nfcmrvl_fw_uart_config *config = param; nci_uart_set_config(nu, le32_to_cpu(config->baudrate), config->flow_control); } static const struct nfcmrvl_if_ops uart_ops = { .nci_open = nfcmrvl_uart_nci_open, .nci_close = nfcmrvl_uart_nci_close, .nci_send = nfcmrvl_uart_nci_send, .nci_update_config = nfcmrvl_uart_nci_update_config }; static int nfcmrvl_uart_parse_dt(struct device_node *node, struct nfcmrvl_platform_data *pdata) { struct device_node *matched_node; int ret; matched_node = of_get_compatible_child(node, "marvell,nfc-uart"); if (!matched_node) { matched_node = of_get_compatible_child(node, "mrvl,nfc-uart"); if (!matched_node) return -ENODEV; } ret = nfcmrvl_parse_dt(matched_node, pdata); if (ret < 0) { pr_err("Failed to get generic entries\n"); of_node_put(matched_node); return ret; } pdata->flow_control = of_property_read_bool(matched_node, "flow-control"); pdata->break_control = of_property_read_bool(matched_node, "break-control"); of_node_put(matched_node); return 0; } /* * NCI UART OPS */ static int nfcmrvl_nci_uart_open(struct nci_uart *nu) { struct nfcmrvl_private *priv; struct nfcmrvl_platform_data config; const struct nfcmrvl_platform_data *pdata = NULL; struct device *dev = nu->tty->dev; /* * Platform data cannot be used here since usually it is already used * by low level serial driver. We can try to retrieve serial device * and check if DT entries were added. 
*/ if (dev && dev->parent && dev->parent->of_node) if (nfcmrvl_uart_parse_dt(dev->parent->of_node, &config) == 0) pdata = &config; if (!pdata) { pr_info("No platform data / DT -> fallback to module params\n"); config.hci_muxed = hci_muxed; config.reset_n_io = reset_n_io; config.flow_control = flow_control; config.break_control = break_control; pdata = &config; } priv = nfcmrvl_nci_register_dev(NFCMRVL_PHY_UART, nu, &uart_ops, dev, pdata); if (IS_ERR(priv)) return PTR_ERR(priv); priv->support_fw_dnld = true; nu->drv_data = priv; nu->ndev = priv->ndev; return 0; } static void nfcmrvl_nci_uart_close(struct nci_uart *nu) { nfcmrvl_nci_unregister_dev((struct nfcmrvl_private *)nu->drv_data); } static int nfcmrvl_nci_uart_recv(struct nci_uart *nu, struct sk_buff *skb) { return nfcmrvl_nci_recv_frame((struct nfcmrvl_private *)nu->drv_data, skb); } static void nfcmrvl_nci_uart_tx_start(struct nci_uart *nu) { struct nfcmrvl_private *priv = (struct nfcmrvl_private *)nu->drv_data; if (priv->ndev->nfc_dev->fw_download_in_progress) return; /* Remove BREAK to wake up the NFCC */ if (priv->config.break_control && nu->tty->ops->break_ctl) { nu->tty->ops->break_ctl(nu->tty, 0); usleep_range(3000, 5000); } } static void nfcmrvl_nci_uart_tx_done(struct nci_uart *nu) { struct nfcmrvl_private *priv = (struct nfcmrvl_private *)nu->drv_data; if (priv->ndev->nfc_dev->fw_download_in_progress) return; /* * To ensure that we can wake the NFCC up if it goes into DEEP SLEEP * state, we set BREAK. Once we are ready to send again, we will remove * it. */ if (priv->config.break_control && nu->tty->ops->break_ctl) { nu->tty->ops->break_ctl(nu->tty, -1); usleep_range(1000, 3000); } } static struct nci_uart nfcmrvl_nci_uart = { .owner = THIS_MODULE, .name = "nfcmrvl_uart", .driver = NCI_UART_DRIVER_MARVELL, .ops = { .open = nfcmrvl_nci_uart_open, .close = nfcmrvl_nci_uart_close, .recv = nfcmrvl_nci_uart_recv, .tx_start = nfcmrvl_nci_uart_tx_start, .tx_done = nfcmrvl_nci_uart_tx_done, } }; module_driver(nfcmrvl_nci_uart, nci_uart_register, nci_uart_unregister); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell NFC-over-UART"); MODULE_LICENSE("GPL v2"); module_param(flow_control, uint, 0); MODULE_PARM_DESC(flow_control, "Tell if UART needs flow control at init."); module_param(break_control, uint, 0); MODULE_PARM_DESC(break_control, "Tell if UART driver must drive break signal."); module_param(hci_muxed, uint, 0); MODULE_PARM_DESC(hci_muxed, "Tell if transport is muxed in HCI one."); module_param(reset_n_io, int, 0); MODULE_PARM_DESC(reset_n_io, "GPIO that is wired to RESET_N signal.");
linux-master
drivers/nfc/nfcmrvl/uart.c
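Note on the BREAK handling in uart.c above: when break_control is enabled, the driver keeps the UART line in BREAK while the transmit path is idle so the NFCC may enter deep sleep, and releases it (then waits a few milliseconds) before sending; the release edge is what wakes the chip. A minimal sketch of that ordering, with set_break() standing in for the tty->ops->break_ctl() call:

#include <stdbool.h>
#include <stdio.h>

static void set_break(bool on)
{
	/* Stand-in for nu->tty->ops->break_ctl(nu->tty, on ? -1 : 0). */
	printf("BREAK %s\n", on ? "asserted" : "released");
}

static void tx_start(void)
{
	set_break(false);
	/* nfcmrvl_nci_uart_tx_start() sleeps 3-5 ms here before sending */
}

static void tx_done(void)
{
	set_break(true);
	/* nfcmrvl_nci_uart_tx_done() sleeps 1-3 ms here to settle the line */
}

int main(void)
{
	tx_start();
	printf("...NCI frames on the wire...\n");
	tx_done();
	return 0;
}

Both hooks bail out while a firmware download is in progress, since the helper protocol owns the line during that window.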
// SPDX-License-Identifier: GPL-2.0-only /* * ---------------------------------------------------------------------------- * drivers/nfc/st95hf/spi.c function definitions for SPI communication * ---------------------------------------------------------------------------- * Copyright (C) 2015 STMicroelectronics Pvt. Ltd. All rights reserved. */ #include "spi.h" /* Function to send user provided buffer to ST95HF through SPI */ int st95hf_spi_send(struct st95hf_spi_context *spicontext, unsigned char *buffertx, int datalen, enum req_type reqtype) { struct spi_message m; int result = 0; struct spi_device *spidev = spicontext->spidev; struct spi_transfer tx_transfer = { .tx_buf = buffertx, .len = datalen, }; mutex_lock(&spicontext->spi_lock); if (reqtype == SYNC) { spicontext->req_issync = true; reinit_completion(&spicontext->done); } else { spicontext->req_issync = false; } spi_message_init(&m); spi_message_add_tail(&tx_transfer, &m); result = spi_sync(spidev, &m); if (result) { dev_err(&spidev->dev, "error: sending cmd to st95hf using SPI = %d\n", result); mutex_unlock(&spicontext->spi_lock); return result; } /* return for asynchronous or no-wait case */ if (reqtype == ASYNC) { mutex_unlock(&spicontext->spi_lock); return 0; } result = wait_for_completion_timeout(&spicontext->done, msecs_to_jiffies(1000)); /* check for timeout or success */ if (!result) { dev_err(&spidev->dev, "error: response not ready timeout\n"); result = -ETIMEDOUT; } else { result = 0; } mutex_unlock(&spicontext->spi_lock); return result; } EXPORT_SYMBOL_GPL(st95hf_spi_send); /* Function to Receive command Response */ int st95hf_spi_recv_response(struct st95hf_spi_context *spicontext, unsigned char *receivebuff) { int len = 0; struct spi_transfer tx_takedata; struct spi_message m; struct spi_device *spidev = spicontext->spidev; unsigned char readdata_cmd = ST95HF_COMMAND_RECEIVE; struct spi_transfer t[2] = { {.tx_buf = &readdata_cmd, .len = 1,}, {.rx_buf = receivebuff, .len = 2, .cs_change = 1,}, }; int ret = 0; memset(&tx_takedata, 0x0, sizeof(struct spi_transfer)); mutex_lock(&spicontext->spi_lock); /* First spi transfer to know the length of valid data */ spi_message_init(&m); spi_message_add_tail(&t[0], &m); spi_message_add_tail(&t[1], &m); ret = spi_sync(spidev, &m); if (ret) { dev_err(&spidev->dev, "spi_recv_resp, data length error = %d\n", ret); mutex_unlock(&spicontext->spi_lock); return ret; } /* As 2 bytes are already read */ len = 2; /* Support of long frame */ if (receivebuff[0] & 0x60) len += (((receivebuff[0] & 0x60) >> 5) << 8) | receivebuff[1]; else len += receivebuff[1]; /* Now make a transfer to read only relevant bytes */ tx_takedata.rx_buf = &receivebuff[2]; tx_takedata.len = len - 2; spi_message_init(&m); spi_message_add_tail(&tx_takedata, &m); ret = spi_sync(spidev, &m); mutex_unlock(&spicontext->spi_lock); if (ret) { dev_err(&spidev->dev, "spi_recv_resp, data read error = %d\n", ret); return ret; } return len; } EXPORT_SYMBOL_GPL(st95hf_spi_recv_response); int st95hf_spi_recv_echo_res(struct st95hf_spi_context *spicontext, unsigned char *receivebuff) { unsigned char readdata_cmd = ST95HF_COMMAND_RECEIVE; struct spi_transfer t[2] = { {.tx_buf = &readdata_cmd, .len = 1,}, {.rx_buf = receivebuff, .len = 1,}, }; struct spi_message m; struct spi_device *spidev = spicontext->spidev; int ret = 0; mutex_lock(&spicontext->spi_lock); spi_message_init(&m); spi_message_add_tail(&t[0], &m); spi_message_add_tail(&t[1], &m); ret = spi_sync(spidev, &m); mutex_unlock(&spicontext->spi_lock); if (ret) 
dev_err(&spidev->dev, "recv_echo_res, data read error = %d\n", ret); return ret; } EXPORT_SYMBOL_GPL(st95hf_spi_recv_echo_res);
linux-master
drivers/nfc/st95hf/spi.c
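Note on the response header in spi.c above: st95hf_spi_recv_response() first clocks out two bytes (response code and length) and computes the total transfer size from them; bits 6:5 of the response code act as the two high-order length bits for "long frames". A standalone decoder for that header follows; the example response codes are illustrative values, not taken from the ST95HF datasheet:

#include <stdint.h>
#include <stdio.h>

/* Total response length, including the two header bytes already read. */
static int st95hf_resp_len(const uint8_t hdr[2])
{
	int len = 2;

	if (hdr[0] & 0x60)	/* long frame: bits 6:5 extend the length */
		len += (((hdr[0] & 0x60) >> 5) << 8) | hdr[1];
	else
		len += hdr[1];
	return len;
}

int main(void)
{
	const uint8_t short_frame[2] = { 0x80, 0x10 };	/* 16 data bytes  */
	const uint8_t long_frame[2]  = { 0xA0, 0x05 };	/* 261 data bytes */

	printf("%d\n", st95hf_resp_len(short_frame));	/* 18  */
	printf("%d\n", st95hf_resp_len(long_frame));	/* 263 */
	return 0;
}

The driver then issues a second SPI transfer of exactly len - 2 bytes into &receivebuff[2], so only the announced payload is clocked out.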
// SPDX-License-Identifier: GPL-2.0-only /* * -------------------------------------------------------------------- * Driver for ST NFC Transceiver ST95HF * -------------------------------------------------------------------- * Copyright (C) 2015 STMicroelectronics Pvt. Ltd. All rights reserved. */ #include <linux/err.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/nfc.h> #include <linux/of_gpio.h> #include <linux/of.h> #include <linux/property.h> #include <linux/regulator/consumer.h> #include <linux/wait.h> #include <net/nfc/digital.h> #include <net/nfc/nfc.h> #include "spi.h" /* supported protocols */ #define ST95HF_SUPPORTED_PROT (NFC_PROTO_ISO14443_MASK | \ NFC_PROTO_ISO14443_B_MASK | \ NFC_PROTO_ISO15693_MASK) /* driver capabilities */ #define ST95HF_CAPABILITIES NFC_DIGITAL_DRV_CAPS_IN_CRC /* Command Send Interface */ /* ST95HF_COMMAND_SEND CMD Ids */ #define ECHO_CMD 0x55 #define WRITE_REGISTER_CMD 0x9 #define PROTOCOL_SELECT_CMD 0x2 #define SEND_RECEIVE_CMD 0x4 /* Select protocol codes */ #define ISO15693_PROTOCOL_CODE 0x1 #define ISO14443A_PROTOCOL_CODE 0x2 #define ISO14443B_PROTOCOL_CODE 0x3 /* * head room len is 3 * 1 byte for control byte * 1 byte for cmd * 1 byte for size */ #define ST95HF_HEADROOM_LEN 3 /* * tailroom is 1 for ISO14443A * and 0 for ISO14443B/ISO15693, * hence the max value 1 should be * taken. */ #define ST95HF_TAILROOM_LEN 1 /* Command Response interface */ #define MAX_RESPONSE_BUFFER_SIZE 280 #define ECHORESPONSE 0x55 #define ST95HF_ERR_MASK 0xF #define ST95HF_TIMEOUT_ERROR 0x87 #define ST95HF_NFCA_CRC_ERR_MASK 0x20 #define ST95HF_NFCB_CRC_ERR_MASK 0x01 /* ST95HF transmission flag values */ #define TRFLAG_NFCA_SHORT_FRAME 0x07 #define TRFLAG_NFCA_STD_FRAME 0x08 #define TRFLAG_NFCA_STD_FRAME_CRC 0x28 /* Misc defs */ #define HIGH 1 #define LOW 0 #define ISO14443A_RATS_REQ 0xE0 #define RATS_TB1_PRESENT_MASK 0x20 #define RATS_TA1_PRESENT_MASK 0x10 #define TB1_FWI_MASK 0xF0 #define WTX_REQ_FROM_TAG 0xF2 #define MAX_CMD_LEN 0x7 #define MAX_CMD_PARAMS 4 struct cmd { int cmd_len; unsigned char cmd_id; unsigned char no_cmd_params; unsigned char cmd_params[MAX_CMD_PARAMS]; enum req_type req; }; struct param_list { int param_offset; int new_param_val; }; /* * List of top-level cmds to be used internally by the driver. * All these commands are build on top of ST95HF basic commands * such as SEND_RECEIVE_CMD, PROTOCOL_SELECT_CMD, etc. * These top level cmds are used internally while implementing various ops of * digital layer/driver probe or extending the digital framework layer for * features that are not yet implemented there, for example, WTX cmd handling. 
*/ enum st95hf_cmd_list { CMD_ECHO, CMD_ISO14443A_CONFIG, CMD_ISO14443A_DEMOGAIN, CMD_ISO14443B_DEMOGAIN, CMD_ISO14443A_PROTOCOL_SELECT, CMD_ISO14443B_PROTOCOL_SELECT, CMD_WTX_RESPONSE, CMD_FIELD_OFF, CMD_ISO15693_PROTOCOL_SELECT, }; static const struct cmd cmd_array[] = { [CMD_ECHO] = { .cmd_len = 0x2, .cmd_id = ECHO_CMD, .no_cmd_params = 0, .req = SYNC, }, [CMD_ISO14443A_CONFIG] = { .cmd_len = 0x7, .cmd_id = WRITE_REGISTER_CMD, .no_cmd_params = 0x4, .cmd_params = {0x3A, 0x00, 0x5A, 0x04}, .req = SYNC, }, [CMD_ISO14443A_DEMOGAIN] = { .cmd_len = 0x7, .cmd_id = WRITE_REGISTER_CMD, .no_cmd_params = 0x4, .cmd_params = {0x68, 0x01, 0x01, 0xDF}, .req = SYNC, }, [CMD_ISO14443B_DEMOGAIN] = { .cmd_len = 0x7, .cmd_id = WRITE_REGISTER_CMD, .no_cmd_params = 0x4, .cmd_params = {0x68, 0x01, 0x01, 0x51}, .req = SYNC, }, [CMD_ISO14443A_PROTOCOL_SELECT] = { .cmd_len = 0x7, .cmd_id = PROTOCOL_SELECT_CMD, .no_cmd_params = 0x4, .cmd_params = {ISO14443A_PROTOCOL_CODE, 0x00, 0x01, 0xA0}, .req = SYNC, }, [CMD_ISO14443B_PROTOCOL_SELECT] = { .cmd_len = 0x7, .cmd_id = PROTOCOL_SELECT_CMD, .no_cmd_params = 0x4, .cmd_params = {ISO14443B_PROTOCOL_CODE, 0x01, 0x03, 0xFF}, .req = SYNC, }, [CMD_WTX_RESPONSE] = { .cmd_len = 0x6, .cmd_id = SEND_RECEIVE_CMD, .no_cmd_params = 0x3, .cmd_params = {0xF2, 0x00, TRFLAG_NFCA_STD_FRAME_CRC}, .req = ASYNC, }, [CMD_FIELD_OFF] = { .cmd_len = 0x5, .cmd_id = PROTOCOL_SELECT_CMD, .no_cmd_params = 0x2, .cmd_params = {0x0, 0x0}, .req = SYNC, }, [CMD_ISO15693_PROTOCOL_SELECT] = { .cmd_len = 0x5, .cmd_id = PROTOCOL_SELECT_CMD, .no_cmd_params = 0x2, .cmd_params = {ISO15693_PROTOCOL_CODE, 0x0D}, .req = SYNC, }, }; /* st95_digital_cmd_complete_arg stores client context */ struct st95_digital_cmd_complete_arg { struct sk_buff *skb_resp; nfc_digital_cmd_complete_t complete_cb; void *cb_usrarg; bool rats; }; /* * structure containing ST95HF driver specific data. * @spicontext: structure containing information required * for spi communication between st95hf and host. * @ddev: nfc digital device object. * @nfcdev: nfc device object. * @enable_gpio: gpio used to enable st95hf transceiver. * @complete_cb_arg: structure to store various context information * that is passed from nfc requesting thread to the threaded ISR. * @st95hf_supply: regulator "consumer" for NFC device. * @sendrcv_trflag: last byte of frame send by sendrecv command * of st95hf. This byte contains transmission flag info. * @exchange_lock: semaphore used for signaling the st95hf_remove * function that the last outstanding async nfc request is finished. * @rm_lock: mutex for ensuring safe access of nfc digital object * from threaded ISR. Usage of this mutex avoids any race between * deletion of the object from st95hf_remove() and its access from * the threaded ISR. * @nfcdev_free: flag to have the state of nfc device object. * [alive | died] * @current_protocol: current nfc protocol. * @current_rf_tech: current rf technology. * @fwi: frame waiting index, received in reply of RATS according to * digital protocol. */ struct st95hf_context { struct st95hf_spi_context spicontext; struct nfc_digital_dev *ddev; struct nfc_dev *nfcdev; unsigned int enable_gpio; struct st95_digital_cmd_complete_arg complete_cb_arg; struct regulator *st95hf_supply; unsigned char sendrcv_trflag; struct semaphore exchange_lock; struct mutex rm_lock; bool nfcdev_free; u8 current_protocol; u8 current_rf_tech; int fwi; }; /* * st95hf_send_recv_cmd() is for sending commands to ST95HF * that are described in the cmd_array[]. 
It can optionally * receive the response if the cmd request is of type * SYNC. For that to happen caller must pass true to recv_res. * For ASYNC request, recv_res is ignored and the * function will never try to receive the response on behalf * of the caller. */ static int st95hf_send_recv_cmd(struct st95hf_context *st95context, enum st95hf_cmd_list cmd, int no_modif, struct param_list *list_array, bool recv_res) { unsigned char spi_cmd_buffer[MAX_CMD_LEN]; int i, ret; struct device *dev = &st95context->spicontext.spidev->dev; if (cmd_array[cmd].cmd_len > MAX_CMD_LEN) return -EINVAL; if (cmd_array[cmd].no_cmd_params < no_modif) return -EINVAL; if (no_modif && !list_array) return -EINVAL; spi_cmd_buffer[0] = ST95HF_COMMAND_SEND; spi_cmd_buffer[1] = cmd_array[cmd].cmd_id; spi_cmd_buffer[2] = cmd_array[cmd].no_cmd_params; memcpy(&spi_cmd_buffer[3], cmd_array[cmd].cmd_params, spi_cmd_buffer[2]); for (i = 0; i < no_modif; i++) { if (list_array[i].param_offset >= cmd_array[cmd].no_cmd_params) return -EINVAL; spi_cmd_buffer[3 + list_array[i].param_offset] = list_array[i].new_param_val; } ret = st95hf_spi_send(&st95context->spicontext, spi_cmd_buffer, cmd_array[cmd].cmd_len, cmd_array[cmd].req); if (ret) { dev_err(dev, "st95hf_spi_send failed with error %d\n", ret); return ret; } if (cmd_array[cmd].req == SYNC && recv_res) { unsigned char st95hf_response_arr[2]; ret = st95hf_spi_recv_response(&st95context->spicontext, st95hf_response_arr); if (ret < 0) { dev_err(dev, "spi error from st95hf_spi_recv_response(), err = 0x%x\n", ret); return ret; } if (st95hf_response_arr[0]) { dev_err(dev, "st95hf error from st95hf_spi_recv_response(), err = 0x%x\n", st95hf_response_arr[0]); return -EIO; } } return 0; } static int st95hf_echo_command(struct st95hf_context *st95context) { int result = 0; unsigned char echo_response; result = st95hf_send_recv_cmd(st95context, CMD_ECHO, 0, NULL, false); if (result) return result; /* If control reached here, response can be taken */ result = st95hf_spi_recv_echo_res(&st95context->spicontext, &echo_response); if (result) { dev_err(&st95context->spicontext.spidev->dev, "err: echo response receive error = 0x%x\n", result); return result; } if (echo_response == ECHORESPONSE) return 0; dev_err(&st95context->spicontext.spidev->dev, "err: echo res is 0x%x\n", echo_response); return -EIO; } static int secondary_configuration_type4a(struct st95hf_context *stcontext) { int result = 0; struct device *dev = &stcontext->nfcdev->dev; /* 14443A config setting after select protocol */ result = st95hf_send_recv_cmd(stcontext, CMD_ISO14443A_CONFIG, 0, NULL, true); if (result) { dev_err(dev, "type a config cmd, err = 0x%x\n", result); return result; } /* 14443A demo gain setting */ result = st95hf_send_recv_cmd(stcontext, CMD_ISO14443A_DEMOGAIN, 0, NULL, true); if (result) dev_err(dev, "type a demogain cmd, err = 0x%x\n", result); return result; } static int secondary_configuration_type4b(struct st95hf_context *stcontext) { int result = 0; struct device *dev = &stcontext->nfcdev->dev; result = st95hf_send_recv_cmd(stcontext, CMD_ISO14443B_DEMOGAIN, 0, NULL, true); if (result) dev_err(dev, "type b demogain cmd, err = 0x%x\n", result); return result; } static int st95hf_select_protocol(struct st95hf_context *stcontext, int type) { int result = 0; struct device *dev; dev = &stcontext->nfcdev->dev; switch (type) { case NFC_DIGITAL_RF_TECH_106A: stcontext->current_rf_tech = NFC_DIGITAL_RF_TECH_106A; result = st95hf_send_recv_cmd(stcontext, CMD_ISO14443A_PROTOCOL_SELECT, 0, NULL, true); if 
(result) { dev_err(dev, "protocol sel, err = 0x%x\n", result); return result; } /* secondary config. for 14443Type 4A after protocol select */ result = secondary_configuration_type4a(stcontext); if (result) { dev_err(dev, "type a secondary config, err = 0x%x\n", result); return result; } break; case NFC_DIGITAL_RF_TECH_106B: stcontext->current_rf_tech = NFC_DIGITAL_RF_TECH_106B; result = st95hf_send_recv_cmd(stcontext, CMD_ISO14443B_PROTOCOL_SELECT, 0, NULL, true); if (result) { dev_err(dev, "protocol sel send, err = 0x%x\n", result); return result; } /* * delay of 5-6 ms is required after select protocol * command in case of ISO14443 Type B */ usleep_range(50000, 60000); /* secondary config. for 14443Type 4B after protocol select */ result = secondary_configuration_type4b(stcontext); if (result) { dev_err(dev, "type b secondary config, err = 0x%x\n", result); return result; } break; case NFC_DIGITAL_RF_TECH_ISO15693: stcontext->current_rf_tech = NFC_DIGITAL_RF_TECH_ISO15693; result = st95hf_send_recv_cmd(stcontext, CMD_ISO15693_PROTOCOL_SELECT, 0, NULL, true); if (result) { dev_err(dev, "protocol sel send, err = 0x%x\n", result); return result; } break; default: return -EINVAL; } return 0; } static void st95hf_send_st95enable_negativepulse(struct st95hf_context *st95con) { /* First make irq_in pin high */ gpio_set_value(st95con->enable_gpio, HIGH); /* wait for 1 milisecond */ usleep_range(1000, 2000); /* Make irq_in pin low */ gpio_set_value(st95con->enable_gpio, LOW); /* wait for minimum interrupt pulse to make st95 active */ usleep_range(1000, 2000); /* At end make it high */ gpio_set_value(st95con->enable_gpio, HIGH); } /* * Send a reset sequence over SPI bus (Reset command + wait 3ms + * negative pulse on st95hf enable gpio */ static int st95hf_send_spi_reset_sequence(struct st95hf_context *st95context) { int result = 0; unsigned char reset_cmd = ST95HF_COMMAND_RESET; result = st95hf_spi_send(&st95context->spicontext, &reset_cmd, ST95HF_RESET_CMD_LEN, ASYNC); if (result) { dev_err(&st95context->spicontext.spidev->dev, "spi reset sequence cmd error = %d", result); return result; } /* wait for 3 milisecond to complete the controller reset process */ usleep_range(3000, 4000); /* send negative pulse to make st95hf active */ st95hf_send_st95enable_negativepulse(st95context); /* wait for 10 milisecond : HFO setup time */ usleep_range(10000, 20000); return result; } static int st95hf_por_sequence(struct st95hf_context *st95context) { int nth_attempt = 1; int result; st95hf_send_st95enable_negativepulse(st95context); usleep_range(5000, 6000); do { /* send an ECHO command and checks ST95HF response */ result = st95hf_echo_command(st95context); dev_dbg(&st95context->spicontext.spidev->dev, "response from echo function = 0x%x, attempt = %d\n", result, nth_attempt); if (!result) return 0; /* send an pulse on IRQ in case of the chip is on sleep state */ if (nth_attempt == 2) st95hf_send_st95enable_negativepulse(st95context); else st95hf_send_spi_reset_sequence(st95context); /* delay of 50 milisecond */ usleep_range(50000, 51000); } while (nth_attempt++ < 3); return -ETIMEDOUT; } static int iso14443_config_fdt(struct st95hf_context *st95context, int wtxm) { int result = 0; struct device *dev = &st95context->spicontext.spidev->dev; struct nfc_digital_dev *nfcddev = st95context->ddev; unsigned char pp_typeb; struct param_list new_params[2]; pp_typeb = cmd_array[CMD_ISO14443B_PROTOCOL_SELECT].cmd_params[2]; if (nfcddev->curr_protocol == NFC_PROTO_ISO14443 && st95context->fwi < 4) st95context->fwi = 4; 
new_params[0].param_offset = 2; if (nfcddev->curr_protocol == NFC_PROTO_ISO14443) new_params[0].new_param_val = st95context->fwi; else if (nfcddev->curr_protocol == NFC_PROTO_ISO14443_B) new_params[0].new_param_val = pp_typeb; new_params[1].param_offset = 3; new_params[1].new_param_val = wtxm; switch (nfcddev->curr_protocol) { case NFC_PROTO_ISO14443: result = st95hf_send_recv_cmd(st95context, CMD_ISO14443A_PROTOCOL_SELECT, 2, new_params, true); if (result) { dev_err(dev, "WTX type a sel proto, err = 0x%x\n", result); return result; } /* secondary config. for 14443Type 4A after protocol select */ result = secondary_configuration_type4a(st95context); if (result) { dev_err(dev, "WTX type a second. config, err = 0x%x\n", result); return result; } break; case NFC_PROTO_ISO14443_B: result = st95hf_send_recv_cmd(st95context, CMD_ISO14443B_PROTOCOL_SELECT, 2, new_params, true); if (result) { dev_err(dev, "WTX type b sel proto, err = 0x%x\n", result); return result; } /* secondary config. for 14443Type 4B after protocol select */ result = secondary_configuration_type4b(st95context); if (result) { dev_err(dev, "WTX type b second. config, err = 0x%x\n", result); return result; } break; default: return -EINVAL; } return 0; } static int st95hf_handle_wtx(struct st95hf_context *stcontext, bool new_wtx, int wtx_val) { int result = 0; unsigned char val_mm = 0; struct param_list new_params[1]; struct nfc_digital_dev *nfcddev = stcontext->ddev; struct device *dev = &stcontext->nfcdev->dev; if (new_wtx) { result = iso14443_config_fdt(stcontext, wtx_val & 0x3f); if (result) { dev_err(dev, "Config. setting error on WTX req, err = 0x%x\n", result); return result; } /* Send response of wtx with ASYNC as no response expected */ new_params[0].param_offset = 1; new_params[0].new_param_val = wtx_val; result = st95hf_send_recv_cmd(stcontext, CMD_WTX_RESPONSE, 1, new_params, false); if (result) dev_err(dev, "WTX response send, err = 0x%x\n", result); return result; } /* if no new wtx, configure with default values */ if (nfcddev->curr_protocol == NFC_PROTO_ISO14443) val_mm = cmd_array[CMD_ISO14443A_PROTOCOL_SELECT].cmd_params[3]; else if (nfcddev->curr_protocol == NFC_PROTO_ISO14443_B) val_mm = cmd_array[CMD_ISO14443B_PROTOCOL_SELECT].cmd_params[3]; result = iso14443_config_fdt(stcontext, val_mm); if (result) dev_err(dev, "Default config.
setting error after WTX processing, err = 0x%x\n", result); return result; } static int st95hf_error_handling(struct st95hf_context *stcontext, struct sk_buff *skb_resp, int res_len) { int result = 0; unsigned char error_byte; struct device *dev = &stcontext->nfcdev->dev; /* First check ST95HF specific error */ if (skb_resp->data[0] & ST95HF_ERR_MASK) { if (skb_resp->data[0] == ST95HF_TIMEOUT_ERROR) result = -ETIMEDOUT; else result = -EIO; return result; } /* Check for CRC err only if CRC is present in the tag response */ switch (stcontext->current_rf_tech) { case NFC_DIGITAL_RF_TECH_106A: if (stcontext->sendrcv_trflag == TRFLAG_NFCA_STD_FRAME_CRC) { error_byte = skb_resp->data[res_len - 3]; if (error_byte & ST95HF_NFCA_CRC_ERR_MASK) { /* CRC error occurred */ dev_err(dev, "CRC error, byte received = 0x%x\n", error_byte); result = -EIO; } } break; case NFC_DIGITAL_RF_TECH_106B: case NFC_DIGITAL_RF_TECH_ISO15693: error_byte = skb_resp->data[res_len - 1]; if (error_byte & ST95HF_NFCB_CRC_ERR_MASK) { /* CRC error occurred */ dev_err(dev, "CRC error, byte received = 0x%x\n", error_byte); result = -EIO; } break; } return result; } static int st95hf_response_handler(struct st95hf_context *stcontext, struct sk_buff *skb_resp, int res_len) { int result = 0; int skb_len; unsigned char val_mm; struct nfc_digital_dev *nfcddev = stcontext->ddev; struct device *dev = &stcontext->nfcdev->dev; struct st95_digital_cmd_complete_arg *cb_arg; cb_arg = &stcontext->complete_cb_arg; /* Process the response */ skb_put(skb_resp, res_len); /* Remove st95 header */ skb_pull(skb_resp, 2); skb_len = skb_resp->len; /* check if it is case of RATS request reply & FWI is present */ if (nfcddev->curr_protocol == NFC_PROTO_ISO14443 && cb_arg->rats && (skb_resp->data[1] & RATS_TB1_PRESENT_MASK)) { if (skb_resp->data[1] & RATS_TA1_PRESENT_MASK) stcontext->fwi = (skb_resp->data[3] & TB1_FWI_MASK) >> 4; else stcontext->fwi = (skb_resp->data[2] & TB1_FWI_MASK) >> 4; val_mm = cmd_array[CMD_ISO14443A_PROTOCOL_SELECT].cmd_params[3]; result = iso14443_config_fdt(stcontext, val_mm); if (result) { dev_err(dev, "error in config_fdt to handle fwi of ATS, error=%d\n", result); return result; } } cb_arg->rats = false; /* Remove CRC bytes only if received frames data has an eod (CRC) */ switch (stcontext->current_rf_tech) { case NFC_DIGITAL_RF_TECH_106A: if (stcontext->sendrcv_trflag == TRFLAG_NFCA_STD_FRAME_CRC) skb_trim(skb_resp, (skb_len - 5)); else skb_trim(skb_resp, (skb_len - 3)); break; case NFC_DIGITAL_RF_TECH_106B: case NFC_DIGITAL_RF_TECH_ISO15693: skb_trim(skb_resp, (skb_len - 3)); break; } return result; } static irqreturn_t st95hf_irq_handler(int irq, void *st95hfcontext) { struct st95hf_context *stcontext = (struct st95hf_context *)st95hfcontext; if (stcontext->spicontext.req_issync) { complete(&stcontext->spicontext.done); stcontext->spicontext.req_issync = false; return IRQ_HANDLED; } return IRQ_WAKE_THREAD; } static irqreturn_t st95hf_irq_thread_handler(int irq, void *st95hfcontext) { int result = 0; int res_len; static bool wtx; struct device *spidevice; struct sk_buff *skb_resp; struct st95hf_context *stcontext = (struct st95hf_context *)st95hfcontext; struct st95_digital_cmd_complete_arg *cb_arg; spidevice = &stcontext->spicontext.spidev->dev; /* * check semaphore, if not down() already, then we don't * know in which context the ISR is called and surely it * will be a bug. Note that down() of the semaphore is done * in the corresponding st95hf_in_send_cmd() and then * only this ISR should be called. 
ISR will up() the * semaphore before leaving. Hence when the ISR is called * the correct behaviour is down_trylock() should always * return 1 (indicating semaphore cant be taken and hence no * change in semaphore count). * If not, then we up() the semaphore and crash on * a BUG() ! */ if (!down_trylock(&stcontext->exchange_lock)) { up(&stcontext->exchange_lock); WARN(1, "unknown context in ST95HF ISR"); return IRQ_NONE; } cb_arg = &stcontext->complete_cb_arg; skb_resp = cb_arg->skb_resp; mutex_lock(&stcontext->rm_lock); res_len = st95hf_spi_recv_response(&stcontext->spicontext, skb_resp->data); if (res_len < 0) { dev_err(spidevice, "TISR spi response err = 0x%x\n", res_len); result = res_len; goto end; } /* if stcontext->nfcdev_free is true, it means remove already ran */ if (stcontext->nfcdev_free) { result = -ENODEV; goto end; } if (skb_resp->data[2] == WTX_REQ_FROM_TAG) { /* Request for new FWT from tag */ result = st95hf_handle_wtx(stcontext, true, skb_resp->data[3]); if (result) goto end; wtx = true; mutex_unlock(&stcontext->rm_lock); return IRQ_HANDLED; } result = st95hf_error_handling(stcontext, skb_resp, res_len); if (result) goto end; result = st95hf_response_handler(stcontext, skb_resp, res_len); if (result) goto end; /* * If select protocol is done on wtx req. do select protocol * again with default values */ if (wtx) { wtx = false; result = st95hf_handle_wtx(stcontext, false, 0); if (result) goto end; } /* call digital layer callback */ cb_arg->complete_cb(stcontext->ddev, cb_arg->cb_usrarg, skb_resp); /* up the semaphore before returning */ up(&stcontext->exchange_lock); mutex_unlock(&stcontext->rm_lock); return IRQ_HANDLED; end: kfree_skb(skb_resp); wtx = false; cb_arg->rats = false; skb_resp = ERR_PTR(result); /* call of callback with error */ cb_arg->complete_cb(stcontext->ddev, cb_arg->cb_usrarg, skb_resp); /* up the semaphore before returning */ up(&stcontext->exchange_lock); mutex_unlock(&stcontext->rm_lock); return IRQ_HANDLED; } /* NFC ops functions definition */ static int st95hf_in_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { struct st95hf_context *stcontext = nfc_digital_get_drvdata(ddev); if (type == NFC_DIGITAL_CONFIG_RF_TECH) return st95hf_select_protocol(stcontext, param); if (type == NFC_DIGITAL_CONFIG_FRAMING) { switch (param) { case NFC_DIGITAL_FRAMING_NFCA_SHORT: stcontext->sendrcv_trflag = TRFLAG_NFCA_SHORT_FRAME; break; case NFC_DIGITAL_FRAMING_NFCA_STANDARD: stcontext->sendrcv_trflag = TRFLAG_NFCA_STD_FRAME; break; case NFC_DIGITAL_FRAMING_NFCA_T4T: case NFC_DIGITAL_FRAMING_NFCA_NFC_DEP: case NFC_DIGITAL_FRAMING_NFCA_STANDARD_WITH_CRC_A: stcontext->sendrcv_trflag = TRFLAG_NFCA_STD_FRAME_CRC; break; case NFC_DIGITAL_FRAMING_NFCB: case NFC_DIGITAL_FRAMING_ISO15693_INVENTORY: case NFC_DIGITAL_FRAMING_ISO15693_T5T: break; } } return 0; } static int rf_off(struct st95hf_context *stcontext) { int rc; struct device *dev; dev = &stcontext->nfcdev->dev; rc = st95hf_send_recv_cmd(stcontext, CMD_FIELD_OFF, 0, NULL, true); if (rc) dev_err(dev, "protocol sel send field off, err = 0x%x\n", rc); return rc; } static int st95hf_in_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { struct st95hf_context *stcontext = nfc_digital_get_drvdata(ddev); int rc; struct sk_buff *skb_resp; int len_data_to_tag = 0; skb_resp = nfc_alloc_recv_skb(MAX_RESPONSE_BUFFER_SIZE, GFP_KERNEL); if (!skb_resp) return -ENOMEM; switch (stcontext->current_rf_tech) { case NFC_DIGITAL_RF_TECH_106A: len_data_to_tag = 
skb->len + 1; skb_put_u8(skb, stcontext->sendrcv_trflag); break; case NFC_DIGITAL_RF_TECH_106B: case NFC_DIGITAL_RF_TECH_ISO15693: len_data_to_tag = skb->len; break; default: rc = -EINVAL; goto free_skb_resp; } skb_push(skb, 3); skb->data[0] = ST95HF_COMMAND_SEND; skb->data[1] = SEND_RECEIVE_CMD; skb->data[2] = len_data_to_tag; stcontext->complete_cb_arg.skb_resp = skb_resp; stcontext->complete_cb_arg.cb_usrarg = arg; stcontext->complete_cb_arg.complete_cb = cb; if ((skb->data[3] == ISO14443A_RATS_REQ) && ddev->curr_protocol == NFC_PROTO_ISO14443) stcontext->complete_cb_arg.rats = true; /* * down the semaphore to indicate to remove func that an * ISR is pending, note that it will not block here in any case. * If found blocked, it is a BUG! */ rc = down_killable(&stcontext->exchange_lock); if (rc) { WARN(1, "Semaphore is not found up in st95hf_in_send_cmd\n"); goto free_skb_resp; } rc = st95hf_spi_send(&stcontext->spicontext, skb->data, skb->len, ASYNC); if (rc) { dev_err(&stcontext->nfcdev->dev, "Error %d trying to perform data_exchange", rc); /* up the semaphore since ISR will never come in this case */ up(&stcontext->exchange_lock); goto free_skb_resp; } kfree_skb(skb); return rc; free_skb_resp: kfree_skb(skb_resp); return rc; } /* p2p will be supported in a later release ! */ static int st95hf_tg_configure_hw(struct nfc_digital_dev *ddev, int type, int param) { return 0; } static int st95hf_tg_send_cmd(struct nfc_digital_dev *ddev, struct sk_buff *skb, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return 0; } static int st95hf_tg_listen(struct nfc_digital_dev *ddev, u16 timeout, nfc_digital_cmd_complete_t cb, void *arg) { return 0; } static int st95hf_tg_get_rf_tech(struct nfc_digital_dev *ddev, u8 *rf_tech) { return 0; } static int st95hf_switch_rf(struct nfc_digital_dev *ddev, bool on) { u8 rf_tech; struct st95hf_context *stcontext = nfc_digital_get_drvdata(ddev); rf_tech = ddev->curr_rf_tech; if (on) /* switch on RF field */ return st95hf_select_protocol(stcontext, rf_tech); /* switch OFF RF field */ return rf_off(stcontext); } /* TODO st95hf_abort_cmd */ static void st95hf_abort_cmd(struct nfc_digital_dev *ddev) { } static const struct nfc_digital_ops st95hf_nfc_digital_ops = { .in_configure_hw = st95hf_in_configure_hw, .in_send_cmd = st95hf_in_send_cmd, .tg_listen = st95hf_tg_listen, .tg_configure_hw = st95hf_tg_configure_hw, .tg_send_cmd = st95hf_tg_send_cmd, .tg_get_rf_tech = st95hf_tg_get_rf_tech, .switch_rf = st95hf_switch_rf, .abort_cmd = st95hf_abort_cmd, }; static const struct spi_device_id st95hf_id[] = { { "st95hf", 0 }, {} }; MODULE_DEVICE_TABLE(spi, st95hf_id); static const struct of_device_id st95hf_spi_of_match[] __maybe_unused = { { .compatible = "st,st95hf" }, {}, }; MODULE_DEVICE_TABLE(of, st95hf_spi_of_match); static int st95hf_probe(struct spi_device *nfc_spi_dev) { int ret; struct st95hf_context *st95context; struct st95hf_spi_context *spicontext; nfc_info(&nfc_spi_dev->dev, "ST95HF driver probe called.\n"); st95context = devm_kzalloc(&nfc_spi_dev->dev, sizeof(struct st95hf_context), GFP_KERNEL); if (!st95context) return -ENOMEM; spicontext = &st95context->spicontext; spicontext->spidev = nfc_spi_dev; st95context->fwi = cmd_array[CMD_ISO14443A_PROTOCOL_SELECT].cmd_params[2]; if (device_property_present(&nfc_spi_dev->dev, "st95hfvin")) { st95context->st95hf_supply = devm_regulator_get(&nfc_spi_dev->dev, "st95hfvin"); if (IS_ERR(st95context->st95hf_supply)) { dev_err(&nfc_spi_dev->dev, "failed to acquire regulator\n"); return 
PTR_ERR(st95context->st95hf_supply); } ret = regulator_enable(st95context->st95hf_supply); if (ret) { dev_err(&nfc_spi_dev->dev, "failed to enable regulator\n"); return ret; } } init_completion(&spicontext->done); mutex_init(&spicontext->spi_lock); /* * Store spicontext in spi device object for using it in * remove function */ dev_set_drvdata(&nfc_spi_dev->dev, spicontext); st95context->enable_gpio = of_get_named_gpio(nfc_spi_dev->dev.of_node, "enable-gpio", 0); if (!gpio_is_valid(st95context->enable_gpio)) { dev_err(&nfc_spi_dev->dev, "No valid enable gpio\n"); ret = st95context->enable_gpio; goto err_disable_regulator; } ret = devm_gpio_request_one(&nfc_spi_dev->dev, st95context->enable_gpio, GPIOF_DIR_OUT | GPIOF_INIT_HIGH, "enable_gpio"); if (ret) goto err_disable_regulator; if (nfc_spi_dev->irq > 0) { if (devm_request_threaded_irq(&nfc_spi_dev->dev, nfc_spi_dev->irq, st95hf_irq_handler, st95hf_irq_thread_handler, IRQF_TRIGGER_FALLING, "st95hf", (void *)st95context) < 0) { dev_err(&nfc_spi_dev->dev, "err: irq request for st95hf is failed\n"); ret = -EINVAL; goto err_disable_regulator; } } else { dev_err(&nfc_spi_dev->dev, "not a valid IRQ associated with ST95HF\n"); ret = -EINVAL; goto err_disable_regulator; } /* * First reset SPI to handle warm reset of the system. * It will put the ST95HF device in Power ON state * which make the state of device identical to state * at the time of cold reset of the system. */ ret = st95hf_send_spi_reset_sequence(st95context); if (ret) { dev_err(&nfc_spi_dev->dev, "err: spi_reset_sequence failed\n"); goto err_disable_regulator; } /* call PowerOnReset sequence of ST95hf to activate it */ ret = st95hf_por_sequence(st95context); if (ret) { dev_err(&nfc_spi_dev->dev, "err: por seq failed for st95hf\n"); goto err_disable_regulator; } /* create NFC dev object and register with NFC Subsystem */ st95context->ddev = nfc_digital_allocate_device(&st95hf_nfc_digital_ops, ST95HF_SUPPORTED_PROT, ST95HF_CAPABILITIES, ST95HF_HEADROOM_LEN, ST95HF_TAILROOM_LEN); if (!st95context->ddev) { ret = -ENOMEM; goto err_disable_regulator; } st95context->nfcdev = st95context->ddev->nfc_dev; nfc_digital_set_parent_dev(st95context->ddev, &nfc_spi_dev->dev); ret = nfc_digital_register_device(st95context->ddev); if (ret) { dev_err(&st95context->nfcdev->dev, "st95hf registration failed\n"); goto err_free_digital_device; } /* store st95context in nfc device object */ nfc_digital_set_drvdata(st95context->ddev, st95context); sema_init(&st95context->exchange_lock, 1); mutex_init(&st95context->rm_lock); return ret; err_free_digital_device: nfc_digital_free_device(st95context->ddev); err_disable_regulator: if (st95context->st95hf_supply) regulator_disable(st95context->st95hf_supply); return ret; } static void st95hf_remove(struct spi_device *nfc_spi_dev) { int result = 0; unsigned char reset_cmd = ST95HF_COMMAND_RESET; struct st95hf_spi_context *spictx = dev_get_drvdata(&nfc_spi_dev->dev); struct st95hf_context *stcontext = container_of(spictx, struct st95hf_context, spicontext); mutex_lock(&stcontext->rm_lock); nfc_digital_unregister_device(stcontext->ddev); nfc_digital_free_device(stcontext->ddev); stcontext->nfcdev_free = true; mutex_unlock(&stcontext->rm_lock); /* if last in_send_cmd's ISR is pending, wait for it to finish */ result = down_killable(&stcontext->exchange_lock); if (result == -EINTR) dev_err(&spictx->spidev->dev, "sleep for semaphore interrupted by signal\n"); /* next reset the ST95HF controller */ result = st95hf_spi_send(&stcontext->spicontext, &reset_cmd, 
ST95HF_RESET_CMD_LEN, ASYNC); if (result) dev_err(&spictx->spidev->dev, "ST95HF reset failed in remove() err = %d\n", result); /* wait for 3 ms to complete the controller reset process */ usleep_range(3000, 4000); /* disable regulator */ if (stcontext->st95hf_supply) regulator_disable(stcontext->st95hf_supply); } /* Register as SPI protocol driver */ static struct spi_driver st95hf_driver = { .driver = { .name = "st95hf", .owner = THIS_MODULE, .of_match_table = of_match_ptr(st95hf_spi_of_match), }, .id_table = st95hf_id, .probe = st95hf_probe, .remove = st95hf_remove, }; module_spi_driver(st95hf_driver); MODULE_AUTHOR("Shikha Singh <[email protected]>"); MODULE_DESCRIPTION("ST NFC Transceiver ST95HF driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/nfc/st95hf/core.c
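Note on the FWI handling in core.c above: after a RATS request, st95hf_response_handler() reads the frame waiting integer from the upper nibble of TB1 in the ATS, and TB1's offset shifts by one byte depending on whether TA1 is announced in the T0 format byte. A minimal extraction over raw ATS bytes (layout TL, T0, optional TA1, TB1, ...), reusing the masks defined in core.c; the example frames are made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define RATS_TB1_PRESENT_MASK	0x20
#define RATS_TA1_PRESENT_MASK	0x10
#define TB1_FWI_MASK		0xF0

/* Returns the FWI from TB1, or -1 when TB1 is absent (keep the default). */
static int ats_fwi(const uint8_t *ats)
{
	if (!(ats[1] & RATS_TB1_PRESENT_MASK))
		return -1;

	if (ats[1] & RATS_TA1_PRESENT_MASK)
		return (ats[3] & TB1_FWI_MASK) >> 4;	/* TL T0 TA1 TB1 */
	return (ats[2] & TB1_FWI_MASK) >> 4;		/* TL T0 TB1     */
}

int main(void)
{
	const uint8_t with_ta1[] = { 0x05, 0x78, 0x80, 0x81, 0x02 };
	const uint8_t no_ta1[]   = { 0x04, 0x68, 0x81, 0x02 };

	printf("fwi=%d\n", ats_fwi(with_ta1));	/* 8 */
	printf("fwi=%d\n", ats_fwi(no_ta1));	/* 8 */
	return 0;
}

The driver then feeds the extracted FWI back through iso14443_config_fdt() so the frame delay timeout matches what the tag advertised.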
// SPDX-License-Identifier: GPL-2.0-or-later /* * Bluetooth HCI UART H4 driver with Nokia Extensions AKA Nokia H4+ * * Copyright (C) 2015 Marcel Holtmann <[email protected]> * Copyright (C) 2015-2017 Sebastian Reichel <[email protected]> */ #include <linux/clk.h> #include <linux/errno.h> #include <linux/firmware.h> #include <linux/gpio/consumer.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pm_runtime.h> #include <linux/serdev.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/types.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" #include "btbcm.h" #define VERSION "0.1" #define NOKIA_ID_BCM2048 0x04 #define NOKIA_ID_TI1271 0x31 #define FIRMWARE_BCM2048 "nokia/bcmfw.bin" #define FIRMWARE_TI1271 "nokia/ti1273.bin" #define HCI_NOKIA_NEG_PKT 0x06 #define HCI_NOKIA_ALIVE_PKT 0x07 #define HCI_NOKIA_RADIO_PKT 0x08 #define HCI_NOKIA_NEG_HDR_SIZE 1 #define HCI_NOKIA_MAX_NEG_SIZE 255 #define HCI_NOKIA_ALIVE_HDR_SIZE 1 #define HCI_NOKIA_MAX_ALIVE_SIZE 255 #define HCI_NOKIA_RADIO_HDR_SIZE 2 #define HCI_NOKIA_MAX_RADIO_SIZE 255 #define NOKIA_PROTO_PKT 0x44 #define NOKIA_PROTO_BYTE 0x4c #define NOKIA_NEG_REQ 0x00 #define NOKIA_NEG_ACK 0x20 #define NOKIA_NEG_NAK 0x40 #define H4_TYPE_SIZE 1 #define NOKIA_RECV_ALIVE \ .type = HCI_NOKIA_ALIVE_PKT, \ .hlen = HCI_NOKIA_ALIVE_HDR_SIZE, \ .loff = 0, \ .lsize = 1, \ .maxlen = HCI_NOKIA_MAX_ALIVE_SIZE \ #define NOKIA_RECV_NEG \ .type = HCI_NOKIA_NEG_PKT, \ .hlen = HCI_NOKIA_NEG_HDR_SIZE, \ .loff = 0, \ .lsize = 1, \ .maxlen = HCI_NOKIA_MAX_NEG_SIZE \ #define NOKIA_RECV_RADIO \ .type = HCI_NOKIA_RADIO_PKT, \ .hlen = HCI_NOKIA_RADIO_HDR_SIZE, \ .loff = 1, \ .lsize = 1, \ .maxlen = HCI_NOKIA_MAX_RADIO_SIZE \ struct hci_nokia_neg_hdr { u8 dlen; } __packed; struct hci_nokia_neg_cmd { u8 ack; u16 baud; u16 unused1; u8 proto; u16 sys_clk; u16 unused2; } __packed; #define NOKIA_ALIVE_REQ 0x55 #define NOKIA_ALIVE_RESP 0xcc struct hci_nokia_alive_hdr { u8 dlen; } __packed; struct hci_nokia_alive_pkt { u8 mid; u8 unused; } __packed; struct hci_nokia_neg_evt { u8 ack; u16 baud; u16 unused1; u8 proto; u16 sys_clk; u16 unused2; u8 man_id; u8 ver_id; } __packed; #define MAX_BAUD_RATE 3692300 #define SETUP_BAUD_RATE 921600 #define INIT_BAUD_RATE 120000 struct hci_nokia_radio_hdr { u8 evt; u8 dlen; } __packed; struct nokia_bt_dev { struct hci_uart hu; struct serdev_device *serdev; struct gpio_desc *reset; struct gpio_desc *wakeup_host; struct gpio_desc *wakeup_bt; unsigned long sysclk_speed; int wake_irq; struct sk_buff *rx_skb; struct sk_buff_head txq; bdaddr_t bdaddr; int init_error; struct completion init_completion; u8 man_id; u8 ver_id; bool initialized; bool tx_enabled; bool rx_enabled; }; static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb); static void nokia_flow_control(struct serdev_device *serdev, bool enable) { if (enable) { serdev_device_set_rts(serdev, true); serdev_device_set_flow_control(serdev, true); } else { serdev_device_set_flow_control(serdev, false); serdev_device_set_rts(serdev, false); } } static irqreturn_t wakeup_handler(int irq, void *data) { struct nokia_bt_dev *btdev = data; struct device *dev = &btdev->serdev->dev; int wake_state = gpiod_get_value(btdev->wakeup_host); if (btdev->rx_enabled == wake_state) return IRQ_HANDLED; if (wake_state) pm_runtime_get(dev); else pm_runtime_put(dev); btdev->rx_enabled = wake_state; return IRQ_HANDLED; } 
static int nokia_reset(struct hci_uart *hu) { struct nokia_bt_dev *btdev = hu->priv; struct device *dev = &btdev->serdev->dev; int err; /* reset routine */ gpiod_set_value_cansleep(btdev->reset, 1); gpiod_set_value_cansleep(btdev->wakeup_bt, 1); msleep(100); /* safety check */ err = gpiod_get_value_cansleep(btdev->wakeup_host); if (err == 1) { dev_err(dev, "reset: host wakeup not low!"); return -EPROTO; } /* flush queue */ serdev_device_write_flush(btdev->serdev); /* init uart */ nokia_flow_control(btdev->serdev, false); serdev_device_set_baudrate(btdev->serdev, INIT_BAUD_RATE); gpiod_set_value_cansleep(btdev->reset, 0); /* wait for cts */ err = serdev_device_wait_for_cts(btdev->serdev, true, 200); if (err < 0) { dev_err(dev, "CTS not received: %d", err); return err; } nokia_flow_control(btdev->serdev, true); return 0; } static int nokia_send_alive_packet(struct hci_uart *hu) { struct nokia_bt_dev *btdev = hu->priv; struct device *dev = &btdev->serdev->dev; struct hci_nokia_alive_hdr *hdr; struct hci_nokia_alive_pkt *pkt; struct sk_buff *skb; int len; init_completion(&btdev->init_completion); len = H4_TYPE_SIZE + sizeof(*hdr) + sizeof(*pkt); skb = bt_skb_alloc(len, GFP_KERNEL); if (!skb) return -ENOMEM; hci_skb_pkt_type(skb) = HCI_NOKIA_ALIVE_PKT; memset(skb->data, 0x00, len); hdr = skb_put(skb, sizeof(*hdr)); hdr->dlen = sizeof(*pkt); pkt = skb_put(skb, sizeof(*pkt)); pkt->mid = NOKIA_ALIVE_REQ; nokia_enqueue(hu, skb); hci_uart_tx_wakeup(hu); dev_dbg(dev, "Alive sent"); if (!wait_for_completion_interruptible_timeout(&btdev->init_completion, msecs_to_jiffies(1000))) { return -ETIMEDOUT; } if (btdev->init_error < 0) return btdev->init_error; return 0; } static int nokia_send_negotiation(struct hci_uart *hu) { struct nokia_bt_dev *btdev = hu->priv; struct device *dev = &btdev->serdev->dev; struct hci_nokia_neg_cmd *neg_cmd; struct hci_nokia_neg_hdr *neg_hdr; struct sk_buff *skb; int len, err; u16 baud = DIV_ROUND_CLOSEST(btdev->sysclk_speed * 10, SETUP_BAUD_RATE); int sysclk = btdev->sysclk_speed / 1000; len = H4_TYPE_SIZE + sizeof(*neg_hdr) + sizeof(*neg_cmd); skb = bt_skb_alloc(len, GFP_KERNEL); if (!skb) return -ENOMEM; hci_skb_pkt_type(skb) = HCI_NOKIA_NEG_PKT; neg_hdr = skb_put(skb, sizeof(*neg_hdr)); neg_hdr->dlen = sizeof(*neg_cmd); neg_cmd = skb_put(skb, sizeof(*neg_cmd)); neg_cmd->ack = NOKIA_NEG_REQ; neg_cmd->baud = cpu_to_le16(baud); neg_cmd->unused1 = 0x0000; neg_cmd->proto = NOKIA_PROTO_BYTE; neg_cmd->sys_clk = cpu_to_le16(sysclk); neg_cmd->unused2 = 0x0000; btdev->init_error = 0; init_completion(&btdev->init_completion); nokia_enqueue(hu, skb); hci_uart_tx_wakeup(hu); dev_dbg(dev, "Negotiation sent"); if (!wait_for_completion_interruptible_timeout(&btdev->init_completion, msecs_to_jiffies(10000))) { return -ETIMEDOUT; } if (btdev->init_error < 0) return btdev->init_error; /* Change to previously negotiated speed. Flow Control * is disabled until bluetooth adapter is ready to avoid * broken bytes being received. 
*/ nokia_flow_control(btdev->serdev, false); serdev_device_set_baudrate(btdev->serdev, SETUP_BAUD_RATE); err = serdev_device_wait_for_cts(btdev->serdev, true, 200); if (err < 0) { dev_err(dev, "CTS not received: %d", err); return err; } nokia_flow_control(btdev->serdev, true); dev_dbg(dev, "Negotiation successful"); return 0; } static int nokia_setup_fw(struct hci_uart *hu) { struct nokia_bt_dev *btdev = hu->priv; struct device *dev = &btdev->serdev->dev; const char *fwname; const struct firmware *fw; const u8 *fw_ptr; size_t fw_size; int err; dev_dbg(dev, "setup firmware"); if (btdev->man_id == NOKIA_ID_BCM2048) { fwname = FIRMWARE_BCM2048; } else if (btdev->man_id == NOKIA_ID_TI1271) { fwname = FIRMWARE_TI1271; } else { dev_err(dev, "Unsupported bluetooth device!"); return -ENODEV; } err = request_firmware(&fw, fwname, dev); if (err < 0) { dev_err(dev, "%s: Failed to load Nokia firmware file (%d)", hu->hdev->name, err); return err; } fw_ptr = fw->data; fw_size = fw->size; while (fw_size >= 4) { u16 pkt_size = get_unaligned_le16(fw_ptr); u8 pkt_type = fw_ptr[2]; const struct hci_command_hdr *cmd; u16 opcode; struct sk_buff *skb; switch (pkt_type) { case HCI_COMMAND_PKT: cmd = (struct hci_command_hdr *)(fw_ptr + 3); opcode = le16_to_cpu(cmd->opcode); skb = __hci_cmd_sync(hu->hdev, opcode, cmd->plen, fw_ptr + 3 + HCI_COMMAND_HDR_SIZE, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); dev_err(dev, "%s: FW command %04x failed (%d)", hu->hdev->name, opcode, err); goto done; } kfree_skb(skb); break; case HCI_NOKIA_RADIO_PKT: case HCI_NOKIA_NEG_PKT: case HCI_NOKIA_ALIVE_PKT: break; } fw_ptr += pkt_size + 2; fw_size -= pkt_size + 2; } done: release_firmware(fw); return err; } static int nokia_setup(struct hci_uart *hu) { struct nokia_bt_dev *btdev = hu->priv; struct device *dev = &btdev->serdev->dev; int err; btdev->initialized = false; nokia_flow_control(btdev->serdev, false); pm_runtime_get_sync(dev); if (btdev->tx_enabled) { gpiod_set_value_cansleep(btdev->wakeup_bt, 0); pm_runtime_put(&btdev->serdev->dev); btdev->tx_enabled = false; } dev_dbg(dev, "protocol setup"); /* 0. reset connection */ err = nokia_reset(hu); if (err < 0) { dev_err(dev, "Reset failed: %d", err); goto out; } /* 1. negotiate speed etc */ err = nokia_send_negotiation(hu); if (err < 0) { dev_err(dev, "Negotiation failed: %d", err); goto out; } /* 2. verify correct setup using alive packet */ err = nokia_send_alive_packet(hu); if (err < 0) { dev_err(dev, "Alive check failed: %d", err); goto out; } /* 3. 
send firmware */ err = nokia_setup_fw(hu); if (err < 0) { dev_err(dev, "Could not setup FW: %d", err); goto out; } nokia_flow_control(btdev->serdev, false); serdev_device_set_baudrate(btdev->serdev, MAX_BAUD_RATE); nokia_flow_control(btdev->serdev, true); if (btdev->man_id == NOKIA_ID_BCM2048) { hu->hdev->set_bdaddr = btbcm_set_bdaddr; set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks); dev_dbg(dev, "bcm2048 has invalid bluetooth address!"); } dev_dbg(dev, "protocol setup done!"); gpiod_set_value_cansleep(btdev->wakeup_bt, 0); pm_runtime_put(dev); btdev->tx_enabled = false; btdev->initialized = true; return 0; out: pm_runtime_put(dev); return err; } static int nokia_open(struct hci_uart *hu) { struct device *dev = &hu->serdev->dev; dev_dbg(dev, "protocol open"); pm_runtime_enable(dev); return 0; } static int nokia_flush(struct hci_uart *hu) { struct nokia_bt_dev *btdev = hu->priv; dev_dbg(&btdev->serdev->dev, "flush device"); skb_queue_purge(&btdev->txq); return 0; } static int nokia_close(struct hci_uart *hu) { struct nokia_bt_dev *btdev = hu->priv; struct device *dev = &btdev->serdev->dev; dev_dbg(dev, "close device"); btdev->initialized = false; skb_queue_purge(&btdev->txq); kfree_skb(btdev->rx_skb); /* disable module */ gpiod_set_value(btdev->reset, 1); gpiod_set_value(btdev->wakeup_bt, 0); pm_runtime_disable(&btdev->serdev->dev); return 0; } /* Enqueue frame for transmission (padding, crc, etc) */ static int nokia_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct nokia_bt_dev *btdev = hu->priv; int err; /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); /* Packets must be word aligned */ if (skb->len % 2) { err = skb_pad(skb, 1); if (err) return err; skb_put(skb, 1); } skb_queue_tail(&btdev->txq, skb); return 0; } static int nokia_recv_negotiation_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); struct nokia_bt_dev *btdev = hu->priv; struct device *dev = &btdev->serdev->dev; struct hci_nokia_neg_hdr *hdr; struct hci_nokia_neg_evt *evt; int ret = 0; hdr = (struct hci_nokia_neg_hdr *)skb->data; if (hdr->dlen != sizeof(*evt)) { btdev->init_error = -EIO; ret = -EIO; goto finish_neg; } evt = skb_pull(skb, sizeof(*hdr)); if (evt->ack != NOKIA_NEG_ACK) { dev_err(dev, "Negotiation received: wrong reply"); btdev->init_error = -EINVAL; ret = -EINVAL; goto finish_neg; } btdev->man_id = evt->man_id; btdev->ver_id = evt->ver_id; dev_dbg(dev, "Negotiation received: baud=%u:clk=%u:manu=%u:vers=%u", evt->baud, evt->sys_clk, evt->man_id, evt->ver_id); finish_neg: complete(&btdev->init_completion); kfree_skb(skb); return ret; } static int nokia_recv_alive_packet(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); struct nokia_bt_dev *btdev = hu->priv; struct device *dev = &btdev->serdev->dev; struct hci_nokia_alive_hdr *hdr; struct hci_nokia_alive_pkt *pkt; int ret = 0; hdr = (struct hci_nokia_alive_hdr *)skb->data; if (hdr->dlen != sizeof(*pkt)) { dev_err(dev, "Corrupted alive message"); btdev->init_error = -EIO; ret = -EIO; goto finish_alive; } pkt = skb_pull(skb, sizeof(*hdr)); if (pkt->mid != NOKIA_ALIVE_RESP) { dev_err(dev, "Alive received: invalid response: 0x%02x!", pkt->mid); btdev->init_error = -EINVAL; ret = -EINVAL; goto finish_alive; } dev_dbg(dev, "Alive received"); finish_alive: complete(&btdev->init_completion); kfree_skb(skb); return ret; } static int nokia_recv_radio(struct hci_dev *hdev, struct sk_buff *skb) { /* Packets received on the dedicated radio
channel are * HCI events and so feed them back into the core. */ hci_skb_pkt_type(skb) = HCI_EVENT_PKT; return hci_recv_frame(hdev, skb); } /* Recv data */ static const struct h4_recv_pkt nokia_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { NOKIA_RECV_ALIVE, .recv = nokia_recv_alive_packet }, { NOKIA_RECV_NEG, .recv = nokia_recv_negotiation_packet }, { NOKIA_RECV_RADIO, .recv = nokia_recv_radio }, }; static int nokia_recv(struct hci_uart *hu, const void *data, int count) { struct nokia_bt_dev *btdev = hu->priv; struct device *dev = &btdev->serdev->dev; int err; if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; btdev->rx_skb = h4_recv_buf(hu->hdev, btdev->rx_skb, data, count, nokia_recv_pkts, ARRAY_SIZE(nokia_recv_pkts)); if (IS_ERR(btdev->rx_skb)) { err = PTR_ERR(btdev->rx_skb); dev_err(dev, "Frame reassembly failed (%d)", err); btdev->rx_skb = NULL; return err; } return count; } static struct sk_buff *nokia_dequeue(struct hci_uart *hu) { struct nokia_bt_dev *btdev = hu->priv; struct device *dev = &btdev->serdev->dev; struct sk_buff *result = skb_dequeue(&btdev->txq); if (!btdev->initialized) return result; if (btdev->tx_enabled == !!result) return result; if (result) { pm_runtime_get_sync(dev); gpiod_set_value_cansleep(btdev->wakeup_bt, 1); } else { serdev_device_wait_until_sent(btdev->serdev, 0); gpiod_set_value_cansleep(btdev->wakeup_bt, 0); pm_runtime_put(dev); } btdev->tx_enabled = !!result; return result; } static const struct hci_uart_proto nokia_proto = { .id = HCI_UART_NOKIA, .name = "Nokia", .open = nokia_open, .close = nokia_close, .recv = nokia_recv, .enqueue = nokia_enqueue, .dequeue = nokia_dequeue, .flush = nokia_flush, .setup = nokia_setup, .manufacturer = 1, }; static int nokia_bluetooth_serdev_probe(struct serdev_device *serdev) { struct device *dev = &serdev->dev; struct nokia_bt_dev *btdev; struct clk *sysclk; int err = 0; btdev = devm_kzalloc(dev, sizeof(*btdev), GFP_KERNEL); if (!btdev) return -ENOMEM; btdev->hu.serdev = btdev->serdev = serdev; serdev_device_set_drvdata(serdev, btdev); btdev->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(btdev->reset)) { err = PTR_ERR(btdev->reset); dev_err(dev, "could not get reset gpio: %d", err); return err; } btdev->wakeup_host = devm_gpiod_get(dev, "host-wakeup", GPIOD_IN); if (IS_ERR(btdev->wakeup_host)) { err = PTR_ERR(btdev->wakeup_host); dev_err(dev, "could not get host wakeup gpio: %d", err); return err; } btdev->wake_irq = gpiod_to_irq(btdev->wakeup_host); err = devm_request_threaded_irq(dev, btdev->wake_irq, NULL, wakeup_handler, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, "wakeup", btdev); if (err) { dev_err(dev, "could not request wakeup irq: %d", err); return err; } btdev->wakeup_bt = devm_gpiod_get(dev, "bluetooth-wakeup", GPIOD_OUT_LOW); if (IS_ERR(btdev->wakeup_bt)) { err = PTR_ERR(btdev->wakeup_bt); dev_err(dev, "could not get BT wakeup gpio: %d", err); return err; } sysclk = devm_clk_get(dev, "sysclk"); if (IS_ERR(sysclk)) { err = PTR_ERR(sysclk); dev_err(dev, "could not get sysclk: %d", err); return err; } err = clk_prepare_enable(sysclk); if (err) { dev_err(dev, "could not enable sysclk: %d", err); return err; } btdev->sysclk_speed = clk_get_rate(sysclk); clk_disable_unprepare(sysclk); skb_queue_head_init(&btdev->txq); btdev->hu.priv = btdev; btdev->hu.alignment = 2; /* Nokia H4+ is word aligned */ err = hci_uart_register_device(&btdev->hu, &nokia_proto); if (err) {
dev_err(dev, "could not register bluetooth uart: %d", err); return err; } return 0; } static void nokia_bluetooth_serdev_remove(struct serdev_device *serdev) { struct nokia_bt_dev *btdev = serdev_device_get_drvdata(serdev); hci_uart_unregister_device(&btdev->hu); } static int nokia_bluetooth_runtime_suspend(struct device *dev) { struct serdev_device *serdev = to_serdev_device(dev); nokia_flow_control(serdev, false); return 0; } static int nokia_bluetooth_runtime_resume(struct device *dev) { struct serdev_device *serdev = to_serdev_device(dev); nokia_flow_control(serdev, true); return 0; } static const struct dev_pm_ops nokia_bluetooth_pm_ops = { SET_RUNTIME_PM_OPS(nokia_bluetooth_runtime_suspend, nokia_bluetooth_runtime_resume, NULL) }; #ifdef CONFIG_OF static const struct of_device_id nokia_bluetooth_of_match[] = { { .compatible = "nokia,h4p-bluetooth", }, {}, }; MODULE_DEVICE_TABLE(of, nokia_bluetooth_of_match); #endif static struct serdev_device_driver nokia_bluetooth_serdev_driver = { .probe = nokia_bluetooth_serdev_probe, .remove = nokia_bluetooth_serdev_remove, .driver = { .name = "nokia-bluetooth", .pm = &nokia_bluetooth_pm_ops, .of_match_table = of_match_ptr(nokia_bluetooth_of_match), }, }; module_serdev_device_driver(nokia_bluetooth_serdev_driver); MODULE_AUTHOR("Sebastian Reichel <[email protected]>"); MODULE_DESCRIPTION("Bluetooth HCI UART Nokia H4+ driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
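/*
 * Illustrative sketch, not part of the driver: the Nokia H4+ link is
 * word aligned (hu.alignment = 2), so nokia_enqueue() prepends the H4
 * packet type byte and then pads odd-length frames with one trailing
 * byte. The stand-alone helper below (hypothetical name) models the
 * resulting on-wire length.
 */
#include <stddef.h>

static size_t nokia_wire_len(size_t payload_len)
{
	size_t len = payload_len + 1;	/* H4 packet type byte */

	return len % 2 ? len + 1 : len;	/* pad odd frames to 16 bits */
}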
linux-master
drivers/bluetooth/hci_nokia.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * NXP Bluetooth driver * Copyright 2023 NXP */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/serdev.h> #include <linux/of.h> #include <linux/skbuff.h> #include <asm/unaligned.h> #include <linux/firmware.h> #include <linux/string.h> #include <linux/crc8.h> #include <linux/crc32.h> #include <linux/string_helpers.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "h4_recv.h" #define MANUFACTURER_NXP 37 #define BTNXPUART_TX_STATE_ACTIVE 1 #define BTNXPUART_FW_DOWNLOADING 2 #define BTNXPUART_CHECK_BOOT_SIGNATURE 3 #define BTNXPUART_SERDEV_OPEN 4 #define BTNXPUART_IR_IN_PROGRESS 5 /* NXP HW err codes */ #define BTNXPUART_IR_HW_ERR 0xb0 #define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin" #define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin" #define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin" #define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin" #define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se" #define FIRMWARE_IW624 "nxp/uartiw624_bt.bin" #define FIRMWARE_SECURE_IW624 "nxp/uartiw624_bt.bin.se" #define FIRMWARE_AW693 "nxp/uartaw693_bt.bin" #define FIRMWARE_SECURE_AW693 "nxp/uartaw693_bt.bin.se" #define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin" #define CHIP_ID_W9098 0x5c03 #define CHIP_ID_IW416 0x7201 #define CHIP_ID_IW612 0x7601 #define CHIP_ID_IW624a 0x8000 #define CHIP_ID_IW624c 0x8001 #define CHIP_ID_AW693 0x8200 #define FW_SECURE_MASK 0xc0 #define FW_OPEN 0x00 #define FW_AUTH_ILLEGAL 0x40 #define FW_AUTH_PLAIN 0x80 #define FW_AUTH_ENC 0xc0 #define HCI_NXP_PRI_BAUDRATE 115200 #define HCI_NXP_SEC_BAUDRATE 3000000 #define MAX_FW_FILE_NAME_LEN 50 /* Default ps timeout period in milliseconds */ #define PS_DEFAULT_TIMEOUT_PERIOD_MS 2000 /* wakeup methods */ #define WAKEUP_METHOD_DTR 0 #define WAKEUP_METHOD_BREAK 1 #define WAKEUP_METHOD_EXT_BREAK 2 #define WAKEUP_METHOD_RTS 3 #define WAKEUP_METHOD_INVALID 0xff /* power save mode status */ #define PS_MODE_DISABLE 0 #define PS_MODE_ENABLE 1 /* Power Save Commands to ps_work_func */ #define PS_CMD_EXIT_PS 1 #define PS_CMD_ENTER_PS 2 /* power save state */ #define PS_STATE_AWAKE 0 #define PS_STATE_SLEEP 1 /* Bluetooth vendor command : Sleep mode */ #define HCI_NXP_AUTO_SLEEP_MODE 0xfc23 /* Bluetooth vendor command : Wakeup method */ #define HCI_NXP_WAKEUP_METHOD 0xfc53 /* Bluetooth vendor command : Set operational baudrate */ #define HCI_NXP_SET_OPER_SPEED 0xfc09 /* Bluetooth vendor command: Independent Reset */ #define HCI_NXP_IND_RESET 0xfcfc /* Bluetooth Power State : Vendor cmd params */ #define BT_PS_ENABLE 0x02 #define BT_PS_DISABLE 0x03 /* Bluetooth Host Wakeup Methods */ #define BT_HOST_WAKEUP_METHOD_NONE 0x00 #define BT_HOST_WAKEUP_METHOD_DTR 0x01 #define BT_HOST_WAKEUP_METHOD_BREAK 0x02 #define BT_HOST_WAKEUP_METHOD_GPIO 0x03 /* Bluetooth Chip Wakeup Methods */ #define BT_CTRL_WAKEUP_METHOD_DSR 0x00 #define BT_CTRL_WAKEUP_METHOD_BREAK 0x01 #define BT_CTRL_WAKEUP_METHOD_GPIO 0x02 #define BT_CTRL_WAKEUP_METHOD_EXT_BREAK 0x04 #define BT_CTRL_WAKEUP_METHOD_RTS 0x05 struct ps_data { u8 target_ps_mode; /* ps mode to be set */ u8 cur_psmode; /* current ps_mode */ u8 ps_state; /* controller's power save state */ u8 ps_cmd; u8 h2c_wakeupmode; u8 cur_h2c_wakeupmode; u8 c2h_wakeupmode; u8 c2h_wakeup_gpio; u8 h2c_wakeup_gpio; bool driver_sent_cmd; u16 h2c_ps_interval; u16 c2h_ps_interval; struct hci_dev *hdev; struct work_struct work; struct timer_list ps_timer; }; struct wakeup_cmd_payload { u8 c2h_wakeupmode; u8 c2h_wakeup_gpio; u8 h2c_wakeupmode; u8 
h2c_wakeup_gpio; } __packed; struct psmode_cmd_payload { u8 ps_cmd; __le16 c2h_ps_interval; } __packed; struct btnxpuart_data { const char *helper_fw_name; const char *fw_name; }; struct btnxpuart_dev { struct hci_dev *hdev; struct serdev_device *serdev; struct work_struct tx_work; unsigned long tx_state; struct sk_buff_head txq; struct sk_buff *rx_skb; const struct firmware *fw; u8 fw_name[MAX_FW_FILE_NAME_LEN]; u32 fw_dnld_v1_offset; u32 fw_v1_sent_bytes; u32 fw_v3_offset_correction; u32 fw_v1_expected_len; u32 boot_reg_offset; wait_queue_head_t fw_dnld_done_wait_q; wait_queue_head_t check_boot_sign_wait_q; u32 new_baudrate; u32 current_baudrate; u32 fw_init_baudrate; bool timeout_changed; bool baudrate_changed; bool helper_downloaded; struct ps_data psdata; struct btnxpuart_data *nxp_data; }; #define NXP_V1_FW_REQ_PKT 0xa5 #define NXP_V1_CHIP_VER_PKT 0xaa #define NXP_V3_FW_REQ_PKT 0xa7 #define NXP_V3_CHIP_VER_PKT 0xab #define NXP_ACK_V1 0x5a #define NXP_NAK_V1 0xbf #define NXP_ACK_V3 0x7a #define NXP_NAK_V3 0x7b #define NXP_CRC_ERROR_V3 0x7c #define HDR_LEN 16 #define NXP_RECV_CHIP_VER_V1 \ .type = NXP_V1_CHIP_VER_PKT, \ .hlen = 4, \ .loff = 0, \ .lsize = 0, \ .maxlen = 4 #define NXP_RECV_FW_REQ_V1 \ .type = NXP_V1_FW_REQ_PKT, \ .hlen = 4, \ .loff = 0, \ .lsize = 0, \ .maxlen = 4 #define NXP_RECV_CHIP_VER_V3 \ .type = NXP_V3_CHIP_VER_PKT, \ .hlen = 4, \ .loff = 0, \ .lsize = 0, \ .maxlen = 4 #define NXP_RECV_FW_REQ_V3 \ .type = NXP_V3_FW_REQ_PKT, \ .hlen = 9, \ .loff = 0, \ .lsize = 0, \ .maxlen = 9 struct v1_data_req { __le16 len; __le16 len_comp; } __packed; struct v1_start_ind { __le16 chip_id; __le16 chip_id_comp; } __packed; struct v3_data_req { __le16 len; __le32 offset; __le16 error; u8 crc; } __packed; struct v3_start_ind { __le16 chip_id; u8 loader_ver; u8 crc; } __packed; /* UART register addresses of BT chip */ #define CLKDIVADDR 0x7f00008f #define UARTDIVADDR 0x7f000090 #define UARTMCRADDR 0x7f000091 #define UARTREINITADDR 0x7f000092 #define UARTICRADDR 0x7f000093 #define UARTFCRADDR 0x7f000094 #define MCR 0x00000022 #define INIT 0x00000001 #define ICR 0x000000c7 #define FCR 0x000000c7 #define POLYNOMIAL8 0x07 struct uart_reg { __le32 address; __le32 value; } __packed; struct uart_config { struct uart_reg clkdiv; struct uart_reg uartdiv; struct uart_reg mcr; struct uart_reg re_init; struct uart_reg icr; struct uart_reg fcr; __be32 crc; } __packed; struct nxp_bootloader_cmd { __le32 header; __le32 arg; __le32 payload_len; __be32 crc; } __packed; static u8 crc8_table[CRC8_TABLE_SIZE]; /* Default configurations */ #define DEFAULT_H2C_WAKEUP_MODE WAKEUP_METHOD_BREAK #define DEFAULT_PS_MODE PS_MODE_DISABLE #define FW_INIT_BAUDRATE HCI_NXP_PRI_BAUDRATE static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode, u32 plen, void *param) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct ps_data *psdata = &nxpdev->psdata; struct sk_buff *skb; /* set flag to prevent nxp_enqueue from parsing values from this command and * calling hci_cmd_sync_queue() again. 
*/ psdata->driver_sent_cmd = true; skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT); psdata->driver_sent_cmd = false; return skb; } static void btnxpuart_tx_wakeup(struct btnxpuart_dev *nxpdev) { if (schedule_work(&nxpdev->tx_work)) set_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state); } /* NXP Power Save Feature */ static void ps_start_timer(struct btnxpuart_dev *nxpdev) { struct ps_data *psdata = &nxpdev->psdata; if (!psdata) return; if (psdata->cur_psmode == PS_MODE_ENABLE) mod_timer(&psdata->ps_timer, jiffies + msecs_to_jiffies(psdata->h2c_ps_interval)); } static void ps_cancel_timer(struct btnxpuart_dev *nxpdev) { struct ps_data *psdata = &nxpdev->psdata; flush_work(&psdata->work); del_timer_sync(&psdata->ps_timer); } static void ps_control(struct hci_dev *hdev, u8 ps_state) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct ps_data *psdata = &nxpdev->psdata; int status; if (psdata->ps_state == ps_state || !test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state)) return; switch (psdata->cur_h2c_wakeupmode) { case WAKEUP_METHOD_DTR: if (ps_state == PS_STATE_AWAKE) status = serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0); else status = serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR); break; case WAKEUP_METHOD_BREAK: default: if (ps_state == PS_STATE_AWAKE) status = serdev_device_break_ctl(nxpdev->serdev, 0); else status = serdev_device_break_ctl(nxpdev->serdev, -1); bt_dev_dbg(hdev, "Set UART break: %s, status=%d", str_on_off(ps_state == PS_STATE_SLEEP), status); break; } if (!status) psdata->ps_state = ps_state; if (ps_state == PS_STATE_AWAKE) btnxpuart_tx_wakeup(nxpdev); } static void ps_work_func(struct work_struct *work) { struct ps_data *data = container_of(work, struct ps_data, work); if (data->ps_cmd == PS_CMD_ENTER_PS && data->cur_psmode == PS_MODE_ENABLE) ps_control(data->hdev, PS_STATE_SLEEP); else if (data->ps_cmd == PS_CMD_EXIT_PS) ps_control(data->hdev, PS_STATE_AWAKE); } static void ps_timeout_func(struct timer_list *t) { struct ps_data *data = from_timer(data, t, ps_timer); struct hci_dev *hdev = data->hdev; struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); if (test_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state)) { ps_start_timer(nxpdev); } else { data->ps_cmd = PS_CMD_ENTER_PS; schedule_work(&data->work); } } static void ps_setup(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct ps_data *psdata = &nxpdev->psdata; psdata->hdev = hdev; INIT_WORK(&psdata->work, ps_work_func); timer_setup(&psdata->ps_timer, ps_timeout_func, 0); } static void ps_wakeup(struct btnxpuart_dev *nxpdev) { struct ps_data *psdata = &nxpdev->psdata; if (psdata->ps_state != PS_STATE_AWAKE) { psdata->ps_cmd = PS_CMD_EXIT_PS; schedule_work(&psdata->work); } } static int send_ps_cmd(struct hci_dev *hdev, void *data) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct ps_data *psdata = &nxpdev->psdata; struct psmode_cmd_payload pcmd; struct sk_buff *skb; u8 *status; if (psdata->target_ps_mode == PS_MODE_ENABLE) pcmd.ps_cmd = BT_PS_ENABLE; else pcmd.ps_cmd = BT_PS_DISABLE; pcmd.c2h_ps_interval = __cpu_to_le16(psdata->c2h_ps_interval); skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd), &pcmd); if (IS_ERR(skb)) { bt_dev_err(hdev, "Setting Power Save mode failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } status = skb_pull_data(skb, 1); if (status) { if (!*status) psdata->cur_psmode = psdata->target_ps_mode; else psdata->target_ps_mode = psdata->cur_psmode; if (psdata->cur_psmode == PS_MODE_ENABLE) 
ps_start_timer(nxpdev); else ps_wakeup(nxpdev); bt_dev_dbg(hdev, "Power Save mode response: status=%d, ps_mode=%d", *status, psdata->cur_psmode); } kfree_skb(skb); return 0; } static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct ps_data *psdata = &nxpdev->psdata; struct wakeup_cmd_payload pcmd; struct sk_buff *skb; u8 *status; pcmd.c2h_wakeupmode = psdata->c2h_wakeupmode; pcmd.c2h_wakeup_gpio = psdata->c2h_wakeup_gpio; switch (psdata->h2c_wakeupmode) { case WAKEUP_METHOD_DTR: pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_DSR; break; case WAKEUP_METHOD_BREAK: default: pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_BREAK; break; } pcmd.h2c_wakeup_gpio = 0xff; skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd), &pcmd); if (IS_ERR(skb)) { bt_dev_err(hdev, "Setting wake-up method failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } status = skb_pull_data(skb, 1); if (status) { if (*status == 0) psdata->cur_h2c_wakeupmode = psdata->h2c_wakeupmode; else psdata->h2c_wakeupmode = psdata->cur_h2c_wakeupmode; bt_dev_dbg(hdev, "Set Wakeup Method response: status=%d, h2c_wakeupmode=%d", *status, psdata->cur_h2c_wakeupmode); } kfree_skb(skb); return 0; } static void ps_init(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct ps_data *psdata = &nxpdev->psdata; serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_RTS); usleep_range(5000, 10000); serdev_device_set_tiocm(nxpdev->serdev, TIOCM_RTS, 0); usleep_range(5000, 10000); psdata->ps_state = PS_STATE_AWAKE; psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE; psdata->c2h_wakeup_gpio = 0xff; psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID; psdata->h2c_ps_interval = PS_DEFAULT_TIMEOUT_PERIOD_MS; switch (DEFAULT_H2C_WAKEUP_MODE) { case WAKEUP_METHOD_DTR: psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR; serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR); serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0); break; case WAKEUP_METHOD_BREAK: default: psdata->h2c_wakeupmode = WAKEUP_METHOD_BREAK; serdev_device_break_ctl(nxpdev->serdev, -1); usleep_range(5000, 10000); serdev_device_break_ctl(nxpdev->serdev, 0); usleep_range(5000, 10000); break; } psdata->cur_psmode = PS_MODE_DISABLE; psdata->target_ps_mode = DEFAULT_PS_MODE; if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode) hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL); if (psdata->cur_psmode != psdata->target_ps_mode) hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL); } /* NXP Firmware Download Feature */ static int nxp_download_firmware(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); int err = 0; nxpdev->fw_dnld_v1_offset = 0; nxpdev->fw_v1_sent_bytes = 0; nxpdev->fw_v1_expected_len = HDR_LEN; nxpdev->boot_reg_offset = 0; nxpdev->fw_v3_offset_correction = 0; nxpdev->baudrate_changed = false; nxpdev->timeout_changed = false; nxpdev->helper_downloaded = false; serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE); serdev_device_set_flow_control(nxpdev->serdev, false); nxpdev->current_baudrate = HCI_NXP_PRI_BAUDRATE; /* Wait till FW is downloaded */ err = wait_event_interruptible_timeout(nxpdev->fw_dnld_done_wait_q, !test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state), msecs_to_jiffies(60000)); if (err == 0) { bt_dev_err(hdev, "FW Download Timeout."); return -ETIMEDOUT; } serdev_device_set_flow_control(nxpdev->serdev, true); release_firmware(nxpdev->fw); memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); /* Allow the downloaded 
FW to initialize */ msleep(1200); return 0; } static void nxp_send_ack(u8 ack, struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); u8 ack_nak[2]; int len = 1; ack_nak[0] = ack; if (ack == NXP_ACK_V3) { ack_nak[1] = crc8(crc8_table, ack_nak, 1, 0xff); len = 2; } serdev_device_write_buf(nxpdev->serdev, ack_nak, len); } static bool nxp_fw_change_baudrate(struct hci_dev *hdev, u16 req_len) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct nxp_bootloader_cmd nxp_cmd5; struct uart_config uart_config; u32 clkdivaddr = CLKDIVADDR - nxpdev->boot_reg_offset; u32 uartdivaddr = UARTDIVADDR - nxpdev->boot_reg_offset; u32 uartmcraddr = UARTMCRADDR - nxpdev->boot_reg_offset; u32 uartreinitaddr = UARTREINITADDR - nxpdev->boot_reg_offset; u32 uarticraddr = UARTICRADDR - nxpdev->boot_reg_offset; u32 uartfcraddr = UARTFCRADDR - nxpdev->boot_reg_offset; if (req_len == sizeof(nxp_cmd5)) { nxp_cmd5.header = __cpu_to_le32(5); nxp_cmd5.arg = 0; nxp_cmd5.payload_len = __cpu_to_le32(sizeof(uart_config)); /* FW expects swapped CRC bytes */ nxp_cmd5.crc = __cpu_to_be32(crc32_be(0UL, (char *)&nxp_cmd5, sizeof(nxp_cmd5) - 4)); serdev_device_write_buf(nxpdev->serdev, (u8 *)&nxp_cmd5, sizeof(nxp_cmd5)); nxpdev->fw_v3_offset_correction += req_len; } else if (req_len == sizeof(uart_config)) { uart_config.clkdiv.address = __cpu_to_le32(clkdivaddr); uart_config.clkdiv.value = __cpu_to_le32(0x00c00000); uart_config.uartdiv.address = __cpu_to_le32(uartdivaddr); uart_config.uartdiv.value = __cpu_to_le32(1); uart_config.mcr.address = __cpu_to_le32(uartmcraddr); uart_config.mcr.value = __cpu_to_le32(MCR); uart_config.re_init.address = __cpu_to_le32(uartreinitaddr); uart_config.re_init.value = __cpu_to_le32(INIT); uart_config.icr.address = __cpu_to_le32(uarticraddr); uart_config.icr.value = __cpu_to_le32(ICR); uart_config.fcr.address = __cpu_to_le32(uartfcraddr); uart_config.fcr.value = __cpu_to_le32(FCR); /* FW expects swapped CRC bytes */ uart_config.crc = __cpu_to_be32(crc32_be(0UL, (char *)&uart_config, sizeof(uart_config) - 4)); serdev_device_write_buf(nxpdev->serdev, (u8 *)&uart_config, sizeof(uart_config)); serdev_device_wait_until_sent(nxpdev->serdev, 0); nxpdev->fw_v3_offset_correction += req_len; return true; } return false; } static bool nxp_fw_change_timeout(struct hci_dev *hdev, u16 req_len) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct nxp_bootloader_cmd nxp_cmd7; if (req_len != sizeof(nxp_cmd7)) return false; nxp_cmd7.header = __cpu_to_le32(7); nxp_cmd7.arg = __cpu_to_le32(0x70); nxp_cmd7.payload_len = 0; /* FW expects swapped CRC bytes */ nxp_cmd7.crc = __cpu_to_be32(crc32_be(0UL, (char *)&nxp_cmd7, sizeof(nxp_cmd7) - 4)); serdev_device_write_buf(nxpdev->serdev, (u8 *)&nxp_cmd7, sizeof(nxp_cmd7)); serdev_device_wait_until_sent(nxpdev->serdev, 0); nxpdev->fw_v3_offset_correction += req_len; return true; } static u32 nxp_get_data_len(const u8 *buf) { struct nxp_bootloader_cmd *hdr = (struct nxp_bootloader_cmd *)buf; return __le32_to_cpu(hdr->payload_len); } static bool is_fw_downloading(struct btnxpuart_dev *nxpdev) { return test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); } static bool process_boot_signature(struct btnxpuart_dev *nxpdev) { if (test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state)) { clear_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state); wake_up_interruptible(&nxpdev->check_boot_sign_wait_q); return false; } return is_fw_downloading(nxpdev); } static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name) { struct 
btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); int err = 0; if (!fw_name) return -ENOENT; if (!strlen(nxpdev->fw_name)) { snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "%s", fw_name); bt_dev_dbg(hdev, "Request Firmware: %s", nxpdev->fw_name); err = request_firmware(&nxpdev->fw, nxpdev->fw_name, &hdev->dev); if (err < 0) { bt_dev_err(hdev, "Firmware file %s not found", nxpdev->fw_name); clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); } } return err; } /* for legacy chipsets with V1 bootloader */ static int nxp_recv_chip_ver_v1(struct hci_dev *hdev, struct sk_buff *skb) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct v1_start_ind *req; __u16 chip_id; req = skb_pull_data(skb, sizeof(*req)); if (!req) goto free_skb; chip_id = le16_to_cpu(req->chip_id ^ req->chip_id_comp); if (chip_id == 0xffff && nxpdev->fw_dnld_v1_offset) { nxpdev->fw_dnld_v1_offset = 0; nxpdev->fw_v1_sent_bytes = 0; nxpdev->fw_v1_expected_len = HDR_LEN; release_firmware(nxpdev->fw); memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); nxp_send_ack(NXP_ACK_V1, hdev); } free_skb: kfree_skb(skb); return 0; } static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct btnxpuart_data *nxp_data = nxpdev->nxp_data; struct v1_data_req *req; __u16 len; if (!process_boot_signature(nxpdev)) goto free_skb; req = skb_pull_data(skb, sizeof(*req)); if (!req) goto free_skb; len = __le16_to_cpu(req->len ^ req->len_comp); if (len != 0xffff) { bt_dev_dbg(hdev, "ERR: Send NAK"); nxp_send_ack(NXP_NAK_V1, hdev); goto free_skb; } nxp_send_ack(NXP_ACK_V1, hdev); len = __le16_to_cpu(req->len); if (!nxp_data->helper_fw_name) { if (!nxpdev->timeout_changed) { nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, len); goto free_skb; } if (!nxpdev->baudrate_changed) { nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, len); if (nxpdev->baudrate_changed) { serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_SEC_BAUDRATE); serdev_device_set_flow_control(nxpdev->serdev, true); nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE; } goto free_skb; } } if (!nxp_data->helper_fw_name || nxpdev->helper_downloaded) { if (nxp_request_firmware(hdev, nxp_data->fw_name)) goto free_skb; } else if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) { if (nxp_request_firmware(hdev, nxp_data->helper_fw_name)) goto free_skb; } if (!len) { bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes", nxpdev->fw->size); if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) { nxpdev->helper_downloaded = true; serdev_device_wait_until_sent(nxpdev->serdev, 0); serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_SEC_BAUDRATE); serdev_device_set_flow_control(nxpdev->serdev, true); } else { clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); } goto free_skb; } if (len & 0x01) { /* The CRC did not match at the other end. * Simply send the same bytes again. */ len = nxpdev->fw_v1_sent_bytes; bt_dev_dbg(hdev, "CRC error. Resend %d bytes of FW.", len); } else { nxpdev->fw_dnld_v1_offset += nxpdev->fw_v1_sent_bytes; /* The FW bin file is made up of many blocks of * 16 byte header and payload data chunks. If the * FW has requested a header, read the payload length * info from the header, before sending the header. * In the next iteration, the FW should request the * payload data chunk, which should be equal to the * payload length read from header. 
If there is a * mismatch, clearly the driver and FW are out of sync, * and we need to re-send the previous header again. */ if (len == nxpdev->fw_v1_expected_len) { if (len == HDR_LEN) nxpdev->fw_v1_expected_len = nxp_get_data_len(nxpdev->fw->data + nxpdev->fw_dnld_v1_offset); else nxpdev->fw_v1_expected_len = HDR_LEN; } else if (len == HDR_LEN) { /* FW download out of sync. Send previous chunk again */ nxpdev->fw_dnld_v1_offset -= nxpdev->fw_v1_sent_bytes; nxpdev->fw_v1_expected_len = HDR_LEN; } } if (nxpdev->fw_dnld_v1_offset + len <= nxpdev->fw->size) serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + nxpdev->fw_dnld_v1_offset, len); nxpdev->fw_v1_sent_bytes = len; free_skb: kfree_skb(skb); return 0; } static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid, u8 loader_ver) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); char *fw_name = NULL; switch (chipid) { case CHIP_ID_W9098: fw_name = FIRMWARE_W9098; break; case CHIP_ID_IW416: fw_name = FIRMWARE_IW416; break; case CHIP_ID_IW612: fw_name = FIRMWARE_IW612; break; case CHIP_ID_IW624a: case CHIP_ID_IW624c: nxpdev->boot_reg_offset = 1; if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) fw_name = FIRMWARE_IW624; else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL) fw_name = FIRMWARE_SECURE_IW624; else bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); break; case CHIP_ID_AW693: if ((loader_ver & FW_SECURE_MASK) == FW_OPEN) fw_name = FIRMWARE_AW693; else if ((loader_ver & FW_SECURE_MASK) != FW_AUTH_ILLEGAL) fw_name = FIRMWARE_SECURE_AW693; else bt_dev_err(hdev, "Illegal loader version %02x", loader_ver); break; default: bt_dev_err(hdev, "Unknown chip signature %04x", chipid); break; } return fw_name; } static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb) { struct v3_start_ind *req = skb_pull_data(skb, sizeof(*req)); struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); u16 chip_id; u8 loader_ver; if (!process_boot_signature(nxpdev)) goto free_skb; chip_id = le16_to_cpu(req->chip_id); loader_ver = req->loader_ver; if (!nxp_request_firmware(hdev, nxp_get_fw_name_from_chipid(hdev, chip_id, loader_ver))) nxp_send_ack(NXP_ACK_V3, hdev); free_skb: kfree_skb(skb); return 0; } static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct v3_data_req *req; __u16 len; __u32 offset; if (!process_boot_signature(nxpdev)) goto free_skb; req = skb_pull_data(skb, sizeof(*req)); if (!req || !nxpdev->fw) goto free_skb; nxp_send_ack(NXP_ACK_V3, hdev); len = __le16_to_cpu(req->len); if (!nxpdev->timeout_changed) { nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, len); goto free_skb; } if (!nxpdev->baudrate_changed) { nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, len); if (nxpdev->baudrate_changed) { serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_SEC_BAUDRATE); serdev_device_set_flow_control(nxpdev->serdev, true); nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE; } goto free_skb; } if (req->len == 0) { bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes", nxpdev->fw->size); clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); goto free_skb; } if (req->error) bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip", req->error); offset = __le32_to_cpu(req->offset); if (offset < nxpdev->fw_v3_offset_correction) { /* This scenario should ideally never occur. But if it ever does, * FW is out of sync and needs a power cycle. 
*/ bt_dev_err(hdev, "Something went wrong during FW download"); bt_dev_err(hdev, "Please power cycle and try again"); goto free_skb; } serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset - nxpdev->fw_v3_offset_correction, len); free_skb: kfree_skb(skb); return 0; } static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); __le32 new_baudrate = __cpu_to_le32(nxpdev->new_baudrate); struct ps_data *psdata = &nxpdev->psdata; struct sk_buff *skb; u8 *status; if (!psdata) return 0; skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4, (u8 *)&new_baudrate); if (IS_ERR(skb)) { bt_dev_err(hdev, "Setting baudrate failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } status = (u8 *)skb_pull_data(skb, 1); if (status) { if (*status == 0) { serdev_device_set_baudrate(nxpdev->serdev, nxpdev->new_baudrate); nxpdev->current_baudrate = nxpdev->new_baudrate; } bt_dev_dbg(hdev, "Set baudrate response: status=%d, baudrate=%d", *status, nxpdev->new_baudrate); } kfree_skb(skb); return 0; } static int nxp_check_boot_sign(struct btnxpuart_dev *nxpdev) { serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE); if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) serdev_device_set_flow_control(nxpdev->serdev, false); else serdev_device_set_flow_control(nxpdev->serdev, true); set_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state); return wait_event_interruptible_timeout(nxpdev->check_boot_sign_wait_q, !test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state), msecs_to_jiffies(1000)); } static int nxp_set_ind_reset(struct hci_dev *hdev, void *data) { static const u8 ir_hw_err[] = { HCI_EV_HARDWARE_ERROR, 0x01, BTNXPUART_IR_HW_ERR }; struct sk_buff *skb; skb = bt_skb_alloc(3, GFP_ATOMIC); if (!skb) return -ENOMEM; hci_skb_pkt_type(skb) = HCI_EVENT_PKT; skb_put_data(skb, ir_hw_err, 3); /* Inject Hardware Error to upper stack */ return hci_recv_frame(hdev, skb); } /* NXP protocol */ static int nxp_setup(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); int err = 0; if (nxp_check_boot_sign(nxpdev)) { bt_dev_dbg(hdev, "Need FW Download."); err = nxp_download_firmware(hdev); if (err < 0) return err; } else { bt_dev_dbg(hdev, "FW already running."); clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); } serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate); nxpdev->current_baudrate = nxpdev->fw_init_baudrate; if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) { nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE; hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL); } ps_init(hdev); if (test_and_clear_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) hci_dev_clear_flag(hdev, HCI_SETUP); return 0; } static void nxp_hw_err(struct hci_dev *hdev, u8 code) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); switch (code) { case BTNXPUART_IR_HW_ERR: set_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state); hci_dev_set_flag(hdev, HCI_SETUP); break; default: break; } } static int nxp_shutdown(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct sk_buff *skb; u8 *status; u8 pcmd = 0; if (test_bit(BTNXPUART_IR_IN_PROGRESS, &nxpdev->tx_state)) { skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd); if (IS_ERR(skb)) return PTR_ERR(skb); status = skb_pull_data(skb, 1); if (status) { serdev_device_set_flow_control(nxpdev->serdev, false); set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); } kfree_skb(skb); } return 0; } static int 
btnxpuart_queue_skb(struct hci_dev *hdev, struct sk_buff *skb) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&nxpdev->txq, skb); btnxpuart_tx_wakeup(nxpdev); return 0; } static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); struct ps_data *psdata = &nxpdev->psdata; struct hci_command_hdr *hdr; struct psmode_cmd_payload ps_parm; struct wakeup_cmd_payload wakeup_parm; __le32 baudrate_parm; /* if vendor commands are received from user space (e.g. hcitool), update * driver flags accordingly and ask driver to re-send the command to FW. * In case the payload for any command does not match expected payload * length, let the firmware and user space program handle it, or throw * an error. */ if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT && !psdata->driver_sent_cmd) { hdr = (struct hci_command_hdr *)skb->data; if (hdr->plen != (skb->len - HCI_COMMAND_HDR_SIZE)) return btnxpuart_queue_skb(hdev, skb); switch (__le16_to_cpu(hdr->opcode)) { case HCI_NXP_AUTO_SLEEP_MODE: if (hdr->plen == sizeof(ps_parm)) { memcpy(&ps_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen); if (ps_parm.ps_cmd == BT_PS_ENABLE) psdata->target_ps_mode = PS_MODE_ENABLE; else if (ps_parm.ps_cmd == BT_PS_DISABLE) psdata->target_ps_mode = PS_MODE_DISABLE; psdata->c2h_ps_interval = __le16_to_cpu(ps_parm.c2h_ps_interval); hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL); goto free_skb; } break; case HCI_NXP_WAKEUP_METHOD: if (hdr->plen == sizeof(wakeup_parm)) { memcpy(&wakeup_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen); psdata->c2h_wakeupmode = wakeup_parm.c2h_wakeupmode; psdata->c2h_wakeup_gpio = wakeup_parm.c2h_wakeup_gpio; psdata->h2c_wakeup_gpio = wakeup_parm.h2c_wakeup_gpio; switch (wakeup_parm.h2c_wakeupmode) { case BT_CTRL_WAKEUP_METHOD_DSR: psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR; break; case BT_CTRL_WAKEUP_METHOD_BREAK: default: psdata->h2c_wakeupmode = WAKEUP_METHOD_BREAK; break; } hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL); goto free_skb; } break; case HCI_NXP_SET_OPER_SPEED: if (hdr->plen == sizeof(baudrate_parm)) { memcpy(&baudrate_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen); nxpdev->new_baudrate = __le32_to_cpu(baudrate_parm); hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL); goto free_skb; } break; case HCI_NXP_IND_RESET: if (hdr->plen == 1) { hci_cmd_sync_queue(hdev, nxp_set_ind_reset, NULL, NULL); goto free_skb; } break; default: break; } } return btnxpuart_queue_skb(hdev, skb); free_skb: kfree_skb(skb); return 0; } static struct sk_buff *nxp_dequeue(void *data) { struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)data; ps_wakeup(nxpdev); ps_start_timer(nxpdev); return skb_dequeue(&nxpdev->txq); } /* btnxpuart based on serdev */ static void btnxpuart_tx_work(struct work_struct *work) { struct btnxpuart_dev *nxpdev = container_of(work, struct btnxpuart_dev, tx_work); struct serdev_device *serdev = nxpdev->serdev; struct hci_dev *hdev = nxpdev->hdev; struct sk_buff *skb; int len; while ((skb = nxp_dequeue(nxpdev))) { len = serdev_device_write_buf(serdev, skb->data, skb->len); hdev->stat.byte_tx += len; skb_pull(skb, len); if (skb->len > 0) { skb_queue_head(&nxpdev->txq, skb); break; } switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; } kfree_skb(skb); } 
clear_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state); } static int btnxpuart_open(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); int err = 0; err = serdev_device_open(nxpdev->serdev); if (err) { bt_dev_err(hdev, "Unable to open UART device %s", dev_name(&nxpdev->serdev->dev)); } else { set_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state); } return err; } static int btnxpuart_close(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); ps_wakeup(nxpdev); serdev_device_close(nxpdev->serdev); clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state); return 0; } static int btnxpuart_flush(struct hci_dev *hdev) { struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); /* Flush any pending characters */ serdev_device_write_flush(nxpdev->serdev); skb_queue_purge(&nxpdev->txq); cancel_work_sync(&nxpdev->tx_work); kfree_skb(nxpdev->rx_skb); nxpdev->rx_skb = NULL; return 0; } static const struct h4_recv_pkt nxp_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { NXP_RECV_CHIP_VER_V1, .recv = nxp_recv_chip_ver_v1 }, { NXP_RECV_FW_REQ_V1, .recv = nxp_recv_fw_req_v1 }, { NXP_RECV_CHIP_VER_V3, .recv = nxp_recv_chip_ver_v3 }, { NXP_RECV_FW_REQ_V3, .recv = nxp_recv_fw_req_v3 }, }; static int btnxpuart_receive_buf(struct serdev_device *serdev, const u8 *data, size_t count) { struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev); ps_start_timer(nxpdev); nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count, nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts)); if (IS_ERR(nxpdev->rx_skb)) { int err = PTR_ERR(nxpdev->rx_skb); /* Safe to ignore out-of-sync bootloader signatures */ if (is_fw_downloading(nxpdev)) return count; bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err); nxpdev->rx_skb = NULL; return err; } if (!is_fw_downloading(nxpdev)) nxpdev->hdev->stat.byte_rx += count; return count; } static void btnxpuart_write_wakeup(struct serdev_device *serdev) { serdev_device_write_wakeup(serdev); } static const struct serdev_device_ops btnxpuart_client_ops = { .receive_buf = btnxpuart_receive_buf, .write_wakeup = btnxpuart_write_wakeup, }; static int nxp_serdev_probe(struct serdev_device *serdev) { struct hci_dev *hdev; struct btnxpuart_dev *nxpdev; nxpdev = devm_kzalloc(&serdev->dev, sizeof(*nxpdev), GFP_KERNEL); if (!nxpdev) return -ENOMEM; nxpdev->nxp_data = (struct btnxpuart_data *)device_get_match_data(&serdev->dev); nxpdev->serdev = serdev; serdev_device_set_drvdata(serdev, nxpdev); serdev_device_set_client_ops(serdev, &btnxpuart_client_ops); INIT_WORK(&nxpdev->tx_work, btnxpuart_tx_work); skb_queue_head_init(&nxpdev->txq); init_waitqueue_head(&nxpdev->fw_dnld_done_wait_q); init_waitqueue_head(&nxpdev->check_boot_sign_wait_q); device_property_read_u32(&nxpdev->serdev->dev, "fw-init-baudrate", &nxpdev->fw_init_baudrate); if (!nxpdev->fw_init_baudrate) nxpdev->fw_init_baudrate = FW_INIT_BAUDRATE; set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); crc8_populate_msb(crc8_table, POLYNOMIAL8); /* Initialize and register HCI device */ hdev = hci_alloc_dev(); if (!hdev) { dev_err(&serdev->dev, "Can't allocate HCI device\n"); return -ENOMEM; } nxpdev->hdev = hdev; hdev->bus = HCI_UART; hci_set_drvdata(hdev, nxpdev); hdev->manufacturer = MANUFACTURER_NXP; hdev->open = btnxpuart_open; hdev->close = btnxpuart_close; hdev->flush = btnxpuart_flush; hdev->setup = nxp_setup; hdev->send = nxp_enqueue; hdev->hw_error = nxp_hw_err; hdev->shutdown = 
nxp_shutdown; SET_HCIDEV_DEV(hdev, &serdev->dev); if (hci_register_dev(hdev) < 0) { dev_err(&serdev->dev, "Can't register HCI device\n"); hci_free_dev(hdev); return -ENODEV; } ps_setup(hdev); return 0; } static void nxp_serdev_remove(struct serdev_device *serdev) { struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev); struct hci_dev *hdev = nxpdev->hdev; /* Restore FW baudrate to fw_init_baudrate if changed. * This will ensure FW baudrate is in sync with * driver baudrate in case this driver is re-inserted. */ if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) { nxpdev->new_baudrate = nxpdev->fw_init_baudrate; nxp_set_baudrate_cmd(hdev, NULL); } ps_cancel_timer(nxpdev); hci_unregister_dev(hdev); hci_free_dev(hdev); } static struct btnxpuart_data w8987_data __maybe_unused = { .helper_fw_name = NULL, .fw_name = FIRMWARE_W8987, }; static struct btnxpuart_data w8997_data __maybe_unused = { .helper_fw_name = FIRMWARE_HELPER, .fw_name = FIRMWARE_W8997, }; static const struct of_device_id nxpuart_of_match_table[] __maybe_unused = { { .compatible = "nxp,88w8987-bt", .data = &w8987_data }, { .compatible = "nxp,88w8997-bt", .data = &w8997_data }, { } }; MODULE_DEVICE_TABLE(of, nxpuart_of_match_table); static struct serdev_device_driver nxp_serdev_driver = { .probe = nxp_serdev_probe, .remove = nxp_serdev_remove, .driver = { .name = "btnxpuart", .of_match_table = of_match_ptr(nxpuart_of_match_table), }, }; module_serdev_device_driver(nxp_serdev_driver); MODULE_AUTHOR("Neeraj Sanjay Kale <[email protected]>"); MODULE_DESCRIPTION("NXP Bluetooth Serial driver"); MODULE_LICENSE("GPL");
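/*
 * Illustrative sketch, not part of the driver: the V1 bootloader
 * protects each header field with a one's-complement copy, and
 * nxp_recv_fw_req_v1() accepts a request only when the field XORed
 * with its complement gives 0xffff. Hypothetical stand-alone check:
 */
#include <stdbool.h>
#include <stdint.h>

static bool v1_field_valid(uint16_t val, uint16_t val_comp)
{
	return (uint16_t)(val ^ val_comp) == 0xffff;
}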
linux-master
drivers/bluetooth/btnxpuart.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth HCI UART driver * * Copyright (C) 2000-2001 Qualcomm Incorporated * Copyright (C) 2002-2003 Maxim Krasnyansky <[email protected]> * Copyright (C) 2004-2005 Marcel Holtmann <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/signal.h> #include <linux/ioctl.h> #include <linux/skbuff.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" struct h4_struct { struct sk_buff *rx_skb; struct sk_buff_head txq; }; /* Initialize protocol */ static int h4_open(struct hci_uart *hu) { struct h4_struct *h4; BT_DBG("hu %p", hu); h4 = kzalloc(sizeof(*h4), GFP_KERNEL); if (!h4) return -ENOMEM; skb_queue_head_init(&h4->txq); hu->priv = h4; return 0; } /* Flush protocol data */ static int h4_flush(struct hci_uart *hu) { struct h4_struct *h4 = hu->priv; BT_DBG("hu %p", hu); skb_queue_purge(&h4->txq); return 0; } /* Close protocol */ static int h4_close(struct hci_uart *hu) { struct h4_struct *h4 = hu->priv; BT_DBG("hu %p", hu); skb_queue_purge(&h4->txq); kfree_skb(h4->rx_skb); hu->priv = NULL; kfree(h4); return 0; } /* Enqueue frame for transmission (padding, crc, etc) */ static int h4_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct h4_struct *h4 = hu->priv; BT_DBG("hu %p skb %p", hu, skb); /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&h4->txq, skb); return 0; } static const struct h4_recv_pkt h4_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { H4_RECV_ISO, .recv = hci_recv_frame }, }; /* Recv data */ static int h4_recv(struct hci_uart *hu, const void *data, int count) { struct h4_struct *h4 = hu->priv; if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; h4->rx_skb = h4_recv_buf(hu->hdev, h4->rx_skb, data, count, h4_recv_pkts, ARRAY_SIZE(h4_recv_pkts)); if (IS_ERR(h4->rx_skb)) { int err = PTR_ERR(h4->rx_skb); bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); h4->rx_skb = NULL; return err; } return count; } static struct sk_buff *h4_dequeue(struct hci_uart *hu) { struct h4_struct *h4 = hu->priv; return skb_dequeue(&h4->txq); } static const struct hci_uart_proto h4p = { .id = HCI_UART_H4, .name = "H4", .open = h4_open, .close = h4_close, .recv = h4_recv, .enqueue = h4_enqueue, .dequeue = h4_dequeue, .flush = h4_flush, }; int __init h4_init(void) { return hci_uart_register_proto(&h4p); } int __exit h4_deinit(void) { return hci_uart_unregister_proto(&h4p); } struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb, const unsigned char *buffer, int count, const struct h4_recv_pkt *pkts, int pkts_count) { struct hci_uart *hu = hci_get_drvdata(hdev); u8 alignment = hu->alignment ? 
hu->alignment : 1; /* Check for error from previous call */ if (IS_ERR(skb)) skb = NULL; while (count) { int i, len; /* remove padding bytes from buffer */ for (; hu->padding && count > 0; hu->padding--) { count--; buffer++; } if (!count) break; if (!skb) { for (i = 0; i < pkts_count; i++) { if (buffer[0] != (&pkts[i])->type) continue; skb = bt_skb_alloc((&pkts[i])->maxlen, GFP_ATOMIC); if (!skb) return ERR_PTR(-ENOMEM); hci_skb_pkt_type(skb) = (&pkts[i])->type; hci_skb_expect(skb) = (&pkts[i])->hlen; break; } /* Check for invalid packet type */ if (!skb) return ERR_PTR(-EILSEQ); count -= 1; buffer += 1; } len = min_t(uint, hci_skb_expect(skb) - skb->len, count); skb_put_data(skb, buffer, len); count -= len; buffer += len; /* Check for partial packet */ if (skb->len < hci_skb_expect(skb)) continue; for (i = 0; i < pkts_count; i++) { if (hci_skb_pkt_type(skb) == (&pkts[i])->type) break; } if (i >= pkts_count) { kfree_skb(skb); return ERR_PTR(-EILSEQ); } if (skb->len == (&pkts[i])->hlen) { u16 dlen; switch ((&pkts[i])->lsize) { case 0: /* No variable data length */ dlen = 0; break; case 1: /* Single octet variable length */ dlen = skb->data[(&pkts[i])->loff]; hci_skb_expect(skb) += dlen; if (skb_tailroom(skb) < dlen) { kfree_skb(skb); return ERR_PTR(-EMSGSIZE); } break; case 2: /* Double octet variable length */ dlen = get_unaligned_le16(skb->data + (&pkts[i])->loff); hci_skb_expect(skb) += dlen; if (skb_tailroom(skb) < dlen) { kfree_skb(skb); return ERR_PTR(-EMSGSIZE); } break; default: /* Unsupported variable length */ kfree_skb(skb); return ERR_PTR(-EILSEQ); } if (!dlen) { hu->padding = (skb->len + 1) % alignment; hu->padding = (alignment - hu->padding) % alignment; /* No more data, complete frame */ (&pkts[i])->recv(hdev, skb); skb = NULL; } } else { hu->padding = (skb->len + 1) % alignment; hu->padding = (alignment - hu->padding) % alignment; /* Complete frame */ (&pkts[i])->recv(hdev, skb); skb = NULL; } } return skb; } EXPORT_SYMBOL_GPL(h4_recv_buf);
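/*
 * Illustrative sketch, not part of the driver: after completing a
 * frame, h4_recv_buf() computes how many padding bytes to skip so that
 * the next packet type byte lands on the transport's alignment
 * boundary; the +1 accounts for the type byte already consumed.
 * Hypothetical stand-alone version of that arithmetic:
 */
#include <stddef.h>

static size_t h4_padding(size_t skb_len, size_t alignment)
{
	size_t rem = (skb_len + 1) % alignment;	/* +1 for the H4 type byte */

	return (alignment - rem) % alignment;	/* 0 when already aligned */
}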
linux-master
drivers/bluetooth/hci_h4.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth HCI UART driver for marvell devices * * Copyright (C) 2016 Marvell International Ltd. * Copyright (C) 2016 Intel Corporation */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/tty.h> #include <linux/of.h> #include <linux/serdev.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" #define HCI_FW_REQ_PKT 0xA5 #define HCI_CHIP_VER_PKT 0xAA #define MRVL_ACK 0x5A #define MRVL_NAK 0xBF #define MRVL_RAW_DATA 0x1F #define MRVL_SET_BAUDRATE 0xFC09 enum { STATE_CHIP_VER_PENDING, STATE_FW_REQ_PENDING, STATE_FW_LOADED, }; struct mrvl_data { struct sk_buff *rx_skb; struct sk_buff_head txq; struct sk_buff_head rawq; unsigned long flags; unsigned int tx_len; u8 id, rev; }; struct mrvl_serdev { struct hci_uart hu; }; struct hci_mrvl_pkt { __le16 lhs; __le16 rhs; } __packed; #define HCI_MRVL_PKT_SIZE 4 static int mrvl_open(struct hci_uart *hu) { struct mrvl_data *mrvl; int ret; BT_DBG("hu %p", hu); if (!hci_uart_has_flow_control(hu)) return -EOPNOTSUPP; mrvl = kzalloc(sizeof(*mrvl), GFP_KERNEL); if (!mrvl) return -ENOMEM; skb_queue_head_init(&mrvl->txq); skb_queue_head_init(&mrvl->rawq); set_bit(STATE_CHIP_VER_PENDING, &mrvl->flags); hu->priv = mrvl; if (hu->serdev) { ret = serdev_device_open(hu->serdev); if (ret) goto err; } return 0; err: kfree(mrvl); return ret; } static int mrvl_close(struct hci_uart *hu) { struct mrvl_data *mrvl = hu->priv; BT_DBG("hu %p", hu); if (hu->serdev) serdev_device_close(hu->serdev); skb_queue_purge(&mrvl->txq); skb_queue_purge(&mrvl->rawq); kfree_skb(mrvl->rx_skb); kfree(mrvl); hu->priv = NULL; return 0; } static int mrvl_flush(struct hci_uart *hu) { struct mrvl_data *mrvl = hu->priv; BT_DBG("hu %p", hu); skb_queue_purge(&mrvl->txq); skb_queue_purge(&mrvl->rawq); return 0; } static struct sk_buff *mrvl_dequeue(struct hci_uart *hu) { struct mrvl_data *mrvl = hu->priv; struct sk_buff *skb; skb = skb_dequeue(&mrvl->txq); if (!skb) { /* Any raw data ? 
*/ skb = skb_dequeue(&mrvl->rawq); } else { /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); } return skb; } static int mrvl_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct mrvl_data *mrvl = hu->priv; skb_queue_tail(&mrvl->txq, skb); return 0; } static void mrvl_send_ack(struct hci_uart *hu, unsigned char type) { struct mrvl_data *mrvl = hu->priv; struct sk_buff *skb; /* No H4 payload, only 1 byte header */ skb = bt_skb_alloc(0, GFP_ATOMIC); if (!skb) { bt_dev_err(hu->hdev, "Unable to alloc ack/nak packet"); return; } hci_skb_pkt_type(skb) = type; skb_queue_tail(&mrvl->txq, skb); hci_uart_tx_wakeup(hu); } static int mrvl_recv_fw_req(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_mrvl_pkt *pkt = (void *)skb->data; struct hci_uart *hu = hci_get_drvdata(hdev); struct mrvl_data *mrvl = hu->priv; int ret = 0; if ((pkt->lhs ^ pkt->rhs) != 0xffff) { bt_dev_err(hdev, "Corrupted mrvl header"); mrvl_send_ack(hu, MRVL_NAK); ret = -EINVAL; goto done; } mrvl_send_ack(hu, MRVL_ACK); if (!test_bit(STATE_FW_REQ_PENDING, &mrvl->flags)) { bt_dev_err(hdev, "Received unexpected firmware request"); ret = -EINVAL; goto done; } mrvl->tx_len = le16_to_cpu(pkt->lhs); clear_bit(STATE_FW_REQ_PENDING, &mrvl->flags); smp_mb__after_atomic(); wake_up_bit(&mrvl->flags, STATE_FW_REQ_PENDING); done: kfree_skb(skb); return ret; } static int mrvl_recv_chip_ver(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_mrvl_pkt *pkt = (void *)skb->data; struct hci_uart *hu = hci_get_drvdata(hdev); struct mrvl_data *mrvl = hu->priv; u16 version = le16_to_cpu(pkt->lhs); int ret = 0; if ((pkt->lhs ^ pkt->rhs) != 0xffff) { bt_dev_err(hdev, "Corrupted mrvl header"); mrvl_send_ack(hu, MRVL_NAK); ret = -EINVAL; goto done; } mrvl_send_ack(hu, MRVL_ACK); if (!test_bit(STATE_CHIP_VER_PENDING, &mrvl->flags)) { bt_dev_err(hdev, "Received unexpected chip version"); goto done; } mrvl->id = version; mrvl->rev = version >> 8; bt_dev_info(hdev, "Controller id = %x, rev = %x", mrvl->id, mrvl->rev); clear_bit(STATE_CHIP_VER_PENDING, &mrvl->flags); smp_mb__after_atomic(); wake_up_bit(&mrvl->flags, STATE_CHIP_VER_PENDING); done: kfree_skb(skb); return ret; } #define HCI_RECV_CHIP_VER \ .type = HCI_CHIP_VER_PKT, \ .hlen = HCI_MRVL_PKT_SIZE, \ .loff = 0, \ .lsize = 0, \ .maxlen = HCI_MRVL_PKT_SIZE #define HCI_RECV_FW_REQ \ .type = HCI_FW_REQ_PKT, \ .hlen = HCI_MRVL_PKT_SIZE, \ .loff = 0, \ .lsize = 0, \ .maxlen = HCI_MRVL_PKT_SIZE static const struct h4_recv_pkt mrvl_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { HCI_RECV_FW_REQ, .recv = mrvl_recv_fw_req }, { HCI_RECV_CHIP_VER, .recv = mrvl_recv_chip_ver }, }; static int mrvl_recv(struct hci_uart *hu, const void *data, int count) { struct mrvl_data *mrvl = hu->priv; if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; /* We might receive some noise when there is no firmware loaded. Therefore, * we drop data if the firmware is not loaded yet and if there is no fw load * request pending. 
*/ if (!test_bit(STATE_FW_REQ_PENDING, &mrvl->flags) && !test_bit(STATE_FW_LOADED, &mrvl->flags)) return count; mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count, mrvl_recv_pkts, ARRAY_SIZE(mrvl_recv_pkts)); if (IS_ERR(mrvl->rx_skb)) { int err = PTR_ERR(mrvl->rx_skb); bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); mrvl->rx_skb = NULL; return err; } return count; } static int mrvl_load_firmware(struct hci_dev *hdev, const char *name) { struct hci_uart *hu = hci_get_drvdata(hdev); struct mrvl_data *mrvl = hu->priv; const struct firmware *fw = NULL; const u8 *fw_ptr, *fw_max; int err; err = request_firmware(&fw, name, &hdev->dev); if (err < 0) { bt_dev_err(hdev, "Failed to load firmware file %s", name); return err; } fw_ptr = fw->data; fw_max = fw->data + fw->size; bt_dev_info(hdev, "Loading %s", name); set_bit(STATE_FW_REQ_PENDING, &mrvl->flags); while (fw_ptr <= fw_max) { struct sk_buff *skb; /* Controller drives the firmware load by sending firmware * request packets containing the expected fragment size. */ err = wait_on_bit_timeout(&mrvl->flags, STATE_FW_REQ_PENDING, TASK_INTERRUPTIBLE, msecs_to_jiffies(2000)); if (err == 1) { bt_dev_err(hdev, "Firmware load interrupted"); err = -EINTR; break; } else if (err) { bt_dev_err(hdev, "Firmware request timeout"); err = -ETIMEDOUT; break; } bt_dev_dbg(hdev, "Firmware request, expecting %d bytes", mrvl->tx_len); if (fw_ptr == fw_max) { /* Controller requests a null size once firmware is * fully loaded. If controller expects more data, there * is an issue. */ if (!mrvl->tx_len) { bt_dev_info(hdev, "Firmware loading complete"); } else { bt_dev_err(hdev, "Firmware loading failure"); err = -EINVAL; } break; } if (fw_ptr + mrvl->tx_len > fw_max) { mrvl->tx_len = fw_max - fw_ptr; bt_dev_dbg(hdev, "Adjusting tx_len to %d", mrvl->tx_len); } skb = bt_skb_alloc(mrvl->tx_len, GFP_KERNEL); if (!skb) { bt_dev_err(hdev, "Failed to alloc mem for FW packet"); err = -ENOMEM; break; } bt_cb(skb)->pkt_type = MRVL_RAW_DATA; skb_put_data(skb, fw_ptr, mrvl->tx_len); fw_ptr += mrvl->tx_len; set_bit(STATE_FW_REQ_PENDING, &mrvl->flags); skb_queue_tail(&mrvl->rawq, skb); hci_uart_tx_wakeup(hu); } release_firmware(fw); return err; } static int mrvl_setup(struct hci_uart *hu) { int err; struct mrvl_data *mrvl = hu->priv; hci_uart_set_flow_control(hu, true); err = mrvl_load_firmware(hu->hdev, "mrvl/helper_uart_3000000.bin"); if (err) { bt_dev_err(hu->hdev, "Unable to download firmware helper"); return -EINVAL; } /* Let the final ack go out before switching the baudrate */ hci_uart_wait_until_sent(hu); if (hu->serdev) serdev_device_set_baudrate(hu->serdev, hu->oper_speed); else hci_uart_set_baudrate(hu, hu->oper_speed); hci_uart_set_flow_control(hu, false); err = mrvl_load_firmware(hu->hdev, "mrvl/uart8897_bt.bin"); if (err) return err; set_bit(STATE_FW_LOADED, &mrvl->flags); return 0; } static int mrvl_set_baudrate(struct hci_uart *hu, unsigned int speed) { int err; struct mrvl_data *mrvl = hu->priv; __le32 speed_le = cpu_to_le32(speed); /* The firmware might be loaded by the Wifi driver over SDIO. We wait * up to 10s for the CTS to go up. Afterward, we know that the firmware * is ready. 
 */
	err = serdev_device_wait_for_cts(hu->serdev, true, 10000);
	if (err) {
		bt_dev_err(hu->hdev, "Wait for CTS failed with %d", err);
		return err;
	}

	set_bit(STATE_FW_LOADED, &mrvl->flags);

	err = __hci_cmd_sync_status(hu->hdev, MRVL_SET_BAUDRATE,
				    sizeof(speed_le), &speed_le,
				    HCI_INIT_TIMEOUT);
	if (err) {
		bt_dev_err(hu->hdev, "send command failed: %d", err);
		return err;
	}

	serdev_device_set_baudrate(hu->serdev, speed);

	/* We have to force sending a command to the Bluetooth module so
	 * that the driver detects it again after the baudrate change.
	 * hci_serdev provides for this via the HCI_UART_VND_DETECT flag,
	 * which triggers a dummy local version read.
	 */
	set_bit(HCI_UART_VND_DETECT, &hu->hdev_flags);

	return 0;
}

static const struct hci_uart_proto mrvl_proto_8897 = {
	.id = HCI_UART_MRVL,
	.name = "Marvell",
	.init_speed = 115200,
	.oper_speed = 3000000,
	.open = mrvl_open,
	.close = mrvl_close,
	.flush = mrvl_flush,
	.setup = mrvl_setup,
	.recv = mrvl_recv,
	.enqueue = mrvl_enqueue,
	.dequeue = mrvl_dequeue,
};

static const struct hci_uart_proto mrvl_proto_8997 = {
	.id = HCI_UART_MRVL,
	.name = "Marvell 8997",
	.init_speed = 115200,
	.oper_speed = 3000000,
	.open = mrvl_open,
	.close = mrvl_close,
	.flush = mrvl_flush,
	.set_baudrate = mrvl_set_baudrate,
	.recv = mrvl_recv,
	.enqueue = mrvl_enqueue,
	.dequeue = mrvl_dequeue,
};

static int mrvl_serdev_probe(struct serdev_device *serdev)
{
	struct mrvl_serdev *mrvldev;
	const struct hci_uart_proto *mrvl_proto = device_get_match_data(&serdev->dev);

	mrvldev = devm_kzalloc(&serdev->dev, sizeof(*mrvldev), GFP_KERNEL);
	if (!mrvldev)
		return -ENOMEM;

	mrvldev->hu.oper_speed = mrvl_proto->oper_speed;
	if (mrvl_proto->set_baudrate)
		of_property_read_u32(serdev->dev.of_node, "max-speed",
				     &mrvldev->hu.oper_speed);

	mrvldev->hu.serdev = serdev;
	serdev_device_set_drvdata(serdev, mrvldev);

	return hci_uart_register_device(&mrvldev->hu, mrvl_proto);
}

static void mrvl_serdev_remove(struct serdev_device *serdev)
{
	struct mrvl_serdev *mrvldev = serdev_device_get_drvdata(serdev);

	hci_uart_unregister_device(&mrvldev->hu);
}

static const struct of_device_id __maybe_unused mrvl_bluetooth_of_match[] = {
	{ .compatible = "mrvl,88w8897", .data = &mrvl_proto_8897},
	{ .compatible = "mrvl,88w8997", .data = &mrvl_proto_8997},
	{ },
};
MODULE_DEVICE_TABLE(of, mrvl_bluetooth_of_match);

static struct serdev_device_driver mrvl_serdev_driver = {
	.probe = mrvl_serdev_probe,
	.remove = mrvl_serdev_remove,
	.driver = {
		.name = "hci_uart_mrvl",
		.of_match_table = of_match_ptr(mrvl_bluetooth_of_match),
	},
};

int __init mrvl_init(void)
{
	serdev_device_driver_register(&mrvl_serdev_driver);

	return hci_uart_register_proto(&mrvl_proto_8897);
}

int __exit mrvl_deinit(void)
{
	serdev_device_driver_unregister(&mrvl_serdev_driver);

	return hci_uart_unregister_proto(&mrvl_proto_8897);
}
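/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * Marvell bootloader protects every 4-byte handshake header by carrying
 * the 16-bit length twice, once plain (lhs) and once bitwise inverted
 * (rhs), which is what the (lhs ^ rhs) != 0xffff checks above test. The
 * standalone helper below restates that validation; the names and the
 * host-endian handling are hypothetical simplifications (the driver
 * converts the wire value with le16_to_cpu()).
 */
#include <stdbool.h>
#include <stdint.h>

struct mrvl_hdr_sketch {
	uint16_t lhs;	/* length, as sent by the controller */
	uint16_t rhs;	/* one's complement of lhs */
};

/* Validate a header; on success store the length and return true. */
static inline bool mrvl_hdr_sketch_valid(const struct mrvl_hdr_sketch *hdr,
					 uint16_t *len)
{
	if ((hdr->lhs ^ hdr->rhs) != 0xffff)
		return false;	/* corrupted: the driver replies MRVL_NAK */

	*len = hdr->lhs;	/* intact: the driver replies MRVL_ACK */
	return true;
}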
linux-master
drivers/bluetooth/hci_mrvl.c
// SPDX-License-Identifier: GPL-2.0-only #include <linux/module.h> #include <linux/virtio.h> #include <linux/virtio_config.h> #include <linux/skbuff.h> #include <uapi/linux/virtio_ids.h> #include <uapi/linux/virtio_bt.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "0.1" enum { VIRTBT_VQ_TX, VIRTBT_VQ_RX, VIRTBT_NUM_VQS, }; struct virtio_bluetooth { struct virtio_device *vdev; struct virtqueue *vqs[VIRTBT_NUM_VQS]; struct work_struct rx; struct hci_dev *hdev; }; static int virtbt_add_inbuf(struct virtio_bluetooth *vbt) { struct virtqueue *vq = vbt->vqs[VIRTBT_VQ_RX]; struct scatterlist sg[1]; struct sk_buff *skb; int err; skb = alloc_skb(1000, GFP_KERNEL); if (!skb) return -ENOMEM; sg_init_one(sg, skb->data, 1000); err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL); if (err < 0) { kfree_skb(skb); return err; } return 0; } static int virtbt_open(struct hci_dev *hdev) { return 0; } static int virtbt_open_vdev(struct virtio_bluetooth *vbt) { if (virtbt_add_inbuf(vbt) < 0) return -EIO; virtqueue_kick(vbt->vqs[VIRTBT_VQ_RX]); return 0; } static int virtbt_close(struct hci_dev *hdev) { return 0; } static int virtbt_close_vdev(struct virtio_bluetooth *vbt) { int i; cancel_work_sync(&vbt->rx); for (i = 0; i < ARRAY_SIZE(vbt->vqs); i++) { struct virtqueue *vq = vbt->vqs[i]; struct sk_buff *skb; while ((skb = virtqueue_detach_unused_buf(vq))) kfree_skb(skb); cond_resched(); } return 0; } static int virtbt_flush(struct hci_dev *hdev) { return 0; } static int virtbt_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct virtio_bluetooth *vbt = hci_get_drvdata(hdev); struct scatterlist sg[1]; int err; memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); sg_init_one(sg, skb->data, skb->len); err = virtqueue_add_outbuf(vbt->vqs[VIRTBT_VQ_TX], sg, 1, skb, GFP_KERNEL); if (err) { kfree_skb(skb); return err; } virtqueue_kick(vbt->vqs[VIRTBT_VQ_TX]); return 0; } static int virtbt_setup_zephyr(struct hci_dev *hdev) { struct sk_buff *skb; /* Read Build Information */ skb = __hci_cmd_sync(hdev, 0xfc08, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); bt_dev_info(hdev, "%s", (char *)(skb->data + 1)); hci_set_fw_info(hdev, "%s", skb->data + 1); kfree_skb(skb); return 0; } static int virtbt_set_bdaddr_zephyr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; /* Write BD_ADDR */ skb = __hci_cmd_sync(hdev, 0xfc06, 6, bdaddr, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); return 0; } static int virtbt_setup_intel(struct hci_dev *hdev) { struct sk_buff *skb; /* Intel Read Version */ skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); return 0; } static int virtbt_set_bdaddr_intel(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; /* Intel Write BD Address */ skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); return 0; } static int virtbt_setup_realtek(struct hci_dev *hdev) { struct sk_buff *skb; /* Read ROM Version */ skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); bt_dev_info(hdev, "ROM version %u", *((__u8 *) (skb->data + 1))); kfree_skb(skb); return 0; } static int virtbt_shutdown_generic(struct hci_dev *hdev) { struct sk_buff *skb; /* Reset */ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); return 0; } static void 
virtbt_rx_handle(struct virtio_bluetooth *vbt, struct sk_buff *skb) { __u8 pkt_type; pkt_type = *((__u8 *) skb->data); skb_pull(skb, 1); switch (pkt_type) { case HCI_EVENT_PKT: case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_ISODATA_PKT: hci_skb_pkt_type(skb) = pkt_type; hci_recv_frame(vbt->hdev, skb); break; default: kfree_skb(skb); break; } } static void virtbt_rx_work(struct work_struct *work) { struct virtio_bluetooth *vbt = container_of(work, struct virtio_bluetooth, rx); struct sk_buff *skb; unsigned int len; skb = virtqueue_get_buf(vbt->vqs[VIRTBT_VQ_RX], &len); if (!skb) return; skb_put(skb, len); virtbt_rx_handle(vbt, skb); if (virtbt_add_inbuf(vbt) < 0) return; virtqueue_kick(vbt->vqs[VIRTBT_VQ_RX]); } static void virtbt_tx_done(struct virtqueue *vq) { struct sk_buff *skb; unsigned int len; while ((skb = virtqueue_get_buf(vq, &len))) kfree_skb(skb); } static void virtbt_rx_done(struct virtqueue *vq) { struct virtio_bluetooth *vbt = vq->vdev->priv; schedule_work(&vbt->rx); } static int virtbt_probe(struct virtio_device *vdev) { vq_callback_t *callbacks[VIRTBT_NUM_VQS] = { [VIRTBT_VQ_TX] = virtbt_tx_done, [VIRTBT_VQ_RX] = virtbt_rx_done, }; const char *names[VIRTBT_NUM_VQS] = { [VIRTBT_VQ_TX] = "tx", [VIRTBT_VQ_RX] = "rx", }; struct virtio_bluetooth *vbt; struct hci_dev *hdev; int err; __u8 type; if (!virtio_has_feature(vdev, VIRTIO_F_VERSION_1)) return -ENODEV; type = virtio_cread8(vdev, offsetof(struct virtio_bt_config, type)); switch (type) { case VIRTIO_BT_CONFIG_TYPE_PRIMARY: case VIRTIO_BT_CONFIG_TYPE_AMP: break; default: return -EINVAL; } vbt = kzalloc(sizeof(*vbt), GFP_KERNEL); if (!vbt) return -ENOMEM; vdev->priv = vbt; vbt->vdev = vdev; INIT_WORK(&vbt->rx, virtbt_rx_work); err = virtio_find_vqs(vdev, VIRTBT_NUM_VQS, vbt->vqs, callbacks, names, NULL); if (err) return err; hdev = hci_alloc_dev(); if (!hdev) { err = -ENOMEM; goto failed; } vbt->hdev = hdev; hdev->bus = HCI_VIRTIO; hdev->dev_type = type; hci_set_drvdata(hdev, vbt); hdev->open = virtbt_open; hdev->close = virtbt_close; hdev->flush = virtbt_flush; hdev->send = virtbt_send_frame; if (virtio_has_feature(vdev, VIRTIO_BT_F_VND_HCI)) { __u16 vendor; if (virtio_has_feature(vdev, VIRTIO_BT_F_CONFIG_V2)) virtio_cread(vdev, struct virtio_bt_config_v2, vendor, &vendor); else virtio_cread(vdev, struct virtio_bt_config, vendor, &vendor); switch (vendor) { case VIRTIO_BT_CONFIG_VENDOR_ZEPHYR: hdev->manufacturer = 1521; hdev->setup = virtbt_setup_zephyr; hdev->shutdown = virtbt_shutdown_generic; hdev->set_bdaddr = virtbt_set_bdaddr_zephyr; break; case VIRTIO_BT_CONFIG_VENDOR_INTEL: hdev->manufacturer = 2; hdev->setup = virtbt_setup_intel; hdev->shutdown = virtbt_shutdown_generic; hdev->set_bdaddr = virtbt_set_bdaddr_intel; set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); break; case VIRTIO_BT_CONFIG_VENDOR_REALTEK: hdev->manufacturer = 93; hdev->setup = virtbt_setup_realtek; hdev->shutdown = virtbt_shutdown_generic; set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); break; } } if (virtio_has_feature(vdev, VIRTIO_BT_F_MSFT_EXT)) { __u16 msft_opcode; if (virtio_has_feature(vdev, VIRTIO_BT_F_CONFIG_V2)) virtio_cread(vdev, struct virtio_bt_config_v2, msft_opcode, &msft_opcode); else virtio_cread(vdev, struct virtio_bt_config, msft_opcode, &msft_opcode); hci_set_msft_opcode(hdev, msft_opcode); } if (virtio_has_feature(vdev, 
			     VIRTIO_BT_F_AOSP_EXT))
		hci_set_aosp_capable(hdev);

	if (hci_register_dev(hdev) < 0) {
		hci_free_dev(hdev);
		err = -EBUSY;
		goto failed;
	}

	virtio_device_ready(vdev);

	err = virtbt_open_vdev(vbt);
	if (err)
		goto open_failed;

	return 0;

open_failed:
	/* The device was registered above, so it must be unregistered
	 * before it can be freed.
	 */
	hci_unregister_dev(hdev);
	hci_free_dev(hdev);

failed:
	vdev->config->del_vqs(vdev);
	return err;
}

static void virtbt_remove(struct virtio_device *vdev)
{
	struct virtio_bluetooth *vbt = vdev->priv;
	struct hci_dev *hdev = vbt->hdev;

	hci_unregister_dev(hdev);
	virtio_reset_device(vdev);
	virtbt_close_vdev(vbt);
	hci_free_dev(hdev);
	vbt->hdev = NULL;

	vdev->config->del_vqs(vdev);
	kfree(vbt);
}

static struct virtio_device_id virtbt_table[] = {
	{ VIRTIO_ID_BT, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

MODULE_DEVICE_TABLE(virtio, virtbt_table);

static const unsigned int virtbt_features[] = {
	VIRTIO_BT_F_VND_HCI,
	VIRTIO_BT_F_MSFT_EXT,
	VIRTIO_BT_F_AOSP_EXT,
	VIRTIO_BT_F_CONFIG_V2,
};

static struct virtio_driver virtbt_driver = {
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.feature_table = virtbt_features,
	.feature_table_size = ARRAY_SIZE(virtbt_features),
	.id_table = virtbt_table,
	.probe = virtbt_probe,
	.remove = virtbt_remove,
};

module_virtio_driver(virtbt_driver);

MODULE_AUTHOR("Marcel Holtmann <[email protected]>");
MODULE_DESCRIPTION("Generic Bluetooth VIRTIO driver ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
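/*
 * Illustrative sketch (editor's addition, not part of the driver): both
 * virtqueue directions use H4-style framing, a single packet-type byte
 * in front of the raw HCI packet. virtbt_send_frame() prepends that byte
 * with skb_push() and virtbt_rx_handle() strips it with skb_pull()
 * before dispatching on the type. The standalone helper below shows the
 * same layout on a plain buffer; the names are hypothetical.
 */
#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Build [type][payload] into out; returns the framed length, 0 on error. */
static size_t virtbt_frame_sketch(uint8_t pkt_type, const uint8_t *payload,
				  size_t plen, uint8_t *out, size_t out_len)
{
	if (out_len < plen + 1)
		return 0;	/* caller's buffer is too small */

	out[0] = pkt_type;	/* e.g. HCI_EVENT_PKT or HCI_ACLDATA_PKT */
	memcpy(&out[1], payload, plen);
	return plen + 1;
}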
linux-master
drivers/bluetooth/virtio_bt.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Generic Bluetooth SDIO driver * * Copyright (C) 2007 Cambridge Silicon Radio Ltd. * Copyright (C) 2007 Marcel Holtmann <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/mmc/host.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio_func.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "0.1" static const struct sdio_device_id btsdio_table[] = { /* Generic Bluetooth Type-A SDIO device */ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_A) }, /* Generic Bluetooth Type-B SDIO device */ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_B) }, /* Generic Bluetooth AMP controller */ { SDIO_DEVICE_CLASS(SDIO_CLASS_BT_AMP) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(sdio, btsdio_table); struct btsdio_data { struct hci_dev *hdev; struct sdio_func *func; struct work_struct work; struct sk_buff_head txq; }; #define REG_RDAT 0x00 /* Receiver Data */ #define REG_TDAT 0x00 /* Transmitter Data */ #define REG_PC_RRT 0x10 /* Read Packet Control */ #define REG_PC_WRT 0x11 /* Write Packet Control */ #define REG_RTC_STAT 0x12 /* Retry Control Status */ #define REG_RTC_SET 0x12 /* Retry Control Set */ #define REG_INTRD 0x13 /* Interrupt Indication */ #define REG_CL_INTRD 0x13 /* Interrupt Clear */ #define REG_EN_INTRD 0x14 /* Interrupt Enable */ #define REG_MD_STAT 0x20 /* Bluetooth Mode Status */ #define REG_MD_SET 0x20 /* Bluetooth Mode Set */ static int btsdio_tx_packet(struct btsdio_data *data, struct sk_buff *skb) { int err; BT_DBG("%s", data->hdev->name); /* Prepend Type-A header */ skb_push(skb, 4); skb->data[0] = (skb->len & 0x0000ff); skb->data[1] = (skb->len & 0x00ff00) >> 8; skb->data[2] = (skb->len & 0xff0000) >> 16; skb->data[3] = hci_skb_pkt_type(skb); err = sdio_writesb(data->func, REG_TDAT, skb->data, skb->len); if (err < 0) { skb_pull(skb, 4); sdio_writeb(data->func, 0x01, REG_PC_WRT, NULL); return err; } data->hdev->stat.byte_tx += skb->len; kfree_skb(skb); return 0; } static void btsdio_work(struct work_struct *work) { struct btsdio_data *data = container_of(work, struct btsdio_data, work); struct sk_buff *skb; int err; BT_DBG("%s", data->hdev->name); sdio_claim_host(data->func); while ((skb = skb_dequeue(&data->txq))) { err = btsdio_tx_packet(data, skb); if (err < 0) { data->hdev->stat.err_tx++; skb_queue_head(&data->txq, skb); break; } } sdio_release_host(data->func); } static int btsdio_rx_packet(struct btsdio_data *data) { u8 hdr[4] __attribute__ ((aligned(4))); struct sk_buff *skb; int err, len; BT_DBG("%s", data->hdev->name); err = sdio_readsb(data->func, hdr, REG_RDAT, 4); if (err < 0) return err; len = hdr[0] | (hdr[1] << 8) | (hdr[2] << 16); if (len < 4 || len > 65543) return -EILSEQ; skb = bt_skb_alloc(len - 4, GFP_KERNEL); if (!skb) { /* Out of memory. Prepare a read retry and just * return with the expectation that the next time * we're called we'll have more memory. 
 */
		return -ENOMEM;
	}

	skb_put(skb, len - 4);

	err = sdio_readsb(data->func, skb->data, REG_RDAT, len - 4);
	if (err < 0) {
		kfree_skb(skb);
		return err;
	}

	data->hdev->stat.byte_rx += len;

	switch (hdr[3]) {
	case HCI_EVENT_PKT:
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
	case HCI_ISODATA_PKT:
		hci_skb_pkt_type(skb) = hdr[3];
		err = hci_recv_frame(data->hdev, skb);
		if (err < 0)
			return err;
		break;
	default:
		kfree_skb(skb);
		return -EINVAL;
	}

	sdio_writeb(data->func, 0x00, REG_PC_RRT, NULL);

	return 0;
}

static void btsdio_interrupt(struct sdio_func *func)
{
	struct btsdio_data *data = sdio_get_drvdata(func);
	int intrd;

	BT_DBG("%s", data->hdev->name);

	intrd = sdio_readb(func, REG_INTRD, NULL);
	if (intrd & 0x01) {
		sdio_writeb(func, 0x01, REG_CL_INTRD, NULL);

		if (btsdio_rx_packet(data) < 0) {
			data->hdev->stat.err_rx++;
			sdio_writeb(data->func, 0x01, REG_PC_RRT, NULL);
		}
	}
}

static int btsdio_open(struct hci_dev *hdev)
{
	struct btsdio_data *data = hci_get_drvdata(hdev);
	int err;

	BT_DBG("%s", hdev->name);

	sdio_claim_host(data->func);

	err = sdio_enable_func(data->func);
	if (err < 0)
		goto release;

	err = sdio_claim_irq(data->func, btsdio_interrupt);
	if (err < 0) {
		sdio_disable_func(data->func);
		goto release;
	}

	if (data->func->class == SDIO_CLASS_BT_B)
		sdio_writeb(data->func, 0x00, REG_MD_SET, NULL);

	sdio_writeb(data->func, 0x01, REG_EN_INTRD, NULL);

release:
	sdio_release_host(data->func);

	return err;
}

static int btsdio_close(struct hci_dev *hdev)
{
	struct btsdio_data *data = hci_get_drvdata(hdev);

	BT_DBG("%s", hdev->name);

	sdio_claim_host(data->func);

	sdio_writeb(data->func, 0x00, REG_EN_INTRD, NULL);

	sdio_release_irq(data->func);
	sdio_disable_func(data->func);

	sdio_release_host(data->func);

	return 0;
}

static int btsdio_flush(struct hci_dev *hdev)
{
	struct btsdio_data *data = hci_get_drvdata(hdev);

	BT_DBG("%s", hdev->name);

	skb_queue_purge(&data->txq);

	return 0;
}

static int btsdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct btsdio_data *data = hci_get_drvdata(hdev);

	BT_DBG("%s", hdev->name);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		break;
	default:
		return -EILSEQ;
	}

	skb_queue_tail(&data->txq, skb);

	schedule_work(&data->work);

	return 0;
}

static int btsdio_probe(struct sdio_func *func,
			const struct sdio_device_id *id)
{
	struct btsdio_data *data;
	struct hci_dev *hdev;
	struct sdio_func_tuple *tuple = func->tuples;
	int err;

	BT_DBG("func %p id %p class 0x%04x", func, id, func->class);

	while (tuple) {
		BT_DBG("code 0x%x size %d", tuple->code, tuple->size);
		tuple = tuple->next;
	}

	/* Broadcom devices soldered onto the PCB (non-removable) use a
	 * UART connection for Bluetooth, so ignore the BT SDIO interface.
*/ if (func->vendor == SDIO_VENDOR_ID_BROADCOM && !mmc_card_is_removable(func->card->host)) { switch (func->device) { case SDIO_DEVICE_ID_BROADCOM_43341: case SDIO_DEVICE_ID_BROADCOM_43430: case SDIO_DEVICE_ID_BROADCOM_4345: case SDIO_DEVICE_ID_BROADCOM_43455: case SDIO_DEVICE_ID_BROADCOM_4356: return -ENODEV; } } data = devm_kzalloc(&func->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->func = func; INIT_WORK(&data->work, btsdio_work); skb_queue_head_init(&data->txq); hdev = hci_alloc_dev(); if (!hdev) return -ENOMEM; hdev->bus = HCI_SDIO; hci_set_drvdata(hdev, data); if (id->class == SDIO_CLASS_BT_AMP) hdev->dev_type = HCI_AMP; else hdev->dev_type = HCI_PRIMARY; data->hdev = hdev; SET_HCIDEV_DEV(hdev, &func->dev); hdev->open = btsdio_open; hdev->close = btsdio_close; hdev->flush = btsdio_flush; hdev->send = btsdio_send_frame; if (func->vendor == 0x0104 && func->device == 0x00c5) set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); err = hci_register_dev(hdev); if (err < 0) { hci_free_dev(hdev); return err; } sdio_set_drvdata(func, data); return 0; } static void btsdio_remove(struct sdio_func *func) { struct btsdio_data *data = sdio_get_drvdata(func); struct hci_dev *hdev; BT_DBG("func %p", func); if (!data) return; cancel_work_sync(&data->work); hdev = data->hdev; sdio_set_drvdata(func, NULL); hci_unregister_dev(hdev); hci_free_dev(hdev); } static struct sdio_driver btsdio_driver = { .name = "btsdio", .probe = btsdio_probe, .remove = btsdio_remove, .id_table = btsdio_table, }; module_sdio_driver(btsdio_driver); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Generic Bluetooth SDIO driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
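/*
 * Illustrative sketch (editor's addition, not part of the driver): a
 * Type-A SDIO frame starts with a 4-byte header -- a 24-bit little-endian
 * length that includes the header itself, followed by the HCI packet
 * type. btsdio_tx_packet() builds it byte by byte and btsdio_rx_packet()
 * parses it the same way; the helpers below restate that layout with
 * hypothetical names.
 */
#include <stdint.h>

static inline void btsdio_hdr_sketch_pack(uint8_t hdr[4], uint32_t frame_len,
					  uint8_t pkt_type)
{
	hdr[0] = frame_len & 0xff;		/* length LSB, header included */
	hdr[1] = (frame_len >> 8) & 0xff;
	hdr[2] = (frame_len >> 16) & 0xff;
	hdr[3] = pkt_type;			/* HCI packet type */
}

static inline uint32_t btsdio_hdr_sketch_len(const uint8_t hdr[4])
{
	/* the driver rejects anything outside 4..65543 as -EILSEQ */
	return hdr[0] | (hdr[1] << 8) | ((uint32_t)hdr[2] << 16);
}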
linux-master
drivers/bluetooth/btsdio.c
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell Bluetooth driver: debugfs related functions * * Copyright (C) 2009, Marvell International Ltd. **/ #include <linux/debugfs.h> #include <linux/slab.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btmrvl_drv.h" struct btmrvl_debugfs_data { struct dentry *config_dir; struct dentry *status_dir; }; static ssize_t btmrvl_hscfgcmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; long result, ret; ret = kstrtol_from_user(ubuf, count, 10, &result); if (ret) return ret; priv->btmrvl_dev.hscfgcmd = result; if (priv->btmrvl_dev.hscfgcmd) { btmrvl_prepare_command(priv); wake_up_interruptible(&priv->main_thread.wait_q); } return count; } static ssize_t btmrvl_hscfgcmd_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hscfgcmd); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_hscfgcmd_fops = { .read = btmrvl_hscfgcmd_read, .write = btmrvl_hscfgcmd_write, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_pscmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; long result, ret; ret = kstrtol_from_user(ubuf, count, 10, &result); if (ret) return ret; priv->btmrvl_dev.pscmd = result; if (priv->btmrvl_dev.pscmd) { btmrvl_prepare_command(priv); wake_up_interruptible(&priv->main_thread.wait_q); } return count; } static ssize_t btmrvl_pscmd_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.pscmd); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_pscmd_fops = { .read = btmrvl_pscmd_read, .write = btmrvl_pscmd_write, .open = simple_open, .llseek = default_llseek, }; static ssize_t btmrvl_hscmd_write(struct file *file, const char __user *ubuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; long result, ret; ret = kstrtol_from_user(ubuf, count, 10, &result); if (ret) return ret; priv->btmrvl_dev.hscmd = result; if (priv->btmrvl_dev.hscmd) { btmrvl_prepare_command(priv); wake_up_interruptible(&priv->main_thread.wait_q); } return count; } static ssize_t btmrvl_hscmd_read(struct file *file, char __user *userbuf, size_t count, loff_t *ppos) { struct btmrvl_private *priv = file->private_data; char buf[16]; int ret; ret = snprintf(buf, sizeof(buf) - 1, "%d\n", priv->btmrvl_dev.hscmd); return simple_read_from_buffer(userbuf, count, ppos, buf, ret); } static const struct file_operations btmrvl_hscmd_fops = { .read = btmrvl_hscmd_read, .write = btmrvl_hscmd_write, .open = simple_open, .llseek = default_llseek, }; void btmrvl_debugfs_init(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); struct btmrvl_debugfs_data *dbg; if (!hdev->debugfs) return; dbg = kzalloc(sizeof(*dbg), GFP_KERNEL); priv->debugfs_data = dbg; if (!dbg) { BT_ERR("Can not allocate memory for btmrvl_debugfs_data."); return; } dbg->config_dir = debugfs_create_dir("config", hdev->debugfs); debugfs_create_u8("psmode", 0644, dbg->config_dir, &priv->btmrvl_dev.psmode); debugfs_create_file("pscmd", 
0644, dbg->config_dir, priv, &btmrvl_pscmd_fops); debugfs_create_x16("gpiogap", 0644, dbg->config_dir, &priv->btmrvl_dev.gpio_gap); debugfs_create_u8("hsmode", 0644, dbg->config_dir, &priv->btmrvl_dev.hsmode); debugfs_create_file("hscmd", 0644, dbg->config_dir, priv, &btmrvl_hscmd_fops); debugfs_create_file("hscfgcmd", 0644, dbg->config_dir, priv, &btmrvl_hscfgcmd_fops); dbg->status_dir = debugfs_create_dir("status", hdev->debugfs); debugfs_create_u8("curpsmode", 0444, dbg->status_dir, &priv->adapter->psmode); debugfs_create_u8("psstate", 0444, dbg->status_dir, &priv->adapter->ps_state); debugfs_create_u8("hsstate", 0444, dbg->status_dir, &priv->adapter->hs_state); debugfs_create_u8("txdnldready", 0444, dbg->status_dir, &priv->btmrvl_dev.tx_dnld_rdy); } void btmrvl_debugfs_remove(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); struct btmrvl_debugfs_data *dbg = priv->debugfs_data; if (!dbg) return; debugfs_remove_recursive(dbg->config_dir); debugfs_remove_recursive(dbg->status_dir); kfree(dbg); }
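/*
 * Illustrative sketch (editor's addition, not part of the driver): the
 * three command files above repeat one pattern -- parse a decimal value
 * from user space on write, print the current value on read. A generic
 * version of that pair, for a hypothetical u8 attribute handed to
 * debugfs_create_file() as private data, could look like this.
 */
static ssize_t example_attr_write(struct file *file, const char __user *ubuf,
				  size_t count, loff_t *ppos)
{
	u8 *attr = file->private_data;	/* set via debugfs_create_file() */
	long result;
	int ret;

	ret = kstrtol_from_user(ubuf, count, 10, &result);
	if (ret)
		return ret;

	*attr = result;
	return count;
}

static ssize_t example_attr_read(struct file *file, char __user *userbuf,
				 size_t count, loff_t *ppos)
{
	u8 *attr = file->private_data;
	char buf[16];
	int len;

	len = scnprintf(buf, sizeof(buf), "%u\n", *attr);
	return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}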
linux-master
drivers/bluetooth/btmrvl_debugfs.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Generic Bluetooth USB driver * * Copyright (C) 2005-2008 Marcel Holtmann <[email protected]> */ #include <linux/dmi.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/quirks.h> #include <linux/firmware.h> #include <linux/iopoll.h> #include <linux/of_device.h> #include <linux/of_irq.h> #include <linux/suspend.h> #include <linux/gpio/consumer.h> #include <linux/debugfs.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btintel.h" #include "btbcm.h" #include "btrtl.h" #include "btmtk.h" #define VERSION "0.8" static bool disable_scofix; static bool force_scofix; static bool enable_autosuspend = IS_ENABLED(CONFIG_BT_HCIBTUSB_AUTOSUSPEND); static bool enable_poll_sync = IS_ENABLED(CONFIG_BT_HCIBTUSB_POLL_SYNC); static bool reset = true; static struct usb_driver btusb_driver; #define BTUSB_IGNORE BIT(0) #define BTUSB_DIGIANSWER BIT(1) #define BTUSB_CSR BIT(2) #define BTUSB_SNIFFER BIT(3) #define BTUSB_BCM92035 BIT(4) #define BTUSB_BROKEN_ISOC BIT(5) #define BTUSB_WRONG_SCO_MTU BIT(6) #define BTUSB_ATH3012 BIT(7) #define BTUSB_INTEL_COMBINED BIT(8) #define BTUSB_INTEL_BOOT BIT(9) #define BTUSB_BCM_PATCHRAM BIT(10) #define BTUSB_MARVELL BIT(11) #define BTUSB_SWAVE BIT(12) #define BTUSB_AMP BIT(13) #define BTUSB_QCA_ROME BIT(14) #define BTUSB_BCM_APPLE BIT(15) #define BTUSB_REALTEK BIT(16) #define BTUSB_BCM2045 BIT(17) #define BTUSB_IFNUM_2 BIT(18) #define BTUSB_CW6622 BIT(19) #define BTUSB_MEDIATEK BIT(20) #define BTUSB_WIDEBAND_SPEECH BIT(21) #define BTUSB_VALID_LE_STATES BIT(22) #define BTUSB_QCA_WCN6855 BIT(23) #define BTUSB_INTEL_BROKEN_SHUTDOWN_LED BIT(24) #define BTUSB_INTEL_BROKEN_INITIAL_NCMD BIT(25) #define BTUSB_INTEL_NO_WBS_SUPPORT BIT(26) #define BTUSB_ACTIONS_SEMI BIT(27) static const struct usb_device_id btusb_table[] = { /* Generic Bluetooth USB device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x01) }, /* Generic Bluetooth AMP device */ { USB_DEVICE_INFO(0xe0, 0x01, 0x04), .driver_info = BTUSB_AMP }, /* Generic Bluetooth USB interface */ { USB_INTERFACE_INFO(0xe0, 0x01, 0x01) }, /* Apple-specific (Broadcom) devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x05ac, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_APPLE | BTUSB_IFNUM_2 }, /* MediaTek MT76x0E */ { USB_DEVICE(0x0e8d, 0x763f) }, /* Broadcom SoftSailing reporting vendor specific */ { USB_DEVICE(0x0a5c, 0x21e1) }, /* Apple MacBookPro 7,1 */ { USB_DEVICE(0x05ac, 0x8213) }, /* Apple iMac11,1 */ { USB_DEVICE(0x05ac, 0x8215) }, /* Apple MacBookPro6,2 */ { USB_DEVICE(0x05ac, 0x8218) }, /* Apple MacBookAir3,1, MacBookAir3,2 */ { USB_DEVICE(0x05ac, 0x821b) }, /* Apple MacBookAir4,1 */ { USB_DEVICE(0x05ac, 0x821f) }, /* Apple MacBookPro8,2 */ { USB_DEVICE(0x05ac, 0x821a) }, /* Apple MacMini5,1 */ { USB_DEVICE(0x05ac, 0x8281) }, /* AVM BlueFRITZ! 
USB v2.0 */ { USB_DEVICE(0x057c, 0x3800), .driver_info = BTUSB_SWAVE }, /* Bluetooth Ultraport Module from IBM */ { USB_DEVICE(0x04bf, 0x030a) }, /* ALPS Modules with non-standard id */ { USB_DEVICE(0x044e, 0x3001) }, { USB_DEVICE(0x044e, 0x3002) }, /* Ericsson with non-standard id */ { USB_DEVICE(0x0bdb, 0x1002) }, /* Canyon CN-BTU1 with HID interfaces */ { USB_DEVICE(0x0c10, 0x0000) }, /* Broadcom BCM20702B0 (Dynex/Insignia) */ { USB_DEVICE(0x19ff, 0x0239), .driver_info = BTUSB_BCM_PATCHRAM }, /* Broadcom BCM43142A0 (Foxconn/Lenovo) */ { USB_VENDOR_AND_INTERFACE_INFO(0x105b, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, /* Broadcom BCM920703 (HTC Vive) */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bb4, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, /* Foxconn - Hon Hai */ { USB_VENDOR_AND_INTERFACE_INFO(0x0489, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, /* Lite-On Technology - Broadcom based */ { USB_VENDOR_AND_INTERFACE_INFO(0x04ca, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, /* Broadcom devices with vendor specific id */ { USB_VENDOR_AND_INTERFACE_INFO(0x0a5c, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, /* ASUSTek Computer - Broadcom based */ { USB_VENDOR_AND_INTERFACE_INFO(0x0b05, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, /* Belkin F8065bf - Broadcom based */ { USB_VENDOR_AND_INTERFACE_INFO(0x050d, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, /* IMC Networks - Broadcom based */ { USB_VENDOR_AND_INTERFACE_INFO(0x13d3, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, /* Dell Computer - Broadcom based */ { USB_VENDOR_AND_INTERFACE_INFO(0x413c, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, /* Toshiba Corp - Broadcom based */ { USB_VENDOR_AND_INTERFACE_INFO(0x0930, 0xff, 0x01, 0x01), .driver_info = BTUSB_BCM_PATCHRAM }, /* Intel Bluetooth USB Bootloader (RAM module) */ { USB_DEVICE(0x8087, 0x0a5a), .driver_info = BTUSB_INTEL_BOOT | BTUSB_BROKEN_ISOC }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, btusb_table); static const struct usb_device_id quirks_table[] = { /* CSR BlueCore devices */ { USB_DEVICE(0x0a12, 0x0001), .driver_info = BTUSB_CSR }, /* Broadcom BCM2033 without firmware */ { USB_DEVICE(0x0a5c, 0x2033), .driver_info = BTUSB_IGNORE }, /* Broadcom BCM2045 devices */ { USB_DEVICE(0x0a5c, 0x2045), .driver_info = BTUSB_BCM2045 }, /* Atheros 3011 with sflash firmware */ { USB_DEVICE(0x0489, 0xe027), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0489, 0xe03d), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x04f2, 0xaff1), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0930, 0x0215), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0cf3, 0x3002), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x0cf3, 0xe019), .driver_info = BTUSB_IGNORE }, { USB_DEVICE(0x13d3, 0x3304), .driver_info = BTUSB_IGNORE }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03f0, 0x311d), .driver_info = BTUSB_IGNORE }, /* Atheros 3012 with sflash firmware */ { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04e), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, { 
USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311e), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x817a), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x817b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe02c), .driver_info = BTUSB_IGNORE }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xe036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 }, /* QCA ROME chipset */ { USB_DEVICE(0x0cf3, 0x535b), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0cf3, 0xe009), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0cf3, 0xe010), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0cf3, 0xe301), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0cf3, 0xe500), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 
0xe092), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe09f), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0489, 0xe0a2), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3011), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3015), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3016), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x301a), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x3021), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3491), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3496), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3501), .driver_info = BTUSB_QCA_ROME | BTUSB_WIDEBAND_SPEECH }, /* QCA WCN6855 chipset */ { USB_DEVICE(0x0cf3, 0xe600), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0cc), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0d6), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0e3), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9309), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9409), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0d0), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9108), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9109), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9208), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9209), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9308), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9408), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9508), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9509), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9608), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9609), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x10ab, 0x9f09), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3022), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0c7), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0c9), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0ca), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0cb), .driver_info = BTUSB_QCA_WCN6855 | 
BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0ce), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0de), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0df), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0e1), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0ea), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0ec), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3023), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3024), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3a22), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3a24), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3a26), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3a27), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, /* QCA WCN785x chipset */ { USB_DEVICE(0x0cf3, 0xe700), .driver_info = BTUSB_QCA_WCN6855 | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, /* Broadcom BCM2035 */ { USB_DEVICE(0x0a5c, 0x2009), .driver_info = BTUSB_BCM92035 }, { USB_DEVICE(0x0a5c, 0x200a), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2035), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Broadcom BCM2045 */ { USB_DEVICE(0x0a5c, 0x2039), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2101), .driver_info = BTUSB_WRONG_SCO_MTU }, /* IBM/Lenovo ThinkPad with Broadcom chip */ { USB_DEVICE(0x0a5c, 0x201e), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x0a5c, 0x2110), .driver_info = BTUSB_WRONG_SCO_MTU }, /* HP laptop with Broadcom chip */ { USB_DEVICE(0x03f0, 0x171d), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Dell laptop with Broadcom chip */ { USB_DEVICE(0x413c, 0x8126), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Dell Wireless 370 and 410 devices */ { USB_DEVICE(0x413c, 0x8152), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x413c, 0x8156), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Belkin F8T012 and F8T013 devices */ { USB_DEVICE(0x050d, 0x0012), .driver_info = BTUSB_WRONG_SCO_MTU }, { USB_DEVICE(0x050d, 0x0013), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Asus WL-BTD202 device */ { USB_DEVICE(0x0b05, 0x1715), .driver_info = BTUSB_WRONG_SCO_MTU }, /* Kensington Bluetooth USB adapter */ { USB_DEVICE(0x047d, 0x105e), .driver_info = BTUSB_WRONG_SCO_MTU }, /* RTX Telecom based adapters with buggy SCO support */ { USB_DEVICE(0x0400, 0x0807), .driver_info = BTUSB_BROKEN_ISOC }, { USB_DEVICE(0x0400, 0x080a), .driver_info = BTUSB_BROKEN_ISOC }, /* CONWISE Technology based adapters with buggy SCO support */ { USB_DEVICE(0x0e5e, 0x6622), .driver_info = BTUSB_BROKEN_ISOC | BTUSB_CW6622}, /* Roper Class 1 Bluetooth Dongle (Silicon Wave based) */ { USB_DEVICE(0x1310, 0x0001), .driver_info = BTUSB_SWAVE }, /* Digianswer devices */ { USB_DEVICE(0x08fd, 0x0001), .driver_info = BTUSB_DIGIANSWER }, { USB_DEVICE(0x08fd, 0x0002), .driver_info = BTUSB_IGNORE }, /* CSR BlueCore Bluetooth Sniffer */ { USB_DEVICE(0x0a12, 0x0002), 
.driver_info = BTUSB_SNIFFER | BTUSB_BROKEN_ISOC }, /* Frontline ComProbe Bluetooth Sniffer */ { USB_DEVICE(0x16d3, 0x0002), .driver_info = BTUSB_SNIFFER | BTUSB_BROKEN_ISOC }, /* Marvell Bluetooth devices */ { USB_DEVICE(0x1286, 0x2044), .driver_info = BTUSB_MARVELL }, { USB_DEVICE(0x1286, 0x2046), .driver_info = BTUSB_MARVELL }, { USB_DEVICE(0x1286, 0x204e), .driver_info = BTUSB_MARVELL }, /* Intel Bluetooth devices */ { USB_DEVICE(0x8087, 0x0025), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0026), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0029), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0032), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0033), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0035), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0036), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x07da), .driver_info = BTUSB_CSR }, { USB_DEVICE(0x8087, 0x07dc), .driver_info = BTUSB_INTEL_COMBINED | BTUSB_INTEL_NO_WBS_SUPPORT | BTUSB_INTEL_BROKEN_INITIAL_NCMD | BTUSB_INTEL_BROKEN_SHUTDOWN_LED }, { USB_DEVICE(0x8087, 0x0a2a), .driver_info = BTUSB_INTEL_COMBINED | BTUSB_INTEL_NO_WBS_SUPPORT | BTUSB_INTEL_BROKEN_SHUTDOWN_LED }, { USB_DEVICE(0x8087, 0x0a2b), .driver_info = BTUSB_INTEL_COMBINED }, { USB_DEVICE(0x8087, 0x0aa7), .driver_info = BTUSB_INTEL_COMBINED | BTUSB_INTEL_BROKEN_SHUTDOWN_LED }, { USB_DEVICE(0x8087, 0x0aaa), .driver_info = BTUSB_INTEL_COMBINED }, /* Other Intel Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01), .driver_info = BTUSB_IGNORE }, /* Realtek 8821CE Bluetooth devices */ { USB_DEVICE(0x13d3, 0x3529), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, /* Realtek 8822CE Bluetooth devices */ { USB_DEVICE(0x0bda, 0xb00c), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0bda, 0xc822), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, /* Realtek 8822CU Bluetooth devices */ { USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, /* Realtek 8852AE Bluetooth devices */ { USB_DEVICE(0x0bda, 0x2852), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0bda, 0xc852), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0bda, 0x385a), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0bda, 0x4852), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04c5, 0x165c), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04ca, 0x4006), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0cb8, 0xc549), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, /* Realtek 8852CE Bluetooth devices */ { USB_DEVICE(0x04ca, 0x4007), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04c5, 0x1675), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0cb8, 0xc558), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3587), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3586), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3592), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, /* Realtek 8852BE Bluetooth devices */ { USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3571), .driver_info = 
BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, /* Realtek Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), .driver_info = BTUSB_REALTEK }, /* MediaTek Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x0e8d, 0xe0, 0x01, 0x01), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, /* Additional MediaTek MT7615E Bluetooth devices */ { USB_DEVICE(0x13d3, 0x3560), .driver_info = BTUSB_MEDIATEK}, /* Additional MediaTek MT7663 Bluetooth devices */ { USB_DEVICE(0x043e, 0x310c), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3801), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, /* Additional MediaTek MT7668 Bluetooth devices */ { USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, /* Additional MediaTek MT7921 Bluetooth devices */ { USB_DEVICE(0x0489, 0xe0c8), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0e0), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3802), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3563), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3564), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3567), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3578), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3583), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0cd), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0e8d, 0x0608), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, /* MediaTek MT7922A Bluetooth devices */ { USB_DEVICE(0x0489, 0xe0d8), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0d9), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x13d3, 0x3568), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0e2), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0e4), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0f1), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0f5), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0f6), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe102), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x04ca, 0x3804), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, /* Additional Realtek 
8723AE Bluetooth devices */ { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8723BE Bluetooth devices */ { USB_DEVICE(0x0489, 0xe085), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x0489, 0xe08b), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x04f2, 0xb49f), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3494), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8723BU Bluetooth devices */ { USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8723DE Bluetooth devices */ { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8761BUV Bluetooth devices */ { USB_DEVICE(0x2357, 0x0604), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0b05, 0x190e), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x2550, 0x8761), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0bda, 0x8771), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x6655, 0x8771), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x7392, 0xc611), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x2b89, 0x8761), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, /* Additional Realtek 8821AE Bluetooth devices */ { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3458), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8822BE Bluetooth devices */ { USB_DEVICE(0x13d3, 0x3526), .driver_info = BTUSB_REALTEK }, { USB_DEVICE(0x0b05, 0x185c), .driver_info = BTUSB_REALTEK }, /* Additional Realtek 8822CE Bluetooth devices */ { USB_DEVICE(0x04ca, 0x4005), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x04c5, 0x161f), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0b05, 0x18ef), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3548), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3549), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3553), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x13d3, 0x3555), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x2ff8, 0x3051), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x1358, 0xc123), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0bda, 0xc123), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, { USB_DEVICE(0x0cb5, 0xc547), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, /* Actions Semiconductor ATS2851 based devices */ { USB_DEVICE(0x10d7, 0xb012), .driver_info = BTUSB_ACTIONS_SEMI }, /* Silicon Wave based devices */ { USB_DEVICE(0x0c10, 0x0000), .driver_info = BTUSB_SWAVE }, { } /* Terminating entry */ }; /* The Bluetooth USB module build into some devices needs to be reset on resume, * this is a problem with the platform (likely shutting off all power) not with * the module itself. 
So we use a DMI list to match known broken platforms. */ static const struct dmi_system_id btusb_needs_reset_resume_table[] = { { /* Dell OptiPlex 3060 (QCA ROME device 0cf3:e007) */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "OptiPlex 3060"), }, }, { /* Dell XPS 9360 (QCA ROME device 0cf3:e300) */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "XPS 13 9360"), }, }, { /* Dell Inspiron 5565 (QCA ROME device 0cf3:e009) */ .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 5565"), }, }, {} }; struct qca_dump_info { /* fields for dump collection */ u16 id_vendor; u16 id_product; u32 fw_version; u32 controller_id; u32 ram_dump_size; u16 ram_dump_seqno; }; #define BTUSB_MAX_ISOC_FRAMES 10 #define BTUSB_INTR_RUNNING 0 #define BTUSB_BULK_RUNNING 1 #define BTUSB_ISOC_RUNNING 2 #define BTUSB_SUSPENDING 3 #define BTUSB_DID_ISO_RESUME 4 #define BTUSB_BOOTLOADER 5 #define BTUSB_DOWNLOADING 6 #define BTUSB_FIRMWARE_LOADED 7 #define BTUSB_FIRMWARE_FAILED 8 #define BTUSB_BOOTING 9 #define BTUSB_DIAG_RUNNING 10 #define BTUSB_OOB_WAKE_ENABLED 11 #define BTUSB_HW_RESET_ACTIVE 12 #define BTUSB_TX_WAIT_VND_EVT 13 #define BTUSB_WAKEUP_AUTOSUSPEND 14 #define BTUSB_USE_ALT3_FOR_WBS 15 #define BTUSB_ALT6_CONTINUOUS_TX 16 #define BTUSB_HW_SSR_ACTIVE 17 struct btusb_data { struct hci_dev *hdev; struct usb_device *udev; struct usb_interface *intf; struct usb_interface *isoc; struct usb_interface *diag; unsigned isoc_ifnum; unsigned long flags; bool poll_sync; int intr_interval; struct work_struct work; struct work_struct waker; struct delayed_work rx_work; struct sk_buff_head acl_q; struct usb_anchor deferred; struct usb_anchor tx_anchor; int tx_in_flight; spinlock_t txlock; struct usb_anchor intr_anchor; struct usb_anchor bulk_anchor; struct usb_anchor isoc_anchor; struct usb_anchor diag_anchor; struct usb_anchor ctrl_anchor; spinlock_t rxlock; struct sk_buff *evt_skb; struct sk_buff *acl_skb; struct sk_buff *sco_skb; struct usb_endpoint_descriptor *intr_ep; struct usb_endpoint_descriptor *bulk_tx_ep; struct usb_endpoint_descriptor *bulk_rx_ep; struct usb_endpoint_descriptor *isoc_tx_ep; struct usb_endpoint_descriptor *isoc_rx_ep; struct usb_endpoint_descriptor *diag_tx_ep; struct usb_endpoint_descriptor *diag_rx_ep; struct gpio_desc *reset_gpio; __u8 cmdreq_type; __u8 cmdreq; unsigned int sco_num; unsigned int air_mode; bool usb_alt6_packet_flow; int isoc_altsetting; int suspend_count; int (*recv_event)(struct hci_dev *hdev, struct sk_buff *skb); int (*recv_acl)(struct hci_dev *hdev, struct sk_buff *skb); int (*recv_bulk)(struct btusb_data *data, void *buffer, int count); int (*setup_on_usb)(struct hci_dev *hdev); int oob_wake_irq; /* irq for out-of-band wake-on-bt */ unsigned cmd_timeout_cnt; struct qca_dump_info qca_dump; }; static void btusb_reset(struct hci_dev *hdev) { struct btusb_data *data; int err; if (hdev->reset) { hdev->reset(hdev); return; } data = hci_get_drvdata(hdev); /* This is not an unbalanced PM reference since the device will reset */ err = usb_autopm_get_interface(data->intf); if (err) { bt_dev_err(hdev, "Failed usb_autopm_get_interface: %d", err); return; } bt_dev_err(hdev, "Resetting usb device."); usb_queue_reset_device(data->intf); } static void btusb_intel_cmd_timeout(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); struct gpio_desc *reset_gpio = data->reset_gpio; struct btintel_data *intel_data = hci_get_priv(hdev); if (++data->cmd_timeout_cnt < 5) 
		return;

	if (intel_data->acpi_reset_method) {
		if (test_and_set_bit(INTEL_ACPI_RESET_ACTIVE, intel_data->flags)) {
			bt_dev_err(hdev, "acpi: last reset failed? Not resetting again");
			return;
		}

		bt_dev_err(hdev, "Initiating acpi reset method");

		/* If the ACPI reset method fails, fall back to legacy GPIO
		 * toggling below.
		 */
		if (!intel_data->acpi_reset_method(hdev))
			return;
	}

	if (!reset_gpio) {
		btusb_reset(hdev);
		return;
	}

	/*
	 * Toggle the hard reset line if the platform provides one. The reset
	 * is going to yank the device off the USB and then replug. So doing
	 * it once is enough. The cleanup is handled correctly on the way out
	 * (standard USB disconnect), and the new device is detected cleanly
	 * and bound to the driver again like it should be.
	 */
	if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
		bt_dev_err(hdev, "last reset failed? Not resetting again");
		return;
	}

	bt_dev_err(hdev, "Initiating HW reset via gpio");
	gpiod_set_value_cansleep(reset_gpio, 1);
	msleep(100);
	gpiod_set_value_cansleep(reset_gpio, 0);
}

#define RTK_DEVCOREDUMP_CODE_MEMDUMP		0x01
#define RTK_DEVCOREDUMP_CODE_HW_ERR		0x02
#define RTK_DEVCOREDUMP_CODE_CMD_TIMEOUT	0x03

#define RTK_SUB_EVENT_CODE_COREDUMP		0x34

struct rtk_dev_coredump_hdr {
	u8 type;
	u8 code;
	u8 reserved[2];
} __packed;

static inline void btusb_rtl_alloc_devcoredump(struct hci_dev *hdev,
		struct rtk_dev_coredump_hdr *hdr, u8 *buf, u32 len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + sizeof(*hdr), GFP_ATOMIC);
	if (!skb)
		return;

	skb_put_data(skb, hdr, sizeof(*hdr));
	if (len)
		skb_put_data(skb, buf, len);

	if (!hci_devcd_init(hdev, skb->len)) {
		hci_devcd_append(hdev, skb);
		hci_devcd_complete(hdev);
	} else {
		bt_dev_err(hdev, "RTL: Failed to generate devcoredump");
		kfree_skb(skb);
	}
}

static void btusb_rtl_cmd_timeout(struct hci_dev *hdev)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
	struct gpio_desc *reset_gpio = data->reset_gpio;
	struct rtk_dev_coredump_hdr hdr = {
		.type = RTK_DEVCOREDUMP_CODE_CMD_TIMEOUT,
	};

	btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);

	if (++data->cmd_timeout_cnt < 5)
		return;

	if (!reset_gpio) {
		btusb_reset(hdev);
		return;
	}

	/* Toggle the hard reset line. The Realtek device is going to
	 * yank itself off the USB and then replug. The cleanup is handled
	 * correctly on the way out (standard USB disconnect), and the new
	 * device is detected cleanly and bound to the driver again like
	 * it should be.
	 */
	if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
		bt_dev_err(hdev, "last reset failed? Not resetting again");
		return;
	}

	bt_dev_err(hdev, "Reset Realtek device via gpio");
	gpiod_set_value_cansleep(reset_gpio, 1);
	msleep(200);
	gpiod_set_value_cansleep(reset_gpio, 0);
}

static void btusb_rtl_hw_error(struct hci_dev *hdev, u8 code)
{
	struct rtk_dev_coredump_hdr hdr = {
		.type = RTK_DEVCOREDUMP_CODE_HW_ERR,
		.code = code,
	};

	bt_dev_err(hdev, "RTL: hw err, trigger devcoredump (%d)", code);

	btusb_rtl_alloc_devcoredump(hdev, &hdr, NULL, 0);
}

static void btusb_qca_cmd_timeout(struct hci_dev *hdev)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
	struct gpio_desc *reset_gpio = data->reset_gpio;

	if (test_bit(BTUSB_HW_SSR_ACTIVE, &data->flags)) {
		bt_dev_info(hdev, "Ramdump in progress, defer cmd_timeout");
		return;
	}

	if (++data->cmd_timeout_cnt < 5)
		return;

	if (reset_gpio) {
		bt_dev_err(hdev, "Reset qca device via bt_en gpio");

		/* Toggle the hard reset line. The qca bt device is going to
		 * yank itself off the USB and then replug.
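		 * (Note the inverted polarity relative to the Intel and
		 * Realtek paths above: the QCA bt_en line is driven low and
		 * then back high.)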
		 * The cleanup is handled correctly on the way out (standard
		 * USB disconnect), and the new device is detected cleanly and
		 * bound to the driver again like it should be.
		 */
		if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) {
			bt_dev_err(hdev, "last reset failed? Not resetting again");
			return;
		}

		gpiod_set_value_cansleep(reset_gpio, 0);
		msleep(200);
		gpiod_set_value_cansleep(reset_gpio, 1);

		return;
	}

	btusb_reset(hdev);
}

static inline void btusb_free_frags(struct btusb_data *data)
{
	unsigned long flags;

	spin_lock_irqsave(&data->rxlock, flags);

	dev_kfree_skb_irq(data->evt_skb);
	data->evt_skb = NULL;

	dev_kfree_skb_irq(data->acl_skb);
	data->acl_skb = NULL;

	dev_kfree_skb_irq(data->sco_skb);
	data->sco_skb = NULL;

	spin_unlock_irqrestore(&data->rxlock, flags);
}

static int btusb_recv_event(struct btusb_data *data, struct sk_buff *skb)
{
	if (data->intr_interval) {
		/* Trigger dequeue immediately if an event is received */
		schedule_delayed_work(&data->rx_work, 0);
	}

	return data->recv_event(data->hdev, skb);
}

static int btusb_recv_intr(struct btusb_data *data, void *buffer, int count)
{
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&data->rxlock, flags);
	skb = data->evt_skb;

	while (count) {
		int len;

		if (!skb) {
			skb = bt_skb_alloc(HCI_MAX_EVENT_SIZE, GFP_ATOMIC);
			if (!skb) {
				err = -ENOMEM;
				break;
			}

			hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
			hci_skb_expect(skb) = HCI_EVENT_HDR_SIZE;
		}

		len = min_t(uint, hci_skb_expect(skb), count);
		skb_put_data(skb, buffer, len);

		count -= len;
		buffer += len;
		hci_skb_expect(skb) -= len;

		if (skb->len == HCI_EVENT_HDR_SIZE) {
			/* Complete event header */
			hci_skb_expect(skb) = hci_event_hdr(skb)->plen;

			if (skb_tailroom(skb) < hci_skb_expect(skb)) {
				kfree_skb(skb);
				skb = NULL;

				err = -EILSEQ;
				break;
			}
		}

		if (!hci_skb_expect(skb)) {
			/* Complete frame */
			btusb_recv_event(data, skb);
			skb = NULL;
		}
	}

	data->evt_skb = skb;
	spin_unlock_irqrestore(&data->rxlock, flags);

	return err;
}

static int btusb_recv_acl(struct btusb_data *data, struct sk_buff *skb)
{
	/* Only queue ACL packet if intr_interval is set as it means
	 * force_poll_sync has been enabled.
	 */
	if (!data->intr_interval)
		return data->recv_acl(data->hdev, skb);

	skb_queue_tail(&data->acl_q, skb);
	schedule_delayed_work(&data->rx_work, data->intr_interval);

	return 0;
}

static int btusb_recv_bulk(struct btusb_data *data, void *buffer, int count)
{
	struct sk_buff *skb;
	unsigned long flags;
	int err = 0;

	spin_lock_irqsave(&data->rxlock, flags);
	skb = data->acl_skb;

	while (count) {
		int len;

		if (!skb) {
			skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
			if (!skb) {
				err = -ENOMEM;
				break;
			}

			hci_skb_pkt_type(skb) = HCI_ACLDATA_PKT;
			hci_skb_expect(skb) = HCI_ACL_HDR_SIZE;
		}

		len = min_t(uint, hci_skb_expect(skb), count);
		skb_put_data(skb, buffer, len);

		count -= len;
		buffer += len;
		hci_skb_expect(skb) -= len;

		if (skb->len == HCI_ACL_HDR_SIZE) {
			__le16 dlen = hci_acl_hdr(skb)->dlen;

			/* Complete ACL header */
			hci_skb_expect(skb) = __le16_to_cpu(dlen);

			if (skb_tailroom(skb) < hci_skb_expect(skb)) {
				kfree_skb(skb);
				skb = NULL;

				err = -EILSEQ;
				break;
			}
		}

		if (!hci_skb_expect(skb)) {
			/* Complete frame */
			btusb_recv_acl(data, skb);
			skb = NULL;
		}
	}

	data->acl_skb = skb;
	spin_unlock_irqrestore(&data->rxlock, flags);

	return err;
}

static bool btusb_validate_sco_handle(struct hci_dev *hdev,
				      struct hci_sco_hdr *hdr)
{
	__u16 handle;

	/* Can't validate, userspace controls everything */
	if (hci_dev_test_flag(hdev, HCI_USER_CHANNEL))
		return true;

	/*
	 * USB isochronous transfers are not designed to be reliable and may
	 * lose fragments.
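	 * Isochronous endpoints provide guaranteed bandwidth but no handshake
	 * or retransmission, so a lost fragment is simply gone.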
When this happens, the next first fragment * encountered might actually be a continuation fragment. * Validate the handle to detect it and drop it, or else the upper * layer will get garbage for a while. */ handle = hci_handle(__le16_to_cpu(hdr->handle)); switch (hci_conn_lookup_type(hdev, handle)) { case SCO_LINK: case ESCO_LINK: return true; default: return false; } } static int btusb_recv_isoc(struct btusb_data *data, void *buffer, int count) { struct sk_buff *skb; unsigned long flags; int err = 0; spin_lock_irqsave(&data->rxlock, flags); skb = data->sco_skb; while (count) { int len; if (!skb) { skb = bt_skb_alloc(HCI_MAX_SCO_SIZE, GFP_ATOMIC); if (!skb) { err = -ENOMEM; break; } hci_skb_pkt_type(skb) = HCI_SCODATA_PKT; hci_skb_expect(skb) = HCI_SCO_HDR_SIZE; } len = min_t(uint, hci_skb_expect(skb), count); skb_put_data(skb, buffer, len); count -= len; buffer += len; hci_skb_expect(skb) -= len; if (skb->len == HCI_SCO_HDR_SIZE) { /* Complete SCO header */ struct hci_sco_hdr *hdr = hci_sco_hdr(skb); hci_skb_expect(skb) = hdr->dlen; if (skb_tailroom(skb) < hci_skb_expect(skb) || !btusb_validate_sco_handle(data->hdev, hdr)) { kfree_skb(skb); skb = NULL; err = -EILSEQ; break; } } if (!hci_skb_expect(skb)) { /* Complete frame */ hci_recv_frame(data->hdev, skb); skb = NULL; } } data->sco_skb = skb; spin_unlock_irqrestore(&data->rxlock, flags); return err; } static void btusb_intr_complete(struct urb *urb) { struct hci_dev *hdev = urb->context; struct btusb_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { hdev->stat.byte_rx += urb->actual_length; if (btusb_recv_intr(data, urb->transfer_buffer, urb->actual_length) < 0) { bt_dev_err(hdev, "corrupted event packet"); hdev->stat.err_rx++; } } else if (urb->status == -ENOENT) { /* Avoid suspend failed when usb_kill_urb */ return; } if (!test_bit(BTUSB_INTR_RUNNING, &data->flags)) return; usb_mark_last_busy(data->udev); usb_anchor_urb(urb, &data->intr_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; * -ENODEV: device got disconnected */ if (err != -EPERM && err != -ENODEV) bt_dev_err(hdev, "urb %p failed to resubmit (%d)", urb, -err); if (err != -EPERM) hci_cmd_sync_cancel(hdev, -err); usb_unanchor_urb(urb); } } static int btusb_submit_intr_urb(struct hci_dev *hdev, gfp_t mem_flags) { struct btusb_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size; BT_DBG("%s", hdev->name); if (!data->intr_ep) return -ENODEV; urb = usb_alloc_urb(0, mem_flags); if (!urb) return -ENOMEM; size = le16_to_cpu(data->intr_ep->wMaxPacketSize); buf = kmalloc(size, mem_flags); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvintpipe(data->udev, data->intr_ep->bEndpointAddress); usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_intr_complete, hdev, data->intr_ep->bInterval); urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &data->intr_anchor); err = usb_submit_urb(urb, mem_flags); if (err < 0) { if (err != -EPERM && err != -ENODEV) bt_dev_err(hdev, "urb %p submission failed (%d)", urb, -err); if (err != -EPERM) hci_cmd_sync_cancel(hdev, -err); usb_unanchor_urb(urb); } /* Only initialize intr_interval if URB poll sync is enabled */ if (!data->poll_sync) goto done; /* The units are frames (milliseconds) for full and low speed devices, * and microframes (1/8 millisecond) for highspeed and 
SuperSpeed * devices. * * This is done once on open/resume so it shouldn't change even if * force_poll_sync changes. */ switch (urb->dev->speed) { case USB_SPEED_SUPER_PLUS: case USB_SPEED_SUPER: /* units are 125us */ data->intr_interval = usecs_to_jiffies(urb->interval * 125); break; default: data->intr_interval = msecs_to_jiffies(urb->interval); break; } done: usb_free_urb(urb); return err; } static void btusb_bulk_complete(struct urb *urb) { struct hci_dev *hdev = urb->context; struct btusb_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { hdev->stat.byte_rx += urb->actual_length; if (data->recv_bulk(data, urb->transfer_buffer, urb->actual_length) < 0) { bt_dev_err(hdev, "corrupted ACL packet"); hdev->stat.err_rx++; } } else if (urb->status == -ENOENT) { /* Avoid suspend failed when usb_kill_urb */ return; } if (!test_bit(BTUSB_BULK_RUNNING, &data->flags)) return; usb_anchor_urb(urb, &data->bulk_anchor); usb_mark_last_busy(data->udev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; * -ENODEV: device got disconnected */ if (err != -EPERM && err != -ENODEV) bt_dev_err(hdev, "urb %p failed to resubmit (%d)", urb, -err); usb_unanchor_urb(urb); } } static int btusb_submit_bulk_urb(struct hci_dev *hdev, gfp_t mem_flags) { struct btusb_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size = HCI_MAX_FRAME_SIZE; BT_DBG("%s", hdev->name); if (!data->bulk_rx_ep) return -ENODEV; urb = usb_alloc_urb(0, mem_flags); if (!urb) return -ENOMEM; buf = kmalloc(size, mem_flags); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvbulkpipe(data->udev, data->bulk_rx_ep->bEndpointAddress); usb_fill_bulk_urb(urb, data->udev, pipe, buf, size, btusb_bulk_complete, hdev); urb->transfer_flags |= URB_FREE_BUFFER; usb_mark_last_busy(data->udev); usb_anchor_urb(urb, &data->bulk_anchor); err = usb_submit_urb(urb, mem_flags); if (err < 0) { if (err != -EPERM && err != -ENODEV) bt_dev_err(hdev, "urb %p submission failed (%d)", urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static void btusb_isoc_complete(struct urb *urb) { struct hci_dev *hdev = urb->context; struct btusb_data *data = hci_get_drvdata(hdev); int i, err; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { for (i = 0; i < urb->number_of_packets; i++) { unsigned int offset = urb->iso_frame_desc[i].offset; unsigned int length = urb->iso_frame_desc[i].actual_length; if (urb->iso_frame_desc[i].status) continue; hdev->stat.byte_rx += length; if (btusb_recv_isoc(data, urb->transfer_buffer + offset, length) < 0) { bt_dev_err(hdev, "corrupted SCO packet"); hdev->stat.err_rx++; } } } else if (urb->status == -ENOENT) { /* Avoid suspend failed when usb_kill_urb */ return; } if (!test_bit(BTUSB_ISOC_RUNNING, &data->flags)) return; usb_anchor_urb(urb, &data->isoc_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { /* -EPERM: urb is being killed; * -ENODEV: device got disconnected */ if (err != -EPERM && err != -ENODEV) bt_dev_err(hdev, "urb %p failed to resubmit (%d)", urb, -err); usb_unanchor_urb(urb); } } static inline void __fill_isoc_descriptor_msbc(struct urb *urb, int len, int mtu, struct btusb_data *data) { int i = 0, offset = 0; unsigned int interval; BT_DBG("len 
%d mtu %d", len, mtu);

	/* For the mSBC ALT 6 setting, some chips need to transmit the data
	 * continuously, without interleaved zero-length USB packets.
	 */
	if (test_bit(BTUSB_ALT6_CONTINUOUS_TX, &data->flags))
		goto ignore_usb_alt6_packet_flow;

	/* For the mSBC ALT 6 setting the host sends packets in a continuous
	 * flow. Per Core spec 5, Vol 4, Part B, Table 2.1, the HCI packet
	 * interval for ALT setting 6 should be 7.5 ms per USB packet. To
	 * maintain that average rate, 63-byte USB packets are sent alternately
	 * at 7 ms and 8 ms intervals: (7 + 8) / 2 = 7.5 ms.
	 */
	if (data->usb_alt6_packet_flow) {
		interval = 7;
		data->usb_alt6_packet_flow = false;
	} else {
		interval = 6;
		data->usb_alt6_packet_flow = true;
	}

	/* Pace the transfer with zero-length frames (offset stays at 0) */
	for (i = 0; i < interval; i++) {
		urb->iso_frame_desc[i].offset = offset;
		urb->iso_frame_desc[i].length = offset;
	}

ignore_usb_alt6_packet_flow:
	if (len && i < BTUSB_MAX_ISOC_FRAMES) {
		urb->iso_frame_desc[i].offset = offset;
		urb->iso_frame_desc[i].length = len;
		i++;
	}

	urb->number_of_packets = i;
}

static inline void __fill_isoc_descriptor(struct urb *urb, int len, int mtu)
{
	int i, offset = 0;

	BT_DBG("len %d mtu %d", len, mtu);

	for (i = 0; i < BTUSB_MAX_ISOC_FRAMES && len >= mtu;
					i++, offset += mtu, len -= mtu) {
		urb->iso_frame_desc[i].offset = offset;
		urb->iso_frame_desc[i].length = mtu;
	}

	if (len && i < BTUSB_MAX_ISOC_FRAMES) {
		urb->iso_frame_desc[i].offset = offset;
		urb->iso_frame_desc[i].length = len;
		i++;
	}

	urb->number_of_packets = i;
}

static int btusb_submit_isoc_urb(struct hci_dev *hdev, gfp_t mem_flags)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
	struct urb *urb;
	unsigned char *buf;
	unsigned int pipe;
	int err, size;

	BT_DBG("%s", hdev->name);

	if (!data->isoc_rx_ep)
		return -ENODEV;

	urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, mem_flags);
	if (!urb)
		return -ENOMEM;

	size = le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize) *
						BTUSB_MAX_ISOC_FRAMES;

	buf = kmalloc(size, mem_flags);
	if (!buf) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	pipe = usb_rcvisocpipe(data->udev, data->isoc_rx_ep->bEndpointAddress);

	usb_fill_int_urb(urb, data->udev, pipe, buf, size, btusb_isoc_complete,
			 hdev, data->isoc_rx_ep->bInterval);

	urb->transfer_flags = URB_FREE_BUFFER | URB_ISO_ASAP;

	__fill_isoc_descriptor(urb, size,
			       le16_to_cpu(data->isoc_rx_ep->wMaxPacketSize));

	usb_anchor_urb(urb, &data->isoc_anchor);

	err = usb_submit_urb(urb, mem_flags);
	if (err < 0) {
		if (err != -EPERM && err != -ENODEV)
			bt_dev_err(hdev, "urb %p submission failed (%d)",
				   urb, -err);
		usb_unanchor_urb(urb);
	}

	usb_free_urb(urb);

	return err;
}

static void btusb_diag_complete(struct urb *urb)
{
	struct hci_dev *hdev = urb->context;
	struct btusb_data *data = hci_get_drvdata(hdev);
	int err;

	BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
	       urb->actual_length);

	if (urb->status == 0) {
		struct sk_buff *skb;

		skb = bt_skb_alloc(urb->actual_length, GFP_ATOMIC);
		if (skb) {
			skb_put_data(skb, urb->transfer_buffer,
				     urb->actual_length);
			hci_recv_diag(hdev, skb);
		}
	} else if (urb->status == -ENOENT) {
		/* Avoid suspend failed when usb_kill_urb */
		return;
	}

	if (!test_bit(BTUSB_DIAG_RUNNING, &data->flags))
		return;

	usb_anchor_urb(urb, &data->diag_anchor);
	usb_mark_last_busy(data->udev);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		/* -EPERM: urb is being killed;
		 * -ENODEV: device got disconnected
		 */
		if (err != -EPERM && err != -ENODEV)
			bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
				   urb, -err);
		usb_unanchor_urb(urb);
	}
}

static int btusb_submit_diag_urb(struct hci_dev *hdev, gfp_t mem_flags)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
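	/* The diagnostic channel is a plain bulk IN pipe; completed buffers
	 * are handed to hci_recv_diag() by btusb_diag_complete() above rather
	 * than to the regular HCI RX path.
	 */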
	struct urb *urb;
	unsigned char *buf;
	unsigned int pipe;
	int err, size = HCI_MAX_FRAME_SIZE;

	BT_DBG("%s", hdev->name);

	if (!data->diag_rx_ep)
		return -ENODEV;

	urb = usb_alloc_urb(0, mem_flags);
	if (!urb)
		return -ENOMEM;

	buf = kmalloc(size, mem_flags);
	if (!buf) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	pipe = usb_rcvbulkpipe(data->udev, data->diag_rx_ep->bEndpointAddress);

	usb_fill_bulk_urb(urb, data->udev, pipe, buf, size,
			  btusb_diag_complete, hdev);

	urb->transfer_flags |= URB_FREE_BUFFER;

	usb_mark_last_busy(data->udev);
	usb_anchor_urb(urb, &data->diag_anchor);

	err = usb_submit_urb(urb, mem_flags);
	if (err < 0) {
		if (err != -EPERM && err != -ENODEV)
			bt_dev_err(hdev, "urb %p submission failed (%d)",
				   urb, -err);
		usb_unanchor_urb(urb);
	}

	usb_free_urb(urb);

	return err;
}

static void btusb_tx_complete(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct hci_dev *hdev = (struct hci_dev *)skb->dev;
	struct btusb_data *data = hci_get_drvdata(hdev);
	unsigned long flags;

	BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
	       urb->actual_length);

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	if (!urb->status) {
		hdev->stat.byte_tx += urb->transfer_buffer_length;
	} else {
		if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT)
			hci_cmd_sync_cancel(hdev, -urb->status);
		hdev->stat.err_tx++;
	}

done:
	spin_lock_irqsave(&data->txlock, flags);
	data->tx_in_flight--;
	spin_unlock_irqrestore(&data->txlock, flags);

	kfree(urb->setup_packet);
	kfree_skb(skb);
}

static void btusb_isoc_tx_complete(struct urb *urb)
{
	struct sk_buff *skb = urb->context;
	struct hci_dev *hdev = (struct hci_dev *)skb->dev;

	BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status,
	       urb->actual_length);

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		goto done;

	if (!urb->status)
		hdev->stat.byte_tx += urb->transfer_buffer_length;
	else
		hdev->stat.err_tx++;

done:
	kfree(urb->setup_packet);
	kfree_skb(skb);
}

static int btusb_open(struct hci_dev *hdev)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
	int err;

	BT_DBG("%s", hdev->name);

	err = usb_autopm_get_interface(data->intf);
	if (err < 0)
		return err;

	/* Patch USB firmware files prior to starting any URBs on the HCI
	 * path. It is safer to use the USB bulk channel for downloading the
	 * USB patch.
	 */
	if (data->setup_on_usb) {
		err = data->setup_on_usb(hdev);
		if (err < 0)
			goto setup_fail;
	}

	data->intf->needs_remote_wakeup = 1;

	if (test_and_set_bit(BTUSB_INTR_RUNNING, &data->flags))
		goto done;

	err = btusb_submit_intr_urb(hdev, GFP_KERNEL);
	if (err < 0)
		goto failed;

	err = btusb_submit_bulk_urb(hdev, GFP_KERNEL);
	if (err < 0) {
		usb_kill_anchored_urbs(&data->intr_anchor);
		goto failed;
	}

	set_bit(BTUSB_BULK_RUNNING, &data->flags);
	btusb_submit_bulk_urb(hdev, GFP_KERNEL);

	if (data->diag) {
		if (!btusb_submit_diag_urb(hdev, GFP_KERNEL))
			set_bit(BTUSB_DIAG_RUNNING, &data->flags);
	}

done:
	usb_autopm_put_interface(data->intf);
	return 0;

failed:
	clear_bit(BTUSB_INTR_RUNNING, &data->flags);
setup_fail:
	usb_autopm_put_interface(data->intf);
	return err;
}

static void btusb_stop_traffic(struct btusb_data *data)
{
	usb_kill_anchored_urbs(&data->intr_anchor);
	usb_kill_anchored_urbs(&data->bulk_anchor);
	usb_kill_anchored_urbs(&data->isoc_anchor);
	usb_kill_anchored_urbs(&data->diag_anchor);
	usb_kill_anchored_urbs(&data->ctrl_anchor);
}

static int btusb_close(struct hci_dev *hdev)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
	int err;

	BT_DBG("%s", hdev->name);

	cancel_delayed_work(&data->rx_work);
	cancel_work_sync(&data->work);
	cancel_work_sync(&data->waker);

	skb_queue_purge(&data->acl_q);
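	/* Clear the RUNNING flags first so the URB completion handlers stop
	 * resubmitting before the anchored URBs are killed below.
	 */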
clear_bit(BTUSB_ISOC_RUNNING, &data->flags); clear_bit(BTUSB_BULK_RUNNING, &data->flags); clear_bit(BTUSB_INTR_RUNNING, &data->flags); clear_bit(BTUSB_DIAG_RUNNING, &data->flags); btusb_stop_traffic(data); btusb_free_frags(data); err = usb_autopm_get_interface(data->intf); if (err < 0) goto failed; data->intf->needs_remote_wakeup = 0; /* Enable remote wake up for auto-suspend */ if (test_bit(BTUSB_WAKEUP_AUTOSUSPEND, &data->flags)) data->intf->needs_remote_wakeup = 1; usb_autopm_put_interface(data->intf); failed: usb_scuttle_anchored_urbs(&data->deferred); return 0; } static int btusb_flush(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); cancel_delayed_work(&data->rx_work); skb_queue_purge(&data->acl_q); usb_kill_anchored_urbs(&data->tx_anchor); btusb_free_frags(data); return 0; } static struct urb *alloc_ctrl_urb(struct hci_dev *hdev, struct sk_buff *skb) { struct btusb_data *data = hci_get_drvdata(hdev); struct usb_ctrlrequest *dr; struct urb *urb; unsigned int pipe; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return ERR_PTR(-ENOMEM); dr = kmalloc(sizeof(*dr), GFP_KERNEL); if (!dr) { usb_free_urb(urb); return ERR_PTR(-ENOMEM); } dr->bRequestType = data->cmdreq_type; dr->bRequest = data->cmdreq; dr->wIndex = 0; dr->wValue = 0; dr->wLength = __cpu_to_le16(skb->len); pipe = usb_sndctrlpipe(data->udev, 0x00); usb_fill_control_urb(urb, data->udev, pipe, (void *)dr, skb->data, skb->len, btusb_tx_complete, skb); skb->dev = (void *)hdev; return urb; } static struct urb *alloc_bulk_urb(struct hci_dev *hdev, struct sk_buff *skb) { struct btusb_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned int pipe; if (!data->bulk_tx_ep) return ERR_PTR(-ENODEV); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return ERR_PTR(-ENOMEM); pipe = usb_sndbulkpipe(data->udev, data->bulk_tx_ep->bEndpointAddress); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, btusb_tx_complete, skb); skb->dev = (void *)hdev; return urb; } static struct urb *alloc_isoc_urb(struct hci_dev *hdev, struct sk_buff *skb) { struct btusb_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned int pipe; if (!data->isoc_tx_ep) return ERR_PTR(-ENODEV); urb = usb_alloc_urb(BTUSB_MAX_ISOC_FRAMES, GFP_KERNEL); if (!urb) return ERR_PTR(-ENOMEM); pipe = usb_sndisocpipe(data->udev, data->isoc_tx_ep->bEndpointAddress); usb_fill_int_urb(urb, data->udev, pipe, skb->data, skb->len, btusb_isoc_tx_complete, skb, data->isoc_tx_ep->bInterval); urb->transfer_flags = URB_ISO_ASAP; if (data->isoc_altsetting == 6) __fill_isoc_descriptor_msbc(urb, skb->len, le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize), data); else __fill_isoc_descriptor(urb, skb->len, le16_to_cpu(data->isoc_tx_ep->wMaxPacketSize)); skb->dev = (void *)hdev; return urb; } static int submit_tx_urb(struct hci_dev *hdev, struct urb *urb) { struct btusb_data *data = hci_get_drvdata(hdev); int err; usb_anchor_urb(urb, &data->tx_anchor); err = usb_submit_urb(urb, GFP_KERNEL); if (err < 0) { if (err != -EPERM && err != -ENODEV) bt_dev_err(hdev, "urb %p submission failed (%d)", urb, -err); kfree(urb->setup_packet); usb_unanchor_urb(urb); } else { usb_mark_last_busy(data->udev); } usb_free_urb(urb); return err; } static int submit_or_queue_tx_urb(struct hci_dev *hdev, struct urb *urb) { struct btusb_data *data = hci_get_drvdata(hdev); unsigned long flags; bool suspending; spin_lock_irqsave(&data->txlock, flags); suspending = test_bit(BTUSB_SUSPENDING, &data->flags); if (!suspending) data->tx_in_flight++; 
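	/* While suspending, new TX URBs are parked on the deferred anchor and
	 * the waker work resumes the interface; tx_in_flight only counts URBs
	 * that were actually submitted.
	 */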
spin_unlock_irqrestore(&data->txlock, flags); if (!suspending) return submit_tx_urb(hdev, urb); usb_anchor_urb(urb, &data->deferred); schedule_work(&data->waker); usb_free_urb(urb); return 0; } static int btusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct urb *urb; BT_DBG("%s", hdev->name); switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: urb = alloc_ctrl_urb(hdev, skb); if (IS_ERR(urb)) return PTR_ERR(urb); hdev->stat.cmd_tx++; return submit_or_queue_tx_urb(hdev, urb); case HCI_ACLDATA_PKT: urb = alloc_bulk_urb(hdev, skb); if (IS_ERR(urb)) return PTR_ERR(urb); hdev->stat.acl_tx++; return submit_or_queue_tx_urb(hdev, urb); case HCI_SCODATA_PKT: if (hci_conn_num(hdev, SCO_LINK) < 1) return -ENODEV; urb = alloc_isoc_urb(hdev, skb); if (IS_ERR(urb)) return PTR_ERR(urb); hdev->stat.sco_tx++; return submit_tx_urb(hdev, urb); case HCI_ISODATA_PKT: urb = alloc_bulk_urb(hdev, skb); if (IS_ERR(urb)) return PTR_ERR(urb); return submit_or_queue_tx_urb(hdev, urb); } return -EILSEQ; } static void btusb_notify(struct hci_dev *hdev, unsigned int evt) { struct btusb_data *data = hci_get_drvdata(hdev); BT_DBG("%s evt %d", hdev->name, evt); if (hci_conn_num(hdev, SCO_LINK) != data->sco_num) { data->sco_num = hci_conn_num(hdev, SCO_LINK); data->air_mode = evt; schedule_work(&data->work); } } static inline int __set_isoc_interface(struct hci_dev *hdev, int altsetting) { struct btusb_data *data = hci_get_drvdata(hdev); struct usb_interface *intf = data->isoc; struct usb_endpoint_descriptor *ep_desc; int i, err; if (!data->isoc) return -ENODEV; err = usb_set_interface(data->udev, data->isoc_ifnum, altsetting); if (err < 0) { bt_dev_err(hdev, "setting interface failed (%d)", -err); return err; } data->isoc_altsetting = altsetting; data->isoc_tx_ep = NULL; data->isoc_rx_ep = NULL; for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { ep_desc = &intf->cur_altsetting->endpoint[i].desc; if (!data->isoc_tx_ep && usb_endpoint_is_isoc_out(ep_desc)) { data->isoc_tx_ep = ep_desc; continue; } if (!data->isoc_rx_ep && usb_endpoint_is_isoc_in(ep_desc)) { data->isoc_rx_ep = ep_desc; continue; } } if (!data->isoc_tx_ep || !data->isoc_rx_ep) { bt_dev_err(hdev, "invalid SCO descriptors"); return -ENODEV; } return 0; } static int btusb_switch_alt_setting(struct hci_dev *hdev, int new_alts) { struct btusb_data *data = hci_get_drvdata(hdev); int err; if (data->isoc_altsetting != new_alts) { unsigned long flags; clear_bit(BTUSB_ISOC_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->isoc_anchor); /* When isochronous alternate setting needs to be * changed, because SCO connection has been added * or removed, a packet fragment may be left in the * reassembling state. This could lead to wrongly * assembled fragments. * * Clear outstanding fragment when selecting a new * alternate setting. 
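		 * Only the SCO reassembly buffer can hold such a fragment
		 * here, so just sco_skb is dropped under rxlock.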
*/ spin_lock_irqsave(&data->rxlock, flags); dev_kfree_skb_irq(data->sco_skb); data->sco_skb = NULL; spin_unlock_irqrestore(&data->rxlock, flags); err = __set_isoc_interface(hdev, new_alts); if (err < 0) return err; } if (!test_and_set_bit(BTUSB_ISOC_RUNNING, &data->flags)) { if (btusb_submit_isoc_urb(hdev, GFP_KERNEL) < 0) clear_bit(BTUSB_ISOC_RUNNING, &data->flags); else btusb_submit_isoc_urb(hdev, GFP_KERNEL); } return 0; } static struct usb_host_interface *btusb_find_altsetting(struct btusb_data *data, int alt) { struct usb_interface *intf = data->isoc; int i; BT_DBG("Looking for Alt no :%d", alt); if (!intf) return NULL; for (i = 0; i < intf->num_altsetting; i++) { if (intf->altsetting[i].desc.bAlternateSetting == alt) return &intf->altsetting[i]; } return NULL; } static void btusb_work(struct work_struct *work) { struct btusb_data *data = container_of(work, struct btusb_data, work); struct hci_dev *hdev = data->hdev; int new_alts = 0; int err; if (data->sco_num > 0) { if (!test_bit(BTUSB_DID_ISO_RESUME, &data->flags)) { err = usb_autopm_get_interface(data->isoc ? data->isoc : data->intf); if (err < 0) { clear_bit(BTUSB_ISOC_RUNNING, &data->flags); usb_kill_anchored_urbs(&data->isoc_anchor); return; } set_bit(BTUSB_DID_ISO_RESUME, &data->flags); } if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_CVSD) { if (hdev->voice_setting & 0x0020) { static const int alts[3] = { 2, 4, 5 }; new_alts = alts[data->sco_num - 1]; } else { new_alts = data->sco_num; } } else if (data->air_mode == HCI_NOTIFY_ENABLE_SCO_TRANSP) { /* Bluetooth USB spec recommends alt 6 (63 bytes), but * many adapters do not support it. Alt 1 appears to * work for all adapters that do not have alt 6, and * which work with WBS at all. Some devices prefer * alt 3 (HCI payload >= 60 Bytes let air packet * data satisfy 60 bytes), requiring * MTU >= 3 (packets) * 25 (size) - 3 (headers) = 72 * see also Core spec 5, vol 4, B 2.1.1 & Table 2.1. */ if (btusb_find_altsetting(data, 6)) new_alts = 6; else if (btusb_find_altsetting(data, 3) && hdev->sco_mtu >= 72 && test_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags)) new_alts = 3; else new_alts = 1; } if (btusb_switch_alt_setting(hdev, new_alts) < 0) bt_dev_err(hdev, "set USB alt:(%d) failed!", new_alts); } else { usb_kill_anchored_urbs(&data->isoc_anchor); if (test_and_clear_bit(BTUSB_ISOC_RUNNING, &data->flags)) __set_isoc_interface(hdev, 0); if (test_and_clear_bit(BTUSB_DID_ISO_RESUME, &data->flags)) usb_autopm_put_interface(data->isoc ? 
					  data->isoc : data->intf);
	}
}

static void btusb_waker(struct work_struct *work)
{
	struct btusb_data *data = container_of(work, struct btusb_data, waker);
	int err;

	err = usb_autopm_get_interface(data->intf);
	if (err < 0)
		return;

	usb_autopm_put_interface(data->intf);
}

static void btusb_rx_work(struct work_struct *work)
{
	struct btusb_data *data = container_of(work, struct btusb_data,
					       rx_work.work);
	struct sk_buff *skb;

	/* Dequeue ACL data received during the interval */
	while ((skb = skb_dequeue(&data->acl_q)))
		data->recv_acl(data->hdev, skb);
}

static int btusb_setup_bcm92035(struct hci_dev *hdev)
{
	struct sk_buff *skb;
	u8 val = 0x00;

	BT_DBG("%s", hdev->name);

	skb = __hci_cmd_sync(hdev, 0xfc3b, 1, &val, HCI_INIT_TIMEOUT);
	if (IS_ERR(skb))
		bt_dev_err(hdev, "BCM92035 command failed (%ld)", PTR_ERR(skb));
	else
		kfree_skb(skb);

	return 0;
}

static int btusb_setup_csr(struct hci_dev *hdev)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
	u16 bcdDevice = le16_to_cpu(data->udev->descriptor.bcdDevice);
	struct hci_rp_read_local_version *rp;
	struct sk_buff *skb;
	bool is_fake = false;
	int ret;

	BT_DBG("%s", hdev->name);

	skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		int err = PTR_ERR(skb);

		bt_dev_err(hdev, "CSR: Local version failed (%d)", err);
		return err;
	}

	rp = skb_pull_data(skb, sizeof(*rp));
	if (!rp) {
		bt_dev_err(hdev, "CSR: Local version length mismatch");
		kfree_skb(skb);
		return -EIO;
	}

	bt_dev_info(hdev, "CSR: Setting up dongle with HCI ver=%u rev=%04x",
		    rp->hci_ver, le16_to_cpu(rp->hci_rev));

	bt_dev_info(hdev, "LMP ver=%u subver=%04x; manufacturer=%u",
		    rp->lmp_ver, le16_to_cpu(rp->lmp_subver),
		    le16_to_cpu(rp->manufacturer));

	/* Detect a wide range of Chinese controllers that aren't CSR.
	 *
	 * Known fake bcdDevices: 0x0100, 0x0134, 0x1915, 0x2520, 0x7558, 0x8891
	 *
	 * The main thing they have in common is that these are really popular
	 * low-cost options that support newer Bluetooth versions but rely on
	 * heavy VID/PID squatting of this poor old Bluetooth 1.1 device, and
	 * are even sold as such.
	 *
	 * We detect actual CSR devices by checking that the HCI manufacturer
	 * code is Cambridge Silicon Radio (10) and ensuring that the LMP
	 * sub-version and HCI rev values always match, as they both store the
	 * firmware build number.
	 */
	if (le16_to_cpu(rp->manufacturer) != 10 ||
	    le16_to_cpu(rp->hci_rev) != le16_to_cpu(rp->lmp_subver))
		is_fake = true;

	/* Known legit CSR firmware build numbers and their supported BT versions:
	 * - 1.1 (0x1) -> 0x0073, 0x020d, 0x033c, 0x034e
	 * - 1.2 (0x2) -> 0x04d9, 0x0529
	 * - 2.0 (0x3) -> 0x07a6, 0x07ad, 0x0c5c
	 * - 2.1 (0x4) -> 0x149c, 0x1735, 0x1899 (0x1899 is a BlueCore4-External)
	 * - 4.0 (0x6) -> 0x1d86, 0x2031, 0x22bb
	 *
	 * E.g. real CSR dongles with LMP subversion 0x73 are old enough that
	 * they support BT 1.1 only, so it is a dead giveaway when some
	 * third-party BT 4.0 dongle reuses it.
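	 * The checks below therefore pair each firmware build-number ceiling
	 * with the highest Bluetooth version that generation can legitimately
	 * report.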
	 */
	else if (le16_to_cpu(rp->lmp_subver) <= 0x034e &&
		 rp->hci_ver > BLUETOOTH_VER_1_1)
		is_fake = true;

	else if (le16_to_cpu(rp->lmp_subver) <= 0x0529 &&
		 rp->hci_ver > BLUETOOTH_VER_1_2)
		is_fake = true;

	else if (le16_to_cpu(rp->lmp_subver) <= 0x0c5c &&
		 rp->hci_ver > BLUETOOTH_VER_2_0)
		is_fake = true;

	else if (le16_to_cpu(rp->lmp_subver) <= 0x1899 &&
		 rp->hci_ver > BLUETOOTH_VER_2_1)
		is_fake = true;

	else if (le16_to_cpu(rp->lmp_subver) <= 0x22bb &&
		 rp->hci_ver > BLUETOOTH_VER_4_0)
		is_fake = true;

	/* Other clones which beat all the above checks */
	else if (bcdDevice == 0x0134 &&
		 le16_to_cpu(rp->lmp_subver) == 0x0c5c &&
		 rp->hci_ver == BLUETOOTH_VER_2_0)
		is_fake = true;

	if (is_fake) {
		bt_dev_warn(hdev, "CSR: Unbranded CSR clone detected; adding workarounds and force-suspending once...");

		/* Generally these clones have big discrepancies between
		 * advertised features and what's actually supported. This
		 * list will probably need to be expanded in the future;
		 * without these quirks the controller will lock up.
		 */
		set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks);
		set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks);
		set_bit(HCI_QUIRK_BROKEN_FILTER_CLEAR_ALL, &hdev->quirks);
		set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks);

		/* Clear the reset quirk since this is not an actual
		 * early Bluetooth 1.1 device from CSR.
		 */
		clear_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks);
		clear_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks);

		/*
		 * Special workaround for these BT 4.0 chip clones, and potentially more:
		 *
		 * - 0x0134: a Barrot 8041a02 (HCI rev: 0x0810 sub: 0x1012)
		 * - 0x7558: IC markings FR3191AHAL 749H15143 (HCI rev/sub-version: 0x0709)
		 *
		 * These controllers are really messed-up.
		 *
		 * 1. Their bulk RX endpoint will never report any data unless
		 *    the device was suspended at least once (yes, really).
		 * 2. They will not wake up when autosuspended and receiving
		 *    data on their bulk RX endpoint from e.g. a keyboard or
		 *    mouse (IOW remote-wakeup support is broken for the bulk
		 *    endpoint).
		 *
		 * To fix 1., enable runtime suspend, force-suspend the HCI,
		 * and then wake it up by disabling runtime suspend.
		 *
		 * To fix 2., clear the HCI's can_wake flag; this way the HCI
		 * will still be autosuspended when it is not open.
		 *
		 * --
		 *
		 * Because these are widespread problems we prefer generic
		 * solutions, so apply this initialization quirk to every
		 * controller that gets here; it should be harmless. The
		 * alternative is not working at all.
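		 * Concretely, the sequence below allows runtime PM, forces one
		 * suspend, waits 200 ms, and then forbids runtime PM again
		 * (which also brings the device back to full power).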
		 */
		pm_runtime_allow(&data->udev->dev);

		ret = pm_runtime_suspend(&data->udev->dev);
		if (ret >= 0)
			msleep(200);
		else
			bt_dev_warn(hdev, "CSR: Couldn't suspend the device for our Barrot 8041a02 receive-issue workaround");

		pm_runtime_forbid(&data->udev->dev);

		device_set_wakeup_capable(&data->udev->dev, false);

		/* Re-enable autosuspend if this was requested */
		if (enable_autosuspend)
			usb_enable_autosuspend(data->udev);
	}

	kfree_skb(skb);
	return 0;
}

static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode)
{
	struct sk_buff *skb;
	struct hci_event_hdr *hdr;
	struct hci_ev_cmd_complete *evt;

	skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL);
	if (!skb)
		return -ENOMEM;

	hdr = skb_put(skb, sizeof(*hdr));
	hdr->evt = HCI_EV_CMD_COMPLETE;
	hdr->plen = sizeof(*evt) + 1;

	evt = skb_put(skb, sizeof(*evt));
	evt->ncmd = 0x01;
	evt->opcode = cpu_to_le16(opcode);

	skb_put_u8(skb, 0x00);

	hci_skb_pkt_type(skb) = HCI_EVENT_PKT;

	return hci_recv_frame(hdev, skb);
}

static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer,
				 int count)
{
	struct hci_dev *hdev = data->hdev;

	/* When the device is in bootloader mode, then it can send
	 * events via the bulk endpoint. These events are treated the
	 * same way as the ones received from the interrupt endpoint.
	 */
	if (btintel_test_flag(hdev, INTEL_BOOTLOADER))
		return btusb_recv_intr(data, buffer, count);

	return btusb_recv_bulk(data, buffer, count);
}

static int btusb_send_frame_intel(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct urb *urb;

	BT_DBG("%s", hdev->name);

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) {
			struct hci_command_hdr *cmd = (void *)skb->data;
			__u16 opcode = le16_to_cpu(cmd->opcode);

			/* When in bootloader mode and the command 0xfc09
			 * is received, it needs to be sent down the
			 * bulk endpoint. So allocate a bulk URB instead.
			 */
			if (opcode == 0xfc09)
				urb = alloc_bulk_urb(hdev, skb);
			else
				urb = alloc_ctrl_urb(hdev, skb);

			/* When the 0xfc01 command is issued to boot into
			 * the operational firmware, it will actually not
			 * send a command complete event. To keep the flow
			 * control working inject that event here.
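			 * inject_cmd_complete() above crafts that event with
			 * ncmd set to 1 and a status byte of 0x00, which
			 * unblocks the command queue.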
			 */
			if (opcode == 0xfc01)
				inject_cmd_complete(hdev, opcode);
		} else {
			urb = alloc_ctrl_urb(hdev, skb);
		}
		if (IS_ERR(urb))
			return PTR_ERR(urb);

		hdev->stat.cmd_tx++;
		return submit_or_queue_tx_urb(hdev, urb);

	case HCI_ACLDATA_PKT:
		urb = alloc_bulk_urb(hdev, skb);
		if (IS_ERR(urb))
			return PTR_ERR(urb);

		hdev->stat.acl_tx++;
		return submit_or_queue_tx_urb(hdev, urb);

	case HCI_SCODATA_PKT:
		if (hci_conn_num(hdev, SCO_LINK) < 1)
			return -ENODEV;

		urb = alloc_isoc_urb(hdev, skb);
		if (IS_ERR(urb))
			return PTR_ERR(urb);

		hdev->stat.sco_tx++;
		return submit_tx_urb(hdev, urb);

	case HCI_ISODATA_PKT:
		urb = alloc_bulk_urb(hdev, skb);
		if (IS_ERR(urb))
			return PTR_ERR(urb);

		return submit_or_queue_tx_urb(hdev, urb);
	}

	return -EILSEQ;
}

static int btusb_setup_realtek(struct hci_dev *hdev)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
	int ret;

	ret = btrtl_setup_realtek(hdev);

	if (btrealtek_test_flag(data->hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP))
		set_bit(BTUSB_ALT6_CONTINUOUS_TX, &data->flags);

	return ret;
}

static int btusb_recv_event_realtek(struct hci_dev *hdev, struct sk_buff *skb)
{
	if (skb->data[0] == HCI_VENDOR_PKT &&
	    skb->data[2] == RTK_SUB_EVENT_CODE_COREDUMP) {
		struct rtk_dev_coredump_hdr hdr = {
			.code = RTK_DEVCOREDUMP_CODE_MEMDUMP,
		};

		bt_dev_dbg(hdev, "RTL: received coredump vendor evt, len %u",
			   skb->len);

		btusb_rtl_alloc_devcoredump(hdev, &hdr, skb->data, skb->len);
		kfree_skb(skb);

		return 0;
	}

	return hci_recv_frame(hdev, skb);
}

/* UHW CR mapping */
#define MTK_BT_MISC		0x70002510
#define MTK_BT_SUBSYS_RST	0x70002610
#define MTK_UDMA_INT_STA_BT	0x74000024
#define MTK_UDMA_INT_STA_BT1	0x74000308
#define MTK_BT_WDT_STATUS	0x740003A0
#define MTK_EP_RST_OPT		0x74011890
#define MTK_EP_RST_IN_OUT_OPT	0x00010001
#define MTK_BT_RST_DONE		0x00000100
#define MTK_BT_RESET_REG_CONNV3	0x70028610
#define MTK_BT_READ_DEV_ID	0x70010200

static void btusb_mtk_wmt_recv(struct urb *urb)
{
	struct hci_dev *hdev = urb->context;
	struct btusb_data *data = hci_get_drvdata(hdev);
	struct sk_buff *skb;
	int err;

	if (urb->status == 0 && urb->actual_length > 0) {
		hdev->stat.byte_rx += urb->actual_length;

		/* WMT event shouldn't be fragmented and the size should be
		 * less than HCI_WMT_MAX_EVENT_SIZE.
		 */
		skb = bt_skb_alloc(HCI_WMT_MAX_EVENT_SIZE, GFP_ATOMIC);
		if (!skb) {
			hdev->stat.err_rx++;
			kfree(urb->setup_packet);
			return;
		}

		hci_skb_pkt_type(skb) = HCI_EVENT_PKT;
		skb_put_data(skb, urb->transfer_buffer, urb->actual_length);

		/* When someone is waiting for the WMT event, the skb is cloned
		 * and the event is processed from the clone.
		 */
		if (test_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
			data->evt_skb = skb_clone(skb, GFP_ATOMIC);
			if (!data->evt_skb) {
				kfree_skb(skb);
				kfree(urb->setup_packet);
				return;
			}
		}

		err = hci_recv_frame(hdev, skb);
		if (err < 0) {
			kfree_skb(data->evt_skb);
			data->evt_skb = NULL;
			kfree(urb->setup_packet);
			return;
		}

		if (test_and_clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags)) {
			/* Barrier to sync with other CPUs */
			smp_mb__after_atomic();
			wake_up_bit(&data->flags, BTUSB_TX_WAIT_VND_EVT);
		}

		kfree(urb->setup_packet);
		return;
	} else if (urb->status == -ENOENT) {
		/* Avoid suspend failed when usb_kill_urb */
		return;
	}

	usb_mark_last_busy(data->udev);

	/* The URB completion handler is still called with urb->actual_length = 0
	 * when the event is not yet available, so keep resubmitting the URB
	 * until the WMT event returns. It is also necessary to wait some time
	 * between two consecutive control URBs to give the target device time
	 * to generate the event; otherwise, the WMT event cannot be returned
	 * by the device successfully.
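	 * The udelay() below provides that gap before the URB is resubmitted.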
	 */
	udelay(500);

	usb_anchor_urb(urb, &data->ctrl_anchor);
	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err < 0) {
		kfree(urb->setup_packet);
		/* -EPERM: urb is being killed;
		 * -ENODEV: device got disconnected
		 */
		if (err != -EPERM && err != -ENODEV)
			bt_dev_err(hdev, "urb %p failed to resubmit (%d)",
				   urb, -err);
		usb_unanchor_urb(urb);
	}
}

static int btusb_mtk_submit_wmt_recv_urb(struct hci_dev *hdev)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
	struct usb_ctrlrequest *dr;
	unsigned char *buf;
	int err, size = 64;
	unsigned int pipe;
	struct urb *urb;

	urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!urb)
		return -ENOMEM;

	dr = kmalloc(sizeof(*dr), GFP_KERNEL);
	if (!dr) {
		usb_free_urb(urb);
		return -ENOMEM;
	}

	dr->bRequestType = USB_TYPE_VENDOR | USB_DIR_IN;
	dr->bRequest     = 1;
	dr->wIndex       = cpu_to_le16(0);
	dr->wValue       = cpu_to_le16(48);
	dr->wLength      = cpu_to_le16(size);

	buf = kmalloc(size, GFP_KERNEL);
	if (!buf) {
		kfree(dr);
		usb_free_urb(urb);
		return -ENOMEM;
	}

	pipe = usb_rcvctrlpipe(data->udev, 0);

	usb_fill_control_urb(urb, data->udev, pipe, (void *)dr,
			     buf, size, btusb_mtk_wmt_recv, hdev);

	urb->transfer_flags |= URB_FREE_BUFFER;

	usb_anchor_urb(urb, &data->ctrl_anchor);
	err = usb_submit_urb(urb, GFP_KERNEL);
	if (err < 0) {
		if (err != -EPERM && err != -ENODEV)
			bt_dev_err(hdev, "urb %p submission failed (%d)",
				   urb, -err);
		usb_unanchor_urb(urb);
	}

	usb_free_urb(urb);

	return err;
}

static int btusb_mtk_hci_wmt_sync(struct hci_dev *hdev,
				  struct btmtk_hci_wmt_params *wmt_params)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc;
	u32 hlen, status = BTMTK_WMT_INVALID;
	struct btmtk_hci_wmt_evt *wmt_evt;
	struct btmtk_hci_wmt_cmd *wc;
	struct btmtk_wmt_hdr *hdr;
	int err;

	/* Send the WMT command and wait until the WMT event returns */
	hlen = sizeof(*hdr) + wmt_params->dlen;
	if (hlen > 255)
		return -EINVAL;

	wc = kzalloc(hlen, GFP_KERNEL);
	if (!wc)
		return -ENOMEM;

	hdr = &wc->hdr;
	hdr->dir = 1;
	hdr->op = wmt_params->op;
	hdr->dlen = cpu_to_le16(wmt_params->dlen + 1);
	hdr->flag = wmt_params->flag;
	memcpy(wc->data, wmt_params->data, wmt_params->dlen);

	set_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);

	/* WMT cmd/event doesn't follow the generic HCI cmd/event handling;
	 * the control pipe has to be polled constantly until the host
	 * receives the WMT event. Thus, specifically acquire a PM counter on
	 * the USB interface to prevent it from entering autosuspend while a
	 * WMT cmd/event is in progress.
	 */
	err = usb_autopm_get_interface(data->intf);
	if (err < 0)
		goto err_free_wc;

	err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc);

	if (err < 0) {
		clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags);
		usb_autopm_put_interface(data->intf);
		goto err_free_wc;
	}

	/* Submit control IN URB on demand to process the WMT event */
	err = btusb_mtk_submit_wmt_recv_urb(hdev);

	usb_autopm_put_interface(data->intf);

	if (err < 0)
		goto err_free_wc;

	/* The vendor specific WMT commands are all answered by a vendor
	 * specific event and will not have the Command Status or Command
	 * Complete as with usual HCI command flow control.
	 *
	 * After sending the command, wait for the BTUSB_TX_WAIT_VND_EVT
	 * state to be cleared. The driver specific event receive routine
	 * will clear that state and thereby indicate completion of the
	 * WMT command.
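	 * Any non-zero return from wait_on_bit_timeout() other than -EINTR is
	 * treated as a timeout here.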
*/ err = wait_on_bit_timeout(&data->flags, BTUSB_TX_WAIT_VND_EVT, TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); if (err == -EINTR) { bt_dev_err(hdev, "Execution of wmt command interrupted"); clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); goto err_free_wc; } if (err) { bt_dev_err(hdev, "Execution of wmt command timed out"); clear_bit(BTUSB_TX_WAIT_VND_EVT, &data->flags); err = -ETIMEDOUT; goto err_free_wc; } /* Parse and handle the return WMT event */ wmt_evt = (struct btmtk_hci_wmt_evt *)data->evt_skb->data; if (wmt_evt->whdr.op != hdr->op) { bt_dev_err(hdev, "Wrong op received %d expected %d", wmt_evt->whdr.op, hdr->op); err = -EIO; goto err_free_skb; } switch (wmt_evt->whdr.op) { case BTMTK_WMT_SEMAPHORE: if (wmt_evt->whdr.flag == 2) status = BTMTK_WMT_PATCH_UNDONE; else status = BTMTK_WMT_PATCH_DONE; break; case BTMTK_WMT_FUNC_CTRL: wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) status = BTMTK_WMT_ON_DONE; else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) status = BTMTK_WMT_ON_PROGRESS; else status = BTMTK_WMT_ON_UNDONE; break; case BTMTK_WMT_PATCH_DWNLD: if (wmt_evt->whdr.flag == 2) status = BTMTK_WMT_PATCH_DONE; else if (wmt_evt->whdr.flag == 1) status = BTMTK_WMT_PATCH_PROGRESS; else status = BTMTK_WMT_PATCH_UNDONE; break; } if (wmt_params->status) *wmt_params->status = status; err_free_skb: kfree_skb(data->evt_skb); data->evt_skb = NULL; err_free_wc: kfree(wc); return err; } static int btusb_mtk_func_query(struct hci_dev *hdev) { struct btmtk_hci_wmt_params wmt_params; int status, err; u8 param = 0; /* Query whether the function is enabled */ wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 4; wmt_params.dlen = sizeof(param); wmt_params.data = &param; wmt_params.status = &status; err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to query function status (%d)", err); return err; } return status; } static int btusb_mtk_uhw_reg_write(struct btusb_data *data, u32 reg, u32 val) { struct hci_dev *hdev = data->hdev; int pipe, err; void *buf; buf = kzalloc(4, GFP_KERNEL); if (!buf) return -ENOMEM; put_unaligned_le32(val, buf); pipe = usb_sndctrlpipe(data->udev, 0); err = usb_control_msg(data->udev, pipe, 0x02, 0x5E, reg >> 16, reg & 0xffff, buf, 4, USB_CTRL_SET_TIMEOUT); if (err < 0) { bt_dev_err(hdev, "Failed to write uhw reg(%d)", err); goto err_free_buf; } err_free_buf: kfree(buf); return err; } static int btusb_mtk_uhw_reg_read(struct btusb_data *data, u32 reg, u32 *val) { struct hci_dev *hdev = data->hdev; int pipe, err; void *buf; buf = kzalloc(4, GFP_KERNEL); if (!buf) return -ENOMEM; pipe = usb_rcvctrlpipe(data->udev, 0); err = usb_control_msg(data->udev, pipe, 0x01, 0xDE, reg >> 16, reg & 0xffff, buf, 4, USB_CTRL_SET_TIMEOUT); if (err < 0) { bt_dev_err(hdev, "Failed to read uhw reg(%d)", err); goto err_free_buf; } *val = get_unaligned_le32(buf); bt_dev_dbg(hdev, "reg=%x, value=0x%08x", reg, *val); err_free_buf: kfree(buf); return err; } static int btusb_mtk_reg_read(struct btusb_data *data, u32 reg, u32 *val) { int pipe, err, size = sizeof(u32); void *buf; buf = kzalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; pipe = usb_rcvctrlpipe(data->udev, 0); err = usb_control_msg(data->udev, pipe, 0x63, USB_TYPE_VENDOR | USB_DIR_IN, reg >> 16, reg & 0xffff, buf, size, USB_CTRL_SET_TIMEOUT); if (err < 0) goto err_free_buf; *val = get_unaligned_le32(buf); err_free_buf: kfree(buf); return err; } static int btusb_mtk_id_get(struct btusb_data *data, u32 reg, u32 *id) { return 
btusb_mtk_reg_read(data, reg, id); } static u32 btusb_mtk_reset_done(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); u32 val = 0; btusb_mtk_uhw_reg_read(data, MTK_BT_MISC, &val); return val & MTK_BT_RST_DONE; } static int btusb_mtk_reset(struct hci_dev *hdev, void *rst_data) { struct btusb_data *data = hci_get_drvdata(hdev); struct btmediatek_data *mediatek; u32 val; int err; /* It's MediaTek specific bluetooth reset mechanism via USB */ if (test_and_set_bit(BTUSB_HW_RESET_ACTIVE, &data->flags)) { bt_dev_err(hdev, "last reset failed? Not resetting again"); return -EBUSY; } err = usb_autopm_get_interface(data->intf); if (err < 0) return err; btusb_stop_traffic(data); usb_kill_anchored_urbs(&data->tx_anchor); mediatek = hci_get_priv(hdev); if (mediatek->dev_id == 0x7925) { btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); val |= (1 << 5); btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); val &= 0xFFFF00FF; val |= (1 << 13); btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, 0x00010001); btusb_mtk_uhw_reg_read(data, MTK_BT_RESET_REG_CONNV3, &val); val |= (1 << 0); btusb_mtk_uhw_reg_write(data, MTK_BT_RESET_REG_CONNV3, val); btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF); btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val); btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF); btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val); msleep(100); } else { /* It's Device EndPoint Reset Option Register */ bt_dev_dbg(hdev, "Initiating reset mechanism via uhw"); btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT, MTK_EP_RST_IN_OUT_OPT); btusb_mtk_uhw_reg_read(data, MTK_BT_WDT_STATUS, &val); /* Reset the bluetooth chip via USB interface. 
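		 * The subsystem reset line is toggled by writing 1 and then 0
		 * to MTK_BT_SUBSYS_RST, with a delay in between.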
		 */
		btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 1);
		btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT, 0x000000FF);
		btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT, &val);
		btusb_mtk_uhw_reg_write(data, MTK_UDMA_INT_STA_BT1, 0x000000FF);
		btusb_mtk_uhw_reg_read(data, MTK_UDMA_INT_STA_BT1, &val);
		/* The MT7921 needs a 20 ms delay between toggling the reset bit */
		msleep(20);
		btusb_mtk_uhw_reg_write(data, MTK_BT_SUBSYS_RST, 0);
		btusb_mtk_uhw_reg_read(data, MTK_BT_SUBSYS_RST, &val);
	}

	err = readx_poll_timeout(btusb_mtk_reset_done, hdev, val,
				 val & MTK_BT_RST_DONE, 20000, 1000000);
	if (err < 0)
		bt_dev_err(hdev, "Reset timeout");

	btusb_mtk_id_get(data, 0x70010200, &val);
	if (!val)
		bt_dev_err(hdev, "Can't get device id, subsys reset failed");

	usb_queue_reset_device(data->intf);

	clear_bit(BTUSB_HW_RESET_ACTIVE, &data->flags);

	return err;
}

static int btusb_mtk_setup(struct hci_dev *hdev)
{
	struct btusb_data *data = hci_get_drvdata(hdev);
	struct btmtk_hci_wmt_params wmt_params;
	ktime_t calltime, delta, rettime;
	struct btmtk_tci_sleep tci_sleep;
	unsigned long long duration;
	struct sk_buff *skb;
	const char *fwname;
	int err, status;
	u32 dev_id = 0;
	char fw_bin_name[64];
	u32 fw_version = 0;
	u8 param;
	struct btmediatek_data *mediatek;

	calltime = ktime_get();

	err = btusb_mtk_id_get(data, 0x80000008, &dev_id);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to get device id (%d)", err);
		return err;
	}

	if (!dev_id || dev_id != 0x7663) {
		err = btusb_mtk_id_get(data, 0x70010200, &dev_id);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to get device id (%d)", err);
			return err;
		}
		err = btusb_mtk_id_get(data, 0x80021004, &fw_version);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to get fw version (%d)", err);
			return err;
		}
	}

	mediatek = hci_get_priv(hdev);
	mediatek->dev_id = dev_id;
	mediatek->reset_sync = btusb_mtk_reset;

	err = btmtk_register_coredump(hdev, btusb_driver.name, fw_version);
	if (err < 0)
		bt_dev_err(hdev, "Failed to register coredump (%d)", err);

	switch (dev_id) {
	case 0x7663:
		fwname = FIRMWARE_MT7663;
		break;
	case 0x7668:
		fwname = FIRMWARE_MT7668;
		break;
	case 0x7922:
	case 0x7961:
	case 0x7925:
		if (dev_id == 0x7925)
			snprintf(fw_bin_name, sizeof(fw_bin_name),
				 "mediatek/mt%04x/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
				 dev_id & 0xffff, dev_id & 0xffff,
				 (fw_version & 0xff) + 1);
		else
			snprintf(fw_bin_name, sizeof(fw_bin_name),
				 "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin",
				 dev_id & 0xffff, (fw_version & 0xff) + 1);

		err = btmtk_setup_firmware_79xx(hdev, fw_bin_name,
						btusb_mtk_hci_wmt_sync);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to set up firmware (%d)", err);
			return err;
		}

		/* It's Device EndPoint Reset Option Register */
		btusb_mtk_uhw_reg_write(data, MTK_EP_RST_OPT,
					MTK_EP_RST_IN_OUT_OPT);

		/* Enable Bluetooth protocol */
		param = 1;
		wmt_params.op = BTMTK_WMT_FUNC_CTRL;
		wmt_params.flag = 0;
		wmt_params.dlen = sizeof(param);
		wmt_params.data = &param;
		wmt_params.status = NULL;

		err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
		if (err < 0) {
			bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err);
			return err;
		}

		hci_set_msft_opcode(hdev, 0xFD30);
		hci_set_aosp_capable(hdev);
		goto done;
	default:
		bt_dev_err(hdev, "Unsupported hardware variant (%08x)",
			   dev_id);
		return -ENODEV;
	}

	/* Query whether the firmware is already downloaded */
	wmt_params.op = BTMTK_WMT_SEMAPHORE;
	wmt_params.flag = 1;
	wmt_params.dlen = 0;
	wmt_params.data = NULL;
	wmt_params.status = &status;

	err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params);
	if (err < 0) {
		bt_dev_err(hdev, "Failed to query firmware status (%d)", err);
		return err;
	}

	if (status == BTMTK_WMT_PATCH_DONE) {
		bt_dev_info(hdev,
"firmware already downloaded"); goto ignore_setup_fw; } /* Setup a firmware which the device definitely requires */ err = btmtk_setup_firmware(hdev, fwname, btusb_mtk_hci_wmt_sync); if (err < 0) return err; ignore_setup_fw: err = readx_poll_timeout(btusb_mtk_func_query, hdev, status, status < 0 || status != BTMTK_WMT_ON_PROGRESS, 2000, 5000000); /* -ETIMEDOUT happens */ if (err < 0) return err; /* The other errors happen in btusb_mtk_func_query */ if (status < 0) return status; if (status == BTMTK_WMT_ON_DONE) { bt_dev_info(hdev, "function already on"); goto ignore_func_on; } /* Enable Bluetooth protocol */ param = 1; wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 0; wmt_params.dlen = sizeof(param); wmt_params.data = &param; wmt_params.status = NULL; err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); return err; } ignore_func_on: /* Apply the low power environment setup */ tci_sleep.mode = 0x5; tci_sleep.duration = cpu_to_le16(0x640); tci_sleep.host_duration = cpu_to_le16(0x640); tci_sleep.host_wakeup_pin = 0; tci_sleep.time_compensation = 0; skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "Failed to apply low power setting (%d)", err); return err; } kfree_skb(skb); done: rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = (unsigned long long)ktime_to_ns(delta) >> 10; bt_dev_info(hdev, "Device setup in %llu usecs", duration); return 0; } static int btusb_mtk_shutdown(struct hci_dev *hdev) { struct btmtk_hci_wmt_params wmt_params; u8 param = 0; int err; /* Disable the device */ wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 0; wmt_params.dlen = sizeof(param); wmt_params.data = &param; wmt_params.status = NULL; err = btusb_mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); return err; } return 0; } static int btusb_recv_acl_mtk(struct hci_dev *hdev, struct sk_buff *skb) { struct btusb_data *data = hci_get_drvdata(hdev); u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle); struct sk_buff *skb_cd; switch (handle) { case 0xfc6f: /* Firmware dump from device */ /* When the firmware hangs, the device can no longer * suspend and thus disable auto-suspend. */ usb_disable_autosuspend(data->udev); /* We need to forward the diagnostic packet to userspace daemon * for backward compatibility, so we have to clone the packet * extraly for the in-kernel coredump support. 
*/ skb_cd = skb_clone(skb, GFP_ATOMIC); if (skb_cd) btmtk_process_coredump(hdev, skb_cd); fallthrough; case 0x05ff: /* Firmware debug logging 1 */ case 0x05fe: /* Firmware debug logging 2 */ return hci_recv_diag(hdev, skb); } return hci_recv_frame(hdev, skb); } #ifdef CONFIG_PM /* Configure an out-of-band gpio as wake-up pin, if specified in device tree */ static int marvell_config_oob_wake(struct hci_dev *hdev) { struct sk_buff *skb; struct btusb_data *data = hci_get_drvdata(hdev); struct device *dev = &data->udev->dev; u16 pin, gap, opcode; int ret; u8 cmd[5]; /* Move on if no wakeup pin specified */ if (of_property_read_u16(dev->of_node, "marvell,wakeup-pin", &pin) || of_property_read_u16(dev->of_node, "marvell,wakeup-gap-ms", &gap)) return 0; /* Vendor specific command to configure a GPIO as wake-up pin */ opcode = hci_opcode_pack(0x3F, 0x59); cmd[0] = opcode & 0xFF; cmd[1] = opcode >> 8; cmd[2] = 2; /* length of parameters that follow */ cmd[3] = pin; cmd[4] = gap; /* time in ms, for which wakeup pin should be asserted */ skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); if (!skb) { bt_dev_err(hdev, "%s: No memory", __func__); return -ENOMEM; } skb_put_data(skb, cmd, sizeof(cmd)); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; ret = btusb_send_frame(hdev, skb); if (ret) { bt_dev_err(hdev, "%s: configuration failed", __func__); kfree_skb(skb); return ret; } return 0; } #endif static int btusb_set_bdaddr_marvell(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; u8 buf[8]; long ret; buf[0] = 0xfe; buf[1] = sizeof(bdaddr_t); memcpy(buf + 2, bdaddr, sizeof(bdaddr_t)); skb = __hci_cmd_sync(hdev, 0xfc22, sizeof(buf), buf, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { ret = PTR_ERR(skb); bt_dev_err(hdev, "changing Marvell device address failed (%ld)", ret); return ret; } kfree_skb(skb); return 0; } static int btusb_set_bdaddr_ath3012(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; u8 buf[10]; long ret; buf[0] = 0x01; buf[1] = 0x01; buf[2] = 0x00; buf[3] = sizeof(bdaddr_t); memcpy(buf + 4, bdaddr, sizeof(bdaddr_t)); skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { ret = PTR_ERR(skb); bt_dev_err(hdev, "Change address command failed (%ld)", ret); return ret; } kfree_skb(skb); return 0; } static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; u8 buf[6]; long ret; memcpy(buf, bdaddr, sizeof(bdaddr_t)); skb = __hci_cmd_sync_ev(hdev, 0xfc14, sizeof(buf), buf, HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { ret = PTR_ERR(skb); bt_dev_err(hdev, "Change address command failed (%ld)", ret); return ret; } kfree_skb(skb); return 0; } #define QCA_MEMDUMP_ACL_HANDLE 0x2EDD #define QCA_MEMDUMP_SIZE_MAX 0x100000 #define QCA_MEMDUMP_VSE_CLASS 0x01 #define QCA_MEMDUMP_MSG_TYPE 0x08 #define QCA_MEMDUMP_PKT_SIZE 248 #define QCA_LAST_SEQUENCE_NUM 0xffff struct qca_dump_hdr { u8 vse_class; u8 msg_type; __le16 seqno; u8 reserved; union { u8 data[0]; struct { __le32 ram_dump_size; u8 data0[0]; } __packed; }; } __packed; static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb) { char buf[128]; struct btusb_data *btdata = hci_get_drvdata(hdev); snprintf(buf, sizeof(buf), "Controller Name: 0x%x\n", btdata->qca_dump.controller_id); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Firmware Version: 0x%x\n", btdata->qca_dump.fw_version); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Driver: %s\nVendor: qca\n", btusb_driver.name); skb_put_data(skb, 
buf, strlen(buf)); snprintf(buf, sizeof(buf), "VID: 0x%x\nPID: 0x%x\n", btdata->qca_dump.id_vendor, btdata->qca_dump.id_product); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Lmp Subversion: 0x%x\n", hdev->lmp_subver); skb_put_data(skb, buf, strlen(buf)); } static void btusb_coredump_qca(struct hci_dev *hdev) { static const u8 param[] = { 0x26 }; struct sk_buff *skb; skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb)); kfree_skb(skb); } /* * == 0: not a dump packet. * < 0: failed to handle a dump packet. * > 0: dump packet handled. */ static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) { int ret = 1; u8 pkt_type; u8 *sk_ptr; unsigned int sk_len; u16 seqno; u32 dump_size; struct hci_event_hdr *event_hdr; struct hci_acl_hdr *acl_hdr; struct qca_dump_hdr *dump_hdr; struct btusb_data *btdata = hci_get_drvdata(hdev); struct usb_device *udev = btdata->udev; pkt_type = hci_skb_pkt_type(skb); sk_ptr = skb->data; sk_len = skb->len; if (pkt_type == HCI_ACLDATA_PKT) { acl_hdr = hci_acl_hdr(skb); if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) return 0; sk_ptr += HCI_ACL_HDR_SIZE; sk_len -= HCI_ACL_HDR_SIZE; event_hdr = (struct hci_event_hdr *)sk_ptr; } else { event_hdr = hci_event_hdr(skb); } if ((event_hdr->evt != HCI_VENDOR_PKT) || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) return 0; sk_ptr += HCI_EVENT_HDR_SIZE; sk_len -= HCI_EVENT_HDR_SIZE; dump_hdr = (struct qca_dump_hdr *)sk_ptr; if ((sk_len < offsetof(struct qca_dump_hdr, data)) || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) return 0; /* it is a dump packet from here on */ seqno = le16_to_cpu(dump_hdr->seqno); if (seqno == 0) { set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); dump_size = le32_to_cpu(dump_hdr->ram_dump_size); if (!dump_size || (dump_size > QCA_MEMDUMP_SIZE_MAX)) { ret = -EILSEQ; bt_dev_err(hdev, "Invalid memdump size(%u)", dump_size); goto out; } ret = hci_devcd_init(hdev, dump_size); if (ret < 0) { bt_dev_err(hdev, "memdump init error(%d)", ret); goto out; } btdata->qca_dump.ram_dump_size = dump_size; btdata->qca_dump.ram_dump_seqno = 0; sk_ptr += offsetof(struct qca_dump_hdr, data0); sk_len -= offsetof(struct qca_dump_hdr, data0); usb_disable_autosuspend(udev); bt_dev_info(hdev, "%s memdump size(%u)\n", (pkt_type == HCI_ACLDATA_PKT) ? 
"ACL" : "event", dump_size); } else { sk_ptr += offsetof(struct qca_dump_hdr, data); sk_len -= offsetof(struct qca_dump_hdr, data); } if (!btdata->qca_dump.ram_dump_size) { ret = -EINVAL; bt_dev_err(hdev, "memdump is not active"); goto out; } if ((seqno > btdata->qca_dump.ram_dump_seqno + 1) && (seqno != QCA_LAST_SEQUENCE_NUM)) { dump_size = QCA_MEMDUMP_PKT_SIZE * (seqno - btdata->qca_dump.ram_dump_seqno - 1); hci_devcd_append_pattern(hdev, 0x0, dump_size); bt_dev_err(hdev, "expected memdump seqno(%u) is not received(%u)\n", btdata->qca_dump.ram_dump_seqno, seqno); btdata->qca_dump.ram_dump_seqno = seqno; kfree_skb(skb); return ret; } skb_pull(skb, skb->len - sk_len); hci_devcd_append(hdev, skb); btdata->qca_dump.ram_dump_seqno++; if (seqno == QCA_LAST_SEQUENCE_NUM) { bt_dev_info(hdev, "memdump done: pkts(%u), total(%u)\n", btdata->qca_dump.ram_dump_seqno, btdata->qca_dump.ram_dump_size); hci_devcd_complete(hdev); goto out; } return ret; out: if (btdata->qca_dump.ram_dump_size) usb_enable_autosuspend(udev); btdata->qca_dump.ram_dump_size = 0; btdata->qca_dump.ram_dump_seqno = 0; clear_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); if (ret < 0) kfree_skb(skb); return ret; } static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) { if (handle_dump_pkt_qca(hdev, skb)) return 0; return hci_recv_frame(hdev, skb); } static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb) { if (handle_dump_pkt_qca(hdev, skb)) return 0; return hci_recv_frame(hdev, skb); } #define QCA_DFU_PACKET_LEN 4096 #define QCA_GET_TARGET_VERSION 0x09 #define QCA_CHECK_STATUS 0x05 #define QCA_DFU_DOWNLOAD 0x01 #define QCA_SYSCFG_UPDATED 0x40 #define QCA_PATCH_UPDATED 0x80 #define QCA_DFU_TIMEOUT 3000 #define QCA_FLAG_MULTI_NVM 0x80 #define QCA_BT_RESET_WAIT_MS 100 #define WCN6855_2_0_RAM_VERSION_GF 0x400c1200 #define WCN6855_2_1_RAM_VERSION_GF 0x400c1211 struct qca_version { __le32 rom_version; __le32 patch_version; __le32 ram_version; __u8 chip_id; __u8 platform_id; __le16 flag; __u8 reserved[4]; } __packed; struct qca_rampatch_version { __le16 rom_version_high; __le16 rom_version_low; __le16 patch_version; } __packed; struct qca_device_info { u32 rom_version; u8 rampatch_hdr; /* length of header in rampatch */ u8 nvm_hdr; /* length of header in NVM */ u8 ver_offset; /* offset of version structure in rampatch */ }; static const struct qca_device_info qca_devices_table[] = { { 0x00000100, 20, 4, 8 }, /* Rome 1.0 */ { 0x00000101, 20, 4, 8 }, /* Rome 1.1 */ { 0x00000200, 28, 4, 16 }, /* Rome 2.0 */ { 0x00000201, 28, 4, 16 }, /* Rome 2.1 */ { 0x00000300, 28, 4, 16 }, /* Rome 3.0 */ { 0x00000302, 28, 4, 16 }, /* Rome 3.2 */ { 0x00130100, 40, 4, 16 }, /* WCN6855 1.0 */ { 0x00130200, 40, 4, 16 }, /* WCN6855 2.0 */ { 0x00130201, 40, 4, 16 }, /* WCN6855 2.1 */ { 0x00190200, 40, 4, 16 }, /* WCN785x 2.0 */ }; static int btusb_qca_send_vendor_req(struct usb_device *udev, u8 request, void *data, u16 size) { int pipe, err; u8 *buf; buf = kmalloc(size, GFP_KERNEL); if (!buf) return -ENOMEM; /* Found some of USB hosts have IOT issues with ours so that we should * not wait until HCI layer is ready. 
*/ pipe = usb_rcvctrlpipe(udev, 0); err = usb_control_msg(udev, pipe, request, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, buf, size, USB_CTRL_SET_TIMEOUT); if (err < 0) { dev_err(&udev->dev, "Failed to access otp area (%d)", err); goto done; } memcpy(data, buf, size); done: kfree(buf); return err; } static int btusb_setup_qca_download_fw(struct hci_dev *hdev, const struct firmware *firmware, size_t hdr_size) { struct btusb_data *btdata = hci_get_drvdata(hdev); struct usb_device *udev = btdata->udev; size_t count, size, sent = 0; int pipe, len, err; u8 *buf; buf = kmalloc(QCA_DFU_PACKET_LEN, GFP_KERNEL); if (!buf) return -ENOMEM; count = firmware->size; size = min_t(size_t, count, hdr_size); memcpy(buf, firmware->data, size); /* Patches are sent to the controller over USB: the control path * carries the patch headers, while the bulk path carries the * patch body. */ pipe = usb_sndctrlpipe(udev, 0); err = usb_control_msg(udev, pipe, QCA_DFU_DOWNLOAD, USB_TYPE_VENDOR, 0, 0, buf, size, USB_CTRL_SET_TIMEOUT); if (err < 0) { bt_dev_err(hdev, "Failed to send headers (%d)", err); goto done; } sent += size; count -= size; /* ep2 needs time to switch from the ACL function to the DFU * function, so add a 20ms delay here. */ msleep(20); while (count) { size = min_t(size_t, count, QCA_DFU_PACKET_LEN); memcpy(buf, firmware->data + sent, size); pipe = usb_sndbulkpipe(udev, 0x02); err = usb_bulk_msg(udev, pipe, buf, size, &len, QCA_DFU_TIMEOUT); if (err < 0) { bt_dev_err(hdev, "Failed to send body at %zd of %zd (%d)", sent, firmware->size, err); break; } if (size != len) { bt_dev_err(hdev, "Failed to get bulk buffer"); err = -EILSEQ; break; } sent += size; count -= size; } done: kfree(buf); return err; } static int btusb_setup_qca_load_rampatch(struct hci_dev *hdev, struct qca_version *ver, const struct qca_device_info *info) { struct qca_rampatch_version *rver; const struct firmware *fw; u32 ver_rom, ver_patch, rver_rom; u16 rver_rom_low, rver_rom_high, rver_patch; char fwname[64]; int err; ver_rom = le32_to_cpu(ver->rom_version); ver_patch = le32_to_cpu(ver->patch_version); snprintf(fwname, sizeof(fwname), "qca/rampatch_usb_%08x.bin", ver_rom); err = request_firmware(&fw, fwname, &hdev->dev); if (err) { bt_dev_err(hdev, "failed to request rampatch file: %s (%d)", fwname, err); return err; } bt_dev_info(hdev, "using rampatch file: %s", fwname); rver = (struct qca_rampatch_version *)(fw->data + info->ver_offset); rver_rom_low = le16_to_cpu(rver->rom_version_low); rver_patch = le16_to_cpu(rver->patch_version); if (ver_rom & ~0xffffU) { rver_rom_high = le16_to_cpu(rver->rom_version_high); rver_rom = rver_rom_high << 16 | rver_rom_low; } else { rver_rom = rver_rom_low; } bt_dev_info(hdev, "QCA: patch rome 0x%x build 0x%x, " "firmware rome 0x%x build 0x%x", rver_rom, rver_patch, ver_rom, ver_patch); if (rver_rom != ver_rom || rver_patch <= ver_patch) { bt_dev_err(hdev, "rampatch file version does not match the firmware"); err = -EINVAL; goto done; } err = btusb_setup_qca_download_fw(hdev, fw, info->rampatch_hdr); done: release_firmware(fw); return err; } static void btusb_generate_qca_nvm_name(char *fwname, size_t max_size, const struct qca_version *ver) { u32 rom_version = le32_to_cpu(ver->rom_version); u16 flag = le16_to_cpu(ver->flag); if (((flag >> 8) & 0xff) == QCA_FLAG_MULTI_NVM) { /* The board_id is made up of two bytes: * the 1st (high) byte is the chip ID and the 2nd (low) byte is * the platform ID. For example, for board ID 0x010A, 0x01 is the chip ID and 
0x0A is the platform ID * We have several platforms, and platform IDs are added over time * Platform ID: * 0x00 is for Mobile * 0x01 is for X86 * 0x02 is for Automotive * 0x03 is for Consumer electronic */ u16 board_id = (ver->chip_id << 8) + ver->platform_id; const char *variant; switch (le32_to_cpu(ver->ram_version)) { case WCN6855_2_0_RAM_VERSION_GF: case WCN6855_2_1_RAM_VERSION_GF: variant = "_gf"; break; default: variant = ""; break; } if (board_id == 0) { snprintf(fwname, max_size, "qca/nvm_usb_%08x%s.bin", rom_version, variant); } else { snprintf(fwname, max_size, "qca/nvm_usb_%08x%s_%04x.bin", rom_version, variant, board_id); } } else { snprintf(fwname, max_size, "qca/nvm_usb_%08x.bin", rom_version); } } static int btusb_setup_qca_load_nvm(struct hci_dev *hdev, struct qca_version *ver, const struct qca_device_info *info) { const struct firmware *fw; char fwname[64]; int err; btusb_generate_qca_nvm_name(fwname, sizeof(fwname), ver); err = request_firmware(&fw, fwname, &hdev->dev); if (err) { bt_dev_err(hdev, "failed to request NVM file: %s (%d)", fwname, err); return err; } bt_dev_info(hdev, "using NVM file: %s", fwname); err = btusb_setup_qca_download_fw(hdev, fw, info->nvm_hdr); release_firmware(fw); return err; } /* identify the ROM version and check whether patches are needed */ static bool btusb_qca_need_patch(struct usb_device *udev) { struct qca_version ver; if (btusb_qca_send_vendor_req(udev, QCA_GET_TARGET_VERSION, &ver, sizeof(ver)) < 0) return false; /* only low ROM versions need patches */ return !(le32_to_cpu(ver.rom_version) & ~0xffffU); } static int btusb_setup_qca(struct hci_dev *hdev) { struct btusb_data *btdata = hci_get_drvdata(hdev); struct usb_device *udev = btdata->udev; const struct qca_device_info *info = NULL; struct qca_version ver; u32 ver_rom; u8 status; int i, err; err = btusb_qca_send_vendor_req(udev, QCA_GET_TARGET_VERSION, &ver, sizeof(ver)); if (err < 0) return err; ver_rom = le32_to_cpu(ver.rom_version); for (i = 0; i < ARRAY_SIZE(qca_devices_table); i++) { if (ver_rom == qca_devices_table[i].rom_version) info = &qca_devices_table[i]; } if (!info) { /* If the rom_version is not matched in the qca_devices_table * and the high ROM version is not zero, we assume this chip * does not need to load the rampatch and NVM. */ if (ver_rom & ~0xffffU) return 0; bt_dev_err(hdev, "firmware rome 0x%x is not supported", ver_rom); return -ENODEV; } err = btusb_qca_send_vendor_req(udev, QCA_CHECK_STATUS, &status, sizeof(status)); if (err < 0) return err; if (!(status & QCA_PATCH_UPDATED)) { err = btusb_setup_qca_load_rampatch(hdev, &ver, info); if (err < 0) return err; } err = btusb_qca_send_vendor_req(udev, QCA_GET_TARGET_VERSION, &ver, sizeof(ver)); if (err < 0) return err; btdata->qca_dump.fw_version = le32_to_cpu(ver.patch_version); btdata->qca_dump.controller_id = le32_to_cpu(ver.rom_version); if (!(status & QCA_SYSCFG_UPDATED)) { err = btusb_setup_qca_load_nvm(hdev, &ver, info); if (err < 0) return err; /* WCN6855 2.1 and later will reset to apply the firmware * downloaded here, so wait ~100ms for the reset to complete * before going ahead; otherwise it may cause an enable failure. */ if (info->rom_version >= 0x00130201) msleep(QCA_BT_RESET_WAIT_MS); } /* Mark HCI_OP_ENHANCED_SETUP_SYNC_CONN as broken as it doesn't seem to * work with the likes of HSP/HFP mSBC. 
*/ set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks); return 0; } static inline int __set_diag_interface(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); struct usb_interface *intf = data->diag; int i; if (!data->diag) return -ENODEV; data->diag_tx_ep = NULL; data->diag_rx_ep = NULL; for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { struct usb_endpoint_descriptor *ep_desc; ep_desc = &intf->cur_altsetting->endpoint[i].desc; if (!data->diag_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { data->diag_tx_ep = ep_desc; continue; } if (!data->diag_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) { data->diag_rx_ep = ep_desc; continue; } } if (!data->diag_tx_ep || !data->diag_rx_ep) { bt_dev_err(hdev, "invalid diagnostic descriptors"); return -ENODEV; } return 0; } static struct urb *alloc_diag_urb(struct hci_dev *hdev, bool enable) { struct btusb_data *data = hci_get_drvdata(hdev); struct sk_buff *skb; struct urb *urb; unsigned int pipe; if (!data->diag_tx_ep) return ERR_PTR(-ENODEV); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return ERR_PTR(-ENOMEM); skb = bt_skb_alloc(2, GFP_KERNEL); if (!skb) { usb_free_urb(urb); return ERR_PTR(-ENOMEM); } skb_put_u8(skb, 0xf0); skb_put_u8(skb, enable); pipe = usb_sndbulkpipe(data->udev, data->diag_tx_ep->bEndpointAddress); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, btusb_tx_complete, skb); skb->dev = (void *)hdev; return urb; } static int btusb_bcm_set_diag(struct hci_dev *hdev, bool enable) { struct btusb_data *data = hci_get_drvdata(hdev); struct urb *urb; if (!data->diag) return -ENODEV; if (!test_bit(HCI_RUNNING, &hdev->flags)) return -ENETDOWN; urb = alloc_diag_urb(hdev, enable); if (IS_ERR(urb)) return PTR_ERR(urb); return submit_or_queue_tx_urb(hdev, urb); } #ifdef CONFIG_PM static irqreturn_t btusb_oob_wake_handler(int irq, void *priv) { struct btusb_data *data = priv; pm_wakeup_event(&data->udev->dev, 0); pm_system_wakeup(); /* Disable only if not already disabled (keep it balanced) */ if (test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags)) { disable_irq_nosync(irq); disable_irq_wake(irq); } return IRQ_HANDLED; } static const struct of_device_id btusb_match_table[] = { { .compatible = "usb1286,204e" }, { .compatible = "usbcf3,e300" }, /* QCA6174A */ { .compatible = "usb4ca,301a" }, /* QCA6174A (Lite-On) */ { } }; MODULE_DEVICE_TABLE(of, btusb_match_table); /* Use an oob wakeup pin? 
*/ static int btusb_config_oob_wake(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); struct device *dev = &data->udev->dev; int irq, ret; clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags); if (!of_match_device(btusb_match_table, dev)) return 0; /* Move on if no IRQ specified */ irq = of_irq_get_byname(dev->of_node, "wakeup"); if (irq <= 0) { bt_dev_dbg(hdev, "%s: no OOB Wakeup IRQ in DT", __func__); return 0; } irq_set_status_flags(irq, IRQ_NOAUTOEN); ret = devm_request_irq(&hdev->dev, irq, btusb_oob_wake_handler, 0, "OOB Wake-on-BT", data); if (ret) { bt_dev_err(hdev, "%s: IRQ request failed", __func__); return ret; } ret = device_init_wakeup(dev, true); if (ret) { bt_dev_err(hdev, "%s: failed to init_wakeup", __func__); return ret; } data->oob_wake_irq = irq; bt_dev_info(hdev, "OOB Wake-on-BT configured at IRQ %u", irq); return 0; } #endif static void btusb_check_needs_reset_resume(struct usb_interface *intf) { if (dmi_check_system(btusb_needs_reset_resume_table)) interface_to_usbdev(intf)->quirks |= USB_QUIRK_RESET_RESUME; } static bool btusb_wakeup(struct hci_dev *hdev) { struct btusb_data *data = hci_get_drvdata(hdev); return device_may_wakeup(&data->udev->dev); } static int btusb_shutdown_qca(struct hci_dev *hdev) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "HCI reset during shutdown failed"); return PTR_ERR(skb); } kfree_skb(skb); return 0; } static ssize_t force_poll_sync_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct btusb_data *data = file->private_data; char buf[3]; buf[0] = data->poll_sync ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t force_poll_sync_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct btusb_data *data = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; /* Only allow changes while the adapter is down */ if (test_bit(HCI_UP, &data->hdev->flags)) return -EPERM; if (data->poll_sync == enable) return -EALREADY; data->poll_sync = enable; return count; } static const struct file_operations force_poll_sync_fops = { .open = simple_open, .read = force_poll_sync_read, .write = force_poll_sync_write, .llseek = default_llseek, }; static int btusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_endpoint_descriptor *ep_desc; struct gpio_desc *reset_gpio; struct btusb_data *data; struct hci_dev *hdev; unsigned ifnum_base; int i, err, priv_size; BT_DBG("intf %p id %p", intf, id); if ((id->driver_info & BTUSB_IFNUM_2) && (intf->cur_altsetting->desc.bInterfaceNumber != 0) && (intf->cur_altsetting->desc.bInterfaceNumber != 2)) return -ENODEV; ifnum_base = intf->cur_altsetting->desc.bInterfaceNumber; if (!id->driver_info) { const struct usb_device_id *match; match = usb_match_id(intf, quirks_table); if (match) id = match; } if (id->driver_info == BTUSB_IGNORE) return -ENODEV; if (id->driver_info & BTUSB_ATH3012) { struct usb_device *udev = interface_to_usbdev(intf); /* Old firmware would otherwise let ath3k driver load * patch and sysconfig files */ if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001 && !btusb_qca_need_patch(udev)) return -ENODEV; } data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) { ep_desc = 
&intf->cur_altsetting->endpoint[i].desc; if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) { data->intr_ep = ep_desc; continue; } if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) { data->bulk_tx_ep = ep_desc; continue; } if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) { data->bulk_rx_ep = ep_desc; continue; } } if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) return -ENODEV; if (id->driver_info & BTUSB_AMP) { data->cmdreq_type = USB_TYPE_CLASS | 0x01; data->cmdreq = 0x2b; } else { data->cmdreq_type = USB_TYPE_CLASS; data->cmdreq = 0x00; } data->udev = interface_to_usbdev(intf); data->intf = intf; INIT_WORK(&data->work, btusb_work); INIT_WORK(&data->waker, btusb_waker); INIT_DELAYED_WORK(&data->rx_work, btusb_rx_work); skb_queue_head_init(&data->acl_q); init_usb_anchor(&data->deferred); init_usb_anchor(&data->tx_anchor); spin_lock_init(&data->txlock); init_usb_anchor(&data->intr_anchor); init_usb_anchor(&data->bulk_anchor); init_usb_anchor(&data->isoc_anchor); init_usb_anchor(&data->diag_anchor); init_usb_anchor(&data->ctrl_anchor); spin_lock_init(&data->rxlock); priv_size = 0; data->recv_event = hci_recv_frame; data->recv_bulk = btusb_recv_bulk; if (id->driver_info & BTUSB_INTEL_COMBINED) { /* Allocate extra space for Intel device */ priv_size += sizeof(struct btintel_data); /* Override the rx handlers */ data->recv_event = btintel_recv_event; data->recv_bulk = btusb_recv_bulk_intel; } else if (id->driver_info & BTUSB_REALTEK) { /* Allocate extra space for Realtek device */ priv_size += sizeof(struct btrealtek_data); data->recv_event = btusb_recv_event_realtek; } else if (id->driver_info & BTUSB_MEDIATEK) { /* Allocate extra space for Mediatek device */ priv_size += sizeof(struct btmediatek_data); } data->recv_acl = hci_recv_frame; hdev = hci_alloc_dev_priv(priv_size); if (!hdev) return -ENOMEM; hdev->bus = HCI_USB; hci_set_drvdata(hdev, data); if (id->driver_info & BTUSB_AMP) hdev->dev_type = HCI_AMP; else hdev->dev_type = HCI_PRIMARY; data->hdev = hdev; SET_HCIDEV_DEV(hdev, &intf->dev); reset_gpio = gpiod_get_optional(&data->udev->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(reset_gpio)) { err = PTR_ERR(reset_gpio); goto out_free_dev; } else if (reset_gpio) { data->reset_gpio = reset_gpio; } hdev->open = btusb_open; hdev->close = btusb_close; hdev->flush = btusb_flush; hdev->send = btusb_send_frame; hdev->notify = btusb_notify; hdev->wakeup = btusb_wakeup; #ifdef CONFIG_PM err = btusb_config_oob_wake(hdev); if (err) goto out_free_dev; /* Marvell devices may need a specific chip configuration */ if (id->driver_info & BTUSB_MARVELL && data->oob_wake_irq) { err = marvell_config_oob_wake(hdev); if (err) goto out_free_dev; } #endif if (id->driver_info & BTUSB_CW6622) set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); if (id->driver_info & BTUSB_BCM2045) set_bit(HCI_QUIRK_BROKEN_STORED_LINK_KEY, &hdev->quirks); if (id->driver_info & BTUSB_BCM92035) hdev->setup = btusb_setup_bcm92035; if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && (id->driver_info & BTUSB_BCM_PATCHRAM)) { hdev->manufacturer = 15; hdev->setup = btbcm_setup_patchram; hdev->set_diag = btusb_bcm_set_diag; hdev->set_bdaddr = btbcm_set_bdaddr; /* Broadcom LM_DIAG Interface numbers are hardcoded */ data->diag = usb_ifnum_to_if(data->udev, ifnum_base + 2); } if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && (id->driver_info & BTUSB_BCM_APPLE)) { hdev->manufacturer = 15; hdev->setup = btbcm_setup_apple; hdev->set_diag = btusb_bcm_set_diag; /* Broadcom LM_DIAG Interface numbers are hardcoded */ data->diag = 
usb_ifnum_to_if(data->udev, ifnum_base + 2); } /* Combined Intel Device setup to support multiple setup routine */ if (id->driver_info & BTUSB_INTEL_COMBINED) { err = btintel_configure_setup(hdev, btusb_driver.name); if (err) goto out_free_dev; /* Transport specific configuration */ hdev->send = btusb_send_frame_intel; hdev->cmd_timeout = btusb_intel_cmd_timeout; if (id->driver_info & BTUSB_INTEL_NO_WBS_SUPPORT) btintel_set_flag(hdev, INTEL_ROM_LEGACY_NO_WBS_SUPPORT); if (id->driver_info & BTUSB_INTEL_BROKEN_INITIAL_NCMD) btintel_set_flag(hdev, INTEL_BROKEN_INITIAL_NCMD); if (id->driver_info & BTUSB_INTEL_BROKEN_SHUTDOWN_LED) btintel_set_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED); } if (id->driver_info & BTUSB_MARVELL) hdev->set_bdaddr = btusb_set_bdaddr_marvell; if (IS_ENABLED(CONFIG_BT_HCIBTUSB_MTK) && (id->driver_info & BTUSB_MEDIATEK)) { hdev->setup = btusb_mtk_setup; hdev->shutdown = btusb_mtk_shutdown; hdev->manufacturer = 70; hdev->cmd_timeout = btmtk_reset_sync; hdev->set_bdaddr = btmtk_set_bdaddr; set_bit(HCI_QUIRK_BROKEN_ENHANCED_SETUP_SYNC_CONN, &hdev->quirks); set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); data->recv_acl = btusb_recv_acl_mtk; } if (id->driver_info & BTUSB_SWAVE) { set_bit(HCI_QUIRK_FIXUP_INQUIRY_MODE, &hdev->quirks); set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks); } if (id->driver_info & BTUSB_INTEL_BOOT) { hdev->manufacturer = 2; set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); } if (id->driver_info & BTUSB_ATH3012) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); } if (id->driver_info & BTUSB_QCA_ROME) { data->setup_on_usb = btusb_setup_qca; hdev->set_bdaddr = btusb_set_bdaddr_ath3012; hdev->cmd_timeout = btusb_qca_cmd_timeout; set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); btusb_check_needs_reset_resume(intf); } if (id->driver_info & BTUSB_QCA_WCN6855) { data->qca_dump.id_vendor = id->idVendor; data->qca_dump.id_product = id->idProduct; data->recv_event = btusb_recv_evt_qca; data->recv_acl = btusb_recv_acl_qca; hci_devcd_register(hdev, btusb_coredump_qca, btusb_dump_hdr_qca, NULL); data->setup_on_usb = btusb_setup_qca; hdev->shutdown = btusb_shutdown_qca; hdev->set_bdaddr = btusb_set_bdaddr_wcn6855; hdev->cmd_timeout = btusb_qca_cmd_timeout; set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); hci_set_msft_opcode(hdev, 0xFD70); } if (id->driver_info & BTUSB_AMP) { /* AMP controllers do not support SCO packets */ data->isoc = NULL; } else { /* Interface orders are hardcoded in the specification */ data->isoc = usb_ifnum_to_if(data->udev, ifnum_base + 1); data->isoc_ifnum = ifnum_base + 1; } if (IS_ENABLED(CONFIG_BT_HCIBTUSB_RTL) && (id->driver_info & BTUSB_REALTEK)) { btrtl_set_driver_name(hdev, btusb_driver.name); hdev->setup = btusb_setup_realtek; hdev->shutdown = btrtl_shutdown_realtek; hdev->cmd_timeout = btusb_rtl_cmd_timeout; hdev->hw_error = btusb_rtl_hw_error; /* Realtek devices need to set remote wakeup on auto-suspend */ set_bit(BTUSB_WAKEUP_AUTOSUSPEND, &data->flags); set_bit(BTUSB_USE_ALT3_FOR_WBS, &data->flags); } if (id->driver_info & BTUSB_ACTIONS_SEMI) { /* Support is advertised, but not implemented */ set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, &hdev->quirks); set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks); set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks); set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks); } if (!reset) 
set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) { if (!disable_scofix) set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks); } if (id->driver_info & BTUSB_BROKEN_ISOC) data->isoc = NULL; if (id->driver_info & BTUSB_WIDEBAND_SPEECH) set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); if (id->driver_info & BTUSB_VALID_LE_STATES) set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); if (id->driver_info & BTUSB_DIGIANSWER) { data->cmdreq_type = USB_TYPE_VENDOR; set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); } if (id->driver_info & BTUSB_CSR) { struct usb_device *udev = data->udev; u16 bcdDevice = le16_to_cpu(udev->descriptor.bcdDevice); /* Old firmware would otherwise execute USB reset */ if (bcdDevice < 0x117) set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); /* This must be set first in case we disable it for fakes */ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); /* Fake CSR devices with broken commands */ if (le16_to_cpu(udev->descriptor.idVendor) == 0x0a12 && le16_to_cpu(udev->descriptor.idProduct) == 0x0001) hdev->setup = btusb_setup_csr; } if (id->driver_info & BTUSB_SNIFFER) { struct usb_device *udev = data->udev; /* New sniffer firmware has crippled HCI interface */ if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); } if (id->driver_info & BTUSB_INTEL_BOOT) { /* A bug in the bootloader causes that interrupt interface is * only enabled after receiving SetInterface(0, AltSetting=0). */ err = usb_set_interface(data->udev, 0, 0); if (err < 0) { BT_ERR("failed to set interface 0, alt 0 %d", err); goto out_free_dev; } } if (data->isoc) { err = usb_driver_claim_interface(&btusb_driver, data->isoc, data); if (err < 0) goto out_free_dev; } if (IS_ENABLED(CONFIG_BT_HCIBTUSB_BCM) && data->diag) { if (!usb_driver_claim_interface(&btusb_driver, data->diag, data)) __set_diag_interface(hdev); else data->diag = NULL; } if (enable_autosuspend) usb_enable_autosuspend(data->udev); data->poll_sync = enable_poll_sync; err = hci_register_dev(hdev); if (err < 0) goto out_free_dev; usb_set_intfdata(intf, data); debugfs_create_file("force_poll_sync", 0644, hdev->debugfs, data, &force_poll_sync_fops); return 0; out_free_dev: if (data->reset_gpio) gpiod_put(data->reset_gpio); hci_free_dev(hdev); return err; } static void btusb_disconnect(struct usb_interface *intf) { struct btusb_data *data = usb_get_intfdata(intf); struct hci_dev *hdev; BT_DBG("intf %p", intf); if (!data) return; hdev = data->hdev; usb_set_intfdata(data->intf, NULL); if (data->isoc) usb_set_intfdata(data->isoc, NULL); if (data->diag) usb_set_intfdata(data->diag, NULL); hci_unregister_dev(hdev); if (intf == data->intf) { if (data->isoc) usb_driver_release_interface(&btusb_driver, data->isoc); if (data->diag) usb_driver_release_interface(&btusb_driver, data->diag); } else if (intf == data->isoc) { if (data->diag) usb_driver_release_interface(&btusb_driver, data->diag); usb_driver_release_interface(&btusb_driver, data->intf); } else if (intf == data->diag) { usb_driver_release_interface(&btusb_driver, data->intf); if (data->isoc) usb_driver_release_interface(&btusb_driver, data->isoc); } if (data->oob_wake_irq) device_init_wakeup(&data->udev->dev, false); if (data->reset_gpio) gpiod_put(data->reset_gpio); hci_free_dev(hdev); } #ifdef CONFIG_PM static int btusb_suspend(struct usb_interface *intf, pm_message_t message) { struct btusb_data *data = usb_get_intfdata(intf); BT_DBG("intf %p", intf); if (data->suspend_count++) 
return 0; spin_lock_irq(&data->txlock); if (!(PMSG_IS_AUTO(message) && data->tx_in_flight)) { set_bit(BTUSB_SUSPENDING, &data->flags); spin_unlock_irq(&data->txlock); } else { spin_unlock_irq(&data->txlock); data->suspend_count--; return -EBUSY; } cancel_work_sync(&data->work); btusb_stop_traffic(data); usb_kill_anchored_urbs(&data->tx_anchor); if (data->oob_wake_irq && device_may_wakeup(&data->udev->dev)) { set_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags); enable_irq_wake(data->oob_wake_irq); enable_irq(data->oob_wake_irq); } /* For global suspend, Realtek devices lose the loaded fw * in them. But for autosuspend, firmware should remain. * Actually, it depends on whether the usb host sends * set feature (enable wakeup) or not. */ if (test_bit(BTUSB_WAKEUP_AUTOSUSPEND, &data->flags)) { if (PMSG_IS_AUTO(message) && device_can_wakeup(&data->udev->dev)) data->udev->do_remote_wakeup = 1; else if (!PMSG_IS_AUTO(message) && !device_may_wakeup(&data->udev->dev)) { data->udev->do_remote_wakeup = 0; data->udev->reset_resume = 1; } } return 0; } static void play_deferred(struct btusb_data *data) { struct urb *urb; int err; while ((urb = usb_get_from_anchor(&data->deferred))) { usb_anchor_urb(urb, &data->tx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { if (err != -EPERM && err != -ENODEV) BT_ERR("%s urb %p submission failed (%d)", data->hdev->name, urb, -err); kfree(urb->setup_packet); usb_unanchor_urb(urb); usb_free_urb(urb); break; } data->tx_in_flight++; usb_free_urb(urb); } /* Cleanup the rest deferred urbs. */ while ((urb = usb_get_from_anchor(&data->deferred))) { kfree(urb->setup_packet); usb_free_urb(urb); } } static int btusb_resume(struct usb_interface *intf) { struct btusb_data *data = usb_get_intfdata(intf); struct hci_dev *hdev = data->hdev; int err = 0; BT_DBG("intf %p", intf); if (--data->suspend_count) return 0; /* Disable only if not already disabled (keep it balanced) */ if (test_and_clear_bit(BTUSB_OOB_WAKE_ENABLED, &data->flags)) { disable_irq(data->oob_wake_irq); disable_irq_wake(data->oob_wake_irq); } if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; if (test_bit(BTUSB_INTR_RUNNING, &data->flags)) { err = btusb_submit_intr_urb(hdev, GFP_NOIO); if (err < 0) { clear_bit(BTUSB_INTR_RUNNING, &data->flags); goto failed; } } if (test_bit(BTUSB_BULK_RUNNING, &data->flags)) { err = btusb_submit_bulk_urb(hdev, GFP_NOIO); if (err < 0) { clear_bit(BTUSB_BULK_RUNNING, &data->flags); goto failed; } btusb_submit_bulk_urb(hdev, GFP_NOIO); } if (test_bit(BTUSB_ISOC_RUNNING, &data->flags)) { if (btusb_submit_isoc_urb(hdev, GFP_NOIO) < 0) clear_bit(BTUSB_ISOC_RUNNING, &data->flags); else btusb_submit_isoc_urb(hdev, GFP_NOIO); } spin_lock_irq(&data->txlock); play_deferred(data); clear_bit(BTUSB_SUSPENDING, &data->flags); spin_unlock_irq(&data->txlock); schedule_work(&data->work); return 0; failed: usb_scuttle_anchored_urbs(&data->deferred); done: spin_lock_irq(&data->txlock); clear_bit(BTUSB_SUSPENDING, &data->flags); spin_unlock_irq(&data->txlock); return err; } #endif #ifdef CONFIG_DEV_COREDUMP static void btusb_coredump(struct device *dev) { struct btusb_data *data = dev_get_drvdata(dev); struct hci_dev *hdev = data->hdev; if (hdev->dump.coredump) hdev->dump.coredump(hdev); } #endif static struct usb_driver btusb_driver = { .name = "btusb", .probe = btusb_probe, .disconnect = btusb_disconnect, #ifdef CONFIG_PM .suspend = btusb_suspend, .resume = btusb_resume, #endif .id_table = btusb_table, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, #ifdef CONFIG_DEV_COREDUMP 
.drvwrap = { .driver = { .coredump = btusb_coredump, }, }, #endif }; module_usb_driver(btusb_driver); module_param(disable_scofix, bool, 0644); MODULE_PARM_DESC(disable_scofix, "Disable fixup of wrong SCO buffer size"); module_param(force_scofix, bool, 0644); MODULE_PARM_DESC(force_scofix, "Force fixup of wrong SCO buffer size"); module_param(enable_autosuspend, bool, 0644); MODULE_PARM_DESC(enable_autosuspend, "Enable USB autosuspend by default"); module_param(reset, bool, 0644); MODULE_PARM_DESC(reset, "Send HCI reset command on initialization"); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Generic Bluetooth USB driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
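/*
 * Illustrative user-space sketch (not part of the driver): toggling the
 * force_poll_sync debugfs switch created in btusb_probe() above. The
 * debugfs mount point and the "hci0" instance name are assumptions;
 * force_poll_sync_write() rejects changes while the adapter is up (-EPERM)
 * and parses the value with kstrtobool_from_user(), so "Y", "N", "1" and
 * "0" are all accepted.
 */
#if 0 /* example only, kept out of the kernel build */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* assumed path: <debugfs>/bluetooth/<hdev>/force_poll_sync */
	const char *path = "/sys/kernel/debug/bluetooth/hci0/force_poll_sync";
	int fd = open(path, O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (write(fd, "Y", 1) != 1)	/* fails with EPERM while hci0 is up */
		perror("write");
	close(fd);
	return 0;
}
#endif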
linux-master
drivers/bluetooth/btusb.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth HCI UART driver * * Copyright (C) 2000-2001 Qualcomm Incorporated * Copyright (C) 2002-2003 Maxim Krasnyansky <[email protected]> * Copyright (C) 2004-2005 Marcel Holtmann <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/signal.h> #include <linux/ioctl.h> #include <linux/skbuff.h> #include <linux/firmware.h> #include <linux/serdev.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btintel.h" #include "btbcm.h" #include "hci_uart.h" #define VERSION "2.3" static const struct hci_uart_proto *hup[HCI_UART_MAX_PROTO]; int hci_uart_register_proto(const struct hci_uart_proto *p) { if (p->id >= HCI_UART_MAX_PROTO) return -EINVAL; if (hup[p->id]) return -EEXIST; hup[p->id] = p; BT_INFO("HCI UART protocol %s registered", p->name); return 0; } int hci_uart_unregister_proto(const struct hci_uart_proto *p) { if (p->id >= HCI_UART_MAX_PROTO) return -EINVAL; if (!hup[p->id]) return -EINVAL; hup[p->id] = NULL; return 0; } static const struct hci_uart_proto *hci_uart_get_proto(unsigned int id) { if (id >= HCI_UART_MAX_PROTO) return NULL; return hup[id]; } static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type) { struct hci_dev *hdev = hu->hdev; /* Update HCI stat counters */ switch (pkt_type) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; } } static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) { struct sk_buff *skb = hu->tx_skb; if (!skb) { percpu_down_read(&hu->proto_lock); if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) skb = hu->proto->dequeue(hu); percpu_up_read(&hu->proto_lock); } else { hu->tx_skb = NULL; } return skb; } int hci_uart_tx_wakeup(struct hci_uart *hu) { /* This may be called in an IRQ context, so we can't sleep. Therefore * we only try to acquire the lock, and if that fails we assume the * tty is being closed because that is the only time the write lock is * acquired. If, however, at some point in the future the write lock * is also acquired in other situations, then this must be revisited. */ if (!percpu_down_read_trylock(&hu->proto_lock)) return 0; if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) goto no_schedule; set_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); if (test_and_set_bit(HCI_UART_SENDING, &hu->tx_state)) goto no_schedule; BT_DBG(""); schedule_work(&hu->write_work); no_schedule: percpu_up_read(&hu->proto_lock); return 0; } EXPORT_SYMBOL_GPL(hci_uart_tx_wakeup); static void hci_uart_write_work(struct work_struct *work) { struct hci_uart *hu = container_of(work, struct hci_uart, write_work); struct tty_struct *tty = hu->tty; struct hci_dev *hdev = hu->hdev; struct sk_buff *skb; /* REVISIT: should we cope with bad skbs or ->write() returning * an error value? 
*/ restart: clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); while ((skb = hci_uart_dequeue(hu))) { int len; set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); len = tty->ops->write(tty, skb->data, skb->len); hdev->stat.byte_tx += len; skb_pull(skb, len); if (skb->len) { hu->tx_skb = skb; break; } hci_uart_tx_complete(hu, hci_skb_pkt_type(skb)); kfree_skb(skb); } clear_bit(HCI_UART_SENDING, &hu->tx_state); if (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)) goto restart; wake_up_bit(&hu->tx_state, HCI_UART_SENDING); } void hci_uart_init_work(struct work_struct *work) { struct hci_uart *hu = container_of(work, struct hci_uart, init_ready); int err; struct hci_dev *hdev; if (!test_and_clear_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) return; err = hci_register_dev(hu->hdev); if (err < 0) { BT_ERR("Can't register HCI device"); clear_bit(HCI_UART_PROTO_READY, &hu->flags); hu->proto->close(hu); hdev = hu->hdev; hu->hdev = NULL; hci_free_dev(hdev); return; } set_bit(HCI_UART_REGISTERED, &hu->flags); } int hci_uart_init_ready(struct hci_uart *hu) { if (!test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) return -EALREADY; schedule_work(&hu->init_ready); return 0; } int hci_uart_wait_until_sent(struct hci_uart *hu) { return wait_on_bit_timeout(&hu->tx_state, HCI_UART_SENDING, TASK_INTERRUPTIBLE, msecs_to_jiffies(2000)); } /* ------- Interface to HCI layer ------ */ /* Reset device */ static int hci_uart_flush(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); struct tty_struct *tty = hu->tty; BT_DBG("hdev %p tty %p", hdev, tty); if (hu->tx_skb) { kfree_skb(hu->tx_skb); hu->tx_skb = NULL; } /* Flush any pending characters in the driver and discipline. */ tty_ldisc_flush(tty); tty_driver_flush_buffer(tty); percpu_down_read(&hu->proto_lock); if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) hu->proto->flush(hu); percpu_up_read(&hu->proto_lock); return 0; } /* Initialize device */ static int hci_uart_open(struct hci_dev *hdev) { BT_DBG("%s %p", hdev->name, hdev); /* Undo clearing this from hci_uart_close() */ hdev->flush = hci_uart_flush; return 0; } /* Close device */ static int hci_uart_close(struct hci_dev *hdev) { BT_DBG("hdev %p", hdev); hci_uart_flush(hdev); hdev->flush = NULL; return 0; } /* Send frames from HCI layer */ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb), skb->len); percpu_down_read(&hu->proto_lock); if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { percpu_up_read(&hu->proto_lock); return -EUNATCH; } hu->proto->enqueue(hu, skb); percpu_up_read(&hu->proto_lock); hci_uart_tx_wakeup(hu); return 0; } /* Check the underlying device or tty has flow control support */ bool hci_uart_has_flow_control(struct hci_uart *hu) { /* serdev nodes check if the needed operations are present */ if (hu->serdev) return true; if (hu->tty->driver->ops->tiocmget && hu->tty->driver->ops->tiocmset) return true; return false; } /* Flow control or un-flow control the device */ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) { struct tty_struct *tty = hu->tty; struct ktermios ktermios; int status; unsigned int set = 0; unsigned int clear = 0; if (hu->serdev) { serdev_device_set_flow_control(hu->serdev, !enable); serdev_device_set_rts(hu->serdev, !enable); return; } if (enable) { /* Disable hardware flow control */ ktermios = tty->termios; ktermios.c_cflag &= ~CRTSCTS; tty_set_termios(tty, &ktermios); BT_DBG("Disabling hardware flow control: %s", 
(tty->termios.c_cflag & CRTSCTS) ? "failed" : "success"); /* Clear RTS to prevent the device from sending */ /* Most UARTs need OUT2 to enable interrupts */ status = tty->driver->ops->tiocmget(tty); BT_DBG("Current tiocm 0x%x", status); set &= ~(TIOCM_OUT2 | TIOCM_RTS); clear = ~set; set &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | TIOCM_OUT2 | TIOCM_LOOP; clear &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | TIOCM_OUT2 | TIOCM_LOOP; status = tty->driver->ops->tiocmset(tty, set, clear); BT_DBG("Clearing RTS: %s", status ? "failed" : "success"); } else { /* Set RTS to allow the device to send again */ status = tty->driver->ops->tiocmget(tty); BT_DBG("Current tiocm 0x%x", status); set |= (TIOCM_OUT2 | TIOCM_RTS); clear = ~set; set &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | TIOCM_OUT2 | TIOCM_LOOP; clear &= TIOCM_DTR | TIOCM_RTS | TIOCM_OUT1 | TIOCM_OUT2 | TIOCM_LOOP; status = tty->driver->ops->tiocmset(tty, set, clear); BT_DBG("Setting RTS: %s", status ? "failed" : "success"); /* Re-enable hardware flow control */ ktermios = tty->termios; ktermios.c_cflag |= CRTSCTS; tty_set_termios(tty, &ktermios); BT_DBG("Enabling hardware flow control: %s", !(tty->termios.c_cflag & CRTSCTS) ? "failed" : "success"); } } void hci_uart_set_speeds(struct hci_uart *hu, unsigned int init_speed, unsigned int oper_speed) { hu->init_speed = init_speed; hu->oper_speed = oper_speed; } void hci_uart_set_baudrate(struct hci_uart *hu, unsigned int speed) { struct tty_struct *tty = hu->tty; struct ktermios ktermios; ktermios = tty->termios; ktermios.c_cflag &= ~CBAUD; tty_termios_encode_baud_rate(&ktermios, speed, speed); /* tty_set_termios() return not checked as it is always 0 */ tty_set_termios(tty, &ktermios); BT_DBG("%s: New tty speeds: %d/%d", hu->hdev->name, tty->termios.c_ispeed, tty->termios.c_ospeed); } static int hci_uart_setup(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); struct hci_rp_read_local_version *ver; struct sk_buff *skb; unsigned int speed; int err; /* Init speed if any */ if (hu->init_speed) speed = hu->init_speed; else if (hu->proto->init_speed) speed = hu->proto->init_speed; else speed = 0; if (speed) hci_uart_set_baudrate(hu, speed); /* Operational speed if any */ if (hu->oper_speed) speed = hu->oper_speed; else if (hu->proto->oper_speed) speed = hu->proto->oper_speed; else speed = 0; if (hu->proto->set_baudrate && speed) { err = hu->proto->set_baudrate(hu, speed); if (!err) hci_uart_set_baudrate(hu, speed); } if (hu->proto->setup) return hu->proto->setup(hu); if (!test_bit(HCI_UART_VND_DETECT, &hu->hdev_flags)) return 0; skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { BT_ERR("%s: Reading local version information failed (%ld)", hdev->name, PTR_ERR(skb)); return 0; } if (skb->len != sizeof(*ver)) { BT_ERR("%s: Event length mismatch for version information", hdev->name); goto done; } ver = (struct hci_rp_read_local_version *)skb->data; switch (le16_to_cpu(ver->manufacturer)) { #ifdef CONFIG_BT_HCIUART_INTEL case 2: hdev->set_bdaddr = btintel_set_bdaddr; btintel_check_bdaddr(hdev); break; #endif #ifdef CONFIG_BT_HCIUART_BCM case 15: hdev->set_bdaddr = btbcm_set_bdaddr; btbcm_check_bdaddr(hdev); break; #endif default: break; } done: kfree_skb(skb); return 0; } /* ------ LDISC part ------ */ /* hci_uart_tty_open * * Called when line discipline changed to HCI_UART. 
* * Arguments: * tty pointer to tty info structure * Return Value: * 0 if success, otherwise error code */ static int hci_uart_tty_open(struct tty_struct *tty) { struct hci_uart *hu; BT_DBG("tty %p", tty); if (!capable(CAP_NET_ADMIN)) return -EPERM; /* Error if the tty has no write op instead of leaving an exploitable * hole */ if (tty->ops->write == NULL) return -EOPNOTSUPP; hu = kzalloc(sizeof(struct hci_uart), GFP_KERNEL); if (!hu) { BT_ERR("Can't allocate control structure"); return -ENFILE; } if (percpu_init_rwsem(&hu->proto_lock)) { BT_ERR("Can't allocate semaphore structure"); kfree(hu); return -ENOMEM; } tty->disc_data = hu; hu->tty = tty; tty->receive_room = 65536; /* disable alignment support by default */ hu->alignment = 1; hu->padding = 0; INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); /* Flush any pending characters in the driver */ tty_driver_flush_buffer(tty); return 0; } /* hci_uart_tty_close() * * Called when the line discipline is changed to something * else, the tty is closed, or the tty detects a hangup. */ static void hci_uart_tty_close(struct tty_struct *tty) { struct hci_uart *hu = tty->disc_data; struct hci_dev *hdev; BT_DBG("tty %p", tty); /* Detach from the tty */ tty->disc_data = NULL; if (!hu) return; hdev = hu->hdev; if (hdev) hci_uart_close(hdev); if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) { percpu_down_write(&hu->proto_lock); clear_bit(HCI_UART_PROTO_READY, &hu->flags); percpu_up_write(&hu->proto_lock); cancel_work_sync(&hu->init_ready); cancel_work_sync(&hu->write_work); if (hdev) { if (test_bit(HCI_UART_REGISTERED, &hu->flags)) hci_unregister_dev(hdev); hci_free_dev(hdev); } hu->proto->close(hu); } clear_bit(HCI_UART_PROTO_SET, &hu->flags); percpu_free_rwsem(&hu->proto_lock); kfree(hu); } /* hci_uart_tty_wakeup() * * Callback for transmit wakeup. Called when the low-level * device driver can accept more data. * * Arguments: tty pointer to associated tty instance data * Return Value: None */ static void hci_uart_tty_wakeup(struct tty_struct *tty) { struct hci_uart *hu = tty->disc_data; BT_DBG(""); if (!hu) return; clear_bit(TTY_DO_WRITE_WAKEUP, &tty->flags); if (tty != hu->tty) return; if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) hci_uart_tx_wakeup(hu); } /* hci_uart_tty_receive() * * Called by the tty low-level driver when received data is * available. * * Arguments: tty pointer to tty instance data * data pointer to received data * flags pointer to flags for data * count count of received data in bytes * * Return Value: None */ static void hci_uart_tty_receive(struct tty_struct *tty, const u8 *data, const u8 *flags, size_t count) { struct hci_uart *hu = tty->disc_data; if (!hu || tty != hu->tty) return; percpu_down_read(&hu->proto_lock); if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { percpu_up_read(&hu->proto_lock); return; } /* No lock is needed here as this path is already protected by a mutex * in the tty caller */ hu->proto->recv(hu, data, count); percpu_up_read(&hu->proto_lock); if (hu->hdev) hu->hdev->stat.byte_rx += count; tty_unthrottle(tty); } static int hci_uart_register_dev(struct hci_uart *hu) { struct hci_dev *hdev; int err; BT_DBG(""); /* Initialize and register HCI device */ hdev = hci_alloc_dev(); if (!hdev) { BT_ERR("Can't allocate HCI device"); return -ENOMEM; } hu->hdev = hdev; hdev->bus = HCI_UART; hci_set_drvdata(hdev, hu); /* Consider the manufacturer information valid only when a vendor * specific setup callback is provided. 
This avoids filling in the * value for Ericsson when nothing is specified. */ if (hu->proto->setup) hdev->manufacturer = hu->proto->manufacturer; hdev->open = hci_uart_open; hdev->close = hci_uart_close; hdev->flush = hci_uart_flush; hdev->send = hci_uart_send_frame; hdev->setup = hci_uart_setup; SET_HCIDEV_DEV(hdev, hu->tty->dev); if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags)) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags)) set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks); if (!test_bit(HCI_UART_RESET_ON_INIT, &hu->hdev_flags)) set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) hdev->dev_type = HCI_AMP; else hdev->dev_type = HCI_PRIMARY; /* Only call open() for the protocol after hdev is fully initialized as * open() (or a timer/workqueue it starts) may attempt to reference it. */ err = hu->proto->open(hu); if (err) { hu->hdev = NULL; hci_free_dev(hdev); return err; } if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) return 0; if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); hu->proto->close(hu); hu->hdev = NULL; hci_free_dev(hdev); return -ENODEV; } set_bit(HCI_UART_REGISTERED, &hu->flags); return 0; } static int hci_uart_set_proto(struct hci_uart *hu, int id) { const struct hci_uart_proto *p; int err; p = hci_uart_get_proto(id); if (!p) return -EPROTONOSUPPORT; hu->proto = p; err = hci_uart_register_dev(hu); if (err) { return err; } set_bit(HCI_UART_PROTO_READY, &hu->flags); return 0; } static int hci_uart_set_flags(struct hci_uart *hu, unsigned long flags) { unsigned long valid_flags = BIT(HCI_UART_RAW_DEVICE) | BIT(HCI_UART_RESET_ON_INIT) | BIT(HCI_UART_CREATE_AMP) | BIT(HCI_UART_INIT_PENDING) | BIT(HCI_UART_EXT_CONFIG) | BIT(HCI_UART_VND_DETECT); if (flags & ~valid_flags) return -EINVAL; hu->hdev_flags = flags; return 0; } /* hci_uart_tty_ioctl() * * Process IOCTL system call for the tty device. * * Arguments: * * tty pointer to tty instance data * cmd IOCTL command code * arg argument for IOCTL call (cmd dependent) * * Return Value: Command dependent */ static int hci_uart_tty_ioctl(struct tty_struct *tty, unsigned int cmd, unsigned long arg) { struct hci_uart *hu = tty->disc_data; int err = 0; BT_DBG(""); /* Verify the status of the device */ if (!hu) return -EBADF; switch (cmd) { case HCIUARTSETPROTO: if (!test_and_set_bit(HCI_UART_PROTO_SET, &hu->flags)) { err = hci_uart_set_proto(hu, arg); if (err) clear_bit(HCI_UART_PROTO_SET, &hu->flags); } else err = -EBUSY; break; case HCIUARTGETPROTO: if (test_bit(HCI_UART_PROTO_SET, &hu->flags) && test_bit(HCI_UART_PROTO_READY, &hu->flags)) err = hu->proto->id; else err = -EUNATCH; break; case HCIUARTGETDEVICE: if (test_bit(HCI_UART_REGISTERED, &hu->flags)) err = hu->hdev->id; else err = -EUNATCH; break; case HCIUARTSETFLAGS: if (test_bit(HCI_UART_PROTO_SET, &hu->flags)) err = -EBUSY; else err = hci_uart_set_flags(hu, arg); break; case HCIUARTGETFLAGS: err = hu->hdev_flags; break; default: err = n_tty_ioctl_helper(tty, cmd, arg); break; } return err; } /* * We don't provide read/write/poll interface for user space. 
*/ static ssize_t hci_uart_tty_read(struct tty_struct *tty, struct file *file, u8 *buf, size_t nr, void **cookie, unsigned long offset) { return 0; } static ssize_t hci_uart_tty_write(struct tty_struct *tty, struct file *file, const u8 *data, size_t count) { return 0; } static struct tty_ldisc_ops hci_uart_ldisc = { .owner = THIS_MODULE, .num = N_HCI, .name = "n_hci", .open = hci_uart_tty_open, .close = hci_uart_tty_close, .read = hci_uart_tty_read, .write = hci_uart_tty_write, .ioctl = hci_uart_tty_ioctl, .compat_ioctl = hci_uart_tty_ioctl, .receive_buf = hci_uart_tty_receive, .write_wakeup = hci_uart_tty_wakeup, }; static int __init hci_uart_init(void) { int err; BT_INFO("HCI UART driver ver %s", VERSION); /* Register the tty discipline */ err = tty_register_ldisc(&hci_uart_ldisc); if (err) { BT_ERR("HCI line discipline registration failed. (%d)", err); return err; } #ifdef CONFIG_BT_HCIUART_H4 h4_init(); #endif #ifdef CONFIG_BT_HCIUART_BCSP bcsp_init(); #endif #ifdef CONFIG_BT_HCIUART_LL ll_init(); #endif #ifdef CONFIG_BT_HCIUART_ATH3K ath_init(); #endif #ifdef CONFIG_BT_HCIUART_3WIRE h5_init(); #endif #ifdef CONFIG_BT_HCIUART_INTEL intel_init(); #endif #ifdef CONFIG_BT_HCIUART_BCM bcm_init(); #endif #ifdef CONFIG_BT_HCIUART_QCA qca_init(); #endif #ifdef CONFIG_BT_HCIUART_AG6XX ag6xx_init(); #endif #ifdef CONFIG_BT_HCIUART_MRVL mrvl_init(); #endif return 0; } static void __exit hci_uart_exit(void) { #ifdef CONFIG_BT_HCIUART_H4 h4_deinit(); #endif #ifdef CONFIG_BT_HCIUART_BCSP bcsp_deinit(); #endif #ifdef CONFIG_BT_HCIUART_LL ll_deinit(); #endif #ifdef CONFIG_BT_HCIUART_ATH3K ath_deinit(); #endif #ifdef CONFIG_BT_HCIUART_3WIRE h5_deinit(); #endif #ifdef CONFIG_BT_HCIUART_INTEL intel_deinit(); #endif #ifdef CONFIG_BT_HCIUART_BCM bcm_deinit(); #endif #ifdef CONFIG_BT_HCIUART_QCA qca_deinit(); #endif #ifdef CONFIG_BT_HCIUART_AG6XX ag6xx_deinit(); #endif #ifdef CONFIG_BT_HCIUART_MRVL mrvl_deinit(); #endif tty_unregister_ldisc(&hci_uart_ldisc); } module_init(hci_uart_init); module_exit(hci_uart_exit); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth HCI UART driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS_LDISC(N_HCI);
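/*
 * Illustrative user-space sketch (not part of the driver): the counterpart
 * of hci_uart_tty_open()/hci_uart_tty_ioctl() above, as performed by tools
 * such as hciattach or btattach. The constants mirror hci_uart.h; treat the
 * literal values and the serial port path as assumptions and include the
 * real header in production code. CAP_NET_ADMIN is required.
 */
#if 0 /* example only, kept out of the kernel build */
#include <fcntl.h>
#include <linux/ioctl.h>
#include <sys/ioctl.h>
#include <unistd.h>

#define N_HCI		15			/* HCI line discipline */
#define HCIUARTSETPROTO	_IOW('U', 200, int)	/* see hci_uart.h */
#define HCI_UART_H4	0			/* plain H:4 protocol */

int main(void)
{
	int ldisc = N_HCI;
	int fd = open("/dev/ttyS0", O_RDWR | O_NOCTTY);	/* assumed port */

	if (fd < 0)
		return 1;
	/* triggers hci_uart_tty_open() */
	if (ioctl(fd, TIOCSETD, &ldisc) < 0)
		return 1;
	/* hci_uart_tty_ioctl() -> hci_uart_set_proto() registers the hdev */
	if (ioctl(fd, HCIUARTSETPROTO, HCI_UART_H4) < 0)
		return 1;
	pause();	/* keep the tty open so the hdev stays registered */
	return 0;
}
#endif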
linux-master
drivers/bluetooth/hci_ldisc.c
// SPDX-License-Identifier: GPL-2.0-only OR MIT /* * Bluetooth HCI driver for Broadcom 4377/4378/4387 devices attached via PCIe * * Copyright (C) The Asahi Linux Contributors */ #include <linux/async.h> #include <linux/bitfield.h> #include <linux/completion.h> #include <linux/dma-mapping.h> #include <linux/dmi.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/msi.h> #include <linux/of.h> #include <linux/pci.h> #include <linux/printk.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> enum bcm4377_chip { BCM4377 = 0, BCM4378, BCM4387, }; #define BCM4377_DEVICE_ID 0x5fa0 #define BCM4378_DEVICE_ID 0x5f69 #define BCM4387_DEVICE_ID 0x5f71 #define BCM4377_TIMEOUT 1000 /* * These devices only support DMA transactions inside a 32bit window * (possibly to avoid 64 bit arithmetic). The window size cannot exceed * 0xffffffff but is always aligned down to the previous 0x200 byte boundary * which effectively limits the window to [start, start+0xfffffe00]. * We just limit the DMA window to [0, 0xfffffe00] to make sure we don't * run into this limitation. */ #define BCM4377_DMA_MASK 0xfffffe00 #define BCM4377_PCIECFG_BAR0_WINDOW1 0x80 #define BCM4377_PCIECFG_BAR0_WINDOW2 0x70 #define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1 0x74 #define BCM4377_PCIECFG_BAR0_CORE2_WINDOW2 0x78 #define BCM4377_PCIECFG_BAR2_WINDOW 0x84 #define BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT 0x18011000 #define BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT 0x19000000 #define BCM4377_PCIECFG_SUBSYSTEM_CTRL 0x88 #define BCM4377_BAR0_FW_DOORBELL 0x140 #define BCM4377_BAR0_RTI_CONTROL 0x144 #define BCM4377_BAR0_SLEEP_CONTROL 0x150 #define BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE 0 #define BCM4377_BAR0_SLEEP_CONTROL_AWAKE 2 #define BCM4377_BAR0_SLEEP_CONTROL_QUIESCE 3 #define BCM4377_BAR0_DOORBELL 0x174 #define BCM4377_BAR0_DOORBELL_VALUE GENMASK(31, 16) #define BCM4377_BAR0_DOORBELL_IDX GENMASK(15, 8) #define BCM4377_BAR0_DOORBELL_RING BIT(5) #define BCM4377_BAR0_HOST_WINDOW_LO 0x590 #define BCM4377_BAR0_HOST_WINDOW_HI 0x594 #define BCM4377_BAR0_HOST_WINDOW_SIZE 0x598 #define BCM4377_BAR2_BOOTSTAGE 0x200454 #define BCM4377_BAR2_FW_LO 0x200478 #define BCM4377_BAR2_FW_HI 0x20047c #define BCM4377_BAR2_FW_SIZE 0x200480 #define BCM4377_BAR2_CONTEXT_ADDR_LO 0x20048c #define BCM4377_BAR2_CONTEXT_ADDR_HI 0x200450 #define BCM4377_BAR2_RTI_STATUS 0x20045c #define BCM4377_BAR2_RTI_WINDOW_LO 0x200494 #define BCM4377_BAR2_RTI_WINDOW_HI 0x200498 #define BCM4377_BAR2_RTI_WINDOW_SIZE 0x20049c #define BCM4377_OTP_SIZE 0xe0 #define BCM4377_OTP_SYS_VENDOR 0x15 #define BCM4377_OTP_CIS 0x80 #define BCM4377_OTP_VENDOR_HDR 0x00000008 #define BCM4377_OTP_MAX_PARAM_LEN 16 #define BCM4377_N_TRANSFER_RINGS 9 #define BCM4377_N_COMPLETION_RINGS 6 #define BCM4377_MAX_RING_SIZE 256 #define BCM4377_MSGID_GENERATION GENMASK(15, 8) #define BCM4377_MSGID_ID GENMASK(7, 0) #define BCM4377_RING_N_ENTRIES 128 #define BCM4377_CONTROL_MSG_SIZE 0x34 #define BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE (4 * 0xff) #define MAX_ACL_PAYLOAD_SIZE (HCI_MAX_FRAME_SIZE + HCI_ACL_HDR_SIZE) #define MAX_SCO_PAYLOAD_SIZE (HCI_MAX_SCO_SIZE + HCI_SCO_HDR_SIZE) #define MAX_EVENT_PAYLOAD_SIZE (HCI_MAX_EVENT_SIZE + HCI_EVENT_HDR_SIZE) enum bcm4377_otp_params_type { BCM4377_OTP_BOARD_PARAMS, BCM4377_OTP_CHIP_PARAMS }; enum bcm4377_transfer_ring_id { BCM4377_XFER_RING_CONTROL = 0, BCM4377_XFER_RING_HCI_H2D = 1, BCM4377_XFER_RING_HCI_D2H = 2, BCM4377_XFER_RING_SCO_H2D = 3, BCM4377_XFER_RING_SCO_D2H = 4, BCM4377_XFER_RING_ACL_H2D = 5, 
BCM4377_XFER_RING_ACL_D2H = 6, }; enum bcm4377_completion_ring_id { BCM4377_ACK_RING_CONTROL = 0, BCM4377_ACK_RING_HCI_ACL = 1, BCM4377_EVENT_RING_HCI_ACL = 2, BCM4377_ACK_RING_SCO = 3, BCM4377_EVENT_RING_SCO = 4, }; enum bcm4377_doorbell { BCM4377_DOORBELL_CONTROL = 0, BCM4377_DOORBELL_HCI_H2D = 1, BCM4377_DOORBELL_HCI_D2H = 2, BCM4377_DOORBELL_ACL_H2D = 3, BCM4377_DOORBELL_ACL_D2H = 4, BCM4377_DOORBELL_SCO = 6, }; /* * Transfer ring entry * * flags: Flags to indicate if the payload is appended or mapped * len: Payload length * payload: Optional payload DMA address * id: Message id to recognize the answer in the completion ring entry */ struct bcm4377_xfer_ring_entry { #define BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED BIT(0) #define BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER BIT(1) u8 flags; __le16 len; u8 _unk0; __le64 payload; __le16 id; u8 _unk1[2]; } __packed; static_assert(sizeof(struct bcm4377_xfer_ring_entry) == 0x10); /* * Completion ring entry * * flags: Flags to indicate if the payload is appended or mapped. If the payload * is mapped it can be found in the buffer of the corresponding transfer * ring message. * ring_id: Transfer ring ID which required this message * msg_id: Message ID specified in transfer ring entry * len: Payload length */ struct bcm4377_completion_ring_entry { u8 flags; u8 _unk0; __le16 ring_id; __le16 msg_id; __le32 len; u8 _unk1[6]; } __packed; static_assert(sizeof(struct bcm4377_completion_ring_entry) == 0x10); enum bcm4377_control_message_type { BCM4377_CONTROL_MSG_CREATE_XFER_RING = 1, BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING = 2, BCM4377_CONTROL_MSG_DESTROY_XFER_RING = 3, BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING = 4, }; /* * Control message used to create a completion ring * * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING * header_size: Unknown, but probably reserved space in front of the entry * footer_size: Number of 32 bit words reserved for payloads after the entry * id/id_again: Completion ring index * ring_iova: DMA address of the ring buffer * n_elements: Number of elements inside the ring buffer * msi: MSI index, doesn't work for all rings though and should be zero * intmod_delay: Unknown delay * intmod_bytes: Unknown */ struct bcm4377_create_completion_ring_msg { u8 msg_type; u8 header_size; u8 footer_size; u8 _unk0; __le16 id; __le16 id_again; __le64 ring_iova; __le16 n_elements; __le32 unk; u8 _unk1[6]; __le16 msi; __le16 intmod_delay; __le32 intmod_bytes; __le16 _unk2; __le32 _unk3; u8 _unk4[10]; } __packed; static_assert(sizeof(struct bcm4377_create_completion_ring_msg) == BCM4377_CONTROL_MSG_SIZE); /* * Control ring message used to destroy a completion ring * * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING * ring_id: Completion ring to be destroyed */ struct bcm4377_destroy_completion_ring_msg { u8 msg_type; u8 _pad0; __le16 ring_id; u8 _pad1[48]; } __packed; static_assert(sizeof(struct bcm4377_destroy_completion_ring_msg) == BCM4377_CONTROL_MSG_SIZE); /* * Control message used to create a transfer ring * * msg_type: Must be BCM4377_CONTROL_MSG_CREATE_XFER_RING * header_size: Number of 32 bit words reserved for unknown content before the * entry * footer_size: Number of 32 bit words reserved for payloads after the entry * ring_id/ring_id_again: Transfer ring index * ring_iova: DMA address of the ring buffer * n_elements: Number of elements inside the ring buffer * completion_ring_id: Completion ring index for acknowledgements and events * doorbell: Doorbell index used to notify device of new entries * flags: 
Transfer ring flags
 * - virtual: set if there is no associated shared memory and only the
 *            corresponding completion ring is used
 * - sync: only set for the SCO rings
 */
struct bcm4377_create_transfer_ring_msg {
	u8 msg_type;
	u8 header_size;
	u8 footer_size;
	u8 _unk0;
	__le16 ring_id;
	__le16 ring_id_again;
	__le64 ring_iova;
	u8 _unk1[8];
	__le16 n_elements;
	__le16 completion_ring_id;
	__le16 doorbell;
#define BCM4377_XFER_RING_FLAG_VIRTUAL BIT(7)
#define BCM4377_XFER_RING_FLAG_SYNC    BIT(8)
	__le16 flags;
	u8 _unk2[20];
} __packed;
static_assert(sizeof(struct bcm4377_create_transfer_ring_msg) ==
	      BCM4377_CONTROL_MSG_SIZE);

/*
 * Control ring message used to destroy a transfer ring
 *
 * msg_type: Must be BCM4377_CONTROL_MSG_DESTROY_XFER_RING
 * ring_id: Transfer ring to be destroyed
 */
struct bcm4377_destroy_transfer_ring_msg {
	u8 msg_type;
	u8 _pad0;
	__le16 ring_id;
	u8 _pad1[48];
} __packed;
static_assert(sizeof(struct bcm4377_destroy_transfer_ring_msg) ==
	      BCM4377_CONTROL_MSG_SIZE);

/*
 * "Converged IPC" context struct used to make the device aware of all other
 * shared memory structures. A pointer to this structure is configured inside
 * an MMIO register.
 *
 * version: Protocol version, must be 2.
 * size: Size of this structure, must be 0x68.
 * enabled_caps: Enabled capabilities. Unknown bitfield but should be 2.
 * peripheral_info_addr: DMA address for a 0x20 buffer to which the device will
 *                       write unknown contents
 * {completion,xfer}_ring_{tails,heads}_addr: DMA pointers to ring heads/tails
 * n_completion_rings: Number of completion rings, the firmware only works if
 *                     this is set to BCM4377_N_COMPLETION_RINGS.
 * n_xfer_rings: Number of transfer rings, the firmware only works if this is
 *               set to BCM4377_N_TRANSFER_RINGS.
 * control_completion_ring_addr: Control completion ring buffer DMA address
 * control_xfer_ring_addr: Control transfer ring buffer DMA address
 * control_xfer_ring_n_entries: Number of control transfer ring entries
 * control_completion_ring_n_entries: Number of control completion ring entries
 * control_xfer_ring_doorbell: Control transfer ring doorbell
 * control_completion_ring_doorbell: Control completion ring doorbell,
 *                                   must be set to 0xffff
 * control_xfer_ring_msi: Control transfer ring MSI index, must be 0
 * control_completion_ring_msi: Control completion ring MSI index, must be 0
 * control_xfer_ring_header_size: Number of 32 bit words reserved in front of
 *                                every control transfer ring entry
 * control_xfer_ring_footer_size: Number of 32 bit words reserved after every
 *                                control transfer ring entry
 * control_completion_ring_header_size: Number of 32 bit words reserved in
 *                                      front of every control completion ring
 *                                      entry
 * control_completion_ring_footer_size: Number of 32 bit words reserved after
 *                                      every control completion ring entry
 * scratch_pad: Optional scratch pad DMA address
 * scratch_pad_size: Scratch pad size
 */
struct bcm4377_context {
	__le16 version;
	__le16 size;
	__le32 enabled_caps;

	__le64 peripheral_info_addr;

	/* ring heads and tails */
	__le64 completion_ring_heads_addr;
	__le64 xfer_ring_tails_addr;
	__le64 completion_ring_tails_addr;
	__le64 xfer_ring_heads_addr;
	__le16 n_completion_rings;
	__le16 n_xfer_rings;

	/* control ring configuration */
	__le64 control_completion_ring_addr;
	__le64 control_xfer_ring_addr;
	__le16 control_xfer_ring_n_entries;
	__le16 control_completion_ring_n_entries;
	__le16 control_xfer_ring_doorbell;
	__le16 control_completion_ring_doorbell;
	__le16 control_xfer_ring_msi;
	__le16 control_completion_ring_msi;
	u8 control_xfer_ring_header_size;
	u8 control_xfer_ring_footer_size;
	u8 control_completion_ring_header_size;
	u8 control_completion_ring_footer_size;

	__le16 _unk0;
	__le16 _unk1;

	__le64 scratch_pad;
	__le32 scratch_pad_size;

	__le32 _unk3;
} __packed;
static_assert(sizeof(struct bcm4377_context) == 0x68);

#define BCM4378_CALIBRATION_CHUNK_SIZE 0xe6
struct bcm4378_hci_send_calibration_cmd {
	u8 unk;
	__le16 blocks_left;
	u8 data[BCM4378_CALIBRATION_CHUNK_SIZE];
} __packed;

#define BCM4378_PTB_CHUNK_SIZE 0xcf
struct bcm4378_hci_send_ptb_cmd {
	__le16 blocks_left;
	u8 data[BCM4378_PTB_CHUNK_SIZE];
} __packed;

/*
 * Shared memory structure used to store the ring head and tail pointers.
 */
struct bcm4377_ring_state {
	__le16 completion_ring_head[BCM4377_N_COMPLETION_RINGS];
	__le16 completion_ring_tail[BCM4377_N_COMPLETION_RINGS];
	__le16 xfer_ring_head[BCM4377_N_TRANSFER_RINGS];
	__le16 xfer_ring_tail[BCM4377_N_TRANSFER_RINGS];
};

/*
 * A transfer ring can be used in two configurations:
 * 1) Sending control or HCI messages to the device which are then acknowledged
 *    in the corresponding completion ring
 * 2) Receiving HCI frames from the device. In this case the transfer ring
 *    itself contains empty messages that are acknowledged once data is
 *    available from the device. If the payloads fit inside the footers
 *    of the completion ring the transfer ring can be configured to be
 *    virtual such that it has no ring buffer.
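 *
 * As an illustration (cross-referencing bcm4377_prepare_rings() further
 * below, nothing new is configured here): the host->device rings use
 * configuration 1 with payload_size/mapped_payload_size set,
 * hci_d2h_ring and sco_d2h_ring are virtual rings as described in
 * configuration 2, and acl_d2h_ring only provides empty mapped buffers
 * (d2h_buffers_only) for large incoming ACL frames.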
 *
 * ring_id: ring index hardcoded in the firmware
 * doorbell: doorbell index to notify device of new entries
 * payload_size: optional in-place payload size
 * mapped_payload_size: optional out-of-place payload size
 * completion_ring: index of corresponding completion ring
 * n_entries: number of entries inside this ring
 * generation: ring generation; incremented on hci_open to detect stale messages
 * sync: set to true for SCO rings
 * virtual: set to true if this ring has no entries and is just required to
 *          set up a corresponding completion ring for device->host messages
 * d2h_buffers_only: set to true if this ring is only used to provide large
 *                   buffers used by device->host messages in the completion
 *                   ring
 * allow_wait: allow waiting for messages to be acknowledged
 * enabled: true once the ring has been created and can be used
 * ring: ring buffer for entries (struct bcm4377_xfer_ring_entry)
 * ring_dma: DMA address for ring entry buffer
 * payloads: payload buffer for mapped_payload_size payloads
 * payloads_dma: DMA address for payload buffer
 * events: pointer to array of completions if waiting is allowed
 * msgids: bitmap to keep track of used message ids
 * lock: Spinlock to protect access to ring structures used in the irq handler
 */
struct bcm4377_transfer_ring {
	enum bcm4377_transfer_ring_id ring_id;
	enum bcm4377_doorbell doorbell;
	size_t payload_size;
	size_t mapped_payload_size;
	u8 completion_ring;
	u16 n_entries;
	u8 generation;

	bool sync;
	bool virtual;
	bool d2h_buffers_only;
	bool allow_wait;
	bool enabled;

	void *ring;
	dma_addr_t ring_dma;

	void *payloads;
	dma_addr_t payloads_dma;

	struct completion **events;

	DECLARE_BITMAP(msgids, BCM4377_MAX_RING_SIZE);
	spinlock_t lock;
};

/*
 * A completion ring can be used to either acknowledge messages sent in
 * the corresponding transfer ring or to receive messages associated with the
 * transfer ring. When used to receive messages the transfer ring either
 * has no ring buffer and is only advanced ("virtual transfer ring") or it
 * only contains empty DMA buffers to be used for the payloads.
 *
 * ring_id: completion ring id, hardcoded in firmware
 * payload_size: optional payload size after each entry
 * delay: unknown delay
 * n_entries: number of entries in this ring
 * enabled: true once the ring has been created and can be used
 * ring: ring buffer for entries (struct bcm4377_completion_ring_entry)
 * ring_dma: DMA address of ring buffer
 * transfer_rings: bitmap of corresponding transfer ring ids
 */
struct bcm4377_completion_ring {
	enum bcm4377_completion_ring_id ring_id;
	u16 payload_size;
	u16 delay;
	u16 n_entries;
	bool enabled;

	void *ring;
	dma_addr_t ring_dma;

	unsigned long transfer_rings;
};

struct bcm4377_data;

/*
 * Chip-specific configuration struct
 *
 * id: Chip id (e.g. 0x4377 for BCM4377)
 * otp_offset: Offset to the start of the OTP inside BAR0
 * bar0_window1: Backplane address mapped to the first window in BAR0
 * bar0_window2: Backplane address mapped to the second window in BAR0
 * bar0_core2_window2: Optional backplane address mapped to the second core's
 *                     second window in BAR0
 * has_bar0_core2_window2: Set to true if this chip requires the second core's
 *                         second window to be configured
 * clear_pciecfg_subsystem_ctrl_bit19: Set to true if bit 19 in the
 *                                     vendor-specific subsystem control
 *                                     register has to be cleared
 * disable_aspm: Set to true if ASPM must be disabled due to hardware errata
 * broken_ext_scan: Set to true if the chip erroneously claims to support
 *                  extended scanning
 * broken_mws_transport_config: Set to true if the chip erroneously claims to
 *                              support MWS Transport Configuration
 * send_calibration: Optional callback to send calibration data
 * send_ptb: Callback to send "PTB" regulatory/calibration data
 */
struct bcm4377_hw {
	unsigned int id;

	u32 otp_offset;

	u32 bar0_window1;
	u32 bar0_window2;
	u32 bar0_core2_window2;

	unsigned long has_bar0_core2_window2 : 1;
	unsigned long clear_pciecfg_subsystem_ctrl_bit19 : 1;
	unsigned long disable_aspm : 1;
	unsigned long broken_ext_scan : 1;
	unsigned long broken_mws_transport_config : 1;

	int (*send_calibration)(struct bcm4377_data *bcm4377);
	int (*send_ptb)(struct bcm4377_data *bcm4377,
			const struct firmware *fw);
};

static const struct bcm4377_hw bcm4377_hw_variants[];
static const struct dmi_system_id bcm4377_dmi_board_table[];

/*
 * Private struct associated with each device containing global state
 *
 * pdev: Pointer to associated struct pci_dev
 * hdev: Pointer to associated struct hci_dev
 * bar0: iomem pointing to BAR0
 * bar2: iomem pointing to BAR2
 * bootstage: Current value of the bootstage
 * rti_status: Current "RTI" status value
 * hw: Pointer to chip-specific struct bcm4377_hw
 * taurus_cal_blob: "Taurus" calibration blob used for some chips
 * taurus_cal_size: "Taurus" calibration blob size
 * taurus_beamforming_cal_blob: "Taurus" beamforming calibration blob used for
 *                              some chips
 * taurus_beamforming_cal_size: "Taurus" beamforming calibration blob size
 * stepping: Chip stepping read from OTP; used for firmware selection
 * vendor: Antenna vendor read from OTP; used for firmware selection
 * board_type: Board type from FDT or DMI match; used for firmware selection
 * event: Event for changed bootstage or rti_status; used for booting firmware
 * ctx: "Converged IPC" context
 * ctx_dma: "Converged IPC" context DMA address
 * ring_state: Shared memory buffer containing ring head and tail indexes
 * ring_state_dma: DMA address for ring_state
 * {control,hci_acl,sco}_ack_ring: Completion rings used to acknowledge messages
 * {hci_acl,sco}_event_ring: Completion rings used for device->host messages
 * control_h2d_ring: Transfer ring used for control messages
 * {hci,sco,acl}_h2d_ring: Transfer ring used to transfer HCI frames
 * {hci,sco,acl}_d2h_ring: Transfer ring used to receive HCI frames in the
 *                         corresponding completion ring
 */
struct bcm4377_data {
	struct pci_dev *pdev;
	struct hci_dev *hdev;

	void __iomem *bar0;
	void __iomem *bar2;

	u32 bootstage;
	u32 rti_status;

	const struct bcm4377_hw *hw;

	const void *taurus_cal_blob;
	int taurus_cal_size;
	const void *taurus_beamforming_cal_blob;
	int taurus_beamforming_cal_size;

	char stepping[BCM4377_OTP_MAX_PARAM_LEN];
	char vendor[BCM4377_OTP_MAX_PARAM_LEN];
	const char *board_type;

	struct completion event;

	struct bcm4377_context *ctx;
	dma_addr_t ctx_dma;

	struct
bcm4377_ring_state *ring_state; dma_addr_t ring_state_dma; /* * The HCI and ACL rings have to be merged because this structure is * hardcoded in the firmware. */ struct bcm4377_completion_ring control_ack_ring; struct bcm4377_completion_ring hci_acl_ack_ring; struct bcm4377_completion_ring hci_acl_event_ring; struct bcm4377_completion_ring sco_ack_ring; struct bcm4377_completion_ring sco_event_ring; struct bcm4377_transfer_ring control_h2d_ring; struct bcm4377_transfer_ring hci_h2d_ring; struct bcm4377_transfer_ring hci_d2h_ring; struct bcm4377_transfer_ring sco_h2d_ring; struct bcm4377_transfer_ring sco_d2h_ring; struct bcm4377_transfer_ring acl_h2d_ring; struct bcm4377_transfer_ring acl_d2h_ring; }; static void bcm4377_ring_doorbell(struct bcm4377_data *bcm4377, u8 doorbell, u16 val) { u32 db = 0; db |= FIELD_PREP(BCM4377_BAR0_DOORBELL_VALUE, val); db |= FIELD_PREP(BCM4377_BAR0_DOORBELL_IDX, doorbell); db |= BCM4377_BAR0_DOORBELL_RING; dev_dbg(&bcm4377->pdev->dev, "write %d to doorbell #%d (0x%x)\n", val, doorbell, db); iowrite32(db, bcm4377->bar0 + BCM4377_BAR0_DOORBELL); } static int bcm4377_extract_msgid(struct bcm4377_data *bcm4377, struct bcm4377_transfer_ring *ring, u16 raw_msgid, u8 *msgid) { u8 generation = FIELD_GET(BCM4377_MSGID_GENERATION, raw_msgid); *msgid = FIELD_GET(BCM4377_MSGID_ID, raw_msgid); if (generation != ring->generation) { dev_warn( &bcm4377->pdev->dev, "invalid message generation %d should be %d in entry for ring %d\n", generation, ring->generation, ring->ring_id); return -EINVAL; } if (*msgid >= ring->n_entries) { dev_warn(&bcm4377->pdev->dev, "invalid message id in entry for ring %d: %d > %d\n", ring->ring_id, *msgid, ring->n_entries); return -EINVAL; } return 0; } static void bcm4377_handle_event(struct bcm4377_data *bcm4377, struct bcm4377_transfer_ring *ring, u16 raw_msgid, u8 entry_flags, u8 type, void *payload, size_t len) { struct sk_buff *skb; u16 head; u8 msgid; unsigned long flags; spin_lock_irqsave(&ring->lock, flags); if (!ring->enabled) { dev_warn(&bcm4377->pdev->dev, "event for disabled transfer ring %d\n", ring->ring_id); goto out; } if (ring->d2h_buffers_only && entry_flags & BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED) { if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid)) goto out; if (len > ring->mapped_payload_size) { dev_warn( &bcm4377->pdev->dev, "invalid payload len in event for ring %d: %zu > %zu\n", ring->ring_id, len, ring->mapped_payload_size); goto out; } payload = ring->payloads + msgid * ring->mapped_payload_size; } skb = bt_skb_alloc(len, GFP_ATOMIC); if (!skb) goto out; memcpy(skb_put(skb, len), payload, len); hci_skb_pkt_type(skb) = type; hci_recv_frame(bcm4377->hdev, skb); out: head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]); head = (head + 1) % ring->n_entries; bcm4377->ring_state->xfer_ring_head[ring->ring_id] = cpu_to_le16(head); bcm4377_ring_doorbell(bcm4377, ring->doorbell, head); spin_unlock_irqrestore(&ring->lock, flags); } static void bcm4377_handle_ack(struct bcm4377_data *bcm4377, struct bcm4377_transfer_ring *ring, u16 raw_msgid) { unsigned long flags; u8 msgid; spin_lock_irqsave(&ring->lock, flags); if (bcm4377_extract_msgid(bcm4377, ring, raw_msgid, &msgid)) goto unlock; if (!test_bit(msgid, ring->msgids)) { dev_warn( &bcm4377->pdev->dev, "invalid message id in ack for ring %d: %d is not used\n", ring->ring_id, msgid); goto unlock; } if (ring->allow_wait && ring->events[msgid]) { complete(ring->events[msgid]); ring->events[msgid] = NULL; } bitmap_release_region(ring->msgids, msgid, 
ring->n_entries); unlock: spin_unlock_irqrestore(&ring->lock, flags); } static void bcm4377_handle_completion(struct bcm4377_data *bcm4377, struct bcm4377_completion_ring *ring, u16 pos) { struct bcm4377_completion_ring_entry *entry; u16 msg_id, transfer_ring; size_t entry_size, data_len; void *data; if (pos >= ring->n_entries) { dev_warn(&bcm4377->pdev->dev, "invalid offset %d for completion ring %d\n", pos, ring->ring_id); return; } entry_size = sizeof(*entry) + ring->payload_size; entry = ring->ring + pos * entry_size; data = ring->ring + pos * entry_size + sizeof(*entry); data_len = le32_to_cpu(entry->len); msg_id = le16_to_cpu(entry->msg_id); transfer_ring = le16_to_cpu(entry->ring_id); if ((ring->transfer_rings & BIT(transfer_ring)) == 0) { dev_warn( &bcm4377->pdev->dev, "invalid entry at offset %d for transfer ring %d in completion ring %d\n", pos, transfer_ring, ring->ring_id); return; } dev_dbg(&bcm4377->pdev->dev, "entry in completion ring %d for transfer ring %d with msg_id %d\n", ring->ring_id, transfer_ring, msg_id); switch (transfer_ring) { case BCM4377_XFER_RING_CONTROL: bcm4377_handle_ack(bcm4377, &bcm4377->control_h2d_ring, msg_id); break; case BCM4377_XFER_RING_HCI_H2D: bcm4377_handle_ack(bcm4377, &bcm4377->hci_h2d_ring, msg_id); break; case BCM4377_XFER_RING_SCO_H2D: bcm4377_handle_ack(bcm4377, &bcm4377->sco_h2d_ring, msg_id); break; case BCM4377_XFER_RING_ACL_H2D: bcm4377_handle_ack(bcm4377, &bcm4377->acl_h2d_ring, msg_id); break; case BCM4377_XFER_RING_HCI_D2H: bcm4377_handle_event(bcm4377, &bcm4377->hci_d2h_ring, msg_id, entry->flags, HCI_EVENT_PKT, data, data_len); break; case BCM4377_XFER_RING_SCO_D2H: bcm4377_handle_event(bcm4377, &bcm4377->sco_d2h_ring, msg_id, entry->flags, HCI_SCODATA_PKT, data, data_len); break; case BCM4377_XFER_RING_ACL_D2H: bcm4377_handle_event(bcm4377, &bcm4377->acl_d2h_ring, msg_id, entry->flags, HCI_ACLDATA_PKT, data, data_len); break; default: dev_warn( &bcm4377->pdev->dev, "entry in completion ring %d for unknown transfer ring %d with msg_id %d\n", ring->ring_id, transfer_ring, msg_id); } } static void bcm4377_poll_completion_ring(struct bcm4377_data *bcm4377, struct bcm4377_completion_ring *ring) { u16 tail; __le16 *heads = bcm4377->ring_state->completion_ring_head; __le16 *tails = bcm4377->ring_state->completion_ring_tail; if (!ring->enabled) return; tail = le16_to_cpu(tails[ring->ring_id]); dev_dbg(&bcm4377->pdev->dev, "completion ring #%d: head: %d, tail: %d\n", ring->ring_id, le16_to_cpu(heads[ring->ring_id]), tail); while (tail != le16_to_cpu(READ_ONCE(heads[ring->ring_id]))) { /* * ensure the CPU doesn't speculate through the comparison. * otherwise it might already read the (empty) queue entry * before the updated head has been loaded and checked. 
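	 *
	 * The loop around this barrier is the usual head/tail consumer
	 * pattern; as a sketch (mirroring the code below, not additional
	 * logic):
	 *
	 *	while (tail != READ_ONCE(head)) {
	 *		dma_rmb();
	 *		handle_entry(ring, tail);
	 *		tail = (tail + 1) % n_entries;
	 *	}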
*/ dma_rmb(); bcm4377_handle_completion(bcm4377, ring, tail); tail = (tail + 1) % ring->n_entries; tails[ring->ring_id] = cpu_to_le16(tail); } } static irqreturn_t bcm4377_irq(int irq, void *data) { struct bcm4377_data *bcm4377 = data; u32 bootstage, rti_status; bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE); rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS); if (bootstage != bcm4377->bootstage || rti_status != bcm4377->rti_status) { dev_dbg(&bcm4377->pdev->dev, "bootstage = %d -> %d, rti state = %d -> %d\n", bcm4377->bootstage, bootstage, bcm4377->rti_status, rti_status); complete(&bcm4377->event); bcm4377->bootstage = bootstage; bcm4377->rti_status = rti_status; } if (rti_status > 2) dev_err(&bcm4377->pdev->dev, "RTI status is %d\n", rti_status); bcm4377_poll_completion_ring(bcm4377, &bcm4377->control_ack_ring); bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring); bcm4377_poll_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring); bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_ack_ring); bcm4377_poll_completion_ring(bcm4377, &bcm4377->sco_event_ring); return IRQ_HANDLED; } static int bcm4377_enqueue(struct bcm4377_data *bcm4377, struct bcm4377_transfer_ring *ring, void *data, size_t len, bool wait) { unsigned long flags; struct bcm4377_xfer_ring_entry *entry; void *payload; size_t offset; u16 head, tail, new_head; u16 raw_msgid; int ret, msgid; DECLARE_COMPLETION_ONSTACK(event); if (len > ring->payload_size && len > ring->mapped_payload_size) { dev_warn( &bcm4377->pdev->dev, "payload len %zu is too large for ring %d (max is %zu or %zu)\n", len, ring->ring_id, ring->payload_size, ring->mapped_payload_size); return -EINVAL; } if (wait && !ring->allow_wait) return -EINVAL; if (ring->virtual) return -EINVAL; spin_lock_irqsave(&ring->lock, flags); head = le16_to_cpu(bcm4377->ring_state->xfer_ring_head[ring->ring_id]); tail = le16_to_cpu(bcm4377->ring_state->xfer_ring_tail[ring->ring_id]); new_head = (head + 1) % ring->n_entries; if (new_head == tail) { dev_warn(&bcm4377->pdev->dev, "can't send message because ring %d is full\n", ring->ring_id); ret = -EINVAL; goto out; } msgid = bitmap_find_free_region(ring->msgids, ring->n_entries, 0); if (msgid < 0) { dev_warn(&bcm4377->pdev->dev, "can't find message id for ring %d\n", ring->ring_id); ret = -EINVAL; goto out; } raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION, ring->generation); raw_msgid |= FIELD_PREP(BCM4377_MSGID_ID, msgid); offset = head * (sizeof(*entry) + ring->payload_size); entry = ring->ring + offset; memset(entry, 0, sizeof(*entry)); entry->id = cpu_to_le16(raw_msgid); entry->len = cpu_to_le16(len); if (len <= ring->payload_size) { entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_IN_FOOTER; payload = ring->ring + offset + sizeof(*entry); } else { entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED; entry->payload = cpu_to_le64(ring->payloads_dma + msgid * ring->mapped_payload_size); payload = ring->payloads + msgid * ring->mapped_payload_size; } memcpy(payload, data, len); if (wait) ring->events[msgid] = &event; /* * The 4377 chips stop responding to any commands as soon as they * have been idle for a while. Poking the sleep control register here * makes them come alive again. 
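	 *
	 * The same register also drives power management: bcm4377_suspend()
	 * below writes BCM4377_BAR0_SLEEP_CONTROL_QUIESCE and
	 * bcm4377_resume() writes BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE.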
*/ iowrite32(BCM4377_BAR0_SLEEP_CONTROL_AWAKE, bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL); dev_dbg(&bcm4377->pdev->dev, "updating head for transfer queue #%d to %d\n", ring->ring_id, new_head); bcm4377->ring_state->xfer_ring_head[ring->ring_id] = cpu_to_le16(new_head); if (!ring->sync) bcm4377_ring_doorbell(bcm4377, ring->doorbell, new_head); ret = 0; out: spin_unlock_irqrestore(&ring->lock, flags); if (ret == 0 && wait) { ret = wait_for_completion_interruptible_timeout( &event, BCM4377_TIMEOUT); if (ret == 0) ret = -ETIMEDOUT; else if (ret > 0) ret = 0; spin_lock_irqsave(&ring->lock, flags); ring->events[msgid] = NULL; spin_unlock_irqrestore(&ring->lock, flags); } return ret; } static int bcm4377_create_completion_ring(struct bcm4377_data *bcm4377, struct bcm4377_completion_ring *ring) { struct bcm4377_create_completion_ring_msg msg; int ret; if (ring->enabled) { dev_warn(&bcm4377->pdev->dev, "completion ring %d already enabled\n", ring->ring_id); return 0; } memset(ring->ring, 0, ring->n_entries * (sizeof(struct bcm4377_completion_ring_entry) + ring->payload_size)); memset(&msg, 0, sizeof(msg)); msg.msg_type = BCM4377_CONTROL_MSG_CREATE_COMPLETION_RING; msg.id = cpu_to_le16(ring->ring_id); msg.id_again = cpu_to_le16(ring->ring_id); msg.ring_iova = cpu_to_le64(ring->ring_dma); msg.n_elements = cpu_to_le16(ring->n_entries); msg.intmod_bytes = cpu_to_le32(0xffffffff); msg.unk = cpu_to_le32(0xffffffff); msg.intmod_delay = cpu_to_le16(ring->delay); msg.footer_size = ring->payload_size / 4; ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, sizeof(msg), true); if (!ret) ring->enabled = true; return ret; } static int bcm4377_destroy_completion_ring(struct bcm4377_data *bcm4377, struct bcm4377_completion_ring *ring) { struct bcm4377_destroy_completion_ring_msg msg; int ret; memset(&msg, 0, sizeof(msg)); msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_COMPLETION_RING; msg.ring_id = cpu_to_le16(ring->ring_id); ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, sizeof(msg), true); if (ret) dev_warn(&bcm4377->pdev->dev, "failed to destroy completion ring %d\n", ring->ring_id); ring->enabled = false; return ret; } static int bcm4377_create_transfer_ring(struct bcm4377_data *bcm4377, struct bcm4377_transfer_ring *ring) { struct bcm4377_create_transfer_ring_msg msg; u16 flags = 0; int ret, i; unsigned long spinlock_flags; if (ring->virtual) flags |= BCM4377_XFER_RING_FLAG_VIRTUAL; if (ring->sync) flags |= BCM4377_XFER_RING_FLAG_SYNC; spin_lock_irqsave(&ring->lock, spinlock_flags); memset(&msg, 0, sizeof(msg)); msg.msg_type = BCM4377_CONTROL_MSG_CREATE_XFER_RING; msg.ring_id = cpu_to_le16(ring->ring_id); msg.ring_id_again = cpu_to_le16(ring->ring_id); msg.ring_iova = cpu_to_le64(ring->ring_dma); msg.n_elements = cpu_to_le16(ring->n_entries); msg.completion_ring_id = cpu_to_le16(ring->completion_ring); msg.doorbell = cpu_to_le16(ring->doorbell); msg.flags = cpu_to_le16(flags); msg.footer_size = ring->payload_size / 4; bcm4377->ring_state->xfer_ring_head[ring->ring_id] = 0; bcm4377->ring_state->xfer_ring_tail[ring->ring_id] = 0; ring->generation++; spin_unlock_irqrestore(&ring->lock, spinlock_flags); ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, sizeof(msg), true); spin_lock_irqsave(&ring->lock, spinlock_flags); if (ring->d2h_buffers_only) { for (i = 0; i < ring->n_entries; ++i) { struct bcm4377_xfer_ring_entry *entry = ring->ring + i * sizeof(*entry); u16 raw_msgid = FIELD_PREP(BCM4377_MSGID_GENERATION, ring->generation); raw_msgid |= 
FIELD_PREP(BCM4377_MSGID_ID, i); memset(entry, 0, sizeof(*entry)); entry->id = cpu_to_le16(raw_msgid); entry->len = cpu_to_le16(ring->mapped_payload_size); entry->flags = BCM4377_XFER_RING_FLAG_PAYLOAD_MAPPED; entry->payload = cpu_to_le64(ring->payloads_dma + i * ring->mapped_payload_size); } } /* * send some messages if this is a device->host ring to allow the device * to reply by acknowledging them in the completion ring */ if (ring->virtual || ring->d2h_buffers_only) { bcm4377->ring_state->xfer_ring_head[ring->ring_id] = cpu_to_le16(0xf); bcm4377_ring_doorbell(bcm4377, ring->doorbell, 0xf); } ring->enabled = true; spin_unlock_irqrestore(&ring->lock, spinlock_flags); return ret; } static int bcm4377_destroy_transfer_ring(struct bcm4377_data *bcm4377, struct bcm4377_transfer_ring *ring) { struct bcm4377_destroy_transfer_ring_msg msg; int ret; memset(&msg, 0, sizeof(msg)); msg.msg_type = BCM4377_CONTROL_MSG_DESTROY_XFER_RING; msg.ring_id = cpu_to_le16(ring->ring_id); ret = bcm4377_enqueue(bcm4377, &bcm4377->control_h2d_ring, &msg, sizeof(msg), true); if (ret) dev_warn(&bcm4377->pdev->dev, "failed to destroy transfer ring %d\n", ring->ring_id); ring->enabled = false; return ret; } static int __bcm4378_send_calibration_chunk(struct bcm4377_data *bcm4377, const void *data, size_t data_len, u16 blocks_left) { struct bcm4378_hci_send_calibration_cmd cmd; struct sk_buff *skb; if (data_len > sizeof(cmd.data)) return -EINVAL; memset(&cmd, 0, sizeof(cmd)); cmd.unk = 0x03; cmd.blocks_left = cpu_to_le16(blocks_left); memcpy(cmd.data, data, data_len); skb = __hci_cmd_sync(bcm4377->hdev, 0xfd97, sizeof(cmd), &cmd, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); return 0; } static int __bcm4378_send_calibration(struct bcm4377_data *bcm4377, const void *data, size_t data_size) { int ret; size_t i, left, transfer_len; size_t blocks = DIV_ROUND_UP(data_size, (size_t)BCM4378_CALIBRATION_CHUNK_SIZE); if (!data) { dev_err(&bcm4377->pdev->dev, "no calibration data available.\n"); return -ENOENT; } for (i = 0, left = data_size; i < blocks; ++i, left -= transfer_len) { transfer_len = min_t(size_t, left, BCM4378_CALIBRATION_CHUNK_SIZE); ret = __bcm4378_send_calibration_chunk( bcm4377, data + i * BCM4378_CALIBRATION_CHUNK_SIZE, transfer_len, blocks - i - 1); if (ret) { dev_err(&bcm4377->pdev->dev, "send calibration chunk failed with %d\n", ret); return ret; } } return 0; } static int bcm4378_send_calibration(struct bcm4377_data *bcm4377) { if ((strcmp(bcm4377->stepping, "b1") == 0) || strcmp(bcm4377->stepping, "b3") == 0) return __bcm4378_send_calibration( bcm4377, bcm4377->taurus_beamforming_cal_blob, bcm4377->taurus_beamforming_cal_size); else return __bcm4378_send_calibration(bcm4377, bcm4377->taurus_cal_blob, bcm4377->taurus_cal_size); } static int bcm4387_send_calibration(struct bcm4377_data *bcm4377) { if (strcmp(bcm4377->stepping, "c2") == 0) return __bcm4378_send_calibration( bcm4377, bcm4377->taurus_beamforming_cal_blob, bcm4377->taurus_beamforming_cal_size); else return __bcm4378_send_calibration(bcm4377, bcm4377->taurus_cal_blob, bcm4377->taurus_cal_size); } static const struct firmware *bcm4377_request_blob(struct bcm4377_data *bcm4377, const char *suffix) { const struct firmware *fw; char name0[64], name1[64]; int ret; snprintf(name0, sizeof(name0), "brcm/brcmbt%04x%s-%s-%s.%s", bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type, bcm4377->vendor, suffix); snprintf(name1, sizeof(name1), "brcm/brcmbt%04x%s-%s.%s", bcm4377->hw->id, bcm4377->stepping, bcm4377->board_type, 
suffix); dev_dbg(&bcm4377->pdev->dev, "Trying to load firmware: '%s' or '%s'\n", name0, name1); ret = firmware_request_nowarn(&fw, name0, &bcm4377->pdev->dev); if (!ret) return fw; ret = firmware_request_nowarn(&fw, name1, &bcm4377->pdev->dev); if (!ret) return fw; dev_err(&bcm4377->pdev->dev, "Unable to load firmware; tried '%s' and '%s'\n", name0, name1); return NULL; } static int bcm4377_send_ptb(struct bcm4377_data *bcm4377, const struct firmware *fw) { struct sk_buff *skb; skb = __hci_cmd_sync(bcm4377->hdev, 0xfd98, fw->size, fw->data, HCI_INIT_TIMEOUT); /* * This command seems to always fail on more recent firmware versions * (even in traces taken from the macOS driver). It's unclear why this * happens but because the PTB file contains calibration and/or * regulatory data and may be required on older firmware we still try to * send it here just in case and just ignore if it fails. */ if (!IS_ERR(skb)) kfree_skb(skb); return 0; } static int bcm4378_send_ptb_chunk(struct bcm4377_data *bcm4377, const void *data, size_t data_len, u16 blocks_left) { struct bcm4378_hci_send_ptb_cmd cmd; struct sk_buff *skb; if (data_len > BCM4378_PTB_CHUNK_SIZE) return -EINVAL; memset(&cmd, 0, sizeof(cmd)); cmd.blocks_left = cpu_to_le16(blocks_left); memcpy(cmd.data, data, data_len); skb = __hci_cmd_sync(bcm4377->hdev, 0xfe0d, sizeof(cmd), &cmd, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); return 0; } static int bcm4378_send_ptb(struct bcm4377_data *bcm4377, const struct firmware *fw) { size_t chunks = DIV_ROUND_UP(fw->size, (size_t)BCM4378_PTB_CHUNK_SIZE); size_t i, left, transfer_len; int ret; for (i = 0, left = fw->size; i < chunks; ++i, left -= transfer_len) { transfer_len = min_t(size_t, left, BCM4378_PTB_CHUNK_SIZE); dev_dbg(&bcm4377->pdev->dev, "sending ptb chunk %zu/%zu\n", i + 1, chunks); ret = bcm4378_send_ptb_chunk( bcm4377, fw->data + i * BCM4378_PTB_CHUNK_SIZE, transfer_len, chunks - i - 1); if (ret) { dev_err(&bcm4377->pdev->dev, "sending ptb chunk %zu failed (%d)", i, ret); return ret; } } return 0; } static int bcm4377_hci_open(struct hci_dev *hdev) { struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev); int ret; dev_dbg(&bcm4377->pdev->dev, "creating rings\n"); ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring); if (ret) return ret; ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring); if (ret) goto destroy_hci_acl_ack; ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_ack_ring); if (ret) goto destroy_hci_acl_event; ret = bcm4377_create_completion_ring(bcm4377, &bcm4377->sco_event_ring); if (ret) goto destroy_sco_ack; dev_dbg(&bcm4377->pdev->dev, "all completion rings successfully created!\n"); ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring); if (ret) goto destroy_sco_event; ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring); if (ret) goto destroy_hci_h2d; ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring); if (ret) goto destroy_hci_d2h; ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring); if (ret) goto destroy_sco_h2d; ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring); if (ret) goto destroy_sco_d2h; ret = bcm4377_create_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring); if (ret) goto destroy_acl_h2d; dev_dbg(&bcm4377->pdev->dev, "all transfer rings successfully created!\n"); return 0; destroy_acl_h2d: bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring); destroy_sco_d2h: bcm4377_destroy_transfer_ring(bcm4377, 
&bcm4377->sco_d2h_ring);
destroy_sco_h2d:
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
destroy_hci_d2h:
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
destroy_hci_h2d:
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
destroy_sco_event:
	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring);
destroy_sco_ack:
	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
destroy_hci_acl_event:
	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring);
destroy_hci_acl_ack:
	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring);

	dev_err(&bcm4377->pdev->dev, "Creating rings failed with %d\n", ret);
	return ret;
}

static int bcm4377_hci_close(struct hci_dev *hdev)
{
	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);

	dev_dbg(&bcm4377->pdev->dev, "destroying rings in hci_close\n");

	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring);
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
	bcm4377_destroy_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);

	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_event_ring);
	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_event_ring);
	bcm4377_destroy_completion_ring(bcm4377, &bcm4377->hci_acl_ack_ring);

	return 0;
}

static bool bcm4377_is_valid_bdaddr(struct bcm4377_data *bcm4377,
				    bdaddr_t *addr)
{
	if (addr->b[0] != 0x93)
		return true;
	if (addr->b[1] != 0x76)
		return true;
	if (addr->b[2] != 0x00)
		return true;
	if (addr->b[4] != (bcm4377->hw->id & 0xff))
		return true;
	if (addr->b[5] != (bcm4377->hw->id >> 8))
		return true;
	return false;
}

static int bcm4377_check_bdaddr(struct bcm4377_data *bcm4377)
{
	struct hci_rp_read_bd_addr *bda;
	struct sk_buff *skb;

	skb = __hci_cmd_sync(bcm4377->hdev, HCI_OP_READ_BD_ADDR, 0, NULL,
			     HCI_INIT_TIMEOUT);
	if (IS_ERR(skb)) {
		int err = PTR_ERR(skb);

		dev_err(&bcm4377->pdev->dev, "HCI_OP_READ_BD_ADDR failed (%d)",
			err);
		return err;
	}

	if (skb->len != sizeof(*bda)) {
		dev_err(&bcm4377->pdev->dev,
			"HCI_OP_READ_BD_ADDR reply length invalid");
		kfree_skb(skb);
		return -EIO;
	}

	bda = (struct hci_rp_read_bd_addr *)skb->data;
	if (!bcm4377_is_valid_bdaddr(bcm4377, &bda->bdaddr))
		set_bit(HCI_QUIRK_INVALID_BDADDR, &bcm4377->hdev->quirks);

	kfree_skb(skb);
	return 0;
}

static int bcm4377_hci_setup(struct hci_dev *hdev)
{
	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
	const struct firmware *fw;
	int ret;

	if (bcm4377->hw->send_calibration) {
		ret = bcm4377->hw->send_calibration(bcm4377);
		if (ret)
			return ret;
	}

	fw = bcm4377_request_blob(bcm4377, "ptb");
	if (!fw) {
		dev_err(&bcm4377->pdev->dev, "failed to load PTB data");
		return -ENOENT;
	}

	ret = bcm4377->hw->send_ptb(bcm4377, fw);
	release_firmware(fw);
	if (ret)
		return ret;

	return bcm4377_check_bdaddr(bcm4377);
}

static int bcm4377_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev);
	struct bcm4377_transfer_ring *ring;
	int ret;

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		ring = &bcm4377->hci_h2d_ring;
		break;
	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		ring = &bcm4377->acl_h2d_ring;
		break;
	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		ring = &bcm4377->sco_h2d_ring;
		break;
	default:
		return -EILSEQ;
	}

	ret = bcm4377_enqueue(bcm4377, ring,
skb->data, skb->len, false); if (ret < 0) { hdev->stat.err_tx++; return ret; } hdev->stat.byte_tx += skb->len; kfree_skb(skb); return ret; } static int bcm4377_hci_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct bcm4377_data *bcm4377 = hci_get_drvdata(hdev); struct sk_buff *skb; int err; skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); dev_err(&bcm4377->pdev->dev, "Change address command failed (%d)", err); return err; } kfree_skb(skb); return 0; } static int bcm4377_alloc_transfer_ring(struct bcm4377_data *bcm4377, struct bcm4377_transfer_ring *ring) { size_t entry_size; spin_lock_init(&ring->lock); ring->payload_size = ALIGN(ring->payload_size, 4); ring->mapped_payload_size = ALIGN(ring->mapped_payload_size, 4); if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE) return -EINVAL; if (ring->n_entries > BCM4377_MAX_RING_SIZE) return -EINVAL; if (ring->virtual && ring->allow_wait) return -EINVAL; if (ring->d2h_buffers_only) { if (ring->virtual) return -EINVAL; if (ring->payload_size) return -EINVAL; if (!ring->mapped_payload_size) return -EINVAL; } if (ring->virtual) return 0; entry_size = ring->payload_size + sizeof(struct bcm4377_xfer_ring_entry); ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev, ring->n_entries * entry_size, &ring->ring_dma, GFP_KERNEL); if (!ring->ring) return -ENOMEM; if (ring->allow_wait) { ring->events = devm_kcalloc(&bcm4377->pdev->dev, ring->n_entries, sizeof(*ring->events), GFP_KERNEL); if (!ring->events) return -ENOMEM; } if (ring->mapped_payload_size) { ring->payloads = dmam_alloc_coherent( &bcm4377->pdev->dev, ring->n_entries * ring->mapped_payload_size, &ring->payloads_dma, GFP_KERNEL); if (!ring->payloads) return -ENOMEM; } return 0; } static int bcm4377_alloc_completion_ring(struct bcm4377_data *bcm4377, struct bcm4377_completion_ring *ring) { size_t entry_size; ring->payload_size = ALIGN(ring->payload_size, 4); if (ring->payload_size > BCM4377_XFER_RING_MAX_INPLACE_PAYLOAD_SIZE) return -EINVAL; if (ring->n_entries > BCM4377_MAX_RING_SIZE) return -EINVAL; entry_size = ring->payload_size + sizeof(struct bcm4377_completion_ring_entry); ring->ring = dmam_alloc_coherent(&bcm4377->pdev->dev, ring->n_entries * entry_size, &ring->ring_dma, GFP_KERNEL); if (!ring->ring) return -ENOMEM; return 0; } static int bcm4377_init_context(struct bcm4377_data *bcm4377) { struct device *dev = &bcm4377->pdev->dev; dma_addr_t peripheral_info_dma; bcm4377->ctx = dmam_alloc_coherent(dev, sizeof(*bcm4377->ctx), &bcm4377->ctx_dma, GFP_KERNEL); if (!bcm4377->ctx) return -ENOMEM; memset(bcm4377->ctx, 0, sizeof(*bcm4377->ctx)); bcm4377->ring_state = dmam_alloc_coherent(dev, sizeof(*bcm4377->ring_state), &bcm4377->ring_state_dma, GFP_KERNEL); if (!bcm4377->ring_state) return -ENOMEM; memset(bcm4377->ring_state, 0, sizeof(*bcm4377->ring_state)); bcm4377->ctx->version = cpu_to_le16(1); bcm4377->ctx->size = cpu_to_le16(sizeof(*bcm4377->ctx)); bcm4377->ctx->enabled_caps = cpu_to_le32(2); /* * The BT device will write 0x20 bytes of data to this buffer but * the exact contents are unknown. It only needs to exist for BT * to work such that we can just allocate and then ignore it. 
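	 *
	 * Only the DMA address is advertised to the device through
	 * ctx->peripheral_info_addr below; the CPU-side pointer is
	 * intentionally discarded and never read back.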
*/ if (!dmam_alloc_coherent(&bcm4377->pdev->dev, 0x20, &peripheral_info_dma, GFP_KERNEL)) return -ENOMEM; bcm4377->ctx->peripheral_info_addr = cpu_to_le64(peripheral_info_dma); bcm4377->ctx->xfer_ring_heads_addr = cpu_to_le64( bcm4377->ring_state_dma + offsetof(struct bcm4377_ring_state, xfer_ring_head)); bcm4377->ctx->xfer_ring_tails_addr = cpu_to_le64( bcm4377->ring_state_dma + offsetof(struct bcm4377_ring_state, xfer_ring_tail)); bcm4377->ctx->completion_ring_heads_addr = cpu_to_le64( bcm4377->ring_state_dma + offsetof(struct bcm4377_ring_state, completion_ring_head)); bcm4377->ctx->completion_ring_tails_addr = cpu_to_le64( bcm4377->ring_state_dma + offsetof(struct bcm4377_ring_state, completion_ring_tail)); bcm4377->ctx->n_completion_rings = cpu_to_le16(BCM4377_N_COMPLETION_RINGS); bcm4377->ctx->n_xfer_rings = cpu_to_le16(BCM4377_N_TRANSFER_RINGS); bcm4377->ctx->control_completion_ring_addr = cpu_to_le64(bcm4377->control_ack_ring.ring_dma); bcm4377->ctx->control_completion_ring_n_entries = cpu_to_le16(bcm4377->control_ack_ring.n_entries); bcm4377->ctx->control_completion_ring_doorbell = cpu_to_le16(0xffff); bcm4377->ctx->control_completion_ring_msi = 0; bcm4377->ctx->control_completion_ring_header_size = 0; bcm4377->ctx->control_completion_ring_footer_size = 0; bcm4377->ctx->control_xfer_ring_addr = cpu_to_le64(bcm4377->control_h2d_ring.ring_dma); bcm4377->ctx->control_xfer_ring_n_entries = cpu_to_le16(bcm4377->control_h2d_ring.n_entries); bcm4377->ctx->control_xfer_ring_doorbell = cpu_to_le16(bcm4377->control_h2d_ring.doorbell); bcm4377->ctx->control_xfer_ring_msi = 0; bcm4377->ctx->control_xfer_ring_header_size = 0; bcm4377->ctx->control_xfer_ring_footer_size = bcm4377->control_h2d_ring.payload_size / 4; dev_dbg(&bcm4377->pdev->dev, "context initialized at IOVA %pad", &bcm4377->ctx_dma); return 0; } static int bcm4377_prepare_rings(struct bcm4377_data *bcm4377) { int ret; /* * Even though many of these settings appear to be configurable * when sending the "create ring" messages most of these are * actually hardcoded in some (and quite possibly all) firmware versions * and changing them on the host has no effect. * Specifically, this applies to at least the doorbells, the transfer * and completion ring ids and their mapping (e.g. both HCI and ACL * entries will always be queued in completion rings 1 and 2 no matter * what we configure here). */ bcm4377->control_ack_ring.ring_id = BCM4377_ACK_RING_CONTROL; bcm4377->control_ack_ring.n_entries = 32; bcm4377->control_ack_ring.transfer_rings = BIT(BCM4377_XFER_RING_CONTROL); bcm4377->hci_acl_ack_ring.ring_id = BCM4377_ACK_RING_HCI_ACL; bcm4377->hci_acl_ack_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES; bcm4377->hci_acl_ack_ring.transfer_rings = BIT(BCM4377_XFER_RING_HCI_H2D) | BIT(BCM4377_XFER_RING_ACL_H2D); bcm4377->hci_acl_ack_ring.delay = 1000; /* * A payload size of MAX_EVENT_PAYLOAD_SIZE is enough here since large * ACL packets will be transmitted inside buffers mapped via * acl_d2h_ring anyway. 
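	 *
	 * As a worked example of the resulting split (using the constants
	 * from the top of this file): an incoming event of up to
	 * MAX_EVENT_PAYLOAD_SIZE bytes arrives in this ring's footer, while
	 * an ACL frame larger than that is delivered through one of the
	 * MAX_ACL_PAYLOAD_SIZE mapped buffers of acl_d2h_ring configured
	 * further down.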
*/ bcm4377->hci_acl_event_ring.ring_id = BCM4377_EVENT_RING_HCI_ACL; bcm4377->hci_acl_event_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE; bcm4377->hci_acl_event_ring.n_entries = 2 * BCM4377_RING_N_ENTRIES; bcm4377->hci_acl_event_ring.transfer_rings = BIT(BCM4377_XFER_RING_HCI_D2H) | BIT(BCM4377_XFER_RING_ACL_D2H); bcm4377->hci_acl_event_ring.delay = 1000; bcm4377->sco_ack_ring.ring_id = BCM4377_ACK_RING_SCO; bcm4377->sco_ack_ring.n_entries = BCM4377_RING_N_ENTRIES; bcm4377->sco_ack_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_H2D); bcm4377->sco_event_ring.ring_id = BCM4377_EVENT_RING_SCO; bcm4377->sco_event_ring.payload_size = MAX_SCO_PAYLOAD_SIZE; bcm4377->sco_event_ring.n_entries = BCM4377_RING_N_ENTRIES; bcm4377->sco_event_ring.transfer_rings = BIT(BCM4377_XFER_RING_SCO_D2H); bcm4377->control_h2d_ring.ring_id = BCM4377_XFER_RING_CONTROL; bcm4377->control_h2d_ring.doorbell = BCM4377_DOORBELL_CONTROL; bcm4377->control_h2d_ring.payload_size = BCM4377_CONTROL_MSG_SIZE; bcm4377->control_h2d_ring.completion_ring = BCM4377_ACK_RING_CONTROL; bcm4377->control_h2d_ring.allow_wait = true; bcm4377->control_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; bcm4377->hci_h2d_ring.ring_id = BCM4377_XFER_RING_HCI_H2D; bcm4377->hci_h2d_ring.doorbell = BCM4377_DOORBELL_HCI_H2D; bcm4377->hci_h2d_ring.payload_size = MAX_EVENT_PAYLOAD_SIZE; bcm4377->hci_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL; bcm4377->hci_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; bcm4377->hci_d2h_ring.ring_id = BCM4377_XFER_RING_HCI_D2H; bcm4377->hci_d2h_ring.doorbell = BCM4377_DOORBELL_HCI_D2H; bcm4377->hci_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL; bcm4377->hci_d2h_ring.virtual = true; bcm4377->hci_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES; bcm4377->sco_h2d_ring.ring_id = BCM4377_XFER_RING_SCO_H2D; bcm4377->sco_h2d_ring.doorbell = BCM4377_DOORBELL_SCO; bcm4377->sco_h2d_ring.payload_size = MAX_SCO_PAYLOAD_SIZE; bcm4377->sco_h2d_ring.completion_ring = BCM4377_ACK_RING_SCO; bcm4377->sco_h2d_ring.sync = true; bcm4377->sco_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; bcm4377->sco_d2h_ring.ring_id = BCM4377_XFER_RING_SCO_D2H; bcm4377->sco_d2h_ring.doorbell = BCM4377_DOORBELL_SCO; bcm4377->sco_d2h_ring.completion_ring = BCM4377_EVENT_RING_SCO; bcm4377->sco_d2h_ring.virtual = true; bcm4377->sco_d2h_ring.sync = true; bcm4377->sco_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES; /* * This ring has to use mapped_payload_size because the largest ACL * packet doesn't fit inside the largest possible footer */ bcm4377->acl_h2d_ring.ring_id = BCM4377_XFER_RING_ACL_H2D; bcm4377->acl_h2d_ring.doorbell = BCM4377_DOORBELL_ACL_H2D; bcm4377->acl_h2d_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE; bcm4377->acl_h2d_ring.completion_ring = BCM4377_ACK_RING_HCI_ACL; bcm4377->acl_h2d_ring.n_entries = BCM4377_RING_N_ENTRIES; /* * This ring only contains empty buffers to be used by incoming * ACL packets that do not fit inside the footer of hci_acl_event_ring */ bcm4377->acl_d2h_ring.ring_id = BCM4377_XFER_RING_ACL_D2H; bcm4377->acl_d2h_ring.doorbell = BCM4377_DOORBELL_ACL_D2H; bcm4377->acl_d2h_ring.completion_ring = BCM4377_EVENT_RING_HCI_ACL; bcm4377->acl_d2h_ring.d2h_buffers_only = true; bcm4377->acl_d2h_ring.mapped_payload_size = MAX_ACL_PAYLOAD_SIZE; bcm4377->acl_d2h_ring.n_entries = BCM4377_RING_N_ENTRIES; /* * no need for any cleanup since this is only called from _probe * and only devres-managed allocations are used */ ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->control_h2d_ring); if (ret) return ret; ret = 
bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_h2d_ring);
	if (ret)
		return ret;

	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->hci_d2h_ring);
	if (ret)
		return ret;

	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_h2d_ring);
	if (ret)
		return ret;

	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->sco_d2h_ring);
	if (ret)
		return ret;

	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_h2d_ring);
	if (ret)
		return ret;

	ret = bcm4377_alloc_transfer_ring(bcm4377, &bcm4377->acl_d2h_ring);
	if (ret)
		return ret;

	ret = bcm4377_alloc_completion_ring(bcm4377,
					    &bcm4377->control_ack_ring);
	if (ret)
		return ret;

	ret = bcm4377_alloc_completion_ring(bcm4377,
					    &bcm4377->hci_acl_ack_ring);
	if (ret)
		return ret;

	ret = bcm4377_alloc_completion_ring(bcm4377,
					    &bcm4377->hci_acl_event_ring);
	if (ret)
		return ret;

	ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_ack_ring);
	if (ret)
		return ret;

	ret = bcm4377_alloc_completion_ring(bcm4377, &bcm4377->sco_event_ring);
	if (ret)
		return ret;

	dev_dbg(&bcm4377->pdev->dev, "all rings allocated and prepared\n");

	return 0;
}

static int bcm4377_boot(struct bcm4377_data *bcm4377)
{
	const struct firmware *fw;
	void *bfr;
	dma_addr_t fw_dma;
	int ret = 0;
	u32 bootstage, rti_status;

	bootstage = ioread32(bcm4377->bar2 + BCM4377_BAR2_BOOTSTAGE);
	rti_status = ioread32(bcm4377->bar2 + BCM4377_BAR2_RTI_STATUS);

	if (bootstage != 0) {
		dev_err(&bcm4377->pdev->dev, "bootstage is %d and not 0\n",
			bootstage);
		return -EINVAL;
	}

	if (rti_status != 0) {
		dev_err(&bcm4377->pdev->dev, "RTI status is %d and not 0\n",
			rti_status);
		return -EINVAL;
	}

	fw = bcm4377_request_blob(bcm4377, "bin");
	if (!fw) {
		dev_err(&bcm4377->pdev->dev, "Failed to load firmware\n");
		return -ENOENT;
	}

	bfr = dma_alloc_coherent(&bcm4377->pdev->dev, fw->size, &fw_dma,
				 GFP_KERNEL);
	if (!bfr) {
		ret = -ENOMEM;
		goto out_release_fw;
	}

	memcpy(bfr, fw->data, fw->size);

	iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_LO);
	iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_HI);
	iowrite32(BCM4377_DMA_MASK,
		  bcm4377->bar0 + BCM4377_BAR0_HOST_WINDOW_SIZE);

	iowrite32(lower_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_LO);
	iowrite32(upper_32_bits(fw_dma), bcm4377->bar2 + BCM4377_BAR2_FW_HI);
	iowrite32(fw->size, bcm4377->bar2 + BCM4377_BAR2_FW_SIZE);
	iowrite32(0, bcm4377->bar0 + BCM4377_BAR0_FW_DOORBELL);

	dev_dbg(&bcm4377->pdev->dev, "waiting for firmware to boot\n");

	ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
							BCM4377_TIMEOUT);
	if (ret == 0) {
		ret = -ETIMEDOUT;
		goto out_dma_free;
	} else if (ret < 0) {
		goto out_dma_free;
	}

	if (bcm4377->bootstage != 2) {
		dev_err(&bcm4377->pdev->dev, "bootstage %d != 2\n",
			bcm4377->bootstage);
		ret = -ENXIO;
		goto out_dma_free;
	}

	dev_dbg(&bcm4377->pdev->dev, "firmware has booted (stage = %x)\n",
		bcm4377->bootstage);
	ret = 0;

out_dma_free:
	dma_free_coherent(&bcm4377->pdev->dev, fw->size, bfr, fw_dma);
out_release_fw:
	release_firmware(fw);

	return ret;
}

static int bcm4377_setup_rti(struct bcm4377_data *bcm4377)
{
	int ret;

	dev_dbg(&bcm4377->pdev->dev, "starting RTI\n");
	iowrite32(1, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL);

	ret = wait_for_completion_interruptible_timeout(&bcm4377->event,
							BCM4377_TIMEOUT);
	if (ret == 0) {
		dev_err(&bcm4377->pdev->dev,
			"timed out while waiting for RTI to transition to state 1");
		return -ETIMEDOUT;
	} else if (ret < 0) {
		return ret;
	}

	if (bcm4377->rti_status != 1) {
		dev_err(&bcm4377->pdev->dev, "RTI did not ack state 1 (%d)\n",
			bcm4377->rti_status);
		return -ENODEV;
	}
	dev_dbg(&bcm4377->pdev->dev, "RTI is in state 1\n");

	/* allow access
to the entire IOVA space again */ iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_LO); iowrite32(0, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_HI); iowrite32(BCM4377_DMA_MASK, bcm4377->bar2 + BCM4377_BAR2_RTI_WINDOW_SIZE); /* setup "Converged IPC" context */ iowrite32(lower_32_bits(bcm4377->ctx_dma), bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_LO); iowrite32(upper_32_bits(bcm4377->ctx_dma), bcm4377->bar2 + BCM4377_BAR2_CONTEXT_ADDR_HI); iowrite32(2, bcm4377->bar0 + BCM4377_BAR0_RTI_CONTROL); ret = wait_for_completion_interruptible_timeout(&bcm4377->event, BCM4377_TIMEOUT); if (ret == 0) { dev_err(&bcm4377->pdev->dev, "timed out while waiting for RTI to transition to state 2"); return -ETIMEDOUT; } else if (ret < 0) { return ret; } if (bcm4377->rti_status != 2) { dev_err(&bcm4377->pdev->dev, "RTI did not ack state 2 (%d)\n", bcm4377->rti_status); return -ENODEV; } dev_dbg(&bcm4377->pdev->dev, "RTI is in state 2; control ring is ready\n"); bcm4377->control_ack_ring.enabled = true; return 0; } static int bcm4377_parse_otp_board_params(struct bcm4377_data *bcm4377, char tag, const char *val, size_t len) { if (tag != 'V') return 0; if (len >= sizeof(bcm4377->vendor)) return -EINVAL; strscpy(bcm4377->vendor, val, len + 1); return 0; } static int bcm4377_parse_otp_chip_params(struct bcm4377_data *bcm4377, char tag, const char *val, size_t len) { size_t idx = 0; if (tag != 's') return 0; if (len >= sizeof(bcm4377->stepping)) return -EINVAL; while (len != 0) { bcm4377->stepping[idx] = tolower(val[idx]); if (val[idx] == '\0') return 0; idx++; len--; } bcm4377->stepping[idx] = '\0'; return 0; } static int bcm4377_parse_otp_str(struct bcm4377_data *bcm4377, const u8 *str, enum bcm4377_otp_params_type type) { const char *p; int ret; p = skip_spaces(str); while (*p) { char tag = *p++; const char *end; size_t len; if (*p++ != '=') /* implicit NUL check */ return -EINVAL; /* *p might be NUL here, if so end == p and len == 0 */ end = strchrnul(p, ' '); len = end - p; /* leave 1 byte for NUL in destination string */ if (len > (BCM4377_OTP_MAX_PARAM_LEN - 1)) return -EINVAL; switch (type) { case BCM4377_OTP_BOARD_PARAMS: ret = bcm4377_parse_otp_board_params(bcm4377, tag, p, len); break; case BCM4377_OTP_CHIP_PARAMS: ret = bcm4377_parse_otp_chip_params(bcm4377, tag, p, len); break; default: ret = -EINVAL; break; } if (ret) return ret; /* Skip to next arg, if any */ p = skip_spaces(end); } return 0; } static int bcm4377_parse_otp_sys_vendor(struct bcm4377_data *bcm4377, u8 *otp, size_t size) { int idx = 4; const char *chip_params; const char *board_params; int ret; /* 4-byte header and two empty strings */ if (size < 6) return -EINVAL; if (get_unaligned_le32(otp) != BCM4377_OTP_VENDOR_HDR) return -EINVAL; chip_params = &otp[idx]; /* Skip first string, including terminator */ idx += strnlen(chip_params, size - idx) + 1; if (idx >= size) return -EINVAL; board_params = &otp[idx]; /* Skip to terminator of second string */ idx += strnlen(board_params, size - idx); if (idx >= size) return -EINVAL; /* At this point both strings are guaranteed NUL-terminated */ dev_dbg(&bcm4377->pdev->dev, "OTP: chip_params='%s' board_params='%s'\n", chip_params, board_params); ret = bcm4377_parse_otp_str(bcm4377, chip_params, BCM4377_OTP_CHIP_PARAMS); if (ret) return ret; ret = bcm4377_parse_otp_str(bcm4377, board_params, BCM4377_OTP_BOARD_PARAMS); if (ret) return ret; if (!bcm4377->stepping[0] || !bcm4377->vendor[0]) return -EINVAL; dev_dbg(&bcm4377->pdev->dev, "OTP: stepping=%s, vendor=%s\n", bcm4377->stepping, bcm4377->vendor); 
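
	/*
	 * Illustrative example (values are made up): chip_params of "s=B1"
	 * and board_params of "V=m" would leave stepping "b1" (tag 's',
	 * lowercased) and vendor "m" (tag 'V') here, which then select the
	 * firmware file name in bcm4377_request_blob().
	 */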
return 0; } static int bcm4377_parse_otp(struct bcm4377_data *bcm4377) { u8 *otp; int i; int ret = -ENOENT; otp = kzalloc(BCM4377_OTP_SIZE, GFP_KERNEL); if (!otp) return -ENOMEM; for (i = 0; i < BCM4377_OTP_SIZE; ++i) otp[i] = ioread8(bcm4377->bar0 + bcm4377->hw->otp_offset + i); i = 0; while (i < (BCM4377_OTP_SIZE - 1)) { u8 type = otp[i]; u8 length = otp[i + 1]; if (type == 0) break; if ((i + 2 + length) > BCM4377_OTP_SIZE) break; switch (type) { case BCM4377_OTP_SYS_VENDOR: dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): SYS_VENDOR", i, length); ret = bcm4377_parse_otp_sys_vendor(bcm4377, &otp[i + 2], length); break; case BCM4377_OTP_CIS: dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): CIS", i, length); break; default: dev_dbg(&bcm4377->pdev->dev, "OTP @ 0x%x (%d): unknown", i, length); break; } i += 2 + length; } kfree(otp); return ret; } static int bcm4377_init_cfg(struct bcm4377_data *bcm4377) { int ret; u32 ctrl; ret = pci_write_config_dword(bcm4377->pdev, BCM4377_PCIECFG_BAR0_WINDOW1, bcm4377->hw->bar0_window1); if (ret) return ret; ret = pci_write_config_dword(bcm4377->pdev, BCM4377_PCIECFG_BAR0_WINDOW2, bcm4377->hw->bar0_window2); if (ret) return ret; ret = pci_write_config_dword( bcm4377->pdev, BCM4377_PCIECFG_BAR0_CORE2_WINDOW1, BCM4377_PCIECFG_BAR0_CORE2_WINDOW1_DEFAULT); if (ret) return ret; if (bcm4377->hw->has_bar0_core2_window2) { ret = pci_write_config_dword(bcm4377->pdev, BCM4377_PCIECFG_BAR0_CORE2_WINDOW2, bcm4377->hw->bar0_core2_window2); if (ret) return ret; } ret = pci_write_config_dword(bcm4377->pdev, BCM4377_PCIECFG_BAR2_WINDOW, BCM4377_PCIECFG_BAR2_WINDOW_DEFAULT); if (ret) return ret; ret = pci_read_config_dword(bcm4377->pdev, BCM4377_PCIECFG_SUBSYSTEM_CTRL, &ctrl); if (ret) return ret; if (bcm4377->hw->clear_pciecfg_subsystem_ctrl_bit19) ctrl &= ~BIT(19); ctrl |= BIT(16); return pci_write_config_dword(bcm4377->pdev, BCM4377_PCIECFG_SUBSYSTEM_CTRL, ctrl); } static int bcm4377_probe_dmi(struct bcm4377_data *bcm4377) { const struct dmi_system_id *board_type_dmi_id; board_type_dmi_id = dmi_first_match(bcm4377_dmi_board_table); if (board_type_dmi_id && board_type_dmi_id->driver_data) { bcm4377->board_type = board_type_dmi_id->driver_data; dev_dbg(&bcm4377->pdev->dev, "found board type via DMI match: %s\n", bcm4377->board_type); } return 0; } static int bcm4377_probe_of(struct bcm4377_data *bcm4377) { struct device_node *np = bcm4377->pdev->dev.of_node; int ret; if (!np) return 0; ret = of_property_read_string(np, "brcm,board-type", &bcm4377->board_type); if (ret) { dev_err(&bcm4377->pdev->dev, "no brcm,board-type property\n"); return ret; } bcm4377->taurus_beamforming_cal_blob = of_get_property(np, "brcm,taurus-bf-cal-blob", &bcm4377->taurus_beamforming_cal_size); if (!bcm4377->taurus_beamforming_cal_blob) { dev_err(&bcm4377->pdev->dev, "no brcm,taurus-bf-cal-blob property\n"); return -ENOENT; } bcm4377->taurus_cal_blob = of_get_property(np, "brcm,taurus-cal-blob", &bcm4377->taurus_cal_size); if (!bcm4377->taurus_cal_blob) { dev_err(&bcm4377->pdev->dev, "no brcm,taurus-cal-blob property\n"); return -ENOENT; } return 0; } static void bcm4377_disable_aspm(struct bcm4377_data *bcm4377) { pci_disable_link_state(bcm4377->pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1); /* * pci_disable_link_state can fail if either CONFIG_PCIEASPM is disabled * or if the BIOS hasn't handed over control to us. We must *always* * disable ASPM for this device due to hardware errata though. 
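	 *
	 * Clearing PCI_EXP_LNKCTL_ASPMC in the link control register below
	 * therefore serves as a direct fallback that takes effect even when
	 * pci_disable_link_state() was a no-op.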
*/ pcie_capability_clear_word(bcm4377->pdev, PCI_EXP_LNKCTL, PCI_EXP_LNKCTL_ASPMC); } static void bcm4377_pci_free_irq_vectors(void *data) { pci_free_irq_vectors(data); } static void bcm4377_hci_free_dev(void *data) { hci_free_dev(data); } static void bcm4377_hci_unregister_dev(void *data) { hci_unregister_dev(data); } static int bcm4377_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct bcm4377_data *bcm4377; struct hci_dev *hdev; int ret, irq; ret = dma_set_mask_and_coherent(&pdev->dev, BCM4377_DMA_MASK); if (ret) return ret; bcm4377 = devm_kzalloc(&pdev->dev, sizeof(*bcm4377), GFP_KERNEL); if (!bcm4377) return -ENOMEM; bcm4377->pdev = pdev; bcm4377->hw = &bcm4377_hw_variants[id->driver_data]; init_completion(&bcm4377->event); ret = bcm4377_prepare_rings(bcm4377); if (ret) return ret; ret = bcm4377_init_context(bcm4377); if (ret) return ret; ret = bcm4377_probe_dmi(bcm4377); if (ret) return ret; ret = bcm4377_probe_of(bcm4377); if (ret) return ret; if (!bcm4377->board_type) { dev_err(&pdev->dev, "unable to determine board type\n"); return -ENODEV; } if (bcm4377->hw->disable_aspm) bcm4377_disable_aspm(bcm4377); ret = pci_reset_function_locked(pdev); if (ret) dev_warn( &pdev->dev, "function level reset failed with %d; trying to continue anyway\n", ret); /* * If this number is too low and we try to access any BAR too * early the device will crash. Experiments have shown that * approximately 50 msec is the minimum amount we have to wait. * Let's double that to be safe. */ msleep(100); ret = pcim_enable_device(pdev); if (ret) return ret; pci_set_master(pdev); ret = bcm4377_init_cfg(bcm4377); if (ret) return ret; bcm4377->bar0 = pcim_iomap(pdev, 0, 0); if (!bcm4377->bar0) return -EBUSY; bcm4377->bar2 = pcim_iomap(pdev, 2, 0); if (!bcm4377->bar2) return -EBUSY; ret = bcm4377_parse_otp(bcm4377); if (ret) { dev_err(&pdev->dev, "Reading OTP failed with %d\n", ret); return ret; } /* * Legacy interrupts result in an IRQ storm because we don't know where * the interrupt mask and status registers for these chips are. * MSIs are acked automatically instead. 
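	 *
	 * Consequently exactly one MSI vector is requested below
	 * (pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI)) and legacy INTx
	 * is never used for these devices.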
*/ ret = pci_alloc_irq_vectors(pdev, 1, 1, PCI_IRQ_MSI); if (ret < 0) return -ENODEV; ret = devm_add_action_or_reset(&pdev->dev, bcm4377_pci_free_irq_vectors, pdev); if (ret) return ret; irq = pci_irq_vector(pdev, 0); if (irq <= 0) return -ENODEV; ret = devm_request_irq(&pdev->dev, irq, bcm4377_irq, 0, "bcm4377", bcm4377); if (ret) return ret; hdev = hci_alloc_dev(); if (!hdev) return -ENOMEM; ret = devm_add_action_or_reset(&pdev->dev, bcm4377_hci_free_dev, hdev); if (ret) return ret; bcm4377->hdev = hdev; hdev->bus = HCI_PCI; hdev->dev_type = HCI_PRIMARY; hdev->open = bcm4377_hci_open; hdev->close = bcm4377_hci_close; hdev->send = bcm4377_hci_send_frame; hdev->set_bdaddr = bcm4377_hci_set_bdaddr; hdev->setup = bcm4377_hci_setup; set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); if (bcm4377->hw->broken_mws_transport_config) set_bit(HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, &hdev->quirks); if (bcm4377->hw->broken_ext_scan) set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks); pci_set_drvdata(pdev, bcm4377); hci_set_drvdata(hdev, bcm4377); SET_HCIDEV_DEV(hdev, &pdev->dev); ret = bcm4377_boot(bcm4377); if (ret) return ret; ret = bcm4377_setup_rti(bcm4377); if (ret) return ret; ret = hci_register_dev(hdev); if (ret) return ret; return devm_add_action_or_reset(&pdev->dev, bcm4377_hci_unregister_dev, hdev); } static int bcm4377_suspend(struct pci_dev *pdev, pm_message_t state) { struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev); int ret; ret = hci_suspend_dev(bcm4377->hdev); if (ret) return ret; iowrite32(BCM4377_BAR0_SLEEP_CONTROL_QUIESCE, bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL); return 0; } static int bcm4377_resume(struct pci_dev *pdev) { struct bcm4377_data *bcm4377 = pci_get_drvdata(pdev); iowrite32(BCM4377_BAR0_SLEEP_CONTROL_UNQUIESCE, bcm4377->bar0 + BCM4377_BAR0_SLEEP_CONTROL); return hci_resume_dev(bcm4377->hdev); } static const struct dmi_system_id bcm4377_dmi_board_table[] = { { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir9,1"), }, .driver_data = "apple,formosa", }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro15,4"), }, .driver_data = "apple,formosa", }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,3"), }, .driver_data = "apple,formosa", }, {} }; static const struct bcm4377_hw bcm4377_hw_variants[] = { [BCM4377] = { .id = 0x4377, .otp_offset = 0x4120, .bar0_window1 = 0x1800b000, .bar0_window2 = 0x1810c000, .disable_aspm = true, .broken_ext_scan = true, .send_ptb = bcm4377_send_ptb, }, [BCM4378] = { .id = 0x4378, .otp_offset = 0x4120, .bar0_window1 = 0x18002000, .bar0_window2 = 0x1810a000, .bar0_core2_window2 = 0x18107000, .has_bar0_core2_window2 = true, .broken_mws_transport_config = true, .send_calibration = bcm4378_send_calibration, .send_ptb = bcm4378_send_ptb, }, [BCM4387] = { .id = 0x4387, .otp_offset = 0x413c, .bar0_window1 = 0x18002000, .bar0_window2 = 0x18109000, .bar0_core2_window2 = 0x18106000, .has_bar0_core2_window2 = true, .clear_pciecfg_subsystem_ctrl_bit19 = true, .broken_mws_transport_config = true, .send_calibration = bcm4387_send_calibration, .send_ptb = bcm4378_send_ptb, }, }; #define BCM4377_DEVID_ENTRY(id) \ { \ PCI_VENDOR_ID_BROADCOM, BCM##id##_DEVICE_ID, PCI_ANY_ID, \ PCI_ANY_ID, PCI_CLASS_NETWORK_OTHER << 8, 0xffff00, \ BCM##id \ } static const struct pci_device_id bcm4377_devid_table[] = { BCM4377_DEVID_ENTRY(4377), BCM4377_DEVID_ENTRY(4378), BCM4377_DEVID_ENTRY(4387), {}, }; 
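/*
 * For illustration (not in the original source): BCM4377_DEVID_ENTRY(4377)
 * above expands to a struct pci_device_id initializer equivalent to
 *
 *   { .vendor = PCI_VENDOR_ID_BROADCOM, .device = BCM4377_DEVICE_ID,
 *     .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID,
 *     .class = PCI_CLASS_NETWORK_OTHER << 8, .class_mask = 0xffff00,
 *     .driver_data = BCM4377 }
 *
 * The class mask ignores the low byte (the programming interface), and
 * driver_data is the index into bcm4377_hw_variants[] used by probe.
 */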
MODULE_DEVICE_TABLE(pci, bcm4377_devid_table); static struct pci_driver bcm4377_pci_driver = { .name = "hci_bcm4377", .id_table = bcm4377_devid_table, .probe = bcm4377_probe, .suspend = bcm4377_suspend, .resume = bcm4377_resume, }; module_pci_driver(bcm4377_pci_driver); MODULE_AUTHOR("Sven Peter <[email protected]>"); MODULE_DESCRIPTION("Bluetooth support for Broadcom 4377/4378/4387 devices"); MODULE_LICENSE("Dual MIT/GPL"); MODULE_FIRMWARE("brcm/brcmbt4377*.bin"); MODULE_FIRMWARE("brcm/brcmbt4377*.ptb"); MODULE_FIRMWARE("brcm/brcmbt4378*.bin"); MODULE_FIRMWARE("brcm/brcmbt4378*.ptb"); MODULE_FIRMWARE("brcm/brcmbt4387*.bin"); MODULE_FIRMWARE("brcm/brcmbt4387*.ptb");
linux-master
drivers/bluetooth/hci_bcm4377.c
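For reference, the OTP table parsed by bcm4377_parse_otp() above is a plain type/length/value layout. A minimal standalone sketch of the same walk, with hypothetical names and no kernel dependencies:

#include <stdint.h>
#include <stddef.h>

/* Hypothetical callback; the driver instead switches inline on the
 * record type (SYS_VENDOR, CIS, ...). */
typedef void (*otp_cb)(uint8_t type, const uint8_t *val, uint8_t len);

/* Each OTP record is <type><length><payload>; a zero type byte
 * terminates the table, and a record that would overrun the buffer
 * ends the walk, mirroring the bounds checks in the driver. */
static void otp_walk(const uint8_t *otp, size_t size, otp_cb cb)
{
	size_t i = 0;

	while (i + 1 < size) {
		uint8_t type = otp[i];
		uint8_t len = otp[i + 1];

		if (type == 0)
			break;
		if (i + 2 + len > size)
			break;

		cb(type, &otp[i + 2], len);
		i += 2 + len;
	}
}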
// SPDX-License-Identifier: GPL-2.0-or-later /* * * AVM BlueFRITZ! USB driver * * Copyright (C) 2003-2006 Marcel Holtmann <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/usb.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "1.2" static struct usb_driver bfusb_driver; static const struct usb_device_id bfusb_table[] = { /* AVM BlueFRITZ! USB */ { USB_DEVICE(0x057c, 0x2200) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, bfusb_table); #define BFUSB_MAX_BLOCK_SIZE 256 #define BFUSB_BLOCK_TIMEOUT 3000 #define BFUSB_TX_PROCESS 1 #define BFUSB_TX_WAKEUP 2 #define BFUSB_MAX_BULK_TX 2 #define BFUSB_MAX_BULK_RX 2 struct bfusb_data { struct hci_dev *hdev; unsigned long state; struct usb_device *udev; unsigned int bulk_in_ep; unsigned int bulk_out_ep; unsigned int bulk_pkt_size; rwlock_t lock; struct sk_buff_head transmit_q; struct sk_buff *reassembly; atomic_t pending_tx; struct sk_buff_head pending_q; struct sk_buff_head completed_q; }; struct bfusb_data_scb { struct urb *urb; }; static void bfusb_tx_complete(struct urb *urb); static void bfusb_rx_complete(struct urb *urb); static struct urb *bfusb_get_completed(struct bfusb_data *data) { struct sk_buff *skb; struct urb *urb = NULL; BT_DBG("bfusb %p", data); skb = skb_dequeue(&data->completed_q); if (skb) { urb = ((struct bfusb_data_scb *) skb->cb)->urb; kfree_skb(skb); } return urb; } static void bfusb_unlink_urbs(struct bfusb_data *data) { struct sk_buff *skb; struct urb *urb; BT_DBG("bfusb %p", data); while ((skb = skb_dequeue(&data->pending_q))) { urb = ((struct bfusb_data_scb *) skb->cb)->urb; usb_kill_urb(urb); skb_queue_tail(&data->completed_q, skb); } while ((urb = bfusb_get_completed(data))) usb_free_urb(urb); } static int bfusb_send_bulk(struct bfusb_data *data, struct sk_buff *skb) { struct bfusb_data_scb *scb = (void *) skb->cb; struct urb *urb = bfusb_get_completed(data); int err, pipe; BT_DBG("bfusb %p skb %p len %d", data, skb, skb->len); if (!urb) { urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; } pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, bfusb_tx_complete, skb); scb->urb = urb; skb_queue_tail(&data->pending_q, skb); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { bt_dev_err(data->hdev, "bulk tx submit failed urb %p err %d", urb, err); skb_unlink(skb, &data->pending_q); usb_free_urb(urb); } else atomic_inc(&data->pending_tx); return err; } static void bfusb_tx_wakeup(struct bfusb_data *data) { struct sk_buff *skb; BT_DBG("bfusb %p", data); if (test_and_set_bit(BFUSB_TX_PROCESS, &data->state)) { set_bit(BFUSB_TX_WAKEUP, &data->state); return; } do { clear_bit(BFUSB_TX_WAKEUP, &data->state); while ((atomic_read(&data->pending_tx) < BFUSB_MAX_BULK_TX) && (skb = skb_dequeue(&data->transmit_q))) { if (bfusb_send_bulk(data, skb) < 0) { skb_queue_head(&data->transmit_q, skb); break; } } } while (test_bit(BFUSB_TX_WAKEUP, &data->state)); clear_bit(BFUSB_TX_PROCESS, &data->state); } static void bfusb_tx_complete(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct bfusb_data *data = (struct bfusb_data *) skb->dev; BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); atomic_dec(&data->pending_tx); if (!test_bit(HCI_RUNNING, 
&data->hdev->flags)) return; if (!urb->status) data->hdev->stat.byte_tx += skb->len; else data->hdev->stat.err_tx++; read_lock(&data->lock); skb_unlink(skb, &data->pending_q); skb_queue_tail(&data->completed_q, skb); bfusb_tx_wakeup(data); read_unlock(&data->lock); } static int bfusb_rx_submit(struct bfusb_data *data, struct urb *urb) { struct bfusb_data_scb *scb; struct sk_buff *skb; int err, pipe, size = HCI_MAX_FRAME_SIZE + 32; BT_DBG("bfusb %p urb %p", data, urb); if (!urb) { urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) return -ENOMEM; } skb = bt_skb_alloc(size, GFP_ATOMIC); if (!skb) { usb_free_urb(urb); return -ENOMEM; } skb->dev = (void *) data; scb = (struct bfusb_data_scb *) skb->cb; scb->urb = urb; pipe = usb_rcvbulkpipe(data->udev, data->bulk_in_ep); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, size, bfusb_rx_complete, skb); skb_queue_tail(&data->pending_q, skb); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { bt_dev_err(data->hdev, "bulk rx submit failed urb %p err %d", urb, err); skb_unlink(skb, &data->pending_q); kfree_skb(skb); usb_free_urb(urb); } return err; } static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned char *buf, int len) { BT_DBG("bfusb %p hdr 0x%02x data %p len %d", data, hdr, buf, len); if (hdr & 0x10) { bt_dev_err(data->hdev, "error in block"); kfree_skb(data->reassembly); data->reassembly = NULL; return -EIO; } if (hdr & 0x04) { struct sk_buff *skb; unsigned char pkt_type; int pkt_len = 0; if (data->reassembly) { bt_dev_err(data->hdev, "unexpected start block"); kfree_skb(data->reassembly); data->reassembly = NULL; } if (len < 1) { bt_dev_err(data->hdev, "no packet type found"); return -EPROTO; } pkt_type = *buf++; len--; switch (pkt_type) { case HCI_EVENT_PKT: if (len >= HCI_EVENT_HDR_SIZE) { struct hci_event_hdr *hdr = (struct hci_event_hdr *) buf; pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen; } else { bt_dev_err(data->hdev, "event block is too short"); return -EILSEQ; } break; case HCI_ACLDATA_PKT: if (len >= HCI_ACL_HDR_SIZE) { struct hci_acl_hdr *hdr = (struct hci_acl_hdr *) buf; pkt_len = HCI_ACL_HDR_SIZE + __le16_to_cpu(hdr->dlen); } else { bt_dev_err(data->hdev, "data block is too short"); return -EILSEQ; } break; case HCI_SCODATA_PKT: if (len >= HCI_SCO_HDR_SIZE) { struct hci_sco_hdr *hdr = (struct hci_sco_hdr *) buf; pkt_len = HCI_SCO_HDR_SIZE + hdr->dlen; } else { bt_dev_err(data->hdev, "audio block is too short"); return -EILSEQ; } break; } skb = bt_skb_alloc(pkt_len, GFP_ATOMIC); if (!skb) { bt_dev_err(data->hdev, "no memory for the packet"); return -ENOMEM; } hci_skb_pkt_type(skb) = pkt_type; data->reassembly = skb; } else { if (!data->reassembly) { bt_dev_err(data->hdev, "unexpected continuation block"); return -EIO; } } if (len > 0) skb_put_data(data->reassembly, buf, len); if (hdr & 0x08) { hci_recv_frame(data->hdev, data->reassembly); data->reassembly = NULL; } return 0; } static void bfusb_rx_complete(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct bfusb_data *data = (struct bfusb_data *) skb->dev; unsigned char *buf = urb->transfer_buffer; int count = urb->actual_length; int err, hdr, len; BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len); read_lock(&data->lock); if (!test_bit(HCI_RUNNING, &data->hdev->flags)) goto unlock; if (urb->status || !count) goto resubmit; data->hdev->stat.byte_rx += count; skb_put(skb, count); while (count) { hdr = buf[0] | (buf[1] << 8); if (hdr & 0x4000) { len = 0; count -= 2; buf += 2; } else { len = (buf[2] == 0) ? 
256 : buf[2]; count -= 3; buf += 3; } if (count < len) { bt_dev_err(data->hdev, "block extends over URB buffer ranges"); } if ((hdr & 0xe1) == 0xc1) bfusb_recv_block(data, hdr, buf, len); count -= len; buf += len; } skb_unlink(skb, &data->pending_q); kfree_skb(skb); bfusb_rx_submit(data, urb); read_unlock(&data->lock); return; resubmit: urb->dev = data->udev; err = usb_submit_urb(urb, GFP_ATOMIC); if (err) { bt_dev_err(data->hdev, "bulk resubmit failed urb %p err %d", urb, err); } unlock: read_unlock(&data->lock); } static int bfusb_open(struct hci_dev *hdev) { struct bfusb_data *data = hci_get_drvdata(hdev); unsigned long flags; int i, err; BT_DBG("hdev %p bfusb %p", hdev, data); write_lock_irqsave(&data->lock, flags); err = bfusb_rx_submit(data, NULL); if (!err) { for (i = 1; i < BFUSB_MAX_BULK_RX; i++) bfusb_rx_submit(data, NULL); } write_unlock_irqrestore(&data->lock, flags); return err; } static int bfusb_flush(struct hci_dev *hdev) { struct bfusb_data *data = hci_get_drvdata(hdev); BT_DBG("hdev %p bfusb %p", hdev, data); skb_queue_purge(&data->transmit_q); return 0; } static int bfusb_close(struct hci_dev *hdev) { struct bfusb_data *data = hci_get_drvdata(hdev); unsigned long flags; BT_DBG("hdev %p bfusb %p", hdev, data); write_lock_irqsave(&data->lock, flags); write_unlock_irqrestore(&data->lock, flags); bfusb_unlink_urbs(data); bfusb_flush(hdev); return 0; } static int bfusb_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct bfusb_data *data = hci_get_drvdata(hdev); struct sk_buff *nskb; unsigned char buf[3]; int sent = 0, size, count; BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, hci_skb_pkt_type(skb), skb->len); switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; } /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); count = skb->len; /* Max HCI frame size seems to be 1511 + 1 */ nskb = bt_skb_alloc(count + 32, GFP_KERNEL); if (!nskb) { bt_dev_err(hdev, "Can't allocate memory for new packet"); return -ENOMEM; } nskb->dev = (void *) data; while (count) { size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE); buf[0] = 0xc1 | ((sent == 0) ? 0x04 : 0) | ((count == size) ? 0x08 : 0); buf[1] = 0x00; buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size; skb_put_data(nskb, buf, 3); skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size); sent += size; count -= size; } /* Don't send frame with multiple size of bulk max packet */ if ((nskb->len % data->bulk_pkt_size) == 0) { buf[0] = 0xdd; buf[1] = 0x00; skb_put_data(nskb, buf, 2); } read_lock(&data->lock); skb_queue_tail(&data->transmit_q, nskb); bfusb_tx_wakeup(data); read_unlock(&data->lock); kfree_skb(skb); return 0; } static int bfusb_load_firmware(struct bfusb_data *data, const unsigned char *firmware, int count) { unsigned char *buf; int err, pipe, len, size, sent = 0; BT_DBG("bfusb %p udev %p", data, data->udev); BT_INFO("BlueFRITZ! 
USB loading firmware"); buf = kmalloc(BFUSB_MAX_BLOCK_SIZE + 3, GFP_KERNEL); if (!buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } pipe = usb_sndctrlpipe(data->udev, 0); if (usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, 0, 1, 0, NULL, 0, USB_CTRL_SET_TIMEOUT) < 0) { BT_ERR("Can't change to loading configuration"); kfree(buf); return -EBUSY; } data->udev->toggle[0] = data->udev->toggle[1] = 0; pipe = usb_sndbulkpipe(data->udev, data->bulk_out_ep); while (count) { size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE + 3); memcpy(buf, firmware + sent, size); err = usb_bulk_msg(data->udev, pipe, buf, size, &len, BFUSB_BLOCK_TIMEOUT); if (err || (len != size)) { BT_ERR("Error in firmware loading"); goto error; } sent += size; count -= size; } err = usb_bulk_msg(data->udev, pipe, NULL, 0, &len, BFUSB_BLOCK_TIMEOUT); if (err < 0) { BT_ERR("Error in null packet request"); goto error; } pipe = usb_sndctrlpipe(data->udev, 0); err = usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, 0, 2, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); if (err < 0) { BT_ERR("Can't change to running configuration"); goto error; } data->udev->toggle[0] = data->udev->toggle[1] = 0; BT_INFO("BlueFRITZ! USB device ready"); kfree(buf); return 0; error: kfree(buf); pipe = usb_sndctrlpipe(data->udev, 0); usb_control_msg(data->udev, pipe, USB_REQ_SET_CONFIGURATION, 0, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT); return err; } static int bfusb_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct firmware *firmware; struct usb_device *udev = interface_to_usbdev(intf); struct usb_host_endpoint *bulk_out_ep; struct usb_host_endpoint *bulk_in_ep; struct hci_dev *hdev; struct bfusb_data *data; BT_DBG("intf %p id %p", intf, id); /* Check number of endpoints */ if (intf->cur_altsetting->desc.bNumEndpoints < 2) return -EIO; bulk_out_ep = &intf->cur_altsetting->endpoint[0]; bulk_in_ep = &intf->cur_altsetting->endpoint[1]; if (!bulk_out_ep || !bulk_in_ep) { BT_ERR("Bulk endpoints not found"); goto done; } /* Initialize control structure and load firmware */ data = devm_kzalloc(&intf->dev, sizeof(struct bfusb_data), GFP_KERNEL); if (!data) return -ENOMEM; data->udev = udev; data->bulk_in_ep = bulk_in_ep->desc.bEndpointAddress; data->bulk_out_ep = bulk_out_ep->desc.bEndpointAddress; data->bulk_pkt_size = le16_to_cpu(bulk_out_ep->desc.wMaxPacketSize); if (!data->bulk_pkt_size) goto done; rwlock_init(&data->lock); data->reassembly = NULL; skb_queue_head_init(&data->transmit_q); skb_queue_head_init(&data->pending_q); skb_queue_head_init(&data->completed_q); if (request_firmware(&firmware, "bfubase.frm", &udev->dev) < 0) { BT_ERR("Firmware request failed"); goto done; } BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); if (bfusb_load_firmware(data, firmware->data, firmware->size) < 0) { BT_ERR("Firmware loading failed"); goto release; } release_firmware(firmware); /* Initialize and register HCI device */ hdev = hci_alloc_dev(); if (!hdev) { BT_ERR("Can't allocate HCI device"); goto done; } data->hdev = hdev; hdev->bus = HCI_USB; hci_set_drvdata(hdev, data); SET_HCIDEV_DEV(hdev, &intf->dev); hdev->open = bfusb_open; hdev->close = bfusb_close; hdev->flush = bfusb_flush; hdev->send = bfusb_send_frame; set_bit(HCI_QUIRK_BROKEN_LOCAL_COMMANDS, &hdev->quirks); if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); hci_free_dev(hdev); goto done; } usb_set_intfdata(intf, data); return 0; release: release_firmware(firmware); done: return -EIO; } static void 
bfusb_disconnect(struct usb_interface *intf) { struct bfusb_data *data = usb_get_intfdata(intf); struct hci_dev *hdev = data->hdev; BT_DBG("intf %p", intf); if (!hdev) return; usb_set_intfdata(intf, NULL); bfusb_close(hdev); hci_unregister_dev(hdev); hci_free_dev(hdev); } static struct usb_driver bfusb_driver = { .name = "bfusb", .probe = bfusb_probe, .disconnect = bfusb_disconnect, .id_table = bfusb_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(bfusb_driver); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("BlueFRITZ! USB driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("bfubase.frm");
linux-master
drivers/bluetooth/bfusb.c
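As a side note on the framing above: bfusb_send_frame() splits each HCI frame into blocks of at most 256 bytes and prepends a 3-byte header to each. A hedged standalone sketch of that header (helper name hypothetical):

#include <stdint.h>
#include <stddef.h>
#include <stdbool.h>

#define BFUSB_MAX_BLOCK_SIZE 256

/* Header layout used by the driver: base 0xc1, bit 0x04 marks the
 * first block of a frame, bit 0x08 the last; byte 2 is the block
 * length, where 0 encodes a full 256-byte block. The receive path
 * (bfusb_recv_block) checks the same bits, plus 0x10 as an error flag. */
static void bfusb_build_block_hdr(uint8_t hdr[3], size_t size,
				  bool first, bool last)
{
	hdr[0] = 0xc1 | (first ? 0x04 : 0x00) | (last ? 0x08 : 0x00);
	hdr[1] = 0x00;
	hdr[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : (uint8_t)size;
}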
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth HCI UART driver for Intel devices * * Copyright (C) 2015 Intel Corporation */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/wait.h> #include <linux/tty.h> #include <linux/platform_device.h> #include <linux/gpio/consumer.h> #include <linux/acpi.h> #include <linux/interrupt.h> #include <linux/pm_runtime.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" #include "btintel.h" #define STATE_BOOTLOADER 0 #define STATE_DOWNLOADING 1 #define STATE_FIRMWARE_LOADED 2 #define STATE_FIRMWARE_FAILED 3 #define STATE_BOOTING 4 #define STATE_LPM_ENABLED 5 #define STATE_TX_ACTIVE 6 #define STATE_SUSPENDED 7 #define STATE_LPM_TRANSACTION 8 #define HCI_LPM_WAKE_PKT 0xf0 #define HCI_LPM_PKT 0xf1 #define HCI_LPM_MAX_SIZE 10 #define HCI_LPM_HDR_SIZE HCI_EVENT_HDR_SIZE #define LPM_OP_TX_NOTIFY 0x00 #define LPM_OP_SUSPEND_ACK 0x02 #define LPM_OP_RESUME_ACK 0x03 #define LPM_SUSPEND_DELAY_MS 1000 struct hci_lpm_pkt { __u8 opcode; __u8 dlen; __u8 data[]; } __packed; struct intel_device { struct list_head list; struct platform_device *pdev; struct gpio_desc *reset; struct hci_uart *hu; struct mutex hu_lock; int irq; }; static LIST_HEAD(intel_device_list); static DEFINE_MUTEX(intel_device_list_lock); struct intel_data { struct sk_buff *rx_skb; struct sk_buff_head txq; struct work_struct busy_work; struct hci_uart *hu; unsigned long flags; }; static u8 intel_convert_speed(unsigned int speed) { switch (speed) { case 9600: return 0x00; case 19200: return 0x01; case 38400: return 0x02; case 57600: return 0x03; case 115200: return 0x04; case 230400: return 0x05; case 460800: return 0x06; case 921600: return 0x07; case 1843200: return 0x08; case 3250000: return 0x09; case 2000000: return 0x0a; case 3000000: return 0x0b; default: return 0xff; } } static int intel_wait_booting(struct hci_uart *hu) { struct intel_data *intel = hu->priv; int err; err = wait_on_bit_timeout(&intel->flags, STATE_BOOTING, TASK_INTERRUPTIBLE, msecs_to_jiffies(1000)); if (err == -EINTR) { bt_dev_err(hu->hdev, "Device boot interrupted"); return -EINTR; } if (err) { bt_dev_err(hu->hdev, "Device boot timeout"); return -ETIMEDOUT; } return err; } #ifdef CONFIG_PM static int intel_wait_lpm_transaction(struct hci_uart *hu) { struct intel_data *intel = hu->priv; int err; err = wait_on_bit_timeout(&intel->flags, STATE_LPM_TRANSACTION, TASK_INTERRUPTIBLE, msecs_to_jiffies(1000)); if (err == -EINTR) { bt_dev_err(hu->hdev, "LPM transaction interrupted"); return -EINTR; } if (err) { bt_dev_err(hu->hdev, "LPM transaction timeout"); return -ETIMEDOUT; } return err; } static int intel_lpm_suspend(struct hci_uart *hu) { static const u8 suspend[] = { 0x01, 0x01, 0x01 }; struct intel_data *intel = hu->priv; struct sk_buff *skb; if (!test_bit(STATE_LPM_ENABLED, &intel->flags) || test_bit(STATE_SUSPENDED, &intel->flags)) return 0; if (test_bit(STATE_TX_ACTIVE, &intel->flags)) return -EAGAIN; bt_dev_dbg(hu->hdev, "Suspending"); skb = bt_skb_alloc(sizeof(suspend), GFP_KERNEL); if (!skb) { bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet"); return -ENOMEM; } skb_put_data(skb, suspend, sizeof(suspend)); hci_skb_pkt_type(skb) = HCI_LPM_PKT; set_bit(STATE_LPM_TRANSACTION, &intel->flags); /* LPM flow is a priority, enqueue packet at list head */ skb_queue_head(&intel->txq, skb); hci_uart_tx_wakeup(hu); intel_wait_lpm_transaction(hu); /* Even in case of 
failure, continue and test the suspended flag */ clear_bit(STATE_LPM_TRANSACTION, &intel->flags); if (!test_bit(STATE_SUSPENDED, &intel->flags)) { bt_dev_err(hu->hdev, "Device suspend error"); return -EINVAL; } bt_dev_dbg(hu->hdev, "Suspended"); hci_uart_set_flow_control(hu, true); return 0; } static int intel_lpm_resume(struct hci_uart *hu) { struct intel_data *intel = hu->priv; struct sk_buff *skb; if (!test_bit(STATE_LPM_ENABLED, &intel->flags) || !test_bit(STATE_SUSPENDED, &intel->flags)) return 0; bt_dev_dbg(hu->hdev, "Resuming"); hci_uart_set_flow_control(hu, false); skb = bt_skb_alloc(0, GFP_KERNEL); if (!skb) { bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet"); return -ENOMEM; } hci_skb_pkt_type(skb) = HCI_LPM_WAKE_PKT; set_bit(STATE_LPM_TRANSACTION, &intel->flags); /* LPM flow is a priority, enqueue packet at list head */ skb_queue_head(&intel->txq, skb); hci_uart_tx_wakeup(hu); intel_wait_lpm_transaction(hu); /* Even in case of failure, continue and test the suspended flag */ clear_bit(STATE_LPM_TRANSACTION, &intel->flags); if (test_bit(STATE_SUSPENDED, &intel->flags)) { bt_dev_err(hu->hdev, "Device resume error"); return -EINVAL; } bt_dev_dbg(hu->hdev, "Resumed"); return 0; } #endif /* CONFIG_PM */ static int intel_lpm_host_wake(struct hci_uart *hu) { static const u8 lpm_resume_ack[] = { LPM_OP_RESUME_ACK, 0x00 }; struct intel_data *intel = hu->priv; struct sk_buff *skb; hci_uart_set_flow_control(hu, false); clear_bit(STATE_SUSPENDED, &intel->flags); skb = bt_skb_alloc(sizeof(lpm_resume_ack), GFP_KERNEL); if (!skb) { bt_dev_err(hu->hdev, "Failed to alloc memory for LPM packet"); return -ENOMEM; } skb_put_data(skb, lpm_resume_ack, sizeof(lpm_resume_ack)); hci_skb_pkt_type(skb) = HCI_LPM_PKT; /* LPM flow is a priority, enqueue packet at list head */ skb_queue_head(&intel->txq, skb); hci_uart_tx_wakeup(hu); bt_dev_dbg(hu->hdev, "Resumed by controller"); return 0; } static irqreturn_t intel_irq(int irq, void *dev_id) { struct intel_device *idev = dev_id; dev_info(&idev->pdev->dev, "hci_intel irq\n"); mutex_lock(&idev->hu_lock); if (idev->hu) intel_lpm_host_wake(idev->hu); mutex_unlock(&idev->hu_lock); /* Host/Controller are now LPM resumed, trigger a new delayed suspend */ pm_runtime_get(&idev->pdev->dev); pm_runtime_mark_last_busy(&idev->pdev->dev); pm_runtime_put_autosuspend(&idev->pdev->dev); return IRQ_HANDLED; } static int intel_set_power(struct hci_uart *hu, bool powered) { struct intel_device *idev; int err = -ENODEV; if (!hu->tty->dev) return err; mutex_lock(&intel_device_list_lock); list_for_each_entry(idev, &intel_device_list, list) { /* tty device and pdev device should share the same parent * which is the UART port. */ if (hu->tty->dev->parent != idev->pdev->dev.parent) continue; if (!idev->reset) { err = -ENOTSUPP; break; } BT_INFO("hu %p, Switching compatible pm device (%s) to %u", hu, dev_name(&idev->pdev->dev), powered); gpiod_set_value(idev->reset, powered); /* Provide to idev a hu reference which is used to run LPM * transactions (lpm suspend/resume) from PM callbacks. * hu needs to be protected against concurrent removing during * these PM ops. */ mutex_lock(&idev->hu_lock); idev->hu = powered ? 
hu : NULL; mutex_unlock(&idev->hu_lock); if (idev->irq < 0) break; if (powered && device_can_wakeup(&idev->pdev->dev)) { err = devm_request_threaded_irq(&idev->pdev->dev, idev->irq, NULL, intel_irq, IRQF_ONESHOT, "bt-host-wake", idev); if (err) { BT_ERR("hu %p, unable to allocate irq-%d", hu, idev->irq); break; } device_wakeup_enable(&idev->pdev->dev); pm_runtime_set_active(&idev->pdev->dev); pm_runtime_use_autosuspend(&idev->pdev->dev); pm_runtime_set_autosuspend_delay(&idev->pdev->dev, LPM_SUSPEND_DELAY_MS); pm_runtime_enable(&idev->pdev->dev); } else if (!powered && device_may_wakeup(&idev->pdev->dev)) { devm_free_irq(&idev->pdev->dev, idev->irq, idev); device_wakeup_disable(&idev->pdev->dev); pm_runtime_disable(&idev->pdev->dev); } } mutex_unlock(&intel_device_list_lock); return err; } static void intel_busy_work(struct work_struct *work) { struct intel_data *intel = container_of(work, struct intel_data, busy_work); struct intel_device *idev; if (!intel->hu->tty->dev) return; /* Link is busy, delay the suspend */ mutex_lock(&intel_device_list_lock); list_for_each_entry(idev, &intel_device_list, list) { if (intel->hu->tty->dev->parent == idev->pdev->dev.parent) { pm_runtime_get(&idev->pdev->dev); pm_runtime_mark_last_busy(&idev->pdev->dev); pm_runtime_put_autosuspend(&idev->pdev->dev); break; } } mutex_unlock(&intel_device_list_lock); } static int intel_open(struct hci_uart *hu) { struct intel_data *intel; BT_DBG("hu %p", hu); if (!hci_uart_has_flow_control(hu)) return -EOPNOTSUPP; intel = kzalloc(sizeof(*intel), GFP_KERNEL); if (!intel) return -ENOMEM; skb_queue_head_init(&intel->txq); INIT_WORK(&intel->busy_work, intel_busy_work); intel->hu = hu; hu->priv = intel; if (!intel_set_power(hu, true)) set_bit(STATE_BOOTING, &intel->flags); return 0; } static int intel_close(struct hci_uart *hu) { struct intel_data *intel = hu->priv; BT_DBG("hu %p", hu); cancel_work_sync(&intel->busy_work); intel_set_power(hu, false); skb_queue_purge(&intel->txq); kfree_skb(intel->rx_skb); kfree(intel); hu->priv = NULL; return 0; } static int intel_flush(struct hci_uart *hu) { struct intel_data *intel = hu->priv; BT_DBG("hu %p", hu); skb_queue_purge(&intel->txq); return 0; } static int inject_cmd_complete(struct hci_dev *hdev, __u16 opcode) { struct sk_buff *skb; struct hci_event_hdr *hdr; struct hci_ev_cmd_complete *evt; skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL); if (!skb) return -ENOMEM; hdr = skb_put(skb, sizeof(*hdr)); hdr->evt = HCI_EV_CMD_COMPLETE; hdr->plen = sizeof(*evt) + 1; evt = skb_put(skb, sizeof(*evt)); evt->ncmd = 0x01; evt->opcode = cpu_to_le16(opcode); skb_put_u8(skb, 0x00); hci_skb_pkt_type(skb) = HCI_EVENT_PKT; return hci_recv_frame(hdev, skb); } static int intel_set_baudrate(struct hci_uart *hu, unsigned int speed) { struct intel_data *intel = hu->priv; struct hci_dev *hdev = hu->hdev; u8 speed_cmd[] = { 0x06, 0xfc, 0x01, 0x00 }; struct sk_buff *skb; int err; /* This can be the first command sent to the chip, check * that the controller is ready. */ err = intel_wait_booting(hu); clear_bit(STATE_BOOTING, &intel->flags); /* In case of timeout, try to continue anyway */ if (err && err != -ETIMEDOUT) return err; bt_dev_info(hdev, "Change controller speed to %d", speed); speed_cmd[3] = intel_convert_speed(speed); if (speed_cmd[3] == 0xff) { bt_dev_err(hdev, "Unsupported speed"); return -EINVAL; } /* Device will not accept speed change if Intel version has not been * previously requested. 
*/ skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reading Intel version information failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); skb = bt_skb_alloc(sizeof(speed_cmd), GFP_KERNEL); if (!skb) { bt_dev_err(hdev, "Failed to alloc memory for baudrate packet"); return -ENOMEM; } skb_put_data(skb, speed_cmd, sizeof(speed_cmd)); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; hci_uart_set_flow_control(hu, true); skb_queue_tail(&intel->txq, skb); hci_uart_tx_wakeup(hu); /* wait 100ms to change baudrate on controller side */ msleep(100); hci_uart_set_baudrate(hu, speed); hci_uart_set_flow_control(hu, false); return 0; } static int intel_setup(struct hci_uart *hu) { struct intel_data *intel = hu->priv; struct hci_dev *hdev = hu->hdev; struct sk_buff *skb; struct intel_version ver; struct intel_boot_params params; struct intel_device *idev; const struct firmware *fw; char fwname[64]; u32 boot_param; ktime_t calltime, delta, rettime; unsigned long long duration; unsigned int init_speed, oper_speed; int speed_change = 0; int err; bt_dev_dbg(hdev, "start intel_setup"); hu->hdev->set_diag = btintel_set_diag; hu->hdev->set_bdaddr = btintel_set_bdaddr; /* Set the default boot parameter to 0x0 and it is updated to * SKU specific boot parameter after reading Intel_Write_Boot_Params * command while downloading the firmware. */ boot_param = 0x00000000; calltime = ktime_get(); if (hu->init_speed) init_speed = hu->init_speed; else init_speed = hu->proto->init_speed; if (hu->oper_speed) oper_speed = hu->oper_speed; else oper_speed = hu->proto->oper_speed; if (oper_speed && init_speed && oper_speed != init_speed) speed_change = 1; /* Check that the controller is ready */ err = intel_wait_booting(hu); clear_bit(STATE_BOOTING, &intel->flags); /* In case of timeout, try to continue anyway */ if (err && err != -ETIMEDOUT) return err; set_bit(STATE_BOOTLOADER, &intel->flags); /* Read the Intel version information to determine if the device * is in bootloader mode or if it already has operational firmware * loaded. */ err = btintel_read_version(hdev, &ver); if (err) return err; /* The hardware platform number has a fixed value of 0x37 and * for now only accept this single value. */ if (ver.hw_platform != 0x37) { bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)", ver.hw_platform); return -EINVAL; } /* Check for supported iBT hardware variants of this firmware * loading method. * * This check has been put in place to ensure correct forward * compatibility options when newer hardware variants come along. */ switch (ver.hw_variant) { case 0x0b: /* LnP */ case 0x0c: /* WsP */ case 0x12: /* ThP */ break; default: bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", ver.hw_variant); return -EINVAL; } btintel_version_info(hdev, &ver); /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. The value 0x06 identifies * the bootloader and the value 0x23 identifies the operational * firmware. * * When the operational firmware is already present, then only * the check for valid Bluetooth device address is needed. This * determines if the device will be added as configured or * unconfigured controller. * * It is not possible to use the Secure Boot Parameters in this * case since that command is only available in bootloader mode. 
*/ if (ver.fw_variant == 0x23) { clear_bit(STATE_BOOTLOADER, &intel->flags); btintel_check_bdaddr(hdev); return 0; } /* If the device is not in bootloader mode, then the only possible * choice is to return an error and abort the device initialization. */ if (ver.fw_variant != 0x06) { bt_dev_err(hdev, "Unsupported Intel firmware variant (%u)", ver.fw_variant); return -ENODEV; } /* Read the secure boot parameters to identify the operating * details of the bootloader. */ err = btintel_read_boot_params(hdev, &params); if (err) return err; /* It is required that every single firmware fragment is acknowledged * with a command complete event. If the boot parameters indicate * that this bootloader does not send them, then abort the setup. */ if (params.limited_cce != 0x00) { bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)", params.limited_cce); return -EINVAL; } /* If the OTP has no valid Bluetooth device address, then there will * also be no valid address for the operational firmware. */ if (!bacmp(&params.otp_bdaddr, BDADDR_ANY)) { bt_dev_info(hdev, "No device address configured"); set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); } /* With this Intel bootloader only the hardware variant and device * revision information are used to select the right firmware for SfP * and WsP. * * The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi. * * Currently the supported hardware variants are: * 11 (0x0b) for iBT 3.0 (LnP/SfP) * 12 (0x0c) for iBT 3.5 (WsP) * * For ThP/JfP and for future SKU's, the FW name varies based on HW * variant, HW revision and FW revision, as these are dependent on CNVi * and RF Combination. * * 18 (0x12) for iBT3.5 (ThP/JfP) * * The firmware file name for these will be * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi. * */ switch (ver.hw_variant) { case 0x0b: /* SfP */ case 0x0c: /* WsP */ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.sfi", ver.hw_variant, le16_to_cpu(params.dev_revid)); break; case 0x12: /* ThP */ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.sfi", ver.hw_variant, ver.hw_revision, ver.fw_revision); break; default: bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", ver.hw_variant); return -EINVAL; } err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { bt_dev_err(hdev, "Failed to load Intel firmware file (%d)", err); return err; } bt_dev_info(hdev, "Found device firmware: %s", fwname); /* Save the DDC file name for later */ switch (ver.hw_variant) { case 0x0b: /* SfP */ case 0x0c: /* WsP */ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u.ddc", ver.hw_variant, le16_to_cpu(params.dev_revid)); break; case 0x12: /* ThP */ snprintf(fwname, sizeof(fwname), "intel/ibt-%u-%u-%u.ddc", ver.hw_variant, ver.hw_revision, ver.fw_revision); break; default: bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", ver.hw_variant); return -EINVAL; } if (fw->size < 644) { bt_dev_err(hdev, "Invalid size of firmware file (%zu)", fw->size); err = -EBADF; goto done; } set_bit(STATE_DOWNLOADING, &intel->flags); /* Start firmware downloading and get boot parameter */ err = btintel_download_firmware(hdev, &ver, fw, &boot_param); if (err < 0) goto done; set_bit(STATE_FIRMWARE_LOADED, &intel->flags); bt_dev_info(hdev, "Waiting for firmware download to complete"); /* Before switching the device into operational mode and with that * booting the loaded firmware, wait for the bootloader notification * that all fragments have been successfully received. 
* * When the event processing receives the notification, then the * STATE_DOWNLOADING flag will be cleared. * * The firmware loading should not take longer than 5 seconds * and thus just timeout if that happens and fail the setup * of this device. */ err = wait_on_bit_timeout(&intel->flags, STATE_DOWNLOADING, TASK_INTERRUPTIBLE, msecs_to_jiffies(5000)); if (err == -EINTR) { bt_dev_err(hdev, "Firmware loading interrupted"); err = -EINTR; goto done; } if (err) { bt_dev_err(hdev, "Firmware loading timeout"); err = -ETIMEDOUT; goto done; } if (test_bit(STATE_FIRMWARE_FAILED, &intel->flags)) { bt_dev_err(hdev, "Firmware loading failed"); err = -ENOEXEC; goto done; } rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = (unsigned long long) ktime_to_ns(delta) >> 10; bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration); done: release_firmware(fw); /* Check if there was an error and if is not -EALREADY which means the * firmware has already been loaded. */ if (err < 0 && err != -EALREADY) return err; /* We need to restore the default speed before Intel reset */ if (speed_change) { err = intel_set_baudrate(hu, init_speed); if (err) return err; } calltime = ktime_get(); set_bit(STATE_BOOTING, &intel->flags); err = btintel_send_intel_reset(hdev, boot_param); if (err) return err; /* The bootloader will not indicate when the device is ready. This * is done by the operational firmware sending bootup notification. * * Booting into operational firmware should not take longer than * 1 second. However if that happens, then just fail the setup * since something went wrong. */ bt_dev_info(hdev, "Waiting for device to boot"); err = intel_wait_booting(hu); if (err) return err; clear_bit(STATE_BOOTING, &intel->flags); rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = (unsigned long long) ktime_to_ns(delta) >> 10; bt_dev_info(hdev, "Device booted in %llu usecs", duration); /* Enable LPM if matching pdev with wakeup enabled, set TX active * until further LPM TX notification. */ mutex_lock(&intel_device_list_lock); list_for_each_entry(idev, &intel_device_list, list) { if (!hu->tty->dev) break; if (hu->tty->dev->parent == idev->pdev->dev.parent) { if (device_may_wakeup(&idev->pdev->dev)) { set_bit(STATE_LPM_ENABLED, &intel->flags); set_bit(STATE_TX_ACTIVE, &intel->flags); } break; } } mutex_unlock(&intel_device_list_lock); /* Ignore errors, device can work without DDC parameters */ btintel_load_ddc_config(hdev, fwname); skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); if (speed_change) { err = intel_set_baudrate(hu, oper_speed); if (err) return err; } bt_dev_info(hdev, "Setup complete"); clear_bit(STATE_BOOTLOADER, &intel->flags); return 0; } static int intel_recv_event(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); struct intel_data *intel = hu->priv; struct hci_event_hdr *hdr; if (!test_bit(STATE_BOOTLOADER, &intel->flags) && !test_bit(STATE_BOOTING, &intel->flags)) goto recv; hdr = (void *)skb->data; /* When the firmware loading completes the device sends * out a vendor specific event indicating the result of * the firmware loading. 
*/ if (skb->len == 7 && hdr->evt == 0xff && hdr->plen == 0x05 && skb->data[2] == 0x06) { if (skb->data[3] != 0x00) set_bit(STATE_FIRMWARE_FAILED, &intel->flags); if (test_and_clear_bit(STATE_DOWNLOADING, &intel->flags) && test_bit(STATE_FIRMWARE_LOADED, &intel->flags)) wake_up_bit(&intel->flags, STATE_DOWNLOADING); /* When switching to the operational firmware the device * sends a vendor specific event indicating that the bootup * completed. */ } else if (skb->len == 9 && hdr->evt == 0xff && hdr->plen == 0x07 && skb->data[2] == 0x02) { if (test_and_clear_bit(STATE_BOOTING, &intel->flags)) wake_up_bit(&intel->flags, STATE_BOOTING); } recv: return hci_recv_frame(hdev, skb); } static void intel_recv_lpm_notify(struct hci_dev *hdev, int value) { struct hci_uart *hu = hci_get_drvdata(hdev); struct intel_data *intel = hu->priv; bt_dev_dbg(hdev, "TX idle notification (%d)", value); if (value) { set_bit(STATE_TX_ACTIVE, &intel->flags); schedule_work(&intel->busy_work); } else { clear_bit(STATE_TX_ACTIVE, &intel->flags); } } static int intel_recv_lpm(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_lpm_pkt *lpm = (void *)skb->data; struct hci_uart *hu = hci_get_drvdata(hdev); struct intel_data *intel = hu->priv; switch (lpm->opcode) { case LPM_OP_TX_NOTIFY: if (lpm->dlen < 1) { bt_dev_err(hu->hdev, "Invalid LPM notification packet"); break; } intel_recv_lpm_notify(hdev, lpm->data[0]); break; case LPM_OP_SUSPEND_ACK: set_bit(STATE_SUSPENDED, &intel->flags); if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION); break; case LPM_OP_RESUME_ACK: clear_bit(STATE_SUSPENDED, &intel->flags); if (test_and_clear_bit(STATE_LPM_TRANSACTION, &intel->flags)) wake_up_bit(&intel->flags, STATE_LPM_TRANSACTION); break; default: bt_dev_err(hdev, "Unknown LPM opcode (%02x)", lpm->opcode); break; } kfree_skb(skb); return 0; } #define INTEL_RECV_LPM \ .type = HCI_LPM_PKT, \ .hlen = HCI_LPM_HDR_SIZE, \ .loff = 1, \ .lsize = 1, \ .maxlen = HCI_LPM_MAX_SIZE static const struct h4_recv_pkt intel_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = intel_recv_event }, { INTEL_RECV_LPM, .recv = intel_recv_lpm }, }; static int intel_recv(struct hci_uart *hu, const void *data, int count) { struct intel_data *intel = hu->priv; if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; intel->rx_skb = h4_recv_buf(hu->hdev, intel->rx_skb, data, count, intel_recv_pkts, ARRAY_SIZE(intel_recv_pkts)); if (IS_ERR(intel->rx_skb)) { int err = PTR_ERR(intel->rx_skb); bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); intel->rx_skb = NULL; return err; } return count; } static int intel_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct intel_data *intel = hu->priv; struct intel_device *idev; BT_DBG("hu %p skb %p", hu, skb); if (!hu->tty->dev) goto out_enqueue; /* Be sure our controller is resumed and potential LPM transaction * completed before enqueuing any packet. 
*/ mutex_lock(&intel_device_list_lock); list_for_each_entry(idev, &intel_device_list, list) { if (hu->tty->dev->parent == idev->pdev->dev.parent) { pm_runtime_get_sync(&idev->pdev->dev); pm_runtime_mark_last_busy(&idev->pdev->dev); pm_runtime_put_autosuspend(&idev->pdev->dev); break; } } mutex_unlock(&intel_device_list_lock); out_enqueue: skb_queue_tail(&intel->txq, skb); return 0; } static struct sk_buff *intel_dequeue(struct hci_uart *hu) { struct intel_data *intel = hu->priv; struct sk_buff *skb; skb = skb_dequeue(&intel->txq); if (!skb) return skb; if (test_bit(STATE_BOOTLOADER, &intel->flags) && (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT)) { struct hci_command_hdr *cmd = (void *)skb->data; __u16 opcode = le16_to_cpu(cmd->opcode); /* When the 0xfc01 command is issued to boot into * the operational firmware, it will actually not * send a command complete event. To keep the flow * control working inject that event here. */ if (opcode == 0xfc01) inject_cmd_complete(hu->hdev, opcode); } /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); return skb; } static const struct hci_uart_proto intel_proto = { .id = HCI_UART_INTEL, .name = "Intel", .manufacturer = 2, .init_speed = 115200, .oper_speed = 3000000, .open = intel_open, .close = intel_close, .flush = intel_flush, .setup = intel_setup, .set_baudrate = intel_set_baudrate, .recv = intel_recv, .enqueue = intel_enqueue, .dequeue = intel_dequeue, }; #ifdef CONFIG_ACPI static const struct acpi_device_id intel_acpi_match[] = { { "INT33E1", 0 }, { "INT33E3", 0 }, { } }; MODULE_DEVICE_TABLE(acpi, intel_acpi_match); #endif #ifdef CONFIG_PM static int intel_suspend_device(struct device *dev) { struct intel_device *idev = dev_get_drvdata(dev); mutex_lock(&idev->hu_lock); if (idev->hu) intel_lpm_suspend(idev->hu); mutex_unlock(&idev->hu_lock); return 0; } static int intel_resume_device(struct device *dev) { struct intel_device *idev = dev_get_drvdata(dev); mutex_lock(&idev->hu_lock); if (idev->hu) intel_lpm_resume(idev->hu); mutex_unlock(&idev->hu_lock); return 0; } #endif #ifdef CONFIG_PM_SLEEP static int intel_suspend(struct device *dev) { struct intel_device *idev = dev_get_drvdata(dev); if (device_may_wakeup(dev)) enable_irq_wake(idev->irq); return intel_suspend_device(dev); } static int intel_resume(struct device *dev) { struct intel_device *idev = dev_get_drvdata(dev); if (device_may_wakeup(dev)) disable_irq_wake(idev->irq); return intel_resume_device(dev); } #endif static const struct dev_pm_ops intel_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(intel_suspend, intel_resume) SET_RUNTIME_PM_OPS(intel_suspend_device, intel_resume_device, NULL) }; static const struct acpi_gpio_params reset_gpios = { 0, 0, false }; static const struct acpi_gpio_params host_wake_gpios = { 1, 0, false }; static const struct acpi_gpio_mapping acpi_hci_intel_gpios[] = { { "reset-gpios", &reset_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO }, { "host-wake-gpios", &host_wake_gpios, 1, ACPI_GPIO_QUIRK_ONLY_GPIOIO }, { } }; static int intel_probe(struct platform_device *pdev) { struct intel_device *idev; int ret; idev = devm_kzalloc(&pdev->dev, sizeof(*idev), GFP_KERNEL); if (!idev) return -ENOMEM; mutex_init(&idev->hu_lock); idev->pdev = pdev; ret = devm_acpi_dev_add_driver_gpios(&pdev->dev, acpi_hci_intel_gpios); if (ret) dev_dbg(&pdev->dev, "Unable to add GPIO mapping table\n"); idev->reset = devm_gpiod_get(&pdev->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(idev->reset)) { dev_err(&pdev->dev, "Unable to retrieve gpio\n"); return PTR_ERR(idev->reset); } 
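/* Prefer a dedicated platform IRQ for host wake; if none is described,
 * fall back below to mapping the ACPI "host-wake" GPIO to an IRQ. */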
idev->irq = platform_get_irq(pdev, 0); if (idev->irq < 0) { struct gpio_desc *host_wake; dev_err(&pdev->dev, "No IRQ, falling back to gpio-irq\n"); host_wake = devm_gpiod_get(&pdev->dev, "host-wake", GPIOD_IN); if (IS_ERR(host_wake)) { dev_err(&pdev->dev, "Unable to retrieve IRQ\n"); goto no_irq; } idev->irq = gpiod_to_irq(host_wake); if (idev->irq < 0) { dev_err(&pdev->dev, "No corresponding irq for gpio\n"); goto no_irq; } } /* Only enable wake-up/irq when controller is powered */ device_set_wakeup_capable(&pdev->dev, true); device_wakeup_disable(&pdev->dev); no_irq: platform_set_drvdata(pdev, idev); /* Place this instance on the device list */ mutex_lock(&intel_device_list_lock); list_add_tail(&idev->list, &intel_device_list); mutex_unlock(&intel_device_list_lock); dev_info(&pdev->dev, "registered, gpio(%d)/irq(%d).\n", desc_to_gpio(idev->reset), idev->irq); return 0; } static int intel_remove(struct platform_device *pdev) { struct intel_device *idev = platform_get_drvdata(pdev); device_wakeup_disable(&pdev->dev); mutex_lock(&intel_device_list_lock); list_del(&idev->list); mutex_unlock(&intel_device_list_lock); dev_info(&pdev->dev, "unregistered.\n"); return 0; } static struct platform_driver intel_driver = { .probe = intel_probe, .remove = intel_remove, .driver = { .name = "hci_intel", .acpi_match_table = ACPI_PTR(intel_acpi_match), .pm = &intel_pm_ops, }, }; int __init intel_init(void) { int err; err = platform_driver_register(&intel_driver); if (err) return err; return hci_uart_register_proto(&intel_proto); } int __exit intel_deinit(void) { platform_driver_unregister(&intel_driver); return hci_uart_unregister_proto(&intel_proto); }
linux-master
drivers/bluetooth/hci_intel.c
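For reference, the vendor baudrate command that intel_set_baudrate() above queues can be sketched in isolation (helper name hypothetical). The first two bytes are the little-endian HCI opcode 0xfc06, then a one-byte parameter length, then the rate code from intel_convert_speed():

#include <stdint.h>

/* Build the 4-byte vendor speed command; rate_code comes from
 * intel_convert_speed() (e.g. 0x04 = 115200, 0x0b = 3000000) and
 * 0xff means the requested baudrate is unsupported. */
static int intel_build_speed_cmd(uint8_t cmd[4], uint8_t rate_code)
{
	if (rate_code == 0xff)
		return -1;

	cmd[0] = 0x06;		/* opcode LSB (OCF 0x006) */
	cmd[1] = 0xfc;		/* opcode MSB (vendor OGF 0x3f) */
	cmd[2] = 0x01;		/* parameter length */
	cmd[3] = rate_code;
	return 0;
}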
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2018 MediaTek Inc. /* * Bluetooth support for MediaTek serial devices * * Author: Sean Wang <[email protected]> * */ #include <asm/unaligned.h> #include <linux/atomic.h> #include <linux/clk.h> #include <linux/firmware.h> #include <linux/gpio/consumer.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> #include <linux/regulator/consumer.h> #include <linux/serdev.h> #include <linux/skbuff.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "h4_recv.h" #include "btmtk.h" #define VERSION "0.2" #define MTK_STP_TLR_SIZE 2 #define BTMTKUART_TX_STATE_ACTIVE 1 #define BTMTKUART_TX_STATE_WAKEUP 2 #define BTMTKUART_TX_WAIT_VND_EVT 3 #define BTMTKUART_REQUIRED_WAKEUP 4 #define BTMTKUART_FLAG_STANDALONE_HW BIT(0) struct mtk_stp_hdr { u8 prefix; __be16 dlen; u8 cs; } __packed; struct btmtkuart_data { unsigned int flags; const char *fwname; }; struct btmtkuart_dev { struct hci_dev *hdev; struct serdev_device *serdev; struct clk *clk; struct clk *osc; struct regulator *vcc; struct gpio_desc *reset; struct gpio_desc *boot; struct pinctrl *pinctrl; struct pinctrl_state *pins_runtime; struct pinctrl_state *pins_boot; speed_t desired_speed; speed_t curr_speed; struct work_struct tx_work; unsigned long tx_state; struct sk_buff_head txq; struct sk_buff *rx_skb; struct sk_buff *evt_skb; u8 stp_pad[6]; u8 stp_cursor; u16 stp_dlen; const struct btmtkuart_data *data; }; #define btmtkuart_is_standalone(bdev) \ ((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW) #define btmtkuart_is_builtin_soc(bdev) \ !((bdev)->data->flags & BTMTKUART_FLAG_STANDALONE_HW) static int mtk_hci_wmt_sync(struct hci_dev *hdev, struct btmtk_hci_wmt_params *wmt_params) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; u32 hlen, status = BTMTK_WMT_INVALID; struct btmtk_hci_wmt_evt *wmt_evt; struct btmtk_hci_wmt_cmd *wc; struct btmtk_wmt_hdr *hdr; int err; /* Send the WMT command and wait until the WMT event returns */ hlen = sizeof(*hdr) + wmt_params->dlen; if (hlen > 255) { err = -EINVAL; goto err_free_skb; } wc = kzalloc(hlen, GFP_KERNEL); if (!wc) { err = -ENOMEM; goto err_free_skb; } hdr = &wc->hdr; hdr->dir = 1; hdr->op = wmt_params->op; hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); hdr->flag = wmt_params->flag; memcpy(wc->data, wmt_params->data, wmt_params->dlen); set_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); if (err < 0) { clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); goto err_free_wc; } /* The vendor specific WMT commands are all answered by a vendor * specific event and will not have the Command Status or Command * Complete as with usual HCI command flow control. * * After sending the command, wait for BTMTKUART_TX_WAIT_VND_EVT * state to be cleared. The driver specific event receive routine * will clear that state and with that indicate completion of the * WMT command. 
*/ err = wait_on_bit_timeout(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT, TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); if (err == -EINTR) { bt_dev_err(hdev, "Execution of wmt command interrupted"); clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); goto err_free_wc; } if (err) { bt_dev_err(hdev, "Execution of wmt command timed out"); clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state); err = -ETIMEDOUT; goto err_free_wc; } /* Parse and handle the return WMT event */ wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data; if (wmt_evt->whdr.op != hdr->op) { bt_dev_err(hdev, "Wrong op received %d expected %d", wmt_evt->whdr.op, hdr->op); err = -EIO; goto err_free_wc; } switch (wmt_evt->whdr.op) { case BTMTK_WMT_SEMAPHORE: if (wmt_evt->whdr.flag == 2) status = BTMTK_WMT_PATCH_UNDONE; else status = BTMTK_WMT_PATCH_DONE; break; case BTMTK_WMT_FUNC_CTRL: wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) status = BTMTK_WMT_ON_DONE; else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) status = BTMTK_WMT_ON_PROGRESS; else status = BTMTK_WMT_ON_UNDONE; break; } if (wmt_params->status) *wmt_params->status = status; err_free_wc: kfree(wc); err_free_skb: kfree_skb(bdev->evt_skb); bdev->evt_skb = NULL; return err; } static int btmtkuart_recv_event(struct hci_dev *hdev, struct sk_buff *skb) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); struct hci_event_hdr *hdr = (void *)skb->data; int err; /* When someone waits for the WMT event, the skb is being cloned * and being processed the events from there then. */ if (test_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) { bdev->evt_skb = skb_clone(skb, GFP_KERNEL); if (!bdev->evt_skb) { err = -ENOMEM; goto err_out; } } err = hci_recv_frame(hdev, skb); if (err < 0) goto err_free_skb; if (hdr->evt == HCI_EV_WMT) { if (test_and_clear_bit(BTMTKUART_TX_WAIT_VND_EVT, &bdev->tx_state)) { /* Barrier to sync with other CPUs */ smp_mb__after_atomic(); wake_up_bit(&bdev->tx_state, BTMTKUART_TX_WAIT_VND_EVT); } } return 0; err_free_skb: kfree_skb(bdev->evt_skb); bdev->evt_skb = NULL; err_out: return err; } static const struct h4_recv_pkt mtk_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = btmtkuart_recv_event }, }; static void btmtkuart_tx_work(struct work_struct *work) { struct btmtkuart_dev *bdev = container_of(work, struct btmtkuart_dev, tx_work); struct serdev_device *serdev = bdev->serdev; struct hci_dev *hdev = bdev->hdev; while (1) { clear_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state); while (1) { struct sk_buff *skb = skb_dequeue(&bdev->txq); int len; if (!skb) break; len = serdev_device_write_buf(serdev, skb->data, skb->len); hdev->stat.byte_tx += len; skb_pull(skb, len); if (skb->len > 0) { skb_queue_head(&bdev->txq, skb); break; } switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; } kfree_skb(skb); } if (!test_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state)) break; } clear_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state); } static void btmtkuart_tx_wakeup(struct btmtkuart_dev *bdev) { if (test_and_set_bit(BTMTKUART_TX_STATE_ACTIVE, &bdev->tx_state)) set_bit(BTMTKUART_TX_STATE_WAKEUP, &bdev->tx_state); schedule_work(&bdev->tx_work); } static const unsigned char * mtk_stp_split(struct btmtkuart_dev *bdev, const unsigned char *data, int count, int *sz_h4) { struct mtk_stp_hdr *shdr; /* The 
cursor is reset once all the STP data has been consumed */ if (!bdev->stp_dlen && bdev->stp_cursor >= 6) bdev->stp_cursor = 0; /* Fill the pad until all the STP header info is obtained */ while (bdev->stp_cursor < 6 && count > 0) { bdev->stp_pad[bdev->stp_cursor] = *data; bdev->stp_cursor++; data++; count--; } /* Retrieve the STP info and sanity-check it */ if (!bdev->stp_dlen && bdev->stp_cursor >= 6) { shdr = (struct mtk_stp_hdr *)&bdev->stp_pad[2]; bdev->stp_dlen = be16_to_cpu(shdr->dlen) & 0x0fff; /* Resync STP when unexpected data is being read */ if (shdr->prefix != 0x80 || bdev->stp_dlen > 2048) { bt_dev_err(bdev->hdev, "unexpected stp format (%d, %d)", shdr->prefix, bdev->stp_dlen); bdev->stp_cursor = 2; bdev->stp_dlen = 0; } } /* Quit immediately when there is no data left for H4 to process */ if (count <= 0) return NULL; /* Translate to the amount of data H4 can handle so far */ *sz_h4 = min_t(int, count, bdev->stp_dlen); /* Update the remaining size of the STP packet */ bdev->stp_dlen -= *sz_h4; /* Data points to the STP payload, which H4 can handle */ return data; } static int btmtkuart_recv(struct hci_dev *hdev, const u8 *data, size_t count) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); const unsigned char *p_left = data, *p_h4; int sz_left = count, sz_h4, adv; int err; while (sz_left > 0) { /* The serial data received from the MT7622 BT controller is * always padded with an STP header and trailer. * * A full STP packet looks like * ----------------------------------- * | STP header | H:4 | STP trailer | * ----------------------------------- * but it is not guaranteed to contain a full H:4 packet: * several STP packets may together form one H:4 packet, so an * STP header plus its length does not necessarily delimit a * full H:4 frame; things can fragment. The length recorded in * the STP header only indicates how much data the H:4 engine * can consume at this point.
*/ p_h4 = mtk_stp_split(bdev, p_left, sz_left, &sz_h4); if (!p_h4) break; adv = p_h4 - p_left; sz_left -= adv; p_left += adv; bdev->rx_skb = h4_recv_buf(bdev->hdev, bdev->rx_skb, p_h4, sz_h4, mtk_recv_pkts, ARRAY_SIZE(mtk_recv_pkts)); if (IS_ERR(bdev->rx_skb)) { err = PTR_ERR(bdev->rx_skb); bt_dev_err(bdev->hdev, "Frame reassembly failed (%d)", err); bdev->rx_skb = NULL; return err; } sz_left -= sz_h4; p_left += sz_h4; } return 0; } static int btmtkuart_receive_buf(struct serdev_device *serdev, const u8 *data, size_t count) { struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); int err; err = btmtkuart_recv(bdev->hdev, data, count); if (err < 0) return err; bdev->hdev->stat.byte_rx += count; return count; } static void btmtkuart_write_wakeup(struct serdev_device *serdev) { struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); btmtkuart_tx_wakeup(bdev); } static const struct serdev_device_ops btmtkuart_client_ops = { .receive_buf = btmtkuart_receive_buf, .write_wakeup = btmtkuart_write_wakeup, }; static int btmtkuart_open(struct hci_dev *hdev) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); struct device *dev; int err; err = serdev_device_open(bdev->serdev); if (err) { bt_dev_err(hdev, "Unable to open UART device %s", dev_name(&bdev->serdev->dev)); goto err_open; } if (btmtkuart_is_standalone(bdev)) { if (bdev->curr_speed != bdev->desired_speed) err = serdev_device_set_baudrate(bdev->serdev, 115200); else err = serdev_device_set_baudrate(bdev->serdev, bdev->desired_speed); if (err < 0) { bt_dev_err(hdev, "Unable to set baudrate UART device %s", dev_name(&bdev->serdev->dev)); goto err_serdev_close; } serdev_device_set_flow_control(bdev->serdev, false); } bdev->stp_cursor = 2; bdev->stp_dlen = 0; dev = &bdev->serdev->dev; /* Enable the power domain and clock the device requires */ pm_runtime_enable(dev); err = pm_runtime_resume_and_get(dev); if (err < 0) goto err_disable_rpm; err = clk_prepare_enable(bdev->clk); if (err < 0) goto err_put_rpm; return 0; err_put_rpm: pm_runtime_put_sync(dev); err_disable_rpm: pm_runtime_disable(dev); err_serdev_close: serdev_device_close(bdev->serdev); err_open: return err; } static int btmtkuart_close(struct hci_dev *hdev) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); struct device *dev = &bdev->serdev->dev; /* Shutdown the clock and power domain the device requires */ clk_disable_unprepare(bdev->clk); pm_runtime_put_sync(dev); pm_runtime_disable(dev); serdev_device_close(bdev->serdev); return 0; } static int btmtkuart_flush(struct hci_dev *hdev) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); /* Flush any pending characters */ serdev_device_write_flush(bdev->serdev); skb_queue_purge(&bdev->txq); cancel_work_sync(&bdev->tx_work); kfree_skb(bdev->rx_skb); bdev->rx_skb = NULL; bdev->stp_cursor = 2; bdev->stp_dlen = 0; return 0; } static int btmtkuart_func_query(struct hci_dev *hdev) { struct btmtk_hci_wmt_params wmt_params; int status, err; u8 param = 0; /* Query whether the function is enabled */ wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 4; wmt_params.dlen = sizeof(param); wmt_params.data = &param; wmt_params.status = &status; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to query function status (%d)", err); return err; } return status; } static int btmtkuart_change_baudrate(struct hci_dev *hdev) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); struct btmtk_hci_wmt_params wmt_params; __le32 baudrate; u8 param; int err; /* Indicate the device to enter the probe state 
now that the host is * ready to change to a new baudrate. */ baudrate = cpu_to_le32(bdev->desired_speed); wmt_params.op = BTMTK_WMT_HIF; wmt_params.flag = 1; wmt_params.dlen = 4; wmt_params.data = &baudrate; wmt_params.status = NULL; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to set device baudrate (%d)", err); return err; } err = serdev_device_set_baudrate(bdev->serdev, bdev->desired_speed); if (err < 0) { bt_dev_err(hdev, "Failed to set up host baudrate (%d)", err); return err; } serdev_device_set_flow_control(bdev->serdev, false); /* Send a dummy byte 0xff to activate the new baudrate */ param = 0xff; err = serdev_device_write_buf(bdev->serdev, &param, sizeof(param)); if (err < 0 || err < sizeof(param)) return err; serdev_device_wait_until_sent(bdev->serdev, 0); /* Wait for the device to finish changing the baudrate */ usleep_range(20000, 22000); /* Test the new baudrate */ wmt_params.op = BTMTK_WMT_TEST; wmt_params.flag = 7; wmt_params.dlen = 0; wmt_params.data = NULL; wmt_params.status = NULL; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to test new baudrate (%d)", err); return err; } bdev->curr_speed = bdev->desired_speed; return 0; } static int btmtkuart_setup(struct hci_dev *hdev) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); struct btmtk_hci_wmt_params wmt_params; ktime_t calltime, delta, rettime; struct btmtk_tci_sleep tci_sleep; unsigned long long duration; struct sk_buff *skb; int err, status; u8 param = 0x1; calltime = ktime_get(); /* Waking up MCUSYS is required for certain devices before we * start any setup. */ if (test_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state)) { wmt_params.op = BTMTK_WMT_WAKEUP; wmt_params.flag = 3; wmt_params.dlen = 0; wmt_params.data = NULL; wmt_params.status = NULL; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to wakeup the chip (%d)", err); return err; } clear_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state); } if (btmtkuart_is_standalone(bdev)) btmtkuart_change_baudrate(hdev); /* Query whether the firmware is already downloaded */ wmt_params.op = BTMTK_WMT_SEMAPHORE; wmt_params.flag = 1; wmt_params.dlen = 0; wmt_params.data = NULL; wmt_params.status = &status; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to query firmware status (%d)", err); return err; } if (status == BTMTK_WMT_PATCH_DONE) { bt_dev_info(hdev, "Firmware already downloaded"); goto ignore_setup_fw; } /* Set up the firmware the device requires */ err = btmtk_setup_firmware(hdev, bdev->data->fwname, mtk_hci_wmt_sync); if (err < 0) return err; ignore_setup_fw: /* Query whether the device is already enabled */ err = readx_poll_timeout(btmtkuart_func_query, hdev, status, status < 0 || status != BTMTK_WMT_ON_PROGRESS, 2000, 5000000); /* -ETIMEDOUT happens when the polling times out */ if (err < 0) return err; /* The other errors happen in btmtkuart_func_query */ if (status < 0) return status; if (status == BTMTK_WMT_ON_DONE) { bt_dev_info(hdev, "function already on"); goto ignore_func_on; } /* Enable Bluetooth protocol */ wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 0; wmt_params.dlen = sizeof(param); wmt_params.data = &param; wmt_params.status = NULL; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); return err; } ignore_func_on: /* Apply the low power environment setup */ tci_sleep.mode = 0x5; tci_sleep.duration = cpu_to_le16(0x640); tci_sleep.host_duration = 
cpu_to_le16(0x640); tci_sleep.host_wakeup_pin = 0; tci_sleep.time_compensation = 0; skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "Failed to apply low power setting (%d)", err); return err; } kfree_skb(skb); rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = (unsigned long long)ktime_to_ns(delta) >> 10; bt_dev_info(hdev, "Device setup in %llu usecs", duration); return 0; } static int btmtkuart_shutdown(struct hci_dev *hdev) { struct btmtk_hci_wmt_params wmt_params; u8 param = 0x0; int err; /* Disable the device */ wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 0; wmt_params.dlen = sizeof(param); wmt_params.data = &param; wmt_params.status = NULL; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); return err; } return 0; } static int btmtkuart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct btmtkuart_dev *bdev = hci_get_drvdata(hdev); struct mtk_stp_hdr *shdr; int err, dlen, type = 0; /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); /* Make sure that there is enough room for the STP header and trailer */ if (unlikely(skb_headroom(skb) < sizeof(*shdr)) || (skb_tailroom(skb) < MTK_STP_TLR_SIZE)) { err = pskb_expand_head(skb, sizeof(*shdr), MTK_STP_TLR_SIZE, GFP_ATOMIC); if (err < 0) return err; } /* Add the STP header */ dlen = skb->len; shdr = skb_push(skb, sizeof(*shdr)); shdr->prefix = 0x80; shdr->dlen = cpu_to_be16((dlen & 0x0fff) | (type << 12)); shdr->cs = 0; /* MT7622 doesn't care about checksum value */ /* Add the STP trailer */ skb_put_zero(skb, MTK_STP_TLR_SIZE); skb_queue_tail(&bdev->txq, skb); btmtkuart_tx_wakeup(bdev); return 0; } static int btmtkuart_parse_dt(struct serdev_device *serdev) { struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); struct device_node *node = serdev->dev.of_node; u32 speed = 921600; int err; if (btmtkuart_is_standalone(bdev)) { of_property_read_u32(node, "current-speed", &speed); bdev->desired_speed = speed; bdev->vcc = devm_regulator_get(&serdev->dev, "vcc"); if (IS_ERR(bdev->vcc)) { err = PTR_ERR(bdev->vcc); return err; } bdev->osc = devm_clk_get_optional(&serdev->dev, "osc"); if (IS_ERR(bdev->osc)) { err = PTR_ERR(bdev->osc); return err; } bdev->boot = devm_gpiod_get_optional(&serdev->dev, "boot", GPIOD_OUT_LOW); if (IS_ERR(bdev->boot)) { err = PTR_ERR(bdev->boot); return err; } bdev->pinctrl = devm_pinctrl_get(&serdev->dev); if (IS_ERR(bdev->pinctrl)) { err = PTR_ERR(bdev->pinctrl); return err; } bdev->pins_boot = pinctrl_lookup_state(bdev->pinctrl, "default"); if (IS_ERR(bdev->pins_boot) && !bdev->boot) { err = PTR_ERR(bdev->pins_boot); dev_err(&serdev->dev, "Should assign RXD to LOW at boot stage\n"); return err; } bdev->pins_runtime = pinctrl_lookup_state(bdev->pinctrl, "runtime"); if (IS_ERR(bdev->pins_runtime)) { err = PTR_ERR(bdev->pins_runtime); return err; } bdev->reset = devm_gpiod_get_optional(&serdev->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(bdev->reset)) { err = PTR_ERR(bdev->reset); return err; } } else if (btmtkuart_is_builtin_soc(bdev)) { bdev->clk = devm_clk_get(&serdev->dev, "ref"); if (IS_ERR(bdev->clk)) return PTR_ERR(bdev->clk); } return 0; } static int btmtkuart_probe(struct serdev_device *serdev) { struct btmtkuart_dev *bdev; struct hci_dev *hdev; int err; bdev = devm_kzalloc(&serdev->dev, sizeof(*bdev), GFP_KERNEL); if (!bdev) return -ENOMEM; bdev->data = 
of_device_get_match_data(&serdev->dev); if (!bdev->data) return -ENODEV; bdev->serdev = serdev; serdev_device_set_drvdata(serdev, bdev); serdev_device_set_client_ops(serdev, &btmtkuart_client_ops); err = btmtkuart_parse_dt(serdev); if (err < 0) return err; INIT_WORK(&bdev->tx_work, btmtkuart_tx_work); skb_queue_head_init(&bdev->txq); /* Initialize and register HCI device */ hdev = hci_alloc_dev(); if (!hdev) { dev_err(&serdev->dev, "Can't allocate HCI device\n"); return -ENOMEM; } bdev->hdev = hdev; hdev->bus = HCI_UART; hci_set_drvdata(hdev, bdev); hdev->open = btmtkuart_open; hdev->close = btmtkuart_close; hdev->flush = btmtkuart_flush; hdev->setup = btmtkuart_setup; hdev->shutdown = btmtkuart_shutdown; hdev->send = btmtkuart_send_frame; hdev->set_bdaddr = btmtk_set_bdaddr; SET_HCIDEV_DEV(hdev, &serdev->dev); hdev->manufacturer = 70; set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); if (btmtkuart_is_standalone(bdev)) { err = clk_prepare_enable(bdev->osc); if (err < 0) goto err_hci_free_dev; if (bdev->boot) { gpiod_set_value_cansleep(bdev->boot, 1); } else { /* Switch to the specific pin state that booting * requires. */ pinctrl_select_state(bdev->pinctrl, bdev->pins_boot); } /* Power on */ err = regulator_enable(bdev->vcc); if (err < 0) goto err_clk_disable_unprepare; /* Reset if the reset GPIO is available; otherwise, the * board-level design has to guarantee it. */ if (bdev->reset) { gpiod_set_value_cansleep(bdev->reset, 1); usleep_range(1000, 2000); gpiod_set_value_cansleep(bdev->reset, 0); } /* Wait some time until the device is ready, then switch to the * pin mode the device requires for UART transfers. */ msleep(50); if (bdev->boot) devm_gpiod_put(&serdev->dev, bdev->boot); pinctrl_select_state(bdev->pinctrl, bdev->pins_runtime); /* A standalone device doesn't depend on a power domain on the SoC, * so mark it as having no callbacks. 
*/ pm_runtime_no_callbacks(&serdev->dev); set_bit(BTMTKUART_REQUIRED_WAKEUP, &bdev->tx_state); } err = hci_register_dev(hdev); if (err < 0) { dev_err(&serdev->dev, "Can't register HCI device\n"); goto err_regulator_disable; } return 0; err_regulator_disable: if (btmtkuart_is_standalone(bdev)) regulator_disable(bdev->vcc); err_clk_disable_unprepare: if (btmtkuart_is_standalone(bdev)) clk_disable_unprepare(bdev->osc); err_hci_free_dev: hci_free_dev(hdev); return err; } static void btmtkuart_remove(struct serdev_device *serdev) { struct btmtkuart_dev *bdev = serdev_device_get_drvdata(serdev); struct hci_dev *hdev = bdev->hdev; if (btmtkuart_is_standalone(bdev)) { regulator_disable(bdev->vcc); clk_disable_unprepare(bdev->osc); } hci_unregister_dev(hdev); hci_free_dev(hdev); } static const struct btmtkuart_data mt7622_data __maybe_unused = { .fwname = FIRMWARE_MT7622, }; static const struct btmtkuart_data mt7663_data __maybe_unused = { .flags = BTMTKUART_FLAG_STANDALONE_HW, .fwname = FIRMWARE_MT7663, }; static const struct btmtkuart_data mt7668_data __maybe_unused = { .flags = BTMTKUART_FLAG_STANDALONE_HW, .fwname = FIRMWARE_MT7668, }; #ifdef CONFIG_OF static const struct of_device_id mtk_of_match_table[] = { { .compatible = "mediatek,mt7622-bluetooth", .data = &mt7622_data}, { .compatible = "mediatek,mt7663u-bluetooth", .data = &mt7663_data}, { .compatible = "mediatek,mt7668u-bluetooth", .data = &mt7668_data}, { } }; MODULE_DEVICE_TABLE(of, mtk_of_match_table); #endif static struct serdev_device_driver btmtkuart_driver = { .probe = btmtkuart_probe, .remove = btmtkuart_remove, .driver = { .name = "btmtkuart", .of_match_table = of_match_ptr(mtk_of_match_table), }, }; module_serdev_device_driver(btmtkuart_driver); MODULE_AUTHOR("Sean Wang <[email protected]>"); MODULE_DESCRIPTION("MediaTek Bluetooth Serial driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
linux-master
drivers/bluetooth/btmtkuart.c
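/*
 * Editor's illustrative sketch (not part of btmtkuart.c): how
 * btmtkuart_send_frame() in the entry above wraps an HCI frame in the
 * MediaTek STP envelope. The 4-byte header is a 0x80 prefix, a
 * big-endian length word with the 4-bit type in the top nibble, and a
 * checksum byte the MT7622 ignores. The 2-byte trailer length is an
 * assumption here, since MTK_STP_TLR_SIZE is defined outside the excerpt.
 */
#include <stdint.h>
#include <string.h>

#define STP_TLR_SIZE 2	/* assumed value of MTK_STP_TLR_SIZE */

static size_t stp_wrap(uint8_t *out, const uint8_t *hci, uint16_t dlen,
		       uint8_t type)
{
	uint16_t len = (dlen & 0x0fff) | ((uint16_t)type << 12);

	out[0] = 0x80;		/* shdr->prefix */
	out[1] = len >> 8;	/* shdr->dlen, cpu_to_be16() high byte */
	out[2] = len & 0xff;	/* shdr->dlen, cpu_to_be16() low byte */
	out[3] = 0;		/* shdr->cs, not checked by MT7622 */
	memcpy(out + 4, hci, dlen);		 /* HCI packet payload */
	memset(out + 4 + dlen, 0, STP_TLR_SIZE); /* zeroed trailer */
	return 4 + dlen + STP_TLR_SIZE;
}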
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth virtual HCI driver * * Copyright (C) 2000-2001 Qualcomm Incorporated * Copyright (C) 2002-2003 Maxim Krasnyansky <[email protected]> * Copyright (C) 2004-2006 Marcel Holtmann <[email protected]> */ #include <linux/module.h> #include <asm/unaligned.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/poll.h> #include <linux/skbuff.h> #include <linux/miscdevice.h> #include <linux/debugfs.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #define VERSION "1.5" static bool amp; struct vhci_data { struct hci_dev *hdev; wait_queue_head_t read_wait; struct sk_buff_head readq; struct mutex open_mutex; struct delayed_work open_timeout; struct work_struct suspend_work; bool suspended; bool wakeup; __u16 msft_opcode; bool aosp_capable; }; static int vhci_open_dev(struct hci_dev *hdev) { return 0; } static int vhci_close_dev(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); skb_queue_purge(&data->readq); return 0; } static int vhci_flush(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); skb_queue_purge(&data->readq); return 0; } static int vhci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct vhci_data *data = hci_get_drvdata(hdev); memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&data->readq, skb); wake_up_interruptible(&data->read_wait); return 0; } static int vhci_get_data_path_id(struct hci_dev *hdev, u8 *data_path_id) { *data_path_id = 0; return 0; } static int vhci_get_codec_config_data(struct hci_dev *hdev, __u8 type, struct bt_codec *codec, __u8 *vnd_len, __u8 **vnd_data) { if (type != ESCO_LINK) return -EINVAL; *vnd_len = 0; *vnd_data = NULL; return 0; } static bool vhci_wakeup(struct hci_dev *hdev) { struct vhci_data *data = hci_get_drvdata(hdev); return data->wakeup; } static ssize_t force_suspend_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; char buf[3]; buf[0] = data->suspended ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static void vhci_suspend_work(struct work_struct *work) { struct vhci_data *data = container_of(work, struct vhci_data, suspend_work); if (data->suspended) hci_suspend_dev(data->hdev); else hci_resume_dev(data->hdev); } static ssize_t force_suspend_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (data->suspended == enable) return -EALREADY; data->suspended = enable; schedule_work(&data->suspend_work); return count; } static const struct file_operations force_suspend_fops = { .open = simple_open, .read = force_suspend_read, .write = force_suspend_write, .llseek = default_llseek, }; static ssize_t force_wakeup_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; char buf[3]; buf[0] = data->wakeup ? 
'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t force_wakeup_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (data->wakeup == enable) return -EALREADY; data->wakeup = enable; return count; } static const struct file_operations force_wakeup_fops = { .open = simple_open, .read = force_wakeup_read, .write = force_wakeup_write, .llseek = default_llseek, }; static int msft_opcode_set(void *data, u64 val) { struct vhci_data *vhci = data; if (val > 0xffff || hci_opcode_ogf(val) != 0x3f) return -EINVAL; if (vhci->msft_opcode) return -EALREADY; vhci->msft_opcode = val; return 0; } static int msft_opcode_get(void *data, u64 *val) { struct vhci_data *vhci = data; *val = vhci->msft_opcode; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(msft_opcode_fops, msft_opcode_get, msft_opcode_set, "%llu\n"); static ssize_t aosp_capable_read(struct file *file, char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *vhci = file->private_data; char buf[3]; buf[0] = vhci->aosp_capable ? 'Y' : 'N'; buf[1] = '\n'; buf[2] = '\0'; return simple_read_from_buffer(user_buf, count, ppos, buf, 2); } static ssize_t aosp_capable_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *vhci = file->private_data; bool enable; int err; err = kstrtobool_from_user(user_buf, count, &enable); if (err) return err; if (!enable) return -EINVAL; if (vhci->aosp_capable) return -EALREADY; vhci->aosp_capable = enable; return count; } static const struct file_operations aosp_capable_fops = { .open = simple_open, .read = aosp_capable_read, .write = aosp_capable_write, .llseek = default_llseek, }; static int vhci_setup(struct hci_dev *hdev) { struct vhci_data *vhci = hci_get_drvdata(hdev); if (vhci->msft_opcode) hci_set_msft_opcode(hdev, vhci->msft_opcode); if (vhci->aosp_capable) hci_set_aosp_capable(hdev); return 0; } static void vhci_coredump(struct hci_dev *hdev) { /* No need to do anything */ } static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) { char buf[80]; snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n"); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n"); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Driver: vhci_drv\n"); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Vendor: vhci\n"); skb_put_data(skb, buf, strlen(buf)); } #define MAX_COREDUMP_LINE_LEN 40 struct devcoredump_test_data { enum devcoredump_state state; unsigned int timeout; char data[MAX_COREDUMP_LINE_LEN]; }; static inline void force_devcd_timeout(struct hci_dev *hdev, unsigned int timeout) { #ifdef CONFIG_DEV_COREDUMP hdev->dump.timeout = msecs_to_jiffies(timeout * 1000); #endif } static ssize_t force_devcd_write(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct vhci_data *data = file->private_data; struct hci_dev *hdev = data->hdev; struct sk_buff *skb = NULL; struct devcoredump_test_data dump_data; size_t data_size; int ret; if (count < offsetof(struct devcoredump_test_data, data) || count > sizeof(dump_data)) return -EINVAL; if (copy_from_user(&dump_data, user_buf, count)) return -EFAULT; data_size = count - offsetof(struct devcoredump_test_data, data); skb = alloc_skb(data_size, GFP_ATOMIC); if (!skb) return -ENOMEM; 
skb_put_data(skb, &dump_data.data, data_size); hci_devcd_register(hdev, vhci_coredump, vhci_coredump_hdr, NULL); /* Force the devcoredump timeout */ if (dump_data.timeout) force_devcd_timeout(hdev, dump_data.timeout); ret = hci_devcd_init(hdev, skb->len); if (ret) { BT_ERR("Failed to generate devcoredump"); kfree_skb(skb); return ret; } hci_devcd_append(hdev, skb); switch (dump_data.state) { case HCI_DEVCOREDUMP_DONE: hci_devcd_complete(hdev); break; case HCI_DEVCOREDUMP_ABORT: hci_devcd_abort(hdev); break; case HCI_DEVCOREDUMP_TIMEOUT: /* Do nothing */ break; default: return -EINVAL; } return count; } static const struct file_operations force_devcoredump_fops = { .open = simple_open, .write = force_devcd_write, }; static int __vhci_create_device(struct vhci_data *data, __u8 opcode) { struct hci_dev *hdev; struct sk_buff *skb; __u8 dev_type; if (data->hdev) return -EBADFD; /* bits 0-1 are dev_type (Primary or AMP) */ dev_type = opcode & 0x03; if (dev_type != HCI_PRIMARY && dev_type != HCI_AMP) return -EINVAL; /* bits 2-5 are reserved (must be zero) */ if (opcode & 0x3c) return -EINVAL; skb = bt_skb_alloc(4, GFP_KERNEL); if (!skb) return -ENOMEM; hdev = hci_alloc_dev(); if (!hdev) { kfree_skb(skb); return -ENOMEM; } data->hdev = hdev; hdev->bus = HCI_VIRTUAL; hdev->dev_type = dev_type; hci_set_drvdata(hdev, data); hdev->open = vhci_open_dev; hdev->close = vhci_close_dev; hdev->flush = vhci_flush; hdev->send = vhci_send_frame; hdev->get_data_path_id = vhci_get_data_path_id; hdev->get_codec_config_data = vhci_get_codec_config_data; hdev->wakeup = vhci_wakeup; hdev->setup = vhci_setup; set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); /* bit 6 is for external configuration */ if (opcode & 0x40) set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks); /* bit 7 is for raw device */ if (opcode & 0x80) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); hci_free_dev(hdev); data->hdev = NULL; kfree_skb(skb); return -EBUSY; } debugfs_create_file("force_suspend", 0644, hdev->debugfs, data, &force_suspend_fops); debugfs_create_file("force_wakeup", 0644, hdev->debugfs, data, &force_wakeup_fops); if (IS_ENABLED(CONFIG_BT_MSFTEXT)) debugfs_create_file("msft_opcode", 0644, hdev->debugfs, data, &msft_opcode_fops); if (IS_ENABLED(CONFIG_BT_AOSPEXT)) debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data, &aosp_capable_fops); debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data, &force_devcoredump_fops); hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; skb_put_u8(skb, 0xff); skb_put_u8(skb, opcode); put_unaligned_le16(hdev->id, skb_put(skb, 2)); skb_queue_tail(&data->readq, skb); wake_up_interruptible(&data->read_wait); return 0; } static int vhci_create_device(struct vhci_data *data, __u8 opcode) { int err; mutex_lock(&data->open_mutex); err = __vhci_create_device(data, opcode); mutex_unlock(&data->open_mutex); return err; } static inline ssize_t vhci_get_user(struct vhci_data *data, struct iov_iter *from) { size_t len = iov_iter_count(from); struct sk_buff *skb; __u8 pkt_type, opcode; int ret; if (len < 2 || len > HCI_MAX_FRAME_SIZE) return -EINVAL; skb = bt_skb_alloc(len, GFP_KERNEL); if (!skb) return -ENOMEM; if (!copy_from_iter_full(skb_put(skb, len), len, from)) { kfree_skb(skb); return -EFAULT; } pkt_type = *((__u8 *) skb->data); skb_pull(skb, 1); switch (pkt_type) { case HCI_EVENT_PKT: case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_ISODATA_PKT: if (!data->hdev) { 
kfree_skb(skb); return -ENODEV; } hci_skb_pkt_type(skb) = pkt_type; ret = hci_recv_frame(data->hdev, skb); break; case HCI_VENDOR_PKT: cancel_delayed_work_sync(&data->open_timeout); opcode = *((__u8 *) skb->data); skb_pull(skb, 1); if (skb->len > 0) { kfree_skb(skb); return -EINVAL; } kfree_skb(skb); ret = vhci_create_device(data, opcode); break; default: kfree_skb(skb); return -EINVAL; } return (ret < 0) ? ret : len; } static inline ssize_t vhci_put_user(struct vhci_data *data, struct sk_buff *skb, char __user *buf, int count) { char __user *ptr = buf; int len; len = min_t(unsigned int, skb->len, count); if (copy_to_user(ptr, skb->data, len)) return -EFAULT; if (!data->hdev) return len; data->hdev->stat.byte_tx += len; switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: data->hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: data->hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: data->hdev->stat.sco_tx++; break; } return len; } static ssize_t vhci_read(struct file *file, char __user *buf, size_t count, loff_t *pos) { struct vhci_data *data = file->private_data; struct sk_buff *skb; ssize_t ret = 0; while (count) { skb = skb_dequeue(&data->readq); if (skb) { ret = vhci_put_user(data, skb, buf, count); if (ret < 0) skb_queue_head(&data->readq, skb); else kfree_skb(skb); break; } if (file->f_flags & O_NONBLOCK) { ret = -EAGAIN; break; } ret = wait_event_interruptible(data->read_wait, !skb_queue_empty(&data->readq)); if (ret < 0) break; } return ret; } static ssize_t vhci_write(struct kiocb *iocb, struct iov_iter *from) { struct file *file = iocb->ki_filp; struct vhci_data *data = file->private_data; return vhci_get_user(data, from); } static __poll_t vhci_poll(struct file *file, poll_table *wait) { struct vhci_data *data = file->private_data; poll_wait(file, &data->read_wait, wait); if (!skb_queue_empty(&data->readq)) return EPOLLIN | EPOLLRDNORM; return EPOLLOUT | EPOLLWRNORM; } static void vhci_open_timeout(struct work_struct *work) { struct vhci_data *data = container_of(work, struct vhci_data, open_timeout.work); vhci_create_device(data, amp ? 
HCI_AMP : HCI_PRIMARY); } static int vhci_open(struct inode *inode, struct file *file) { struct vhci_data *data; data = kzalloc(sizeof(struct vhci_data), GFP_KERNEL); if (!data) return -ENOMEM; skb_queue_head_init(&data->readq); init_waitqueue_head(&data->read_wait); mutex_init(&data->open_mutex); INIT_DELAYED_WORK(&data->open_timeout, vhci_open_timeout); INIT_WORK(&data->suspend_work, vhci_suspend_work); file->private_data = data; nonseekable_open(inode, file); schedule_delayed_work(&data->open_timeout, msecs_to_jiffies(1000)); return 0; } static int vhci_release(struct inode *inode, struct file *file) { struct vhci_data *data = file->private_data; struct hci_dev *hdev; cancel_delayed_work_sync(&data->open_timeout); flush_work(&data->suspend_work); hdev = data->hdev; if (hdev) { hci_unregister_dev(hdev); hci_free_dev(hdev); } skb_queue_purge(&data->readq); file->private_data = NULL; kfree(data); return 0; } static const struct file_operations vhci_fops = { .owner = THIS_MODULE, .read = vhci_read, .write_iter = vhci_write, .poll = vhci_poll, .open = vhci_open, .release = vhci_release, .llseek = no_llseek, }; static struct miscdevice vhci_miscdev = { .name = "vhci", .fops = &vhci_fops, .minor = VHCI_MINOR, }; module_misc_device(vhci_miscdev); module_param(amp, bool, 0644); MODULE_PARM_DESC(amp, "Create AMP controller device"); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth virtual HCI driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_ALIAS("devname:vhci"); MODULE_ALIAS_MISCDEV(VHCI_MINOR);
linux-master
drivers/bluetooth/hci_vhci.c
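/*
 * Editor's illustrative sketch (not part of hci_vhci.c): a minimal
 * userspace client for the /dev/vhci interface above. Writing an
 * HCI_VENDOR_PKT (0xff) whose opcode byte selects HCI_PRIMARY (0x00,
 * a value assumed from the Bluetooth uapi headers) creates a
 * controller before the 1-second open_timeout does it automatically;
 * the driver then queues back a 4-byte vendor packet carrying the new
 * hci index. Error handling is reduced to the bare minimum.
 */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	uint8_t req[2] = { 0xff, 0x00 };	/* HCI_VENDOR_PKT, HCI_PRIMARY */
	uint8_t rsp[4];				/* 0xff, opcode, index (le16) */
	int fd = open("/dev/vhci", O_RDWR);

	if (fd < 0 || write(fd, req, sizeof(req)) != sizeof(req))
		return 1;
	if (read(fd, rsp, sizeof(rsp)) == sizeof(rsp))
		printf("created hci%u\n", rsp[2] | (rsp[3] << 8));
	pause();	/* closing fd would unregister the controller */
	return 0;
}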
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth support for Broadcom devices * * Copyright (C) 2015 Intel Corporation */ #include <linux/efi.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/dmi.h> #include <linux/of.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btbcm.h" #define VERSION "0.1" #define BDADDR_BCM20702A0 (&(bdaddr_t) {{0x00, 0xa0, 0x02, 0x70, 0x20, 0x00}}) #define BDADDR_BCM20702A1 (&(bdaddr_t) {{0x00, 0x00, 0xa0, 0x02, 0x70, 0x20}}) #define BDADDR_BCM2076B1 (&(bdaddr_t) {{0x79, 0x56, 0x00, 0xa0, 0x76, 0x20}}) #define BDADDR_BCM43430A0 (&(bdaddr_t) {{0xac, 0x1f, 0x12, 0xa0, 0x43, 0x43}}) #define BDADDR_BCM43430A1 (&(bdaddr_t) {{0xaa, 0xaa, 0xaa, 0xaa, 0xaa, 0xaa}}) #define BDADDR_BCM4324B3 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb3, 0x24, 0x43}}) #define BDADDR_BCM4330B1 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb1, 0x30, 0x43}}) #define BDADDR_BCM4334B0 (&(bdaddr_t) {{0x00, 0x00, 0x00, 0xb0, 0x34, 0x43}}) #define BDADDR_BCM4345C5 (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0xc5, 0x45, 0x43}}) #define BDADDR_BCM43341B (&(bdaddr_t) {{0xac, 0x1f, 0x00, 0x1b, 0x34, 0x43}}) #define BCM_FW_NAME_LEN 64 #define BCM_FW_NAME_COUNT_MAX 4 /* For kmalloc-ing the fw-name array instead of putting it on the stack */ typedef char bcm_fw_name[BCM_FW_NAME_LEN]; #ifdef CONFIG_EFI static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev) { efi_guid_t guid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, 0xb5, 0x1f, 0x43, 0x26, 0x81, 0x23, 0xd1, 0x13); bdaddr_t efi_bdaddr, bdaddr; efi_status_t status; unsigned long len; int ret; if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) return -EOPNOTSUPP; len = sizeof(efi_bdaddr); status = efi.get_variable(L"BDADDR", &guid, NULL, &len, &efi_bdaddr); if (status != EFI_SUCCESS) return -ENXIO; if (len != sizeof(efi_bdaddr)) return -EIO; baswap(&bdaddr, &efi_bdaddr); ret = btbcm_set_bdaddr(hdev, &bdaddr); if (ret) return ret; bt_dev_info(hdev, "BCM: Using EFI device address (%pMR)", &bdaddr); return 0; } #else static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev) { return -EOPNOTSUPP; } #endif int btbcm_check_bdaddr(struct hci_dev *hdev) { struct hci_rp_read_bd_addr *bda; struct sk_buff *skb; skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { int err = PTR_ERR(skb); bt_dev_err(hdev, "BCM: Reading device address failed (%d)", err); return err; } if (skb->len != sizeof(*bda)) { bt_dev_err(hdev, "BCM: Device address length mismatch"); kfree_skb(skb); return -EIO; } bda = (struct hci_rp_read_bd_addr *)skb->data; /* Check if the address indicates a controller with either an * invalid or default address. In both cases the device needs * to be marked as not having a valid address. * * The address 00:20:70:02:A0:00 indicates a BCM20702A0 controller * with no configured address. * * The address 20:70:02:A0:00:00 indicates a BCM20702A1 controller * with no configured address. * * The address 20:76:A0:00:56:79 indicates a BCM2076B1 controller * with no configured address. * * The address 43:24:B3:00:00:00 indicates a BCM4324B3 controller * with waiting for configuration state. * * The address 43:30:B1:00:00:00 indicates a BCM4330B1 controller * with waiting for configuration state. * * The address 43:43:A0:12:1F:AC indicates a BCM43430A0 controller * with no configured address. * * The address AA:AA:AA:AA:AA:AA indicates a BCM43430A1 controller * with no configured address. 
*/ if (!bacmp(&bda->bdaddr, BDADDR_BCM20702A0) || !bacmp(&bda->bdaddr, BDADDR_BCM20702A1) || !bacmp(&bda->bdaddr, BDADDR_BCM2076B1) || !bacmp(&bda->bdaddr, BDADDR_BCM4324B3) || !bacmp(&bda->bdaddr, BDADDR_BCM4330B1) || !bacmp(&bda->bdaddr, BDADDR_BCM4334B0) || !bacmp(&bda->bdaddr, BDADDR_BCM4345C5) || !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) || !bacmp(&bda->bdaddr, BDADDR_BCM43430A1) || !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) { /* Try falling back to BDADDR EFI variable */ if (btbcm_set_bdaddr_from_efi(hdev) != 0) { bt_dev_info(hdev, "BCM: Using default device address (%pMR)", &bda->bdaddr); set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); } } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btbcm_check_bdaddr); int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; int err; skb = __hci_cmd_sync(hdev, 0xfc01, 6, bdaddr, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "BCM: Change address command failed (%d)", err); return err; } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btbcm_set_bdaddr); int btbcm_read_pcm_int_params(struct hci_dev *hdev, struct bcm_set_pcm_int_params *params) { struct sk_buff *skb; int err = 0; skb = __hci_cmd_sync(hdev, 0xfc1d, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "BCM: Read PCM int params failed (%d)", err); return err; } if (skb->len != 6 || skb->data[0]) { bt_dev_err(hdev, "BCM: Read PCM int params length mismatch"); kfree_skb(skb); return -EIO; } if (params) memcpy(params, skb->data + 1, 5); kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btbcm_read_pcm_int_params); int btbcm_write_pcm_int_params(struct hci_dev *hdev, const struct bcm_set_pcm_int_params *params) { struct sk_buff *skb; int err; skb = __hci_cmd_sync(hdev, 0xfc1c, 5, params, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "BCM: Write PCM int params failed (%d)", err); return err; } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btbcm_write_pcm_int_params); int btbcm_patchram(struct hci_dev *hdev, const struct firmware *fw) { const struct hci_command_hdr *cmd; const u8 *fw_ptr; size_t fw_size; struct sk_buff *skb; u16 opcode; int err = 0; /* Start Download */ skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "BCM: Download Minidrv command failed (%d)", err); goto done; } kfree_skb(skb); /* 50 msec delay after Download Minidrv completes */ msleep(50); fw_ptr = fw->data; fw_size = fw->size; while (fw_size >= sizeof(*cmd)) { const u8 *cmd_param; cmd = (struct hci_command_hdr *)fw_ptr; fw_ptr += sizeof(*cmd); fw_size -= sizeof(*cmd); if (fw_size < cmd->plen) { bt_dev_err(hdev, "BCM: Patch is corrupted"); err = -EINVAL; goto done; } cmd_param = fw_ptr; fw_ptr += cmd->plen; fw_size -= cmd->plen; opcode = le16_to_cpu(cmd->opcode); skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "BCM: Patch command %04x failed (%d)", opcode, err); goto done; } kfree_skb(skb); } /* 250 msec delay after Launch Ram completes */ msleep(250); done: return err; } EXPORT_SYMBOL(btbcm_patchram); static int btbcm_reset(struct hci_dev *hdev) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { int err = PTR_ERR(skb); bt_dev_err(hdev, "BCM: Reset failed (%d)", err); return err; } kfree_skb(skb); /* 100 msec delay for module to complete reset process */ msleep(100); return 0; } static struct sk_buff 
*btbcm_read_local_name(struct hci_dev *hdev) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "BCM: Reading local name failed (%ld)", PTR_ERR(skb)); return skb; } if (skb->len != sizeof(struct hci_rp_read_local_name)) { bt_dev_err(hdev, "BCM: Local name length mismatch"); kfree_skb(skb); return ERR_PTR(-EIO); } return skb; } static struct sk_buff *btbcm_read_local_version(struct hci_dev *hdev) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "BCM: Reading local version info failed (%ld)", PTR_ERR(skb)); return skb; } if (skb->len != sizeof(struct hci_rp_read_local_version)) { bt_dev_err(hdev, "BCM: Local version length mismatch"); kfree_skb(skb); return ERR_PTR(-EIO); } return skb; } static struct sk_buff *btbcm_read_verbose_config(struct hci_dev *hdev) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, 0xfc79, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "BCM: Read verbose config info failed (%ld)", PTR_ERR(skb)); return skb; } if (skb->len != 7) { bt_dev_err(hdev, "BCM: Verbose config length mismatch"); kfree_skb(skb); return ERR_PTR(-EIO); } return skb; } static struct sk_buff *btbcm_read_controller_features(struct hci_dev *hdev) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, 0xfc6e, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "BCM: Read controller features failed (%ld)", PTR_ERR(skb)); return skb; } if (skb->len != 9) { bt_dev_err(hdev, "BCM: Controller features length mismatch"); kfree_skb(skb); return ERR_PTR(-EIO); } return skb; } static struct sk_buff *btbcm_read_usb_product(struct hci_dev *hdev) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, 0xfc5a, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "BCM: Read USB product info failed (%ld)", PTR_ERR(skb)); return skb; } if (skb->len != 5) { bt_dev_err(hdev, "BCM: USB product length mismatch"); kfree_skb(skb); return ERR_PTR(-EIO); } return skb; } static const struct dmi_system_id disable_broken_read_transmit_power[] = { { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,1"), }, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,2"), }, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro16,4"), }, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir8,1"), }, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir8,2"), }, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,1"), }, }, { .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Apple Inc."), DMI_MATCH(DMI_PRODUCT_NAME, "iMac20,2"), }, }, { } }; static int btbcm_read_info(struct hci_dev *hdev) { struct sk_buff *skb; /* Read Verbose Config Version Info */ skb = btbcm_read_verbose_config(hdev); if (IS_ERR(skb)) return PTR_ERR(skb); bt_dev_info(hdev, "BCM: chip id %u", skb->data[1]); kfree_skb(skb); return 0; } static int btbcm_print_controller_features(struct hci_dev *hdev) { struct sk_buff *skb; /* Read Controller Features */ skb = btbcm_read_controller_features(hdev); if (IS_ERR(skb)) return PTR_ERR(skb); bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]); kfree_skb(skb); /* Read DMI and disable broken Read LE Min/Max Tx Power */ if 
(dmi_first_match(disable_broken_read_transmit_power)) set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks); return 0; } static int btbcm_print_local_name(struct hci_dev *hdev) { struct sk_buff *skb; /* Read Local Name */ skb = btbcm_read_local_name(hdev); if (IS_ERR(skb)) return PTR_ERR(skb); bt_dev_info(hdev, "%s", (char *)(skb->data + 1)); kfree_skb(skb); return 0; } struct bcm_subver_table { u16 subver; const char *name; }; static const struct bcm_subver_table bcm_uart_subver_table[] = { { 0x1111, "BCM4362A2" }, /* 000.017.017 */ { 0x4103, "BCM4330B1" }, /* 002.001.003 */ { 0x410d, "BCM4334B0" }, /* 002.001.013 */ { 0x410e, "BCM43341B0" }, /* 002.001.014 */ { 0x4204, "BCM2076B1" }, /* 002.002.004 */ { 0x4406, "BCM4324B3" }, /* 002.004.006 */ { 0x4606, "BCM4324B5" }, /* 002.006.006 */ { 0x6109, "BCM4335C0" }, /* 003.001.009 */ { 0x610c, "BCM4354" }, /* 003.001.012 */ { 0x2122, "BCM4343A0" }, /* 001.001.034 */ { 0x2209, "BCM43430A1" }, /* 001.002.009 */ { 0x6119, "BCM4345C0" }, /* 003.001.025 */ { 0x6606, "BCM4345C5" }, /* 003.006.006 */ { 0x230f, "BCM4356A2" }, /* 001.003.015 */ { 0x220e, "BCM20702A1" }, /* 001.002.014 */ { 0x420d, "BCM4349B1" }, /* 002.002.013 */ { 0x420e, "BCM4349B1" }, /* 002.002.014 */ { 0x4217, "BCM4329B1" }, /* 002.002.023 */ { 0x6106, "BCM4359C0" }, /* 003.001.006 */ { 0x4106, "BCM4335A0" }, /* 002.001.006 */ { 0x410c, "BCM43430B0" }, /* 002.001.012 */ { 0x2119, "BCM4373A0" }, /* 001.001.025 */ { } }; static const struct bcm_subver_table bcm_usb_subver_table[] = { { 0x2105, "BCM20703A1" }, /* 001.001.005 */ { 0x210b, "BCM43142A0" }, /* 001.001.011 */ { 0x2112, "BCM4314A0" }, /* 001.001.018 */ { 0x2118, "BCM20702A0" }, /* 001.001.024 */ { 0x2126, "BCM4335A0" }, /* 001.001.038 */ { 0x220e, "BCM20702A1" }, /* 001.002.014 */ { 0x230f, "BCM4356A2" }, /* 001.003.015 */ { 0x4106, "BCM4335B0" }, /* 002.001.006 */ { 0x410e, "BCM20702B0" }, /* 002.001.014 */ { 0x6109, "BCM4335C0" }, /* 003.001.009 */ { 0x610c, "BCM4354" }, /* 003.001.012 */ { 0x6607, "BCM4350C5" }, /* 003.006.007 */ { } }; /* * This currently only looks up the device tree board appendix, * but can be expanded to other mechanisms. 
*/ static const char *btbcm_get_board_name(struct device *dev) { #ifdef CONFIG_OF struct device_node *root; char *board_type; const char *tmp; int len; int i; root = of_find_node_by_path("/"); if (!root) return NULL; if (of_property_read_string_index(root, "compatible", 0, &tmp)) return NULL; /* get rid of any '/' in the compatible string */ len = strlen(tmp) + 1; board_type = devm_kzalloc(dev, len, GFP_KERNEL); strscpy(board_type, tmp, len); for (i = 0; i < len; i++) { if (board_type[i] == '/') board_type[i] = '-'; } of_node_put(root); return board_type; #else return NULL; #endif } int btbcm_initialize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode) { u16 subver, rev, pid, vid; struct sk_buff *skb; struct hci_rp_read_local_version *ver; const struct bcm_subver_table *bcm_subver_table; const char *hw_name = NULL; const char *board_name; char postfix[16] = ""; int fw_name_count = 0; bcm_fw_name *fw_name; const struct firmware *fw; int i, err; board_name = btbcm_get_board_name(&hdev->dev); /* Reset */ err = btbcm_reset(hdev); if (err) return err; /* Read Local Version Info */ skb = btbcm_read_local_version(hdev); if (IS_ERR(skb)) return PTR_ERR(skb); ver = (struct hci_rp_read_local_version *)skb->data; rev = le16_to_cpu(ver->hci_rev); subver = le16_to_cpu(ver->lmp_subver); kfree_skb(skb); /* Read controller information */ if (!(*fw_load_done)) { err = btbcm_read_info(hdev); if (err) return err; } if (!use_autobaud_mode) { err = btbcm_print_controller_features(hdev); if (err) return err; err = btbcm_print_local_name(hdev); if (err) return err; } bcm_subver_table = (hdev->bus == HCI_USB) ? bcm_usb_subver_table : bcm_uart_subver_table; for (i = 0; bcm_subver_table[i].name; i++) { if (subver == bcm_subver_table[i].subver) { hw_name = bcm_subver_table[i].name; break; } } bt_dev_info(hdev, "%s (%3.3u.%3.3u.%3.3u) build %4.4u", hw_name ? hw_name : "BCM", (subver & 0xe000) >> 13, (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff); if (*fw_load_done) return 0; if (hdev->bus == HCI_USB) { /* Read USB Product Info */ skb = btbcm_read_usb_product(hdev); if (IS_ERR(skb)) return PTR_ERR(skb); vid = get_unaligned_le16(skb->data + 1); pid = get_unaligned_le16(skb->data + 3); kfree_skb(skb); snprintf(postfix, sizeof(postfix), "-%4.4x-%4.4x", vid, pid); } fw_name = kmalloc(BCM_FW_NAME_COUNT_MAX * BCM_FW_NAME_LEN, GFP_KERNEL); if (!fw_name) return -ENOMEM; if (hw_name) { if (board_name) { snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, "brcm/%s%s.%s.hcd", hw_name, postfix, board_name); fw_name_count++; } snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, "brcm/%s%s.hcd", hw_name, postfix); fw_name_count++; } if (board_name) { snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, "brcm/BCM%s.%s.hcd", postfix, board_name); fw_name_count++; } snprintf(fw_name[fw_name_count], BCM_FW_NAME_LEN, "brcm/BCM%s.hcd", postfix); fw_name_count++; for (i = 0; i < fw_name_count; i++) { err = firmware_request_nowarn(&fw, fw_name[i], &hdev->dev); if (err == 0) { bt_dev_info(hdev, "%s '%s' Patch", hw_name ? 
hw_name : "BCM", fw_name[i]); *fw_load_done = true; break; } } if (*fw_load_done) { err = btbcm_patchram(hdev, fw); if (err) bt_dev_info(hdev, "BCM: Patch failed (%d)", err); release_firmware(fw); } else { bt_dev_err(hdev, "BCM: firmware Patch file not found, tried:"); for (i = 0; i < fw_name_count; i++) bt_dev_err(hdev, "BCM: '%s'", fw_name[i]); } kfree(fw_name); return 0; } EXPORT_SYMBOL_GPL(btbcm_initialize); int btbcm_finalize(struct hci_dev *hdev, bool *fw_load_done, bool use_autobaud_mode) { int err; /* Re-initialize if necessary */ if (*fw_load_done) { err = btbcm_initialize(hdev, fw_load_done, use_autobaud_mode); if (err) return err; } btbcm_check_bdaddr(hdev); set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); return 0; } EXPORT_SYMBOL_GPL(btbcm_finalize); int btbcm_setup_patchram(struct hci_dev *hdev) { bool fw_load_done = false; bool use_autobaud_mode = false; int err; /* Initialize */ err = btbcm_initialize(hdev, &fw_load_done, use_autobaud_mode); if (err) return err; /* Re-initialize after loading Patch */ return btbcm_finalize(hdev, &fw_load_done, use_autobaud_mode); } EXPORT_SYMBOL_GPL(btbcm_setup_patchram); int btbcm_setup_apple(struct hci_dev *hdev) { struct sk_buff *skb; int err; /* Reset */ err = btbcm_reset(hdev); if (err) return err; /* Read Verbose Config Version Info */ skb = btbcm_read_verbose_config(hdev); if (!IS_ERR(skb)) { bt_dev_info(hdev, "BCM: chip id %u build %4.4u", skb->data[1], get_unaligned_le16(skb->data + 5)); kfree_skb(skb); } /* Read USB Product Info */ skb = btbcm_read_usb_product(hdev); if (!IS_ERR(skb)) { bt_dev_info(hdev, "BCM: product %4.4x:%4.4x", get_unaligned_le16(skb->data + 1), get_unaligned_le16(skb->data + 3)); kfree_skb(skb); } /* Read Controller Features */ skb = btbcm_read_controller_features(hdev); if (!IS_ERR(skb)) { bt_dev_info(hdev, "BCM: features 0x%2.2x", skb->data[1]); kfree_skb(skb); } /* Read Local Name */ skb = btbcm_read_local_name(hdev); if (!IS_ERR(skb)) { bt_dev_info(hdev, "%s", (char *)(skb->data + 1)); kfree_skb(skb); } set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); return 0; } EXPORT_SYMBOL_GPL(btbcm_setup_apple); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth support for Broadcom devices ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
linux-master
drivers/bluetooth/btbcm.c
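/*
 * Editor's illustrative sketch (not part of btbcm.c): decoding the LMP
 * subversion the way btbcm_initialize() above formats it, i.e. a 3-bit
 * major, 5-bit minor and 8-bit revision packed into one 16-bit word.
 * The sample value and expected output come from the
 * bcm_uart_subver_table entry for the BCM4345C5.
 */
#include <stdio.h>

static void bcm_print_subver(unsigned int subver)
{
	printf("%03u.%03u.%03u\n",
	       (subver & 0xe000) >> 13,	/* major: top 3 bits */
	       (subver & 0x1f00) >> 8,	/* minor: next 5 bits */
	       (subver & 0x00ff));	/* revision: low byte */
}

int main(void)
{
	bcm_print_subver(0x6606);	/* BCM4345C5 -> 003.006.006 */
	return 0;
}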
// SPDX-License-Identifier: ISC /* Copyright (C) 2021 MediaTek Inc. * */ #include <linux/module.h> #include <linux/firmware.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btmtk.h" #define VERSION "0.1" /* These definitions are for the mt79xx ROM patch download */ #define MTK_FW_ROM_PATCH_HEADER_SIZE 32 #define MTK_FW_ROM_PATCH_GD_SIZE 64 #define MTK_FW_ROM_PATCH_SEC_MAP_SIZE 64 #define MTK_SEC_MAP_COMMON_SIZE 12 #define MTK_SEC_MAP_NEED_SEND_SIZE 52 struct btmtk_patch_header { u8 datetime[16]; u8 platform[4]; __le16 hwver; __le16 swver; __le32 magicnum; } __packed; struct btmtk_global_desc { __le32 patch_ver; __le32 sub_sys; __le32 feature_opt; __le32 section_num; } __packed; struct btmtk_section_map { __le32 sectype; __le32 secoffset; __le32 secsize; union { __le32 u4SecSpec[13]; struct { __le32 dlAddr; __le32 dlsize; __le32 seckeyidx; __le32 alignlen; __le32 sectype; __le32 dlmodecrctype; __le32 crc; __le32 reserved[6]; } bin_info_spec; }; } __packed; static void btmtk_coredump(struct hci_dev *hdev) { int err; err = __hci_cmd_send(hdev, 0xfd5b, 0, NULL); if (err < 0) bt_dev_err(hdev, "Coredump failed (%d)", err); } static void btmtk_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) { struct btmediatek_data *data = hci_get_priv(hdev); char buf[80]; snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n", data->dev_id); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Firmware Version: 0x%X\n", data->cd_info.fw_version); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Driver: %s\n", data->cd_info.driver_name); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Vendor: MediaTek\n"); skb_put_data(skb, buf, strlen(buf)); } static void btmtk_coredump_notify(struct hci_dev *hdev, int state) { struct btmediatek_data *data = hci_get_priv(hdev); switch (state) { case HCI_DEVCOREDUMP_IDLE: data->cd_info.state = HCI_DEVCOREDUMP_IDLE; break; case HCI_DEVCOREDUMP_ACTIVE: data->cd_info.state = HCI_DEVCOREDUMP_ACTIVE; break; case HCI_DEVCOREDUMP_TIMEOUT: case HCI_DEVCOREDUMP_ABORT: case HCI_DEVCOREDUMP_DONE: data->cd_info.state = HCI_DEVCOREDUMP_IDLE; btmtk_reset_sync(hdev); break; } } int btmtk_setup_firmware_79xx(struct hci_dev *hdev, const char *fwname, wmt_cmd_sync_func_t wmt_cmd_sync) { struct btmtk_hci_wmt_params wmt_params; struct btmtk_patch_header *hdr; struct btmtk_global_desc *globaldesc = NULL; struct btmtk_section_map *sectionmap; const struct firmware *fw; const u8 *fw_ptr; const u8 *fw_bin_ptr; int err, dlen, i, status; u8 flag, first_block, retry; u32 section_num, dl_size, section_offset; u8 cmd[64]; err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { bt_dev_err(hdev, "Failed to load firmware file (%d)", err); return err; } fw_ptr = fw->data; fw_bin_ptr = fw_ptr; hdr = (struct btmtk_patch_header *)fw_ptr; globaldesc = (struct btmtk_global_desc *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE); section_num = le32_to_cpu(globaldesc->section_num); bt_dev_info(hdev, "HW/SW Version: 0x%04x%04x, Build Time: %s", le16_to_cpu(hdr->hwver), le16_to_cpu(hdr->swver), hdr->datetime); for (i = 0; i < section_num; i++) { first_block = 1; fw_ptr = fw_bin_ptr; sectionmap = (struct btmtk_section_map *)(fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE + MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i); section_offset = le32_to_cpu(sectionmap->secoffset); dl_size = le32_to_cpu(sectionmap->bin_info_spec.dlsize); if (dl_size > 0) { retry = 20; while (retry > 0) { cmd[0] = 0; /* 0 means legacy dl mode. 
*/ memcpy(cmd + 1, fw_ptr + MTK_FW_ROM_PATCH_HEADER_SIZE + MTK_FW_ROM_PATCH_GD_SIZE + MTK_FW_ROM_PATCH_SEC_MAP_SIZE * i + MTK_SEC_MAP_COMMON_SIZE, MTK_SEC_MAP_NEED_SEND_SIZE + 1); wmt_params.op = BTMTK_WMT_PATCH_DWNLD; wmt_params.status = &status; wmt_params.flag = 0; wmt_params.dlen = MTK_SEC_MAP_NEED_SEND_SIZE + 1; wmt_params.data = &cmd; err = wmt_cmd_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)", err); goto err_release_fw; } if (status == BTMTK_WMT_PATCH_UNDONE) { break; } else if (status == BTMTK_WMT_PATCH_PROGRESS) { msleep(100); retry--; } else if (status == BTMTK_WMT_PATCH_DONE) { goto next_section; } else { bt_dev_err(hdev, "Failed wmt patch dwnld status (%d)", status); err = -EIO; goto err_release_fw; } } fw_ptr += section_offset; wmt_params.op = BTMTK_WMT_PATCH_DWNLD; wmt_params.status = NULL; while (dl_size > 0) { dlen = min_t(int, 250, dl_size); if (first_block == 1) { flag = 1; first_block = 0; } else if (dl_size - dlen <= 0) { flag = 3; } else { flag = 2; } wmt_params.flag = flag; wmt_params.dlen = dlen; wmt_params.data = fw_ptr; err = wmt_cmd_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)", err); goto err_release_fw; } dl_size -= dlen; fw_ptr += dlen; } } next_section: continue; } /* Wait a few moments for the firmware activation to complete */ usleep_range(100000, 120000); err_release_fw: release_firmware(fw); return err; } EXPORT_SYMBOL_GPL(btmtk_setup_firmware_79xx); int btmtk_setup_firmware(struct hci_dev *hdev, const char *fwname, wmt_cmd_sync_func_t wmt_cmd_sync) { struct btmtk_hci_wmt_params wmt_params; const struct firmware *fw; const u8 *fw_ptr; size_t fw_size; int err, dlen; u8 flag, param; err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { bt_dev_err(hdev, "Failed to load firmware file (%d)", err); return err; } /* Power on data RAM the firmware relies on. 
*/ param = 1; wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 3; wmt_params.dlen = sizeof(param); wmt_params.data = &param; wmt_params.status = NULL; err = wmt_cmd_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to power on data RAM (%d)", err); goto err_release_fw; } fw_ptr = fw->data; fw_size = fw->size; /* The patch header is 30 bytes and should be skipped */ if (fw_size < 30) { err = -EINVAL; goto err_release_fw; } fw_size -= 30; fw_ptr += 30; flag = 1; wmt_params.op = BTMTK_WMT_PATCH_DWNLD; wmt_params.status = NULL; while (fw_size > 0) { dlen = min_t(int, 250, fw_size); /* Tell device the position in sequence */ if (fw_size - dlen <= 0) flag = 3; else if (fw_size < fw->size - 30) flag = 2; wmt_params.flag = flag; wmt_params.dlen = dlen; wmt_params.data = fw_ptr; err = wmt_cmd_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt patch dwnld (%d)", err); goto err_release_fw; } fw_size -= dlen; fw_ptr += dlen; } wmt_params.op = BTMTK_WMT_RST; wmt_params.flag = 4; wmt_params.dlen = 0; wmt_params.data = NULL; wmt_params.status = NULL; /* Activate the function the firmware provides */ err = wmt_cmd_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt rst (%d)", err); goto err_release_fw; } /* Wait a few moments for the firmware activation to complete */ usleep_range(10000, 12000); err_release_fw: release_firmware(fw); return err; } EXPORT_SYMBOL_GPL(btmtk_setup_firmware); int btmtk_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; long ret; skb = __hci_cmd_sync(hdev, 0xfc1a, 6, bdaddr, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { ret = PTR_ERR(skb); bt_dev_err(hdev, "changing Mediatek device address failed (%ld)", ret); return ret; } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btmtk_set_bdaddr); void btmtk_reset_sync(struct hci_dev *hdev) { struct btmediatek_data *reset_work = hci_get_priv(hdev); int err; hci_dev_lock(hdev); err = hci_cmd_sync_queue(hdev, reset_work->reset_sync, NULL, NULL); if (err) bt_dev_err(hdev, "failed to reset (%d)", err); hci_dev_unlock(hdev); } EXPORT_SYMBOL_GPL(btmtk_reset_sync); int btmtk_register_coredump(struct hci_dev *hdev, const char *name, u32 fw_version) { struct btmediatek_data *data = hci_get_priv(hdev); if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) return -EOPNOTSUPP; data->cd_info.fw_version = fw_version; data->cd_info.state = HCI_DEVCOREDUMP_IDLE; data->cd_info.driver_name = name; return hci_devcd_register(hdev, btmtk_coredump, btmtk_coredump_hdr, btmtk_coredump_notify); } EXPORT_SYMBOL_GPL(btmtk_register_coredump); int btmtk_process_coredump(struct hci_dev *hdev, struct sk_buff *skb) { struct btmediatek_data *data = hci_get_priv(hdev); int err; if (!IS_ENABLED(CONFIG_DEV_COREDUMP)) return 0; switch (data->cd_info.state) { case HCI_DEVCOREDUMP_IDLE: err = hci_devcd_init(hdev, MTK_COREDUMP_SIZE); if (err < 0) break; data->cd_info.cnt = 0; /* The coredump is expected to complete within 5 seconds */ schedule_delayed_work(&hdev->dump.dump_timeout, msecs_to_jiffies(5000)); fallthrough; case HCI_DEVCOREDUMP_ACTIVE: default: err = hci_devcd_append(hdev, skb); if (err < 0) break; data->cd_info.cnt++; /* MediaTek coredump data is expected to span more than MTK_COREDUMP_NUM packets */ if (data->cd_info.cnt > MTK_COREDUMP_NUM && skb->len > MTK_COREDUMP_END_LEN) if (!memcmp((char *)&skb->data[skb->len - MTK_COREDUMP_END_LEN], MTK_COREDUMP_END, MTK_COREDUMP_END_LEN - 1)) { bt_dev_info(hdev, "Mediatek coredump end"); hci_devcd_complete(hdev); } break; } if (err < 0) kfree_skb(skb); return err; } 
EXPORT_SYMBOL_GPL(btmtk_process_coredump); MODULE_AUTHOR("Sean Wang <[email protected]>"); MODULE_AUTHOR("Mark Chen <[email protected]>"); MODULE_DESCRIPTION("Bluetooth support for MediaTek devices ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(FIRMWARE_MT7622); MODULE_FIRMWARE(FIRMWARE_MT7663); MODULE_FIRMWARE(FIRMWARE_MT7668); MODULE_FIRMWARE(FIRMWARE_MT7961); MODULE_FIRMWARE(FIRMWARE_MT7925);
linux-master
drivers/bluetooth/btmtk.c
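/*
 * Editor's illustrative sketch (not part of btmtk.c): the fragment
 * flagging scheme used by btmtk_setup_firmware_79xx() above when it
 * streams a patch section over WMT commands. At most 250 bytes go out
 * per command; flag 1 marks the first fragment, 3 the last and 2
 * everything in between, mirroring the first_block/dl_size logic in
 * the driver.
 */
#include <stddef.h>
#include <stdio.h>

static void stream_section(size_t dl_size)
{
	int first_block = 1;

	while (dl_size > 0) {
		size_t dlen = dl_size < 250 ? dl_size : 250;
		int flag;

		if (first_block) {
			flag = 1;	/* first fragment of the section */
			first_block = 0;
		} else if (dl_size == dlen) {
			flag = 3;	/* last fragment */
		} else {
			flag = 2;	/* middle fragment */
		}
		printf("send %zu bytes, flag %d\n", dlen, flag);
		dl_size -= dlen;
	}
}

int main(void)
{
	stream_section(600);	/* 250/flag 1, 250/flag 2, 100/flag 3 */
	return 0;
}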
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell Bluetooth driver * * Copyright (C) 2009, Marvell International Ltd. **/ #include <linux/module.h> #include <linux/of.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <linux/mmc/sdio_func.h> #include "btmrvl_drv.h" #include "btmrvl_sdio.h" #define VERSION "1.0" /* * This function is called by interface specific interrupt handler. * It updates Power Save & Host Sleep states, and wakes up the main * thread. */ void btmrvl_interrupt(struct btmrvl_private *priv) { priv->adapter->ps_state = PS_AWAKE; priv->adapter->wakeup_tries = 0; priv->adapter->int_count++; if (priv->adapter->hs_state == HS_ACTIVATED) { BT_DBG("BT: HS DEACTIVATED in ISR!"); priv->adapter->hs_state = HS_DEACTIVATED; } wake_up_interruptible(&priv->main_thread.wait_q); } EXPORT_SYMBOL_GPL(btmrvl_interrupt); bool btmrvl_check_evtpkt(struct btmrvl_private *priv, struct sk_buff *skb) { struct hci_event_hdr *hdr = (void *) skb->data; if (hdr->evt == HCI_EV_CMD_COMPLETE) { struct hci_ev_cmd_complete *ec; u16 opcode; ec = (void *) (skb->data + HCI_EVENT_HDR_SIZE); opcode = __le16_to_cpu(ec->opcode); if (priv->btmrvl_dev.sendcmdflag) { priv->btmrvl_dev.sendcmdflag = false; priv->adapter->cmd_complete = true; wake_up_interruptible(&priv->adapter->cmd_wait_q); if (hci_opcode_ogf(opcode) == 0x3F) { BT_DBG("vendor event skipped: opcode=%#4.4x", opcode); kfree_skb(skb); return false; } } } return true; } EXPORT_SYMBOL_GPL(btmrvl_check_evtpkt); int btmrvl_process_event(struct btmrvl_private *priv, struct sk_buff *skb) { struct btmrvl_adapter *adapter = priv->adapter; struct btmrvl_event *event; int ret = 0; event = (struct btmrvl_event *) skb->data; if (event->ec != 0xff) { BT_DBG("Not Marvell Event=%x", event->ec); ret = -EINVAL; goto exit; } switch (event->data[0]) { case BT_EVENT_AUTO_SLEEP_MODE: if (!event->data[2]) { if (event->data[1] == BT_PS_ENABLE) adapter->psmode = 1; else adapter->psmode = 0; BT_DBG("PS Mode:%s", (adapter->psmode) ? "Enable" : "Disable"); } else { BT_DBG("PS Mode command failed"); } break; case BT_EVENT_HOST_SLEEP_CONFIG: if (!event->data[3]) BT_DBG("gpio=%x, gap=%x", event->data[1], event->data[2]); else BT_DBG("HSCFG command failed"); break; case BT_EVENT_HOST_SLEEP_ENABLE: if (!event->data[1]) { adapter->hs_state = HS_ACTIVATED; if (adapter->psmode) adapter->ps_state = PS_SLEEP; wake_up_interruptible(&adapter->event_hs_wait_q); BT_DBG("HS ACTIVATED!"); } else { BT_DBG("HS Enable failed"); } break; case BT_EVENT_MODULE_CFG_REQ: if (priv->btmrvl_dev.sendcmdflag && event->data[1] == MODULE_BRINGUP_REQ) { BT_DBG("EVENT:%s", ((event->data[2] == MODULE_BROUGHT_UP) || (event->data[2] == MODULE_ALREADY_UP)) ? "Bring-up succeed" : "Bring-up failed"); if (event->length > 3 && event->data[3]) priv->btmrvl_dev.dev_type = HCI_AMP; else priv->btmrvl_dev.dev_type = HCI_PRIMARY; BT_DBG("dev_type: %d", priv->btmrvl_dev.dev_type); } else if (priv->btmrvl_dev.sendcmdflag && event->data[1] == MODULE_SHUTDOWN_REQ) { BT_DBG("EVENT:%s", (event->data[2]) ? "Shutdown failed" : "Shutdown succeed"); } else { BT_DBG("BT_CMD_MODULE_CFG_REQ resp for APP"); ret = -EINVAL; } break; case BT_EVENT_POWER_STATE: if (event->data[1] == BT_PS_SLEEP) adapter->ps_state = PS_SLEEP; BT_DBG("EVENT:%s", (adapter->ps_state) ? 
"PS_SLEEP" : "PS_AWAKE"); break; default: BT_DBG("Unknown Event=%d", event->data[0]); ret = -EINVAL; break; } exit: if (!ret) kfree_skb(skb); return ret; } EXPORT_SYMBOL_GPL(btmrvl_process_event); static int btmrvl_send_sync_cmd(struct btmrvl_private *priv, u16 opcode, const void *param, u8 len) { struct sk_buff *skb; struct hci_command_hdr *hdr; if (priv->surprise_removed) { BT_ERR("Card is removed"); return -EFAULT; } skb = bt_skb_alloc(HCI_COMMAND_HDR_SIZE + len, GFP_KERNEL); if (!skb) { BT_ERR("No free skb"); return -ENOMEM; } hdr = skb_put(skb, HCI_COMMAND_HDR_SIZE); hdr->opcode = cpu_to_le16(opcode); hdr->plen = len; if (len) skb_put_data(skb, param, len); hci_skb_pkt_type(skb) = MRVL_VENDOR_PKT; skb_queue_head(&priv->adapter->tx_queue, skb); priv->btmrvl_dev.sendcmdflag = true; priv->adapter->cmd_complete = false; wake_up_interruptible(&priv->main_thread.wait_q); if (!wait_event_interruptible_timeout(priv->adapter->cmd_wait_q, priv->adapter->cmd_complete || priv->surprise_removed, WAIT_UNTIL_CMD_RESP)) return -ETIMEDOUT; if (priv->surprise_removed) return -EFAULT; return 0; } int btmrvl_send_module_cfg_cmd(struct btmrvl_private *priv, u8 subcmd) { int ret; ret = btmrvl_send_sync_cmd(priv, BT_CMD_MODULE_CFG_REQ, &subcmd, 1); if (ret) BT_ERR("module_cfg_cmd(%x) failed", subcmd); return ret; } EXPORT_SYMBOL_GPL(btmrvl_send_module_cfg_cmd); static int btmrvl_enable_sco_routing_to_host(struct btmrvl_private *priv) { int ret; u8 subcmd = 0; ret = btmrvl_send_sync_cmd(priv, BT_CMD_ROUTE_SCO_TO_HOST, &subcmd, 1); if (ret) BT_ERR("BT_CMD_ROUTE_SCO_TO_HOST command failed: %#x", ret); return ret; } int btmrvl_pscan_window_reporting(struct btmrvl_private *priv, u8 subcmd) { struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; int ret; if (!card->support_pscan_win_report) return 0; ret = btmrvl_send_sync_cmd(priv, BT_CMD_PSCAN_WIN_REPORT_ENABLE, &subcmd, 1); if (ret) BT_ERR("PSCAN_WIN_REPORT_ENABLE command failed: %#x", ret); return ret; } EXPORT_SYMBOL_GPL(btmrvl_pscan_window_reporting); int btmrvl_send_hscfg_cmd(struct btmrvl_private *priv) { int ret; u8 param[2]; param[0] = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8; param[1] = (u8) (priv->btmrvl_dev.gpio_gap & 0x00ff); BT_DBG("Sending HSCFG Command, gpio=0x%x, gap=0x%x", param[0], param[1]); ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_CONFIG, param, 2); if (ret) BT_ERR("HSCFG command failed"); return ret; } EXPORT_SYMBOL_GPL(btmrvl_send_hscfg_cmd); int btmrvl_enable_ps(struct btmrvl_private *priv) { int ret; u8 param; if (priv->btmrvl_dev.psmode) param = BT_PS_ENABLE; else param = BT_PS_DISABLE; ret = btmrvl_send_sync_cmd(priv, BT_CMD_AUTO_SLEEP_MODE, &param, 1); if (ret) BT_ERR("PSMODE command failed"); return 0; } EXPORT_SYMBOL_GPL(btmrvl_enable_ps); int btmrvl_enable_hs(struct btmrvl_private *priv) { struct btmrvl_adapter *adapter = priv->adapter; int ret; ret = btmrvl_send_sync_cmd(priv, BT_CMD_HOST_SLEEP_ENABLE, NULL, 0); if (ret) { BT_ERR("Host sleep enable command failed"); return ret; } ret = wait_event_interruptible_timeout(adapter->event_hs_wait_q, adapter->hs_state || priv->surprise_removed, WAIT_UNTIL_HS_STATE_CHANGED); if (ret < 0 || priv->surprise_removed) { BT_ERR("event_hs_wait_q terminated (%d): %d,%d,%d", ret, adapter->hs_state, adapter->ps_state, adapter->wakeup_tries); } else if (!ret) { BT_ERR("hs_enable timeout: %d,%d,%d", adapter->hs_state, adapter->ps_state, adapter->wakeup_tries); ret = -ETIMEDOUT; } else { BT_DBG("host sleep enabled: %d,%d,%d", adapter->hs_state, adapter->ps_state, adapter->wakeup_tries); 
ret = 0; } return ret; } EXPORT_SYMBOL_GPL(btmrvl_enable_hs); int btmrvl_prepare_command(struct btmrvl_private *priv) { int ret = 0; if (priv->btmrvl_dev.hscfgcmd) { priv->btmrvl_dev.hscfgcmd = 0; btmrvl_send_hscfg_cmd(priv); } if (priv->btmrvl_dev.pscmd) { priv->btmrvl_dev.pscmd = 0; btmrvl_enable_ps(priv); } if (priv->btmrvl_dev.hscmd) { priv->btmrvl_dev.hscmd = 0; if (priv->btmrvl_dev.hsmode) { ret = btmrvl_enable_hs(priv); } else { ret = priv->hw_wakeup_firmware(priv); priv->adapter->hs_state = HS_DEACTIVATED; BT_DBG("BT: HS DEACTIVATED due to host activity!"); } } return ret; } static int btmrvl_tx_pkt(struct btmrvl_private *priv, struct sk_buff *skb) { int ret = 0; if (!skb || !skb->data) return -EINVAL; if (!skb->len || ((skb->len + BTM_HEADER_LEN) > BTM_UPLD_SIZE)) { BT_ERR("Tx Error: Bad skb length %d : %d", skb->len, BTM_UPLD_SIZE); return -EINVAL; } skb_push(skb, BTM_HEADER_LEN); /* header type: byte[3] * HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor * header length: byte[2][1][0] */ skb->data[0] = (skb->len & 0x0000ff); skb->data[1] = (skb->len & 0x00ff00) >> 8; skb->data[2] = (skb->len & 0xff0000) >> 16; skb->data[3] = hci_skb_pkt_type(skb); if (priv->hw_host_to_card) ret = priv->hw_host_to_card(priv, skb->data, skb->len); return ret; } static void btmrvl_init_adapter(struct btmrvl_private *priv) { int buf_size; skb_queue_head_init(&priv->adapter->tx_queue); priv->adapter->ps_state = PS_AWAKE; buf_size = ALIGN_SZ(SDIO_BLOCK_SIZE, BTSDIO_DMA_ALIGN); priv->adapter->hw_regs_buf = kzalloc(buf_size, GFP_KERNEL); if (!priv->adapter->hw_regs_buf) { priv->adapter->hw_regs = NULL; BT_ERR("Unable to allocate buffer for hw_regs."); } else { priv->adapter->hw_regs = (u8 *)ALIGN_ADDR(priv->adapter->hw_regs_buf, BTSDIO_DMA_ALIGN); BT_DBG("hw_regs_buf=%p hw_regs=%p", priv->adapter->hw_regs_buf, priv->adapter->hw_regs); } init_waitqueue_head(&priv->adapter->cmd_wait_q); init_waitqueue_head(&priv->adapter->event_hs_wait_q); } static void btmrvl_free_adapter(struct btmrvl_private *priv) { skb_queue_purge(&priv->adapter->tx_queue); kfree(priv->adapter->hw_regs_buf); kfree(priv->adapter); priv->adapter = NULL; } static int btmrvl_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct btmrvl_private *priv = hci_get_drvdata(hdev); BT_DBG("type=%d, len=%d", hci_skb_pkt_type(skb), skb->len); if (priv->adapter->is_suspending || priv->adapter->is_suspended) { BT_ERR("%s: Device is suspending or suspended", __func__); return -EBUSY; } switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; } skb_queue_tail(&priv->adapter->tx_queue, skb); if (!priv->adapter->is_suspended) wake_up_interruptible(&priv->main_thread.wait_q); return 0; } static int btmrvl_flush(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); skb_queue_purge(&priv->adapter->tx_queue); return 0; } static int btmrvl_close(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); skb_queue_purge(&priv->adapter->tx_queue); return 0; } static int btmrvl_open(struct hci_dev *hdev) { return 0; } static int btmrvl_download_cal_data(struct btmrvl_private *priv, u8 *data, int len) { int ret; data[0] = 0x00; data[1] = 0x00; data[2] = 0x00; data[3] = len; print_hex_dump_bytes("Calibration data: ", DUMP_PREFIX_OFFSET, data, BT_CAL_HDR_LEN + len); ret = btmrvl_send_sync_cmd(priv, BT_CMD_LOAD_CONFIG_DATA, data, BT_CAL_HDR_LEN + len); if (ret) BT_ERR("Failed to 
download calibration data"); return 0; } static int btmrvl_check_device_tree(struct btmrvl_private *priv) { struct device_node *dt_node; struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; u8 cal_data[BT_CAL_HDR_LEN + BT_CAL_DATA_SIZE]; int ret = 0; u16 gpio, gap; if (card->plt_of_node) { dt_node = card->plt_of_node; ret = of_property_read_u16(dt_node, "marvell,wakeup-pin", &gpio); if (ret) gpio = (priv->btmrvl_dev.gpio_gap & 0xff00) >> 8; ret = of_property_read_u16(dt_node, "marvell,wakeup-gap-ms", &gap); if (ret) gap = (u8)(priv->btmrvl_dev.gpio_gap & 0x00ff); priv->btmrvl_dev.gpio_gap = (gpio << 8) + gap; ret = of_property_read_u8_array(dt_node, "marvell,cal-data", cal_data + BT_CAL_HDR_LEN, BT_CAL_DATA_SIZE); if (ret) return ret; BT_DBG("Use cal data from device tree"); ret = btmrvl_download_cal_data(priv, cal_data, BT_CAL_DATA_SIZE); if (ret) BT_ERR("Failed to download calibration data"); } return ret; } static int btmrvl_setup(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); int ret; ret = btmrvl_send_module_cfg_cmd(priv, MODULE_BRINGUP_REQ); if (ret) return ret; priv->btmrvl_dev.gpio_gap = 0xfffe; btmrvl_check_device_tree(priv); btmrvl_enable_sco_routing_to_host(priv); btmrvl_pscan_window_reporting(priv, 0x01); priv->btmrvl_dev.psmode = 1; btmrvl_enable_ps(priv); btmrvl_send_hscfg_cmd(priv); return 0; } static int btmrvl_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; long ret; u8 buf[8]; buf[0] = MRVL_VENDOR_PKT; buf[1] = sizeof(bdaddr_t); memcpy(buf + 2, bdaddr, sizeof(bdaddr_t)); skb = __hci_cmd_sync(hdev, BT_CMD_SET_BDADDR, sizeof(buf), buf, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { ret = PTR_ERR(skb); BT_ERR("%s: changing btmrvl device address failed (%ld)", hdev->name, ret); return ret; } kfree_skb(skb); return 0; } static bool btmrvl_wakeup(struct hci_dev *hdev) { struct btmrvl_private *priv = hci_get_drvdata(hdev); struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; return device_may_wakeup(&card->func->dev); } /* * This function handles events generated by firmware, rx data * received from firmware, and tx data sent from the kernel.
*/ static int btmrvl_service_main_thread(void *data) { struct btmrvl_thread *thread = data; struct btmrvl_private *priv = thread->priv; struct btmrvl_adapter *adapter = priv->adapter; wait_queue_entry_t wait; struct sk_buff *skb; ulong flags; init_waitqueue_entry(&wait, current); for (;;) { add_wait_queue(&thread->wait_q, &wait); set_current_state(TASK_INTERRUPTIBLE); if (kthread_should_stop() || priv->surprise_removed) { BT_DBG("main_thread: break from main thread"); break; } if (adapter->wakeup_tries || ((!adapter->int_count) && (!priv->btmrvl_dev.tx_dnld_rdy || skb_queue_empty(&adapter->tx_queue)))) { BT_DBG("main_thread is sleeping..."); schedule(); } set_current_state(TASK_RUNNING); remove_wait_queue(&thread->wait_q, &wait); BT_DBG("main_thread woke up"); if (kthread_should_stop() || priv->surprise_removed) { BT_DBG("main_thread: break from main thread"); break; } spin_lock_irqsave(&priv->driver_lock, flags); if (adapter->int_count) { adapter->int_count = 0; spin_unlock_irqrestore(&priv->driver_lock, flags); priv->hw_process_int_status(priv); } else if (adapter->ps_state == PS_SLEEP && !skb_queue_empty(&adapter->tx_queue)) { spin_unlock_irqrestore(&priv->driver_lock, flags); adapter->wakeup_tries++; priv->hw_wakeup_firmware(priv); continue; } else { spin_unlock_irqrestore(&priv->driver_lock, flags); } if (adapter->ps_state == PS_SLEEP) continue; if (!priv->btmrvl_dev.tx_dnld_rdy || priv->adapter->is_suspended) continue; skb = skb_dequeue(&adapter->tx_queue); if (skb) { if (btmrvl_tx_pkt(priv, skb)) priv->btmrvl_dev.hcidev->stat.err_tx++; else priv->btmrvl_dev.hcidev->stat.byte_tx += skb->len; kfree_skb(skb); } } return 0; } int btmrvl_register_hdev(struct btmrvl_private *priv) { struct hci_dev *hdev = NULL; struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; int ret; hdev = hci_alloc_dev(); if (!hdev) { BT_ERR("Can not allocate HCI device"); goto err_hdev; } priv->btmrvl_dev.hcidev = hdev; hci_set_drvdata(hdev, priv); hdev->bus = HCI_SDIO; hdev->open = btmrvl_open; hdev->close = btmrvl_close; hdev->flush = btmrvl_flush; hdev->send = btmrvl_send_frame; hdev->setup = btmrvl_setup; hdev->set_bdaddr = btmrvl_set_bdaddr; hdev->wakeup = btmrvl_wakeup; SET_HCIDEV_DEV(hdev, &card->func->dev); hdev->dev_type = priv->btmrvl_dev.dev_type; ret = hci_register_dev(hdev); if (ret < 0) { BT_ERR("Can not register HCI device"); goto err_hci_register_dev; } #ifdef CONFIG_DEBUG_FS btmrvl_debugfs_init(hdev); #endif return 0; err_hci_register_dev: hci_free_dev(hdev); err_hdev: /* Stop the thread servicing the interrupts */ kthread_stop(priv->main_thread.task); btmrvl_free_adapter(priv); kfree(priv); return -ENOMEM; } EXPORT_SYMBOL_GPL(btmrvl_register_hdev); struct btmrvl_private *btmrvl_add_card(void *card) { struct btmrvl_private *priv; priv = kzalloc(sizeof(*priv), GFP_KERNEL); if (!priv) { BT_ERR("Can not allocate priv"); goto err_priv; } priv->adapter = kzalloc(sizeof(*priv->adapter), GFP_KERNEL); if (!priv->adapter) { BT_ERR("Allocate buffer for btmrvl_adapter failed!"); goto err_adapter; } btmrvl_init_adapter(priv); BT_DBG("Starting kthread..."); priv->main_thread.priv = priv; spin_lock_init(&priv->driver_lock); init_waitqueue_head(&priv->main_thread.wait_q); priv->main_thread.task = kthread_run(btmrvl_service_main_thread, &priv->main_thread, "btmrvl_main_service"); if (IS_ERR(priv->main_thread.task)) goto err_thread; priv->btmrvl_dev.card = card; priv->btmrvl_dev.tx_dnld_rdy = true; return priv; err_thread: btmrvl_free_adapter(priv); err_adapter: kfree(priv); err_priv: return NULL; } 
EXPORT_SYMBOL_GPL(btmrvl_add_card); int btmrvl_remove_card(struct btmrvl_private *priv) { struct hci_dev *hdev; hdev = priv->btmrvl_dev.hcidev; wake_up_interruptible(&priv->adapter->cmd_wait_q); wake_up_interruptible(&priv->adapter->event_hs_wait_q); kthread_stop(priv->main_thread.task); #ifdef CONFIG_DEBUG_FS btmrvl_debugfs_remove(hdev); #endif hci_unregister_dev(hdev); hci_free_dev(hdev); priv->btmrvl_dev.hcidev = NULL; btmrvl_free_adapter(priv); kfree(priv); return 0; } EXPORT_SYMBOL_GPL(btmrvl_remove_card); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell Bluetooth driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL v2");
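/*
 * Editor's note: the block below is an illustrative, self-contained userspace
 * sketch, not part of btmrvl_main.c. It restates the 4-byte BTM transport
 * header that btmrvl_tx_pkt() above prepends to every frame: bytes [0][1][2]
 * carry the little-endian total length (payload plus header) and byte [3]
 * carries the packet type (1 = HCI_COMMAND, 2 = ACL_DATA, 3 = SCO_DATA,
 * 0xFE = vendor). btm_pack_header() is a hypothetical helper written only
 * for this sketch; it is not a driver function.
 */
#include <stdint.h>
#include <stdio.h>

static void btm_pack_header(uint8_t hdr[4], uint32_t total_len, uint8_t pkt_type)
{
	hdr[0] = total_len & 0xff;		/* length, LSB first */
	hdr[1] = (total_len >> 8) & 0xff;
	hdr[2] = (total_len >> 16) & 0xff;
	hdr[3] = pkt_type;			/* header type byte */
}

int main(void)
{
	uint8_t hdr[4];

	/* A 7-byte HCI command grows to 11 bytes once the header is added. */
	btm_pack_header(hdr, 7 + 4, 1);
	printf("%02x %02x %02x %02x\n", hdr[0], hdr[1], hdr[2], hdr[3]);
	/* Prints "0b 00 00 01": length 11 little-endian, type 1 (command). */
	return 0;
}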
linux-master
drivers/bluetooth/btmrvl_main.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth HCI UART driver for Broadcom devices * * Copyright (C) 2015 Intel Corporation */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/acpi.h> #include <linux/of.h> #include <linux/of_irq.h> #include <linux/property.h> #include <linux/platform_data/x86/apple.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/clk.h> #include <linux/gpio/consumer.h> #include <linux/gpio/machine.h> #include <linux/tty.h> #include <linux/interrupt.h> #include <linux/dmi.h> #include <linux/pm_runtime.h> #include <linux/serdev.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btbcm.h" #include "hci_uart.h" #define BCM_NULL_PKT 0x00 #define BCM_NULL_SIZE 0 #define BCM_LM_DIAG_PKT 0x07 #define BCM_LM_DIAG_SIZE 63 #define BCM_TYPE49_PKT 0x31 #define BCM_TYPE49_SIZE 0 #define BCM_TYPE52_PKT 0x34 #define BCM_TYPE52_SIZE 0 #define BCM_AUTOSUSPEND_DELAY 5000 /* default autosleep delay */ #define BCM_NUM_SUPPLIES 2 /** * struct bcm_device_data - device specific data * @no_early_set_baudrate: Disallow set baudrate before driver setup() * @drive_rts_on_open: drive RTS signal on ->open() when platform requires it * @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable * @max_autobaud_speed: max baudrate supported by device in autobaud mode * @max_speed: max baudrate supported */ struct bcm_device_data { bool no_early_set_baudrate; bool drive_rts_on_open; bool no_uart_clock_set; u32 max_autobaud_speed; u32 max_speed; }; /** * struct bcm_device - device driver resources * @serdev_hu: HCI UART controller struct * @list: bcm_device_list node * @dev: physical UART slave * @name: device name logged by bt_dev_*() functions * @device_wakeup: BT_WAKE pin, * assert = Bluetooth device must wake up or remain awake, * deassert = Bluetooth device may sleep when sleep criteria are met * @shutdown: BT_REG_ON pin, * power up or power down Bluetooth device internal regulators * @reset: BT_RST_N pin, * active low resets the Bluetooth logic core * @set_device_wakeup: callback to toggle BT_WAKE pin * either by accessing @device_wakeup or by calling @btlp * @set_shutdown: callback to toggle BT_REG_ON pin * either by accessing @shutdown or by calling @btpu/@btpd * @btlp: Apple ACPI method to toggle BT_WAKE pin ("Bluetooth Low Power") * @btpu: Apple ACPI method to drive BT_REG_ON pin high ("Bluetooth Power Up") * @btpd: Apple ACPI method to drive BT_REG_ON pin low ("Bluetooth Power Down") * @gpio_count: internal counter for GPIO resources associated with ACPI device * @gpio_int_idx: index in _CRS for GpioInt() resource * @txco_clk: external reference frequency clock used by Bluetooth device * @lpo_clk: external LPO clock used by Bluetooth device * @supplies: VBAT and VDDIO supplies used by Bluetooth device * @res_enabled: whether clocks and supplies are prepared and enabled * @init_speed: default baudrate of Bluetooth device; * the host UART is initially set to this baudrate so that * it can configure the Bluetooth device for @oper_speed * @oper_speed: preferred baudrate of Bluetooth device; * set to 0 if @init_speed is already the preferred baudrate * @irq: interrupt triggered by HOST_WAKE_BT pin * @irq_active_low: whether @irq is active low * @irq_acquired: flag to show if IRQ handler has been assigned * @hu: pointer to HCI UART controller struct, * used to disable flow control during runtime 
suspend and system sleep * @is_suspended: whether flow control is currently disabled * @no_early_set_baudrate: don't set_baudrate before setup() * @drive_rts_on_open: drive RTS signal on ->open() when platform requires it * @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable * @pcm_int_params: keep the initial PCM configuration * @use_autobaud_mode: start Bluetooth device in autobaud mode * @max_autobaud_speed: max baudrate supported by device in autobaud mode */ struct bcm_device { /* Must be the first member, hci_serdev.c expects this. */ struct hci_uart serdev_hu; struct list_head list; struct device *dev; const char *name; struct gpio_desc *device_wakeup; struct gpio_desc *shutdown; struct gpio_desc *reset; int (*set_device_wakeup)(struct bcm_device *, bool); int (*set_shutdown)(struct bcm_device *, bool); #ifdef CONFIG_ACPI acpi_handle btlp, btpu, btpd; int gpio_count; int gpio_int_idx; #endif struct clk *txco_clk; struct clk *lpo_clk; struct regulator_bulk_data supplies[BCM_NUM_SUPPLIES]; bool res_enabled; u32 init_speed; u32 oper_speed; int irq; bool irq_active_low; bool irq_acquired; #ifdef CONFIG_PM struct hci_uart *hu; bool is_suspended; #endif bool no_early_set_baudrate; bool drive_rts_on_open; bool no_uart_clock_set; bool use_autobaud_mode; u8 pcm_int_params[5]; u32 max_autobaud_speed; }; /* generic bcm uart resources */ struct bcm_data { struct sk_buff *rx_skb; struct sk_buff_head txq; struct bcm_device *dev; }; /* List of BCM BT UART devices */ static DEFINE_MUTEX(bcm_device_lock); static LIST_HEAD(bcm_device_list); static int irq_polarity = -1; module_param(irq_polarity, int, 0444); MODULE_PARM_DESC(irq_polarity, "IRQ polarity 0: active-high 1: active-low"); static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed) { if (hu->serdev) serdev_device_set_baudrate(hu->serdev, speed); else hci_uart_set_baudrate(hu, speed); } static int bcm_set_baudrate(struct hci_uart *hu, unsigned int speed) { struct hci_dev *hdev = hu->hdev; struct bcm_data *bcm = hu->priv; struct sk_buff *skb; struct bcm_update_uart_baud_rate param; if (speed > 3000000 && !bcm->dev->no_uart_clock_set) { struct bcm_write_uart_clock_setting clock; clock.type = BCM_UART_CLOCK_48MHZ; bt_dev_dbg(hdev, "Set Controller clock (%d)", clock.type); /* This Broadcom specific command changes the UART's controller * clock for baud rate > 3000000. */ skb = __hci_cmd_sync(hdev, 0xfc45, 1, &clock, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { int err = PTR_ERR(skb); bt_dev_err(hdev, "BCM: failed to write clock (%d)", err); return err; } kfree_skb(skb); } bt_dev_dbg(hdev, "Set Controller UART speed to %d bit/s", speed); param.zero = cpu_to_le16(0); param.baud_rate = cpu_to_le32(speed); /* This Broadcom specific command changes the UART's controller baud * rate. 
*/ skb = __hci_cmd_sync(hdev, 0xfc18, sizeof(param), &param, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { int err = PTR_ERR(skb); bt_dev_err(hdev, "BCM: failed to write update baudrate (%d)", err); return err; } kfree_skb(skb); return 0; } /* bcm_device_exists should be protected by bcm_device_lock */ static bool bcm_device_exists(struct bcm_device *device) { struct list_head *p; #ifdef CONFIG_PM /* Devices using serdev always exist */ if (device && device->hu && device->hu->serdev) return true; #endif list_for_each(p, &bcm_device_list) { struct bcm_device *dev = list_entry(p, struct bcm_device, list); if (device == dev) return true; } return false; } static int bcm_gpio_set_power(struct bcm_device *dev, bool powered) { int err; if (powered && !dev->res_enabled) { /* Intel Macs use bcm_apple_get_resources() and don't * have regulator supplies configured. */ if (dev->supplies[0].supply) { err = regulator_bulk_enable(BCM_NUM_SUPPLIES, dev->supplies); if (err) return err; } /* LPO clock needs to be 32.768 kHz */ err = clk_set_rate(dev->lpo_clk, 32768); if (err) { dev_err(dev->dev, "Could not set LPO clock rate\n"); goto err_regulator_disable; } err = clk_prepare_enable(dev->lpo_clk); if (err) goto err_regulator_disable; err = clk_prepare_enable(dev->txco_clk); if (err) goto err_lpo_clk_disable; } err = dev->set_shutdown(dev, powered); if (err) goto err_txco_clk_disable; err = dev->set_device_wakeup(dev, powered); if (err) goto err_revert_shutdown; if (!powered && dev->res_enabled) { clk_disable_unprepare(dev->txco_clk); clk_disable_unprepare(dev->lpo_clk); /* Intel Macs use bcm_apple_get_resources() and don't * have regulator supplies configured. */ if (dev->supplies[0].supply) regulator_bulk_disable(BCM_NUM_SUPPLIES, dev->supplies); } /* wait for device to power on and come out of reset */ usleep_range(100000, 120000); dev->res_enabled = powered; return 0; err_revert_shutdown: dev->set_shutdown(dev, !powered); err_txco_clk_disable: if (powered && !dev->res_enabled) clk_disable_unprepare(dev->txco_clk); err_lpo_clk_disable: if (powered && !dev->res_enabled) clk_disable_unprepare(dev->lpo_clk); err_regulator_disable: if (powered && !dev->res_enabled) regulator_bulk_disable(BCM_NUM_SUPPLIES, dev->supplies); return err; } #ifdef CONFIG_PM static irqreturn_t bcm_host_wake(int irq, void *data) { struct bcm_device *bdev = data; bt_dev_dbg(bdev, "Host wake IRQ"); pm_runtime_get(bdev->dev); pm_runtime_mark_last_busy(bdev->dev); pm_runtime_put_autosuspend(bdev->dev); return IRQ_HANDLED; } static int bcm_request_irq(struct bcm_data *bcm) { struct bcm_device *bdev = bcm->dev; int err; mutex_lock(&bcm_device_lock); if (!bcm_device_exists(bdev)) { err = -ENODEV; goto unlock; } if (bdev->irq <= 0) { err = -EOPNOTSUPP; goto unlock; } err = devm_request_irq(bdev->dev, bdev->irq, bcm_host_wake, bdev->irq_active_low ? 
IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING, "host_wake", bdev); if (err) { bdev->irq = err; goto unlock; } bdev->irq_acquired = true; device_init_wakeup(bdev->dev, true); pm_runtime_set_autosuspend_delay(bdev->dev, BCM_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(bdev->dev); pm_runtime_set_active(bdev->dev); pm_runtime_enable(bdev->dev); unlock: mutex_unlock(&bcm_device_lock); return err; } static const struct bcm_set_sleep_mode default_sleep_params = { .sleep_mode = 1, /* 0=Disabled, 1=UART, 2=Reserved, 3=USB */ .idle_host = 2, /* idle threshold HOST, in 300ms */ .idle_dev = 2, /* idle threshold device, in 300ms */ .bt_wake_active = 1, /* BT_WAKE active mode: 1 = high, 0 = low */ .host_wake_active = 0, /* HOST_WAKE active mode: 1 = high, 0 = low */ .allow_host_sleep = 1, /* Allow host sleep in SCO flag */ .combine_modes = 1, /* Combine sleep and LPM flag */ .tristate_control = 0, /* Allow tri-state control of UART tx flag */ /* Irrelevant USB flags */ .usb_auto_sleep = 0, .usb_resume_timeout = 0, .break_to_host = 0, .pulsed_host_wake = 1, }; static int bcm_setup_sleep(struct hci_uart *hu) { struct bcm_data *bcm = hu->priv; struct sk_buff *skb; struct bcm_set_sleep_mode sleep_params = default_sleep_params; sleep_params.host_wake_active = !bcm->dev->irq_active_low; skb = __hci_cmd_sync(hu->hdev, 0xfc27, sizeof(sleep_params), &sleep_params, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { int err = PTR_ERR(skb); bt_dev_err(hu->hdev, "Sleep VSC failed (%d)", err); return err; } kfree_skb(skb); bt_dev_dbg(hu->hdev, "Set Sleep Parameters VSC succeeded"); return 0; } #else static inline int bcm_request_irq(struct bcm_data *bcm) { return 0; } static inline int bcm_setup_sleep(struct hci_uart *hu) { return 0; } #endif static int bcm_set_diag(struct hci_dev *hdev, bool enable) { struct hci_uart *hu = hci_get_drvdata(hdev); struct bcm_data *bcm = hu->priv; struct sk_buff *skb; if (!test_bit(HCI_RUNNING, &hdev->flags)) return -ENETDOWN; skb = bt_skb_alloc(3, GFP_KERNEL); if (!skb) return -ENOMEM; skb_put_u8(skb, BCM_LM_DIAG_PKT); skb_put_u8(skb, 0xf0); skb_put_u8(skb, enable); skb_queue_tail(&bcm->txq, skb); hci_uart_tx_wakeup(hu); return 0; } static int bcm_open(struct hci_uart *hu) { struct bcm_data *bcm; struct list_head *p; int err; bt_dev_dbg(hu->hdev, "hu %p", hu); if (!hci_uart_has_flow_control(hu)) return -EOPNOTSUPP; bcm = kzalloc(sizeof(*bcm), GFP_KERNEL); if (!bcm) return -ENOMEM; skb_queue_head_init(&bcm->txq); hu->priv = bcm; mutex_lock(&bcm_device_lock); if (hu->serdev) { bcm->dev = serdev_device_get_drvdata(hu->serdev); goto out; } if (!hu->tty->dev) goto out; list_for_each(p, &bcm_device_list) { struct bcm_device *dev = list_entry(p, struct bcm_device, list); /* Retrieve saved bcm_device based on parent of the * platform device (saved during device probe) and * parent of tty device used by hci_uart */ if (hu->tty->dev->parent == dev->dev->parent) { bcm->dev = dev; #ifdef CONFIG_PM dev->hu = hu; #endif break; } } out: if (bcm->dev) { if (bcm->dev->use_autobaud_mode) hci_uart_set_flow_control(hu, false); /* Assert BT_UART_CTS_N */ else if (bcm->dev->drive_rts_on_open) hci_uart_set_flow_control(hu, true); if (bcm->dev->use_autobaud_mode && bcm->dev->max_autobaud_speed) hu->init_speed = min(bcm->dev->oper_speed, bcm->dev->max_autobaud_speed); else hu->init_speed = bcm->dev->init_speed; /* If oper_speed is set, ldisc/serdev will set the baudrate * before calling setup() */ if (!bcm->dev->no_early_set_baudrate && !bcm->dev->use_autobaud_mode) hu->oper_speed = bcm->dev->oper_speed; err = 
bcm_gpio_set_power(bcm->dev, true); if (bcm->dev->drive_rts_on_open) hci_uart_set_flow_control(hu, false); if (err) goto err_unset_hu; } mutex_unlock(&bcm_device_lock); return 0; err_unset_hu: #ifdef CONFIG_PM if (!hu->serdev) bcm->dev->hu = NULL; #endif mutex_unlock(&bcm_device_lock); hu->priv = NULL; kfree(bcm); return err; } static int bcm_close(struct hci_uart *hu) { struct bcm_data *bcm = hu->priv; struct bcm_device *bdev = NULL; int err; bt_dev_dbg(hu->hdev, "hu %p", hu); /* Protect bcm->dev against removal of the device or driver */ mutex_lock(&bcm_device_lock); if (hu->serdev) { bdev = serdev_device_get_drvdata(hu->serdev); } else if (bcm_device_exists(bcm->dev)) { bdev = bcm->dev; #ifdef CONFIG_PM bdev->hu = NULL; #endif } if (bdev) { if (IS_ENABLED(CONFIG_PM) && bdev->irq_acquired) { devm_free_irq(bdev->dev, bdev->irq, bdev); device_init_wakeup(bdev->dev, false); pm_runtime_disable(bdev->dev); } err = bcm_gpio_set_power(bdev, false); if (err) bt_dev_err(hu->hdev, "Failed to power down"); else pm_runtime_set_suspended(bdev->dev); } mutex_unlock(&bcm_device_lock); skb_queue_purge(&bcm->txq); kfree_skb(bcm->rx_skb); kfree(bcm); hu->priv = NULL; return 0; } static int bcm_flush(struct hci_uart *hu) { struct bcm_data *bcm = hu->priv; bt_dev_dbg(hu->hdev, "hu %p", hu); skb_queue_purge(&bcm->txq); return 0; } static int bcm_setup(struct hci_uart *hu) { struct bcm_data *bcm = hu->priv; bool fw_load_done = false; bool use_autobaud_mode = (bcm->dev ? bcm->dev->use_autobaud_mode : 0); unsigned int speed; int err; bt_dev_dbg(hu->hdev, "hu %p", hu); hu->hdev->set_diag = bcm_set_diag; hu->hdev->set_bdaddr = btbcm_set_bdaddr; err = btbcm_initialize(hu->hdev, &fw_load_done, use_autobaud_mode); if (err) return err; if (!fw_load_done) return 0; /* Init speed if any */ if (bcm->dev && bcm->dev->init_speed) speed = bcm->dev->init_speed; else if (hu->proto->init_speed) speed = hu->proto->init_speed; else speed = 0; if (speed) host_set_baudrate(hu, speed); /* Operational speed if any */ if (hu->oper_speed) speed = hu->oper_speed; else if (bcm->dev && bcm->dev->oper_speed) speed = bcm->dev->oper_speed; else if (hu->proto->oper_speed) speed = hu->proto->oper_speed; else speed = 0; if (speed) { err = bcm_set_baudrate(hu, speed); if (!err) host_set_baudrate(hu, speed); } /* PCM parameters if provided */ if (bcm->dev && bcm->dev->pcm_int_params[0] != 0xff) { struct bcm_set_pcm_int_params params; btbcm_read_pcm_int_params(hu->hdev, &params); memcpy(&params, bcm->dev->pcm_int_params, 5); btbcm_write_pcm_int_params(hu->hdev, &params); } err = btbcm_finalize(hu->hdev, &fw_load_done, use_autobaud_mode); if (err) return err; /* Some devices ship with the controller default address. * Allow the bootloader to set a valid address through the * device tree. 
*/ if (test_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks)) set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hu->hdev->quirks); if (!bcm_request_irq(bcm)) err = bcm_setup_sleep(hu); return err; } #define BCM_RECV_LM_DIAG \ .type = BCM_LM_DIAG_PKT, \ .hlen = BCM_LM_DIAG_SIZE, \ .loff = 0, \ .lsize = 0, \ .maxlen = BCM_LM_DIAG_SIZE #define BCM_RECV_NULL \ .type = BCM_NULL_PKT, \ .hlen = BCM_NULL_SIZE, \ .loff = 0, \ .lsize = 0, \ .maxlen = BCM_NULL_SIZE #define BCM_RECV_TYPE49 \ .type = BCM_TYPE49_PKT, \ .hlen = BCM_TYPE49_SIZE, \ .loff = 0, \ .lsize = 0, \ .maxlen = BCM_TYPE49_SIZE #define BCM_RECV_TYPE52 \ .type = BCM_TYPE52_PKT, \ .hlen = BCM_TYPE52_SIZE, \ .loff = 0, \ .lsize = 0, \ .maxlen = BCM_TYPE52_SIZE static const struct h4_recv_pkt bcm_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { H4_RECV_ISO, .recv = hci_recv_frame }, { BCM_RECV_LM_DIAG, .recv = hci_recv_diag }, { BCM_RECV_NULL, .recv = hci_recv_diag }, { BCM_RECV_TYPE49, .recv = hci_recv_diag }, { BCM_RECV_TYPE52, .recv = hci_recv_diag }, }; static int bcm_recv(struct hci_uart *hu, const void *data, int count) { struct bcm_data *bcm = hu->priv; if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; bcm->rx_skb = h4_recv_buf(hu->hdev, bcm->rx_skb, data, count, bcm_recv_pkts, ARRAY_SIZE(bcm_recv_pkts)); if (IS_ERR(bcm->rx_skb)) { int err = PTR_ERR(bcm->rx_skb); bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); bcm->rx_skb = NULL; return err; } else if (!bcm->rx_skb) { /* Delay auto-suspend when receiving completed packet */ mutex_lock(&bcm_device_lock); if (bcm->dev && bcm_device_exists(bcm->dev)) { pm_runtime_get(bcm->dev->dev); pm_runtime_mark_last_busy(bcm->dev->dev); pm_runtime_put_autosuspend(bcm->dev->dev); } mutex_unlock(&bcm_device_lock); } return count; } static int bcm_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct bcm_data *bcm = hu->priv; bt_dev_dbg(hu->hdev, "hu %p skb %p", hu, skb); /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&bcm->txq, skb); return 0; } static struct sk_buff *bcm_dequeue(struct hci_uart *hu) { struct bcm_data *bcm = hu->priv; struct sk_buff *skb = NULL; struct bcm_device *bdev = NULL; mutex_lock(&bcm_device_lock); if (bcm_device_exists(bcm->dev)) { bdev = bcm->dev; pm_runtime_get_sync(bdev->dev); /* Shall be resumed here */ } skb = skb_dequeue(&bcm->txq); if (bdev) { pm_runtime_mark_last_busy(bdev->dev); pm_runtime_put_autosuspend(bdev->dev); } mutex_unlock(&bcm_device_lock); return skb; } #ifdef CONFIG_PM static int bcm_suspend_device(struct device *dev) { struct bcm_device *bdev = dev_get_drvdata(dev); int err; bt_dev_dbg(bdev, ""); if (!bdev->is_suspended && bdev->hu) { hci_uart_set_flow_control(bdev->hu, true); /* Once this returns, driver suspends BT via GPIO */ bdev->is_suspended = true; } /* Suspend the device */ err = bdev->set_device_wakeup(bdev, false); if (err) { if (bdev->is_suspended && bdev->hu) { bdev->is_suspended = false; hci_uart_set_flow_control(bdev->hu, false); } return -EBUSY; } bt_dev_dbg(bdev, "suspend, delaying 15 ms"); msleep(15); return 0; } static int bcm_resume_device(struct device *dev) { struct bcm_device *bdev = dev_get_drvdata(dev); int err; bt_dev_dbg(bdev, ""); err = bdev->set_device_wakeup(bdev, true); if (err) { dev_err(dev, "Failed to power up\n"); return err; } bt_dev_dbg(bdev, "resume, delaying 15 ms"); msleep(15); /* When this executes, the device has woken up already */ if 
(bdev->is_suspended && bdev->hu) { bdev->is_suspended = false; hci_uart_set_flow_control(bdev->hu, false); } return 0; } #endif #ifdef CONFIG_PM_SLEEP /* suspend callback */ static int bcm_suspend(struct device *dev) { struct bcm_device *bdev = dev_get_drvdata(dev); int error; bt_dev_dbg(bdev, "suspend: is_suspended %d", bdev->is_suspended); /* * When used with a device instantiated as platform_device, bcm_suspend * can be called at any time as long as the platform device is bound, * so it should use bcm_device_lock to protect access to hci_uart * and device_wake-up GPIO. */ mutex_lock(&bcm_device_lock); if (!bdev->hu) goto unlock; if (pm_runtime_active(dev)) bcm_suspend_device(dev); if (device_may_wakeup(dev) && bdev->irq > 0) { error = enable_irq_wake(bdev->irq); if (!error) bt_dev_dbg(bdev, "BCM irq: enabled"); } unlock: mutex_unlock(&bcm_device_lock); return 0; } /* resume callback */ static int bcm_resume(struct device *dev) { struct bcm_device *bdev = dev_get_drvdata(dev); int err = 0; bt_dev_dbg(bdev, "resume: is_suspended %d", bdev->is_suspended); /* * When used with a device instantiated as platform_device, bcm_resume * can be called at any time as long as platform device is bound, * so it should use bcm_device_lock to protect access to hci_uart * and device_wake-up GPIO. */ mutex_lock(&bcm_device_lock); if (!bdev->hu) goto unlock; if (device_may_wakeup(dev) && bdev->irq > 0) { disable_irq_wake(bdev->irq); bt_dev_dbg(bdev, "BCM irq: disabled"); } err = bcm_resume_device(dev); unlock: mutex_unlock(&bcm_device_lock); if (!err) { pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); } return 0; } #endif /* Some firmware reports an IRQ which does not work (wrong pin in fw table?) */ static struct gpiod_lookup_table irq_on_int33fc02_pin17_gpios = { .dev_id = "serial0-0", .table = { GPIO_LOOKUP("INT33FC:02", 17, "host-wakeup-alt", GPIO_ACTIVE_HIGH), { } }, }; static const struct dmi_system_id bcm_broken_irq_dmi_table[] = { { .ident = "Acer Iconia One 7 B1-750", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"), }, .driver_data = &irq_on_int33fc02_pin17_gpios, }, { .ident = "Asus TF103C", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"), }, .driver_data = &irq_on_int33fc02_pin17_gpios, }, { .ident = "Lenovo Yoga Tablet 2 830F/L / 1050F/L", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), /* Partial match on beginning of BIOS version */ DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), }, .driver_data = &irq_on_int33fc02_pin17_gpios, }, { .ident = "Meegopad T08", .matches = { DMI_EXACT_MATCH(DMI_BOARD_VENDOR, "To be filled by OEM."), DMI_EXACT_MATCH(DMI_BOARD_NAME, "T3 MRD"), DMI_EXACT_MATCH(DMI_BOARD_VERSION, "V1.1"), }, }, { } }; #ifdef CONFIG_ACPI static const struct acpi_gpio_params first_gpio = { 0, 0, false }; static const struct acpi_gpio_params second_gpio = { 1, 0, false }; static const struct acpi_gpio_params third_gpio = { 2, 0, false }; static const struct acpi_gpio_mapping acpi_bcm_int_last_gpios[] = { { "device-wakeup-gpios", &first_gpio, 1 }, { "shutdown-gpios", &second_gpio, 1 }, { "host-wakeup-gpios", &third_gpio, 1 }, { }, }; static const struct acpi_gpio_mapping acpi_bcm_int_first_gpios[] = { { "host-wakeup-gpios", &first_gpio, 1 }, { "device-wakeup-gpios", &second_gpio, 1 }, { "shutdown-gpios", &third_gpio, 1 }, { }, }; static int bcm_resource(struct acpi_resource 
*ares, void *data) { struct bcm_device *dev = data; struct acpi_resource_extended_irq *irq; struct acpi_resource_gpio *gpio; struct acpi_resource_uart_serialbus *sb; switch (ares->type) { case ACPI_RESOURCE_TYPE_EXTENDED_IRQ: irq = &ares->data.extended_irq; if (irq->polarity != ACPI_ACTIVE_LOW) dev_info(dev->dev, "ACPI Interrupt resource is active-high, this is usually wrong, treating the IRQ as active-low\n"); dev->irq_active_low = true; break; case ACPI_RESOURCE_TYPE_GPIO: gpio = &ares->data.gpio; if (gpio->connection_type == ACPI_RESOURCE_GPIO_TYPE_INT) { dev->gpio_int_idx = dev->gpio_count; dev->irq_active_low = gpio->polarity == ACPI_ACTIVE_LOW; } dev->gpio_count++; break; case ACPI_RESOURCE_TYPE_SERIAL_BUS: sb = &ares->data.uart_serial_bus; if (sb->type == ACPI_RESOURCE_SERIAL_TYPE_UART) { dev->init_speed = sb->default_baud_rate; dev->oper_speed = 4000000; } break; default: break; } return 0; } static int bcm_apple_set_device_wakeup(struct bcm_device *dev, bool awake) { if (ACPI_FAILURE(acpi_execute_simple_method(dev->btlp, NULL, !awake))) return -EIO; return 0; } static int bcm_apple_set_shutdown(struct bcm_device *dev, bool powered) { if (ACPI_FAILURE(acpi_evaluate_object(powered ? dev->btpu : dev->btpd, NULL, NULL, NULL))) return -EIO; return 0; } static int bcm_apple_get_resources(struct bcm_device *dev) { struct acpi_device *adev = ACPI_COMPANION(dev->dev); const union acpi_object *obj; if (!adev || ACPI_FAILURE(acpi_get_handle(adev->handle, "BTLP", &dev->btlp)) || ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPU", &dev->btpu)) || ACPI_FAILURE(acpi_get_handle(adev->handle, "BTPD", &dev->btpd))) return -ENODEV; if (!acpi_dev_get_property(adev, "baud", ACPI_TYPE_BUFFER, &obj) && obj->buffer.length == 8) dev->init_speed = *(u64 *)obj->buffer.pointer; dev->set_device_wakeup = bcm_apple_set_device_wakeup; dev->set_shutdown = bcm_apple_set_shutdown; return 0; } #else static inline int bcm_apple_get_resources(struct bcm_device *dev) { return -EOPNOTSUPP; } #endif /* CONFIG_ACPI */ static int bcm_gpio_set_device_wakeup(struct bcm_device *dev, bool awake) { gpiod_set_value_cansleep(dev->device_wakeup, awake); return 0; } static int bcm_gpio_set_shutdown(struct bcm_device *dev, bool powered) { gpiod_set_value_cansleep(dev->shutdown, powered); if (dev->reset) /* * The reset line is asserted on powerdown and deasserted * on poweron so the inverse of powered is used. Notice * that the GPIO line BT_RST_N needs to be specified as * active low in the device tree or similar system * description. 
gpiod_set_value_cansleep(dev->reset, !powered); return 0; } /* Try a bunch of names for TXCO */ static struct clk *bcm_get_txco(struct device *dev) { struct clk *clk; /* New explicit name */ clk = devm_clk_get(dev, "txco"); if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER) return clk; /* Deprecated name */ clk = devm_clk_get(dev, "extclk"); if (!IS_ERR(clk) || PTR_ERR(clk) == -EPROBE_DEFER) return clk; /* Original code used no name at all */ return devm_clk_get(dev, NULL); } static int bcm_get_resources(struct bcm_device *dev) { const struct dmi_system_id *broken_irq_dmi_id; const char *irq_con_id = "host-wakeup"; int err; dev->name = dev_name(dev->dev); if (x86_apple_machine && !bcm_apple_get_resources(dev)) return 0; dev->txco_clk = bcm_get_txco(dev->dev); /* Handle deferred probing */ if (dev->txco_clk == ERR_PTR(-EPROBE_DEFER)) return PTR_ERR(dev->txco_clk); /* Ignore all other errors as before */ if (IS_ERR(dev->txco_clk)) dev->txco_clk = NULL; dev->lpo_clk = devm_clk_get(dev->dev, "lpo"); if (dev->lpo_clk == ERR_PTR(-EPROBE_DEFER)) return PTR_ERR(dev->lpo_clk); if (IS_ERR(dev->lpo_clk)) dev->lpo_clk = NULL; /* Check if we accidentally fetched the lpo clock twice */ if (dev->lpo_clk && clk_is_match(dev->lpo_clk, dev->txco_clk)) { devm_clk_put(dev->dev, dev->txco_clk); dev->txco_clk = NULL; } dev->device_wakeup = devm_gpiod_get_optional(dev->dev, "device-wakeup", GPIOD_OUT_LOW); if (IS_ERR(dev->device_wakeup)) return PTR_ERR(dev->device_wakeup); dev->shutdown = devm_gpiod_get_optional(dev->dev, "shutdown", GPIOD_OUT_LOW); if (IS_ERR(dev->shutdown)) return PTR_ERR(dev->shutdown); dev->reset = devm_gpiod_get_optional(dev->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(dev->reset)) return PTR_ERR(dev->reset); dev->set_device_wakeup = bcm_gpio_set_device_wakeup; dev->set_shutdown = bcm_gpio_set_shutdown; dev->supplies[0].supply = "vbat"; dev->supplies[1].supply = "vddio"; err = devm_regulator_bulk_get(dev->dev, BCM_NUM_SUPPLIES, dev->supplies); if (err) return err; broken_irq_dmi_id = dmi_first_match(bcm_broken_irq_dmi_table); if (broken_irq_dmi_id && broken_irq_dmi_id->driver_data) { gpiod_add_lookup_table(broken_irq_dmi_id->driver_data); irq_con_id = "host-wakeup-alt"; dev->irq_active_low = false; dev->irq = 0; } /* IRQ can be declared in ACPI table as Interrupt or GpioInt */ if (dev->irq <= 0) { struct gpio_desc *gpio; gpio = devm_gpiod_get_optional(dev->dev, irq_con_id, GPIOD_IN); if (IS_ERR(gpio)) return PTR_ERR(gpio); dev->irq = gpiod_to_irq(gpio); } if (broken_irq_dmi_id) { if (broken_irq_dmi_id->driver_data) { gpiod_remove_lookup_table(broken_irq_dmi_id->driver_data); } else { dev_info(dev->dev, "%s: Has a broken IRQ config, disabling IRQ support / runtime-pm\n", broken_irq_dmi_id->ident); dev->irq = 0; } } dev_dbg(dev->dev, "BCM irq: %d\n", dev->irq); return 0; } #ifdef CONFIG_ACPI static int bcm_acpi_probe(struct bcm_device *dev) { LIST_HEAD(resources); const struct acpi_gpio_mapping *gpio_mapping = acpi_bcm_int_last_gpios; struct resource_entry *entry; int ret; /* Retrieve UART ACPI info */ dev->gpio_int_idx = -1; ret = acpi_dev_get_resources(ACPI_COMPANION(dev->dev), &resources, bcm_resource, dev); if (ret < 0) return ret; resource_list_for_each_entry(entry, &resources) { if (resource_type(entry->res) == IORESOURCE_IRQ) { dev->irq = entry->res->start; break; } } acpi_dev_free_resource_list(&resources); /* If the DSDT uses an Interrupt resource for the IRQ, then there are * only 2 GPIO resources; we use the irq-last mapping for this, since * we already have an irq and the 3rd / last
mapping will not be used. */ if (dev->irq) gpio_mapping = acpi_bcm_int_last_gpios; else if (dev->gpio_int_idx == 0) gpio_mapping = acpi_bcm_int_first_gpios; else if (dev->gpio_int_idx == 2) gpio_mapping = acpi_bcm_int_last_gpios; else dev_warn(dev->dev, "Unexpected ACPI gpio_int_idx: %d\n", dev->gpio_int_idx); /* Warn if our expectations are not met. */ if (dev->gpio_count != (dev->irq ? 2 : 3)) dev_warn(dev->dev, "Unexpected number of ACPI GPIOs: %d\n", dev->gpio_count); ret = devm_acpi_dev_add_driver_gpios(dev->dev, gpio_mapping); if (ret) return ret; if (irq_polarity != -1) { dev->irq_active_low = irq_polarity; dev_warn(dev->dev, "Overwriting IRQ polarity to active %s by module-param\n", dev->irq_active_low ? "low" : "high"); } return 0; } #else static int bcm_acpi_probe(struct bcm_device *dev) { return -EINVAL; } #endif /* CONFIG_ACPI */ static int bcm_of_probe(struct bcm_device *bdev) { bdev->use_autobaud_mode = device_property_read_bool(bdev->dev, "brcm,requires-autobaud-mode"); device_property_read_u32(bdev->dev, "max-speed", &bdev->oper_speed); device_property_read_u8_array(bdev->dev, "brcm,bt-pcm-int-params", bdev->pcm_int_params, 5); bdev->irq = of_irq_get_byname(bdev->dev->of_node, "host-wakeup"); bdev->irq_active_low = irq_get_trigger_type(bdev->irq) & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_LEVEL_LOW); return 0; } static int bcm_probe(struct platform_device *pdev) { struct bcm_device *dev; int ret; dev = devm_kzalloc(&pdev->dev, sizeof(*dev), GFP_KERNEL); if (!dev) return -ENOMEM; dev->dev = &pdev->dev; ret = platform_get_irq(pdev, 0); if (ret < 0) return ret; dev->irq = ret; /* Initialize routing field to an unused value */ dev->pcm_int_params[0] = 0xff; if (has_acpi_companion(&pdev->dev)) { ret = bcm_acpi_probe(dev); if (ret) return ret; } ret = bcm_get_resources(dev); if (ret) return ret; platform_set_drvdata(pdev, dev); dev_info(&pdev->dev, "%s device registered.\n", dev->name); /* Place this instance on the device list */ mutex_lock(&bcm_device_lock); list_add_tail(&dev->list, &bcm_device_list); mutex_unlock(&bcm_device_lock); ret = bcm_gpio_set_power(dev, false); if (ret) dev_err(&pdev->dev, "Failed to power down\n"); return 0; } static int bcm_remove(struct platform_device *pdev) { struct bcm_device *dev = platform_get_drvdata(pdev); mutex_lock(&bcm_device_lock); list_del(&dev->list); mutex_unlock(&bcm_device_lock); dev_info(&pdev->dev, "%s device unregistered.\n", dev->name); return 0; } static const struct hci_uart_proto bcm_proto = { .id = HCI_UART_BCM, .name = "Broadcom", .manufacturer = 15, .init_speed = 115200, .open = bcm_open, .close = bcm_close, .flush = bcm_flush, .setup = bcm_setup, .set_baudrate = bcm_set_baudrate, .recv = bcm_recv, .enqueue = bcm_enqueue, .dequeue = bcm_dequeue, }; #ifdef CONFIG_ACPI /* bcm43430a0/a1 BT does not support 48MHz UART clock, limit to 2000000 baud */ static struct bcm_device_data bcm43430_device_data = { .max_speed = 2000000, }; static const struct acpi_device_id bcm_acpi_match[] = { { "BCM2E00" }, { "BCM2E01" }, { "BCM2E02" }, { "BCM2E03" }, { "BCM2E04" }, { "BCM2E05" }, { "BCM2E06" }, { "BCM2E07" }, { "BCM2E08" }, { "BCM2E09" }, { "BCM2E0A" }, { "BCM2E0B" }, { "BCM2E0C" }, { "BCM2E0D" }, { "BCM2E0E" }, { "BCM2E0F" }, { "BCM2E10" }, { "BCM2E11" }, { "BCM2E12" }, { "BCM2E13" }, { "BCM2E14" }, { "BCM2E15" }, { "BCM2E16" }, { "BCM2E17" }, { "BCM2E18" }, { "BCM2E19" }, { "BCM2E1A" }, { "BCM2E1B" }, { "BCM2E1C" }, { "BCM2E1D" }, { "BCM2E1F" }, { "BCM2E20" }, { "BCM2E21" }, { "BCM2E22" }, { "BCM2E23" }, { "BCM2E24" }, { "BCM2E25" }, { 
"BCM2E26" }, { "BCM2E27" }, { "BCM2E28" }, { "BCM2E29" }, { "BCM2E2A" }, { "BCM2E2B" }, { "BCM2E2C" }, { "BCM2E2D" }, { "BCM2E2E" }, { "BCM2E2F" }, { "BCM2E30" }, { "BCM2E31" }, { "BCM2E32" }, { "BCM2E33" }, { "BCM2E34" }, { "BCM2E35" }, { "BCM2E36" }, { "BCM2E37" }, { "BCM2E38" }, { "BCM2E39" }, { "BCM2E3A" }, { "BCM2E3B" }, { "BCM2E3C" }, { "BCM2E3D" }, { "BCM2E3E" }, { "BCM2E3F" }, { "BCM2E40" }, { "BCM2E41" }, { "BCM2E42" }, { "BCM2E43" }, { "BCM2E44" }, { "BCM2E45" }, { "BCM2E46" }, { "BCM2E47" }, { "BCM2E48" }, { "BCM2E49" }, { "BCM2E4A" }, { "BCM2E4B" }, { "BCM2E4C" }, { "BCM2E4D" }, { "BCM2E4E" }, { "BCM2E4F" }, { "BCM2E50" }, { "BCM2E51" }, { "BCM2E52" }, { "BCM2E53" }, { "BCM2E54" }, { "BCM2E55" }, { "BCM2E56" }, { "BCM2E57" }, { "BCM2E58" }, { "BCM2E59" }, { "BCM2E5A" }, { "BCM2E5B" }, { "BCM2E5C" }, { "BCM2E5D" }, { "BCM2E5E" }, { "BCM2E5F" }, { "BCM2E60" }, { "BCM2E61" }, { "BCM2E62" }, { "BCM2E63" }, { "BCM2E64" }, { "BCM2E65" }, { "BCM2E66" }, { "BCM2E67" }, { "BCM2E68" }, { "BCM2E69" }, { "BCM2E6B" }, { "BCM2E6D" }, { "BCM2E6E" }, { "BCM2E6F" }, { "BCM2E70" }, { "BCM2E71" }, { "BCM2E72" }, { "BCM2E73" }, { "BCM2E74", (long)&bcm43430_device_data }, { "BCM2E75", (long)&bcm43430_device_data }, { "BCM2E76" }, { "BCM2E77" }, { "BCM2E78" }, { "BCM2E79" }, { "BCM2E7A" }, { "BCM2E7B", (long)&bcm43430_device_data }, { "BCM2E7C" }, { "BCM2E7D" }, { "BCM2E7E" }, { "BCM2E7F" }, { "BCM2E80", (long)&bcm43430_device_data }, { "BCM2E81" }, { "BCM2E82" }, { "BCM2E83" }, { "BCM2E84" }, { "BCM2E85" }, { "BCM2E86" }, { "BCM2E87" }, { "BCM2E88" }, { "BCM2E89", (long)&bcm43430_device_data }, { "BCM2E8A" }, { "BCM2E8B" }, { "BCM2E8C" }, { "BCM2E8D" }, { "BCM2E8E" }, { "BCM2E90" }, { "BCM2E92" }, { "BCM2E93" }, { "BCM2E94", (long)&bcm43430_device_data }, { "BCM2E95" }, { "BCM2E96" }, { "BCM2E97" }, { "BCM2E98" }, { "BCM2E99", (long)&bcm43430_device_data }, { "BCM2E9A" }, { "BCM2E9B", (long)&bcm43430_device_data }, { "BCM2E9C" }, { "BCM2E9D" }, { "BCM2E9F", (long)&bcm43430_device_data }, { "BCM2EA0" }, { "BCM2EA1" }, { "BCM2EA2", (long)&bcm43430_device_data }, { "BCM2EA3", (long)&bcm43430_device_data }, { "BCM2EA4" }, { "BCM2EA5" }, { "BCM2EA6" }, { "BCM2EA7" }, { "BCM2EA8" }, { "BCM2EA9" }, { "BCM2EAA", (long)&bcm43430_device_data }, { "BCM2EAB", (long)&bcm43430_device_data }, { "BCM2EAC", (long)&bcm43430_device_data }, { }, }; MODULE_DEVICE_TABLE(acpi, bcm_acpi_match); #endif /* suspend and resume callbacks */ static const struct dev_pm_ops bcm_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(bcm_suspend, bcm_resume) SET_RUNTIME_PM_OPS(bcm_suspend_device, bcm_resume_device, NULL) }; static struct platform_driver bcm_driver = { .probe = bcm_probe, .remove = bcm_remove, .driver = { .name = "hci_bcm", .acpi_match_table = ACPI_PTR(bcm_acpi_match), .pm = &bcm_pm_ops, }, }; static int bcm_serdev_probe(struct serdev_device *serdev) { struct bcm_device *bcmdev; const struct bcm_device_data *data; int err; bcmdev = devm_kzalloc(&serdev->dev, sizeof(*bcmdev), GFP_KERNEL); if (!bcmdev) return -ENOMEM; bcmdev->dev = &serdev->dev; #ifdef CONFIG_PM bcmdev->hu = &bcmdev->serdev_hu; #endif bcmdev->serdev_hu.serdev = serdev; serdev_device_set_drvdata(serdev, bcmdev); /* Initialize routing field to an unused value */ bcmdev->pcm_int_params[0] = 0xff; if (has_acpi_companion(&serdev->dev)) err = bcm_acpi_probe(bcmdev); else err = bcm_of_probe(bcmdev); if (err) return err; err = bcm_get_resources(bcmdev); if (err) return err; if (!bcmdev->shutdown) { dev_warn(&serdev->dev, "No reset resource, using default baud rate\n"); 
bcmdev->oper_speed = bcmdev->init_speed; } err = bcm_gpio_set_power(bcmdev, false); if (err) dev_err(&serdev->dev, "Failed to power down\n"); data = device_get_match_data(bcmdev->dev); if (data) { bcmdev->max_autobaud_speed = data->max_autobaud_speed; bcmdev->no_early_set_baudrate = data->no_early_set_baudrate; bcmdev->drive_rts_on_open = data->drive_rts_on_open; bcmdev->no_uart_clock_set = data->no_uart_clock_set; if (data->max_speed && bcmdev->oper_speed > data->max_speed) bcmdev->oper_speed = data->max_speed; } return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto); } static void bcm_serdev_remove(struct serdev_device *serdev) { struct bcm_device *bcmdev = serdev_device_get_drvdata(serdev); hci_uart_unregister_device(&bcmdev->serdev_hu); } #ifdef CONFIG_OF static struct bcm_device_data bcm4354_device_data = { .no_early_set_baudrate = true, }; static struct bcm_device_data bcm43438_device_data = { .drive_rts_on_open = true, }; static struct bcm_device_data cyw4373a0_device_data = { .no_uart_clock_set = true, }; static struct bcm_device_data cyw55572_device_data = { .max_autobaud_speed = 921600, }; static const struct of_device_id bcm_bluetooth_of_match[] = { { .compatible = "brcm,bcm20702a1" }, { .compatible = "brcm,bcm4329-bt" }, { .compatible = "brcm,bcm4330-bt" }, { .compatible = "brcm,bcm4334-bt" }, { .compatible = "brcm,bcm4345c5" }, { .compatible = "brcm,bcm43430a0-bt" }, { .compatible = "brcm,bcm43430a1-bt" }, { .compatible = "brcm,bcm43438-bt", .data = &bcm43438_device_data }, { .compatible = "brcm,bcm4349-bt", .data = &bcm43438_device_data }, { .compatible = "brcm,bcm43540-bt", .data = &bcm4354_device_data }, { .compatible = "brcm,bcm4335a0" }, { .compatible = "cypress,cyw4373a0-bt", .data = &cyw4373a0_device_data }, { .compatible = "infineon,cyw55572-bt", .data = &cyw55572_device_data }, { }, }; MODULE_DEVICE_TABLE(of, bcm_bluetooth_of_match); #endif static struct serdev_device_driver bcm_serdev_driver = { .probe = bcm_serdev_probe, .remove = bcm_serdev_remove, .driver = { .name = "hci_uart_bcm", .of_match_table = of_match_ptr(bcm_bluetooth_of_match), .acpi_match_table = ACPI_PTR(bcm_acpi_match), .pm = &bcm_pm_ops, }, }; int __init bcm_init(void) { /* For now, we need to keep both platform device * driver (ACPI generated) and serdev driver (DT). */ platform_driver_register(&bcm_driver); serdev_device_driver_register(&bcm_serdev_driver); return hci_uart_register_proto(&bcm_proto); } int __exit bcm_deinit(void) { platform_driver_unregister(&bcm_driver); serdev_device_driver_unregister(&bcm_serdev_driver); return hci_uart_unregister_proto(&bcm_proto); }
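/*
 * Editor's note: an illustrative, self-contained userspace sketch, not part
 * of hci_bcm.c. It shows the on-the-wire payload that bcm_set_baudrate()
 * above sends with the Broadcom vendor opcode 0xfc18, assuming the packed
 * layout of struct bcm_update_uart_baud_rate implied by the field
 * assignments there: a little-endian u16 of zero followed by the requested
 * baud rate as a little-endian u32. bcm_pack_baud_param() is a hypothetical
 * helper written only for this sketch.
 */
#include <stdint.h>
#include <stdio.h>

static void bcm_pack_baud_param(uint8_t buf[6], uint32_t speed)
{
	buf[0] = 0;			/* zero, LE u16 */
	buf[1] = 0;
	buf[2] = speed & 0xff;		/* baud_rate, LE u32 */
	buf[3] = (speed >> 8) & 0xff;
	buf[4] = (speed >> 16) & 0xff;
	buf[5] = (speed >> 24) & 0xff;
}

int main(void)
{
	uint8_t buf[6];
	int i;

	bcm_pack_baud_param(buf, 3000000);	/* 3 Mbps, no 48MHz clock cmd needed */
	for (i = 0; i < 6; i++)
		printf("%02x ", buf[i]);
	printf("\n");	/* Prints "00 00 c0 c6 2d 00" */
	return 0;
}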
linux-master
drivers/bluetooth/hci_bcm.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Bluetooth HCI serdev driver lib * * Copyright (C) 2017 Linaro, Ltd., Rob Herring <[email protected]> * * Based on hci_ldisc.c: * * Copyright (C) 2000-2001 Qualcomm Incorporated * Copyright (C) 2002-2003 Maxim Krasnyansky <[email protected]> * Copyright (C) 2004-2005 Marcel Holtmann <[email protected]> */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/serdev.h> #include <linux/skbuff.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" static inline void hci_uart_tx_complete(struct hci_uart *hu, int pkt_type) { struct hci_dev *hdev = hu->hdev; /* Update HCI stat counters */ switch (pkt_type) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; } } static inline struct sk_buff *hci_uart_dequeue(struct hci_uart *hu) { struct sk_buff *skb = hu->tx_skb; if (!skb) { if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) skb = hu->proto->dequeue(hu); } else hu->tx_skb = NULL; return skb; } static void hci_uart_write_work(struct work_struct *work) { struct hci_uart *hu = container_of(work, struct hci_uart, write_work); struct serdev_device *serdev = hu->serdev; struct hci_dev *hdev = hu->hdev; struct sk_buff *skb; /* REVISIT: * should we cope with bad skbs or ->write() returning an error value? */ do { clear_bit(HCI_UART_TX_WAKEUP, &hu->tx_state); while ((skb = hci_uart_dequeue(hu))) { int len; len = serdev_device_write_buf(serdev, skb->data, skb->len); hdev->stat.byte_tx += len; skb_pull(skb, len); if (skb->len) { hu->tx_skb = skb; break; } hci_uart_tx_complete(hu, hci_skb_pkt_type(skb)); kfree_skb(skb); } clear_bit(HCI_UART_SENDING, &hu->tx_state); } while (test_bit(HCI_UART_TX_WAKEUP, &hu->tx_state)); } /* ------- Interface to HCI layer ------ */ /* Reset device */ static int hci_uart_flush(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); BT_DBG("hdev %p serdev %p", hdev, hu->serdev); if (hu->tx_skb) { kfree_skb(hu->tx_skb); hu->tx_skb = NULL; } /* Flush any pending characters in the driver and discipline. */ serdev_device_write_flush(hu->serdev); if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) hu->proto->flush(hu); return 0; } /* Initialize device */ static int hci_uart_open(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); int err; BT_DBG("%s %p", hdev->name, hdev); /* When Quirk HCI_QUIRK_NON_PERSISTENT_SETUP is set by * driver, BT SoC is completely turned OFF during * BT OFF. Upon next BT ON UART port should be opened. */ if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) { err = serdev_device_open(hu->serdev); if (err) return err; set_bit(HCI_UART_PROTO_READY, &hu->flags); } /* Undo clearing this from hci_uart_close() */ hdev->flush = hci_uart_flush; return 0; } /* Close device */ static int hci_uart_close(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); BT_DBG("hdev %p", hdev); if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) return 0; hci_uart_flush(hdev); hdev->flush = NULL; /* When QUIRK HCI_QUIRK_NON_PERSISTENT_SETUP is set by driver, * BT SOC is completely powered OFF during BT OFF, holding port * open may drain the battery. 
*/ if (test_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks)) { clear_bit(HCI_UART_PROTO_READY, &hu->flags); serdev_device_close(hu->serdev); } return 0; } /* Send frames from HCI layer */ static int hci_uart_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); BT_DBG("%s: type %d len %d", hdev->name, hci_skb_pkt_type(skb), skb->len); hu->proto->enqueue(hu, skb); hci_uart_tx_wakeup(hu); return 0; } static int hci_uart_setup(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); struct hci_rp_read_local_version *ver; struct sk_buff *skb; unsigned int speed; int err; /* Init speed if any */ if (hu->init_speed) speed = hu->init_speed; else if (hu->proto->init_speed) speed = hu->proto->init_speed; else speed = 0; if (speed) serdev_device_set_baudrate(hu->serdev, speed); /* Operational speed if any */ if (hu->oper_speed) speed = hu->oper_speed; else if (hu->proto->oper_speed) speed = hu->proto->oper_speed; else speed = 0; if (hu->proto->set_baudrate && speed) { err = hu->proto->set_baudrate(hu, speed); if (err) bt_dev_err(hdev, "Failed to set baudrate"); else serdev_device_set_baudrate(hu->serdev, speed); } if (hu->proto->setup) return hu->proto->setup(hu); if (!test_bit(HCI_UART_VND_DETECT, &hu->hdev_flags)) return 0; skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reading local version info failed (%ld)", PTR_ERR(skb)); return 0; } if (skb->len != sizeof(*ver)) bt_dev_err(hdev, "Event length mismatch for version info"); kfree_skb(skb); return 0; } /* Check if the device is wakeable */ static bool hci_uart_wakeup(struct hci_dev *hdev) { /* HCI UART devices are assumed to be wakeable by default. * Implement wakeup callback to override this behavior. */ return true; } /** hci_uart_write_wakeup - transmit buffer wakeup * @serdev: serial device * * This function is called by the serdev framework when it accepts * more data being sent. */ static void hci_uart_write_wakeup(struct serdev_device *serdev) { struct hci_uart *hu = serdev_device_get_drvdata(serdev); BT_DBG(""); if (!hu || serdev != hu->serdev) { WARN_ON(1); return; } if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) hci_uart_tx_wakeup(hu); } /** hci_uart_receive_buf - receive buffer wakeup * @serdev: serial device * @data: pointer to received data * @count: count of received data in bytes * * This function is called by the serdev framework when it received data * in the RX buffer. 
* * Return: number of processed bytes */ static int hci_uart_receive_buf(struct serdev_device *serdev, const u8 *data, size_t count) { struct hci_uart *hu = serdev_device_get_drvdata(serdev); if (!hu || serdev != hu->serdev) { WARN_ON(1); return 0; } if (!test_bit(HCI_UART_PROTO_READY, &hu->flags)) return 0; /* It does not need a lock here as it is already protected by a mutex in * tty caller */ hu->proto->recv(hu, data, count); if (hu->hdev) hu->hdev->stat.byte_rx += count; return count; } static const struct serdev_device_ops hci_serdev_client_ops = { .receive_buf = hci_uart_receive_buf, .write_wakeup = hci_uart_write_wakeup, }; int hci_uart_register_device(struct hci_uart *hu, const struct hci_uart_proto *p) { int err; struct hci_dev *hdev; BT_DBG(""); serdev_device_set_client_ops(hu->serdev, &hci_serdev_client_ops); if (percpu_init_rwsem(&hu->proto_lock)) return -ENOMEM; err = serdev_device_open(hu->serdev); if (err) goto err_rwsem; err = p->open(hu); if (err) goto err_open; hu->proto = p; set_bit(HCI_UART_PROTO_READY, &hu->flags); /* Initialize and register HCI device */ hdev = hci_alloc_dev(); if (!hdev) { BT_ERR("Can't allocate HCI device"); err = -ENOMEM; goto err_alloc; } hu->hdev = hdev; hdev->bus = HCI_UART; hci_set_drvdata(hdev, hu); INIT_WORK(&hu->init_ready, hci_uart_init_work); INIT_WORK(&hu->write_work, hci_uart_write_work); /* Only when vendor specific setup callback is provided, consider * the manufacturer information valid. This avoids filling in the * value for Ericsson when nothing is specified. */ if (hu->proto->setup) hdev->manufacturer = hu->proto->manufacturer; hdev->open = hci_uart_open; hdev->close = hci_uart_close; hdev->flush = hci_uart_flush; hdev->send = hci_uart_send_frame; hdev->setup = hci_uart_setup; if (!hdev->wakeup) hdev->wakeup = hci_uart_wakeup; SET_HCIDEV_DEV(hdev, &hu->serdev->dev); if (test_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &hu->flags)) set_bit(HCI_QUIRK_NO_SUSPEND_NOTIFIER, &hdev->quirks); if (test_bit(HCI_UART_RAW_DEVICE, &hu->hdev_flags)) set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks); if (test_bit(HCI_UART_EXT_CONFIG, &hu->hdev_flags)) set_bit(HCI_QUIRK_EXTERNAL_CONFIG, &hdev->quirks); if (test_bit(HCI_UART_CREATE_AMP, &hu->hdev_flags)) hdev->dev_type = HCI_AMP; else hdev->dev_type = HCI_PRIMARY; if (test_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags)) return 0; if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); err = -ENODEV; goto err_register; } set_bit(HCI_UART_REGISTERED, &hu->flags); return 0; err_register: hci_free_dev(hdev); err_alloc: clear_bit(HCI_UART_PROTO_READY, &hu->flags); p->close(hu); err_open: serdev_device_close(hu->serdev); err_rwsem: percpu_free_rwsem(&hu->proto_lock); return err; } EXPORT_SYMBOL_GPL(hci_uart_register_device); void hci_uart_unregister_device(struct hci_uart *hu) { struct hci_dev *hdev = hu->hdev; cancel_work_sync(&hu->init_ready); if (test_bit(HCI_UART_REGISTERED, &hu->flags)) hci_unregister_dev(hdev); hci_free_dev(hdev); cancel_work_sync(&hu->write_work); hu->proto->close(hu); if (test_bit(HCI_UART_PROTO_READY, &hu->flags)) { clear_bit(HCI_UART_PROTO_READY, &hu->flags); serdev_device_close(hu->serdev); } percpu_free_rwsem(&hu->proto_lock); } EXPORT_SYMBOL_GPL(hci_uart_unregister_device);
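/*
 * Editor's note: a simplified userspace sketch, not part of hci_serdev.c,
 * of the partial-write bookkeeping hci_uart_write_work() above performs:
 * write as much of the frame as the device accepts, advance past the
 * accepted bytes, and retry the remainder. The real driver stashes the
 * leftover skb in hu->tx_skb and resumes on the next TX wakeup; this sketch
 * just loops in place. fake_write() is a hypothetical stand-in for
 * serdev_device_write_buf() that accepts at most 4 bytes per call.
 */
#include <stddef.h>
#include <stdio.h>
#include <string.h>

static int fake_write(const char *data, size_t len)
{
	size_t n = len < 4 ? len : 4;	/* device accepts at most 4 bytes */

	fwrite(data, 1, n, stdout);
	return (int)n;
}

int main(void)
{
	const char *frame = "hci-frame-bytes";
	size_t off = 0, len = strlen(frame);

	while (off < len) {
		int n = fake_write(frame + off, len - off);

		if (n <= 0)
			break;	/* driver would keep tx_skb and retry later */
		off += (size_t)n;	/* like skb_pull(skb, len) */
	}
	printf("\n");
	return 0;
}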
linux-master
drivers/bluetooth/hci_serdev.c
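As a side note on the file above: hci_uart_setup() resolves both the init and operational baud rates with the same two-level fallback (per-device value first, then the protocol's default, else skip). A minimal userspace sketch of that selection logic, with invented struct names and nothing assumed beyond standard C:

#include <stdio.h>

struct demo_proto {
	unsigned int init_speed;
	unsigned int oper_speed;
};

struct demo_uart {
	unsigned int init_speed;
	unsigned int oper_speed;
	const struct demo_proto *proto;
};

static unsigned int pick_speed(unsigned int dev_speed, unsigned int proto_speed)
{
	if (dev_speed)		/* per-device value wins */
		return dev_speed;
	if (proto_speed)	/* else the protocol default */
		return proto_speed;
	return 0;		/* else leave the line rate alone */
}

int main(void)
{
	const struct demo_proto p = { .init_speed = 115200, .oper_speed = 3000000 };
	struct demo_uart hu = { .init_speed = 0, .oper_speed = 921600, .proto = &p };

	/* falls back to the protocol's 115200; oper keeps the device's 921600 */
	printf("init %u, oper %u\n",
	       pick_speed(hu.init_speed, hu.proto->init_speed),
	       pick_speed(hu.oper_speed, hu.proto->oper_speed));
	return 0;
}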
/* * * A driver for Nokia Connectivity Card DTL-1 devices * * Copyright (C) 2001-2002 Marcel Holtmann <[email protected]> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation; * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * The initial developer of the original code is David A. Hinds * <[email protected]>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/serial.h> #include <linux/serial_reg.h> #include <linux/bitops.h> #include <asm/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> /* ======================== Module parameters ======================== */ MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth driver for Nokia Connectivity Card DTL-1"); MODULE_LICENSE("GPL"); /* ======================== Local structures ======================== */ struct dtl1_info { struct pcmcia_device *p_dev; struct hci_dev *hdev; spinlock_t lock; /* For serializing operations */ unsigned long flowmask; /* HCI flow mask */ int ri_latch; struct sk_buff_head txq; unsigned long tx_state; unsigned long rx_state; unsigned long rx_count; struct sk_buff *rx_skb; }; static int dtl1_config(struct pcmcia_device *link); /* Transmit states */ #define XMIT_SENDING 1 #define XMIT_WAKEUP 2 #define XMIT_WAITING 8 /* Receiver States */ #define RECV_WAIT_NSH 0 #define RECV_WAIT_DATA 1 struct nsh { u8 type; u8 zero; u16 len; } __packed; /* Nokia Specific Header */ #define NSHL 4 /* Nokia Specific Header Length */ /* ======================== Interrupt handling ======================== */ static int dtl1_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) { int actual = 0; /* Tx FIFO should be empty */ if (!(inb(iobase + UART_LSR) & UART_LSR_THRE)) return 0; /* Fill FIFO with current frame */ while ((fifo_size-- > 0) && (actual < len)) { /* Transmit next byte */ outb(buf[actual], iobase + UART_TX); actual++; } return actual; } static void dtl1_write_wakeup(struct dtl1_info *info) { if (!info) { BT_ERR("Unknown device"); return; } if (test_bit(XMIT_WAITING, &(info->tx_state))) { set_bit(XMIT_WAKEUP, &(info->tx_state)); return; } if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) { set_bit(XMIT_WAKEUP, &(info->tx_state)); return; } do { unsigned int iobase = info->p_dev->resource[0]->start; register struct sk_buff *skb; int len; clear_bit(XMIT_WAKEUP, &(info->tx_state)); if (!pcmcia_dev_present(info->p_dev)) return; skb = skb_dequeue(&(info->txq)); if (!skb) break; /* Send frame */ len = dtl1_write(iobase, 32, skb->data, skb->len); if (len == skb->len) { set_bit(XMIT_WAITING, &(info->tx_state)); kfree_skb(skb); } else { skb_pull(skb, len); skb_queue_head(&(info->txq), skb); } info->hdev->stat.byte_tx += len; } while 
(test_bit(XMIT_WAKEUP, &(info->tx_state))); clear_bit(XMIT_SENDING, &(info->tx_state)); } static void dtl1_control(struct dtl1_info *info, struct sk_buff *skb) { u8 flowmask = *(u8 *)skb->data; int i; printk(KERN_INFO "Bluetooth: Nokia control data ="); for (i = 0; i < skb->len; i++) printk(" %02x", skb->data[i]); printk("\n"); /* transition to active state */ if (((info->flowmask & 0x07) == 0) && ((flowmask & 0x07) != 0)) { clear_bit(XMIT_WAITING, &(info->tx_state)); dtl1_write_wakeup(info); } info->flowmask = flowmask; kfree_skb(skb); } static void dtl1_receive(struct dtl1_info *info) { unsigned int iobase; struct nsh *nsh; int boguscount = 0; if (!info) { BT_ERR("Unknown device"); return; } iobase = info->p_dev->resource[0]->start; do { info->hdev->stat.byte_rx++; /* Allocate packet */ if (info->rx_skb == NULL) { info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); if (!info->rx_skb) { BT_ERR("Can't allocate mem for new packet"); info->rx_state = RECV_WAIT_NSH; info->rx_count = NSHL; return; } } skb_put_u8(info->rx_skb, inb(iobase + UART_RX)); nsh = (struct nsh *)info->rx_skb->data; info->rx_count--; if (info->rx_count == 0) { switch (info->rx_state) { case RECV_WAIT_NSH: info->rx_state = RECV_WAIT_DATA; info->rx_count = nsh->len + (nsh->len & 0x0001); break; case RECV_WAIT_DATA: hci_skb_pkt_type(info->rx_skb) = nsh->type; /* remove PAD byte if it exists */ if (nsh->len & 0x0001) { info->rx_skb->tail--; info->rx_skb->len--; } /* remove NSH */ skb_pull(info->rx_skb, NSHL); switch (hci_skb_pkt_type(info->rx_skb)) { case 0x80: /* control data for the Nokia Card */ dtl1_control(info, info->rx_skb); break; case 0x82: case 0x83: case 0x84: /* send frame to the HCI layer */ hci_skb_pkt_type(info->rx_skb) &= 0x0f; hci_recv_frame(info->hdev, info->rx_skb); break; default: /* unknown packet */ BT_ERR("Unknown HCI packet with type 0x%02x received", hci_skb_pkt_type(info->rx_skb)); kfree_skb(info->rx_skb); break; } info->rx_state = RECV_WAIT_NSH; info->rx_count = NSHL; info->rx_skb = NULL; break; } } /* Make sure we don't stay here too long */ if (boguscount++ > 32) break; } while (inb(iobase + UART_LSR) & UART_LSR_DR); } static irqreturn_t dtl1_interrupt(int irq, void *dev_inst) { struct dtl1_info *info = dev_inst; unsigned int iobase; unsigned char msr; int boguscount = 0; int iir, lsr; irqreturn_t r = IRQ_NONE; if (!info || !info->hdev) /* our irq handler is shared */ return IRQ_NONE; iobase = info->p_dev->resource[0]->start; spin_lock(&(info->lock)); iir = inb(iobase + UART_IIR) & UART_IIR_ID; while (iir) { r = IRQ_HANDLED; /* Clear interrupt */ lsr = inb(iobase + UART_LSR); switch (iir) { case UART_IIR_RLSI: BT_ERR("RLSI"); break; case UART_IIR_RDI: /* Receive interrupt */ dtl1_receive(info); break; case UART_IIR_THRI: if (lsr & UART_LSR_THRE) { /* Transmitter ready for data */ dtl1_write_wakeup(info); } break; default: BT_ERR("Unhandled IIR=%#x", iir); break; } /* Make sure we don't stay here too long */ if (boguscount++ > 100) break; iir = inb(iobase + UART_IIR) & UART_IIR_ID; } msr = inb(iobase + UART_MSR); if (info->ri_latch ^ (msr & UART_MSR_RI)) { info->ri_latch = msr & UART_MSR_RI; clear_bit(XMIT_WAITING, &(info->tx_state)); dtl1_write_wakeup(info); r = IRQ_HANDLED; } spin_unlock(&(info->lock)); return r; } /* ======================== HCI interface ======================== */ static int dtl1_hci_open(struct hci_dev *hdev) { return 0; } static int dtl1_hci_flush(struct hci_dev *hdev) { struct dtl1_info *info = hci_get_drvdata(hdev); /* Drop TX queue */ 
skb_queue_purge(&(info->txq)); return 0; } static int dtl1_hci_close(struct hci_dev *hdev) { dtl1_hci_flush(hdev); return 0; } static int dtl1_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct dtl1_info *info = hci_get_drvdata(hdev); struct sk_buff *s; struct nsh nsh; switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; nsh.type = 0x81; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; nsh.type = 0x82; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; nsh.type = 0x83; break; default: return -EILSEQ; } nsh.zero = 0; nsh.len = skb->len; s = bt_skb_alloc(NSHL + skb->len + 1, GFP_ATOMIC); if (!s) return -ENOMEM; skb_reserve(s, NSHL); skb_copy_from_linear_data(skb, skb_put(s, skb->len), skb->len); if (skb->len & 0x0001) skb_put_u8(s, 0); /* PAD */ /* Prepend skb with Nokia frame header and queue */ memcpy(skb_push(s, NSHL), &nsh, NSHL); skb_queue_tail(&(info->txq), s); dtl1_write_wakeup(info); kfree_skb(skb); return 0; } /* ======================== Card services HCI interaction ======================== */ static int dtl1_open(struct dtl1_info *info) { unsigned long flags; unsigned int iobase = info->p_dev->resource[0]->start; struct hci_dev *hdev; spin_lock_init(&(info->lock)); skb_queue_head_init(&(info->txq)); info->rx_state = RECV_WAIT_NSH; info->rx_count = NSHL; info->rx_skb = NULL; set_bit(XMIT_WAITING, &(info->tx_state)); /* Initialize HCI device */ hdev = hci_alloc_dev(); if (!hdev) { BT_ERR("Can't allocate HCI device"); return -ENOMEM; } info->hdev = hdev; hdev->bus = HCI_PCCARD; hci_set_drvdata(hdev, info); SET_HCIDEV_DEV(hdev, &info->p_dev->dev); hdev->open = dtl1_hci_open; hdev->close = dtl1_hci_close; hdev->flush = dtl1_hci_flush; hdev->send = dtl1_hci_send_frame; spin_lock_irqsave(&(info->lock), flags); /* Reset UART */ outb(0, iobase + UART_MCR); /* Turn off interrupts */ outb(0, iobase + UART_IER); /* Initialize UART */ outb(UART_LCR_WLEN8, iobase + UART_LCR); /* Reset DLAB */ outb((UART_MCR_DTR | UART_MCR_RTS | UART_MCR_OUT2), iobase + UART_MCR); info->ri_latch = inb(info->p_dev->resource[0]->start + UART_MSR) & UART_MSR_RI; /* Turn on interrupts */ outb(UART_IER_RLSI | UART_IER_RDI | UART_IER_THRI, iobase + UART_IER); spin_unlock_irqrestore(&(info->lock), flags); /* Timeout before it is safe to send the first HCI packet */ msleep(2000); /* Register HCI device */ if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); info->hdev = NULL; hci_free_dev(hdev); return -ENODEV; } return 0; } static int dtl1_close(struct dtl1_info *info) { unsigned long flags; unsigned int iobase = info->p_dev->resource[0]->start; struct hci_dev *hdev = info->hdev; if (!hdev) return -ENODEV; dtl1_hci_close(hdev); spin_lock_irqsave(&(info->lock), flags); /* Reset UART */ outb(0, iobase + UART_MCR); /* Turn off interrupts */ outb(0, iobase + UART_IER); spin_unlock_irqrestore(&(info->lock), flags); hci_unregister_dev(hdev); hci_free_dev(hdev); return 0; } static int dtl1_probe(struct pcmcia_device *link) { struct dtl1_info *info; /* Create new info device */ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->p_dev = link; link->priv = info; link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_IO; return dtl1_config(link); } static void dtl1_detach(struct pcmcia_device *link) { struct dtl1_info *info = link->priv; dtl1_close(info); pcmcia_disable_device(link); } static int dtl1_confcheck(struct pcmcia_device *p_dev, void *priv_data) { if ((p_dev->resource[1]->end) || (p_dev->resource[1]->end < 8)) return 
		-ENODEV;

	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;

	return pcmcia_request_io(p_dev);
}

static int dtl1_config(struct pcmcia_device *link)
{
	struct dtl1_info *info = link->priv;
	int ret;

	/* Look for a generic full-sized window */
	link->resource[0]->end = 8;

	ret = pcmcia_loop_config(link, dtl1_confcheck, NULL);
	if (ret)
		goto failed;

	ret = pcmcia_request_irq(link, dtl1_interrupt);
	if (ret)
		goto failed;

	ret = pcmcia_enable_device(link);
	if (ret)
		goto failed;

	ret = dtl1_open(info);
	if (ret)
		goto failed;

	return 0;

failed:
	dtl1_detach(link);
	return ret;
}

static const struct pcmcia_device_id dtl1_ids[] = {
	PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-1", 0xe1bfdd64, 0xe168480d),
	PCMCIA_DEVICE_PROD_ID12("Nokia Mobile Phones", "DTL-4", 0xe1bfdd64, 0x9102bc82),
	PCMCIA_DEVICE_PROD_ID12("Socket", "CF", 0xb38bcc2e, 0x44ebf863),
	PCMCIA_DEVICE_PROD_ID12("Socket", "CF+ Personal Network Card", 0xb38bcc2e, 0xe732bae3),
	PCMCIA_DEVICE_NULL
};

MODULE_DEVICE_TABLE(pcmcia, dtl1_ids);

static struct pcmcia_driver dtl1_driver = {
	.owner		= THIS_MODULE,
	.name		= "dtl1_cs",
	.probe		= dtl1_probe,
	.remove		= dtl1_detach,
	.id_table	= dtl1_ids,
};
module_pcmcia_driver(dtl1_driver);
linux-master
drivers/bluetooth/dtl1_cs.c
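The dtl1 driver above frames every HCI packet in a 4-byte Nokia Specific Header and pads odd-length payloads to an even length (the RX path mirrors this with nsh->len + (nsh->len & 0x0001)). A self-contained userspace sketch of that framing with invented helper names; note the real driver stores nsh.len in host byte order, while this sketch assumes little-endian:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define NSHL 4	/* Nokia Specific Header Length, as in the driver */

/* type byte, zero byte, 16-bit length, payload, optional PAD byte */
static size_t nsh_frame(uint8_t type, const uint8_t *payload, uint16_t len,
			uint8_t *out)
{
	out[0] = type;			/* 0x81 cmd, 0x82 ACL, 0x83 SCO on TX */
	out[1] = 0;
	out[2] = len & 0xff;		/* driver keeps nsh.len in host order; */
	out[3] = len >> 8;		/* little-endian assumed here */
	memcpy(out + NSHL, payload, len);
	if (len & 1)
		out[NSHL + len] = 0;	/* PAD byte, stripped again on RX */
	return NSHL + len + (len & 1);	/* same rounding the RX path applies */
}

int main(void)
{
	uint8_t cmd[3] = { 0x03, 0x0c, 0x00 };	/* odd-length example payload */
	uint8_t frame[NSHL + sizeof(cmd) + 1];
	size_t i, n = nsh_frame(0x81, cmd, sizeof(cmd), frame);

	for (i = 0; i < n; i++)
		printf("%02x ", frame[i]);
	printf("(%zu bytes on the wire)\n", n);
	return 0;
}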
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2016, Linaro Ltd. * Copyright (c) 2015, Sony Mobile Communications Inc. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/rpmsg.h> #include <linux/of.h> #include <linux/soc/qcom/wcnss_ctrl.h> #include <linux/platform_device.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btqca.h" struct btqcomsmd { struct hci_dev *hdev; struct rpmsg_endpoint *acl_channel; struct rpmsg_endpoint *cmd_channel; }; static int btqcomsmd_recv(struct hci_dev *hdev, unsigned int type, const void *data, size_t count) { struct sk_buff *skb; /* Use GFP_ATOMIC as we're in IRQ context */ skb = bt_skb_alloc(count, GFP_ATOMIC); if (!skb) { hdev->stat.err_rx++; return -ENOMEM; } hci_skb_pkt_type(skb) = type; skb_put_data(skb, data, count); return hci_recv_frame(hdev, skb); } static int btqcomsmd_acl_callback(struct rpmsg_device *rpdev, void *data, int count, void *priv, u32 addr) { struct btqcomsmd *btq = priv; btq->hdev->stat.byte_rx += count; return btqcomsmd_recv(btq->hdev, HCI_ACLDATA_PKT, data, count); } static int btqcomsmd_cmd_callback(struct rpmsg_device *rpdev, void *data, int count, void *priv, u32 addr) { struct btqcomsmd *btq = priv; btq->hdev->stat.byte_rx += count; return btqcomsmd_recv(btq->hdev, HCI_EVENT_PKT, data, count); } static int btqcomsmd_send(struct hci_dev *hdev, struct sk_buff *skb) { struct btqcomsmd *btq = hci_get_drvdata(hdev); int ret; switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: ret = rpmsg_send(btq->acl_channel, skb->data, skb->len); if (ret) { hdev->stat.err_tx++; break; } hdev->stat.acl_tx++; hdev->stat.byte_tx += skb->len; break; case HCI_COMMAND_PKT: ret = rpmsg_send(btq->cmd_channel, skb->data, skb->len); if (ret) { hdev->stat.err_tx++; break; } hdev->stat.cmd_tx++; hdev->stat.byte_tx += skb->len; break; default: ret = -EILSEQ; break; } if (!ret) kfree_skb(skb); return ret; } static int btqcomsmd_open(struct hci_dev *hdev) { return 0; } static int btqcomsmd_close(struct hci_dev *hdev) { return 0; } static int btqcomsmd_setup(struct hci_dev *hdev) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); /* Devices do not have persistent storage for BD address. Retrieve * it from the firmware node property. */ set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); return 0; } static int btqcomsmd_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { int ret; ret = qca_set_bdaddr_rome(hdev, bdaddr); if (ret) return ret; /* The firmware stops responding for a while after setting the bdaddr, * causing timeouts for subsequent commands. Sleep a bit to avoid this. 
	 */
	usleep_range(1000, 10000);

	return 0;
}

static int btqcomsmd_probe(struct platform_device *pdev)
{
	struct btqcomsmd *btq;
	struct hci_dev *hdev;
	void *wcnss;
	int ret;

	btq = devm_kzalloc(&pdev->dev, sizeof(*btq), GFP_KERNEL);
	if (!btq)
		return -ENOMEM;

	wcnss = dev_get_drvdata(pdev->dev.parent);

	btq->acl_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_ACL",
						   btqcomsmd_acl_callback, btq);
	if (IS_ERR(btq->acl_channel))
		return PTR_ERR(btq->acl_channel);

	btq->cmd_channel = qcom_wcnss_open_channel(wcnss, "APPS_RIVA_BT_CMD",
						   btqcomsmd_cmd_callback, btq);
	if (IS_ERR(btq->cmd_channel)) {
		ret = PTR_ERR(btq->cmd_channel);
		goto destroy_acl_channel;
	}

	hdev = hci_alloc_dev();
	if (!hdev) {
		ret = -ENOMEM;
		goto destroy_cmd_channel;
	}

	hci_set_drvdata(hdev, btq);
	btq->hdev = hdev;
	SET_HCIDEV_DEV(hdev, &pdev->dev);

	hdev->bus = HCI_SMD;
	hdev->open = btqcomsmd_open;
	hdev->close = btqcomsmd_close;
	hdev->send = btqcomsmd_send;
	hdev->setup = btqcomsmd_setup;
	hdev->set_bdaddr = btqcomsmd_set_bdaddr;

	ret = hci_register_dev(hdev);
	if (ret < 0)
		goto hci_free_dev;

	platform_set_drvdata(pdev, btq);

	return 0;

hci_free_dev:
	hci_free_dev(hdev);
destroy_cmd_channel:
	rpmsg_destroy_ept(btq->cmd_channel);
destroy_acl_channel:
	rpmsg_destroy_ept(btq->acl_channel);

	return ret;
}

static int btqcomsmd_remove(struct platform_device *pdev)
{
	struct btqcomsmd *btq = platform_get_drvdata(pdev);

	hci_unregister_dev(btq->hdev);
	hci_free_dev(btq->hdev);

	rpmsg_destroy_ept(btq->cmd_channel);
	rpmsg_destroy_ept(btq->acl_channel);

	return 0;
}

static const struct of_device_id btqcomsmd_of_match[] = {
	{ .compatible = "qcom,wcnss-bt", },
	{ },
};
MODULE_DEVICE_TABLE(of, btqcomsmd_of_match);

static struct platform_driver btqcomsmd_driver = {
	.probe = btqcomsmd_probe,
	.remove = btqcomsmd_remove,
	.driver  = {
		.name  = "btqcomsmd",
		.of_match_table = btqcomsmd_of_match,
	},
};
module_platform_driver(btqcomsmd_driver);

MODULE_AUTHOR("Bjorn Andersson <[email protected]>");
MODULE_DESCRIPTION("Qualcomm SMD HCI driver");
MODULE_LICENSE("GPL v2");
linux-master
drivers/bluetooth/btqcomsmd.c
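For illustration only: btqcomsmd_send() above routes each packet by HCI type, ACL data to the ACL SMD channel and commands to the CMD channel, rejecting anything else with -EILSEQ. A toy userspace sketch of that demux, with rpmsg_send() stubbed out as printf:

#include <stdio.h>
#include <errno.h>

#define HCI_COMMAND_PKT 0x01
#define HCI_ACLDATA_PKT 0x02

static int route_tx(int pkt_type, int len)
{
	switch (pkt_type) {
	case HCI_ACLDATA_PKT:
		printf("rpmsg_send(acl_channel, %d bytes)\n", len);
		return 0;
	case HCI_COMMAND_PKT:
		printf("rpmsg_send(cmd_channel, %d bytes)\n", len);
		return 0;
	default:
		return -EILSEQ;	/* driver frees the skb only on success */
	}
}

int main(void)
{
	route_tx(HCI_ACLDATA_PKT, 27);
	route_tx(HCI_COMMAND_PKT, 3);
	printf("unknown type -> %d\n", route_tx(0x04, 9));
	return 0;
}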
/* * * Driver for the 3Com Bluetooth PCMCIA card * * Copyright (C) 2001-2002 Marcel Holtmann <[email protected]> * Jose Orlando Pereira <[email protected]> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation; * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * The initial developer of the original code is David A. Hinds * <[email protected]>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/serial.h> #include <linux/serial_reg.h> #include <linux/bitops.h> #include <asm/io.h> #include <linux/device.h> #include <linux/firmware.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> /* ======================== Module parameters ======================== */ MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth driver for the 3Com Bluetooth PCMCIA card"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("BT3CPCC.bin"); /* ======================== Local structures ======================== */ struct bt3c_info { struct pcmcia_device *p_dev; struct hci_dev *hdev; spinlock_t lock; /* For serializing operations */ struct sk_buff_head txq; unsigned long tx_state; unsigned long rx_state; unsigned long rx_count; struct sk_buff *rx_skb; }; static int bt3c_config(struct pcmcia_device *link); static void bt3c_release(struct pcmcia_device *link); static void bt3c_detach(struct pcmcia_device *p_dev); /* Transmit states */ #define XMIT_SENDING 1 #define XMIT_WAKEUP 2 #define XMIT_WAITING 8 /* Receiver states */ #define RECV_WAIT_PACKET_TYPE 0 #define RECV_WAIT_EVENT_HEADER 1 #define RECV_WAIT_ACL_HEADER 2 #define RECV_WAIT_SCO_HEADER 3 #define RECV_WAIT_DATA 4 /* ======================== Special I/O functions ======================== */ #define DATA_L 0 #define DATA_H 1 #define ADDR_L 2 #define ADDR_H 3 #define CONTROL 4 static inline void bt3c_address(unsigned int iobase, unsigned short addr) { outb(addr & 0xff, iobase + ADDR_L); outb((addr >> 8) & 0xff, iobase + ADDR_H); } static inline void bt3c_put(unsigned int iobase, unsigned short value) { outb(value & 0xff, iobase + DATA_L); outb((value >> 8) & 0xff, iobase + DATA_H); } static inline void bt3c_io_write(unsigned int iobase, unsigned short addr, unsigned short value) { bt3c_address(iobase, addr); bt3c_put(iobase, value); } static inline unsigned short bt3c_get(unsigned int iobase) { unsigned short value = inb(iobase + DATA_L); value |= inb(iobase + DATA_H) << 8; return value; } static inline unsigned short bt3c_read(unsigned int iobase, unsigned short addr) { bt3c_address(iobase, addr); return bt3c_get(iobase); } /* ======================== Interrupt handling ======================== */ static int bt3c_write(unsigned int iobase, int fifo_size, __u8 *buf, int len) { int actual = 0; 
bt3c_address(iobase, 0x7080); /* Fill FIFO with current frame */ while (actual < len) { /* Transmit next byte */ bt3c_put(iobase, buf[actual]); actual++; } bt3c_io_write(iobase, 0x7005, actual); return actual; } static void bt3c_write_wakeup(struct bt3c_info *info) { if (!info) { BT_ERR("Unknown device"); return; } if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) return; do { unsigned int iobase = info->p_dev->resource[0]->start; register struct sk_buff *skb; int len; if (!pcmcia_dev_present(info->p_dev)) break; skb = skb_dequeue(&(info->txq)); if (!skb) { clear_bit(XMIT_SENDING, &(info->tx_state)); break; } /* Send frame */ len = bt3c_write(iobase, 256, skb->data, skb->len); if (len != skb->len) BT_ERR("Very strange"); kfree_skb(skb); info->hdev->stat.byte_tx += len; } while (0); } static void bt3c_receive(struct bt3c_info *info) { unsigned int iobase; int size = 0, avail; if (!info) { BT_ERR("Unknown device"); return; } iobase = info->p_dev->resource[0]->start; avail = bt3c_read(iobase, 0x7006); bt3c_address(iobase, 0x7480); while (size < avail) { size++; info->hdev->stat.byte_rx++; /* Allocate packet */ if (!info->rx_skb) { info->rx_state = RECV_WAIT_PACKET_TYPE; info->rx_count = 0; info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); if (!info->rx_skb) { BT_ERR("Can't allocate mem for new packet"); return; } } if (info->rx_state == RECV_WAIT_PACKET_TYPE) { hci_skb_pkt_type(info->rx_skb) = inb(iobase + DATA_L); inb(iobase + DATA_H); switch (hci_skb_pkt_type(info->rx_skb)) { case HCI_EVENT_PKT: info->rx_state = RECV_WAIT_EVENT_HEADER; info->rx_count = HCI_EVENT_HDR_SIZE; break; case HCI_ACLDATA_PKT: info->rx_state = RECV_WAIT_ACL_HEADER; info->rx_count = HCI_ACL_HDR_SIZE; break; case HCI_SCODATA_PKT: info->rx_state = RECV_WAIT_SCO_HEADER; info->rx_count = HCI_SCO_HDR_SIZE; break; default: /* Unknown packet */ BT_ERR("Unknown HCI packet with type 0x%02x received", hci_skb_pkt_type(info->rx_skb)); info->hdev->stat.err_rx++; kfree_skb(info->rx_skb); info->rx_skb = NULL; break; } } else { __u8 x = inb(iobase + DATA_L); skb_put_u8(info->rx_skb, x); inb(iobase + DATA_H); info->rx_count--; if (info->rx_count == 0) { int dlen; struct hci_event_hdr *eh; struct hci_acl_hdr *ah; struct hci_sco_hdr *sh; switch (info->rx_state) { case RECV_WAIT_EVENT_HEADER: eh = hci_event_hdr(info->rx_skb); info->rx_state = RECV_WAIT_DATA; info->rx_count = eh->plen; break; case RECV_WAIT_ACL_HEADER: ah = hci_acl_hdr(info->rx_skb); dlen = __le16_to_cpu(ah->dlen); info->rx_state = RECV_WAIT_DATA; info->rx_count = dlen; break; case RECV_WAIT_SCO_HEADER: sh = hci_sco_hdr(info->rx_skb); info->rx_state = RECV_WAIT_DATA; info->rx_count = sh->dlen; break; case RECV_WAIT_DATA: hci_recv_frame(info->hdev, info->rx_skb); info->rx_skb = NULL; break; } } } } bt3c_io_write(iobase, 0x7006, 0x0000); } static irqreturn_t bt3c_interrupt(int irq, void *dev_inst) { struct bt3c_info *info = dev_inst; unsigned int iobase; int iir; irqreturn_t r = IRQ_NONE; if (!info || !info->hdev) /* our irq handler is shared */ return IRQ_NONE; iobase = info->p_dev->resource[0]->start; spin_lock(&(info->lock)); iir = inb(iobase + CONTROL); if (iir & 0x80) { int stat = bt3c_read(iobase, 0x7001); if ((stat & 0xff) == 0x7f) { BT_ERR("Very strange (stat=0x%04x)", stat); } else if ((stat & 0xff) != 0xff) { if (stat & 0x0020) { int status = bt3c_read(iobase, 0x7002) & 0x10; bt_dev_info(info->hdev, "Antenna %s", status ? 
"out" : "in"); } if (stat & 0x0001) bt3c_receive(info); if (stat & 0x0002) { clear_bit(XMIT_SENDING, &(info->tx_state)); bt3c_write_wakeup(info); } bt3c_io_write(iobase, 0x7001, 0x0000); outb(iir, iobase + CONTROL); } r = IRQ_HANDLED; } spin_unlock(&(info->lock)); return r; } /* ======================== HCI interface ======================== */ static int bt3c_hci_flush(struct hci_dev *hdev) { struct bt3c_info *info = hci_get_drvdata(hdev); /* Drop TX queue */ skb_queue_purge(&(info->txq)); return 0; } static int bt3c_hci_open(struct hci_dev *hdev) { return 0; } static int bt3c_hci_close(struct hci_dev *hdev) { bt3c_hci_flush(hdev); return 0; } static int bt3c_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct bt3c_info *info = hci_get_drvdata(hdev); unsigned long flags; switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; } /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&(info->txq), skb); spin_lock_irqsave(&(info->lock), flags); bt3c_write_wakeup(info); spin_unlock_irqrestore(&(info->lock), flags); return 0; } /* ======================== Card services HCI interaction ======================== */ static int bt3c_load_firmware(struct bt3c_info *info, const unsigned char *firmware, int count) { char *ptr = (char *) firmware; char b[9]; unsigned int iobase, tmp, tn; unsigned long size, addr, fcs; int i, err = 0; iobase = info->p_dev->resource[0]->start; /* Reset */ bt3c_io_write(iobase, 0x8040, 0x0404); bt3c_io_write(iobase, 0x8040, 0x0400); udelay(1); bt3c_io_write(iobase, 0x8040, 0x0404); udelay(17); /* Load */ while (count) { if (ptr[0] != 'S') { BT_ERR("Bad address in firmware"); err = -EFAULT; goto error; } memset(b, 0, sizeof(b)); memcpy(b, ptr + 2, 2); if (kstrtoul(b, 16, &size) < 0) return -EINVAL; memset(b, 0, sizeof(b)); memcpy(b, ptr + 4, 8); if (kstrtoul(b, 16, &addr) < 0) return -EINVAL; memset(b, 0, sizeof(b)); memcpy(b, ptr + (size * 2) + 2, 2); if (kstrtoul(b, 16, &fcs) < 0) return -EINVAL; memset(b, 0, sizeof(b)); for (tmp = 0, i = 0; i < size; i++) { memcpy(b, ptr + (i * 2) + 2, 2); if (kstrtouint(b, 16, &tn)) return -EINVAL; tmp += tn; } if (((tmp + fcs) & 0xff) != 0xff) { BT_ERR("Checksum error in firmware"); err = -EILSEQ; goto error; } if (ptr[1] == '3') { bt3c_address(iobase, addr); memset(b, 0, sizeof(b)); for (i = 0; i < (size - 4) / 2; i++) { memcpy(b, ptr + (i * 4) + 12, 4); if (kstrtouint(b, 16, &tmp)) return -EINVAL; bt3c_put(iobase, tmp); } } ptr += (size * 2) + 6; count -= (size * 2) + 6; } udelay(17); /* Boot */ bt3c_address(iobase, 0x3000); outb(inb(iobase + CONTROL) | 0x40, iobase + CONTROL); error: udelay(17); /* Clear */ bt3c_io_write(iobase, 0x7006, 0x0000); bt3c_io_write(iobase, 0x7005, 0x0000); bt3c_io_write(iobase, 0x7001, 0x0000); return err; } static int bt3c_open(struct bt3c_info *info) { const struct firmware *firmware; struct hci_dev *hdev; int err; spin_lock_init(&(info->lock)); skb_queue_head_init(&(info->txq)); info->rx_state = RECV_WAIT_PACKET_TYPE; info->rx_count = 0; info->rx_skb = NULL; /* Initialize HCI device */ hdev = hci_alloc_dev(); if (!hdev) { BT_ERR("Can't allocate HCI device"); return -ENOMEM; } info->hdev = hdev; hdev->bus = HCI_PCCARD; hci_set_drvdata(hdev, info); SET_HCIDEV_DEV(hdev, &info->p_dev->dev); hdev->open = bt3c_hci_open; hdev->close = bt3c_hci_close; hdev->flush = bt3c_hci_flush; hdev->send = bt3c_hci_send_frame; /* Load 
	   firmware */
	err = request_firmware(&firmware, "BT3CPCC.bin", &info->p_dev->dev);
	if (err < 0) {
		BT_ERR("Firmware request failed");
		goto error;
	}

	err = bt3c_load_firmware(info, firmware->data, firmware->size);

	release_firmware(firmware);

	if (err < 0) {
		BT_ERR("Firmware loading failed");
		goto error;
	}

	/* Timeout before it is safe to send the first HCI packet */
	msleep(1000);

	/* Register HCI device */
	err = hci_register_dev(hdev);
	if (err < 0) {
		BT_ERR("Can't register HCI device");
		goto error;
	}

	return 0;

error:
	info->hdev = NULL;
	hci_free_dev(hdev);
	return err;
}

static int bt3c_close(struct bt3c_info *info)
{
	struct hci_dev *hdev = info->hdev;

	if (!hdev)
		return -ENODEV;

	bt3c_hci_close(hdev);

	hci_unregister_dev(hdev);
	hci_free_dev(hdev);

	return 0;
}

static int bt3c_probe(struct pcmcia_device *link)
{
	struct bt3c_info *info;

	/* Create new info device */
	info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->p_dev = link;
	link->priv = info;

	link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
		CONF_AUTO_SET_IO;

	return bt3c_config(link);
}

static void bt3c_detach(struct pcmcia_device *link)
{
	bt3c_release(link);
}

static int bt3c_check_config(struct pcmcia_device *p_dev, void *priv_data)
{
	int *try = priv_data;

	if (!try)
		p_dev->io_lines = 16;

	if ((p_dev->resource[0]->end != 8) || (p_dev->resource[0]->start == 0))
		return -EINVAL;

	p_dev->resource[0]->end = 8;
	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;

	return pcmcia_request_io(p_dev);
}

static int bt3c_check_config_notpicky(struct pcmcia_device *p_dev,
				      void *priv_data)
{
	static unsigned int base[5] = { 0x3f8, 0x2f8, 0x3e8, 0x2e8, 0x0 };
	int j;

	if (p_dev->io_lines > 3)
		return -ENODEV;

	p_dev->resource[0]->flags &= ~IO_DATA_PATH_WIDTH;
	p_dev->resource[0]->flags |= IO_DATA_PATH_WIDTH_8;
	p_dev->resource[0]->end = 8;

	for (j = 0; j < 5; j++) {
		p_dev->resource[0]->start = base[j];
		p_dev->io_lines = base[j] ? 16 : 3;
		if (!pcmcia_request_io(p_dev))
			return 0;
	}
	return -ENODEV;
}

static int bt3c_config(struct pcmcia_device *link)
{
	struct bt3c_info *info = link->priv;
	int i;
	unsigned long try;

	/* First pass: look for a config entry that looks normal.
	 * Two tries: without IO aliases, then with aliases
	 */
	for (try = 0; try < 2; try++)
		if (!pcmcia_loop_config(link, bt3c_check_config, (void *) try))
			goto found_port;

	/* Second pass: try to find an entry that isn't picky about
	 * its base address, then try to grab any standard serial port
	 * address, and finally try to get any free port.
	 */
	if (!pcmcia_loop_config(link, bt3c_check_config_notpicky, NULL))
		goto found_port;

	BT_ERR("No usable port range found");
	goto failed;

found_port:
	i = pcmcia_request_irq(link, &bt3c_interrupt);
	if (i != 0)
		goto failed;

	i = pcmcia_enable_device(link);
	if (i != 0)
		goto failed;

	if (bt3c_open(info) != 0)
		goto failed;

	return 0;

failed:
	bt3c_release(link);
	return -ENODEV;
}

static void bt3c_release(struct pcmcia_device *link)
{
	struct bt3c_info *info = link->priv;

	bt3c_close(info);

	pcmcia_disable_device(link);
}

static const struct pcmcia_device_id bt3c_ids[] = {
	PCMCIA_DEVICE_PROD_ID13("3COM", "Bluetooth PC Card", 0xefce0a31, 0xd4ce9b02),
	PCMCIA_DEVICE_NULL
};

MODULE_DEVICE_TABLE(pcmcia, bt3c_ids);

static struct pcmcia_driver bt3c_driver = {
	.owner		= THIS_MODULE,
	.name		= "bt3c_cs",
	.probe		= bt3c_probe,
	.remove		= bt3c_detach,
	.id_table	= bt3c_ids,
};
module_pcmcia_driver(bt3c_driver);
linux-master
drivers/bluetooth/bt3c_cs.c
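bt3c_load_firmware() above validates each 'S' record of BT3CPCC.bin by summing its hex-encoded bytes and requiring (sum + fcs) & 0xff == 0xff, i.e. the checksum is the one's complement of the byte sum. A standalone userspace sketch of that check; the sample record and helper names are made up:

#include <stdio.h>
#include <stdlib.h>

static unsigned int hexbyte(const char *p)
{
	char b[3] = { p[0], p[1], '\0' };

	return (unsigned int)strtoul(b, NULL, 16);
}

/* rec points at 'S'; layout: 'S', type, size byte, payload..., fcs byte */
static int record_checksum_ok(const char *rec)
{
	unsigned int size = hexbyte(rec + 2);	/* counts itself + payload */
	unsigned int sum = 0, i;

	for (i = 0; i < size; i++)		/* same loop as the driver */
		sum += hexbyte(rec + 2 + i * 2);

	return ((sum + hexbyte(rec + 2 + size * 2)) & 0xff) == 0xff;
}

int main(void)
{
	/* size=03, bytes 03+12+34 = 0x49, fcs = 0xff - 0x49 = 0xb6 */
	const char *rec = "S3031234B6";

	printf("checksum %s\n", record_checksum_ok(rec) ? "ok" : "bad");
	return 0;
}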
/* * * Bluetooth driver for the Anycom BlueCard (LSE039/LSE041) * * Copyright (C) 2001-2002 Marcel Holtmann <[email protected]> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation; * * Software distributed under the License is distributed on an "AS * IS" basis, WITHOUT WARRANTY OF ANY KIND, either express or * implied. See the License for the specific language governing * rights and limitations under the License. * * The initial developer of the original code is David A. Hinds * <[email protected]>. Portions created by David A. Hinds * are Copyright (C) 1999 David A. Hinds. All Rights Reserved. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/ptrace.h> #include <linux/ioport.h> #include <linux/spinlock.h> #include <linux/moduleparam.h> #include <linux/wait.h> #include <linux/skbuff.h> #include <linux/io.h> #include <pcmcia/cistpl.h> #include <pcmcia/ciscode.h> #include <pcmcia/ds.h> #include <pcmcia/cisreg.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> /* ======================== Module parameters ======================== */ MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth driver for the Anycom BlueCard (LSE039/LSE041)"); MODULE_LICENSE("GPL"); /* ======================== Local structures ======================== */ struct bluecard_info { struct pcmcia_device *p_dev; struct hci_dev *hdev; spinlock_t lock; /* For serializing operations */ struct timer_list timer; /* For LED control */ struct sk_buff_head txq; unsigned long tx_state; unsigned long rx_state; unsigned long rx_count; struct sk_buff *rx_skb; unsigned char ctrl_reg; unsigned long hw_state; /* Status of the hardware and LED control */ }; static int bluecard_config(struct pcmcia_device *link); static void bluecard_release(struct pcmcia_device *link); static void bluecard_detach(struct pcmcia_device *p_dev); /* Default baud rate: 57600, 115200, 230400 or 460800 */ #define DEFAULT_BAUD_RATE 230400 /* Hardware states */ #define CARD_READY 1 #define CARD_ACTIVITY 2 #define CARD_HAS_PCCARD_ID 4 #define CARD_HAS_POWER_LED 5 #define CARD_HAS_ACTIVITY_LED 6 /* Transmit states */ #define XMIT_SENDING 1 #define XMIT_WAKEUP 2 #define XMIT_BUFFER_NUMBER 5 /* unset = buffer one, set = buffer two */ #define XMIT_BUF_ONE_READY 6 #define XMIT_BUF_TWO_READY 7 #define XMIT_SENDING_READY 8 /* Receiver states */ #define RECV_WAIT_PACKET_TYPE 0 #define RECV_WAIT_EVENT_HEADER 1 #define RECV_WAIT_ACL_HEADER 2 #define RECV_WAIT_SCO_HEADER 3 #define RECV_WAIT_DATA 4 /* Special packet types */ #define PKT_BAUD_RATE_57600 0x80 #define PKT_BAUD_RATE_115200 0x81 #define PKT_BAUD_RATE_230400 0x82 #define PKT_BAUD_RATE_460800 0x83 /* These are the register offsets */ #define REG_COMMAND 0x20 #define REG_INTERRUPT 0x21 #define REG_CONTROL 0x22 #define REG_RX_CONTROL 0x24 #define REG_CARD_RESET 0x30 #define REG_LED_CTRL 0x30 /* REG_COMMAND */ #define REG_COMMAND_TX_BUF_ONE 0x01 #define REG_COMMAND_TX_BUF_TWO 0x02 #define REG_COMMAND_RX_BUF_ONE 0x04 #define REG_COMMAND_RX_BUF_TWO 0x08 #define REG_COMMAND_RX_WIN_ONE 0x00 #define REG_COMMAND_RX_WIN_TWO 0x10 /* REG_CONTROL */ #define REG_CONTROL_BAUD_RATE_57600 0x00 #define REG_CONTROL_BAUD_RATE_115200 0x01 #define 
REG_CONTROL_BAUD_RATE_230400 0x02 #define REG_CONTROL_BAUD_RATE_460800 0x03 #define REG_CONTROL_RTS 0x04 #define REG_CONTROL_BT_ON 0x08 #define REG_CONTROL_BT_RESET 0x10 #define REG_CONTROL_BT_RES_PU 0x20 #define REG_CONTROL_INTERRUPT 0x40 #define REG_CONTROL_CARD_RESET 0x80 /* REG_RX_CONTROL */ #define RTS_LEVEL_SHIFT_BITS 0x02 /* ======================== LED handling routines ======================== */ static void bluecard_activity_led_timeout(struct timer_list *t) { struct bluecard_info *info = from_timer(info, t, timer); unsigned int iobase = info->p_dev->resource[0]->start; if (test_bit(CARD_ACTIVITY, &(info->hw_state))) { /* leave LED in inactive state for HZ/10 for blink effect */ clear_bit(CARD_ACTIVITY, &(info->hw_state)); mod_timer(&(info->timer), jiffies + HZ / 10); } /* Disable activity LED, enable power LED */ outb(0x08 | 0x20, iobase + 0x30); } static void bluecard_enable_activity_led(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; /* don't disturb running blink timer */ if (timer_pending(&(info->timer))) return; set_bit(CARD_ACTIVITY, &(info->hw_state)); if (test_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state))) { /* Enable activity LED, keep power LED enabled */ outb(0x18 | 0x60, iobase + 0x30); } else { /* Disable power LED */ outb(0x00, iobase + 0x30); } /* Stop the LED after HZ/10 */ mod_timer(&(info->timer), jiffies + HZ / 10); } /* ======================== Interrupt handling ======================== */ static int bluecard_write(unsigned int iobase, unsigned int offset, __u8 *buf, int len) { int i, actual; actual = (len > 15) ? 15 : len; outb_p(actual, iobase + offset); for (i = 0; i < actual; i++) outb_p(buf[i], iobase + offset + i + 1); return actual; } static void bluecard_write_wakeup(struct bluecard_info *info) { if (!info) { BT_ERR("Unknown device"); return; } if (!test_bit(XMIT_SENDING_READY, &(info->tx_state))) return; if (test_and_set_bit(XMIT_SENDING, &(info->tx_state))) { set_bit(XMIT_WAKEUP, &(info->tx_state)); return; } do { unsigned int iobase = info->p_dev->resource[0]->start; unsigned int offset; unsigned char command; unsigned long ready_bit; register struct sk_buff *skb; int len; clear_bit(XMIT_WAKEUP, &(info->tx_state)); if (!pcmcia_dev_present(info->p_dev)) return; if (test_bit(XMIT_BUFFER_NUMBER, &(info->tx_state))) { if (!test_bit(XMIT_BUF_TWO_READY, &(info->tx_state))) break; offset = 0x10; command = REG_COMMAND_TX_BUF_TWO; ready_bit = XMIT_BUF_TWO_READY; } else { if (!test_bit(XMIT_BUF_ONE_READY, &(info->tx_state))) break; offset = 0x00; command = REG_COMMAND_TX_BUF_ONE; ready_bit = XMIT_BUF_ONE_READY; } skb = skb_dequeue(&(info->txq)); if (!skb) break; if (hci_skb_pkt_type(skb) & 0x80) { /* Disable RTS */ info->ctrl_reg |= REG_CONTROL_RTS; outb(info->ctrl_reg, iobase + REG_CONTROL); } /* Activate LED */ bluecard_enable_activity_led(info); /* Send frame */ len = bluecard_write(iobase, offset, skb->data, skb->len); /* Tell the FPGA to send the data */ outb_p(command, iobase + REG_COMMAND); /* Mark the buffer as dirty */ clear_bit(ready_bit, &(info->tx_state)); if (hci_skb_pkt_type(skb) & 0x80) { DECLARE_WAIT_QUEUE_HEAD_ONSTACK(wq); DEFINE_WAIT(wait); unsigned char baud_reg; switch (hci_skb_pkt_type(skb)) { case PKT_BAUD_RATE_460800: baud_reg = REG_CONTROL_BAUD_RATE_460800; break; case PKT_BAUD_RATE_230400: baud_reg = REG_CONTROL_BAUD_RATE_230400; break; case PKT_BAUD_RATE_115200: baud_reg = REG_CONTROL_BAUD_RATE_115200; break; case PKT_BAUD_RATE_57600: default: baud_reg = REG_CONTROL_BAUD_RATE_57600; break; } /* 
Wait until the command reaches the baseband */ mdelay(100); /* Set baud on baseband */ info->ctrl_reg &= ~0x03; info->ctrl_reg |= baud_reg; outb(info->ctrl_reg, iobase + REG_CONTROL); /* Enable RTS */ info->ctrl_reg &= ~REG_CONTROL_RTS; outb(info->ctrl_reg, iobase + REG_CONTROL); /* Wait before the next HCI packet can be send */ mdelay(1000); } if (len == skb->len) { kfree_skb(skb); } else { skb_pull(skb, len); skb_queue_head(&(info->txq), skb); } info->hdev->stat.byte_tx += len; /* Change buffer */ change_bit(XMIT_BUFFER_NUMBER, &(info->tx_state)); } while (test_bit(XMIT_WAKEUP, &(info->tx_state))); clear_bit(XMIT_SENDING, &(info->tx_state)); } static int bluecard_read(unsigned int iobase, unsigned int offset, __u8 *buf, int size) { int i, n, len; outb(REG_COMMAND_RX_WIN_ONE, iobase + REG_COMMAND); len = inb(iobase + offset); n = 0; i = 1; while (n < len) { if (i == 16) { outb(REG_COMMAND_RX_WIN_TWO, iobase + REG_COMMAND); i = 0; } buf[n] = inb(iobase + offset + i); n++; i++; } return len; } static void bluecard_receive(struct bluecard_info *info, unsigned int offset) { unsigned int iobase; unsigned char buf[31]; int i, len; if (!info) { BT_ERR("Unknown device"); return; } iobase = info->p_dev->resource[0]->start; if (test_bit(XMIT_SENDING_READY, &(info->tx_state))) bluecard_enable_activity_led(info); len = bluecard_read(iobase, offset, buf, sizeof(buf)); for (i = 0; i < len; i++) { /* Allocate packet */ if (!info->rx_skb) { info->rx_state = RECV_WAIT_PACKET_TYPE; info->rx_count = 0; info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC); if (!info->rx_skb) { BT_ERR("Can't allocate mem for new packet"); return; } } if (info->rx_state == RECV_WAIT_PACKET_TYPE) { hci_skb_pkt_type(info->rx_skb) = buf[i]; switch (hci_skb_pkt_type(info->rx_skb)) { case 0x00: /* init packet */ if (offset != 0x00) { set_bit(XMIT_BUF_ONE_READY, &(info->tx_state)); set_bit(XMIT_BUF_TWO_READY, &(info->tx_state)); set_bit(XMIT_SENDING_READY, &(info->tx_state)); bluecard_write_wakeup(info); } kfree_skb(info->rx_skb); info->rx_skb = NULL; break; case HCI_EVENT_PKT: info->rx_state = RECV_WAIT_EVENT_HEADER; info->rx_count = HCI_EVENT_HDR_SIZE; break; case HCI_ACLDATA_PKT: info->rx_state = RECV_WAIT_ACL_HEADER; info->rx_count = HCI_ACL_HDR_SIZE; break; case HCI_SCODATA_PKT: info->rx_state = RECV_WAIT_SCO_HEADER; info->rx_count = HCI_SCO_HDR_SIZE; break; default: /* unknown packet */ BT_ERR("Unknown HCI packet with type 0x%02x received", hci_skb_pkt_type(info->rx_skb)); info->hdev->stat.err_rx++; kfree_skb(info->rx_skb); info->rx_skb = NULL; break; } } else { skb_put_u8(info->rx_skb, buf[i]); info->rx_count--; if (info->rx_count == 0) { int dlen; struct hci_event_hdr *eh; struct hci_acl_hdr *ah; struct hci_sco_hdr *sh; switch (info->rx_state) { case RECV_WAIT_EVENT_HEADER: eh = hci_event_hdr(info->rx_skb); info->rx_state = RECV_WAIT_DATA; info->rx_count = eh->plen; break; case RECV_WAIT_ACL_HEADER: ah = hci_acl_hdr(info->rx_skb); dlen = __le16_to_cpu(ah->dlen); info->rx_state = RECV_WAIT_DATA; info->rx_count = dlen; break; case RECV_WAIT_SCO_HEADER: sh = hci_sco_hdr(info->rx_skb); info->rx_state = RECV_WAIT_DATA; info->rx_count = sh->dlen; break; case RECV_WAIT_DATA: hci_recv_frame(info->hdev, info->rx_skb); info->rx_skb = NULL; break; } } } } info->hdev->stat.byte_rx += len; } static irqreturn_t bluecard_interrupt(int irq, void *dev_inst) { struct bluecard_info *info = dev_inst; unsigned int iobase; unsigned char reg; if (!info || !info->hdev) /* our irq handler is shared */ return IRQ_NONE; if 
(!test_bit(CARD_READY, &(info->hw_state))) return IRQ_HANDLED; iobase = info->p_dev->resource[0]->start; spin_lock(&(info->lock)); /* Disable interrupt */ info->ctrl_reg &= ~REG_CONTROL_INTERRUPT; outb(info->ctrl_reg, iobase + REG_CONTROL); reg = inb(iobase + REG_INTERRUPT); if ((reg != 0x00) && (reg != 0xff)) { if (reg & 0x04) { bluecard_receive(info, 0x00); outb(0x04, iobase + REG_INTERRUPT); outb(REG_COMMAND_RX_BUF_ONE, iobase + REG_COMMAND); } if (reg & 0x08) { bluecard_receive(info, 0x10); outb(0x08, iobase + REG_INTERRUPT); outb(REG_COMMAND_RX_BUF_TWO, iobase + REG_COMMAND); } if (reg & 0x01) { set_bit(XMIT_BUF_ONE_READY, &(info->tx_state)); outb(0x01, iobase + REG_INTERRUPT); bluecard_write_wakeup(info); } if (reg & 0x02) { set_bit(XMIT_BUF_TWO_READY, &(info->tx_state)); outb(0x02, iobase + REG_INTERRUPT); bluecard_write_wakeup(info); } } /* Enable interrupt */ info->ctrl_reg |= REG_CONTROL_INTERRUPT; outb(info->ctrl_reg, iobase + REG_CONTROL); spin_unlock(&(info->lock)); return IRQ_HANDLED; } /* ======================== Device specific HCI commands ======================== */ static int bluecard_hci_set_baud_rate(struct hci_dev *hdev, int baud) { struct bluecard_info *info = hci_get_drvdata(hdev); struct sk_buff *skb; /* Ericsson baud rate command */ unsigned char cmd[] = { HCI_COMMAND_PKT, 0x09, 0xfc, 0x01, 0x03 }; skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_KERNEL); if (!skb) { BT_ERR("Can't allocate mem for new packet"); return -1; } switch (baud) { case 460800: cmd[4] = 0x00; hci_skb_pkt_type(skb) = PKT_BAUD_RATE_460800; break; case 230400: cmd[4] = 0x01; hci_skb_pkt_type(skb) = PKT_BAUD_RATE_230400; break; case 115200: cmd[4] = 0x02; hci_skb_pkt_type(skb) = PKT_BAUD_RATE_115200; break; case 57600: default: cmd[4] = 0x03; hci_skb_pkt_type(skb) = PKT_BAUD_RATE_57600; break; } skb_put_data(skb, cmd, sizeof(cmd)); skb_queue_tail(&(info->txq), skb); bluecard_write_wakeup(info); return 0; } /* ======================== HCI interface ======================== */ static int bluecard_hci_flush(struct hci_dev *hdev) { struct bluecard_info *info = hci_get_drvdata(hdev); /* Drop TX queue */ skb_queue_purge(&(info->txq)); return 0; } static int bluecard_hci_open(struct hci_dev *hdev) { struct bluecard_info *info = hci_get_drvdata(hdev); unsigned int iobase = info->p_dev->resource[0]->start; if (test_bit(CARD_HAS_PCCARD_ID, &(info->hw_state))) bluecard_hci_set_baud_rate(hdev, DEFAULT_BAUD_RATE); /* Enable power LED */ outb(0x08 | 0x20, iobase + 0x30); return 0; } static int bluecard_hci_close(struct hci_dev *hdev) { struct bluecard_info *info = hci_get_drvdata(hdev); unsigned int iobase = info->p_dev->resource[0]->start; bluecard_hci_flush(hdev); /* Stop LED timer */ del_timer_sync(&(info->timer)); /* Disable power LED */ outb(0x00, iobase + 0x30); return 0; } static int bluecard_hci_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct bluecard_info *info = hci_get_drvdata(hdev); switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; } /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&(info->txq), skb); bluecard_write_wakeup(info); return 0; } /* ======================== Card services HCI interaction ======================== */ static int bluecard_open(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; struct hci_dev *hdev; unsigned char id; spin_lock_init(&(info->lock)); 
timer_setup(&info->timer, bluecard_activity_led_timeout, 0); skb_queue_head_init(&(info->txq)); info->rx_state = RECV_WAIT_PACKET_TYPE; info->rx_count = 0; info->rx_skb = NULL; /* Initialize HCI device */ hdev = hci_alloc_dev(); if (!hdev) { BT_ERR("Can't allocate HCI device"); return -ENOMEM; } info->hdev = hdev; hdev->bus = HCI_PCCARD; hci_set_drvdata(hdev, info); SET_HCIDEV_DEV(hdev, &info->p_dev->dev); hdev->open = bluecard_hci_open; hdev->close = bluecard_hci_close; hdev->flush = bluecard_hci_flush; hdev->send = bluecard_hci_send_frame; id = inb(iobase + 0x30); if ((id & 0x0f) == 0x02) set_bit(CARD_HAS_PCCARD_ID, &(info->hw_state)); if (id & 0x10) set_bit(CARD_HAS_POWER_LED, &(info->hw_state)); if (id & 0x20) set_bit(CARD_HAS_ACTIVITY_LED, &(info->hw_state)); /* Reset card */ info->ctrl_reg = REG_CONTROL_BT_RESET | REG_CONTROL_CARD_RESET; outb(info->ctrl_reg, iobase + REG_CONTROL); /* Turn FPGA off */ outb(0x80, iobase + 0x30); /* Wait some time */ msleep(10); /* Turn FPGA on */ outb(0x00, iobase + 0x30); /* Activate card */ info->ctrl_reg = REG_CONTROL_BT_ON | REG_CONTROL_BT_RES_PU; outb(info->ctrl_reg, iobase + REG_CONTROL); /* Enable interrupt */ outb(0xff, iobase + REG_INTERRUPT); info->ctrl_reg |= REG_CONTROL_INTERRUPT; outb(info->ctrl_reg, iobase + REG_CONTROL); if ((id & 0x0f) == 0x03) { /* Disable RTS */ info->ctrl_reg |= REG_CONTROL_RTS; outb(info->ctrl_reg, iobase + REG_CONTROL); /* Set baud rate */ info->ctrl_reg |= 0x03; outb(info->ctrl_reg, iobase + REG_CONTROL); /* Enable RTS */ info->ctrl_reg &= ~REG_CONTROL_RTS; outb(info->ctrl_reg, iobase + REG_CONTROL); set_bit(XMIT_BUF_ONE_READY, &(info->tx_state)); set_bit(XMIT_BUF_TWO_READY, &(info->tx_state)); set_bit(XMIT_SENDING_READY, &(info->tx_state)); } /* Start the RX buffers */ outb(REG_COMMAND_RX_BUF_ONE, iobase + REG_COMMAND); outb(REG_COMMAND_RX_BUF_TWO, iobase + REG_COMMAND); /* Signal that the hardware is ready */ set_bit(CARD_READY, &(info->hw_state)); /* Drop TX queue */ skb_queue_purge(&(info->txq)); /* Control the point at which RTS is enabled */ outb((0x0f << RTS_LEVEL_SHIFT_BITS) | 1, iobase + REG_RX_CONTROL); /* Timeout before it is safe to send the first HCI packet */ msleep(1250); /* Register HCI device */ if (hci_register_dev(hdev) < 0) { BT_ERR("Can't register HCI device"); info->hdev = NULL; hci_free_dev(hdev); return -ENODEV; } return 0; } static int bluecard_close(struct bluecard_info *info) { unsigned int iobase = info->p_dev->resource[0]->start; struct hci_dev *hdev = info->hdev; if (!hdev) return -ENODEV; bluecard_hci_close(hdev); clear_bit(CARD_READY, &(info->hw_state)); /* Reset card */ info->ctrl_reg = REG_CONTROL_BT_RESET | REG_CONTROL_CARD_RESET; outb(info->ctrl_reg, iobase + REG_CONTROL); /* Turn FPGA off */ outb(0x80, iobase + 0x30); hci_unregister_dev(hdev); hci_free_dev(hdev); return 0; } static int bluecard_probe(struct pcmcia_device *link) { struct bluecard_info *info; /* Create new info device */ info = devm_kzalloc(&link->dev, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; info->p_dev = link; link->priv = info; link->config_flags |= CONF_ENABLE_IRQ; return bluecard_config(link); } static void bluecard_detach(struct pcmcia_device *link) { bluecard_release(link); } static int bluecard_config(struct pcmcia_device *link) { struct bluecard_info *info = link->priv; int i, n; link->config_index = 0x20; link->resource[0]->flags |= IO_DATA_PATH_WIDTH_8; link->resource[0]->end = 64; link->io_lines = 6; for (n = 0; n < 0x400; n += 0x40) { link->resource[0]->start = n ^ 0x300; i = 
		    pcmcia_request_io(link);
		if (i == 0)
			break;
	}

	if (i != 0)
		goto failed;

	i = pcmcia_request_irq(link, bluecard_interrupt);
	if (i != 0)
		goto failed;

	i = pcmcia_enable_device(link);
	if (i != 0)
		goto failed;

	if (bluecard_open(info) != 0)
		goto failed;

	return 0;

failed:
	bluecard_release(link);
	return -ENODEV;
}

static void bluecard_release(struct pcmcia_device *link)
{
	struct bluecard_info *info = link->priv;

	bluecard_close(info);

	del_timer_sync(&(info->timer));

	pcmcia_disable_device(link);
}

static const struct pcmcia_device_id bluecard_ids[] = {
	PCMCIA_DEVICE_PROD_ID12("BlueCard", "LSE041", 0xbaf16fbf, 0x657cc15e),
	PCMCIA_DEVICE_PROD_ID12("BTCFCARD", "LSE139", 0xe3987764, 0x2524b59c),
	PCMCIA_DEVICE_PROD_ID12("WSS", "LSE039", 0x0a0736ec, 0x24e6dfab),
	PCMCIA_DEVICE_NULL
};

MODULE_DEVICE_TABLE(pcmcia, bluecard_ids);

static struct pcmcia_driver bluecard_driver = {
	.owner		= THIS_MODULE,
	.name		= "bluecard_cs",
	.probe		= bluecard_probe,
	.remove		= bluecard_detach,
	.id_table	= bluecard_ids,
};
module_pcmcia_driver(bluecard_driver);
linux-master
drivers/bluetooth/bluecard_cs.c
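bluecard_write() above pushes at most 15 payload bytes per hardware TX buffer, preceded by a length byte; the caller requeues whatever did not fit. A self-contained userspace sketch of that buffer format, with the I/O port writes replaced by array stores:

#include <stdio.h>
#include <string.h>

#define BUF_SLOT_SIZE 16	/* 1 length byte + 15 data bytes, per the driver */

static int bluecard_write_sim(unsigned char *buf_slot,
			      const unsigned char *data, int len)
{
	int actual = (len > 15) ? 15 : len;	/* hardware limit per buffer */

	buf_slot[0] = (unsigned char)actual;		/* outb_p(actual, ...) */
	memcpy(buf_slot + 1, data, (size_t)actual);	/* the per-byte outb_p loop */
	return actual;					/* caller requeues the rest */
}

int main(void)
{
	unsigned char frame[20], slot[BUF_SLOT_SIZE];
	int sent = 0;

	memset(frame, 0xaa, sizeof(frame));
	while (sent < (int)sizeof(frame)) {
		/* each pass models one TX buffer handed to the FPGA */
		sent += bluecard_write_sim(slot, frame + sent,
					   (int)sizeof(frame) - sent);
		printf("buffer carried %d bytes, total %d\n", slot[0], sent);
	}
	return 0;
}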
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (c) 2008-2009 Atheros Communications Inc. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/firmware.h> #include <linux/usb.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #define VERSION "1.0" #define ATH3K_FIRMWARE "ath3k-1.fw" #define ATH3K_DNLOAD 0x01 #define ATH3K_GETSTATE 0x05 #define ATH3K_SET_NORMAL_MODE 0x07 #define ATH3K_GETVERSION 0x09 #define USB_REG_SWITCH_VID_PID 0x0a #define ATH3K_MODE_MASK 0x3F #define ATH3K_NORMAL_MODE 0x0E #define ATH3K_PATCH_UPDATE 0x80 #define ATH3K_SYSCFG_UPDATE 0x40 #define ATH3K_XTAL_FREQ_26M 0x00 #define ATH3K_XTAL_FREQ_40M 0x01 #define ATH3K_XTAL_FREQ_19P2 0x02 #define ATH3K_NAME_LEN 0xFF struct ath3k_version { __le32 rom_version; __le32 build_version; __le32 ram_version; __u8 ref_clock; __u8 reserved[7]; } __packed; static const struct usb_device_id ath3k_table[] = { /* Atheros AR3011 */ { USB_DEVICE(0x0CF3, 0x3000) }, /* Atheros AR3011 with sflash firmware*/ { USB_DEVICE(0x0489, 0xE027) }, { USB_DEVICE(0x0489, 0xE03D) }, { USB_DEVICE(0x04F2, 0xAFF1) }, { USB_DEVICE(0x0930, 0x0215) }, { USB_DEVICE(0x0CF3, 0x3002) }, { USB_DEVICE(0x0CF3, 0xE019) }, { USB_DEVICE(0x13d3, 0x3304) }, /* Atheros AR9285 Malbec with sflash firmware */ { USB_DEVICE(0x03F0, 0x311D) }, /* Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0489, 0xe04d) }, { USB_DEVICE(0x0489, 0xe04e) }, { USB_DEVICE(0x0489, 0xe057) }, { USB_DEVICE(0x0489, 0xe056) }, { USB_DEVICE(0x0489, 0xe05f) }, { USB_DEVICE(0x0489, 0xe076) }, { USB_DEVICE(0x0489, 0xe078) }, { USB_DEVICE(0x0489, 0xe095) }, { USB_DEVICE(0x04c5, 0x1330) }, { USB_DEVICE(0x04CA, 0x3004) }, { USB_DEVICE(0x04CA, 0x3005) }, { USB_DEVICE(0x04CA, 0x3006) }, { USB_DEVICE(0x04CA, 0x3007) }, { USB_DEVICE(0x04CA, 0x3008) }, { USB_DEVICE(0x04CA, 0x300b) }, { USB_DEVICE(0x04CA, 0x300d) }, { USB_DEVICE(0x04CA, 0x300f) }, { USB_DEVICE(0x04CA, 0x3010) }, { USB_DEVICE(0x04CA, 0x3014) }, { USB_DEVICE(0x04CA, 0x3018) }, { USB_DEVICE(0x0930, 0x0219) }, { USB_DEVICE(0x0930, 0x021c) }, { USB_DEVICE(0x0930, 0x0220) }, { USB_DEVICE(0x0930, 0x0227) }, { USB_DEVICE(0x0b05, 0x17d0) }, { USB_DEVICE(0x0CF3, 0x0036) }, { USB_DEVICE(0x0CF3, 0x3004) }, { USB_DEVICE(0x0CF3, 0x3008) }, { USB_DEVICE(0x0CF3, 0x311D) }, { USB_DEVICE(0x0CF3, 0x311E) }, { USB_DEVICE(0x0CF3, 0x311F) }, { USB_DEVICE(0x0cf3, 0x3121) }, { USB_DEVICE(0x0CF3, 0x817a) }, { USB_DEVICE(0x0CF3, 0x817b) }, { USB_DEVICE(0x0cf3, 0xe003) }, { USB_DEVICE(0x0CF3, 0xE004) }, { USB_DEVICE(0x0CF3, 0xE005) }, { USB_DEVICE(0x0CF3, 0xE006) }, { USB_DEVICE(0x13d3, 0x3362) }, { USB_DEVICE(0x13d3, 0x3375) }, { USB_DEVICE(0x13d3, 0x3393) }, { USB_DEVICE(0x13d3, 0x3395) }, { USB_DEVICE(0x13d3, 0x3402) }, { USB_DEVICE(0x13d3, 0x3408) }, { USB_DEVICE(0x13d3, 0x3423) }, { USB_DEVICE(0x13d3, 0x3432) }, { USB_DEVICE(0x13d3, 0x3472) }, { USB_DEVICE(0x13d3, 0x3474) }, { USB_DEVICE(0x13d3, 0x3487) }, { USB_DEVICE(0x13d3, 0x3490) }, /* Atheros AR5BBU12 with sflash firmware */ { USB_DEVICE(0x0489, 0xE02C) }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE036) }, { USB_DEVICE(0x0489, 0xE03C) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ath3k_table); #define BTUSB_ATH3012 0x80 /* This table is to load patch and sysconfig files * for AR3012 */ static const struct usb_device_id ath3k_blist_tbl[] = { /* Atheros AR3012 with sflash firmware*/ { USB_DEVICE(0x0489, 0xe04e), .driver_info = 
BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe04d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe056), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe057), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe05f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe076), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe078), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xe095), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04c5, 0x1330), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3006), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300d), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3014), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x04ca, 0x3018), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x021c), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0930, 0x0227), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0b05, 0x17d0), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x0036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3008), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311D), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311E), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x311F), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0x3121), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0CF3, 0x817b), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3395), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3402), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3408), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3423), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3432), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3472), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3474), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3487), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x13d3, 0x3490), .driver_info = BTUSB_ATH3012 }, /* Atheros AR5BBU22 with sflash firmware */ { USB_DEVICE(0x0489, 0xE036), .driver_info = BTUSB_ATH3012 }, { USB_DEVICE(0x0489, 0xE03C), .driver_info = BTUSB_ATH3012 }, { } /* Terminating entry */ }; static inline void ath3k_log_failed_loading(int err, int len, int size, int count) { BT_ERR("Firmware loading err = %d, len = %d, size = %d, count = %d", err, len, size, count); } #define USB_REQ_DFU_DNLOAD 1 #define BULK_SIZE 4096 #define FW_HDR_SIZE 20 #define TIMEGAP_USEC_MIN 50 #define 
TIMEGAP_USEC_MAX 100 static int ath3k_load_firmware(struct usb_device *udev, const struct firmware *firmware) { u8 *send_buf; int len = 0; int err, pipe, size, sent = 0; int count = firmware->size; BT_DBG("udev %p", udev); send_buf = kmalloc(BULK_SIZE, GFP_KERNEL); if (!send_buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } err = usb_control_msg_send(udev, 0, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR, 0, 0, firmware->data, FW_HDR_SIZE, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); if (err) { BT_ERR("Can't change to loading configuration err"); goto error; } sent += FW_HDR_SIZE; count -= FW_HDR_SIZE; pipe = usb_sndbulkpipe(udev, 0x02); while (count) { /* workaround the compatibility issue with xHCI controller*/ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX); size = min_t(uint, count, BULK_SIZE); memcpy(send_buf, firmware->data + sent, size); err = usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000); if (err || (len != size)) { ath3k_log_failed_loading(err, len, size, count); goto error; } sent += size; count -= size; } error: kfree(send_buf); return err; } static int ath3k_get_state(struct usb_device *udev, unsigned char *state) { return usb_control_msg_recv(udev, 0, ATH3K_GETSTATE, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, state, 1, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); } static int ath3k_get_version(struct usb_device *udev, struct ath3k_version *version) { return usb_control_msg_recv(udev, 0, ATH3K_GETVERSION, USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, version, sizeof(*version), USB_CTRL_SET_TIMEOUT, GFP_KERNEL); } static int ath3k_load_fwfile(struct usb_device *udev, const struct firmware *firmware) { u8 *send_buf; int len = 0; int err, pipe, size, count, sent = 0; int ret; count = firmware->size; send_buf = kmalloc(BULK_SIZE, GFP_KERNEL); if (!send_buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } size = min_t(uint, count, FW_HDR_SIZE); ret = usb_control_msg_send(udev, 0, ATH3K_DNLOAD, USB_TYPE_VENDOR, 0, 0, firmware->data, size, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); if (ret) { BT_ERR("Can't change to loading configuration err"); kfree(send_buf); return ret; } sent += size; count -= size; pipe = usb_sndbulkpipe(udev, 0x02); while (count) { /* workaround the compatibility issue with xHCI controller*/ usleep_range(TIMEGAP_USEC_MIN, TIMEGAP_USEC_MAX); size = min_t(uint, count, BULK_SIZE); memcpy(send_buf, firmware->data + sent, size); err = usb_bulk_msg(udev, pipe, send_buf, size, &len, 3000); if (err || (len != size)) { ath3k_log_failed_loading(err, len, size, count); kfree(send_buf); return err; } sent += size; count -= size; } kfree(send_buf); return 0; } static void ath3k_switch_pid(struct usb_device *udev) { usb_control_msg_send(udev, 0, USB_REG_SWITCH_VID_PID, USB_TYPE_VENDOR, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); } static int ath3k_set_normal_mode(struct usb_device *udev) { unsigned char fw_state; int ret; ret = ath3k_get_state(udev, &fw_state); if (ret) { BT_ERR("Can't get state to change to normal mode err"); return ret; } if ((fw_state & ATH3K_MODE_MASK) == ATH3K_NORMAL_MODE) { BT_DBG("firmware was already in normal mode"); return 0; } return usb_control_msg_send(udev, 0, ATH3K_SET_NORMAL_MODE, USB_TYPE_VENDOR, 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); } static int ath3k_load_patch(struct usb_device *udev) { unsigned char fw_state; char filename[ATH3K_NAME_LEN]; const struct firmware *firmware; struct ath3k_version fw_version; __u32 pt_rom_version, pt_build_version; int ret; ret = ath3k_get_state(udev, &fw_state); if (ret) { BT_ERR("Can't 
get state to change to load ram patch err"); return ret; } if (fw_state & ATH3K_PATCH_UPDATE) { BT_DBG("Patch was already downloaded"); return 0; } ret = ath3k_get_version(udev, &fw_version); if (ret) { BT_ERR("Can't get version to change to load ram patch err"); return ret; } snprintf(filename, ATH3K_NAME_LEN, "ar3k/AthrBT_0x%08x.dfu", le32_to_cpu(fw_version.rom_version)); ret = request_firmware(&firmware, filename, &udev->dev); if (ret < 0) { BT_ERR("Patch file not found %s", filename); return ret; } pt_rom_version = get_unaligned_le32(firmware->data + firmware->size - 8); pt_build_version = get_unaligned_le32(firmware->data + firmware->size - 4); if (pt_rom_version != le32_to_cpu(fw_version.rom_version) || pt_build_version <= le32_to_cpu(fw_version.build_version)) { BT_ERR("Patch file version did not match with firmware"); release_firmware(firmware); return -EINVAL; } ret = ath3k_load_fwfile(udev, firmware); release_firmware(firmware); return ret; } static int ath3k_load_syscfg(struct usb_device *udev) { unsigned char fw_state; char filename[ATH3K_NAME_LEN]; const struct firmware *firmware; struct ath3k_version fw_version; int clk_value, ret; ret = ath3k_get_state(udev, &fw_state); if (ret) { BT_ERR("Can't get state to change to load configuration err"); return -EBUSY; } ret = ath3k_get_version(udev, &fw_version); if (ret) { BT_ERR("Can't get version to change to load ram patch err"); return ret; } switch (fw_version.ref_clock) { case ATH3K_XTAL_FREQ_26M: clk_value = 26; break; case ATH3K_XTAL_FREQ_40M: clk_value = 40; break; case ATH3K_XTAL_FREQ_19P2: clk_value = 19; break; default: clk_value = 0; break; } snprintf(filename, ATH3K_NAME_LEN, "ar3k/ramps_0x%08x_%d%s", le32_to_cpu(fw_version.rom_version), clk_value, ".dfu"); ret = request_firmware(&firmware, filename, &udev->dev); if (ret < 0) { BT_ERR("Configuration file not found %s", filename); return ret; } ret = ath3k_load_fwfile(udev, firmware); release_firmware(firmware); return ret; } static int ath3k_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct firmware *firmware; struct usb_device *udev = interface_to_usbdev(intf); int ret; BT_DBG("intf %p id %p", intf, id); if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; /* match device ID in ath3k blacklist table */ if (!id->driver_info) { const struct usb_device_id *match; match = usb_match_id(intf, ath3k_blist_tbl); if (match) id = match; } /* load patch and sysconfig files for AR3012 */ if (id->driver_info & BTUSB_ATH3012) { /* New firmware with patch and sysconfig files already loaded */ if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x0001) return -ENODEV; ret = ath3k_load_patch(udev); if (ret < 0) { BT_ERR("Loading patch file failed"); return ret; } ret = ath3k_load_syscfg(udev); if (ret < 0) { BT_ERR("Loading sysconfig file failed"); return ret; } ret = ath3k_set_normal_mode(udev); if (ret) { BT_ERR("Set normal mode failed"); return ret; } ath3k_switch_pid(udev); return 0; } ret = request_firmware(&firmware, ATH3K_FIRMWARE, &udev->dev); if (ret < 0) { if (ret == -ENOENT) BT_ERR("Firmware file \"%s\" not found", ATH3K_FIRMWARE); else BT_ERR("Firmware file \"%s\" request failed (err=%d)", ATH3K_FIRMWARE, ret); return ret; } ret = ath3k_load_firmware(udev, firmware); release_firmware(firmware); return ret; } static void ath3k_disconnect(struct usb_interface *intf) { BT_DBG("%s intf %p", __func__, intf); } static struct usb_driver ath3k_driver = { .name = "ath3k", .probe = ath3k_probe, .disconnect = ath3k_disconnect, .id_table = 
ath3k_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(ath3k_driver); MODULE_AUTHOR("Atheros Communications"); MODULE_DESCRIPTION("Atheros AR30xx firmware driver"); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE(ATH3K_FIRMWARE);
linux-master
drivers/bluetooth/ath3k.c
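The ath3k loader above splits the image into one FW_HDR_SIZE control transfer followed by BULK_SIZE bulk writes. Below is a minimal standalone sketch of that chunking loop with the USB transfers stubbed out; the FW_HDR_SIZE and BULK_SIZE values are assumptions (only TIMEGAP_USEC_MAX is visible in the excerpt), so treat this as an illustration of the split, not the driver's literal constants.

/*
 * Standalone sketch of the ath3k download flow: one FW_HDR_SIZE control
 * transfer, then BULK_SIZE bulk chunks. FW_HDR_SIZE and BULK_SIZE are
 * assumed values; the "send" steps are stubbed with printf.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define FW_HDR_SIZE 20      /* assumed header size */
#define BULK_SIZE   4096    /* assumed bulk chunk size */

static int fake_send(const char *what, size_t off, size_t len)
{
	printf("%s: offset %zu, %zu bytes\n", what, off, len);
	return 0; /* a real driver would return the USB status here */
}

static int load_firmware(const unsigned char *data, size_t size)
{
	size_t sent = 0, count = size;

	if (count < FW_HDR_SIZE)
		return -1;

	/* Header goes out as a control transfer (USB_REQ_DFU_DNLOAD). */
	if (fake_send("control", 0, FW_HDR_SIZE))
		return -1;
	sent += FW_HDR_SIZE;
	count -= FW_HDR_SIZE;

	/* The remainder is streamed over the bulk-out endpoint in chunks. */
	while (count) {
		size_t chunk = count < BULK_SIZE ? count : BULK_SIZE;

		if (fake_send("bulk", sent, chunk))
			return -1;
		sent += chunk;
		count -= chunk;
	}
	return 0;
}

int main(void)
{
	unsigned char fw[10000];

	memset(fw, 0xa5, sizeof(fw));
	return load_firmware(fw, sizeof(fw)) ? EXIT_FAILURE : EXIT_SUCCESS;
}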
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth HCI UART driver for Intel/AG6xx devices * * Copyright (C) 2016 Intel Corporation */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/tty.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" #include "btintel.h" struct ag6xx_data { struct sk_buff *rx_skb; struct sk_buff_head txq; }; struct pbn_entry { __le32 addr; __le32 plen; __u8 data[]; } __packed; static int ag6xx_open(struct hci_uart *hu) { struct ag6xx_data *ag6xx; BT_DBG("hu %p", hu); ag6xx = kzalloc(sizeof(*ag6xx), GFP_KERNEL); if (!ag6xx) return -ENOMEM; skb_queue_head_init(&ag6xx->txq); hu->priv = ag6xx; return 0; } static int ag6xx_close(struct hci_uart *hu) { struct ag6xx_data *ag6xx = hu->priv; BT_DBG("hu %p", hu); skb_queue_purge(&ag6xx->txq); kfree_skb(ag6xx->rx_skb); kfree(ag6xx); hu->priv = NULL; return 0; } static int ag6xx_flush(struct hci_uart *hu) { struct ag6xx_data *ag6xx = hu->priv; BT_DBG("hu %p", hu); skb_queue_purge(&ag6xx->txq); return 0; } static struct sk_buff *ag6xx_dequeue(struct hci_uart *hu) { struct ag6xx_data *ag6xx = hu->priv; struct sk_buff *skb; skb = skb_dequeue(&ag6xx->txq); if (!skb) return skb; /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1); return skb; } static int ag6xx_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct ag6xx_data *ag6xx = hu->priv; skb_queue_tail(&ag6xx->txq, skb); return 0; } static const struct h4_recv_pkt ag6xx_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, }; static int ag6xx_recv(struct hci_uart *hu, const void *data, int count) { struct ag6xx_data *ag6xx = hu->priv; if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; ag6xx->rx_skb = h4_recv_buf(hu->hdev, ag6xx->rx_skb, data, count, ag6xx_recv_pkts, ARRAY_SIZE(ag6xx_recv_pkts)); if (IS_ERR(ag6xx->rx_skb)) { int err = PTR_ERR(ag6xx->rx_skb); bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); ag6xx->rx_skb = NULL; return err; } return count; } static int intel_mem_write(struct hci_dev *hdev, u32 addr, u32 plen, const void *data) { /* Can write a maximum of 247 bytes per HCI command. * HCI cmd Header (3), Intel mem write header (6), data (247). */ while (plen > 0) { struct sk_buff *skb; u8 cmd_param[253], fragment_len = (plen > 247) ? 247 : plen; __le32 leaddr = cpu_to_le32(addr); memcpy(cmd_param, &leaddr, 4); cmd_param[4] = 0; cmd_param[5] = fragment_len; memcpy(cmd_param + 6, data, fragment_len); skb = __hci_cmd_sync(hdev, 0xfc8e, fragment_len + 6, cmd_param, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); plen -= fragment_len; data += fragment_len; addr += fragment_len; } return 0; } static int ag6xx_setup(struct hci_uart *hu) { struct hci_dev *hdev = hu->hdev; struct sk_buff *skb; struct intel_version ver; const struct firmware *fw; const u8 *fw_ptr; char fwname[64]; bool patched = false; int err; hu->hdev->set_diag = btintel_set_diag; hu->hdev->set_bdaddr = btintel_set_bdaddr; err = btintel_enter_mfg(hdev); if (err) return err; err = btintel_read_version(hdev, &ver); if (err) return err; btintel_version_info(hdev, &ver); /* The hardware platform number has a fixed value of 0x37 and * for now only accept this single value. 
*/ if (ver.hw_platform != 0x37) { bt_dev_err(hdev, "Unsupported Intel hardware platform: 0x%X", ver.hw_platform); return -EINVAL; } /* Only the hardware variant iBT 2.1 (AG6XX) is supported by this * firmware setup method. */ if (ver.hw_variant != 0x0a) { bt_dev_err(hdev, "Unsupported Intel hardware variant: 0x%x", ver.hw_variant); return -EINVAL; } snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bddata", ver.hw_platform, ver.hw_variant); err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { bt_dev_err(hdev, "Failed to open Intel bddata file: %s (%d)", fwname, err); goto patch; } bt_dev_info(hdev, "Applying bddata (%s)", fwname); skb = __hci_cmd_sync_ev(hdev, 0xfc2f, fw->size, fw->data, HCI_EV_CMD_STATUS, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Applying bddata failed (%ld)", PTR_ERR(skb)); release_firmware(fw); return PTR_ERR(skb); } kfree_skb(skb); release_firmware(fw); patch: /* If there is no applied patch, fw_patch_num is always 0x00. In other * cases, current firmware is already patched. No need to patch it. */ if (ver.fw_patch_num) { bt_dev_info(hdev, "Device is already patched. patch num: %02x", ver.fw_patch_num); patched = true; goto complete; } snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.pbn", ver.hw_platform, ver.hw_variant, ver.hw_revision, ver.fw_variant, ver.fw_revision, ver.fw_build_num, ver.fw_build_ww, ver.fw_build_yy); err = request_firmware(&fw, fwname, &hdev->dev); if (err < 0) { bt_dev_err(hdev, "Failed to open Intel patch file: %s(%d)", fwname, err); goto complete; } fw_ptr = fw->data; bt_dev_info(hdev, "Patching firmware file (%s)", fwname); /* PBN patch file contains a list of binary patches to be applied on top * of the embedded firmware. Each patch entry header contains the target * address and patch size. * * Patch entry: * | addr(le) | patch_len(le) | patch_data | * | 4 Bytes | 4 Bytes | n Bytes | * * PBN file is terminated by a patch entry whose address is 0xffffffff. */ while (fw->size > fw_ptr - fw->data) { struct pbn_entry *pbn = (void *)fw_ptr; u32 addr, plen; if (pbn->addr == 0xffffffff) { bt_dev_info(hdev, "Patching complete"); patched = true; break; } addr = le32_to_cpu(pbn->addr); plen = le32_to_cpu(pbn->plen); if (fw->data + fw->size <= pbn->data + plen) { bt_dev_info(hdev, "Invalid patch len (%d)", plen); break; } bt_dev_info(hdev, "Patching %td/%zu", (fw_ptr - fw->data), fw->size); err = intel_mem_write(hdev, addr, plen, pbn->data); if (err) { bt_dev_err(hdev, "Patching failed"); break; } fw_ptr = pbn->data + plen; } release_firmware(fw); complete: /* Exit manufacturing mode and reset */ err = btintel_exit_mfg(hdev, true, patched); if (err) return err; /* Set the event mask for Intel specific vendor events. This enables * a few extra events that are useful during general operation. */ btintel_set_event_mask_mfg(hdev, false); btintel_check_bdaddr(hdev); return 0; } static const struct hci_uart_proto ag6xx_proto = { .id = HCI_UART_AG6XX, .name = "AG6XX", .manufacturer = 2, .open = ag6xx_open, .close = ag6xx_close, .flush = ag6xx_flush, .setup = ag6xx_setup, .recv = ag6xx_recv, .enqueue = ag6xx_enqueue, .dequeue = ag6xx_dequeue, }; int __init ag6xx_init(void) { return hci_uart_register_proto(&ag6xx_proto); } int __exit ag6xx_deinit(void) { return hci_uart_unregister_proto(&ag6xx_proto); }
linux-master
drivers/bluetooth/hci_ag6xx.c
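The comment block in ag6xx_setup() documents the PBN patch layout: a little-endian address and length, the payload, and a terminating entry whose address is 0xffffffff. The following self-contained parser reproduces exactly that walk, including the bounds check against a truncated entry; the sample blob and the apply() stub (standing in for the 0xfc8e memory-write commands) are made up for illustration.

/*
 * Minimal userspace walk of the PBN patch format described above. Each
 * entry is addr(le32) | plen(le32) | data, and addr == 0xffffffff ends
 * the list. The blob in main() is a fabricated two-entry example.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t get_le32(const uint8_t *p)
{
	return p[0] | p[1] << 8 | (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static int apply(uint32_t addr, uint32_t plen, const uint8_t *data)
{
	(void)data;
	printf("patch 0x%08x, %u bytes\n", addr, plen);
	return 0; /* the driver issues fragmented mem-write commands here */
}

static int parse_pbn(const uint8_t *fw, size_t size)
{
	const uint8_t *ptr = fw;

	while ((size_t)(ptr - fw) + 8 <= size) {
		uint32_t addr = get_le32(ptr);
		uint32_t plen = get_le32(ptr + 4);

		if (addr == 0xffffffff)
			return 0;          /* terminator entry */
		if ((size_t)(ptr - fw) + 8 + plen > size)
			return -1;         /* truncated patch entry */
		if (apply(addr, plen, ptr + 8))
			return -1;
		ptr += 8 + plen;
	}
	return -1; /* ran off the end without seeing the terminator */
}

int main(void)
{
	/* one 4-byte patch at 0x1000, then the terminator */
	static const uint8_t blob[] = {
		0x00, 0x10, 0x00, 0x00,  0x04, 0x00, 0x00, 0x00,
		0xde, 0xad, 0xbe, 0xef,
		0xff, 0xff, 0xff, 0xff,  0x00, 0x00, 0x00, 0x00,
	};

	return parse_pbn(blob, sizeof(blob)) ? 1 : 0;
}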
// SPDX-License-Identifier: GPL-2.0-only /* * Texas Instruments' Bluetooth HCILL UART protocol * * HCILL (HCI Low Level) is a Texas Instruments' power management * protocol extension to H4. * * Copyright (C) 2007 Texas Instruments, Inc. * * Written by Ohad Ben-Cohen <[email protected]> * * Acknowledgements: * This file is based on hci_h4.c, which was written * by Maxim Krasnyansky and Marcel Holtmann. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/firmware.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/signal.h> #include <linux/ioctl.h> #include <linux/of.h> #include <linux/serdev.h> #include <linux/skbuff.h> #include <linux/ti_wilink_st.h> #include <linux/clk.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <linux/gpio/consumer.h> #include <linux/nvmem-consumer.h> #include "hci_uart.h" /* Vendor-specific HCI commands */ #define HCI_VS_WRITE_BD_ADDR 0xfc06 #define HCI_VS_UPDATE_UART_HCI_BAUDRATE 0xff36 /* HCILL commands */ #define HCILL_GO_TO_SLEEP_IND 0x30 #define HCILL_GO_TO_SLEEP_ACK 0x31 #define HCILL_WAKE_UP_IND 0x32 #define HCILL_WAKE_UP_ACK 0x33 /* HCILL states */ enum hcill_states_e { HCILL_ASLEEP, HCILL_ASLEEP_TO_AWAKE, HCILL_AWAKE, HCILL_AWAKE_TO_ASLEEP }; struct ll_device { struct hci_uart hu; struct serdev_device *serdev; struct gpio_desc *enable_gpio; struct clk *ext_clk; bdaddr_t bdaddr; }; struct ll_struct { struct sk_buff *rx_skb; struct sk_buff_head txq; spinlock_t hcill_lock; /* HCILL state lock */ unsigned long hcill_state; /* HCILL power state */ struct sk_buff_head tx_wait_q; /* HCILL wait queue */ }; /* * Builds and sends an HCILL command packet. * These are very simple packets with only 1 cmd byte */ static int send_hcill_cmd(u8 cmd, struct hci_uart *hu) { int err = 0; struct sk_buff *skb = NULL; struct ll_struct *ll = hu->priv; BT_DBG("hu %p cmd 0x%x", hu, cmd); /* allocate packet */ skb = bt_skb_alloc(1, GFP_ATOMIC); if (!skb) { BT_ERR("cannot allocate memory for HCILL packet"); err = -ENOMEM; goto out; } /* prepare packet */ skb_put_u8(skb, cmd); /* send packet */ skb_queue_tail(&ll->txq, skb); out: return err; } /* Initialize protocol */ static int ll_open(struct hci_uart *hu) { struct ll_struct *ll; BT_DBG("hu %p", hu); ll = kzalloc(sizeof(*ll), GFP_KERNEL); if (!ll) return -ENOMEM; skb_queue_head_init(&ll->txq); skb_queue_head_init(&ll->tx_wait_q); spin_lock_init(&ll->hcill_lock); ll->hcill_state = HCILL_AWAKE; hu->priv = ll; if (hu->serdev) { struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev); if (!IS_ERR(lldev->ext_clk)) clk_prepare_enable(lldev->ext_clk); } return 0; } /* Flush protocol data */ static int ll_flush(struct hci_uart *hu) { struct ll_struct *ll = hu->priv; BT_DBG("hu %p", hu); skb_queue_purge(&ll->tx_wait_q); skb_queue_purge(&ll->txq); return 0; } /* Close protocol */ static int ll_close(struct hci_uart *hu) { struct ll_struct *ll = hu->priv; BT_DBG("hu %p", hu); skb_queue_purge(&ll->tx_wait_q); skb_queue_purge(&ll->txq); kfree_skb(ll->rx_skb); if (hu->serdev) { struct ll_device *lldev = serdev_device_get_drvdata(hu->serdev); gpiod_set_value_cansleep(lldev->enable_gpio, 0); clk_disable_unprepare(lldev->ext_clk); } hu->priv = NULL; kfree(ll); return 0; } /* * internal function, which does common work of the device wake up process: * 1. 
places all pending packets (waiting in tx_wait_q list) in txq list. * 2. changes internal state to HCILL_AWAKE. * Note: assumes that hcill_lock spinlock is taken, * shouldn't be called otherwise! */ static void __ll_do_awake(struct ll_struct *ll) { struct sk_buff *skb = NULL; while ((skb = skb_dequeue(&ll->tx_wait_q))) skb_queue_tail(&ll->txq, skb); ll->hcill_state = HCILL_AWAKE; } /* * Called upon a wake-up-indication from the device */ static void ll_device_want_to_wakeup(struct hci_uart *hu) { unsigned long flags; struct ll_struct *ll = hu->priv; BT_DBG("hu %p", hu); /* lock hcill state */ spin_lock_irqsave(&ll->hcill_lock, flags); switch (ll->hcill_state) { case HCILL_ASLEEP_TO_AWAKE: /* * This state means that both the host and the BRF chip * have simultaneously sent a wake-up-indication packet. * Traditionally, in this case, receiving a wake-up-indication * was enough and an additional wake-up-ack wasn't needed. * This has changed with the BRF6350, which does require an * explicit wake-up-ack. Other BRF versions, which do not * require an explicit ack here, do accept it, thus it is * perfectly safe to always send one. */ BT_DBG("dual wake-up-indication"); fallthrough; case HCILL_ASLEEP: /* acknowledge device wake up */ if (send_hcill_cmd(HCILL_WAKE_UP_ACK, hu) < 0) { BT_ERR("cannot acknowledge device wake up"); goto out; } break; default: /* any other state is illegal */ BT_ERR("received HCILL_WAKE_UP_IND in state %ld", ll->hcill_state); break; } /* send pending packets and change state to HCILL_AWAKE */ __ll_do_awake(ll); out: spin_unlock_irqrestore(&ll->hcill_lock, flags); /* actually send the packets */ hci_uart_tx_wakeup(hu); } /* * Called upon a sleep-indication from the device */ static void ll_device_want_to_sleep(struct hci_uart *hu) { unsigned long flags; struct ll_struct *ll = hu->priv; BT_DBG("hu %p", hu); /* lock hcill state */ spin_lock_irqsave(&ll->hcill_lock, flags); /* sanity check */ if (ll->hcill_state != HCILL_AWAKE) BT_ERR("ERR: HCILL_GO_TO_SLEEP_IND in state %ld", ll->hcill_state); /* acknowledge device sleep */ if (send_hcill_cmd(HCILL_GO_TO_SLEEP_ACK, hu) < 0) { BT_ERR("cannot acknowledge device sleep"); goto out; } /* update state */ ll->hcill_state = HCILL_ASLEEP; out: spin_unlock_irqrestore(&ll->hcill_lock, flags); /* actually send the sleep ack packet */ hci_uart_tx_wakeup(hu); } /* * Called upon wake-up-acknowledgement from the device */ static void ll_device_woke_up(struct hci_uart *hu) { unsigned long flags; struct ll_struct *ll = hu->priv; BT_DBG("hu %p", hu); /* lock hcill state */ spin_lock_irqsave(&ll->hcill_lock, flags); /* sanity check */ if (ll->hcill_state != HCILL_ASLEEP_TO_AWAKE) BT_ERR("received HCILL_WAKE_UP_ACK in state %ld", ll->hcill_state); /* send pending packets and change state to HCILL_AWAKE */ __ll_do_awake(ll); spin_unlock_irqrestore(&ll->hcill_lock, flags); /* actually send the packets */ hci_uart_tx_wakeup(hu); } /* Enqueue frame for transmittion (padding, crc, etc) */ /* may be called from two simultaneous tasklets */ static int ll_enqueue(struct hci_uart *hu, struct sk_buff *skb) { unsigned long flags = 0; struct ll_struct *ll = hu->priv; BT_DBG("hu %p skb %p", hu, skb); /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); /* lock hcill state */ spin_lock_irqsave(&ll->hcill_lock, flags); /* act according to current state */ switch (ll->hcill_state) { case HCILL_AWAKE: BT_DBG("device awake, sending normally"); skb_queue_tail(&ll->txq, skb); break; case HCILL_ASLEEP: BT_DBG("device asleep, waking 
up and queueing packet"); /* save packet for later */ skb_queue_tail(&ll->tx_wait_q, skb); /* awake device */ if (send_hcill_cmd(HCILL_WAKE_UP_IND, hu) < 0) { BT_ERR("cannot wake up device"); break; } ll->hcill_state = HCILL_ASLEEP_TO_AWAKE; break; case HCILL_ASLEEP_TO_AWAKE: BT_DBG("device waking up, queueing packet"); /* transient state; just keep packet for later */ skb_queue_tail(&ll->tx_wait_q, skb); break; default: BT_ERR("illegal hcill state: %ld (losing packet)", ll->hcill_state); dev_kfree_skb_irq(skb); break; } spin_unlock_irqrestore(&ll->hcill_lock, flags); return 0; } static int ll_recv_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); struct ll_struct *ll = hu->priv; switch (hci_skb_pkt_type(skb)) { case HCILL_GO_TO_SLEEP_IND: BT_DBG("HCILL_GO_TO_SLEEP_IND packet"); ll_device_want_to_sleep(hu); break; case HCILL_GO_TO_SLEEP_ACK: /* shouldn't happen */ bt_dev_err(hdev, "received HCILL_GO_TO_SLEEP_ACK in state %ld", ll->hcill_state); break; case HCILL_WAKE_UP_IND: BT_DBG("HCILL_WAKE_UP_IND packet"); ll_device_want_to_wakeup(hu); break; case HCILL_WAKE_UP_ACK: BT_DBG("HCILL_WAKE_UP_ACK packet"); ll_device_woke_up(hu); break; } kfree_skb(skb); return 0; } #define LL_RECV_SLEEP_IND \ .type = HCILL_GO_TO_SLEEP_IND, \ .hlen = 0, \ .loff = 0, \ .lsize = 0, \ .maxlen = 0 #define LL_RECV_SLEEP_ACK \ .type = HCILL_GO_TO_SLEEP_ACK, \ .hlen = 0, \ .loff = 0, \ .lsize = 0, \ .maxlen = 0 #define LL_RECV_WAKE_IND \ .type = HCILL_WAKE_UP_IND, \ .hlen = 0, \ .loff = 0, \ .lsize = 0, \ .maxlen = 0 #define LL_RECV_WAKE_ACK \ .type = HCILL_WAKE_UP_ACK, \ .hlen = 0, \ .loff = 0, \ .lsize = 0, \ .maxlen = 0 static const struct h4_recv_pkt ll_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { LL_RECV_SLEEP_IND, .recv = ll_recv_frame }, { LL_RECV_SLEEP_ACK, .recv = ll_recv_frame }, { LL_RECV_WAKE_IND, .recv = ll_recv_frame }, { LL_RECV_WAKE_ACK, .recv = ll_recv_frame }, }; /* Recv data */ static int ll_recv(struct hci_uart *hu, const void *data, int count) { struct ll_struct *ll = hu->priv; if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; ll->rx_skb = h4_recv_buf(hu->hdev, ll->rx_skb, data, count, ll_recv_pkts, ARRAY_SIZE(ll_recv_pkts)); if (IS_ERR(ll->rx_skb)) { int err = PTR_ERR(ll->rx_skb); bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); ll->rx_skb = NULL; return err; } return count; } static struct sk_buff *ll_dequeue(struct hci_uart *hu) { struct ll_struct *ll = hu->priv; return skb_dequeue(&ll->txq); } #if IS_ENABLED(CONFIG_SERIAL_DEV_BUS) static int read_local_version(struct hci_dev *hdev) { int err = 0; unsigned short version = 0; struct sk_buff *skb; struct hci_rp_read_local_version *ver; skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reading TI version information failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } if (skb->len != sizeof(*ver)) { err = -EILSEQ; goto out; } ver = (struct hci_rp_read_local_version *)skb->data; if (le16_to_cpu(ver->manufacturer) != 13) { err = -ENODEV; goto out; } version = le16_to_cpu(ver->lmp_subver); out: if (err) bt_dev_err(hdev, "Failed to read TI version info: %d", err); kfree_skb(skb); return err ? 
err : version; } static int send_command_from_firmware(struct ll_device *lldev, struct hci_command *cmd) { struct sk_buff *skb; if (cmd->opcode == HCI_VS_UPDATE_UART_HCI_BAUDRATE) { /* ignore remote change * baud rate HCI VS command */ bt_dev_warn(lldev->hu.hdev, "change remote baud rate command in firmware"); return 0; } if (cmd->prefix != 1) bt_dev_dbg(lldev->hu.hdev, "command type %d", cmd->prefix); skb = __hci_cmd_sync(lldev->hu.hdev, cmd->opcode, cmd->plen, &cmd->speed, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(lldev->hu.hdev, "send command failed"); return PTR_ERR(skb); } kfree_skb(skb); return 0; } /* * download_firmware - * internal function which parses through the .bts firmware * script file intreprets SEND, DELAY actions only as of now */ static int download_firmware(struct ll_device *lldev) { unsigned short chip, min_ver, maj_ver; int version, err, len; unsigned char *ptr, *action_ptr; unsigned char bts_scr_name[40]; /* 40 char long bts scr name? */ const struct firmware *fw; struct hci_command *cmd; version = read_local_version(lldev->hu.hdev); if (version < 0) return version; chip = (version & 0x7C00) >> 10; min_ver = (version & 0x007F); maj_ver = (version & 0x0380) >> 7; if (version & 0x8000) maj_ver |= 0x0008; snprintf(bts_scr_name, sizeof(bts_scr_name), "ti-connectivity/TIInit_%d.%d.%d.bts", chip, maj_ver, min_ver); err = request_firmware(&fw, bts_scr_name, &lldev->serdev->dev); if (err || !fw->data || !fw->size) { bt_dev_err(lldev->hu.hdev, "request_firmware failed(errno %d) for %s", err, bts_scr_name); return -EINVAL; } ptr = (void *)fw->data; len = fw->size; /* bts_header to remove out magic number and * version */ ptr += sizeof(struct bts_header); len -= sizeof(struct bts_header); while (len > 0 && ptr) { bt_dev_dbg(lldev->hu.hdev, " action size %d, type %d ", ((struct bts_action *)ptr)->size, ((struct bts_action *)ptr)->type); action_ptr = &(((struct bts_action *)ptr)->data[0]); switch (((struct bts_action *)ptr)->type) { case ACTION_SEND_COMMAND: /* action send */ bt_dev_dbg(lldev->hu.hdev, "S"); cmd = (struct hci_command *)action_ptr; err = send_command_from_firmware(lldev, cmd); if (err) goto out_rel_fw; break; case ACTION_WAIT_EVENT: /* wait */ /* no need to wait as command was synchronous */ bt_dev_dbg(lldev->hu.hdev, "W"); break; case ACTION_DELAY: /* sleep */ bt_dev_info(lldev->hu.hdev, "sleep command in scr"); msleep(((struct bts_action_delay *)action_ptr)->msec); break; } len -= (sizeof(struct bts_action) + ((struct bts_action *)ptr)->size); ptr += sizeof(struct bts_action) + ((struct bts_action *)ptr)->size; } out_rel_fw: /* fw download complete */ release_firmware(fw); return err; } static int ll_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { bdaddr_t bdaddr_swapped; struct sk_buff *skb; /* HCI_VS_WRITE_BD_ADDR (at least on a CC2560A chip) expects the BD * address to be MSB first, but bdaddr_t has the convention of being * LSB first. 
*/ baswap(&bdaddr_swapped, bdaddr); skb = __hci_cmd_sync(hdev, HCI_VS_WRITE_BD_ADDR, sizeof(bdaddr_t), &bdaddr_swapped, HCI_INIT_TIMEOUT); if (!IS_ERR(skb)) kfree_skb(skb); return PTR_ERR_OR_ZERO(skb); } static int ll_setup(struct hci_uart *hu) { int err, retry = 3; struct ll_device *lldev; struct serdev_device *serdev = hu->serdev; u32 speed; if (!serdev) return 0; lldev = serdev_device_get_drvdata(serdev); hu->hdev->set_bdaddr = ll_set_bdaddr; serdev_device_set_flow_control(serdev, true); do { /* Reset the Bluetooth device */ gpiod_set_value_cansleep(lldev->enable_gpio, 0); msleep(5); gpiod_set_value_cansleep(lldev->enable_gpio, 1); mdelay(100); err = serdev_device_wait_for_cts(serdev, true, 200); if (err) { bt_dev_err(hu->hdev, "Failed to get CTS"); return err; } err = download_firmware(lldev); if (!err) break; /* Toggle BT_EN and retry */ bt_dev_err(hu->hdev, "download firmware failed, retrying..."); } while (retry--); if (err) return err; /* Set BD address if one was specified at probe */ if (!bacmp(&lldev->bdaddr, BDADDR_NONE)) { /* This means that there was an error getting the BD address * during probe, so mark the device as having a bad address. */ set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks); } else if (bacmp(&lldev->bdaddr, BDADDR_ANY)) { err = ll_set_bdaddr(hu->hdev, &lldev->bdaddr); if (err) set_bit(HCI_QUIRK_INVALID_BDADDR, &hu->hdev->quirks); } /* Operational speed if any */ if (hu->oper_speed) speed = hu->oper_speed; else if (hu->proto->oper_speed) speed = hu->proto->oper_speed; else speed = 0; if (speed) { __le32 speed_le = cpu_to_le32(speed); struct sk_buff *skb; skb = __hci_cmd_sync(hu->hdev, HCI_VS_UPDATE_UART_HCI_BAUDRATE, sizeof(speed_le), &speed_le, HCI_INIT_TIMEOUT); if (!IS_ERR(skb)) { kfree_skb(skb); serdev_device_set_baudrate(serdev, speed); } } return 0; } static const struct hci_uart_proto llp; static int hci_ti_probe(struct serdev_device *serdev) { struct hci_uart *hu; struct ll_device *lldev; struct nvmem_cell *bdaddr_cell; u32 max_speed = 3000000; lldev = devm_kzalloc(&serdev->dev, sizeof(struct ll_device), GFP_KERNEL); if (!lldev) return -ENOMEM; hu = &lldev->hu; serdev_device_set_drvdata(serdev, lldev); lldev->serdev = hu->serdev = serdev; lldev->enable_gpio = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(lldev->enable_gpio)) return PTR_ERR(lldev->enable_gpio); lldev->ext_clk = devm_clk_get(&serdev->dev, "ext_clock"); if (IS_ERR(lldev->ext_clk) && PTR_ERR(lldev->ext_clk) != -ENOENT) return PTR_ERR(lldev->ext_clk); of_property_read_u32(serdev->dev.of_node, "max-speed", &max_speed); hci_uart_set_speeds(hu, 115200, max_speed); /* optional BD address from nvram */ bdaddr_cell = nvmem_cell_get(&serdev->dev, "bd-address"); if (IS_ERR(bdaddr_cell)) { int err = PTR_ERR(bdaddr_cell); if (err == -EPROBE_DEFER) return err; /* ENOENT means there is no matching nvmem cell and ENOSYS * means that nvmem is not enabled in the kernel configuration. */ if (err != -ENOENT && err != -ENOSYS) { /* If there was some other error, give userspace a * chance to fix the problem instead of failing to load * the driver. Using BDADDR_NONE as a flag that is * tested later in the setup function. 
*/ dev_warn(&serdev->dev, "Failed to get \"bd-address\" nvmem cell (%d)\n", err); bacpy(&lldev->bdaddr, BDADDR_NONE); } } else { bdaddr_t *bdaddr; size_t len; bdaddr = nvmem_cell_read(bdaddr_cell, &len); nvmem_cell_put(bdaddr_cell); if (IS_ERR(bdaddr)) { dev_err(&serdev->dev, "Failed to read nvmem bd-address\n"); return PTR_ERR(bdaddr); } if (len != sizeof(bdaddr_t)) { dev_err(&serdev->dev, "Invalid nvmem bd-address length\n"); kfree(bdaddr); return -EINVAL; } /* As per the device tree bindings, the value from nvmem is * expected to be MSB first, but in the kernel it is expected * that bdaddr_t is LSB first. */ baswap(&lldev->bdaddr, bdaddr); kfree(bdaddr); } return hci_uart_register_device(hu, &llp); } static void hci_ti_remove(struct serdev_device *serdev) { struct ll_device *lldev = serdev_device_get_drvdata(serdev); hci_uart_unregister_device(&lldev->hu); } static const struct of_device_id hci_ti_of_match[] = { { .compatible = "ti,cc2560" }, { .compatible = "ti,wl1271-st" }, { .compatible = "ti,wl1273-st" }, { .compatible = "ti,wl1281-st" }, { .compatible = "ti,wl1283-st" }, { .compatible = "ti,wl1285-st" }, { .compatible = "ti,wl1801-st" }, { .compatible = "ti,wl1805-st" }, { .compatible = "ti,wl1807-st" }, { .compatible = "ti,wl1831-st" }, { .compatible = "ti,wl1835-st" }, { .compatible = "ti,wl1837-st" }, {}, }; MODULE_DEVICE_TABLE(of, hci_ti_of_match); static struct serdev_device_driver hci_ti_drv = { .driver = { .name = "hci-ti", .of_match_table = hci_ti_of_match, }, .probe = hci_ti_probe, .remove = hci_ti_remove, }; #else #define ll_setup NULL #endif static const struct hci_uart_proto llp = { .id = HCI_UART_LL, .name = "LL", .setup = ll_setup, .open = ll_open, .close = ll_close, .recv = ll_recv, .enqueue = ll_enqueue, .dequeue = ll_dequeue, .flush = ll_flush, }; int __init ll_init(void) { serdev_device_driver_register(&hci_ti_drv); return hci_uart_register_proto(&llp); } int __exit ll_deinit(void) { serdev_device_driver_unregister(&hci_ti_drv); return hci_uart_unregister_proto(&llp); }
linux-master
drivers/bluetooth/hci_ll.c
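download_firmware() in hci_ll.c decodes the chip, major, and minor fields out of the LMP subversion word before building the TIInit script name. A small userspace reproduction of that bit-slicing follows; the masks are copied from the driver, while the sample input value is arbitrary.

/*
 * Sketch of how hci_ll.c derives the TIInit firmware name from the LMP
 * subversion word (bit layout copied from download_firmware() above).
 */
#include <stdio.h>

static void bts_name(unsigned short version, char *buf, size_t len)
{
	unsigned short chip    = (version & 0x7C00) >> 10;
	unsigned short min_ver = (version & 0x007F);
	unsigned short maj_ver = (version & 0x0380) >> 7;

	if (version & 0x8000)
		maj_ver |= 0x0008;

	snprintf(buf, len, "ti-connectivity/TIInit_%d.%d.%d.bts",
		 chip, maj_ver, min_ver);
}

int main(void)
{
	char name[40];

	/* 0x1c06: chip 7, major 0, minor 6 -> TIInit_7.0.6.bts */
	bts_name(0x1c06, name, sizeof(name));
	puts(name);
	return 0;
}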
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Broadcom Blutonium firmware driver * * Copyright (C) 2003 Maxim Krasnyansky <[email protected]> * Copyright (C) 2003 Marcel Holtmann <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/firmware.h> #include <linux/usb.h> #include <net/bluetooth/bluetooth.h> #define VERSION "1.2" static const struct usb_device_id bcm203x_table[] = { /* Broadcom Blutonium (BCM2033) */ { USB_DEVICE(0x0a5c, 0x2033) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, bcm203x_table); #define BCM203X_ERROR 0 #define BCM203X_RESET 1 #define BCM203X_LOAD_MINIDRV 2 #define BCM203X_SELECT_MEMORY 3 #define BCM203X_CHECK_MEMORY 4 #define BCM203X_LOAD_FIRMWARE 5 #define BCM203X_CHECK_FIRMWARE 6 #define BCM203X_IN_EP 0x81 #define BCM203X_OUT_EP 0x02 struct bcm203x_data { struct usb_device *udev; unsigned long state; struct work_struct work; atomic_t shutdown; struct urb *urb; unsigned char *buffer; unsigned char *fw_data; unsigned int fw_size; unsigned int fw_sent; }; static void bcm203x_complete(struct urb *urb) { struct bcm203x_data *data = urb->context; struct usb_device *udev = urb->dev; int len; BT_DBG("udev %p urb %p", udev, urb); if (urb->status) { BT_ERR("URB failed with status %d", urb->status); data->state = BCM203X_ERROR; return; } switch (data->state) { case BCM203X_LOAD_MINIDRV: memcpy(data->buffer, "#", 1); usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP), data->buffer, 1, bcm203x_complete, data); data->state = BCM203X_SELECT_MEMORY; /* use workqueue to have a small delay */ schedule_work(&data->work); break; case BCM203X_SELECT_MEMORY: usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP), data->buffer, 32, bcm203x_complete, data, 1); data->state = BCM203X_CHECK_MEMORY; if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) BT_ERR("Can't submit URB"); break; case BCM203X_CHECK_MEMORY: if (data->buffer[0] != '#') { BT_ERR("Memory select failed"); data->state = BCM203X_ERROR; break; } data->state = BCM203X_LOAD_FIRMWARE; fallthrough; case BCM203X_LOAD_FIRMWARE: if (data->fw_sent == data->fw_size) { usb_fill_int_urb(urb, udev, usb_rcvintpipe(udev, BCM203X_IN_EP), data->buffer, 32, bcm203x_complete, data, 1); data->state = BCM203X_CHECK_FIRMWARE; } else { len = min_t(uint, data->fw_size - data->fw_sent, 4096); usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP), data->fw_data + data->fw_sent, len, bcm203x_complete, data); data->fw_sent += len; } if (usb_submit_urb(data->urb, GFP_ATOMIC) < 0) BT_ERR("Can't submit URB"); break; case BCM203X_CHECK_FIRMWARE: if (data->buffer[0] != '.') { BT_ERR("Firmware loading failed"); data->state = BCM203X_ERROR; break; } data->state = BCM203X_RESET; break; } } static void bcm203x_work(struct work_struct *work) { struct bcm203x_data *data = container_of(work, struct bcm203x_data, work); if (atomic_read(&data->shutdown)) return; if (usb_submit_urb(data->urb, GFP_KERNEL) < 0) BT_ERR("Can't submit URB"); } static int bcm203x_probe(struct usb_interface *intf, const struct usb_device_id *id) { const struct firmware *firmware; struct usb_device *udev = interface_to_usbdev(intf); struct bcm203x_data *data; int size; BT_DBG("intf %p id %p", intf, id); if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->udev = udev; 
data->state = BCM203X_LOAD_MINIDRV; data->urb = usb_alloc_urb(0, GFP_KERNEL); if (!data->urb) return -ENOMEM; if (request_firmware(&firmware, "BCM2033-MD.hex", &udev->dev) < 0) { BT_ERR("Mini driver request failed"); usb_free_urb(data->urb); return -EIO; } BT_DBG("minidrv data %p size %zu", firmware->data, firmware->size); size = max_t(uint, firmware->size, 4096); data->buffer = kmalloc(size, GFP_KERNEL); if (!data->buffer) { BT_ERR("Can't allocate memory for mini driver"); release_firmware(firmware); usb_free_urb(data->urb); return -ENOMEM; } memcpy(data->buffer, firmware->data, firmware->size); usb_fill_bulk_urb(data->urb, udev, usb_sndbulkpipe(udev, BCM203X_OUT_EP), data->buffer, firmware->size, bcm203x_complete, data); release_firmware(firmware); if (request_firmware(&firmware, "BCM2033-FW.bin", &udev->dev) < 0) { BT_ERR("Firmware request failed"); usb_free_urb(data->urb); kfree(data->buffer); return -EIO; } BT_DBG("firmware data %p size %zu", firmware->data, firmware->size); data->fw_data = kmemdup(firmware->data, firmware->size, GFP_KERNEL); if (!data->fw_data) { BT_ERR("Can't allocate memory for firmware image"); release_firmware(firmware); usb_free_urb(data->urb); kfree(data->buffer); return -ENOMEM; } data->fw_size = firmware->size; data->fw_sent = 0; release_firmware(firmware); INIT_WORK(&data->work, bcm203x_work); usb_set_intfdata(intf, data); /* use workqueue to have a small delay */ schedule_work(&data->work); return 0; } static void bcm203x_disconnect(struct usb_interface *intf) { struct bcm203x_data *data = usb_get_intfdata(intf); BT_DBG("intf %p", intf); atomic_inc(&data->shutdown); cancel_work_sync(&data->work); usb_kill_urb(data->urb); usb_set_intfdata(intf, NULL); usb_free_urb(data->urb); kfree(data->fw_data); kfree(data->buffer); } static struct usb_driver bcm203x_driver = { .name = "bcm203x", .probe = bcm203x_probe, .disconnect = bcm203x_disconnect, .id_table = bcm203x_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(bcm203x_driver); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Broadcom Blutonium firmware driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("BCM2033-MD.hex"); MODULE_FIRMWARE("BCM2033-FW.bin");
linux-master
drivers/bluetooth/bcm203x.c
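bcm203x drives its whole download from URB completions: LOAD_MINIDRV, SELECT_MEMORY, CHECK_MEMORY (the device must echo '#'), LOAD_FIRMWARE in 4 KB slices, CHECK_FIRMWARE (the device must answer '.'), then RESET. Below is a tabletop model of those transitions under heavy simplification: there are no URBs, and the device replies are faked, so this only demonstrates the state progression and the fall-through from a successful memory check into the firmware loop.

/*
 * Simplified model of the bcm203x_complete() state machine. The replies
 * ('#' after memory select, '.' after the firmware) mirror the checks in
 * the driver; everything else is simulated.
 */
#include <stdio.h>

enum state { ERROR, RESET, LOAD_MINIDRV, SELECT_MEMORY,
	     CHECK_MEMORY, LOAD_FIRMWARE, CHECK_FIRMWARE };

static enum state step(enum state s, char reply, unsigned *sent,
		       unsigned fw_size)
{
	switch (s) {
	case LOAD_MINIDRV:   /* minidriver written, '#' goes out next */
		return SELECT_MEMORY;
	case SELECT_MEMORY:  /* '#' written, read the echo back */
		return CHECK_MEMORY;
	case CHECK_MEMORY:
		if (reply != '#')
			return ERROR;
		/* fall through into the firmware download, as the driver does */
	case LOAD_FIRMWARE:
		if (*sent == fw_size)
			return CHECK_FIRMWARE;
		*sent += (fw_size - *sent > 4096) ? 4096 : fw_size - *sent;
		return LOAD_FIRMWARE;
	case CHECK_FIRMWARE:
		return (reply == '.') ? RESET : ERROR;
	default:
		return s;
	}
}

int main(void)
{
	enum state s = LOAD_MINIDRV;
	unsigned sent = 0, fw_size = 10000;

	while (s != RESET && s != ERROR)
		s = step(s, s == CHECK_MEMORY ? '#' : '.', &sent, fw_size);

	printf("final state: %s (sent %u bytes)\n",
	       s == RESET ? "RESET" : "ERROR", sent);
	return s == RESET ? 0 : 1;
}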
// SPDX-License-Identifier: GPL-2.0 // Copyright (c) 2019 MediaTek Inc. /* * Bluetooth support for MediaTek SDIO devices * * This file is written based on btsdio.c and btmtkuart.c. * * Author: Sean Wang <[email protected]> * */ #include <asm/unaligned.h> #include <linux/atomic.h> #include <linux/gpio/consumer.h> #include <linux/init.h> #include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/of.h> #include <linux/pm_runtime.h> #include <linux/skbuff.h> #include <linux/mmc/host.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio_func.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "h4_recv.h" #include "btmtk.h" #define VERSION "0.1" #define MTKBTSDIO_AUTOSUSPEND_DELAY 1000 static bool enable_autosuspend = true; struct btmtksdio_data { const char *fwname; u16 chipid; bool lp_mbox_supported; }; static const struct btmtksdio_data mt7663_data = { .fwname = FIRMWARE_MT7663, .chipid = 0x7663, .lp_mbox_supported = false, }; static const struct btmtksdio_data mt7668_data = { .fwname = FIRMWARE_MT7668, .chipid = 0x7668, .lp_mbox_supported = false, }; static const struct btmtksdio_data mt7921_data = { .fwname = FIRMWARE_MT7961, .chipid = 0x7921, .lp_mbox_supported = true, }; static const struct sdio_device_id btmtksdio_table[] = { {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7663), .driver_data = (kernel_ulong_t)&mt7663_data }, {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7668), .driver_data = (kernel_ulong_t)&mt7668_data }, {SDIO_DEVICE(SDIO_VENDOR_ID_MEDIATEK, SDIO_DEVICE_ID_MEDIATEK_MT7961), .driver_data = (kernel_ulong_t)&mt7921_data }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(sdio, btmtksdio_table); #define MTK_REG_CHLPCR 0x4 /* W1S */ #define C_INT_EN_SET BIT(0) #define C_INT_EN_CLR BIT(1) #define C_FW_OWN_REQ_SET BIT(8) /* For write */ #define C_COM_DRV_OWN BIT(8) /* For read */ #define C_FW_OWN_REQ_CLR BIT(9) #define MTK_REG_CSDIOCSR 0x8 #define SDIO_RE_INIT_EN BIT(0) #define SDIO_INT_CTL BIT(2) #define MTK_REG_CHCR 0xc #define C_INT_CLR_CTRL BIT(1) #define BT_RST_DONE BIT(8) /* CHISR have the same bits field definition with CHIER */ #define MTK_REG_CHISR 0x10 #define MTK_REG_CHIER 0x14 #define FW_OWN_BACK_INT BIT(0) #define RX_DONE_INT BIT(1) #define TX_EMPTY BIT(2) #define TX_FIFO_OVERFLOW BIT(8) #define FW_MAILBOX_INT BIT(15) #define INT_MASK GENMASK(15, 0) #define RX_PKT_LEN GENMASK(31, 16) #define MTK_REG_CSICR 0xc0 #define CSICR_CLR_MBOX_ACK BIT(0) #define MTK_REG_PH2DSM0R 0xc4 #define PH2DSM0R_DRIVER_OWN BIT(0) #define MTK_REG_PD2HRM0R 0xdc #define PD2HRM0R_DRV_OWN BIT(0) #define MTK_REG_CTDR 0x18 #define MTK_REG_CRDR 0x1c #define MTK_REG_CRPLR 0x24 #define MTK_SDIO_BLOCK_SIZE 256 #define BTMTKSDIO_TX_WAIT_VND_EVT 1 #define BTMTKSDIO_HW_TX_READY 2 #define BTMTKSDIO_FUNC_ENABLED 3 #define BTMTKSDIO_PATCH_ENABLED 4 #define BTMTKSDIO_HW_RESET_ACTIVE 5 struct mtkbtsdio_hdr { __le16 len; __le16 reserved; u8 bt_type; } __packed; struct btmtksdio_dev { struct hci_dev *hdev; struct sdio_func *func; struct device *dev; struct work_struct txrx_work; unsigned long tx_state; struct sk_buff_head txq; struct sk_buff *evt_skb; const struct btmtksdio_data *data; struct gpio_desc *reset; }; static int mtk_hci_wmt_sync(struct hci_dev *hdev, struct btmtk_hci_wmt_params *wmt_params) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); struct btmtk_hci_wmt_evt_funcc *wmt_evt_funcc; struct btmtk_hci_wmt_evt_reg *wmt_evt_reg; u32 hlen, status = BTMTK_WMT_INVALID; struct 
btmtk_hci_wmt_evt *wmt_evt; struct btmtk_hci_wmt_cmd *wc; struct btmtk_wmt_hdr *hdr; int err; /* Send the WMT command and wait until the WMT event returns */ hlen = sizeof(*hdr) + wmt_params->dlen; if (hlen > 255) return -EINVAL; wc = kzalloc(hlen, GFP_KERNEL); if (!wc) return -ENOMEM; hdr = &wc->hdr; hdr->dir = 1; hdr->op = wmt_params->op; hdr->dlen = cpu_to_le16(wmt_params->dlen + 1); hdr->flag = wmt_params->flag; memcpy(wc->data, wmt_params->data, wmt_params->dlen); set_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); err = __hci_cmd_send(hdev, 0xfc6f, hlen, wc); if (err < 0) { clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); goto err_free_wc; } /* The vendor specific WMT commands are all answered by a vendor * specific event and will not have the Command Status or Command * Complete as with usual HCI command flow control. * * After sending the command, wait for BTMTKSDIO_TX_WAIT_VND_EVT * state to be cleared. The driver specific event receive routine * will clear that state and with that indicate completion of the * WMT command. */ err = wait_on_bit_timeout(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT, TASK_INTERRUPTIBLE, HCI_INIT_TIMEOUT); if (err == -EINTR) { bt_dev_err(hdev, "Execution of wmt command interrupted"); clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); goto err_free_wc; } if (err) { bt_dev_err(hdev, "Execution of wmt command timed out"); clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state); err = -ETIMEDOUT; goto err_free_wc; } /* Parse and handle the return WMT event */ wmt_evt = (struct btmtk_hci_wmt_evt *)bdev->evt_skb->data; if (wmt_evt->whdr.op != hdr->op) { bt_dev_err(hdev, "Wrong op received %d expected %d", wmt_evt->whdr.op, hdr->op); err = -EIO; goto err_free_skb; } switch (wmt_evt->whdr.op) { case BTMTK_WMT_SEMAPHORE: if (wmt_evt->whdr.flag == 2) status = BTMTK_WMT_PATCH_UNDONE; else status = BTMTK_WMT_PATCH_DONE; break; case BTMTK_WMT_FUNC_CTRL: wmt_evt_funcc = (struct btmtk_hci_wmt_evt_funcc *)wmt_evt; if (be16_to_cpu(wmt_evt_funcc->status) == 0x404) status = BTMTK_WMT_ON_DONE; else if (be16_to_cpu(wmt_evt_funcc->status) == 0x420) status = BTMTK_WMT_ON_PROGRESS; else status = BTMTK_WMT_ON_UNDONE; break; case BTMTK_WMT_PATCH_DWNLD: if (wmt_evt->whdr.flag == 2) status = BTMTK_WMT_PATCH_DONE; else if (wmt_evt->whdr.flag == 1) status = BTMTK_WMT_PATCH_PROGRESS; else status = BTMTK_WMT_PATCH_UNDONE; break; case BTMTK_WMT_REGISTER: wmt_evt_reg = (struct btmtk_hci_wmt_evt_reg *)wmt_evt; if (le16_to_cpu(wmt_evt->whdr.dlen) == 12) status = le32_to_cpu(wmt_evt_reg->val); break; } if (wmt_params->status) *wmt_params->status = status; err_free_skb: kfree_skb(bdev->evt_skb); bdev->evt_skb = NULL; err_free_wc: kfree(wc); return err; } static int btmtksdio_tx_packet(struct btmtksdio_dev *bdev, struct sk_buff *skb) { struct mtkbtsdio_hdr *sdio_hdr; int err; /* Make sure that there are enough rooms for SDIO header */ if (unlikely(skb_headroom(skb) < sizeof(*sdio_hdr))) { err = pskb_expand_head(skb, sizeof(*sdio_hdr), 0, GFP_ATOMIC); if (err < 0) return err; } /* Prepend MediaTek SDIO Specific Header */ skb_push(skb, sizeof(*sdio_hdr)); sdio_hdr = (void *)skb->data; sdio_hdr->len = cpu_to_le16(skb->len); sdio_hdr->reserved = cpu_to_le16(0); sdio_hdr->bt_type = hci_skb_pkt_type(skb); clear_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state); err = sdio_writesb(bdev->func, MTK_REG_CTDR, skb->data, round_up(skb->len, MTK_SDIO_BLOCK_SIZE)); if (err < 0) goto err_skb_pull; bdev->hdev->stat.byte_tx += skb->len; kfree_skb(skb); return 0; err_skb_pull: skb_pull(skb, 
sizeof(*sdio_hdr)); return err; } static u32 btmtksdio_drv_own_query(struct btmtksdio_dev *bdev) { return sdio_readl(bdev->func, MTK_REG_CHLPCR, NULL); } static u32 btmtksdio_drv_own_query_79xx(struct btmtksdio_dev *bdev) { return sdio_readl(bdev->func, MTK_REG_PD2HRM0R, NULL); } static u32 btmtksdio_chcr_query(struct btmtksdio_dev *bdev) { return sdio_readl(bdev->func, MTK_REG_CHCR, NULL); } static int btmtksdio_fw_pmctrl(struct btmtksdio_dev *bdev) { u32 status; int err; sdio_claim_host(bdev->func); if (bdev->data->lp_mbox_supported && test_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state)) { sdio_writel(bdev->func, CSICR_CLR_MBOX_ACK, MTK_REG_CSICR, &err); err = readx_poll_timeout(btmtksdio_drv_own_query_79xx, bdev, status, !(status & PD2HRM0R_DRV_OWN), 2000, 1000000); if (err < 0) { bt_dev_err(bdev->hdev, "mailbox ACK not cleared"); goto out; } } /* Return ownership to the device */ sdio_writel(bdev->func, C_FW_OWN_REQ_SET, MTK_REG_CHLPCR, &err); if (err < 0) goto out; err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status, !(status & C_COM_DRV_OWN), 2000, 1000000); out: sdio_release_host(bdev->func); if (err < 0) bt_dev_err(bdev->hdev, "Cannot return ownership to device"); return err; } static int btmtksdio_drv_pmctrl(struct btmtksdio_dev *bdev) { u32 status; int err; sdio_claim_host(bdev->func); /* Get ownership from the device */ sdio_writel(bdev->func, C_FW_OWN_REQ_CLR, MTK_REG_CHLPCR, &err); if (err < 0) goto out; err = readx_poll_timeout(btmtksdio_drv_own_query, bdev, status, status & C_COM_DRV_OWN, 2000, 1000000); if (!err && bdev->data->lp_mbox_supported && test_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state)) err = readx_poll_timeout(btmtksdio_drv_own_query_79xx, bdev, status, status & PD2HRM0R_DRV_OWN, 2000, 1000000); out: sdio_release_host(bdev->func); if (err < 0) bt_dev_err(bdev->hdev, "Cannot get ownership from device"); return err; } static int btmtksdio_recv_event(struct hci_dev *hdev, struct sk_buff *skb) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); struct hci_event_hdr *hdr = (void *)skb->data; u8 evt = hdr->evt; int err; /* When someone waits for the WMT event, the skb is being cloned * and being processed the events from there then. */ if (test_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) { bdev->evt_skb = skb_clone(skb, GFP_KERNEL); if (!bdev->evt_skb) { err = -ENOMEM; goto err_out; } } err = hci_recv_frame(hdev, skb); if (err < 0) goto err_free_skb; if (evt == HCI_EV_WMT) { if (test_and_clear_bit(BTMTKSDIO_TX_WAIT_VND_EVT, &bdev->tx_state)) { /* Barrier to sync with other CPUs */ smp_mb__after_atomic(); wake_up_bit(&bdev->tx_state, BTMTKSDIO_TX_WAIT_VND_EVT); } } return 0; err_free_skb: kfree_skb(bdev->evt_skb); bdev->evt_skb = NULL; err_out: return err; } static int btmtksdio_recv_acl(struct hci_dev *hdev, struct sk_buff *skb) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); u16 handle = le16_to_cpu(hci_acl_hdr(skb)->handle); switch (handle) { case 0xfc6f: /* Firmware dump from device: when the firmware hangs, the * device can no longer suspend and thus disable auto-suspend. 
*/ pm_runtime_forbid(bdev->dev); fallthrough; case 0x05ff: case 0x05fe: /* Firmware debug logging */ return hci_recv_diag(hdev, skb); } return hci_recv_frame(hdev, skb); } static const struct h4_recv_pkt mtk_recv_pkts[] = { { H4_RECV_ACL, .recv = btmtksdio_recv_acl }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = btmtksdio_recv_event }, }; static int btmtksdio_rx_packet(struct btmtksdio_dev *bdev, u16 rx_size) { const struct h4_recv_pkt *pkts = mtk_recv_pkts; int pkts_count = ARRAY_SIZE(mtk_recv_pkts); struct mtkbtsdio_hdr *sdio_hdr; int err, i, pad_size; struct sk_buff *skb; u16 dlen; if (rx_size < sizeof(*sdio_hdr)) return -EILSEQ; /* A SDIO packet is exactly containing a Bluetooth packet */ skb = bt_skb_alloc(rx_size, GFP_KERNEL); if (!skb) return -ENOMEM; skb_put(skb, rx_size); err = sdio_readsb(bdev->func, skb->data, MTK_REG_CRDR, rx_size); if (err < 0) goto err_kfree_skb; sdio_hdr = (void *)skb->data; /* We assume the default error as -EILSEQ simply to make the error path * be cleaner. */ err = -EILSEQ; if (rx_size != le16_to_cpu(sdio_hdr->len)) { bt_dev_err(bdev->hdev, "Rx size in sdio header is mismatched "); goto err_kfree_skb; } hci_skb_pkt_type(skb) = sdio_hdr->bt_type; /* Remove MediaTek SDIO header */ skb_pull(skb, sizeof(*sdio_hdr)); /* We have to dig into the packet to get payload size and then know how * many padding bytes at the tail, these padding bytes should be removed * before the packet is indicated to the core layer. */ for (i = 0; i < pkts_count; i++) { if (sdio_hdr->bt_type == (&pkts[i])->type) break; } if (i >= pkts_count) { bt_dev_err(bdev->hdev, "Invalid bt type 0x%02x", sdio_hdr->bt_type); goto err_kfree_skb; } /* Remaining bytes cannot hold a header*/ if (skb->len < (&pkts[i])->hlen) { bt_dev_err(bdev->hdev, "The size of bt header is mismatched"); goto err_kfree_skb; } switch ((&pkts[i])->lsize) { case 1: dlen = skb->data[(&pkts[i])->loff]; break; case 2: dlen = get_unaligned_le16(skb->data + (&pkts[i])->loff); break; default: goto err_kfree_skb; } pad_size = skb->len - (&pkts[i])->hlen - dlen; /* Remaining bytes cannot hold a payload */ if (pad_size < 0) { bt_dev_err(bdev->hdev, "The size of bt payload is mismatched"); goto err_kfree_skb; } /* Remove padding bytes */ skb_trim(skb, skb->len - pad_size); /* Complete frame */ (&pkts[i])->recv(bdev->hdev, skb); bdev->hdev->stat.byte_rx += rx_size; return 0; err_kfree_skb: kfree_skb(skb); return err; } static void btmtksdio_txrx_work(struct work_struct *work) { struct btmtksdio_dev *bdev = container_of(work, struct btmtksdio_dev, txrx_work); unsigned long txrx_timeout; u32 int_status, rx_size; struct sk_buff *skb; int err; pm_runtime_get_sync(bdev->dev); sdio_claim_host(bdev->func); /* Disable interrupt */ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0); txrx_timeout = jiffies + 5 * HZ; do { int_status = sdio_readl(bdev->func, MTK_REG_CHISR, NULL); /* Ack an interrupt as soon as possible before any operation on * hardware. * * Note that we don't ack any status during operations to avoid race * condition between the host and the device such as it's possible to * mistakenly ack RX_DONE for the next packet and then cause interrupts * not be raised again but there is still pending data in the hardware * FIFO. 
*/ sdio_writel(bdev->func, int_status, MTK_REG_CHISR, NULL); int_status &= INT_MASK; if ((int_status & FW_MAILBOX_INT) && bdev->data->chipid == 0x7921) { sdio_writel(bdev->func, PH2DSM0R_DRIVER_OWN, MTK_REG_PH2DSM0R, 0); } if (int_status & FW_OWN_BACK_INT) bt_dev_dbg(bdev->hdev, "Get fw own back"); if (int_status & TX_EMPTY) set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state); else if (unlikely(int_status & TX_FIFO_OVERFLOW)) bt_dev_warn(bdev->hdev, "Tx fifo overflow"); if (test_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state)) { skb = skb_dequeue(&bdev->txq); if (skb) { err = btmtksdio_tx_packet(bdev, skb); if (err < 0) { bdev->hdev->stat.err_tx++; skb_queue_head(&bdev->txq, skb); } } } if (int_status & RX_DONE_INT) { rx_size = sdio_readl(bdev->func, MTK_REG_CRPLR, NULL); rx_size = (rx_size & RX_PKT_LEN) >> 16; if (btmtksdio_rx_packet(bdev, rx_size) < 0) bdev->hdev->stat.err_rx++; } } while (int_status || time_is_before_jiffies(txrx_timeout)); /* Enable interrupt */ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, 0); sdio_release_host(bdev->func); pm_runtime_mark_last_busy(bdev->dev); pm_runtime_put_autosuspend(bdev->dev); } static void btmtksdio_interrupt(struct sdio_func *func) { struct btmtksdio_dev *bdev = sdio_get_drvdata(func); /* Disable interrupt */ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, 0); schedule_work(&bdev->txrx_work); } static int btmtksdio_open(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); u32 val; int err; sdio_claim_host(bdev->func); err = sdio_enable_func(bdev->func); if (err < 0) goto err_release_host; set_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state); err = btmtksdio_drv_pmctrl(bdev); if (err < 0) goto err_disable_func; /* Disable interrupt & mask out all interrupt sources */ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, &err); if (err < 0) goto err_disable_func; sdio_writel(bdev->func, 0, MTK_REG_CHIER, &err); if (err < 0) goto err_disable_func; err = sdio_claim_irq(bdev->func, btmtksdio_interrupt); if (err < 0) goto err_disable_func; err = sdio_set_block_size(bdev->func, MTK_SDIO_BLOCK_SIZE); if (err < 0) goto err_release_irq; /* SDIO CMD 5 allows the SDIO device back to idle state an * synchronous interrupt is supported in SDIO 4-bit mode */ val = sdio_readl(bdev->func, MTK_REG_CSDIOCSR, &err); if (err < 0) goto err_release_irq; val |= SDIO_INT_CTL; sdio_writel(bdev->func, val, MTK_REG_CSDIOCSR, &err); if (err < 0) goto err_release_irq; /* Explitly set write-1-clear method */ val = sdio_readl(bdev->func, MTK_REG_CHCR, &err); if (err < 0) goto err_release_irq; val |= C_INT_CLR_CTRL; sdio_writel(bdev->func, val, MTK_REG_CHCR, &err); if (err < 0) goto err_release_irq; /* Setup interrupt sources */ sdio_writel(bdev->func, RX_DONE_INT | TX_EMPTY | TX_FIFO_OVERFLOW, MTK_REG_CHIER, &err); if (err < 0) goto err_release_irq; /* Enable interrupt */ sdio_writel(bdev->func, C_INT_EN_SET, MTK_REG_CHLPCR, &err); if (err < 0) goto err_release_irq; sdio_release_host(bdev->func); return 0; err_release_irq: sdio_release_irq(bdev->func); err_disable_func: sdio_disable_func(bdev->func); err_release_host: sdio_release_host(bdev->func); return err; } static int btmtksdio_close(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); sdio_claim_host(bdev->func); /* Disable interrupt */ sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL); sdio_release_irq(bdev->func); cancel_work_sync(&bdev->txrx_work); btmtksdio_fw_pmctrl(bdev); clear_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state); sdio_disable_func(bdev->func); 
sdio_release_host(bdev->func); return 0; } static int btmtksdio_flush(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); skb_queue_purge(&bdev->txq); cancel_work_sync(&bdev->txrx_work); return 0; } static int btmtksdio_func_query(struct hci_dev *hdev) { struct btmtk_hci_wmt_params wmt_params; int status, err; u8 param = 0; /* Query whether the function is enabled */ wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 4; wmt_params.dlen = sizeof(param); wmt_params.data = &param; wmt_params.status = &status; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to query function status (%d)", err); return err; } return status; } static int mt76xx_setup(struct hci_dev *hdev, const char *fwname) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); struct btmtk_hci_wmt_params wmt_params; struct btmtk_tci_sleep tci_sleep; struct sk_buff *skb; int err, status; u8 param = 0x1; /* Query whether the firmware is already download */ wmt_params.op = BTMTK_WMT_SEMAPHORE; wmt_params.flag = 1; wmt_params.dlen = 0; wmt_params.data = NULL; wmt_params.status = &status; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to query firmware status (%d)", err); return err; } if (status == BTMTK_WMT_PATCH_DONE) { bt_dev_info(hdev, "Firmware already downloaded"); goto ignore_setup_fw; } /* Setup a firmware which the device definitely requires */ err = btmtk_setup_firmware(hdev, fwname, mtk_hci_wmt_sync); if (err < 0) return err; ignore_setup_fw: /* Query whether the device is already enabled */ err = readx_poll_timeout(btmtksdio_func_query, hdev, status, status < 0 || status != BTMTK_WMT_ON_PROGRESS, 2000, 5000000); /* -ETIMEDOUT happens */ if (err < 0) return err; /* The other errors happen in btusb_mtk_func_query */ if (status < 0) return status; if (status == BTMTK_WMT_ON_DONE) { bt_dev_info(hdev, "function already on"); goto ignore_func_on; } /* Enable Bluetooth protocol */ wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 0; wmt_params.dlen = sizeof(param); wmt_params.data = &param; wmt_params.status = NULL; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); return err; } set_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state); ignore_func_on: /* Apply the low power environment setup */ tci_sleep.mode = 0x5; tci_sleep.duration = cpu_to_le16(0x640); tci_sleep.host_duration = cpu_to_le16(0x640); tci_sleep.host_wakeup_pin = 0; tci_sleep.time_compensation = 0; skb = __hci_cmd_sync(hdev, 0xfc7a, sizeof(tci_sleep), &tci_sleep, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "Failed to apply low power setting (%d)", err); return err; } kfree_skb(skb); return 0; } static int mt79xx_setup(struct hci_dev *hdev, const char *fwname) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); struct btmtk_hci_wmt_params wmt_params; u8 param = 0x1; int err; err = btmtk_setup_firmware_79xx(hdev, fwname, mtk_hci_wmt_sync); if (err < 0) { bt_dev_err(hdev, "Failed to setup 79xx firmware (%d)", err); return err; } err = btmtksdio_fw_pmctrl(bdev); if (err < 0) return err; err = btmtksdio_drv_pmctrl(bdev); if (err < 0) return err; /* Enable Bluetooth protocol */ wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 0; wmt_params.dlen = sizeof(param); wmt_params.data = &param; wmt_params.status = NULL; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); return err; } hci_set_msft_opcode(hdev, 
0xFD30); hci_set_aosp_capable(hdev); set_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state); return err; } static int btmtksdio_mtk_reg_read(struct hci_dev *hdev, u32 reg, u32 *val) { struct btmtk_hci_wmt_params wmt_params; struct reg_read_cmd reg_read = { .type = 1, .num = 1, }; u32 status; int err; reg_read.addr = cpu_to_le32(reg); wmt_params.op = BTMTK_WMT_REGISTER; wmt_params.flag = BTMTK_WMT_REG_READ; wmt_params.dlen = sizeof(reg_read); wmt_params.data = &reg_read; wmt_params.status = &status; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to read reg (%d)", err); return err; } *val = status; return err; } static int btmtksdio_mtk_reg_write(struct hci_dev *hdev, u32 reg, u32 val, u32 mask) { struct btmtk_hci_wmt_params wmt_params; const struct reg_write_cmd reg_write = { .type = 1, .num = 1, .addr = cpu_to_le32(reg), .data = cpu_to_le32(val), .mask = cpu_to_le32(mask), }; int err, status; wmt_params.op = BTMTK_WMT_REGISTER; wmt_params.flag = BTMTK_WMT_REG_WRITE; wmt_params.dlen = sizeof(reg_write); wmt_params.data = &reg_write; wmt_params.status = &status; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) bt_dev_err(hdev, "Failed to write reg (%d)", err); return err; } static int btmtksdio_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id) { /* uses 1 as data path id for all the usecases */ *data_path_id = 1; return 0; } static int btmtksdio_get_codec_config_data(struct hci_dev *hdev, __u8 link, struct bt_codec *codec, __u8 *ven_len, __u8 **ven_data) { int err = 0; if (!ven_data || !ven_len) return -EINVAL; *ven_len = 0; *ven_data = NULL; if (link != ESCO_LINK) { bt_dev_err(hdev, "Invalid link type(%u)", link); return -EINVAL; } *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL); if (!*ven_data) { err = -ENOMEM; goto error; } /* supports only CVSD and mSBC offload codecs */ switch (codec->id) { case 0x02: **ven_data = 0x00; break; case 0x05: **ven_data = 0x01; break; default: err = -EINVAL; bt_dev_err(hdev, "Invalid codec id(%u)", codec->id); goto error; } /* codec and its capabilities are pre-defined to ids * preset id = 0x00 represents CVSD codec with sampling rate 8K * preset id = 0x01 represents mSBC codec with sampling rate 16K */ *ven_len = sizeof(__u8); return err; error: kfree(*ven_data); *ven_data = NULL; return err; } static int btmtksdio_sco_setting(struct hci_dev *hdev) { const struct btmtk_sco sco_setting = { .clock_config = 0x49, .channel_format_config = 0x80, }; struct sk_buff *skb; u32 val; int err; /* Enable SCO over I2S/PCM for MediaTek chipset */ skb = __hci_cmd_sync(hdev, 0xfc72, sizeof(sco_setting), &sco_setting, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); err = btmtksdio_mtk_reg_read(hdev, MT7921_PINMUX_0, &val); if (err < 0) return err; val |= 0x11000000; err = btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_0, val, ~0); if (err < 0) return err; err = btmtksdio_mtk_reg_read(hdev, MT7921_PINMUX_1, &val); if (err < 0) return err; val |= 0x00000101; err = btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_1, val, ~0); if (err < 0) return err; hdev->get_data_path_id = btmtksdio_get_data_path_id; hdev->get_codec_config_data = btmtksdio_get_codec_config_data; return err; } static int btmtksdio_reset_setting(struct hci_dev *hdev) { int err; u32 val; err = btmtksdio_mtk_reg_read(hdev, MT7921_PINMUX_1, &val); if (err < 0) return err; val |= 0x20; /* set the pin (bit field 11:8) work as GPIO mode */ err = btmtksdio_mtk_reg_write(hdev, MT7921_PINMUX_1, val, ~0); if (err < 0) return err; err = 
btmtksdio_mtk_reg_read(hdev, MT7921_BTSYS_RST, &val); if (err < 0) return err; val |= MT7921_BTSYS_RST_WITH_GPIO; return btmtksdio_mtk_reg_write(hdev, MT7921_BTSYS_RST, val, ~0); } static int btmtksdio_setup(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); ktime_t calltime, delta, rettime; unsigned long long duration; char fwname[64]; int err, dev_id; u32 fw_version = 0, val; calltime = ktime_get(); set_bit(BTMTKSDIO_HW_TX_READY, &bdev->tx_state); switch (bdev->data->chipid) { case 0x7921: if (test_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state)) { err = btmtksdio_mtk_reg_read(hdev, MT7921_DLSTATUS, &val); if (err < 0) return err; val &= ~BT_DL_STATE; err = btmtksdio_mtk_reg_write(hdev, MT7921_DLSTATUS, val, ~0); if (err < 0) return err; btmtksdio_fw_pmctrl(bdev); msleep(20); btmtksdio_drv_pmctrl(bdev); clear_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state); } err = btmtksdio_mtk_reg_read(hdev, 0x70010200, &dev_id); if (err < 0) { bt_dev_err(hdev, "Failed to get device id (%d)", err); return err; } err = btmtksdio_mtk_reg_read(hdev, 0x80021004, &fw_version); if (err < 0) { bt_dev_err(hdev, "Failed to get fw version (%d)", err); return err; } snprintf(fwname, sizeof(fwname), "mediatek/BT_RAM_CODE_MT%04x_1_%x_hdr.bin", dev_id & 0xffff, (fw_version & 0xff) + 1); err = mt79xx_setup(hdev, fwname); if (err < 0) return err; /* Enable SCO over I2S/PCM */ err = btmtksdio_sco_setting(hdev); if (err < 0) { bt_dev_err(hdev, "Failed to enable SCO setting (%d)", err); return err; } /* Enable WBS with mSBC codec */ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); /* Enable GPIO reset mechanism */ if (bdev->reset) { err = btmtksdio_reset_setting(hdev); if (err < 0) { bt_dev_err(hdev, "Failed to enable Reset setting (%d)", err); devm_gpiod_put(bdev->dev, bdev->reset); bdev->reset = NULL; } } /* Valid LE States quirk for MediaTek 7921 */ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); break; case 0x7663: case 0x7668: err = mt76xx_setup(hdev, bdev->data->fwname); if (err < 0) return err; break; default: return -ENODEV; } rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = (unsigned long long)ktime_to_ns(delta) >> 10; pm_runtime_set_autosuspend_delay(bdev->dev, MTKBTSDIO_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(bdev->dev); err = pm_runtime_set_active(bdev->dev); if (err < 0) return err; /* Default forbid runtime auto suspend, that can be allowed by * enable_autosuspend flag or the PM runtime entry under sysfs. */ pm_runtime_forbid(bdev->dev); pm_runtime_enable(bdev->dev); if (enable_autosuspend) pm_runtime_allow(bdev->dev); bt_dev_info(hdev, "Device setup in %llu usecs", duration); return 0; } static int btmtksdio_shutdown(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); struct btmtk_hci_wmt_params wmt_params; u8 param = 0x0; int err; /* Get back the state to be consistent with the state * in btmtksdio_setup. 
*/ pm_runtime_get_sync(bdev->dev); /* wmt command only works until the reset is complete */ if (test_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state)) goto ignore_wmt_cmd; /* Disable the device */ wmt_params.op = BTMTK_WMT_FUNC_CTRL; wmt_params.flag = 0; wmt_params.dlen = sizeof(param); wmt_params.data = &param; wmt_params.status = NULL; err = mtk_hci_wmt_sync(hdev, &wmt_params); if (err < 0) { bt_dev_err(hdev, "Failed to send wmt func ctrl (%d)", err); return err; } ignore_wmt_cmd: pm_runtime_put_noidle(bdev->dev); pm_runtime_disable(bdev->dev); return 0; } static int btmtksdio_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: hdev->stat.sco_tx++; break; default: return -EILSEQ; } skb_queue_tail(&bdev->txq, skb); schedule_work(&bdev->txrx_work); return 0; } static void btmtksdio_cmd_timeout(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); u32 status; int err; if (!bdev->reset || bdev->data->chipid != 0x7921) return; pm_runtime_get_sync(bdev->dev); if (test_and_set_bit(BTMTKSDIO_HW_RESET_ACTIVE, &bdev->tx_state)) return; sdio_claim_host(bdev->func); sdio_writel(bdev->func, C_INT_EN_CLR, MTK_REG_CHLPCR, NULL); skb_queue_purge(&bdev->txq); cancel_work_sync(&bdev->txrx_work); gpiod_set_value_cansleep(bdev->reset, 1); msleep(100); gpiod_set_value_cansleep(bdev->reset, 0); err = readx_poll_timeout(btmtksdio_chcr_query, bdev, status, status & BT_RST_DONE, 100000, 2000000); if (err < 0) { bt_dev_err(hdev, "Failed to reset (%d)", err); goto err; } clear_bit(BTMTKSDIO_PATCH_ENABLED, &bdev->tx_state); err: sdio_release_host(bdev->func); pm_runtime_put_noidle(bdev->dev); pm_runtime_disable(bdev->dev); hci_reset_dev(hdev); } static bool btmtksdio_sdio_inband_wakeup(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); return device_may_wakeup(bdev->dev); } static bool btmtksdio_sdio_wakeup(struct hci_dev *hdev) { struct btmtksdio_dev *bdev = hci_get_drvdata(hdev); bool may_wakeup = device_may_wakeup(bdev->dev); const struct btmtk_wakeon bt_awake = { .mode = 0x1, .gpo = 0, .active_high = 0x1, .enable_delay = cpu_to_le16(0xc80), .wakeup_delay = cpu_to_le16(0x20), }; if (may_wakeup && bdev->data->chipid == 0x7921) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, 0xfc27, sizeof(bt_awake), &bt_awake, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) may_wakeup = false; else kfree_skb(skb); } return may_wakeup; } static int btmtksdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { struct btmtksdio_dev *bdev; struct hci_dev *hdev; int err; bdev = devm_kzalloc(&func->dev, sizeof(*bdev), GFP_KERNEL); if (!bdev) return -ENOMEM; bdev->data = (void *)id->driver_data; if (!bdev->data) return -ENODEV; bdev->dev = &func->dev; bdev->func = func; INIT_WORK(&bdev->txrx_work, btmtksdio_txrx_work); skb_queue_head_init(&bdev->txq); /* Initialize and register HCI device */ hdev = hci_alloc_dev(); if (!hdev) { dev_err(&func->dev, "Can't allocate HCI device\n"); return -ENOMEM; } bdev->hdev = hdev; hdev->bus = HCI_SDIO; hci_set_drvdata(hdev, bdev); hdev->open = btmtksdio_open; hdev->close = btmtksdio_close; hdev->cmd_timeout = btmtksdio_cmd_timeout; hdev->flush = btmtksdio_flush; hdev->setup = btmtksdio_setup; hdev->shutdown = btmtksdio_shutdown; hdev->send = btmtksdio_send_frame; hdev->wakeup = btmtksdio_sdio_wakeup; /* * If SDIO controller supports wake on 
Bluetooth, sending a wakeon * command is not necessary. */ if (device_can_wakeup(func->card->host->parent)) hdev->wakeup = btmtksdio_sdio_inband_wakeup; else hdev->wakeup = btmtksdio_sdio_wakeup; hdev->set_bdaddr = btmtk_set_bdaddr; SET_HCIDEV_DEV(hdev, &func->dev); hdev->manufacturer = 70; set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); sdio_set_drvdata(func, bdev); err = hci_register_dev(hdev); if (err < 0) { dev_err(&func->dev, "Can't register HCI device\n"); hci_free_dev(hdev); return err; } /* pm_runtime_enable is done only after the firmware has been * downloaded because the core layer may already have enabled * runtime PM for this func, e.g. when host->caps has * MMC_CAP_POWER_OFF_CARD set. */ if (pm_runtime_enabled(bdev->dev)) pm_runtime_disable(bdev->dev); /* As the explanation in drivers/mmc/core/sdio_bus.c tells us: * Unbound SDIO functions are always suspended. * During probe, the function is set active and the usage count * is incremented. If the driver supports runtime PM, * it should call pm_runtime_put_noidle() in its probe routine and * pm_runtime_get_noresume() in its remove routine. * * So, call pm_runtime_put_noidle() here. */ pm_runtime_put_noidle(bdev->dev); err = device_init_wakeup(bdev->dev, true); if (err) bt_dev_err(hdev, "failed to initialize device wakeup"); bdev->dev->of_node = of_find_compatible_node(NULL, NULL, "mediatek,mt7921s-bluetooth"); bdev->reset = devm_gpiod_get_optional(bdev->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(bdev->reset)) err = PTR_ERR(bdev->reset); return err; } static void btmtksdio_remove(struct sdio_func *func) { struct btmtksdio_dev *bdev = sdio_get_drvdata(func); struct hci_dev *hdev; if (!bdev) return; /* Be consistent with the state in btmtksdio_probe */ pm_runtime_get_noresume(bdev->dev); hdev = bdev->hdev; sdio_set_drvdata(func, NULL); hci_unregister_dev(hdev); hci_free_dev(hdev); } #ifdef CONFIG_PM static int btmtksdio_runtime_suspend(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct btmtksdio_dev *bdev; int err; bdev = sdio_get_drvdata(func); if (!bdev) return 0; if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) return 0; sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); err = btmtksdio_fw_pmctrl(bdev); bt_dev_dbg(bdev->hdev, "status (%d) return ownership to device", err); return err; } static int btmtksdio_runtime_resume(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct btmtksdio_dev *bdev; int err; bdev = sdio_get_drvdata(func); if (!bdev) return 0; if (!test_bit(BTMTKSDIO_FUNC_ENABLED, &bdev->tx_state)) return 0; err = btmtksdio_drv_pmctrl(bdev); bt_dev_dbg(bdev->hdev, "status (%d) get ownership from device", err); return err; } static UNIVERSAL_DEV_PM_OPS(btmtksdio_pm_ops, btmtksdio_runtime_suspend, btmtksdio_runtime_resume, NULL); #define BTMTKSDIO_PM_OPS (&btmtksdio_pm_ops) #else /* CONFIG_PM */ #define BTMTKSDIO_PM_OPS NULL #endif /* CONFIG_PM */ static struct sdio_driver btmtksdio_driver = { .name = "btmtksdio", .probe = btmtksdio_probe, .remove = btmtksdio_remove, .id_table = btmtksdio_table, .drv = { .owner = THIS_MODULE, .pm = BTMTKSDIO_PM_OPS, } }; module_sdio_driver(btmtksdio_driver); module_param(enable_autosuspend, bool, 0644); MODULE_PARM_DESC(enable_autosuspend, "Enable autosuspend by default"); MODULE_AUTHOR("Sean Wang <[email protected]>"); MODULE_DESCRIPTION("MediaTek Bluetooth SDIO driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
linux-master
drivers/bluetooth/btmtksdio.c
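A note on btmtksdio_setup() above: the "Device setup in %llu usecs" figure is produced by shifting the nanosecond delta right by 10 bits, i.e. dividing by 1024 rather than 1000, which avoids a 64-bit division at the cost of a slight underestimate. A minimal user-space sketch of this trade-off (the input value is made up for illustration):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical elapsed setup time in nanoseconds */
	uint64_t delta_ns = 123456789;

	uint64_t approx_us = delta_ns >> 10;	/* /1024, as in the driver */
	uint64_t exact_us = delta_ns / 1000;	/* true microseconds */

	/* the shift under-reports by roughly 2.3% */
	printf("approx %llu us, exact %llu us\n",
	       (unsigned long long)approx_us,
	       (unsigned long long)exact_us);
	return 0;
}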
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth HCI UART driver * * Copyright (C) 2002-2003 Fabrizio Gennari <[email protected]> * Copyright (C) 2004-2005 Marcel Holtmann <[email protected]> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/fcntl.h> #include <linux/interrupt.h> #include <linux/ptrace.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/signal.h> #include <linux/ioctl.h> #include <linux/skbuff.h> #include <linux/bitrev.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" static bool txcrc = true; static bool hciextn = true; #define BCSP_TXWINSIZE 4 #define BCSP_ACK_PKT 0x05 #define BCSP_LE_PKT 0x06 struct bcsp_struct { struct sk_buff_head unack; /* Unack'ed packets queue */ struct sk_buff_head rel; /* Reliable packets queue */ struct sk_buff_head unrel; /* Unreliable packets queue */ unsigned long rx_count; struct sk_buff *rx_skb; u8 rxseq_txack; /* rxseq == txack. */ u8 rxack; /* Last packet sent by us that the peer ack'ed */ struct timer_list tbcsp; struct hci_uart *hu; enum { BCSP_W4_PKT_DELIMITER, BCSP_W4_PKT_START, BCSP_W4_BCSP_HDR, BCSP_W4_DATA, BCSP_W4_CRC } rx_state; enum { BCSP_ESCSTATE_NOESC, BCSP_ESCSTATE_ESC } rx_esc_state; u8 use_crc; u16 message_crc; u8 txack_req; /* Do we need to send ack's to the peer? */ /* Reliable packet sequence number - used to assign seq to each rel pkt. */ u8 msgq_txseq; }; /* ---- BCSP CRC calculation ---- */ /* Table for calculating CRC for polynomial 0x1021, LSB processed first, * initial value 0xffff, bits shifted in reverse order. */ static const u16 crc_table[] = { 0x0000, 0x1081, 0x2102, 0x3183, 0x4204, 0x5285, 0x6306, 0x7387, 0x8408, 0x9489, 0xa50a, 0xb58b, 0xc60c, 0xd68d, 0xe70e, 0xf78f }; /* Initialise the crc calculator */ #define BCSP_CRC_INIT(x) x = 0xffff /* Update crc with next data byte * * Implementation note * The data byte is treated as two nibbles. The crc is generated * in reverse, i.e., bits are fed into the register from the top. 
*/ static void bcsp_crc_update(u16 *crc, u8 d) { u16 reg = *crc; reg = (reg >> 4) ^ crc_table[(reg ^ d) & 0x000f]; reg = (reg >> 4) ^ crc_table[(reg ^ (d >> 4)) & 0x000f]; *crc = reg; } /* ---- BCSP core ---- */ static void bcsp_slip_msgdelim(struct sk_buff *skb) { const char pkt_delim = 0xc0; skb_put_data(skb, &pkt_delim, 1); } static void bcsp_slip_one_byte(struct sk_buff *skb, u8 c) { const char esc_c0[2] = { 0xdb, 0xdc }; const char esc_db[2] = { 0xdb, 0xdd }; switch (c) { case 0xc0: skb_put_data(skb, &esc_c0, 2); break; case 0xdb: skb_put_data(skb, &esc_db, 2); break; default: skb_put_data(skb, &c, 1); } } static int bcsp_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct bcsp_struct *bcsp = hu->priv; if (skb->len > 0xFFF) { BT_ERR("Packet too long"); kfree_skb(skb); return 0; } switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: case HCI_COMMAND_PKT: skb_queue_tail(&bcsp->rel, skb); break; case HCI_SCODATA_PKT: skb_queue_tail(&bcsp->unrel, skb); break; default: BT_ERR("Unknown packet type"); kfree_skb(skb); break; } return 0; } static struct sk_buff *bcsp_prepare_pkt(struct bcsp_struct *bcsp, u8 *data, int len, int pkt_type) { struct sk_buff *nskb; u8 hdr[4], chan; u16 BCSP_CRC_INIT(bcsp_txmsg_crc); int rel, i; switch (pkt_type) { case HCI_ACLDATA_PKT: chan = 6; /* BCSP ACL channel */ rel = 1; /* reliable channel */ break; case HCI_COMMAND_PKT: chan = 5; /* BCSP cmd/evt channel */ rel = 1; /* reliable channel */ break; case HCI_SCODATA_PKT: chan = 7; /* BCSP SCO channel */ rel = 0; /* unreliable channel */ break; case BCSP_LE_PKT: chan = 1; /* BCSP LE channel */ rel = 0; /* unreliable channel */ break; case BCSP_ACK_PKT: chan = 0; /* BCSP internal channel */ rel = 0; /* unreliable channel */ break; default: BT_ERR("Unknown packet type"); return NULL; } if (hciextn && chan == 5) { __le16 opcode = ((struct hci_command_hdr *)data)->opcode; /* Vendor specific commands */ if (hci_opcode_ogf(__le16_to_cpu(opcode)) == 0x3f) { u8 desc = *(data + HCI_COMMAND_HDR_SIZE); if ((desc & 0xf0) == 0xc0) { data += HCI_COMMAND_HDR_SIZE + 1; len -= HCI_COMMAND_HDR_SIZE + 1; chan = desc & 0x0f; } } } /* Max len of packet: (original len +4(bcsp hdr) +2(crc))*2 * (because bytes 0xc0 and 0xdb are escaped, worst case is * when the packet is all made of 0xc0 and 0xdb :) ) * + 2 (0xc0 delimiters at start and end). 
*/ nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC); if (!nskb) return NULL; hci_skb_pkt_type(nskb) = pkt_type; bcsp_slip_msgdelim(nskb); hdr[0] = bcsp->rxseq_txack << 3; bcsp->txack_req = 0; BT_DBG("We request packet no %u to card", bcsp->rxseq_txack); if (rel) { hdr[0] |= 0x80 + bcsp->msgq_txseq; BT_DBG("Sending packet with seqno %u", bcsp->msgq_txseq); bcsp->msgq_txseq = (bcsp->msgq_txseq + 1) & 0x07; } if (bcsp->use_crc) hdr[0] |= 0x40; hdr[1] = ((len << 4) & 0xff) | chan; hdr[2] = len >> 4; hdr[3] = ~(hdr[0] + hdr[1] + hdr[2]); /* Put BCSP header */ for (i = 0; i < 4; i++) { bcsp_slip_one_byte(nskb, hdr[i]); if (bcsp->use_crc) bcsp_crc_update(&bcsp_txmsg_crc, hdr[i]); } /* Put payload */ for (i = 0; i < len; i++) { bcsp_slip_one_byte(nskb, data[i]); if (bcsp->use_crc) bcsp_crc_update(&bcsp_txmsg_crc, data[i]); } /* Put CRC */ if (bcsp->use_crc) { bcsp_txmsg_crc = bitrev16(bcsp_txmsg_crc); bcsp_slip_one_byte(nskb, (u8)((bcsp_txmsg_crc >> 8) & 0x00ff)); bcsp_slip_one_byte(nskb, (u8)(bcsp_txmsg_crc & 0x00ff)); } bcsp_slip_msgdelim(nskb); return nskb; } /* This is a rewrite of pkt_avail in ABCSP */ static struct sk_buff *bcsp_dequeue(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; unsigned long flags; struct sk_buff *skb; /* First of all, check for unreliable messages in the queue, * since they have priority */ skb = skb_dequeue(&bcsp->unrel); if (skb != NULL) { struct sk_buff *nskb; nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, hci_skb_pkt_type(skb)); if (nskb) { kfree_skb(skb); return nskb; } else { skb_queue_head(&bcsp->unrel, skb); BT_ERR("Could not dequeue pkt because alloc_skb failed"); } } /* Now, try to send a reliable pkt. We can only send a * reliable packet if the number of packets sent but not yet ack'ed * is < than the winsize */ spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); if (bcsp->unack.qlen < BCSP_TXWINSIZE) { skb = skb_dequeue(&bcsp->rel); if (skb != NULL) { struct sk_buff *nskb; nskb = bcsp_prepare_pkt(bcsp, skb->data, skb->len, hci_skb_pkt_type(skb)); if (nskb) { __skb_queue_tail(&bcsp->unack, skb); mod_timer(&bcsp->tbcsp, jiffies + HZ / 4); spin_unlock_irqrestore(&bcsp->unack.lock, flags); return nskb; } else { skb_queue_head(&bcsp->rel, skb); BT_ERR("Could not dequeue pkt because alloc_skb failed"); } } } spin_unlock_irqrestore(&bcsp->unack.lock, flags); /* We could not send a reliable packet, either because there are * none or because there are too many unack'ed pkts. Did we receive * any packets we have not acknowledged yet ? 
*/ if (bcsp->txack_req) { /* if so, craft an empty ACK pkt and send it on BCSP unreliable * channel 0 */ struct sk_buff *nskb = bcsp_prepare_pkt(bcsp, NULL, 0, BCSP_ACK_PKT); return nskb; } /* We have nothing to send */ return NULL; } static int bcsp_flush(struct hci_uart *hu) { BT_DBG("hu %p", hu); return 0; } /* Remove ack'ed packets */ static void bcsp_pkt_cull(struct bcsp_struct *bcsp) { struct sk_buff *skb, *tmp; unsigned long flags; int i, pkts_to_be_removed; u8 seqno; spin_lock_irqsave(&bcsp->unack.lock, flags); pkts_to_be_removed = skb_queue_len(&bcsp->unack); seqno = bcsp->msgq_txseq; while (pkts_to_be_removed) { if (bcsp->rxack == seqno) break; pkts_to_be_removed--; seqno = (seqno - 1) & 0x07; } if (bcsp->rxack != seqno) BT_ERR("Peer acked invalid packet"); BT_DBG("Removing %u pkts out of %u, up to seqno %u", pkts_to_be_removed, skb_queue_len(&bcsp->unack), (seqno - 1) & 0x07); i = 0; skb_queue_walk_safe(&bcsp->unack, skb, tmp) { if (i >= pkts_to_be_removed) break; i++; __skb_unlink(skb, &bcsp->unack); dev_kfree_skb_irq(skb); } if (skb_queue_empty(&bcsp->unack)) del_timer(&bcsp->tbcsp); spin_unlock_irqrestore(&bcsp->unack.lock, flags); if (i != pkts_to_be_removed) BT_ERR("Removed only %u out of %u pkts", i, pkts_to_be_removed); } /* Handle BCSP link-establishment packets. When we * detect a "sync" packet, symptom that the BT module has reset, * we do nothing :) (yet) */ static void bcsp_handle_le_pkt(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; u8 conf_pkt[4] = { 0xad, 0xef, 0xac, 0xed }; u8 conf_rsp_pkt[4] = { 0xde, 0xad, 0xd0, 0xd0 }; u8 sync_pkt[4] = { 0xda, 0xdc, 0xed, 0xed }; /* spot "conf" pkts and reply with a "conf rsp" pkt */ if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 && !memcmp(&bcsp->rx_skb->data[4], conf_pkt, 4)) { struct sk_buff *nskb = alloc_skb(4, GFP_ATOMIC); BT_DBG("Found a LE conf pkt"); if (!nskb) return; skb_put_data(nskb, conf_rsp_pkt, 4); hci_skb_pkt_type(nskb) = BCSP_LE_PKT; skb_queue_head(&bcsp->unrel, nskb); hci_uart_tx_wakeup(hu); } /* Spot "sync" pkts. If we find one...disaster! 
*/ else if (bcsp->rx_skb->data[1] >> 4 == 4 && bcsp->rx_skb->data[2] == 0 && !memcmp(&bcsp->rx_skb->data[4], sync_pkt, 4)) { BT_ERR("Found a LE sync pkt, card has reset"); } } static inline void bcsp_unslip_one_byte(struct bcsp_struct *bcsp, unsigned char byte) { const u8 c0 = 0xc0, db = 0xdb; switch (bcsp->rx_esc_state) { case BCSP_ESCSTATE_NOESC: switch (byte) { case 0xdb: bcsp->rx_esc_state = BCSP_ESCSTATE_ESC; break; default: skb_put_data(bcsp->rx_skb, &byte, 1); if ((bcsp->rx_skb->data[0] & 0x40) != 0 && bcsp->rx_state != BCSP_W4_CRC) bcsp_crc_update(&bcsp->message_crc, byte); bcsp->rx_count--; } break; case BCSP_ESCSTATE_ESC: switch (byte) { case 0xdc: skb_put_data(bcsp->rx_skb, &c0, 1); if ((bcsp->rx_skb->data[0] & 0x40) != 0 && bcsp->rx_state != BCSP_W4_CRC) bcsp_crc_update(&bcsp->message_crc, 0xc0); bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; bcsp->rx_count--; break; case 0xdd: skb_put_data(bcsp->rx_skb, &db, 1); if ((bcsp->rx_skb->data[0] & 0x40) != 0 && bcsp->rx_state != BCSP_W4_CRC) bcsp_crc_update(&bcsp->message_crc, 0xdb); bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; bcsp->rx_count--; break; default: BT_ERR("Invalid byte %02x after esc byte", byte); kfree_skb(bcsp->rx_skb); bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; } } } static void bcsp_complete_rx_pkt(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; int pass_up = 0; if (bcsp->rx_skb->data[0] & 0x80) { /* reliable pkt */ BT_DBG("Received seqno %u from card", bcsp->rxseq_txack); /* check the rx sequence number is as expected */ if ((bcsp->rx_skb->data[0] & 0x07) == bcsp->rxseq_txack) { bcsp->rxseq_txack++; bcsp->rxseq_txack %= 0x8; } else { /* handle re-transmitted packet or * when packet was missed */ BT_ERR("Out-of-order packet arrived, got %u expected %u", bcsp->rx_skb->data[0] & 0x07, bcsp->rxseq_txack); /* do not process out-of-order packet payload */ pass_up = 2; } /* send current txack value to all received reliable packets */ bcsp->txack_req = 1; /* If needed, transmit an ack pkt */ hci_uart_tx_wakeup(hu); } bcsp->rxack = (bcsp->rx_skb->data[0] >> 3) & 0x07; BT_DBG("Request for pkt %u from card", bcsp->rxack); /* handle received ACK indications, * including those from out-of-order packets */ bcsp_pkt_cull(bcsp); if (pass_up != 2) { if ((bcsp->rx_skb->data[1] & 0x0f) == 6 && (bcsp->rx_skb->data[0] & 0x80)) { hci_skb_pkt_type(bcsp->rx_skb) = HCI_ACLDATA_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 5 && (bcsp->rx_skb->data[0] & 0x80)) { hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 7) { hci_skb_pkt_type(bcsp->rx_skb) = HCI_SCODATA_PKT; pass_up = 1; } else if ((bcsp->rx_skb->data[1] & 0x0f) == 1 && !(bcsp->rx_skb->data[0] & 0x80)) { bcsp_handle_le_pkt(hu); pass_up = 0; } else { pass_up = 0; } } if (pass_up == 0) { struct hci_event_hdr hdr; u8 desc = (bcsp->rx_skb->data[1] & 0x0f); if (desc != 0 && desc != 1) { if (hciextn) { desc |= 0xc0; skb_pull(bcsp->rx_skb, 4); memcpy(skb_push(bcsp->rx_skb, 1), &desc, 1); hdr.evt = 0xff; hdr.plen = bcsp->rx_skb->len; memcpy(skb_push(bcsp->rx_skb, HCI_EVENT_HDR_SIZE), &hdr, HCI_EVENT_HDR_SIZE); hci_skb_pkt_type(bcsp->rx_skb) = HCI_EVENT_PKT; hci_recv_frame(hu->hdev, bcsp->rx_skb); } else { BT_ERR("Packet for unknown channel (%u %s)", bcsp->rx_skb->data[1] & 0x0f, bcsp->rx_skb->data[0] & 0x80 ? 
"reliable" : "unreliable"); kfree_skb(bcsp->rx_skb); } } else kfree_skb(bcsp->rx_skb); } else if (pass_up == 1) { /* Pull out BCSP hdr */ skb_pull(bcsp->rx_skb, 4); hci_recv_frame(hu->hdev, bcsp->rx_skb); } else { /* ignore packet payload of already ACKed re-transmitted * packets or when a packet was missed in the BCSP window */ kfree_skb(bcsp->rx_skb); } bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_skb = NULL; } static u16 bscp_get_crc(struct bcsp_struct *bcsp) { return get_unaligned_be16(&bcsp->rx_skb->data[bcsp->rx_skb->len - 2]); } /* Recv data */ static int bcsp_recv(struct hci_uart *hu, const void *data, int count) { struct bcsp_struct *bcsp = hu->priv; const unsigned char *ptr; BT_DBG("hu %p count %d rx_state %d rx_count %ld", hu, count, bcsp->rx_state, bcsp->rx_count); ptr = data; while (count) { if (bcsp->rx_count) { if (*ptr == 0xc0) { BT_ERR("Short BCSP packet"); kfree_skb(bcsp->rx_skb); bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_START; bcsp->rx_count = 0; } else bcsp_unslip_one_byte(bcsp, *ptr); ptr++; count--; continue; } switch (bcsp->rx_state) { case BCSP_W4_BCSP_HDR: if ((0xff & (u8)~(bcsp->rx_skb->data[0] + bcsp->rx_skb->data[1] + bcsp->rx_skb->data[2])) != bcsp->rx_skb->data[3]) { BT_ERR("Error in BCSP hdr checksum"); kfree_skb(bcsp->rx_skb); bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; } bcsp->rx_state = BCSP_W4_DATA; bcsp->rx_count = (bcsp->rx_skb->data[1] >> 4) + (bcsp->rx_skb->data[2] << 4); /* May be 0 */ continue; case BCSP_W4_DATA: if (bcsp->rx_skb->data[0] & 0x40) { /* pkt with crc */ bcsp->rx_state = BCSP_W4_CRC; bcsp->rx_count = 2; } else bcsp_complete_rx_pkt(hu); continue; case BCSP_W4_CRC: if (bitrev16(bcsp->message_crc) != bscp_get_crc(bcsp)) { BT_ERR("Checksum failed: computed %04x received %04x", bitrev16(bcsp->message_crc), bscp_get_crc(bcsp)); kfree_skb(bcsp->rx_skb); bcsp->rx_skb = NULL; bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; continue; } skb_trim(bcsp->rx_skb, bcsp->rx_skb->len - 2); bcsp_complete_rx_pkt(hu); continue; case BCSP_W4_PKT_DELIMITER: switch (*ptr) { case 0xc0: bcsp->rx_state = BCSP_W4_PKT_START; break; default: /*BT_ERR("Ignoring byte %02x", *ptr);*/ break; } ptr++; count--; break; case BCSP_W4_PKT_START: switch (*ptr) { case 0xc0: ptr++; count--; break; default: bcsp->rx_state = BCSP_W4_BCSP_HDR; bcsp->rx_count = 4; bcsp->rx_esc_state = BCSP_ESCSTATE_NOESC; BCSP_CRC_INIT(bcsp->message_crc); /* Do not increment ptr or decrement count * Allocate packet. Max len of a BCSP pkt= * 0xFFF (payload) +4 (header) +2 (crc) */ bcsp->rx_skb = bt_skb_alloc(0x1005, GFP_ATOMIC); if (!bcsp->rx_skb) { BT_ERR("Can't allocate mem for new packet"); bcsp->rx_state = BCSP_W4_PKT_DELIMITER; bcsp->rx_count = 0; return 0; } break; } break; } } return count; } /* Arrange to retransmit all messages in the relq. 
*/ static void bcsp_timed_event(struct timer_list *t) { struct bcsp_struct *bcsp = from_timer(bcsp, t, tbcsp); struct hci_uart *hu = bcsp->hu; struct sk_buff *skb; unsigned long flags; BT_DBG("hu %p retransmitting %u pkts", hu, bcsp->unack.qlen); spin_lock_irqsave_nested(&bcsp->unack.lock, flags, SINGLE_DEPTH_NESTING); while ((skb = __skb_dequeue_tail(&bcsp->unack)) != NULL) { bcsp->msgq_txseq = (bcsp->msgq_txseq - 1) & 0x07; skb_queue_head(&bcsp->rel, skb); } spin_unlock_irqrestore(&bcsp->unack.lock, flags); hci_uart_tx_wakeup(hu); } static int bcsp_open(struct hci_uart *hu) { struct bcsp_struct *bcsp; BT_DBG("hu %p", hu); bcsp = kzalloc(sizeof(*bcsp), GFP_KERNEL); if (!bcsp) return -ENOMEM; hu->priv = bcsp; bcsp->hu = hu; skb_queue_head_init(&bcsp->unack); skb_queue_head_init(&bcsp->rel); skb_queue_head_init(&bcsp->unrel); timer_setup(&bcsp->tbcsp, bcsp_timed_event, 0); bcsp->rx_state = BCSP_W4_PKT_DELIMITER; if (txcrc) bcsp->use_crc = 1; return 0; } static int bcsp_close(struct hci_uart *hu) { struct bcsp_struct *bcsp = hu->priv; timer_shutdown_sync(&bcsp->tbcsp); hu->priv = NULL; BT_DBG("hu %p", hu); skb_queue_purge(&bcsp->unack); skb_queue_purge(&bcsp->rel); skb_queue_purge(&bcsp->unrel); if (bcsp->rx_skb) { kfree_skb(bcsp->rx_skb); bcsp->rx_skb = NULL; } kfree(bcsp); return 0; } static const struct hci_uart_proto bcsp = { .id = HCI_UART_BCSP, .name = "BCSP", .open = bcsp_open, .close = bcsp_close, .enqueue = bcsp_enqueue, .dequeue = bcsp_dequeue, .recv = bcsp_recv, .flush = bcsp_flush }; int __init bcsp_init(void) { return hci_uart_register_proto(&bcsp); } int __exit bcsp_deinit(void) { return hci_uart_unregister_proto(&bcsp); } module_param(txcrc, bool, 0644); MODULE_PARM_DESC(txcrc, "Transmit CRC with every BCSP packet"); module_param(hciextn, bool, 0644); MODULE_PARM_DESC(hciextn, "Convert HCI Extensions into BCSP packets");
linux-master
drivers/bluetooth/hci_bcsp.c
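The BCSP CRC above is computed nibble-wise against a 16-entry table for the reflected CRC-CCITT polynomial 0x1021 (initial value 0xffff) and then bit-reversed before going on the wire. A self-contained user-space model: the table and update step are copied from the driver, while bitrev16() is re-implemented here because it is a kernel helper.

#include <stdio.h>
#include <stdint.h>

static const uint16_t crc_table[] = {
	0x0000, 0x1081, 0x2102, 0x3183, 0x4204, 0x5285, 0x6306, 0x7387,
	0x8408, 0x9489, 0xa50a, 0xb58b, 0xc60c, 0xd68d, 0xe70e, 0xf78f
};

/* feed one byte into the CRC, low nibble first, as bcsp_crc_update() does */
static void crc_update(uint16_t *crc, uint8_t d)
{
	uint16_t reg = *crc;

	reg = (reg >> 4) ^ crc_table[(reg ^ d) & 0x000f];
	reg = (reg >> 4) ^ crc_table[(reg ^ (d >> 4)) & 0x000f];
	*crc = reg;
}

/* stand-in for the kernel's bitrev16() helper */
static uint16_t bitrev16(uint16_t x)
{
	uint16_t r = 0;
	int i;

	for (i = 0; i < 16; i++)
		if (x & (1u << i))
			r |= 1u << (15 - i);
	return r;
}

int main(void)
{
	const uint8_t pkt[] = { 0x40, 0x41, 0x42 };	/* arbitrary payload */
	uint16_t crc = 0xffff;				/* BCSP_CRC_INIT */
	size_t i;

	for (i = 0; i < sizeof(pkt); i++)
		crc_update(&crc, pkt[i]);

	/* the driver bit-reverses the result before appending it */
	printf("crc on wire: 0x%04x\n", (unsigned)bitrev16(crc));
	return 0;
}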
// SPDX-License-Identifier: GPL-2.0-only /* * Bluetooth support for Qualcomm Atheros chips * * Copyright (c) 2015 The Linux Foundation. All rights reserved. */ #include <linux/module.h> #include <linux/firmware.h> #include <linux/vmalloc.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btqca.h" #define VERSION "0.1" int qca_read_soc_version(struct hci_dev *hdev, struct qca_btsoc_version *ver, enum qca_btsoc_type soc_type) { struct sk_buff *skb; struct edl_event_hdr *edl; char cmd; int err = 0; u8 event_type = HCI_EV_VENDOR; u8 rlen = sizeof(*edl) + sizeof(*ver); u8 rtype = EDL_APP_VER_RES_EVT; bt_dev_dbg(hdev, "QCA Version Request"); /* Unlike other SoCs, which send the version command response as the * payload of a vendor-specific event, WCN3991 sends the version * command response as the payload of a command complete event. */ if (soc_type >= QCA_WCN3991) { event_type = 0; rlen += 1; rtype = EDL_PATCH_VER_REQ_CMD; } cmd = EDL_PATCH_VER_REQ_CMD; skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN, &cmd, event_type, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "Reading QCA version information failed (%d)", err); return err; } if (skb->len != rlen) { bt_dev_err(hdev, "QCA Version size mismatch len %d", skb->len); err = -EILSEQ; goto out; } edl = (struct edl_event_hdr *)(skb->data); if (!edl) { bt_dev_err(hdev, "QCA TLV with no header"); err = -EILSEQ; goto out; } if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) { bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp, edl->rtype); err = -EIO; goto out; } if (soc_type >= QCA_WCN3991) memcpy(ver, edl->data + 1, sizeof(*ver)); else memcpy(ver, &edl->data, sizeof(*ver)); bt_dev_info(hdev, "QCA Product ID :0x%08x", le32_to_cpu(ver->product_id)); bt_dev_info(hdev, "QCA SOC Version :0x%08x", le32_to_cpu(ver->soc_id)); bt_dev_info(hdev, "QCA ROM Version :0x%08x", le16_to_cpu(ver->rom_ver)); bt_dev_info(hdev, "QCA Patch Version:0x%08x", le16_to_cpu(ver->patch_ver)); if (ver->soc_id == 0 || ver->rom_ver == 0) err = -EILSEQ; out: kfree_skb(skb); if (err) bt_dev_err(hdev, "QCA Failed to get version (%d)", err); return err; } EXPORT_SYMBOL_GPL(qca_read_soc_version); static int qca_read_fw_build_info(struct hci_dev *hdev) { struct sk_buff *skb; struct edl_event_hdr *edl; char cmd, build_label[QCA_FW_BUILD_VER_LEN]; int build_lbl_len, err = 0; bt_dev_dbg(hdev, "QCA read fw build info"); cmd = EDL_GET_BUILD_INFO_CMD; skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, EDL_PATCH_CMD_LEN, &cmd, 0, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "Reading QCA fw build info failed (%d)", err); return err; } edl = (struct edl_event_hdr *)(skb->data); if (!edl) { bt_dev_err(hdev, "QCA read fw build info with no header"); err = -EILSEQ; goto out; } if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != EDL_GET_BUILD_INFO_CMD) { bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp, edl->rtype); err = -EIO; goto out; } build_lbl_len = edl->data[0]; /* Only report the build label when it fits; build_label would * otherwise be passed to hci_set_fw_info() uninitialized. */ if (build_lbl_len <= QCA_FW_BUILD_VER_LEN - 1) { memcpy(build_label, edl->data + 1, build_lbl_len); *(build_label + build_lbl_len) = '\0'; hci_set_fw_info(hdev, "%s", build_label); } out: kfree_skb(skb); return err; } static int qca_send_patch_config_cmd(struct hci_dev *hdev) { const u8 cmd[] = { EDL_PATCH_CONFIG_CMD, 0x01, 0, 0, 0 }; struct sk_buff *skb; struct edl_event_hdr *edl; int err; bt_dev_dbg(hdev, "QCA Patch config"); skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, sizeof(cmd), cmd, HCI_EV_VENDOR,
HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "Sending QCA Patch config failed (%d)", err); return err; } if (skb->len != 2) { bt_dev_err(hdev, "QCA Patch config cmd size mismatch len %d", skb->len); err = -EILSEQ; goto out; } edl = (struct edl_event_hdr *)(skb->data); if (!edl) { bt_dev_err(hdev, "QCA Patch config with no header"); err = -EILSEQ; goto out; } if (edl->cresp != EDL_PATCH_CONFIG_RES_EVT || edl->rtype != EDL_PATCH_CONFIG_CMD) { bt_dev_err(hdev, "QCA Wrong packet received %d %d", edl->cresp, edl->rtype); err = -EIO; goto out; } err = 0; out: kfree_skb(skb); return err; } static int qca_send_reset(struct hci_dev *hdev) { struct sk_buff *skb; int err; bt_dev_dbg(hdev, "QCA HCI_RESET"); skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "QCA Reset failed (%d)", err); return err; } kfree_skb(skb); return 0; } int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) { struct sk_buff *skb; int err; bt_dev_dbg(hdev, "QCA pre shutdown cmd"); skb = __hci_cmd_sync_ev(hdev, QCA_PRE_SHUTDOWN_CMD, 0, NULL, HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "QCA preshutdown_cmd failed (%d)", err); return err; } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(qca_send_pre_shutdown_cmd); static void qca_tlv_check_data(struct hci_dev *hdev, struct qca_fw_config *config, u8 *fw_data, enum qca_btsoc_type soc_type) { const u8 *data; u32 type_len; u16 tag_id, tag_len; int idx, length; struct tlv_type_hdr *tlv; struct tlv_type_patch *tlv_patch; struct tlv_type_nvm *tlv_nvm; uint8_t nvm_baud_rate = config->user_baud_rate; config->dnld_mode = QCA_SKIP_EVT_NONE; config->dnld_type = QCA_SKIP_EVT_NONE; switch (config->type) { case ELF_TYPE_PATCH: config->dnld_mode = QCA_SKIP_EVT_VSE_CC; config->dnld_type = QCA_SKIP_EVT_VSE_CC; bt_dev_dbg(hdev, "File Class : 0x%x", fw_data[4]); bt_dev_dbg(hdev, "Data Encoding : 0x%x", fw_data[5]); bt_dev_dbg(hdev, "File version : 0x%x", fw_data[6]); break; case TLV_TYPE_PATCH: tlv = (struct tlv_type_hdr *)fw_data; type_len = le32_to_cpu(tlv->type_len); tlv_patch = (struct tlv_type_patch *)tlv->data; /* For Rome version 1.1 to 3.1, all segment commands * are acked by a vendor specific event (VSE). * For Rome >= 3.2, the download mode field indicates * if VSE is skipped by the controller. * In case VSE is skipped, only the last segment is acked. 
*/ config->dnld_mode = tlv_patch->download_mode; config->dnld_type = config->dnld_mode; BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff); BT_DBG("Total Length : %d bytes", le32_to_cpu(tlv_patch->total_size)); BT_DBG("Patch Data Length : %d bytes", le32_to_cpu(tlv_patch->data_length)); BT_DBG("Signing Format Version : 0x%x", tlv_patch->format_version); BT_DBG("Signature Algorithm : 0x%x", tlv_patch->signature); BT_DBG("Download mode : 0x%x", tlv_patch->download_mode); BT_DBG("Reserved : 0x%x", tlv_patch->reserved1); BT_DBG("Product ID : 0x%04x", le16_to_cpu(tlv_patch->product_id)); BT_DBG("Rom Build Version : 0x%04x", le16_to_cpu(tlv_patch->rom_build)); BT_DBG("Patch Version : 0x%04x", le16_to_cpu(tlv_patch->patch_version)); BT_DBG("Reserved : 0x%x", le16_to_cpu(tlv_patch->reserved2)); BT_DBG("Patch Entry Address : 0x%x", le32_to_cpu(tlv_patch->entry)); break; case TLV_TYPE_NVM: tlv = (struct tlv_type_hdr *)fw_data; type_len = le32_to_cpu(tlv->type_len); length = (type_len >> 8) & 0x00ffffff; BT_DBG("TLV Type\t\t : 0x%x", type_len & 0x000000ff); BT_DBG("Length\t\t : %d bytes", length); idx = 0; data = tlv->data; while (idx < length) { tlv_nvm = (struct tlv_type_nvm *)(data + idx); tag_id = le16_to_cpu(tlv_nvm->tag_id); tag_len = le16_to_cpu(tlv_nvm->tag_len); /* Update NVM tags as needed */ switch (tag_id) { case EDL_TAG_ID_HCI: /* HCI transport layer parameters, * enabling software inband sleep * on the controller side. */ tlv_nvm->data[0] |= 0x80; /* UART Baud Rate */ if (soc_type >= QCA_WCN3991) tlv_nvm->data[1] = nvm_baud_rate; else tlv_nvm->data[2] = nvm_baud_rate; break; case EDL_TAG_ID_DEEP_SLEEP: /* Sleep enable mask, * enabling the deep sleep feature on the controller. */ tlv_nvm->data[0] |= 0x01; break; } idx += (sizeof(u16) + sizeof(u16) + 8 + tag_len); } break; default: BT_ERR("Unknown TLV type %d", config->type); break; } } static int qca_tlv_send_segment(struct hci_dev *hdev, int seg_size, const u8 *data, enum qca_tlv_dnld_mode mode, enum qca_btsoc_type soc_type) { struct sk_buff *skb; struct edl_event_hdr *edl; struct tlv_seg_resp *tlv_resp; u8 cmd[MAX_SIZE_PER_TLV_SEGMENT + 2]; int err = 0; u8 event_type = HCI_EV_VENDOR; u8 rlen = (sizeof(*edl) + sizeof(*tlv_resp)); u8 rtype = EDL_TVL_DNLD_RES_EVT; cmd[0] = EDL_PATCH_TLV_REQ_CMD; cmd[1] = seg_size; memcpy(cmd + 2, data, seg_size); if (mode == QCA_SKIP_EVT_VSE_CC || mode == QCA_SKIP_EVT_VSE) return __hci_cmd_send(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd); /* Unlike other SoCs, which ack each TLV segment with a vendor-specific * event, WCN3991 sends the response as the payload of a command * complete event. 
*/ if (soc_type >= QCA_WCN3991) { event_type = 0; rlen = sizeof(*edl); rtype = EDL_PATCH_TLV_REQ_CMD; } skb = __hci_cmd_sync_ev(hdev, EDL_PATCH_CMD_OPCODE, seg_size + 2, cmd, event_type, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "QCA Failed to send TLV segment (%d)", err); return err; } if (skb->len != rlen) { bt_dev_err(hdev, "QCA TLV response size mismatch"); err = -EILSEQ; goto out; } edl = (struct edl_event_hdr *)(skb->data); if (!edl) { bt_dev_err(hdev, "TLV with no header"); err = -EILSEQ; goto out; } if (edl->cresp != EDL_CMD_REQ_RES_EVT || edl->rtype != rtype) { bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x", edl->cresp, edl->rtype); err = -EIO; } if (soc_type >= QCA_WCN3991) goto out; tlv_resp = (struct tlv_seg_resp *)(edl->data); if (tlv_resp->result) { bt_dev_err(hdev, "QCA TLV with error stat 0x%x rtype 0x%x (0x%x)", edl->cresp, edl->rtype, tlv_resp->result); } out: kfree_skb(skb); return err; } static int qca_inject_cmd_complete_event(struct hci_dev *hdev) { struct hci_event_hdr *hdr; struct hci_ev_cmd_complete *evt; struct sk_buff *skb; skb = bt_skb_alloc(sizeof(*hdr) + sizeof(*evt) + 1, GFP_KERNEL); if (!skb) return -ENOMEM; hdr = skb_put(skb, sizeof(*hdr)); hdr->evt = HCI_EV_CMD_COMPLETE; hdr->plen = sizeof(*evt) + 1; evt = skb_put(skb, sizeof(*evt)); evt->ncmd = 1; evt->opcode = cpu_to_le16(QCA_HCI_CC_OPCODE); skb_put_u8(skb, QCA_HCI_CC_SUCCESS); hci_skb_pkt_type(skb) = HCI_EVENT_PKT; return hci_recv_frame(hdev, skb); } static int qca_download_firmware(struct hci_dev *hdev, struct qca_fw_config *config, enum qca_btsoc_type soc_type, u8 rom_ver) { const struct firmware *fw; u8 *data; const u8 *segment; int ret, size, remain, i = 0; bt_dev_info(hdev, "QCA Downloading %s", config->fwname); ret = request_firmware(&fw, config->fwname, &hdev->dev); if (ret) { /* For WCN6750, if the mbn file is not present then fall back to * the tlv file. */ if (soc_type == QCA_WCN6750 && config->type == ELF_TYPE_PATCH) { bt_dev_dbg(hdev, "QCA Failed to request file: %s (%d)", config->fwname, ret); config->type = TLV_TYPE_PATCH; snprintf(config->fwname, sizeof(config->fwname), "qca/msbtfw%02x.tlv", rom_ver); bt_dev_info(hdev, "QCA Downloading %s", config->fwname); ret = request_firmware(&fw, config->fwname, &hdev->dev); if (ret) { bt_dev_err(hdev, "QCA Failed to request file: %s (%d)", config->fwname, ret); return ret; } } else { bt_dev_err(hdev, "QCA Failed to request file: %s (%d)", config->fwname, ret); return ret; } } size = fw->size; data = vmalloc(fw->size); if (!data) { bt_dev_err(hdev, "QCA Failed to allocate memory for file: %s", config->fwname); release_firmware(fw); return -ENOMEM; } memcpy(data, fw->data, size); release_firmware(fw); qca_tlv_check_data(hdev, config, data, soc_type); segment = data; remain = size; while (remain > 0) { int segsize = min(MAX_SIZE_PER_TLV_SEGMENT, remain); bt_dev_dbg(hdev, "Send segment %d, size %d", i++, segsize); remain -= segsize; /* The last segment is always acked regardless of download mode */ if (!remain || segsize < MAX_SIZE_PER_TLV_SEGMENT) config->dnld_mode = QCA_SKIP_EVT_NONE; ret = qca_tlv_send_segment(hdev, segsize, segment, config->dnld_mode, soc_type); if (ret) goto out; segment += segsize; } /* Recent Qualcomm chipsets do not send a command complete event * for every fw packet sent; they only respond with a vendor-specific * event for the last packet. This optimization in the chip will * decrease the BT initialization time. 
Here we will inject a command * complete event to avoid a command timeout error message. */ if (config->dnld_type == QCA_SKIP_EVT_VSE_CC || config->dnld_type == QCA_SKIP_EVT_VSE) ret = qca_inject_cmd_complete_event(hdev); out: vfree(data); return ret; } static int qca_disable_soc_logging(struct hci_dev *hdev) { struct sk_buff *skb; u8 cmd[2]; int err; cmd[0] = QCA_DISABLE_LOGGING_SUB_OP; cmd[1] = 0x00; skb = __hci_cmd_sync_ev(hdev, QCA_DISABLE_LOGGING, sizeof(cmd), cmd, HCI_EV_CMD_COMPLETE, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "QCA Failed to disable soc logging (%d)", err); return err; } kfree_skb(skb); return 0; } int qca_set_bdaddr_rome(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; u8 cmd[9]; int err; cmd[0] = EDL_NVM_ACCESS_SET_REQ_CMD; cmd[1] = 0x02; /* TAG ID */ cmd[2] = sizeof(bdaddr_t); /* size */ memcpy(cmd + 3, bdaddr, sizeof(bdaddr_t)); skb = __hci_cmd_sync_ev(hdev, EDL_NVM_ACCESS_OPCODE, sizeof(cmd), cmd, HCI_EV_VENDOR, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "QCA Change address command failed (%d)", err); return err; } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(qca_set_bdaddr_rome); int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, enum qca_btsoc_type soc_type, struct qca_btsoc_version ver, const char *firmware_name) { struct qca_fw_config config; int err; u8 rom_ver = 0; u32 soc_ver; bt_dev_dbg(hdev, "QCA setup on UART"); soc_ver = get_soc_ver(ver.soc_id, ver.rom_ver); bt_dev_info(hdev, "QCA controller version 0x%08x", soc_ver); config.user_baud_rate = baudrate; /* Firmware files to download are based on ROM version. * ROM version is derived from the low nibbles of the last two bytes * of soc_ver. */ if (soc_type == QCA_WCN3988) rom_ver = ((soc_ver & 0x00000f00) >> 0x05) | (soc_ver & 0x0000000f); else rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f); if (soc_type == QCA_WCN6750) qca_send_patch_config_cmd(hdev); /* Download rampatch file */ config.type = TLV_TYPE_PATCH; switch (soc_type) { case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: snprintf(config.fwname, sizeof(config.fwname), "qca/crbtfw%02x.tlv", rom_ver); break; case QCA_WCN3988: snprintf(config.fwname, sizeof(config.fwname), "qca/apbtfw%02x.tlv", rom_ver); break; case QCA_QCA6390: snprintf(config.fwname, sizeof(config.fwname), "qca/htbtfw%02x.tlv", rom_ver); break; case QCA_WCN6750: /* Choose the mbn file by default. If the mbn file is not found, * fall back to the tlv file. */ config.type = ELF_TYPE_PATCH; snprintf(config.fwname, sizeof(config.fwname), "qca/msbtfw%02x.mbn", rom_ver); break; case QCA_WCN6855: snprintf(config.fwname, sizeof(config.fwname), "qca/hpbtfw%02x.tlv", rom_ver); break; case QCA_WCN7850: snprintf(config.fwname, sizeof(config.fwname), "qca/hmtbtfw%02x.tlv", rom_ver); break; default: snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin", soc_ver); } err = qca_download_firmware(hdev, &config, soc_type, rom_ver); if (err < 0) { bt_dev_err(hdev, "QCA Failed to download patch (%d)", err); return err; } /* Give the controller some time to get ready to receive the NVM */ msleep(10); /* Download NVM configuration */ config.type = TLV_TYPE_NVM; if (firmware_name) { snprintf(config.fwname, sizeof(config.fwname), "qca/%s", firmware_name); } else { switch (soc_type) { case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: if (le32_to_cpu(ver.soc_id) == QCA_WCN3991_SOC_ID) { snprintf(config.fwname, sizeof(config.fwname), "qca/crnv%02xu.bin", rom_ver); } else { snprintf(config.fwname,
sizeof(config.fwname), "qca/crnv%02x.bin", rom_ver); } break; case QCA_WCN3988: snprintf(config.fwname, sizeof(config.fwname), "qca/apnv%02x.bin", rom_ver); break; case QCA_QCA6390: snprintf(config.fwname, sizeof(config.fwname), "qca/htnv%02x.bin", rom_ver); break; case QCA_WCN6750: snprintf(config.fwname, sizeof(config.fwname), "qca/msnv%02x.bin", rom_ver); break; case QCA_WCN6855: snprintf(config.fwname, sizeof(config.fwname), "qca/hpnv%02x.bin", rom_ver); break; case QCA_WCN7850: snprintf(config.fwname, sizeof(config.fwname), "qca/hmtnv%02x.bin", rom_ver); break; default: snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin", soc_ver); } } err = qca_download_firmware(hdev, &config, soc_type, rom_ver); if (err < 0) { bt_dev_err(hdev, "QCA Failed to download NVM (%d)", err); return err; } switch (soc_type) { case QCA_WCN3991: case QCA_QCA6390: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: err = qca_disable_soc_logging(hdev); if (err < 0) return err; break; default: break; } /* WCN399x and WCN6750 supports the Microsoft vendor extension with 0xFD70 as the * VsMsftOpCode. */ switch (soc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: case QCA_WCN6750: hci_set_msft_opcode(hdev, 0xFD70); break; default: break; } /* Perform HCI reset */ err = qca_send_reset(hdev); if (err < 0) { bt_dev_err(hdev, "QCA Failed to run HCI_RESET (%d)", err); return err; } switch (soc_type) { case QCA_WCN3991: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: /* get fw build info */ err = qca_read_fw_build_info(hdev); if (err < 0) return err; break; default: break; } bt_dev_info(hdev, "QCA setup on UART is completed"); return 0; } EXPORT_SYMBOL_GPL(qca_uart_setup); int qca_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; int err; skb = __hci_cmd_sync_ev(hdev, EDL_WRITE_BD_ADDR_OPCODE, 6, bdaddr, HCI_EV_VENDOR, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "QCA Change address cmd failed (%d)", err); return err; } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(qca_set_bdaddr); MODULE_AUTHOR("Ben Young Tae Kim <[email protected]>"); MODULE_DESCRIPTION("Bluetooth support for Qualcomm Atheros family ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
linux-master
drivers/bluetooth/btqca.c
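qca_uart_setup() above derives the two-hex-digit ROM version used in firmware file names from the low nibbles of soc_ver's second and first bytes. A standalone sketch of the derivation, using a made-up controller version and the WCN3990-family rampatch name for illustration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t soc_ver = 0x40020150;	/* hypothetical controller version */
	uint8_t rom_ver;
	char fwname[64];

	/* low nibble of byte 1 becomes the high nibble, low nibble of
	 * byte 0 the low nibble (non-WCN3988 case in the driver) */
	rom_ver = ((soc_ver & 0x00000f00) >> 0x04) | (soc_ver & 0x0000000f);

	snprintf(fwname, sizeof(fwname), "qca/crbtfw%02x.tlv", rom_ver);
	printf("soc_ver 0x%08x -> rom_ver 0x%02x -> %s\n",
	       (unsigned)soc_ver, rom_ver, fwname);
	return 0;
}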
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Digianswer Bluetooth USB driver * * Copyright (C) 2004-2007 Marcel Holtmann <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/sched.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <linux/usb.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "h4_recv.h" #define VERSION "0.11" static const struct usb_device_id bpa10x_table[] = { /* Tektronix BPA 100/105 (Digianswer) */ { USB_DEVICE(0x08fd, 0x0002) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, bpa10x_table); struct bpa10x_data { struct hci_dev *hdev; struct usb_device *udev; struct usb_anchor tx_anchor; struct usb_anchor rx_anchor; struct sk_buff *rx_skb[2]; }; static void bpa10x_tx_complete(struct urb *urb) { struct sk_buff *skb = urb->context; struct hci_dev *hdev = (struct hci_dev *) skb->dev; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) goto done; if (!urb->status) hdev->stat.byte_tx += urb->transfer_buffer_length; else hdev->stat.err_tx++; done: kfree(urb->setup_packet); kfree_skb(skb); } #define HCI_VENDOR_HDR_SIZE 5 #define HCI_RECV_VENDOR \ .type = HCI_VENDOR_PKT, \ .hlen = HCI_VENDOR_HDR_SIZE, \ .loff = 3, \ .lsize = 2, \ .maxlen = HCI_MAX_FRAME_SIZE static const struct h4_recv_pkt bpa10x_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, { HCI_RECV_VENDOR, .recv = hci_recv_diag }, }; static void bpa10x_rx_complete(struct urb *urb) { struct hci_dev *hdev = urb->context; struct bpa10x_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s urb %p status %d count %d", hdev->name, urb, urb->status, urb->actual_length); if (!test_bit(HCI_RUNNING, &hdev->flags)) return; if (urb->status == 0) { bool idx = usb_pipebulk(urb->pipe); data->rx_skb[idx] = h4_recv_buf(hdev, data->rx_skb[idx], urb->transfer_buffer, urb->actual_length, bpa10x_recv_pkts, ARRAY_SIZE(bpa10x_recv_pkts)); if (IS_ERR(data->rx_skb[idx])) { bt_dev_err(hdev, "corrupted event packet"); hdev->stat.err_rx++; data->rx_skb[idx] = NULL; } } usb_anchor_urb(urb, &data->rx_anchor); err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { bt_dev_err(hdev, "urb %p failed to resubmit (%d)", urb, -err); usb_unanchor_urb(urb); } } static inline int bpa10x_submit_intr_urb(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size = 16; BT_DBG("%s", hdev->name); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; buf = kmalloc(size, GFP_KERNEL); if (!buf) { usb_free_urb(urb); return -ENOMEM; } pipe = usb_rcvintpipe(data->udev, 0x81); usb_fill_int_urb(urb, data->udev, pipe, buf, size, bpa10x_rx_complete, hdev, 1); urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &data->rx_anchor); err = usb_submit_urb(urb, GFP_KERNEL); if (err < 0) { bt_dev_err(hdev, "urb %p submission failed (%d)", urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static inline int bpa10x_submit_bulk_urb(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); struct urb *urb; unsigned char *buf; unsigned int pipe; int err, size = 64; BT_DBG("%s", hdev->name); urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; buf = kmalloc(size, GFP_KERNEL); if (!buf) { usb_free_urb(urb); 
return -ENOMEM; } pipe = usb_rcvbulkpipe(data->udev, 0x82); usb_fill_bulk_urb(urb, data->udev, pipe, buf, size, bpa10x_rx_complete, hdev); urb->transfer_flags |= URB_FREE_BUFFER; usb_anchor_urb(urb, &data->rx_anchor); err = usb_submit_urb(urb, GFP_KERNEL); if (err < 0) { bt_dev_err(hdev, "urb %p submission failed (%d)", urb, -err); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static int bpa10x_open(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); int err; BT_DBG("%s", hdev->name); err = bpa10x_submit_intr_urb(hdev); if (err < 0) goto error; err = bpa10x_submit_bulk_urb(hdev); if (err < 0) goto error; return 0; error: usb_kill_anchored_urbs(&data->rx_anchor); return err; } static int bpa10x_close(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); usb_kill_anchored_urbs(&data->rx_anchor); return 0; } static int bpa10x_flush(struct hci_dev *hdev) { struct bpa10x_data *data = hci_get_drvdata(hdev); BT_DBG("%s", hdev->name); usb_kill_anchored_urbs(&data->tx_anchor); return 0; } static int bpa10x_setup(struct hci_dev *hdev) { static const u8 req[] = { 0x07 }; struct sk_buff *skb; BT_DBG("%s", hdev->name); /* Read revision string */ skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); bt_dev_info(hdev, "%s", (char *)(skb->data + 1)); hci_set_fw_info(hdev, "%s", skb->data + 1); kfree_skb(skb); return 0; } static int bpa10x_send_frame(struct hci_dev *hdev, struct sk_buff *skb) { struct bpa10x_data *data = hci_get_drvdata(hdev); struct usb_ctrlrequest *dr; struct urb *urb; unsigned int pipe; int err; BT_DBG("%s", hdev->name); skb->dev = (void *) hdev; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return -ENOMEM; /* Prepend skb with frame type */ *(u8 *)skb_push(skb, 1) = hci_skb_pkt_type(skb); switch (hci_skb_pkt_type(skb)) { case HCI_COMMAND_PKT: dr = kmalloc(sizeof(*dr), GFP_KERNEL); if (!dr) { usb_free_urb(urb); return -ENOMEM; } dr->bRequestType = USB_TYPE_VENDOR; dr->bRequest = 0; dr->wIndex = 0; dr->wValue = 0; dr->wLength = __cpu_to_le16(skb->len); pipe = usb_sndctrlpipe(data->udev, 0x00); usb_fill_control_urb(urb, data->udev, pipe, (void *) dr, skb->data, skb->len, bpa10x_tx_complete, skb); hdev->stat.cmd_tx++; break; case HCI_ACLDATA_PKT: pipe = usb_sndbulkpipe(data->udev, 0x02); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, bpa10x_tx_complete, skb); hdev->stat.acl_tx++; break; case HCI_SCODATA_PKT: pipe = usb_sndbulkpipe(data->udev, 0x02); usb_fill_bulk_urb(urb, data->udev, pipe, skb->data, skb->len, bpa10x_tx_complete, skb); hdev->stat.sco_tx++; break; default: usb_free_urb(urb); return -EILSEQ; } usb_anchor_urb(urb, &data->tx_anchor); err = usb_submit_urb(urb, GFP_KERNEL); if (err < 0) { bt_dev_err(hdev, "urb %p submission failed", urb); kfree(urb->setup_packet); usb_unanchor_urb(urb); } usb_free_urb(urb); return err; } static int bpa10x_set_diag(struct hci_dev *hdev, bool enable) { const u8 req[] = { 0x00, enable }; struct sk_buff *skb; BT_DBG("%s", hdev->name); if (!test_bit(HCI_RUNNING, &hdev->flags)) return -ENETDOWN; /* Enable sniffer operation */ skb = __hci_cmd_sync(hdev, 0xfc0e, sizeof(req), req, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); return 0; } static int bpa10x_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct bpa10x_data *data; struct hci_dev *hdev; int err; BT_DBG("intf %p id %p", intf, id); if (intf->cur_altsetting->desc.bInterfaceNumber != 0) return -ENODEV; 
data = devm_kzalloc(&intf->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; data->udev = interface_to_usbdev(intf); init_usb_anchor(&data->tx_anchor); init_usb_anchor(&data->rx_anchor); hdev = hci_alloc_dev(); if (!hdev) return -ENOMEM; hdev->bus = HCI_USB; hci_set_drvdata(hdev, data); data->hdev = hdev; SET_HCIDEV_DEV(hdev, &intf->dev); hdev->open = bpa10x_open; hdev->close = bpa10x_close; hdev->flush = bpa10x_flush; hdev->setup = bpa10x_setup; hdev->send = bpa10x_send_frame; hdev->set_diag = bpa10x_set_diag; set_bit(HCI_QUIRK_RESET_ON_CLOSE, &hdev->quirks); err = hci_register_dev(hdev); if (err < 0) { hci_free_dev(hdev); return err; } usb_set_intfdata(intf, data); return 0; } static void bpa10x_disconnect(struct usb_interface *intf) { struct bpa10x_data *data = usb_get_intfdata(intf); BT_DBG("intf %p", intf); if (!data) return; usb_set_intfdata(intf, NULL); hci_unregister_dev(data->hdev); hci_free_dev(data->hdev); kfree_skb(data->rx_skb[0]); kfree_skb(data->rx_skb[1]); } static struct usb_driver bpa10x_driver = { .name = "bpa10x", .probe = bpa10x_probe, .disconnect = bpa10x_disconnect, .id_table = bpa10x_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(bpa10x_driver); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Digianswer Bluetooth USB driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL");
linux-master
drivers/bluetooth/bpa10x.c
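The HCI_RECV_VENDOR descriptor in bpa10x.c above declares a 5-byte vendor header whose payload length sits in a 2-byte field at offset 3 (.loff = 3, .lsize = 2); based on the common H4 receive code, 2-byte length fields are read little-endian, though that is an assumption to verify against h4_recv.h. A standalone sketch of that layout on a hypothetical packet:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical vendor packet: 5-byte header, then 2 payload bytes */
	const uint8_t pkt[] = { 0xff, 0x00, 0x00, 0x02, 0x00, 0xaa, 0xbb };

	/* length field: 2 bytes at offset 3, assumed little-endian */
	uint16_t plen = (uint16_t)(pkt[3] | (pkt[4] << 8));

	printf("header 5 bytes, payload %u bytes, total %u bytes\n",
	       plen, 5 + plen);
	return 0;
}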
/*
 * Copyright (c) 2017 Redpine Signals Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#include <asm/unaligned.h>
#include <net/rsi_91x.h>

#define RSI_DMA_ALIGN	8
#define RSI_FRAME_DESC_SIZE	16
#define RSI_HEADROOM_FOR_BT_HAL	(RSI_FRAME_DESC_SIZE + RSI_DMA_ALIGN)

struct rsi_hci_adapter {
	void *priv;
	struct rsi_proto_ops *proto_ops;
	struct hci_dev *hdev;
};

static int rsi_hci_open(struct hci_dev *hdev)
{
	return 0;
}

static int rsi_hci_close(struct hci_dev *hdev)
{
	return 0;
}

static int rsi_hci_flush(struct hci_dev *hdev)
{
	return 0;
}

static int rsi_hci_send_pkt(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct rsi_hci_adapter *h_adapter = hci_get_drvdata(hdev);
	struct sk_buff *new_skb = NULL;

	switch (hci_skb_pkt_type(skb)) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		break;
	}

	if (skb_headroom(skb) < RSI_HEADROOM_FOR_BT_HAL) {
		/* Insufficient skb headroom - allocate a new skb */
		new_skb = skb_realloc_headroom(skb, RSI_HEADROOM_FOR_BT_HAL);
		if (unlikely(!new_skb))
			return -ENOMEM;
		bt_cb(new_skb)->pkt_type = hci_skb_pkt_type(skb);
		kfree_skb(skb);
		skb = new_skb;
		if (!IS_ALIGNED((unsigned long)skb->data, RSI_DMA_ALIGN)) {
			u8 *skb_data = skb->data;
			int skb_len = skb->len;

			skb_push(skb, RSI_DMA_ALIGN);
			skb_pull(skb, PTR_ALIGN(skb->data,
						RSI_DMA_ALIGN) - skb->data);
			memmove(skb->data, skb_data, skb_len);
			skb_trim(skb, skb_len);
		}
	}

	return h_adapter->proto_ops->coex_send_pkt(h_adapter->priv, skb,
						   RSI_BT_Q);
}

static int rsi_hci_recv_pkt(void *priv, const u8 *pkt)
{
	struct rsi_hci_adapter *h_adapter = priv;
	struct hci_dev *hdev = h_adapter->hdev;
	struct sk_buff *skb;
	int pkt_len = get_unaligned_le16(pkt) & 0x0fff;

	skb = dev_alloc_skb(pkt_len);
	if (!skb)
		return -ENOMEM;

	memcpy(skb->data, pkt + RSI_FRAME_DESC_SIZE, pkt_len);
	skb_put(skb, pkt_len);
	h_adapter->hdev->stat.byte_rx += skb->len;

	hci_skb_pkt_type(skb) = pkt[14];

	return hci_recv_frame(hdev, skb);
}

static int rsi_hci_attach(void *priv, struct rsi_proto_ops *ops)
{
	struct rsi_hci_adapter *h_adapter = NULL;
	struct hci_dev *hdev;
	int err = 0;

	h_adapter = kzalloc(sizeof(*h_adapter), GFP_KERNEL);
	if (!h_adapter)
		return -ENOMEM;

	h_adapter->priv = priv;
	ops->set_bt_context(priv, h_adapter);
	h_adapter->proto_ops = ops;

	hdev = hci_alloc_dev();
	if (!hdev) {
		BT_ERR("Failed to alloc HCI device");
		goto err;
	}

	h_adapter->hdev = hdev;

	if (ops->get_host_intf(priv) == RSI_HOST_INTF_SDIO)
		hdev->bus = HCI_SDIO;
	else
		hdev->bus = HCI_USB;

	hci_set_drvdata(hdev, h_adapter);
	hdev->dev_type = HCI_PRIMARY;
	hdev->open = rsi_hci_open;
	hdev->close = rsi_hci_close;
	hdev->flush = rsi_hci_flush;
	hdev->send = rsi_hci_send_pkt;

	err = hci_register_dev(hdev);
	if (err < 0) {
		BT_ERR("HCI registration failed with errcode %d", err);
		hci_free_dev(hdev);
		goto err;
	}

	return 0;
err:
	h_adapter->hdev = NULL;
	kfree(h_adapter);
	return -EINVAL;
}

static void rsi_hci_detach(void *priv)
{
	struct rsi_hci_adapter *h_adapter = priv;
	struct hci_dev *hdev;

	if (!h_adapter)
		return;

	hdev = h_adapter->hdev;
	if (hdev) {
		hci_unregister_dev(hdev);
		hci_free_dev(hdev);
		h_adapter->hdev = NULL;
	}

	kfree(h_adapter);
}

const struct rsi_mod_ops rsi_bt_ops = {
	.attach	= rsi_hci_attach,
	.detach	= rsi_hci_detach,
	.recv_pkt = rsi_hci_recv_pkt,
};
EXPORT_SYMBOL(rsi_bt_ops);

static int rsi_91x_bt_module_init(void)
{
	return 0;
}

static void rsi_91x_bt_module_exit(void)
{
	return;
}

module_init(rsi_91x_bt_module_init);
module_exit(rsi_91x_bt_module_exit);
MODULE_AUTHOR("Redpine Signals Inc");
MODULE_DESCRIPTION("RSI BT driver");
MODULE_LICENSE("Dual BSD/GPL");
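
A note on the realignment in rsi_hci_send_pkt() above: when skb->data is
misaligned, the skb_push()/skb_pull()/memmove() sequence amounts to rounding
the data pointer down to the previous RSI_DMA_ALIGN boundary inside the
headroom just reserved by skb_realloc_headroom(). A minimal user-space sketch
of that arithmetic, with plain pointers instead of skbs; rsi_dma_align(), the
buffer layout, and main() are invented for illustration:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RSI_DMA_ALIGN 8

/* Round "data" down to the previous RSI_DMA_ALIGN boundary and move the
 * payload with it. The caller must have reserved at least RSI_DMA_ALIGN
 * bytes of headroom, which is what skb_realloc_headroom() guarantees in
 * the driver before this path runs. */
static uint8_t *rsi_dma_align(uint8_t *data, size_t len)
{
	uint8_t *aligned = (uint8_t *)((uintptr_t)data &
				       ~(uintptr_t)(RSI_DMA_ALIGN - 1));

	if (aligned != data)
		memmove(aligned, data, len);	/* length is unchanged */
	return aligned;
}

int main(void)
{
	uint8_t buf[64];
	uint8_t *data = buf + 11;	/* typically misaligned */

	memcpy(data, "hci", 3);
	data = rsi_dma_align(data, 3);
	printf("data %% %d == %zu\n", RSI_DMA_ALIGN,
	       (size_t)((uintptr_t)data % RSI_DMA_ALIGN));
	return 0;
}

The driver reaches the same address through skb_push() and skb_pull() so that
the skb's own bookkeeping (headroom, length) stays consistent.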
linux-master
drivers/bluetooth/btrsi.c
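
For readers of rsi_hci_recv_pkt() in btrsi.c above: every received frame
starts with a 16-byte descriptor whose first little-endian word carries the
payload length in its low 12 bits, and whose byte 14 carries the HCI packet
type. A self-contained sketch of that parse, assuming only the offsets the
driver uses (struct rsi_frame and both helper names are invented):

#include <stdint.h>
#include <stddef.h>

#define RSI_FRAME_DESC_SIZE 16

struct rsi_frame {
	uint16_t len;		/* low 12 bits of word 0 */
	uint8_t type;		/* HCI packet type, byte 14 */
	const uint8_t *payload;	/* starts right after the descriptor */
};

/* Byte-wise load, safe for unaligned descriptors - the same reason the
 * driver uses get_unaligned_le16(). */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | ((uint16_t)p[1] << 8));
}

static void rsi_parse_desc(const uint8_t *pkt, struct rsi_frame *f)
{
	f->len = get_le16(pkt) & 0x0fff;
	f->type = pkt[14];
	f->payload = pkt + RSI_FRAME_DESC_SIZE;
}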
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth HCI Three-wire UART driver * * Copyright (C) 2012 Intel Corporation */ #include <linux/acpi.h> #include <linux/errno.h> #include <linux/gpio/consumer.h> #include <linux/kernel.h> #include <linux/mod_devicetable.h> #include <linux/of.h> #include <linux/pm_runtime.h> #include <linux/serdev.h> #include <linux/skbuff.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btrtl.h" #include "hci_uart.h" #define SUSPEND_TIMEOUT_MS 6000 #define HCI_3WIRE_ACK_PKT 0 #define HCI_3WIRE_LINK_PKT 15 /* Sliding window size */ #define H5_TX_WIN_MAX 4 #define H5_ACK_TIMEOUT msecs_to_jiffies(250) #define H5_SYNC_TIMEOUT msecs_to_jiffies(100) /* * Maximum Three-wire packet: * 4 byte header + max value for 12-bit length + 2 bytes for CRC */ #define H5_MAX_LEN (4 + 0xfff + 2) /* Convenience macros for reading Three-wire header values */ #define H5_HDR_SEQ(hdr) ((hdr)[0] & 0x07) #define H5_HDR_ACK(hdr) (((hdr)[0] >> 3) & 0x07) #define H5_HDR_CRC(hdr) (((hdr)[0] >> 6) & 0x01) #define H5_HDR_RELIABLE(hdr) (((hdr)[0] >> 7) & 0x01) #define H5_HDR_PKT_TYPE(hdr) ((hdr)[1] & 0x0f) #define H5_HDR_LEN(hdr) ((((hdr)[1] >> 4) & 0x0f) + ((hdr)[2] << 4)) #define SLIP_DELIMITER 0xc0 #define SLIP_ESC 0xdb #define SLIP_ESC_DELIM 0xdc #define SLIP_ESC_ESC 0xdd /* H5 state flags */ enum { H5_RX_ESC, /* SLIP escape mode */ H5_TX_ACK_REQ, /* Pending ack to send */ H5_WAKEUP_DISABLE, /* Device cannot wake host */ H5_HW_FLOW_CONTROL, /* Use HW flow control */ }; struct h5 { /* Must be the first member, hci_serdev.c expects this. */ struct hci_uart serdev_hu; struct sk_buff_head unack; /* Unack'ed packets queue */ struct sk_buff_head rel; /* Reliable packets queue */ struct sk_buff_head unrel; /* Unreliable packets queue */ unsigned long flags; struct sk_buff *rx_skb; /* Receive buffer */ size_t rx_pending; /* Expecting more bytes */ u8 rx_ack; /* Last ack number received */ int (*rx_func)(struct hci_uart *hu, u8 c); struct timer_list timer; /* Retransmission timer */ struct hci_uart *hu; /* Parent HCI UART */ u8 tx_seq; /* Next seq number to send */ u8 tx_ack; /* Next ack number to send */ u8 tx_win; /* Sliding window size */ enum { H5_UNINITIALIZED, H5_INITIALIZED, H5_ACTIVE, } state; enum { H5_AWAKE, H5_SLEEPING, H5_WAKING_UP, } sleep; const struct h5_vnd *vnd; const char *id; struct gpio_desc *enable_gpio; struct gpio_desc *device_wake_gpio; }; enum h5_driver_info { H5_INFO_WAKEUP_DISABLE = BIT(0), }; struct h5_vnd { int (*setup)(struct h5 *h5); void (*open)(struct h5 *h5); void (*close)(struct h5 *h5); int (*suspend)(struct h5 *h5); int (*resume)(struct h5 *h5); const struct acpi_gpio_mapping *acpi_gpio_map; }; struct h5_device_data { uint32_t driver_info; struct h5_vnd *vnd; }; static void h5_reset_rx(struct h5 *h5); static void h5_link_control(struct hci_uart *hu, const void *data, size_t len) { struct h5 *h5 = hu->priv; struct sk_buff *nskb; nskb = alloc_skb(3, GFP_ATOMIC); if (!nskb) return; hci_skb_pkt_type(nskb) = HCI_3WIRE_LINK_PKT; skb_put_data(nskb, data, len); skb_queue_tail(&h5->unrel, nskb); } static u8 h5_cfg_field(struct h5 *h5) { /* Sliding window size (first 3 bits) */ return h5->tx_win & 0x07; } static void h5_timed_event(struct timer_list *t) { const unsigned char sync_req[] = { 0x01, 0x7e }; unsigned char conf_req[3] = { 0x03, 0xfc }; struct h5 *h5 = from_timer(h5, t, timer); struct hci_uart *hu = h5->hu; struct sk_buff *skb; unsigned long flags; BT_DBG("%s", hu->hdev->name); if (h5->state == H5_UNINITIALIZED) 
h5_link_control(hu, sync_req, sizeof(sync_req)); if (h5->state == H5_INITIALIZED) { conf_req[2] = h5_cfg_field(h5); h5_link_control(hu, conf_req, sizeof(conf_req)); } if (h5->state != H5_ACTIVE) { mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT); goto wakeup; } if (h5->sleep != H5_AWAKE) { h5->sleep = H5_SLEEPING; goto wakeup; } BT_DBG("hu %p retransmitting %u pkts", hu, h5->unack.qlen); spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING); while ((skb = __skb_dequeue_tail(&h5->unack)) != NULL) { h5->tx_seq = (h5->tx_seq - 1) & 0x07; skb_queue_head(&h5->rel, skb); } spin_unlock_irqrestore(&h5->unack.lock, flags); wakeup: hci_uart_tx_wakeup(hu); } static void h5_peer_reset(struct hci_uart *hu) { struct h5 *h5 = hu->priv; bt_dev_err(hu->hdev, "Peer device has reset"); h5->state = H5_UNINITIALIZED; del_timer(&h5->timer); skb_queue_purge(&h5->rel); skb_queue_purge(&h5->unrel); skb_queue_purge(&h5->unack); h5->tx_seq = 0; h5->tx_ack = 0; /* Send reset request to upper stack */ hci_reset_dev(hu->hdev); } static int h5_open(struct hci_uart *hu) { struct h5 *h5; const unsigned char sync[] = { 0x01, 0x7e }; BT_DBG("hu %p", hu); if (hu->serdev) { h5 = serdev_device_get_drvdata(hu->serdev); } else { h5 = kzalloc(sizeof(*h5), GFP_KERNEL); if (!h5) return -ENOMEM; } hu->priv = h5; h5->hu = hu; skb_queue_head_init(&h5->unack); skb_queue_head_init(&h5->rel); skb_queue_head_init(&h5->unrel); h5_reset_rx(h5); timer_setup(&h5->timer, h5_timed_event, 0); h5->tx_win = H5_TX_WIN_MAX; if (h5->vnd && h5->vnd->open) h5->vnd->open(h5); set_bit(HCI_UART_INIT_PENDING, &hu->hdev_flags); /* Send initial sync request */ h5_link_control(hu, sync, sizeof(sync)); mod_timer(&h5->timer, jiffies + H5_SYNC_TIMEOUT); return 0; } static int h5_close(struct hci_uart *hu) { struct h5 *h5 = hu->priv; del_timer_sync(&h5->timer); skb_queue_purge(&h5->unack); skb_queue_purge(&h5->rel); skb_queue_purge(&h5->unrel); kfree_skb(h5->rx_skb); h5->rx_skb = NULL; if (h5->vnd && h5->vnd->close) h5->vnd->close(h5); if (!hu->serdev) kfree(h5); return 0; } static int h5_setup(struct hci_uart *hu) { struct h5 *h5 = hu->priv; if (h5->vnd && h5->vnd->setup) return h5->vnd->setup(h5); return 0; } static void h5_pkt_cull(struct h5 *h5) { struct sk_buff *skb, *tmp; unsigned long flags; int i, to_remove; u8 seq; spin_lock_irqsave(&h5->unack.lock, flags); to_remove = skb_queue_len(&h5->unack); if (to_remove == 0) goto unlock; seq = h5->tx_seq; while (to_remove > 0) { if (h5->rx_ack == seq) break; to_remove--; seq = (seq - 1) & 0x07; } if (seq != h5->rx_ack) BT_ERR("Controller acked invalid packet"); i = 0; skb_queue_walk_safe(&h5->unack, skb, tmp) { if (i++ >= to_remove) break; __skb_unlink(skb, &h5->unack); dev_kfree_skb_irq(skb); } if (skb_queue_empty(&h5->unack)) del_timer(&h5->timer); unlock: spin_unlock_irqrestore(&h5->unack.lock, flags); } static void h5_handle_internal_rx(struct hci_uart *hu) { struct h5 *h5 = hu->priv; const unsigned char sync_req[] = { 0x01, 0x7e }; const unsigned char sync_rsp[] = { 0x02, 0x7d }; unsigned char conf_req[3] = { 0x03, 0xfc }; const unsigned char conf_rsp[] = { 0x04, 0x7b }; const unsigned char wakeup_req[] = { 0x05, 0xfa }; const unsigned char woken_req[] = { 0x06, 0xf9 }; const unsigned char sleep_req[] = { 0x07, 0x78 }; const unsigned char *hdr = h5->rx_skb->data; const unsigned char *data = &h5->rx_skb->data[4]; BT_DBG("%s", hu->hdev->name); if (H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) return; if (H5_HDR_LEN(hdr) < 2) return; conf_req[2] = h5_cfg_field(h5); if (memcmp(data, sync_req, 2) == 
0) { if (h5->state == H5_ACTIVE) h5_peer_reset(hu); h5_link_control(hu, sync_rsp, 2); } else if (memcmp(data, sync_rsp, 2) == 0) { if (h5->state == H5_ACTIVE) h5_peer_reset(hu); h5->state = H5_INITIALIZED; h5_link_control(hu, conf_req, 3); } else if (memcmp(data, conf_req, 2) == 0) { h5_link_control(hu, conf_rsp, 2); h5_link_control(hu, conf_req, 3); } else if (memcmp(data, conf_rsp, 2) == 0) { if (H5_HDR_LEN(hdr) > 2) h5->tx_win = (data[2] & 0x07); BT_DBG("Three-wire init complete. tx_win %u", h5->tx_win); h5->state = H5_ACTIVE; hci_uart_init_ready(hu); return; } else if (memcmp(data, sleep_req, 2) == 0) { BT_DBG("Peer went to sleep"); h5->sleep = H5_SLEEPING; return; } else if (memcmp(data, woken_req, 2) == 0) { BT_DBG("Peer woke up"); h5->sleep = H5_AWAKE; } else if (memcmp(data, wakeup_req, 2) == 0) { BT_DBG("Peer requested wakeup"); h5_link_control(hu, woken_req, 2); h5->sleep = H5_AWAKE; } else { BT_DBG("Link Control: 0x%02hhx 0x%02hhx", data[0], data[1]); return; } hci_uart_tx_wakeup(hu); } static void h5_complete_rx_pkt(struct hci_uart *hu) { struct h5 *h5 = hu->priv; const unsigned char *hdr = h5->rx_skb->data; if (H5_HDR_RELIABLE(hdr)) { h5->tx_ack = (h5->tx_ack + 1) % 8; set_bit(H5_TX_ACK_REQ, &h5->flags); hci_uart_tx_wakeup(hu); } h5->rx_ack = H5_HDR_ACK(hdr); h5_pkt_cull(h5); switch (H5_HDR_PKT_TYPE(hdr)) { case HCI_EVENT_PKT: case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_ISODATA_PKT: hci_skb_pkt_type(h5->rx_skb) = H5_HDR_PKT_TYPE(hdr); /* Remove Three-wire header */ skb_pull(h5->rx_skb, 4); hci_recv_frame(hu->hdev, h5->rx_skb); h5->rx_skb = NULL; break; default: h5_handle_internal_rx(hu); break; } h5_reset_rx(h5); } static int h5_rx_crc(struct hci_uart *hu, unsigned char c) { h5_complete_rx_pkt(hu); return 0; } static int h5_rx_payload(struct hci_uart *hu, unsigned char c) { struct h5 *h5 = hu->priv; const unsigned char *hdr = h5->rx_skb->data; if (H5_HDR_CRC(hdr)) { h5->rx_func = h5_rx_crc; h5->rx_pending = 2; } else { h5_complete_rx_pkt(hu); } return 0; } static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c) { struct h5 *h5 = hu->priv; const unsigned char *hdr = h5->rx_skb->data; BT_DBG("%s rx: seq %u ack %u crc %u rel %u type %u len %u", hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr), H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr), H5_HDR_LEN(hdr)); if (((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) != 0xff) { bt_dev_err(hu->hdev, "Invalid header checksum"); h5_reset_rx(h5); return 0; } if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) { bt_dev_err(hu->hdev, "Out-of-order packet arrived (%u != %u)", H5_HDR_SEQ(hdr), h5->tx_ack); set_bit(H5_TX_ACK_REQ, &h5->flags); hci_uart_tx_wakeup(hu); h5_reset_rx(h5); return 0; } if (h5->state != H5_ACTIVE && H5_HDR_PKT_TYPE(hdr) != HCI_3WIRE_LINK_PKT) { bt_dev_err(hu->hdev, "Non-link packet received in non-active state"); h5_reset_rx(h5); return 0; } h5->rx_func = h5_rx_payload; h5->rx_pending = H5_HDR_LEN(hdr); return 0; } static int h5_rx_pkt_start(struct hci_uart *hu, unsigned char c) { struct h5 *h5 = hu->priv; if (c == SLIP_DELIMITER) return 1; h5->rx_func = h5_rx_3wire_hdr; h5->rx_pending = 4; h5->rx_skb = bt_skb_alloc(H5_MAX_LEN, GFP_ATOMIC); if (!h5->rx_skb) { bt_dev_err(hu->hdev, "Can't allocate mem for new packet"); h5_reset_rx(h5); return -ENOMEM; } h5->rx_skb->dev = (void *)hu->hdev; return 0; } static int h5_rx_delimiter(struct hci_uart *hu, unsigned char c) { struct h5 *h5 = hu->priv; if (c == SLIP_DELIMITER) h5->rx_func = h5_rx_pkt_start; return 1; } static void 
h5_unslip_one_byte(struct h5 *h5, unsigned char c) { const u8 delim = SLIP_DELIMITER, esc = SLIP_ESC; const u8 *byte = &c; if (!test_bit(H5_RX_ESC, &h5->flags) && c == SLIP_ESC) { set_bit(H5_RX_ESC, &h5->flags); return; } if (test_and_clear_bit(H5_RX_ESC, &h5->flags)) { switch (c) { case SLIP_ESC_DELIM: byte = &delim; break; case SLIP_ESC_ESC: byte = &esc; break; default: BT_ERR("Invalid esc byte 0x%02hhx", c); h5_reset_rx(h5); return; } } skb_put_data(h5->rx_skb, byte, 1); h5->rx_pending--; BT_DBG("unslipped 0x%02hhx, rx_pending %zu", *byte, h5->rx_pending); } static void h5_reset_rx(struct h5 *h5) { if (h5->rx_skb) { kfree_skb(h5->rx_skb); h5->rx_skb = NULL; } h5->rx_func = h5_rx_delimiter; h5->rx_pending = 0; clear_bit(H5_RX_ESC, &h5->flags); } static int h5_recv(struct hci_uart *hu, const void *data, int count) { struct h5 *h5 = hu->priv; const unsigned char *ptr = data; BT_DBG("%s pending %zu count %d", hu->hdev->name, h5->rx_pending, count); while (count > 0) { int processed; if (h5->rx_pending > 0) { if (*ptr == SLIP_DELIMITER) { bt_dev_err(hu->hdev, "Too short H5 packet"); h5_reset_rx(h5); continue; } h5_unslip_one_byte(h5, *ptr); ptr++; count--; continue; } processed = h5->rx_func(hu, *ptr); if (processed < 0) return processed; ptr += processed; count -= processed; } if (hu->serdev) { pm_runtime_get(&hu->serdev->dev); pm_runtime_mark_last_busy(&hu->serdev->dev); pm_runtime_put_autosuspend(&hu->serdev->dev); } return 0; } static int h5_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct h5 *h5 = hu->priv; if (skb->len > 0xfff) { bt_dev_err(hu->hdev, "Packet too long (%u bytes)", skb->len); kfree_skb(skb); return 0; } if (h5->state != H5_ACTIVE) { bt_dev_err(hu->hdev, "Ignoring HCI data in non-active state"); kfree_skb(skb); return 0; } switch (hci_skb_pkt_type(skb)) { case HCI_ACLDATA_PKT: case HCI_COMMAND_PKT: skb_queue_tail(&h5->rel, skb); break; case HCI_SCODATA_PKT: case HCI_ISODATA_PKT: skb_queue_tail(&h5->unrel, skb); break; default: bt_dev_err(hu->hdev, "Unknown packet type %u", hci_skb_pkt_type(skb)); kfree_skb(skb); break; } if (hu->serdev) { pm_runtime_get_sync(&hu->serdev->dev); pm_runtime_mark_last_busy(&hu->serdev->dev); pm_runtime_put_autosuspend(&hu->serdev->dev); } return 0; } static void h5_slip_delim(struct sk_buff *skb) { const char delim = SLIP_DELIMITER; skb_put_data(skb, &delim, 1); } static void h5_slip_one_byte(struct sk_buff *skb, u8 c) { const char esc_delim[2] = { SLIP_ESC, SLIP_ESC_DELIM }; const char esc_esc[2] = { SLIP_ESC, SLIP_ESC_ESC }; switch (c) { case SLIP_DELIMITER: skb_put_data(skb, &esc_delim, 2); break; case SLIP_ESC: skb_put_data(skb, &esc_esc, 2); break; default: skb_put_data(skb, &c, 1); } } static bool valid_packet_type(u8 type) { switch (type) { case HCI_ACLDATA_PKT: case HCI_COMMAND_PKT: case HCI_SCODATA_PKT: case HCI_ISODATA_PKT: case HCI_3WIRE_LINK_PKT: case HCI_3WIRE_ACK_PKT: return true; default: return false; } } static struct sk_buff *h5_prepare_pkt(struct hci_uart *hu, u8 pkt_type, const u8 *data, size_t len) { struct h5 *h5 = hu->priv; struct sk_buff *nskb; u8 hdr[4]; int i; if (!valid_packet_type(pkt_type)) { bt_dev_err(hu->hdev, "Unknown packet type %u", pkt_type); return NULL; } /* * Max len of packet: (original len + 4 (H5 hdr) + 2 (crc)) * 2 * (because bytes 0xc0 and 0xdb are escaped, worst case is when * the packet is all made of 0xc0 and 0xdb) + 2 (0xc0 * delimiters at start and end). 
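 * Hence the alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC) call just below.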
*/ nskb = alloc_skb((len + 6) * 2 + 2, GFP_ATOMIC); if (!nskb) return NULL; hci_skb_pkt_type(nskb) = pkt_type; h5_slip_delim(nskb); hdr[0] = h5->tx_ack << 3; clear_bit(H5_TX_ACK_REQ, &h5->flags); /* Reliable packet? */ if (pkt_type == HCI_ACLDATA_PKT || pkt_type == HCI_COMMAND_PKT) { hdr[0] |= 1 << 7; hdr[0] |= h5->tx_seq; h5->tx_seq = (h5->tx_seq + 1) % 8; } hdr[1] = pkt_type | ((len & 0x0f) << 4); hdr[2] = len >> 4; hdr[3] = ~((hdr[0] + hdr[1] + hdr[2]) & 0xff); BT_DBG("%s tx: seq %u ack %u crc %u rel %u type %u len %u", hu->hdev->name, H5_HDR_SEQ(hdr), H5_HDR_ACK(hdr), H5_HDR_CRC(hdr), H5_HDR_RELIABLE(hdr), H5_HDR_PKT_TYPE(hdr), H5_HDR_LEN(hdr)); for (i = 0; i < 4; i++) h5_slip_one_byte(nskb, hdr[i]); for (i = 0; i < len; i++) h5_slip_one_byte(nskb, data[i]); h5_slip_delim(nskb); return nskb; } static struct sk_buff *h5_dequeue(struct hci_uart *hu) { struct h5 *h5 = hu->priv; unsigned long flags; struct sk_buff *skb, *nskb; if (h5->sleep != H5_AWAKE) { const unsigned char wakeup_req[] = { 0x05, 0xfa }; if (h5->sleep == H5_WAKING_UP) return NULL; h5->sleep = H5_WAKING_UP; BT_DBG("Sending wakeup request"); mod_timer(&h5->timer, jiffies + HZ / 100); return h5_prepare_pkt(hu, HCI_3WIRE_LINK_PKT, wakeup_req, 2); } skb = skb_dequeue(&h5->unrel); if (skb) { nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb), skb->data, skb->len); if (nskb) { kfree_skb(skb); return nskb; } skb_queue_head(&h5->unrel, skb); bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed"); } spin_lock_irqsave_nested(&h5->unack.lock, flags, SINGLE_DEPTH_NESTING); if (h5->unack.qlen >= h5->tx_win) goto unlock; skb = skb_dequeue(&h5->rel); if (skb) { nskb = h5_prepare_pkt(hu, hci_skb_pkt_type(skb), skb->data, skb->len); if (nskb) { __skb_queue_tail(&h5->unack, skb); mod_timer(&h5->timer, jiffies + H5_ACK_TIMEOUT); spin_unlock_irqrestore(&h5->unack.lock, flags); return nskb; } skb_queue_head(&h5->rel, skb); bt_dev_err(hu->hdev, "Could not dequeue pkt because alloc_skb failed"); } unlock: spin_unlock_irqrestore(&h5->unack.lock, flags); if (test_bit(H5_TX_ACK_REQ, &h5->flags)) return h5_prepare_pkt(hu, HCI_3WIRE_ACK_PKT, NULL, 0); return NULL; } static int h5_flush(struct hci_uart *hu) { BT_DBG("hu %p", hu); return 0; } static const struct hci_uart_proto h5p = { .id = HCI_UART_3WIRE, .name = "Three-wire (H5)", .open = h5_open, .close = h5_close, .setup = h5_setup, .recv = h5_recv, .enqueue = h5_enqueue, .dequeue = h5_dequeue, .flush = h5_flush, }; static int h5_serdev_probe(struct serdev_device *serdev) { struct device *dev = &serdev->dev; struct h5 *h5; const struct h5_device_data *data; h5 = devm_kzalloc(dev, sizeof(*h5), GFP_KERNEL); if (!h5) return -ENOMEM; h5->hu = &h5->serdev_hu; h5->serdev_hu.serdev = serdev; serdev_device_set_drvdata(serdev, h5); if (has_acpi_companion(dev)) { const struct acpi_device_id *match; match = acpi_match_device(dev->driver->acpi_match_table, dev); if (!match) return -ENODEV; data = (const struct h5_device_data *)match->driver_data; h5->vnd = data->vnd; h5->id = (char *)match->id; if (h5->vnd->acpi_gpio_map) devm_acpi_dev_add_driver_gpios(dev, h5->vnd->acpi_gpio_map); } else { data = of_device_get_match_data(dev); if (!data) return -ENODEV; h5->vnd = data->vnd; } if (data->driver_info & H5_INFO_WAKEUP_DISABLE) set_bit(H5_WAKEUP_DISABLE, &h5->flags); h5->enable_gpio = devm_gpiod_get_optional(dev, "enable", GPIOD_OUT_LOW); if (IS_ERR(h5->enable_gpio)) return PTR_ERR(h5->enable_gpio); h5->device_wake_gpio = devm_gpiod_get_optional(dev, "device-wake", GPIOD_OUT_LOW); if 
(IS_ERR(h5->device_wake_gpio)) return PTR_ERR(h5->device_wake_gpio); return hci_uart_register_device(&h5->serdev_hu, &h5p); } static void h5_serdev_remove(struct serdev_device *serdev) { struct h5 *h5 = serdev_device_get_drvdata(serdev); hci_uart_unregister_device(&h5->serdev_hu); } static int __maybe_unused h5_serdev_suspend(struct device *dev) { struct h5 *h5 = dev_get_drvdata(dev); int ret = 0; if (h5->vnd && h5->vnd->suspend) ret = h5->vnd->suspend(h5); return ret; } static int __maybe_unused h5_serdev_resume(struct device *dev) { struct h5 *h5 = dev_get_drvdata(dev); int ret = 0; if (h5->vnd && h5->vnd->resume) ret = h5->vnd->resume(h5); return ret; } #ifdef CONFIG_BT_HCIUART_RTL static int h5_btrtl_setup(struct h5 *h5) { struct btrtl_device_info *btrtl_dev; struct sk_buff *skb; __le32 baudrate_data; u32 device_baudrate; unsigned int controller_baudrate; bool flow_control; int err; btrtl_dev = btrtl_initialize(h5->hu->hdev, h5->id); if (IS_ERR(btrtl_dev)) return PTR_ERR(btrtl_dev); err = btrtl_get_uart_settings(h5->hu->hdev, btrtl_dev, &controller_baudrate, &device_baudrate, &flow_control); if (err) goto out_free; baudrate_data = cpu_to_le32(device_baudrate); skb = __hci_cmd_sync(h5->hu->hdev, 0xfc17, sizeof(baudrate_data), &baudrate_data, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { rtl_dev_err(h5->hu->hdev, "set baud rate command failed\n"); err = PTR_ERR(skb); goto out_free; } else { kfree_skb(skb); } /* Give the device some time to set up the new baudrate. */ usleep_range(10000, 20000); serdev_device_set_baudrate(h5->hu->serdev, controller_baudrate); serdev_device_set_flow_control(h5->hu->serdev, flow_control); if (flow_control) set_bit(H5_HW_FLOW_CONTROL, &h5->flags); err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev); /* Give the device some time before the hci-core sends it a reset */ usleep_range(10000, 20000); if (err) goto out_free; btrtl_set_quirks(h5->hu->hdev, btrtl_dev); out_free: btrtl_free(btrtl_dev); return err; } static void h5_btrtl_open(struct h5 *h5) { /* * Since h5_btrtl_resume() does a device_reprobe() the suspend handling * done by the hci_suspend_notifier is not necessary; it actually causes * delays and a bunch of errors to get logged, so disable it. */ if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) set_bit(HCI_UART_NO_SUSPEND_NOTIFIER, &h5->hu->flags); /* Devices always start with these fixed parameters */ serdev_device_set_flow_control(h5->hu->serdev, false); serdev_device_set_parity(h5->hu->serdev, SERDEV_PARITY_EVEN); serdev_device_set_baudrate(h5->hu->serdev, 115200); if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) { pm_runtime_set_active(&h5->hu->serdev->dev); pm_runtime_use_autosuspend(&h5->hu->serdev->dev); pm_runtime_set_autosuspend_delay(&h5->hu->serdev->dev, SUSPEND_TIMEOUT_MS); pm_runtime_enable(&h5->hu->serdev->dev); } /* The controller needs reset to startup */ gpiod_set_value_cansleep(h5->enable_gpio, 0); gpiod_set_value_cansleep(h5->device_wake_gpio, 0); msleep(100); /* The controller needs up to 500ms to wakeup */ gpiod_set_value_cansleep(h5->enable_gpio, 1); gpiod_set_value_cansleep(h5->device_wake_gpio, 1); msleep(500); } static void h5_btrtl_close(struct h5 *h5) { if (!test_bit(H5_WAKEUP_DISABLE, &h5->flags)) pm_runtime_disable(&h5->hu->serdev->dev); gpiod_set_value_cansleep(h5->device_wake_gpio, 0); gpiod_set_value_cansleep(h5->enable_gpio, 0); } /* Suspend/resume support. On many devices the RTL BT device loses power during * suspend/resume, causing it to lose its firmware and all state. 
So we simply * turn it off on suspend and reprobe on resume. This mirrors how RTL devices * are handled in the USB driver, where the BTUSB_WAKEUP_DISABLE is used which * also causes a reprobe on resume. */ static int h5_btrtl_suspend(struct h5 *h5) { serdev_device_set_flow_control(h5->hu->serdev, false); gpiod_set_value_cansleep(h5->device_wake_gpio, 0); if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) gpiod_set_value_cansleep(h5->enable_gpio, 0); return 0; } struct h5_btrtl_reprobe { struct device *dev; struct work_struct work; }; static void h5_btrtl_reprobe_worker(struct work_struct *work) { struct h5_btrtl_reprobe *reprobe = container_of(work, struct h5_btrtl_reprobe, work); int ret; ret = device_reprobe(reprobe->dev); if (ret && ret != -EPROBE_DEFER) dev_err(reprobe->dev, "Reprobe error %d\n", ret); put_device(reprobe->dev); kfree(reprobe); module_put(THIS_MODULE); } static int h5_btrtl_resume(struct h5 *h5) { if (test_bit(H5_WAKEUP_DISABLE, &h5->flags)) { struct h5_btrtl_reprobe *reprobe; reprobe = kzalloc(sizeof(*reprobe), GFP_KERNEL); if (!reprobe) return -ENOMEM; __module_get(THIS_MODULE); INIT_WORK(&reprobe->work, h5_btrtl_reprobe_worker); reprobe->dev = get_device(&h5->hu->serdev->dev); queue_work(system_long_wq, &reprobe->work); } else { gpiod_set_value_cansleep(h5->device_wake_gpio, 1); if (test_bit(H5_HW_FLOW_CONTROL, &h5->flags)) serdev_device_set_flow_control(h5->hu->serdev, true); } return 0; } static const struct acpi_gpio_params btrtl_device_wake_gpios = { 0, 0, false }; static const struct acpi_gpio_params btrtl_enable_gpios = { 1, 0, false }; static const struct acpi_gpio_params btrtl_host_wake_gpios = { 2, 0, false }; static const struct acpi_gpio_mapping acpi_btrtl_gpios[] = { { "device-wake-gpios", &btrtl_device_wake_gpios, 1 }, { "enable-gpios", &btrtl_enable_gpios, 1 }, { "host-wake-gpios", &btrtl_host_wake_gpios, 1 }, {}, }; static struct h5_vnd rtl_vnd = { .setup = h5_btrtl_setup, .open = h5_btrtl_open, .close = h5_btrtl_close, .suspend = h5_btrtl_suspend, .resume = h5_btrtl_resume, .acpi_gpio_map = acpi_btrtl_gpios, }; static const struct h5_device_data h5_data_rtl8822cs = { .vnd = &rtl_vnd, }; static const struct h5_device_data h5_data_rtl8723bs = { .driver_info = H5_INFO_WAKEUP_DISABLE, .vnd = &rtl_vnd, }; #endif #ifdef CONFIG_ACPI static const struct acpi_device_id h5_acpi_match[] = { #ifdef CONFIG_BT_HCIUART_RTL { "OBDA0623", (kernel_ulong_t)&h5_data_rtl8723bs }, { "OBDA8723", (kernel_ulong_t)&h5_data_rtl8723bs }, #endif { }, }; MODULE_DEVICE_TABLE(acpi, h5_acpi_match); #endif static const struct dev_pm_ops h5_serdev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(h5_serdev_suspend, h5_serdev_resume) SET_RUNTIME_PM_OPS(h5_serdev_suspend, h5_serdev_resume, NULL) }; static const struct of_device_id rtl_bluetooth_of_match[] = { #ifdef CONFIG_BT_HCIUART_RTL { .compatible = "realtek,rtl8822cs-bt", .data = (const void *)&h5_data_rtl8822cs }, { .compatible = "realtek,rtl8723bs-bt", .data = (const void *)&h5_data_rtl8723bs }, { .compatible = "realtek,rtl8723cs-bt", .data = (const void *)&h5_data_rtl8723bs }, { .compatible = "realtek,rtl8723ds-bt", .data = (const void *)&h5_data_rtl8723bs }, #endif { }, }; MODULE_DEVICE_TABLE(of, rtl_bluetooth_of_match); static struct serdev_device_driver h5_serdev_driver = { .probe = h5_serdev_probe, .remove = h5_serdev_remove, .driver = { .name = "hci_uart_h5", .acpi_match_table = ACPI_PTR(h5_acpi_match), .pm = &h5_serdev_pm_ops, .of_match_table = rtl_bluetooth_of_match, }, }; int __init h5_init(void) { 
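	/* Register the serdev driver (DT/ACPI enumerated devices) first, then
	 * the H5 protocol for the tty line-discipline path. */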
serdev_device_driver_register(&h5_serdev_driver); return hci_uart_register_proto(&h5p); } int __exit h5_deinit(void) { serdev_device_driver_unregister(&h5_serdev_driver); return hci_uart_unregister_proto(&h5p); }
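
The four-byte H5 header built in h5_prepare_pkt() and validated in
h5_rx_3wire_hdr() above packs seq, ack, a CRC-present flag, a reliability
flag, the packet type and a 12-bit length, protected by a one-byte sum
complement. A stand-alone sketch of the same bit layout, mirroring the
H5_HDR_* macros above (both helper names are invented):

#include <stdint.h>
#include <stdbool.h>

/* Pack a header the way h5_prepare_pkt() does; bit 6 (CRC present) is
 * left clear because the driver never appends a CRC on transmit. */
static void h5_hdr_pack(uint8_t hdr[4], uint8_t seq, uint8_t ack,
			bool reliable, uint8_t type, uint16_t len)
{
	hdr[0] = (uint8_t)((ack & 0x07) << 3);
	if (reliable)
		hdr[0] |= 0x80 | (seq & 0x07);
	hdr[1] = (uint8_t)((type & 0x0f) | ((len & 0x0f) << 4));
	hdr[2] = (uint8_t)(len >> 4);	/* len must fit in 12 bits */
	hdr[3] = (uint8_t)(~(hdr[0] + hdr[1] + hdr[2]));
}

/* The receive-side sanity check from h5_rx_3wire_hdr(): all four bytes
 * must sum to 0xff modulo 256. */
static bool h5_hdr_ok(const uint8_t hdr[4])
{
	return ((hdr[0] + hdr[1] + hdr[2] + hdr[3]) & 0xff) == 0xff;
}

Any single corrupted header byte breaks the sum-to-0xff property, which is
exactly what h5_rx_3wire_hdr() verifies before trusting the length field.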
linux-master
drivers/bluetooth/hci_h5.c
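
hci_h5.c above SLIP-frames every packet: payload bytes 0xc0 and 0xdb are
replaced by two-byte escape sequences, which is why h5_prepare_pkt() sizes
its buffer as (len + 6) * 2 + 2 for the worst case. A stand-alone sketch of
the transform that h5_slip_one_byte() and h5_unslip_one_byte() apply one
byte at a time (both function names below are invented):

#include <stdint.h>
#include <stddef.h>

#define SLIP_DELIMITER	0xc0
#define SLIP_ESC	0xdb
#define SLIP_ESC_DELIM	0xdc
#define SLIP_ESC_ESC	0xdd

/* Escape "len" bytes from src into dst; dst must hold up to 2 * len
 * bytes since every byte may expand to an escape pair. */
static size_t slip_encode(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t out = 0;

	for (size_t i = 0; i < len; i++) {
		switch (src[i]) {
		case SLIP_DELIMITER:
			dst[out++] = SLIP_ESC;
			dst[out++] = SLIP_ESC_DELIM;
			break;
		case SLIP_ESC:
			dst[out++] = SLIP_ESC;
			dst[out++] = SLIP_ESC_ESC;
			break;
		default:
			dst[out++] = src[i];
		}
	}
	return out;
}

/* Inverse transform; returns the decoded length, or 0 on an invalid
 * escape sequence (the BT_ERR path in h5_unslip_one_byte()). */
static size_t slip_decode(uint8_t *dst, const uint8_t *src, size_t len)
{
	size_t out = 0;

	for (size_t i = 0; i < len; i++) {
		if (src[i] != SLIP_ESC) {
			dst[out++] = src[i];
			continue;
		}
		if (++i == len)
			return 0;
		if (src[i] == SLIP_ESC_DELIM)
			dst[out++] = SLIP_DELIMITER;
		else if (src[i] == SLIP_ESC_ESC)
			dst[out++] = SLIP_ESC;
		else
			return 0;
	}
	return out;
}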
// SPDX-License-Identifier: GPL-2.0-only /* * Bluetooth Software UART Qualcomm protocol * * HCI_IBS (HCI In-Band Sleep) is Qualcomm's power management * protocol extension to H4. * * Copyright (C) 2007 Texas Instruments, Inc. * Copyright (c) 2010, 2012, 2018 The Linux Foundation. All rights reserved. * * Acknowledgements: * This file is based on hci_ll.c, which was... * Written by Ohad Ben-Cohen <[email protected]> * which was in turn based on hci_h4.c, which was written * by Maxim Krasnyansky and Marcel Holtmann. */ #include <linux/kernel.h> #include <linux/clk.h> #include <linux/completion.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/devcoredump.h> #include <linux/device.h> #include <linux/gpio/consumer.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/of.h> #include <linux/acpi.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/serdev.h> #include <linux/mutex.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" #include "btqca.h" /* HCI_IBS protocol messages */ #define HCI_IBS_SLEEP_IND 0xFE #define HCI_IBS_WAKE_IND 0xFD #define HCI_IBS_WAKE_ACK 0xFC #define HCI_MAX_IBS_SIZE 10 #define IBS_WAKE_RETRANS_TIMEOUT_MS 100 #define IBS_BTSOC_TX_IDLE_TIMEOUT_MS 200 #define IBS_HOST_TX_IDLE_TIMEOUT_MS 2000 #define CMD_TRANS_TIMEOUT_MS 100 #define MEMDUMP_TIMEOUT_MS 8000 #define IBS_DISABLE_SSR_TIMEOUT_MS \ (MEMDUMP_TIMEOUT_MS + FW_DOWNLOAD_TIMEOUT_MS) #define FW_DOWNLOAD_TIMEOUT_MS 3000 /* susclk rate */ #define SUSCLK_RATE_32KHZ 32768 /* Controller debug log header */ #define QCA_DEBUG_HANDLE 0x2EDC /* max retry count when init fails */ #define MAX_INIT_RETRIES 3 /* Controller dump header */ #define QCA_SSR_DUMP_HANDLE 0x0108 #define QCA_DUMP_PACKET_SIZE 255 #define QCA_LAST_SEQUENCE_NUM 0xFFFF #define QCA_CRASHBYTE_PACKET_LEN 1096 #define QCA_MEMDUMP_BYTE 0xFB enum qca_flags { QCA_IBS_DISABLED, QCA_DROP_VENDOR_EVENT, QCA_SUSPENDING, QCA_MEMDUMP_COLLECTION, QCA_HW_ERROR_EVENT, QCA_SSR_TRIGGERED, QCA_BT_OFF, QCA_ROM_FW, QCA_DEBUGFS_CREATED, }; enum qca_capabilities { QCA_CAP_WIDEBAND_SPEECH = BIT(0), QCA_CAP_VALID_LE_STATES = BIT(1), }; /* HCI_IBS transmit side sleep protocol states */ enum tx_ibs_states { HCI_IBS_TX_ASLEEP, HCI_IBS_TX_WAKING, HCI_IBS_TX_AWAKE, }; /* HCI_IBS receive side sleep protocol states */ enum rx_states { HCI_IBS_RX_ASLEEP, HCI_IBS_RX_AWAKE, }; /* HCI_IBS transmit and receive side clock state vote */ enum hci_ibs_clock_state_vote { HCI_IBS_VOTE_STATS_UPDATE, HCI_IBS_TX_VOTE_CLOCK_ON, HCI_IBS_TX_VOTE_CLOCK_OFF, HCI_IBS_RX_VOTE_CLOCK_ON, HCI_IBS_RX_VOTE_CLOCK_OFF, }; /* Controller memory dump states */ enum qca_memdump_states { QCA_MEMDUMP_IDLE, QCA_MEMDUMP_COLLECTING, QCA_MEMDUMP_COLLECTED, QCA_MEMDUMP_TIMEOUT, }; struct qca_memdump_info { u32 current_seq_no; u32 received_dump; u32 ram_dump_size; }; struct qca_memdump_event_hdr { __u8 evt; __u8 plen; __u16 opcode; __le16 seq_no; __u8 reserved; } __packed; struct qca_dump_size { __le32 dump_size; } __packed; struct qca_data { struct hci_uart *hu; struct sk_buff *rx_skb; struct sk_buff_head txq; struct sk_buff_head tx_wait_q; /* HCI_IBS wait queue */ struct sk_buff_head rx_memdump_q; /* Memdump wait queue */ spinlock_t hci_ibs_lock; /* HCI_IBS state lock */ u8 tx_ibs_state; /* HCI_IBS transmit side power state*/ u8 rx_ibs_state; /* HCI_IBS receive side power state */ bool tx_vote; /* Clock must be on for TX */ bool rx_vote; /* Clock must be on for RX */ 
struct timer_list tx_idle_timer; u32 tx_idle_delay; struct timer_list wake_retrans_timer; u32 wake_retrans; struct workqueue_struct *workqueue; struct work_struct ws_awake_rx; struct work_struct ws_awake_device; struct work_struct ws_rx_vote_off; struct work_struct ws_tx_vote_off; struct work_struct ctrl_memdump_evt; struct delayed_work ctrl_memdump_timeout; struct qca_memdump_info *qca_memdump; unsigned long flags; struct completion drop_ev_comp; wait_queue_head_t suspend_wait_q; enum qca_memdump_states memdump_state; struct mutex hci_memdump_lock; u16 fw_version; u16 controller_id; /* For debugging purpose */ u64 ibs_sent_wacks; u64 ibs_sent_slps; u64 ibs_sent_wakes; u64 ibs_recv_wacks; u64 ibs_recv_slps; u64 ibs_recv_wakes; u64 vote_last_jif; u32 vote_on_ms; u32 vote_off_ms; u64 tx_votes_on; u64 rx_votes_on; u64 tx_votes_off; u64 rx_votes_off; u64 votes_on; u64 votes_off; }; enum qca_speed_type { QCA_INIT_SPEED = 1, QCA_OPER_SPEED }; /* * Voltage regulator information required for configuring the * QCA Bluetooth chipset */ struct qca_vreg { const char *name; unsigned int load_uA; }; struct qca_device_data { enum qca_btsoc_type soc_type; struct qca_vreg *vregs; size_t num_vregs; uint32_t capabilities; }; /* * Platform data for the QCA Bluetooth power driver. */ struct qca_power { struct device *dev; struct regulator_bulk_data *vreg_bulk; int num_vregs; bool vregs_on; }; struct qca_serdev { struct hci_uart serdev_hu; struct gpio_desc *bt_en; struct gpio_desc *sw_ctrl; struct clk *susclk; enum qca_btsoc_type btsoc_type; struct qca_power *bt_power; u32 init_speed; u32 oper_speed; const char *firmware_name; }; static int qca_regulator_enable(struct qca_serdev *qcadev); static void qca_regulator_disable(struct qca_serdev *qcadev); static void qca_power_shutdown(struct hci_uart *hu); static int qca_power_off(struct hci_dev *hdev); static void qca_controller_memdump(struct work_struct *work); static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb); static enum qca_btsoc_type qca_soc_type(struct hci_uart *hu) { enum qca_btsoc_type soc_type; if (hu->serdev) { struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev); soc_type = qsd->btsoc_type; } else { soc_type = QCA_ROME; } return soc_type; } static const char *qca_get_firmware_name(struct hci_uart *hu) { if (hu->serdev) { struct qca_serdev *qsd = serdev_device_get_drvdata(hu->serdev); return qsd->firmware_name; } else { return NULL; } } static void __serial_clock_on(struct tty_struct *tty) { /* TODO: Some chipset requires to enable UART clock on client * side to save power consumption or manual work is required. * Please put your code to control UART clock here if needed */ } static void __serial_clock_off(struct tty_struct *tty) { /* TODO: Some chipset requires to disable UART clock on client * side to save power consumption or manual work is required. 
* Please put your code to control UART clock off here if needed */ } /* serial_clock_vote needs to be called with the ibs lock held */ static void serial_clock_vote(unsigned long vote, struct hci_uart *hu) { struct qca_data *qca = hu->priv; unsigned int diff; bool old_vote = (qca->tx_vote | qca->rx_vote); bool new_vote; switch (vote) { case HCI_IBS_VOTE_STATS_UPDATE: diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); if (old_vote) qca->vote_off_ms += diff; else qca->vote_on_ms += diff; return; case HCI_IBS_TX_VOTE_CLOCK_ON: qca->tx_vote = true; qca->tx_votes_on++; break; case HCI_IBS_RX_VOTE_CLOCK_ON: qca->rx_vote = true; qca->rx_votes_on++; break; case HCI_IBS_TX_VOTE_CLOCK_OFF: qca->tx_vote = false; qca->tx_votes_off++; break; case HCI_IBS_RX_VOTE_CLOCK_OFF: qca->rx_vote = false; qca->rx_votes_off++; break; default: BT_ERR("Voting irregularity"); return; } new_vote = qca->rx_vote | qca->tx_vote; if (new_vote != old_vote) { if (new_vote) __serial_clock_on(hu->tty); else __serial_clock_off(hu->tty); BT_DBG("Vote serial clock %s(%s)", new_vote ? "true" : "false", vote ? "true" : "false"); diff = jiffies_to_msecs(jiffies - qca->vote_last_jif); if (new_vote) { qca->votes_on++; qca->vote_off_ms += diff; } else { qca->votes_off++; qca->vote_on_ms += diff; } qca->vote_last_jif = jiffies; } } /* Builds and sends an HCI_IBS command packet. * These are very simple packets with only 1 cmd byte. */ static int send_hci_ibs_cmd(u8 cmd, struct hci_uart *hu) { int err = 0; struct sk_buff *skb = NULL; struct qca_data *qca = hu->priv; BT_DBG("hu %p send hci ibs cmd 0x%x", hu, cmd); skb = bt_skb_alloc(1, GFP_ATOMIC); if (!skb) { BT_ERR("Failed to allocate memory for HCI_IBS packet"); return -ENOMEM; } /* Assign HCI_IBS type */ skb_put_u8(skb, cmd); skb_queue_tail(&qca->txq, skb); return err; } static void qca_wq_awake_device(struct work_struct *work) { struct qca_data *qca = container_of(work, struct qca_data, ws_awake_device); struct hci_uart *hu = qca->hu; unsigned long retrans_delay; unsigned long flags; BT_DBG("hu %p wq awake device", hu); /* Vote for serial clock */ serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_ON, hu); spin_lock_irqsave(&qca->hci_ibs_lock, flags); /* Send wake indication to device */ if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) BT_ERR("Failed to send WAKE to device"); qca->ibs_sent_wakes++; /* Start retransmit timer */ retrans_delay = msecs_to_jiffies(qca->wake_retrans); mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); /* Actually send the packets */ hci_uart_tx_wakeup(hu); } static void qca_wq_awake_rx(struct work_struct *work) { struct qca_data *qca = container_of(work, struct qca_data, ws_awake_rx); struct hci_uart *hu = qca->hu; unsigned long flags; BT_DBG("hu %p wq awake rx", hu); serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_ON, hu); spin_lock_irqsave(&qca->hci_ibs_lock, flags); qca->rx_ibs_state = HCI_IBS_RX_AWAKE; /* Always acknowledge device wake up, * sending IBS message doesn't count as TX ON. 
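 * send_hci_ibs_cmd() only queues the ACK byte on txq; the
 * hci_uart_tx_wakeup() call at the end of this function flushes it.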
*/ if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) BT_ERR("Failed to acknowledge device wake up"); qca->ibs_sent_wacks++; spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); /* Actually send the packets */ hci_uart_tx_wakeup(hu); } static void qca_wq_serial_rx_clock_vote_off(struct work_struct *work) { struct qca_data *qca = container_of(work, struct qca_data, ws_rx_vote_off); struct hci_uart *hu = qca->hu; BT_DBG("hu %p rx clock vote off", hu); serial_clock_vote(HCI_IBS_RX_VOTE_CLOCK_OFF, hu); } static void qca_wq_serial_tx_clock_vote_off(struct work_struct *work) { struct qca_data *qca = container_of(work, struct qca_data, ws_tx_vote_off); struct hci_uart *hu = qca->hu; BT_DBG("hu %p tx clock vote off", hu); /* Run HCI tx handling unlocked */ hci_uart_tx_wakeup(hu); /* Now that message queued to tty driver, vote for tty clocks off. * It is up to the tty driver to pend the clocks off until tx done. */ serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu); } static void hci_ibs_tx_idle_timeout(struct timer_list *t) { struct qca_data *qca = from_timer(qca, t, tx_idle_timer); struct hci_uart *hu = qca->hu; unsigned long flags; BT_DBG("hu %p idle timeout in %d state", hu, qca->tx_ibs_state); spin_lock_irqsave_nested(&qca->hci_ibs_lock, flags, SINGLE_DEPTH_NESTING); switch (qca->tx_ibs_state) { case HCI_IBS_TX_AWAKE: /* TX_IDLE, go to SLEEP */ if (send_hci_ibs_cmd(HCI_IBS_SLEEP_IND, hu) < 0) { BT_ERR("Failed to send SLEEP to device"); break; } qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; qca->ibs_sent_slps++; queue_work(qca->workqueue, &qca->ws_tx_vote_off); break; case HCI_IBS_TX_ASLEEP: case HCI_IBS_TX_WAKING: default: BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); break; } spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); } static void hci_ibs_wake_retrans_timeout(struct timer_list *t) { struct qca_data *qca = from_timer(qca, t, wake_retrans_timer); struct hci_uart *hu = qca->hu; unsigned long flags, retrans_delay; bool retransmit = false; BT_DBG("hu %p wake retransmit timeout in %d state", hu, qca->tx_ibs_state); spin_lock_irqsave_nested(&qca->hci_ibs_lock, flags, SINGLE_DEPTH_NESTING); /* Don't retransmit the HCI_IBS_WAKE_IND when suspending. */ if (test_bit(QCA_SUSPENDING, &qca->flags)) { spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); return; } switch (qca->tx_ibs_state) { case HCI_IBS_TX_WAKING: /* No WAKE_ACK, retransmit WAKE */ retransmit = true; if (send_hci_ibs_cmd(HCI_IBS_WAKE_IND, hu) < 0) { BT_ERR("Failed to acknowledge device wake up"); break; } qca->ibs_sent_wakes++; retrans_delay = msecs_to_jiffies(qca->wake_retrans); mod_timer(&qca->wake_retrans_timer, jiffies + retrans_delay); break; case HCI_IBS_TX_ASLEEP: case HCI_IBS_TX_AWAKE: default: BT_ERR("Spurious timeout tx state %d", qca->tx_ibs_state); break; } spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); if (retransmit) hci_uart_tx_wakeup(hu); } static void qca_controller_memdump_timeout(struct work_struct *work) { struct qca_data *qca = container_of(work, struct qca_data, ctrl_memdump_timeout.work); struct hci_uart *hu = qca->hu; mutex_lock(&qca->hci_memdump_lock); if (test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { qca->memdump_state = QCA_MEMDUMP_TIMEOUT; if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { /* Inject hw error event to reset the device * and driver. 
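 * (hci_reset_dev() injects a synthetic HCI hardware error event
 * into the core.)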
*/ hci_reset_dev(hu->hdev); } } mutex_unlock(&qca->hci_memdump_lock); } /* Initialize protocol */ static int qca_open(struct hci_uart *hu) { struct qca_serdev *qcadev; struct qca_data *qca; BT_DBG("hu %p qca_open", hu); if (!hci_uart_has_flow_control(hu)) return -EOPNOTSUPP; qca = kzalloc(sizeof(struct qca_data), GFP_KERNEL); if (!qca) return -ENOMEM; skb_queue_head_init(&qca->txq); skb_queue_head_init(&qca->tx_wait_q); skb_queue_head_init(&qca->rx_memdump_q); spin_lock_init(&qca->hci_ibs_lock); mutex_init(&qca->hci_memdump_lock); qca->workqueue = alloc_ordered_workqueue("qca_wq", 0); if (!qca->workqueue) { BT_ERR("QCA Workqueue not initialized properly"); kfree(qca); return -ENOMEM; } INIT_WORK(&qca->ws_awake_rx, qca_wq_awake_rx); INIT_WORK(&qca->ws_awake_device, qca_wq_awake_device); INIT_WORK(&qca->ws_rx_vote_off, qca_wq_serial_rx_clock_vote_off); INIT_WORK(&qca->ws_tx_vote_off, qca_wq_serial_tx_clock_vote_off); INIT_WORK(&qca->ctrl_memdump_evt, qca_controller_memdump); INIT_DELAYED_WORK(&qca->ctrl_memdump_timeout, qca_controller_memdump_timeout); init_waitqueue_head(&qca->suspend_wait_q); qca->hu = hu; init_completion(&qca->drop_ev_comp); /* Assume we start with both sides asleep -- extra wakes OK */ qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; qca->vote_last_jif = jiffies; hu->priv = qca; if (hu->serdev) { qcadev = serdev_device_get_drvdata(hu->serdev); switch (qcadev->btsoc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: case QCA_WCN6750: hu->init_speed = qcadev->init_speed; break; default: break; } if (qcadev->oper_speed) hu->oper_speed = qcadev->oper_speed; } timer_setup(&qca->wake_retrans_timer, hci_ibs_wake_retrans_timeout, 0); qca->wake_retrans = IBS_WAKE_RETRANS_TIMEOUT_MS; timer_setup(&qca->tx_idle_timer, hci_ibs_tx_idle_timeout, 0); qca->tx_idle_delay = IBS_HOST_TX_IDLE_TIMEOUT_MS; BT_DBG("HCI_UART_QCA open, tx_idle_delay=%u, wake_retrans=%u", qca->tx_idle_delay, qca->wake_retrans); return 0; } static void qca_debugfs_init(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; struct dentry *ibs_dir; umode_t mode; if (!hdev->debugfs) return; if (test_and_set_bit(QCA_DEBUGFS_CREATED, &qca->flags)) return; ibs_dir = debugfs_create_dir("ibs", hdev->debugfs); /* read only */ mode = 0444; debugfs_create_u8("tx_ibs_state", mode, ibs_dir, &qca->tx_ibs_state); debugfs_create_u8("rx_ibs_state", mode, ibs_dir, &qca->rx_ibs_state); debugfs_create_u64("ibs_sent_sleeps", mode, ibs_dir, &qca->ibs_sent_slps); debugfs_create_u64("ibs_sent_wakes", mode, ibs_dir, &qca->ibs_sent_wakes); debugfs_create_u64("ibs_sent_wake_acks", mode, ibs_dir, &qca->ibs_sent_wacks); debugfs_create_u64("ibs_recv_sleeps", mode, ibs_dir, &qca->ibs_recv_slps); debugfs_create_u64("ibs_recv_wakes", mode, ibs_dir, &qca->ibs_recv_wakes); debugfs_create_u64("ibs_recv_wake_acks", mode, ibs_dir, &qca->ibs_recv_wacks); debugfs_create_bool("tx_vote", mode, ibs_dir, &qca->tx_vote); debugfs_create_u64("tx_votes_on", mode, ibs_dir, &qca->tx_votes_on); debugfs_create_u64("tx_votes_off", mode, ibs_dir, &qca->tx_votes_off); debugfs_create_bool("rx_vote", mode, ibs_dir, &qca->rx_vote); debugfs_create_u64("rx_votes_on", mode, ibs_dir, &qca->rx_votes_on); debugfs_create_u64("rx_votes_off", mode, ibs_dir, &qca->rx_votes_off); debugfs_create_u64("votes_on", mode, ibs_dir, &qca->votes_on); debugfs_create_u64("votes_off", mode, ibs_dir, &qca->votes_off); debugfs_create_u32("vote_on_ms", mode, ibs_dir, &qca->vote_on_ms); 
debugfs_create_u32("vote_off_ms", mode, ibs_dir, &qca->vote_off_ms); /* read/write */ mode = 0644; debugfs_create_u32("wake_retrans", mode, ibs_dir, &qca->wake_retrans); debugfs_create_u32("tx_idle_delay", mode, ibs_dir, &qca->tx_idle_delay); } /* Flush protocol data */ static int qca_flush(struct hci_uart *hu) { struct qca_data *qca = hu->priv; BT_DBG("hu %p qca flush", hu); skb_queue_purge(&qca->tx_wait_q); skb_queue_purge(&qca->txq); return 0; } /* Close protocol */ static int qca_close(struct hci_uart *hu) { struct qca_data *qca = hu->priv; BT_DBG("hu %p qca close", hu); serial_clock_vote(HCI_IBS_VOTE_STATS_UPDATE, hu); skb_queue_purge(&qca->tx_wait_q); skb_queue_purge(&qca->txq); skb_queue_purge(&qca->rx_memdump_q); /* * Shut the timers down so they can't be rearmed when * destroy_workqueue() drains pending work which in turn might try * to arm a timer. After shutdown rearm attempts are silently * ignored by the timer core code. */ timer_shutdown_sync(&qca->tx_idle_timer); timer_shutdown_sync(&qca->wake_retrans_timer); destroy_workqueue(qca->workqueue); qca->hu = NULL; kfree_skb(qca->rx_skb); hu->priv = NULL; kfree(qca); return 0; } /* Called upon a wake-up-indication from the device. */ static void device_want_to_wakeup(struct hci_uart *hu) { unsigned long flags; struct qca_data *qca = hu->priv; BT_DBG("hu %p want to wake up", hu); spin_lock_irqsave(&qca->hci_ibs_lock, flags); qca->ibs_recv_wakes++; /* Don't wake the rx up when suspending. */ if (test_bit(QCA_SUSPENDING, &qca->flags)) { spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); return; } switch (qca->rx_ibs_state) { case HCI_IBS_RX_ASLEEP: /* Make sure clock is on - we may have turned clock off since * receiving the wake up indicator awake rx clock. */ queue_work(qca->workqueue, &qca->ws_awake_rx); spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); return; case HCI_IBS_RX_AWAKE: /* Always acknowledge device wake up, * sending IBS message doesn't count as TX ON. */ if (send_hci_ibs_cmd(HCI_IBS_WAKE_ACK, hu) < 0) { BT_ERR("Failed to acknowledge device wake up"); break; } qca->ibs_sent_wacks++; break; default: /* Any other state is illegal */ BT_ERR("Received HCI_IBS_WAKE_IND in rx state %d", qca->rx_ibs_state); break; } spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); /* Actually send the packets */ hci_uart_tx_wakeup(hu); } /* Called upon a sleep-indication from the device. */ static void device_want_to_sleep(struct hci_uart *hu) { unsigned long flags; struct qca_data *qca = hu->priv; BT_DBG("hu %p want to sleep in %d state", hu, qca->rx_ibs_state); spin_lock_irqsave(&qca->hci_ibs_lock, flags); qca->ibs_recv_slps++; switch (qca->rx_ibs_state) { case HCI_IBS_RX_AWAKE: /* Update state */ qca->rx_ibs_state = HCI_IBS_RX_ASLEEP; /* Vote off rx clock under workqueue */ queue_work(qca->workqueue, &qca->ws_rx_vote_off); break; case HCI_IBS_RX_ASLEEP: break; default: /* Any other state is illegal */ BT_ERR("Received HCI_IBS_SLEEP_IND in rx state %d", qca->rx_ibs_state); break; } wake_up_interruptible(&qca->suspend_wait_q); spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); } /* Called upon wake-up-acknowledgement from the device */ static void device_woke_up(struct hci_uart *hu) { unsigned long flags, idle_delay; struct qca_data *qca = hu->priv; struct sk_buff *skb = NULL; BT_DBG("hu %p woke up", hu); spin_lock_irqsave(&qca->hci_ibs_lock, flags); qca->ibs_recv_wacks++; /* Don't react to the wake-up-acknowledgment when suspending. 
*/ if (test_bit(QCA_SUSPENDING, &qca->flags)) { spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); return; } switch (qca->tx_ibs_state) { case HCI_IBS_TX_AWAKE: /* Expect one if we send 2 WAKEs */ BT_DBG("Received HCI_IBS_WAKE_ACK in tx state %d", qca->tx_ibs_state); break; case HCI_IBS_TX_WAKING: /* Send pending packets */ while ((skb = skb_dequeue(&qca->tx_wait_q))) skb_queue_tail(&qca->txq, skb); /* Switch timers and change state to HCI_IBS_TX_AWAKE */ del_timer(&qca->wake_retrans_timer); idle_delay = msecs_to_jiffies(qca->tx_idle_delay); mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); qca->tx_ibs_state = HCI_IBS_TX_AWAKE; break; case HCI_IBS_TX_ASLEEP: default: BT_ERR("Received HCI_IBS_WAKE_ACK in tx state %d", qca->tx_ibs_state); break; } spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); /* Actually send the packets */ hci_uart_tx_wakeup(hu); } /* Enqueue frame for transmittion (padding, crc, etc) may be called from * two simultaneous tasklets. */ static int qca_enqueue(struct hci_uart *hu, struct sk_buff *skb) { unsigned long flags = 0, idle_delay; struct qca_data *qca = hu->priv; BT_DBG("hu %p qca enq skb %p tx_ibs_state %d", hu, skb, qca->tx_ibs_state); if (test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { /* As SSR is in progress, ignore the packets */ bt_dev_dbg(hu->hdev, "SSR is in progress"); kfree_skb(skb); return 0; } /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); spin_lock_irqsave(&qca->hci_ibs_lock, flags); /* Don't go to sleep in middle of patch download or * Out-Of-Band(GPIOs control) sleep is selected. * Don't wake the device up when suspending. */ if (test_bit(QCA_IBS_DISABLED, &qca->flags) || test_bit(QCA_SUSPENDING, &qca->flags)) { skb_queue_tail(&qca->txq, skb); spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); return 0; } /* Act according to current state */ switch (qca->tx_ibs_state) { case HCI_IBS_TX_AWAKE: BT_DBG("Device awake, sending normally"); skb_queue_tail(&qca->txq, skb); idle_delay = msecs_to_jiffies(qca->tx_idle_delay); mod_timer(&qca->tx_idle_timer, jiffies + idle_delay); break; case HCI_IBS_TX_ASLEEP: BT_DBG("Device asleep, waking up and queueing packet"); /* Save packet for later */ skb_queue_tail(&qca->tx_wait_q, skb); qca->tx_ibs_state = HCI_IBS_TX_WAKING; /* Schedule a work queue to wake up device */ queue_work(qca->workqueue, &qca->ws_awake_device); break; case HCI_IBS_TX_WAKING: BT_DBG("Device waking up, queueing packet"); /* Transient state; just keep packet for later */ skb_queue_tail(&qca->tx_wait_q, skb); break; default: BT_ERR("Illegal tx state: %d (losing packet)", qca->tx_ibs_state); dev_kfree_skb_irq(skb); break; } spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); return 0; } static int qca_ibs_sleep_ind(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_SLEEP_IND); device_want_to_sleep(hu); kfree_skb(skb); return 0; } static int qca_ibs_wake_ind(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_IND); device_want_to_wakeup(hu); kfree_skb(skb); return 0; } static int qca_ibs_wake_ack(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); BT_DBG("hu %p recv hci ibs cmd 0x%x", hu, HCI_IBS_WAKE_ACK); device_woke_up(hu); kfree_skb(skb); return 0; } static int qca_recv_acl_data(struct hci_dev *hdev, struct sk_buff *skb) { /* We receive debug logs from chip as an ACL packets. 
* Instead of sending the data to ACL to decode the * received data, we are pushing them to the above layers * as a diagnostic packet. */ if (get_unaligned_le16(skb->data) == QCA_DEBUG_HANDLE) return hci_recv_diag(hdev, skb); return hci_recv_frame(hdev, skb); } static void qca_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; char buf[80]; snprintf(buf, sizeof(buf), "Controller Name: 0x%x\n", qca->controller_id); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Firmware Version: 0x%x\n", qca->fw_version); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Vendor:Qualcomm\n"); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Driver: %s\n", hu->serdev->dev.driver->name); skb_put_data(skb, buf, strlen(buf)); } static void qca_controller_memdump(struct work_struct *work) { struct qca_data *qca = container_of(work, struct qca_data, ctrl_memdump_evt); struct hci_uart *hu = qca->hu; struct sk_buff *skb; struct qca_memdump_event_hdr *cmd_hdr; struct qca_memdump_info *qca_memdump = qca->qca_memdump; struct qca_dump_size *dump; u16 seq_no; u32 rx_size; int ret = 0; enum qca_btsoc_type soc_type = qca_soc_type(hu); while ((skb = skb_dequeue(&qca->rx_memdump_q))) { mutex_lock(&qca->hci_memdump_lock); /* Skip processing the received packets if timeout detected * or memdump collection completed. */ if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || qca->memdump_state == QCA_MEMDUMP_COLLECTED) { mutex_unlock(&qca->hci_memdump_lock); return; } if (!qca_memdump) { qca_memdump = kzalloc(sizeof(struct qca_memdump_info), GFP_ATOMIC); if (!qca_memdump) { mutex_unlock(&qca->hci_memdump_lock); return; } qca->qca_memdump = qca_memdump; } qca->memdump_state = QCA_MEMDUMP_COLLECTING; cmd_hdr = (void *) skb->data; seq_no = __le16_to_cpu(cmd_hdr->seq_no); skb_pull(skb, sizeof(struct qca_memdump_event_hdr)); if (!seq_no) { /* This is the first frame of memdump packet from * the controller, Disable IBS to recevie dump * with out any interruption, ideally time required for * the controller to send the dump is 8 seconds. let us * start timer to handle this asynchronous activity. */ set_bit(QCA_IBS_DISABLED, &qca->flags); set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); dump = (void *) skb->data; qca_memdump->ram_dump_size = __le32_to_cpu(dump->dump_size); if (!(qca_memdump->ram_dump_size)) { bt_dev_err(hu->hdev, "Rx invalid memdump size"); kfree(qca_memdump); kfree_skb(skb); mutex_unlock(&qca->hci_memdump_lock); return; } queue_delayed_work(qca->workqueue, &qca->ctrl_memdump_timeout, msecs_to_jiffies(MEMDUMP_TIMEOUT_MS)); skb_pull(skb, sizeof(qca_memdump->ram_dump_size)); qca_memdump->current_seq_no = 0; qca_memdump->received_dump = 0; ret = hci_devcd_init(hu->hdev, qca_memdump->ram_dump_size); bt_dev_info(hu->hdev, "hci_devcd_init Return:%d", ret); if (ret < 0) { kfree(qca->qca_memdump); qca->qca_memdump = NULL; qca->memdump_state = QCA_MEMDUMP_COLLECTED; cancel_delayed_work(&qca->ctrl_memdump_timeout); clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); mutex_unlock(&qca->hci_memdump_lock); return; } bt_dev_info(hu->hdev, "QCA collecting dump of size:%u", qca_memdump->ram_dump_size); } /* If sequence no 0 is missed then there is no point in * accepting the other sequences. 
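 * (QCA_MEMDUMP_COLLECTION is only set above once frame 0 has arrived.)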
*/ if (!test_bit(QCA_MEMDUMP_COLLECTION, &qca->flags)) { bt_dev_err(hu->hdev, "QCA: Discarding other packets"); kfree(qca_memdump); kfree_skb(skb); mutex_unlock(&qca->hci_memdump_lock); return; } /* There could be chance of missing some packets from * the controller. In such cases let us store the dummy * packets in the buffer. */ /* For QCA6390, controller does not lost packets but * sequence number field of packet sometimes has error * bits, so skip this checking for missing packet. */ while ((seq_no > qca_memdump->current_seq_no + 1) && (soc_type != QCA_QCA6390) && seq_no != QCA_LAST_SEQUENCE_NUM) { bt_dev_err(hu->hdev, "QCA controller missed packet:%d", qca_memdump->current_seq_no); rx_size = qca_memdump->received_dump; rx_size += QCA_DUMP_PACKET_SIZE; if (rx_size > qca_memdump->ram_dump_size) { bt_dev_err(hu->hdev, "QCA memdump received %d, no space for missed packet", qca_memdump->received_dump); break; } hci_devcd_append_pattern(hu->hdev, 0x00, QCA_DUMP_PACKET_SIZE); qca_memdump->received_dump += QCA_DUMP_PACKET_SIZE; qca_memdump->current_seq_no++; } rx_size = qca_memdump->received_dump + skb->len; if (rx_size <= qca_memdump->ram_dump_size) { if ((seq_no != QCA_LAST_SEQUENCE_NUM) && (seq_no != qca_memdump->current_seq_no)) { bt_dev_err(hu->hdev, "QCA memdump unexpected packet %d", seq_no); } bt_dev_dbg(hu->hdev, "QCA memdump packet %d with length %d", seq_no, skb->len); hci_devcd_append(hu->hdev, skb); qca_memdump->current_seq_no += 1; qca_memdump->received_dump = rx_size; } else { bt_dev_err(hu->hdev, "QCA memdump received no space for packet %d", qca_memdump->current_seq_no); } if (seq_no == QCA_LAST_SEQUENCE_NUM) { bt_dev_info(hu->hdev, "QCA memdump Done, received %d, total %d", qca_memdump->received_dump, qca_memdump->ram_dump_size); hci_devcd_complete(hu->hdev); cancel_delayed_work(&qca->ctrl_memdump_timeout); kfree(qca->qca_memdump); qca->qca_memdump = NULL; qca->memdump_state = QCA_MEMDUMP_COLLECTED; clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); } mutex_unlock(&qca->hci_memdump_lock); } } static int qca_controller_memdump_event(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; set_bit(QCA_SSR_TRIGGERED, &qca->flags); skb_queue_tail(&qca->rx_memdump_q, skb); queue_work(qca->workqueue, &qca->ctrl_memdump_evt); return 0; } static int qca_recv_event(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; if (test_bit(QCA_DROP_VENDOR_EVENT, &qca->flags)) { struct hci_event_hdr *hdr = (void *)skb->data; /* For the WCN3990 the vendor command for a baudrate change * isn't sent as synchronous HCI command, because the * controller sends the corresponding vendor event with the * new baudrate. The event is received and properly decoded * after changing the baudrate of the host port. It needs to * be dropped, otherwise it can be misinterpreted as * response to a later firmware download command (also a * vendor command). */ if (hdr->evt == HCI_EV_VENDOR) complete(&qca->drop_ev_comp); kfree_skb(skb); return 0; } /* We receive chip memory dump as an event packet, With a dedicated * handler followed by a hardware error event. When this event is * received we store dump into a file before closing hci. This * dump will help in triaging the issues. 
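 * Dump fragments arrive as vendor events carrying QCA_SSR_DUMP_HANDLE
 * in bytes 2-3, which is what the check below matches.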
*/ if ((skb->data[0] == HCI_VENDOR_PKT) && (get_unaligned_be16(skb->data + 2) == QCA_SSR_DUMP_HANDLE)) return qca_controller_memdump_event(hdev, skb); return hci_recv_frame(hdev, skb); } #define QCA_IBS_SLEEP_IND_EVENT \ .type = HCI_IBS_SLEEP_IND, \ .hlen = 0, \ .loff = 0, \ .lsize = 0, \ .maxlen = HCI_MAX_IBS_SIZE #define QCA_IBS_WAKE_IND_EVENT \ .type = HCI_IBS_WAKE_IND, \ .hlen = 0, \ .loff = 0, \ .lsize = 0, \ .maxlen = HCI_MAX_IBS_SIZE #define QCA_IBS_WAKE_ACK_EVENT \ .type = HCI_IBS_WAKE_ACK, \ .hlen = 0, \ .loff = 0, \ .lsize = 0, \ .maxlen = HCI_MAX_IBS_SIZE static const struct h4_recv_pkt qca_recv_pkts[] = { { H4_RECV_ACL, .recv = qca_recv_acl_data }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = qca_recv_event }, { QCA_IBS_WAKE_IND_EVENT, .recv = qca_ibs_wake_ind }, { QCA_IBS_WAKE_ACK_EVENT, .recv = qca_ibs_wake_ack }, { QCA_IBS_SLEEP_IND_EVENT, .recv = qca_ibs_sleep_ind }, }; static int qca_recv(struct hci_uart *hu, const void *data, int count) { struct qca_data *qca = hu->priv; if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; qca->rx_skb = h4_recv_buf(hu->hdev, qca->rx_skb, data, count, qca_recv_pkts, ARRAY_SIZE(qca_recv_pkts)); if (IS_ERR(qca->rx_skb)) { int err = PTR_ERR(qca->rx_skb); bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); qca->rx_skb = NULL; return err; } return count; } static struct sk_buff *qca_dequeue(struct hci_uart *hu) { struct qca_data *qca = hu->priv; return skb_dequeue(&qca->txq); } static uint8_t qca_get_baudrate_value(int speed) { switch (speed) { case 9600: return QCA_BAUDRATE_9600; case 19200: return QCA_BAUDRATE_19200; case 38400: return QCA_BAUDRATE_38400; case 57600: return QCA_BAUDRATE_57600; case 115200: return QCA_BAUDRATE_115200; case 230400: return QCA_BAUDRATE_230400; case 460800: return QCA_BAUDRATE_460800; case 500000: return QCA_BAUDRATE_500000; case 921600: return QCA_BAUDRATE_921600; case 1000000: return QCA_BAUDRATE_1000000; case 2000000: return QCA_BAUDRATE_2000000; case 3000000: return QCA_BAUDRATE_3000000; case 3200000: return QCA_BAUDRATE_3200000; case 3500000: return QCA_BAUDRATE_3500000; default: return QCA_BAUDRATE_115200; } } static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; struct sk_buff *skb; u8 cmd[] = { 0x01, 0x48, 0xFC, 0x01, 0x00 }; if (baudrate > QCA_BAUDRATE_3200000) return -EINVAL; cmd[4] = baudrate; skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); if (!skb) { bt_dev_err(hdev, "Failed to allocate baudrate packet"); return -ENOMEM; } /* Assign commands to change baudrate and packet type. 
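 * cmd[0] is the H4 packet-type byte (this skb is queued straight onto
 * txq, bypassing the type-byte push in qca_enqueue()); bytes 1-2 are the
 * little-endian vendor opcode 0xfc48, byte 3 the parameter length, and
 * cmd[4] the baudrate code filled in above.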
*/ skb_put_data(skb, cmd, sizeof(cmd)); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; skb_queue_tail(&qca->txq, skb); hci_uart_tx_wakeup(hu); /* Wait for the baudrate change request to be sent */ while (!skb_queue_empty(&qca->txq)) usleep_range(100, 200); if (hu->serdev) serdev_device_wait_until_sent(hu->serdev, msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS)); /* Give the controller time to process the request */ switch (qca_soc_type(hu)) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: usleep_range(1000, 10000); break; default: msleep(300); } return 0; } static inline void host_set_baudrate(struct hci_uart *hu, unsigned int speed) { if (hu->serdev) serdev_device_set_baudrate(hu->serdev, speed); else hci_uart_set_baudrate(hu, speed); } static int qca_send_power_pulse(struct hci_uart *hu, bool on) { int ret; int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS); u8 cmd = on ? QCA_WCN3990_POWERON_PULSE : QCA_WCN3990_POWEROFF_PULSE; /* These power pulses are single-byte commands which are sent * at the required baudrate to the wcn3990. On the wcn3990, we have an * external circuit at the Tx pin which decodes the pulse sent at a * specific baudrate. For example, the wcn3990 supports an RF COEX * antenna for both Wi-Fi/BT and also we use the same power inputs to * turn Wi-Fi/BT on and off. Powering up the power sources will not * enable BT until we send a power-on pulse at 115200 bps. This * algorithm helps to save power. Disabling hardware flow control is * mandatory while sending power pulses to the SoC. */ bt_dev_dbg(hu->hdev, "sending power pulse %02x to controller", cmd); serdev_device_write_flush(hu->serdev); hci_uart_set_flow_control(hu, true); ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd)); if (ret < 0) { bt_dev_err(hu->hdev, "failed to send power pulse %02x", cmd); return ret; } serdev_device_wait_until_sent(hu->serdev, timeout); hci_uart_set_flow_control(hu, false); /* Give the controller time to boot/shutdown */ if (on) msleep(100); else usleep_range(1000, 10000); return 0; } static unsigned int qca_get_speed(struct hci_uart *hu, enum qca_speed_type speed_type) { unsigned int speed = 0; if (speed_type == QCA_INIT_SPEED) { if (hu->init_speed) speed = hu->init_speed; else if (hu->proto->init_speed) speed = hu->proto->init_speed; } else { if (hu->oper_speed) speed = hu->oper_speed; else if (hu->proto->oper_speed) speed = hu->proto->oper_speed; } return speed; } static int qca_check_speeds(struct hci_uart *hu) { switch (qca_soc_type(hu)) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: if (!qca_get_speed(hu, QCA_INIT_SPEED) && !qca_get_speed(hu, QCA_OPER_SPEED)) return -EINVAL; break; default: if (!qca_get_speed(hu, QCA_INIT_SPEED) || !qca_get_speed(hu, QCA_OPER_SPEED)) return -EINVAL; } return 0; } static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) { unsigned int speed, qca_baudrate; struct qca_data *qca = hu->priv; int ret = 0; if (speed_type == QCA_INIT_SPEED) { speed = qca_get_speed(hu, QCA_INIT_SPEED); if (speed) host_set_baudrate(hu, speed); } else { enum qca_btsoc_type soc_type = qca_soc_type(hu); speed = qca_get_speed(hu, QCA_OPER_SPEED); if (!speed) return 0; /* Disable flow control for the wcn3990 to deassert RTS while * changing the baudrate of the chip and host. 
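* The ordering below matters: the vendor command goes out at the old host rate first, then the host port is switched, and flow control is re-enabled only once both sides are at the new speed.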
*/ switch (soc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: hci_uart_set_flow_control(hu, true); break; default: break; } switch (soc_type) { case QCA_WCN3990: reinit_completion(&qca->drop_ev_comp); set_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); break; default: break; } qca_baudrate = qca_get_baudrate_value(speed); bt_dev_dbg(hu->hdev, "Set UART speed to %d", speed); ret = qca_set_baudrate(hu->hdev, qca_baudrate); if (ret) goto error; host_set_baudrate(hu, speed); error: switch (soc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: hci_uart_set_flow_control(hu, false); break; default: break; } switch (soc_type) { case QCA_WCN3990: /* Wait for the controller to send the vendor event * for the baudrate change command. */ if (!wait_for_completion_timeout(&qca->drop_ev_comp, msecs_to_jiffies(100))) { bt_dev_err(hu->hdev, "Failed to change controller baudrate"); ret = -ETIMEDOUT; } clear_bit(QCA_DROP_VENDOR_EVENT, &qca->flags); break; default: break; } } return ret; } static int qca_send_crashbuffer(struct hci_uart *hu) { struct qca_data *qca = hu->priv; struct sk_buff *skb; skb = bt_skb_alloc(QCA_CRASHBYTE_PACKET_LEN, GFP_KERNEL); if (!skb) { bt_dev_err(hu->hdev, "Failed to allocate memory for skb packet"); return -ENOMEM; } /* We forcefully crash the controller by sending the 0xfb byte * 1024 times. Since some of these bytes might be lost, to be * on the safer side we send 1096 bytes to the SoC. */ memset(skb_put(skb, QCA_CRASHBYTE_PACKET_LEN), QCA_MEMDUMP_BYTE, QCA_CRASHBYTE_PACKET_LEN); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; bt_dev_info(hu->hdev, "crash the soc to collect controller dump"); skb_queue_tail(&qca->txq, skb); hci_uart_tx_wakeup(hu); return 0; } static void qca_wait_for_dump_collection(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; wait_on_bit_timeout(&qca->flags, QCA_MEMDUMP_COLLECTION, TASK_UNINTERRUPTIBLE, MEMDUMP_TIMEOUT_MS); clear_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); } static void qca_hw_error(struct hci_dev *hdev, u8 code) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; set_bit(QCA_SSR_TRIGGERED, &qca->flags); set_bit(QCA_HW_ERROR_EVENT, &qca->flags); bt_dev_info(hdev, "mem_dump_status: %d", qca->memdump_state); if (qca->memdump_state == QCA_MEMDUMP_IDLE) { /* If a hardware error event is received for anything other than * a QCA SoC memory dump event, we need to crash the SoC * and wait here for 8 seconds to get the dump packets. * This blocks the main thread until the dump is * collected. */ set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); qca_send_crashbuffer(hu); qca_wait_for_dump_collection(hdev); } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { /* Let us wait here until the memory dump is collected or * the memory dump timer expires. 
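* The qca_wait_for_dump_collection() helper used here bounds the wait with MEMDUMP_TIMEOUT_MS via wait_on_bit_timeout(), so a wedged controller cannot stall the error handler forever.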
*/ bt_dev_info(hdev, "waiting for dump to complete"); qca_wait_for_dump_collection(hdev); } mutex_lock(&qca->hci_memdump_lock); if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { bt_dev_err(hu->hdev, "clearing allocated memory due to memdump timeout"); hci_devcd_abort(hu->hdev); if (qca->qca_memdump) { kfree(qca->qca_memdump); qca->qca_memdump = NULL; } qca->memdump_state = QCA_MEMDUMP_TIMEOUT; cancel_delayed_work(&qca->ctrl_memdump_timeout); } mutex_unlock(&qca->hci_memdump_lock); if (qca->memdump_state == QCA_MEMDUMP_TIMEOUT || qca->memdump_state == QCA_MEMDUMP_COLLECTED) { cancel_work_sync(&qca->ctrl_memdump_evt); skb_queue_purge(&qca->rx_memdump_q); } clear_bit(QCA_HW_ERROR_EVENT, &qca->flags); } static void qca_cmd_timeout(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; set_bit(QCA_SSR_TRIGGERED, &qca->flags); if (qca->memdump_state == QCA_MEMDUMP_IDLE) { set_bit(QCA_MEMDUMP_COLLECTION, &qca->flags); qca_send_crashbuffer(hu); qca_wait_for_dump_collection(hdev); } else if (qca->memdump_state == QCA_MEMDUMP_COLLECTING) { /* Let us wait here until the memory dump is collected or * the memory dump timer expires. */ bt_dev_info(hdev, "waiting for dump to complete"); qca_wait_for_dump_collection(hdev); } mutex_lock(&qca->hci_memdump_lock); if (qca->memdump_state != QCA_MEMDUMP_COLLECTED) { qca->memdump_state = QCA_MEMDUMP_TIMEOUT; if (!test_bit(QCA_HW_ERROR_EVENT, &qca->flags)) { /* Inject a hw error event to reset the device * and driver. */ hci_reset_dev(hu->hdev); } } mutex_unlock(&qca->hci_memdump_lock); } static bool qca_wakeup(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); bool wakeup; /* A BT SoC attached through the serial bus is handled by the serdev * driver. So we need to use the device handle of the serdev driver to * get the may-wakeup status of the device. */ wakeup = device_may_wakeup(&hu->serdev->ctrl->dev); bt_dev_dbg(hu->hdev, "wakeup status : %d", wakeup); return wakeup; } static int qca_regulator_init(struct hci_uart *hu) { enum qca_btsoc_type soc_type = qca_soc_type(hu); struct qca_serdev *qcadev; int ret; bool sw_ctrl_state; /* Check the vregs status; maybe an earlier hci down has turned * off the voltage regulator. */ qcadev = serdev_device_get_drvdata(hu->serdev); if (!qcadev->bt_power->vregs_on) { serdev_device_close(hu->serdev); ret = qca_regulator_enable(qcadev); if (ret) return ret; ret = serdev_device_open(hu->serdev); if (ret) { bt_dev_err(hu->hdev, "failed to open port"); return ret; } } switch (soc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: /* Forcefully enable the wcn399x to enter boot mode. */ host_set_baudrate(hu, 2400); ret = qca_send_power_pulse(hu, false); if (ret) return ret; break; default: break; } /* For the wcn6750 the bt_en GPIO needs to be toggled */ if (qcadev->bt_en) { gpiod_set_value_cansleep(qcadev->bt_en, 0); msleep(50); gpiod_set_value_cansleep(qcadev->bt_en, 1); msleep(50); if (qcadev->sw_ctrl) { sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl); bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state); } } qca_set_speed(hu, QCA_INIT_SPEED); switch (soc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: ret = qca_send_power_pulse(hu, true); if (ret) return ret; break; default: break; } /* Now the device is ready to communicate with the host. * To sync the host with the device we need to reopen the port. * Without this, we will have RTS and CTS synchronization * issues. 
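* Summarizing the wcn399x bring-up sequence implemented above: enable the regulators (reopening the port around it if they were off), drop to 2400 baud and send the power-off pulse, restore the init speed, send the power-on pulse, and finally reopen the port so both ends agree on the modem-control line state.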
*/ serdev_device_close(hu->serdev); ret = serdev_device_open(hu->serdev); if (ret) { bt_dev_err(hu->hdev, "failed to open port"); return ret; } hci_uart_set_flow_control(hu, false); return 0; } static int qca_power_on(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); enum qca_btsoc_type soc_type = qca_soc_type(hu); struct qca_serdev *qcadev; struct qca_data *qca = hu->priv; int ret = 0; /* Non-serdev device usually is powered by external power * and don't need additional action in driver for power on */ if (!hu->serdev) return 0; switch (soc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: ret = qca_regulator_init(hu); break; default: qcadev = serdev_device_get_drvdata(hu->serdev); if (qcadev->bt_en) { gpiod_set_value_cansleep(qcadev->bt_en, 1); /* Controller needs time to bootup. */ msleep(150); } } clear_bit(QCA_BT_OFF, &qca->flags); return ret; } static void hci_coredump_qca(struct hci_dev *hdev) { static const u8 param[] = { 0x26 }; struct sk_buff *skb; skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb)); kfree_skb(skb); } static int qca_setup(struct hci_uart *hu) { struct hci_dev *hdev = hu->hdev; struct qca_data *qca = hu->priv; unsigned int speed, qca_baudrate = QCA_BAUDRATE_115200; unsigned int retries = 0; enum qca_btsoc_type soc_type = qca_soc_type(hu); const char *firmware_name = qca_get_firmware_name(hu); int ret; struct qca_btsoc_version ver; const char *soc_name; ret = qca_check_speeds(hu); if (ret) return ret; clear_bit(QCA_ROM_FW, &qca->flags); /* Patch downloading has to be done without IBS mode */ set_bit(QCA_IBS_DISABLED, &qca->flags); /* Enable controller to do both LE scan and BR/EDR inquiry * simultaneously. 
*/ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); switch (soc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: soc_name = "wcn399x"; break; case QCA_WCN6750: soc_name = "wcn6750"; break; case QCA_WCN6855: soc_name = "wcn6855"; break; case QCA_WCN7850: soc_name = "wcn7850"; break; default: soc_name = "ROME/QCA6390"; } bt_dev_info(hdev, "setting up %s", soc_name); qca->memdump_state = QCA_MEMDUMP_IDLE; retry: ret = qca_power_on(hdev); if (ret) goto out; clear_bit(QCA_SSR_TRIGGERED, &qca->flags); switch (soc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); hci_set_aosp_capable(hdev); ret = qca_read_soc_version(hdev, &ver, soc_type); if (ret) goto out; break; default: qca_set_speed(hu, QCA_INIT_SPEED); } /* Setup user speed if needed */ speed = qca_get_speed(hu, QCA_OPER_SPEED); if (speed) { ret = qca_set_speed(hu, QCA_OPER_SPEED); if (ret) goto out; qca_baudrate = qca_get_baudrate_value(speed); } switch (soc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: break; default: /* Get QCA version information */ ret = qca_read_soc_version(hdev, &ver, soc_type); if (ret) goto out; } /* Setup patch / NVM configurations */ ret = qca_uart_setup(hdev, qca_baudrate, soc_type, ver, firmware_name); if (!ret) { clear_bit(QCA_IBS_DISABLED, &qca->flags); qca_debugfs_init(hdev); hu->hdev->hw_error = qca_hw_error; hu->hdev->cmd_timeout = qca_cmd_timeout; if (device_can_wakeup(hu->serdev->ctrl->dev.parent)) hu->hdev->wakeup = qca_wakeup; } else if (ret == -ENOENT) { /* No patch/nvm-config found, run with original fw/config */ set_bit(QCA_ROM_FW, &qca->flags); ret = 0; } else if (ret == -EAGAIN) { /* * Userspace firmware loader will return -EAGAIN in case no * patch/nvm-config is found, so run with original fw/config. 
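* In both of these fallback cases QCA_ROM_FW stays set, which the suspend path later uses to skip in-band sleep handling, since the default ROM firmware does not support it.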
*/ set_bit(QCA_ROM_FW, &qca->flags); ret = 0; } out: if (ret && retries < MAX_INIT_RETRIES) { bt_dev_warn(hdev, "Retry BT power ON:%d", retries); qca_power_shutdown(hu); if (hu->serdev) { serdev_device_close(hu->serdev); ret = serdev_device_open(hu->serdev); if (ret) { bt_dev_err(hdev, "failed to open port"); return ret; } } retries++; goto retry; } /* Setup bdaddr */ if (soc_type == QCA_ROME) hu->hdev->set_bdaddr = qca_set_bdaddr_rome; else hu->hdev->set_bdaddr = qca_set_bdaddr; qca->fw_version = le16_to_cpu(ver.patch_ver); qca->controller_id = le16_to_cpu(ver.rom_ver); hci_devcd_register(hdev, hci_coredump_qca, qca_dmp_hdr, NULL); return ret; } static const struct hci_uart_proto qca_proto = { .id = HCI_UART_QCA, .name = "QCA", .manufacturer = 29, .init_speed = 115200, .oper_speed = 3000000, .open = qca_open, .close = qca_close, .flush = qca_flush, .setup = qca_setup, .recv = qca_recv, .enqueue = qca_enqueue, .dequeue = qca_dequeue, }; static const struct qca_device_data qca_soc_data_wcn3988 __maybe_unused = { .soc_type = QCA_WCN3988, .vregs = (struct qca_vreg []) { { "vddio", 15000 }, { "vddxo", 80000 }, { "vddrf", 300000 }, { "vddch0", 450000 }, }, .num_vregs = 4, }; static const struct qca_device_data qca_soc_data_wcn3990 __maybe_unused = { .soc_type = QCA_WCN3990, .vregs = (struct qca_vreg []) { { "vddio", 15000 }, { "vddxo", 80000 }, { "vddrf", 300000 }, { "vddch0", 450000 }, }, .num_vregs = 4, }; static const struct qca_device_data qca_soc_data_wcn3991 __maybe_unused = { .soc_type = QCA_WCN3991, .vregs = (struct qca_vreg []) { { "vddio", 15000 }, { "vddxo", 80000 }, { "vddrf", 300000 }, { "vddch0", 450000 }, }, .num_vregs = 4, .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, }; static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = { .soc_type = QCA_WCN3998, .vregs = (struct qca_vreg []) { { "vddio", 10000 }, { "vddxo", 80000 }, { "vddrf", 300000 }, { "vddch0", 450000 }, }, .num_vregs = 4, }; static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = { .soc_type = QCA_QCA6390, .num_vregs = 0, }; static const struct qca_device_data qca_soc_data_wcn6750 __maybe_unused = { .soc_type = QCA_WCN6750, .vregs = (struct qca_vreg []) { { "vddio", 5000 }, { "vddaon", 26000 }, { "vddbtcxmx", 126000 }, { "vddrfacmn", 12500 }, { "vddrfa0p8", 102000 }, { "vddrfa1p7", 302000 }, { "vddrfa1p2", 257000 }, { "vddrfa2p2", 1700000 }, { "vddasd", 200 }, }, .num_vregs = 9, .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, }; static const struct qca_device_data qca_soc_data_wcn6855 __maybe_unused = { .soc_type = QCA_WCN6855, .vregs = (struct qca_vreg []) { { "vddio", 5000 }, { "vddbtcxmx", 126000 }, { "vddrfacmn", 12500 }, { "vddrfa0p8", 102000 }, { "vddrfa1p7", 302000 }, { "vddrfa1p2", 257000 }, }, .num_vregs = 6, .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, }; static const struct qca_device_data qca_soc_data_wcn7850 __maybe_unused = { .soc_type = QCA_WCN7850, .vregs = (struct qca_vreg []) { { "vddio", 5000 }, { "vddaon", 26000 }, { "vdddig", 126000 }, { "vddrfa0p8", 102000 }, { "vddrfa1p2", 257000 }, { "vddrfa1p9", 302000 }, }, .num_vregs = 6, .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, }; static void qca_power_shutdown(struct hci_uart *hu) { struct qca_serdev *qcadev; struct qca_data *qca = hu->priv; unsigned long flags; enum qca_btsoc_type soc_type = qca_soc_type(hu); bool sw_ctrl_state; /* From this point we go into power off state. 
But serial port is * still open, stop queueing the IBS data and flush all the buffered * data in skb's. */ spin_lock_irqsave(&qca->hci_ibs_lock, flags); set_bit(QCA_IBS_DISABLED, &qca->flags); qca_flush(hu); spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); /* Non-serdev device usually is powered by external power * and don't need additional action in driver for power down */ if (!hu->serdev) return; qcadev = serdev_device_get_drvdata(hu->serdev); switch (soc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: host_set_baudrate(hu, 2400); qca_send_power_pulse(hu, false); qca_regulator_disable(qcadev); break; case QCA_WCN6750: case QCA_WCN6855: gpiod_set_value_cansleep(qcadev->bt_en, 0); msleep(100); qca_regulator_disable(qcadev); if (qcadev->sw_ctrl) { sw_ctrl_state = gpiod_get_value_cansleep(qcadev->sw_ctrl); bt_dev_dbg(hu->hdev, "SW_CTRL is %d", sw_ctrl_state); } break; default: gpiod_set_value_cansleep(qcadev->bt_en, 0); } set_bit(QCA_BT_OFF, &qca->flags); } static int qca_power_off(struct hci_dev *hdev) { struct hci_uart *hu = hci_get_drvdata(hdev); struct qca_data *qca = hu->priv; enum qca_btsoc_type soc_type = qca_soc_type(hu); hu->hdev->hw_error = NULL; hu->hdev->cmd_timeout = NULL; del_timer_sync(&qca->wake_retrans_timer); del_timer_sync(&qca->tx_idle_timer); /* Stop sending shutdown command if soc crashes. */ if (soc_type != QCA_ROME && qca->memdump_state == QCA_MEMDUMP_IDLE) { qca_send_pre_shutdown_cmd(hdev); usleep_range(8000, 10000); } qca_power_shutdown(hu); return 0; } static int qca_regulator_enable(struct qca_serdev *qcadev) { struct qca_power *power = qcadev->bt_power; int ret; /* Already enabled */ if (power->vregs_on) return 0; BT_DBG("enabling %d regulators)", power->num_vregs); ret = regulator_bulk_enable(power->num_vregs, power->vreg_bulk); if (ret) return ret; power->vregs_on = true; ret = clk_prepare_enable(qcadev->susclk); if (ret) qca_regulator_disable(qcadev); return ret; } static void qca_regulator_disable(struct qca_serdev *qcadev) { struct qca_power *power; if (!qcadev) return; power = qcadev->bt_power; /* Already disabled? 
*/ if (!power->vregs_on) return; regulator_bulk_disable(power->num_vregs, power->vreg_bulk); power->vregs_on = false; clk_disable_unprepare(qcadev->susclk); } static int qca_init_regulators(struct qca_power *qca, const struct qca_vreg *vregs, size_t num_vregs) { struct regulator_bulk_data *bulk; int ret; int i; bulk = devm_kcalloc(qca->dev, num_vregs, sizeof(*bulk), GFP_KERNEL); if (!bulk) return -ENOMEM; for (i = 0; i < num_vregs; i++) bulk[i].supply = vregs[i].name; ret = devm_regulator_bulk_get(qca->dev, num_vregs, bulk); if (ret < 0) return ret; for (i = 0; i < num_vregs; i++) { ret = regulator_set_load(bulk[i].consumer, vregs[i].load_uA); if (ret) return ret; } qca->vreg_bulk = bulk; qca->num_vregs = num_vregs; return 0; } static int qca_serdev_probe(struct serdev_device *serdev) { struct qca_serdev *qcadev; struct hci_dev *hdev; const struct qca_device_data *data; int err; bool power_ctrl_enabled = true; qcadev = devm_kzalloc(&serdev->dev, sizeof(*qcadev), GFP_KERNEL); if (!qcadev) return -ENOMEM; qcadev->serdev_hu.serdev = serdev; data = device_get_match_data(&serdev->dev); serdev_device_set_drvdata(serdev, qcadev); device_property_read_string(&serdev->dev, "firmware-name", &qcadev->firmware_name); device_property_read_u32(&serdev->dev, "max-speed", &qcadev->oper_speed); if (!qcadev->oper_speed) BT_DBG("UART will pick default operating speed"); if (data) qcadev->btsoc_type = data->soc_type; else qcadev->btsoc_type = QCA_ROME; switch (qcadev->btsoc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: qcadev->bt_power = devm_kzalloc(&serdev->dev, sizeof(struct qca_power), GFP_KERNEL); if (!qcadev->bt_power) return -ENOMEM; qcadev->bt_power->dev = &serdev->dev; err = qca_init_regulators(qcadev->bt_power, data->vregs, data->num_vregs); if (err) { BT_ERR("Failed to init regulators:%d", err); return err; } qcadev->bt_power->vregs_on = false; qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); if (IS_ERR_OR_NULL(qcadev->bt_en) && (data->soc_type == QCA_WCN6750 || data->soc_type == QCA_WCN6855)) { dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n"); power_ctrl_enabled = false; } qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl", GPIOD_IN); if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && (data->soc_type == QCA_WCN6750 || data->soc_type == QCA_WCN6855 || data->soc_type == QCA_WCN7850)) dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n"); qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL); if (IS_ERR(qcadev->susclk)) { dev_err(&serdev->dev, "failed to acquire clk\n"); return PTR_ERR(qcadev->susclk); } err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); if (err) { BT_ERR("wcn3990 serdev registration failed"); return err; } break; default: qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); if (IS_ERR_OR_NULL(qcadev->bt_en)) { dev_warn(&serdev->dev, "failed to acquire enable gpio\n"); power_ctrl_enabled = false; } qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL); if (IS_ERR(qcadev->susclk)) { dev_warn(&serdev->dev, "failed to acquire clk\n"); return PTR_ERR(qcadev->susclk); } err = clk_set_rate(qcadev->susclk, SUSCLK_RATE_32KHZ); if (err) return err; err = clk_prepare_enable(qcadev->susclk); if (err) return err; err = hci_uart_register_device(&qcadev->serdev_hu, &qca_proto); if (err) { BT_ERR("Rome serdev registration failed"); clk_disable_unprepare(qcadev->susclk); return err; } } hdev = qcadev->serdev_hu.hdev; if 
(power_ctrl_enabled) { set_bit(HCI_QUIRK_NON_PERSISTENT_SETUP, &hdev->quirks); hdev->shutdown = qca_power_off; } if (data) { /* Wideband speech support must be set per driver since it can't * be queried via hci. Same with the valid le states quirk. */ if (data->capabilities & QCA_CAP_WIDEBAND_SPEECH) set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); if (data->capabilities & QCA_CAP_VALID_LE_STATES) set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); } return 0; } static void qca_serdev_remove(struct serdev_device *serdev) { struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); struct qca_power *power = qcadev->bt_power; switch (qcadev->btsoc_type) { case QCA_WCN3988: case QCA_WCN3990: case QCA_WCN3991: case QCA_WCN3998: case QCA_WCN6750: case QCA_WCN6855: case QCA_WCN7850: if (power->vregs_on) { qca_power_shutdown(&qcadev->serdev_hu); break; } fallthrough; default: if (qcadev->susclk) clk_disable_unprepare(qcadev->susclk); } hci_uart_unregister_device(&qcadev->serdev_hu); } static void qca_serdev_shutdown(struct device *dev) { int ret; int timeout = msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS); struct serdev_device *serdev = to_serdev_device(dev); struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); struct hci_uart *hu = &qcadev->serdev_hu; struct hci_dev *hdev = hu->hdev; struct qca_data *qca = hu->priv; const u8 ibs_wake_cmd[] = { 0xFD }; const u8 edl_reset_soc_cmd[] = { 0x01, 0x00, 0xFC, 0x01, 0x05 }; if (qcadev->btsoc_type == QCA_QCA6390) { if (test_bit(QCA_BT_OFF, &qca->flags) || !test_bit(HCI_RUNNING, &hdev->flags)) return; serdev_device_write_flush(serdev); ret = serdev_device_write_buf(serdev, ibs_wake_cmd, sizeof(ibs_wake_cmd)); if (ret < 0) { BT_ERR("QCA send IBS_WAKE_IND error: %d", ret); return; } serdev_device_wait_until_sent(serdev, timeout); usleep_range(8000, 10000); serdev_device_write_flush(serdev); ret = serdev_device_write_buf(serdev, edl_reset_soc_cmd, sizeof(edl_reset_soc_cmd)); if (ret < 0) { BT_ERR("QCA send EDL_RESET_REQ error: %d", ret); return; } serdev_device_wait_until_sent(serdev, timeout); usleep_range(8000, 10000); } } static int __maybe_unused qca_suspend(struct device *dev) { struct serdev_device *serdev = to_serdev_device(dev); struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); struct hci_uart *hu = &qcadev->serdev_hu; struct qca_data *qca = hu->priv; unsigned long flags; bool tx_pending = false; int ret = 0; u8 cmd; u32 wait_timeout = 0; set_bit(QCA_SUSPENDING, &qca->flags); /* If the BT SoC is running with the default firmware then it does not * support in-band sleep */ if (test_bit(QCA_ROM_FW, &qca->flags)) return 0; /* During SSR after memory dump collection, the controller will be * powered off and then powered on. If the controller is powered off * during SSR then we should wait until SSR is completed. */ if (test_bit(QCA_BT_OFF, &qca->flags) && !test_bit(QCA_SSR_TRIGGERED, &qca->flags)) return 0; if (test_bit(QCA_IBS_DISABLED, &qca->flags) || test_bit(QCA_SSR_TRIGGERED, &qca->flags)) { wait_timeout = test_bit(QCA_SSR_TRIGGERED, &qca->flags) ? IBS_DISABLE_SSR_TIMEOUT_MS : FW_DOWNLOAD_TIMEOUT_MS; /* The QCA_IBS_DISABLED flag is set during FW download * and during memory dump collection. It is cleared * after the FW download completes. 
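* The wait below is therefore bounded: wait_on_bit_timeout() gives the flag the wait_timeout chosen above (IBS_DISABLE_SSR_TIMEOUT_MS after an SSR, FW_DOWNLOAD_TIMEOUT_MS during a normal firmware download) to clear.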
*/ wait_on_bit_timeout(&qca->flags, QCA_IBS_DISABLED, TASK_UNINTERRUPTIBLE, msecs_to_jiffies(wait_timeout)); if (test_bit(QCA_IBS_DISABLED, &qca->flags)) { bt_dev_err(hu->hdev, "SSR or FW download time out"); ret = -ETIMEDOUT; goto error; } } cancel_work_sync(&qca->ws_awake_device); cancel_work_sync(&qca->ws_awake_rx); spin_lock_irqsave_nested(&qca->hci_ibs_lock, flags, SINGLE_DEPTH_NESTING); switch (qca->tx_ibs_state) { case HCI_IBS_TX_WAKING: del_timer(&qca->wake_retrans_timer); fallthrough; case HCI_IBS_TX_AWAKE: del_timer(&qca->tx_idle_timer); serdev_device_write_flush(hu->serdev); cmd = HCI_IBS_SLEEP_IND; ret = serdev_device_write_buf(hu->serdev, &cmd, sizeof(cmd)); if (ret < 0) { BT_ERR("Failed to send SLEEP to device"); break; } qca->tx_ibs_state = HCI_IBS_TX_ASLEEP; qca->ibs_sent_slps++; tx_pending = true; break; case HCI_IBS_TX_ASLEEP: break; default: BT_ERR("Spurious tx state %d", qca->tx_ibs_state); ret = -EINVAL; break; } spin_unlock_irqrestore(&qca->hci_ibs_lock, flags); if (ret < 0) goto error; if (tx_pending) { serdev_device_wait_until_sent(hu->serdev, msecs_to_jiffies(CMD_TRANS_TIMEOUT_MS)); serial_clock_vote(HCI_IBS_TX_VOTE_CLOCK_OFF, hu); } /* Wait for HCI_IBS_SLEEP_IND sent by device to indicate its Tx is going * to sleep, so that the packet does not wake the system later. */ ret = wait_event_interruptible_timeout(qca->suspend_wait_q, qca->rx_ibs_state == HCI_IBS_RX_ASLEEP, msecs_to_jiffies(IBS_BTSOC_TX_IDLE_TIMEOUT_MS)); if (ret == 0) { ret = -ETIMEDOUT; goto error; } return 0; error: clear_bit(QCA_SUSPENDING, &qca->flags); return ret; } static int __maybe_unused qca_resume(struct device *dev) { struct serdev_device *serdev = to_serdev_device(dev); struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); struct hci_uart *hu = &qcadev->serdev_hu; struct qca_data *qca = hu->priv; clear_bit(QCA_SUSPENDING, &qca->flags); return 0; } static SIMPLE_DEV_PM_OPS(qca_pm_ops, qca_suspend, qca_resume); #ifdef CONFIG_OF static const struct of_device_id qca_bluetooth_of_match[] = { { .compatible = "qcom,qca6174-bt" }, { .compatible = "qcom,qca6390-bt", .data = &qca_soc_data_qca6390}, { .compatible = "qcom,qca9377-bt" }, { .compatible = "qcom,wcn3988-bt", .data = &qca_soc_data_wcn3988}, { .compatible = "qcom,wcn3990-bt", .data = &qca_soc_data_wcn3990}, { .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991}, { .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998}, { .compatible = "qcom,wcn6750-bt", .data = &qca_soc_data_wcn6750}, { .compatible = "qcom,wcn6855-bt", .data = &qca_soc_data_wcn6855}, { .compatible = "qcom,wcn7850-bt", .data = &qca_soc_data_wcn7850}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match); #endif #ifdef CONFIG_ACPI static const struct acpi_device_id qca_bluetooth_acpi_match[] = { { "QCOM6390", (kernel_ulong_t)&qca_soc_data_qca6390 }, { "DLA16390", (kernel_ulong_t)&qca_soc_data_qca6390 }, { "DLB16390", (kernel_ulong_t)&qca_soc_data_qca6390 }, { "DLB26390", (kernel_ulong_t)&qca_soc_data_qca6390 }, { }, }; MODULE_DEVICE_TABLE(acpi, qca_bluetooth_acpi_match); #endif #ifdef CONFIG_DEV_COREDUMP static void hciqca_coredump(struct device *dev) { struct serdev_device *serdev = to_serdev_device(dev); struct qca_serdev *qcadev = serdev_device_get_drvdata(serdev); struct hci_uart *hu = &qcadev->serdev_hu; struct hci_dev *hdev = hu->hdev; if (hdev->dump.coredump) hdev->dump.coredump(hdev); } #endif static struct serdev_device_driver qca_serdev_driver = { .probe = qca_serdev_probe, .remove = qca_serdev_remove, .driver = { .name = 
"hci_uart_qca", .of_match_table = of_match_ptr(qca_bluetooth_of_match), .acpi_match_table = ACPI_PTR(qca_bluetooth_acpi_match), .shutdown = qca_serdev_shutdown, .pm = &qca_pm_ops, #ifdef CONFIG_DEV_COREDUMP .coredump = hciqca_coredump, #endif }, }; int __init qca_init(void) { serdev_device_driver_register(&qca_serdev_driver); return hci_uart_register_proto(&qca_proto); } int __exit qca_deinit(void) { serdev_device_driver_unregister(&qca_serdev_driver); return hci_uart_unregister_proto(&qca_proto); }
linux-master
drivers/bluetooth/hci_qca.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Atheros Communication Bluetooth HCIATH3K UART protocol * * HCIATH3K (HCI Atheros AR300x Protocol) is a Atheros Communication's * power management protocol extension to H4 to support AR300x Bluetooth Chip. * * Copyright (c) 2009-2010 Atheros Communications Inc. * * Acknowledgements: * This file is based on hci_h4.c, which was written * by Maxim Krasnyansky and Marcel Holtmann. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/errno.h> #include <linux/ioctl.h> #include <linux/skbuff.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "hci_uart.h" struct ath_struct { struct hci_uart *hu; unsigned int cur_sleep; struct sk_buff *rx_skb; struct sk_buff_head txq; struct work_struct ctxtsw; }; #define OP_WRITE_TAG 0x01 #define INDEX_BDADDR 0x01 struct ath_vendor_cmd { __u8 opcode; __le16 index; __u8 len; __u8 data[251]; } __packed; static int ath_wakeup_ar3k(struct tty_struct *tty) { int status = tty->driver->ops->tiocmget(tty); if (status & TIOCM_CTS) return status; /* Clear RTS first */ tty->driver->ops->tiocmget(tty); tty->driver->ops->tiocmset(tty, 0x00, TIOCM_RTS); msleep(20); /* Set RTS, wake up board */ tty->driver->ops->tiocmget(tty); tty->driver->ops->tiocmset(tty, TIOCM_RTS, 0x00); msleep(20); status = tty->driver->ops->tiocmget(tty); return status; } static void ath_hci_uart_work(struct work_struct *work) { int status; struct ath_struct *ath; struct hci_uart *hu; struct tty_struct *tty; ath = container_of(work, struct ath_struct, ctxtsw); hu = ath->hu; tty = hu->tty; /* verify and wake up controller */ if (ath->cur_sleep) { status = ath_wakeup_ar3k(tty); if (!(status & TIOCM_CTS)) return; } /* Ready to send Data */ clear_bit(HCI_UART_SENDING, &hu->tx_state); hci_uart_tx_wakeup(hu); } static int ath_open(struct hci_uart *hu) { struct ath_struct *ath; BT_DBG("hu %p", hu); if (!hci_uart_has_flow_control(hu)) return -EOPNOTSUPP; ath = kzalloc(sizeof(*ath), GFP_KERNEL); if (!ath) return -ENOMEM; skb_queue_head_init(&ath->txq); hu->priv = ath; ath->hu = hu; INIT_WORK(&ath->ctxtsw, ath_hci_uart_work); return 0; } static int ath_close(struct hci_uart *hu) { struct ath_struct *ath = hu->priv; BT_DBG("hu %p", hu); skb_queue_purge(&ath->txq); kfree_skb(ath->rx_skb); cancel_work_sync(&ath->ctxtsw); hu->priv = NULL; kfree(ath); return 0; } static int ath_flush(struct hci_uart *hu) { struct ath_struct *ath = hu->priv; BT_DBG("hu %p", hu); skb_queue_purge(&ath->txq); return 0; } static int ath_vendor_cmd(struct hci_dev *hdev, uint8_t opcode, uint16_t index, const void *data, size_t dlen) { struct sk_buff *skb; struct ath_vendor_cmd cmd; if (dlen > sizeof(cmd.data)) return -EINVAL; cmd.opcode = opcode; cmd.index = cpu_to_le16(index); cmd.len = dlen; memcpy(cmd.data, data, dlen); skb = __hci_cmd_sync(hdev, 0xfc0b, dlen + 4, &cmd, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); return 0; } static int ath_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { return ath_vendor_cmd(hdev, OP_WRITE_TAG, INDEX_BDADDR, bdaddr, sizeof(*bdaddr)); } static int ath_setup(struct hci_uart *hu) { BT_DBG("hu %p", hu); hu->hdev->set_bdaddr = ath_set_bdaddr; return 0; } static const struct h4_recv_pkt ath_recv_pkts[] = { { H4_RECV_ACL, .recv = hci_recv_frame }, { H4_RECV_SCO, .recv = hci_recv_frame }, { H4_RECV_EVENT, .recv = hci_recv_frame }, }; static int ath_recv(struct hci_uart *hu, const void *data, int count) { 
struct ath_struct *ath = hu->priv; ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count, ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts)); if (IS_ERR(ath->rx_skb)) { int err = PTR_ERR(ath->rx_skb); bt_dev_err(hu->hdev, "Frame reassembly failed (%d)", err); ath->rx_skb = NULL; return err; } return count; } #define HCI_OP_ATH_SLEEP 0xFC04 static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb) { struct ath_struct *ath = hu->priv; if (hci_skb_pkt_type(skb) == HCI_SCODATA_PKT) { kfree_skb(skb); return 0; } /* Update power management enable flag with parameters of * HCI sleep enable vendor specific HCI command. */ if (hci_skb_pkt_type(skb) == HCI_COMMAND_PKT) { struct hci_command_hdr *hdr = (void *)skb->data; if (__le16_to_cpu(hdr->opcode) == HCI_OP_ATH_SLEEP) ath->cur_sleep = skb->data[HCI_COMMAND_HDR_SIZE]; } BT_DBG("hu %p skb %p", hu, skb); /* Prepend skb with frame type */ memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); skb_queue_tail(&ath->txq, skb); set_bit(HCI_UART_SENDING, &hu->tx_state); schedule_work(&ath->ctxtsw); return 0; } static struct sk_buff *ath_dequeue(struct hci_uart *hu) { struct ath_struct *ath = hu->priv; return skb_dequeue(&ath->txq); } static const struct hci_uart_proto athp = { .id = HCI_UART_ATH3K, .name = "ATH3K", .manufacturer = 69, .open = ath_open, .close = ath_close, .flush = ath_flush, .setup = ath_setup, .recv = ath_recv, .enqueue = ath_enqueue, .dequeue = ath_dequeue, }; int __init ath_init(void) { return hci_uart_register_proto(&athp); } int __exit ath_deinit(void) { return hci_uart_unregister_proto(&athp); }
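/*
 * Editorial sketch (not part of the driver): builds the payload that
 * ath_set_bdaddr() above hands to __hci_cmd_sync() with vendor opcode
 * 0xfc0b. The payload is the packed ath_vendor_cmd header (opcode,
 * little-endian index, length) followed by the 6-byte bdaddr, for a
 * total of dlen + 4 bytes. The struct name and bdaddr value here are
 * illustrative only; an all-uint8_t struct needs no packing attribute.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ath_vendor_cmd_sketch {
	uint8_t opcode;
	uint8_t index_le[2];	/* the __le16 index, spelled out byte-wise */
	uint8_t len;
	uint8_t data[251];
};

int main(void)
{
	const uint8_t bdaddr[6] = { 0x11, 0x22, 0x33, 0x44, 0x55, 0x66 };
	struct ath_vendor_cmd_sketch cmd = { 0 };
	const uint8_t *p = (const uint8_t *)&cmd;
	size_t plen = sizeof(bdaddr) + 4;	/* dlen + 4-byte header */
	size_t i;

	cmd.opcode = 0x01;	/* OP_WRITE_TAG */
	cmd.index_le[0] = 0x01;	/* INDEX_BDADDR, little-endian */
	cmd.index_le[1] = 0x00;
	cmd.len = sizeof(bdaddr);
	memcpy(cmd.data, bdaddr, sizeof(bdaddr));

	for (i = 0; i < plen; i++)
		printf("%02x ", p[i]);
	printf("\n");	/* 01 01 00 06 11 22 33 44 55 66 */
	return 0;
}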
linux-master
drivers/bluetooth/hci_ath.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * * Bluetooth support for Intel devices * * Copyright (C) 2015 Intel Corporation */ #include <linux/module.h> #include <linux/firmware.h> #include <linux/regmap.h> #include <linux/acpi.h> #include <acpi/acpi_bus.h> #include <asm/unaligned.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btintel.h" #define VERSION "0.1" #define BDADDR_INTEL (&(bdaddr_t){{0x00, 0x8b, 0x9e, 0x19, 0x03, 0x00}}) #define RSA_HEADER_LEN 644 #define CSS_HEADER_OFFSET 8 #define ECDSA_OFFSET 644 #define ECDSA_HEADER_LEN 320 #define BTINTEL_PPAG_NAME "PPAG" enum { DSM_SET_WDISABLE2_DELAY = 1, DSM_SET_RESET_METHOD = 3, }; /* structure to store the PPAG data read from ACPI table */ struct btintel_ppag { u32 domain; u32 mode; acpi_status status; struct hci_dev *hdev; }; #define CMD_WRITE_BOOT_PARAMS 0xfc0e struct cmd_write_boot_params { __le32 boot_addr; u8 fw_build_num; u8 fw_build_ww; u8 fw_build_yy; } __packed; static struct { const char *driver_name; u8 hw_variant; u32 fw_build_num; } coredump_info; static const guid_t btintel_guid_dsm = GUID_INIT(0xaa10f4e0, 0x81ac, 0x4233, 0xab, 0xf6, 0x3b, 0x2a, 0xc5, 0x0e, 0x28, 0xd9); int btintel_check_bdaddr(struct hci_dev *hdev) { struct hci_rp_read_bd_addr *bda; struct sk_buff *skb; skb = __hci_cmd_sync(hdev, HCI_OP_READ_BD_ADDR, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { int err = PTR_ERR(skb); bt_dev_err(hdev, "Reading Intel device address failed (%d)", err); return err; } if (skb->len != sizeof(*bda)) { bt_dev_err(hdev, "Intel device address length mismatch"); kfree_skb(skb); return -EIO; } bda = (struct hci_rp_read_bd_addr *)skb->data; /* For some Intel based controllers, the default Bluetooth device * address 00:03:19:9E:8B:00 can be found. These controllers are * fully operational, but have the danger of duplicate addresses * and that in turn can cause problems with Bluetooth operation. */ if (!bacmp(&bda->bdaddr, BDADDR_INTEL)) { bt_dev_err(hdev, "Found Intel default device address (%pMR)", &bda->bdaddr); set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btintel_check_bdaddr); int btintel_enter_mfg(struct hci_dev *hdev) { static const u8 param[] = { 0x01, 0x00 }; struct sk_buff *skb; skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Entering manufacturer mode failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btintel_enter_mfg); int btintel_exit_mfg(struct hci_dev *hdev, bool reset, bool patched) { u8 param[] = { 0x00, 0x00 }; struct sk_buff *skb; /* The 2nd command parameter specifies the manufacturing exit method: * 0x00: Just disable the manufacturing mode (0x00). * 0x01: Disable manufacturing mode and reset with patches deactivated. * 0x02: Disable manufacturing mode and reset with patches activated. */ if (reset) param[1] |= patched ? 
0x02 : 0x01; skb = __hci_cmd_sync(hdev, 0xfc11, 2, param, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Exiting manufacturer mode failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btintel_exit_mfg); int btintel_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr) { struct sk_buff *skb; int err; skb = __hci_cmd_sync(hdev, 0xfc31, 6, bdaddr, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "Changing Intel device address failed (%d)", err); return err; } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btintel_set_bdaddr); static int btintel_set_event_mask(struct hci_dev *hdev, bool debug) { u8 mask[8] = { 0x87, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; struct sk_buff *skb; int err; if (debug) mask[1] |= 0x62; skb = __hci_cmd_sync(hdev, 0xfc52, 8, mask, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(hdev, "Setting Intel event mask failed (%d)", err); return err; } kfree_skb(skb); return 0; } int btintel_set_diag(struct hci_dev *hdev, bool enable) { struct sk_buff *skb; u8 param[3]; int err; if (enable) { param[0] = 0x03; param[1] = 0x03; param[2] = 0x03; } else { param[0] = 0x00; param[1] = 0x00; param[2] = 0x00; } skb = __hci_cmd_sync(hdev, 0xfc43, 3, param, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); if (err == -ENODATA) goto done; bt_dev_err(hdev, "Changing Intel diagnostic mode failed (%d)", err); return err; } kfree_skb(skb); done: btintel_set_event_mask(hdev, enable); return 0; } EXPORT_SYMBOL_GPL(btintel_set_diag); static int btintel_set_diag_mfg(struct hci_dev *hdev, bool enable) { int err, ret; err = btintel_enter_mfg(hdev); if (err) return err; ret = btintel_set_diag(hdev, enable); err = btintel_exit_mfg(hdev, false, false); if (err) return err; return ret; } static int btintel_set_diag_combined(struct hci_dev *hdev, bool enable) { int ret; /* Legacy ROM device needs to be in the manufacturer mode to apply * diagnostic setting * * This flag is set after reading the Intel version. */ if (btintel_test_flag(hdev, INTEL_ROM_LEGACY)) ret = btintel_set_diag_mfg(hdev, enable); else ret = btintel_set_diag(hdev, enable); return ret; } static void btintel_hw_error(struct hci_dev *hdev, u8 code) { struct sk_buff *skb; u8 type = 0x00; bt_dev_err(hdev, "Hardware error 0x%2.2x", code); skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reset after hardware error failed (%ld)", PTR_ERR(skb)); return; } kfree_skb(skb); skb = __hci_cmd_sync(hdev, 0xfc22, 1, &type, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Retrieving Intel exception info failed (%ld)", PTR_ERR(skb)); return; } if (skb->len != 13) { bt_dev_err(hdev, "Exception info size mismatch"); kfree_skb(skb); return; } bt_dev_err(hdev, "Exception info %s", (char *)(skb->data + 1)); kfree_skb(skb); } int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver) { const char *variant; /* The hardware platform number has a fixed value of 0x37 and * for now only accept this single value. */ if (ver->hw_platform != 0x37) { bt_dev_err(hdev, "Unsupported Intel hardware platform (%u)", ver->hw_platform); return -EINVAL; } /* Check for supported iBT hardware variants of this firmware * loading method. * * This check has been put in place to ensure correct forward * compatibility options when newer hardware variants come along. 
*/ switch (ver->hw_variant) { case 0x07: /* WP - Legacy ROM */ case 0x08: /* StP - Legacy ROM */ case 0x0b: /* SfP */ case 0x0c: /* WsP */ case 0x11: /* JfP */ case 0x12: /* ThP */ case 0x13: /* HrP */ case 0x14: /* CcP */ break; default: bt_dev_err(hdev, "Unsupported Intel hardware variant (%u)", ver->hw_variant); return -EINVAL; } switch (ver->fw_variant) { case 0x01: variant = "Legacy ROM 2.5"; break; case 0x06: variant = "Bootloader"; break; case 0x22: variant = "Legacy ROM 2.x"; break; case 0x23: variant = "Firmware"; break; default: bt_dev_err(hdev, "Unsupported firmware variant(%02x)", ver->fw_variant); return -EINVAL; } coredump_info.hw_variant = ver->hw_variant; coredump_info.fw_build_num = ver->fw_build_num; bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u", variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f, ver->fw_build_num, ver->fw_build_ww, 2000 + ver->fw_build_yy); return 0; } EXPORT_SYMBOL_GPL(btintel_version_info); static int btintel_secure_send(struct hci_dev *hdev, u8 fragment_type, u32 plen, const void *param) { while (plen > 0) { struct sk_buff *skb; u8 cmd_param[253], fragment_len = (plen > 252) ? 252 : plen; cmd_param[0] = fragment_type; memcpy(cmd_param + 1, param, fragment_len); skb = __hci_cmd_sync(hdev, 0xfc09, fragment_len + 1, cmd_param, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) return PTR_ERR(skb); kfree_skb(skb); plen -= fragment_len; param += fragment_len; } return 0; } int btintel_load_ddc_config(struct hci_dev *hdev, const char *ddc_name) { const struct firmware *fw; struct sk_buff *skb; const u8 *fw_ptr; int err; err = request_firmware_direct(&fw, ddc_name, &hdev->dev); if (err < 0) { bt_dev_err(hdev, "Failed to load Intel DDC file %s (%d)", ddc_name, err); return err; } bt_dev_info(hdev, "Found Intel DDC parameters: %s", ddc_name); fw_ptr = fw->data; /* DDC file contains one or more DDC structure which has * Length (1 byte), DDC ID (2 bytes), and DDC value (Length - 2). */ while (fw->size > fw_ptr - fw->data) { u8 cmd_plen = fw_ptr[0] + sizeof(u8); skb = __hci_cmd_sync(hdev, 0xfc8b, cmd_plen, fw_ptr, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Failed to send Intel_Write_DDC (%ld)", PTR_ERR(skb)); release_firmware(fw); return PTR_ERR(skb); } fw_ptr += cmd_plen; kfree_skb(skb); } release_firmware(fw); bt_dev_info(hdev, "Applying Intel DDC parameters completed"); return 0; } EXPORT_SYMBOL_GPL(btintel_load_ddc_config); int btintel_set_event_mask_mfg(struct hci_dev *hdev, bool debug) { int err, ret; err = btintel_enter_mfg(hdev); if (err) return err; ret = btintel_set_event_mask(hdev, debug); err = btintel_exit_mfg(hdev, false, false); if (err) return err; return ret; } EXPORT_SYMBOL_GPL(btintel_set_event_mask_mfg); int btintel_read_version(struct hci_dev *hdev, struct intel_version *ver) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, 0xfc05, 0, NULL, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reading Intel version information failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } if (skb->len != sizeof(*ver)) { bt_dev_err(hdev, "Intel version event size mismatch"); kfree_skb(skb); return -EILSEQ; } memcpy(ver, skb->data, sizeof(*ver)); kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btintel_read_version); static int btintel_version_info_tlv(struct hci_dev *hdev, struct intel_version_tlv *version) { const char *variant; /* The hardware platform number has a fixed value of 0x37 and * for now only accept this single value. 
*/ if (INTEL_HW_PLATFORM(version->cnvi_bt) != 0x37) { bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)", INTEL_HW_PLATFORM(version->cnvi_bt)); return -EINVAL; } /* Check for supported iBT hardware variants of this firmware * loading method. * * This check has been put in place to ensure correct forward * compatibility options when newer hardware variants come along. */ switch (INTEL_HW_VARIANT(version->cnvi_bt)) { case 0x17: /* TyP */ case 0x18: /* Slr */ case 0x19: /* Slr-F */ case 0x1b: /* Mgr */ case 0x1c: /* Gale Peak (GaP) */ break; default: bt_dev_err(hdev, "Unsupported Intel hardware variant (0x%x)", INTEL_HW_VARIANT(version->cnvi_bt)); return -EINVAL; } switch (version->img_type) { case 0x01: variant = "Bootloader"; /* It is required that every single firmware fragment is acknowledged * with a command complete event. If the boot parameters indicate * that this bootloader does not send them, then abort the setup. */ if (version->limited_cce != 0x00) { bt_dev_err(hdev, "Unsupported Intel firmware loading method (0x%x)", version->limited_cce); return -EINVAL; } /* Secure boot engine type should be either 1 (ECDSA) or 0 (RSA) */ if (version->sbe_type > 0x01) { bt_dev_err(hdev, "Unsupported Intel secure boot engine type (0x%x)", version->sbe_type); return -EINVAL; } bt_dev_info(hdev, "Device revision is %u", version->dev_rev_id); bt_dev_info(hdev, "Secure boot is %s", version->secure_boot ? "enabled" : "disabled"); bt_dev_info(hdev, "OTP lock is %s", version->otp_lock ? "enabled" : "disabled"); bt_dev_info(hdev, "API lock is %s", version->api_lock ? "enabled" : "disabled"); bt_dev_info(hdev, "Debug lock is %s", version->debug_lock ? "enabled" : "disabled"); bt_dev_info(hdev, "Minimum firmware build %u week %u %u", version->min_fw_build_nn, version->min_fw_build_cw, 2000 + version->min_fw_build_yy); break; case 0x03: variant = "Firmware"; break; default: bt_dev_err(hdev, "Unsupported image type (%02x)", version->img_type); return -EINVAL; } coredump_info.hw_variant = INTEL_HW_VARIANT(version->cnvi_bt); coredump_info.fw_build_num = version->build_num; bt_dev_info(hdev, "%s timestamp %u.%u buildtype %u build %u", variant, 2000 + (version->timestamp >> 8), version->timestamp & 0xff, version->build_type, version->build_num); return 0; } static int btintel_parse_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *version, struct sk_buff *skb) { /* Consume Command Complete Status field */ skb_pull(skb, 1); /* Event parameters contain multiple TLVs. Read each of them * and only keep the required data. 
Also, it uses the existing legacy * version fields like hw_platform, hw_variant, and fw_variant * to keep the existing setup flow. */ while (skb->len) { struct intel_tlv *tlv; /* Make sure skb has a minimum length of the header */ if (skb->len < sizeof(*tlv)) return -EINVAL; tlv = (struct intel_tlv *)skb->data; /* Make sure skb has enough data */ if (skb->len < tlv->len + sizeof(*tlv)) return -EINVAL; switch (tlv->type) { case INTEL_TLV_CNVI_TOP: version->cnvi_top = get_unaligned_le32(tlv->val); break; case INTEL_TLV_CNVR_TOP: version->cnvr_top = get_unaligned_le32(tlv->val); break; case INTEL_TLV_CNVI_BT: version->cnvi_bt = get_unaligned_le32(tlv->val); break; case INTEL_TLV_CNVR_BT: version->cnvr_bt = get_unaligned_le32(tlv->val); break; case INTEL_TLV_DEV_REV_ID: version->dev_rev_id = get_unaligned_le16(tlv->val); break; case INTEL_TLV_IMAGE_TYPE: version->img_type = tlv->val[0]; break; case INTEL_TLV_TIME_STAMP: /* If the image type is operational firmware (0x03), then the * running FW calendar week and year information can * be extracted from the timestamp information */ version->min_fw_build_cw = tlv->val[0]; version->min_fw_build_yy = tlv->val[1]; version->timestamp = get_unaligned_le16(tlv->val); break; case INTEL_TLV_BUILD_TYPE: version->build_type = tlv->val[0]; break; case INTEL_TLV_BUILD_NUM: /* If the image type is operational firmware (0x03), then the * running FW build number can be extracted from the * build information */ version->min_fw_build_nn = tlv->val[0]; version->build_num = get_unaligned_le32(tlv->val); break; case INTEL_TLV_SECURE_BOOT: version->secure_boot = tlv->val[0]; break; case INTEL_TLV_OTP_LOCK: version->otp_lock = tlv->val[0]; break; case INTEL_TLV_API_LOCK: version->api_lock = tlv->val[0]; break; case INTEL_TLV_DEBUG_LOCK: version->debug_lock = tlv->val[0]; break; case INTEL_TLV_MIN_FW: version->min_fw_build_nn = tlv->val[0]; version->min_fw_build_cw = tlv->val[1]; version->min_fw_build_yy = tlv->val[2]; break; case INTEL_TLV_LIMITED_CCE: version->limited_cce = tlv->val[0]; break; case INTEL_TLV_SBE_TYPE: version->sbe_type = tlv->val[0]; break; case INTEL_TLV_OTP_BDADDR: memcpy(&version->otp_bd_addr, tlv->val, sizeof(bdaddr_t)); break; default: /* Ignore the rest of the information */ break; } /* Consume the current tlv and move to the next */ skb_pull(skb, tlv->len + sizeof(*tlv)); } return 0; } static int btintel_read_version_tlv(struct hci_dev *hdev, struct intel_version_tlv *version) { struct sk_buff *skb; const u8 param[1] = { 0xFF }; if (!version) return -EINVAL; skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reading Intel version information failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } if (skb->data[0]) { bt_dev_err(hdev, "Intel Read Version command failed (%02x)", skb->data[0]); kfree_skb(skb); return -EIO; } btintel_parse_version_tlv(hdev, version, skb); kfree_skb(skb); return 0; } /* ------- REGMAP IBT SUPPORT ------- */ #define IBT_REG_MODE_8BIT 0x00 #define IBT_REG_MODE_16BIT 0x01 #define IBT_REG_MODE_32BIT 0x02 struct regmap_ibt_context { struct hci_dev *hdev; __u16 op_write; __u16 op_read; }; struct ibt_cp_reg_access { __le32 addr; __u8 mode; __u8 len; __u8 data[]; } __packed; struct ibt_rp_reg_access { __u8 status; __le32 addr; __u8 data[]; } __packed; static int regmap_ibt_read(void *context, const void *addr, size_t reg_size, void *val, size_t val_size) { struct regmap_ibt_context *ctx = context; struct ibt_cp_reg_access cp; struct ibt_rp_reg_access *rp; struct sk_buff *skb; int err = 0; if (reg_size != 
sizeof(__le32)) return -EINVAL; switch (val_size) { case 1: cp.mode = IBT_REG_MODE_8BIT; break; case 2: cp.mode = IBT_REG_MODE_16BIT; break; case 4: cp.mode = IBT_REG_MODE_32BIT; break; default: return -EINVAL; } /* regmap provides a little-endian formatted addr */ cp.addr = *(__le32 *)addr; cp.len = val_size; bt_dev_dbg(ctx->hdev, "Register (0x%x) read", le32_to_cpu(cp.addr)); skb = hci_cmd_sync(ctx->hdev, ctx->op_read, sizeof(cp), &cp, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error (%d)", le32_to_cpu(cp.addr), err); return err; } if (skb->len != sizeof(*rp) + val_size) { bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad len", le32_to_cpu(cp.addr)); err = -EINVAL; goto done; } rp = (struct ibt_rp_reg_access *)skb->data; if (rp->addr != cp.addr) { bt_dev_err(ctx->hdev, "regmap: Register (0x%x) read error, bad addr", le32_to_cpu(rp->addr)); err = -EINVAL; goto done; } memcpy(val, rp->data, val_size); done: kfree_skb(skb); return err; } static int regmap_ibt_gather_write(void *context, const void *addr, size_t reg_size, const void *val, size_t val_size) { struct regmap_ibt_context *ctx = context; struct ibt_cp_reg_access *cp; struct sk_buff *skb; int plen = sizeof(*cp) + val_size; u8 mode; int err = 0; if (reg_size != sizeof(__le32)) return -EINVAL; switch (val_size) { case 1: mode = IBT_REG_MODE_8BIT; break; case 2: mode = IBT_REG_MODE_16BIT; break; case 4: mode = IBT_REG_MODE_32BIT; break; default: return -EINVAL; } cp = kmalloc(plen, GFP_KERNEL); if (!cp) return -ENOMEM; /* regmap provides a little-endian formatted addr/value */ cp->addr = *(__le32 *)addr; cp->mode = mode; cp->len = val_size; memcpy(&cp->data, val, val_size); bt_dev_dbg(ctx->hdev, "Register (0x%x) write", le32_to_cpu(cp->addr)); skb = hci_cmd_sync(ctx->hdev, ctx->op_write, plen, cp, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); bt_dev_err(ctx->hdev, "regmap: Register (0x%x) write error (%d)", le32_to_cpu(cp->addr), err); goto done; } kfree_skb(skb); done: kfree(cp); return err; } static int regmap_ibt_write(void *context, const void *data, size_t count) { /* data contains register+value, since we only support 32bit addr, * minimum data size is 4 bytes. 
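* Layout of the regmap write buffer handled below: data[0..3] is the little-endian register address and data[4..] the value, which is exactly how it is split for regmap_ibt_gather_write().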
*/ if (WARN_ONCE(count < 4, "Invalid register access")) return -EINVAL; return regmap_ibt_gather_write(context, data, 4, data + 4, count - 4); } static void regmap_ibt_free_context(void *context) { kfree(context); } static const struct regmap_bus regmap_ibt = { .read = regmap_ibt_read, .write = regmap_ibt_write, .gather_write = regmap_ibt_gather_write, .free_context = regmap_ibt_free_context, .reg_format_endian_default = REGMAP_ENDIAN_LITTLE, .val_format_endian_default = REGMAP_ENDIAN_LITTLE, }; /* Config is the same for all register regions */ static const struct regmap_config regmap_ibt_cfg = { .name = "btintel_regmap", .reg_bits = 32, .val_bits = 32, }; struct regmap *btintel_regmap_init(struct hci_dev *hdev, u16 opcode_read, u16 opcode_write) { struct regmap_ibt_context *ctx; bt_dev_info(hdev, "regmap: Init R%x-W%x region", opcode_read, opcode_write); ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return ERR_PTR(-ENOMEM); ctx->op_read = opcode_read; ctx->op_write = opcode_write; ctx->hdev = hdev; return regmap_init(&hdev->dev, &regmap_ibt, ctx, &regmap_ibt_cfg); } EXPORT_SYMBOL_GPL(btintel_regmap_init); int btintel_send_intel_reset(struct hci_dev *hdev, u32 boot_param) { struct intel_reset params = { 0x00, 0x01, 0x00, 0x01, 0x00000000 }; struct sk_buff *skb; params.boot_param = cpu_to_le32(boot_param); skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Failed to send Intel Reset command"); return PTR_ERR(skb); } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btintel_send_intel_reset); int btintel_read_boot_params(struct hci_dev *hdev, struct intel_boot_params *params) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, 0xfc0d, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reading Intel boot parameters failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } if (skb->len != sizeof(*params)) { bt_dev_err(hdev, "Intel boot parameters size mismatch"); kfree_skb(skb); return -EILSEQ; } memcpy(params, skb->data, sizeof(*params)); kfree_skb(skb); if (params->status) { bt_dev_err(hdev, "Intel boot parameters command failed (%02x)", params->status); return -bt_to_errno(params->status); } bt_dev_info(hdev, "Device revision is %u", le16_to_cpu(params->dev_revid)); bt_dev_info(hdev, "Secure boot is %s", params->secure_boot ? "enabled" : "disabled"); bt_dev_info(hdev, "OTP lock is %s", params->otp_lock ? "enabled" : "disabled"); bt_dev_info(hdev, "API lock is %s", params->api_lock ? "enabled" : "disabled"); bt_dev_info(hdev, "Debug lock is %s", params->debug_lock ? "enabled" : "disabled"); bt_dev_info(hdev, "Minimum firmware build %u week %u %u", params->min_fw_build_nn, params->min_fw_build_cw, 2000 + params->min_fw_build_yy); return 0; } EXPORT_SYMBOL_GPL(btintel_read_boot_params); static int btintel_sfi_rsa_header_secure_send(struct hci_dev *hdev, const struct firmware *fw) { int err; /* Start the firmware download transaction with the Init fragment * represented by the 128 bytes of CSS header. */ err = btintel_secure_send(hdev, 0x00, 128, fw->data); if (err < 0) { bt_dev_err(hdev, "Failed to send firmware header (%d)", err); goto done; } /* Send the 256 bytes of public key information from the firmware * as the PKey fragment. */ err = btintel_secure_send(hdev, 0x03, 256, fw->data + 128); if (err < 0) { bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err); goto done; } /* Send the 256 bytes of signature information from the firmware * as the Sign fragment. 
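* For reference, the RSA-signed firmware header walked here is laid out as: CSS header (128 bytes at offset 0), public key (256 bytes at offset 128), and signature (256 bytes at offset 388).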
*/ err = btintel_secure_send(hdev, 0x02, 256, fw->data + 388); if (err < 0) { bt_dev_err(hdev, "Failed to send firmware signature (%d)", err); goto done; } done: return err; } static int btintel_sfi_ecdsa_header_secure_send(struct hci_dev *hdev, const struct firmware *fw) { int err; /* Start the firmware download transaction with the Init fragment * represented by the 128 bytes of CSS header. */ err = btintel_secure_send(hdev, 0x00, 128, fw->data + 644); if (err < 0) { bt_dev_err(hdev, "Failed to send firmware header (%d)", err); return err; } /* Send the 96 bytes of public key information from the firmware * as the PKey fragment. */ err = btintel_secure_send(hdev, 0x03, 96, fw->data + 644 + 128); if (err < 0) { bt_dev_err(hdev, "Failed to send firmware pkey (%d)", err); return err; } /* Send the 96 bytes of signature information from the firmware * as the Sign fragment. */ err = btintel_secure_send(hdev, 0x02, 96, fw->data + 644 + 224); if (err < 0) { bt_dev_err(hdev, "Failed to send firmware signature (%d)", err); return err; } return 0; } static int btintel_download_firmware_payload(struct hci_dev *hdev, const struct firmware *fw, size_t offset) { int err; const u8 *fw_ptr; u32 frag_len; fw_ptr = fw->data + offset; frag_len = 0; err = -EINVAL; while (fw_ptr - fw->data < fw->size) { struct hci_command_hdr *cmd = (void *)(fw_ptr + frag_len); frag_len += sizeof(*cmd) + cmd->plen; /* The parameter length of the secure send command requires * a 4 byte alignment. Conveniently, the firmware file contains * proper Intel_NOP commands to align the fragments as needed. * * Send the set of commands with 4 byte alignment from the * firmware data buffer as a single Data fragment. */ if (!(frag_len % 4)) { err = btintel_secure_send(hdev, 0x01, frag_len, fw_ptr); if (err < 0) { bt_dev_err(hdev, "Failed to send firmware data (%d)", err); goto done; } fw_ptr += frag_len; frag_len = 0; } } done: return err; } static bool btintel_firmware_version(struct hci_dev *hdev, u8 num, u8 ww, u8 yy, const struct firmware *fw, u32 *boot_addr) { const u8 *fw_ptr; fw_ptr = fw->data; while (fw_ptr - fw->data < fw->size) { struct hci_command_hdr *cmd = (void *)(fw_ptr); /* Each SKU has a different reset parameter to use in the * HCI_Intel_Reset command and it is embedded in the firmware * data. So, instead of using a static value per SKU, check * the firmware data and save it for later use. */ if (le16_to_cpu(cmd->opcode) == CMD_WRITE_BOOT_PARAMS) { struct cmd_write_boot_params *params; params = (void *)(fw_ptr + sizeof(*cmd)); *boot_addr = le32_to_cpu(params->boot_addr); bt_dev_info(hdev, "Boot Address: 0x%x", *boot_addr); bt_dev_info(hdev, "Firmware Version: %u-%u.%u", params->fw_build_num, params->fw_build_ww, params->fw_build_yy); return (num == params->fw_build_num && ww == params->fw_build_ww && yy == params->fw_build_yy); } fw_ptr += sizeof(*cmd) + cmd->plen; } return false; } int btintel_download_firmware(struct hci_dev *hdev, struct intel_version *ver, const struct firmware *fw, u32 *boot_param) { int err; /* SfP and WsP don't seem to update the firmware version on file * so version checking is currently not possible. */ switch (ver->hw_variant) { case 0x0b: /* SfP */ case 0x0c: /* WsP */ /* Skip version checking */ break; default: /* Skip download if firmware has the same version */ if (btintel_firmware_version(hdev, ver->fw_build_num, ver->fw_build_ww, ver->fw_build_yy, fw, boot_param)) { bt_dev_info(hdev, "Firmware already loaded"); /* Return -EALREADY to indicate that the firmware has * already been loaded.
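 * Callers treat -EALREADY as success: they set INTEL_FIRMWARE_LOADED and skip both the boot step and the reset-to-bootloader error handling.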
*/ return -EALREADY; } } /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. The value 0x06 identifies * the bootloader and the value 0x23 identifies the operational * firmware. * * If the firmware version has changed that means it needs to be reset * to bootloader when operational so the new firmware can be loaded. */ if (ver->fw_variant == 0x23) return -EINVAL; err = btintel_sfi_rsa_header_secure_send(hdev, fw); if (err) return err; return btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN); } EXPORT_SYMBOL_GPL(btintel_download_firmware); static int btintel_download_fw_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver, const struct firmware *fw, u32 *boot_param, u8 hw_variant, u8 sbe_type) { int err; u32 css_header_ver; /* Skip download if firmware has the same version */ if (btintel_firmware_version(hdev, ver->min_fw_build_nn, ver->min_fw_build_cw, ver->min_fw_build_yy, fw, boot_param)) { bt_dev_info(hdev, "Firmware already loaded"); /* Return -EALREADY to indicate that firmware has * already been loaded. */ return -EALREADY; } /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. The value 0x01 identifies * the bootloader and the value 0x03 identifies the operational * firmware. * * If the firmware version has changed that means it needs to be reset * to bootloader when operational so the new firmware can be loaded. */ if (ver->img_type == 0x03) return -EINVAL; /* iBT hardware variants 0x0b, 0x0c, 0x11, 0x12, 0x13, 0x14 support * only RSA secure boot engine. Hence, the corresponding sfi file will * have RSA header of 644 bytes followed by Command Buffer. * * iBT hardware variants 0x17, 0x18 onwards support both RSA and ECDSA * secure boot engine. As a result, the corresponding sfi file will * have RSA header of 644, ECDSA header of 320 bytes followed by * Command Buffer. * * CSS Header byte positions 0x08 to 0x0B represent the CSS Header * version: RSA(0x00010000) , ECDSA (0x00020000) */ css_header_ver = get_unaligned_le32(fw->data + CSS_HEADER_OFFSET); if (css_header_ver != 0x00010000) { bt_dev_err(hdev, "Invalid CSS Header version"); return -EINVAL; } if (hw_variant <= 0x14) { if (sbe_type != 0x00) { bt_dev_err(hdev, "Invalid SBE type for hardware variant (%d)", hw_variant); return -EINVAL; } err = btintel_sfi_rsa_header_secure_send(hdev, fw); if (err) return err; err = btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN); if (err) return err; } else if (hw_variant >= 0x17) { /* Check if CSS header for ECDSA follows the RSA header */ if (fw->data[ECDSA_OFFSET] != 0x06) return -EINVAL; /* Check if the CSS Header version is ECDSA(0x00020000) */ css_header_ver = get_unaligned_le32(fw->data + ECDSA_OFFSET + CSS_HEADER_OFFSET); if (css_header_ver != 0x00020000) { bt_dev_err(hdev, "Invalid CSS Header version"); return -EINVAL; } if (sbe_type == 0x00) { err = btintel_sfi_rsa_header_secure_send(hdev, fw); if (err) return err; err = btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN + ECDSA_HEADER_LEN); if (err) return err; } else if (sbe_type == 0x01) { err = btintel_sfi_ecdsa_header_secure_send(hdev, fw); if (err) return err; err = btintel_download_firmware_payload(hdev, fw, RSA_HEADER_LEN + ECDSA_HEADER_LEN); if (err) return err; } } return 0; } static void btintel_reset_to_bootloader(struct hci_dev *hdev) { struct intel_reset params; struct sk_buff *skb; /* Send Intel Reset command. This will result in * re-enumeration of BT controller. 
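 * On USB transports this means the device drops off the bus and is probed again, which is why the delay at the end of this function has to cover the time the controller holds the USB reset lines.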
* * Intel Reset parameter description: * reset_type : 0x00 (Soft reset), * 0x01 (Hard reset) * patch_enable : 0x00 (Do not enable), * 0x01 (Enable) * ddc_reload : 0x00 (Do not reload), * 0x01 (Reload) * boot_option: 0x00 (Current image), * 0x01 (Specified boot address) * boot_param: Boot address * */ params.reset_type = 0x01; params.patch_enable = 0x01; params.ddc_reload = 0x01; params.boot_option = 0x00; params.boot_param = cpu_to_le32(0x00000000); skb = __hci_cmd_sync(hdev, 0xfc01, sizeof(params), &params, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "FW download error recovery failed (%ld)", PTR_ERR(skb)); return; } bt_dev_info(hdev, "Intel reset sent to retry FW download"); kfree_skb(skb); /* Current Intel BT controllers(ThP/JfP) hold the USB reset * lines for 2ms when it receives Intel Reset in bootloader mode. * Whereas, the upcoming Intel BT controllers will hold USB reset * for 150ms. To keep the delay generic, 150ms is chosen here. */ msleep(150); } static int btintel_read_debug_features(struct hci_dev *hdev, struct intel_debug_features *features) { struct sk_buff *skb; u8 page_no = 1; /* Intel controller supports two pages, each page is of 128-bit * feature bit mask. And each bit defines specific feature support */ skb = __hci_cmd_sync(hdev, 0xfca6, sizeof(page_no), &page_no, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reading supported features failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } if (skb->len != (sizeof(features->page1) + 3)) { bt_dev_err(hdev, "Supported features event size mismatch"); kfree_skb(skb); return -EILSEQ; } memcpy(features->page1, skb->data + 3, sizeof(features->page1)); /* Read the supported features page2 if required in future. */ kfree_skb(skb); return 0; } static acpi_status btintel_ppag_callback(acpi_handle handle, u32 lvl, void *data, void **ret) { acpi_status status; size_t len; struct btintel_ppag *ppag = data; union acpi_object *p, *elements; struct acpi_buffer string = {ACPI_ALLOCATE_BUFFER, NULL}; struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL}; struct hci_dev *hdev = ppag->hdev; status = acpi_get_name(handle, ACPI_FULL_PATHNAME, &string); if (ACPI_FAILURE(status)) { bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); return status; } len = strlen(string.pointer); if (len < strlen(BTINTEL_PPAG_NAME)) { kfree(string.pointer); return AE_OK; } if (strncmp((char *)string.pointer + len - 4, BTINTEL_PPAG_NAME, 4)) { kfree(string.pointer); return AE_OK; } kfree(string.pointer); status = acpi_evaluate_object(handle, NULL, NULL, &buffer); if (ACPI_FAILURE(status)) { ppag->status = status; bt_dev_warn(hdev, "PPAG-BT: ACPI Failure: %s", acpi_format_exception(status)); return status; } p = buffer.pointer; ppag = (struct btintel_ppag *)data; if (p->type != ACPI_TYPE_PACKAGE || p->package.count != 2) { kfree(buffer.pointer); bt_dev_warn(hdev, "PPAG-BT: Invalid object type: %d or package count: %d", p->type, p->package.count); ppag->status = AE_ERROR; return AE_ERROR; } elements = p->package.elements; /* PPAG table is located at element[1] */ p = &elements[1]; ppag->domain = (u32)p->package.elements[0].integer.value; ppag->mode = (u32)p->package.elements[1].integer.value; ppag->status = AE_OK; kfree(buffer.pointer); return AE_CTRL_TERMINATE; } static int btintel_set_debug_features(struct hci_dev *hdev, const struct intel_debug_features *features) { u8 mask[11] = { 0x0a, 0x92, 0x02, 0x7f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; u8 period[5] = { 0x04, 0x91, 0x02, 0x05, 0x00 }; u8 trace_enable = 
0x02; struct sk_buff *skb; if (!features) { bt_dev_warn(hdev, "Debug features not read"); return -EINVAL; } if (!(features->page1[0] & 0x3f)) { bt_dev_info(hdev, "Telemetry exception format not supported"); return 0; } skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); skb = __hci_cmd_sync(hdev, 0xfc8b, 5, period, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Setting periodicity for link statistics traces failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Enable tracing of link statistics events failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); bt_dev_info(hdev, "set debug features: trace_enable 0x%02x mask 0x%02x", trace_enable, mask[3]); return 0; } static int btintel_reset_debug_features(struct hci_dev *hdev, const struct intel_debug_features *features) { u8 mask[11] = { 0x0a, 0x92, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 }; u8 trace_enable = 0x00; struct sk_buff *skb; if (!features) { bt_dev_warn(hdev, "Debug features not read"); return -EINVAL; } if (!(features->page1[0] & 0x3f)) { bt_dev_info(hdev, "Telemetry exception format not supported"); return 0; } /* Should stop the trace before writing ddc event mask. */ skb = __hci_cmd_sync(hdev, 0xfca1, 1, &trace_enable, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Stop tracing of link statistics events failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); skb = __hci_cmd_sync(hdev, 0xfc8b, 11, mask, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Setting Intel telemetry ddc write event mask failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); bt_dev_info(hdev, "reset debug features: trace_enable 0x%02x mask 0x%02x", trace_enable, mask[3]); return 0; } int btintel_set_quality_report(struct hci_dev *hdev, bool enable) { struct intel_debug_features features; int err; bt_dev_dbg(hdev, "enable %d", enable); /* Read the Intel supported features and if new exception formats * supported, need to load the additional DDC config to enable. */ err = btintel_read_debug_features(hdev, &features); if (err) return err; /* Set or reset the debug features. 
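 * * Enabling writes the telemetry DDC event mask and trace period before turning tracing on; disabling stops tracing first and then clears the mask, matching the ordering noted in the helpers above.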
*/ if (enable) err = btintel_set_debug_features(hdev, &features); else err = btintel_reset_debug_features(hdev, &features); return err; } EXPORT_SYMBOL_GPL(btintel_set_quality_report); static void btintel_coredump(struct hci_dev *hdev) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, 0xfc4e, 0, NULL, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Coredump failed (%ld)", PTR_ERR(skb)); return; } kfree_skb(skb); } static void btintel_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb) { char buf[80]; snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n", coredump_info.hw_variant); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Firmware Version: 0x%X\n", coredump_info.fw_build_num); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Driver: %s\n", coredump_info.driver_name); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Vendor: Intel\n"); skb_put_data(skb, buf, strlen(buf)); } static int btintel_register_devcoredump_support(struct hci_dev *hdev) { struct intel_debug_features features; int err; err = btintel_read_debug_features(hdev, &features); if (err) { bt_dev_info(hdev, "Error reading debug features"); return err; } if (!(features.page1[0] & 0x3f)) { bt_dev_dbg(hdev, "Telemetry exception format not supported"); return -EOPNOTSUPP; } hci_devcd_register(hdev, btintel_coredump, btintel_dmp_hdr, NULL); return err; } static const struct firmware *btintel_legacy_rom_get_fw(struct hci_dev *hdev, struct intel_version *ver) { const struct firmware *fw; char fwname[64]; int ret; snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.%x-fw-%x.%x.%x.%x.%x.bseq", ver->hw_platform, ver->hw_variant, ver->hw_revision, ver->fw_variant, ver->fw_revision, ver->fw_build_num, ver->fw_build_ww, ver->fw_build_yy); ret = request_firmware(&fw, fwname, &hdev->dev); if (ret < 0) { if (ret == -EINVAL) { bt_dev_err(hdev, "Intel firmware file request failed (%d)", ret); return NULL; } bt_dev_err(hdev, "failed to open Intel firmware file: %s (%d)", fwname, ret); /* If the correct firmware patch file is not found, use the * default firmware patch file instead */ snprintf(fwname, sizeof(fwname), "intel/ibt-hw-%x.%x.bseq", ver->hw_platform, ver->hw_variant); if (request_firmware(&fw, fwname, &hdev->dev) < 0) { bt_dev_err(hdev, "failed to open default fw file: %s", fwname); return NULL; } } bt_dev_info(hdev, "Intel Bluetooth firmware file: %s", fwname); return fw; } static int btintel_legacy_rom_patching(struct hci_dev *hdev, const struct firmware *fw, const u8 **fw_ptr, int *disable_patch) { struct sk_buff *skb; struct hci_command_hdr *cmd; const u8 *cmd_param; struct hci_event_hdr *evt = NULL; const u8 *evt_param = NULL; int remain = fw->size - (*fw_ptr - fw->data); /* The first byte indicates the type of the patch command or event. * 0x01 means HCI command and 0x02 is HCI event. If the first byte * in the current firmware buffer doesn't start with 0x01, or * the size of the remaining buffer is smaller than the HCI command * header, the firmware file is corrupted and the patching process * should stop. */ if (remain > HCI_COMMAND_HDR_SIZE && *fw_ptr[0] != 0x01) { bt_dev_err(hdev, "Intel fw corrupted: invalid cmd read"); return -EINVAL; } (*fw_ptr)++; remain--; cmd = (struct hci_command_hdr *)(*fw_ptr); *fw_ptr += sizeof(*cmd); remain -= sizeof(*cmd); /* Ensure that the remaining firmware data is at least as long as the * command parameter length. If not, the firmware file is corrupted.
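 * * Record layout parsed by this function (derived from the code here, not from a published spec): 0x01 <opcode:2> <plen:1> <plen bytes> for a patch HCI command, then 0x02 <evt:1> <plen:1> <plen bytes> for each expected event, repeated until the end of the patch file.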
*/ if (remain < cmd->plen) { bt_dev_err(hdev, "Intel fw corrupted: invalid cmd len"); return -EFAULT; } /* If there is a command that loads a patch in the firmware * file, then enable the patch upon success, otherwise just * disable the manufacturer mode, for example patch activation * is not required when the default firmware patch file is used * because there is no patch data to load. */ if (*disable_patch && le16_to_cpu(cmd->opcode) == 0xfc8e) *disable_patch = 0; cmd_param = *fw_ptr; *fw_ptr += cmd->plen; remain -= cmd->plen; /* This reads the expected events when the above command is sent to the * device. Some vendor commands expect more than one event, for * example a command status event followed by a vendor specific event. * In this case, only the last expected event is kept, so the command * can be sent with __hci_cmd_sync_ev(), which returns the sk_buff of * the last expected event. */ while (remain > HCI_EVENT_HDR_SIZE && *fw_ptr[0] == 0x02) { (*fw_ptr)++; remain--; evt = (struct hci_event_hdr *)(*fw_ptr); *fw_ptr += sizeof(*evt); remain -= sizeof(*evt); if (remain < evt->plen) { bt_dev_err(hdev, "Intel fw corrupted: invalid evt len"); return -EFAULT; } evt_param = *fw_ptr; *fw_ptr += evt->plen; remain -= evt->plen; } /* Every HCI command in the firmware file has its corresponding event. * If the event is not found or remain drops below zero, the firmware * file is corrupted. */ if (!evt || !evt_param || remain < 0) { bt_dev_err(hdev, "Intel fw corrupted: invalid evt read"); return -EFAULT; } skb = __hci_cmd_sync_ev(hdev, le16_to_cpu(cmd->opcode), cmd->plen, cmd_param, evt->evt, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "sending Intel patch command (0x%4.4x) failed (%ld)", cmd->opcode, PTR_ERR(skb)); return PTR_ERR(skb); } /* This ensures that the returned event matches the event data read from * the firmware file. At first, it checks the length and then * the contents of the event. */ if (skb->len != evt->plen) { bt_dev_err(hdev, "mismatch event length (opcode 0x%4.4x)", le16_to_cpu(cmd->opcode)); kfree_skb(skb); return -EFAULT; } if (memcmp(skb->data, evt_param, evt->plen)) { bt_dev_err(hdev, "mismatch event parameter (opcode 0x%4.4x)", le16_to_cpu(cmd->opcode)); kfree_skb(skb); return -EFAULT; } kfree_skb(skb); return 0; } static int btintel_legacy_rom_setup(struct hci_dev *hdev, struct intel_version *ver) { const struct firmware *fw; const u8 *fw_ptr; int disable_patch, err; struct intel_version new_ver; BT_DBG("%s", hdev->name); /* fw_patch_num indicates the version of the patch the device currently * has. If there is no patch data in the device, it is always 0x00. * So, if it is other than 0x00, no need to patch the device again. */ if (ver->fw_patch_num) { bt_dev_info(hdev, "Intel device is already patched. patch num: %02x", ver->fw_patch_num); goto complete; } /* Opens the firmware patch file based on the firmware version read * from the controller. If it fails to open the matching firmware * patch file, it tries to open the default firmware patch file. * If no patch file is found, allow the device to operate without * a patch. */ fw = btintel_legacy_rom_get_fw(hdev, ver); if (!fw) goto complete; fw_ptr = fw->data; /* Enable the manufacturer mode of the controller. * Only while this mode is enabled can the driver download the * firmware patch data and configuration parameters.
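 * * The exit labels below map onto btintel_exit_mfg(hdev, reset, patched): success -> (true, true), default patch file -> (false, false), patching failure -> (true, false).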
*/ err = btintel_enter_mfg(hdev); if (err) { release_firmware(fw); return err; } disable_patch = 1; /* The firmware data file consists of a list of Intel specific HCI * commands and their expected events. The first byte indicates the * type of the message, either HCI command or HCI event. * * It reads the command and its expected event from the firmware file, * and sends it to the controller. Once __hci_cmd_sync_ev() returns, * the returned event is compared with the event read from the firmware * file and it will continue until all the messages are downloaded to * the controller. * * Once the firmware patching is completed successfully, * the manufacturer mode is disabled with a reset, activating the * downloaded patch. * * If the firmware patching fails, the manufacturer mode is * disabled with a reset, deactivating the patch. * * If the default patch file is used, no reset is done when disabling * the manufacturer mode. */ while (fw->size > fw_ptr - fw->data) { int ret; ret = btintel_legacy_rom_patching(hdev, fw, &fw_ptr, &disable_patch); if (ret < 0) goto exit_mfg_deactivate; } release_firmware(fw); if (disable_patch) goto exit_mfg_disable; /* Patching completed successfully, so disable the manufacturer mode * with a reset and activate the downloaded firmware patches. */ err = btintel_exit_mfg(hdev, true, true); if (err) return err; /* The build number of the downloaded fw patches is needed on * every power-on boot */ err = btintel_read_version(hdev, &new_ver); if (err) return err; bt_dev_info(hdev, "Intel BT fw patch 0x%02x completed & activated", new_ver.fw_patch_num); goto complete; exit_mfg_disable: /* Disable the manufacturer mode without reset */ err = btintel_exit_mfg(hdev, false, false); if (err) return err; bt_dev_info(hdev, "Intel firmware patch completed"); goto complete; exit_mfg_deactivate: release_firmware(fw); /* Patching failed. Disable the manufacturer mode with reset and * deactivate the downloaded firmware patches. */ err = btintel_exit_mfg(hdev, true, false); if (err) return err; bt_dev_info(hdev, "Intel firmware patch completed and deactivated"); complete: /* Set the event mask for Intel specific vendor events. This enables * a few extra events that are useful during general operation.
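 * It does not enable any debugging related events. * * The device will function correctly without these events enabled and thus no need to fail the setup.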
*/ btintel_set_event_mask_mfg(hdev, false); btintel_check_bdaddr(hdev); return 0; } static int btintel_download_wait(struct hci_dev *hdev, ktime_t calltime, int msec) { ktime_t delta, rettime; unsigned long long duration; int err; btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); bt_dev_info(hdev, "Waiting for firmware download to complete"); err = btintel_wait_on_flag_timeout(hdev, INTEL_DOWNLOADING, TASK_INTERRUPTIBLE, msecs_to_jiffies(msec)); if (err == -EINTR) { bt_dev_err(hdev, "Firmware loading interrupted"); return err; } if (err) { bt_dev_err(hdev, "Firmware loading timeout"); return -ETIMEDOUT; } if (btintel_test_flag(hdev, INTEL_FIRMWARE_FAILED)) { bt_dev_err(hdev, "Firmware loading failed"); return -ENOEXEC; } rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = (unsigned long long)ktime_to_ns(delta) >> 10; bt_dev_info(hdev, "Firmware loaded in %llu usecs", duration); return 0; } static int btintel_boot_wait(struct hci_dev *hdev, ktime_t calltime, int msec) { ktime_t delta, rettime; unsigned long long duration; int err; bt_dev_info(hdev, "Waiting for device to boot"); err = btintel_wait_on_flag_timeout(hdev, INTEL_BOOTING, TASK_INTERRUPTIBLE, msecs_to_jiffies(msec)); if (err == -EINTR) { bt_dev_err(hdev, "Device boot interrupted"); return -EINTR; } if (err) { bt_dev_err(hdev, "Device boot timeout"); return -ETIMEDOUT; } rettime = ktime_get(); delta = ktime_sub(rettime, calltime); duration = (unsigned long long) ktime_to_ns(delta) >> 10; bt_dev_info(hdev, "Device booted in %llu usecs", duration); return 0; } static int btintel_boot(struct hci_dev *hdev, u32 boot_addr) { ktime_t calltime; int err; calltime = ktime_get(); btintel_set_flag(hdev, INTEL_BOOTING); err = btintel_send_intel_reset(hdev, boot_addr); if (err) { bt_dev_err(hdev, "Intel Soft Reset failed (%d)", err); btintel_reset_to_bootloader(hdev); return err; } /* The bootloader will not indicate when the device is ready. This * is done by the operational firmware sending bootup notification. * * Booting into operational firmware should not take longer than * 1 second. However if that happens, then just fail the setup * since something went wrong. */ err = btintel_boot_wait(hdev, calltime, 1000); if (err == -ETIMEDOUT) btintel_reset_to_bootloader(hdev); return err; } static int btintel_get_fw_name(struct intel_version *ver, struct intel_boot_params *params, char *fw_name, size_t len, const char *suffix) { switch (ver->hw_variant) { case 0x0b: /* SfP */ case 0x0c: /* WsP */ snprintf(fw_name, len, "intel/ibt-%u-%u.%s", ver->hw_variant, le16_to_cpu(params->dev_revid), suffix); break; case 0x11: /* JfP */ case 0x12: /* ThP */ case 0x13: /* HrP */ case 0x14: /* CcP */ snprintf(fw_name, len, "intel/ibt-%u-%u-%u.%s", ver->hw_variant, ver->hw_revision, ver->fw_revision, suffix); break; default: return -EINVAL; } return 0; } static int btintel_download_fw(struct hci_dev *hdev, struct intel_version *ver, struct intel_boot_params *params, u32 *boot_param) { const struct firmware *fw; char fwname[64]; int err; ktime_t calltime; if (!ver || !params) return -EINVAL; /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. The value 0x06 identifies * the bootloader and the value 0x23 identifies the operational * firmware. * * When the operational firmware is already present, then only * the check for valid Bluetooth device address is needed. This * determines if the device will be added as configured or * unconfigured controller. 
 * * It is not possible to use the Secure Boot Parameters in this * case since that command is only available in bootloader mode. */ if (ver->fw_variant == 0x23) { btintel_clear_flag(hdev, INTEL_BOOTLOADER); btintel_check_bdaddr(hdev); /* SfP and WsP don't seem to update the firmware version on file * so version checking is currently not possible. */ switch (ver->hw_variant) { case 0x0b: /* SfP */ case 0x0c: /* WsP */ return 0; } /* Proceed to download to check if the version matches */ goto download; } /* Read the secure boot parameters to identify the operating * details of the bootloader. */ err = btintel_read_boot_params(hdev, params); if (err) return err; /* It is required that every single firmware fragment is acknowledged * with a command complete event. If the boot parameters indicate * that this bootloader does not send them, then abort the setup. */ if (params->limited_cce != 0x00) { bt_dev_err(hdev, "Unsupported Intel firmware loading method (%u)", params->limited_cce); return -EINVAL; } /* If the OTP has no valid Bluetooth device address, then there will * also be no valid address for the operational firmware. */ if (!bacmp(&params->otp_bdaddr, BDADDR_ANY)) { bt_dev_info(hdev, "No device address configured"); set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); } download: /* With this Intel bootloader only the hardware variant and device * revision information are used to select the right firmware for SfP * and WsP. * * The firmware filename is ibt-<hw_variant>-<dev_revid>.sfi. * * Currently the supported hardware variants are: * 11 (0x0b) for iBT3.0 (LnP/SfP) * 12 (0x0c) for iBT3.5 (WsP) * * For ThP/JfP and for future SKUs, the FW name varies based on HW * variant, HW revision and FW revision, as these are dependent on CNVi * and RF Combination. * * 17 (0x11) for iBT3.5 (JfP) * 18 (0x12) for iBT3.5 (ThP) * * The firmware file name for these will be * ibt-<hw_variant>-<hw_revision>-<fw_revision>.sfi. * */ err = btintel_get_fw_name(ver, params, fwname, sizeof(fwname), "sfi"); if (err < 0) { if (!btintel_test_flag(hdev, INTEL_BOOTLOADER)) { /* Firmware has already been loaded */ btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); return 0; } bt_dev_err(hdev, "Unsupported Intel firmware naming"); return -EINVAL; } err = firmware_request_nowarn(&fw, fwname, &hdev->dev); if (err < 0) { if (!btintel_test_flag(hdev, INTEL_BOOTLOADER)) { /* Firmware has already been loaded */ btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); return 0; } bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)", fwname, err); return err; } bt_dev_info(hdev, "Found device firmware: %s", fwname); if (fw->size < 644) { bt_dev_err(hdev, "Invalid size of firmware file (%zu)", fw->size); err = -EBADF; goto done; } calltime = ktime_get(); btintel_set_flag(hdev, INTEL_DOWNLOADING); /* Start firmware downloading and get boot parameter */ err = btintel_download_firmware(hdev, ver, fw, boot_param); if (err < 0) { if (err == -EALREADY) { /* Firmware has already been loaded */ btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); err = 0; goto done; } /* When FW download fails, send Intel Reset to retry * FW download. */ btintel_reset_to_bootloader(hdev); goto done; } /* Before switching the device into operational mode and with that * booting the loaded firmware, wait for the bootloader notification * that all fragments have been successfully received. * * When the event processing receives the notification, then the * INTEL_DOWNLOADING flag will be cleared.
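 * (btintel_secure_send_result() at the end of this file is the event handler that clears INTEL_DOWNLOADING and wakes this waiter.)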
 * * The firmware loading should not take longer than 5 seconds * and thus just timeout if that happens and fail the setup * of this device. */ err = btintel_download_wait(hdev, calltime, 5000); if (err == -ETIMEDOUT) btintel_reset_to_bootloader(hdev); done: release_firmware(fw); return err; } static int btintel_bootloader_setup(struct hci_dev *hdev, struct intel_version *ver) { struct intel_version new_ver; struct intel_boot_params params; u32 boot_param; char ddcname[64]; int err; BT_DBG("%s", hdev->name); /* Set the default boot parameter to 0x0; it is updated to the * SKU specific boot parameter after reading the Intel_Write_Boot_Params * command while downloading the firmware. */ boot_param = 0x00000000; btintel_set_flag(hdev, INTEL_BOOTLOADER); err = btintel_download_fw(hdev, ver, &params, &boot_param); if (err) return err; /* the controller already has an operational firmware */ if (ver->fw_variant == 0x23) goto finish; err = btintel_boot(hdev, boot_param); if (err) return err; btintel_clear_flag(hdev, INTEL_BOOTLOADER); err = btintel_get_fw_name(ver, &params, ddcname, sizeof(ddcname), "ddc"); if (err < 0) { bt_dev_err(hdev, "Unsupported Intel firmware naming"); } else { /* Once the device is running in operational mode, it needs to * apply the device configuration (DDC) parameters. * * The device can work without DDC parameters, so even if it * fails to load the file, no need to fail the setup. */ btintel_load_ddc_config(hdev, ddcname); } hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); /* Read the Intel version information after loading the FW */ err = btintel_read_version(hdev, &new_ver); if (err) return err; btintel_version_info(hdev, &new_ver); finish: /* Set the event mask for Intel specific vendor events. This enables * a few extra events that are useful during general operation. It * does not enable any debugging related events. * * The device will function correctly without these events enabled * and thus no need to fail the setup. */ btintel_set_event_mask(hdev, false); return 0; } static void btintel_get_fw_name_tlv(const struct intel_version_tlv *ver, char *fw_name, size_t len, const char *suffix) { /* The firmware file name for new generation controllers will be * ibt-<cnvi_top type+cnvi_top step>-<cnvr_top type+cnvr_top step> */ snprintf(fw_name, len, "intel/ibt-%04x-%04x.%s", INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvi_top), INTEL_CNVX_TOP_STEP(ver->cnvi_top)), INTEL_CNVX_TOP_PACK_SWAB(INTEL_CNVX_TOP_TYPE(ver->cnvr_top), INTEL_CNVX_TOP_STEP(ver->cnvr_top)), suffix); } static int btintel_prepare_fw_download_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver, u32 *boot_param) { const struct firmware *fw; char fwname[64]; int err; ktime_t calltime; if (!ver || !boot_param) return -EINVAL; /* The firmware variant determines if the device is in bootloader * mode or is running operational firmware. The value 0x01 identifies * the bootloader and the value 0x03 identifies the operational * firmware. * * When the operational firmware is already present, then only * the check for valid Bluetooth device address is needed. This * determines if the device will be added as configured or * unconfigured controller. * * It is not possible to use the Secure Boot Parameters in this * case since that command is only available in bootloader mode. */ if (ver->img_type == 0x03) { btintel_clear_flag(hdev, INTEL_BOOTLOADER); btintel_check_bdaddr(hdev); } else { /* * Check for valid bd address in boot loader mode. Device * will be marked as unconfigured if empty bd address is * found.
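 * BDADDR_ANY is the all-zeroes address, so a zeroed OTP field compares equal in the bacmp() below.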
*/ if (!bacmp(&ver->otp_bd_addr, BDADDR_ANY)) { bt_dev_info(hdev, "No device address configured"); set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); } } btintel_get_fw_name_tlv(ver, fwname, sizeof(fwname), "sfi"); err = firmware_request_nowarn(&fw, fwname, &hdev->dev); if (err < 0) { if (!btintel_test_flag(hdev, INTEL_BOOTLOADER)) { /* Firmware has already been loaded */ btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); return 0; } bt_dev_err(hdev, "Failed to load Intel firmware file %s (%d)", fwname, err); return err; } bt_dev_info(hdev, "Found device firmware: %s", fwname); if (fw->size < 644) { bt_dev_err(hdev, "Invalid size of firmware file (%zu)", fw->size); err = -EBADF; goto done; } calltime = ktime_get(); btintel_set_flag(hdev, INTEL_DOWNLOADING); /* Start firmware downloading and get boot parameter */ err = btintel_download_fw_tlv(hdev, ver, fw, boot_param, INTEL_HW_VARIANT(ver->cnvi_bt), ver->sbe_type); if (err < 0) { if (err == -EALREADY) { /* Firmware has already been loaded */ btintel_set_flag(hdev, INTEL_FIRMWARE_LOADED); err = 0; goto done; } /* When FW download fails, send Intel Reset to retry * FW download. */ btintel_reset_to_bootloader(hdev); goto done; } /* Before switching the device into operational mode and with that * booting the loaded firmware, wait for the bootloader notification * that all fragments have been successfully received. * * When the event processing receives the notification, then the * INTEL_DOWNLOADING flag will be cleared. * * The firmware loading should not take longer than 5 seconds * and thus just timeout if that happens and fail the setup * of this device. */ err = btintel_download_wait(hdev, calltime, 5000); if (err == -ETIMEDOUT) btintel_reset_to_bootloader(hdev); done: release_firmware(fw); return err; } static int btintel_get_codec_config_data(struct hci_dev *hdev, __u8 link, struct bt_codec *codec, __u8 *ven_len, __u8 **ven_data) { int err = 0; if (!ven_data || !ven_len) return -EINVAL; *ven_len = 0; *ven_data = NULL; if (link != ESCO_LINK) { bt_dev_err(hdev, "Invalid link type(%u)", link); return -EINVAL; } *ven_data = kmalloc(sizeof(__u8), GFP_KERNEL); if (!*ven_data) { err = -ENOMEM; goto error; } /* supports only CVSD and mSBC offload codecs */ switch (codec->id) { case 0x02: **ven_data = 0x00; break; case 0x05: **ven_data = 0x01; break; default: err = -EINVAL; bt_dev_err(hdev, "Invalid codec id(%u)", codec->id); goto error; } /* codec and its capabilities are pre-defined to ids * preset id = 0x00 represents CVSD codec with sampling rate 8K * preset id = 0x01 represents mSBC codec with sampling rate 16K */ *ven_len = sizeof(__u8); return err; error: kfree(*ven_data); *ven_data = NULL; return err; } static int btintel_get_data_path_id(struct hci_dev *hdev, __u8 *data_path_id) { /* Intel uses 1 as data path id for all the use cases */ *data_path_id = 1; return 0; } static int btintel_configure_offload(struct hci_dev *hdev) { struct sk_buff *skb; int err = 0; struct intel_offload_use_cases *use_cases; skb = __hci_cmd_sync(hdev, 0xfc86, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reading offload use cases failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } if (skb->len < sizeof(*use_cases)) { err = -EIO; goto error; } use_cases = (void *)skb->data; if (use_cases->status) { err = -bt_to_errno(skb->data[0]); goto error; } if (use_cases->preset[0] & 0x03) { hdev->get_data_path_id = btintel_get_data_path_id; hdev->get_codec_config_data = btintel_get_codec_config_data; } error: kfree_skb(skb); return err; } static void
btintel_set_ppag(struct hci_dev *hdev, struct intel_version_tlv *ver) { struct btintel_ppag ppag; struct sk_buff *skb; struct hci_ppag_enable_cmd ppag_cmd; acpi_handle handle; /* PPAG is not supported if CRF is HrP2, Jfp2, JfP1 */ switch (ver->cnvr_top & 0xFFF) { case 0x504: /* Hrp2 */ case 0x202: /* Jfp2 */ case 0x201: /* Jfp1 */ bt_dev_dbg(hdev, "PPAG not supported for Intel CNVr (0x%3x)", ver->cnvr_top & 0xFFF); return; } handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev)); if (!handle) { bt_dev_info(hdev, "No support for BT device in ACPI firmware"); return; } memset(&ppag, 0, sizeof(ppag)); ppag.hdev = hdev; ppag.status = AE_NOT_FOUND; acpi_walk_namespace(ACPI_TYPE_PACKAGE, handle, 1, NULL, btintel_ppag_callback, &ppag, NULL); if (ACPI_FAILURE(ppag.status)) { if (ppag.status == AE_NOT_FOUND) { bt_dev_dbg(hdev, "PPAG-BT: ACPI entry not found"); return; } return; } if (ppag.domain != 0x12) { bt_dev_dbg(hdev, "PPAG-BT: Bluetooth domain is disabled in ACPI firmware"); return; } /* PPAG mode * BIT 0 : 0 Disabled in EU * 1 Enabled in EU * BIT 1 : 0 Disabled in China * 1 Enabled in China */ if ((ppag.mode & 0x01) != BIT(0) && (ppag.mode & 0x02) != BIT(1)) { bt_dev_dbg(hdev, "PPAG-BT: EU, China mode are disabled in CB/BIOS"); return; } ppag_cmd.ppag_enable_flags = cpu_to_le32(ppag.mode); skb = __hci_cmd_sync(hdev, INTEL_OP_PPAG_CMD, sizeof(ppag_cmd), &ppag_cmd, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_warn(hdev, "Failed to send PPAG Enable (%ld)", PTR_ERR(skb)); return; } bt_dev_info(hdev, "PPAG-BT: Enabled (Mode %d)", ppag.mode); kfree_skb(skb); } static int btintel_acpi_reset_method(struct hci_dev *hdev) { int ret = 0; acpi_status status; union acpi_object *p, *ref; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; status = acpi_evaluate_object(ACPI_HANDLE(GET_HCIDEV_DEV(hdev)), "_PRR", NULL, &buffer); if (ACPI_FAILURE(status)) { bt_dev_err(hdev, "Failed to run _PRR method"); ret = -ENODEV; return ret; } p = buffer.pointer; if (p->package.count != 1 || p->type != ACPI_TYPE_PACKAGE) { bt_dev_err(hdev, "Invalid arguments"); ret = -EINVAL; goto exit_on_error; } ref = &p->package.elements[0]; if (ref->type != ACPI_TYPE_LOCAL_REFERENCE) { bt_dev_err(hdev, "Invalid object type: 0x%x", ref->type); ret = -EINVAL; goto exit_on_error; } status = acpi_evaluate_object(ref->reference.handle, "_RST", NULL, NULL); if (ACPI_FAILURE(status)) { bt_dev_err(hdev, "Failed to run_RST method"); ret = -ENODEV; goto exit_on_error; } exit_on_error: kfree(buffer.pointer); return ret; } static void btintel_set_dsm_reset_method(struct hci_dev *hdev, struct intel_version_tlv *ver_tlv) { struct btintel_data *data = hci_get_priv(hdev); acpi_handle handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev)); u8 reset_payload[4] = {0x01, 0x00, 0x01, 0x00}; union acpi_object *obj, argv4; enum { RESET_TYPE_WDISABLE2, RESET_TYPE_VSEC }; handle = ACPI_HANDLE(GET_HCIDEV_DEV(hdev)); if (!handle) { bt_dev_dbg(hdev, "No support for bluetooth device in ACPI firmware"); return; } if (!acpi_has_method(handle, "_PRR")) { bt_dev_err(hdev, "No support for _PRR ACPI method"); return; } switch (ver_tlv->cnvi_top & 0xfff) { case 0x910: /* GalePeak2 */ reset_payload[2] = RESET_TYPE_VSEC; break; default: /* WDISABLE2 is the default reset method */ reset_payload[2] = RESET_TYPE_WDISABLE2; if (!acpi_check_dsm(handle, &btintel_guid_dsm, 0, BIT(DSM_SET_WDISABLE2_DELAY))) { bt_dev_err(hdev, "No dsm support to set reset delay"); return; } argv4.integer.type = ACPI_TYPE_INTEGER; /* delay required to toggle BT power */ argv4.integer.value = 160; obj = 
acpi_evaluate_dsm(handle, &btintel_guid_dsm, 0, DSM_SET_WDISABLE2_DELAY, &argv4); if (!obj) { bt_dev_err(hdev, "Failed to call dsm to set reset delay"); return; } ACPI_FREE(obj); } bt_dev_info(hdev, "DSM reset method type: 0x%02x", reset_payload[2]); if (!acpi_check_dsm(handle, &btintel_guid_dsm, 0, DSM_SET_RESET_METHOD)) { bt_dev_warn(hdev, "No support for dsm to set reset method"); return; } argv4.buffer.type = ACPI_TYPE_BUFFER; argv4.buffer.length = sizeof(reset_payload); argv4.buffer.pointer = reset_payload; obj = acpi_evaluate_dsm(handle, &btintel_guid_dsm, 0, DSM_SET_RESET_METHOD, &argv4); if (!obj) { bt_dev_err(hdev, "Failed to call dsm to set reset method"); return; } ACPI_FREE(obj); data->acpi_reset_method = btintel_acpi_reset_method; } static int btintel_bootloader_setup_tlv(struct hci_dev *hdev, struct intel_version_tlv *ver) { u32 boot_param; char ddcname[64]; int err; struct intel_version_tlv new_ver; bt_dev_dbg(hdev, ""); /* Set the default boot parameter to 0x0; it is updated to the * SKU specific boot parameter after reading the Intel_Write_Boot_Params * command while downloading the firmware. */ boot_param = 0x00000000; btintel_set_flag(hdev, INTEL_BOOTLOADER); err = btintel_prepare_fw_download_tlv(hdev, ver, &boot_param); if (err) return err; /* check if the controller already has an operational firmware */ if (ver->img_type == 0x03) goto finish; err = btintel_boot(hdev, boot_param); if (err) return err; btintel_clear_flag(hdev, INTEL_BOOTLOADER); btintel_get_fw_name_tlv(ver, ddcname, sizeof(ddcname), "ddc"); /* Once the device is running in operational mode, it needs to * apply the device configuration (DDC) parameters. * * The device can work without DDC parameters, so even if it * fails to load the file, no need to fail the setup. */ btintel_load_ddc_config(hdev, ddcname); /* Read supported use cases and set callbacks to fetch datapath id */ btintel_configure_offload(hdev); hci_dev_clear_flag(hdev, HCI_QUALITY_REPORT); /* Set PPAG feature */ btintel_set_ppag(hdev, ver); /* Read the Intel version information after loading the FW */ err = btintel_read_version_tlv(hdev, &new_ver); if (err) return err; btintel_version_info_tlv(hdev, &new_ver); finish: /* Set the event mask for Intel specific vendor events. This enables * a few extra events that are useful during general operation. It * does not enable any debugging related events. * * The device will function correctly without these events enabled * and thus no need to fail the setup. */ btintel_set_event_mask(hdev, false); return 0; } static void btintel_set_msft_opcode(struct hci_dev *hdev, u8 hw_variant) { switch (hw_variant) { /* Legacy bootloader devices that support MSFT Extension */ case 0x11: /* JfP */ case 0x12: /* ThP */ case 0x13: /* HrP */ case 0x14: /* CcP */ /* All Intel new generation controllers that support the Microsoft * vendor extension use 0xFC1E for VsMsftOpCode. */ case 0x17: case 0x18: case 0x19: case 0x1b: case 0x1c: hci_set_msft_opcode(hdev, 0xFC1E); break; default: /* Not supported */ break; } } static int btintel_setup_combined(struct hci_dev *hdev) { const u8 param[1] = { 0xFF }; struct intel_version ver; struct intel_version_tlv ver_tlv; struct sk_buff *skb; int err; BT_DBG("%s", hdev->name); /* Some controllers have a bug where the first HCI command they receive * returns a number of completed commands of zero. This would stall the * command processing in the Bluetooth core.
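 * (The core only sends the next command once the previous Command Complete event returns a non-zero ncmd credit, so a zero credit wedges the queue.)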
 * * As a workaround, send HCI Reset command first which will reset the * number of completed commands and allow normal command processing * from now on. * * Regarding the INTEL_BROKEN_SHUTDOWN_LED flag, these devices may be * in the SW_RFKILL ON state as a workaround for fixing the LED issue * during the shutdown() procedure, and once the device is in SW_RFKILL * ON state, the only way to exit out of it is sending the HCI_Reset * command. */ if (btintel_test_flag(hdev, INTEL_BROKEN_INITIAL_NCMD) || btintel_test_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED)) { skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "sending initial HCI reset failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } kfree_skb(skb); } /* Starting from the TyP device, the command parameter and response are * changed even though the OCF for the HCI_Intel_Read_Version command * remains the same. The legacy devices can handle the command even if * it has a parameter, and return correct version information. * So, the new format is used to support both legacy and new formats. */ skb = __hci_cmd_sync(hdev, 0xfc05, 1, param, HCI_CMD_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "Reading Intel version command failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } /* Check the status */ if (skb->data[0]) { bt_dev_err(hdev, "Intel Read Version command failed (%02x)", skb->data[0]); err = -EIO; goto exit_error; } /* Apply the common HCI quirks for Intel device */ set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks); set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); set_bit(HCI_QUIRK_NON_PERSISTENT_DIAG, &hdev->quirks); /* Set up the quality report callback for Intel devices */ hdev->set_quality_report = btintel_set_quality_report; /* For legacy devices, check the HW platform value and size */ if (skb->len == sizeof(ver) && skb->data[1] == 0x37) { bt_dev_dbg(hdev, "Read the legacy Intel version information"); memcpy(&ver, skb->data, sizeof(ver)); /* Display version information */ btintel_version_info(hdev, &ver); /* Check for supported iBT hardware variants of this firmware * loading method. * * This check has been put in place to ensure correct forward * compatibility options when newer hardware variants come * along. */ switch (ver.hw_variant) { case 0x07: /* WP */ case 0x08: /* StP */ /* Legacy ROM product */ btintel_set_flag(hdev, INTEL_ROM_LEGACY); /* Apply the device specific HCI quirks * * WBS for SdP - For the Legacy ROM products, only SdP * supports the WBS. But the version information is not * enough to use here because the StP2 and SdP have the same * hw_variant and fw_variant.
So, this flag is set by * the transport driver (btusb) based on the HW info * (idProduct). */ if (!btintel_test_flag(hdev, INTEL_ROM_LEGACY_NO_WBS_SUPPORT)) set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); if (ver.hw_variant == 0x08 && ver.fw_variant == 0x22) set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); err = btintel_legacy_rom_setup(hdev, &ver); break; case 0x0b: /* SfP */ case 0x11: /* JfP */ case 0x12: /* ThP */ case 0x13: /* HrP */ case 0x14: /* CcP */ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); fallthrough; case 0x0c: /* WsP */ /* Apply the device specific HCI quirks * * All Legacy bootloader devices support WBS */ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); /* These variants don't seem to support LE Coded PHY */ set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks); /* Setup MSFT Extension support */ btintel_set_msft_opcode(hdev, ver.hw_variant); err = btintel_bootloader_setup(hdev, &ver); btintel_register_devcoredump_support(hdev); break; default: bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", ver.hw_variant); err = -EINVAL; } goto exit_error; } /* memset ver_tlv to start with a clean state as a few fields are * exclusive to bootloader mode and are not populated in operational mode */ memset(&ver_tlv, 0, sizeof(ver_tlv)); /* For TLV type device, parse the tlv data */ err = btintel_parse_version_tlv(hdev, &ver_tlv, skb); if (err) { bt_dev_err(hdev, "Failed to parse TLV version information"); goto exit_error; } if (INTEL_HW_PLATFORM(ver_tlv.cnvi_bt) != 0x37) { bt_dev_err(hdev, "Unsupported Intel hardware platform (0x%2x)", INTEL_HW_PLATFORM(ver_tlv.cnvi_bt)); err = -EINVAL; goto exit_error; } /* Check for supported iBT hardware variants of this firmware * loading method. * * This check has been put in place to ensure correct forward * compatibility options when newer hardware variants come * along. */ switch (INTEL_HW_VARIANT(ver_tlv.cnvi_bt)) { case 0x11: /* JfP */ case 0x12: /* ThP */ case 0x13: /* HrP */ case 0x14: /* CcP */ /* On some legacy bootloader devices, starting from JfP, * the operational firmware supports both the old and the TLV * based HCI_Intel_Read_Version command, selected by the command * parameter. * * For the firmware upgrade case, the TLV based version cannot * be used because the firmware filename for legacy bootloader * is based on the old format. * * Also, it is not easy to convert between the TLV based and the * legacy version formats. * * So, as a workaround for those devices, use the legacy * HCI_Intel_Read_Version to get the version information and * run the legacy bootloader setup.
*/ err = btintel_read_version(hdev, &ver); if (err) break; /* Apply the device specific HCI quirks * * All Legacy bootloader devices support WBS */ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); /* These variants don't seem to support LE Coded PHY */ set_bit(HCI_QUIRK_BROKEN_LE_CODED, &hdev->quirks); /* Set Valid LE States quirk */ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); /* Setup MSFT Extension support */ btintel_set_msft_opcode(hdev, ver.hw_variant); err = btintel_bootloader_setup(hdev, &ver); btintel_register_devcoredump_support(hdev); break; case 0x17: case 0x18: case 0x19: case 0x1b: case 0x1c: /* Display version information of TLV type */ btintel_version_info_tlv(hdev, &ver_tlv); /* Apply the device specific HCI quirks for TLV based devices * * All TLV based devices support WBS */ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); /* Apply LE States quirk from solar onwards */ set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); /* Setup MSFT Extension support */ btintel_set_msft_opcode(hdev, INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); btintel_set_dsm_reset_method(hdev, &ver_tlv); err = btintel_bootloader_setup_tlv(hdev, &ver_tlv); btintel_register_devcoredump_support(hdev); break; default: bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); err = -EINVAL; break; } exit_error: kfree_skb(skb); return err; } static int btintel_shutdown_combined(struct hci_dev *hdev) { struct sk_buff *skb; int ret; /* Send HCI Reset to the controller to stop any BT activity that * was triggered. This will help to save power and maintain sync * between the Host and the controller */ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { bt_dev_err(hdev, "HCI reset during shutdown failed"); return PTR_ERR(skb); } kfree_skb(skb); /* Some platforms have an issue with the BT LED when the interface is * down or the BT radio is turned off, where it takes 5 seconds for the * BT LED to go off. As a workaround, send HCI_Intel_SW_RFKILL to put the * device in the RFKILL ON state which turns off the BT LED immediately.
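 * * Leaving the device in the SW_RFKILL ON state is also why btintel_setup_combined() must issue HCI_Reset first on the next boot for devices with INTEL_BROKEN_SHUTDOWN_LED set.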
*/ if (btintel_test_flag(hdev, INTEL_BROKEN_SHUTDOWN_LED)) { skb = __hci_cmd_sync(hdev, 0xfc3f, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { ret = PTR_ERR(skb); bt_dev_err(hdev, "turning off Intel device LED failed"); return ret; } kfree_skb(skb); } return 0; } int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name) { hdev->manufacturer = 2; hdev->setup = btintel_setup_combined; hdev->shutdown = btintel_shutdown_combined; hdev->hw_error = btintel_hw_error; hdev->set_diag = btintel_set_diag_combined; hdev->set_bdaddr = btintel_set_bdaddr; coredump_info.driver_name = driver_name; return 0; } EXPORT_SYMBOL_GPL(btintel_configure_setup); static int btintel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb) { struct intel_tlv *tlv = (void *)&skb->data[5]; /* The first event is always an event type TLV */ if (tlv->type != INTEL_TLV_TYPE_ID) goto recv_frame; switch (tlv->val[0]) { case INTEL_TLV_SYSTEM_EXCEPTION: case INTEL_TLV_FATAL_EXCEPTION: case INTEL_TLV_DEBUG_EXCEPTION: case INTEL_TLV_TEST_EXCEPTION: /* Generate devcoredump from exception */ if (!hci_devcd_init(hdev, skb->len)) { hci_devcd_append(hdev, skb); hci_devcd_complete(hdev); } else { bt_dev_err(hdev, "Failed to generate devcoredump"); kfree_skb(skb); } return 0; default: bt_dev_err(hdev, "Invalid exception type %02X", tlv->val[0]); } recv_frame: return hci_recv_frame(hdev, skb); } int btintel_recv_event(struct hci_dev *hdev, struct sk_buff *skb) { struct hci_event_hdr *hdr = (void *)skb->data; const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 }; if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff && hdr->plen > 0) { const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1; unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1; if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) { switch (skb->data[2]) { case 0x02: /* When switching to the operational firmware * the device sends a vendor specific event * indicating that the bootup completed. */ btintel_bootup(hdev, ptr, len); break; case 0x06: /* When the firmware loading completes the * device sends out a vendor specific event * indicating the result of the firmware * loading. */ btintel_secure_send_result(hdev, ptr, len); break; } } /* Handle all diagnostics events separately. May still call * hci_recv_frame. 
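 * The diagnostics header is the three bytes 0x87 0x80 0x03 immediately after the two byte vendor event header; anything else falls through to hci_recv_frame() unchanged.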
*/ if (len >= sizeof(diagnostics_hdr) && memcmp(&skb->data[2], diagnostics_hdr, sizeof(diagnostics_hdr)) == 0) { return btintel_diagnostics(hdev, skb); } } return hci_recv_frame(hdev, skb); } EXPORT_SYMBOL_GPL(btintel_recv_event); void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len) { const struct intel_bootup *evt = ptr; if (len != sizeof(*evt)) return; if (btintel_test_and_clear_flag(hdev, INTEL_BOOTING)) btintel_wake_up_flag(hdev, INTEL_BOOTING); } EXPORT_SYMBOL_GPL(btintel_bootup); void btintel_secure_send_result(struct hci_dev *hdev, const void *ptr, unsigned int len) { const struct intel_secure_send_result *evt = ptr; if (len != sizeof(*evt)) return; if (evt->result) btintel_set_flag(hdev, INTEL_FIRMWARE_FAILED); if (btintel_test_and_clear_flag(hdev, INTEL_DOWNLOADING) && btintel_test_flag(hdev, INTEL_FIRMWARE_LOADED)) btintel_wake_up_flag(hdev, INTEL_DOWNLOADING); } EXPORT_SYMBOL_GPL(btintel_secure_send_result); MODULE_AUTHOR("Marcel Holtmann <[email protected]>"); MODULE_DESCRIPTION("Bluetooth support for Intel devices ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("intel/ibt-11-5.sfi"); MODULE_FIRMWARE("intel/ibt-11-5.ddc"); MODULE_FIRMWARE("intel/ibt-12-16.sfi"); MODULE_FIRMWARE("intel/ibt-12-16.ddc");
linux-master
drivers/bluetooth/btintel.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Bluetooth support for Realtek devices * * Copyright (C) 2015 Endless Mobile, Inc. */ #include <linux/module.h> #include <linux/firmware.h> #include <asm/unaligned.h> #include <linux/usb.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btrtl.h" #define VERSION "0.1" #define RTL_CHIP_8723CS_CG 3 #define RTL_CHIP_8723CS_VF 4 #define RTL_CHIP_8723CS_XX 5 #define RTL_EPATCH_SIGNATURE "Realtech" #define RTL_EPATCH_SIGNATURE_V2 "RTBTCore" #define RTL_ROM_LMP_8703B 0x8703 #define RTL_ROM_LMP_8723A 0x1200 #define RTL_ROM_LMP_8723B 0x8723 #define RTL_ROM_LMP_8821A 0x8821 #define RTL_ROM_LMP_8761A 0x8761 #define RTL_ROM_LMP_8822B 0x8822 #define RTL_ROM_LMP_8852A 0x8852 #define RTL_ROM_LMP_8851B 0x8851 #define RTL_CONFIG_MAGIC 0x8723ab55 #define RTL_VSC_OP_COREDUMP 0xfcff #define IC_MATCH_FL_LMPSUBV (1 << 0) #define IC_MATCH_FL_HCIREV (1 << 1) #define IC_MATCH_FL_HCIVER (1 << 2) #define IC_MATCH_FL_HCIBUS (1 << 3) #define IC_MATCH_FL_CHIP_TYPE (1 << 4) #define IC_INFO(lmps, hcir, hciv, bus) \ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | \ IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, \ .lmp_subver = (lmps), \ .hci_rev = (hcir), \ .hci_ver = (hciv), \ .hci_bus = (bus) #define RTL_CHIP_SUBVER (&(struct rtl_vendor_cmd) {{0x10, 0x38, 0x04, 0x28, 0x80}}) #define RTL_CHIP_REV (&(struct rtl_vendor_cmd) {{0x10, 0x3A, 0x04, 0x28, 0x80}}) #define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0x0D, 0x00, 0xb0}}) #define RTL_PATCH_SNIPPETS 0x01 #define RTL_PATCH_DUMMY_HEADER 0x02 #define RTL_PATCH_SECURITY_HEADER 0x03 enum btrtl_chip_id { CHIP_ID_8723A, CHIP_ID_8723B, CHIP_ID_8821A, CHIP_ID_8761A, CHIP_ID_8822B = 8, CHIP_ID_8723D, CHIP_ID_8821C, CHIP_ID_8822C = 13, CHIP_ID_8761B, CHIP_ID_8852A = 18, CHIP_ID_8852B = 20, CHIP_ID_8852C = 25, CHIP_ID_8851B = 36, }; struct id_table { __u16 match_flags; __u16 lmp_subver; __u16 hci_rev; __u8 hci_ver; __u8 hci_bus; __u8 chip_type; bool config_needed; bool has_rom_version; bool has_msft_ext; char *fw_name; char *cfg_name; char *hw_info; }; struct btrtl_device_info { const struct id_table *ic_info; u8 rom_version; u8 *fw_data; int fw_len; u8 *cfg_data; int cfg_len; bool drop_fw; int project_id; u8 key_id; struct list_head patch_subsecs; }; static const struct id_table ic_id_table[] = { /* 8723A */ { IC_INFO(RTL_ROM_LMP_8723A, 0xb, 0x6, HCI_USB), .config_needed = false, .has_rom_version = false, .fw_name = "rtl_bt/rtl8723a_fw", .cfg_name = NULL, .hw_info = "rtl8723au" }, /* 8723BS */ { IC_INFO(RTL_ROM_LMP_8723B, 0xb, 0x6, HCI_UART), .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723bs_fw", .cfg_name = "rtl_bt/rtl8723bs_config", .hw_info = "rtl8723bs" }, /* 8723B */ { IC_INFO(RTL_ROM_LMP_8723B, 0xb, 0x6, HCI_USB), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723b_fw", .cfg_name = "rtl_bt/rtl8723b_config", .hw_info = "rtl8723bu" }, /* 8723CS-CG */ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE | IC_MATCH_FL_HCIBUS, .lmp_subver = RTL_ROM_LMP_8703B, .chip_type = RTL_CHIP_8723CS_CG, .hci_bus = HCI_UART, .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723cs_cg_fw", .cfg_name = "rtl_bt/rtl8723cs_cg_config", .hw_info = "rtl8723cs-cg" }, /* 8723CS-VF */ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE | IC_MATCH_FL_HCIBUS, .lmp_subver = RTL_ROM_LMP_8703B, .chip_type = RTL_CHIP_8723CS_VF, .hci_bus = HCI_UART, .config_needed = true, .has_rom_version = true, .fw_name = 
"rtl_bt/rtl8723cs_vf_fw", .cfg_name = "rtl_bt/rtl8723cs_vf_config", .hw_info = "rtl8723cs-vf" }, /* 8723CS-XX */ { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE | IC_MATCH_FL_HCIBUS, .lmp_subver = RTL_ROM_LMP_8703B, .chip_type = RTL_CHIP_8723CS_XX, .hci_bus = HCI_UART, .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723cs_xx_fw", .cfg_name = "rtl_bt/rtl8723cs_xx_config", .hw_info = "rtl8723cs" }, /* 8723D */ { IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_USB), .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723d_fw", .cfg_name = "rtl_bt/rtl8723d_config", .hw_info = "rtl8723du" }, /* 8723DS */ { IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_UART), .config_needed = true, .has_rom_version = true, .fw_name = "rtl_bt/rtl8723ds_fw", .cfg_name = "rtl_bt/rtl8723ds_config", .hw_info = "rtl8723ds" }, /* 8821A */ { IC_INFO(RTL_ROM_LMP_8821A, 0xa, 0x6, HCI_USB), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8821a_fw", .cfg_name = "rtl_bt/rtl8821a_config", .hw_info = "rtl8821au" }, /* 8821C */ { IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_USB), .config_needed = false, .has_rom_version = true, .has_msft_ext = true, .fw_name = "rtl_bt/rtl8821c_fw", .cfg_name = "rtl_bt/rtl8821c_config", .hw_info = "rtl8821cu" }, /* 8821CS */ { IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_UART), .config_needed = true, .has_rom_version = true, .has_msft_ext = true, .fw_name = "rtl_bt/rtl8821cs_fw", .cfg_name = "rtl_bt/rtl8821cs_config", .hw_info = "rtl8821cs" }, /* 8761A */ { IC_INFO(RTL_ROM_LMP_8761A, 0xa, 0x6, HCI_USB), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8761a_fw", .cfg_name = "rtl_bt/rtl8761a_config", .hw_info = "rtl8761au" }, /* 8761B */ { IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_UART), .config_needed = false, .has_rom_version = true, .has_msft_ext = true, .fw_name = "rtl_bt/rtl8761b_fw", .cfg_name = "rtl_bt/rtl8761b_config", .hw_info = "rtl8761btv" }, /* 8761BU */ { IC_INFO(RTL_ROM_LMP_8761A, 0xb, 0xa, HCI_USB), .config_needed = false, .has_rom_version = true, .fw_name = "rtl_bt/rtl8761bu_fw", .cfg_name = "rtl_bt/rtl8761bu_config", .hw_info = "rtl8761bu" }, /* 8822C with UART interface */ { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0x8, HCI_UART), .config_needed = true, .has_rom_version = true, .has_msft_ext = true, .fw_name = "rtl_bt/rtl8822cs_fw", .cfg_name = "rtl_bt/rtl8822cs_config", .hw_info = "rtl8822cs" }, /* 8822C with UART interface */ { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_UART), .config_needed = true, .has_rom_version = true, .has_msft_ext = true, .fw_name = "rtl_bt/rtl8822cs_fw", .cfg_name = "rtl_bt/rtl8822cs_config", .hw_info = "rtl8822cs" }, /* 8822C with USB interface */ { IC_INFO(RTL_ROM_LMP_8822B, 0xc, 0xa, HCI_USB), .config_needed = false, .has_rom_version = true, .has_msft_ext = true, .fw_name = "rtl_bt/rtl8822cu_fw", .cfg_name = "rtl_bt/rtl8822cu_config", .hw_info = "rtl8822cu" }, /* 8822B */ { IC_INFO(RTL_ROM_LMP_8822B, 0xb, 0x7, HCI_USB), .config_needed = true, .has_rom_version = true, .has_msft_ext = true, .fw_name = "rtl_bt/rtl8822b_fw", .cfg_name = "rtl_bt/rtl8822b_config", .hw_info = "rtl8822bu" }, /* 8852A */ { IC_INFO(RTL_ROM_LMP_8852A, 0xa, 0xb, HCI_USB), .config_needed = false, .has_rom_version = true, .has_msft_ext = true, .fw_name = "rtl_bt/rtl8852au_fw", .cfg_name = "rtl_bt/rtl8852au_config", .hw_info = "rtl8852au" }, /* 8852B with UART interface */ { IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_UART), .config_needed = true, .has_rom_version = true, .has_msft_ext = true, .fw_name = 
"rtl_bt/rtl8852bs_fw", .cfg_name = "rtl_bt/rtl8852bs_config", .hw_info = "rtl8852bs" }, /* 8852B */ { IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_USB), .config_needed = false, .has_rom_version = true, .has_msft_ext = true, .fw_name = "rtl_bt/rtl8852bu_fw", .cfg_name = "rtl_bt/rtl8852bu_config", .hw_info = "rtl8852bu" }, /* 8852C */ { IC_INFO(RTL_ROM_LMP_8852A, 0xc, 0xc, HCI_USB), .config_needed = false, .has_rom_version = true, .has_msft_ext = true, .fw_name = "rtl_bt/rtl8852cu_fw", .cfg_name = "rtl_bt/rtl8852cu_config", .hw_info = "rtl8852cu" }, /* 8851B */ { IC_INFO(RTL_ROM_LMP_8851B, 0xb, 0xc, HCI_USB), .config_needed = false, .has_rom_version = true, .has_msft_ext = false, .fw_name = "rtl_bt/rtl8851bu_fw", .cfg_name = "rtl_bt/rtl8851bu_config", .hw_info = "rtl8851bu" }, }; static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev, u8 hci_ver, u8 hci_bus, u8 chip_type) { int i; for (i = 0; i < ARRAY_SIZE(ic_id_table); i++) { if ((ic_id_table[i].match_flags & IC_MATCH_FL_LMPSUBV) && (ic_id_table[i].lmp_subver != lmp_subver)) continue; if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIREV) && (ic_id_table[i].hci_rev != hci_rev)) continue; if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIVER) && (ic_id_table[i].hci_ver != hci_ver)) continue; if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) && (ic_id_table[i].hci_bus != hci_bus)) continue; if ((ic_id_table[i].match_flags & IC_MATCH_FL_CHIP_TYPE) && (ic_id_table[i].chip_type != chip_type)) continue; break; } if (i >= ARRAY_SIZE(ic_id_table)) return NULL; return &ic_id_table[i]; } static struct sk_buff *btrtl_read_local_version(struct hci_dev *hdev) { struct sk_buff *skb; skb = __hci_cmd_sync(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION failed (%ld)", PTR_ERR(skb)); return skb; } if (skb->len != sizeof(struct hci_rp_read_local_version)) { rtl_dev_err(hdev, "HCI_OP_READ_LOCAL_VERSION event length mismatch"); kfree_skb(skb); return ERR_PTR(-EIO); } return skb; } static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) { struct rtl_rom_version_evt *rom_version; struct sk_buff *skb; /* Read RTL ROM version command */ skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { rtl_dev_err(hdev, "Read ROM version failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } if (skb->len != sizeof(*rom_version)) { rtl_dev_err(hdev, "version event length mismatch"); kfree_skb(skb); return -EIO; } rom_version = (struct rtl_rom_version_evt *)skb->data; rtl_dev_info(hdev, "rom_version status=%x version=%x", rom_version->status, rom_version->version); *version = rom_version->version; kfree_skb(skb); return 0; } static int btrtl_vendor_read_reg16(struct hci_dev *hdev, struct rtl_vendor_cmd *cmd, u8 *rp) { struct sk_buff *skb; int err = 0; skb = __hci_cmd_sync(hdev, 0xfc61, sizeof(*cmd), cmd, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { err = PTR_ERR(skb); rtl_dev_err(hdev, "RTL: Read reg16 failed (%d)", err); return err; } if (skb->len != 3 || skb->data[0]) { bt_dev_err(hdev, "RTL: Read reg16 length mismatch"); kfree_skb(skb); return -EIO; } if (rp) memcpy(rp, skb->data + 1, 2); kfree_skb(skb); return 0; } static void *rtl_iov_pull_data(struct rtl_iovec *iov, u32 len) { void *data = iov->data; if (iov->len < len) return NULL; iov->data += len; iov->len -= len; return data; } static void btrtl_insert_ordered_subsec(struct rtl_subsection *node, struct btrtl_device_info *btrtl_dev) { struct list_head *pos; struct list_head *next; struct 
rtl_subsection *subsec; list_for_each_safe(pos, next, &btrtl_dev->patch_subsecs) { subsec = list_entry(pos, struct rtl_subsection, list); if (subsec->prio >= node->prio) break; } __list_add(&node->list, pos->prev, pos); } static int btrtl_parse_section(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev, u32 opcode, u8 *data, u32 len) { struct rtl_section_hdr *hdr; struct rtl_subsection *subsec; struct rtl_common_subsec *common_subsec; struct rtl_sec_hdr *sec_hdr; int i; u8 *ptr; u16 num_subsecs; u32 subsec_len; int rc = 0; struct rtl_iovec iov = { .data = data, .len = len, }; hdr = rtl_iov_pull_data(&iov, sizeof(*hdr)); if (!hdr) return -EINVAL; num_subsecs = le16_to_cpu(hdr->num); for (i = 0; i < num_subsecs; i++) { common_subsec = rtl_iov_pull_data(&iov, sizeof(*common_subsec)); if (!common_subsec) break; subsec_len = le32_to_cpu(common_subsec->len); rtl_dev_dbg(hdev, "subsec, eco 0x%02x, len %08x", common_subsec->eco, subsec_len); ptr = rtl_iov_pull_data(&iov, subsec_len); if (!ptr) break; if (common_subsec->eco != btrtl_dev->rom_version + 1) continue; switch (opcode) { case RTL_PATCH_SECURITY_HEADER: sec_hdr = (void *)common_subsec; if (sec_hdr->key_id != btrtl_dev->key_id) continue; break; } subsec = kzalloc(sizeof(*subsec), GFP_KERNEL); if (!subsec) return -ENOMEM; subsec->opcode = opcode; subsec->prio = common_subsec->prio; subsec->len = subsec_len; subsec->data = ptr; btrtl_insert_ordered_subsec(subsec, btrtl_dev); rc += subsec_len; } return rc; } static int rtlbt_parse_firmware_v2(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev, unsigned char **_buf) { struct rtl_epatch_header_v2 *hdr; int rc; u8 reg_val[2]; u8 key_id; u32 num_sections; struct rtl_section *section; struct rtl_subsection *entry, *tmp; u32 section_len; u32 opcode; int len = 0; int i; u8 *ptr; struct rtl_iovec iov = { .data = btrtl_dev->fw_data, .len = btrtl_dev->fw_len - 7, /* Cut the tail */ }; rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val); if (rc < 0) return -EIO; key_id = reg_val[0]; rtl_dev_dbg(hdev, "%s: key id %u", __func__, key_id); btrtl_dev->key_id = key_id; hdr = rtl_iov_pull_data(&iov, sizeof(*hdr)); if (!hdr) return -EINVAL; num_sections = le32_to_cpu(hdr->num_sections); rtl_dev_dbg(hdev, "FW version %08x-%08x", *((u32 *)hdr->fw_version), *((u32 *)(hdr->fw_version + 4))); for (i = 0; i < num_sections; i++) { section = rtl_iov_pull_data(&iov, sizeof(*section)); if (!section) break; section_len = le32_to_cpu(section->len); opcode = le32_to_cpu(section->opcode); rtl_dev_dbg(hdev, "opcode 0x%04x", section->opcode); ptr = rtl_iov_pull_data(&iov, section_len); if (!ptr) break; switch (opcode) { case RTL_PATCH_SNIPPETS: rc = btrtl_parse_section(hdev, btrtl_dev, opcode, ptr, section_len); break; case RTL_PATCH_SECURITY_HEADER: /* If key_id from chip is zero, ignore all security * headers. */ if (!key_id) break; rc = btrtl_parse_section(hdev, btrtl_dev, opcode, ptr, section_len); break; case RTL_PATCH_DUMMY_HEADER: rc = btrtl_parse_section(hdev, btrtl_dev, opcode, ptr, section_len); break; default: rc = 0; break; } if (rc < 0) { rtl_dev_err(hdev, "RTL: Parse section (%u) err %d", opcode, rc); return rc; } len += rc; } if (!len) return -ENODATA; /* Allocate mem and copy all found subsecs. 
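 * The subsections were kept sorted by ascending priority when they were
 * queued by btrtl_insert_ordered_subsec(), so the loop below simply
 * concatenates them into one contiguous patch image.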
*/ ptr = kvmalloc(len, GFP_KERNEL); if (!ptr) return -ENOMEM; len = 0; list_for_each_entry_safe(entry, tmp, &btrtl_dev->patch_subsecs, list) { rtl_dev_dbg(hdev, "RTL: opcode %08x, addr %p, len 0x%x", entry->opcode, entry->data, entry->len); memcpy(ptr + len, entry->data, entry->len); len += entry->len; } if (!len) return -EPERM; *_buf = ptr; return len; } static int rtlbt_parse_firmware(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev, unsigned char **_buf) { static const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 }; struct btrealtek_data *coredump_info = hci_get_priv(hdev); struct rtl_epatch_header *epatch_info; unsigned char *buf; int i, len; size_t min_size; u8 opcode, length, data; int project_id = -1; const unsigned char *fwptr, *chip_id_base; const unsigned char *patch_length_base, *patch_offset_base; u32 patch_offset = 0; u16 patch_length, num_patches; static const struct { __u16 lmp_subver; __u8 id; } project_id_to_lmp_subver[] = { { RTL_ROM_LMP_8723A, 0 }, { RTL_ROM_LMP_8723B, 1 }, { RTL_ROM_LMP_8821A, 2 }, { RTL_ROM_LMP_8761A, 3 }, { RTL_ROM_LMP_8703B, 7 }, { RTL_ROM_LMP_8822B, 8 }, { RTL_ROM_LMP_8723B, 9 }, /* 8723D */ { RTL_ROM_LMP_8821A, 10 }, /* 8821C */ { RTL_ROM_LMP_8822B, 13 }, /* 8822C */ { RTL_ROM_LMP_8761A, 14 }, /* 8761B */ { RTL_ROM_LMP_8852A, 18 }, /* 8852A */ { RTL_ROM_LMP_8852A, 20 }, /* 8852B */ { RTL_ROM_LMP_8852A, 25 }, /* 8852C */ { RTL_ROM_LMP_8851B, 36 }, /* 8851B */ }; if (btrtl_dev->fw_len <= 8) return -EINVAL; if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; else if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE_V2, 8)) min_size = sizeof(struct rtl_epatch_header_v2) + sizeof(extension_sig) + 3; else return -EINVAL; if (btrtl_dev->fw_len < min_size) return -EINVAL; fwptr = btrtl_dev->fw_data + btrtl_dev->fw_len - sizeof(extension_sig); if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) { rtl_dev_err(hdev, "extension section signature mismatch"); return -EINVAL; } /* Loop from the end of the firmware parsing instructions until * we find an instruction that identifies the "project ID" for the * hardware supported by this firmware file. * Once we have that, we double-check that project_id is suitable * for the hardware we are working with. 
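 * The instructions are read backwards as [data][length][opcode] triples.
 * For example (illustrative bytes only), a tail of
 *   ... 0x08 0x01 0x00 <extension_sig>
 * decodes as opcode 0x00, length 1, data 0x08, i.e. project_id 8
 * (the RTL_ROM_LMP_8822B entry in project_id_to_lmp_subver above).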
*/ while (fwptr >= btrtl_dev->fw_data + (sizeof(*epatch_info) + 3)) { opcode = *--fwptr; length = *--fwptr; data = *--fwptr; BT_DBG("check op=%x len=%x data=%x", opcode, length, data); if (opcode == 0xff) /* EOF */ break; if (length == 0) { rtl_dev_err(hdev, "found instruction with length 0"); return -EINVAL; } if (opcode == 0 && length == 1) { project_id = data; break; } fwptr -= length; } if (project_id < 0) { rtl_dev_err(hdev, "failed to find version instruction"); return -EINVAL; } /* Find project_id in table */ for (i = 0; i < ARRAY_SIZE(project_id_to_lmp_subver); i++) { if (project_id == project_id_to_lmp_subver[i].id) { btrtl_dev->project_id = project_id; break; } } if (i >= ARRAY_SIZE(project_id_to_lmp_subver)) { rtl_dev_err(hdev, "unknown project id %d", project_id); return -EINVAL; } if (btrtl_dev->ic_info->lmp_subver != project_id_to_lmp_subver[i].lmp_subver) { rtl_dev_err(hdev, "firmware is for %x but this is a %x", project_id_to_lmp_subver[i].lmp_subver, btrtl_dev->ic_info->lmp_subver); return -EINVAL; } if (memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8) != 0) { if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE_V2, 8)) return rtlbt_parse_firmware_v2(hdev, btrtl_dev, _buf); rtl_dev_err(hdev, "bad EPATCH signature"); return -EINVAL; } epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data; num_patches = le16_to_cpu(epatch_info->num_patches); BT_DBG("fw_version=%x, num_patches=%d", le32_to_cpu(epatch_info->fw_version), num_patches); coredump_info->rtl_dump.fw_version = le32_to_cpu(epatch_info->fw_version); /* After the rtl_epatch_header there is a funky patch metadata section. * Assuming 2 patches, the layout is: * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2 * * Find the right patch for this chip. */ min_size += 8 * num_patches; if (btrtl_dev->fw_len < min_size) return -EINVAL; chip_id_base = btrtl_dev->fw_data + sizeof(struct rtl_epatch_header); patch_length_base = chip_id_base + (sizeof(u16) * num_patches); patch_offset_base = patch_length_base + (sizeof(u16) * num_patches); for (i = 0; i < num_patches; i++) { u16 chip_id = get_unaligned_le16(chip_id_base + (i * sizeof(u16))); if (chip_id == btrtl_dev->rom_version + 1) { patch_length = get_unaligned_le16(patch_length_base + (i * sizeof(u16))); patch_offset = get_unaligned_le32(patch_offset_base + (i * sizeof(u32))); break; } } if (!patch_offset) { rtl_dev_err(hdev, "didn't find patch for chip id %d", btrtl_dev->rom_version); return -EINVAL; } BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i); min_size = patch_offset + patch_length; if (btrtl_dev->fw_len < min_size) return -EINVAL; /* Copy the firmware into a new buffer and write the version at * the end. 
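 * The last 4 bytes of the per-chip patch are overwritten with the
 * fw_version field from the epatch header, so the downloaded image
 * carries the version it was built from.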
*/ len = patch_length; buf = kvmalloc(patch_length, GFP_KERNEL); if (!buf) return -ENOMEM; memcpy(buf, btrtl_dev->fw_data + patch_offset, patch_length - 4); memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4); *_buf = buf; return len; } static int rtl_download_firmware(struct hci_dev *hdev, const unsigned char *data, int fw_len) { struct rtl_download_cmd *dl_cmd; int frag_num = fw_len / RTL_FRAG_LEN + 1; int frag_len = RTL_FRAG_LEN; int ret = 0; int i; int j = 0; struct sk_buff *skb; struct hci_rp_read_local_version *rp; dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL); if (!dl_cmd) return -ENOMEM; for (i = 0; i < frag_num; i++) { struct sk_buff *skb; dl_cmd->index = j++; if (dl_cmd->index == 0x7f) j = 1; if (i == (frag_num - 1)) { dl_cmd->index |= 0x80; /* data end */ frag_len = fw_len % RTL_FRAG_LEN; } rtl_dev_dbg(hdev, "download fw (%d/%d). index = %d", i, frag_num, dl_cmd->index); memcpy(dl_cmd->data, data, frag_len); /* Send download command */ skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { rtl_dev_err(hdev, "download fw command failed (%ld)", PTR_ERR(skb)); ret = PTR_ERR(skb); goto out; } if (skb->len != sizeof(struct rtl_download_response)) { rtl_dev_err(hdev, "download fw event length mismatch"); kfree_skb(skb); ret = -EIO; goto out; } kfree_skb(skb); data += RTL_FRAG_LEN; } skb = btrtl_read_local_version(hdev); if (IS_ERR(skb)) { ret = PTR_ERR(skb); rtl_dev_err(hdev, "read local version failed"); goto out; } rp = (struct hci_rp_read_local_version *)skb->data; rtl_dev_info(hdev, "fw version 0x%04x%04x", __le16_to_cpu(rp->hci_rev), __le16_to_cpu(rp->lmp_subver)); kfree_skb(skb); out: kfree(dl_cmd); return ret; } static int rtl_load_file(struct hci_dev *hdev, const char *name, u8 **buff) { const struct firmware *fw; int ret; rtl_dev_info(hdev, "loading %s", name); ret = request_firmware(&fw, name, &hdev->dev); if (ret < 0) return ret; ret = fw->size; *buff = kvmalloc(fw->size, GFP_KERNEL); if (*buff) memcpy(*buff, fw->data, ret); else ret = -ENOMEM; release_firmware(fw); return ret; } static int btrtl_setup_rtl8723a(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) { if (btrtl_dev->fw_len < 8) return -EINVAL; /* Check that the firmware doesn't have the epatch signature * (which is only for RTL8723B and newer). 
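 * An EPATCH header here would mean a firmware file for a newer chip
 * was supplied, so reject it rather than streaming it to the 8723A.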
*/ if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) { rtl_dev_err(hdev, "unexpected EPATCH signature!"); return -EINVAL; } return rtl_download_firmware(hdev, btrtl_dev->fw_data, btrtl_dev->fw_len); } static int btrtl_setup_rtl8723b(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) { unsigned char *fw_data = NULL; int ret; u8 *tbuff; ret = rtlbt_parse_firmware(hdev, btrtl_dev, &fw_data); if (ret < 0) goto out; if (btrtl_dev->cfg_len > 0) { tbuff = kvzalloc(ret + btrtl_dev->cfg_len, GFP_KERNEL); if (!tbuff) { ret = -ENOMEM; goto out; } memcpy(tbuff, fw_data, ret); kvfree(fw_data); memcpy(tbuff + ret, btrtl_dev->cfg_data, btrtl_dev->cfg_len); ret += btrtl_dev->cfg_len; fw_data = tbuff; } rtl_dev_info(hdev, "cfg_sz %d, total sz %d", btrtl_dev->cfg_len, ret); ret = rtl_download_firmware(hdev, fw_data, ret); out: kvfree(fw_data); return ret; } static void btrtl_coredump(struct hci_dev *hdev) { static const u8 param[] = { 0x00, 0x00 }; __hci_cmd_send(hdev, RTL_VSC_OP_COREDUMP, sizeof(param), param); } static void btrtl_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb) { struct btrealtek_data *coredump_info = hci_get_priv(hdev); char buf[80]; if (coredump_info->rtl_dump.controller) snprintf(buf, sizeof(buf), "Controller Name: %s\n", coredump_info->rtl_dump.controller); else snprintf(buf, sizeof(buf), "Controller Name: Unknown\n"); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Firmware Version: 0x%X\n", coredump_info->rtl_dump.fw_version); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Driver: %s\n", coredump_info->rtl_dump.driver_name); skb_put_data(skb, buf, strlen(buf)); snprintf(buf, sizeof(buf), "Vendor: Realtek\n"); skb_put_data(skb, buf, strlen(buf)); } static int btrtl_register_devcoredump_support(struct hci_dev *hdev) { int err; err = hci_devcd_register(hdev, btrtl_coredump, btrtl_dmp_hdr, NULL); return err; } void btrtl_set_driver_name(struct hci_dev *hdev, const char *driver_name) { struct btrealtek_data *coredump_info = hci_get_priv(hdev); coredump_info->rtl_dump.driver_name = driver_name; } EXPORT_SYMBOL_GPL(btrtl_set_driver_name); static bool rtl_has_chip_type(u16 lmp_subver) { switch (lmp_subver) { case RTL_ROM_LMP_8703B: return true; default: break; } return false; } static int rtl_read_chip_type(struct hci_dev *hdev, u8 *type) { struct rtl_chip_type_evt *chip_type; struct sk_buff *skb; const unsigned char cmd_buf[] = {0x00, 0x94, 0xa0, 0x00, 0xb0}; /* Read RTL chip type command */ skb = __hci_cmd_sync(hdev, 0xfc61, 5, cmd_buf, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { rtl_dev_err(hdev, "Read chip type failed (%ld)", PTR_ERR(skb)); return PTR_ERR(skb); } chip_type = skb_pull_data(skb, sizeof(*chip_type)); if (!chip_type) { rtl_dev_err(hdev, "RTL chip type event length mismatch"); kfree_skb(skb); return -EIO; } rtl_dev_info(hdev, "chip_type status=%x type=%x", chip_type->status, chip_type->type); *type = chip_type->type & 0x0f; kfree_skb(skb); return 0; } void btrtl_free(struct btrtl_device_info *btrtl_dev) { struct rtl_subsection *entry, *tmp; kvfree(btrtl_dev->fw_data); kvfree(btrtl_dev->cfg_data); list_for_each_entry_safe(entry, tmp, &btrtl_dev->patch_subsecs, list) { list_del(&entry->list); kfree(entry); } kfree(btrtl_dev); } EXPORT_SYMBOL_GPL(btrtl_free); struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, const char *postfix) { struct btrealtek_data *coredump_info = hci_get_priv(hdev); struct btrtl_device_info *btrtl_dev; struct sk_buff *skb; struct hci_rp_read_local_version *resp; struct hci_command_hdr *cmd; 
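/* Probe flow implemented below: read the chip subversion via the 0xfc61
 * vendor register command (with an 8822E special case), fall back to
 * HCI_OP_READ_LOCAL_VERSION, match an ic_id_table entry, then load the
 * firmware and optional config blobs named after that entry.
 */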
char fw_name[40]; char cfg_name[40]; u16 hci_rev, lmp_subver; u8 hci_ver, lmp_ver, chip_type = 0; int ret; u8 reg_val[2]; btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL); if (!btrtl_dev) { ret = -ENOMEM; goto err_alloc; } INIT_LIST_HEAD(&btrtl_dev->patch_subsecs); check_version: ret = btrtl_vendor_read_reg16(hdev, RTL_CHIP_SUBVER, reg_val); if (ret < 0) goto err_free; lmp_subver = get_unaligned_le16(reg_val); if (lmp_subver == RTL_ROM_LMP_8822B) { ret = btrtl_vendor_read_reg16(hdev, RTL_CHIP_REV, reg_val); if (ret < 0) goto err_free; hci_rev = get_unaligned_le16(reg_val); /* 8822E */ if (hci_rev == 0x000e) { hci_ver = 0x0c; lmp_ver = 0x0c; btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, hdev->bus, chip_type); goto next; } } skb = btrtl_read_local_version(hdev); if (IS_ERR(skb)) { ret = PTR_ERR(skb); goto err_free; } resp = (struct hci_rp_read_local_version *)skb->data; hci_ver = resp->hci_ver; hci_rev = le16_to_cpu(resp->hci_rev); lmp_ver = resp->lmp_ver; lmp_subver = le16_to_cpu(resp->lmp_subver); kfree_skb(skb); if (rtl_has_chip_type(lmp_subver)) { ret = rtl_read_chip_type(hdev, &chip_type); if (ret) goto err_free; } btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, hdev->bus, chip_type); next: rtl_dev_info(hdev, "examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x", hci_ver, hci_rev, lmp_ver, lmp_subver); if (!btrtl_dev->ic_info && !btrtl_dev->drop_fw) btrtl_dev->drop_fw = true; else btrtl_dev->drop_fw = false; if (btrtl_dev->drop_fw) { skb = bt_skb_alloc(sizeof(*cmd), GFP_KERNEL); if (!skb) goto err_free; cmd = skb_put(skb, HCI_COMMAND_HDR_SIZE); cmd->opcode = cpu_to_le16(0xfc66); cmd->plen = 0; hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; ret = hdev->send(hdev, skb); if (ret < 0) { bt_dev_err(hdev, "sending frame failed (%d)", ret); kfree_skb(skb); goto err_free; } /* Ensure the above vendor command is sent to controller and * process has done. */ msleep(200); goto check_version; } if (!btrtl_dev->ic_info) { rtl_dev_info(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", lmp_subver, hci_rev, hci_ver); return btrtl_dev; } if (btrtl_dev->ic_info->has_rom_version) { ret = rtl_read_rom_version(hdev, &btrtl_dev->rom_version); if (ret) goto err_free; } if (!btrtl_dev->ic_info->fw_name) { ret = -ENOMEM; goto err_free; } btrtl_dev->fw_len = -EIO; if (lmp_subver == RTL_ROM_LMP_8852A && hci_rev == 0x000c) { snprintf(fw_name, sizeof(fw_name), "%s_v2.bin", btrtl_dev->ic_info->fw_name); btrtl_dev->fw_len = rtl_load_file(hdev, fw_name, &btrtl_dev->fw_data); } if (btrtl_dev->fw_len < 0) { snprintf(fw_name, sizeof(fw_name), "%s.bin", btrtl_dev->ic_info->fw_name); btrtl_dev->fw_len = rtl_load_file(hdev, fw_name, &btrtl_dev->fw_data); } if (btrtl_dev->fw_len < 0) { rtl_dev_err(hdev, "firmware file %s not found", btrtl_dev->ic_info->fw_name); ret = btrtl_dev->fw_len; goto err_free; } if (btrtl_dev->ic_info->cfg_name) { if (postfix) { snprintf(cfg_name, sizeof(cfg_name), "%s-%s.bin", btrtl_dev->ic_info->cfg_name, postfix); } else { snprintf(cfg_name, sizeof(cfg_name), "%s.bin", btrtl_dev->ic_info->cfg_name); } btrtl_dev->cfg_len = rtl_load_file(hdev, cfg_name, &btrtl_dev->cfg_data); if (btrtl_dev->ic_info->config_needed && btrtl_dev->cfg_len <= 0) { rtl_dev_err(hdev, "mandatory config file %s not found", btrtl_dev->ic_info->cfg_name); ret = btrtl_dev->cfg_len; goto err_free; } } /* The following chips supports the Microsoft vendor extension, * therefore set the corresponding VsMsftOpCode. 
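 * (0xFCF0 is the Realtek vendor-specific opcode through which the
 * Microsoft extension commands are issued.)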
*/ if (btrtl_dev->ic_info->has_msft_ext) hci_set_msft_opcode(hdev, 0xFCF0); if (btrtl_dev->ic_info) coredump_info->rtl_dump.controller = btrtl_dev->ic_info->hw_info; return btrtl_dev; err_free: btrtl_free(btrtl_dev); err_alloc: return ERR_PTR(ret); } EXPORT_SYMBOL_GPL(btrtl_initialize); int btrtl_download_firmware(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) { int err = 0; /* Match a set of subver values that correspond to stock firmware, * which is not compatible with standard btusb. * If matched, upload an alternative firmware that does conform to * standard btusb. Once that firmware is uploaded, the subver changes * to a different value. */ if (!btrtl_dev->ic_info) { rtl_dev_info(hdev, "assuming no firmware upload needed"); err = 0; goto done; } switch (btrtl_dev->ic_info->lmp_subver) { case RTL_ROM_LMP_8723A: err = btrtl_setup_rtl8723a(hdev, btrtl_dev); break; case RTL_ROM_LMP_8723B: case RTL_ROM_LMP_8821A: case RTL_ROM_LMP_8761A: case RTL_ROM_LMP_8822B: case RTL_ROM_LMP_8852A: case RTL_ROM_LMP_8703B: case RTL_ROM_LMP_8851B: err = btrtl_setup_rtl8723b(hdev, btrtl_dev); break; default: rtl_dev_info(hdev, "assuming no firmware upload needed"); break; } done: if (!err) err = btrtl_register_devcoredump_support(hdev); return err; } EXPORT_SYMBOL_GPL(btrtl_download_firmware); void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) { /* Enable controller to do both LE scan and BR/EDR inquiry * simultaneously. */ set_bit(HCI_QUIRK_SIMULTANEOUS_DISCOVERY, &hdev->quirks); /* Enable central-peripheral role (able to create new connections with * an existing connection in slave role). */ /* Enable WBS supported for the specific Realtek devices. */ switch (btrtl_dev->project_id) { case CHIP_ID_8822C: case CHIP_ID_8852A: case CHIP_ID_8852B: case CHIP_ID_8852C: case CHIP_ID_8851B: set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); /* RTL8852C needs to transmit mSBC data continuously without * the zero length of USB packets for the ALT 6 supported chips */ if (btrtl_dev->project_id == CHIP_ID_8852C) btrealtek_set_flag(hdev, REALTEK_ALT6_CONTINUOUS_TX_CHIP); if (btrtl_dev->project_id == CHIP_ID_8852A || btrtl_dev->project_id == CHIP_ID_8852C) set_bit(HCI_QUIRK_USE_MSFT_EXT_ADDRESS_FILTER, &hdev->quirks); hci_set_aosp_capable(hdev); break; default: rtl_dev_dbg(hdev, "Central-peripheral role not enabled."); rtl_dev_dbg(hdev, "WBS supported not enabled."); break; } if (!btrtl_dev->ic_info) return; switch (btrtl_dev->ic_info->lmp_subver) { case RTL_ROM_LMP_8703B: /* 8723CS reports two pages for local ext features, * but it doesn't support any features from page 2 - * it either responds with garbage or with error status */ set_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2, &hdev->quirks); break; default: break; } } EXPORT_SYMBOL_GPL(btrtl_set_quirks); int btrtl_setup_realtek(struct hci_dev *hdev) { struct btrtl_device_info *btrtl_dev; int ret; btrtl_dev = btrtl_initialize(hdev, NULL); if (IS_ERR(btrtl_dev)) return PTR_ERR(btrtl_dev); ret = btrtl_download_firmware(hdev, btrtl_dev); btrtl_set_quirks(hdev, btrtl_dev); btrtl_free(btrtl_dev); return ret; } EXPORT_SYMBOL_GPL(btrtl_setup_realtek); int btrtl_shutdown_realtek(struct hci_dev *hdev) { struct sk_buff *skb; int ret; /* According to the vendor driver, BT must be reset on close to avoid * firmware crash. 
*/ skb = __hci_cmd_sync(hdev, HCI_OP_RESET, 0, NULL, HCI_INIT_TIMEOUT); if (IS_ERR(skb)) { ret = PTR_ERR(skb); bt_dev_err(hdev, "HCI reset during shutdown failed"); return ret; } kfree_skb(skb); return 0; } EXPORT_SYMBOL_GPL(btrtl_shutdown_realtek); static unsigned int btrtl_convert_baudrate(u32 device_baudrate) { switch (device_baudrate) { case 0x0252a00a: return 230400; case 0x05f75004: return 921600; case 0x00005004: return 1000000; case 0x04928002: case 0x01128002: return 1500000; case 0x00005002: return 2000000; case 0x0000b001: return 2500000; case 0x04928001: return 3000000; case 0x052a6001: return 3500000; case 0x00005001: return 4000000; case 0x0252c014: default: return 115200; } } int btrtl_get_uart_settings(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev, unsigned int *controller_baudrate, u32 *device_baudrate, bool *flow_control) { struct rtl_vendor_config *config; struct rtl_vendor_config_entry *entry; int i, total_data_len; bool found = false; total_data_len = btrtl_dev->cfg_len - sizeof(*config); if (total_data_len <= 0) { rtl_dev_warn(hdev, "no config loaded"); return -EINVAL; } config = (struct rtl_vendor_config *)btrtl_dev->cfg_data; if (le32_to_cpu(config->signature) != RTL_CONFIG_MAGIC) { rtl_dev_err(hdev, "invalid config magic"); return -EINVAL; } if (total_data_len < le16_to_cpu(config->total_len)) { rtl_dev_err(hdev, "config is too short"); return -EINVAL; } for (i = 0; i < total_data_len; ) { entry = ((void *)config->entry) + i; switch (le16_to_cpu(entry->offset)) { case 0xc: if (entry->len < sizeof(*device_baudrate)) { rtl_dev_err(hdev, "invalid UART config entry"); return -EINVAL; } *device_baudrate = get_unaligned_le32(entry->data); *controller_baudrate = btrtl_convert_baudrate( *device_baudrate); if (entry->len >= 13) *flow_control = !!(entry->data[12] & BIT(2)); else *flow_control = false; found = true; break; default: rtl_dev_dbg(hdev, "skipping config entry 0x%x (len %u)", le16_to_cpu(entry->offset), entry->len); break; } i += sizeof(*entry) + entry->len; } if (!found) { rtl_dev_err(hdev, "no UART config entry found"); return -ENOENT; } rtl_dev_dbg(hdev, "device baudrate = 0x%08x", *device_baudrate); rtl_dev_dbg(hdev, "controller baudrate = %u", *controller_baudrate); rtl_dev_dbg(hdev, "flow control %d", *flow_control); return 0; } EXPORT_SYMBOL_GPL(btrtl_get_uart_settings); MODULE_AUTHOR("Daniel Drake <[email protected]>"); MODULE_DESCRIPTION("Bluetooth support for Realtek devices ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("rtl_bt/rtl8723a_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723bs_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723bs_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723cs_cg_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723cs_cg_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723d_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723d_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8761a_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8761b_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8761b_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8761bu_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8761bu_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8821a_fw.bin"); 
MODULE_FIRMWARE("rtl_bt/rtl8821a_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8821c_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8821c_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8821cs_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8821cs_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8822cs_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8822cs_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8822cu_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8822cu_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8851bu_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8851bu_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852au_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852au_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852bs_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852bs_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw_v2.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin");
linux-master
drivers/bluetooth/btrtl.c
// SPDX-License-Identifier: GPL-2.0-only /* * Marvell BT-over-SDIO driver: SDIO interface related functions. * * Copyright (C) 2009, Marvell International Ltd. **/ #include <linux/firmware.h> #include <linux/slab.h> #include <linux/suspend.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/sdio_func.h> #include <linux/module.h> #include <linux/devcoredump.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include "btmrvl_drv.h" #include "btmrvl_sdio.h" #define VERSION "1.0" static struct memory_type_mapping mem_type_mapping_tbl[] = { {"ITCM", NULL, 0, 0xF0}, {"DTCM", NULL, 0, 0xF1}, {"SQRAM", NULL, 0, 0xF2}, {"APU", NULL, 0, 0xF3}, {"CIU", NULL, 0, 0xF4}, {"ICU", NULL, 0, 0xF5}, {"MAC", NULL, 0, 0xF6}, {"EXT7", NULL, 0, 0xF7}, {"EXT8", NULL, 0, 0xF8}, {"EXT9", NULL, 0, 0xF9}, {"EXT10", NULL, 0, 0xFA}, {"EXT11", NULL, 0, 0xFB}, {"EXT12", NULL, 0, 0xFC}, {"EXT13", NULL, 0, 0xFD}, {"EXTLAST", NULL, 0, 0xFE}, }; static const struct of_device_id btmrvl_sdio_of_match_table[] __maybe_unused = { { .compatible = "marvell,sd8897-bt" }, { .compatible = "marvell,sd8997-bt" }, { } }; static irqreturn_t btmrvl_wake_irq_bt(int irq, void *priv) { struct btmrvl_sdio_card *card = priv; struct device *dev = &card->func->dev; struct btmrvl_plt_wake_cfg *cfg = card->plt_wake_cfg; dev_info(dev, "wake by bt\n"); cfg->wake_by_bt = true; disable_irq_nosync(irq); pm_wakeup_event(dev, 0); pm_system_wakeup(); return IRQ_HANDLED; } /* This function parses device tree node using mmc subnode devicetree API. * The device node is saved in card->plt_of_node. * If the device tree node exists and includes interrupts attributes, this * function will request platform specific wakeup interrupt. */ static int btmrvl_sdio_probe_of(struct device *dev, struct btmrvl_sdio_card *card) { struct btmrvl_plt_wake_cfg *cfg; int ret; if (!dev->of_node || !of_match_node(btmrvl_sdio_of_match_table, dev->of_node)) { dev_info(dev, "sdio device tree data not available\n"); return -1; } card->plt_of_node = dev->of_node; card->plt_wake_cfg = devm_kzalloc(dev, sizeof(*card->plt_wake_cfg), GFP_KERNEL); cfg = card->plt_wake_cfg; if (cfg && card->plt_of_node) { cfg->irq_bt = irq_of_parse_and_map(card->plt_of_node, 0); if (!cfg->irq_bt) { dev_err(dev, "fail to parse irq_bt from device tree\n"); cfg->irq_bt = -1; } else { ret = devm_request_irq(dev, cfg->irq_bt, btmrvl_wake_irq_bt, 0, "bt_wake", card); if (ret) { dev_err(dev, "Failed to request irq_bt %d (%d)\n", cfg->irq_bt, ret); } /* Configure wakeup (enabled by default) */ device_init_wakeup(dev, true); disable_irq(cfg->irq_bt); } } return 0; } /* The btmrvl_sdio_remove() callback function is called * when user removes this module from kernel space or ejects * the card from the slot. The driver handles these 2 cases * differently. * If the user is removing the module, a MODULE_SHUTDOWN_REQ * command is sent to firmware and interrupt will be disabled. * If the card is removed, there is no need to send command * or disable interrupt. * * The variable 'user_rmmod' is used to distinguish these two * scenarios. This flag is initialized as FALSE in case the card * is removed, and will be set to TRUE for module removal when * module_exit function is called. 
*/ static u8 user_rmmod; static u8 sdio_ireg; static const struct btmrvl_sdio_card_reg btmrvl_reg_8688 = { .cfg = 0x03, .host_int_mask = 0x04, .host_intstatus = 0x05, .card_status = 0x20, .sq_read_base_addr_a0 = 0x10, .sq_read_base_addr_a1 = 0x11, .card_fw_status0 = 0x40, .card_fw_status1 = 0x41, .card_rx_len = 0x42, .card_rx_unit = 0x43, .io_port_0 = 0x00, .io_port_1 = 0x01, .io_port_2 = 0x02, .int_read_to_clear = false, }; static const struct btmrvl_sdio_card_reg btmrvl_reg_87xx = { .cfg = 0x00, .host_int_mask = 0x02, .host_intstatus = 0x03, .card_status = 0x30, .sq_read_base_addr_a0 = 0x40, .sq_read_base_addr_a1 = 0x41, .card_revision = 0x5c, .card_fw_status0 = 0x60, .card_fw_status1 = 0x61, .card_rx_len = 0x62, .card_rx_unit = 0x63, .io_port_0 = 0x78, .io_port_1 = 0x79, .io_port_2 = 0x7a, .int_read_to_clear = false, }; static const struct btmrvl_sdio_card_reg btmrvl_reg_8887 = { .cfg = 0x00, .host_int_mask = 0x08, .host_intstatus = 0x0C, .card_status = 0x5C, .sq_read_base_addr_a0 = 0x6C, .sq_read_base_addr_a1 = 0x6D, .card_revision = 0xC8, .card_fw_status0 = 0x88, .card_fw_status1 = 0x89, .card_rx_len = 0x8A, .card_rx_unit = 0x8B, .io_port_0 = 0xE4, .io_port_1 = 0xE5, .io_port_2 = 0xE6, .int_read_to_clear = true, .host_int_rsr = 0x04, .card_misc_cfg = 0xD8, }; static const struct btmrvl_sdio_card_reg btmrvl_reg_8897 = { .cfg = 0x00, .host_int_mask = 0x02, .host_intstatus = 0x03, .card_status = 0x50, .sq_read_base_addr_a0 = 0x60, .sq_read_base_addr_a1 = 0x61, .card_revision = 0xbc, .card_fw_status0 = 0xc0, .card_fw_status1 = 0xc1, .card_rx_len = 0xc2, .card_rx_unit = 0xc3, .io_port_0 = 0xd8, .io_port_1 = 0xd9, .io_port_2 = 0xda, .int_read_to_clear = true, .host_int_rsr = 0x01, .card_misc_cfg = 0xcc, .fw_dump_ctrl = 0xe2, .fw_dump_start = 0xe3, .fw_dump_end = 0xea, }; static const struct btmrvl_sdio_card_reg btmrvl_reg_89xx = { .cfg = 0x00, .host_int_mask = 0x08, .host_intstatus = 0x0c, .card_status = 0x5c, .sq_read_base_addr_a0 = 0xf8, .sq_read_base_addr_a1 = 0xf9, .card_revision = 0xc8, .card_fw_status0 = 0xe8, .card_fw_status1 = 0xe9, .card_rx_len = 0xea, .card_rx_unit = 0xeb, .io_port_0 = 0xe4, .io_port_1 = 0xe5, .io_port_2 = 0xe6, .int_read_to_clear = true, .host_int_rsr = 0x04, .card_misc_cfg = 0xd8, .fw_dump_ctrl = 0xf0, .fw_dump_start = 0xf1, .fw_dump_end = 0xf8, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8688 = { .helper = "mrvl/sd8688_helper.bin", .firmware = "mrvl/sd8688.bin", .reg = &btmrvl_reg_8688, .support_pscan_win_report = false, .sd_blksz_fw_dl = 64, .supports_fw_dump = false, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8787 = { .helper = NULL, .firmware = "mrvl/sd8787_uapsta.bin", .reg = &btmrvl_reg_87xx, .support_pscan_win_report = false, .sd_blksz_fw_dl = 256, .supports_fw_dump = false, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8797 = { .helper = NULL, .firmware = "mrvl/sd8797_uapsta.bin", .reg = &btmrvl_reg_87xx, .support_pscan_win_report = false, .sd_blksz_fw_dl = 256, .supports_fw_dump = false, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8887 = { .helper = NULL, .firmware = "mrvl/sd8887_uapsta.bin", .reg = &btmrvl_reg_8887, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, .supports_fw_dump = false, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8897 = { .helper = NULL, .firmware = "mrvl/sd8897_uapsta.bin", .reg = &btmrvl_reg_8897, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, .supports_fw_dump = true, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8977 = { .helper = NULL, 
.firmware = "mrvl/sdsd8977_combo_v2.bin", .reg = &btmrvl_reg_89xx, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, .supports_fw_dump = true, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8987 = { .helper = NULL, .firmware = "mrvl/sd8987_uapsta.bin", .reg = &btmrvl_reg_89xx, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, .supports_fw_dump = true, }; static const struct btmrvl_sdio_device btmrvl_sdio_sd8997 = { .helper = NULL, .firmware = "mrvl/sdsd8997_combo_v4.bin", .reg = &btmrvl_reg_89xx, .support_pscan_win_report = true, .sd_blksz_fw_dl = 256, .supports_fw_dump = true, }; static const struct sdio_device_id btmrvl_sdio_ids[] = { /* Marvell SD8688 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8688_BT), .driver_data = (unsigned long)&btmrvl_sdio_sd8688 }, /* Marvell SD8787 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787_BT), .driver_data = (unsigned long)&btmrvl_sdio_sd8787 }, /* Marvell SD8787 Bluetooth AMP device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8787_BT_AMP), .driver_data = (unsigned long)&btmrvl_sdio_sd8787 }, /* Marvell SD8797 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8797_BT), .driver_data = (unsigned long)&btmrvl_sdio_sd8797 }, /* Marvell SD8887 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8887_BT), .driver_data = (unsigned long)&btmrvl_sdio_sd8887 }, /* Marvell SD8897 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8897_BT), .driver_data = (unsigned long)&btmrvl_sdio_sd8897 }, /* Marvell SD8977 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8977_BT), .driver_data = (unsigned long)&btmrvl_sdio_sd8977 }, /* Marvell SD8987 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8987_BT), .driver_data = (unsigned long)&btmrvl_sdio_sd8987 }, /* Marvell SD8997 Bluetooth device */ { SDIO_DEVICE(SDIO_VENDOR_ID_MARVELL, SDIO_DEVICE_ID_MARVELL_8997_BT), .driver_data = (unsigned long)&btmrvl_sdio_sd8997 }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(sdio, btmrvl_sdio_ids); static int btmrvl_sdio_get_rx_unit(struct btmrvl_sdio_card *card) { u8 reg; int ret; reg = sdio_readb(card->func, card->reg->card_rx_unit, &ret); if (!ret) card->rx_unit = reg; return ret; } static int btmrvl_sdio_read_fw_status(struct btmrvl_sdio_card *card, u16 *dat) { u8 fws0, fws1; int ret; *dat = 0; fws0 = sdio_readb(card->func, card->reg->card_fw_status0, &ret); if (ret) return -EIO; fws1 = sdio_readb(card->func, card->reg->card_fw_status1, &ret); if (ret) return -EIO; *dat = (((u16) fws1) << 8) | fws0; return 0; } static int btmrvl_sdio_read_rx_len(struct btmrvl_sdio_card *card, u16 *dat) { u8 reg; int ret; reg = sdio_readb(card->func, card->reg->card_rx_len, &ret); if (!ret) *dat = (u16) reg << card->rx_unit; return ret; } static int btmrvl_sdio_enable_host_int_mask(struct btmrvl_sdio_card *card, u8 mask) { int ret; sdio_writeb(card->func, mask, card->reg->host_int_mask, &ret); if (ret) { BT_ERR("Unable to enable the host interrupt!"); ret = -EIO; } return ret; } static int btmrvl_sdio_disable_host_int_mask(struct btmrvl_sdio_card *card, u8 mask) { u8 host_int_mask; int ret; host_int_mask = sdio_readb(card->func, card->reg->host_int_mask, &ret); if (ret) return -EIO; host_int_mask &= ~mask; sdio_writeb(card->func, host_int_mask, card->reg->host_int_mask, &ret); if (ret < 0) { BT_ERR("Unable to disable the host 
interrupt!"); return -EIO; } return 0; } static int btmrvl_sdio_poll_card_status(struct btmrvl_sdio_card *card, u8 bits) { unsigned int tries; u8 status; int ret; for (tries = 0; tries < MAX_POLL_TRIES * 1000; tries++) { status = sdio_readb(card->func, card->reg->card_status, &ret); if (ret) goto failed; if ((status & bits) == bits) return ret; udelay(1); } ret = -ETIMEDOUT; failed: BT_ERR("FAILED! ret=%d", ret); return ret; } static int btmrvl_sdio_verify_fw_download(struct btmrvl_sdio_card *card, int pollnum) { u16 firmwarestat; int tries, ret; /* Wait for firmware to become ready */ for (tries = 0; tries < pollnum; tries++) { sdio_claim_host(card->func); ret = btmrvl_sdio_read_fw_status(card, &firmwarestat); sdio_release_host(card->func); if (ret < 0) continue; if (firmwarestat == FIRMWARE_READY) return 0; msleep(100); } return -ETIMEDOUT; } static int btmrvl_sdio_download_helper(struct btmrvl_sdio_card *card) { const struct firmware *fw_helper = NULL; const u8 *helper = NULL; int ret; void *tmphlprbuf = NULL; int tmphlprbufsz, hlprblknow, helperlen; u8 *helperbuf; u32 tx_len; ret = request_firmware(&fw_helper, card->helper, &card->func->dev); if ((ret < 0) || !fw_helper) { BT_ERR("request_firmware(helper) failed, error code = %d", ret); ret = -ENOENT; goto done; } helper = fw_helper->data; helperlen = fw_helper->size; BT_DBG("Downloading helper image (%d bytes), block size %d bytes", helperlen, SDIO_BLOCK_SIZE); tmphlprbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN); tmphlprbuf = kzalloc(tmphlprbufsz, GFP_KERNEL); if (!tmphlprbuf) { BT_ERR("Unable to allocate buffer for helper." " Terminating download"); ret = -ENOMEM; goto done; } helperbuf = (u8 *) ALIGN_ADDR(tmphlprbuf, BTSDIO_DMA_ALIGN); /* Perform helper data transfer */ tx_len = (FIRMWARE_TRANSFER_NBLOCK * SDIO_BLOCK_SIZE) - SDIO_HEADER_LEN; hlprblknow = 0; do { ret = btmrvl_sdio_poll_card_status(card, CARD_IO_READY | DN_LD_CARD_RDY); if (ret < 0) { BT_ERR("Helper download poll status timeout @ %d", hlprblknow); goto done; } /* Check if there is more data? 
*/ if (hlprblknow >= helperlen) break; if (helperlen - hlprblknow < tx_len) tx_len = helperlen - hlprblknow; /* Little-endian */ helperbuf[0] = ((tx_len & 0x000000ff) >> 0); helperbuf[1] = ((tx_len & 0x0000ff00) >> 8); helperbuf[2] = ((tx_len & 0x00ff0000) >> 16); helperbuf[3] = ((tx_len & 0xff000000) >> 24); memcpy(&helperbuf[SDIO_HEADER_LEN], &helper[hlprblknow], tx_len); /* Now send the data */ ret = sdio_writesb(card->func, card->ioport, helperbuf, FIRMWARE_TRANSFER_NBLOCK * SDIO_BLOCK_SIZE); if (ret < 0) { BT_ERR("IO error during helper download @ %d", hlprblknow); goto done; } hlprblknow += tx_len; } while (true); BT_DBG("Transferring helper image EOF block"); memset(helperbuf, 0x0, SDIO_BLOCK_SIZE); ret = sdio_writesb(card->func, card->ioport, helperbuf, SDIO_BLOCK_SIZE); if (ret < 0) { BT_ERR("IO error in writing helper image EOF block"); goto done; } ret = 0; done: kfree(tmphlprbuf); release_firmware(fw_helper); return ret; } static int btmrvl_sdio_download_fw_w_helper(struct btmrvl_sdio_card *card) { const struct firmware *fw_firmware = NULL; const u8 *firmware = NULL; int firmwarelen, tmpfwbufsz, ret; unsigned int tries, offset; u8 base0, base1; void *tmpfwbuf = NULL; u8 *fwbuf; u16 len, blksz_dl = card->sd_blksz_fw_dl; int txlen = 0, tx_blocks = 0, count = 0; ret = request_firmware(&fw_firmware, card->firmware, &card->func->dev); if ((ret < 0) || !fw_firmware) { BT_ERR("request_firmware(firmware) failed, error code = %d", ret); ret = -ENOENT; goto done; } firmware = fw_firmware->data; firmwarelen = fw_firmware->size; BT_DBG("Downloading FW image (%d bytes)", firmwarelen); tmpfwbufsz = ALIGN_SZ(BTM_UPLD_SIZE, BTSDIO_DMA_ALIGN); tmpfwbuf = kzalloc(tmpfwbufsz, GFP_KERNEL); if (!tmpfwbuf) { BT_ERR("Unable to allocate buffer for firmware." " Terminating download"); ret = -ENOMEM; goto done; } /* Ensure aligned firmware buffer */ fwbuf = (u8 *) ALIGN_ADDR(tmpfwbuf, BTSDIO_DMA_ALIGN); /* Perform firmware data transfer */ offset = 0; do { ret = btmrvl_sdio_poll_card_status(card, CARD_IO_READY | DN_LD_CARD_RDY); if (ret < 0) { BT_ERR("FW download with helper poll status" " timeout @ %d", offset); goto done; } /* Check if there is more data ? */ if (offset >= firmwarelen) break; for (tries = 0; tries < MAX_POLL_TRIES; tries++) { base0 = sdio_readb(card->func, card->reg->sq_read_base_addr_a0, &ret); if (ret) { BT_ERR("BASE0 register read failed:" " base0 = 0x%04X(%d)." " Terminating download", base0, base0); ret = -EIO; goto done; } base1 = sdio_readb(card->func, card->reg->sq_read_base_addr_a1, &ret); if (ret) { BT_ERR("BASE1 register read failed:" " base1 = 0x%04X(%d)." " Terminating download", base1, base1); ret = -EIO; goto done; } len = (((u16) base1) << 8) | base0; if (len) break; udelay(10); } if (!len) break; else if (len > BTM_UPLD_SIZE) { BT_ERR("FW download failure @%d, invalid length %d", offset, len); ret = -EINVAL; goto done; } txlen = len; if (len & BIT(0)) { count++; if (count > MAX_WRITE_IOMEM_RETRY) { BT_ERR("FW download failure @%d, " "over max retry count", offset); ret = -EIO; goto done; } BT_ERR("FW CRC error indicated by the helper: " "len = 0x%04X, txlen = %d", len, txlen); len &= ~BIT(0); /* Set txlen to 0 so as to resend from same offset */ txlen = 0; } else { count = 0; /* Last block ? 
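 * (Trim txlen to the remaining bytes and round the transfer up to
 * whole blksz_dl blocks for sdio_writesb().)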
*/ if (firmwarelen - offset < txlen) txlen = firmwarelen - offset; tx_blocks = DIV_ROUND_UP(txlen, blksz_dl); memcpy(fwbuf, &firmware[offset], txlen); } ret = sdio_writesb(card->func, card->ioport, fwbuf, tx_blocks * blksz_dl); if (ret < 0) { BT_ERR("FW download, writesb(%d) failed @%d", count, offset); sdio_writeb(card->func, HOST_CMD53_FIN, card->reg->cfg, &ret); if (ret) BT_ERR("writeb failed (CFG)"); } offset += txlen; } while (true); BT_INFO("FW download over, size %d bytes", offset); ret = 0; done: kfree(tmpfwbuf); release_firmware(fw_firmware); return ret; } static int btmrvl_sdio_card_to_host(struct btmrvl_private *priv) { u16 buf_len = 0; int ret, num_blocks, blksz; struct sk_buff *skb = NULL; u32 type; u8 *payload; struct hci_dev *hdev = priv->btmrvl_dev.hcidev; struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; if (!card || !card->func) { BT_ERR("card or function is NULL!"); ret = -EINVAL; goto exit; } /* Read the length of data to be transferred */ ret = btmrvl_sdio_read_rx_len(card, &buf_len); if (ret < 0) { BT_ERR("read rx_len failed"); ret = -EIO; goto exit; } blksz = SDIO_BLOCK_SIZE; num_blocks = DIV_ROUND_UP(buf_len, blksz); if (buf_len <= SDIO_HEADER_LEN || (num_blocks * blksz) > ALLOC_BUF_SIZE) { BT_ERR("invalid packet length: %d", buf_len); ret = -EINVAL; goto exit; } /* Allocate buffer */ skb = bt_skb_alloc(num_blocks * blksz + BTSDIO_DMA_ALIGN, GFP_KERNEL); if (!skb) { BT_ERR("No free skb"); ret = -ENOMEM; goto exit; } if ((unsigned long) skb->data & (BTSDIO_DMA_ALIGN - 1)) { skb_put(skb, (unsigned long) skb->data & (BTSDIO_DMA_ALIGN - 1)); skb_pull(skb, (unsigned long) skb->data & (BTSDIO_DMA_ALIGN - 1)); } payload = skb->data; ret = sdio_readsb(card->func, payload, card->ioport, num_blocks * blksz); if (ret < 0) { BT_ERR("readsb failed: %d", ret); ret = -EIO; goto exit; } /* This is SDIO specific header length: byte[2][1][0], type: byte[3] * (HCI_COMMAND = 1, ACL_DATA = 2, SCO_DATA = 3, 0xFE = Vendor) */ buf_len = payload[0]; buf_len |= payload[1] << 8; buf_len |= payload[2] << 16; if (buf_len > blksz * num_blocks) { BT_ERR("Skip incorrect packet: hdrlen %d buffer %d", buf_len, blksz * num_blocks); ret = -EIO; goto exit; } type = payload[3]; switch (type) { case HCI_ACLDATA_PKT: case HCI_SCODATA_PKT: case HCI_EVENT_PKT: hci_skb_pkt_type(skb) = type; skb_put(skb, buf_len); skb_pull(skb, SDIO_HEADER_LEN); if (type == HCI_EVENT_PKT) { if (btmrvl_check_evtpkt(priv, skb)) hci_recv_frame(hdev, skb); } else { hci_recv_frame(hdev, skb); } hdev->stat.byte_rx += buf_len; break; case MRVL_VENDOR_PKT: hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; skb_put(skb, buf_len); skb_pull(skb, SDIO_HEADER_LEN); if (btmrvl_process_event(priv, skb)) hci_recv_frame(hdev, skb); hdev->stat.byte_rx += buf_len; break; default: BT_ERR("Unknown packet type:%d", type); BT_ERR("hex: %*ph", blksz * num_blocks, payload); kfree_skb(skb); skb = NULL; break; } exit: if (ret) { hdev->stat.err_rx++; kfree_skb(skb); } return ret; } static int btmrvl_sdio_process_int_status(struct btmrvl_private *priv) { ulong flags; u8 ireg; struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; spin_lock_irqsave(&priv->driver_lock, flags); ireg = sdio_ireg; sdio_ireg = 0; spin_unlock_irqrestore(&priv->driver_lock, flags); sdio_claim_host(card->func); if (ireg & DN_LD_HOST_INT_STATUS) { if (priv->btmrvl_dev.tx_dnld_rdy) BT_DBG("tx_done already received: " " int_status=0x%x", ireg); else priv->btmrvl_dev.tx_dnld_rdy = true; } if (ireg & UP_LD_HOST_INT_STATUS) btmrvl_sdio_card_to_host(priv); sdio_release_host(card->func); 
return 0; } static int btmrvl_sdio_read_to_clear(struct btmrvl_sdio_card *card, u8 *ireg) { struct btmrvl_adapter *adapter = card->priv->adapter; int ret; ret = sdio_readsb(card->func, adapter->hw_regs, 0, SDIO_BLOCK_SIZE); if (ret) { BT_ERR("sdio_readsb: read int hw_regs failed: %d", ret); return ret; } *ireg = adapter->hw_regs[card->reg->host_intstatus]; BT_DBG("hw_regs[%#x]=%#x", card->reg->host_intstatus, *ireg); return 0; } static int btmrvl_sdio_write_to_clear(struct btmrvl_sdio_card *card, u8 *ireg) { int ret; *ireg = sdio_readb(card->func, card->reg->host_intstatus, &ret); if (ret) { BT_ERR("sdio_readb: read int status failed: %d", ret); return ret; } if (*ireg) { /* * DN_LD_HOST_INT_STATUS and/or UP_LD_HOST_INT_STATUS * Clear the interrupt status register and re-enable the * interrupt. */ BT_DBG("int_status = 0x%x", *ireg); sdio_writeb(card->func, ~(*ireg) & (DN_LD_HOST_INT_STATUS | UP_LD_HOST_INT_STATUS), card->reg->host_intstatus, &ret); if (ret) { BT_ERR("sdio_writeb: clear int status failed: %d", ret); return ret; } } return 0; } static void btmrvl_sdio_interrupt(struct sdio_func *func) { struct btmrvl_private *priv; struct btmrvl_sdio_card *card; ulong flags; u8 ireg = 0; int ret; card = sdio_get_drvdata(func); if (!card || !card->priv) { BT_ERR("sbi_interrupt(%p) card or priv is NULL, card=%p", func, card); return; } priv = card->priv; if (priv->surprise_removed) return; if (card->reg->int_read_to_clear) ret = btmrvl_sdio_read_to_clear(card, &ireg); else ret = btmrvl_sdio_write_to_clear(card, &ireg); if (ret) return; spin_lock_irqsave(&priv->driver_lock, flags); sdio_ireg |= ireg; spin_unlock_irqrestore(&priv->driver_lock, flags); btmrvl_interrupt(priv); } static int btmrvl_sdio_register_dev(struct btmrvl_sdio_card *card) { struct sdio_func *func; u8 reg; int ret; if (!card || !card->func) { BT_ERR("Error: card or function is NULL!"); ret = -EINVAL; goto failed; } func = card->func; sdio_claim_host(func); ret = sdio_enable_func(func); if (ret) { BT_ERR("sdio_enable_func() failed: ret=%d", ret); ret = -EIO; goto release_host; } ret = sdio_claim_irq(func, btmrvl_sdio_interrupt); if (ret) { BT_ERR("sdio_claim_irq failed: ret=%d", ret); ret = -EIO; goto disable_func; } ret = sdio_set_block_size(card->func, SDIO_BLOCK_SIZE); if (ret) { BT_ERR("cannot set SDIO block size"); ret = -EIO; goto release_irq; } reg = sdio_readb(func, card->reg->io_port_0, &ret); if (ret < 0) { ret = -EIO; goto release_irq; } card->ioport = reg; reg = sdio_readb(func, card->reg->io_port_1, &ret); if (ret < 0) { ret = -EIO; goto release_irq; } card->ioport |= (reg << 8); reg = sdio_readb(func, card->reg->io_port_2, &ret); if (ret < 0) { ret = -EIO; goto release_irq; } card->ioport |= (reg << 16); BT_DBG("SDIO FUNC%d IO port: 0x%x", func->num, card->ioport); if (card->reg->int_read_to_clear) { reg = sdio_readb(func, card->reg->host_int_rsr, &ret); if (ret < 0) { ret = -EIO; goto release_irq; } sdio_writeb(func, reg | 0x3f, card->reg->host_int_rsr, &ret); if (ret < 0) { ret = -EIO; goto release_irq; } reg = sdio_readb(func, card->reg->card_misc_cfg, &ret); if (ret < 0) { ret = -EIO; goto release_irq; } sdio_writeb(func, reg | 0x10, card->reg->card_misc_cfg, &ret); if (ret < 0) { ret = -EIO; goto release_irq; } } sdio_set_drvdata(func, card); sdio_release_host(func); return 0; release_irq: sdio_release_irq(func); disable_func: sdio_disable_func(func); release_host: sdio_release_host(func); failed: return ret; } static int btmrvl_sdio_unregister_dev(struct btmrvl_sdio_card *card) { if (card && card->func) { 
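/* Undo btmrvl_sdio_register_dev(): drop the claimed IRQ and disable
 * the function while holding the host, then clear the drvdata.
 */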
sdio_claim_host(card->func); sdio_release_irq(card->func); sdio_disable_func(card->func); sdio_release_host(card->func); sdio_set_drvdata(card->func, NULL); } return 0; } static int btmrvl_sdio_enable_host_int(struct btmrvl_sdio_card *card) { int ret; if (!card || !card->func) return -EINVAL; sdio_claim_host(card->func); ret = btmrvl_sdio_enable_host_int_mask(card, HIM_ENABLE); btmrvl_sdio_get_rx_unit(card); sdio_release_host(card->func); return ret; } static int btmrvl_sdio_disable_host_int(struct btmrvl_sdio_card *card) { int ret; if (!card || !card->func) return -EINVAL; sdio_claim_host(card->func); ret = btmrvl_sdio_disable_host_int_mask(card, HIM_DISABLE); sdio_release_host(card->func); return ret; } static int btmrvl_sdio_host_to_card(struct btmrvl_private *priv, u8 *payload, u16 nb) { struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; int ret = 0; int blksz; int i = 0; u8 *buf = NULL; void *tmpbuf = NULL; int tmpbufsz; if (!card || !card->func) { BT_ERR("card or function is NULL!"); return -EINVAL; } blksz = DIV_ROUND_UP(nb, SDIO_BLOCK_SIZE) * SDIO_BLOCK_SIZE; buf = payload; if ((unsigned long) payload & (BTSDIO_DMA_ALIGN - 1) || nb < blksz) { tmpbufsz = ALIGN_SZ(blksz, BTSDIO_DMA_ALIGN) + BTSDIO_DMA_ALIGN; tmpbuf = kzalloc(tmpbufsz, GFP_KERNEL); if (!tmpbuf) return -ENOMEM; buf = (u8 *) ALIGN_ADDR(tmpbuf, BTSDIO_DMA_ALIGN); memcpy(buf, payload, nb); } sdio_claim_host(card->func); do { /* Transfer data to card */ ret = sdio_writesb(card->func, card->ioport, buf, blksz); if (ret < 0) { i++; BT_ERR("i=%d writesb failed: %d", i, ret); BT_ERR("hex: %*ph", nb, payload); ret = -EIO; if (i > MAX_WRITE_IOMEM_RETRY) goto exit; } } while (ret); priv->btmrvl_dev.tx_dnld_rdy = false; exit: sdio_release_host(card->func); kfree(tmpbuf); return ret; } static int btmrvl_sdio_download_fw(struct btmrvl_sdio_card *card) { int ret; u8 fws0; int pollnum = MAX_POLL_TRIES; if (!card || !card->func) { BT_ERR("card or function is NULL!"); return -EINVAL; } if (!btmrvl_sdio_verify_fw_download(card, 1)) { BT_DBG("Firmware already downloaded!"); return 0; } sdio_claim_host(card->func); /* Check if other function driver is downloading the firmware */ fws0 = sdio_readb(card->func, card->reg->card_fw_status0, &ret); if (ret) { BT_ERR("Failed to read FW downloading status!"); ret = -EIO; goto done; } if (fws0) { BT_DBG("BT not the winner (%#x). 
Skip FW downloading", fws0); /* Give other function more time to download the firmware */ pollnum *= 10; } else { if (card->helper) { ret = btmrvl_sdio_download_helper(card); if (ret) { BT_ERR("Failed to download helper!"); ret = -EIO; goto done; } } if (btmrvl_sdio_download_fw_w_helper(card)) { BT_ERR("Failed to download firmware!"); ret = -EIO; goto done; } } /* * winner or not, with this test the FW synchronizes when the * module can continue its initialization */ if (btmrvl_sdio_verify_fw_download(card, pollnum)) { BT_ERR("FW failed to be active in time!"); ret = -ETIMEDOUT; goto done; } sdio_release_host(card->func); return 0; done: sdio_release_host(card->func); return ret; } static int btmrvl_sdio_wakeup_fw(struct btmrvl_private *priv) { struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; int ret = 0; if (!card || !card->func) { BT_ERR("card or function is NULL!"); return -EINVAL; } sdio_claim_host(card->func); sdio_writeb(card->func, HOST_POWER_UP, card->reg->cfg, &ret); sdio_release_host(card->func); BT_DBG("wake up firmware"); return ret; } static void btmrvl_sdio_dump_regs(struct btmrvl_private *priv) { struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; int ret = 0; unsigned int reg, reg_start, reg_end; char buf[256], *ptr; u8 loop, func, data; int MAX_LOOP = 2; btmrvl_sdio_wakeup_fw(priv); sdio_claim_host(card->func); for (loop = 0; loop < MAX_LOOP; loop++) { memset(buf, 0, sizeof(buf)); ptr = buf; if (loop == 0) { /* Read the registers of SDIO function0 */ func = loop; reg_start = 0; reg_end = 9; } else { func = 2; reg_start = 0; reg_end = 0x09; } ptr += sprintf(ptr, "SDIO Func%d (%#x-%#x): ", func, reg_start, reg_end); for (reg = reg_start; reg <= reg_end; reg++) { if (func == 0) data = sdio_f0_readb(card->func, reg, &ret); else data = sdio_readb(card->func, reg, &ret); if (!ret) { ptr += sprintf(ptr, "%02x ", data); } else { ptr += sprintf(ptr, "ERR"); break; } } BT_INFO("%s", buf); } sdio_release_host(card->func); } /* This function read/write firmware */ static enum rdwr_status btmrvl_sdio_rdwr_firmware(struct btmrvl_private *priv, u8 doneflag) { struct btmrvl_sdio_card *card = priv->btmrvl_dev.card; int ret, tries; u8 ctrl_data = 0; sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl, &ret); if (ret) { BT_ERR("SDIO write err"); return RDWR_STATUS_FAILURE; } for (tries = 0; tries < MAX_POLL_TRIES; tries++) { ctrl_data = sdio_readb(card->func, card->reg->fw_dump_ctrl, &ret); if (ret) { BT_ERR("SDIO read err"); return RDWR_STATUS_FAILURE; } if (ctrl_data == FW_DUMP_DONE) break; if (doneflag && ctrl_data == doneflag) return RDWR_STATUS_DONE; if (ctrl_data != FW_DUMP_HOST_READY) { BT_INFO("The ctrl reg was changed, re-try again!"); sdio_writeb(card->func, FW_DUMP_HOST_READY, card->reg->fw_dump_ctrl, &ret); if (ret) { BT_ERR("SDIO write err"); return RDWR_STATUS_FAILURE; } } usleep_range(100, 200); } if (ctrl_data == FW_DUMP_HOST_READY) { BT_ERR("Fail to pull ctrl_data"); return RDWR_STATUS_FAILURE; } return RDWR_STATUS_SUCCESS; } /* This function dump sdio register and memory data */ static void btmrvl_sdio_coredump(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct btmrvl_sdio_card *card; struct btmrvl_private *priv; int ret = 0; unsigned int reg, reg_start, reg_end; enum rdwr_status stat; u8 *dbg_ptr, *end_ptr, *fw_dump_data, *fw_dump_ptr; u8 dump_num = 0, idx, i, read_reg, doneflag = 0; u32 memory_size, fw_dump_len = 0; int size = 0; card = sdio_get_drvdata(func); priv = card->priv; /* dump sdio register first */ 
btmrvl_sdio_dump_regs(priv); if (!card->supports_fw_dump) { BT_ERR("Firmware dump not supported for this card!"); return; } for (idx = 0; idx < ARRAY_SIZE(mem_type_mapping_tbl); idx++) { struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; if (entry->mem_ptr) { vfree(entry->mem_ptr); entry->mem_ptr = NULL; } entry->mem_size = 0; } btmrvl_sdio_wakeup_fw(priv); sdio_claim_host(card->func); BT_INFO("== btmrvl firmware dump start =="); stat = btmrvl_sdio_rdwr_firmware(priv, doneflag); if (stat == RDWR_STATUS_FAILURE) goto done; reg = card->reg->fw_dump_start; /* Read the number of the memories which will dump */ dump_num = sdio_readb(card->func, reg, &ret); if (ret) { BT_ERR("SDIO read memory length err"); goto done; } /* Read the length of every memory which will dump */ for (idx = 0; idx < dump_num; idx++) { struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; stat = btmrvl_sdio_rdwr_firmware(priv, doneflag); if (stat == RDWR_STATUS_FAILURE) goto done; memory_size = 0; reg = card->reg->fw_dump_start; for (i = 0; i < 4; i++) { read_reg = sdio_readb(card->func, reg, &ret); if (ret) { BT_ERR("SDIO read err"); goto done; } memory_size |= (read_reg << i*8); reg++; } if (memory_size == 0) { BT_INFO("Firmware dump finished!"); sdio_writeb(card->func, FW_DUMP_READ_DONE, card->reg->fw_dump_ctrl, &ret); if (ret) { BT_ERR("SDIO Write MEMDUMP_FINISH ERR"); goto done; } break; } BT_INFO("%s_SIZE=0x%x", entry->mem_name, memory_size); entry->mem_ptr = vzalloc(memory_size + 1); entry->mem_size = memory_size; if (!entry->mem_ptr) { BT_ERR("Vzalloc %s failed", entry->mem_name); goto done; } fw_dump_len += (strlen("========Start dump ") + strlen(entry->mem_name) + strlen("========\n") + (memory_size + 1) + strlen("\n========End dump========\n")); dbg_ptr = entry->mem_ptr; end_ptr = dbg_ptr + memory_size; doneflag = entry->done_flag; BT_INFO("Start %s output, please wait...", entry->mem_name); do { stat = btmrvl_sdio_rdwr_firmware(priv, doneflag); if (stat == RDWR_STATUS_FAILURE) goto done; reg_start = card->reg->fw_dump_start; reg_end = card->reg->fw_dump_end; for (reg = reg_start; reg <= reg_end; reg++) { *dbg_ptr = sdio_readb(card->func, reg, &ret); if (ret) { BT_ERR("SDIO read err"); goto done; } if (dbg_ptr < end_ptr) dbg_ptr++; else BT_ERR("Allocated buffer not enough"); } if (stat == RDWR_STATUS_DONE) { BT_INFO("%s done: size=0x%tx", entry->mem_name, dbg_ptr - entry->mem_ptr); break; } } while (1); } BT_INFO("== btmrvl firmware dump end =="); done: sdio_release_host(card->func); if (fw_dump_len == 0) return; fw_dump_data = vzalloc(fw_dump_len + 1); if (!fw_dump_data) { BT_ERR("Vzalloc fw_dump_data fail!"); return; } fw_dump_ptr = fw_dump_data; /* Dump all the memory data into single file, a userspace script will * be used to split all the memory data to multiple files */ BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump start"); for (idx = 0; idx < dump_num; idx++) { struct memory_type_mapping *entry = &mem_type_mapping_tbl[idx]; if (entry->mem_ptr) { size += scnprintf(fw_dump_ptr + size, fw_dump_len + 1 - size, "========Start dump %s========\n", entry->mem_name); memcpy(fw_dump_ptr + size, entry->mem_ptr, entry->mem_size); size += entry->mem_size; size += scnprintf(fw_dump_ptr + size, fw_dump_len + 1 - size, "\n========End dump========\n"); vfree(mem_type_mapping_tbl[idx].mem_ptr); mem_type_mapping_tbl[idx].mem_ptr = NULL; } } /* fw_dump_data will be free in device coredump release function * after 5 min */ dev_coredumpv(&card->func->dev, fw_dump_data, fw_dump_len, 
GFP_KERNEL); BT_INFO("== btmrvl firmware dump to /sys/class/devcoredump end"); } static int btmrvl_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { int ret = 0; struct btmrvl_private *priv = NULL; struct btmrvl_sdio_card *card = NULL; BT_INFO("vendor=0x%x, device=0x%x, class=%d, fn=%d", id->vendor, id->device, id->class, func->num); card = devm_kzalloc(&func->dev, sizeof(*card), GFP_KERNEL); if (!card) return -ENOMEM; card->func = func; if (id->driver_data) { struct btmrvl_sdio_device *data = (void *) id->driver_data; card->helper = data->helper; card->firmware = data->firmware; card->reg = data->reg; card->sd_blksz_fw_dl = data->sd_blksz_fw_dl; card->support_pscan_win_report = data->support_pscan_win_report; card->supports_fw_dump = data->supports_fw_dump; } if (btmrvl_sdio_register_dev(card) < 0) { BT_ERR("Failed to register BT device!"); return -ENODEV; } /* Disable the interrupts on the card */ btmrvl_sdio_disable_host_int(card); if (btmrvl_sdio_download_fw(card)) { BT_ERR("Downloading firmware failed!"); ret = -ENODEV; goto unreg_dev; } btmrvl_sdio_enable_host_int(card); /* Device tree node parsing and platform specific configuration*/ btmrvl_sdio_probe_of(&func->dev, card); priv = btmrvl_add_card(card); if (!priv) { BT_ERR("Initializing card failed!"); ret = -ENODEV; goto disable_host_int; } card->priv = priv; /* Initialize the interface specific function pointers */ priv->hw_host_to_card = btmrvl_sdio_host_to_card; priv->hw_wakeup_firmware = btmrvl_sdio_wakeup_fw; priv->hw_process_int_status = btmrvl_sdio_process_int_status; if (btmrvl_register_hdev(priv)) { BT_ERR("Register hdev failed!"); ret = -ENODEV; goto disable_host_int; } return 0; disable_host_int: btmrvl_sdio_disable_host_int(card); unreg_dev: btmrvl_sdio_unregister_dev(card); return ret; } static void btmrvl_sdio_remove(struct sdio_func *func) { struct btmrvl_sdio_card *card; if (func) { card = sdio_get_drvdata(func); if (card) { /* Send SHUTDOWN command & disable interrupt * if user removes the module. 
*/ if (user_rmmod) { btmrvl_send_module_cfg_cmd(card->priv, MODULE_SHUTDOWN_REQ); btmrvl_sdio_disable_host_int(card); } BT_DBG("unregister dev"); card->priv->surprise_removed = true; btmrvl_sdio_unregister_dev(card); btmrvl_remove_card(card->priv); } } } static int btmrvl_sdio_suspend(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct btmrvl_sdio_card *card; struct btmrvl_private *priv; mmc_pm_flag_t pm_flags; struct hci_dev *hcidev; if (func) { pm_flags = sdio_get_host_pm_caps(func); BT_DBG("%s: suspend: PM flags = 0x%x", sdio_func_id(func), pm_flags); if (!(pm_flags & MMC_PM_KEEP_POWER)) { BT_ERR("%s: cannot remain alive while suspended", sdio_func_id(func)); return -ENOSYS; } card = sdio_get_drvdata(func); if (!card || !card->priv) { BT_ERR("card or priv structure is not valid"); return 0; } } else { BT_ERR("sdio_func is not specified"); return 0; } /* Enable platform specific wakeup interrupt */ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 && device_may_wakeup(dev)) { card->plt_wake_cfg->wake_by_bt = false; enable_irq(card->plt_wake_cfg->irq_bt); enable_irq_wake(card->plt_wake_cfg->irq_bt); } priv = card->priv; priv->adapter->is_suspending = true; hcidev = priv->btmrvl_dev.hcidev; BT_DBG("%s: SDIO suspend", hcidev->name); hci_suspend_dev(hcidev); if (priv->adapter->hs_state != HS_ACTIVATED) { if (btmrvl_enable_hs(priv)) { BT_ERR("HS not activated, suspend failed!"); /* Disable platform specific wakeup interrupt */ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 && device_may_wakeup(dev)) { disable_irq_wake(card->plt_wake_cfg->irq_bt); disable_irq(card->plt_wake_cfg->irq_bt); } priv->adapter->is_suspending = false; return -EBUSY; } } priv->adapter->is_suspending = false; priv->adapter->is_suspended = true; /* We will keep the power when hs enabled successfully */ if (priv->adapter->hs_state == HS_ACTIVATED) { BT_DBG("suspend with MMC_PM_KEEP_POWER"); return sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); } BT_DBG("suspend without MMC_PM_KEEP_POWER"); return 0; } static int btmrvl_sdio_resume(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct btmrvl_sdio_card *card; struct btmrvl_private *priv; mmc_pm_flag_t pm_flags; struct hci_dev *hcidev; if (func) { pm_flags = sdio_get_host_pm_caps(func); BT_DBG("%s: resume: PM flags = 0x%x", sdio_func_id(func), pm_flags); card = sdio_get_drvdata(func); if (!card || !card->priv) { BT_ERR("card or priv structure is not valid"); return 0; } } else { BT_ERR("sdio_func is not specified"); return 0; } priv = card->priv; if (!priv->adapter->is_suspended) { BT_DBG("device already resumed"); return 0; } priv->hw_wakeup_firmware(priv); priv->adapter->hs_state = HS_DEACTIVATED; hcidev = priv->btmrvl_dev.hcidev; BT_DBG("%s: HS DEACTIVATED in resume!", hcidev->name); priv->adapter->is_suspended = false; BT_DBG("%s: SDIO resume", hcidev->name); hci_resume_dev(hcidev); /* Disable platform specific wakeup interrupt */ if (card->plt_wake_cfg && card->plt_wake_cfg->irq_bt >= 0 && device_may_wakeup(dev)) { disable_irq_wake(card->plt_wake_cfg->irq_bt); disable_irq(card->plt_wake_cfg->irq_bt); if (card->plt_wake_cfg->wake_by_bt) /* Undo our disable, since interrupt handler already * did this. 
*/ enable_irq(card->plt_wake_cfg->irq_bt); } return 0; } static const struct dev_pm_ops btmrvl_sdio_pm_ops = { .suspend = btmrvl_sdio_suspend, .resume = btmrvl_sdio_resume, }; static struct sdio_driver bt_mrvl_sdio = { .name = "btmrvl_sdio", .id_table = btmrvl_sdio_ids, .probe = btmrvl_sdio_probe, .remove = btmrvl_sdio_remove, .drv = { .owner = THIS_MODULE, .coredump = btmrvl_sdio_coredump, .pm = &btmrvl_sdio_pm_ops, } }; static int __init btmrvl_sdio_init_module(void) { if (sdio_register_driver(&bt_mrvl_sdio) != 0) { BT_ERR("SDIO Driver Registration Failed"); return -ENODEV; } /* Clear the flag in case user removes the card. */ user_rmmod = 0; return 0; } static void __exit btmrvl_sdio_exit_module(void) { /* Set the flag as user is removing this module. */ user_rmmod = 1; sdio_unregister_driver(&bt_mrvl_sdio); } module_init(btmrvl_sdio_init_module); module_exit(btmrvl_sdio_exit_module); MODULE_AUTHOR("Marvell International Ltd."); MODULE_DESCRIPTION("Marvell BT-over-SDIO driver ver " VERSION); MODULE_VERSION(VERSION); MODULE_LICENSE("GPL v2"); MODULE_FIRMWARE("mrvl/sd8688_helper.bin"); MODULE_FIRMWARE("mrvl/sd8688.bin"); MODULE_FIRMWARE("mrvl/sd8787_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8797_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8887_uapsta.bin"); MODULE_FIRMWARE("mrvl/sd8897_uapsta.bin"); MODULE_FIRMWARE("mrvl/sdsd8977_combo_v2.bin"); MODULE_FIRMWARE("mrvl/sd8987_uapsta.bin"); MODULE_FIRMWARE("mrvl/sdsd8997_combo_v4.bin");
linux-master
drivers/bluetooth/btmrvl_sdio.c
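/*
 * Editor's sketch (not part of the kernel source above): a minimal,
 * standalone userspace illustration of the scratch-buffer alignment that
 * btmrvl_sdio_host_to_card() performs before sdio_writesb(). The constant
 * values below (64-byte blocks, 8-byte DMA alignment) are assumed stand-ins
 * for SDIO_BLOCK_SIZE and BTSDIO_DMA_ALIGN, and round_up_to_block()/
 * align_ptr() are hypothetical helper names standing in for the driver's
 * DIV_ROUND_UP/ALIGN_SZ/ALIGN_ADDR macros.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_SIZE 64u /* assumed stand-in for SDIO_BLOCK_SIZE */
#define DMA_ALIGN   8u /* assumed stand-in for BTSDIO_DMA_ALIGN */

/* Round a payload length up to a whole number of SDIO blocks. */
static size_t round_up_to_block(size_t nb)
{
	return ((nb + BLOCK_SIZE - 1) / BLOCK_SIZE) * BLOCK_SIZE;
}

/* Advance a pointer to the next DMA_ALIGN boundary. */
static uint8_t *align_ptr(void *p)
{
	uintptr_t a = ((uintptr_t)p + DMA_ALIGN - 1) & ~(uintptr_t)(DMA_ALIGN - 1);
	return (uint8_t *)a;
}

int main(void)
{
	uint8_t payload[100];
	size_t nb = sizeof(payload);
	size_t blksz = round_up_to_block(nb); /* 100 -> 128 */
	uint8_t *buf = payload;
	void *tmpbuf = NULL;

	memset(payload, 0xab, nb);

	/* Re-buffer when the payload is misaligned or shorter than blksz,
	 * the same condition the driver tests before the block transfer. */
	if (((uintptr_t)payload & (DMA_ALIGN - 1)) || nb < blksz) {
		tmpbuf = calloc(1, blksz + DMA_ALIGN); /* over-allocate, like kzalloc */
		assert(tmpbuf);
		buf = align_ptr(tmpbuf);
		memcpy(buf, payload, nb);
	}

	assert(((uintptr_t)buf & (DMA_ALIGN - 1)) == 0);
	printf("nb=%zu blksz=%zu aligned buf=%p\n", nb, blksz, (void *)buf);
	free(tmpbuf);
	return 0;
}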
// SPDX-License-Identifier: GPL-2.0-only /* binder_alloc.c * * Android IPC Subsystem * * Copyright (C) 2007-2017 Google, Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/list.h> #include <linux/sched/mm.h> #include <linux/module.h> #include <linux/rtmutex.h> #include <linux/rbtree.h> #include <linux/seq_file.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/list_lru.h> #include <linux/ratelimit.h> #include <asm/cacheflush.h> #include <linux/uaccess.h> #include <linux/highmem.h> #include <linux/sizes.h> #include "binder_alloc.h" #include "binder_trace.h" struct list_lru binder_alloc_lru; static DEFINE_MUTEX(binder_alloc_mmap_lock); enum { BINDER_DEBUG_USER_ERROR = 1U << 0, BINDER_DEBUG_OPEN_CLOSE = 1U << 1, BINDER_DEBUG_BUFFER_ALLOC = 1U << 2, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 3, }; static uint32_t binder_alloc_debug_mask = BINDER_DEBUG_USER_ERROR; module_param_named(debug_mask, binder_alloc_debug_mask, uint, 0644); #define binder_alloc_debug(mask, x...) \ do { \ if (binder_alloc_debug_mask & mask) \ pr_info_ratelimited(x); \ } while (0) static struct binder_buffer *binder_buffer_next(struct binder_buffer *buffer) { return list_entry(buffer->entry.next, struct binder_buffer, entry); } static struct binder_buffer *binder_buffer_prev(struct binder_buffer *buffer) { return list_entry(buffer->entry.prev, struct binder_buffer, entry); } static size_t binder_alloc_buffer_size(struct binder_alloc *alloc, struct binder_buffer *buffer) { if (list_is_last(&buffer->entry, &alloc->buffers)) return alloc->buffer + alloc->buffer_size - buffer->user_data; return binder_buffer_next(buffer)->user_data - buffer->user_data; } static void binder_insert_free_buffer(struct binder_alloc *alloc, struct binder_buffer *new_buffer) { struct rb_node **p = &alloc->free_buffers.rb_node; struct rb_node *parent = NULL; struct binder_buffer *buffer; size_t buffer_size; size_t new_buffer_size; BUG_ON(!new_buffer->free); new_buffer_size = binder_alloc_buffer_size(alloc, new_buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: add free buffer, size %zd, at %pK\n", alloc->pid, new_buffer_size, new_buffer); while (*p) { parent = *p; buffer = rb_entry(parent, struct binder_buffer, rb_node); BUG_ON(!buffer->free); buffer_size = binder_alloc_buffer_size(alloc, buffer); if (new_buffer_size < buffer_size) p = &parent->rb_left; else p = &parent->rb_right; } rb_link_node(&new_buffer->rb_node, parent, p); rb_insert_color(&new_buffer->rb_node, &alloc->free_buffers); } static void binder_insert_allocated_buffer_locked( struct binder_alloc *alloc, struct binder_buffer *new_buffer) { struct rb_node **p = &alloc->allocated_buffers.rb_node; struct rb_node *parent = NULL; struct binder_buffer *buffer; BUG_ON(new_buffer->free); while (*p) { parent = *p; buffer = rb_entry(parent, struct binder_buffer, rb_node); BUG_ON(buffer->free); if (new_buffer->user_data < buffer->user_data) p = &parent->rb_left; else if (new_buffer->user_data > buffer->user_data) p = &parent->rb_right; else BUG(); } rb_link_node(&new_buffer->rb_node, parent, p); rb_insert_color(&new_buffer->rb_node, &alloc->allocated_buffers); } static struct binder_buffer *binder_alloc_prepare_to_free_locked( struct binder_alloc *alloc, uintptr_t user_ptr) { struct rb_node *n = alloc->allocated_buffers.rb_node; struct binder_buffer *buffer; void __user *uptr; uptr = (void __user *)user_ptr; while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(buffer->free); if (uptr < buffer->user_data) n = 
n->rb_left; else if (uptr > buffer->user_data) n = n->rb_right; else { /* * Guard against user threads attempting to * free the buffer when in use by kernel or * after it's already been freed. */ if (!buffer->allow_user_free) return ERR_PTR(-EPERM); buffer->allow_user_free = 0; return buffer; } } return NULL; } /** * binder_alloc_prepare_to_free() - get buffer given user ptr * @alloc: binder_alloc for this proc * @user_ptr: User pointer to buffer data * * Validate userspace pointer to buffer data and return buffer corresponding to * that user pointer. Search the rb tree for buffer that matches user data * pointer. * * Return: Pointer to buffer or NULL */ struct binder_buffer *binder_alloc_prepare_to_free(struct binder_alloc *alloc, uintptr_t user_ptr) { struct binder_buffer *buffer; mutex_lock(&alloc->mutex); buffer = binder_alloc_prepare_to_free_locked(alloc, user_ptr); mutex_unlock(&alloc->mutex); return buffer; } static int binder_update_page_range(struct binder_alloc *alloc, int allocate, void __user *start, void __user *end) { void __user *page_addr; unsigned long user_page_addr; struct binder_lru_page *page; struct vm_area_struct *vma = NULL; struct mm_struct *mm = NULL; bool need_mm = false; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: %s pages %pK-%pK\n", alloc->pid, allocate ? "allocate" : "free", start, end); if (end <= start) return 0; trace_binder_update_page_range(alloc, allocate, start, end); if (allocate == 0) goto free_range; for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { page = &alloc->pages[(page_addr - alloc->buffer) / PAGE_SIZE]; if (!page->page_ptr) { need_mm = true; break; } } if (need_mm && mmget_not_zero(alloc->mm)) mm = alloc->mm; if (mm) { mmap_write_lock(mm); vma = alloc->vma; } if (!vma && need_mm) { binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%d: binder_alloc_buf failed to map pages in userspace, no vma\n", alloc->pid); goto err_no_vma; } for (page_addr = start; page_addr < end; page_addr += PAGE_SIZE) { int ret; bool on_lru; size_t index; index = (page_addr - alloc->buffer) / PAGE_SIZE; page = &alloc->pages[index]; if (page->page_ptr) { trace_binder_alloc_lru_start(alloc, index); on_lru = list_lru_del(&binder_alloc_lru, &page->lru); WARN_ON(!on_lru); trace_binder_alloc_lru_end(alloc, index); continue; } if (WARN_ON(!vma)) goto err_page_ptr_cleared; trace_binder_alloc_page_start(alloc, index); page->page_ptr = alloc_page(GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO); if (!page->page_ptr) { pr_err("%d: binder_alloc_buf failed for page at %pK\n", alloc->pid, page_addr); goto err_alloc_page_failed; } page->alloc = alloc; INIT_LIST_HEAD(&page->lru); user_page_addr = (uintptr_t)page_addr; ret = vm_insert_page(vma, user_page_addr, page[0].page_ptr); if (ret) { pr_err("%d: binder_alloc_buf failed to map page at %lx in userspace\n", alloc->pid, user_page_addr); goto err_vm_insert_page_failed; } if (index + 1 > alloc->pages_high) alloc->pages_high = index + 1; trace_binder_alloc_page_end(alloc, index); } if (mm) { mmap_write_unlock(mm); mmput(mm); } return 0; free_range: for (page_addr = end - PAGE_SIZE; 1; page_addr -= PAGE_SIZE) { bool ret; size_t index; index = (page_addr - alloc->buffer) / PAGE_SIZE; page = &alloc->pages[index]; trace_binder_free_lru_start(alloc, index); ret = list_lru_add(&binder_alloc_lru, &page->lru); WARN_ON(!ret); trace_binder_free_lru_end(alloc, index); if (page_addr == start) break; continue; err_vm_insert_page_failed: __free_page(page->page_ptr); page->page_ptr = NULL; err_alloc_page_failed: err_page_ptr_cleared: if 
(page_addr == start) break; } err_no_vma: if (mm) { mmap_write_unlock(mm); mmput(mm); } return vma ? -ENOMEM : -ESRCH; } static inline void binder_alloc_set_vma(struct binder_alloc *alloc, struct vm_area_struct *vma) { /* pairs with smp_load_acquire in binder_alloc_get_vma() */ smp_store_release(&alloc->vma, vma); } static inline struct vm_area_struct *binder_alloc_get_vma( struct binder_alloc *alloc) { /* pairs with smp_store_release in binder_alloc_set_vma() */ return smp_load_acquire(&alloc->vma); } static bool debug_low_async_space_locked(struct binder_alloc *alloc, int pid) { /* * Find the amount and size of buffers allocated by the current caller; * The idea is that once we cross the threshold, whoever is responsible * for the low async space is likely to try to send another async txn, * and at some point we'll catch them in the act. This is more efficient * than keeping a map per pid. */ struct rb_node *n; struct binder_buffer *buffer; size_t total_alloc_size = 0; size_t num_buffers = 0; for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) { buffer = rb_entry(n, struct binder_buffer, rb_node); if (buffer->pid != pid) continue; if (!buffer->async_transaction) continue; total_alloc_size += binder_alloc_buffer_size(alloc, buffer) + sizeof(struct binder_buffer); num_buffers++; } /* * Warn if this pid has more than 50 transactions, or more than 50% of * async space (which is 25% of total buffer size). Oneway spam is only * detected when the threshold is exceeded. */ if (num_buffers > 50 || total_alloc_size > alloc->buffer_size / 4) { binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%d: pid %d spamming oneway? %zd buffers allocated for a total size of %zd\n", alloc->pid, pid, num_buffers, total_alloc_size); if (!alloc->oneway_spam_detected) { alloc->oneway_spam_detected = true; return true; } } return false; } static struct binder_buffer *binder_alloc_new_buf_locked( struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, int is_async, int pid) { struct rb_node *n = alloc->free_buffers.rb_node; struct binder_buffer *buffer; size_t buffer_size; struct rb_node *best_fit = NULL; void __user *has_page_addr; void __user *end_page_addr; size_t size, data_offsets_size; int ret; /* Check binder_alloc is fully initialized */ if (!binder_alloc_get_vma(alloc)) { binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%d: binder_alloc_buf, no vma\n", alloc->pid); return ERR_PTR(-ESRCH); } data_offsets_size = ALIGN(data_size, sizeof(void *)) + ALIGN(offsets_size, sizeof(void *)); if (data_offsets_size < data_size || data_offsets_size < offsets_size) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: got transaction with invalid size %zd-%zd\n", alloc->pid, data_size, offsets_size); return ERR_PTR(-EINVAL); } size = data_offsets_size + ALIGN(extra_buffers_size, sizeof(void *)); if (size < data_offsets_size || size < extra_buffers_size) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: got transaction with invalid extra_buffers_size %zd\n", alloc->pid, extra_buffers_size); return ERR_PTR(-EINVAL); } if (is_async && alloc->free_async_space < size + sizeof(struct binder_buffer)) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd failed, no async space left\n", alloc->pid, size); return ERR_PTR(-ENOSPC); } /* Pad 0-size buffers so they get assigned unique addresses */ size = max(size, sizeof(void *)); while (n) { buffer = rb_entry(n, struct binder_buffer, rb_node); BUG_ON(!buffer->free); buffer_size = 
binder_alloc_buffer_size(alloc, buffer); if (size < buffer_size) { best_fit = n; n = n->rb_left; } else if (size > buffer_size) n = n->rb_right; else { best_fit = n; break; } } if (best_fit == NULL) { size_t allocated_buffers = 0; size_t largest_alloc_size = 0; size_t total_alloc_size = 0; size_t free_buffers = 0; size_t largest_free_size = 0; size_t total_free_size = 0; for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) { buffer = rb_entry(n, struct binder_buffer, rb_node); buffer_size = binder_alloc_buffer_size(alloc, buffer); allocated_buffers++; total_alloc_size += buffer_size; if (buffer_size > largest_alloc_size) largest_alloc_size = buffer_size; } for (n = rb_first(&alloc->free_buffers); n != NULL; n = rb_next(n)) { buffer = rb_entry(n, struct binder_buffer, rb_node); buffer_size = binder_alloc_buffer_size(alloc, buffer); free_buffers++; total_free_size += buffer_size; if (buffer_size > largest_free_size) largest_free_size = buffer_size; } binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%d: binder_alloc_buf size %zd failed, no address space\n", alloc->pid, size); binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "allocated: %zd (num: %zd largest: %zd), free: %zd (num: %zd largest: %zd)\n", total_alloc_size, allocated_buffers, largest_alloc_size, total_free_size, free_buffers, largest_free_size); return ERR_PTR(-ENOSPC); } if (n == NULL) { buffer = rb_entry(best_fit, struct binder_buffer, rb_node); buffer_size = binder_alloc_buffer_size(alloc, buffer); } binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got buffer %pK size %zd\n", alloc->pid, size, buffer, buffer_size); has_page_addr = (void __user *) (((uintptr_t)buffer->user_data + buffer_size) & PAGE_MASK); WARN_ON(n && buffer_size != size); end_page_addr = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); if (end_page_addr > has_page_addr) end_page_addr = has_page_addr; ret = binder_update_page_range(alloc, 1, (void __user *) PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); if (ret) return ERR_PTR(ret); if (buffer_size != size) { struct binder_buffer *new_buffer; new_buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (!new_buffer) { pr_err("%s: %d failed to alloc new buffer struct\n", __func__, alloc->pid); goto err_alloc_buf_struct_failed; } new_buffer->user_data = (u8 __user *)buffer->user_data + size; list_add(&new_buffer->entry, &buffer->entry); new_buffer->free = 1; binder_insert_free_buffer(alloc, new_buffer); } rb_erase(best_fit, &alloc->free_buffers); buffer->free = 0; buffer->allow_user_free = 0; binder_insert_allocated_buffer_locked(alloc, buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_alloc_buf size %zd got %pK\n", alloc->pid, size, buffer); buffer->data_size = data_size; buffer->offsets_size = offsets_size; buffer->async_transaction = is_async; buffer->extra_buffers_size = extra_buffers_size; buffer->pid = pid; buffer->oneway_spam_suspect = false; if (is_async) { alloc->free_async_space -= size + sizeof(struct binder_buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_alloc_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); if (alloc->free_async_space < alloc->buffer_size / 10) { /* * Start detecting spammers once we have less than 20% * of async space left (which is less than 10% of total * buffer size). 
*/ buffer->oneway_spam_suspect = debug_low_async_space_locked(alloc, pid); } else { alloc->oneway_spam_detected = false; } } return buffer; err_alloc_buf_struct_failed: binder_update_page_range(alloc, 0, (void __user *) PAGE_ALIGN((uintptr_t)buffer->user_data), end_page_addr); return ERR_PTR(-ENOMEM); } /** * binder_alloc_new_buf() - Allocate a new binder buffer * @alloc: binder_alloc for this proc * @data_size: size of user data buffer * @offsets_size: user specified buffer offset * @extra_buffers_size: size of extra space for meta-data (e.g., security context) * @is_async: buffer for async transaction * @pid: pid to attribute allocation to (used for debugging) * * Allocate a new buffer given the requested sizes. Returns * the kernel version of the buffer pointer. The size allocated * is the sum of the three given sizes (each rounded up to a * pointer-sized boundary). * * Return: The allocated buffer or an ERR_PTR() on failure */ struct binder_buffer *binder_alloc_new_buf(struct binder_alloc *alloc, size_t data_size, size_t offsets_size, size_t extra_buffers_size, int is_async, int pid) { struct binder_buffer *buffer; mutex_lock(&alloc->mutex); buffer = binder_alloc_new_buf_locked(alloc, data_size, offsets_size, extra_buffers_size, is_async, pid); mutex_unlock(&alloc->mutex); return buffer; } static void __user *buffer_start_page(struct binder_buffer *buffer) { return (void __user *)((uintptr_t)buffer->user_data & PAGE_MASK); } static void __user *prev_buffer_end_page(struct binder_buffer *buffer) { return (void __user *) (((uintptr_t)(buffer->user_data) - 1) & PAGE_MASK); } static void binder_delete_free_buffer(struct binder_alloc *alloc, struct binder_buffer *buffer) { struct binder_buffer *prev, *next = NULL; bool to_free = true; BUG_ON(alloc->buffers.next == &buffer->entry); prev = binder_buffer_prev(buffer); BUG_ON(!prev->free); if (prev_buffer_end_page(prev) == buffer_start_page(buffer)) { to_free = false; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer %pK shares page with %pK\n", alloc->pid, buffer->user_data, prev->user_data); } if (!list_is_last(&buffer->entry, &alloc->buffers)) { next = binder_buffer_next(buffer); if (buffer_start_page(next) == buffer_start_page(buffer)) { to_free = false; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer %pK shares page with %pK\n", alloc->pid, buffer->user_data, next->user_data); } } if (PAGE_ALIGNED(buffer->user_data)) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer start %pK is page aligned\n", alloc->pid, buffer->user_data); to_free = false; } if (to_free) { binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: merge free, buffer %pK does not share page with %pK or %pK\n", alloc->pid, buffer->user_data, prev->user_data, next ? 
next->user_data : NULL); binder_update_page_range(alloc, 0, buffer_start_page(buffer), buffer_start_page(buffer) + PAGE_SIZE); } list_del(&buffer->entry); kfree(buffer); } static void binder_free_buf_locked(struct binder_alloc *alloc, struct binder_buffer *buffer) { size_t size, buffer_size; buffer_size = binder_alloc_buffer_size(alloc, buffer); size = ALIGN(buffer->data_size, sizeof(void *)) + ALIGN(buffer->offsets_size, sizeof(void *)) + ALIGN(buffer->extra_buffers_size, sizeof(void *)); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%d: binder_free_buf %pK size %zd buffer_size %zd\n", alloc->pid, buffer, size, buffer_size); BUG_ON(buffer->free); BUG_ON(size > buffer_size); BUG_ON(buffer->transaction != NULL); BUG_ON(buffer->user_data < alloc->buffer); BUG_ON(buffer->user_data > alloc->buffer + alloc->buffer_size); if (buffer->async_transaction) { alloc->free_async_space += buffer_size + sizeof(struct binder_buffer); binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC_ASYNC, "%d: binder_free_buf size %zd async free %zd\n", alloc->pid, size, alloc->free_async_space); } binder_update_page_range(alloc, 0, (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data), (void __user *)(((uintptr_t) buffer->user_data + buffer_size) & PAGE_MASK)); rb_erase(&buffer->rb_node, &alloc->allocated_buffers); buffer->free = 1; if (!list_is_last(&buffer->entry, &alloc->buffers)) { struct binder_buffer *next = binder_buffer_next(buffer); if (next->free) { rb_erase(&next->rb_node, &alloc->free_buffers); binder_delete_free_buffer(alloc, next); } } if (alloc->buffers.next != &buffer->entry) { struct binder_buffer *prev = binder_buffer_prev(buffer); if (prev->free) { binder_delete_free_buffer(alloc, buffer); rb_erase(&prev->rb_node, &alloc->free_buffers); buffer = prev; } } binder_insert_free_buffer(alloc, buffer); } static void binder_alloc_clear_buf(struct binder_alloc *alloc, struct binder_buffer *buffer); /** * binder_alloc_free_buf() - free a binder buffer * @alloc: binder_alloc for this proc * @buffer: kernel pointer to buffer * * Free the buffer allocated via binder_alloc_new_buf() */ void binder_alloc_free_buf(struct binder_alloc *alloc, struct binder_buffer *buffer) { /* * We could eliminate the call to binder_alloc_clear_buf() * from binder_alloc_deferred_release() by moving this to * binder_alloc_free_buf_locked(). However, that could * increase contention for the alloc mutex if clear_on_free * is used frequently for large buffers. The mutex is not * needed for correctness here. 
*/ if (buffer->clear_on_free) { binder_alloc_clear_buf(alloc, buffer); buffer->clear_on_free = false; } mutex_lock(&alloc->mutex); binder_free_buf_locked(alloc, buffer); mutex_unlock(&alloc->mutex); } /** * binder_alloc_mmap_handler() - map virtual address space for proc * @alloc: alloc structure for this proc * @vma: vma passed to mmap() * * Called by binder_mmap() to initialize the space specified in * vma for allocating binder buffers * * Return: * 0 = success * -EBUSY = address space already mapped * -ENOMEM = failed to map memory to given address space */ int binder_alloc_mmap_handler(struct binder_alloc *alloc, struct vm_area_struct *vma) { int ret; const char *failure_string; struct binder_buffer *buffer; if (unlikely(vma->vm_mm != alloc->mm)) { ret = -EINVAL; failure_string = "invalid vma->vm_mm"; goto err_invalid_mm; } mutex_lock(&binder_alloc_mmap_lock); if (alloc->buffer_size) { ret = -EBUSY; failure_string = "already mapped"; goto err_already_mapped; } alloc->buffer_size = min_t(unsigned long, vma->vm_end - vma->vm_start, SZ_4M); mutex_unlock(&binder_alloc_mmap_lock); alloc->buffer = (void __user *)vma->vm_start; alloc->pages = kcalloc(alloc->buffer_size / PAGE_SIZE, sizeof(alloc->pages[0]), GFP_KERNEL); if (alloc->pages == NULL) { ret = -ENOMEM; failure_string = "alloc page array"; goto err_alloc_pages_failed; } buffer = kzalloc(sizeof(*buffer), GFP_KERNEL); if (!buffer) { ret = -ENOMEM; failure_string = "alloc buffer struct"; goto err_alloc_buf_struct_failed; } buffer->user_data = alloc->buffer; list_add(&buffer->entry, &alloc->buffers); buffer->free = 1; binder_insert_free_buffer(alloc, buffer); alloc->free_async_space = alloc->buffer_size / 2; /* Signal binder_alloc is fully initialized */ binder_alloc_set_vma(alloc, vma); return 0; err_alloc_buf_struct_failed: kfree(alloc->pages); alloc->pages = NULL; err_alloc_pages_failed: alloc->buffer = NULL; mutex_lock(&binder_alloc_mmap_lock); alloc->buffer_size = 0; err_already_mapped: mutex_unlock(&binder_alloc_mmap_lock); err_invalid_mm: binder_alloc_debug(BINDER_DEBUG_USER_ERROR, "%s: %d %lx-%lx %s failed %d\n", __func__, alloc->pid, vma->vm_start, vma->vm_end, failure_string, ret); return ret; } void binder_alloc_deferred_release(struct binder_alloc *alloc) { struct rb_node *n; int buffers, page_count; struct binder_buffer *buffer; buffers = 0; mutex_lock(&alloc->mutex); BUG_ON(alloc->vma); while ((n = rb_first(&alloc->allocated_buffers))) { buffer = rb_entry(n, struct binder_buffer, rb_node); /* Transaction should already have been freed */ BUG_ON(buffer->transaction); if (buffer->clear_on_free) { binder_alloc_clear_buf(alloc, buffer); buffer->clear_on_free = false; } binder_free_buf_locked(alloc, buffer); buffers++; } while (!list_empty(&alloc->buffers)) { buffer = list_first_entry(&alloc->buffers, struct binder_buffer, entry); WARN_ON(!buffer->free); list_del(&buffer->entry); WARN_ON_ONCE(!list_empty(&alloc->buffers)); kfree(buffer); } page_count = 0; if (alloc->pages) { int i; for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { void __user *page_addr; bool on_lru; if (!alloc->pages[i].page_ptr) continue; on_lru = list_lru_del(&binder_alloc_lru, &alloc->pages[i].lru); page_addr = alloc->buffer + i * PAGE_SIZE; binder_alloc_debug(BINDER_DEBUG_BUFFER_ALLOC, "%s: %d: page %d at %pK %s\n", __func__, alloc->pid, i, page_addr, on_lru ? 
"on lru" : "active"); __free_page(alloc->pages[i].page_ptr); page_count++; } kfree(alloc->pages); } mutex_unlock(&alloc->mutex); if (alloc->mm) mmdrop(alloc->mm); binder_alloc_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d buffers %d, pages %d\n", __func__, alloc->pid, buffers, page_count); } static void print_binder_buffer(struct seq_file *m, const char *prefix, struct binder_buffer *buffer) { seq_printf(m, "%s %d: %pK size %zd:%zd:%zd %s\n", prefix, buffer->debug_id, buffer->user_data, buffer->data_size, buffer->offsets_size, buffer->extra_buffers_size, buffer->transaction ? "active" : "delivered"); } /** * binder_alloc_print_allocated() - print buffer info * @m: seq_file for output via seq_printf() * @alloc: binder_alloc for this proc * * Prints information about every buffer associated with * the binder_alloc state to the given seq_file */ void binder_alloc_print_allocated(struct seq_file *m, struct binder_alloc *alloc) { struct rb_node *n; mutex_lock(&alloc->mutex); for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) print_binder_buffer(m, " buffer", rb_entry(n, struct binder_buffer, rb_node)); mutex_unlock(&alloc->mutex); } /** * binder_alloc_print_pages() - print page usage * @m: seq_file for output via seq_printf() * @alloc: binder_alloc for this proc */ void binder_alloc_print_pages(struct seq_file *m, struct binder_alloc *alloc) { struct binder_lru_page *page; int i; int active = 0; int lru = 0; int free = 0; mutex_lock(&alloc->mutex); /* * Make sure the binder_alloc is fully initialized, otherwise we might * read inconsistent state. */ if (binder_alloc_get_vma(alloc) != NULL) { for (i = 0; i < alloc->buffer_size / PAGE_SIZE; i++) { page = &alloc->pages[i]; if (!page->page_ptr) free++; else if (list_empty(&page->lru)) active++; else lru++; } } mutex_unlock(&alloc->mutex); seq_printf(m, " pages: %d:%d:%d\n", active, lru, free); seq_printf(m, " pages high watermark: %zu\n", alloc->pages_high); } /** * binder_alloc_get_allocated_count() - return count of buffers * @alloc: binder_alloc for this proc * * Return: count of allocated buffers */ int binder_alloc_get_allocated_count(struct binder_alloc *alloc) { struct rb_node *n; int count = 0; mutex_lock(&alloc->mutex); for (n = rb_first(&alloc->allocated_buffers); n != NULL; n = rb_next(n)) count++; mutex_unlock(&alloc->mutex); return count; } /** * binder_alloc_vma_close() - invalidate address space * @alloc: binder_alloc for this proc * * Called from binder_vma_close() when releasing address space. * Clears alloc->vma to prevent new incoming transactions from * allocating more buffers. */ void binder_alloc_vma_close(struct binder_alloc *alloc) { binder_alloc_set_vma(alloc, NULL); } /** * binder_alloc_free_page() - shrinker callback to free pages * @item: item to free * @lock: lock protecting the item * @cb_arg: callback argument * * Called from list_lru_walk() in binder_shrink_scan() to free * up pages when the system is under memory pressure. 
*/ enum lru_status binder_alloc_free_page(struct list_head *item, struct list_lru_one *lru, spinlock_t *lock, void *cb_arg) __must_hold(lock) { struct mm_struct *mm = NULL; struct binder_lru_page *page = container_of(item, struct binder_lru_page, lru); struct binder_alloc *alloc; uintptr_t page_addr; size_t index; struct vm_area_struct *vma; alloc = page->alloc; if (!mutex_trylock(&alloc->mutex)) goto err_get_alloc_mutex_failed; if (!page->page_ptr) goto err_page_already_freed; index = page - alloc->pages; page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE; mm = alloc->mm; if (!mmget_not_zero(mm)) goto err_mmget; if (!mmap_read_trylock(mm)) goto err_mmap_read_lock_failed; vma = binder_alloc_get_vma(alloc); list_lru_isolate(lru, item); spin_unlock(lock); if (vma) { trace_binder_unmap_user_start(alloc, index); zap_page_range_single(vma, page_addr, PAGE_SIZE, NULL); trace_binder_unmap_user_end(alloc, index); } mmap_read_unlock(mm); mmput_async(mm); trace_binder_unmap_kernel_start(alloc, index); __free_page(page->page_ptr); page->page_ptr = NULL; trace_binder_unmap_kernel_end(alloc, index); spin_lock(lock); mutex_unlock(&alloc->mutex); return LRU_REMOVED_RETRY; err_mmap_read_lock_failed: mmput_async(mm); err_mmget: err_page_already_freed: mutex_unlock(&alloc->mutex); err_get_alloc_mutex_failed: return LRU_SKIP; } static unsigned long binder_shrink_count(struct shrinker *shrink, struct shrink_control *sc) { return list_lru_count(&binder_alloc_lru); } static unsigned long binder_shrink_scan(struct shrinker *shrink, struct shrink_control *sc) { return list_lru_walk(&binder_alloc_lru, binder_alloc_free_page, NULL, sc->nr_to_scan); } static struct shrinker binder_shrinker = { .count_objects = binder_shrink_count, .scan_objects = binder_shrink_scan, .seeks = DEFAULT_SEEKS, }; /** * binder_alloc_init() - called by binder_open() for per-proc initialization * @alloc: binder_alloc for this proc * * Called from binder_open() to initialize binder_alloc fields for * new binder proc */ void binder_alloc_init(struct binder_alloc *alloc) { alloc->pid = current->group_leader->pid; alloc->mm = current->mm; mmgrab(alloc->mm); mutex_init(&alloc->mutex); INIT_LIST_HEAD(&alloc->buffers); } int binder_alloc_shrinker_init(void) { int ret = list_lru_init(&binder_alloc_lru); if (ret == 0) { ret = register_shrinker(&binder_shrinker, "android-binder"); if (ret) list_lru_destroy(&binder_alloc_lru); } return ret; } void binder_alloc_shrinker_exit(void) { unregister_shrinker(&binder_shrinker); list_lru_destroy(&binder_alloc_lru); } /** * check_buffer() - verify that buffer/offset is safe to access * @alloc: binder_alloc for this proc * @buffer: binder buffer to be accessed * @offset: offset into @buffer data * @bytes: bytes to access from offset * * Check that the @offset/@bytes are within the size of the given * @buffer and that the buffer is currently active and not freeable. * Offsets must also be multiples of sizeof(u32). The kernel is * allowed to touch the buffer in two cases: * * 1) when the buffer is being created: * (buffer->free == 0 && buffer->allow_user_free == 0) * 2) when the buffer is being torn down: * (buffer->free == 0 && buffer->transaction == NULL). 
* * Return: true if the buffer is safe to access */ static inline bool check_buffer(struct binder_alloc *alloc, struct binder_buffer *buffer, binder_size_t offset, size_t bytes) { size_t buffer_size = binder_alloc_buffer_size(alloc, buffer); return buffer_size >= bytes && offset <= buffer_size - bytes && IS_ALIGNED(offset, sizeof(u32)) && !buffer->free && (!buffer->allow_user_free || !buffer->transaction); } /** * binder_alloc_get_page() - get kernel pointer for given buffer offset * @alloc: binder_alloc for this proc * @buffer: binder buffer to be accessed * @buffer_offset: offset into @buffer data * @pgoffp: address to copy final page offset to * * Lookup the struct page corresponding to the address * at @buffer_offset into @buffer->user_data. If @pgoffp is not * NULL, the byte-offset into the page is written there. * * The caller is responsible to ensure that the offset points * to a valid address within the @buffer and that @buffer is * not freeable by the user. Since it can't be freed, we are * guaranteed that the corresponding elements of @alloc->pages[] * cannot change. * * Return: struct page */ static struct page *binder_alloc_get_page(struct binder_alloc *alloc, struct binder_buffer *buffer, binder_size_t buffer_offset, pgoff_t *pgoffp) { binder_size_t buffer_space_offset = buffer_offset + (buffer->user_data - alloc->buffer); pgoff_t pgoff = buffer_space_offset & ~PAGE_MASK; size_t index = buffer_space_offset >> PAGE_SHIFT; struct binder_lru_page *lru_page; lru_page = &alloc->pages[index]; *pgoffp = pgoff; return lru_page->page_ptr; } /** * binder_alloc_clear_buf() - zero out buffer * @alloc: binder_alloc for this proc * @buffer: binder buffer to be cleared * * memset the given buffer to 0 */ static void binder_alloc_clear_buf(struct binder_alloc *alloc, struct binder_buffer *buffer) { size_t bytes = binder_alloc_buffer_size(alloc, buffer); binder_size_t buffer_offset = 0; while (bytes) { unsigned long size; struct page *page; pgoff_t pgoff; page = binder_alloc_get_page(alloc, buffer, buffer_offset, &pgoff); size = min_t(size_t, bytes, PAGE_SIZE - pgoff); memset_page(page, pgoff, 0, size); bytes -= size; buffer_offset += size; } } /** * binder_alloc_copy_user_to_buffer() - copy src user to tgt user * @alloc: binder_alloc for this proc * @buffer: binder buffer to be accessed * @buffer_offset: offset into @buffer data * @from: userspace pointer to source buffer * @bytes: bytes to copy * * Copy bytes from source userspace to target buffer. 
* * Return: bytes remaining to be copied */ unsigned long binder_alloc_copy_user_to_buffer(struct binder_alloc *alloc, struct binder_buffer *buffer, binder_size_t buffer_offset, const void __user *from, size_t bytes) { if (!check_buffer(alloc, buffer, buffer_offset, bytes)) return bytes; while (bytes) { unsigned long size; unsigned long ret; struct page *page; pgoff_t pgoff; void *kptr; page = binder_alloc_get_page(alloc, buffer, buffer_offset, &pgoff); size = min_t(size_t, bytes, PAGE_SIZE - pgoff); kptr = kmap_local_page(page) + pgoff; ret = copy_from_user(kptr, from, size); kunmap_local(kptr); if (ret) return bytes - size + ret; bytes -= size; from += size; buffer_offset += size; } return 0; } static int binder_alloc_do_buffer_copy(struct binder_alloc *alloc, bool to_buffer, struct binder_buffer *buffer, binder_size_t buffer_offset, void *ptr, size_t bytes) { /* All copies must be 32-bit aligned and 32-bit size */ if (!check_buffer(alloc, buffer, buffer_offset, bytes)) return -EINVAL; while (bytes) { unsigned long size; struct page *page; pgoff_t pgoff; page = binder_alloc_get_page(alloc, buffer, buffer_offset, &pgoff); size = min_t(size_t, bytes, PAGE_SIZE - pgoff); if (to_buffer) memcpy_to_page(page, pgoff, ptr, size); else memcpy_from_page(ptr, page, pgoff, size); bytes -= size; pgoff = 0; ptr = ptr + size; buffer_offset += size; } return 0; } int binder_alloc_copy_to_buffer(struct binder_alloc *alloc, struct binder_buffer *buffer, binder_size_t buffer_offset, void *src, size_t bytes) { return binder_alloc_do_buffer_copy(alloc, true, buffer, buffer_offset, src, bytes); } int binder_alloc_copy_from_buffer(struct binder_alloc *alloc, void *dest, struct binder_buffer *buffer, binder_size_t buffer_offset, size_t bytes) { return binder_alloc_do_buffer_copy(alloc, false, buffer, buffer_offset, dest, bytes); }
linux-master
drivers/android/binder_alloc.c
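/*
 * Editor's sketch (not kernel code): a standalone illustration of the size
 * arithmetic in binder_alloc_new_buf_locked() above. Each of the three
 * requested sizes is rounded up to a pointer-sized boundary, overflow is
 * detected the same way (the aligned sum must not be smaller than its
 * inputs), and a zero-size request is padded to sizeof(void *) so every
 * buffer gets a unique address. padded_buf_size() is a hypothetical helper
 * name; align_up() mirrors the kernel's ALIGN() for power-of-two alignment.
 */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Round x up to the next multiple of a (a must be a power of two). */
static size_t align_up(size_t x, size_t a)
{
	return (x + a - 1) & ~(a - 1);
}

/* Returns false on arithmetic overflow, otherwise stores the padded size. */
static bool padded_buf_size(size_t data_size, size_t offsets_size,
			    size_t extra_buffers_size, size_t *out)
{
	size_t data_offsets_size, size;

	data_offsets_size = align_up(data_size, sizeof(void *)) +
			    align_up(offsets_size, sizeof(void *));
	if (data_offsets_size < data_size || data_offsets_size < offsets_size)
		return false; /* invalid size: sum wrapped around */

	size = data_offsets_size + align_up(extra_buffers_size, sizeof(void *));
	if (size < data_offsets_size || size < extra_buffers_size)
		return false; /* invalid extra_buffers_size */

	/* Pad 0-size buffers so they get assigned unique addresses. */
	if (size < sizeof(void *))
		size = sizeof(void *);

	*out = size;
	return true;
}

int main(void)
{
	size_t size;

	if (padded_buf_size(13, 5, 0, &size))
		printf("13+5+0 -> padded size %zu\n", size); /* 24 on LP64 */
	if (!padded_buf_size((size_t)-8, 16, 0, &size))
		printf("overflow rejected\n");
	return 0;
}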
// SPDX-License-Identifier: GPL-2.0-only /* binder.c * * Android IPC Subsystem * * Copyright (C) 2007-2008 Google, Inc. */ /* * Locking overview * * There are 3 main spinlocks which must be acquired in the * order shown: * * 1) proc->outer_lock : protects binder_ref * binder_proc_lock() and binder_proc_unlock() are * used to acq/rel. * 2) node->lock : protects most fields of binder_node. * binder_node_lock() and binder_node_unlock() are * used to acq/rel * 3) proc->inner_lock : protects the thread and node lists * (proc->threads, proc->waiting_threads, proc->nodes) * and all todo lists associated with the binder_proc * (proc->todo, thread->todo, proc->delivered_death and * node->async_todo), as well as thread->transaction_stack * binder_inner_proc_lock() and binder_inner_proc_unlock() * are used to acq/rel * * Any lock under procA must never be nested under any lock at the same * level or below on procB. * * Functions that require a lock held on entry indicate which lock * in the suffix of the function name: * * foo_olocked() : requires node->outer_lock * foo_nlocked() : requires node->lock * foo_ilocked() : requires proc->inner_lock * foo_oilocked(): requires proc->outer_lock and proc->inner_lock * foo_nilocked(): requires node->lock and proc->inner_lock * ... */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/fdtable.h> #include <linux/file.h> #include <linux/freezer.h> #include <linux/fs.h> #include <linux/list.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/nsproxy.h> #include <linux/poll.h> #include <linux/debugfs.h> #include <linux/rbtree.h> #include <linux/sched/signal.h> #include <linux/sched/mm.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/uaccess.h> #include <linux/pid_namespace.h> #include <linux/security.h> #include <linux/spinlock.h> #include <linux/ratelimit.h> #include <linux/syscalls.h> #include <linux/task_work.h> #include <linux/sizes.h> #include <linux/ktime.h> #include <uapi/linux/android/binder.h> #include <linux/cacheflush.h> #include "binder_internal.h" #include "binder_trace.h" static HLIST_HEAD(binder_deferred_list); static DEFINE_MUTEX(binder_deferred_lock); static HLIST_HEAD(binder_devices); static HLIST_HEAD(binder_procs); static DEFINE_MUTEX(binder_procs_lock); static HLIST_HEAD(binder_dead_nodes); static DEFINE_SPINLOCK(binder_dead_nodes_lock); static struct dentry *binder_debugfs_dir_entry_root; static struct dentry *binder_debugfs_dir_entry_proc; static atomic_t binder_last_id; static int proc_show(struct seq_file *m, void *unused); DEFINE_SHOW_ATTRIBUTE(proc); #define FORBIDDEN_MMAP_FLAGS (VM_WRITE) enum { BINDER_DEBUG_USER_ERROR = 1U << 0, BINDER_DEBUG_FAILED_TRANSACTION = 1U << 1, BINDER_DEBUG_DEAD_TRANSACTION = 1U << 2, BINDER_DEBUG_OPEN_CLOSE = 1U << 3, BINDER_DEBUG_DEAD_BINDER = 1U << 4, BINDER_DEBUG_DEATH_NOTIFICATION = 1U << 5, BINDER_DEBUG_READ_WRITE = 1U << 6, BINDER_DEBUG_USER_REFS = 1U << 7, BINDER_DEBUG_THREADS = 1U << 8, BINDER_DEBUG_TRANSACTION = 1U << 9, BINDER_DEBUG_TRANSACTION_COMPLETE = 1U << 10, BINDER_DEBUG_FREE_BUFFER = 1U << 11, BINDER_DEBUG_INTERNAL_REFS = 1U << 12, BINDER_DEBUG_PRIORITY_CAP = 1U << 13, BINDER_DEBUG_SPINLOCKS = 1U << 14, }; static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; module_param_named(debug_mask, binder_debug_mask, uint, 0644); char *binder_devices_param = CONFIG_ANDROID_BINDER_DEVICES; module_param_named(devices, binder_devices_param, charp, 
0444); static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); static int binder_stop_on_user_error; static int binder_set_stop_on_user_error(const char *val, const struct kernel_param *kp) { int ret; ret = param_set_int(val, kp); if (binder_stop_on_user_error < 2) wake_up(&binder_user_error_wait); return ret; } module_param_call(stop_on_user_error, binder_set_stop_on_user_error, param_get_int, &binder_stop_on_user_error, 0644); static __printf(2, 3) void binder_debug(int mask, const char *format, ...) { struct va_format vaf; va_list args; if (binder_debug_mask & mask) { va_start(args, format); vaf.va = &args; vaf.fmt = format; pr_info_ratelimited("%pV", &vaf); va_end(args); } } #define binder_txn_error(x...) \ binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, x) static __printf(1, 2) void binder_user_error(const char *format, ...) { struct va_format vaf; va_list args; if (binder_debug_mask & BINDER_DEBUG_USER_ERROR) { va_start(args, format); vaf.va = &args; vaf.fmt = format; pr_info_ratelimited("%pV", &vaf); va_end(args); } if (binder_stop_on_user_error) binder_stop_on_user_error = 2; } #define binder_set_extended_error(ee, _id, _command, _param) \ do { \ (ee)->id = _id; \ (ee)->command = _command; \ (ee)->param = _param; \ } while (0) #define to_flat_binder_object(hdr) \ container_of(hdr, struct flat_binder_object, hdr) #define to_binder_fd_object(hdr) container_of(hdr, struct binder_fd_object, hdr) #define to_binder_buffer_object(hdr) \ container_of(hdr, struct binder_buffer_object, hdr) #define to_binder_fd_array_object(hdr) \ container_of(hdr, struct binder_fd_array_object, hdr) static struct binder_stats binder_stats; static inline void binder_stats_deleted(enum binder_stat_types type) { atomic_inc(&binder_stats.obj_deleted[type]); } static inline void binder_stats_created(enum binder_stat_types type) { atomic_inc(&binder_stats.obj_created[type]); } struct binder_transaction_log_entry { int debug_id; int debug_id_done; int call_type; int from_proc; int from_thread; int target_handle; int to_proc; int to_thread; int to_node; int data_size; int offsets_size; int return_error_line; uint32_t return_error; uint32_t return_error_param; char context_name[BINDERFS_MAX_NAME + 1]; }; struct binder_transaction_log { atomic_t cur; bool full; struct binder_transaction_log_entry entry[32]; }; static struct binder_transaction_log binder_transaction_log; static struct binder_transaction_log binder_transaction_log_failed; static struct binder_transaction_log_entry *binder_transaction_log_add( struct binder_transaction_log *log) { struct binder_transaction_log_entry *e; unsigned int cur = atomic_inc_return(&log->cur); if (cur >= ARRAY_SIZE(log->entry)) log->full = true; e = &log->entry[cur % ARRAY_SIZE(log->entry)]; WRITE_ONCE(e->debug_id_done, 0); /* * write-barrier to synchronize access to e->debug_id_done. * We make sure the initialized 0 value is seen before * memset() other fields are zeroed by memset. */ smp_wmb(); memset(e, 0, sizeof(*e)); return e; } enum binder_deferred_state { BINDER_DEFERRED_FLUSH = 0x01, BINDER_DEFERRED_RELEASE = 0x02, }; enum { BINDER_LOOPER_STATE_REGISTERED = 0x01, BINDER_LOOPER_STATE_ENTERED = 0x02, BINDER_LOOPER_STATE_EXITED = 0x04, BINDER_LOOPER_STATE_INVALID = 0x08, BINDER_LOOPER_STATE_WAITING = 0x10, BINDER_LOOPER_STATE_POLL = 0x20, }; /** * binder_proc_lock() - Acquire outer lock for given binder_proc * @proc: struct binder_proc to acquire * * Acquires proc->outer_lock. Used to protect binder_ref * structures associated with the given proc. 
*/ #define binder_proc_lock(proc) _binder_proc_lock(proc, __LINE__) static void _binder_proc_lock(struct binder_proc *proc, int line) __acquires(&proc->outer_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); spin_lock(&proc->outer_lock); } /** * binder_proc_unlock() - Release spinlock for given binder_proc * @proc: struct binder_proc to acquire * * Release lock acquired via binder_proc_lock() */ #define binder_proc_unlock(proc) _binder_proc_unlock(proc, __LINE__) static void _binder_proc_unlock(struct binder_proc *proc, int line) __releases(&proc->outer_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); spin_unlock(&proc->outer_lock); } /** * binder_inner_proc_lock() - Acquire inner lock for given binder_proc * @proc: struct binder_proc to acquire * * Acquires proc->inner_lock. Used to protect todo lists */ #define binder_inner_proc_lock(proc) _binder_inner_proc_lock(proc, __LINE__) static void _binder_inner_proc_lock(struct binder_proc *proc, int line) __acquires(&proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); spin_lock(&proc->inner_lock); } /** * binder_inner_proc_unlock() - Release inner lock for given binder_proc * @proc: struct binder_proc to acquire * * Release lock acquired via binder_inner_proc_lock() */ #define binder_inner_proc_unlock(proc) _binder_inner_proc_unlock(proc, __LINE__) static void _binder_inner_proc_unlock(struct binder_proc *proc, int line) __releases(&proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); spin_unlock(&proc->inner_lock); } /** * binder_node_lock() - Acquire spinlock for given binder_node * @node: struct binder_node to acquire * * Acquires node->lock. Used to protect binder_node fields */ #define binder_node_lock(node) _binder_node_lock(node, __LINE__) static void _binder_node_lock(struct binder_node *node, int line) __acquires(&node->lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); spin_lock(&node->lock); } /** * binder_node_unlock() - Release spinlock for given binder_proc * @node: struct binder_node to acquire * * Release lock acquired via binder_node_lock() */ #define binder_node_unlock(node) _binder_node_unlock(node, __LINE__) static void _binder_node_unlock(struct binder_node *node, int line) __releases(&node->lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); spin_unlock(&node->lock); } /** * binder_node_inner_lock() - Acquire node and inner locks * @node: struct binder_node to acquire * * Acquires node->lock. If node->proc also acquires * proc->inner_lock. 
Used to protect binder_node fields */ #define binder_node_inner_lock(node) _binder_node_inner_lock(node, __LINE__) static void _binder_node_inner_lock(struct binder_node *node, int line) __acquires(&node->lock) __acquires(&node->proc->inner_lock) { binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); spin_lock(&node->lock); if (node->proc) binder_inner_proc_lock(node->proc); else /* annotation for sparse */ __acquire(&node->proc->inner_lock); } /** * binder_node_inner_unlock() - Release node and inner locks * @node: struct binder_node to acquire * * Release lock acquired via binder_node_lock() */ #define binder_node_inner_unlock(node) _binder_node_inner_unlock(node, __LINE__) static void _binder_node_inner_unlock(struct binder_node *node, int line) __releases(&node->lock) __releases(&node->proc->inner_lock) { struct binder_proc *proc = node->proc; binder_debug(BINDER_DEBUG_SPINLOCKS, "%s: line=%d\n", __func__, line); if (proc) binder_inner_proc_unlock(proc); else /* annotation for sparse */ __release(&node->proc->inner_lock); spin_unlock(&node->lock); } static bool binder_worklist_empty_ilocked(struct list_head *list) { return list_empty(list); } /** * binder_worklist_empty() - Check if no items on the work list * @proc: binder_proc associated with list * @list: list to check * * Return: true if there are no items on list, else false */ static bool binder_worklist_empty(struct binder_proc *proc, struct list_head *list) { bool ret; binder_inner_proc_lock(proc); ret = binder_worklist_empty_ilocked(list); binder_inner_proc_unlock(proc); return ret; } /** * binder_enqueue_work_ilocked() - Add an item to the work list * @work: struct binder_work to add to list * @target_list: list to add work to * * Adds the work to the specified list. Asserts that work * is not already on a list. * * Requires the proc->inner_lock to be held. */ static void binder_enqueue_work_ilocked(struct binder_work *work, struct list_head *target_list) { BUG_ON(target_list == NULL); BUG_ON(work->entry.next && !list_empty(&work->entry)); list_add_tail(&work->entry, target_list); } /** * binder_enqueue_deferred_thread_work_ilocked() - Add deferred thread work * @thread: thread to queue work to * @work: struct binder_work to add to list * * Adds the work to the todo list of the thread. Doesn't set the process_todo * flag, which means that (if it wasn't already set) the thread will go to * sleep without handling this work when it calls read. * * Requires the proc->inner_lock to be held. */ static void binder_enqueue_deferred_thread_work_ilocked(struct binder_thread *thread, struct binder_work *work) { WARN_ON(!list_empty(&thread->waiting_thread_node)); binder_enqueue_work_ilocked(work, &thread->todo); } /** * binder_enqueue_thread_work_ilocked() - Add an item to the thread work list * @thread: thread to queue work to * @work: struct binder_work to add to list * * Adds the work to the todo list of the thread, and enables processing * of the todo queue. * * Requires the proc->inner_lock to be held. */ static void binder_enqueue_thread_work_ilocked(struct binder_thread *thread, struct binder_work *work) { WARN_ON(!list_empty(&thread->waiting_thread_node)); binder_enqueue_work_ilocked(work, &thread->todo); thread->process_todo = true; } /** * binder_enqueue_thread_work() - Add an item to the thread work list * @thread: thread to queue work to * @work: struct binder_work to add to list * * Adds the work to the todo list of the thread, and enables processing * of the todo queue. 
*/ static void binder_enqueue_thread_work(struct binder_thread *thread, struct binder_work *work) { binder_inner_proc_lock(thread->proc); binder_enqueue_thread_work_ilocked(thread, work); binder_inner_proc_unlock(thread->proc); } static void binder_dequeue_work_ilocked(struct binder_work *work) { list_del_init(&work->entry); } /** * binder_dequeue_work() - Removes an item from the work list * @proc: binder_proc associated with list * @work: struct binder_work to remove from list * * Removes the specified work item from whatever list it is on. * Can safely be called if work is not on any list. */ static void binder_dequeue_work(struct binder_proc *proc, struct binder_work *work) { binder_inner_proc_lock(proc); binder_dequeue_work_ilocked(work); binder_inner_proc_unlock(proc); } static struct binder_work *binder_dequeue_work_head_ilocked( struct list_head *list) { struct binder_work *w; w = list_first_entry_or_null(list, struct binder_work, entry); if (w) list_del_init(&w->entry); return w; } static void binder_defer_work(struct binder_proc *proc, enum binder_deferred_state defer); static void binder_free_thread(struct binder_thread *thread); static void binder_free_proc(struct binder_proc *proc); static void binder_inc_node_tmpref_ilocked(struct binder_node *node); static bool binder_has_work_ilocked(struct binder_thread *thread, bool do_proc_work) { return thread->process_todo || thread->looper_need_return || (do_proc_work && !binder_worklist_empty_ilocked(&thread->proc->todo)); } static bool binder_has_work(struct binder_thread *thread, bool do_proc_work) { bool has_work; binder_inner_proc_lock(thread->proc); has_work = binder_has_work_ilocked(thread, do_proc_work); binder_inner_proc_unlock(thread->proc); return has_work; } static bool binder_available_for_proc_work_ilocked(struct binder_thread *thread) { return !thread->transaction_stack && binder_worklist_empty_ilocked(&thread->todo) && (thread->looper & (BINDER_LOOPER_STATE_ENTERED | BINDER_LOOPER_STATE_REGISTERED)); } static void binder_wakeup_poll_threads_ilocked(struct binder_proc *proc, bool sync) { struct rb_node *n; struct binder_thread *thread; for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { thread = rb_entry(n, struct binder_thread, rb_node); if (thread->looper & BINDER_LOOPER_STATE_POLL && binder_available_for_proc_work_ilocked(thread)) { if (sync) wake_up_interruptible_sync(&thread->wait); else wake_up_interruptible(&thread->wait); } } } /** * binder_select_thread_ilocked() - selects a thread for doing proc work. * @proc: process to select a thread from * * Note that calling this function moves the thread off the waiting_threads * list, so it can only be woken up by the caller of this function, or a * signal. Therefore, callers *should* always wake up the thread this function * returns. * * Return: If there's a thread currently waiting for process work, * returns that thread. Otherwise returns NULL. */ static struct binder_thread * binder_select_thread_ilocked(struct binder_proc *proc) { struct binder_thread *thread; assert_spin_locked(&proc->inner_lock); thread = list_first_entry_or_null(&proc->waiting_threads, struct binder_thread, waiting_thread_node); if (thread) list_del_init(&thread->waiting_thread_node); return thread; } /** * binder_wakeup_thread_ilocked() - wakes up a thread for doing proc work. * @proc: process to wake up a thread in * @thread: specific thread to wake-up (may be NULL) * @sync: whether to do a synchronous wake-up * * This function wakes up a thread in the @proc process. 
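 * A typical select-then-wake sequence, as binder_wakeup_proc_ilocked()
 * below does, is:
 *
 *   thread = binder_select_thread_ilocked(proc);
 *   binder_wakeup_thread_ilocked(proc, thread, false);
 *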
* The caller may provide a specific thread to wake-up in * the @thread parameter. If @thread is NULL, this function * will wake up threads that have called poll(). * * Note that for this function to work as expected, callers * should first call binder_select_thread() to find a thread * to handle the work (if they don't have a thread already), * and pass the result into the @thread parameter. */ static void binder_wakeup_thread_ilocked(struct binder_proc *proc, struct binder_thread *thread, bool sync) { assert_spin_locked(&proc->inner_lock); if (thread) { if (sync) wake_up_interruptible_sync(&thread->wait); else wake_up_interruptible(&thread->wait); return; } /* Didn't find a thread waiting for proc work; this can happen * in two scenarios: * 1. All threads are busy handling transactions * In that case, one of those threads should call back into * the kernel driver soon and pick up this work. * 2. Threads are using the (e)poll interface, in which case * they may be blocked on the waitqueue without having been * added to waiting_threads. For this case, we just iterate * over all threads not handling transaction work, and * wake them all up. We wake all because we don't know whether * a thread that called into (e)poll is handling non-binder * work currently. */ binder_wakeup_poll_threads_ilocked(proc, sync); } static void binder_wakeup_proc_ilocked(struct binder_proc *proc) { struct binder_thread *thread = binder_select_thread_ilocked(proc); binder_wakeup_thread_ilocked(proc, thread, /* sync = */false); } static void binder_set_nice(long nice) { long min_nice; if (can_nice(current, nice)) { set_user_nice(current, nice); return; } min_nice = rlimit_to_nice(rlimit(RLIMIT_NICE)); binder_debug(BINDER_DEBUG_PRIORITY_CAP, "%d: nice value %ld not allowed use %ld instead\n", current->pid, nice, min_nice); set_user_nice(current, min_nice); if (min_nice <= MAX_NICE) return; binder_user_error("%d RLIMIT_NICE not set\n", current->pid); } static struct binder_node *binder_get_node_ilocked(struct binder_proc *proc, binder_uintptr_t ptr) { struct rb_node *n = proc->nodes.rb_node; struct binder_node *node; assert_spin_locked(&proc->inner_lock); while (n) { node = rb_entry(n, struct binder_node, rb_node); if (ptr < node->ptr) n = n->rb_left; else if (ptr > node->ptr) n = n->rb_right; else { /* * take an implicit weak reference * to ensure node stays alive until * call to binder_put_node() */ binder_inc_node_tmpref_ilocked(node); return node; } } return NULL; } static struct binder_node *binder_get_node(struct binder_proc *proc, binder_uintptr_t ptr) { struct binder_node *node; binder_inner_proc_lock(proc); node = binder_get_node_ilocked(proc, ptr); binder_inner_proc_unlock(proc); return node; } static struct binder_node *binder_init_node_ilocked( struct binder_proc *proc, struct binder_node *new_node, struct flat_binder_object *fp) { struct rb_node **p = &proc->nodes.rb_node; struct rb_node *parent = NULL; struct binder_node *node; binder_uintptr_t ptr = fp ? fp->binder : 0; binder_uintptr_t cookie = fp ? fp->cookie : 0; __u32 flags = fp ? fp->flags : 0; assert_spin_locked(&proc->inner_lock); while (*p) { parent = *p; node = rb_entry(parent, struct binder_node, rb_node); if (ptr < node->ptr) p = &(*p)->rb_left; else if (ptr > node->ptr) p = &(*p)->rb_right; else { /* * A matching node is already in * the rb tree. Abandon the init * and return it. 
*/ binder_inc_node_tmpref_ilocked(node); return node; } } node = new_node; binder_stats_created(BINDER_STAT_NODE); node->tmp_refs++; rb_link_node(&node->rb_node, parent, p); rb_insert_color(&node->rb_node, &proc->nodes); node->debug_id = atomic_inc_return(&binder_last_id); node->proc = proc; node->ptr = ptr; node->cookie = cookie; node->work.type = BINDER_WORK_NODE; node->min_priority = flags & FLAT_BINDER_FLAG_PRIORITY_MASK; node->accept_fds = !!(flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); node->txn_security_ctx = !!(flags & FLAT_BINDER_FLAG_TXN_SECURITY_CTX); spin_lock_init(&node->lock); INIT_LIST_HEAD(&node->work.entry); INIT_LIST_HEAD(&node->async_todo); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx created\n", proc->pid, current->pid, node->debug_id, (u64)node->ptr, (u64)node->cookie); return node; } static struct binder_node *binder_new_node(struct binder_proc *proc, struct flat_binder_object *fp) { struct binder_node *node; struct binder_node *new_node = kzalloc(sizeof(*node), GFP_KERNEL); if (!new_node) return NULL; binder_inner_proc_lock(proc); node = binder_init_node_ilocked(proc, new_node, fp); binder_inner_proc_unlock(proc); if (node != new_node) /* * The node was already added by another thread */ kfree(new_node); return node; } static void binder_free_node(struct binder_node *node) { kfree(node); binder_stats_deleted(BINDER_STAT_NODE); } static int binder_inc_node_nilocked(struct binder_node *node, int strong, int internal, struct list_head *target_list) { struct binder_proc *proc = node->proc; assert_spin_locked(&node->lock); if (proc) assert_spin_locked(&proc->inner_lock); if (strong) { if (internal) { if (target_list == NULL && node->internal_strong_refs == 0 && !(node->proc && node == node->proc->context->binder_context_mgr_node && node->has_strong_ref)) { pr_err("invalid inc strong node for %d\n", node->debug_id); return -EINVAL; } node->internal_strong_refs++; } else node->local_strong_refs++; if (!node->has_strong_ref && target_list) { struct binder_thread *thread = container_of(target_list, struct binder_thread, todo); binder_dequeue_work_ilocked(&node->work); BUG_ON(&thread->todo != target_list); binder_enqueue_deferred_thread_work_ilocked(thread, &node->work); } } else { if (!internal) node->local_weak_refs++; if (!node->has_weak_ref && list_empty(&node->work.entry)) { if (target_list == NULL) { pr_err("invalid inc weak node for %d\n", node->debug_id); return -EINVAL; } /* * See comment above */ binder_enqueue_work_ilocked(&node->work, target_list); } } return 0; } static int binder_inc_node(struct binder_node *node, int strong, int internal, struct list_head *target_list) { int ret; binder_node_inner_lock(node); ret = binder_inc_node_nilocked(node, strong, internal, target_list); binder_node_inner_unlock(node); return ret; } static bool binder_dec_node_nilocked(struct binder_node *node, int strong, int internal) { struct binder_proc *proc = node->proc; assert_spin_locked(&node->lock); if (proc) assert_spin_locked(&proc->inner_lock); if (strong) { if (internal) node->internal_strong_refs--; else node->local_strong_refs--; if (node->local_strong_refs || node->internal_strong_refs) return false; } else { if (!internal) node->local_weak_refs--; if (node->local_weak_refs || node->tmp_refs || !hlist_empty(&node->refs)) return false; } if (proc && (node->has_strong_ref || node->has_weak_ref)) { if (list_empty(&node->work.entry)) { binder_enqueue_work_ilocked(&node->work, &proc->todo); binder_wakeup_proc_ilocked(proc); } } else { if 
(hlist_empty(&node->refs) && !node->local_strong_refs &&
		    !node->local_weak_refs && !node->tmp_refs) {
			if (proc) {
				binder_dequeue_work_ilocked(&node->work);
				rb_erase(&node->rb_node, &proc->nodes);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "refless node %d deleted\n",
					     node->debug_id);
			} else {
				BUG_ON(!list_empty(&node->work.entry));
				spin_lock(&binder_dead_nodes_lock);
				/*
				 * tmp_refs could have changed so
				 * check it again
				 */
				if (node->tmp_refs) {
					spin_unlock(&binder_dead_nodes_lock);
					return false;
				}
				hlist_del(&node->dead_node);
				spin_unlock(&binder_dead_nodes_lock);
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
					     "dead node %d deleted\n",
					     node->debug_id);
			}
			return true;
		}
	}
	return false;
}

static void binder_dec_node(struct binder_node *node, int strong, int internal)
{
	bool free_node;

	binder_node_inner_lock(node);
	free_node = binder_dec_node_nilocked(node, strong, internal);
	binder_node_inner_unlock(node);
	if (free_node)
		binder_free_node(node);
}

static void binder_inc_node_tmpref_ilocked(struct binder_node *node)
{
	/*
	 * No call to binder_inc_node() is needed since we
	 * don't need to inform userspace of any changes to
	 * tmp_refs
	 */
	node->tmp_refs++;
}

/**
 * binder_inc_node_tmpref() - take a temporary reference on node
 * @node:	node to reference
 *
 * Take reference on node to prevent the node from being freed
 * while referenced only by a local variable. The inner lock is
 * needed to serialize with the node work on the queue (which
 * isn't needed after the node is dead). If the node is dead
 * (node->proc is NULL), use binder_dead_nodes_lock to protect
 * node->tmp_refs against dead-node-only cases where the node
 * lock cannot be acquired (e.g. traversing the dead node list
 * to print nodes)
 */
static void binder_inc_node_tmpref(struct binder_node *node)
{
	binder_node_lock(node);
	if (node->proc)
		binder_inner_proc_lock(node->proc);
	else
		spin_lock(&binder_dead_nodes_lock);
	binder_inc_node_tmpref_ilocked(node);
	if (node->proc)
		binder_inner_proc_unlock(node->proc);
	else
		spin_unlock(&binder_dead_nodes_lock);
	binder_node_unlock(node);
}

/**
 * binder_dec_node_tmpref() - remove a temporary reference on node
 * @node:	node to reference
 *
 * Release temporary reference on node taken via binder_inc_node_tmpref()
 */
static void binder_dec_node_tmpref(struct binder_node *node)
{
	bool free_node;

	binder_node_inner_lock(node);
	if (!node->proc)
		spin_lock(&binder_dead_nodes_lock);
	else
		__acquire(&binder_dead_nodes_lock);
	node->tmp_refs--;
	BUG_ON(node->tmp_refs < 0);
	if (!node->proc)
		spin_unlock(&binder_dead_nodes_lock);
	else
		__release(&binder_dead_nodes_lock);
	/*
	 * Call binder_dec_node_nilocked() to check if all refcounts are 0
	 * and cleanup is needed. Calling with strong=0 and internal=1
	 * causes no actual reference to be released in that call.
	 * If that changes, a change is needed here too.
*/ free_node = binder_dec_node_nilocked(node, 0, 1); binder_node_inner_unlock(node); if (free_node) binder_free_node(node); } static void binder_put_node(struct binder_node *node) { binder_dec_node_tmpref(node); } static struct binder_ref *binder_get_ref_olocked(struct binder_proc *proc, u32 desc, bool need_strong_ref) { struct rb_node *n = proc->refs_by_desc.rb_node; struct binder_ref *ref; while (n) { ref = rb_entry(n, struct binder_ref, rb_node_desc); if (desc < ref->data.desc) { n = n->rb_left; } else if (desc > ref->data.desc) { n = n->rb_right; } else if (need_strong_ref && !ref->data.strong) { binder_user_error("tried to use weak ref as strong ref\n"); return NULL; } else { return ref; } } return NULL; } /** * binder_get_ref_for_node_olocked() - get the ref associated with given node * @proc: binder_proc that owns the ref * @node: binder_node of target * @new_ref: newly allocated binder_ref to be initialized or %NULL * * Look up the ref for the given node and return it if it exists * * If it doesn't exist and the caller provides a newly allocated * ref, initialize the fields of the newly allocated ref and insert * into the given proc rb_trees and node refs list. * * Return: the ref for node. It is possible that another thread * allocated/initialized the ref first in which case the * returned ref would be different than the passed-in * new_ref. new_ref must be kfree'd by the caller in * this case. */ static struct binder_ref *binder_get_ref_for_node_olocked( struct binder_proc *proc, struct binder_node *node, struct binder_ref *new_ref) { struct binder_context *context = proc->context; struct rb_node **p = &proc->refs_by_node.rb_node; struct rb_node *parent = NULL; struct binder_ref *ref; struct rb_node *n; while (*p) { parent = *p; ref = rb_entry(parent, struct binder_ref, rb_node_node); if (node < ref->node) p = &(*p)->rb_left; else if (node > ref->node) p = &(*p)->rb_right; else return ref; } if (!new_ref) return NULL; binder_stats_created(BINDER_STAT_REF); new_ref->data.debug_id = atomic_inc_return(&binder_last_id); new_ref->proc = proc; new_ref->node = node; rb_link_node(&new_ref->rb_node_node, parent, p); rb_insert_color(&new_ref->rb_node_node, &proc->refs_by_node); new_ref->data.desc = (node == context->binder_context_mgr_node) ? 
0 : 1; for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { ref = rb_entry(n, struct binder_ref, rb_node_desc); if (ref->data.desc > new_ref->data.desc) break; new_ref->data.desc = ref->data.desc + 1; } p = &proc->refs_by_desc.rb_node; while (*p) { parent = *p; ref = rb_entry(parent, struct binder_ref, rb_node_desc); if (new_ref->data.desc < ref->data.desc) p = &(*p)->rb_left; else if (new_ref->data.desc > ref->data.desc) p = &(*p)->rb_right; else BUG(); } rb_link_node(&new_ref->rb_node_desc, parent, p); rb_insert_color(&new_ref->rb_node_desc, &proc->refs_by_desc); binder_node_lock(node); hlist_add_head(&new_ref->node_entry, &node->refs); binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d new ref %d desc %d for node %d\n", proc->pid, new_ref->data.debug_id, new_ref->data.desc, node->debug_id); binder_node_unlock(node); return new_ref; } static void binder_cleanup_ref_olocked(struct binder_ref *ref) { bool delete_node = false; binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d delete ref %d desc %d for node %d\n", ref->proc->pid, ref->data.debug_id, ref->data.desc, ref->node->debug_id); rb_erase(&ref->rb_node_desc, &ref->proc->refs_by_desc); rb_erase(&ref->rb_node_node, &ref->proc->refs_by_node); binder_node_inner_lock(ref->node); if (ref->data.strong) binder_dec_node_nilocked(ref->node, 1, 1); hlist_del(&ref->node_entry); delete_node = binder_dec_node_nilocked(ref->node, 0, 1); binder_node_inner_unlock(ref->node); /* * Clear ref->node unless we want the caller to free the node */ if (!delete_node) { /* * The caller uses ref->node to determine * whether the node needs to be freed. Clear * it since the node is still alive. */ ref->node = NULL; } if (ref->death) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "%d delete ref %d desc %d has death notification\n", ref->proc->pid, ref->data.debug_id, ref->data.desc); binder_dequeue_work(ref->proc, &ref->death->work); binder_stats_deleted(BINDER_STAT_DEATH); } binder_stats_deleted(BINDER_STAT_REF); } /** * binder_inc_ref_olocked() - increment the ref for given handle * @ref: ref to be incremented * @strong: if true, strong increment, else weak * @target_list: list to queue node work on * * Increment the ref. @ref->proc->outer_lock must be held on entry * * Return: 0, if successful, else errno */ static int binder_inc_ref_olocked(struct binder_ref *ref, int strong, struct list_head *target_list) { int ret; if (strong) { if (ref->data.strong == 0) { ret = binder_inc_node(ref->node, 1, 1, target_list); if (ret) return ret; } ref->data.strong++; } else { if (ref->data.weak == 0) { ret = binder_inc_node(ref->node, 0, 1, target_list); if (ret) return ret; } ref->data.weak++; } return 0; } /** * binder_dec_ref_olocked() - dec the ref for given handle * @ref: ref to be decremented * @strong: if true, strong decrement, else weak * * Decrement the ref. * * Return: %true if ref is cleaned up and ready to be freed. 
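 *
 * The caller must free a cleaned-up ref once the proc lock is dropped;
 * e.g. (sketch of what binder_update_ref_for_handle() below does):
 *
 *   delete_ref = binder_dec_ref_olocked(ref, strong);
 *   binder_proc_unlock(proc);
 *   if (delete_ref)
 *           binder_free_ref(ref);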
*/ static bool binder_dec_ref_olocked(struct binder_ref *ref, int strong) { if (strong) { if (ref->data.strong == 0) { binder_user_error("%d invalid dec strong, ref %d desc %d s %d w %d\n", ref->proc->pid, ref->data.debug_id, ref->data.desc, ref->data.strong, ref->data.weak); return false; } ref->data.strong--; if (ref->data.strong == 0) binder_dec_node(ref->node, strong, 1); } else { if (ref->data.weak == 0) { binder_user_error("%d invalid dec weak, ref %d desc %d s %d w %d\n", ref->proc->pid, ref->data.debug_id, ref->data.desc, ref->data.strong, ref->data.weak); return false; } ref->data.weak--; } if (ref->data.strong == 0 && ref->data.weak == 0) { binder_cleanup_ref_olocked(ref); return true; } return false; } /** * binder_get_node_from_ref() - get the node from the given proc/desc * @proc: proc containing the ref * @desc: the handle associated with the ref * @need_strong_ref: if true, only return node if ref is strong * @rdata: the id/refcount data for the ref * * Given a proc and ref handle, return the associated binder_node * * Return: a binder_node or NULL if not found or not strong when strong required */ static struct binder_node *binder_get_node_from_ref( struct binder_proc *proc, u32 desc, bool need_strong_ref, struct binder_ref_data *rdata) { struct binder_node *node; struct binder_ref *ref; binder_proc_lock(proc); ref = binder_get_ref_olocked(proc, desc, need_strong_ref); if (!ref) goto err_no_ref; node = ref->node; /* * Take an implicit reference on the node to ensure * it stays alive until the call to binder_put_node() */ binder_inc_node_tmpref(node); if (rdata) *rdata = ref->data; binder_proc_unlock(proc); return node; err_no_ref: binder_proc_unlock(proc); return NULL; } /** * binder_free_ref() - free the binder_ref * @ref: ref to free * * Free the binder_ref. Free the binder_node indicated by ref->node * (if non-NULL) and the binder_ref_death indicated by ref->death. */ static void binder_free_ref(struct binder_ref *ref) { if (ref->node) binder_free_node(ref->node); kfree(ref->death); kfree(ref); } /** * binder_update_ref_for_handle() - inc/dec the ref for given handle * @proc: proc containing the ref * @desc: the handle associated with the ref * @increment: true=inc reference, false=dec reference * @strong: true=strong reference, false=weak reference * @rdata: the id/refcount data for the ref * * Given a proc and ref handle, increment or decrement the ref * according to "increment" arg. * * Return: 0 if successful, else errno */ static int binder_update_ref_for_handle(struct binder_proc *proc, uint32_t desc, bool increment, bool strong, struct binder_ref_data *rdata) { int ret = 0; struct binder_ref *ref; bool delete_ref = false; binder_proc_lock(proc); ref = binder_get_ref_olocked(proc, desc, strong); if (!ref) { ret = -EINVAL; goto err_no_ref; } if (increment) ret = binder_inc_ref_olocked(ref, strong, NULL); else delete_ref = binder_dec_ref_olocked(ref, strong); if (rdata) *rdata = ref->data; binder_proc_unlock(proc); if (delete_ref) binder_free_ref(ref); return ret; err_no_ref: binder_proc_unlock(proc); return ret; } /** * binder_dec_ref_for_handle() - dec the ref for given handle * @proc: proc containing the ref * @desc: the handle associated with the ref * @strong: true=strong reference, false=weak reference * @rdata: the id/refcount data for the ref * * Just calls binder_update_ref_for_handle() to decrement the ref. 
* * Return: 0 if successful, else errno */ static int binder_dec_ref_for_handle(struct binder_proc *proc, uint32_t desc, bool strong, struct binder_ref_data *rdata) { return binder_update_ref_for_handle(proc, desc, false, strong, rdata); } /** * binder_inc_ref_for_node() - increment the ref for given proc/node * @proc: proc containing the ref * @node: target node * @strong: true=strong reference, false=weak reference * @target_list: worklist to use if node is incremented * @rdata: the id/refcount data for the ref * * Given a proc and node, increment the ref. Create the ref if it * doesn't already exist * * Return: 0 if successful, else errno */ static int binder_inc_ref_for_node(struct binder_proc *proc, struct binder_node *node, bool strong, struct list_head *target_list, struct binder_ref_data *rdata) { struct binder_ref *ref; struct binder_ref *new_ref = NULL; int ret = 0; binder_proc_lock(proc); ref = binder_get_ref_for_node_olocked(proc, node, NULL); if (!ref) { binder_proc_unlock(proc); new_ref = kzalloc(sizeof(*ref), GFP_KERNEL); if (!new_ref) return -ENOMEM; binder_proc_lock(proc); ref = binder_get_ref_for_node_olocked(proc, node, new_ref); } ret = binder_inc_ref_olocked(ref, strong, target_list); *rdata = ref->data; if (ret && ref == new_ref) { /* * Cleanup the failed reference here as the target * could now be dead and have already released its * references by now. Calling on the new reference * with strong=0 and a tmp_refs will not decrement * the node. The new_ref gets kfree'd below. */ binder_cleanup_ref_olocked(new_ref); ref = NULL; } binder_proc_unlock(proc); if (new_ref && ref != new_ref) /* * Another thread created the ref first so * free the one we allocated */ kfree(new_ref); return ret; } static void binder_pop_transaction_ilocked(struct binder_thread *target_thread, struct binder_transaction *t) { BUG_ON(!target_thread); assert_spin_locked(&target_thread->proc->inner_lock); BUG_ON(target_thread->transaction_stack != t); BUG_ON(target_thread->transaction_stack->from != target_thread); target_thread->transaction_stack = target_thread->transaction_stack->from_parent; t->from = NULL; } /** * binder_thread_dec_tmpref() - decrement thread->tmp_ref * @thread: thread to decrement * * A thread needs to be kept alive while being used to create or * handle a transaction. binder_get_txn_from() is used to safely * extract t->from from a binder_transaction and keep the thread * indicated by t->from from being freed. When done with that * binder_thread, this function is called to decrement the * tmp_ref and free if appropriate (thread has been released * and no transaction being processed by the driver) */ static void binder_thread_dec_tmpref(struct binder_thread *thread) { /* * atomic is used to protect the counter value while * it cannot reach zero or thread->is_dead is false */ binder_inner_proc_lock(thread->proc); atomic_dec(&thread->tmp_ref); if (thread->is_dead && !atomic_read(&thread->tmp_ref)) { binder_inner_proc_unlock(thread->proc); binder_free_thread(thread); return; } binder_inner_proc_unlock(thread->proc); } /** * binder_proc_dec_tmpref() - decrement proc->tmp_ref * @proc: proc to decrement * * A binder_proc needs to be kept alive while being used to create or * handle a transaction. proc->tmp_ref is incremented when * creating a new transaction or the binder_proc is currently in-use * by threads that are being released. 
When done with the binder_proc,
 * this function is called to decrement the counter and free the
 * proc if appropriate (proc has been released, all threads have
 * been released and not currently in-use to process a transaction).
 */
static void binder_proc_dec_tmpref(struct binder_proc *proc)
{
	binder_inner_proc_lock(proc);
	proc->tmp_ref--;
	if (proc->is_dead && RB_EMPTY_ROOT(&proc->threads) &&
			!proc->tmp_ref) {
		binder_inner_proc_unlock(proc);
		binder_free_proc(proc);
		return;
	}
	binder_inner_proc_unlock(proc);
}

/**
 * binder_get_txn_from() - safely extract the "from" thread in transaction
 * @t:	binder transaction for t->from
 *
 * Atomically return the "from" thread and increment the tmp_ref
 * count for the thread to ensure it stays alive until
 * binder_thread_dec_tmpref() is called.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from(
		struct binder_transaction *t)
{
	struct binder_thread *from;

	spin_lock(&t->lock);
	from = t->from;
	if (from)
		atomic_inc(&from->tmp_ref);
	spin_unlock(&t->lock);
	return from;
}

/**
 * binder_get_txn_from_and_acq_inner() - get t->from and acquire inner lock
 * @t:	binder transaction for t->from
 *
 * Same as binder_get_txn_from() except it also acquires the proc->inner_lock
 * to guarantee that the thread cannot be released while operating on it.
 * The caller must call binder_inner_proc_unlock() to release the inner lock
 * as well as call binder_thread_dec_tmpref() to release the reference.
 *
 * Return: the value of t->from
 */
static struct binder_thread *binder_get_txn_from_and_acq_inner(
		struct binder_transaction *t)
	__acquires(&t->from->proc->inner_lock)
{
	struct binder_thread *from;

	from = binder_get_txn_from(t);
	if (!from) {
		__acquire(&from->proc->inner_lock);
		return NULL;
	}
	binder_inner_proc_lock(from->proc);
	if (t->from) {
		BUG_ON(from != t->from);
		return from;
	}
	binder_inner_proc_unlock(from->proc);
	__acquire(&from->proc->inner_lock);
	binder_thread_dec_tmpref(from);
	return NULL;
}

/**
 * binder_free_txn_fixups() - free unprocessed fd fixups
 * @t:	binder transaction being torn down
 *
 * If the transaction is being torn down prior to being
 * processed by the target process, free all of the
 * fd fixups and fput the file structs. It is safe to
 * call this function after the fixups have been
 * processed -- in that case, the list will be empty.
 */
static void binder_free_txn_fixups(struct binder_transaction *t)
{
	struct binder_txn_fd_fixup *fixup, *tmp;

	list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) {
		fput(fixup->file);
		if (fixup->target_fd >= 0)
			put_unused_fd(fixup->target_fd);
		list_del(&fixup->fixup_entry);
		kfree(fixup);
	}
}

static void binder_txn_latency_free(struct binder_transaction *t)
{
	int from_proc, from_thread, to_proc, to_thread;

	spin_lock(&t->lock);
	from_proc = t->from ? t->from->proc->pid : 0;
	from_thread = t->from ? t->from->pid : 0;
	to_proc = t->to_proc ? t->to_proc->pid : 0;
	to_thread = t->to_thread ?
t->to_thread->pid : 0; spin_unlock(&t->lock); trace_binder_txn_latency_free(t, from_proc, from_thread, to_proc, to_thread); } static void binder_free_transaction(struct binder_transaction *t) { struct binder_proc *target_proc = t->to_proc; if (target_proc) { binder_inner_proc_lock(target_proc); target_proc->outstanding_txns--; if (target_proc->outstanding_txns < 0) pr_warn("%s: Unexpected outstanding_txns %d\n", __func__, target_proc->outstanding_txns); if (!target_proc->outstanding_txns && target_proc->is_frozen) wake_up_interruptible_all(&target_proc->freeze_wait); if (t->buffer) t->buffer->transaction = NULL; binder_inner_proc_unlock(target_proc); } if (trace_binder_txn_latency_free_enabled()) binder_txn_latency_free(t); /* * If the transaction has no target_proc, then * t->buffer->transaction has already been cleared. */ binder_free_txn_fixups(t); kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); } static void binder_send_failed_reply(struct binder_transaction *t, uint32_t error_code) { struct binder_thread *target_thread; struct binder_transaction *next; BUG_ON(t->flags & TF_ONE_WAY); while (1) { target_thread = binder_get_txn_from_and_acq_inner(t); if (target_thread) { binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "send failed reply for transaction %d to %d:%d\n", t->debug_id, target_thread->proc->pid, target_thread->pid); binder_pop_transaction_ilocked(target_thread, t); if (target_thread->reply_error.cmd == BR_OK) { target_thread->reply_error.cmd = error_code; binder_enqueue_thread_work_ilocked( target_thread, &target_thread->reply_error.work); wake_up_interruptible(&target_thread->wait); } else { /* * Cannot get here for normal operation, but * we can if multiple synchronous transactions * are sent without blocking for responses. * Just ignore the 2nd error in this case. */ pr_warn("Unexpected reply error: %u\n", target_thread->reply_error.cmd); } binder_inner_proc_unlock(target_thread->proc); binder_thread_dec_tmpref(target_thread); binder_free_transaction(t); return; } __release(&target_thread->proc->inner_lock); next = t->from_parent; binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "send failed reply for transaction %d, target dead\n", t->debug_id); binder_free_transaction(t); if (next == NULL) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "reply failed, no target thread at root\n"); return; } t = next; binder_debug(BINDER_DEBUG_DEAD_BINDER, "reply failed, no target thread -- retry %d\n", t->debug_id); } } /** * binder_cleanup_transaction() - cleans up undelivered transaction * @t: transaction that needs to be cleaned up * @reason: reason the transaction wasn't delivered * @error_code: error to return to caller (if synchronous call) */ static void binder_cleanup_transaction(struct binder_transaction *t, const char *reason, uint32_t error_code) { if (t->buffer->target_node && !(t->flags & TF_ONE_WAY)) { binder_send_failed_reply(t, error_code); } else { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered transaction %d, %s\n", t->debug_id, reason); binder_free_transaction(t); } } /** * binder_get_object() - gets object and checks for valid metadata * @proc: binder_proc owning the buffer * @u: sender's user pointer to base of buffer * @buffer: binder_buffer that we're parsing. * @offset: offset in the @buffer at which to validate an object. * @object: struct binder_object to read into * * Copy the binder object at the given offset into @object. If @u is * provided then the copy is from the sender's buffer. If not, then * it is copied from the target's @buffer. 
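 *
 * Callers all follow the same shape; e.g. binder_validate_ptr() below
 * does (sketch):
 *
 *   size_t object_size = binder_get_object(proc, NULL, b, object_offset,
 *                                          object);
 *   if (!object_size || object->hdr.type != BINDER_TYPE_PTR)
 *           return NULL;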
* * Return: If there's a valid metadata object at @offset, the * size of that object. Otherwise, it returns zero. The object * is read into the struct binder_object pointed to by @object. */ static size_t binder_get_object(struct binder_proc *proc, const void __user *u, struct binder_buffer *buffer, unsigned long offset, struct binder_object *object) { size_t read_size; struct binder_object_header *hdr; size_t object_size = 0; read_size = min_t(size_t, sizeof(*object), buffer->data_size - offset); if (offset > buffer->data_size || read_size < sizeof(*hdr)) return 0; if (u) { if (copy_from_user(object, u + offset, read_size)) return 0; } else { if (binder_alloc_copy_from_buffer(&proc->alloc, object, buffer, offset, read_size)) return 0; } /* Ok, now see if we read a complete object. */ hdr = &object->hdr; switch (hdr->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: object_size = sizeof(struct flat_binder_object); break; case BINDER_TYPE_FD: object_size = sizeof(struct binder_fd_object); break; case BINDER_TYPE_PTR: object_size = sizeof(struct binder_buffer_object); break; case BINDER_TYPE_FDA: object_size = sizeof(struct binder_fd_array_object); break; default: return 0; } if (offset <= buffer->data_size - object_size && buffer->data_size >= object_size) return object_size; else return 0; } /** * binder_validate_ptr() - validates binder_buffer_object in a binder_buffer. * @proc: binder_proc owning the buffer * @b: binder_buffer containing the object * @object: struct binder_object to read into * @index: index in offset array at which the binder_buffer_object is * located * @start_offset: points to the start of the offset array * @object_offsetp: offset of @object read from @b * @num_valid: the number of valid offsets in the offset array * * Return: If @index is within the valid range of the offset array * described by @start and @num_valid, and if there's a valid * binder_buffer_object at the offset found in index @index * of the offset array, that object is returned. Otherwise, * %NULL is returned. * Note that the offset found in index @index itself is not * verified; this function assumes that @num_valid elements * from @start were previously verified to have valid offsets. * If @object_offsetp is non-NULL, then the offset within * @b is written to it. */ static struct binder_buffer_object *binder_validate_ptr( struct binder_proc *proc, struct binder_buffer *b, struct binder_object *object, binder_size_t index, binder_size_t start_offset, binder_size_t *object_offsetp, binder_size_t num_valid) { size_t object_size; binder_size_t object_offset; unsigned long buffer_offset; if (index >= num_valid) return NULL; buffer_offset = start_offset + sizeof(binder_size_t) * index; if (binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, b, buffer_offset, sizeof(object_offset))) return NULL; object_size = binder_get_object(proc, NULL, b, object_offset, object); if (!object_size || object->hdr.type != BINDER_TYPE_PTR) return NULL; if (object_offsetp) *object_offsetp = object_offset; return &object->bbo; } /** * binder_validate_fixup() - validates pointer/fd fixups happen in order. 
* @proc: binder_proc owning the buffer * @b: transaction buffer * @objects_start_offset: offset to start of objects buffer * @buffer_obj_offset: offset to binder_buffer_object in which to fix up * @fixup_offset: start offset in @buffer to fix up * @last_obj_offset: offset to last binder_buffer_object that we fixed * @last_min_offset: minimum fixup offset in object at @last_obj_offset * * Return: %true if a fixup in buffer @buffer at offset @offset is * allowed. * * For safety reasons, we only allow fixups inside a buffer to happen * at increasing offsets; additionally, we only allow fixup on the last * buffer object that was verified, or one of its parents. * * Example of what is allowed: * * A * B (parent = A, offset = 0) * C (parent = A, offset = 16) * D (parent = C, offset = 0) * E (parent = A, offset = 32) // min_offset is 16 (C.parent_offset) * * Examples of what is not allowed: * * Decreasing offsets within the same parent: * A * C (parent = A, offset = 16) * B (parent = A, offset = 0) // decreasing offset within A * * Referring to a parent that wasn't the last object or any of its parents: * A * B (parent = A, offset = 0) * C (parent = A, offset = 0) * C (parent = A, offset = 16) * D (parent = B, offset = 0) // B is not A or any of A's parents */ static bool binder_validate_fixup(struct binder_proc *proc, struct binder_buffer *b, binder_size_t objects_start_offset, binder_size_t buffer_obj_offset, binder_size_t fixup_offset, binder_size_t last_obj_offset, binder_size_t last_min_offset) { if (!last_obj_offset) { /* Nothing to fix up in */ return false; } while (last_obj_offset != buffer_obj_offset) { unsigned long buffer_offset; struct binder_object last_object; struct binder_buffer_object *last_bbo; size_t object_size = binder_get_object(proc, NULL, b, last_obj_offset, &last_object); if (object_size != sizeof(*last_bbo)) return false; last_bbo = &last_object.bbo; /* * Safe to retrieve the parent of last_obj, since it * was already previously verified by the driver. */ if ((last_bbo->flags & BINDER_BUFFER_FLAG_HAS_PARENT) == 0) return false; last_min_offset = last_bbo->parent_offset + sizeof(uintptr_t); buffer_offset = objects_start_offset + sizeof(binder_size_t) * last_bbo->parent; if (binder_alloc_copy_from_buffer(&proc->alloc, &last_obj_offset, b, buffer_offset, sizeof(last_obj_offset))) return false; } return (fixup_offset >= last_min_offset); } /** * struct binder_task_work_cb - for deferred close * * @twork: callback_head for task work * @fd: fd to close * * Structure to pass task work to be handled after * returning from binder_ioctl() via task_work_add(). */ struct binder_task_work_cb { struct callback_head twork; struct file *file; }; /** * binder_do_fd_close() - close list of file descriptors * @twork: callback head for task work * * It is not safe to call ksys_close() during the binder_ioctl() * function if there is a chance that binder's own file descriptor * might be closed. This is to meet the requirements for using * fdget() (see comments for __fget_light()). Therefore use * task_work_add() to schedule the close operation once we have * returned from binder_ioctl(). This function is a callback * for that mechanism and does the actual ksys_close() on the * given file descriptor. 
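 *
 * The resulting flow is roughly:
 *
 *   binder_deferred_fd_close(fd);     // while still in binder_ioctl()
 *   ...binder_ioctl() returns to userspace...
 *   binder_do_fd_close(twork);        // task work runs, fput()s the file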
*/ static void binder_do_fd_close(struct callback_head *twork) { struct binder_task_work_cb *twcb = container_of(twork, struct binder_task_work_cb, twork); fput(twcb->file); kfree(twcb); } /** * binder_deferred_fd_close() - schedule a close for the given file-descriptor * @fd: file-descriptor to close * * See comments in binder_do_fd_close(). This function is used to schedule * a file-descriptor to be closed after returning from binder_ioctl(). */ static void binder_deferred_fd_close(int fd) { struct binder_task_work_cb *twcb; twcb = kzalloc(sizeof(*twcb), GFP_KERNEL); if (!twcb) return; init_task_work(&twcb->twork, binder_do_fd_close); twcb->file = close_fd_get_file(fd); if (twcb->file) { // pin it until binder_do_fd_close(); see comments there get_file(twcb->file); filp_close(twcb->file, current->files); task_work_add(current, &twcb->twork, TWA_RESUME); } else { kfree(twcb); } } static void binder_transaction_buffer_release(struct binder_proc *proc, struct binder_thread *thread, struct binder_buffer *buffer, binder_size_t off_end_offset, bool is_failure) { int debug_id = buffer->debug_id; binder_size_t off_start_offset, buffer_offset; binder_debug(BINDER_DEBUG_TRANSACTION, "%d buffer release %d, size %zd-%zd, failed at %llx\n", proc->pid, buffer->debug_id, buffer->data_size, buffer->offsets_size, (unsigned long long)off_end_offset); if (buffer->target_node) binder_dec_node(buffer->target_node, 1, 0); off_start_offset = ALIGN(buffer->data_size, sizeof(void *)); for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; buffer_offset += sizeof(binder_size_t)) { struct binder_object_header *hdr; size_t object_size = 0; struct binder_object object; binder_size_t object_offset; if (!binder_alloc_copy_from_buffer(&proc->alloc, &object_offset, buffer, buffer_offset, sizeof(object_offset))) object_size = binder_get_object(proc, NULL, buffer, object_offset, &object); if (object_size == 0) { pr_err("transaction release %d bad object at offset %lld, size %zd\n", debug_id, (u64)object_offset, buffer->data_size); continue; } hdr = &object.hdr; switch (hdr->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { struct flat_binder_object *fp; struct binder_node *node; fp = to_flat_binder_object(hdr); node = binder_get_node(proc, fp->binder); if (node == NULL) { pr_err("transaction release %d bad node %016llx\n", debug_id, (u64)fp->binder); break; } binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%016llx\n", node->debug_id, (u64)node->ptr); binder_dec_node(node, hdr->type == BINDER_TYPE_BINDER, 0); binder_put_node(node); } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { struct flat_binder_object *fp; struct binder_ref_data rdata; int ret; fp = to_flat_binder_object(hdr); ret = binder_dec_ref_for_handle(proc, fp->handle, hdr->type == BINDER_TYPE_HANDLE, &rdata); if (ret) { pr_err("transaction release %d bad handle %d, ret = %d\n", debug_id, fp->handle, ret); break; } binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d\n", rdata.debug_id, rdata.desc); } break; case BINDER_TYPE_FD: { /* * No need to close the file here since user-space * closes it for successfully delivered * transactions. For transactions that weren't * delivered, the new fd was never allocated so * there is no need to close and the fput on the * file is done when the transaction is torn * down. 
*/ } break; case BINDER_TYPE_PTR: /* * Nothing to do here, this will get cleaned up when the * transaction buffer gets freed */ break; case BINDER_TYPE_FDA: { struct binder_fd_array_object *fda; struct binder_buffer_object *parent; struct binder_object ptr_object; binder_size_t fda_offset; size_t fd_index; binder_size_t fd_buf_size; binder_size_t num_valid; if (is_failure) { /* * The fd fixups have not been applied so no * fds need to be closed. */ continue; } num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t); fda = to_binder_fd_array_object(hdr); parent = binder_validate_ptr(proc, buffer, &ptr_object, fda->parent, off_start_offset, NULL, num_valid); if (!parent) { pr_err("transaction release %d bad parent offset\n", debug_id); continue; } fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { pr_err("transaction release %d invalid number of fds (%lld)\n", debug_id, (u64)fda->num_fds); continue; } if (fd_buf_size > parent->length || fda->parent_offset > parent->length - fd_buf_size) { /* No space for all file descriptors here. */ pr_err("transaction release %d not enough space for %lld fds in buffer\n", debug_id, (u64)fda->num_fds); continue; } /* * the source data for binder_buffer_object is visible * to user-space and the @buffer element is the user * pointer to the buffer_object containing the fd_array. * Convert the address to an offset relative to * the base of the transaction buffer. */ fda_offset = (parent->buffer - (uintptr_t)buffer->user_data) + fda->parent_offset; for (fd_index = 0; fd_index < fda->num_fds; fd_index++) { u32 fd; int err; binder_size_t offset = fda_offset + fd_index * sizeof(fd); err = binder_alloc_copy_from_buffer( &proc->alloc, &fd, buffer, offset, sizeof(fd)); WARN_ON(err); if (!err) { binder_deferred_fd_close(fd); /* * Need to make sure the thread goes * back to userspace to complete the * deferred close */ if (thread) thread->looper_need_return = true; } } } break; default: pr_err("transaction release %d bad object type %x\n", debug_id, hdr->type); break; } } } /* Clean up all the objects in the buffer */ static inline void binder_release_entire_buffer(struct binder_proc *proc, struct binder_thread *thread, struct binder_buffer *buffer, bool is_failure) { binder_size_t off_end_offset; off_end_offset = ALIGN(buffer->data_size, sizeof(void *)); off_end_offset += buffer->offsets_size; binder_transaction_buffer_release(proc, thread, buffer, off_end_offset, is_failure); } static int binder_translate_binder(struct flat_binder_object *fp, struct binder_transaction *t, struct binder_thread *thread) { struct binder_node *node; struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; struct binder_ref_data rdata; int ret = 0; node = binder_get_node(proc, fp->binder); if (!node) { node = binder_new_node(proc, fp); if (!node) return -ENOMEM; } if (fp->cookie != node->cookie) { binder_user_error("%d:%d sending u%016llx node %d, cookie mismatch %016llx != %016llx\n", proc->pid, thread->pid, (u64)fp->binder, node->debug_id, (u64)fp->cookie, (u64)node->cookie); ret = -EINVAL; goto done; } if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { ret = -EPERM; goto done; } ret = binder_inc_ref_for_node(target_proc, node, fp->hdr.type == BINDER_TYPE_BINDER, &thread->todo, &rdata); if (ret) goto done; if (fp->hdr.type == BINDER_TYPE_BINDER) fp->hdr.type = BINDER_TYPE_HANDLE; else fp->hdr.type = BINDER_TYPE_WEAK_HANDLE; fp->binder = 0; fp->handle = rdata.desc; fp->cookie = 0; 
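	/*
	 * From here on the object travels as a handle in the target's
	 * ref table; the sender's (binder, cookie) user pointers were
	 * cleared above so they are never exposed to the target process.
	 */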
trace_binder_transaction_node_to_ref(t, node, &rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%016llx -> ref %d desc %d\n", node->debug_id, (u64)node->ptr, rdata.debug_id, rdata.desc); done: binder_put_node(node); return ret; } static int binder_translate_handle(struct flat_binder_object *fp, struct binder_transaction *t, struct binder_thread *thread) { struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; struct binder_node *node; struct binder_ref_data src_rdata; int ret = 0; node = binder_get_node_from_ref(proc, fp->handle, fp->hdr.type == BINDER_TYPE_HANDLE, &src_rdata); if (!node) { binder_user_error("%d:%d got transaction with invalid handle, %d\n", proc->pid, thread->pid, fp->handle); return -EINVAL; } if (security_binder_transfer_binder(proc->cred, target_proc->cred)) { ret = -EPERM; goto done; } binder_node_lock(node); if (node->proc == target_proc) { if (fp->hdr.type == BINDER_TYPE_HANDLE) fp->hdr.type = BINDER_TYPE_BINDER; else fp->hdr.type = BINDER_TYPE_WEAK_BINDER; fp->binder = node->ptr; fp->cookie = node->cookie; if (node->proc) binder_inner_proc_lock(node->proc); else __acquire(&node->proc->inner_lock); binder_inc_node_nilocked(node, fp->hdr.type == BINDER_TYPE_BINDER, 0, NULL); if (node->proc) binder_inner_proc_unlock(node->proc); else __release(&node->proc->inner_lock); trace_binder_transaction_ref_to_node(t, node, &src_rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> node %d u%016llx\n", src_rdata.debug_id, src_rdata.desc, node->debug_id, (u64)node->ptr); binder_node_unlock(node); } else { struct binder_ref_data dest_rdata; binder_node_unlock(node); ret = binder_inc_ref_for_node(target_proc, node, fp->hdr.type == BINDER_TYPE_HANDLE, NULL, &dest_rdata); if (ret) goto done; fp->binder = 0; fp->handle = dest_rdata.desc; fp->cookie = 0; trace_binder_transaction_ref_to_ref(t, node, &src_rdata, &dest_rdata); binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc %d -> ref %d desc %d (node %d)\n", src_rdata.debug_id, src_rdata.desc, dest_rdata.debug_id, dest_rdata.desc, node->debug_id); } done: binder_put_node(node); return ret; } static int binder_translate_fd(u32 fd, binder_size_t fd_offset, struct binder_transaction *t, struct binder_thread *thread, struct binder_transaction *in_reply_to) { struct binder_proc *proc = thread->proc; struct binder_proc *target_proc = t->to_proc; struct binder_txn_fd_fixup *fixup; struct file *file; int ret = 0; bool target_allows_fd; if (in_reply_to) target_allows_fd = !!(in_reply_to->flags & TF_ACCEPT_FDS); else target_allows_fd = t->buffer->target_node->accept_fds; if (!target_allows_fd) { binder_user_error("%d:%d got %s with fd, %d, but target does not allow fds\n", proc->pid, thread->pid, in_reply_to ? "reply" : "transaction", fd); ret = -EPERM; goto err_fd_not_accepted; } file = fget(fd); if (!file) { binder_user_error("%d:%d got transaction with invalid fd, %d\n", proc->pid, thread->pid, fd); ret = -EBADF; goto err_fget; } ret = security_binder_transfer_file(proc->cred, target_proc->cred, file); if (ret < 0) { ret = -EPERM; goto err_security; } /* * Add fixup record for this transaction. The allocation * of the fd in the target needs to be done from a * target thread. 
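	 * The fixup is consumed later, in the target's context; roughly
	 * (see binder_apply_fd_fixups() further down):
	 *
	 *   fd = get_unused_fd_flags(O_CLOEXEC);
	 *   fd_install(fd, fixup->file);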
*/ fixup = kzalloc(sizeof(*fixup), GFP_KERNEL); if (!fixup) { ret = -ENOMEM; goto err_alloc; } fixup->file = file; fixup->offset = fd_offset; fixup->target_fd = -1; trace_binder_transaction_fd_send(t, fd, fixup->offset); list_add_tail(&fixup->fixup_entry, &t->fd_fixups); return ret; err_alloc: err_security: fput(file); err_fget: err_fd_not_accepted: return ret; } /** * struct binder_ptr_fixup - data to be fixed-up in target buffer * @offset offset in target buffer to fixup * @skip_size bytes to skip in copy (fixup will be written later) * @fixup_data data to write at fixup offset * @node list node * * This is used for the pointer fixup list (pf) which is created and consumed * during binder_transaction() and is only accessed locally. No * locking is necessary. * * The list is ordered by @offset. */ struct binder_ptr_fixup { binder_size_t offset; size_t skip_size; binder_uintptr_t fixup_data; struct list_head node; }; /** * struct binder_sg_copy - scatter-gather data to be copied * @offset offset in target buffer * @sender_uaddr user address in source buffer * @length bytes to copy * @node list node * * This is used for the sg copy list (sgc) which is created and consumed * during binder_transaction() and is only accessed locally. No * locking is necessary. * * The list is ordered by @offset. */ struct binder_sg_copy { binder_size_t offset; const void __user *sender_uaddr; size_t length; struct list_head node; }; /** * binder_do_deferred_txn_copies() - copy and fixup scatter-gather data * @alloc: binder_alloc associated with @buffer * @buffer: binder buffer in target process * @sgc_head: list_head of scatter-gather copy list * @pf_head: list_head of pointer fixup list * * Processes all elements of @sgc_head, applying fixups from @pf_head * and copying the scatter-gather data from the source process' user * buffer to the target's buffer. It is expected that the list creation * and processing all occurs during binder_transaction() so these lists * are only accessed in local context. * * Return: 0=success, else -errno */ static int binder_do_deferred_txn_copies(struct binder_alloc *alloc, struct binder_buffer *buffer, struct list_head *sgc_head, struct list_head *pf_head) { int ret = 0; struct binder_sg_copy *sgc, *tmpsgc; struct binder_ptr_fixup *tmppf; struct binder_ptr_fixup *pf = list_first_entry_or_null(pf_head, struct binder_ptr_fixup, node); list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { size_t bytes_copied = 0; while (bytes_copied < sgc->length) { size_t copy_size; size_t bytes_left = sgc->length - bytes_copied; size_t offset = sgc->offset + bytes_copied; /* * We copy up to the fixup (pointed to by pf) */ copy_size = pf ? min(bytes_left, (size_t)pf->offset - offset) : bytes_left; if (!ret && copy_size) ret = binder_alloc_copy_user_to_buffer( alloc, buffer, offset, sgc->sender_uaddr + bytes_copied, copy_size); bytes_copied += copy_size; if (copy_size != bytes_left) { BUG_ON(!pf); /* we stopped at a fixup offset */ if (pf->skip_size) { /* * we are just skipping. This is for * BINDER_TYPE_FDA where the translated * fds will be fixed up when we get * to target context. 
*/ bytes_copied += pf->skip_size; } else { /* apply the fixup indicated by pf */ if (!ret) ret = binder_alloc_copy_to_buffer( alloc, buffer, pf->offset, &pf->fixup_data, sizeof(pf->fixup_data)); bytes_copied += sizeof(pf->fixup_data); } list_del(&pf->node); kfree(pf); pf = list_first_entry_or_null(pf_head, struct binder_ptr_fixup, node); } } list_del(&sgc->node); kfree(sgc); } list_for_each_entry_safe(pf, tmppf, pf_head, node) { BUG_ON(pf->skip_size == 0); list_del(&pf->node); kfree(pf); } BUG_ON(!list_empty(sgc_head)); return ret > 0 ? -EINVAL : ret; } /** * binder_cleanup_deferred_txn_lists() - free specified lists * @sgc_head: list_head of scatter-gather copy list * @pf_head: list_head of pointer fixup list * * Called to clean up @sgc_head and @pf_head if there is an * error. */ static void binder_cleanup_deferred_txn_lists(struct list_head *sgc_head, struct list_head *pf_head) { struct binder_sg_copy *sgc, *tmpsgc; struct binder_ptr_fixup *pf, *tmppf; list_for_each_entry_safe(sgc, tmpsgc, sgc_head, node) { list_del(&sgc->node); kfree(sgc); } list_for_each_entry_safe(pf, tmppf, pf_head, node) { list_del(&pf->node); kfree(pf); } } /** * binder_defer_copy() - queue a scatter-gather buffer for copy * @sgc_head: list_head of scatter-gather copy list * @offset: binder buffer offset in target process * @sender_uaddr: user address in source process * @length: bytes to copy * * Specify a scatter-gather block to be copied. The actual copy must * be deferred until all the needed fixups are identified and queued. * Then the copy and fixups are done together so un-translated values * from the source are never visible in the target buffer. * * We are guaranteed that repeated calls to this function will have * monotonically increasing @offset values so the list will naturally * be ordered. * * Return: 0=success, else -errno */ static int binder_defer_copy(struct list_head *sgc_head, binder_size_t offset, const void __user *sender_uaddr, size_t length) { struct binder_sg_copy *bc = kzalloc(sizeof(*bc), GFP_KERNEL); if (!bc) return -ENOMEM; bc->offset = offset; bc->sender_uaddr = sender_uaddr; bc->length = length; INIT_LIST_HEAD(&bc->node); /* * We are guaranteed that the deferred copies are in-order * so just add to the tail. */ list_add_tail(&bc->node, sgc_head); return 0; } /** * binder_add_fixup() - queue a fixup to be applied to sg copy * @pf_head: list_head of binder ptr fixup list * @offset: binder buffer offset in target process * @fixup: bytes to be copied for fixup * @skip_size: bytes to skip when copying (fixup will be applied later) * * Add the specified fixup to a list ordered by @offset. When copying * the scatter-gather buffers, the fixup will be copied instead of * data from the source buffer. For BINDER_TYPE_FDA fixups, the fixup * will be applied later (in target process context), so we just skip * the bytes specified by @skip_size. If @skip_size is 0, we copy the * value in @fixup. * * This function is called *mostly* in @offset order, but there are * exceptions. Since out-of-order inserts are relatively uncommon, * we insert the new element by searching backward from the tail of * the list. 
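 *
 * For example, adding fixups at offsets 8, 24, and then 16 yields the
 * list (8) -> (16) -> (24): the out-of-order 16 is placed by walking
 * backward from 24 until an element with a smaller offset (8) is found.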
* * Return: 0=success, else -errno */ static int binder_add_fixup(struct list_head *pf_head, binder_size_t offset, binder_uintptr_t fixup, size_t skip_size) { struct binder_ptr_fixup *pf = kzalloc(sizeof(*pf), GFP_KERNEL); struct binder_ptr_fixup *tmppf; if (!pf) return -ENOMEM; pf->offset = offset; pf->fixup_data = fixup; pf->skip_size = skip_size; INIT_LIST_HEAD(&pf->node); /* Fixups are *mostly* added in-order, but there are some * exceptions. Look backwards through list for insertion point. */ list_for_each_entry_reverse(tmppf, pf_head, node) { if (tmppf->offset < pf->offset) { list_add(&pf->node, &tmppf->node); return 0; } } /* * if we get here, then the new offset is the lowest so * insert at the head */ list_add(&pf->node, pf_head); return 0; } static int binder_translate_fd_array(struct list_head *pf_head, struct binder_fd_array_object *fda, const void __user *sender_ubuffer, struct binder_buffer_object *parent, struct binder_buffer_object *sender_uparent, struct binder_transaction *t, struct binder_thread *thread, struct binder_transaction *in_reply_to) { binder_size_t fdi, fd_buf_size; binder_size_t fda_offset; const void __user *sender_ufda_base; struct binder_proc *proc = thread->proc; int ret; if (fda->num_fds == 0) return 0; fd_buf_size = sizeof(u32) * fda->num_fds; if (fda->num_fds >= SIZE_MAX / sizeof(u32)) { binder_user_error("%d:%d got transaction with invalid number of fds (%lld)\n", proc->pid, thread->pid, (u64)fda->num_fds); return -EINVAL; } if (fd_buf_size > parent->length || fda->parent_offset > parent->length - fd_buf_size) { /* No space for all file descriptors here. */ binder_user_error("%d:%d not enough space to store %lld fds in buffer\n", proc->pid, thread->pid, (u64)fda->num_fds); return -EINVAL; } /* * the source data for binder_buffer_object is visible * to user-space and the @buffer element is the user * pointer to the buffer_object containing the fd_array. * Convert the address to an offset relative to * the base of the transaction buffer. */ fda_offset = (parent->buffer - (uintptr_t)t->buffer->user_data) + fda->parent_offset; sender_ufda_base = (void __user *)(uintptr_t)sender_uparent->buffer + fda->parent_offset; if (!IS_ALIGNED((unsigned long)fda_offset, sizeof(u32)) || !IS_ALIGNED((unsigned long)sender_ufda_base, sizeof(u32))) { binder_user_error("%d:%d parent offset not aligned correctly.\n", proc->pid, thread->pid); return -EINVAL; } ret = binder_add_fixup(pf_head, fda_offset, 0, fda->num_fds * sizeof(u32)); if (ret) return ret; for (fdi = 0; fdi < fda->num_fds; fdi++) { u32 fd; binder_size_t offset = fda_offset + fdi * sizeof(fd); binder_size_t sender_uoffset = fdi * sizeof(fd); ret = copy_from_user(&fd, sender_ufda_base + sender_uoffset, sizeof(fd)); if (!ret) ret = binder_translate_fd(fd, offset, t, thread, in_reply_to); if (ret) return ret > 0 ? 
-EINVAL : ret;
	}
	return 0;
}

static int binder_fixup_parent(struct list_head *pf_head,
			       struct binder_transaction *t,
			       struct binder_thread *thread,
			       struct binder_buffer_object *bp,
			       binder_size_t off_start_offset,
			       binder_size_t num_valid,
			       binder_size_t last_fixup_obj_off,
			       binder_size_t last_fixup_min_off)
{
	struct binder_buffer_object *parent;
	struct binder_buffer *b = t->buffer;
	struct binder_proc *proc = thread->proc;
	struct binder_proc *target_proc = t->to_proc;
	struct binder_object object;
	binder_size_t buffer_offset;
	binder_size_t parent_offset;

	if (!(bp->flags & BINDER_BUFFER_FLAG_HAS_PARENT))
		return 0;

	parent = binder_validate_ptr(target_proc, b, &object, bp->parent,
				     off_start_offset, &parent_offset,
				     num_valid);
	if (!parent) {
		binder_user_error("%d:%d got transaction with invalid parent offset or type\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (!binder_validate_fixup(target_proc, b, off_start_offset,
				   parent_offset, bp->parent_offset,
				   last_fixup_obj_off,
				   last_fixup_min_off)) {
		binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	if (parent->length < sizeof(binder_uintptr_t) ||
	    bp->parent_offset > parent->length - sizeof(binder_uintptr_t)) {
		/* No space for a pointer here! */
		binder_user_error("%d:%d got transaction with invalid parent offset\n",
				  proc->pid, thread->pid);
		return -EINVAL;
	}

	buffer_offset = bp->parent_offset +
		(uintptr_t)parent->buffer - (uintptr_t)b->user_data;

	return binder_add_fixup(pf_head, buffer_offset, bp->buffer, 0);
}

/**
 * binder_can_update_transaction() - Can a txn be superseded by an updated one?
 * @t1: the pending async txn in the frozen process
 * @t2: the new async txn to supersede the outdated pending one
 *
 * Return: true if t2 can supersede t1
 *         false if t2 cannot supersede t1
 */
static bool binder_can_update_transaction(struct binder_transaction *t1,
					  struct binder_transaction *t2)
{
	if ((t1->flags & t2->flags & (TF_ONE_WAY | TF_UPDATE_TXN)) !=
	    (TF_ONE_WAY | TF_UPDATE_TXN) || !t1->to_proc || !t2->to_proc)
		return false;
	if (t1->to_proc->tsk == t2->to_proc->tsk && t1->code == t2->code &&
	    t1->flags == t2->flags && t1->buffer->pid == t2->buffer->pid &&
	    t1->buffer->target_node->ptr == t2->buffer->target_node->ptr &&
	    t1->buffer->target_node->cookie == t2->buffer->target_node->cookie)
		return true;
	return false;
}

/**
 * binder_find_outdated_transaction_ilocked() - Find the outdated transaction
 * @t:		 new async transaction
 * @target_list: list to find outdated transaction
 *
 * Return: the outdated transaction if found
 *         NULL if no outdated transaction can be found
 *
 * Requires the proc->inner_lock to be held.
 */
static struct binder_transaction *
binder_find_outdated_transaction_ilocked(struct binder_transaction *t,
					 struct list_head *target_list)
{
	struct binder_work *w;

	list_for_each_entry(w, target_list, entry) {
		struct binder_transaction *t_queued;

		if (w->type != BINDER_WORK_TRANSACTION)
			continue;
		t_queued = container_of(w, struct binder_transaction, work);
		if (binder_can_update_transaction(t_queued, t))
			return t_queued;
	}
	return NULL;
}

/**
 * binder_proc_transaction() - sends a transaction to a process and wakes it up
 * @t:		transaction to send
 * @proc:	process to send the transaction to
 * @thread:	thread in @proc to send the transaction to (may be NULL)
 *
 * This function queues a transaction to the specified process. It will try
 * to find a thread in the target process to handle the transaction and
 * wake it up. If no thread is found, the work is queued to the proc
 * waitqueue.
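 * For oneway transactions, if another async transaction is already
 * pending on the target node, the new work is parked on the node's
 * async_todo list instead of being handed to a thread.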
 *
 * If the @thread parameter is not NULL, the transaction is always queued
 * to the waitlist of that specific thread.
 *
 * Return:	0 if the transaction was successfully queued
 *		BR_DEAD_REPLY if the target process or thread is dead
 *		BR_FROZEN_REPLY if the target process or thread is frozen and
 *			the sync transaction was rejected
 *		BR_TRANSACTION_PENDING_FROZEN if the target process is frozen
 *			and the async transaction was successfully queued
 */
static int binder_proc_transaction(struct binder_transaction *t,
				   struct binder_proc *proc,
				   struct binder_thread *thread)
{
	struct binder_node *node = t->buffer->target_node;
	bool oneway = !!(t->flags & TF_ONE_WAY);
	bool pending_async = false;
	struct binder_transaction *t_outdated = NULL;
	bool frozen = false;

	BUG_ON(!node);
	binder_node_lock(node);
	if (oneway) {
		BUG_ON(thread);
		if (node->has_async_transaction)
			pending_async = true;
		else
			node->has_async_transaction = true;
	}

	binder_inner_proc_lock(proc);
	if (proc->is_frozen) {
		frozen = true;
		proc->sync_recv |= !oneway;
		proc->async_recv |= oneway;
	}

	if ((frozen && !oneway) || proc->is_dead ||
	    (thread && thread->is_dead)) {
		binder_inner_proc_unlock(proc);
		binder_node_unlock(node);
		return frozen ? BR_FROZEN_REPLY : BR_DEAD_REPLY;
	}

	if (!thread && !pending_async)
		thread = binder_select_thread_ilocked(proc);

	if (thread) {
		binder_enqueue_thread_work_ilocked(thread, &t->work);
	} else if (!pending_async) {
		binder_enqueue_work_ilocked(&t->work, &proc->todo);
	} else {
		if ((t->flags & TF_UPDATE_TXN) && frozen) {
			t_outdated = binder_find_outdated_transaction_ilocked(t,
									      &node->async_todo);
			if (t_outdated) {
				binder_debug(BINDER_DEBUG_TRANSACTION,
					     "txn %d supersedes %d\n",
					     t->debug_id, t_outdated->debug_id);
				list_del_init(&t_outdated->work.entry);
				proc->outstanding_txns--;
			}
		}
		binder_enqueue_work_ilocked(&t->work, &node->async_todo);
	}

	if (!pending_async)
		binder_wakeup_thread_ilocked(proc, thread, !oneway /* sync */);

	proc->outstanding_txns++;
	binder_inner_proc_unlock(proc);
	binder_node_unlock(node);

	/*
	 * To reduce potential contention, free the outdated transaction and
	 * buffer after releasing the locks.
	 */
	if (t_outdated) {
		struct binder_buffer *buffer = t_outdated->buffer;

		t_outdated->buffer = NULL;
		buffer->transaction = NULL;
		trace_binder_transaction_update_buffer_release(buffer);
		binder_release_entire_buffer(proc, NULL, buffer, false);
		binder_alloc_free_buf(&proc->alloc, buffer);
		kfree(t_outdated);
		binder_stats_deleted(BINDER_STAT_TRANSACTION);
	}

	if (oneway && frozen)
		return BR_TRANSACTION_PENDING_FROZEN;

	return 0;
}

/**
 * binder_get_node_refs_for_txn() - Get required refs on node for txn
 * @node:	struct binder_node for which to get refs
 * @procp:	returns @node->proc if valid
 * @error:	set to BR_DEAD_REPLY if @node->proc is NULL
 *
 * User-space normally keeps the node alive when creating a transaction
 * since it has a reference to the target. The local strong ref keeps it
 * alive if the sending process dies before the target process processes
 * the transaction. If the source process is malicious or has a reference
 * counting bug, relying on the local strong ref can fail.
 *
 * Since user-space can cause the local strong ref to go away, we also take
 * a tmpref on the node to ensure it survives while we are constructing
 * the transaction. We also need a tmpref on the proc while we are
 * constructing the transaction, so we take that here as well.
 *
 * Return: The target_node with refs taken or NULL if @node->proc is NULL.
 * Also sets @procp if valid.
If the @node->proc is NULL indicating that the * target proc has died, @error is set to BR_DEAD_REPLY. */ static struct binder_node *binder_get_node_refs_for_txn( struct binder_node *node, struct binder_proc **procp, uint32_t *error) { struct binder_node *target_node = NULL; binder_node_inner_lock(node); if (node->proc) { target_node = node; binder_inc_node_nilocked(node, 1, 0, NULL); binder_inc_node_tmpref_ilocked(node); node->proc->tmp_ref++; *procp = node->proc; } else *error = BR_DEAD_REPLY; binder_node_inner_unlock(node); return target_node; } static void binder_set_txn_from_error(struct binder_transaction *t, int id, uint32_t command, int32_t param) { struct binder_thread *from = binder_get_txn_from_and_acq_inner(t); if (!from) { /* annotation for sparse */ __release(&from->proc->inner_lock); return; } /* don't override existing errors */ if (from->ee.command == BR_OK) binder_set_extended_error(&from->ee, id, command, param); binder_inner_proc_unlock(from->proc); binder_thread_dec_tmpref(from); } static void binder_transaction(struct binder_proc *proc, struct binder_thread *thread, struct binder_transaction_data *tr, int reply, binder_size_t extra_buffers_size) { int ret; struct binder_transaction *t; struct binder_work *w; struct binder_work *tcomplete; binder_size_t buffer_offset = 0; binder_size_t off_start_offset, off_end_offset; binder_size_t off_min; binder_size_t sg_buf_offset, sg_buf_end_offset; binder_size_t user_offset = 0; struct binder_proc *target_proc = NULL; struct binder_thread *target_thread = NULL; struct binder_node *target_node = NULL; struct binder_transaction *in_reply_to = NULL; struct binder_transaction_log_entry *e; uint32_t return_error = 0; uint32_t return_error_param = 0; uint32_t return_error_line = 0; binder_size_t last_fixup_obj_off = 0; binder_size_t last_fixup_min_off = 0; struct binder_context *context = proc->context; int t_debug_id = atomic_inc_return(&binder_last_id); ktime_t t_start_time = ktime_get(); char *secctx = NULL; u32 secctx_sz = 0; struct list_head sgc_head; struct list_head pf_head; const void __user *user_buffer = (const void __user *) (uintptr_t)tr->data.ptr.buffer; INIT_LIST_HEAD(&sgc_head); INIT_LIST_HEAD(&pf_head); e = binder_transaction_log_add(&binder_transaction_log); e->debug_id = t_debug_id; e->call_type = reply ? 2 : !!(tr->flags & TF_ONE_WAY); e->from_proc = proc->pid; e->from_thread = thread->pid; e->target_handle = tr->target.handle; e->data_size = tr->data_size; e->offsets_size = tr->offsets_size; strscpy(e->context_name, proc->context->name, BINDERFS_MAX_NAME); binder_inner_proc_lock(proc); binder_set_extended_error(&thread->ee, t_debug_id, BR_OK, 0); binder_inner_proc_unlock(proc); if (reply) { binder_inner_proc_lock(proc); in_reply_to = thread->transaction_stack; if (in_reply_to == NULL) { binder_inner_proc_unlock(proc); binder_user_error("%d:%d got reply transaction with no transaction stack\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; return_error_param = -EPROTO; return_error_line = __LINE__; goto err_empty_call_stack; } if (in_reply_to->to_thread != thread) { spin_lock(&in_reply_to->lock); binder_user_error("%d:%d got reply transaction with bad transaction stack, transaction %d has target %d:%d\n", proc->pid, thread->pid, in_reply_to->debug_id, in_reply_to->to_proc ? in_reply_to->to_proc->pid : 0, in_reply_to->to_thread ? 
in_reply_to->to_thread->pid : 0); spin_unlock(&in_reply_to->lock); binder_inner_proc_unlock(proc); return_error = BR_FAILED_REPLY; return_error_param = -EPROTO; return_error_line = __LINE__; in_reply_to = NULL; goto err_bad_call_stack; } thread->transaction_stack = in_reply_to->to_parent; binder_inner_proc_unlock(proc); binder_set_nice(in_reply_to->saved_priority); target_thread = binder_get_txn_from_and_acq_inner(in_reply_to); if (target_thread == NULL) { /* annotation for sparse */ __release(&target_thread->proc->inner_lock); binder_txn_error("%d:%d reply target not found\n", thread->pid, proc->pid); return_error = BR_DEAD_REPLY; return_error_line = __LINE__; goto err_dead_binder; } if (target_thread->transaction_stack != in_reply_to) { binder_user_error("%d:%d got reply transaction with bad target transaction stack %d, expected %d\n", proc->pid, thread->pid, target_thread->transaction_stack ? target_thread->transaction_stack->debug_id : 0, in_reply_to->debug_id); binder_inner_proc_unlock(target_thread->proc); return_error = BR_FAILED_REPLY; return_error_param = -EPROTO; return_error_line = __LINE__; in_reply_to = NULL; target_thread = NULL; goto err_dead_binder; } target_proc = target_thread->proc; target_proc->tmp_ref++; binder_inner_proc_unlock(target_thread->proc); } else { if (tr->target.handle) { struct binder_ref *ref; /* * There must already be a strong ref * on this node. If so, do a strong * increment on the node to ensure it * stays alive until the transaction is * done. */ binder_proc_lock(proc); ref = binder_get_ref_olocked(proc, tr->target.handle, true); if (ref) { target_node = binder_get_node_refs_for_txn( ref->node, &target_proc, &return_error); } else { binder_user_error("%d:%d got transaction to invalid handle, %u\n", proc->pid, thread->pid, tr->target.handle); return_error = BR_FAILED_REPLY; } binder_proc_unlock(proc); } else { mutex_lock(&context->context_mgr_node_lock); target_node = context->binder_context_mgr_node; if (target_node) target_node = binder_get_node_refs_for_txn( target_node, &target_proc, &return_error); else return_error = BR_DEAD_REPLY; mutex_unlock(&context->context_mgr_node_lock); if (target_node && target_proc->pid == proc->pid) { binder_user_error("%d:%d got transaction to context manager from process owning it\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_invalid_target_handle; } } if (!target_node) { binder_txn_error("%d:%d cannot find target node\n", thread->pid, proc->pid); /* * return_error is set above */ return_error_param = -EINVAL; return_error_line = __LINE__; goto err_dead_binder; } e->to_node = target_node->debug_id; if (WARN_ON(proc == target_proc)) { binder_txn_error("%d:%d self transactions not allowed\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_invalid_target_handle; } if (security_binder_transaction(proc->cred, target_proc->cred) < 0) { binder_txn_error("%d:%d transaction credentials failed\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -EPERM; return_error_line = __LINE__; goto err_invalid_target_handle; } binder_inner_proc_lock(proc); w = list_first_entry_or_null(&thread->todo, struct binder_work, entry); if (!(tr->flags & TF_ONE_WAY) && w && w->type == BINDER_WORK_TRANSACTION) { /* * Do not allow new outgoing transaction from a * thread that has a transaction at the head of * its todo list. 
Only need to check the head * because binder_select_thread_ilocked picks a * thread from proc->waiting_threads to enqueue * the transaction, and nothing is queued to the * todo list while the thread is on waiting_threads. */ binder_user_error("%d:%d new transaction not allowed when there is a transaction on thread todo\n", proc->pid, thread->pid); binder_inner_proc_unlock(proc); return_error = BR_FAILED_REPLY; return_error_param = -EPROTO; return_error_line = __LINE__; goto err_bad_todo_list; } if (!(tr->flags & TF_ONE_WAY) && thread->transaction_stack) { struct binder_transaction *tmp; tmp = thread->transaction_stack; if (tmp->to_thread != thread) { spin_lock(&tmp->lock); binder_user_error("%d:%d got new transaction with bad transaction stack, transaction %d has target %d:%d\n", proc->pid, thread->pid, tmp->debug_id, tmp->to_proc ? tmp->to_proc->pid : 0, tmp->to_thread ? tmp->to_thread->pid : 0); spin_unlock(&tmp->lock); binder_inner_proc_unlock(proc); return_error = BR_FAILED_REPLY; return_error_param = -EPROTO; return_error_line = __LINE__; goto err_bad_call_stack; } while (tmp) { struct binder_thread *from; spin_lock(&tmp->lock); from = tmp->from; if (from && from->proc == target_proc) { atomic_inc(&from->tmp_ref); target_thread = from; spin_unlock(&tmp->lock); break; } spin_unlock(&tmp->lock); tmp = tmp->from_parent; } } binder_inner_proc_unlock(proc); } if (target_thread) e->to_thread = target_thread->pid; e->to_proc = target_proc->pid; /* TODO: reuse incoming transaction for reply */ t = kzalloc(sizeof(*t), GFP_KERNEL); if (t == NULL) { binder_txn_error("%d:%d cannot allocate transaction\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -ENOMEM; return_error_line = __LINE__; goto err_alloc_t_failed; } INIT_LIST_HEAD(&t->fd_fixups); binder_stats_created(BINDER_STAT_TRANSACTION); spin_lock_init(&t->lock); tcomplete = kzalloc(sizeof(*tcomplete), GFP_KERNEL); if (tcomplete == NULL) { binder_txn_error("%d:%d cannot allocate work for transaction\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -ENOMEM; return_error_line = __LINE__; goto err_alloc_tcomplete_failed; } binder_stats_created(BINDER_STAT_TRANSACTION_COMPLETE); t->debug_id = t_debug_id; t->start_time = t_start_time; if (reply) binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d BC_REPLY %d -> %d:%d, data %016llx-%016llx size %lld-%lld-%lld\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_thread->pid, (u64)tr->data.ptr.buffer, (u64)tr->data.ptr.offsets, (u64)tr->data_size, (u64)tr->offsets_size, (u64)extra_buffers_size); else binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d BC_TRANSACTION %d -> %d - node %d, data %016llx-%016llx size %lld-%lld-%lld\n", proc->pid, thread->pid, t->debug_id, target_proc->pid, target_node->debug_id, (u64)tr->data.ptr.buffer, (u64)tr->data.ptr.offsets, (u64)tr->data_size, (u64)tr->offsets_size, (u64)extra_buffers_size); if (!reply && !(tr->flags & TF_ONE_WAY)) t->from = thread; else t->from = NULL; t->from_pid = proc->pid; t->from_tid = thread->pid; t->sender_euid = task_euid(proc->tsk); t->to_proc = target_proc; t->to_thread = target_thread; t->code = tr->code; t->flags = tr->flags; t->priority = task_nice(current); if (target_node && target_node->txn_security_ctx) { u32 secid; size_t added_size; security_cred_getsecid(proc->cred, &secid); ret = security_secid_to_secctx(secid, &secctx, &secctx_sz); if (ret) { binder_txn_error("%d:%d failed to get security context\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; 
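/* ret carries the -errno from security_secid_to_secctx() */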
return_error_param = ret; return_error_line = __LINE__; goto err_get_secctx_failed; } added_size = ALIGN(secctx_sz, sizeof(u64)); extra_buffers_size += added_size; if (extra_buffers_size < added_size) { binder_txn_error("%d:%d integer overflow of extra_buffers_size\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_extra_size; } } trace_binder_transaction(reply, t, target_node); t->buffer = binder_alloc_new_buf(&target_proc->alloc, tr->data_size, tr->offsets_size, extra_buffers_size, !reply && (t->flags & TF_ONE_WAY), current->tgid); if (IS_ERR(t->buffer)) { char *s; ret = PTR_ERR(t->buffer); s = (ret == -ESRCH) ? ": vma cleared, target dead or dying" : (ret == -ENOSPC) ? ": no space left" : (ret == -ENOMEM) ? ": memory allocation failed" : ""; binder_txn_error("cannot allocate buffer%s", s); return_error_param = PTR_ERR(t->buffer); return_error = return_error_param == -ESRCH ? BR_DEAD_REPLY : BR_FAILED_REPLY; return_error_line = __LINE__; t->buffer = NULL; goto err_binder_alloc_buf_failed; } if (secctx) { int err; size_t buf_offset = ALIGN(tr->data_size, sizeof(void *)) + ALIGN(tr->offsets_size, sizeof(void *)) + ALIGN(extra_buffers_size, sizeof(void *)) - ALIGN(secctx_sz, sizeof(u64)); t->security_ctx = (uintptr_t)t->buffer->user_data + buf_offset; err = binder_alloc_copy_to_buffer(&target_proc->alloc, t->buffer, buf_offset, secctx, secctx_sz); if (err) { t->security_ctx = 0; WARN_ON(1); } security_release_secctx(secctx, secctx_sz); secctx = NULL; } t->buffer->debug_id = t->debug_id; t->buffer->transaction = t; t->buffer->target_node = target_node; t->buffer->clear_on_free = !!(t->flags & TF_CLEAR_BUF); trace_binder_transaction_alloc_buf(t->buffer); if (binder_alloc_copy_user_to_buffer( &target_proc->alloc, t->buffer, ALIGN(tr->data_size, sizeof(void *)), (const void __user *) (uintptr_t)tr->data.ptr.offsets, tr->offsets_size)) { binder_user_error("%d:%d got transaction with invalid offsets ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; return_error_param = -EFAULT; return_error_line = __LINE__; goto err_copy_data_failed; } if (!IS_ALIGNED(tr->offsets_size, sizeof(binder_size_t))) { binder_user_error("%d:%d got transaction with invalid offsets size, %lld\n", proc->pid, thread->pid, (u64)tr->offsets_size); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_offset; } if (!IS_ALIGNED(extra_buffers_size, sizeof(u64))) { binder_user_error("%d:%d got transaction with unaligned buffers size, %lld\n", proc->pid, thread->pid, (u64)extra_buffers_size); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_offset; } off_start_offset = ALIGN(tr->data_size, sizeof(void *)); buffer_offset = off_start_offset; off_end_offset = off_start_offset + tr->offsets_size; sg_buf_offset = ALIGN(off_end_offset, sizeof(void *)); sg_buf_end_offset = sg_buf_offset + extra_buffers_size - ALIGN(secctx_sz, sizeof(u64)); off_min = 0; for (buffer_offset = off_start_offset; buffer_offset < off_end_offset; buffer_offset += sizeof(binder_size_t)) { struct binder_object_header *hdr; size_t object_size; struct binder_object object; binder_size_t object_offset; binder_size_t copy_size; if (binder_alloc_copy_from_buffer(&target_proc->alloc, &object_offset, t->buffer, buffer_offset, sizeof(object_offset))) { binder_txn_error("%d:%d copy offset from buffer failed\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; 
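/* the offset entry could not be read back from the target buffer */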
return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_offset; } /* * Copy the source user buffer up to the next object * that will be processed. */ copy_size = object_offset - user_offset; if (copy_size && (user_offset > object_offset || binder_alloc_copy_user_to_buffer( &target_proc->alloc, t->buffer, user_offset, user_buffer + user_offset, copy_size))) { binder_user_error("%d:%d got transaction with invalid data ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; return_error_param = -EFAULT; return_error_line = __LINE__; goto err_copy_data_failed; } object_size = binder_get_object(target_proc, user_buffer, t->buffer, object_offset, &object); if (object_size == 0 || object_offset < off_min) { binder_user_error("%d:%d got transaction with invalid offset (%lld, min %lld max %lld) or object.\n", proc->pid, thread->pid, (u64)object_offset, (u64)off_min, (u64)t->buffer->data_size); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_offset; } /* * Set offset to the next buffer fragment to be * copied */ user_offset = object_offset + object_size; hdr = &object.hdr; off_min = object_offset + object_size; switch (hdr->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { struct flat_binder_object *fp; fp = to_flat_binder_object(hdr); ret = binder_translate_binder(fp, t, thread); if (ret < 0 || binder_alloc_copy_to_buffer(&target_proc->alloc, t->buffer, object_offset, fp, sizeof(*fp))) { binder_txn_error("%d:%d translate binder failed\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; goto err_translate_failed; } } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { struct flat_binder_object *fp; fp = to_flat_binder_object(hdr); ret = binder_translate_handle(fp, t, thread); if (ret < 0 || binder_alloc_copy_to_buffer(&target_proc->alloc, t->buffer, object_offset, fp, sizeof(*fp))) { binder_txn_error("%d:%d translate handle failed\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; goto err_translate_failed; } } break; case BINDER_TYPE_FD: { struct binder_fd_object *fp = to_binder_fd_object(hdr); binder_size_t fd_offset = object_offset + (uintptr_t)&fp->fd - (uintptr_t)fp; int ret = binder_translate_fd(fp->fd, fd_offset, t, thread, in_reply_to); fp->pad_binder = 0; if (ret < 0 || binder_alloc_copy_to_buffer(&target_proc->alloc, t->buffer, object_offset, fp, sizeof(*fp))) { binder_txn_error("%d:%d translate fd failed\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; goto err_translate_failed; } } break; case BINDER_TYPE_FDA: { struct binder_object ptr_object; binder_size_t parent_offset; struct binder_object user_object; size_t user_parent_size; struct binder_fd_array_object *fda = to_binder_fd_array_object(hdr); size_t num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t); struct binder_buffer_object *parent = binder_validate_ptr(target_proc, t->buffer, &ptr_object, fda->parent, off_start_offset, &parent_offset, num_valid); if (!parent) { binder_user_error("%d:%d got transaction with invalid parent offset or type\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_parent; } if (!binder_validate_fixup(target_proc, t->buffer, off_start_offset, parent_offset, fda->parent_offset, last_fixup_obj_off, 
last_fixup_min_off)) { binder_user_error("%d:%d got transaction with out-of-order buffer fixup\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_parent; } /* * We need to read the user version of the parent * object to get the original user offset */ user_parent_size = binder_get_object(proc, user_buffer, t->buffer, parent_offset, &user_object); if (user_parent_size != sizeof(user_object.bbo)) { binder_user_error("%d:%d invalid ptr object size: %zd vs %zd\n", proc->pid, thread->pid, user_parent_size, sizeof(user_object.bbo)); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_parent; } ret = binder_translate_fd_array(&pf_head, fda, user_buffer, parent, &user_object.bbo, t, thread, in_reply_to); if (!ret) ret = binder_alloc_copy_to_buffer(&target_proc->alloc, t->buffer, object_offset, fda, sizeof(*fda)); if (ret) { binder_txn_error("%d:%d translate fd array failed\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret > 0 ? -EINVAL : ret; return_error_line = __LINE__; goto err_translate_failed; } last_fixup_obj_off = parent_offset; last_fixup_min_off = fda->parent_offset + sizeof(u32) * fda->num_fds; } break; case BINDER_TYPE_PTR: { struct binder_buffer_object *bp = to_binder_buffer_object(hdr); size_t buf_left = sg_buf_end_offset - sg_buf_offset; size_t num_valid; if (bp->length > buf_left) { binder_user_error("%d:%d got transaction with too large buffer\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_offset; } ret = binder_defer_copy(&sgc_head, sg_buf_offset, (const void __user *)(uintptr_t)bp->buffer, bp->length); if (ret) { binder_txn_error("%d:%d deferred copy failed\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; goto err_translate_failed; } /* Fixup buffer pointer to target proc address space */ bp->buffer = (uintptr_t) t->buffer->user_data + sg_buf_offset; sg_buf_offset += ALIGN(bp->length, sizeof(u64)); num_valid = (buffer_offset - off_start_offset) / sizeof(binder_size_t); ret = binder_fixup_parent(&pf_head, t, thread, bp, off_start_offset, num_valid, last_fixup_obj_off, last_fixup_min_off); if (ret < 0 || binder_alloc_copy_to_buffer(&target_proc->alloc, t->buffer, object_offset, bp, sizeof(*bp))) { binder_txn_error("%d:%d failed to fixup parent\n", thread->pid, proc->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; goto err_translate_failed; } last_fixup_obj_off = object_offset; last_fixup_min_off = 0; } break; default: binder_user_error("%d:%d got transaction with invalid object type, %x\n", proc->pid, thread->pid, hdr->type); return_error = BR_FAILED_REPLY; return_error_param = -EINVAL; return_error_line = __LINE__; goto err_bad_object_type; } } /* Done processing objects, copy the rest of the buffer */ if (binder_alloc_copy_user_to_buffer( &target_proc->alloc, t->buffer, user_offset, user_buffer + user_offset, tr->data_size - user_offset)) { binder_user_error("%d:%d got transaction with invalid data ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; return_error_param = -EFAULT; return_error_line = __LINE__; goto err_copy_data_failed; } ret = binder_do_deferred_txn_copies(&target_proc->alloc, t->buffer, &sgc_head, &pf_head); if (ret) { binder_user_error("%d:%d got transaction with invalid offsets 
ptr\n", proc->pid, thread->pid); return_error = BR_FAILED_REPLY; return_error_param = ret; return_error_line = __LINE__; goto err_copy_data_failed; } if (t->buffer->oneway_spam_suspect) tcomplete->type = BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT; else tcomplete->type = BINDER_WORK_TRANSACTION_COMPLETE; t->work.type = BINDER_WORK_TRANSACTION; if (reply) { binder_enqueue_thread_work(thread, tcomplete); binder_inner_proc_lock(target_proc); if (target_thread->is_dead) { return_error = BR_DEAD_REPLY; binder_inner_proc_unlock(target_proc); goto err_dead_proc_or_thread; } BUG_ON(t->buffer->async_transaction != 0); binder_pop_transaction_ilocked(target_thread, in_reply_to); binder_enqueue_thread_work_ilocked(target_thread, &t->work); target_proc->outstanding_txns++; binder_inner_proc_unlock(target_proc); wake_up_interruptible_sync(&target_thread->wait); binder_free_transaction(in_reply_to); } else if (!(t->flags & TF_ONE_WAY)) { BUG_ON(t->buffer->async_transaction != 0); binder_inner_proc_lock(proc); /* * Defer the TRANSACTION_COMPLETE, so we don't return to * userspace immediately; this allows the target process to * immediately start processing this transaction, reducing * latency. We will then return the TRANSACTION_COMPLETE when * the target replies (or there is an error). */ binder_enqueue_deferred_thread_work_ilocked(thread, tcomplete); t->need_reply = 1; t->from_parent = thread->transaction_stack; thread->transaction_stack = t; binder_inner_proc_unlock(proc); return_error = binder_proc_transaction(t, target_proc, target_thread); if (return_error) { binder_inner_proc_lock(proc); binder_pop_transaction_ilocked(thread, t); binder_inner_proc_unlock(proc); goto err_dead_proc_or_thread; } } else { BUG_ON(target_node == NULL); BUG_ON(t->buffer->async_transaction != 1); return_error = binder_proc_transaction(t, target_proc, NULL); /* * Let the caller know when async transaction reaches a frozen * process and is put in a pending queue, waiting for the target * process to be unfrozen. 
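 * The completion work item is retyped below so that user space
 * receives BR_TRANSACTION_PENDING_FROZEN instead of
 * BR_TRANSACTION_COMPLETE.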
*/ if (return_error == BR_TRANSACTION_PENDING_FROZEN) tcomplete->type = BINDER_WORK_TRANSACTION_PENDING; binder_enqueue_thread_work(thread, tcomplete); if (return_error && return_error != BR_TRANSACTION_PENDING_FROZEN) goto err_dead_proc_or_thread; } if (target_thread) binder_thread_dec_tmpref(target_thread); binder_proc_dec_tmpref(target_proc); if (target_node) binder_dec_node_tmpref(target_node); /* * write barrier to synchronize with initialization * of log entry */ smp_wmb(); WRITE_ONCE(e->debug_id_done, t_debug_id); return; err_dead_proc_or_thread: binder_txn_error("%d:%d dead process or thread\n", thread->pid, proc->pid); return_error_line = __LINE__; binder_dequeue_work(proc, tcomplete); err_translate_failed: err_bad_object_type: err_bad_offset: err_bad_parent: err_copy_data_failed: binder_cleanup_deferred_txn_lists(&sgc_head, &pf_head); binder_free_txn_fixups(t); trace_binder_transaction_failed_buffer_release(t->buffer); binder_transaction_buffer_release(target_proc, NULL, t->buffer, buffer_offset, true); if (target_node) binder_dec_node_tmpref(target_node); target_node = NULL; t->buffer->transaction = NULL; binder_alloc_free_buf(&target_proc->alloc, t->buffer); err_binder_alloc_buf_failed: err_bad_extra_size: if (secctx) security_release_secctx(secctx, secctx_sz); err_get_secctx_failed: kfree(tcomplete); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); err_alloc_tcomplete_failed: if (trace_binder_txn_latency_free_enabled()) binder_txn_latency_free(t); kfree(t); binder_stats_deleted(BINDER_STAT_TRANSACTION); err_alloc_t_failed: err_bad_todo_list: err_bad_call_stack: err_empty_call_stack: err_dead_binder: err_invalid_target_handle: if (target_node) { binder_dec_node(target_node, 1, 0); binder_dec_node_tmpref(target_node); } binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d transaction %s to %d:%d failed %d/%d/%d, size %lld-%lld line %d\n", proc->pid, thread->pid, reply ? "reply" : (tr->flags & TF_ONE_WAY ? "async" : "call"), target_proc ? target_proc->pid : 0, target_thread ? target_thread->pid : 0, t_debug_id, return_error, return_error_param, (u64)tr->data_size, (u64)tr->offsets_size, return_error_line); if (target_thread) binder_thread_dec_tmpref(target_thread); if (target_proc) binder_proc_dec_tmpref(target_proc); { struct binder_transaction_log_entry *fe; e->return_error = return_error; e->return_error_param = return_error_param; e->return_error_line = return_error_line; fe = binder_transaction_log_add(&binder_transaction_log_failed); *fe = *e; /* * write barrier to synchronize with initialization * of log entry */ smp_wmb(); WRITE_ONCE(e->debug_id_done, t_debug_id); WRITE_ONCE(fe->debug_id_done, t_debug_id); } BUG_ON(thread->return_error.cmd != BR_OK); if (in_reply_to) { binder_set_txn_from_error(in_reply_to, t_debug_id, return_error, return_error_param); thread->return_error.cmd = BR_TRANSACTION_COMPLETE; binder_enqueue_thread_work(thread, &thread->return_error.work); binder_send_failed_reply(in_reply_to, return_error); } else { binder_inner_proc_lock(proc); binder_set_extended_error(&thread->ee, t_debug_id, return_error, return_error_param); binder_inner_proc_unlock(proc); thread->return_error.cmd = return_error; binder_enqueue_thread_work(thread, &thread->return_error.work); } } /** * binder_free_buf() - free the specified buffer * @proc: binder proc that owns buffer * @buffer: buffer to be freed * @is_failure: failed to send transaction * * If buffer for an async transaction, enqueue the next async * transaction from the node. * * Cleanup buffer and free it. 
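 * @thread: binder thread performing the buffer release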
*/ static void binder_free_buf(struct binder_proc *proc, struct binder_thread *thread, struct binder_buffer *buffer, bool is_failure) { binder_inner_proc_lock(proc); if (buffer->transaction) { buffer->transaction->buffer = NULL; buffer->transaction = NULL; } binder_inner_proc_unlock(proc); if (buffer->async_transaction && buffer->target_node) { struct binder_node *buf_node; struct binder_work *w; buf_node = buffer->target_node; binder_node_inner_lock(buf_node); BUG_ON(!buf_node->has_async_transaction); BUG_ON(buf_node->proc != proc); w = binder_dequeue_work_head_ilocked( &buf_node->async_todo); if (!w) { buf_node->has_async_transaction = false; } else { binder_enqueue_work_ilocked( w, &proc->todo); binder_wakeup_proc_ilocked(proc); } binder_node_inner_unlock(buf_node); } trace_binder_transaction_buffer_release(buffer); binder_release_entire_buffer(proc, thread, buffer, is_failure); binder_alloc_free_buf(&proc->alloc, buffer); } static int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed) { uint32_t cmd; struct binder_context *context = proc->context; void __user *buffer = (void __user *)(uintptr_t)binder_buffer; void __user *ptr = buffer + *consumed; void __user *end = buffer + size; while (ptr < end && thread->return_error.cmd == BR_OK) { int ret; if (get_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); trace_binder_command(cmd); if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.bc)) { atomic_inc(&binder_stats.bc[_IOC_NR(cmd)]); atomic_inc(&proc->stats.bc[_IOC_NR(cmd)]); atomic_inc(&thread->stats.bc[_IOC_NR(cmd)]); } switch (cmd) { case BC_INCREFS: case BC_ACQUIRE: case BC_RELEASE: case BC_DECREFS: { uint32_t target; const char *debug_string; bool strong = cmd == BC_ACQUIRE || cmd == BC_RELEASE; bool increment = cmd == BC_INCREFS || cmd == BC_ACQUIRE; struct binder_ref_data rdata; if (get_user(target, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); ret = -1; if (increment && !target) { struct binder_node *ctx_mgr_node; mutex_lock(&context->context_mgr_node_lock); ctx_mgr_node = context->binder_context_mgr_node; if (ctx_mgr_node) { if (ctx_mgr_node->proc == proc) { binder_user_error("%d:%d context manager tried to acquire desc 0\n", proc->pid, thread->pid); mutex_unlock(&context->context_mgr_node_lock); return -EINVAL; } ret = binder_inc_ref_for_node( proc, ctx_mgr_node, strong, NULL, &rdata); } mutex_unlock(&context->context_mgr_node_lock); } if (ret) ret = binder_update_ref_for_handle( proc, target, increment, strong, &rdata); if (!ret && rdata.desc != target) { binder_user_error("%d:%d tried to acquire reference to desc %d, got %d instead\n", proc->pid, thread->pid, target, rdata.desc); } switch (cmd) { case BC_INCREFS: debug_string = "IncRefs"; break; case BC_ACQUIRE: debug_string = "Acquire"; break; case BC_RELEASE: debug_string = "Release"; break; case BC_DECREFS: default: debug_string = "DecRefs"; break; } if (ret) { binder_user_error("%d:%d %s %d refcount change on invalid ref %d ret %d\n", proc->pid, thread->pid, debug_string, strong, target, ret); break; } binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s ref %d desc %d s %d w %d\n", proc->pid, thread->pid, debug_string, rdata.debug_id, rdata.desc, rdata.strong, rdata.weak); break; } case BC_INCREFS_DONE: case BC_ACQUIRE_DONE: { binder_uintptr_t node_ptr; binder_uintptr_t cookie; struct binder_node *node; bool free_node; if (get_user(node_ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += 
sizeof(binder_uintptr_t); if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); node = binder_get_node(proc, node_ptr); if (node == NULL) { binder_user_error("%d:%d %s u%016llx no match\n", proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", (u64)node_ptr); break; } if (cookie != node->cookie) { binder_user_error("%d:%d %s u%016llx node %d cookie mismatch %016llx != %016llx\n", proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", (u64)node_ptr, node->debug_id, (u64)cookie, (u64)node->cookie); binder_put_node(node); break; } binder_node_inner_lock(node); if (cmd == BC_ACQUIRE_DONE) { if (node->pending_strong_ref == 0) { binder_user_error("%d:%d BC_ACQUIRE_DONE node %d has no pending acquire request\n", proc->pid, thread->pid, node->debug_id); binder_node_inner_unlock(node); binder_put_node(node); break; } node->pending_strong_ref = 0; } else { if (node->pending_weak_ref == 0) { binder_user_error("%d:%d BC_INCREFS_DONE node %d has no pending increfs request\n", proc->pid, thread->pid, node->debug_id); binder_node_inner_unlock(node); binder_put_node(node); break; } node->pending_weak_ref = 0; } free_node = binder_dec_node_nilocked(node, cmd == BC_ACQUIRE_DONE, 0); WARN_ON(free_node); binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s node %d ls %d lw %d tr %d\n", proc->pid, thread->pid, cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", node->debug_id, node->local_strong_refs, node->local_weak_refs, node->tmp_refs); binder_node_inner_unlock(node); binder_put_node(node); break; } case BC_ATTEMPT_ACQUIRE: pr_err("BC_ATTEMPT_ACQUIRE not supported\n"); return -EINVAL; case BC_ACQUIRE_RESULT: pr_err("BC_ACQUIRE_RESULT not supported\n"); return -EINVAL; case BC_FREE_BUFFER: { binder_uintptr_t data_ptr; struct binder_buffer *buffer; if (get_user(data_ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); buffer = binder_alloc_prepare_to_free(&proc->alloc, data_ptr); if (IS_ERR_OR_NULL(buffer)) { if (PTR_ERR(buffer) == -EPERM) { binder_user_error( "%d:%d BC_FREE_BUFFER u%016llx matched unreturned or currently freeing buffer\n", proc->pid, thread->pid, (u64)data_ptr); } else { binder_user_error( "%d:%d BC_FREE_BUFFER u%016llx no match\n", proc->pid, thread->pid, (u64)data_ptr); } break; } binder_debug(BINDER_DEBUG_FREE_BUFFER, "%d:%d BC_FREE_BUFFER u%016llx found buffer %d for %s transaction\n", proc->pid, thread->pid, (u64)data_ptr, buffer->debug_id, buffer->transaction ? 
"active" : "finished"); binder_free_buf(proc, thread, buffer, false); break; } case BC_TRANSACTION_SG: case BC_REPLY_SG: { struct binder_transaction_data_sg tr; if (copy_from_user(&tr, ptr, sizeof(tr))) return -EFAULT; ptr += sizeof(tr); binder_transaction(proc, thread, &tr.transaction_data, cmd == BC_REPLY_SG, tr.buffers_size); break; } case BC_TRANSACTION: case BC_REPLY: { struct binder_transaction_data tr; if (copy_from_user(&tr, ptr, sizeof(tr))) return -EFAULT; ptr += sizeof(tr); binder_transaction(proc, thread, &tr, cmd == BC_REPLY, 0); break; } case BC_REGISTER_LOOPER: binder_debug(BINDER_DEBUG_THREADS, "%d:%d BC_REGISTER_LOOPER\n", proc->pid, thread->pid); binder_inner_proc_lock(proc); if (thread->looper & BINDER_LOOPER_STATE_ENTERED) { thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called after BC_ENTER_LOOPER\n", proc->pid, thread->pid); } else if (proc->requested_threads == 0) { thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_user_error("%d:%d ERROR: BC_REGISTER_LOOPER called without request\n", proc->pid, thread->pid); } else { proc->requested_threads--; proc->requested_threads_started++; } thread->looper |= BINDER_LOOPER_STATE_REGISTERED; binder_inner_proc_unlock(proc); break; case BC_ENTER_LOOPER: binder_debug(BINDER_DEBUG_THREADS, "%d:%d BC_ENTER_LOOPER\n", proc->pid, thread->pid); if (thread->looper & BINDER_LOOPER_STATE_REGISTERED) { thread->looper |= BINDER_LOOPER_STATE_INVALID; binder_user_error("%d:%d ERROR: BC_ENTER_LOOPER called after BC_REGISTER_LOOPER\n", proc->pid, thread->pid); } thread->looper |= BINDER_LOOPER_STATE_ENTERED; break; case BC_EXIT_LOOPER: binder_debug(BINDER_DEBUG_THREADS, "%d:%d BC_EXIT_LOOPER\n", proc->pid, thread->pid); thread->looper |= BINDER_LOOPER_STATE_EXITED; break; case BC_REQUEST_DEATH_NOTIFICATION: case BC_CLEAR_DEATH_NOTIFICATION: { uint32_t target; binder_uintptr_t cookie; struct binder_ref *ref; struct binder_ref_death *death = NULL; if (get_user(target, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { /* * Allocate memory for death notification * before taking lock */ death = kzalloc(sizeof(*death), GFP_KERNEL); if (death == NULL) { WARN_ON(thread->return_error.cmd != BR_OK); thread->return_error.cmd = BR_ERROR; binder_enqueue_thread_work( thread, &thread->return_error.work); binder_debug( BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d BC_REQUEST_DEATH_NOTIFICATION failed\n", proc->pid, thread->pid); break; } } binder_proc_lock(proc); ref = binder_get_ref_olocked(proc, target, false); if (ref == NULL) { binder_user_error("%d:%d %s invalid ref %d\n", proc->pid, thread->pid, cmd == BC_REQUEST_DEATH_NOTIFICATION ? "BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", target); binder_proc_unlock(proc); kfree(death); break; } binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "%d:%d %s %016llx ref %d desc %d s %d w %d for node %d\n", proc->pid, thread->pid, cmd == BC_REQUEST_DEATH_NOTIFICATION ? 
"BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", (u64)cookie, ref->data.debug_id, ref->data.desc, ref->data.strong, ref->data.weak, ref->node->debug_id); binder_node_lock(ref->node); if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { if (ref->death) { binder_user_error("%d:%d BC_REQUEST_DEATH_NOTIFICATION death notification already set\n", proc->pid, thread->pid); binder_node_unlock(ref->node); binder_proc_unlock(proc); kfree(death); break; } binder_stats_created(BINDER_STAT_DEATH); INIT_LIST_HEAD(&death->work.entry); death->cookie = cookie; ref->death = death; if (ref->node->proc == NULL) { ref->death->work.type = BINDER_WORK_DEAD_BINDER; binder_inner_proc_lock(proc); binder_enqueue_work_ilocked( &ref->death->work, &proc->todo); binder_wakeup_proc_ilocked(proc); binder_inner_proc_unlock(proc); } } else { if (ref->death == NULL) { binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification not active\n", proc->pid, thread->pid); binder_node_unlock(ref->node); binder_proc_unlock(proc); break; } death = ref->death; if (death->cookie != cookie) { binder_user_error("%d:%d BC_CLEAR_DEATH_NOTIFICATION death notification cookie mismatch %016llx != %016llx\n", proc->pid, thread->pid, (u64)death->cookie, (u64)cookie); binder_node_unlock(ref->node); binder_proc_unlock(proc); break; } ref->death = NULL; binder_inner_proc_lock(proc); if (list_empty(&death->work.entry)) { death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) binder_enqueue_thread_work_ilocked( thread, &death->work); else { binder_enqueue_work_ilocked( &death->work, &proc->todo); binder_wakeup_proc_ilocked( proc); } } else { BUG_ON(death->work.type != BINDER_WORK_DEAD_BINDER); death->work.type = BINDER_WORK_DEAD_BINDER_AND_CLEAR; } binder_inner_proc_unlock(proc); } binder_node_unlock(ref->node); binder_proc_unlock(proc); } break; case BC_DEAD_BINDER_DONE: { struct binder_work *w; binder_uintptr_t cookie; struct binder_ref_death *death = NULL; if (get_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(cookie); binder_inner_proc_lock(proc); list_for_each_entry(w, &proc->delivered_death, entry) { struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work); if (tmp_death->cookie == cookie) { death = tmp_death; break; } } binder_debug(BINDER_DEBUG_DEAD_BINDER, "%d:%d BC_DEAD_BINDER_DONE %016llx found %pK\n", proc->pid, thread->pid, (u64)cookie, death); if (death == NULL) { binder_user_error("%d:%d BC_DEAD_BINDER_DONE %016llx not found\n", proc->pid, thread->pid, (u64)cookie); binder_inner_proc_unlock(proc); break; } binder_dequeue_work_ilocked(&death->work); if (death->work.type == BINDER_WORK_DEAD_BINDER_AND_CLEAR) { death->work.type = BINDER_WORK_CLEAR_DEATH_NOTIFICATION; if (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) binder_enqueue_thread_work_ilocked( thread, &death->work); else { binder_enqueue_work_ilocked( &death->work, &proc->todo); binder_wakeup_proc_ilocked(proc); } } binder_inner_proc_unlock(proc); } break; default: pr_err("%d:%d unknown command %u\n", proc->pid, thread->pid, cmd); return -EINVAL; } *consumed = ptr - buffer; } return 0; } static void binder_stat_br(struct binder_proc *proc, struct binder_thread *thread, uint32_t cmd) { trace_binder_return(cmd); if (_IOC_NR(cmd) < ARRAY_SIZE(binder_stats.br)) { atomic_inc(&binder_stats.br[_IOC_NR(cmd)]); atomic_inc(&proc->stats.br[_IOC_NR(cmd)]); 
atomic_inc(&thread->stats.br[_IOC_NR(cmd)]); } } static int binder_put_node_cmd(struct binder_proc *proc, struct binder_thread *thread, void __user **ptrp, binder_uintptr_t node_ptr, binder_uintptr_t node_cookie, int node_debug_id, uint32_t cmd, const char *cmd_name) { void __user *ptr = *ptrp; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (put_user(node_ptr, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); if (put_user(node_cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_USER_REFS, "%d:%d %s %d u%016llx c%016llx\n", proc->pid, thread->pid, cmd_name, node_debug_id, (u64)node_ptr, (u64)node_cookie); *ptrp = ptr; return 0; } static int binder_wait_for_work(struct binder_thread *thread, bool do_proc_work) { DEFINE_WAIT(wait); struct binder_proc *proc = thread->proc; int ret = 0; binder_inner_proc_lock(proc); for (;;) { prepare_to_wait(&thread->wait, &wait, TASK_INTERRUPTIBLE|TASK_FREEZABLE); if (binder_has_work_ilocked(thread, do_proc_work)) break; if (do_proc_work) list_add(&thread->waiting_thread_node, &proc->waiting_threads); binder_inner_proc_unlock(proc); schedule(); binder_inner_proc_lock(proc); list_del_init(&thread->waiting_thread_node); if (signal_pending(current)) { ret = -EINTR; break; } } finish_wait(&thread->wait, &wait); binder_inner_proc_unlock(proc); return ret; } /** * binder_apply_fd_fixups() - finish fd translation * @proc: binder_proc associated @t->buffer * @t: binder transaction with list of fd fixups * * Now that we are in the context of the transaction target * process, we can allocate and install fds. Process the * list of fds to translate and fixup the buffer with the * new fds first and only then install the files. * * If we fail to allocate an fd, skip the install and release * any fds that have already been allocated. 
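 * The split matters because fd_install() cannot be undone: once a file
 * is installed, the fd is visible to user space.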
*/ static int binder_apply_fd_fixups(struct binder_proc *proc, struct binder_transaction *t) { struct binder_txn_fd_fixup *fixup, *tmp; int ret = 0; list_for_each_entry(fixup, &t->fd_fixups, fixup_entry) { int fd = get_unused_fd_flags(O_CLOEXEC); if (fd < 0) { binder_debug(BINDER_DEBUG_TRANSACTION, "failed fd fixup txn %d fd %d\n", t->debug_id, fd); ret = -ENOMEM; goto err; } binder_debug(BINDER_DEBUG_TRANSACTION, "fd fixup txn %d fd %d\n", t->debug_id, fd); trace_binder_transaction_fd_recv(t, fd, fixup->offset); fixup->target_fd = fd; if (binder_alloc_copy_to_buffer(&proc->alloc, t->buffer, fixup->offset, &fd, sizeof(u32))) { ret = -EINVAL; goto err; } } list_for_each_entry_safe(fixup, tmp, &t->fd_fixups, fixup_entry) { fd_install(fixup->target_fd, fixup->file); list_del(&fixup->fixup_entry); kfree(fixup); } return ret; err: binder_free_txn_fixups(t); return ret; } static int binder_thread_read(struct binder_proc *proc, struct binder_thread *thread, binder_uintptr_t binder_buffer, size_t size, binder_size_t *consumed, int non_block) { void __user *buffer = (void __user *)(uintptr_t)binder_buffer; void __user *ptr = buffer + *consumed; void __user *end = buffer + size; int ret = 0; int wait_for_proc_work; if (*consumed == 0) { if (put_user(BR_NOOP, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); } retry: binder_inner_proc_lock(proc); wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); binder_inner_proc_unlock(proc); thread->looper |= BINDER_LOOPER_STATE_WAITING; trace_binder_wait_for_work(wait_for_proc_work, !!thread->transaction_stack, !binder_worklist_empty(proc, &thread->todo)); if (wait_for_proc_work) { if (!(thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED))) { binder_user_error("%d:%d ERROR: Thread waiting for process work before calling BC_REGISTER_LOOPER or BC_ENTER_LOOPER (state %x)\n", proc->pid, thread->pid, thread->looper); wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); } binder_set_nice(proc->default_priority); } if (non_block) { if (!binder_has_work(thread, wait_for_proc_work)) ret = -EAGAIN; } else { ret = binder_wait_for_work(thread, wait_for_proc_work); } thread->looper &= ~BINDER_LOOPER_STATE_WAITING; if (ret) return ret; while (1) { uint32_t cmd; struct binder_transaction_data_secctx tr; struct binder_transaction_data *trd = &tr.transaction_data; struct binder_work *w = NULL; struct list_head *list = NULL; struct binder_transaction *t = NULL; struct binder_thread *t_from; size_t trsize = sizeof(*trd); binder_inner_proc_lock(proc); if (!binder_worklist_empty_ilocked(&thread->todo)) list = &thread->todo; else if (!binder_worklist_empty_ilocked(&proc->todo) && wait_for_proc_work) list = &proc->todo; else { binder_inner_proc_unlock(proc); /* no data added */ if (ptr - buffer == 4 && !thread->looper_need_return) goto retry; break; } if (end - ptr < sizeof(tr) + 4) { binder_inner_proc_unlock(proc); break; } w = binder_dequeue_work_head_ilocked(list); if (binder_worklist_empty_ilocked(&thread->todo)) thread->process_todo = false; switch (w->type) { case BINDER_WORK_TRANSACTION: { binder_inner_proc_unlock(proc); t = container_of(w, struct binder_transaction, work); } break; case BINDER_WORK_RETURN_ERROR: { struct binder_error *e = container_of( w, struct binder_error, work); WARN_ON(e->cmd == BR_OK); binder_inner_proc_unlock(proc); if (put_user(e->cmd, (uint32_t __user *)ptr)) return -EFAULT; cmd = e->cmd; e->cmd = BR_OK; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, cmd); 
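/* e->cmd was reset to BR_OK above, so the error slot can be reused */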
} break; case BINDER_WORK_TRANSACTION_COMPLETE: case BINDER_WORK_TRANSACTION_PENDING: case BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT: { if (proc->oneway_spam_detection_enabled && w->type == BINDER_WORK_TRANSACTION_ONEWAY_SPAM_SUSPECT) cmd = BR_ONEWAY_SPAM_SUSPECT; else if (w->type == BINDER_WORK_TRANSACTION_PENDING) cmd = BR_TRANSACTION_PENDING_FROZEN; else cmd = BR_TRANSACTION_COMPLETE; binder_inner_proc_unlock(proc); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION_COMPLETE, "%d:%d BR_TRANSACTION_COMPLETE\n", proc->pid, thread->pid); } break; case BINDER_WORK_NODE: { struct binder_node *node = container_of(w, struct binder_node, work); int strong, weak; binder_uintptr_t node_ptr = node->ptr; binder_uintptr_t node_cookie = node->cookie; int node_debug_id = node->debug_id; int has_weak_ref; int has_strong_ref; void __user *orig_ptr = ptr; BUG_ON(proc != node->proc); strong = node->internal_strong_refs || node->local_strong_refs; weak = !hlist_empty(&node->refs) || node->local_weak_refs || node->tmp_refs || strong; has_strong_ref = node->has_strong_ref; has_weak_ref = node->has_weak_ref; if (weak && !has_weak_ref) { node->has_weak_ref = 1; node->pending_weak_ref = 1; node->local_weak_refs++; } if (strong && !has_strong_ref) { node->has_strong_ref = 1; node->pending_strong_ref = 1; node->local_strong_refs++; } if (!strong && has_strong_ref) node->has_strong_ref = 0; if (!weak && has_weak_ref) node->has_weak_ref = 0; if (!weak && !strong) { binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx deleted\n", proc->pid, thread->pid, node_debug_id, (u64)node_ptr, (u64)node_cookie); rb_erase(&node->rb_node, &proc->nodes); binder_inner_proc_unlock(proc); binder_node_lock(node); /* * Acquire the node lock before freeing the * node to serialize with other threads that * may have been holding the node lock while * decrementing this node (avoids race where * this thread frees while the other thread * is unlocking the node after the final * decrement) */ binder_node_unlock(node); binder_free_node(node); } else binder_inner_proc_unlock(proc); if (weak && !has_weak_ref) ret = binder_put_node_cmd( proc, thread, &ptr, node_ptr, node_cookie, node_debug_id, BR_INCREFS, "BR_INCREFS"); if (!ret && strong && !has_strong_ref) ret = binder_put_node_cmd( proc, thread, &ptr, node_ptr, node_cookie, node_debug_id, BR_ACQUIRE, "BR_ACQUIRE"); if (!ret && !strong && has_strong_ref) ret = binder_put_node_cmd( proc, thread, &ptr, node_ptr, node_cookie, node_debug_id, BR_RELEASE, "BR_RELEASE"); if (!ret && !weak && has_weak_ref) ret = binder_put_node_cmd( proc, thread, &ptr, node_ptr, node_cookie, node_debug_id, BR_DECREFS, "BR_DECREFS"); if (orig_ptr == ptr) binder_debug(BINDER_DEBUG_INTERNAL_REFS, "%d:%d node %d u%016llx c%016llx state unchanged\n", proc->pid, thread->pid, node_debug_id, (u64)node_ptr, (u64)node_cookie); if (ret) return ret; } break; case BINDER_WORK_DEAD_BINDER: case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; uint32_t cmd; binder_uintptr_t cookie; death = container_of(w, struct binder_ref_death, work); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) cmd = BR_CLEAR_DEATH_NOTIFICATION_DONE; else cmd = BR_DEAD_BINDER; cookie = death->cookie; binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "%d:%d %s %016llx\n", proc->pid, thread->pid, cmd == 
BR_DEAD_BINDER ? "BR_DEAD_BINDER" : "BR_CLEAR_DEATH_NOTIFICATION_DONE", (u64)cookie); if (w->type == BINDER_WORK_CLEAR_DEATH_NOTIFICATION) { binder_inner_proc_unlock(proc); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } else { binder_enqueue_work_ilocked( w, &proc->delivered_death); binder_inner_proc_unlock(proc); } if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); if (put_user(cookie, (binder_uintptr_t __user *)ptr)) return -EFAULT; ptr += sizeof(binder_uintptr_t); binder_stat_br(proc, thread, cmd); if (cmd == BR_DEAD_BINDER) goto done; /* DEAD_BINDER notifications can cause transactions */ } break; default: binder_inner_proc_unlock(proc); pr_err("%d:%d: bad work type %d\n", proc->pid, thread->pid, w->type); break; } if (!t) continue; BUG_ON(t->buffer == NULL); if (t->buffer->target_node) { struct binder_node *target_node = t->buffer->target_node; trd->target.ptr = target_node->ptr; trd->cookie = target_node->cookie; t->saved_priority = task_nice(current); if (t->priority < target_node->min_priority && !(t->flags & TF_ONE_WAY)) binder_set_nice(t->priority); else if (!(t->flags & TF_ONE_WAY) || t->saved_priority > target_node->min_priority) binder_set_nice(target_node->min_priority); cmd = BR_TRANSACTION; } else { trd->target.ptr = 0; trd->cookie = 0; cmd = BR_REPLY; } trd->code = t->code; trd->flags = t->flags; trd->sender_euid = from_kuid(current_user_ns(), t->sender_euid); t_from = binder_get_txn_from(t); if (t_from) { struct task_struct *sender = t_from->proc->tsk; trd->sender_pid = task_tgid_nr_ns(sender, task_active_pid_ns(current)); } else { trd->sender_pid = 0; } ret = binder_apply_fd_fixups(proc, t); if (ret) { struct binder_buffer *buffer = t->buffer; bool oneway = !!(t->flags & TF_ONE_WAY); int tid = t->debug_id; if (t_from) binder_thread_dec_tmpref(t_from); buffer->transaction = NULL; binder_cleanup_transaction(t, "fd fixups failed", BR_FAILED_REPLY); binder_free_buf(proc, thread, buffer, true); binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "%d:%d %stransaction %d fd fixups failed %d/%d, line %d\n", proc->pid, thread->pid, oneway ? "async " : (cmd == BR_REPLY ? "reply " : ""), tid, BR_FAILED_REPLY, ret, __LINE__); if (cmd == BR_REPLY) { cmd = BR_FAILED_REPLY; if (put_user(cmd, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); binder_stat_br(proc, thread, cmd); break; } continue; } trd->data_size = t->buffer->data_size; trd->offsets_size = t->buffer->offsets_size; trd->data.ptr.buffer = (uintptr_t)t->buffer->user_data; trd->data.ptr.offsets = trd->data.ptr.buffer + ALIGN(t->buffer->data_size, sizeof(void *)); tr.secctx = t->security_ctx; if (t->security_ctx) { cmd = BR_TRANSACTION_SEC_CTX; trsize = sizeof(tr); } if (put_user(cmd, (uint32_t __user *)ptr)) { if (t_from) binder_thread_dec_tmpref(t_from); binder_cleanup_transaction(t, "put_user failed", BR_FAILED_REPLY); return -EFAULT; } ptr += sizeof(uint32_t); if (copy_to_user(ptr, &tr, trsize)) { if (t_from) binder_thread_dec_tmpref(t_from); binder_cleanup_transaction(t, "copy_to_user failed", BR_FAILED_REPLY); return -EFAULT; } ptr += trsize; trace_binder_transaction_received(t); binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_TRANSACTION, "%d:%d %s %d %d:%d, cmd %u size %zd-%zd ptr %016llx-%016llx\n", proc->pid, thread->pid, (cmd == BR_TRANSACTION) ? "BR_TRANSACTION" : (cmd == BR_TRANSACTION_SEC_CTX) ? "BR_TRANSACTION_SEC_CTX" : "BR_REPLY", t->debug_id, t_from ? t_from->proc->pid : 0, t_from ? 
t_from->pid : 0, cmd, t->buffer->data_size, t->buffer->offsets_size, (u64)trd->data.ptr.buffer, (u64)trd->data.ptr.offsets); if (t_from) binder_thread_dec_tmpref(t_from); t->buffer->allow_user_free = 1; if (cmd != BR_REPLY && !(t->flags & TF_ONE_WAY)) { binder_inner_proc_lock(thread->proc); t->to_parent = thread->transaction_stack; t->to_thread = thread; thread->transaction_stack = t; binder_inner_proc_unlock(thread->proc); } else { binder_free_transaction(t); } break; } done: *consumed = ptr - buffer; binder_inner_proc_lock(proc); if (proc->requested_threads == 0 && list_empty(&thread->proc->waiting_threads) && proc->requested_threads_started < proc->max_threads && (thread->looper & (BINDER_LOOPER_STATE_REGISTERED | BINDER_LOOPER_STATE_ENTERED)) /* the user-space code fails to spawn a new thread if we leave this out */) { proc->requested_threads++; binder_inner_proc_unlock(proc); binder_debug(BINDER_DEBUG_THREADS, "%d:%d BR_SPAWN_LOOPER\n", proc->pid, thread->pid); if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) return -EFAULT; binder_stat_br(proc, thread, BR_SPAWN_LOOPER); } else binder_inner_proc_unlock(proc); return 0; } static void binder_release_work(struct binder_proc *proc, struct list_head *list) { struct binder_work *w; enum binder_work_type wtype; while (1) { binder_inner_proc_lock(proc); w = binder_dequeue_work_head_ilocked(list); wtype = w ? w->type : 0; binder_inner_proc_unlock(proc); if (!w) return; switch (wtype) { case BINDER_WORK_TRANSACTION: { struct binder_transaction *t; t = container_of(w, struct binder_transaction, work); binder_cleanup_transaction(t, "process died.", BR_DEAD_REPLY); } break; case BINDER_WORK_RETURN_ERROR: { struct binder_error *e = container_of( w, struct binder_error, work); binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered TRANSACTION_ERROR: %u\n", e->cmd); } break; case BINDER_WORK_TRANSACTION_COMPLETE: { binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered TRANSACTION_COMPLETE\n"); kfree(w); binder_stats_deleted(BINDER_STAT_TRANSACTION_COMPLETE); } break; case BINDER_WORK_DEAD_BINDER_AND_CLEAR: case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: { struct binder_ref_death *death; death = container_of(w, struct binder_ref_death, work); binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "undelivered death notification, %016llx\n", (u64)death->cookie); kfree(death); binder_stats_deleted(BINDER_STAT_DEATH); } break; case BINDER_WORK_NODE: break; default: pr_err("unexpected work type, %d, not freed\n", wtype); break; } } } static struct binder_thread *binder_get_thread_ilocked( struct binder_proc *proc, struct binder_thread *new_thread) { struct binder_thread *thread = NULL; struct rb_node *parent = NULL; struct rb_node **p = &proc->threads.rb_node; while (*p) { parent = *p; thread = rb_entry(parent, struct binder_thread, rb_node); if (current->pid < thread->pid) p = &(*p)->rb_left; else if (current->pid > thread->pid) p = &(*p)->rb_right; else return thread; } if (!new_thread) return NULL; thread = new_thread; binder_stats_created(BINDER_STAT_THREAD); thread->proc = proc; thread->pid = current->pid; atomic_set(&thread->tmp_ref, 0); init_waitqueue_head(&thread->wait); INIT_LIST_HEAD(&thread->todo); rb_link_node(&thread->rb_node, parent, p); rb_insert_color(&thread->rb_node, &proc->threads); thread->looper_need_return = true; thread->return_error.work.type = BINDER_WORK_RETURN_ERROR; thread->return_error.cmd = BR_OK; thread->reply_error.work.type = BINDER_WORK_RETURN_ERROR; thread->reply_error.cmd = BR_OK; thread->ee.command = BR_OK;
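	/*
	 * Note: binder_get_thread() below relies on calling this helper
	 * twice: first with new_thread == NULL as a pure lookup, then,
	 * on a miss, with a binder_thread allocated outside the inner
	 * lock. If another thread raced the allocation in between, the
	 * lookup above wins and the caller frees its unused allocation.
	 */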
INIT_LIST_HEAD(&new_thread->waiting_thread_node); return thread; } static struct binder_thread *binder_get_thread(struct binder_proc *proc) { struct binder_thread *thread; struct binder_thread *new_thread; binder_inner_proc_lock(proc); thread = binder_get_thread_ilocked(proc, NULL); binder_inner_proc_unlock(proc); if (!thread) { new_thread = kzalloc(sizeof(*thread), GFP_KERNEL); if (new_thread == NULL) return NULL; binder_inner_proc_lock(proc); thread = binder_get_thread_ilocked(proc, new_thread); binder_inner_proc_unlock(proc); if (thread != new_thread) kfree(new_thread); } return thread; } static void binder_free_proc(struct binder_proc *proc) { struct binder_device *device; BUG_ON(!list_empty(&proc->todo)); BUG_ON(!list_empty(&proc->delivered_death)); if (proc->outstanding_txns) pr_warn("%s: Unexpected outstanding_txns %d\n", __func__, proc->outstanding_txns); device = container_of(proc->context, struct binder_device, context); if (refcount_dec_and_test(&device->ref)) { kfree(proc->context->name); kfree(device); } binder_alloc_deferred_release(&proc->alloc); put_task_struct(proc->tsk); put_cred(proc->cred); binder_stats_deleted(BINDER_STAT_PROC); kfree(proc); } static void binder_free_thread(struct binder_thread *thread) { BUG_ON(!list_empty(&thread->todo)); binder_stats_deleted(BINDER_STAT_THREAD); binder_proc_dec_tmpref(thread->proc); kfree(thread); } static int binder_thread_release(struct binder_proc *proc, struct binder_thread *thread) { struct binder_transaction *t; struct binder_transaction *send_reply = NULL; int active_transactions = 0; struct binder_transaction *last_t = NULL; binder_inner_proc_lock(thread->proc); /* * take a ref on the proc so it survives * after we remove this thread from proc->threads. * The corresponding dec is when we actually * free the thread in binder_free_thread() */ proc->tmp_ref++; /* * take a ref on this thread to ensure it * survives while we are releasing it */ atomic_inc(&thread->tmp_ref); rb_erase(&thread->rb_node, &proc->threads); t = thread->transaction_stack; if (t) { spin_lock(&t->lock); if (t->to_thread == thread) send_reply = t; } else { __acquire(&t->lock); } thread->is_dead = true; while (t) { last_t = t; active_transactions++; binder_debug(BINDER_DEBUG_DEAD_TRANSACTION, "release %d:%d transaction %d %s, still active\n", proc->pid, thread->pid, t->debug_id, (t->to_thread == thread) ? "in" : "out"); if (t->to_thread == thread) { thread->proc->outstanding_txns--; t->to_proc = NULL; t->to_thread = NULL; if (t->buffer) { t->buffer->transaction = NULL; t->buffer = NULL; } t = t->to_parent; } else if (t->from == thread) { t->from = NULL; t = t->from_parent; } else BUG(); spin_unlock(&last_t->lock); if (t) spin_lock(&t->lock); else __acquire(&t->lock); } /* annotation for sparse, lock not acquired in last iteration above */ __release(&t->lock); /* * If this thread used poll, make sure we remove the waitqueue from any * poll data structures holding it. */ if (thread->looper & BINDER_LOOPER_STATE_POLL) wake_up_pollfree(&thread->wait); binder_inner_proc_unlock(thread->proc); /* * This is needed to avoid races between wake_up_pollfree() above and * someone else removing the last entry from the queue for other reasons * (e.g. ep_remove_wait_queue() being called due to an epoll file * descriptor being closed). Such other users hold an RCU read lock, so * we can be sure they're done after we call synchronize_rcu(). 
*/ if (thread->looper & BINDER_LOOPER_STATE_POLL) synchronize_rcu(); if (send_reply) binder_send_failed_reply(send_reply, BR_DEAD_REPLY); binder_release_work(proc, &thread->todo); binder_thread_dec_tmpref(thread); return active_transactions; } static __poll_t binder_poll(struct file *filp, struct poll_table_struct *wait) { struct binder_proc *proc = filp->private_data; struct binder_thread *thread = NULL; bool wait_for_proc_work; thread = binder_get_thread(proc); if (!thread) return POLLERR; binder_inner_proc_lock(thread->proc); thread->looper |= BINDER_LOOPER_STATE_POLL; wait_for_proc_work = binder_available_for_proc_work_ilocked(thread); binder_inner_proc_unlock(thread->proc); poll_wait(filp, &thread->wait, wait); if (binder_has_work(thread, wait_for_proc_work)) return EPOLLIN; return 0; } static int binder_ioctl_write_read(struct file *filp, unsigned long arg, struct binder_thread *thread) { int ret = 0; struct binder_proc *proc = filp->private_data; void __user *ubuf = (void __user *)arg; struct binder_write_read bwr; if (copy_from_user(&bwr, ubuf, sizeof(bwr))) { ret = -EFAULT; goto out; } binder_debug(BINDER_DEBUG_READ_WRITE, "%d:%d write %lld at %016llx, read %lld at %016llx\n", proc->pid, thread->pid, (u64)bwr.write_size, (u64)bwr.write_buffer, (u64)bwr.read_size, (u64)bwr.read_buffer); if (bwr.write_size > 0) { ret = binder_thread_write(proc, thread, bwr.write_buffer, bwr.write_size, &bwr.write_consumed); trace_binder_write_done(ret); if (ret < 0) { bwr.read_consumed = 0; if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto out; } } if (bwr.read_size > 0) { ret = binder_thread_read(proc, thread, bwr.read_buffer, bwr.read_size, &bwr.read_consumed, filp->f_flags & O_NONBLOCK); trace_binder_read_done(ret); binder_inner_proc_lock(proc); if (!binder_worklist_empty_ilocked(&proc->todo)) binder_wakeup_proc_ilocked(proc); binder_inner_proc_unlock(proc); if (ret < 0) { if (copy_to_user(ubuf, &bwr, sizeof(bwr))) ret = -EFAULT; goto out; } } binder_debug(BINDER_DEBUG_READ_WRITE, "%d:%d wrote %lld of %lld, read return %lld of %lld\n", proc->pid, thread->pid, (u64)bwr.write_consumed, (u64)bwr.write_size, (u64)bwr.read_consumed, (u64)bwr.read_size); if (copy_to_user(ubuf, &bwr, sizeof(bwr))) { ret = -EFAULT; goto out; } out: return ret; } static int binder_ioctl_set_ctx_mgr(struct file *filp, struct flat_binder_object *fbo) { int ret = 0; struct binder_proc *proc = filp->private_data; struct binder_context *context = proc->context; struct binder_node *new_node; kuid_t curr_euid = current_euid(); mutex_lock(&context->context_mgr_node_lock); if (context->binder_context_mgr_node) { pr_err("BINDER_SET_CONTEXT_MGR already set\n"); ret = -EBUSY; goto out; } ret = security_binder_set_context_mgr(proc->cred); if (ret < 0) goto out; if (uid_valid(context->binder_context_mgr_uid)) { if (!uid_eq(context->binder_context_mgr_uid, curr_euid)) { pr_err("BINDER_SET_CONTEXT_MGR bad uid %d != %d\n", from_kuid(&init_user_ns, curr_euid), from_kuid(&init_user_ns, context->binder_context_mgr_uid)); ret = -EPERM; goto out; } } else { context->binder_context_mgr_uid = curr_euid; } new_node = binder_new_node(proc, fbo); if (!new_node) { ret = -ENOMEM; goto out; } binder_node_lock(new_node); new_node->local_weak_refs++; new_node->local_strong_refs++; new_node->has_strong_ref = 1; new_node->has_weak_ref = 1; context->binder_context_mgr_node = new_node; binder_node_unlock(new_node); binder_put_node(new_node); out: mutex_unlock(&context->context_mgr_node_lock); return ret; } static int 
binder_ioctl_get_node_info_for_ref(struct binder_proc *proc, struct binder_node_info_for_ref *info) { struct binder_node *node; struct binder_context *context = proc->context; __u32 handle = info->handle; if (info->strong_count || info->weak_count || info->reserved1 || info->reserved2 || info->reserved3) { binder_user_error("%d BINDER_GET_NODE_INFO_FOR_REF: only handle may be non-zero.", proc->pid); return -EINVAL; } /* This ioctl may only be used by the context manager */ mutex_lock(&context->context_mgr_node_lock); if (!context->binder_context_mgr_node || context->binder_context_mgr_node->proc != proc) { mutex_unlock(&context->context_mgr_node_lock); return -EPERM; } mutex_unlock(&context->context_mgr_node_lock); node = binder_get_node_from_ref(proc, handle, true, NULL); if (!node) return -EINVAL; info->strong_count = node->local_strong_refs + node->internal_strong_refs; info->weak_count = node->local_weak_refs; binder_put_node(node); return 0; } static int binder_ioctl_get_node_debug_info(struct binder_proc *proc, struct binder_node_debug_info *info) { struct rb_node *n; binder_uintptr_t ptr = info->ptr; memset(info, 0, sizeof(*info)); binder_inner_proc_lock(proc); for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); if (node->ptr > ptr) { info->ptr = node->ptr; info->cookie = node->cookie; info->has_strong_ref = node->has_strong_ref; info->has_weak_ref = node->has_weak_ref; break; } } binder_inner_proc_unlock(proc); return 0; } static bool binder_txns_pending_ilocked(struct binder_proc *proc) { struct rb_node *n; struct binder_thread *thread; if (proc->outstanding_txns > 0) return true; for (n = rb_first(&proc->threads); n; n = rb_next(n)) { thread = rb_entry(n, struct binder_thread, rb_node); if (thread->transaction_stack) return true; } return false; } static int binder_ioctl_freeze(struct binder_freeze_info *info, struct binder_proc *target_proc) { int ret = 0; if (!info->enable) { binder_inner_proc_lock(target_proc); target_proc->sync_recv = false; target_proc->async_recv = false; target_proc->is_frozen = false; binder_inner_proc_unlock(target_proc); return 0; } /* * Freezing the target. Prevent new transactions by * setting frozen state. If timeout specified, wait * for transactions to drain. 
*/ binder_inner_proc_lock(target_proc); target_proc->sync_recv = false; target_proc->async_recv = false; target_proc->is_frozen = true; binder_inner_proc_unlock(target_proc); if (info->timeout_ms > 0) ret = wait_event_interruptible_timeout( target_proc->freeze_wait, (!target_proc->outstanding_txns), msecs_to_jiffies(info->timeout_ms)); /* Check pending transactions that wait for reply */ if (ret >= 0) { binder_inner_proc_lock(target_proc); if (binder_txns_pending_ilocked(target_proc)) ret = -EAGAIN; binder_inner_proc_unlock(target_proc); } if (ret < 0) { binder_inner_proc_lock(target_proc); target_proc->is_frozen = false; binder_inner_proc_unlock(target_proc); } return ret; } static int binder_ioctl_get_freezer_info( struct binder_frozen_status_info *info) { struct binder_proc *target_proc; bool found = false; __u32 txns_pending; info->sync_recv = 0; info->async_recv = 0; mutex_lock(&binder_procs_lock); hlist_for_each_entry(target_proc, &binder_procs, proc_node) { if (target_proc->pid == info->pid) { found = true; binder_inner_proc_lock(target_proc); txns_pending = binder_txns_pending_ilocked(target_proc); info->sync_recv |= target_proc->sync_recv | (txns_pending << 1); info->async_recv |= target_proc->async_recv; binder_inner_proc_unlock(target_proc); } } mutex_unlock(&binder_procs_lock); if (!found) return -EINVAL; return 0; } static int binder_ioctl_get_extended_error(struct binder_thread *thread, void __user *ubuf) { struct binder_extended_error ee; binder_inner_proc_lock(thread->proc); ee = thread->ee; binder_set_extended_error(&thread->ee, 0, BR_OK, 0); binder_inner_proc_unlock(thread->proc); if (copy_to_user(ubuf, &ee, sizeof(ee))) return -EFAULT; return 0; } static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { int ret; struct binder_proc *proc = filp->private_data; struct binder_thread *thread; void __user *ubuf = (void __user *)arg; /*pr_info("binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ binder_selftest_alloc(&proc->alloc); trace_binder_ioctl(cmd, arg); ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret) goto err_unlocked; thread = binder_get_thread(proc); if (thread == NULL) { ret = -ENOMEM; goto err; } switch (cmd) { case BINDER_WRITE_READ: ret = binder_ioctl_write_read(filp, arg, thread); if (ret) goto err; break; case BINDER_SET_MAX_THREADS: { int max_threads; if (copy_from_user(&max_threads, ubuf, sizeof(max_threads))) { ret = -EINVAL; goto err; } binder_inner_proc_lock(proc); proc->max_threads = max_threads; binder_inner_proc_unlock(proc); break; } case BINDER_SET_CONTEXT_MGR_EXT: { struct flat_binder_object fbo; if (copy_from_user(&fbo, ubuf, sizeof(fbo))) { ret = -EINVAL; goto err; } ret = binder_ioctl_set_ctx_mgr(filp, &fbo); if (ret) goto err; break; } case BINDER_SET_CONTEXT_MGR: ret = binder_ioctl_set_ctx_mgr(filp, NULL); if (ret) goto err; break; case BINDER_THREAD_EXIT: binder_debug(BINDER_DEBUG_THREADS, "%d:%d exit\n", proc->pid, thread->pid); binder_thread_release(proc, thread); thread = NULL; break; case BINDER_VERSION: { struct binder_version __user *ver = ubuf; if (put_user(BINDER_CURRENT_PROTOCOL_VERSION, &ver->protocol_version)) { ret = -EINVAL; goto err; } break; } case BINDER_GET_NODE_INFO_FOR_REF: { struct binder_node_info_for_ref info; if (copy_from_user(&info, ubuf, sizeof(info))) { ret = -EFAULT; goto err; } ret = binder_ioctl_get_node_info_for_ref(proc, &info); if (ret < 0) goto err; if (copy_to_user(ubuf, &info, sizeof(info))) { ret = -EFAULT; goto err; } 
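		/*
		 * Illustrative userspace call sequence for this ioctl (a
		 * sketch, not kernel code; "binder_fd" is a hypothetical
		 * descriptor for a binder device opened by the context
		 * manager). All fields other than handle must be zero,
		 * which the designated initializer guarantees:
		 *
		 *   struct binder_node_info_for_ref info = { .handle = h };
		 *
		 *   if (ioctl(binder_fd, BINDER_GET_NODE_INFO_FOR_REF, &info) == 0)
		 *           use(info.strong_count, info.weak_count);
		 */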
break; } case BINDER_GET_NODE_DEBUG_INFO: { struct binder_node_debug_info info; if (copy_from_user(&info, ubuf, sizeof(info))) { ret = -EFAULT; goto err; } ret = binder_ioctl_get_node_debug_info(proc, &info); if (ret < 0) goto err; if (copy_to_user(ubuf, &info, sizeof(info))) { ret = -EFAULT; goto err; } break; } case BINDER_FREEZE: { struct binder_freeze_info info; struct binder_proc **target_procs = NULL, *target_proc; int target_procs_count = 0, i = 0; ret = 0; if (copy_from_user(&info, ubuf, sizeof(info))) { ret = -EFAULT; goto err; } mutex_lock(&binder_procs_lock); hlist_for_each_entry(target_proc, &binder_procs, proc_node) { if (target_proc->pid == info.pid) target_procs_count++; } if (target_procs_count == 0) { mutex_unlock(&binder_procs_lock); ret = -EINVAL; goto err; } target_procs = kcalloc(target_procs_count, sizeof(struct binder_proc *), GFP_KERNEL); if (!target_procs) { mutex_unlock(&binder_procs_lock); ret = -ENOMEM; goto err; } hlist_for_each_entry(target_proc, &binder_procs, proc_node) { if (target_proc->pid != info.pid) continue; binder_inner_proc_lock(target_proc); target_proc->tmp_ref++; binder_inner_proc_unlock(target_proc); target_procs[i++] = target_proc; } mutex_unlock(&binder_procs_lock); for (i = 0; i < target_procs_count; i++) { if (ret >= 0) ret = binder_ioctl_freeze(&info, target_procs[i]); binder_proc_dec_tmpref(target_procs[i]); } kfree(target_procs); if (ret < 0) goto err; break; } case BINDER_GET_FROZEN_INFO: { struct binder_frozen_status_info info; if (copy_from_user(&info, ubuf, sizeof(info))) { ret = -EFAULT; goto err; } ret = binder_ioctl_get_freezer_info(&info); if (ret < 0) goto err; if (copy_to_user(ubuf, &info, sizeof(info))) { ret = -EFAULT; goto err; } break; } case BINDER_ENABLE_ONEWAY_SPAM_DETECTION: { uint32_t enable; if (copy_from_user(&enable, ubuf, sizeof(enable))) { ret = -EFAULT; goto err; } binder_inner_proc_lock(proc); proc->oneway_spam_detection_enabled = (bool)enable; binder_inner_proc_unlock(proc); break; } case BINDER_GET_EXTENDED_ERROR: ret = binder_ioctl_get_extended_error(thread, ubuf); if (ret < 0) goto err; break; default: ret = -EINVAL; goto err; } ret = 0; err: if (thread) thread->looper_need_return = false; wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret && ret != -EINTR) pr_info("%d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); err_unlocked: trace_binder_ioctl_done(ret); return ret; } static void binder_vma_open(struct vm_area_struct *vma) { struct binder_proc *proc = vma->vm_private_data; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d open vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); } static void binder_vma_close(struct vm_area_struct *vma) { struct binder_proc *proc = vma->vm_private_data; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%d close vm area %lx-%lx (%ld K) vma %lx pagep %lx\n", proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); binder_alloc_vma_close(&proc->alloc); } static vm_fault_t binder_vm_fault(struct vm_fault *vmf) { return VM_FAULT_SIGBUS; } static const struct vm_operations_struct binder_vm_ops = { .open = binder_vma_open, .close = binder_vma_close, .fault = binder_vm_fault, }; static int binder_mmap(struct file *filp, struct vm_area_struct *vma) { struct binder_proc *proc = filp->private_data; if (proc->tsk != 
current->group_leader) return -EINVAL; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d %lx-%lx (%ld K) vma %lx pagep %lx\n", __func__, proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); if (vma->vm_flags & FORBIDDEN_MMAP_FLAGS) { pr_err("%s: %d %lx-%lx %s failed %d\n", __func__, proc->pid, vma->vm_start, vma->vm_end, "bad vm_flags", -EPERM); return -EPERM; } vm_flags_mod(vma, VM_DONTCOPY | VM_MIXEDMAP, VM_MAYWRITE); vma->vm_ops = &binder_vm_ops; vma->vm_private_data = proc; return binder_alloc_mmap_handler(&proc->alloc, vma); } static int binder_open(struct inode *nodp, struct file *filp) { struct binder_proc *proc, *itr; struct binder_device *binder_dev; struct binderfs_info *info; struct dentry *binder_binderfs_dir_entry_proc = NULL; bool existing_pid = false; binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d:%d\n", __func__, current->group_leader->pid, current->pid); proc = kzalloc(sizeof(*proc), GFP_KERNEL); if (proc == NULL) return -ENOMEM; spin_lock_init(&proc->inner_lock); spin_lock_init(&proc->outer_lock); get_task_struct(current->group_leader); proc->tsk = current->group_leader; proc->cred = get_cred(filp->f_cred); INIT_LIST_HEAD(&proc->todo); init_waitqueue_head(&proc->freeze_wait); proc->default_priority = task_nice(current); /* binderfs stashes devices in i_private */ if (is_binderfs_device(nodp)) { binder_dev = nodp->i_private; info = nodp->i_sb->s_fs_info; binder_binderfs_dir_entry_proc = info->proc_log_dir; } else { binder_dev = container_of(filp->private_data, struct binder_device, miscdev); } refcount_inc(&binder_dev->ref); proc->context = &binder_dev->context; binder_alloc_init(&proc->alloc); binder_stats_created(BINDER_STAT_PROC); proc->pid = current->group_leader->pid; INIT_LIST_HEAD(&proc->delivered_death); INIT_LIST_HEAD(&proc->waiting_threads); filp->private_data = proc; mutex_lock(&binder_procs_lock); hlist_for_each_entry(itr, &binder_procs, proc_node) { if (itr->pid == proc->pid) { existing_pid = true; break; } } hlist_add_head(&proc->proc_node, &binder_procs); mutex_unlock(&binder_procs_lock); if (binder_debugfs_dir_entry_proc && !existing_pid) { char strbuf[11]; snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); /* * proc debug entries are shared between contexts. * Only create for the first PID to avoid debugfs log spamming * The printing code will anyway print all contexts for a given * PID so this is not a problem. */ proc->debugfs_entry = debugfs_create_file(strbuf, 0444, binder_debugfs_dir_entry_proc, (void *)(unsigned long)proc->pid, &proc_fops); } if (binder_binderfs_dir_entry_proc && !existing_pid) { char strbuf[11]; struct dentry *binderfs_entry; snprintf(strbuf, sizeof(strbuf), "%u", proc->pid); /* * Similar to debugfs, the process specific log file is shared * between contexts. Only create for the first PID. * This is ok since same as debugfs, the log file will contain * information on all contexts of a given PID. 
*/ binderfs_entry = binderfs_create_file(binder_binderfs_dir_entry_proc, strbuf, &proc_fops, (void *)(unsigned long)proc->pid); if (!IS_ERR(binderfs_entry)) { proc->binderfs_entry = binderfs_entry; } else { int error; error = PTR_ERR(binderfs_entry); pr_warn("Unable to create file %s in binderfs (error %d)\n", strbuf, error); } } return 0; } static int binder_flush(struct file *filp, fl_owner_t id) { struct binder_proc *proc = filp->private_data; binder_defer_work(proc, BINDER_DEFERRED_FLUSH); return 0; } static void binder_deferred_flush(struct binder_proc *proc) { struct rb_node *n; int wake_count = 0; binder_inner_proc_lock(proc); for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) { struct binder_thread *thread = rb_entry(n, struct binder_thread, rb_node); thread->looper_need_return = true; if (thread->looper & BINDER_LOOPER_STATE_WAITING) { wake_up_interruptible(&thread->wait); wake_count++; } } binder_inner_proc_unlock(proc); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "binder_flush: %d woke %d threads\n", proc->pid, wake_count); } static int binder_release(struct inode *nodp, struct file *filp) { struct binder_proc *proc = filp->private_data; debugfs_remove(proc->debugfs_entry); if (proc->binderfs_entry) { binderfs_remove_file(proc->binderfs_entry); proc->binderfs_entry = NULL; } binder_defer_work(proc, BINDER_DEFERRED_RELEASE); return 0; } static int binder_node_release(struct binder_node *node, int refs) { struct binder_ref *ref; int death = 0; struct binder_proc *proc = node->proc; binder_release_work(proc, &node->async_todo); binder_node_lock(node); binder_inner_proc_lock(proc); binder_dequeue_work_ilocked(&node->work); /* * The caller must have taken a temporary ref on the node. */ BUG_ON(!node->tmp_refs); if (hlist_empty(&node->refs) && node->tmp_refs == 1) { binder_inner_proc_unlock(proc); binder_node_unlock(node); binder_free_node(node); return refs; } node->proc = NULL; node->local_strong_refs = 0; node->local_weak_refs = 0; binder_inner_proc_unlock(proc); spin_lock(&binder_dead_nodes_lock); hlist_add_head(&node->dead_node, &binder_dead_nodes); spin_unlock(&binder_dead_nodes_lock); hlist_for_each_entry(ref, &node->refs, node_entry) { refs++; /* * Need the node lock to synchronize * with new notification requests and the * inner lock to synchronize with queued * death notifications.
*/ binder_inner_proc_lock(ref->proc); if (!ref->death) { binder_inner_proc_unlock(ref->proc); continue; } death++; BUG_ON(!list_empty(&ref->death->work.entry)); ref->death->work.type = BINDER_WORK_DEAD_BINDER; binder_enqueue_work_ilocked(&ref->death->work, &ref->proc->todo); binder_wakeup_proc_ilocked(ref->proc); binder_inner_proc_unlock(ref->proc); } binder_debug(BINDER_DEBUG_DEAD_BINDER, "node %d now dead, refs %d, death %d\n", node->debug_id, refs, death); binder_node_unlock(node); binder_put_node(node); return refs; } static void binder_deferred_release(struct binder_proc *proc) { struct binder_context *context = proc->context; struct rb_node *n; int threads, nodes, incoming_refs, outgoing_refs, active_transactions; mutex_lock(&binder_procs_lock); hlist_del(&proc->proc_node); mutex_unlock(&binder_procs_lock); mutex_lock(&context->context_mgr_node_lock); if (context->binder_context_mgr_node && context->binder_context_mgr_node->proc == proc) { binder_debug(BINDER_DEBUG_DEAD_BINDER, "%s: %d context_mgr_node gone\n", __func__, proc->pid); context->binder_context_mgr_node = NULL; } mutex_unlock(&context->context_mgr_node_lock); binder_inner_proc_lock(proc); /* * Make sure proc stays alive after we * remove all the threads */ proc->tmp_ref++; proc->is_dead = true; proc->is_frozen = false; proc->sync_recv = false; proc->async_recv = false; threads = 0; active_transactions = 0; while ((n = rb_first(&proc->threads))) { struct binder_thread *thread; thread = rb_entry(n, struct binder_thread, rb_node); binder_inner_proc_unlock(proc); threads++; active_transactions += binder_thread_release(proc, thread); binder_inner_proc_lock(proc); } nodes = 0; incoming_refs = 0; while ((n = rb_first(&proc->nodes))) { struct binder_node *node; node = rb_entry(n, struct binder_node, rb_node); nodes++; /* * take a temporary ref on the node before * calling binder_node_release() which will either * kfree() the node or call binder_put_node() */ binder_inc_node_tmpref_ilocked(node); rb_erase(&node->rb_node, &proc->nodes); binder_inner_proc_unlock(proc); incoming_refs = binder_node_release(node, incoming_refs); binder_inner_proc_lock(proc); } binder_inner_proc_unlock(proc); outgoing_refs = 0; binder_proc_lock(proc); while ((n = rb_first(&proc->refs_by_desc))) { struct binder_ref *ref; ref = rb_entry(n, struct binder_ref, rb_node_desc); outgoing_refs++; binder_cleanup_ref_olocked(ref); binder_proc_unlock(proc); binder_free_ref(ref); binder_proc_lock(proc); } binder_proc_unlock(proc); binder_release_work(proc, &proc->todo); binder_release_work(proc, &proc->delivered_death); binder_debug(BINDER_DEBUG_OPEN_CLOSE, "%s: %d threads %d, nodes %d (ref %d), refs %d, active transactions %d\n", __func__, proc->pid, threads, nodes, incoming_refs, outgoing_refs, active_transactions); binder_proc_dec_tmpref(proc); } static void binder_deferred_func(struct work_struct *work) { struct binder_proc *proc; int defer; do { mutex_lock(&binder_deferred_lock); if (!hlist_empty(&binder_deferred_list)) { proc = hlist_entry(binder_deferred_list.first, struct binder_proc, deferred_work_node); hlist_del_init(&proc->deferred_work_node); defer = proc->deferred_work; proc->deferred_work = 0; } else { proc = NULL; defer = 0; } mutex_unlock(&binder_deferred_lock); if (defer & BINDER_DEFERRED_FLUSH) binder_deferred_flush(proc); if (defer & BINDER_DEFERRED_RELEASE) binder_deferred_release(proc); /* frees proc */ } while (proc); } static DECLARE_WORK(binder_deferred_work, binder_deferred_func); static void binder_defer_work(struct binder_proc *proc, enum 
binder_deferred_state defer) { mutex_lock(&binder_deferred_lock); proc->deferred_work |= defer; if (hlist_unhashed(&proc->deferred_work_node)) { hlist_add_head(&proc->deferred_work_node, &binder_deferred_list); schedule_work(&binder_deferred_work); } mutex_unlock(&binder_deferred_lock); } static void print_binder_transaction_ilocked(struct seq_file *m, struct binder_proc *proc, const char *prefix, struct binder_transaction *t) { struct binder_proc *to_proc; struct binder_buffer *buffer = t->buffer; ktime_t current_time = ktime_get(); spin_lock(&t->lock); to_proc = t->to_proc; seq_printf(m, "%s %d: %pK from %d:%d to %d:%d code %x flags %x pri %ld r%d elapsed %lldms", prefix, t->debug_id, t, t->from_pid, t->from_tid, to_proc ? to_proc->pid : 0, t->to_thread ? t->to_thread->pid : 0, t->code, t->flags, t->priority, t->need_reply, ktime_ms_delta(current_time, t->start_time)); spin_unlock(&t->lock); if (proc != to_proc) { /* * Can only safely deref buffer if we are holding the * correct proc inner lock for this node */ seq_puts(m, "\n"); return; } if (buffer == NULL) { seq_puts(m, " buffer free\n"); return; } if (buffer->target_node) seq_printf(m, " node %d", buffer->target_node->debug_id); seq_printf(m, " size %zd:%zd data %pK\n", buffer->data_size, buffer->offsets_size, buffer->user_data); } static void print_binder_work_ilocked(struct seq_file *m, struct binder_proc *proc, const char *prefix, const char *transaction_prefix, struct binder_work *w) { struct binder_node *node; struct binder_transaction *t; switch (w->type) { case BINDER_WORK_TRANSACTION: t = container_of(w, struct binder_transaction, work); print_binder_transaction_ilocked( m, proc, transaction_prefix, t); break; case BINDER_WORK_RETURN_ERROR: { struct binder_error *e = container_of( w, struct binder_error, work); seq_printf(m, "%stransaction error: %u\n", prefix, e->cmd); } break; case BINDER_WORK_TRANSACTION_COMPLETE: seq_printf(m, "%stransaction complete\n", prefix); break; case BINDER_WORK_NODE: node = container_of(w, struct binder_node, work); seq_printf(m, "%snode work %d: u%016llx c%016llx\n", prefix, node->debug_id, (u64)node->ptr, (u64)node->cookie); break; case BINDER_WORK_DEAD_BINDER: seq_printf(m, "%shas dead binder\n", prefix); break; case BINDER_WORK_DEAD_BINDER_AND_CLEAR: seq_printf(m, "%shas cleared dead binder\n", prefix); break; case BINDER_WORK_CLEAR_DEATH_NOTIFICATION: seq_printf(m, "%shas cleared death notification\n", prefix); break; default: seq_printf(m, "%sunknown work: type %d\n", prefix, w->type); break; } } static void print_binder_thread_ilocked(struct seq_file *m, struct binder_thread *thread, int print_always) { struct binder_transaction *t; struct binder_work *w; size_t start_pos = m->count; size_t header_pos; seq_printf(m, " thread %d: l %02x need_return %d tr %d\n", thread->pid, thread->looper, thread->looper_need_return, atomic_read(&thread->tmp_ref)); header_pos = m->count; t = thread->transaction_stack; while (t) { if (t->from == thread) { print_binder_transaction_ilocked(m, thread->proc, " outgoing transaction", t); t = t->from_parent; } else if (t->to_thread == thread) { print_binder_transaction_ilocked(m, thread->proc, " incoming transaction", t); t = t->to_parent; } else { print_binder_transaction_ilocked(m, thread->proc, " bad transaction", t); t = NULL; } } list_for_each_entry(w, &thread->todo, entry) { print_binder_work_ilocked(m, thread->proc, " ", " pending transaction", w); } if (!print_always && m->count == header_pos) m->count = start_pos; } static void 
print_binder_node_nilocked(struct seq_file *m, struct binder_node *node) { struct binder_ref *ref; struct binder_work *w; int count; count = 0; hlist_for_each_entry(ref, &node->refs, node_entry) count++; seq_printf(m, " node %d: u%016llx c%016llx hs %d hw %d ls %d lw %d is %d iw %d tr %d", node->debug_id, (u64)node->ptr, (u64)node->cookie, node->has_strong_ref, node->has_weak_ref, node->local_strong_refs, node->local_weak_refs, node->internal_strong_refs, count, node->tmp_refs); if (count) { seq_puts(m, " proc"); hlist_for_each_entry(ref, &node->refs, node_entry) seq_printf(m, " %d", ref->proc->pid); } seq_puts(m, "\n"); if (node->proc) { list_for_each_entry(w, &node->async_todo, entry) print_binder_work_ilocked(m, node->proc, " ", " pending async transaction", w); } } static void print_binder_ref_olocked(struct seq_file *m, struct binder_ref *ref) { binder_node_lock(ref->node); seq_printf(m, " ref %d: desc %d %snode %d s %d w %d d %pK\n", ref->data.debug_id, ref->data.desc, ref->node->proc ? "" : "dead ", ref->node->debug_id, ref->data.strong, ref->data.weak, ref->death); binder_node_unlock(ref->node); } static void print_binder_proc(struct seq_file *m, struct binder_proc *proc, int print_all) { struct binder_work *w; struct rb_node *n; size_t start_pos = m->count; size_t header_pos; struct binder_node *last_node = NULL; seq_printf(m, "proc %d\n", proc->pid); seq_printf(m, "context %s\n", proc->context->name); header_pos = m->count; binder_inner_proc_lock(proc); for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) print_binder_thread_ilocked(m, rb_entry(n, struct binder_thread, rb_node), print_all); for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) { struct binder_node *node = rb_entry(n, struct binder_node, rb_node); if (!print_all && !node->has_async_transaction) continue; /* * take a temporary reference on the node so it * survives and isn't removed from the tree * while we print it. 
*/ binder_inc_node_tmpref_ilocked(node); /* Need to drop inner lock to take node lock */ binder_inner_proc_unlock(proc); if (last_node) binder_put_node(last_node); binder_node_inner_lock(node); print_binder_node_nilocked(m, node); binder_node_inner_unlock(node); last_node = node; binder_inner_proc_lock(proc); } binder_inner_proc_unlock(proc); if (last_node) binder_put_node(last_node); if (print_all) { binder_proc_lock(proc); for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) print_binder_ref_olocked(m, rb_entry(n, struct binder_ref, rb_node_desc)); binder_proc_unlock(proc); } binder_alloc_print_allocated(m, &proc->alloc); binder_inner_proc_lock(proc); list_for_each_entry(w, &proc->todo, entry) print_binder_work_ilocked(m, proc, " ", " pending transaction", w); list_for_each_entry(w, &proc->delivered_death, entry) { seq_puts(m, " has delivered dead binder\n"); break; } binder_inner_proc_unlock(proc); if (!print_all && m->count == header_pos) m->count = start_pos; } static const char * const binder_return_strings[] = { "BR_ERROR", "BR_OK", "BR_TRANSACTION", "BR_REPLY", "BR_ACQUIRE_RESULT", "BR_DEAD_REPLY", "BR_TRANSACTION_COMPLETE", "BR_INCREFS", "BR_ACQUIRE", "BR_RELEASE", "BR_DECREFS", "BR_ATTEMPT_ACQUIRE", "BR_NOOP", "BR_SPAWN_LOOPER", "BR_FINISHED", "BR_DEAD_BINDER", "BR_CLEAR_DEATH_NOTIFICATION_DONE", "BR_FAILED_REPLY", "BR_FROZEN_REPLY", "BR_ONEWAY_SPAM_SUSPECT", "BR_TRANSACTION_PENDING_FROZEN" }; static const char * const binder_command_strings[] = { "BC_TRANSACTION", "BC_REPLY", "BC_ACQUIRE_RESULT", "BC_FREE_BUFFER", "BC_INCREFS", "BC_ACQUIRE", "BC_RELEASE", "BC_DECREFS", "BC_INCREFS_DONE", "BC_ACQUIRE_DONE", "BC_ATTEMPT_ACQUIRE", "BC_REGISTER_LOOPER", "BC_ENTER_LOOPER", "BC_EXIT_LOOPER", "BC_REQUEST_DEATH_NOTIFICATION", "BC_CLEAR_DEATH_NOTIFICATION", "BC_DEAD_BINDER_DONE", "BC_TRANSACTION_SG", "BC_REPLY_SG", }; static const char * const binder_objstat_strings[] = { "proc", "thread", "node", "ref", "death", "transaction", "transaction_complete" }; static void print_binder_stats(struct seq_file *m, const char *prefix, struct binder_stats *stats) { int i; BUILD_BUG_ON(ARRAY_SIZE(stats->bc) != ARRAY_SIZE(binder_command_strings)); for (i = 0; i < ARRAY_SIZE(stats->bc); i++) { int temp = atomic_read(&stats->bc[i]); if (temp) seq_printf(m, "%s%s: %d\n", prefix, binder_command_strings[i], temp); } BUILD_BUG_ON(ARRAY_SIZE(stats->br) != ARRAY_SIZE(binder_return_strings)); for (i = 0; i < ARRAY_SIZE(stats->br); i++) { int temp = atomic_read(&stats->br[i]); if (temp) seq_printf(m, "%s%s: %d\n", prefix, binder_return_strings[i], temp); } BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(binder_objstat_strings)); BUILD_BUG_ON(ARRAY_SIZE(stats->obj_created) != ARRAY_SIZE(stats->obj_deleted)); for (i = 0; i < ARRAY_SIZE(stats->obj_created); i++) { int created = atomic_read(&stats->obj_created[i]); int deleted = atomic_read(&stats->obj_deleted[i]); if (created || deleted) seq_printf(m, "%s%s: active %d total %d\n", prefix, binder_objstat_strings[i], created - deleted, created); } } static void print_binder_proc_stats(struct seq_file *m, struct binder_proc *proc) { struct binder_work *w; struct binder_thread *thread; struct rb_node *n; int count, strong, weak, ready_threads; size_t free_async_space = binder_alloc_get_free_async_space(&proc->alloc); seq_printf(m, "proc %d\n", proc->pid); seq_printf(m, "context %s\n", proc->context->name); count = 0; ready_threads = 0; binder_inner_proc_lock(proc); for (n = rb_first(&proc->threads); n != NULL; n = rb_next(n)) count++; 
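	/*
	 * "Ready" threads are looper threads currently parked on
	 * proc->waiting_threads, i.e. idle and available to service
	 * new work for this process.
	 */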
list_for_each_entry(thread, &proc->waiting_threads, waiting_thread_node) ready_threads++; seq_printf(m, " threads: %d\n", count); seq_printf(m, " requested threads: %d+%d/%d\n" " ready threads %d\n" " free async space %zd\n", proc->requested_threads, proc->requested_threads_started, proc->max_threads, ready_threads, free_async_space); count = 0; for (n = rb_first(&proc->nodes); n != NULL; n = rb_next(n)) count++; binder_inner_proc_unlock(proc); seq_printf(m, " nodes: %d\n", count); count = 0; strong = 0; weak = 0; binder_proc_lock(proc); for (n = rb_first(&proc->refs_by_desc); n != NULL; n = rb_next(n)) { struct binder_ref *ref = rb_entry(n, struct binder_ref, rb_node_desc); count++; strong += ref->data.strong; weak += ref->data.weak; } binder_proc_unlock(proc); seq_printf(m, " refs: %d s %d w %d\n", count, strong, weak); count = binder_alloc_get_allocated_count(&proc->alloc); seq_printf(m, " buffers: %d\n", count); binder_alloc_print_pages(m, &proc->alloc); count = 0; binder_inner_proc_lock(proc); list_for_each_entry(w, &proc->todo, entry) { if (w->type == BINDER_WORK_TRANSACTION) count++; } binder_inner_proc_unlock(proc); seq_printf(m, " pending transactions: %d\n", count); print_binder_stats(m, " ", &proc->stats); } static int state_show(struct seq_file *m, void *unused) { struct binder_proc *proc; struct binder_node *node; struct binder_node *last_node = NULL; seq_puts(m, "binder state:\n"); spin_lock(&binder_dead_nodes_lock); if (!hlist_empty(&binder_dead_nodes)) seq_puts(m, "dead nodes:\n"); hlist_for_each_entry(node, &binder_dead_nodes, dead_node) { /* * take a temporary reference on the node so it * survives and isn't removed from the list * while we print it. */ node->tmp_refs++; spin_unlock(&binder_dead_nodes_lock); if (last_node) binder_put_node(last_node); binder_node_lock(node); print_binder_node_nilocked(m, node); binder_node_unlock(node); last_node = node; spin_lock(&binder_dead_nodes_lock); } spin_unlock(&binder_dead_nodes_lock); if (last_node) binder_put_node(last_node); mutex_lock(&binder_procs_lock); hlist_for_each_entry(proc, &binder_procs, proc_node) print_binder_proc(m, proc, 1); mutex_unlock(&binder_procs_lock); return 0; } static int stats_show(struct seq_file *m, void *unused) { struct binder_proc *proc; seq_puts(m, "binder stats:\n"); print_binder_stats(m, "", &binder_stats); mutex_lock(&binder_procs_lock); hlist_for_each_entry(proc, &binder_procs, proc_node) print_binder_proc_stats(m, proc); mutex_unlock(&binder_procs_lock); return 0; } static int transactions_show(struct seq_file *m, void *unused) { struct binder_proc *proc; seq_puts(m, "binder transactions:\n"); mutex_lock(&binder_procs_lock); hlist_for_each_entry(proc, &binder_procs, proc_node) print_binder_proc(m, proc, 0); mutex_unlock(&binder_procs_lock); return 0; } static int proc_show(struct seq_file *m, void *unused) { struct binder_proc *itr; int pid = (unsigned long)m->private; mutex_lock(&binder_procs_lock); hlist_for_each_entry(itr, &binder_procs, proc_node) { if (itr->pid == pid) { seq_puts(m, "binder proc state:\n"); print_binder_proc(m, itr, 1); } } mutex_unlock(&binder_procs_lock); return 0; } static void print_binder_transaction_log_entry(struct seq_file *m, struct binder_transaction_log_entry *e) { int debug_id = READ_ONCE(e->debug_id_done); /* * read barrier to guarantee debug_id_done read before * we print the log values */ smp_rmb(); seq_printf(m, "%d: %s from %d:%d to %d:%d context %s node %d handle %d size %d:%d ret %d/%d l=%d", e->debug_id, (e->call_type == 2) ? 
"reply" : ((e->call_type == 1) ? "async" : "call "), e->from_proc, e->from_thread, e->to_proc, e->to_thread, e->context_name, e->to_node, e->target_handle, e->data_size, e->offsets_size, e->return_error, e->return_error_param, e->return_error_line); /* * read-barrier to guarantee read of debug_id_done after * done printing the fields of the entry */ smp_rmb(); seq_printf(m, debug_id && debug_id == READ_ONCE(e->debug_id_done) ? "\n" : " (incomplete)\n"); } static int transaction_log_show(struct seq_file *m, void *unused) { struct binder_transaction_log *log = m->private; unsigned int log_cur = atomic_read(&log->cur); unsigned int count; unsigned int cur; int i; count = log_cur + 1; cur = count < ARRAY_SIZE(log->entry) && !log->full ? 0 : count % ARRAY_SIZE(log->entry); if (count > ARRAY_SIZE(log->entry) || log->full) count = ARRAY_SIZE(log->entry); for (i = 0; i < count; i++) { unsigned int index = cur++ % ARRAY_SIZE(log->entry); print_binder_transaction_log_entry(m, &log->entry[index]); } return 0; } const struct file_operations binder_fops = { .owner = THIS_MODULE, .poll = binder_poll, .unlocked_ioctl = binder_ioctl, .compat_ioctl = compat_ptr_ioctl, .mmap = binder_mmap, .open = binder_open, .flush = binder_flush, .release = binder_release, }; DEFINE_SHOW_ATTRIBUTE(state); DEFINE_SHOW_ATTRIBUTE(stats); DEFINE_SHOW_ATTRIBUTE(transactions); DEFINE_SHOW_ATTRIBUTE(transaction_log); const struct binder_debugfs_entry binder_debugfs_entries[] = { { .name = "state", .mode = 0444, .fops = &state_fops, .data = NULL, }, { .name = "stats", .mode = 0444, .fops = &stats_fops, .data = NULL, }, { .name = "transactions", .mode = 0444, .fops = &transactions_fops, .data = NULL, }, { .name = "transaction_log", .mode = 0444, .fops = &transaction_log_fops, .data = &binder_transaction_log, }, { .name = "failed_transaction_log", .mode = 0444, .fops = &transaction_log_fops, .data = &binder_transaction_log_failed, }, {} /* terminator */ }; static int __init init_binder_device(const char *name) { int ret; struct binder_device *binder_device; binder_device = kzalloc(sizeof(*binder_device), GFP_KERNEL); if (!binder_device) return -ENOMEM; binder_device->miscdev.fops = &binder_fops; binder_device->miscdev.minor = MISC_DYNAMIC_MINOR; binder_device->miscdev.name = name; refcount_set(&binder_device->ref, 1); binder_device->context.binder_context_mgr_uid = INVALID_UID; binder_device->context.name = name; mutex_init(&binder_device->context.context_mgr_node_lock); ret = misc_register(&binder_device->miscdev); if (ret < 0) { kfree(binder_device); return ret; } hlist_add_head(&binder_device->hlist, &binder_devices); return ret; } static int __init binder_init(void) { int ret; char *device_name, *device_tmp; struct binder_device *device; struct hlist_node *tmp; char *device_names = NULL; const struct binder_debugfs_entry *db_entry; ret = binder_alloc_shrinker_init(); if (ret) return ret; atomic_set(&binder_transaction_log.cur, ~0U); atomic_set(&binder_transaction_log_failed.cur, ~0U); binder_debugfs_dir_entry_root = debugfs_create_dir("binder", NULL); binder_for_each_debugfs_entry(db_entry) debugfs_create_file(db_entry->name, db_entry->mode, binder_debugfs_dir_entry_root, db_entry->data, db_entry->fops); binder_debugfs_dir_entry_proc = debugfs_create_dir("proc", binder_debugfs_dir_entry_root); if (!IS_ENABLED(CONFIG_ANDROID_BINDERFS) && strcmp(binder_devices_param, "") != 0) { /* * Copy the module_parameter string, because we don't want to * tokenize it in-place. 
*/ device_names = kstrdup(binder_devices_param, GFP_KERNEL); if (!device_names) { ret = -ENOMEM; goto err_alloc_device_names_failed; } device_tmp = device_names; while ((device_name = strsep(&device_tmp, ","))) { ret = init_binder_device(device_name); if (ret) goto err_init_binder_device_failed; } } ret = init_binderfs(); if (ret) goto err_init_binder_device_failed; return ret; err_init_binder_device_failed: hlist_for_each_entry_safe(device, tmp, &binder_devices, hlist) { misc_deregister(&device->miscdev); hlist_del(&device->hlist); kfree(device); } kfree(device_names); err_alloc_device_names_failed: debugfs_remove_recursive(binder_debugfs_dir_entry_root); binder_alloc_shrinker_exit(); return ret; } device_initcall(binder_init); #define CREATE_TRACE_POINTS #include "binder_trace.h" MODULE_LICENSE("GPL v2");
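/*
 * Illustrative lifecycle from userspace (a minimal sketch, assuming a
 * default "/dev/binder" node; error handling omitted). It exercises
 * binder_open(), binder_mmap() and the BINDER_WRITE_READ path handled
 * by binder_ioctl_write_read() above:
 *
 *   int fd = open("/dev/binder", O_RDWR | O_CLOEXEC);
 *   void *map = mmap(NULL, 1024 * 1024, PROT_READ, MAP_PRIVATE, fd, 0);
 *
 *   uint32_t cmd = BC_ENTER_LOOPER;
 *   struct binder_write_read bwr = {
 *           .write_size = sizeof(cmd),
 *           .write_buffer = (binder_uintptr_t)&cmd,
 *   };
 *   ioctl(fd, BINDER_WRITE_READ, &bwr);
 */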
linux-master
drivers/android/binder.c
// SPDX-License-Identifier: GPL-2.0-only /* binder_alloc_selftest.c * * Android IPC Subsystem * * Copyright (C) 2017 Google, Inc. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/mm_types.h> #include <linux/err.h> #include "binder_alloc.h" #define BUFFER_NUM 5 #define BUFFER_MIN_SIZE (PAGE_SIZE / 8) static bool binder_selftest_run = true; static int binder_selftest_failures; static DEFINE_MUTEX(binder_selftest_lock); /** * enum buf_end_align_type - Page alignment of a buffer * end with regard to the end of the previous buffer. * * In the pictures below, buf2 refers to the buffer we * are aligning. buf1 refers to previous buffer by addr. * Symbol [ means the start of a buffer, ] means the end * of a buffer, and | means page boundaries. */ enum buf_end_align_type { /** * @SAME_PAGE_UNALIGNED: The end of this buffer is on * the same page as the end of the previous buffer and * is not page aligned. Examples: * buf1 ][ buf2 ][ ... * buf1 ]|[ buf2 ][ ... */ SAME_PAGE_UNALIGNED = 0, /** * @SAME_PAGE_ALIGNED: When the end of the previous buffer * is not page aligned, the end of this buffer is on the * same page as the end of the previous buffer and is page * aligned. When the previous buffer is page aligned, the * end of this buffer is aligned to the next page boundary. * Examples: * buf1 ][ buf2 ]| ... * buf1 ]|[ buf2 ]| ... */ SAME_PAGE_ALIGNED, /** * @NEXT_PAGE_UNALIGNED: The end of this buffer is on * the page next to the end of the previous buffer and * is not page aligned. Examples: * buf1 ][ buf2 | buf2 ][ ... * buf1 ]|[ buf2 | buf2 ][ ... */ NEXT_PAGE_UNALIGNED, /** * @NEXT_PAGE_ALIGNED: The end of this buffer is on * the page next to the end of the previous buffer and * is page aligned. Examples: * buf1 ][ buf2 | buf2 ]| ... * buf1 ]|[ buf2 | buf2 ]| ... */ NEXT_PAGE_ALIGNED, /** * @NEXT_NEXT_UNALIGNED: The end of this buffer is on * the page that follows the page after the end of the * previous buffer and is not page aligned. Examples: * buf1 ][ buf2 | buf2 | buf2 ][ ... * buf1 ]|[ buf2 | buf2 | buf2 ][ ... */ NEXT_NEXT_UNALIGNED, LOOP_END, }; static void pr_err_size_seq(size_t *sizes, int *seq) { int i; pr_err("alloc sizes: "); for (i = 0; i < BUFFER_NUM; i++) pr_cont("[%zu]", sizes[i]); pr_cont("\n"); pr_err("free seq: "); for (i = 0; i < BUFFER_NUM; i++) pr_cont("[%d]", seq[i]); pr_cont("\n"); } static bool check_buffer_pages_allocated(struct binder_alloc *alloc, struct binder_buffer *buffer, size_t size) { void __user *page_addr; void __user *end; int page_index; end = (void __user *)PAGE_ALIGN((uintptr_t)buffer->user_data + size); page_addr = buffer->user_data; for (; page_addr < end; page_addr += PAGE_SIZE) { page_index = (page_addr - alloc->buffer) / PAGE_SIZE; if (!alloc->pages[page_index].page_ptr || !list_empty(&alloc->pages[page_index].lru)) { pr_err("expect alloc but is %s at page index %d\n", alloc->pages[page_index].page_ptr ? 
"lru" : "free", page_index); return false; } } return true; } static void binder_selftest_alloc_buf(struct binder_alloc *alloc, struct binder_buffer *buffers[], size_t *sizes, int *seq) { int i; for (i = 0; i < BUFFER_NUM; i++) { buffers[i] = binder_alloc_new_buf(alloc, sizes[i], 0, 0, 0, 0); if (IS_ERR(buffers[i]) || !check_buffer_pages_allocated(alloc, buffers[i], sizes[i])) { pr_err_size_seq(sizes, seq); binder_selftest_failures++; } } } static void binder_selftest_free_buf(struct binder_alloc *alloc, struct binder_buffer *buffers[], size_t *sizes, int *seq, size_t end) { int i; for (i = 0; i < BUFFER_NUM; i++) binder_alloc_free_buf(alloc, buffers[seq[i]]); for (i = 0; i < end / PAGE_SIZE; i++) { /** * Error message on a free page can be false positive * if binder shrinker ran during binder_alloc_free_buf * calls above. */ if (list_empty(&alloc->pages[i].lru)) { pr_err_size_seq(sizes, seq); pr_err("expect lru but is %s at page index %d\n", alloc->pages[i].page_ptr ? "alloc" : "free", i); binder_selftest_failures++; } } } static void binder_selftest_free_page(struct binder_alloc *alloc) { int i; unsigned long count; while ((count = list_lru_count(&binder_alloc_lru))) { list_lru_walk(&binder_alloc_lru, binder_alloc_free_page, NULL, count); } for (i = 0; i < (alloc->buffer_size / PAGE_SIZE); i++) { if (alloc->pages[i].page_ptr) { pr_err("expect free but is %s at page index %d\n", list_empty(&alloc->pages[i].lru) ? "alloc" : "lru", i); binder_selftest_failures++; } } } static void binder_selftest_alloc_free(struct binder_alloc *alloc, size_t *sizes, int *seq, size_t end) { struct binder_buffer *buffers[BUFFER_NUM]; binder_selftest_alloc_buf(alloc, buffers, sizes, seq); binder_selftest_free_buf(alloc, buffers, sizes, seq, end); /* Allocate from lru. */ binder_selftest_alloc_buf(alloc, buffers, sizes, seq); if (list_lru_count(&binder_alloc_lru)) pr_err("lru list should be empty but is not\n"); binder_selftest_free_buf(alloc, buffers, sizes, seq, end); binder_selftest_free_page(alloc); } static bool is_dup(int *seq, int index, int val) { int i; for (i = 0; i < index; i++) { if (seq[i] == val) return true; } return false; } /* Generate BUFFER_NUM factorial free orders. */ static void binder_selftest_free_seq(struct binder_alloc *alloc, size_t *sizes, int *seq, int index, size_t end) { int i; if (index == BUFFER_NUM) { binder_selftest_alloc_free(alloc, sizes, seq, end); return; } for (i = 0; i < BUFFER_NUM; i++) { if (is_dup(seq, index, i)) continue; seq[index] = i; binder_selftest_free_seq(alloc, sizes, seq, index + 1, end); } } static void binder_selftest_alloc_size(struct binder_alloc *alloc, size_t *end_offset) { int i; int seq[BUFFER_NUM] = {0}; size_t front_sizes[BUFFER_NUM]; size_t back_sizes[BUFFER_NUM]; size_t last_offset, offset = 0; for (i = 0; i < BUFFER_NUM; i++) { last_offset = offset; offset = end_offset[i]; front_sizes[i] = offset - last_offset; back_sizes[BUFFER_NUM - i - 1] = front_sizes[i]; } /* * Buffers share the first or last few pages. * Only BUFFER_NUM - 1 buffer sizes are adjustable since * we need one giant buffer before getting to the last page. 
*/ back_sizes[0] += alloc->buffer_size - end_offset[BUFFER_NUM - 1]; binder_selftest_free_seq(alloc, front_sizes, seq, 0, end_offset[BUFFER_NUM - 1]); binder_selftest_free_seq(alloc, back_sizes, seq, 0, alloc->buffer_size); } static void binder_selftest_alloc_offset(struct binder_alloc *alloc, size_t *end_offset, int index) { int align; size_t end, prev; if (index == BUFFER_NUM) { binder_selftest_alloc_size(alloc, end_offset); return; } prev = index == 0 ? 0 : end_offset[index - 1]; end = prev; BUILD_BUG_ON(BUFFER_MIN_SIZE * BUFFER_NUM >= PAGE_SIZE); for (align = SAME_PAGE_UNALIGNED; align < LOOP_END; align++) { if (align % 2) end = ALIGN(end, PAGE_SIZE); else end += BUFFER_MIN_SIZE; end_offset[index] = end; binder_selftest_alloc_offset(alloc, end_offset, index + 1); } } /** * binder_selftest_alloc() - Test alloc and free of buffer pages. * @alloc: Pointer to alloc struct. * * Allocate BUFFER_NUM buffers to cover all page alignment cases, * then free them in all orders possible. Check that pages are * correctly allocated, put onto lru when buffers are freed, and * are freed when binder_alloc_free_page is called. */ void binder_selftest_alloc(struct binder_alloc *alloc) { size_t end_offset[BUFFER_NUM]; if (!binder_selftest_run) return; mutex_lock(&binder_selftest_lock); if (!binder_selftest_run || !alloc->vma) goto done; pr_info("STARTED\n"); binder_selftest_alloc_offset(alloc, end_offset, 0); binder_selftest_run = false; if (binder_selftest_failures > 0) pr_info("%d tests FAILED\n", binder_selftest_failures); else pr_info("PASSED\n"); done: mutex_unlock(&binder_selftest_lock); }
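/*
 * Rough size of the space exercised above, for orientation (derived
 * from the constants in this file, not measured): each of the
 * BUFFER_NUM (5) buffer ends may take any of the LOOP_END (5)
 * alignment types, giving 5^5 = 3125 offset layouts; every layout is
 * tested with both a front-loaded and a back-loaded size split, and
 * each split is freed in all 5! = 120 possible orders.
 */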
linux-master
drivers/android/binder_alloc_selftest.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/compiler_types.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/fsnotify.h> #include <linux/gfp.h> #include <linux/idr.h> #include <linux/init.h> #include <linux/ipc_namespace.h> #include <linux/kdev_t.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/namei.h> #include <linux/magic.h> #include <linux/major.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/mount.h> #include <linux/fs_parser.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/spinlock_types.h> #include <linux/stddef.h> #include <linux/string.h> #include <linux/types.h> #include <linux/uaccess.h> #include <linux/user_namespace.h> #include <linux/xarray.h> #include <uapi/asm-generic/errno-base.h> #include <uapi/linux/android/binder.h> #include <uapi/linux/android/binderfs.h> #include "binder_internal.h" #define FIRST_INODE 1 #define SECOND_INODE 2 #define INODE_OFFSET 3 #define BINDERFS_MAX_MINOR (1U << MINORBITS) /* Ensure that the initial ipc namespace always has devices available. */ #define BINDERFS_MAX_MINOR_CAPPED (BINDERFS_MAX_MINOR - 4) static dev_t binderfs_dev; static DEFINE_MUTEX(binderfs_minors_mutex); static DEFINE_IDA(binderfs_minors); enum binderfs_param { Opt_max, Opt_stats_mode, }; enum binderfs_stats_mode { binderfs_stats_mode_unset, binderfs_stats_mode_global, }; struct binder_features { bool oneway_spam_detection; bool extended_error; }; static const struct constant_table binderfs_param_stats[] = { { "global", binderfs_stats_mode_global }, {} }; static const struct fs_parameter_spec binderfs_fs_parameters[] = { fsparam_u32("max", Opt_max), fsparam_enum("stats", Opt_stats_mode, binderfs_param_stats), {} }; static struct binder_features binder_features = { .oneway_spam_detection = true, .extended_error = true, }; static inline struct binderfs_info *BINDERFS_SB(const struct super_block *sb) { return sb->s_fs_info; } bool is_binderfs_device(const struct inode *inode) { if (inode->i_sb->s_magic == BINDERFS_SUPER_MAGIC) return true; return false; } /** * binderfs_binder_device_create - allocate inode from super block of a * binderfs mount * @ref_inode: inode from which the super block will be taken * @userp: buffer to copy information about new device for userspace to * @req: struct binderfs_device as copied from userspace * * This function allocates a new binder_device and reserves a new minor * number for it. * Minor numbers are limited and tracked globally in binderfs_minors. The * function will stash a struct binder_device for the specific binder * device in i_private of the inode. * It will go on to allocate a new inode from the super block of the * filesystem mount, stash a struct binder_device in its i_private field * and attach a dentry to that inode. * * Return: 0 on success, negative errno on failure */ static int binderfs_binder_device_create(struct inode *ref_inode, struct binderfs_device __user *userp, struct binderfs_device *req) { int minor, ret; struct dentry *dentry, *root; struct binder_device *device; char *name = NULL; size_t name_len; struct inode *inode = NULL; struct super_block *sb = ref_inode->i_sb; struct binderfs_info *info = sb->s_fs_info; #if defined(CONFIG_IPC_NS) bool use_reserve = (info->ipc_ns == &init_ipc_ns); #else bool use_reserve = true; #endif /* Reserve new minor number for the new device.
*/ mutex_lock(&binderfs_minors_mutex); if (++info->device_count <= info->mount_opts.max) minor = ida_alloc_max(&binderfs_minors, use_reserve ? BINDERFS_MAX_MINOR : BINDERFS_MAX_MINOR_CAPPED, GFP_KERNEL); else minor = -ENOSPC; if (minor < 0) { --info->device_count; mutex_unlock(&binderfs_minors_mutex); return minor; } mutex_unlock(&binderfs_minors_mutex); ret = -ENOMEM; device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) goto err; inode = new_inode(sb); if (!inode) goto err; inode->i_ino = minor + INODE_OFFSET; inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); init_special_inode(inode, S_IFCHR | 0600, MKDEV(MAJOR(binderfs_dev), minor)); inode->i_fop = &binder_fops; inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; req->name[BINDERFS_MAX_NAME] = '\0'; /* NUL-terminate */ name_len = strlen(req->name); /* Make sure to include terminating NUL byte */ name = kmemdup(req->name, name_len + 1, GFP_KERNEL); if (!name) goto err; refcount_set(&device->ref, 1); device->binderfs_inode = inode; device->context.binder_context_mgr_uid = INVALID_UID; device->context.name = name; device->miscdev.name = name; device->miscdev.minor = minor; mutex_init(&device->context.context_mgr_node_lock); req->major = MAJOR(binderfs_dev); req->minor = minor; if (userp && copy_to_user(userp, req, sizeof(*req))) { ret = -EFAULT; goto err; } root = sb->s_root; inode_lock(d_inode(root)); /* look it up */ dentry = lookup_one_len(name, root, name_len); if (IS_ERR(dentry)) { inode_unlock(d_inode(root)); ret = PTR_ERR(dentry); goto err; } if (d_really_is_positive(dentry)) { /* already exists */ dput(dentry); inode_unlock(d_inode(root)); ret = -EEXIST; goto err; } inode->i_private = device; d_instantiate(dentry, inode); fsnotify_create(root->d_inode, dentry); inode_unlock(d_inode(root)); return 0; err: kfree(name); kfree(device); mutex_lock(&binderfs_minors_mutex); --info->device_count; ida_free(&binderfs_minors, minor); mutex_unlock(&binderfs_minors_mutex); iput(inode); return ret; } /** * binder_ctl_ioctl - handle binder device node allocation requests * * The request handler for the binder-control device. All requests operate on * the binderfs mount the binder-control device resides in: * - BINDER_CTL_ADD * Allocate a new binder device. * * Return: %0 on success, negative errno on failure. 
*/ static long binder_ctl_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int ret = -EINVAL; struct inode *inode = file_inode(file); struct binderfs_device __user *device = (struct binderfs_device __user *)arg; struct binderfs_device device_req; switch (cmd) { case BINDER_CTL_ADD: ret = copy_from_user(&device_req, device, sizeof(device_req)); if (ret) { ret = -EFAULT; break; } ret = binderfs_binder_device_create(inode, device, &device_req); break; default: break; } return ret; } static void binderfs_evict_inode(struct inode *inode) { struct binder_device *device = inode->i_private; struct binderfs_info *info = BINDERFS_SB(inode->i_sb); clear_inode(inode); if (!S_ISCHR(inode->i_mode) || !device) return; mutex_lock(&binderfs_minors_mutex); --info->device_count; ida_free(&binderfs_minors, device->miscdev.minor); mutex_unlock(&binderfs_minors_mutex); if (refcount_dec_and_test(&device->ref)) { kfree(device->context.name); kfree(device); } } static int binderfs_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param) { int opt; struct binderfs_mount_opts *ctx = fc->fs_private; struct fs_parse_result result; opt = fs_parse(fc, binderfs_fs_parameters, param, &result); if (opt < 0) return opt; switch (opt) { case Opt_max: if (result.uint_32 > BINDERFS_MAX_MINOR) return invalfc(fc, "Bad value for '%s'", param->key); ctx->max = result.uint_32; break; case Opt_stats_mode: if (!capable(CAP_SYS_ADMIN)) return -EPERM; ctx->stats_mode = result.uint_32; break; default: return invalfc(fc, "Unsupported parameter '%s'", param->key); } return 0; } static int binderfs_fs_context_reconfigure(struct fs_context *fc) { struct binderfs_mount_opts *ctx = fc->fs_private; struct binderfs_info *info = BINDERFS_SB(fc->root->d_sb); if (info->mount_opts.stats_mode != ctx->stats_mode) return invalfc(fc, "Binderfs stats mode cannot be changed during a remount"); info->mount_opts.stats_mode = ctx->stats_mode; info->mount_opts.max = ctx->max; return 0; } static int binderfs_show_options(struct seq_file *seq, struct dentry *root) { struct binderfs_info *info = BINDERFS_SB(root->d_sb); if (info->mount_opts.max <= BINDERFS_MAX_MINOR) seq_printf(seq, ",max=%d", info->mount_opts.max); switch (info->mount_opts.stats_mode) { case binderfs_stats_mode_unset: break; case binderfs_stats_mode_global: seq_printf(seq, ",stats=global"); break; } return 0; } static const struct super_operations binderfs_super_ops = { .evict_inode = binderfs_evict_inode, .show_options = binderfs_show_options, .statfs = simple_statfs, }; static inline bool is_binderfs_control_device(const struct dentry *dentry) { struct binderfs_info *info = dentry->d_sb->s_fs_info; return info->control_dentry == dentry; } static int binderfs_rename(struct mnt_idmap *idmap, struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry, unsigned int flags) { if (is_binderfs_control_device(old_dentry) || is_binderfs_control_device(new_dentry)) return -EPERM; return simple_rename(idmap, old_dir, old_dentry, new_dir, new_dentry, flags); } static int binderfs_unlink(struct inode *dir, struct dentry *dentry) { if (is_binderfs_control_device(dentry)) return -EPERM; return simple_unlink(dir, dentry); } static const struct file_operations binder_ctl_fops = { .owner = THIS_MODULE, .open = nonseekable_open, .unlocked_ioctl = binder_ctl_ioctl, .compat_ioctl = binder_ctl_ioctl, .llseek = noop_llseek, }; /** * binderfs_binder_ctl_create - create a new binder-control device * @sb: super block of the binderfs mount * * This 
function creates a new binder-control device node in the binderfs mount * referred to by @sb. * * Return: 0 on success, negative errno on failure */ static int binderfs_binder_ctl_create(struct super_block *sb) { int minor, ret; struct dentry *dentry; struct binder_device *device; struct inode *inode = NULL; struct dentry *root = sb->s_root; struct binderfs_info *info = sb->s_fs_info; #if defined(CONFIG_IPC_NS) bool use_reserve = (info->ipc_ns == &init_ipc_ns); #else bool use_reserve = true; #endif device = kzalloc(sizeof(*device), GFP_KERNEL); if (!device) return -ENOMEM; /* If we have already created a binder-control node, return. */ if (info->control_dentry) { ret = 0; goto out; } ret = -ENOMEM; inode = new_inode(sb); if (!inode) goto out; /* Reserve a new minor number for the new device. */ mutex_lock(&binderfs_minors_mutex); minor = ida_alloc_max(&binderfs_minors, use_reserve ? BINDERFS_MAX_MINOR : BINDERFS_MAX_MINOR_CAPPED, GFP_KERNEL); mutex_unlock(&binderfs_minors_mutex); if (minor < 0) { ret = minor; goto out; } inode->i_ino = SECOND_INODE; inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); init_special_inode(inode, S_IFCHR | 0600, MKDEV(MAJOR(binderfs_dev), minor)); inode->i_fop = &binder_ctl_fops; inode->i_uid = info->root_uid; inode->i_gid = info->root_gid; refcount_set(&device->ref, 1); device->binderfs_inode = inode; device->miscdev.minor = minor; dentry = d_alloc_name(root, "binder-control"); if (!dentry) goto out; inode->i_private = device; info->control_dentry = dentry; d_add(dentry, inode); return 0; out: kfree(device); iput(inode); return ret; } static const struct inode_operations binderfs_dir_inode_operations = { .lookup = simple_lookup, .rename = binderfs_rename, .unlink = binderfs_unlink, }; static struct inode *binderfs_make_inode(struct super_block *sb, int mode) { struct inode *ret; ret = new_inode(sb); if (ret) { ret->i_ino = iunique(sb, BINDERFS_MAX_MINOR + INODE_OFFSET); ret->i_mode = mode; ret->i_atime = ret->i_mtime = inode_set_ctime_current(ret); } return ret; } static struct dentry *binderfs_create_dentry(struct dentry *parent, const char *name) { struct dentry *dentry; dentry = lookup_one_len(name, parent, strlen(name)); if (IS_ERR(dentry)) return dentry; /* Return error if the file/dir already exists. 
*/ if (d_really_is_positive(dentry)) { dput(dentry); return ERR_PTR(-EEXIST); } return dentry; } void binderfs_remove_file(struct dentry *dentry) { struct inode *parent_inode; parent_inode = d_inode(dentry->d_parent); inode_lock(parent_inode); if (simple_positive(dentry)) { dget(dentry); simple_unlink(parent_inode, dentry); d_delete(dentry); dput(dentry); } inode_unlock(parent_inode); } struct dentry *binderfs_create_file(struct dentry *parent, const char *name, const struct file_operations *fops, void *data) { struct dentry *dentry; struct inode *new_inode, *parent_inode; struct super_block *sb; parent_inode = d_inode(parent); inode_lock(parent_inode); dentry = binderfs_create_dentry(parent, name); if (IS_ERR(dentry)) goto out; sb = parent_inode->i_sb; new_inode = binderfs_make_inode(sb, S_IFREG | 0444); if (!new_inode) { dput(dentry); dentry = ERR_PTR(-ENOMEM); goto out; } new_inode->i_fop = fops; new_inode->i_private = data; d_instantiate(dentry, new_inode); fsnotify_create(parent_inode, dentry); out: inode_unlock(parent_inode); return dentry; } static struct dentry *binderfs_create_dir(struct dentry *parent, const char *name) { struct dentry *dentry; struct inode *new_inode, *parent_inode; struct super_block *sb; parent_inode = d_inode(parent); inode_lock(parent_inode); dentry = binderfs_create_dentry(parent, name); if (IS_ERR(dentry)) goto out; sb = parent_inode->i_sb; new_inode = binderfs_make_inode(sb, S_IFDIR | 0755); if (!new_inode) { dput(dentry); dentry = ERR_PTR(-ENOMEM); goto out; } new_inode->i_fop = &simple_dir_operations; new_inode->i_op = &simple_dir_inode_operations; set_nlink(new_inode, 2); d_instantiate(dentry, new_inode); inc_nlink(parent_inode); fsnotify_mkdir(parent_inode, dentry); out: inode_unlock(parent_inode); return dentry; } static int binder_features_show(struct seq_file *m, void *unused) { bool *feature = m->private; seq_printf(m, "%d\n", *feature); return 0; } DEFINE_SHOW_ATTRIBUTE(binder_features); static int init_binder_features(struct super_block *sb) { struct dentry *dentry, *dir; dir = binderfs_create_dir(sb->s_root, "features"); if (IS_ERR(dir)) return PTR_ERR(dir); dentry = binderfs_create_file(dir, "oneway_spam_detection", &binder_features_fops, &binder_features.oneway_spam_detection); if (IS_ERR(dentry)) return PTR_ERR(dentry); dentry = binderfs_create_file(dir, "extended_error", &binder_features_fops, &binder_features.extended_error); if (IS_ERR(dentry)) return PTR_ERR(dentry); return 0; } static int init_binder_logs(struct super_block *sb) { struct dentry *binder_logs_root_dir, *dentry, *proc_log_dir; const struct binder_debugfs_entry *db_entry; struct binderfs_info *info; int ret = 0; binder_logs_root_dir = binderfs_create_dir(sb->s_root, "binder_logs"); if (IS_ERR(binder_logs_root_dir)) { ret = PTR_ERR(binder_logs_root_dir); goto out; } binder_for_each_debugfs_entry(db_entry) { dentry = binderfs_create_file(binder_logs_root_dir, db_entry->name, db_entry->fops, db_entry->data); if (IS_ERR(dentry)) { ret = PTR_ERR(dentry); goto out; } } proc_log_dir = binderfs_create_dir(binder_logs_root_dir, "proc"); if (IS_ERR(proc_log_dir)) { ret = PTR_ERR(proc_log_dir); goto out; } info = sb->s_fs_info; info->proc_log_dir = proc_log_dir; out: return ret; } static int binderfs_fill_super(struct super_block *sb, struct fs_context *fc) { int ret; struct binderfs_info *info; struct binderfs_mount_opts *ctx = fc->fs_private; struct inode *inode = NULL; struct binderfs_device device_info = {}; const char *name; size_t len; sb->s_blocksize = PAGE_SIZE; 
sb->s_blocksize_bits = PAGE_SHIFT; /* * The binderfs filesystem can be mounted by userns root in a * non-initial userns. By default such mounts have the SB_I_NODEV flag * set in s_iflags to prevent security issues where userns root can * just create random device nodes via mknod() since it owns the * filesystem mount. But binderfs does not allow creating any files, * including device nodes. The only way to create binder device nodes * is through the binder-control device, which userns root is explicitly * allowed to use. So removing the SB_I_NODEV flag from s_iflags is both * necessary and safe. */ sb->s_iflags &= ~SB_I_NODEV; sb->s_iflags |= SB_I_NOEXEC; sb->s_magic = BINDERFS_SUPER_MAGIC; sb->s_op = &binderfs_super_ops; sb->s_time_gran = 1; sb->s_fs_info = kzalloc(sizeof(struct binderfs_info), GFP_KERNEL); if (!sb->s_fs_info) return -ENOMEM; info = sb->s_fs_info; info->ipc_ns = get_ipc_ns(current->nsproxy->ipc_ns); info->root_gid = make_kgid(sb->s_user_ns, 0); if (!gid_valid(info->root_gid)) info->root_gid = GLOBAL_ROOT_GID; info->root_uid = make_kuid(sb->s_user_ns, 0); if (!uid_valid(info->root_uid)) info->root_uid = GLOBAL_ROOT_UID; info->mount_opts.max = ctx->max; info->mount_opts.stats_mode = ctx->stats_mode; inode = new_inode(sb); if (!inode) return -ENOMEM; inode->i_ino = FIRST_INODE; inode->i_fop = &simple_dir_operations; inode->i_mode = S_IFDIR | 0755; inode->i_mtime = inode->i_atime = inode_set_ctime_current(inode); inode->i_op = &binderfs_dir_inode_operations; set_nlink(inode, 2); sb->s_root = d_make_root(inode); if (!sb->s_root) return -ENOMEM; ret = binderfs_binder_ctl_create(sb); if (ret) return ret; name = binder_devices_param; for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) { strscpy(device_info.name, name, len + 1); ret = binderfs_binder_device_create(inode, NULL, &device_info); if (ret) return ret; name += len; if (*name == ',') name++; } ret = init_binder_features(sb); if (ret) return ret; if (info->mount_opts.stats_mode == binderfs_stats_mode_global) return init_binder_logs(sb); return 0; } static int binderfs_fs_context_get_tree(struct fs_context *fc) { return get_tree_nodev(fc, binderfs_fill_super); } static void binderfs_fs_context_free(struct fs_context *fc) { struct binderfs_mount_opts *ctx = fc->fs_private; kfree(ctx); } static const struct fs_context_operations binderfs_fs_context_ops = { .free = binderfs_fs_context_free, .get_tree = binderfs_fs_context_get_tree, .parse_param = binderfs_fs_context_parse_param, .reconfigure = binderfs_fs_context_reconfigure, }; static int binderfs_init_fs_context(struct fs_context *fc) { struct binderfs_mount_opts *ctx; ctx = kzalloc(sizeof(struct binderfs_mount_opts), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->max = BINDERFS_MAX_MINOR; ctx->stats_mode = binderfs_stats_mode_unset; fc->fs_private = ctx; fc->ops = &binderfs_fs_context_ops; return 0; } static void binderfs_kill_super(struct super_block *sb) { struct binderfs_info *info = sb->s_fs_info; /* * During inode eviction struct binderfs_info is needed. * So first wipe the super_block then free struct binderfs_info. */ kill_litter_super(sb); if (info && info->ipc_ns) put_ipc_ns(info->ipc_ns); kfree(info); } static struct file_system_type binder_fs_type = { .name = "binder", .init_fs_context = binderfs_init_fs_context, .parameters = binderfs_fs_parameters, .kill_sb = binderfs_kill_super, .fs_flags = FS_USERNS_MOUNT, }; int __init init_binderfs(void) { int ret; const char *name; size_t len; /* Verify that the default binderfs device names are valid. 
*/ name = binder_devices_param; for (len = strcspn(name, ","); len > 0; len = strcspn(name, ",")) { if (len > BINDERFS_MAX_NAME) return -E2BIG; name += len; if (*name == ',') name++; } /* Allocate new major number for binderfs. */ ret = alloc_chrdev_region(&binderfs_dev, 0, BINDERFS_MAX_MINOR, "binder"); if (ret) return ret; ret = register_filesystem(&binder_fs_type); if (ret) { unregister_chrdev_region(binderfs_dev, BINDERFS_MAX_MINOR); return ret; } return ret; }
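/*
 * A minimal userspace sketch (not part of the upstream file) of how the
 * binder-control ioctl implemented above is typically driven. The mount
 * point /dev/binderfs and the device name "my-binder" are illustrative
 * assumptions; only BINDER_CTL_ADD and struct binderfs_device come from
 * the UAPI header.
 */
#if 0 /* illustration only, userspace code */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/android/binderfs.h>

int create_binder_device(void)
{
	struct binderfs_device req;
	int fd, ret;

	memset(&req, 0, sizeof(req));
	/* Hypothetical device name; must fit in BINDERFS_MAX_NAME bytes. */
	strncpy(req.name, "my-binder", sizeof(req.name) - 1);

	/* Assumes binderfs is mounted at /dev/binderfs. */
	fd = open("/dev/binderfs/binder-control", O_RDONLY | O_CLOEXEC);
	if (fd < 0)
		return -1;

	/* binder_ctl_ioctl() above allocates the minor and device node. */
	ret = ioctl(fd, BINDER_CTL_ADD, &req);
	close(fd);
	if (ret < 0)
		return -1;

	/* On success the kernel filled in the allocated major/minor. */
	printf("created %s (%u:%u)\n", req.name, req.major, req.minor);
	return 0;
}
#endif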
linux-master
drivers/android/binderfs.c
// SPDX-License-Identifier: GPL-2.0 /* * Macintosh Nubus Interface Code * * Originally by Alan Cox * * Mostly rewritten by David Huggins-Daines, C. Scott Ananian, * and others. */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/nubus.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/hwtest.h> /* Constants */ /* This is, of course, the size in bytelanes, rather than the size in actual bytes */ #define FORMAT_BLOCK_SIZE 20 #define ROM_DIR_OFFSET 0x24 #define NUBUS_TEST_PATTERN 0x5A932BC7 /* Globals */ /* The "nubus.populate_procfs" parameter makes slot resources available in * procfs. It's deprecated and disabled by default because procfs is no longer * thought to be suitable for that and some board ROMs make it too expensive. */ bool nubus_populate_procfs; module_param_named(populate_procfs, nubus_populate_procfs, bool, 0); LIST_HEAD(nubus_func_rsrcs); /* Meaning of "bytelanes": The card ROM may appear on any or all bytes of each long word in NuBus memory. The low 4 bits of the "map" value found in the format block (at the top of the slot address space, as well as at the top of the MacOS ROM) tells us which bytelanes, i.e. which byte offsets within each longword, are valid. Thus: A map of 0x0f, as found in the MacOS ROM, means that all bytelanes are valid. A map of 0xf0 means that no bytelanes are valid (We pray that we will never encounter this, but stranger things have happened) A map of 0xe1 means that only the MSB of each long word is actually part of the card ROM. (We hope to never encounter NuBus on a little-endian machine. Again, stranger things have happened) A map of 0x78 means that only the LSB of each long word is valid. Etcetera, etcetera. Hopefully this clears up some confusion over what the following code actually does. */ static inline int not_useful(void *p, int map) { unsigned long pv = (unsigned long)p; pv &= 3; if (map & (1 << pv)) return 0; return 1; } static unsigned long nubus_get_rom(unsigned char **ptr, int len, int map) { /* This will hold the result */ unsigned long v = 0; unsigned char *p = *ptr; while (len) { v <<= 8; while (not_useful(p, map)) p++; v |= *p++; len--; } *ptr = p; return v; } static void nubus_rewind(unsigned char **ptr, int len, int map) { unsigned char *p = *ptr; while (len) { do { p--; } while (not_useful(p, map)); len--; } *ptr = p; } static void nubus_advance(unsigned char **ptr, int len, int map) { unsigned char *p = *ptr; while (len) { while (not_useful(p, map)) p++; p++; len--; } *ptr = p; } static void nubus_move(unsigned char **ptr, int len, int map) { unsigned long slot_space = (unsigned long)*ptr & 0xFF000000; if (len > 0) nubus_advance(ptr, len, map); else if (len < 0) nubus_rewind(ptr, -len, map); if (((unsigned long)*ptr & 0xFF000000) != slot_space) pr_err("%s: moved out of slot address space!\n", __func__); } /* Now, functions to read the sResource tree */ /* Each sResource entry consists of a 1-byte ID and a 3-byte data field. If that data field contains an offset, then obviously we have to expand it from a 24-bit signed number to a 32-bit signed number. */ static inline long nubus_expand32(long foo) { if (foo & 0x00800000) /* 24bit negative */ foo |= 0xFF000000; return foo; } static inline void *nubus_rom_addr(int slot) { /* * Returns the first byte after the card. 
We then walk * backwards to get the lane register and the config */ return (void *)(0xF1000000 + (slot << 24)); } unsigned char *nubus_dirptr(const struct nubus_dirent *nd) { unsigned char *p = nd->base; /* Essentially, just step over the bytelanes using whatever offset we might have found */ nubus_move(&p, nubus_expand32(nd->data), nd->mask); /* And return the value */ return p; } /* These two are for pulling resource data blocks (i.e. stuff that's pointed to with offsets) out of the card ROM. */ void nubus_get_rsrc_mem(void *dest, const struct nubus_dirent *dirent, unsigned int len) { unsigned char *t = dest; unsigned char *p = nubus_dirptr(dirent); while (len) { *t++ = nubus_get_rom(&p, 1, dirent->mask); len--; } } EXPORT_SYMBOL(nubus_get_rsrc_mem); unsigned int nubus_get_rsrc_str(char *dest, const struct nubus_dirent *dirent, unsigned int len) { char *t = dest; unsigned char *p = nubus_dirptr(dirent); while (len > 1) { unsigned char c = nubus_get_rom(&p, 1, dirent->mask); if (!c) break; *t++ = c; len--; } if (len > 0) *t = '\0'; return t - dest; } EXPORT_SYMBOL(nubus_get_rsrc_str); void nubus_seq_write_rsrc_mem(struct seq_file *m, const struct nubus_dirent *dirent, unsigned int len) { unsigned long buf[32]; unsigned int buf_size = sizeof(buf); unsigned char *p = nubus_dirptr(dirent); /* If possible, write out full buffers */ while (len >= buf_size) { unsigned int i; for (i = 0; i < ARRAY_SIZE(buf); i++) buf[i] = nubus_get_rom(&p, sizeof(buf[0]), dirent->mask); seq_write(m, buf, buf_size); len -= buf_size; } /* If not, write out individual bytes */ while (len--) seq_putc(m, nubus_get_rom(&p, 1, dirent->mask)); } int nubus_get_root_dir(const struct nubus_board *board, struct nubus_dir *dir) { dir->ptr = dir->base = board->directory; dir->done = 0; dir->mask = board->lanes; return 0; } EXPORT_SYMBOL(nubus_get_root_dir); /* This is a slyly renamed version of the above */ int nubus_get_func_dir(const struct nubus_rsrc *fres, struct nubus_dir *dir) { dir->ptr = dir->base = fres->directory; dir->done = 0; dir->mask = fres->board->lanes; return 0; } EXPORT_SYMBOL(nubus_get_func_dir); int nubus_get_board_dir(const struct nubus_board *board, struct nubus_dir *dir) { struct nubus_dirent ent; dir->ptr = dir->base = board->directory; dir->done = 0; dir->mask = board->lanes; /* Now dereference it (the first directory is always the board directory) */ if (nubus_readdir(dir, &ent) == -1) return -1; if (nubus_get_subdir(&ent, dir) == -1) return -1; return 0; } EXPORT_SYMBOL(nubus_get_board_dir); int nubus_get_subdir(const struct nubus_dirent *ent, struct nubus_dir *dir) { dir->ptr = dir->base = nubus_dirptr(ent); dir->done = 0; dir->mask = ent->mask; return 0; } EXPORT_SYMBOL(nubus_get_subdir); int nubus_readdir(struct nubus_dir *nd, struct nubus_dirent *ent) { u32 resid; if (nd->done) return -1; /* Do this first, otherwise nubus_rewind & co are off by 4 */ ent->base = nd->ptr; /* This moves nd->ptr forward */ resid = nubus_get_rom(&nd->ptr, 4, nd->mask); /* EOL marker, as per the Apple docs */ if ((resid & 0xff000000) == 0xff000000) { /* Mark it as done */ nd->done = 1; return -1; } /* First byte is the resource ID */ ent->type = resid >> 24; /* Low 3 bytes might contain data (or might not) */ ent->data = resid & 0xffffff; ent->mask = nd->mask; return 0; } EXPORT_SYMBOL(nubus_readdir); int nubus_rewinddir(struct nubus_dir *dir) { dir->ptr = dir->base; dir->done = 0; return 0; } EXPORT_SYMBOL(nubus_rewinddir); /* Driver interface functions, more or less like in pci.c */ struct nubus_rsrc 
*nubus_first_rsrc_or_null(void) { return list_first_entry_or_null(&nubus_func_rsrcs, struct nubus_rsrc, list); } EXPORT_SYMBOL(nubus_first_rsrc_or_null); struct nubus_rsrc *nubus_next_rsrc_or_null(struct nubus_rsrc *from) { if (list_is_last(&from->list, &nubus_func_rsrcs)) return NULL; return list_next_entry(from, list); } EXPORT_SYMBOL(nubus_next_rsrc_or_null); int nubus_find_rsrc(struct nubus_dir *dir, unsigned char rsrc_type, struct nubus_dirent *ent) { while (nubus_readdir(dir, ent) != -1) { if (ent->type == rsrc_type) return 0; } return -1; } EXPORT_SYMBOL(nubus_find_rsrc); /* Initialization functions - decide which slots contain stuff worth looking at, and print out lots and lots of information from the resource blocks. */ static int __init nubus_get_block_rsrc_dir(struct nubus_board *board, struct proc_dir_entry *procdir, const struct nubus_dirent *parent) { struct nubus_dir dir; struct nubus_dirent ent; nubus_get_subdir(parent, &dir); dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board); while (nubus_readdir(&dir, &ent) != -1) { u32 size; nubus_get_rsrc_mem(&size, &ent, 4); pr_debug(" block (0x%x), size %d\n", ent.type, size); nubus_proc_add_rsrc_mem(dir.procdir, &ent, size); } return 0; } static int __init nubus_get_display_vidmode(struct nubus_board *board, struct proc_dir_entry *procdir, const struct nubus_dirent *parent) { struct nubus_dir dir; struct nubus_dirent ent; nubus_get_subdir(parent, &dir); dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board); while (nubus_readdir(&dir, &ent) != -1) { switch (ent.type) { case 1: /* mVidParams */ case 2: /* mTable */ { u32 size; nubus_get_rsrc_mem(&size, &ent, 4); pr_debug(" block (0x%x), size %d\n", ent.type, size); nubus_proc_add_rsrc_mem(dir.procdir, &ent, size); break; } default: pr_debug(" unknown resource 0x%02x, data 0x%06x\n", ent.type, ent.data); nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0); } } return 0; } static int __init nubus_get_display_resource(struct nubus_rsrc *fres, struct proc_dir_entry *procdir, const struct nubus_dirent *ent) { switch (ent->type) { case NUBUS_RESID_GAMMADIR: pr_debug(" gamma directory offset: 0x%06x\n", ent->data); nubus_get_block_rsrc_dir(fres->board, procdir, ent); break; case 0x0080 ... 
0x0085: pr_debug(" mode 0x%02x info offset: 0x%06x\n", ent->type, ent->data); nubus_get_display_vidmode(fres->board, procdir, ent); break; default: pr_debug(" unknown resource 0x%02x, data 0x%06x\n", ent->type, ent->data); nubus_proc_add_rsrc_mem(procdir, ent, 0); } return 0; } static int __init nubus_get_network_resource(struct nubus_rsrc *fres, struct proc_dir_entry *procdir, const struct nubus_dirent *ent) { switch (ent->type) { case NUBUS_RESID_MAC_ADDRESS: { char addr[6]; nubus_get_rsrc_mem(addr, ent, 6); pr_debug(" MAC address: %pM\n", addr); nubus_proc_add_rsrc_mem(procdir, ent, 6); break; } default: pr_debug(" unknown resource 0x%02x, data 0x%06x\n", ent->type, ent->data); nubus_proc_add_rsrc_mem(procdir, ent, 0); } return 0; } static int __init nubus_get_cpu_resource(struct nubus_rsrc *fres, struct proc_dir_entry *procdir, const struct nubus_dirent *ent) { switch (ent->type) { case NUBUS_RESID_MEMINFO: { unsigned long meminfo[2]; nubus_get_rsrc_mem(&meminfo, ent, 8); pr_debug(" memory: [ 0x%08lx 0x%08lx ]\n", meminfo[0], meminfo[1]); nubus_proc_add_rsrc_mem(procdir, ent, 8); break; } case NUBUS_RESID_ROMINFO: { unsigned long rominfo[2]; nubus_get_rsrc_mem(&rominfo, ent, 8); pr_debug(" ROM: [ 0x%08lx 0x%08lx ]\n", rominfo[0], rominfo[1]); nubus_proc_add_rsrc_mem(procdir, ent, 8); break; } default: pr_debug(" unknown resource 0x%02x, data 0x%06x\n", ent->type, ent->data); nubus_proc_add_rsrc_mem(procdir, ent, 0); } return 0; } static int __init nubus_get_private_resource(struct nubus_rsrc *fres, struct proc_dir_entry *procdir, const struct nubus_dirent *ent) { switch (fres->category) { case NUBUS_CAT_DISPLAY: nubus_get_display_resource(fres, procdir, ent); break; case NUBUS_CAT_NETWORK: nubus_get_network_resource(fres, procdir, ent); break; case NUBUS_CAT_CPU: nubus_get_cpu_resource(fres, procdir, ent); break; default: pr_debug(" unknown resource 0x%02x, data 0x%06x\n", ent->type, ent->data); nubus_proc_add_rsrc_mem(procdir, ent, 0); } return 0; } static struct nubus_rsrc * __init nubus_get_functional_resource(struct nubus_board *board, int slot, const struct nubus_dirent *parent) { struct nubus_dir dir; struct nubus_dirent ent; struct nubus_rsrc *fres; pr_debug(" Functional resource 0x%02x:\n", parent->type); nubus_get_subdir(parent, &dir); dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board); /* Actually we should probably panic if this fails */ fres = kzalloc(sizeof(*fres), GFP_ATOMIC); if (!fres) return NULL; fres->resid = parent->type; fres->directory = dir.base; fres->board = board; while (nubus_readdir(&dir, &ent) != -1) { switch (ent.type) { case NUBUS_RESID_TYPE: { unsigned short nbtdata[4]; nubus_get_rsrc_mem(nbtdata, &ent, 8); fres->category = nbtdata[0]; fres->type = nbtdata[1]; fres->dr_sw = nbtdata[2]; fres->dr_hw = nbtdata[3]; pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n", nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]); nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8); break; } case NUBUS_RESID_NAME: { char name[64]; unsigned int len; len = nubus_get_rsrc_str(name, &ent, sizeof(name)); pr_debug(" name: %s\n", name); nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1); break; } case NUBUS_RESID_DRVRDIR: { /* MacOS driver. If we were NetBSD we might use this :-) */ pr_debug(" driver directory offset: 0x%06x\n", ent.data); nubus_get_block_rsrc_dir(board, dir.procdir, &ent); break; } case NUBUS_RESID_MINOR_BASEOS: { /* We will need this in order to support multiple framebuffers. 
It might be handy for Ethernet as well */ u32 base_offset; nubus_get_rsrc_mem(&base_offset, &ent, 4); pr_debug(" memory offset: 0x%08x\n", base_offset); nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4); break; } case NUBUS_RESID_MINOR_LENGTH: { /* Ditto */ u32 length; nubus_get_rsrc_mem(&length, &ent, 4); pr_debug(" memory length: 0x%08x\n", length); nubus_proc_add_rsrc_mem(dir.procdir, &ent, 4); break; } case NUBUS_RESID_FLAGS: pr_debug(" flags: 0x%06x\n", ent.data); nubus_proc_add_rsrc(dir.procdir, &ent); break; case NUBUS_RESID_HWDEVID: pr_debug(" hwdevid: 0x%06x\n", ent.data); nubus_proc_add_rsrc(dir.procdir, &ent); break; default: if (nubus_populate_procfs) nubus_get_private_resource(fres, dir.procdir, &ent); } } return fres; } /* This is *really* cool. */ static int __init nubus_get_icon(struct nubus_board *board, struct proc_dir_entry *procdir, const struct nubus_dirent *ent) { /* Should be 32x32 if my memory serves me correctly */ u32 icon[32]; int i; nubus_get_rsrc_mem(&icon, ent, 128); pr_debug(" icon:\n"); for (i = 0; i < 8; i++) pr_debug(" %08x %08x %08x %08x\n", icon[i * 4 + 0], icon[i * 4 + 1], icon[i * 4 + 2], icon[i * 4 + 3]); nubus_proc_add_rsrc_mem(procdir, ent, 128); return 0; } static int __init nubus_get_vendorinfo(struct nubus_board *board, struct proc_dir_entry *procdir, const struct nubus_dirent *parent) { struct nubus_dir dir; struct nubus_dirent ent; static char *vendor_fields[6] = { "ID", "serial", "revision", "part", "date", "unknown field" }; pr_debug(" vendor info:\n"); nubus_get_subdir(parent, &dir); dir.procdir = nubus_proc_add_rsrc_dir(procdir, parent, board); while (nubus_readdir(&dir, &ent) != -1) { char name[64]; unsigned int len; /* These are all strings, we think */ len = nubus_get_rsrc_str(name, &ent, sizeof(name)); if (ent.type < 1 || ent.type > 5) ent.type = 5; pr_debug(" %s: %s\n", vendor_fields[ent.type - 1], name); nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1); } return 0; } static int __init nubus_get_board_resource(struct nubus_board *board, int slot, const struct nubus_dirent *parent) { struct nubus_dir dir; struct nubus_dirent ent; pr_debug(" Board resource 0x%02x:\n", parent->type); nubus_get_subdir(parent, &dir); dir.procdir = nubus_proc_add_rsrc_dir(board->procdir, parent, board); while (nubus_readdir(&dir, &ent) != -1) { switch (ent.type) { case NUBUS_RESID_TYPE: { unsigned short nbtdata[4]; /* This type is always the same, and is not useful except insofar as it tells us that we really are looking at a board resource. 
*/ nubus_get_rsrc_mem(nbtdata, &ent, 8); pr_debug(" type: [cat 0x%x type 0x%x sw 0x%x hw 0x%x]\n", nbtdata[0], nbtdata[1], nbtdata[2], nbtdata[3]); if (nbtdata[0] != 1 || nbtdata[1] != 0 || nbtdata[2] != 0 || nbtdata[3] != 0) pr_err("Slot %X: sResource is not a board resource!\n", slot); nubus_proc_add_rsrc_mem(dir.procdir, &ent, 8); break; } case NUBUS_RESID_NAME: { unsigned int len; len = nubus_get_rsrc_str(board->name, &ent, sizeof(board->name)); pr_debug(" name: %s\n", board->name); nubus_proc_add_rsrc_mem(dir.procdir, &ent, len + 1); break; } case NUBUS_RESID_ICON: nubus_get_icon(board, dir.procdir, &ent); break; case NUBUS_RESID_BOARDID: pr_debug(" board id: 0x%x\n", ent.data); nubus_proc_add_rsrc(dir.procdir, &ent); break; case NUBUS_RESID_PRIMARYINIT: pr_debug(" primary init offset: 0x%06x\n", ent.data); nubus_proc_add_rsrc(dir.procdir, &ent); break; case NUBUS_RESID_VENDORINFO: nubus_get_vendorinfo(board, dir.procdir, &ent); break; case NUBUS_RESID_FLAGS: pr_debug(" flags: 0x%06x\n", ent.data); nubus_proc_add_rsrc(dir.procdir, &ent); break; case NUBUS_RESID_HWDEVID: pr_debug(" hwdevid: 0x%06x\n", ent.data); nubus_proc_add_rsrc(dir.procdir, &ent); break; case NUBUS_RESID_SECONDINIT: pr_debug(" secondary init offset: 0x%06x\n", ent.data); nubus_proc_add_rsrc(dir.procdir, &ent); break; /* WTF isn't this in the functional resources? */ case NUBUS_RESID_VIDNAMES: pr_debug(" vidnames directory offset: 0x%06x\n", ent.data); nubus_get_block_rsrc_dir(board, dir.procdir, &ent); break; /* Same goes for this */ case NUBUS_RESID_VIDMODES: pr_debug(" video mode parameter directory offset: 0x%06x\n", ent.data); nubus_proc_add_rsrc(dir.procdir, &ent); break; default: pr_debug(" unknown resource 0x%02x, data 0x%06x\n", ent.type, ent.data); nubus_proc_add_rsrc_mem(dir.procdir, &ent, 0); } } return 0; } static void __init nubus_add_board(int slot, int bytelanes) { struct nubus_board *board; unsigned char *rp; unsigned long dpat; struct nubus_dir dir; struct nubus_dirent ent; int prev_resid = -1; /* Move to the start of the format block */ rp = nubus_rom_addr(slot); nubus_rewind(&rp, FORMAT_BLOCK_SIZE, bytelanes); /* Actually we should probably panic if this fails */ if ((board = kzalloc(sizeof(*board), GFP_ATOMIC)) == NULL) return; board->fblock = rp; /* Dump the format block for debugging purposes */ pr_debug("Slot %X, format block at 0x%p:\n", slot, rp); pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes)); pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes)); pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes)); pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes)); pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes)); pr_debug("%08lx\n", nubus_get_rom(&rp, 4, bytelanes)); pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes)); pr_debug("%02lx\n", nubus_get_rom(&rp, 1, bytelanes)); rp = board->fblock; board->slot = slot; board->slot_addr = (unsigned long)nubus_slot_addr(slot); board->doffset = nubus_get_rom(&rp, 4, bytelanes); /* rom_length is *supposed* to be the total length of the * ROM. In practice it is the "amount of ROM used to compute * the CRC." So some jokers decide to set it to zero and * set the crc to zero so they don't have to do any math. * See the Performa 460 ROM, for example. Those Apple "engineers". */ board->rom_length = nubus_get_rom(&rp, 4, bytelanes); board->crc = nubus_get_rom(&rp, 4, bytelanes); board->rev = nubus_get_rom(&rp, 1, bytelanes); board->format = nubus_get_rom(&rp, 1, bytelanes); board->lanes = bytelanes; /* Directory offset should be small and negative... 
*/ if (!(board->doffset & 0x00FF0000)) pr_warn("Slot %X: Dodgy doffset!\n", slot); dpat = nubus_get_rom(&rp, 4, bytelanes); if (dpat != NUBUS_TEST_PATTERN) pr_warn("Slot %X: Wrong test pattern %08lx!\n", slot, dpat); /* * I wonder how the CRC is meant to work - * any takers ? * CSA: According to MAC docs, not all cards pass the CRC anyway, * since the initial Macintosh ROM releases skipped the check. */ /* Set up the directory pointer */ board->directory = board->fblock; nubus_move(&board->directory, nubus_expand32(board->doffset), board->lanes); nubus_get_root_dir(board, &dir); /* We're ready to rock */ pr_debug("Slot %X resources:\n", slot); /* Each slot should have one board resource and any number of * functional resources. So we'll fill in some fields in the * struct nubus_board from the board resource, then walk down * the list of functional resources, spinning out a nubus_rsrc * for each of them. */ if (nubus_readdir(&dir, &ent) == -1) { /* We can't have this! */ pr_err("Slot %X: Board resource not found!\n", slot); kfree(board); return; } if (ent.type < 1 || ent.type > 127) pr_warn("Slot %X: Board resource ID is invalid!\n", slot); board->procdir = nubus_proc_add_board(board); nubus_get_board_resource(board, slot, &ent); while (nubus_readdir(&dir, &ent) != -1) { struct nubus_rsrc *fres; fres = nubus_get_functional_resource(board, slot, &ent); if (fres == NULL) continue; /* Resources should appear in ascending ID order. This sanity * check prevents duplicate resource IDs. */ if (fres->resid <= prev_resid) { kfree(fres); continue; } prev_resid = fres->resid; list_add_tail(&fres->list, &nubus_func_rsrcs); } if (nubus_device_register(board)) put_device(&board->dev); } static void __init nubus_probe_slot(int slot) { unsigned char dp; unsigned char *rp; int i; rp = nubus_rom_addr(slot); for (i = 4; i; i--) { rp--; if (!hwreg_present(rp)) continue; dp = *rp; /* The last byte of the format block consists of two nybbles which are "mirror images" of each other. These show us the valid bytelanes */ if ((((dp >> 4) ^ dp) & 0x0F) != 0x0F) continue; /* Check that this value is actually *on* one of the bytelanes it claims are valid! */ if (not_useful(rp, dp)) continue; /* Looks promising. Let's put it on the list. */ nubus_add_board(slot, dp); return; } } static void __init nubus_scan_bus(void) { int slot; pr_info("NuBus: Scanning NuBus slots.\n"); for (slot = 9; slot < 15; slot++) { nubus_probe_slot(slot); } } static int __init nubus_init(void) { int err; if (!MACH_IS_MAC) return 0; nubus_proc_init(); err = nubus_parent_device_register(); if (err) return err; nubus_scan_bus(); return 0; } subsys_initcall(nubus_init);
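/*
 * A hedged sketch (not part of the upstream file) of how a NuBus driver
 * might walk the functional resources collected by the scan above. The
 * category check is a placeholder; the helpers used are the ones this
 * file exports (nubus_first_rsrc_or_null, nubus_next_rsrc_or_null,
 * nubus_get_func_dir, nubus_find_rsrc, nubus_get_rsrc_str).
 */
#if 0 /* illustration only */
static int example_find_my_card(void)
{
	struct nubus_rsrc *fres;

	for (fres = nubus_first_rsrc_or_null(); fres;
	     fres = nubus_next_rsrc_or_null(fres)) {
		struct nubus_dir dir;
		struct nubus_dirent ent;
		char name[64];

		/* Placeholder match: e.g. only display functions. */
		if (fres->category != NUBUS_CAT_DISPLAY)
			continue;

		/* Open this function's sResource directory... */
		nubus_get_func_dir(fres, &dir);

		/* ...and pull out its name string, as the scan code does. */
		if (nubus_find_rsrc(&dir, NUBUS_RESID_NAME, &ent) == 0) {
			nubus_get_rsrc_str(name, &ent, sizeof(name));
			pr_info("found %s in slot %X\n", name,
				fres->board->slot);
		}
		return 0;
	}
	return -ENODEV;
}
#endif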
linux-master
drivers/nubus/nubus.c
// SPDX-License-Identifier: GPL-2.0 // // Bus implementation for the NuBus subsystem. // // Copyright (C) 2017 Finn Thain #include <linux/device.h> #include <linux/dma-mapping.h> #include <linux/list.h> #include <linux/nubus.h> #include <linux/seq_file.h> #include <linux/slab.h> #define to_nubus_board(d) container_of(d, struct nubus_board, dev) #define to_nubus_driver(d) container_of(d, struct nubus_driver, driver) static int nubus_device_probe(struct device *dev) { struct nubus_driver *ndrv = to_nubus_driver(dev->driver); int err = -ENODEV; if (ndrv->probe) err = ndrv->probe(to_nubus_board(dev)); return err; } static void nubus_device_remove(struct device *dev) { struct nubus_driver *ndrv = to_nubus_driver(dev->driver); if (ndrv->remove) ndrv->remove(to_nubus_board(dev)); } struct bus_type nubus_bus_type = { .name = "nubus", .probe = nubus_device_probe, .remove = nubus_device_remove, }; EXPORT_SYMBOL(nubus_bus_type); int nubus_driver_register(struct nubus_driver *ndrv) { ndrv->driver.bus = &nubus_bus_type; return driver_register(&ndrv->driver); } EXPORT_SYMBOL(nubus_driver_register); void nubus_driver_unregister(struct nubus_driver *ndrv) { driver_unregister(&ndrv->driver); } EXPORT_SYMBOL(nubus_driver_unregister); static struct device nubus_parent = { .init_name = "nubus", }; static int __init nubus_bus_register(void) { return bus_register(&nubus_bus_type); } postcore_initcall(nubus_bus_register); int __init nubus_parent_device_register(void) { return device_register(&nubus_parent); } static void nubus_device_release(struct device *dev) { struct nubus_board *board = to_nubus_board(dev); struct nubus_rsrc *fres, *tmp; list_for_each_entry_safe(fres, tmp, &nubus_func_rsrcs, list) if (fres->board == board) { list_del(&fres->list); kfree(fres); } kfree(board); } int nubus_device_register(struct nubus_board *board) { board->dev.parent = &nubus_parent; board->dev.release = nubus_device_release; board->dev.bus = &nubus_bus_type; dev_set_name(&board->dev, "slot.%X", board->slot); board->dev.dma_mask = &board->dev.coherent_dma_mask; dma_set_mask(&board->dev, DMA_BIT_MASK(32)); return device_register(&board->dev); } static int nubus_print_device_name_fn(struct device *dev, void *data) { struct nubus_board *board = to_nubus_board(dev); struct seq_file *m = data; seq_printf(m, "Slot %X: %s\n", board->slot, board->name); return 0; } int nubus_proc_show(struct seq_file *m, void *data) { return bus_for_each_dev(&nubus_bus_type, NULL, m, nubus_print_device_name_fn); }
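/*
 * A minimal sketch (not part of the upstream file) of a client of this
 * bus implementation. The "example" names are placeholders; the probe
 * and remove signatures follow nubus_device_probe() and
 * nubus_device_remove() above.
 */
#if 0 /* illustration only */
static int example_probe(struct nubus_board *board)
{
	dev_info(&board->dev, "bound to slot %X\n", board->slot);
	return 0;
}

static void example_remove(struct nubus_board *board)
{
	dev_info(&board->dev, "unbound\n");
}

static struct nubus_driver example_driver = {
	.probe = example_probe,
	.remove = example_remove,
	.driver = {
		.name = "example_nubus",
		.owner = THIS_MODULE,
	},
};

/* Registers on load, unregisters on unload. */
module_driver(example_driver, nubus_driver_register,
	      nubus_driver_unregister);
#endif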
linux-master
drivers/nubus/bus.c
// SPDX-License-Identifier: GPL-2.0 /* drivers/nubus/proc.c: Proc FS interface for NuBus. By David Huggins-Daines <[email protected]> Much code and many ideas from drivers/pci/proc.c: Copyright (c) 1997, 1998 Martin Mares <[email protected]> This is initially based on the Zorro and PCI interfaces. However, it works somewhat differently. The intent is to provide a structure in /proc analogous to the structure of the NuBus ROM resources. Therefore each board function gets a directory, which may in turn contain subdirectories. Each slot resource is a file. Unrecognized resources are empty files, since every resource ID requires a special case (e.g. if the resource ID implies a directory or block, then its value has to be interpreted as a slot ROM pointer etc.). */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/nubus.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/module.h> #include <linux/uaccess.h> #include <asm/byteorder.h> /* * /proc/bus/nubus/devices stuff */ static int nubus_devices_proc_show(struct seq_file *m, void *v) { struct nubus_rsrc *fres; for_each_func_rsrc(fres) seq_printf(m, "%x\t%04x %04x %04x %04x\t%08lx\n", fres->board->slot, fres->category, fres->type, fres->dr_sw, fres->dr_hw, fres->board->slot_addr); return 0; } static struct proc_dir_entry *proc_bus_nubus_dir; /* * /proc/bus/nubus/x/ stuff */ struct proc_dir_entry *nubus_proc_add_board(struct nubus_board *board) { char name[2]; if (!proc_bus_nubus_dir || !nubus_populate_procfs) return NULL; snprintf(name, sizeof(name), "%x", board->slot); return proc_mkdir(name, proc_bus_nubus_dir); } /* The PDE private data for any directory under /proc/bus/nubus/x/ * is the bytelanes value for the board in slot x. */ struct proc_dir_entry *nubus_proc_add_rsrc_dir(struct proc_dir_entry *procdir, const struct nubus_dirent *ent, struct nubus_board *board) { char name[9]; int lanes = board->lanes; if (!procdir || !nubus_populate_procfs) return NULL; snprintf(name, sizeof(name), "%x", ent->type); remove_proc_subtree(name, procdir); return proc_mkdir_data(name, 0555, procdir, (void *)lanes); } /* The PDE private data for a file under /proc/bus/nubus/x/ is a pointer to * an instance of the following structure, which gives the location and size * of the resource data in the slot ROM. For slot resources which hold only a * small integer, this integer value is stored directly and size is set to 0. * A NULL private data pointer indicates an unrecognized resource. 
*/ struct nubus_proc_pde_data { unsigned char *res_ptr; unsigned int res_size; }; static struct nubus_proc_pde_data * nubus_proc_alloc_pde_data(unsigned char *ptr, unsigned int size) { struct nubus_proc_pde_data *pded; pded = kmalloc(sizeof(*pded), GFP_KERNEL); if (!pded) return NULL; pded->res_ptr = ptr; pded->res_size = size; return pded; } static int nubus_proc_rsrc_show(struct seq_file *m, void *v) { struct inode *inode = m->private; struct nubus_proc_pde_data *pded; pded = pde_data(inode); if (!pded) return 0; if (pded->res_size > m->size) return -EFBIG; if (pded->res_size) { int lanes = (int)proc_get_parent_data(inode); struct nubus_dirent ent; if (!lanes) return 0; ent.mask = lanes; ent.base = pded->res_ptr; ent.data = 0; nubus_seq_write_rsrc_mem(m, &ent, pded->res_size); } else { unsigned int data = (unsigned int)pded->res_ptr; seq_putc(m, data >> 16); seq_putc(m, data >> 8); seq_putc(m, data >> 0); } return 0; } static int nubus_rsrc_proc_open(struct inode *inode, struct file *file) { return single_open(file, nubus_proc_rsrc_show, inode); } static const struct proc_ops nubus_rsrc_proc_ops = { .proc_open = nubus_rsrc_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = single_release, }; void nubus_proc_add_rsrc_mem(struct proc_dir_entry *procdir, const struct nubus_dirent *ent, unsigned int size) { char name[9]; struct nubus_proc_pde_data *pded; if (!procdir || !nubus_populate_procfs) return; snprintf(name, sizeof(name), "%x", ent->type); if (size) pded = nubus_proc_alloc_pde_data(nubus_dirptr(ent), size); else pded = NULL; remove_proc_subtree(name, procdir); proc_create_data(name, S_IFREG | 0444, procdir, &nubus_rsrc_proc_ops, pded); } void nubus_proc_add_rsrc(struct proc_dir_entry *procdir, const struct nubus_dirent *ent) { char name[9]; unsigned char *data = (unsigned char *)ent->data; if (!procdir || !nubus_populate_procfs) return; snprintf(name, sizeof(name), "%x", ent->type); remove_proc_subtree(name, procdir); proc_create_data(name, S_IFREG | 0444, procdir, &nubus_rsrc_proc_ops, nubus_proc_alloc_pde_data(data, 0)); } /* * /proc/nubus stuff */ void __init nubus_proc_init(void) { proc_create_single("nubus", 0, NULL, nubus_proc_show); proc_bus_nubus_dir = proc_mkdir("bus/nubus", NULL); if (!proc_bus_nubus_dir) return; proc_create_single("devices", 0, proc_bus_nubus_dir, nubus_devices_proc_show); }
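/*
 * A hedged userspace sketch (not part of the upstream file) that parses
 * /proc/bus/nubus/devices as emitted by nubus_devices_proc_show() above,
 * whose format is "%x\t%04x %04x %04x %04x\t%08lx\n" per functional
 * resource.
 */
#if 0 /* illustration only, userspace code */
#include <stdio.h>

int list_nubus_devices(void)
{
	unsigned int slot, category, type, dr_sw, dr_hw;
	unsigned long slot_addr;
	FILE *f = fopen("/proc/bus/nubus/devices", "r");

	if (!f)
		return -1;
	/* One line per functional resource, six hex fields. */
	while (fscanf(f, "%x %x %x %x %x %lx", &slot, &category, &type,
		      &dr_sw, &dr_hw, &slot_addr) == 6)
		printf("slot %X: cat 0x%04x type 0x%04x at 0x%08lx\n",
		       slot, category, type, slot_addr);
	fclose(f);
	return 0;
}
#endif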
linux-master
drivers/nubus/proc.c
// SPDX-License-Identifier: GPL-2.0-or-later /* * Copyright (C) 2006, 2007 Eugene Konev <[email protected]> * * Parts of the VLYNQ specification can be found here: * http://www.ti.com/litv/pdf/sprue36a */ #include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/device.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/irq.h> #include <linux/vlynq.h> #define VLYNQ_CTRL_PM_ENABLE 0x80000000 #define VLYNQ_CTRL_CLOCK_INT 0x00008000 #define VLYNQ_CTRL_CLOCK_DIV(x) (((x) & 7) << 16) #define VLYNQ_CTRL_INT_LOCAL 0x00004000 #define VLYNQ_CTRL_INT_ENABLE 0x00002000 #define VLYNQ_CTRL_INT_VECTOR(x) (((x) & 0x1f) << 8) #define VLYNQ_CTRL_INT2CFG 0x00000080 #define VLYNQ_CTRL_RESET 0x00000001 #define VLYNQ_CTRL_CLOCK_MASK (0x7 << 16) #define VLYNQ_INT_OFFSET 0x00000014 #define VLYNQ_REMOTE_OFFSET 0x00000080 #define VLYNQ_STATUS_LINK 0x00000001 #define VLYNQ_STATUS_LERROR 0x00000080 #define VLYNQ_STATUS_RERROR 0x00000100 #define VINT_ENABLE 0x00000100 #define VINT_TYPE_EDGE 0x00000080 #define VINT_LEVEL_LOW 0x00000040 #define VINT_VECTOR(x) ((x) & 0x1f) #define VINT_OFFSET(irq) (8 * ((irq) % 4)) #define VLYNQ_AUTONEGO_V2 0x00010000 struct vlynq_regs { u32 revision; u32 control; u32 status; u32 int_prio; u32 int_status; u32 int_pending; u32 int_ptr; u32 tx_offset; struct vlynq_mapping rx_mapping[4]; u32 chip; u32 autonego; u32 unused[6]; u32 int_device[8]; }; #ifdef CONFIG_VLYNQ_DEBUG static void vlynq_dump_regs(struct vlynq_device *dev) { int i; printk(KERN_DEBUG "VLYNQ local=%p remote=%p\n", dev->local, dev->remote); for (i = 0; i < 32; i++) { printk(KERN_DEBUG "VLYNQ: local %d: %08x\n", i + 1, ((u32 *)dev->local)[i]); printk(KERN_DEBUG "VLYNQ: remote %d: %08x\n", i + 1, ((u32 *)dev->remote)[i]); } } static void vlynq_dump_mem(u32 *base, int count) { int i; for (i = 0; i < (count + 3) / 4; i++) { if (i % 4 == 0) printk(KERN_DEBUG "\nMEM[0x%04x]:", i * 4); printk(KERN_DEBUG " 0x%08x", *(base + i)); } printk(KERN_DEBUG "\n"); } #endif /* Check the VLYNQ link status with a given device */ static int vlynq_linked(struct vlynq_device *dev) { int i; for (i = 0; i < 100; i++) if (readl(&dev->local->status) & VLYNQ_STATUS_LINK) return 1; else cpu_relax(); return 0; } static void vlynq_reset(struct vlynq_device *dev) { writel(readl(&dev->local->control) | VLYNQ_CTRL_RESET, &dev->local->control); /* Wait for the devices to finish resetting */ msleep(5); /* Remove reset bit */ writel(readl(&dev->local->control) & ~VLYNQ_CTRL_RESET, &dev->local->control); /* Give some time for the devices to settle */ msleep(5); } static void vlynq_irq_unmask(struct irq_data *d) { struct vlynq_device *dev = irq_data_get_irq_chip_data(d); int virq; u32 val; BUG_ON(!dev); virq = d->irq - dev->irq_start; val = readl(&dev->remote->int_device[virq >> 2]); val |= (VINT_ENABLE | virq) << VINT_OFFSET(virq); writel(val, &dev->remote->int_device[virq >> 2]); } static void vlynq_irq_mask(struct irq_data *d) { struct vlynq_device *dev = irq_data_get_irq_chip_data(d); int virq; u32 val; BUG_ON(!dev); virq = d->irq - dev->irq_start; val = readl(&dev->remote->int_device[virq >> 2]); val &= ~(VINT_ENABLE << VINT_OFFSET(virq)); writel(val, &dev->remote->int_device[virq >> 2]); } static int vlynq_irq_type(struct irq_data *d, unsigned int flow_type) { struct vlynq_device *dev = irq_data_get_irq_chip_data(d); int virq; u32 val; 
BUG_ON(!dev); virq = d->irq - dev->irq_start; val = readl(&dev->remote->int_device[virq >> 2]); switch (flow_type & IRQ_TYPE_SENSE_MASK) { case IRQ_TYPE_EDGE_RISING: case IRQ_TYPE_EDGE_FALLING: case IRQ_TYPE_EDGE_BOTH: val |= VINT_TYPE_EDGE << VINT_OFFSET(virq); val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq)); break; case IRQ_TYPE_LEVEL_HIGH: val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq)); val &= ~(VINT_LEVEL_LOW << VINT_OFFSET(virq)); break; case IRQ_TYPE_LEVEL_LOW: val &= ~(VINT_TYPE_EDGE << VINT_OFFSET(virq)); val |= VINT_LEVEL_LOW << VINT_OFFSET(virq); break; default: return -EINVAL; } writel(val, &dev->remote->int_device[virq >> 2]); return 0; } static void vlynq_local_ack(struct irq_data *d) { struct vlynq_device *dev = irq_data_get_irq_chip_data(d); u32 status = readl(&dev->local->status); pr_debug("%s: local status: 0x%08x\n", dev_name(&dev->dev), status); writel(status, &dev->local->status); } static void vlynq_remote_ack(struct irq_data *d) { struct vlynq_device *dev = irq_data_get_irq_chip_data(d); u32 status = readl(&dev->remote->status); pr_debug("%s: remote status: 0x%08x\n", dev_name(&dev->dev), status); writel(status, &dev->remote->status); } static irqreturn_t vlynq_irq(int irq, void *dev_id) { struct vlynq_device *dev = dev_id; u32 status; int virq = 0; status = readl(&dev->local->int_status); writel(status, &dev->local->int_status); if (unlikely(!status)) spurious_interrupt(); while (status) { if (status & 1) do_IRQ(dev->irq_start + virq); status >>= 1; virq++; } return IRQ_HANDLED; } static struct irq_chip vlynq_irq_chip = { .name = "vlynq", .irq_unmask = vlynq_irq_unmask, .irq_mask = vlynq_irq_mask, .irq_set_type = vlynq_irq_type, }; static struct irq_chip vlynq_local_chip = { .name = "vlynq local error", .irq_unmask = vlynq_irq_unmask, .irq_mask = vlynq_irq_mask, .irq_ack = vlynq_local_ack, }; static struct irq_chip vlynq_remote_chip = { .name = "vlynq remote error", .irq_unmask = vlynq_irq_unmask, .irq_mask = vlynq_irq_mask, .irq_ack = vlynq_remote_ack, }; static int vlynq_setup_irq(struct vlynq_device *dev) { u32 val; int i, virq; if (dev->local_irq == dev->remote_irq) { printk(KERN_ERR "%s: local vlynq irq should be different from remote\n", dev_name(&dev->dev)); return -EINVAL; } /* Clear local and remote error bits */ writel(readl(&dev->local->status), &dev->local->status); writel(readl(&dev->remote->status), &dev->remote->status); /* Now setup interrupts */ val = VLYNQ_CTRL_INT_VECTOR(dev->local_irq); val |= VLYNQ_CTRL_INT_ENABLE | VLYNQ_CTRL_INT_LOCAL | VLYNQ_CTRL_INT2CFG; val |= readl(&dev->local->control); writel(VLYNQ_INT_OFFSET, &dev->local->int_ptr); writel(val, &dev->local->control); val = VLYNQ_CTRL_INT_VECTOR(dev->remote_irq); val |= VLYNQ_CTRL_INT_ENABLE; val |= readl(&dev->remote->control); writel(VLYNQ_INT_OFFSET, &dev->remote->int_ptr); writel(val, &dev->remote->int_ptr); writel(val, &dev->remote->control); for (i = dev->irq_start; i <= dev->irq_end; i++) { virq = i - dev->irq_start; if (virq == dev->local_irq) { irq_set_chip_and_handler(i, &vlynq_local_chip, handle_level_irq); irq_set_chip_data(i, dev); } else if (virq == dev->remote_irq) { irq_set_chip_and_handler(i, &vlynq_remote_chip, handle_level_irq); irq_set_chip_data(i, dev); } else { irq_set_chip_and_handler(i, &vlynq_irq_chip, handle_simple_irq); irq_set_chip_data(i, dev); writel(0, &dev->remote->int_device[virq >> 2]); } } if (request_irq(dev->irq, vlynq_irq, IRQF_SHARED, "vlynq", dev)) { printk(KERN_ERR "%s: request_irq failed\n", dev_name(&dev->dev)); return -EAGAIN; } return 0; } static 
void vlynq_device_release(struct device *dev) { struct vlynq_device *vdev = to_vlynq_device(dev); kfree(vdev); } static int vlynq_device_match(struct device *dev, struct device_driver *drv) { struct vlynq_device *vdev = to_vlynq_device(dev); struct vlynq_driver *vdrv = to_vlynq_driver(drv); struct vlynq_device_id *ids = vdrv->id_table; while (ids->id) { if (ids->id == vdev->dev_id) { vdev->divisor = ids->divisor; vlynq_set_drvdata(vdev, ids); printk(KERN_INFO "Driver found for VLYNQ " "device: %08x\n", vdev->dev_id); return 1; } printk(KERN_DEBUG "Not using the %08x VLYNQ device's driver" " for VLYNQ device: %08x\n", ids->id, vdev->dev_id); ids++; } return 0; } static int vlynq_device_probe(struct device *dev) { struct vlynq_device *vdev = to_vlynq_device(dev); struct vlynq_driver *drv = to_vlynq_driver(dev->driver); struct vlynq_device_id *id = vlynq_get_drvdata(vdev); int result = -ENODEV; if (drv->probe) result = drv->probe(vdev, id); if (result) put_device(dev); return result; } static void vlynq_device_remove(struct device *dev) { struct vlynq_driver *drv = to_vlynq_driver(dev->driver); if (drv->remove) drv->remove(to_vlynq_device(dev)); } int __vlynq_register_driver(struct vlynq_driver *driver, struct module *owner) { driver->driver.name = driver->name; driver->driver.bus = &vlynq_bus_type; return driver_register(&driver->driver); } EXPORT_SYMBOL(__vlynq_register_driver); void vlynq_unregister_driver(struct vlynq_driver *driver) { driver_unregister(&driver->driver); } EXPORT_SYMBOL(vlynq_unregister_driver); /* * A VLYNQ remote device can clock the VLYNQ bus master * using a dedicated clock line. In that case, both the * remote device and the bus master should have the same * serial clock dividers configured. Iterate through the * 8 possible dividers until we actually link with the * device. */ static int __vlynq_try_remote(struct vlynq_device *dev) { int i; vlynq_reset(dev); for (i = dev->dev_id ? vlynq_rdiv2 : vlynq_rdiv8; dev->dev_id ? i <= vlynq_rdiv8 : i >= vlynq_rdiv2; dev->dev_id ? i++ : i--) { if (!vlynq_linked(dev)) break; writel((readl(&dev->remote->control) & ~VLYNQ_CTRL_CLOCK_MASK) | VLYNQ_CTRL_CLOCK_INT | VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1), &dev->remote->control); writel((readl(&dev->local->control) & ~(VLYNQ_CTRL_CLOCK_INT | VLYNQ_CTRL_CLOCK_MASK)) | VLYNQ_CTRL_CLOCK_DIV(i - vlynq_rdiv1), &dev->local->control); if (vlynq_linked(dev)) { printk(KERN_DEBUG "%s: using remote clock divisor %d\n", dev_name(&dev->dev), i - vlynq_rdiv1 + 1); dev->divisor = i; return 0; } else { vlynq_reset(dev); } } return -ENODEV; } /* * A VLYNQ remote device can be clocked by the VLYNQ bus * master using a dedicated clock line. In that case, only * the bus master configures the serial clock divider. * Iterate through the 8 possible dividers until we * actually get a link with the device. */ static int __vlynq_try_local(struct vlynq_device *dev) { int i; vlynq_reset(dev); for (i = dev->dev_id ? vlynq_ldiv2 : vlynq_ldiv8; dev->dev_id ? i <= vlynq_ldiv8 : i >= vlynq_ldiv2; dev->dev_id ? 
i++ : i--) { writel((readl(&dev->local->control) & ~VLYNQ_CTRL_CLOCK_MASK) | VLYNQ_CTRL_CLOCK_INT | VLYNQ_CTRL_CLOCK_DIV(i - vlynq_ldiv1), &dev->local->control); if (vlynq_linked(dev)) { printk(KERN_DEBUG "%s: using local clock divisor %d\n", dev_name(&dev->dev), i - vlynq_ldiv1 + 1); dev->divisor = i; return 0; } else { vlynq_reset(dev); } } return -ENODEV; } /* * When using external clocking method, serial clock * is supplied by an external oscillator, therefore we * should mask the local clock bit in the clock control * register for both the bus master and the remote device. */ static int __vlynq_try_external(struct vlynq_device *dev) { vlynq_reset(dev); if (!vlynq_linked(dev)) return -ENODEV; writel((readl(&dev->remote->control) & ~VLYNQ_CTRL_CLOCK_INT), &dev->remote->control); writel((readl(&dev->local->control) & ~VLYNQ_CTRL_CLOCK_INT), &dev->local->control); if (vlynq_linked(dev)) { printk(KERN_DEBUG "%s: using external clock\n", dev_name(&dev->dev)); dev->divisor = vlynq_div_external; return 0; } return -ENODEV; } static int __vlynq_enable_device(struct vlynq_device *dev) { int result; struct plat_vlynq_ops *ops = dev->dev.platform_data; result = ops->on(dev); if (result) return result; switch (dev->divisor) { case vlynq_div_external: case vlynq_div_auto: /* When the device is brought from reset it should have clock * generation negotiated by hardware. * Check which device is generating clocks and perform setup * accordingly */ if (vlynq_linked(dev) && readl(&dev->remote->control) & VLYNQ_CTRL_CLOCK_INT) { if (!__vlynq_try_remote(dev) || !__vlynq_try_local(dev) || !__vlynq_try_external(dev)) return 0; } else { if (!__vlynq_try_external(dev) || !__vlynq_try_local(dev) || !__vlynq_try_remote(dev)) return 0; } break; case vlynq_ldiv1: case vlynq_ldiv2: case vlynq_ldiv3: case vlynq_ldiv4: case vlynq_ldiv5: case vlynq_ldiv6: case vlynq_ldiv7: case vlynq_ldiv8: writel(VLYNQ_CTRL_CLOCK_INT | VLYNQ_CTRL_CLOCK_DIV(dev->divisor - vlynq_ldiv1), &dev->local->control); writel(0, &dev->remote->control); if (vlynq_linked(dev)) { printk(KERN_DEBUG "%s: using local clock divisor %d\n", dev_name(&dev->dev), dev->divisor - vlynq_ldiv1 + 1); return 0; } break; case vlynq_rdiv1: case vlynq_rdiv2: case vlynq_rdiv3: case vlynq_rdiv4: case vlynq_rdiv5: case vlynq_rdiv6: case vlynq_rdiv7: case vlynq_rdiv8: writel(0, &dev->local->control); writel(VLYNQ_CTRL_CLOCK_INT | VLYNQ_CTRL_CLOCK_DIV(dev->divisor - vlynq_rdiv1), &dev->remote->control); if (vlynq_linked(dev)) { printk(KERN_DEBUG "%s: using remote clock divisor %d\n", dev_name(&dev->dev), dev->divisor - vlynq_rdiv1 + 1); return 0; } break; } ops->off(dev); return -ENODEV; } int vlynq_enable_device(struct vlynq_device *dev) { struct plat_vlynq_ops *ops = dev->dev.platform_data; int result = -ENODEV; result = __vlynq_enable_device(dev); if (result) return result; result = vlynq_setup_irq(dev); if (result) ops->off(dev); dev->enabled = !result; return result; } EXPORT_SYMBOL(vlynq_enable_device); void vlynq_disable_device(struct vlynq_device *dev) { struct plat_vlynq_ops *ops = dev->dev.platform_data; dev->enabled = 0; free_irq(dev->irq, dev); ops->off(dev); } EXPORT_SYMBOL(vlynq_disable_device); int vlynq_set_local_mapping(struct vlynq_device *dev, u32 tx_offset, struct vlynq_mapping *mapping) { int i; if (!dev->enabled) return -ENXIO; writel(tx_offset, &dev->local->tx_offset); for (i = 0; i < 4; i++) { writel(mapping[i].offset, &dev->local->rx_mapping[i].offset); writel(mapping[i].size, &dev->local->rx_mapping[i].size); } return 0; } 
EXPORT_SYMBOL(vlynq_set_local_mapping); int vlynq_set_remote_mapping(struct vlynq_device *dev, u32 tx_offset, struct vlynq_mapping *mapping) { int i; if (!dev->enabled) return -ENXIO; writel(tx_offset, &dev->remote->tx_offset); for (i = 0; i < 4; i++) { writel(mapping[i].offset, &dev->remote->rx_mapping[i].offset); writel(mapping[i].size, &dev->remote->rx_mapping[i].size); } return 0; } EXPORT_SYMBOL(vlynq_set_remote_mapping); int vlynq_set_local_irq(struct vlynq_device *dev, int virq) { int irq = dev->irq_start + virq; if (dev->enabled) return -EBUSY; if ((irq < dev->irq_start) || (irq > dev->irq_end)) return -EINVAL; if (virq == dev->remote_irq) return -EINVAL; dev->local_irq = virq; return 0; } EXPORT_SYMBOL(vlynq_set_local_irq); int vlynq_set_remote_irq(struct vlynq_device *dev, int virq) { int irq = dev->irq_start + virq; if (dev->enabled) return -EBUSY; if ((irq < dev->irq_start) || (irq > dev->irq_end)) return -EINVAL; if (virq == dev->local_irq) return -EINVAL; dev->remote_irq = virq; return 0; } EXPORT_SYMBOL(vlynq_set_remote_irq); static int vlynq_probe(struct platform_device *pdev) { struct vlynq_device *dev; struct resource *regs_res, *mem_res, *irq_res; int len, result; regs_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); if (!regs_res) return -ENODEV; mem_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mem"); if (!mem_res) return -ENODEV; irq_res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "devirq"); if (!irq_res) return -ENODEV; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { printk(KERN_ERR "vlynq: failed to allocate device structure\n"); return -ENOMEM; } dev->id = pdev->id; dev->dev.bus = &vlynq_bus_type; dev->dev.parent = &pdev->dev; dev_set_name(&dev->dev, "vlynq%d", dev->id); dev->dev.platform_data = pdev->dev.platform_data; dev->dev.release = vlynq_device_release; dev->regs_start = regs_res->start; dev->regs_end = regs_res->end; dev->mem_start = mem_res->start; dev->mem_end = mem_res->end; len = resource_size(regs_res); if (!request_mem_region(regs_res->start, len, dev_name(&dev->dev))) { printk(KERN_ERR "%s: Can't request vlynq registers\n", dev_name(&dev->dev)); result = -ENXIO; goto fail_request; } dev->local = ioremap(regs_res->start, len); if (!dev->local) { printk(KERN_ERR "%s: Can't remap vlynq registers\n", dev_name(&dev->dev)); result = -ENXIO; goto fail_remap; } dev->remote = (struct vlynq_regs *)((void *)dev->local + VLYNQ_REMOTE_OFFSET); dev->irq = platform_get_irq_byname(pdev, "irq"); dev->irq_start = irq_res->start; dev->irq_end = irq_res->end; dev->local_irq = dev->irq_end - dev->irq_start; dev->remote_irq = dev->local_irq - 1; if (device_register(&dev->dev)) goto fail_register; platform_set_drvdata(pdev, dev); printk(KERN_INFO "%s: regs 0x%p, irq %d, mem 0x%p\n", dev_name(&dev->dev), (void *)dev->regs_start, dev->irq, (void *)dev->mem_start); dev->dev_id = 0; dev->divisor = vlynq_div_auto; result = __vlynq_enable_device(dev); if (result == 0) { dev->dev_id = readl(&dev->remote->chip); ((struct plat_vlynq_ops *)(dev->dev.platform_data))->off(dev); } if (dev->dev_id) printk(KERN_INFO "Found a VLYNQ device: %08x\n", dev->dev_id); return 0; fail_register: iounmap(dev->local); fail_remap: fail_request: release_mem_region(regs_res->start, len); kfree(dev); return result; } static int vlynq_remove(struct platform_device *pdev) { struct vlynq_device *dev = platform_get_drvdata(pdev); device_unregister(&dev->dev); iounmap(dev->local); release_mem_region(dev->regs_start, dev->regs_end - dev->regs_start + 1); kfree(dev); 
return 0; } static struct platform_driver vlynq_platform_driver = { .driver.name = "vlynq", .probe = vlynq_probe, .remove = vlynq_remove, }; struct bus_type vlynq_bus_type = { .name = "vlynq", .match = vlynq_device_match, .probe = vlynq_device_probe, .remove = vlynq_device_remove, }; EXPORT_SYMBOL(vlynq_bus_type); static int vlynq_init(void) { int res = 0; res = bus_register(&vlynq_bus_type); if (res) goto fail_bus; res = platform_driver_register(&vlynq_platform_driver); if (res) goto fail_platform; return 0; fail_platform: bus_unregister(&vlynq_bus_type); fail_bus: return res; } static void vlynq_exit(void) { platform_driver_unregister(&vlynq_platform_driver); bus_unregister(&vlynq_bus_type); } module_init(vlynq_init); module_exit(vlynq_exit);
linux-master
drivers/vlynq/vlynq.c
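The IRQ setters in the driver above only succeed while the link is disabled, and the local and remote virtual lines must stay distinct. A minimal board-setup sketch under those constraints; the virq numbers 30/31 and the helper name are invented for illustration, not taken from any in-tree board file:

/* Hedged sketch: configure both ends of a VLYNQ link before enabling it.
 * The virq values are hypothetical; a real board would pick offsets that
 * fall inside the probed device's irq_start..irq_end window.
 */
static int board_vlynq_setup_irqs(struct vlynq_device *vdev)
{
	int err;

	err = vlynq_set_local_irq(vdev, 30);	/* returns -EBUSY if enabled */
	if (err)
		return err;

	return vlynq_set_remote_irq(vdev, 31);	/* must differ from local_irq */
}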
// SPDX-License-Identifier: GPL-2.0-only /* * pm8xxx RTC driver * * Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved. * Copyright (c) 2023, Linaro Limited */ #include <linux/of.h> #include <linux/module.h> #include <linux/nvmem-consumer.h> #include <linux/init.h> #include <linux/rtc.h> #include <linux/platform_device.h> #include <linux/pm.h> #include <linux/pm_wakeirq.h> #include <linux/regmap.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <asm/unaligned.h> /* RTC_CTRL register bit fields */ #define PM8xxx_RTC_ENABLE BIT(7) #define PM8xxx_RTC_ALARM_CLEAR BIT(0) #define PM8xxx_RTC_ALARM_ENABLE BIT(7) #define NUM_8_BIT_RTC_REGS 0x4 /** * struct pm8xxx_rtc_regs - describe RTC registers per PMIC versions * @ctrl: address of control register * @write: base address of write registers * @read: base address of read registers * @alarm_ctrl: address of alarm control register * @alarm_ctrl2: address of alarm control2 register * @alarm_rw: base address of alarm read-write registers * @alarm_en: alarm enable mask */ struct pm8xxx_rtc_regs { unsigned int ctrl; unsigned int write; unsigned int read; unsigned int alarm_ctrl; unsigned int alarm_ctrl2; unsigned int alarm_rw; unsigned int alarm_en; }; /** * struct pm8xxx_rtc - RTC driver internal structure * @rtc: RTC device * @regmap: regmap used to access registers * @allow_set_time: whether the time can be set * @alarm_irq: alarm irq number * @regs: register description * @dev: device structure * @nvmem_cell: nvmem cell for offset * @offset: offset from epoch in seconds */ struct pm8xxx_rtc { struct rtc_device *rtc; struct regmap *regmap; bool allow_set_time; int alarm_irq; const struct pm8xxx_rtc_regs *regs; struct device *dev; struct nvmem_cell *nvmem_cell; u32 offset; }; static int pm8xxx_rtc_read_nvmem_offset(struct pm8xxx_rtc *rtc_dd) { size_t len; void *buf; int rc; buf = nvmem_cell_read(rtc_dd->nvmem_cell, &len); if (IS_ERR(buf)) { rc = PTR_ERR(buf); dev_dbg(rtc_dd->dev, "failed to read nvmem offset: %d\n", rc); return rc; } if (len != sizeof(u32)) { dev_dbg(rtc_dd->dev, "unexpected nvmem cell size %zu\n", len); kfree(buf); return -EINVAL; } rtc_dd->offset = get_unaligned_le32(buf); kfree(buf); return 0; } static int pm8xxx_rtc_write_nvmem_offset(struct pm8xxx_rtc *rtc_dd, u32 offset) { u8 buf[sizeof(u32)]; int rc; put_unaligned_le32(offset, buf); rc = nvmem_cell_write(rtc_dd->nvmem_cell, buf, sizeof(buf)); if (rc < 0) { dev_dbg(rtc_dd->dev, "failed to write nvmem offset: %d\n", rc); return rc; } return 0; } static int pm8xxx_rtc_read_offset(struct pm8xxx_rtc *rtc_dd) { if (!rtc_dd->nvmem_cell) return 0; return pm8xxx_rtc_read_nvmem_offset(rtc_dd); } static int pm8xxx_rtc_read_raw(struct pm8xxx_rtc *rtc_dd, u32 *secs) { const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; u8 value[NUM_8_BIT_RTC_REGS]; unsigned int reg; int rc; rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value, sizeof(value)); if (rc) return rc; /* * Read the LSB again and check if there has been a carry over. * If there has, redo the read operation. 
*/ rc = regmap_read(rtc_dd->regmap, regs->read, &reg); if (rc < 0) return rc; if (reg < value[0]) { rc = regmap_bulk_read(rtc_dd->regmap, regs->read, value, sizeof(value)); if (rc) return rc; } *secs = get_unaligned_le32(value); return 0; } static int pm8xxx_rtc_update_offset(struct pm8xxx_rtc *rtc_dd, u32 secs) { u32 raw_secs; u32 offset; int rc; if (!rtc_dd->nvmem_cell) return -ENODEV; rc = pm8xxx_rtc_read_raw(rtc_dd, &raw_secs); if (rc) return rc; offset = secs - raw_secs; if (offset == rtc_dd->offset) return 0; rc = pm8xxx_rtc_write_nvmem_offset(rtc_dd, offset); if (rc) return rc; rtc_dd->offset = offset; return 0; } /* * Steps to write the RTC registers. * 1. Disable alarm if enabled. * 2. Disable rtc if enabled. * 3. Write 0x00 to LSB. * 4. Write Byte[1], Byte[2], Byte[3] then Byte[0]. * 5. Enable rtc if disabled in step 2. * 6. Enable alarm if disabled in step 1. */ static int __pm8xxx_rtc_set_time(struct pm8xxx_rtc *rtc_dd, u32 secs) { const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; u8 value[NUM_8_BIT_RTC_REGS]; bool alarm_enabled; int rc; put_unaligned_le32(secs, value); rc = regmap_update_bits_check(rtc_dd->regmap, regs->alarm_ctrl, regs->alarm_en, 0, &alarm_enabled); if (rc) return rc; /* Disable RTC */ rc = regmap_update_bits(rtc_dd->regmap, regs->ctrl, PM8xxx_RTC_ENABLE, 0); if (rc) return rc; /* Write 0 to Byte[0] */ rc = regmap_write(rtc_dd->regmap, regs->write, 0); if (rc) return rc; /* Write Byte[1], Byte[2], Byte[3] */ rc = regmap_bulk_write(rtc_dd->regmap, regs->write + 1, &value[1], sizeof(value) - 1); if (rc) return rc; /* Write Byte[0] */ rc = regmap_write(rtc_dd->regmap, regs->write, value[0]); if (rc) return rc; /* Enable RTC */ rc = regmap_update_bits(rtc_dd->regmap, regs->ctrl, PM8xxx_RTC_ENABLE, PM8xxx_RTC_ENABLE); if (rc) return rc; if (alarm_enabled) { rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl, regs->alarm_en, regs->alarm_en); if (rc) return rc; } return 0; } static int pm8xxx_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); u32 secs; int rc; secs = rtc_tm_to_time64(tm); if (rtc_dd->allow_set_time) rc = __pm8xxx_rtc_set_time(rtc_dd, secs); else rc = pm8xxx_rtc_update_offset(rtc_dd, secs); if (rc) return rc; dev_dbg(dev, "set time: %ptRd %ptRt (%u + %u)\n", tm, tm, secs - rtc_dd->offset, rtc_dd->offset); return 0; } static int pm8xxx_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); u32 secs; int rc; rc = pm8xxx_rtc_read_raw(rtc_dd, &secs); if (rc) return rc; secs += rtc_dd->offset; rtc_time64_to_tm(secs, tm); dev_dbg(dev, "read time: %ptRd %ptRt (%u + %u)\n", tm, tm, secs - rtc_dd->offset, rtc_dd->offset); return 0; } static int pm8xxx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; u8 value[NUM_8_BIT_RTC_REGS]; u32 secs; int rc; secs = rtc_tm_to_time64(&alarm->time); secs -= rtc_dd->offset; put_unaligned_le32(secs, value); rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl, regs->alarm_en, 0); if (rc) return rc; rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value, sizeof(value)); if (rc) return rc; if (alarm->enabled) { rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl, regs->alarm_en, regs->alarm_en); if (rc) return rc; } dev_dbg(dev, "set alarm: %ptRd %ptRt\n", &alarm->time, &alarm->time); return 0; } static int pm8xxx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alarm) { struct 
pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; u8 value[NUM_8_BIT_RTC_REGS]; unsigned int ctrl_reg; u32 secs; int rc; rc = regmap_bulk_read(rtc_dd->regmap, regs->alarm_rw, value, sizeof(value)); if (rc) return rc; secs = get_unaligned_le32(value); secs += rtc_dd->offset; rtc_time64_to_tm(secs, &alarm->time); rc = regmap_read(rtc_dd->regmap, regs->alarm_ctrl, &ctrl_reg); if (rc) return rc; alarm->enabled = !!(ctrl_reg & PM8xxx_RTC_ALARM_ENABLE); dev_dbg(dev, "read alarm: %ptRd %ptRt\n", &alarm->time, &alarm->time); return 0; } static int pm8xxx_rtc_alarm_irq_enable(struct device *dev, unsigned int enable) { struct pm8xxx_rtc *rtc_dd = dev_get_drvdata(dev); const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; u8 value[NUM_8_BIT_RTC_REGS] = {0}; unsigned int val; int rc; if (enable) val = regs->alarm_en; else val = 0; rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl, regs->alarm_en, val); if (rc) return rc; /* Clear alarm register */ if (!enable) { rc = regmap_bulk_write(rtc_dd->regmap, regs->alarm_rw, value, sizeof(value)); if (rc) return rc; } return 0; } static const struct rtc_class_ops pm8xxx_rtc_ops = { .read_time = pm8xxx_rtc_read_time, .set_time = pm8xxx_rtc_set_time, .set_alarm = pm8xxx_rtc_set_alarm, .read_alarm = pm8xxx_rtc_read_alarm, .alarm_irq_enable = pm8xxx_rtc_alarm_irq_enable, }; static irqreturn_t pm8xxx_alarm_trigger(int irq, void *dev_id) { struct pm8xxx_rtc *rtc_dd = dev_id; const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; int rc; rtc_update_irq(rtc_dd->rtc, 1, RTC_IRQF | RTC_AF); /* Disable alarm */ rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl, regs->alarm_en, 0); if (rc) return IRQ_NONE; /* Clear alarm status */ rc = regmap_update_bits(rtc_dd->regmap, regs->alarm_ctrl2, PM8xxx_RTC_ALARM_CLEAR, 0); if (rc) return IRQ_NONE; return IRQ_HANDLED; } static int pm8xxx_rtc_enable(struct pm8xxx_rtc *rtc_dd) { const struct pm8xxx_rtc_regs *regs = rtc_dd->regs; return regmap_update_bits(rtc_dd->regmap, regs->ctrl, PM8xxx_RTC_ENABLE, PM8xxx_RTC_ENABLE); } static const struct pm8xxx_rtc_regs pm8921_regs = { .ctrl = 0x11d, .write = 0x11f, .read = 0x123, .alarm_rw = 0x127, .alarm_ctrl = 0x11d, .alarm_ctrl2 = 0x11e, .alarm_en = BIT(1), }; static const struct pm8xxx_rtc_regs pm8058_regs = { .ctrl = 0x1e8, .write = 0x1ea, .read = 0x1ee, .alarm_rw = 0x1f2, .alarm_ctrl = 0x1e8, .alarm_ctrl2 = 0x1e9, .alarm_en = BIT(1), }; static const struct pm8xxx_rtc_regs pm8941_regs = { .ctrl = 0x6046, .write = 0x6040, .read = 0x6048, .alarm_rw = 0x6140, .alarm_ctrl = 0x6146, .alarm_ctrl2 = 0x6148, .alarm_en = BIT(7), }; static const struct pm8xxx_rtc_regs pmk8350_regs = { .ctrl = 0x6146, .write = 0x6140, .read = 0x6148, .alarm_rw = 0x6240, .alarm_ctrl = 0x6246, .alarm_ctrl2 = 0x6248, .alarm_en = BIT(7), }; static const struct of_device_id pm8xxx_id_table[] = { { .compatible = "qcom,pm8921-rtc", .data = &pm8921_regs }, { .compatible = "qcom,pm8058-rtc", .data = &pm8058_regs }, { .compatible = "qcom,pm8941-rtc", .data = &pm8941_regs }, { .compatible = "qcom,pmk8350-rtc", .data = &pmk8350_regs }, { }, }; MODULE_DEVICE_TABLE(of, pm8xxx_id_table); static int pm8xxx_rtc_probe(struct platform_device *pdev) { const struct of_device_id *match; struct pm8xxx_rtc *rtc_dd; int rc; match = of_match_node(pm8xxx_id_table, pdev->dev.of_node); if (!match) return -ENXIO; rtc_dd = devm_kzalloc(&pdev->dev, sizeof(*rtc_dd), GFP_KERNEL); if (rtc_dd == NULL) return -ENOMEM; rtc_dd->regmap = dev_get_regmap(pdev->dev.parent, NULL); if (!rtc_dd->regmap) return 
-ENXIO; rtc_dd->alarm_irq = platform_get_irq(pdev, 0); if (rtc_dd->alarm_irq < 0) return -ENXIO; rtc_dd->allow_set_time = of_property_read_bool(pdev->dev.of_node, "allow-set-time"); rtc_dd->nvmem_cell = devm_nvmem_cell_get(&pdev->dev, "offset"); if (IS_ERR(rtc_dd->nvmem_cell)) { rc = PTR_ERR(rtc_dd->nvmem_cell); if (rc != -ENOENT) return rc; rtc_dd->nvmem_cell = NULL; } rtc_dd->regs = match->data; rtc_dd->dev = &pdev->dev; if (!rtc_dd->allow_set_time) { rc = pm8xxx_rtc_read_offset(rtc_dd); if (rc) return rc; } rc = pm8xxx_rtc_enable(rtc_dd); if (rc) return rc; platform_set_drvdata(pdev, rtc_dd); device_init_wakeup(&pdev->dev, 1); rtc_dd->rtc = devm_rtc_allocate_device(&pdev->dev); if (IS_ERR(rtc_dd->rtc)) return PTR_ERR(rtc_dd->rtc); rtc_dd->rtc->ops = &pm8xxx_rtc_ops; rtc_dd->rtc->range_max = U32_MAX; rc = devm_request_any_context_irq(&pdev->dev, rtc_dd->alarm_irq, pm8xxx_alarm_trigger, IRQF_TRIGGER_RISING, "pm8xxx_rtc_alarm", rtc_dd); if (rc < 0) return rc; rc = devm_rtc_register_device(rtc_dd->rtc); if (rc) return rc; rc = dev_pm_set_wake_irq(&pdev->dev, rtc_dd->alarm_irq); if (rc) return rc; return 0; } static void pm8xxx_remove(struct platform_device *pdev) { dev_pm_clear_wake_irq(&pdev->dev); } static struct platform_driver pm8xxx_rtc_driver = { .probe = pm8xxx_rtc_probe, .remove_new = pm8xxx_remove, .driver = { .name = "rtc-pm8xxx", .of_match_table = pm8xxx_id_table, }, }; module_platform_driver(pm8xxx_rtc_driver); MODULE_ALIAS("platform:rtc-pm8xxx"); MODULE_DESCRIPTION("PMIC8xxx RTC driver"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Anirudh Ghayal <[email protected]>"); MODULE_AUTHOR("Johan Hovold <[email protected]>");
linux-master
drivers/rtc/rtc-pm8xxx.c
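The offset handling in the driver above relies on well-defined u32 wraparound: pm8xxx_rtc_update_offset() stores `secs - raw_secs`, and reading back `raw + offset` reproduces `secs` even when the subtraction underflows. A small standalone demonstration of that modulo-2^32 arithmetic (userspace C, independent of the driver):

#include <stdint.h>
#include <stdio.h>

/* Storing (wanted - raw) and later adding it back to raw recovers the
 * wanted value, even when wanted < raw and the subtraction wraps.
 */
int main(void)
{
	uint32_t raw = 4000000000u;	/* hardware counter, close to wrap */
	uint32_t wanted = 1700000000u;	/* epoch seconds requested by user */
	uint32_t offset = wanted - raw;	/* wraps, still well-defined */

	printf("recovered: %u\n", (unsigned)(raw + offset)); /* 1700000000 */
	return 0;
}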
// SPDX-License-Identifier: GPL-2.0 /* * Nintendo GameCube, Wii and Wii U RTC driver * * This driver is for the MX23L4005, more specifically its real-time clock and * SRAM storage. The value returned by the RTC counter must be added with the * offset stored in a bias register in SRAM (on the GameCube and Wii) or in * /config/rtc.xml (on the Wii U). The latter being very impractical to access * from Linux, this driver assumes the bootloader has read it and stored it in * SRAM like for the other two consoles. * * This device sits on a bus named EXI (which is similar to SPI), channel 0, * device 1. This driver assumes no other user of the EXI bus, which is * currently the case but would have to be reworked to add support for other * GameCube hardware exposed on this bus. * * References: * - https://wiiubrew.org/wiki/Hardware/RTC * - https://wiibrew.org/wiki/MX23L4005 * * Copyright (C) 2018 rw-r-r-0644 * Copyright (C) 2021 Emmanuel Gil Peyrot <[email protected]> * * Based on rtc-gcn.c * Copyright (C) 2004-2009 The GameCube Linux Team * Copyright (C) 2005,2008,2009 Albert Herranz * Based on gamecube_time.c from Torben Nielsen. */ #include <linux/init.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/platform_device.h> #include <linux/regmap.h> #include <linux/rtc.h> #include <linux/time.h> /* EXI registers */ #define EXICSR 0 #define EXICR 12 #define EXIDATA 16 /* EXI register values */ #define EXICSR_DEV 0x380 #define EXICSR_DEV1 0x100 #define EXICSR_CLK 0x070 #define EXICSR_CLK_1MHZ 0x000 #define EXICSR_CLK_2MHZ 0x010 #define EXICSR_CLK_4MHZ 0x020 #define EXICSR_CLK_8MHZ 0x030 #define EXICSR_CLK_16MHZ 0x040 #define EXICSR_CLK_32MHZ 0x050 #define EXICSR_INT 0x008 #define EXICSR_INTSET 0x008 #define EXICR_TSTART 0x001 #define EXICR_TRSMODE 0x002 #define EXICR_TRSMODE_IMM 0x000 #define EXICR_TRSTYPE 0x00C #define EXICR_TRSTYPE_R 0x000 #define EXICR_TRSTYPE_W 0x004 #define EXICR_TLEN 0x030 #define EXICR_TLEN32 0x030 /* EXI registers values to access the RTC */ #define RTC_EXICSR (EXICSR_DEV1 | EXICSR_CLK_8MHZ | EXICSR_INTSET) #define RTC_EXICR_W (EXICR_TSTART | EXICR_TRSMODE_IMM | EXICR_TRSTYPE_W | EXICR_TLEN32) #define RTC_EXICR_R (EXICR_TSTART | EXICR_TRSMODE_IMM | EXICR_TRSTYPE_R | EXICR_TLEN32) #define RTC_EXIDATA_W 0x80000000 /* RTC registers */ #define RTC_COUNTER 0x200000 #define RTC_SRAM 0x200001 #define RTC_SRAM_BIAS 0x200004 #define RTC_SNAPSHOT 0x204000 #define RTC_ONTMR 0x210000 #define RTC_OFFTMR 0x210001 #define RTC_TEST0 0x210004 #define RTC_TEST1 0x210005 #define RTC_TEST2 0x210006 #define RTC_TEST3 0x210007 #define RTC_CONTROL0 0x21000c #define RTC_CONTROL1 0x21000d /* RTC flags */ #define RTC_CONTROL0_UNSTABLE_POWER 0x00000800 #define RTC_CONTROL0_LOW_BATTERY 0x00000200 struct priv { struct regmap *regmap; void __iomem *iob; u32 rtc_bias; }; static int exi_read(void *context, u32 reg, u32 *data) { struct priv *d = (struct priv *)context; void __iomem *iob = d->iob; /* The spin loops here loop about 15~16 times each, so there is no need * to use a more expensive sleep method. 
*/ /* Write register offset */ iowrite32be(RTC_EXICSR, iob + EXICSR); iowrite32be(reg << 8, iob + EXIDATA); iowrite32be(RTC_EXICR_W, iob + EXICR); while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET)) cpu_relax(); /* Read data */ iowrite32be(RTC_EXICSR, iob + EXICSR); iowrite32be(RTC_EXICR_R, iob + EXICR); while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET)) cpu_relax(); *data = ioread32be(iob + EXIDATA); /* Clear channel parameters */ iowrite32be(0, iob + EXICSR); return 0; } static int exi_write(void *context, u32 reg, u32 data) { struct priv *d = (struct priv *)context; void __iomem *iob = d->iob; /* The spin loops here loop about 15~16 times each, so there is no need * to use a more expensive sleep method. */ /* Write register offset */ iowrite32be(RTC_EXICSR, iob + EXICSR); iowrite32be(RTC_EXIDATA_W | (reg << 8), iob + EXIDATA); iowrite32be(RTC_EXICR_W, iob + EXICR); while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET)) cpu_relax(); /* Write data */ iowrite32be(RTC_EXICSR, iob + EXICSR); iowrite32be(data, iob + EXIDATA); iowrite32be(RTC_EXICR_W, iob + EXICR); while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET)) cpu_relax(); /* Clear channel parameters */ iowrite32be(0, iob + EXICSR); return 0; } static const struct regmap_bus exi_bus = { /* TODO: is that true? Not that it matters here, but still. */ .fast_io = true, .reg_read = exi_read, .reg_write = exi_write, }; static int gamecube_rtc_read_time(struct device *dev, struct rtc_time *t) { struct priv *d = dev_get_drvdata(dev); int ret; u32 counter; time64_t timestamp; ret = regmap_read(d->regmap, RTC_COUNTER, &counter); if (ret) return ret; /* Add the counter and the bias to obtain the timestamp */ timestamp = (time64_t)d->rtc_bias + counter; rtc_time64_to_tm(timestamp, t); return 0; } static int gamecube_rtc_set_time(struct device *dev, struct rtc_time *t) { struct priv *d = dev_get_drvdata(dev); time64_t timestamp; /* Subtract the timestamp and the bias to obtain the counter value */ timestamp = rtc_tm_to_time64(t); return regmap_write(d->regmap, RTC_COUNTER, timestamp - d->rtc_bias); } static int gamecube_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { struct priv *d = dev_get_drvdata(dev); int value; int control0; int ret; switch (cmd) { case RTC_VL_READ: ret = regmap_read(d->regmap, RTC_CONTROL0, &control0); if (ret) return ret; value = 0; if (control0 & RTC_CONTROL0_UNSTABLE_POWER) value |= RTC_VL_DATA_INVALID; if (control0 & RTC_CONTROL0_LOW_BATTERY) value |= RTC_VL_BACKUP_LOW; return put_user(value, (unsigned int __user *)arg); default: return -ENOIOCTLCMD; } } static const struct rtc_class_ops gamecube_rtc_ops = { .read_time = gamecube_rtc_read_time, .set_time = gamecube_rtc_set_time, .ioctl = gamecube_rtc_ioctl, }; static int gamecube_rtc_read_offset_from_sram(struct priv *d) { struct device_node *np; int ret; struct resource res; void __iomem *hw_srnprot; u32 old; np = of_find_compatible_node(NULL, NULL, "nintendo,latte-srnprot"); if (!np) np = of_find_compatible_node(NULL, NULL, "nintendo,hollywood-srnprot"); if (!np) { pr_info("HW_SRNPROT not found, assuming a GameCube\n"); return regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias); } ret = of_address_to_resource(np, 0, &res); of_node_put(np); if (ret) { pr_err("no io memory range found\n"); return -1; } hw_srnprot = ioremap(res.start, resource_size(&res)); old = ioread32be(hw_srnprot); /* TODO: figure out why we use this magic constant. I obtained it by * reading the leftover value after boot, after IOSU already ran. 
* * On my Wii U, setting this register to 1 prevents the console from * rebooting properly, so wiiubrew.org must be missing something. * * See https://wiiubrew.org/wiki/Hardware/Latte_registers */ if (old != 0x7bf) iowrite32be(0x7bf, hw_srnprot); /* Get the offset from RTC SRAM. * * Its default location on the GameCube and on the Wii is in the SRAM, * while on the Wii U the bootloader needs to fill it with the contents * of /config/rtc.xml on the SLC (the eMMC). We don’t do that from * Linux since it requires implementing a proprietary filesystem and do * file decryption, instead we require the bootloader to fill the same * SRAM address as on previous consoles. */ ret = regmap_read(d->regmap, RTC_SRAM_BIAS, &d->rtc_bias); /* Reset SRAM access to how it was before, our job here is done. */ if (old != 0x7bf) iowrite32be(old, hw_srnprot); iounmap(hw_srnprot); if (ret) pr_err("failed to get the RTC bias\n"); return ret; } static const struct regmap_range rtc_rd_ranges[] = { regmap_reg_range(0x200000, 0x200010), regmap_reg_range(0x204000, 0x204000), regmap_reg_range(0x210000, 0x210001), regmap_reg_range(0x210004, 0x210007), regmap_reg_range(0x21000c, 0x21000d), }; static const struct regmap_access_table rtc_rd_regs = { .yes_ranges = rtc_rd_ranges, .n_yes_ranges = ARRAY_SIZE(rtc_rd_ranges), }; static const struct regmap_range rtc_wr_ranges[] = { regmap_reg_range(0x200000, 0x200010), regmap_reg_range(0x204000, 0x204000), regmap_reg_range(0x210000, 0x210001), regmap_reg_range(0x21000d, 0x21000d), }; static const struct regmap_access_table rtc_wr_regs = { .yes_ranges = rtc_wr_ranges, .n_yes_ranges = ARRAY_SIZE(rtc_wr_ranges), }; static const struct regmap_config gamecube_rtc_regmap_config = { .reg_bits = 24, .val_bits = 32, .rd_table = &rtc_rd_regs, .wr_table = &rtc_wr_regs, .max_register = 0x21000d, .name = "gamecube-rtc", }; static int gamecube_rtc_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct rtc_device *rtc; struct priv *d; int ret; d = devm_kzalloc(dev, sizeof(struct priv), GFP_KERNEL); if (!d) return -ENOMEM; d->iob = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(d->iob)) return PTR_ERR(d->iob); d->regmap = devm_regmap_init(dev, &exi_bus, d, &gamecube_rtc_regmap_config); if (IS_ERR(d->regmap)) return PTR_ERR(d->regmap); ret = gamecube_rtc_read_offset_from_sram(d); if (ret) return ret; dev_dbg(dev, "SRAM bias: 0x%x", d->rtc_bias); dev_set_drvdata(dev, d); rtc = devm_rtc_allocate_device(dev); if (IS_ERR(rtc)) return PTR_ERR(rtc); /* We can represent further than that, but it depends on the stored * bias and we can’t modify it persistently on all supported consoles, * so here we pretend to be limited to 2106. */ rtc->range_min = 0; rtc->range_max = U32_MAX; rtc->ops = &gamecube_rtc_ops; devm_rtc_register_device(rtc); return 0; } static const struct of_device_id gamecube_rtc_of_match[] = { {.compatible = "nintendo,latte-exi" }, {.compatible = "nintendo,hollywood-exi" }, {.compatible = "nintendo,flipper-exi" }, { } }; MODULE_DEVICE_TABLE(of, gamecube_rtc_of_match); static struct platform_driver gamecube_rtc_driver = { .probe = gamecube_rtc_probe, .driver = { .name = "rtc-gamecube", .of_match_table = gamecube_rtc_of_match, }, }; module_platform_driver(gamecube_rtc_driver); MODULE_AUTHOR("Emmanuel Gil Peyrot <[email protected]>"); MODULE_DESCRIPTION("Nintendo GameCube, Wii and Wii U RTC driver"); MODULE_LICENSE("GPL");
linux-master
drivers/rtc/rtc-gamecube.c
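Every EXI transfer in exi_read()/exi_write() above ends with the same step: write the command register, then busy-wait on EXICSR_INTSET. A hedged sketch of that step factored into a helper; the helper name is invented, and the in-tree driver keeps these loops inline:

/* Hypothetical helper mirroring the repeated pattern above: start an
 * immediate-mode EXI transfer and spin (about 15-16 iterations, per the
 * driver's comment) until the controller raises EXICSR_INTSET.
 */
static void exi_kick_and_wait(void __iomem *iob, u32 cr)
{
	iowrite32be(cr, iob + EXICR);
	while (!(ioread32be(iob + EXICSR) & EXICSR_INTSET))
		cpu_relax();
}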
// SPDX-License-Identifier: GPL-2.0+
// Copyright 2015 IBM Corp.

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/rtc.h>
#include <linux/io.h>

struct aspeed_rtc {
	struct rtc_device *rtc_dev;
	void __iomem *base;
};

#define RTC_TIME	0x00
#define RTC_YEAR	0x04
#define RTC_CTRL	0x10
#define RTC_UNLOCK	BIT(1)
#define RTC_ENABLE	BIT(0)

static int aspeed_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	struct aspeed_rtc *rtc = dev_get_drvdata(dev);
	unsigned int cent, year;
	u32 reg1, reg2;

	if (!(readl(rtc->base + RTC_CTRL) & RTC_ENABLE)) {
		dev_dbg(dev, "%s failing as rtc disabled\n", __func__);
		return -EINVAL;
	}

	do {
		reg2 = readl(rtc->base + RTC_YEAR);
		reg1 = readl(rtc->base + RTC_TIME);
	} while (reg2 != readl(rtc->base + RTC_YEAR));

	tm->tm_mday = (reg1 >> 24) & 0x1f;
	tm->tm_hour = (reg1 >> 16) & 0x1f;
	tm->tm_min = (reg1 >> 8) & 0x3f;
	tm->tm_sec = (reg1 >> 0) & 0x3f;

	cent = (reg2 >> 16) & 0x1f;
	year = (reg2 >> 8) & 0x7f;
	tm->tm_mon = ((reg2 >> 0) & 0x0f) - 1;
	tm->tm_year = year + (cent * 100) - 1900;

	dev_dbg(dev, "%s %ptR", __func__, tm);

	return 0;
}

static int aspeed_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	struct aspeed_rtc *rtc = dev_get_drvdata(dev);
	u32 reg1, reg2, ctrl;
	int year, cent;

	cent = (tm->tm_year + 1900) / 100;
	year = tm->tm_year % 100;

	reg1 = (tm->tm_mday << 24) | (tm->tm_hour << 16) | (tm->tm_min << 8) |
		tm->tm_sec;

	reg2 = ((cent & 0x1f) << 16) | ((year & 0x7f) << 8) |
		((tm->tm_mon + 1) & 0xf);

	ctrl = readl(rtc->base + RTC_CTRL);
	writel(ctrl | RTC_UNLOCK, rtc->base + RTC_CTRL);

	writel(reg1, rtc->base + RTC_TIME);
	writel(reg2, rtc->base + RTC_YEAR);

	/* Re-lock and ensure enable is set now that a time is programmed */
	writel(ctrl | RTC_ENABLE, rtc->base + RTC_CTRL);

	return 0;
}

static const struct rtc_class_ops aspeed_rtc_ops = {
	.read_time = aspeed_rtc_read_time,
	.set_time = aspeed_rtc_set_time,
};

static int aspeed_rtc_probe(struct platform_device *pdev)
{
	struct aspeed_rtc *rtc;

	rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL);
	if (!rtc)
		return -ENOMEM;

	rtc->base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(rtc->base))
		return PTR_ERR(rtc->base);

	rtc->rtc_dev = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtc->rtc_dev))
		return PTR_ERR(rtc->rtc_dev);

	platform_set_drvdata(pdev, rtc);

	rtc->rtc_dev->ops = &aspeed_rtc_ops;
	rtc->rtc_dev->range_min = RTC_TIMESTAMP_BEGIN_1900;
	rtc->rtc_dev->range_max = 38814989399LL; /* 3199-12-31 23:59:59 */

	return devm_rtc_register_device(rtc->rtc_dev);
}

static const struct of_device_id aspeed_rtc_match[] = {
	{ .compatible = "aspeed,ast2400-rtc", },
	{ .compatible = "aspeed,ast2500-rtc", },
	{ .compatible = "aspeed,ast2600-rtc", },
	{}
};
MODULE_DEVICE_TABLE(of, aspeed_rtc_match);

static struct platform_driver aspeed_rtc_driver = {
	.driver = {
		.name = "aspeed-rtc",
		.of_match_table = aspeed_rtc_match,
	},
};

module_platform_driver_probe(aspeed_rtc_driver, aspeed_rtc_probe);

MODULE_DESCRIPTION("ASPEED RTC driver");
MODULE_AUTHOR("Joel Stanley <[email protected]>");
MODULE_LICENSE("GPL");
linux-master
drivers/rtc/rtc-aspeed.c
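The register packing in aspeed_rtc_set_time() above is plain bit-field composition. A standalone worked example (userspace C, values chosen arbitrarily) packing 2023-06-15 12:34:56 the same way:

#include <stdint.h>
#include <stdio.h>

/* Illustration of the ASPEED RTC register layout used above:
 * day/hour/minute/second in one word, century/year/month in the other.
 */
int main(void)
{
	unsigned mday = 15, hour = 12, min = 34, sec = 56;
	unsigned cent = 2023 / 100, year = 2023 % 100, mon = 6;
	uint32_t reg1 = (mday << 24) | (hour << 16) | (min << 8) | sec;
	uint32_t reg2 = ((cent & 0x1f) << 16) | ((year & 0x7f) << 8) |
			(mon & 0xf);

	printf("reg1=0x%08x reg2=0x%08x\n", (unsigned)reg1, (unsigned)reg2);
	return 0;
}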
// SPDX-License-Identifier: GPL-2.0-only /* * rtc class driver for the Maxim MAX6900 chip * * Copyright (c) 2007 MontaVista, Software, Inc. * * Author: Dale Farnsworth <[email protected]> * * based on previously existing rtc class drivers */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/bcd.h> #include <linux/rtc.h> #include <linux/delay.h> /* * register indices */ #define MAX6900_REG_SC 0 /* seconds 00-59 */ #define MAX6900_REG_MN 1 /* minutes 00-59 */ #define MAX6900_REG_HR 2 /* hours 00-23 */ #define MAX6900_REG_DT 3 /* day of month 00-31 */ #define MAX6900_REG_MO 4 /* month 01-12 */ #define MAX6900_REG_DW 5 /* day of week 1-7 */ #define MAX6900_REG_YR 6 /* year 00-99 */ #define MAX6900_REG_CT 7 /* control */ /* register 8 is undocumented */ #define MAX6900_REG_CENTURY 9 /* century */ #define MAX6900_REG_LEN 10 #define MAX6900_BURST_LEN 8 /* can burst r/w first 8 regs */ #define MAX6900_REG_CT_WP (1 << 7) /* Write Protect */ /* * register read/write commands */ #define MAX6900_REG_CONTROL_WRITE 0x8e #define MAX6900_REG_CENTURY_WRITE 0x92 #define MAX6900_REG_CENTURY_READ 0x93 #define MAX6900_REG_RESERVED_READ 0x96 #define MAX6900_REG_BURST_WRITE 0xbe #define MAX6900_REG_BURST_READ 0xbf #define MAX6900_IDLE_TIME_AFTER_WRITE 3 /* specification says 2.5 mS */ static struct i2c_driver max6900_driver; static int max6900_i2c_read_regs(struct i2c_client *client, u8 *buf) { u8 reg_burst_read[1] = { MAX6900_REG_BURST_READ }; u8 reg_century_read[1] = { MAX6900_REG_CENTURY_READ }; struct i2c_msg msgs[4] = { { .addr = client->addr, .flags = 0, /* write */ .len = sizeof(reg_burst_read), .buf = reg_burst_read} , { .addr = client->addr, .flags = I2C_M_RD, .len = MAX6900_BURST_LEN, .buf = buf} , { .addr = client->addr, .flags = 0, /* write */ .len = sizeof(reg_century_read), .buf = reg_century_read} , { .addr = client->addr, .flags = I2C_M_RD, .len = sizeof(buf[MAX6900_REG_CENTURY]), .buf = &buf[MAX6900_REG_CENTURY] } }; int rc; rc = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (rc != ARRAY_SIZE(msgs)) { dev_err(&client->dev, "%s: register read failed\n", __func__); return -EIO; } return 0; } static int max6900_i2c_write_regs(struct i2c_client *client, u8 const *buf) { u8 i2c_century_buf[1 + 1] = { MAX6900_REG_CENTURY_WRITE }; struct i2c_msg century_msgs[1] = { { .addr = client->addr, .flags = 0, /* write */ .len = sizeof(i2c_century_buf), .buf = i2c_century_buf} }; u8 i2c_burst_buf[MAX6900_BURST_LEN + 1] = { MAX6900_REG_BURST_WRITE }; struct i2c_msg burst_msgs[1] = { { .addr = client->addr, .flags = 0, /* write */ .len = sizeof(i2c_burst_buf), .buf = i2c_burst_buf} }; int rc; /* * We have to make separate calls to i2c_transfer because of * the need to delay after each write to the chip. Also, * we write the century byte first, since we set the write-protect * bit as part of the burst write. 
*/ i2c_century_buf[1] = buf[MAX6900_REG_CENTURY]; rc = i2c_transfer(client->adapter, century_msgs, ARRAY_SIZE(century_msgs)); if (rc != ARRAY_SIZE(century_msgs)) goto write_failed; msleep(MAX6900_IDLE_TIME_AFTER_WRITE); memcpy(&i2c_burst_buf[1], buf, MAX6900_BURST_LEN); rc = i2c_transfer(client->adapter, burst_msgs, ARRAY_SIZE(burst_msgs)); if (rc != ARRAY_SIZE(burst_msgs)) goto write_failed; msleep(MAX6900_IDLE_TIME_AFTER_WRITE); return 0; write_failed: dev_err(&client->dev, "%s: register write failed\n", __func__); return -EIO; } static int max6900_rtc_read_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = to_i2c_client(dev); int rc; u8 regs[MAX6900_REG_LEN]; rc = max6900_i2c_read_regs(client, regs); if (rc < 0) return rc; tm->tm_sec = bcd2bin(regs[MAX6900_REG_SC]); tm->tm_min = bcd2bin(regs[MAX6900_REG_MN]); tm->tm_hour = bcd2bin(regs[MAX6900_REG_HR] & 0x3f); tm->tm_mday = bcd2bin(regs[MAX6900_REG_DT]); tm->tm_mon = bcd2bin(regs[MAX6900_REG_MO]) - 1; tm->tm_year = bcd2bin(regs[MAX6900_REG_YR]) + bcd2bin(regs[MAX6900_REG_CENTURY]) * 100 - 1900; tm->tm_wday = bcd2bin(regs[MAX6900_REG_DW]); return 0; } static int max6900_i2c_clear_write_protect(struct i2c_client *client) { return i2c_smbus_write_byte_data(client, MAX6900_REG_CONTROL_WRITE, 0); } static int max6900_rtc_set_time(struct device *dev, struct rtc_time *tm) { struct i2c_client *client = to_i2c_client(dev); u8 regs[MAX6900_REG_LEN]; int rc; rc = max6900_i2c_clear_write_protect(client); if (rc < 0) return rc; regs[MAX6900_REG_SC] = bin2bcd(tm->tm_sec); regs[MAX6900_REG_MN] = bin2bcd(tm->tm_min); regs[MAX6900_REG_HR] = bin2bcd(tm->tm_hour); regs[MAX6900_REG_DT] = bin2bcd(tm->tm_mday); regs[MAX6900_REG_MO] = bin2bcd(tm->tm_mon + 1); regs[MAX6900_REG_DW] = bin2bcd(tm->tm_wday); regs[MAX6900_REG_YR] = bin2bcd(tm->tm_year % 100); regs[MAX6900_REG_CENTURY] = bin2bcd((tm->tm_year + 1900) / 100); /* set write protect */ regs[MAX6900_REG_CT] = MAX6900_REG_CT_WP; rc = max6900_i2c_write_regs(client, regs); if (rc < 0) return rc; return 0; } static const struct rtc_class_ops max6900_rtc_ops = { .read_time = max6900_rtc_read_time, .set_time = max6900_rtc_set_time, }; static int max6900_probe(struct i2c_client *client) { struct rtc_device *rtc; if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) return -ENODEV; rtc = devm_rtc_device_register(&client->dev, max6900_driver.driver.name, &max6900_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); i2c_set_clientdata(client, rtc); return 0; } static const struct i2c_device_id max6900_id[] = { { "max6900", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, max6900_id); static struct i2c_driver max6900_driver = { .driver = { .name = "rtc-max6900", }, .probe = max6900_probe, .id_table = max6900_id, }; module_i2c_driver(max6900_driver); MODULE_DESCRIPTION("Maxim MAX6900 RTC driver"); MODULE_AUTHOR("Dale Farnsworth <[email protected]>"); MODULE_LICENSE("GPL");
linux-master
drivers/rtc/rtc-max6900.c
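The MAX6900 stores every time field in BCD, so the driver above round-trips through the kernel's bcd2bin()/bin2bcd() helpers. A standalone sketch of equivalent userspace helpers, shown only to illustrate the encoding the chip registers use:

#include <stdint.h>
#include <stdio.h>

/* Userspace equivalents of the kernel's BCD helpers: each decimal digit
 * occupies one nibble, so 59 is stored as 0x59.
 */
static uint8_t my_bcd2bin(uint8_t v) { return (v >> 4) * 10 + (v & 0x0f); }
static uint8_t my_bin2bcd(uint8_t v) { return ((v / 10) << 4) | (v % 10); }

int main(void)
{
	uint8_t reg = 0x59;	/* 59 seconds as stored on the chip */

	printf("%u -> 0x%02x\n", my_bcd2bin(reg), my_bin2bcd(59));
	return 0;
}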
// SPDX-License-Identifier: GPL-2.0 /* * RTC driver for the Micro Crystal RV3032 * * Copyright (C) 2020 Micro Crystal SA * * Alexandre Belloni <[email protected]> * */ #include <linux/clk.h> #include <linux/clk-provider.h> #include <linux/bcd.h> #include <linux/bitfield.h> #include <linux/bitops.h> #include <linux/hwmon.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/log2.h> #include <linux/module.h> #include <linux/of.h> #include <linux/regmap.h> #include <linux/rtc.h> #define RV3032_SEC 0x01 #define RV3032_MIN 0x02 #define RV3032_HOUR 0x03 #define RV3032_WDAY 0x04 #define RV3032_DAY 0x05 #define RV3032_MONTH 0x06 #define RV3032_YEAR 0x07 #define RV3032_ALARM_MIN 0x08 #define RV3032_ALARM_HOUR 0x09 #define RV3032_ALARM_DAY 0x0A #define RV3032_STATUS 0x0D #define RV3032_TLSB 0x0E #define RV3032_TMSB 0x0F #define RV3032_CTRL1 0x10 #define RV3032_CTRL2 0x11 #define RV3032_CTRL3 0x12 #define RV3032_TS_CTRL 0x13 #define RV3032_CLK_IRQ 0x14 #define RV3032_EEPROM_ADDR 0x3D #define RV3032_EEPROM_DATA 0x3E #define RV3032_EEPROM_CMD 0x3F #define RV3032_RAM1 0x40 #define RV3032_PMU 0xC0 #define RV3032_OFFSET 0xC1 #define RV3032_CLKOUT1 0xC2 #define RV3032_CLKOUT2 0xC3 #define RV3032_TREF0 0xC4 #define RV3032_TREF1 0xC5 #define RV3032_STATUS_VLF BIT(0) #define RV3032_STATUS_PORF BIT(1) #define RV3032_STATUS_EVF BIT(2) #define RV3032_STATUS_AF BIT(3) #define RV3032_STATUS_TF BIT(4) #define RV3032_STATUS_UF BIT(5) #define RV3032_STATUS_TLF BIT(6) #define RV3032_STATUS_THF BIT(7) #define RV3032_TLSB_CLKF BIT(1) #define RV3032_TLSB_EEBUSY BIT(2) #define RV3032_TLSB_TEMP GENMASK(7, 4) #define RV3032_CLKOUT2_HFD_MSK GENMASK(4, 0) #define RV3032_CLKOUT2_FD_MSK GENMASK(6, 5) #define RV3032_CLKOUT2_OS BIT(7) #define RV3032_CTRL1_EERD BIT(3) #define RV3032_CTRL1_WADA BIT(5) #define RV3032_CTRL2_STOP BIT(0) #define RV3032_CTRL2_EIE BIT(2) #define RV3032_CTRL2_AIE BIT(3) #define RV3032_CTRL2_TIE BIT(4) #define RV3032_CTRL2_UIE BIT(5) #define RV3032_CTRL2_CLKIE BIT(6) #define RV3032_CTRL2_TSE BIT(7) #define RV3032_PMU_TCM GENMASK(1, 0) #define RV3032_PMU_TCR GENMASK(3, 2) #define RV3032_PMU_BSM GENMASK(5, 4) #define RV3032_PMU_NCLKE BIT(6) #define RV3032_PMU_BSM_DSM 1 #define RV3032_PMU_BSM_LSM 2 #define RV3032_OFFSET_MSK GENMASK(5, 0) #define RV3032_EVT_CTRL_TSR BIT(2) #define RV3032_EEPROM_CMD_UPDATE 0x11 #define RV3032_EEPROM_CMD_WRITE 0x21 #define RV3032_EEPROM_CMD_READ 0x22 #define RV3032_EEPROM_USER 0xCB #define RV3032_EEBUSY_POLL 10000 #define RV3032_EEBUSY_TIMEOUT 100000 #define OFFSET_STEP_PPT 238419 struct rv3032_data { struct regmap *regmap; struct rtc_device *rtc; bool trickle_charger_set; #ifdef CONFIG_COMMON_CLK struct clk_hw clkout_hw; #endif }; static u16 rv3032_trickle_resistors[] = {1000, 2000, 7000, 11000}; static u16 rv3032_trickle_voltages[] = {0, 1750, 3000, 4400}; static int rv3032_exit_eerd(struct rv3032_data *rv3032, u32 eerd) { if (eerd) return 0; return regmap_update_bits(rv3032->regmap, RV3032_CTRL1, RV3032_CTRL1_EERD, 0); } static int rv3032_enter_eerd(struct rv3032_data *rv3032, u32 *eerd) { u32 ctrl1, status; int ret; ret = regmap_read(rv3032->regmap, RV3032_CTRL1, &ctrl1); if (ret) return ret; *eerd = ctrl1 & RV3032_CTRL1_EERD; if (*eerd) return 0; ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL1, RV3032_CTRL1_EERD, RV3032_CTRL1_EERD); if (ret) return ret; ret = regmap_read_poll_timeout(rv3032->regmap, RV3032_TLSB, status, !(status & RV3032_TLSB_EEBUSY), RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT); if (ret) { 
rv3032_exit_eerd(rv3032, *eerd); return ret; } return 0; } static int rv3032_update_cfg(struct rv3032_data *rv3032, unsigned int reg, unsigned int mask, unsigned int val) { u32 status, eerd; int ret; ret = rv3032_enter_eerd(rv3032, &eerd); if (ret) return ret; ret = regmap_update_bits(rv3032->regmap, reg, mask, val); if (ret) goto exit_eerd; ret = regmap_write(rv3032->regmap, RV3032_EEPROM_CMD, RV3032_EEPROM_CMD_UPDATE); if (ret) goto exit_eerd; usleep_range(46000, RV3032_EEBUSY_TIMEOUT); ret = regmap_read_poll_timeout(rv3032->regmap, RV3032_TLSB, status, !(status & RV3032_TLSB_EEBUSY), RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT); exit_eerd: rv3032_exit_eerd(rv3032, eerd); return ret; } static irqreturn_t rv3032_handle_irq(int irq, void *dev_id) { struct rv3032_data *rv3032 = dev_id; unsigned long events = 0; u32 status = 0, ctrl = 0; if (regmap_read(rv3032->regmap, RV3032_STATUS, &status) < 0 || status == 0) { return IRQ_NONE; } if (status & RV3032_STATUS_TF) { status |= RV3032_STATUS_TF; ctrl |= RV3032_CTRL2_TIE; events |= RTC_PF; } if (status & RV3032_STATUS_AF) { status |= RV3032_STATUS_AF; ctrl |= RV3032_CTRL2_AIE; events |= RTC_AF; } if (status & RV3032_STATUS_UF) { status |= RV3032_STATUS_UF; ctrl |= RV3032_CTRL2_UIE; events |= RTC_UF; } if (events) { rtc_update_irq(rv3032->rtc, 1, events); regmap_update_bits(rv3032->regmap, RV3032_STATUS, status, 0); regmap_update_bits(rv3032->regmap, RV3032_CTRL2, ctrl, 0); } return IRQ_HANDLED; } static int rv3032_get_time(struct device *dev, struct rtc_time *tm) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); u8 date[7]; int ret, status; ret = regmap_read(rv3032->regmap, RV3032_STATUS, &status); if (ret < 0) return ret; if (status & (RV3032_STATUS_PORF | RV3032_STATUS_VLF)) return -EINVAL; ret = regmap_bulk_read(rv3032->regmap, RV3032_SEC, date, sizeof(date)); if (ret) return ret; tm->tm_sec = bcd2bin(date[0] & 0x7f); tm->tm_min = bcd2bin(date[1] & 0x7f); tm->tm_hour = bcd2bin(date[2] & 0x3f); tm->tm_wday = date[3] & 0x7; tm->tm_mday = bcd2bin(date[4] & 0x3f); tm->tm_mon = bcd2bin(date[5] & 0x1f) - 1; tm->tm_year = bcd2bin(date[6]) + 100; return 0; } static int rv3032_set_time(struct device *dev, struct rtc_time *tm) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); u8 date[7]; int ret; date[0] = bin2bcd(tm->tm_sec); date[1] = bin2bcd(tm->tm_min); date[2] = bin2bcd(tm->tm_hour); date[3] = tm->tm_wday; date[4] = bin2bcd(tm->tm_mday); date[5] = bin2bcd(tm->tm_mon + 1); date[6] = bin2bcd(tm->tm_year - 100); ret = regmap_bulk_write(rv3032->regmap, RV3032_SEC, date, sizeof(date)); if (ret) return ret; ret = regmap_update_bits(rv3032->regmap, RV3032_STATUS, RV3032_STATUS_PORF | RV3032_STATUS_VLF, 0); return ret; } static int rv3032_get_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); u8 alarmvals[3]; int status, ctrl, ret; ret = regmap_bulk_read(rv3032->regmap, RV3032_ALARM_MIN, alarmvals, sizeof(alarmvals)); if (ret) return ret; ret = regmap_read(rv3032->regmap, RV3032_STATUS, &status); if (ret < 0) return ret; ret = regmap_read(rv3032->regmap, RV3032_CTRL2, &ctrl); if (ret < 0) return ret; alrm->time.tm_sec = 0; alrm->time.tm_min = bcd2bin(alarmvals[0] & 0x7f); alrm->time.tm_hour = bcd2bin(alarmvals[1] & 0x3f); alrm->time.tm_mday = bcd2bin(alarmvals[2] & 0x3f); alrm->enabled = !!(ctrl & RV3032_CTRL2_AIE); alrm->pending = (status & RV3032_STATUS_AF) && alrm->enabled; return 0; } static int rv3032_set_alarm(struct device *dev, struct rtc_wkalrm *alrm) { struct rv3032_data *rv3032 = 
dev_get_drvdata(dev); u8 alarmvals[3]; u8 ctrl = 0; int ret; ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL2, RV3032_CTRL2_AIE | RV3032_CTRL2_UIE, 0); if (ret) return ret; alarmvals[0] = bin2bcd(alrm->time.tm_min); alarmvals[1] = bin2bcd(alrm->time.tm_hour); alarmvals[2] = bin2bcd(alrm->time.tm_mday); ret = regmap_update_bits(rv3032->regmap, RV3032_STATUS, RV3032_STATUS_AF, 0); if (ret) return ret; ret = regmap_bulk_write(rv3032->regmap, RV3032_ALARM_MIN, alarmvals, sizeof(alarmvals)); if (ret) return ret; if (alrm->enabled) { if (rv3032->rtc->uie_rtctimer.enabled) ctrl |= RV3032_CTRL2_UIE; if (rv3032->rtc->aie_timer.enabled) ctrl |= RV3032_CTRL2_AIE; } ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL2, RV3032_CTRL2_UIE | RV3032_CTRL2_AIE, ctrl); return ret; } static int rv3032_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); int ctrl = 0, ret; if (enabled) { if (rv3032->rtc->uie_rtctimer.enabled) ctrl |= RV3032_CTRL2_UIE; if (rv3032->rtc->aie_timer.enabled) ctrl |= RV3032_CTRL2_AIE; } ret = regmap_update_bits(rv3032->regmap, RV3032_STATUS, RV3032_STATUS_AF | RV3032_STATUS_UF, 0); if (ret) return ret; ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL2, RV3032_CTRL2_UIE | RV3032_CTRL2_AIE, ctrl); if (ret) return ret; return 0; } static int rv3032_read_offset(struct device *dev, long *offset) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); int ret, value, steps; ret = regmap_read(rv3032->regmap, RV3032_OFFSET, &value); if (ret < 0) return ret; steps = sign_extend32(FIELD_GET(RV3032_OFFSET_MSK, value), 5); *offset = DIV_ROUND_CLOSEST(steps * OFFSET_STEP_PPT, 1000); return 0; } static int rv3032_set_offset(struct device *dev, long offset) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); offset = clamp(offset, -7629L, 7391L) * 1000; offset = DIV_ROUND_CLOSEST(offset, OFFSET_STEP_PPT); return rv3032_update_cfg(rv3032, RV3032_OFFSET, RV3032_OFFSET_MSK, FIELD_PREP(RV3032_OFFSET_MSK, offset)); } static int rv3032_param_get(struct device *dev, struct rtc_param *param) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); int ret; switch(param->param) { u32 value; case RTC_PARAM_BACKUP_SWITCH_MODE: ret = regmap_read(rv3032->regmap, RV3032_PMU, &value); if (ret < 0) return ret; value = FIELD_GET(RV3032_PMU_BSM, value); switch(value) { case RV3032_PMU_BSM_DSM: param->uvalue = RTC_BSM_DIRECT; break; case RV3032_PMU_BSM_LSM: param->uvalue = RTC_BSM_LEVEL; break; default: param->uvalue = RTC_BSM_DISABLED; } break; default: return -EINVAL; } return 0; } static int rv3032_param_set(struct device *dev, struct rtc_param *param) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); switch(param->param) { u8 mode; case RTC_PARAM_BACKUP_SWITCH_MODE: if (rv3032->trickle_charger_set) return -EINVAL; switch (param->uvalue) { case RTC_BSM_DISABLED: mode = 0; break; case RTC_BSM_DIRECT: mode = RV3032_PMU_BSM_DSM; break; case RTC_BSM_LEVEL: mode = RV3032_PMU_BSM_LSM; break; default: return -EINVAL; } return rv3032_update_cfg(rv3032, RV3032_PMU, RV3032_PMU_BSM, FIELD_PREP(RV3032_PMU_BSM, mode)); default: return -EINVAL; } return 0; } static int rv3032_ioctl(struct device *dev, unsigned int cmd, unsigned long arg) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); int status, val = 0, ret = 0; switch (cmd) { case RTC_VL_READ: ret = regmap_read(rv3032->regmap, RV3032_STATUS, &status); if (ret < 0) return ret; if (status & (RV3032_STATUS_PORF | RV3032_STATUS_VLF)) val = RTC_VL_DATA_INVALID; return put_user(val, (unsigned int __user 
*)arg); default: return -ENOIOCTLCMD; } } static int rv3032_nvram_write(void *priv, unsigned int offset, void *val, size_t bytes) { return regmap_bulk_write(priv, RV3032_RAM1 + offset, val, bytes); } static int rv3032_nvram_read(void *priv, unsigned int offset, void *val, size_t bytes) { return regmap_bulk_read(priv, RV3032_RAM1 + offset, val, bytes); } static int rv3032_eeprom_write(void *priv, unsigned int offset, void *val, size_t bytes) { struct rv3032_data *rv3032 = priv; u32 status, eerd; int i, ret; u8 *buf = val; ret = rv3032_enter_eerd(rv3032, &eerd); if (ret) return ret; for (i = 0; i < bytes; i++) { ret = regmap_write(rv3032->regmap, RV3032_EEPROM_ADDR, RV3032_EEPROM_USER + offset + i); if (ret) goto exit_eerd; ret = regmap_write(rv3032->regmap, RV3032_EEPROM_DATA, buf[i]); if (ret) goto exit_eerd; ret = regmap_write(rv3032->regmap, RV3032_EEPROM_CMD, RV3032_EEPROM_CMD_WRITE); if (ret) goto exit_eerd; usleep_range(RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT); ret = regmap_read_poll_timeout(rv3032->regmap, RV3032_TLSB, status, !(status & RV3032_TLSB_EEBUSY), RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT); if (ret) goto exit_eerd; } exit_eerd: rv3032_exit_eerd(rv3032, eerd); return ret; } static int rv3032_eeprom_read(void *priv, unsigned int offset, void *val, size_t bytes) { struct rv3032_data *rv3032 = priv; u32 status, eerd, data; int i, ret; u8 *buf = val; ret = rv3032_enter_eerd(rv3032, &eerd); if (ret) return ret; for (i = 0; i < bytes; i++) { ret = regmap_write(rv3032->regmap, RV3032_EEPROM_ADDR, RV3032_EEPROM_USER + offset + i); if (ret) goto exit_eerd; ret = regmap_write(rv3032->regmap, RV3032_EEPROM_CMD, RV3032_EEPROM_CMD_READ); if (ret) goto exit_eerd; ret = regmap_read_poll_timeout(rv3032->regmap, RV3032_TLSB, status, !(status & RV3032_TLSB_EEBUSY), RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT); if (ret) goto exit_eerd; ret = regmap_read(rv3032->regmap, RV3032_EEPROM_DATA, &data); if (ret) goto exit_eerd; buf[i] = data; } exit_eerd: rv3032_exit_eerd(rv3032, eerd); return ret; } static int rv3032_trickle_charger_setup(struct device *dev, struct rv3032_data *rv3032) { u32 val, ohms, voltage; int i; val = FIELD_PREP(RV3032_PMU_TCM, 1) | FIELD_PREP(RV3032_PMU_BSM, RV3032_PMU_BSM_DSM); if (!device_property_read_u32(dev, "trickle-voltage-millivolt", &voltage)) { for (i = 0; i < ARRAY_SIZE(rv3032_trickle_voltages); i++) if (voltage == rv3032_trickle_voltages[i]) break; if (i < ARRAY_SIZE(rv3032_trickle_voltages)) val = FIELD_PREP(RV3032_PMU_TCM, i) | FIELD_PREP(RV3032_PMU_BSM, RV3032_PMU_BSM_LSM); } if (device_property_read_u32(dev, "trickle-resistor-ohms", &ohms)) return 0; for (i = 0; i < ARRAY_SIZE(rv3032_trickle_resistors); i++) if (ohms == rv3032_trickle_resistors[i]) break; if (i >= ARRAY_SIZE(rv3032_trickle_resistors)) { dev_warn(dev, "invalid trickle resistor value\n"); return 0; } rv3032->trickle_charger_set = true; return rv3032_update_cfg(rv3032, RV3032_PMU, RV3032_PMU_TCR | RV3032_PMU_TCM | RV3032_PMU_BSM, val | FIELD_PREP(RV3032_PMU_TCR, i)); } #ifdef CONFIG_COMMON_CLK #define clkout_hw_to_rv3032(hw) container_of(hw, struct rv3032_data, clkout_hw) static int clkout_xtal_rates[] = { 32768, 1024, 64, 1, }; #define RV3032_HFD_STEP 8192 static unsigned long rv3032_clkout_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { int clkout, ret; struct rv3032_data *rv3032 = clkout_hw_to_rv3032(hw); ret = regmap_read(rv3032->regmap, RV3032_CLKOUT2, &clkout); if (ret < 0) return 0; if (clkout & RV3032_CLKOUT2_OS) { unsigned long rate = FIELD_GET(RV3032_CLKOUT2_HFD_MSK, 
clkout) << 8; ret = regmap_read(rv3032->regmap, RV3032_CLKOUT1, &clkout); if (ret < 0) return 0; rate += clkout + 1; return rate * RV3032_HFD_STEP; } return clkout_xtal_rates[FIELD_GET(RV3032_CLKOUT2_FD_MSK, clkout)]; } static long rv3032_clkout_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *prate) { int i, hfd; if (rate < RV3032_HFD_STEP) for (i = 0; i < ARRAY_SIZE(clkout_xtal_rates); i++) if (clkout_xtal_rates[i] <= rate) return clkout_xtal_rates[i]; hfd = DIV_ROUND_CLOSEST(rate, RV3032_HFD_STEP); return RV3032_HFD_STEP * clamp(hfd, 0, 8192); } static int rv3032_clkout_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct rv3032_data *rv3032 = clkout_hw_to_rv3032(hw); u32 status, eerd; int i, hfd, ret; for (i = 0; i < ARRAY_SIZE(clkout_xtal_rates); i++) { if (clkout_xtal_rates[i] == rate) { return rv3032_update_cfg(rv3032, RV3032_CLKOUT2, 0xff, FIELD_PREP(RV3032_CLKOUT2_FD_MSK, i)); } } hfd = DIV_ROUND_CLOSEST(rate, RV3032_HFD_STEP); hfd = clamp(hfd, 1, 8192) - 1; ret = rv3032_enter_eerd(rv3032, &eerd); if (ret) return ret; ret = regmap_write(rv3032->regmap, RV3032_CLKOUT1, hfd & 0xff); if (ret) goto exit_eerd; ret = regmap_write(rv3032->regmap, RV3032_CLKOUT2, RV3032_CLKOUT2_OS | FIELD_PREP(RV3032_CLKOUT2_HFD_MSK, hfd >> 8)); if (ret) goto exit_eerd; ret = regmap_write(rv3032->regmap, RV3032_EEPROM_CMD, RV3032_EEPROM_CMD_UPDATE); if (ret) goto exit_eerd; usleep_range(46000, RV3032_EEBUSY_TIMEOUT); ret = regmap_read_poll_timeout(rv3032->regmap, RV3032_TLSB, status, !(status & RV3032_TLSB_EEBUSY), RV3032_EEBUSY_POLL, RV3032_EEBUSY_TIMEOUT); exit_eerd: rv3032_exit_eerd(rv3032, eerd); return ret; } static int rv3032_clkout_prepare(struct clk_hw *hw) { struct rv3032_data *rv3032 = clkout_hw_to_rv3032(hw); return rv3032_update_cfg(rv3032, RV3032_PMU, RV3032_PMU_NCLKE, 0); } static void rv3032_clkout_unprepare(struct clk_hw *hw) { struct rv3032_data *rv3032 = clkout_hw_to_rv3032(hw); rv3032_update_cfg(rv3032, RV3032_PMU, RV3032_PMU_NCLKE, RV3032_PMU_NCLKE); } static int rv3032_clkout_is_prepared(struct clk_hw *hw) { int val, ret; struct rv3032_data *rv3032 = clkout_hw_to_rv3032(hw); ret = regmap_read(rv3032->regmap, RV3032_PMU, &val); if (ret < 0) return ret; return !(val & RV3032_PMU_NCLKE); } static const struct clk_ops rv3032_clkout_ops = { .prepare = rv3032_clkout_prepare, .unprepare = rv3032_clkout_unprepare, .is_prepared = rv3032_clkout_is_prepared, .recalc_rate = rv3032_clkout_recalc_rate, .round_rate = rv3032_clkout_round_rate, .set_rate = rv3032_clkout_set_rate, }; static int rv3032_clkout_register_clk(struct rv3032_data *rv3032, struct i2c_client *client) { int ret; struct clk *clk; struct clk_init_data init; struct device_node *node = client->dev.of_node; ret = regmap_update_bits(rv3032->regmap, RV3032_TLSB, RV3032_TLSB_CLKF, 0); if (ret < 0) return ret; ret = regmap_update_bits(rv3032->regmap, RV3032_CTRL2, RV3032_CTRL2_CLKIE, 0); if (ret < 0) return ret; ret = regmap_write(rv3032->regmap, RV3032_CLK_IRQ, 0); if (ret < 0) return ret; init.name = "rv3032-clkout"; init.ops = &rv3032_clkout_ops; init.flags = 0; init.parent_names = NULL; init.num_parents = 0; rv3032->clkout_hw.init = &init; of_property_read_string(node, "clock-output-names", &init.name); clk = devm_clk_register(&client->dev, &rv3032->clkout_hw); if (!IS_ERR(clk)) of_clk_add_provider(node, of_clk_src_simple_get, clk); return 0; } #endif static int rv3032_hwmon_read_temp(struct device *dev, long *mC) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); u8 buf[2]; int temp, 
prev = 0; int ret; ret = regmap_bulk_read(rv3032->regmap, RV3032_TLSB, buf, sizeof(buf)); if (ret) return ret; temp = sign_extend32(buf[1], 7) << 4; temp |= FIELD_GET(RV3032_TLSB_TEMP, buf[0]); /* No blocking or shadowing on RV3032_TLSB and RV3032_TMSB */ do { prev = temp; ret = regmap_bulk_read(rv3032->regmap, RV3032_TLSB, buf, sizeof(buf)); if (ret) return ret; temp = sign_extend32(buf[1], 7) << 4; temp |= FIELD_GET(RV3032_TLSB_TEMP, buf[0]); } while (temp != prev); *mC = (temp * 1000) / 16; return 0; } static umode_t rv3032_hwmon_is_visible(const void *data, enum hwmon_sensor_types type, u32 attr, int channel) { if (type != hwmon_temp) return 0; switch (attr) { case hwmon_temp_input: return 0444; default: return 0; } } static int rv3032_hwmon_read(struct device *dev, enum hwmon_sensor_types type, u32 attr, int channel, long *temp) { int err; switch (attr) { case hwmon_temp_input: err = rv3032_hwmon_read_temp(dev, temp); break; default: err = -EOPNOTSUPP; break; } return err; } static const struct hwmon_channel_info * const rv3032_hwmon_info[] = { HWMON_CHANNEL_INFO(chip, HWMON_C_REGISTER_TZ), HWMON_CHANNEL_INFO(temp, HWMON_T_INPUT | HWMON_T_MAX | HWMON_T_MAX_HYST), NULL }; static const struct hwmon_ops rv3032_hwmon_hwmon_ops = { .is_visible = rv3032_hwmon_is_visible, .read = rv3032_hwmon_read, }; static const struct hwmon_chip_info rv3032_hwmon_chip_info = { .ops = &rv3032_hwmon_hwmon_ops, .info = rv3032_hwmon_info, }; static void rv3032_hwmon_register(struct device *dev) { struct rv3032_data *rv3032 = dev_get_drvdata(dev); if (!IS_REACHABLE(CONFIG_HWMON)) return; devm_hwmon_device_register_with_info(dev, "rv3032", rv3032, &rv3032_hwmon_chip_info, NULL); } static const struct rtc_class_ops rv3032_rtc_ops = { .read_time = rv3032_get_time, .set_time = rv3032_set_time, .read_offset = rv3032_read_offset, .set_offset = rv3032_set_offset, .ioctl = rv3032_ioctl, .read_alarm = rv3032_get_alarm, .set_alarm = rv3032_set_alarm, .alarm_irq_enable = rv3032_alarm_irq_enable, .param_get = rv3032_param_get, .param_set = rv3032_param_set, }; static const struct regmap_config regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = 0xCA, }; static int rv3032_probe(struct i2c_client *client) { struct rv3032_data *rv3032; int ret, status; struct nvmem_config nvmem_cfg = { .name = "rv3032_nvram", .word_size = 1, .stride = 1, .size = 16, .type = NVMEM_TYPE_BATTERY_BACKED, .reg_read = rv3032_nvram_read, .reg_write = rv3032_nvram_write, }; struct nvmem_config eeprom_cfg = { .name = "rv3032_eeprom", .word_size = 1, .stride = 1, .size = 32, .type = NVMEM_TYPE_EEPROM, .reg_read = rv3032_eeprom_read, .reg_write = rv3032_eeprom_write, }; rv3032 = devm_kzalloc(&client->dev, sizeof(struct rv3032_data), GFP_KERNEL); if (!rv3032) return -ENOMEM; rv3032->regmap = devm_regmap_init_i2c(client, &regmap_config); if (IS_ERR(rv3032->regmap)) return PTR_ERR(rv3032->regmap); i2c_set_clientdata(client, rv3032); ret = regmap_read(rv3032->regmap, RV3032_STATUS, &status); if (ret < 0) return ret; rv3032->rtc = devm_rtc_allocate_device(&client->dev); if (IS_ERR(rv3032->rtc)) return PTR_ERR(rv3032->rtc); if (client->irq > 0) { unsigned long irqflags = IRQF_TRIGGER_LOW; if (dev_fwnode(&client->dev)) irqflags = 0; ret = devm_request_threaded_irq(&client->dev, client->irq, NULL, rv3032_handle_irq, irqflags | IRQF_ONESHOT, "rv3032", rv3032); if (ret) { dev_warn(&client->dev, "unable to request IRQ, alarms disabled\n"); client->irq = 0; } } if (!client->irq) clear_bit(RTC_FEATURE_ALARM, rv3032->rtc->features); ret = 
regmap_update_bits(rv3032->regmap, RV3032_CTRL1, RV3032_CTRL1_WADA, RV3032_CTRL1_WADA); if (ret) return ret; rv3032_trickle_charger_setup(&client->dev, rv3032); set_bit(RTC_FEATURE_BACKUP_SWITCH_MODE, rv3032->rtc->features); set_bit(RTC_FEATURE_ALARM_RES_MINUTE, rv3032->rtc->features); rv3032->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000; rv3032->rtc->range_max = RTC_TIMESTAMP_END_2099; rv3032->rtc->ops = &rv3032_rtc_ops; ret = devm_rtc_register_device(rv3032->rtc); if (ret) return ret; nvmem_cfg.priv = rv3032->regmap; devm_rtc_nvmem_register(rv3032->rtc, &nvmem_cfg); eeprom_cfg.priv = rv3032; devm_rtc_nvmem_register(rv3032->rtc, &eeprom_cfg); rv3032->rtc->max_user_freq = 1; #ifdef CONFIG_COMMON_CLK rv3032_clkout_register_clk(rv3032, client); #endif rv3032_hwmon_register(&client->dev); return 0; } static const struct acpi_device_id rv3032_i2c_acpi_match[] = { { "MCRY3032" }, { } }; MODULE_DEVICE_TABLE(acpi, rv3032_i2c_acpi_match); static const __maybe_unused struct of_device_id rv3032_of_match[] = { { .compatible = "microcrystal,rv3032", }, { } }; MODULE_DEVICE_TABLE(of, rv3032_of_match); static struct i2c_driver rv3032_driver = { .driver = { .name = "rtc-rv3032", .acpi_match_table = rv3032_i2c_acpi_match, .of_match_table = of_match_ptr(rv3032_of_match), }, .probe = rv3032_probe, }; module_i2c_driver(rv3032_driver); MODULE_AUTHOR("Alexandre Belloni <[email protected]>"); MODULE_DESCRIPTION("Micro Crystal RV3032 RTC driver"); MODULE_LICENSE("GPL v2");
linux-master
drivers/rtc/rtc-rv3032.c
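The offset resolution in the driver above is OFFSET_STEP_PPT = 238419 parts per trillion (about 0.238 ppm) per step of the signed 6-bit offset field. A small standalone check of the round-to-nearest conversion that rv3032_set_offset() performs, shown here for a positive request only (the kernel's DIV_ROUND_CLOSEST also handles negative values):

#include <stdio.h>

#define OFFSET_STEP_PPT 238419	/* ~0.238 ppm per register step */

/* Convert a requested offset in ppb to the step count, rounding to the
 * nearest step, mirroring rv3032_set_offset() above for positive input.
 */
int main(void)
{
	long ppb = 1000;	/* request +1 ppm */
	long long ppt = (long long)ppb * 1000;
	long steps = (long)((ppt + OFFSET_STEP_PPT / 2) / OFFSET_STEP_PPT);

	printf("%ld ppb -> %ld steps\n", ppb, steps);	/* prints 4 steps */
	return 0;
}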
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * rtc-tps65910.c -- TPS65910 Real Time Clock interface
 *
 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
 * Author: Venu Byravarasu <[email protected]>
 *
 * Based on original TI driver rtc-twl.c
 * Copyright (C) 2007 MontaVista Software, Inc
 * Author: Alexandre Rusev <[email protected]>
 */

#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/math64.h>
#include <linux/property.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/mfd/tps65910.h>

struct tps65910_rtc {
	struct rtc_device *rtc;
	int irq;
};

/* Total number of RTC registers needed to set time */
#define NUM_TIME_REGS	(TPS65910_YEARS - TPS65910_SECONDS + 1)

/* Total number of RTC registers needed to set compensation registers */
#define NUM_COMP_REGS	(TPS65910_RTC_COMP_MSB - TPS65910_RTC_COMP_LSB + 1)

/* Min and max values supported with 'offset' interface (swapped sign) */
#define MIN_OFFSET	(-277761)
#define MAX_OFFSET	(277778)

/* Number of ticks per hour */
#define TICKS_PER_HOUR	(32768 * 3600)

/* Multiplier for ppb conversions */
#define PPB_MULT	(1000000000LL)

static int tps65910_rtc_alarm_irq_enable(struct device *dev,
					 unsigned int enabled)
{
	struct tps65910 *tps = dev_get_drvdata(dev->parent);
	u8 val = 0;

	if (enabled)
		val = TPS65910_RTC_INTERRUPTS_IT_ALARM;

	return regmap_write(tps->regmap, TPS65910_RTC_INTERRUPTS, val);
}

/*
 * Gets current tps65910 RTC time and date parameters.
 *
 * The RTC's time/alarm representation is not what gmtime(3) requires
 * Linux to use:
 *
 * - Months are 1..12 vs Linux 0-11
 * - Years are 0..99 vs Linux 1900..N (we assume 21st century)
 */
static int tps65910_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	unsigned char rtc_data[NUM_TIME_REGS];
	struct tps65910 *tps = dev_get_drvdata(dev->parent);
	int ret;

	/* Copy RTC counting registers to static registers or latches */
	ret = regmap_update_bits(tps->regmap, TPS65910_RTC_CTRL,
		TPS65910_RTC_CTRL_GET_TIME, TPS65910_RTC_CTRL_GET_TIME);
	if (ret < 0) {
		dev_err(dev, "RTC CTRL reg update failed with err:%d\n", ret);
		return ret;
	}

	ret = regmap_bulk_read(tps->regmap, TPS65910_SECONDS, rtc_data,
		NUM_TIME_REGS);
	if (ret < 0) {
		dev_err(dev, "reading from RTC failed with err:%d\n", ret);
		return ret;
	}

	tm->tm_sec = bcd2bin(rtc_data[0]);
	tm->tm_min = bcd2bin(rtc_data[1]);
	tm->tm_hour = bcd2bin(rtc_data[2]);
	tm->tm_mday = bcd2bin(rtc_data[3]);
	tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
	tm->tm_year = bcd2bin(rtc_data[5]) + 100;

	return ret;
}

static int tps65910_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	unsigned char rtc_data[NUM_TIME_REGS];
	struct tps65910 *tps = dev_get_drvdata(dev->parent);
	int ret;

	rtc_data[0] = bin2bcd(tm->tm_sec);
	rtc_data[1] = bin2bcd(tm->tm_min);
	rtc_data[2] = bin2bcd(tm->tm_hour);
	rtc_data[3] = bin2bcd(tm->tm_mday);
	rtc_data[4] = bin2bcd(tm->tm_mon + 1);
	rtc_data[5] = bin2bcd(tm->tm_year - 100);

	/* Stop RTC while updating the RTC time registers */
	ret = regmap_update_bits(tps->regmap, TPS65910_RTC_CTRL,
		TPS65910_RTC_CTRL_STOP_RTC, 0);
	if (ret < 0) {
		dev_err(dev, "RTC stop failed with err:%d\n", ret);
		return ret;
	}

	/* update all the time registers in one shot */
	ret = regmap_bulk_write(tps->regmap, TPS65910_SECONDS, rtc_data,
		NUM_TIME_REGS);
	if (ret < 0) {
		dev_err(dev, "rtc_set_time error %d\n", ret);
		return ret;
	}

	/* Start back RTC */
	ret = regmap_update_bits(tps->regmap, TPS65910_RTC_CTRL,
		TPS65910_RTC_CTRL_STOP_RTC, 1);
	if (ret < 0)
		dev_err(dev, "RTC start failed with err:%d\n", ret);

	return ret;
}

/*
 * Gets current tps65910 RTC alarm time.
 */
static int tps65910_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	unsigned char alarm_data[NUM_TIME_REGS];
	u32 int_val;
	struct tps65910 *tps = dev_get_drvdata(dev->parent);
	int ret;

	ret = regmap_bulk_read(tps->regmap, TPS65910_ALARM_SECONDS,
		alarm_data, NUM_TIME_REGS);
	if (ret < 0) {
		dev_err(dev, "rtc_read_alarm error %d\n", ret);
		return ret;
	}

	alm->time.tm_sec = bcd2bin(alarm_data[0]);
	alm->time.tm_min = bcd2bin(alarm_data[1]);
	alm->time.tm_hour = bcd2bin(alarm_data[2]);
	alm->time.tm_mday = bcd2bin(alarm_data[3]);
	alm->time.tm_mon = bcd2bin(alarm_data[4]) - 1;
	alm->time.tm_year = bcd2bin(alarm_data[5]) + 100;

	ret = regmap_read(tps->regmap, TPS65910_RTC_INTERRUPTS, &int_val);
	if (ret < 0)
		return ret;

	if (int_val & TPS65910_RTC_INTERRUPTS_IT_ALARM)
		alm->enabled = 1;

	return ret;
}

static int tps65910_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	unsigned char alarm_data[NUM_TIME_REGS];
	struct tps65910 *tps = dev_get_drvdata(dev->parent);
	int ret;

	ret = tps65910_rtc_alarm_irq_enable(dev, 0);
	if (ret)
		return ret;

	alarm_data[0] = bin2bcd(alm->time.tm_sec);
	alarm_data[1] = bin2bcd(alm->time.tm_min);
	alarm_data[2] = bin2bcd(alm->time.tm_hour);
	alarm_data[3] = bin2bcd(alm->time.tm_mday);
	alarm_data[4] = bin2bcd(alm->time.tm_mon + 1);
	alarm_data[5] = bin2bcd(alm->time.tm_year - 100);

	/* update all the alarm registers in one shot */
	ret = regmap_bulk_write(tps->regmap, TPS65910_ALARM_SECONDS,
		alarm_data, NUM_TIME_REGS);
	if (ret) {
		dev_err(dev, "rtc_set_alarm error %d\n", ret);
		return ret;
	}

	if (alm->enabled)
		ret = tps65910_rtc_alarm_irq_enable(dev, 1);

	return ret;
}

static int tps65910_rtc_set_calibration(struct device *dev, int calibration)
{
	unsigned char comp_data[NUM_COMP_REGS];
	struct tps65910 *tps = dev_get_drvdata(dev->parent);
	s16 value;
	int ret;

	/*
	 * TPS65910 uses two's complement 16 bit value for compensation for RTC
	 * crystal inaccuracies. One time every hour when seconds counter
	 * increments from 0 to 1 compensation value will be added to internal
	 * RTC counter value.
	 *
	 * Compensation value 0x7FFF is prohibited value.
	 *
	 * Valid range for compensation value: [-32768 .. 32766]
	 */
	if ((calibration < -32768) || (calibration > 32766)) {
		dev_err(dev, "RTC calibration value out of range: %d\n",
			calibration);
		return -EINVAL;
	}

	value = (s16)calibration;

	comp_data[0] = (u16)value & 0xFF;
	comp_data[1] = ((u16)value >> 8) & 0xFF;

	/* Update all the compensation registers in one shot */
	ret = regmap_bulk_write(tps->regmap, TPS65910_RTC_COMP_LSB,
		comp_data, NUM_COMP_REGS);
	if (ret < 0) {
		dev_err(dev, "rtc_set_calibration error: %d\n", ret);
		return ret;
	}

	/* Enable automatic compensation */
	ret = regmap_update_bits(tps->regmap, TPS65910_RTC_CTRL,
		TPS65910_RTC_CTRL_AUTO_COMP, TPS65910_RTC_CTRL_AUTO_COMP);
	if (ret < 0)
		dev_err(dev, "auto_comp enable failed with error: %d\n", ret);

	return ret;
}

static int tps65910_rtc_get_calibration(struct device *dev, int *calibration)
{
	unsigned char comp_data[NUM_COMP_REGS];
	struct tps65910 *tps = dev_get_drvdata(dev->parent);
	unsigned int ctrl;
	u16 value;
	int ret;

	ret = regmap_read(tps->regmap, TPS65910_RTC_CTRL, &ctrl);
	if (ret < 0)
		return ret;

	/* If automatic compensation is not enabled report back zero */
	if (!(ctrl & TPS65910_RTC_CTRL_AUTO_COMP)) {
		*calibration = 0;
		return 0;
	}

	ret = regmap_bulk_read(tps->regmap, TPS65910_RTC_COMP_LSB,
		comp_data, NUM_COMP_REGS);
	if (ret < 0) {
		dev_err(dev, "rtc_get_calibration error: %d\n", ret);
		return ret;
	}

	value = (u16)comp_data[0] | ((u16)comp_data[1] << 8);

	*calibration = (s16)value;

	return 0;
}

static int tps65910_read_offset(struct device *dev, long *offset)
{
	int calibration;
	s64 tmp;
	int ret;

	ret = tps65910_rtc_get_calibration(dev, &calibration);
	if (ret < 0)
		return ret;

	/* Convert from RTC calibration register format to ppb format */
	tmp = calibration * (s64)PPB_MULT;
	if (tmp < 0)
		tmp -= TICKS_PER_HOUR / 2LL;
	else
		tmp += TICKS_PER_HOUR / 2LL;
	tmp = div_s64(tmp, TICKS_PER_HOUR);

	/* Offset value operates in negative way, so swap sign */
	*offset = (long)-tmp;

	return 0;
}

static int tps65910_set_offset(struct device *dev, long offset)
{
	int calibration;
	s64 tmp;
	int ret;

	/* Make sure offset value is within supported range */
	if (offset < MIN_OFFSET || offset > MAX_OFFSET)
		return -ERANGE;

	/* Convert from ppb format to RTC calibration register format */
	tmp = offset * (s64)TICKS_PER_HOUR;
	if (tmp < 0)
		tmp -= PPB_MULT / 2LL;
	else
		tmp += PPB_MULT / 2LL;
	tmp = div_s64(tmp, PPB_MULT);

	/* Offset value operates in negative way, so swap sign */
	calibration = (int)-tmp;

	ret = tps65910_rtc_set_calibration(dev, calibration);

	return ret;
}

static irqreturn_t tps65910_rtc_interrupt(int irq, void *rtc)
{
	struct device *dev = rtc;
	unsigned long events = 0;
	struct tps65910 *tps = dev_get_drvdata(dev->parent);
	struct tps65910_rtc *tps_rtc = dev_get_drvdata(dev);
	int ret;
	u32 rtc_reg;

	ret = regmap_read(tps->regmap, TPS65910_RTC_STATUS, &rtc_reg);
	if (ret)
		return IRQ_NONE;

	if (rtc_reg & TPS65910_RTC_STATUS_ALARM)
		events = RTC_IRQF | RTC_AF;

	ret = regmap_write(tps->regmap, TPS65910_RTC_STATUS, rtc_reg);
	if (ret)
		return IRQ_NONE;

	/* Notify RTC core on event */
	rtc_update_irq(tps_rtc->rtc, 1, events);

	return IRQ_HANDLED;
}

static const struct rtc_class_ops tps65910_rtc_ops = {
	.read_time = tps65910_rtc_read_time,
	.set_time = tps65910_rtc_set_time,
	.read_alarm = tps65910_rtc_read_alarm,
	.set_alarm = tps65910_rtc_set_alarm,
	.alarm_irq_enable = tps65910_rtc_alarm_irq_enable,
	.read_offset = tps65910_read_offset,
	.set_offset = tps65910_set_offset,
};

static int tps65910_rtc_probe(struct platform_device *pdev)
{
	struct tps65910 *tps65910 = NULL;
	struct tps65910_rtc *tps_rtc = NULL;
	int ret;
	int irq;
	u32 rtc_reg;

	tps65910 = dev_get_drvdata(pdev->dev.parent);

	tps_rtc = devm_kzalloc(&pdev->dev, sizeof(struct tps65910_rtc),
			GFP_KERNEL);
	if (!tps_rtc)
		return -ENOMEM;

	tps_rtc->rtc = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(tps_rtc->rtc))
		return PTR_ERR(tps_rtc->rtc);

	/* Clear pending interrupts */
	ret = regmap_read(tps65910->regmap, TPS65910_RTC_STATUS, &rtc_reg);
	if (ret < 0)
		return ret;

	ret = regmap_write(tps65910->regmap, TPS65910_RTC_STATUS, rtc_reg);
	if (ret < 0)
		return ret;

	dev_dbg(&pdev->dev, "Enabling rtc-tps65910.\n");

	/* Enable RTC digital power domain */
	ret = regmap_update_bits(tps65910->regmap, TPS65910_DEVCTRL,
		DEVCTRL_RTC_PWDN_MASK, 0 << DEVCTRL_RTC_PWDN_SHIFT);
	if (ret < 0)
		return ret;

	rtc_reg = TPS65910_RTC_CTRL_STOP_RTC;
	ret = regmap_write(tps65910->regmap, TPS65910_RTC_CTRL, rtc_reg);
	if (ret < 0)
		return ret;

	platform_set_drvdata(pdev, tps_rtc);

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;

	ret = devm_request_threaded_irq(&pdev->dev, irq, NULL,
		tps65910_rtc_interrupt, IRQF_TRIGGER_LOW,
		dev_name(&pdev->dev), &pdev->dev);
	if (ret < 0)
		irq = -1;

	tps_rtc->irq = irq;
	if (irq != -1) {
		if (device_property_present(tps65910->dev, "wakeup-source"))
			device_init_wakeup(&pdev->dev, 1);
		else
			device_set_wakeup_capable(&pdev->dev, 1);
	} else {
		clear_bit(RTC_FEATURE_ALARM, tps_rtc->rtc->features);
	}

	tps_rtc->rtc->ops = &tps65910_rtc_ops;
	tps_rtc->rtc->range_min = RTC_TIMESTAMP_BEGIN_2000;
	tps_rtc->rtc->range_max = RTC_TIMESTAMP_END_2099;

	return devm_rtc_register_device(tps_rtc->rtc);
}

#ifdef CONFIG_PM_SLEEP
static int tps65910_rtc_suspend(struct device *dev)
{
	struct tps65910_rtc *tps_rtc = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		enable_irq_wake(tps_rtc->irq);
	return 0;
}

static int tps65910_rtc_resume(struct device *dev)
{
	struct tps65910_rtc *tps_rtc = dev_get_drvdata(dev);

	if (device_may_wakeup(dev))
		disable_irq_wake(tps_rtc->irq);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(tps65910_rtc_pm_ops, tps65910_rtc_suspend,
			tps65910_rtc_resume);

static struct platform_driver tps65910_rtc_driver = {
	.probe = tps65910_rtc_probe,
	.driver = {
		.name = "tps65910-rtc",
		.pm = &tps65910_rtc_pm_ops,
	},
};

module_platform_driver(tps65910_rtc_driver);

MODULE_ALIAS("platform:tps65910-rtc");
MODULE_AUTHOR("Venu Byravarasu <[email protected]>");
MODULE_LICENSE("GPL");
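/*
 * Editor's note, not part of the upstream driver: a worked example of the
 * offset/calibration conversion above, using only the constants defined in
 * this file (TICKS_PER_HOUR = 32768 * 3600 = 117964800, PPB_MULT = 10^9).
 *
 * One compensation tick added per hour changes the rate by
 *   1 / 117964800 of an hour = 10^9 / 117964800 ppb ~= 8.48 ppb,
 * so calibration = 1 maps (after rounding and the sign swap) to
 * offset = -8 ppb. At the extremes, calibration = -32768 gives
 * 32768 * 10^9 / 117964800 ~= 277778 ppb and calibration = 32766 gives
 * ~= -277761 ppb, which is exactly where the MAX_OFFSET and MIN_OFFSET
 * bounds checked in tps65910_set_offset() come from.
 */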
linux-master
drivers/rtc/rtc-tps65910.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Au1xxx counter0 (aka Time-Of-Year counter) RTC interface driver.
 *
 * Copyright (C) 2008 Manuel Lauss <[email protected]>
 */

/* All current Au1xxx SoCs have 2 counters fed by an external 32.768 kHz
 * crystal. Counter 0, which keeps counting during sleep/powerdown, is
 * used to count seconds since the beginning of the unix epoch.
 *
 * The counters must be configured and enabled by bootloader/board code;
 * no checks as to whether they really get a proper 32.768kHz clock are
 * made as this would take far too long.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/rtc.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <asm/mach-au1x00/au1000.h>

/* 32kHz clock enabled and detected */
#define CNTR_OK (SYS_CNTRL_E0 | SYS_CNTRL_32S)

static int au1xtoy_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	unsigned long t;

	t = alchemy_rdsys(AU1000_SYS_TOYREAD);

	rtc_time64_to_tm(t, tm);

	return 0;
}

static int au1xtoy_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	unsigned long t;

	t = rtc_tm_to_time64(tm);

	alchemy_wrsys(t, AU1000_SYS_TOYWRITE);

	/* wait for the pending register write to succeed. This can
	 * take up to 6 seconds...
	 */
	while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C0S)
		msleep(1);

	return 0;
}

static const struct rtc_class_ops au1xtoy_rtc_ops = {
	.read_time = au1xtoy_rtc_read_time,
	.set_time = au1xtoy_rtc_set_time,
};

static int au1xtoy_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtcdev;
	unsigned long t;

	t = alchemy_rdsys(AU1000_SYS_CNTRCTRL);
	if (!(t & CNTR_OK)) {
		dev_err(&pdev->dev, "counters not working; aborting.\n");
		return -ENODEV;
	}

	/* set counter0 tickrate to 1Hz if necessary */
	if (alchemy_rdsys(AU1000_SYS_TOYTRIM) != 32767) {
		/* wait until hardware gives access to TRIM register */
		t = 0x00100000;
		while ((alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_T0S) &&
		       --t)
			msleep(1);

		if (!t) {
			/* timed out waiting for register access; assume
			 * counters are unusable.
			 */
			dev_err(&pdev->dev, "timeout waiting for access\n");
			return -ETIMEDOUT;
		}

		/* set 1Hz TOY tick rate */
		alchemy_wrsys(32767, AU1000_SYS_TOYTRIM);
	}

	/* wait until the hardware allows writes to the counter reg */
	while (alchemy_rdsys(AU1000_SYS_CNTRCTRL) & SYS_CNTRL_C0S)
		msleep(1);

	rtcdev = devm_rtc_allocate_device(&pdev->dev);
	if (IS_ERR(rtcdev))
		return PTR_ERR(rtcdev);

	rtcdev->ops = &au1xtoy_rtc_ops;
	rtcdev->range_max = U32_MAX;
	platform_set_drvdata(pdev, rtcdev);

	return devm_rtc_register_device(rtcdev);
}

static struct platform_driver au1xrtc_driver = {
	.driver = {
		.name = "rtc-au1xxx",
	},
};

module_platform_driver_probe(au1xrtc_driver, au1xtoy_rtc_probe);

MODULE_DESCRIPTION("Au1xxx TOY-counter-based RTC driver");
MODULE_AUTHOR("Manuel Lauss <[email protected]>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:rtc-au1xxx");
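/*
 * Editor's note, not part of the upstream driver: the 32767 written to
 * TOYTRIM above is the tick-rate divider reload value. Assuming the usual
 * divide-by-(TRIM + 1) behaviour of the Alchemy trim registers (which the
 * `!= 32767` check above implies), the 32.768 kHz crystal input divided by
 * 32767 + 1 = 32768 yields the 1 Hz tick that lets TOYREAD count whole
 * seconds since the epoch.
 */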
linux-master
drivers/rtc/rtc-au1xxx.c