python_code: string (lengths 0–1.8M)
repo_name: string (7 distinct values)
file_path: string (lengths 5–99)
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Jakub Kicinski <[email protected]> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/usb.h> #include "mt7601u.h" #include "usb.h" #include "trace.h" static const struct usb_device_id mt7601u_device_table[] = { { USB_DEVICE(0x0b05, 0x17d3) }, { USB_DEVICE(0x0e8d, 0x760a) }, { USB_DEVICE(0x0e8d, 0x760b) }, { USB_DEVICE(0x13d3, 0x3431) }, { USB_DEVICE(0x13d3, 0x3434) }, { USB_DEVICE(0x148f, 0x7601) }, { USB_DEVICE(0x148f, 0x760a) }, { USB_DEVICE(0x148f, 0x760b) }, { USB_DEVICE(0x148f, 0x760c) }, { USB_DEVICE(0x148f, 0x760d) }, { USB_DEVICE(0x2001, 0x3d04) }, { USB_DEVICE(0x2717, 0x4106) }, { USB_DEVICE(0x2955, 0x0001) }, { USB_DEVICE(0x2955, 0x1001) }, { USB_DEVICE(0x2955, 0x1003) }, { USB_DEVICE(0x2a5f, 0x1000) }, { USB_DEVICE(0x7392, 0x7710) }, { 0, } }; bool mt7601u_usb_alloc_buf(struct mt7601u_dev *dev, size_t len, struct mt7601u_dma_buf *buf) { struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); buf->len = len; buf->urb = usb_alloc_urb(0, GFP_KERNEL); buf->buf = usb_alloc_coherent(usb_dev, buf->len, GFP_KERNEL, &buf->dma); return !buf->urb || !buf->buf; } void mt7601u_usb_free_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf *buf) { struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); usb_free_coherent(usb_dev, buf->len, buf->buf, buf->dma); usb_free_urb(buf->urb); } int mt7601u_usb_submit_buf(struct mt7601u_dev *dev, int dir, int ep_idx, struct mt7601u_dma_buf *buf, gfp_t gfp, usb_complete_t complete_fn, void *context) { struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); unsigned pipe; int ret; if (dir == USB_DIR_IN) pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[ep_idx]); else pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep_idx]); usb_fill_bulk_urb(buf->urb, usb_dev, pipe, buf->buf, buf->len, complete_fn, context); buf->urb->transfer_dma = buf->dma; buf->urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; trace_mt_submit_urb(dev, buf->urb); ret = usb_submit_urb(buf->urb, gfp); if (ret) dev_err(dev->dev, "Error: submit URB dir:%d ep:%d failed:%d\n", dir, ep_idx, ret); return ret; } void mt7601u_complete_urb(struct urb *urb) { struct completion *cmpl = urb->context; complete(cmpl); } int mt7601u_vendor_request(struct mt7601u_dev *dev, const u8 req, const u8 direction, const u16 val, const u16 offset, void *buf, const size_t buflen) { int i, ret; struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); const u8 req_type = direction | USB_TYPE_VENDOR | USB_RECIP_DEVICE; const unsigned int pipe = (direction == USB_DIR_IN) ? 
usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); for (i = 0; i < MT_VEND_REQ_MAX_RETRY; i++) { ret = usb_control_msg(usb_dev, pipe, req, req_type, val, offset, buf, buflen, MT_VEND_REQ_TOUT_MS); trace_mt_vend_req(dev, pipe, req, req_type, val, offset, buf, buflen, ret); if (ret == -ENODEV) set_bit(MT7601U_STATE_REMOVED, &dev->state); if (ret >= 0 || ret == -ENODEV) return ret; msleep(5); } dev_err(dev->dev, "Vendor request req:%02x off:%04x failed:%d\n", req, offset, ret); return ret; } void mt7601u_vendor_reset(struct mt7601u_dev *dev) { mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, MT_VEND_DEV_MODE_RESET, 0, NULL, 0); } /* should be called with vendor_req_mutex held */ static u32 __mt7601u_rr(struct mt7601u_dev *dev, u32 offset) { int ret; u32 val = ~0; WARN_ONCE(offset > USHRT_MAX, "read high off:%08x", offset); ret = mt7601u_vendor_request(dev, MT_VEND_MULTI_READ, USB_DIR_IN, 0, offset, dev->vend_buf, MT_VEND_BUF); if (ret == MT_VEND_BUF) val = get_unaligned_le32(dev->vend_buf); else if (ret > 0) dev_err(dev->dev, "Error: wrong size read:%d off:%08x\n", ret, offset); trace_reg_read(dev, offset, val); return val; } u32 mt7601u_rr(struct mt7601u_dev *dev, u32 offset) { u32 ret; mutex_lock(&dev->vendor_req_mutex); ret = __mt7601u_rr(dev, offset); mutex_unlock(&dev->vendor_req_mutex); return ret; } /* should be called with vendor_req_mutex held */ static int __mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, const u16 offset, const u32 val) { int ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, val & 0xffff, offset, NULL, 0); if (!ret) ret = mt7601u_vendor_request(dev, req, USB_DIR_OUT, val >> 16, offset + 2, NULL, 0); trace_reg_write(dev, offset, val); return ret; } int mt7601u_vendor_single_wr(struct mt7601u_dev *dev, const u8 req, const u16 offset, const u32 val) { int ret; mutex_lock(&dev->vendor_req_mutex); ret = __mt7601u_vendor_single_wr(dev, req, offset, val); mutex_unlock(&dev->vendor_req_mutex); return ret; } void mt7601u_wr(struct mt7601u_dev *dev, u32 offset, u32 val) { WARN_ONCE(offset > USHRT_MAX, "write high off:%08x", offset); mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val); } u32 mt7601u_rmw(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) { mutex_lock(&dev->vendor_req_mutex); val |= __mt7601u_rr(dev, offset) & ~mask; __mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val); mutex_unlock(&dev->vendor_req_mutex); return val; } u32 mt7601u_rmc(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val) { u32 reg; mutex_lock(&dev->vendor_req_mutex); reg = __mt7601u_rr(dev, offset); val |= reg & ~mask; if (reg != val) __mt7601u_vendor_single_wr(dev, MT_VEND_WRITE, offset, val); mutex_unlock(&dev->vendor_req_mutex); return val; } void mt7601u_wr_copy(struct mt7601u_dev *dev, u32 offset, const void *data, int len) { WARN_ONCE(offset & 3, "unaligned write copy off:%08x", offset); WARN_ONCE(len & 3, "short write copy off:%08x", offset); mt7601u_burst_write_regs(dev, offset, data, len / 4); } void mt7601u_addr_wr(struct mt7601u_dev *dev, const u32 offset, const u8 *addr) { mt7601u_wr(dev, offset, get_unaligned_le32(addr)); mt7601u_wr(dev, offset + 4, addr[4] | addr[5] << 8); } static int mt7601u_assign_pipes(struct usb_interface *usb_intf, struct mt7601u_dev *dev) { struct usb_endpoint_descriptor *ep_desc; struct usb_host_interface *intf_desc = usb_intf->cur_altsetting; unsigned i, ep_i = 0, ep_o = 0; BUILD_BUG_ON(sizeof(dev->in_eps) < __MT_EP_IN_MAX); BUILD_BUG_ON(sizeof(dev->out_eps) < __MT_EP_OUT_MAX); for (i = 0; i < 
intf_desc->desc.bNumEndpoints; i++) { ep_desc = &intf_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep_desc) && ep_i++ < __MT_EP_IN_MAX) { dev->in_eps[ep_i - 1] = usb_endpoint_num(ep_desc); dev->in_max_packet = usb_endpoint_maxp(ep_desc); /* Note: this is ignored by usb sub-system but vendor * code does it. We can drop this at some point. */ dev->in_eps[ep_i - 1] |= USB_DIR_IN; } else if (usb_endpoint_is_bulk_out(ep_desc) && ep_o++ < __MT_EP_OUT_MAX) { dev->out_eps[ep_o - 1] = usb_endpoint_num(ep_desc); dev->out_max_packet = usb_endpoint_maxp(ep_desc); } } if (ep_i != __MT_EP_IN_MAX || ep_o != __MT_EP_OUT_MAX) { dev_err(dev->dev, "Error: wrong pipe number in:%d out:%d\n", ep_i, ep_o); return -EINVAL; } return 0; } static int mt7601u_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { struct usb_device *usb_dev = interface_to_usbdev(usb_intf); struct mt7601u_dev *dev; u32 asic_rev, mac_rev; int ret; dev = mt7601u_alloc_device(&usb_intf->dev); if (!dev) return -ENOMEM; usb_dev = usb_get_dev(usb_dev); usb_reset_device(usb_dev); usb_set_intfdata(usb_intf, dev); dev->vend_buf = devm_kmalloc(dev->dev, MT_VEND_BUF, GFP_KERNEL); if (!dev->vend_buf) { ret = -ENOMEM; goto err; } ret = mt7601u_assign_pipes(usb_intf, dev); if (ret) goto err; ret = mt7601u_wait_asic_ready(dev); if (ret) goto err; asic_rev = mt7601u_rr(dev, MT_ASIC_VERSION); mac_rev = mt7601u_rr(dev, MT_MAC_CSR0); dev_info(dev->dev, "ASIC revision: %08x MAC revision: %08x\n", asic_rev, mac_rev); if ((asic_rev >> 16) != 0x7601) { ret = -ENODEV; goto err; } /* Note: vendor driver skips this check for MT7601U */ if (!(mt7601u_rr(dev, MT_EFUSE_CTRL) & MT_EFUSE_CTRL_SEL)) dev_warn(dev->dev, "Warning: eFUSE not present\n"); ret = mt7601u_init_hardware(dev); if (ret) goto err; ret = mt7601u_register_device(dev); if (ret) goto err_hw; set_bit(MT7601U_STATE_INITIALIZED, &dev->state); return 0; err_hw: mt7601u_cleanup(dev); err: usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); destroy_workqueue(dev->stat_wq); ieee80211_free_hw(dev->hw); return ret; } static void mt7601u_disconnect(struct usb_interface *usb_intf) { struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); ieee80211_unregister_hw(dev->hw); mt7601u_cleanup(dev); usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); destroy_workqueue(dev->stat_wq); ieee80211_free_hw(dev->hw); } static int mt7601u_suspend(struct usb_interface *usb_intf, pm_message_t state) { struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); mt7601u_cleanup(dev); return 0; } static int mt7601u_resume(struct usb_interface *usb_intf) { struct mt7601u_dev *dev = usb_get_intfdata(usb_intf); int ret; ret = mt7601u_init_hardware(dev); if (ret) return ret; set_bit(MT7601U_STATE_INITIALIZED, &dev->state); return 0; } MODULE_DEVICE_TABLE(usb, mt7601u_device_table); MODULE_FIRMWARE(MT7601U_FIRMWARE); MODULE_LICENSE("GPL"); static struct usb_driver mt7601u_driver = { .name = KBUILD_MODNAME, .id_table = mt7601u_device_table, .probe = mt7601u_probe, .disconnect = mt7601u_disconnect, .suspend = mt7601u_suspend, .resume = mt7601u_resume, .reset_resume = mt7601u_resume, .soft_unbind = 1, .disable_hub_initiated_lpm = 1, }; module_usb_driver(mt7601u_driver);
linux-master
drivers/net/wireless/mediatek/mt7601u/usb.c
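The register-access helpers in usb.c above split every 32-bit register write into two 16-bit vendor control requests (low half at the register offset, high half at offset + 2) and read registers back as four little-endian bytes. The sketch below is illustrative only: it replays that convention against a fake in-memory register file, and the helper names (vendor_wr16, reg_wr32, reg_rd32) are made up for the example, not driver symbols.

/*
 * Illustrative only -- not part of the dataset or the mt7601u driver.
 * Mimics the split used by __mt7601u_vendor_single_wr()/__mt7601u_rr().
 */
#include <stdint.h>
#include <stdio.h>

static uint8_t regs[0x10000];	/* fake little-endian register space */

/* stand-in for one 16-bit MT_VEND_WRITE control transfer */
static void vendor_wr16(uint16_t offset, uint16_t val)
{
	regs[offset] = val & 0xff;
	regs[offset + 1] = val >> 8;
}

static void reg_wr32(uint16_t offset, uint32_t val)
{
	vendor_wr16(offset, val & 0xffff);	/* low 16 bits at offset */
	vendor_wr16(offset + 2, val >> 16);	/* high 16 bits at offset + 2 */
}

/* stand-in for MT_VEND_MULTI_READ returning 4 little-endian bytes */
static uint32_t reg_rd32(uint16_t offset)
{
	return regs[offset] | regs[offset + 1] << 8 |
	       regs[offset + 2] << 16 | (uint32_t)regs[offset + 3] << 24;
}

int main(void)
{
	reg_wr32(0x1004, 0xdeadbeef);
	printf("0x%08x\n", reg_rd32(0x1004));	/* prints 0xdeadbeef */
	return 0;
}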
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 Felix Fietkau <[email protected]> * Copyright (C) 2015 Jakub Kicinski <[email protected]> */ #include <linux/debugfs.h> #include "mt7601u.h" #include "eeprom.h" static int mt76_reg_set(void *data, u64 val) { struct mt7601u_dev *dev = data; mt76_wr(dev, dev->debugfs_reg, val); return 0; } static int mt76_reg_get(void *data, u64 *val) { struct mt7601u_dev *dev = data; *val = mt76_rr(dev, dev->debugfs_reg); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(fops_regval, mt76_reg_get, mt76_reg_set, "0x%08llx\n"); static int mt7601u_ampdu_stat_show(struct seq_file *file, void *data) { struct mt7601u_dev *dev = file->private; int i, j; #define stat_printf(grp, off, name) \ seq_printf(file, #name ":\t%llu\n", dev->stats.grp[off]) stat_printf(rx_stat, 0, rx_crc_err); stat_printf(rx_stat, 1, rx_phy_err); stat_printf(rx_stat, 2, rx_false_cca); stat_printf(rx_stat, 3, rx_plcp_err); stat_printf(rx_stat, 4, rx_fifo_overflow); stat_printf(rx_stat, 5, rx_duplicate); stat_printf(tx_stat, 0, tx_fail_cnt); stat_printf(tx_stat, 1, tx_bcn_cnt); stat_printf(tx_stat, 2, tx_success); stat_printf(tx_stat, 3, tx_retransmit); stat_printf(tx_stat, 4, tx_zero_len); stat_printf(tx_stat, 5, tx_underflow); stat_printf(aggr_stat, 0, non_aggr_tx); stat_printf(aggr_stat, 1, aggr_tx); stat_printf(zero_len_del, 0, tx_zero_len_del); stat_printf(zero_len_del, 1, rx_zero_len_del); #undef stat_printf seq_puts(file, "Aggregations stats:\n"); for (i = 0; i < 4; i++) { for (j = 0; j < 8; j++) seq_printf(file, "%08llx ", dev->stats.aggr_n[i * 8 + j]); seq_putc(file, '\n'); } seq_printf(file, "recent average AMPDU len: %d\n", atomic_read(&dev->avg_ampdu_len)); return 0; } DEFINE_SHOW_ATTRIBUTE(mt7601u_ampdu_stat); static int mt7601u_eeprom_param_show(struct seq_file *file, void *data) { struct mt7601u_dev *dev = file->private; struct mt7601u_rate_power *rp = &dev->ee->power_rate_table; struct tssi_data *td = &dev->ee->tssi_data; int i; seq_printf(file, "RF freq offset: %hhx\n", dev->ee->rf_freq_off); seq_printf(file, "RSSI offset: %hhx %hhx\n", dev->ee->rssi_offset[0], dev->ee->rssi_offset[1]); seq_printf(file, "Reference temp: %hhx\n", dev->ee->ref_temp); seq_printf(file, "LNA gain: %hhx\n", dev->ee->lna_gain); seq_printf(file, "Reg channels: %hhu-%d\n", dev->ee->reg.start, dev->ee->reg.start + dev->ee->reg.num - 1); seq_puts(file, "Per rate power:\n"); for (i = 0; i < 2; i++) seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n", rp->cck[i].raw, rp->cck[i].bw20, rp->cck[i].bw40); for (i = 0; i < 4; i++) seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n", rp->ofdm[i].raw, rp->ofdm[i].bw20, rp->ofdm[i].bw40); for (i = 0; i < 4; i++) seq_printf(file, "\t raw:%02hhx bw20:%02hhx bw40:%02hhx\n", rp->ht[i].raw, rp->ht[i].bw20, rp->ht[i].bw40); seq_puts(file, "Per channel power:\n"); for (i = 0; i < 7; i++) seq_printf(file, "\t tx_power ch%u:%02hhx ch%u:%02hhx\n", i * 2 + 1, dev->ee->chan_pwr[i * 2], i * 2 + 2, dev->ee->chan_pwr[i * 2 + 1]); if (!dev->ee->tssi_enabled) return 0; seq_puts(file, "TSSI:\n"); seq_printf(file, "\t slope:%02hhx\n", td->slope); seq_printf(file, "\t offset=%02hhx %02hhx %02hhx\n", td->offset[0], td->offset[1], td->offset[2]); seq_printf(file, "\t delta_off:%08x\n", td->tx0_delta_offset); return 0; } DEFINE_SHOW_ATTRIBUTE(mt7601u_eeprom_param); void mt7601u_init_debugfs(struct mt7601u_dev *dev) { struct dentry *dir; dir = debugfs_create_dir("mt7601u", dev->hw->wiphy->debugfsdir); debugfs_create_u8("temperature", 0400, dir, &dev->raw_temp); 
debugfs_create_u32("temp_mode", 0400, dir, &dev->temp_mode); debugfs_create_u32("regidx", 0600, dir, &dev->debugfs_reg); debugfs_create_file("regval", 0600, dir, dev, &fops_regval); debugfs_create_file("ampdu_stat", 0400, dir, dev, &mt7601u_ampdu_stat_fops); debugfs_create_file("eeprom_param", 0400, dir, dev, &mt7601u_eeprom_param_fops); }
linux-master
drivers/net/wireless/mediatek/mt7601u/debugfs.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 Felix Fietkau <[email protected]> * Copyright (C) 2015 Jakub Kicinski <[email protected]> */ #include "mt7601u.h" #include "trace.h" #include <linux/etherdevice.h> void mt7601u_set_macaddr(struct mt7601u_dev *dev, const u8 *addr) { ether_addr_copy(dev->macaddr, addr); if (!is_valid_ether_addr(dev->macaddr)) { eth_random_addr(dev->macaddr); dev_info(dev->dev, "Invalid MAC address, using random address %pM\n", dev->macaddr); } mt76_wr(dev, MT_MAC_ADDR_DW0, get_unaligned_le32(dev->macaddr)); mt76_wr(dev, MT_MAC_ADDR_DW1, get_unaligned_le16(dev->macaddr + 4) | FIELD_PREP(MT_MAC_ADDR_DW1_U2ME_MASK, 0xff)); } static void mt76_mac_process_tx_rate(struct ieee80211_tx_rate *txrate, u16 rate) { u8 idx = FIELD_GET(MT_TXWI_RATE_MCS, rate); txrate->idx = 0; txrate->flags = 0; txrate->count = 1; switch (FIELD_GET(MT_TXWI_RATE_PHY_MODE, rate)) { case MT_PHY_TYPE_OFDM: txrate->idx = idx + 4; return; case MT_PHY_TYPE_CCK: if (idx >= 8) idx -= 8; txrate->idx = idx; return; case MT_PHY_TYPE_HT_GF: txrate->flags |= IEEE80211_TX_RC_GREEN_FIELD; fallthrough; case MT_PHY_TYPE_HT: txrate->flags |= IEEE80211_TX_RC_MCS; txrate->idx = idx; break; default: WARN_ON(1); return; } if (FIELD_GET(MT_TXWI_RATE_BW, rate) == MT_PHY_BW_40) txrate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; if (rate & MT_TXWI_RATE_SGI) txrate->flags |= IEEE80211_TX_RC_SHORT_GI; } static void mt76_mac_fill_tx_status(struct mt7601u_dev *dev, struct ieee80211_tx_info *info, struct mt76_tx_status *st) { struct ieee80211_tx_rate *rate = info->status.rates; int cur_idx, last_rate; int i; last_rate = min_t(int, st->retry, IEEE80211_TX_MAX_RATES - 1); mt76_mac_process_tx_rate(&rate[last_rate], st->rate); if (last_rate < IEEE80211_TX_MAX_RATES - 1) rate[last_rate + 1].idx = -1; cur_idx = rate[last_rate].idx + st->retry; for (i = 0; i <= last_rate; i++) { rate[i].flags = rate[last_rate].flags; rate[i].idx = max_t(int, 0, cur_idx - i); rate[i].count = 1; } if (last_rate > 0) rate[last_rate - 1].count = st->retry + 1 - last_rate; info->status.ampdu_len = 1; info->status.ampdu_ack_len = st->success; if (st->is_probe) info->flags |= IEEE80211_TX_CTL_RATE_CTRL_PROBE; if (st->aggr) info->flags |= IEEE80211_TX_CTL_AMPDU | IEEE80211_TX_STAT_AMPDU; if (!st->ack_req) info->flags |= IEEE80211_TX_CTL_NO_ACK; else if (st->success) info->flags |= IEEE80211_TX_STAT_ACK; } u16 mt76_mac_tx_rate_val(struct mt7601u_dev *dev, const struct ieee80211_tx_rate *rate, u8 *nss_val) { u16 rateval; u8 phy, rate_idx; u8 nss = 1; u8 bw = 0; if (rate->flags & IEEE80211_TX_RC_MCS) { rate_idx = rate->idx; nss = 1 + (rate->idx >> 3); phy = MT_PHY_TYPE_HT; if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) phy = MT_PHY_TYPE_HT_GF; if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) bw = 1; } else { const struct ieee80211_rate *r; int band = dev->chandef.chan->band; u16 val; r = &dev->hw->wiphy->bands[band]->bitrates[rate->idx]; if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) val = r->hw_value_short; else val = r->hw_value; phy = val >> 8; rate_idx = val & 0xff; bw = 0; } rateval = FIELD_PREP(MT_RXWI_RATE_MCS, rate_idx); rateval |= FIELD_PREP(MT_RXWI_RATE_PHY, phy); rateval |= FIELD_PREP(MT_RXWI_RATE_BW, bw); if (rate->flags & IEEE80211_TX_RC_SHORT_GI) rateval |= MT_RXWI_RATE_SGI; *nss_val = nss; return rateval; } void mt76_mac_wcid_set_rate(struct mt7601u_dev *dev, struct mt76_wcid *wcid, const struct ieee80211_tx_rate *rate) { unsigned long flags; spin_lock_irqsave(&dev->lock, flags); wcid->tx_rate = mt76_mac_tx_rate_val(dev, 
rate, &wcid->tx_rate_nss); wcid->tx_rate_set = true; spin_unlock_irqrestore(&dev->lock, flags); } struct mt76_tx_status mt7601u_mac_fetch_tx_status(struct mt7601u_dev *dev) { struct mt76_tx_status stat = {}; u32 val; val = mt7601u_rr(dev, MT_TX_STAT_FIFO); stat.valid = !!(val & MT_TX_STAT_FIFO_VALID); stat.success = !!(val & MT_TX_STAT_FIFO_SUCCESS); stat.aggr = !!(val & MT_TX_STAT_FIFO_AGGR); stat.ack_req = !!(val & MT_TX_STAT_FIFO_ACKREQ); stat.pktid = FIELD_GET(MT_TX_STAT_FIFO_PID_TYPE, val); stat.wcid = FIELD_GET(MT_TX_STAT_FIFO_WCID, val); stat.rate = FIELD_GET(MT_TX_STAT_FIFO_RATE, val); return stat; } void mt76_send_tx_status(struct mt7601u_dev *dev, struct mt76_tx_status *stat) { struct ieee80211_tx_info info = {}; struct ieee80211_sta *sta = NULL; struct mt76_wcid *wcid = NULL; void *msta; rcu_read_lock(); if (stat->wcid < ARRAY_SIZE(dev->wcid)) wcid = rcu_dereference(dev->wcid[stat->wcid]); if (wcid) { msta = container_of(wcid, struct mt76_sta, wcid); sta = container_of(msta, struct ieee80211_sta, drv_priv); } mt76_mac_fill_tx_status(dev, &info, stat); spin_lock_bh(&dev->mac_lock); ieee80211_tx_status_noskb(dev->hw, sta, &info); spin_unlock_bh(&dev->mac_lock); rcu_read_unlock(); } void mt7601u_mac_set_protection(struct mt7601u_dev *dev, bool legacy_prot, int ht_mode) { int mode = ht_mode & IEEE80211_HT_OP_MODE_PROTECTION; bool non_gf = !!(ht_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); u32 prot[6]; bool ht_rts[4] = {}; int i; prot[0] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL | MT_PROT_RTS_THR_EN; prot[1] = prot[0]; if (legacy_prot) prot[1] |= MT_PROT_CTRL_CTS2SELF; prot[2] = prot[4] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_BW20; prot[3] = prot[5] = MT_PROT_NAV_SHORT | MT_PROT_TXOP_ALLOW_ALL; if (legacy_prot) { prot[2] |= MT_PROT_RATE_CCK_11; prot[3] |= MT_PROT_RATE_CCK_11; prot[4] |= MT_PROT_RATE_CCK_11; prot[5] |= MT_PROT_RATE_CCK_11; } else { prot[2] |= MT_PROT_RATE_OFDM_24; prot[3] |= MT_PROT_RATE_DUP_OFDM_24; prot[4] |= MT_PROT_RATE_OFDM_24; prot[5] |= MT_PROT_RATE_DUP_OFDM_24; } switch (mode) { case IEEE80211_HT_OP_MODE_PROTECTION_NONE: break; case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true; break; case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: ht_rts[1] = ht_rts[3] = true; break; case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: ht_rts[0] = ht_rts[1] = ht_rts[2] = ht_rts[3] = true; break; } if (non_gf) ht_rts[2] = ht_rts[3] = true; for (i = 0; i < 4; i++) if (ht_rts[i]) prot[i + 2] |= MT_PROT_CTRL_RTS_CTS; for (i = 0; i < 6; i++) mt7601u_wr(dev, MT_CCK_PROT_CFG + i * 4, prot[i]); } void mt7601u_mac_set_short_preamble(struct mt7601u_dev *dev, bool short_preamb) { if (short_preamb) mt76_set(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); else mt76_clear(dev, MT_AUTO_RSP_CFG, MT_AUTO_RSP_PREAMB_SHORT); } void mt7601u_mac_config_tsf(struct mt7601u_dev *dev, bool enable, int interval) { u32 val = mt7601u_rr(dev, MT_BEACON_TIME_CFG); val &= ~(MT_BEACON_TIME_CFG_TIMER_EN | MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN); if (!enable) { mt7601u_wr(dev, MT_BEACON_TIME_CFG, val); return; } val &= ~MT_BEACON_TIME_CFG_INTVAL; val |= FIELD_PREP(MT_BEACON_TIME_CFG_INTVAL, interval << 4) | MT_BEACON_TIME_CFG_TIMER_EN | MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN; } static void mt7601u_check_mac_err(struct mt7601u_dev *dev) { u32 val = mt7601u_rr(dev, 0x10f4); if (!(val & BIT(29)) || !(val & (BIT(7) | BIT(5)))) return; dev_err(dev->dev, "Error: MAC specific condition occurred\n"); mt76_set(dev, 
MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); udelay(10); mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_RESET_CSR); } void mt7601u_mac_work(struct work_struct *work) { struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, mac_work.work); struct { u32 addr_base; u32 span; u64 *stat_base; } spans[] = { { MT_RX_STA_CNT0, 3, dev->stats.rx_stat }, { MT_TX_STA_CNT0, 3, dev->stats.tx_stat }, { MT_TX_AGG_STAT, 1, dev->stats.aggr_stat }, { MT_MPDU_DENSITY_CNT, 1, dev->stats.zero_len_del }, { MT_TX_AGG_CNT_BASE0, 8, &dev->stats.aggr_n[0] }, { MT_TX_AGG_CNT_BASE1, 8, &dev->stats.aggr_n[16] }, }; u32 sum, n; int i, j, k; /* Note: using MCU_RANDOM_READ is actually slower then reading all the * registers by hand. MCU takes ca. 20ms to complete read of 24 * registers while reading them one by one will takes roughly * 24*200us =~ 5ms. */ k = 0; n = 0; sum = 0; for (i = 0; i < ARRAY_SIZE(spans); i++) for (j = 0; j < spans[i].span; j++) { u32 val = mt7601u_rr(dev, spans[i].addr_base + j * 4); spans[i].stat_base[j * 2] += val & 0xffff; spans[i].stat_base[j * 2 + 1] += val >> 16; /* Calculate average AMPDU length */ if (spans[i].addr_base != MT_TX_AGG_CNT_BASE0 && spans[i].addr_base != MT_TX_AGG_CNT_BASE1) continue; n += (val >> 16) + (val & 0xffff); sum += (val & 0xffff) * (1 + k * 2) + (val >> 16) * (2 + k * 2); k++; } atomic_set(&dev->avg_ampdu_len, n ? DIV_ROUND_CLOSEST(sum, n) : 1); mt7601u_check_mac_err(dev); ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, 10 * HZ); } void mt7601u_mac_wcid_setup(struct mt7601u_dev *dev, u8 idx, u8 vif_idx, u8 *mac) { u8 zmac[ETH_ALEN] = {}; u32 attr; attr = FIELD_PREP(MT_WCID_ATTR_BSS_IDX, vif_idx & 7) | FIELD_PREP(MT_WCID_ATTR_BSS_IDX_EXT, !!(vif_idx & 8)); mt76_wr(dev, MT_WCID_ATTR(idx), attr); if (mac) memcpy(zmac, mac, sizeof(zmac)); mt7601u_addr_wr(dev, MT_WCID_ADDR(idx), zmac); } void mt7601u_mac_set_ampdu_factor(struct mt7601u_dev *dev) { struct ieee80211_sta *sta; struct mt76_wcid *wcid; void *msta; u8 min_factor = 3; int i; rcu_read_lock(); for (i = 0; i < ARRAY_SIZE(dev->wcid); i++) { wcid = rcu_dereference(dev->wcid[i]); if (!wcid) continue; msta = container_of(wcid, struct mt76_sta, wcid); sta = container_of(msta, struct ieee80211_sta, drv_priv); min_factor = min(min_factor, sta->deflink.ht_cap.ampdu_factor); } rcu_read_unlock(); mt7601u_wr(dev, MT_MAX_LEN_CFG, 0xa0fff | FIELD_PREP(MT_MAX_LEN_CFG_AMPDU, min_factor)); } static void mt76_mac_process_rate(struct ieee80211_rx_status *status, u16 rate) { u8 idx = FIELD_GET(MT_RXWI_RATE_MCS, rate); switch (FIELD_GET(MT_RXWI_RATE_PHY, rate)) { case MT_PHY_TYPE_OFDM: if (WARN_ON(idx >= 8)) idx = 0; idx += 4; status->rate_idx = idx; return; case MT_PHY_TYPE_CCK: if (idx >= 8) { idx -= 8; status->enc_flags |= RX_ENC_FLAG_SHORTPRE; } if (WARN_ON(idx >= 4)) idx = 0; status->rate_idx = idx; return; case MT_PHY_TYPE_HT_GF: status->enc_flags |= RX_ENC_FLAG_HT_GF; fallthrough; case MT_PHY_TYPE_HT: status->encoding = RX_ENC_HT; status->rate_idx = idx; break; default: WARN_ON(1); return; } if (rate & MT_RXWI_RATE_SGI) status->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rate & MT_RXWI_RATE_STBC) status->enc_flags |= 1 << RX_ENC_FLAG_STBC_SHIFT; if (rate & MT_RXWI_RATE_BW) status->bw = RATE_INFO_BW_40; } static void mt7601u_rx_monitor_beacon(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, u16 rate, int rssi) { dev->bcn_freq_off = rxwi->freq_off; dev->bcn_phy_mode = FIELD_GET(MT_RXWI_RATE_PHY, rate); ewma_rssi_add(&dev->avg_rssi, -rssi); } static int mt7601u_rx_is_our_beacon(struct mt7601u_dev *dev, u8 *data) { 
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)data; return ieee80211_is_beacon(hdr->frame_control) && ether_addr_equal(hdr->addr2, dev->ap_bssid); } u32 mt76_mac_process_rx(struct mt7601u_dev *dev, struct sk_buff *skb, u8 *data, void *rxi) { struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb); struct mt7601u_rxwi *rxwi = rxi; u32 len, ctl = le32_to_cpu(rxwi->ctl); u16 rate = le16_to_cpu(rxwi->rate); int rssi; len = FIELD_GET(MT_RXWI_CTL_MPDU_LEN, ctl); if (len < 10) return 0; if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_DECRYPT)) { status->flag |= RX_FLAG_DECRYPTED; status->flag |= RX_FLAG_MMIC_STRIPPED; status->flag |= RX_FLAG_MIC_STRIPPED; status->flag |= RX_FLAG_ICV_STRIPPED; status->flag |= RX_FLAG_IV_STRIPPED; } /* let mac80211 take care of PN validation since apparently * the hardware does not support it */ if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_PN_LEN)) status->flag &= ~RX_FLAG_IV_STRIPPED; status->chains = BIT(0); rssi = mt7601u_phy_get_rssi(dev, rxwi, rate); status->chain_signal[0] = status->signal = rssi; status->freq = dev->chandef.chan->center_freq; status->band = dev->chandef.chan->band; mt76_mac_process_rate(status, rate); spin_lock_bh(&dev->con_mon_lock); if (mt7601u_rx_is_our_beacon(dev, data)) mt7601u_rx_monitor_beacon(dev, rxwi, rate, rssi); else if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_U2M)) ewma_rssi_add(&dev->avg_rssi, -rssi); spin_unlock_bh(&dev->con_mon_lock); return len; } static enum mt76_cipher_type mt76_mac_get_key_info(struct ieee80211_key_conf *key, u8 *key_data) { memset(key_data, 0, 32); if (!key) return MT_CIPHER_NONE; if (key->keylen > 32) return MT_CIPHER_NONE; memcpy(key_data, key->key, key->keylen); switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: return MT_CIPHER_WEP40; case WLAN_CIPHER_SUITE_WEP104: return MT_CIPHER_WEP104; case WLAN_CIPHER_SUITE_TKIP: return MT_CIPHER_TKIP; case WLAN_CIPHER_SUITE_CCMP: return MT_CIPHER_AES_CCMP; default: return MT_CIPHER_NONE; } } int mt76_mac_wcid_set_key(struct mt7601u_dev *dev, u8 idx, struct ieee80211_key_conf *key) { enum mt76_cipher_type cipher; u8 key_data[32]; u8 iv_data[8]; u32 val; cipher = mt76_mac_get_key_info(key, key_data); if (cipher == MT_CIPHER_NONE && key) return -EINVAL; trace_set_key(dev, idx); mt7601u_wr_copy(dev, MT_WCID_KEY(idx), key_data, sizeof(key_data)); memset(iv_data, 0, sizeof(iv_data)); if (key) { iv_data[3] = key->keyidx << 6; if (cipher >= MT_CIPHER_TKIP) { /* Note: start with 1 to comply with spec, * (see comment on common/cmm_wpa.c:4291). 
*/ iv_data[0] |= 1; iv_data[3] |= 0x20; } } mt7601u_wr_copy(dev, MT_WCID_IV(idx), iv_data, sizeof(iv_data)); val = mt7601u_rr(dev, MT_WCID_ATTR(idx)); val &= ~MT_WCID_ATTR_PKEY_MODE & ~MT_WCID_ATTR_PKEY_MODE_EXT; val |= FIELD_PREP(MT_WCID_ATTR_PKEY_MODE, cipher & 7) | FIELD_PREP(MT_WCID_ATTR_PKEY_MODE_EXT, cipher >> 3); val &= ~MT_WCID_ATTR_PAIRWISE; val |= MT_WCID_ATTR_PAIRWISE * !!(key && key->flags & IEEE80211_KEY_FLAG_PAIRWISE); mt7601u_wr(dev, MT_WCID_ATTR(idx), val); return 0; } int mt76_mac_shared_key_setup(struct mt7601u_dev *dev, u8 vif_idx, u8 key_idx, struct ieee80211_key_conf *key) { enum mt76_cipher_type cipher; u8 key_data[32]; u32 val; cipher = mt76_mac_get_key_info(key, key_data); if (cipher == MT_CIPHER_NONE && key) return -EINVAL; trace_set_shared_key(dev, vif_idx, key_idx); mt7601u_wr_copy(dev, MT_SKEY(vif_idx, key_idx), key_data, sizeof(key_data)); val = mt76_rr(dev, MT_SKEY_MODE(vif_idx)); val &= ~(MT_SKEY_MODE_MASK << MT_SKEY_MODE_SHIFT(vif_idx, key_idx)); val |= cipher << MT_SKEY_MODE_SHIFT(vif_idx, key_idx); mt76_wr(dev, MT_SKEY_MODE(vif_idx), val); return 0; }
linux-master
drivers/net/wireless/mediatek/mt7601u/mac.c
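mt7601u_mac_work() in mac.c above folds the MT_TX_AGG_CNT registers into an average A-MPDU length: each 32-bit register packs two 16-bit counters, the low half counting aggregates of 2k+1 frames and the high half aggregates of 2k+2 frames for register index k. Below is a minimal, self-contained restatement of that arithmetic; the counter values are made up for the example.

/* Illustrative only -- restates the averaging loop of mt7601u_mac_work(). */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* pretend reads of MT_TX_AGG_CNT_BASE0/1: (hi << 16) | lo */
	uint32_t aggr_regs[16] = { (5u << 16) | 10, (2u << 16) | 1 };
	uint32_t sum = 0, n = 0;
	int k;

	for (k = 0; k < 16; k++) {
		uint32_t lo = aggr_regs[k] & 0xffff;	/* aggregates of 2k+1 frames */
		uint32_t hi = aggr_regs[k] >> 16;	/* aggregates of 2k+2 frames */

		n += lo + hi;
		sum += lo * (1 + k * 2) + hi * (2 + k * 2);
	}

	/* DIV_ROUND_CLOSEST(sum, n), defaulting to 1 when idle */
	printf("avg A-MPDU len: %u\n", n ? (sum + n / 2) / n : 1);
	return 0;
}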
// SPDX-License-Identifier: GPL-2.0-only /* * (c) Copyright 2002-2010, Ralink Technology, Inc. * Copyright (C) 2014 Felix Fietkau <[email protected]> * Copyright (C) 2015 Jakub Kicinski <[email protected]> */ #include "mt7601u.h" #include "eeprom.h" #include "trace.h" #include "mcu.h" #include "initvals.h" static void mt7601u_set_wlan_state(struct mt7601u_dev *dev, u32 val, bool enable) { int i; /* Note: we don't turn off WLAN_CLK because that makes the device * not respond properly on the probe path. * In case anyone (PSM?) wants to use this function we can * bring the clock stuff back and fixup the probe path. */ if (enable) val |= (MT_WLAN_FUN_CTRL_WLAN_EN | MT_WLAN_FUN_CTRL_WLAN_CLK_EN); else val &= ~(MT_WLAN_FUN_CTRL_WLAN_EN); mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val); udelay(20); if (enable) { set_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state); } else { clear_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state); return; } for (i = 200; i; i--) { val = mt7601u_rr(dev, MT_CMB_CTRL); if (val & MT_CMB_CTRL_XTAL_RDY && val & MT_CMB_CTRL_PLL_LD) break; udelay(20); } /* Note: vendor driver tries to disable/enable wlan here and retry * but the code which does it is so buggy it must have never * triggered, so don't bother. */ if (!i) dev_err(dev->dev, "Error: PLL and XTAL check failed!\n"); } static void mt7601u_chip_onoff(struct mt7601u_dev *dev, bool enable, bool reset) { u32 val; mutex_lock(&dev->hw_atomic_mutex); val = mt7601u_rr(dev, MT_WLAN_FUN_CTRL); if (reset) { val |= MT_WLAN_FUN_CTRL_GPIO_OUT_EN; val &= ~MT_WLAN_FUN_CTRL_FRC_WL_ANT_SEL; if (val & MT_WLAN_FUN_CTRL_WLAN_EN) { val |= (MT_WLAN_FUN_CTRL_WLAN_RESET | MT_WLAN_FUN_CTRL_WLAN_RESET_RF); mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val); udelay(20); val &= ~(MT_WLAN_FUN_CTRL_WLAN_RESET | MT_WLAN_FUN_CTRL_WLAN_RESET_RF); } } mt7601u_wr(dev, MT_WLAN_FUN_CTRL, val); udelay(20); mt7601u_set_wlan_state(dev, val, enable); mutex_unlock(&dev->hw_atomic_mutex); } static void mt7601u_reset_csr_bbp(struct mt7601u_dev *dev) { mt7601u_wr(dev, MT_MAC_SYS_CTRL, (MT_MAC_SYS_CTRL_RESET_CSR | MT_MAC_SYS_CTRL_RESET_BBP)); mt7601u_wr(dev, MT_USB_DMA_CFG, 0); msleep(1); mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0); } static void mt7601u_init_usb_dma(struct mt7601u_dev *dev) { u32 val; val = FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_TOUT, MT_USB_AGGR_TIMEOUT) | FIELD_PREP(MT_USB_DMA_CFG_RX_BULK_AGG_LMT, MT_USB_AGGR_SIZE_LIMIT) | MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN; if (dev->in_max_packet == 512) val |= MT_USB_DMA_CFG_RX_BULK_AGG_EN; mt7601u_wr(dev, MT_USB_DMA_CFG, val); val |= MT_USB_DMA_CFG_UDMA_RX_WL_DROP; mt7601u_wr(dev, MT_USB_DMA_CFG, val); val &= ~MT_USB_DMA_CFG_UDMA_RX_WL_DROP; mt7601u_wr(dev, MT_USB_DMA_CFG, val); } static int mt7601u_init_bbp(struct mt7601u_dev *dev) { int ret; ret = mt7601u_wait_bbp_ready(dev); if (ret) return ret; ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_common_vals, ARRAY_SIZE(bbp_common_vals)); if (ret) return ret; return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_chip_vals, ARRAY_SIZE(bbp_chip_vals)); } static void mt76_init_beacon_offsets(struct mt7601u_dev *dev) { u16 base = MT_BEACON_BASE; u32 regs[4] = {}; int i; for (i = 0; i < 16; i++) { u16 addr = dev->beacon_offsets[i]; regs[i / 4] |= ((addr - base) / 64) << (8 * (i % 4)); } for (i = 0; i < 4; i++) mt7601u_wr(dev, MT_BCN_OFFSET(i), regs[i]); } static int mt7601u_write_mac_initvals(struct mt7601u_dev *dev) { int ret; ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_common_vals, ARRAY_SIZE(mac_common_vals)); if (ret) return ret; ret = 
mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_WLAN, mac_chip_vals, ARRAY_SIZE(mac_chip_vals)); if (ret) return ret; mt76_init_beacon_offsets(dev); mt7601u_wr(dev, MT_AUX_CLK_CFG, 0); return 0; } static int mt7601u_init_wcid_mem(struct mt7601u_dev *dev) { u32 *vals; int i, ret; vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL); if (!vals) return -ENOMEM; for (i = 0; i < N_WCIDS; i++) { vals[i * 2] = 0xffffffff; vals[i * 2 + 1] = 0x00ffffff; } ret = mt7601u_burst_write_regs(dev, MT_WCID_ADDR_BASE, vals, N_WCIDS * 2); kfree(vals); return ret; } static int mt7601u_init_key_mem(struct mt7601u_dev *dev) { u32 vals[4] = {}; return mt7601u_burst_write_regs(dev, MT_SKEY_MODE_BASE_0, vals, ARRAY_SIZE(vals)); } static int mt7601u_init_wcid_attr_mem(struct mt7601u_dev *dev) { u32 *vals; int i, ret; vals = kmalloc(sizeof(*vals) * N_WCIDS * 2, GFP_KERNEL); if (!vals) return -ENOMEM; for (i = 0; i < N_WCIDS * 2; i++) vals[i] = 1; ret = mt7601u_burst_write_regs(dev, MT_WCID_ATTR_BASE, vals, N_WCIDS * 2); kfree(vals); return ret; } static void mt7601u_reset_counters(struct mt7601u_dev *dev) { mt7601u_rr(dev, MT_RX_STA_CNT0); mt7601u_rr(dev, MT_RX_STA_CNT1); mt7601u_rr(dev, MT_RX_STA_CNT2); mt7601u_rr(dev, MT_TX_STA_CNT0); mt7601u_rr(dev, MT_TX_STA_CNT1); mt7601u_rr(dev, MT_TX_STA_CNT2); } int mt7601u_mac_start(struct mt7601u_dev *dev) { mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX); if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY | MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 200000)) return -ETIMEDOUT; dev->rxfilter = MT_RX_FILTR_CFG_CRC_ERR | MT_RX_FILTR_CFG_PHY_ERR | MT_RX_FILTR_CFG_PROMISC | MT_RX_FILTR_CFG_VER_ERR | MT_RX_FILTR_CFG_DUP | MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_CFEND | MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS | MT_RX_FILTR_CFG_RTS | MT_RX_FILTR_CFG_PSPOLL | MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV; mt7601u_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); if (!mt76_poll(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY | MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 50)) return -ETIMEDOUT; return 0; } static void mt7601u_mac_stop_hw(struct mt7601u_dev *dev) { int i, ok; if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) return; mt76_clear(dev, MT_BEACON_TIME_CFG, MT_BEACON_TIME_CFG_TIMER_EN | MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN | MT_BEACON_TIME_CFG_BEACON_TX); if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_BUSY, 0, 1000)) dev_warn(dev->dev, "Warning: TX DMA did not stop!\n"); /* Page count on TxQ */ i = 200; while (i-- && ((mt76_rr(dev, 0x0438) & 0xffffffff) || (mt76_rr(dev, 0x0a30) & 0x000000ff) || (mt76_rr(dev, 0x0a34) & 0x00ff00ff))) msleep(10); if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX, 0, 1000)) dev_warn(dev->dev, "Warning: MAC TX did not stop!\n"); mt76_clear(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX | MT_MAC_SYS_CTRL_ENABLE_TX); /* Page count on RxQ */ ok = 0; i = 200; while (i--) { if (!(mt76_rr(dev, MT_RXQ_STA) & 0x00ff0000) && !mt76_rr(dev, 0x0a30) && !mt76_rr(dev, 0x0a34)) { if (ok++ > 5) break; continue; } msleep(1); } if (!mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_RX, 0, 1000)) dev_warn(dev->dev, "Warning: MAC RX did not stop!\n"); if (!mt76_poll(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_RX_BUSY, 0, 1000)) dev_warn(dev->dev, "Warning: RX DMA did not stop!\n"); } void mt7601u_mac_stop(struct mt7601u_dev *dev) { mt7601u_mac_stop_hw(dev); flush_delayed_work(&dev->stat_work); cancel_delayed_work_sync(&dev->stat_work); } static void 
mt7601u_stop_hardware(struct mt7601u_dev *dev) { mt7601u_chip_onoff(dev, false, false); } int mt7601u_init_hardware(struct mt7601u_dev *dev) { static const u16 beacon_offsets[16] = { /* 512 byte per beacon */ 0xc000, 0xc200, 0xc400, 0xc600, 0xc800, 0xca00, 0xcc00, 0xce00, 0xd000, 0xd200, 0xd400, 0xd600, 0xd800, 0xda00, 0xdc00, 0xde00 }; int ret; dev->beacon_offsets = beacon_offsets; mt7601u_chip_onoff(dev, true, false); ret = mt7601u_wait_asic_ready(dev); if (ret) goto err; ret = mt7601u_mcu_init(dev); if (ret) goto err; if (!mt76_poll_msec(dev, MT_WPDMA_GLO_CFG, MT_WPDMA_GLO_CFG_TX_DMA_BUSY | MT_WPDMA_GLO_CFG_RX_DMA_BUSY, 0, 100)) { ret = -EIO; goto err; } /* Wait for ASIC ready after FW load. */ ret = mt7601u_wait_asic_ready(dev); if (ret) goto err; mt7601u_reset_csr_bbp(dev); mt7601u_init_usb_dma(dev); ret = mt7601u_mcu_cmd_init(dev); if (ret) goto err; ret = mt7601u_dma_init(dev); if (ret) goto err_mcu; ret = mt7601u_write_mac_initvals(dev); if (ret) goto err_rx; if (!mt76_poll_msec(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 100)) { ret = -EIO; goto err_rx; } ret = mt7601u_init_bbp(dev); if (ret) goto err_rx; ret = mt7601u_init_wcid_mem(dev); if (ret) goto err_rx; ret = mt7601u_init_key_mem(dev); if (ret) goto err_rx; ret = mt7601u_init_wcid_attr_mem(dev); if (ret) goto err_rx; mt76_clear(dev, MT_BEACON_TIME_CFG, (MT_BEACON_TIME_CFG_TIMER_EN | MT_BEACON_TIME_CFG_SYNC_MODE | MT_BEACON_TIME_CFG_TBTT_EN | MT_BEACON_TIME_CFG_BEACON_TX)); mt7601u_reset_counters(dev); mt7601u_rmw(dev, MT_US_CYC_CFG, MT_US_CYC_CNT, 0x1e); mt7601u_wr(dev, MT_TXOP_CTRL_CFG, FIELD_PREP(MT_TXOP_TRUN_EN, 0x3f) | FIELD_PREP(MT_TXOP_EXT_CCA_DLY, 0x58)); ret = mt7601u_eeprom_init(dev); if (ret) goto err_rx; ret = mt7601u_phy_init(dev); if (ret) goto err_rx; mt7601u_set_rx_path(dev, 0); mt7601u_set_tx_dac(dev, 0); mt7601u_mac_set_ctrlch(dev, false); mt7601u_bbp_set_ctrlch(dev, false); mt7601u_bbp_set_bw(dev, MT_BW_20); return 0; err_rx: mt7601u_dma_cleanup(dev); err_mcu: mt7601u_mcu_cmd_deinit(dev); err: mt7601u_chip_onoff(dev, false, false); return ret; } void mt7601u_cleanup(struct mt7601u_dev *dev) { if (!test_and_clear_bit(MT7601U_STATE_INITIALIZED, &dev->state)) return; mt7601u_stop_hardware(dev); mt7601u_dma_cleanup(dev); mt7601u_mcu_cmd_deinit(dev); } struct mt7601u_dev *mt7601u_alloc_device(struct device *pdev) { struct ieee80211_hw *hw; struct mt7601u_dev *dev; hw = ieee80211_alloc_hw(sizeof(*dev), &mt7601u_ops); if (!hw) return NULL; dev = hw->priv; dev->dev = pdev; dev->hw = hw; mutex_init(&dev->vendor_req_mutex); mutex_init(&dev->reg_atomic_mutex); mutex_init(&dev->hw_atomic_mutex); mutex_init(&dev->mutex); spin_lock_init(&dev->tx_lock); spin_lock_init(&dev->rx_lock); spin_lock_init(&dev->lock); spin_lock_init(&dev->mac_lock); spin_lock_init(&dev->con_mon_lock); atomic_set(&dev->avg_ampdu_len, 1); skb_queue_head_init(&dev->tx_skb_done); dev->stat_wq = alloc_workqueue("mt7601u", WQ_UNBOUND, 0); if (!dev->stat_wq) { ieee80211_free_hw(hw); return NULL; } return dev; } #define CHAN2G(_idx, _freq) { \ .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_idx), \ .max_power = 30, \ } static const struct ieee80211_channel mt76_channels_2ghz[] = { CHAN2G(1, 2412), CHAN2G(2, 2417), CHAN2G(3, 2422), CHAN2G(4, 2427), CHAN2G(5, 2432), CHAN2G(6, 2437), CHAN2G(7, 2442), CHAN2G(8, 2447), CHAN2G(9, 2452), CHAN2G(10, 2457), CHAN2G(11, 2462), CHAN2G(12, 2467), CHAN2G(13, 2472), CHAN2G(14, 2484), }; #define CCK_RATE(_idx, _rate) { \ .bitrate = _rate, \ .flags = IEEE80211_RATE_SHORT_PREAMBLE, \ 
.hw_value = (MT_PHY_TYPE_CCK << 8) | _idx, \ .hw_value_short = (MT_PHY_TYPE_CCK << 8) | (8 + _idx), \ } #define OFDM_RATE(_idx, _rate) { \ .bitrate = _rate, \ .hw_value = (MT_PHY_TYPE_OFDM << 8) | _idx, \ .hw_value_short = (MT_PHY_TYPE_OFDM << 8) | _idx, \ } static struct ieee80211_rate mt76_rates[] = { CCK_RATE(0, 10), CCK_RATE(1, 20), CCK_RATE(2, 55), CCK_RATE(3, 110), OFDM_RATE(0, 60), OFDM_RATE(1, 90), OFDM_RATE(2, 120), OFDM_RATE(3, 180), OFDM_RATE(4, 240), OFDM_RATE(5, 360), OFDM_RATE(6, 480), OFDM_RATE(7, 540), }; static int mt76_init_sband(struct mt7601u_dev *dev, struct ieee80211_supported_band *sband, const struct ieee80211_channel *chan, int n_chan, struct ieee80211_rate *rates, int n_rates) { struct ieee80211_sta_ht_cap *ht_cap; void *chanlist; int size; size = n_chan * sizeof(*chan); chanlist = devm_kmemdup(dev->dev, chan, size, GFP_KERNEL); if (!chanlist) return -ENOMEM; sband->channels = chanlist; sband->n_channels = n_chan; sband->bitrates = rates; sband->n_bitrates = n_rates; ht_cap = &sband->ht_cap; ht_cap->ht_supported = true; ht_cap->cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); ht_cap->mcs.rx_mask[0] = 0xff; ht_cap->mcs.rx_mask[4] = 0x1; ht_cap->mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; ht_cap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; ht_cap->ampdu_density = IEEE80211_HT_MPDU_DENSITY_2; dev->chandef.chan = &sband->channels[0]; return 0; } static int mt76_init_sband_2g(struct mt7601u_dev *dev) { dev->sband_2g = devm_kzalloc(dev->dev, sizeof(*dev->sband_2g), GFP_KERNEL); if (!dev->sband_2g) return -ENOMEM; dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = dev->sband_2g; WARN_ON(dev->ee->reg.start - 1 + dev->ee->reg.num > ARRAY_SIZE(mt76_channels_2ghz)); return mt76_init_sband(dev, dev->sband_2g, &mt76_channels_2ghz[dev->ee->reg.start - 1], dev->ee->reg.num, mt76_rates, ARRAY_SIZE(mt76_rates)); } int mt7601u_register_device(struct mt7601u_dev *dev) { struct ieee80211_hw *hw = dev->hw; struct wiphy *wiphy = hw->wiphy; int ret; /* Reserve WCID 0 for mcast - thanks to this APs WCID will go to * entry no. 1 like it does in the vendor driver. */ dev->wcid_mask[0] |= 1; /* init fake wcid for monitor interfaces */ dev->mon_wcid = devm_kmalloc(dev->dev, sizeof(*dev->mon_wcid), GFP_KERNEL); if (!dev->mon_wcid) return -ENOMEM; dev->mon_wcid->idx = 0xff; dev->mon_wcid->hw_key_idx = -1; SET_IEEE80211_DEV(hw, dev->dev); hw->queues = 4; ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, PS_NULLFUNC_STACK); ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); ieee80211_hw_set(hw, MFP_CAPABLE); hw->max_rates = 1; hw->max_report_rates = 7; hw->max_rate_tries = 1; hw->sta_data_size = sizeof(struct mt76_sta); hw->vif_data_size = sizeof(struct mt76_vif); SET_IEEE80211_PERM_ADDR(hw, dev->macaddr); wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); ret = mt76_init_sband_2g(dev); if (ret) return ret; INIT_DELAYED_WORK(&dev->mac_work, mt7601u_mac_work); INIT_DELAYED_WORK(&dev->stat_work, mt7601u_tx_stat); ret = ieee80211_register_hw(hw); if (ret) return ret; mt7601u_init_debugfs(dev); return 0; }
linux-master
drivers/net/wireless/mediatek/mt7601u/init.c
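mt76_init_beacon_offsets() in init.c above packs the 16 beacon slot addresses into four MT_BCN_OFFSET registers, one byte per slot, expressed as the distance from the beacon memory base in 64-byte units. The stand-alone sketch below replays that packing for the beacon_offsets[] table from mt7601u_init_hardware(); it assumes MT_BEACON_BASE is 0xc000 (the first table entry), which this file does not spell out.

/* Illustrative only -- assumes MT_BEACON_BASE == 0xc000. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	static const uint16_t beacon_offsets[16] = {
		0xc000, 0xc200, 0xc400, 0xc600, 0xc800, 0xca00, 0xcc00, 0xce00,
		0xd000, 0xd200, 0xd400, 0xd600, 0xd800, 0xda00, 0xdc00, 0xde00,
	};
	const uint16_t base = 0xc000;	/* assumed MT_BEACON_BASE */
	uint32_t regs[4] = { 0 };
	int i;

	for (i = 0; i < 16; i++)
		regs[i / 4] |= (uint32_t)((beacon_offsets[i] - base) / 64)
			       << (8 * (i % 4));

	for (i = 0; i < 4; i++)
		printf("MT_BCN_OFFSET(%d) = 0x%08x\n", i, regs[i]);
	return 0;
}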
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2014 Felix Fietkau <[email protected]>
 * Copyright (C) 2015 Jakub Kicinski <[email protected]>
 */

#include "mt7601u.h"

int mt7601u_wait_asic_ready(struct mt7601u_dev *dev)
{
	int i = 100;
	u32 val;

	do {
		if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
			return -EIO;

		val = mt7601u_rr(dev, MT_MAC_CSR0);
		if (val && ~val)
			return 0;

		udelay(10);
	} while (i--);

	return -EIO;
}

bool mt76_poll(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
	       int timeout)
{
	u32 cur;

	timeout /= 10;
	do {
		if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
			return false;

		cur = mt7601u_rr(dev, offset) & mask;
		if (cur == val)
			return true;

		udelay(10);
	} while (timeout-- > 0);

	dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);

	return false;
}

bool mt76_poll_msec(struct mt7601u_dev *dev, u32 offset, u32 mask, u32 val,
		    int timeout)
{
	u32 cur;

	timeout /= 10;
	do {
		if (test_bit(MT7601U_STATE_REMOVED, &dev->state))
			return false;

		cur = mt7601u_rr(dev, offset) & mask;
		if (cur == val)
			return true;

		msleep(10);
	} while (timeout-- > 0);

	dev_err(dev->dev, "Error: Time out with reg %08x\n", offset);

	return false;
}
linux-master
drivers/net/wireless/mediatek/mt7601u/core.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 Felix Fietkau <[email protected]> * Copyright (C) 2015 Jakub Kicinski <[email protected]> */ #include <linux/of.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/etherdevice.h> #include <asm/unaligned.h> #include "mt7601u.h" #include "eeprom.h" #include "mac.h" static bool field_valid(u8 val) { return val != 0xff; } static s8 field_validate(u8 val) { if (!field_valid(val)) return 0; return val; } static int mt7601u_efuse_read(struct mt7601u_dev *dev, u16 addr, u8 *data, enum mt7601u_eeprom_access_modes mode) { u32 val; int i; val = mt76_rr(dev, MT_EFUSE_CTRL); val &= ~(MT_EFUSE_CTRL_AIN | MT_EFUSE_CTRL_MODE); val |= FIELD_PREP(MT_EFUSE_CTRL_AIN, addr & ~0xf) | FIELD_PREP(MT_EFUSE_CTRL_MODE, mode) | MT_EFUSE_CTRL_KICK; mt76_wr(dev, MT_EFUSE_CTRL, val); if (!mt76_poll(dev, MT_EFUSE_CTRL, MT_EFUSE_CTRL_KICK, 0, 1000)) return -ETIMEDOUT; val = mt76_rr(dev, MT_EFUSE_CTRL); if ((val & MT_EFUSE_CTRL_AOUT) == MT_EFUSE_CTRL_AOUT) { /* Parts of eeprom not in the usage map (0x80-0xc0,0xf0) * will not return valid data but it's ok. */ memset(data, 0xff, 16); return 0; } for (i = 0; i < 4; i++) { val = mt76_rr(dev, MT_EFUSE_DATA(i)); put_unaligned_le32(val, data + 4 * i); } return 0; } static int mt7601u_efuse_physical_size_check(struct mt7601u_dev *dev) { const int map_reads = DIV_ROUND_UP(MT_EFUSE_USAGE_MAP_SIZE, 16); u8 data[round_up(MT_EFUSE_USAGE_MAP_SIZE, 16)]; int ret, i; u32 start = 0, end = 0, cnt_free; for (i = 0; i < map_reads; i++) { ret = mt7601u_efuse_read(dev, MT_EE_USAGE_MAP_START + i * 16, data + i * 16, MT_EE_PHYSICAL_READ); if (ret) return ret; } for (i = 0; i < MT_EFUSE_USAGE_MAP_SIZE; i++) if (!data[i]) { if (!start) start = MT_EE_USAGE_MAP_START + i; end = MT_EE_USAGE_MAP_START + i; } cnt_free = end - start + 1; if (MT_EFUSE_USAGE_MAP_SIZE - cnt_free < 5) { dev_err(dev->dev, "Error: your device needs default EEPROM file and this driver doesn't support it!\n"); return -EINVAL; } return 0; } static bool mt7601u_has_tssi(struct mt7601u_dev *dev, u8 *eeprom) { u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); return (u16)~nic_conf1 && (nic_conf1 & MT_EE_NIC_CONF_1_TX_ALC_EN); } static void mt7601u_set_chip_cap(struct mt7601u_dev *dev, u8 *eeprom) { u16 nic_conf0 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_0); u16 nic_conf1 = get_unaligned_le16(eeprom + MT_EE_NIC_CONF_1); if (!field_valid(nic_conf1 & 0xff)) nic_conf1 &= 0xff00; dev->ee->tssi_enabled = mt7601u_has_tssi(dev, eeprom) && !(nic_conf1 & MT_EE_NIC_CONF_1_TEMP_TX_ALC); if (nic_conf1 & MT_EE_NIC_CONF_1_HW_RF_CTRL) dev_err(dev->dev, "Error: this driver does not support HW RF ctrl\n"); if (!field_valid(nic_conf0 >> 8)) return; if (FIELD_GET(MT_EE_NIC_CONF_0_RX_PATH, nic_conf0) > 1 || FIELD_GET(MT_EE_NIC_CONF_0_TX_PATH, nic_conf0) > 1) dev_err(dev->dev, "Error: device has more than 1 RX/TX stream!\n"); } static void mt7601u_set_channel_target_power(struct mt7601u_dev *dev, u8 *eeprom, u8 max_pwr) { u8 trgt_pwr = eeprom[MT_EE_TX_TSSI_TARGET_POWER]; if (trgt_pwr > max_pwr || !trgt_pwr) { dev_warn(dev->dev, "Error: EEPROM trgt power invalid %hhx!\n", trgt_pwr); trgt_pwr = 0x20; } memset(dev->ee->chan_pwr, trgt_pwr, sizeof(dev->ee->chan_pwr)); } static void mt7601u_set_channel_power(struct mt7601u_dev *dev, u8 *eeprom) { u32 i, val; u8 max_pwr; val = mt7601u_rr(dev, MT_TX_ALC_CFG_0); max_pwr = FIELD_GET(MT_TX_ALC_CFG_0_LIMIT_0, val); if (mt7601u_has_tssi(dev, eeprom)) { mt7601u_set_channel_target_power(dev, eeprom, max_pwr); 
return; } for (i = 0; i < 14; i++) { s8 power = field_validate(eeprom[MT_EE_TX_POWER_OFFSET + i]); if (power > max_pwr || power < 0) power = MT7601U_DEFAULT_TX_POWER; dev->ee->chan_pwr[i] = power; } } static void mt7601u_set_country_reg(struct mt7601u_dev *dev, u8 *eeprom) { /* Note: - region 31 is not valid for mt7601u (see rtmp_init.c) * - comments in rtmp_def.h are incorrect (see rt_channel.c) */ static const struct reg_channel_bounds chan_bounds[] = { /* EEPROM country regions 0 - 7 */ { 1, 11 }, { 1, 13 }, { 10, 2 }, { 10, 4 }, { 14, 1 }, { 1, 14 }, { 3, 7 }, { 5, 9 }, /* EEPROM country regions 32 - 33 */ { 1, 11 }, { 1, 14 } }; u8 val = eeprom[MT_EE_COUNTRY_REGION]; int idx = -1; if (val < 8) idx = val; if (val > 31 && val < 33) idx = val - 32 + 8; if (idx != -1) dev_info(dev->dev, "EEPROM country region %02x (channels %d-%d)\n", val, chan_bounds[idx].start, chan_bounds[idx].start + chan_bounds[idx].num - 1); else idx = 5; /* channels 1 - 14 */ dev->ee->reg = chan_bounds[idx]; /* TODO: country region 33 is special - phy should be set to B-mode * before entering channel 14 (see sta/connect.c) */ } static void mt7601u_set_rf_freq_off(struct mt7601u_dev *dev, u8 *eeprom) { u8 comp; dev->ee->rf_freq_off = field_validate(eeprom[MT_EE_FREQ_OFFSET]); comp = field_validate(eeprom[MT_EE_FREQ_OFFSET_COMPENSATION]); if (comp & BIT(7)) dev->ee->rf_freq_off -= comp & 0x7f; else dev->ee->rf_freq_off += comp; } static void mt7601u_set_rssi_offset(struct mt7601u_dev *dev, u8 *eeprom) { int i; s8 *rssi_offset = dev->ee->rssi_offset; for (i = 0; i < 2; i++) { rssi_offset[i] = eeprom[MT_EE_RSSI_OFFSET + i]; if (rssi_offset[i] < -10 || rssi_offset[i] > 10) { dev_warn(dev->dev, "Warning: EEPROM RSSI is invalid %02hhx\n", rssi_offset[i]); rssi_offset[i] = 0; } } } static void mt7601u_extra_power_over_mac(struct mt7601u_dev *dev) { u32 val; val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_1) & 0x0000ff00) >> 8); val |= ((mt7601u_rr(dev, MT_TX_PWR_CFG_2) & 0x0000ff00) << 8); mt7601u_wr(dev, MT_TX_PWR_CFG_7, val); val = ((mt7601u_rr(dev, MT_TX_PWR_CFG_4) & 0x0000ff00) >> 8); mt7601u_wr(dev, MT_TX_PWR_CFG_9, val); } static void mt7601u_set_power_rate(struct power_per_rate *rate, s8 delta, u8 value) { /* Invalid? 
Note: vendor driver does not handle this */ if (value == 0xff) return; rate->raw = s6_validate(value); rate->bw20 = s6_to_int(value); /* Note: vendor driver does cap the value to s6 right away */ rate->bw40 = rate->bw20 + delta; } static void mt7601u_save_power_rate(struct mt7601u_dev *dev, s8 delta, u32 val, int i) { struct mt7601u_rate_power *t = &dev->ee->power_rate_table; switch (i) { case 0: mt7601u_set_power_rate(&t->cck[0], delta, (val >> 0) & 0xff); mt7601u_set_power_rate(&t->cck[1], delta, (val >> 8) & 0xff); /* Save cck bw20 for fixups of channel 14 */ dev->ee->real_cck_bw20[0] = t->cck[0].bw20; dev->ee->real_cck_bw20[1] = t->cck[1].bw20; mt7601u_set_power_rate(&t->ofdm[0], delta, (val >> 16) & 0xff); mt7601u_set_power_rate(&t->ofdm[1], delta, (val >> 24) & 0xff); break; case 1: mt7601u_set_power_rate(&t->ofdm[2], delta, (val >> 0) & 0xff); mt7601u_set_power_rate(&t->ofdm[3], delta, (val >> 8) & 0xff); mt7601u_set_power_rate(&t->ht[0], delta, (val >> 16) & 0xff); mt7601u_set_power_rate(&t->ht[1], delta, (val >> 24) & 0xff); break; case 2: mt7601u_set_power_rate(&t->ht[2], delta, (val >> 0) & 0xff); mt7601u_set_power_rate(&t->ht[3], delta, (val >> 8) & 0xff); break; } } static s8 get_delta(u8 val) { s8 ret; if (!field_valid(val) || !(val & BIT(7))) return 0; ret = val & 0x1f; if (ret > 8) ret = 8; if (val & BIT(6)) ret = -ret; return ret; } static void mt7601u_config_tx_power_per_rate(struct mt7601u_dev *dev, u8 *eeprom) { u32 val; s8 bw40_delta; int i; bw40_delta = get_delta(eeprom[MT_EE_TX_POWER_DELTA_BW40]); for (i = 0; i < 5; i++) { val = get_unaligned_le32(eeprom + MT_EE_TX_POWER_BYRATE(i)); mt7601u_save_power_rate(dev, bw40_delta, val, i); if (~val) mt7601u_wr(dev, MT_TX_PWR_CFG_0 + i * 4, val); } mt7601u_extra_power_over_mac(dev); } static void mt7601u_init_tssi_params(struct mt7601u_dev *dev, u8 *eeprom) { struct tssi_data *d = &dev->ee->tssi_data; if (!dev->ee->tssi_enabled) return; d->slope = eeprom[MT_EE_TX_TSSI_SLOPE]; d->tx0_delta_offset = eeprom[MT_EE_TX_TSSI_OFFSET] * 1024; d->offset[0] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP]; d->offset[1] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 1]; d->offset[2] = eeprom[MT_EE_TX_TSSI_OFFSET_GROUP + 2]; } int mt7601u_eeprom_init(struct mt7601u_dev *dev) { u8 *eeprom; int i, ret; ret = mt7601u_efuse_physical_size_check(dev); if (ret) return ret; dev->ee = devm_kzalloc(dev->dev, sizeof(*dev->ee), GFP_KERNEL); if (!dev->ee) return -ENOMEM; eeprom = kmalloc(MT7601U_EEPROM_SIZE, GFP_KERNEL); if (!eeprom) return -ENOMEM; for (i = 0; i + 16 <= MT7601U_EEPROM_SIZE; i += 16) { ret = mt7601u_efuse_read(dev, i, eeprom + i, MT_EE_READ); if (ret) goto out; } if (eeprom[MT_EE_VERSION_EE] > MT7601U_EE_MAX_VER) dev_warn(dev->dev, "Warning: unsupported EEPROM version %02hhx\n", eeprom[MT_EE_VERSION_EE]); dev_info(dev->dev, "EEPROM ver:%02hhx fae:%02hhx\n", eeprom[MT_EE_VERSION_EE], eeprom[MT_EE_VERSION_FAE]); mt7601u_set_macaddr(dev, eeprom + MT_EE_MAC_ADDR); mt7601u_set_chip_cap(dev, eeprom); mt7601u_set_channel_power(dev, eeprom); mt7601u_set_country_reg(dev, eeprom); mt7601u_set_rf_freq_off(dev, eeprom); mt7601u_set_rssi_offset(dev, eeprom); dev->ee->ref_temp = eeprom[MT_EE_REF_TEMP]; dev->ee->lna_gain = eeprom[MT_EE_LNA_GAIN]; mt7601u_config_tx_power_per_rate(dev, eeprom); mt7601u_init_tssi_params(dev, eeprom); out: kfree(eeprom); return ret; }
linux-master
drivers/net/wireless/mediatek/mt7601u/eeprom.c
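get_delta() in eeprom.c above decodes the BW40 TX power delta byte: bit 7 marks the delta as programmed, bit 6 carries the sign, and the low five bits hold the magnitude, capped at 8 (0xff means the EEPROM field is empty). A self-contained restatement is sketched below; decode_bw40_delta() is an illustrative name, not a driver symbol.

/* Illustrative only -- restates get_delta() from the EEPROM code above. */
#include <stdint.h>
#include <stdio.h>

static int8_t decode_bw40_delta(uint8_t val)
{
	int8_t ret;

	if (val == 0xff || !(val & 0x80))	/* unprogrammed or delta disabled */
		return 0;

	ret = val & 0x1f;			/* 5-bit magnitude */
	if (ret > 8)
		ret = 8;			/* capped at 8 */
	if (val & 0x40)				/* sign bit */
		ret = -ret;

	return ret;
}

int main(void)
{
	printf("%d %d %d %d\n",
	       decode_bw40_delta(0x84),	/*  4 */
	       decode_bw40_delta(0xc4),	/* -4 */
	       decode_bw40_delta(0x9f),	/*  8 (capped) */
	       decode_bw40_delta(0x04));	/*  0 (bit 7 clear) */
	return 0;
}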
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2015 Jakub Kicinski <[email protected]> */ #include "mt7601u.h" #include "dma.h" #include "usb.h" #include "trace.h" static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e, gfp_t gfp); static unsigned int ieee80211_get_hdrlen_from_buf(const u8 *data, unsigned len) { const struct ieee80211_hdr *hdr = (const struct ieee80211_hdr *)data; unsigned int hdrlen; if (unlikely(len < 10)) return 0; hdrlen = ieee80211_hdrlen(hdr->frame_control); if (unlikely(hdrlen > len)) return 0; return hdrlen; } static struct sk_buff * mt7601u_rx_skb_from_seg(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, void *data, u32 seg_len, u32 truesize, struct page *p) { struct sk_buff *skb; u32 true_len, hdr_len = 0, copy, frag; skb = alloc_skb(p ? 128 : seg_len, GFP_ATOMIC); if (!skb) return NULL; true_len = mt76_mac_process_rx(dev, skb, data, rxwi); if (!true_len || true_len > seg_len) goto bad_frame; hdr_len = ieee80211_get_hdrlen_from_buf(data, true_len); if (!hdr_len) goto bad_frame; if (rxwi->rxinfo & cpu_to_le32(MT_RXINFO_L2PAD)) { skb_put_data(skb, data, hdr_len); data += hdr_len + 2; true_len -= hdr_len; hdr_len = 0; } /* If not doing paged RX allocated skb will always have enough space */ copy = (true_len <= skb_tailroom(skb)) ? true_len : hdr_len + 8; frag = true_len - copy; skb_put_data(skb, data, copy); data += copy; if (frag) { skb_add_rx_frag(skb, 0, p, data - page_address(p), frag, truesize); get_page(p); } return skb; bad_frame: dev_err_ratelimited(dev->dev, "Error: incorrect frame len:%u hdr:%u\n", true_len, hdr_len); dev_kfree_skb(skb); return NULL; } static void mt7601u_rx_process_seg(struct mt7601u_dev *dev, u8 *data, u32 seg_len, struct page *p, struct list_head *list) { struct sk_buff *skb; struct mt7601u_rxwi *rxwi; u32 fce_info, truesize = seg_len; /* DMA_INFO field at the beginning of the segment contains only some of * the information, we need to read the FCE descriptor from the end. */ fce_info = get_unaligned_le32(data + seg_len - MT_FCE_INFO_LEN); seg_len -= MT_FCE_INFO_LEN; data += MT_DMA_HDR_LEN; seg_len -= MT_DMA_HDR_LEN; rxwi = (struct mt7601u_rxwi *) data; data += sizeof(struct mt7601u_rxwi); seg_len -= sizeof(struct mt7601u_rxwi); if (unlikely(rxwi->zero[0] || rxwi->zero[1] || rxwi->zero[2])) dev_err_once(dev->dev, "Error: RXWI zero fields are set\n"); if (unlikely(FIELD_GET(MT_RXD_INFO_TYPE, fce_info))) dev_err_once(dev->dev, "Error: RX path seen a non-pkt urb\n"); trace_mt_rx(dev, rxwi, fce_info); skb = mt7601u_rx_skb_from_seg(dev, rxwi, data, seg_len, truesize, p); if (!skb) return; local_bh_disable(); rcu_read_lock(); ieee80211_rx_list(dev->hw, NULL, skb, list); rcu_read_unlock(); local_bh_enable(); } static u16 mt7601u_rx_next_seg_len(u8 *data, u32 data_len) { u32 min_seg_len = MT_DMA_HDR_LEN + MT_RX_INFO_LEN + sizeof(struct mt7601u_rxwi) + MT_FCE_INFO_LEN; u16 dma_len = get_unaligned_le16(data); if (data_len < min_seg_len || WARN_ON_ONCE(!dma_len) || WARN_ON_ONCE(dma_len + MT_DMA_HDRS > data_len) || WARN_ON_ONCE(dma_len & 0x3) || WARN_ON_ONCE(dma_len < min_seg_len)) return 0; return MT_DMA_HDRS + dma_len; } static void mt7601u_rx_process_entry(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e) { u32 seg_len, data_len = e->urb->actual_length; u8 *data = page_address(e->p); struct page *new_p = NULL; LIST_HEAD(list); int cnt = 0; if (!test_bit(MT7601U_STATE_INITIALIZED, &dev->state)) return; /* Copy if there is very little data in the buffer. 
*/ if (data_len > 512) new_p = dev_alloc_pages(MT_RX_ORDER); while ((seg_len = mt7601u_rx_next_seg_len(data, data_len))) { mt7601u_rx_process_seg(dev, data, seg_len, new_p ? e->p : NULL, &list); data_len -= seg_len; data += seg_len; cnt++; } if (cnt > 1) trace_mt_rx_dma_aggr(dev, cnt, !!new_p); netif_receive_skb_list(&list); if (new_p) { /* we have one extra ref from the allocator */ put_page(e->p); e->p = new_p; } } static struct mt7601u_dma_buf_rx * mt7601u_rx_get_pending_entry(struct mt7601u_dev *dev) { struct mt7601u_rx_queue *q = &dev->rx_q; struct mt7601u_dma_buf_rx *buf = NULL; unsigned long flags; spin_lock_irqsave(&dev->rx_lock, flags); if (!q->pending) goto out; buf = &q->e[q->start]; q->pending--; q->start = (q->start + 1) % q->entries; out: spin_unlock_irqrestore(&dev->rx_lock, flags); return buf; } static void mt7601u_complete_rx(struct urb *urb) { struct mt7601u_dev *dev = urb->context; struct mt7601u_rx_queue *q = &dev->rx_q; unsigned long flags; /* do no schedule rx tasklet if urb has been unlinked * or the device has been removed */ switch (urb->status) { case -ECONNRESET: case -ESHUTDOWN: case -ENOENT: case -EPROTO: return; default: dev_err_ratelimited(dev->dev, "rx urb failed: %d\n", urb->status); fallthrough; case 0: break; } spin_lock_irqsave(&dev->rx_lock, flags); if (WARN_ONCE(q->e[q->end].urb != urb, "RX urb mismatch")) goto out; q->end = (q->end + 1) % q->entries; q->pending++; tasklet_schedule(&dev->rx_tasklet); out: spin_unlock_irqrestore(&dev->rx_lock, flags); } static void mt7601u_rx_tasklet(struct tasklet_struct *t) { struct mt7601u_dev *dev = from_tasklet(dev, t, rx_tasklet); struct mt7601u_dma_buf_rx *e; while ((e = mt7601u_rx_get_pending_entry(dev))) { if (e->urb->status) continue; mt7601u_rx_process_entry(dev, e); mt7601u_submit_rx_buf(dev, e, GFP_ATOMIC); } } static void mt7601u_complete_tx(struct urb *urb) { struct mt7601u_tx_queue *q = urb->context; struct mt7601u_dev *dev = q->dev; struct sk_buff *skb; unsigned long flags; switch (urb->status) { case -ECONNRESET: case -ESHUTDOWN: case -ENOENT: case -EPROTO: return; default: dev_err_ratelimited(dev->dev, "tx urb failed: %d\n", urb->status); fallthrough; case 0: break; } spin_lock_irqsave(&dev->tx_lock, flags); if (WARN_ONCE(q->e[q->start].urb != urb, "TX urb mismatch")) goto out; skb = q->e[q->start].skb; q->e[q->start].skb = NULL; trace_mt_tx_dma_done(dev, skb); __skb_queue_tail(&dev->tx_skb_done, skb); tasklet_schedule(&dev->tx_tasklet); if (q->used == q->entries - q->entries / 8) ieee80211_wake_queue(dev->hw, skb_get_queue_mapping(skb)); q->start = (q->start + 1) % q->entries; q->used--; out: spin_unlock_irqrestore(&dev->tx_lock, flags); } static void mt7601u_tx_tasklet(struct tasklet_struct *t) { struct mt7601u_dev *dev = from_tasklet(dev, t, tx_tasklet); struct sk_buff_head skbs; unsigned long flags; __skb_queue_head_init(&skbs); spin_lock_irqsave(&dev->tx_lock, flags); set_bit(MT7601U_STATE_MORE_STATS, &dev->state); if (!test_and_set_bit(MT7601U_STATE_READING_STATS, &dev->state)) queue_delayed_work(dev->stat_wq, &dev->stat_work, msecs_to_jiffies(10)); skb_queue_splice_init(&dev->tx_skb_done, &skbs); spin_unlock_irqrestore(&dev->tx_lock, flags); while (!skb_queue_empty(&skbs)) { struct sk_buff *skb = __skb_dequeue(&skbs); mt7601u_tx_status(dev, skb); } } static int mt7601u_dma_submit_tx(struct mt7601u_dev *dev, struct sk_buff *skb, u8 ep) { struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); unsigned snd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[ep]); struct mt7601u_dma_buf_tx *e; struct 
mt7601u_tx_queue *q = &dev->tx_q[ep]; unsigned long flags; int ret; spin_lock_irqsave(&dev->tx_lock, flags); if (WARN_ON(q->entries <= q->used)) { ret = -ENOSPC; goto out; } e = &q->e[q->end]; usb_fill_bulk_urb(e->urb, usb_dev, snd_pipe, skb->data, skb->len, mt7601u_complete_tx, q); ret = usb_submit_urb(e->urb, GFP_ATOMIC); if (ret) { /* Special-handle ENODEV from TX urb submission because it will * often be the first ENODEV we see after device is removed. */ if (ret == -ENODEV) set_bit(MT7601U_STATE_REMOVED, &dev->state); else dev_err(dev->dev, "Error: TX urb submit failed:%d\n", ret); goto out; } q->end = (q->end + 1) % q->entries; q->used++; e->skb = skb; if (q->used >= q->entries) ieee80211_stop_queue(dev->hw, skb_get_queue_mapping(skb)); out: spin_unlock_irqrestore(&dev->tx_lock, flags); return ret; } /* Map hardware Q to USB endpoint number */ static u8 q2ep(u8 qid) { /* TODO: take management packets to queue 5 */ return qid + 1; } /* Map USB endpoint number to Q id in the DMA engine */ static enum mt76_qsel ep2dmaq(u8 ep) { if (ep == 5) return MT_QSEL_MGMT; return MT_QSEL_EDCA; } int mt7601u_dma_enqueue_tx(struct mt7601u_dev *dev, struct sk_buff *skb, struct mt76_wcid *wcid, int hw_q) { u8 ep = q2ep(hw_q); u32 dma_flags; int ret; dma_flags = MT_TXD_PKT_INFO_80211; if (wcid->hw_key_idx == 0xff) dma_flags |= MT_TXD_PKT_INFO_WIV; ret = mt7601u_dma_skb_wrap_pkt(skb, ep2dmaq(ep), dma_flags); if (ret) return ret; ret = mt7601u_dma_submit_tx(dev, skb, ep); if (ret) { ieee80211_free_txskb(dev->hw, skb); return ret; } return 0; } static void mt7601u_kill_rx(struct mt7601u_dev *dev) { int i; for (i = 0; i < dev->rx_q.entries; i++) usb_poison_urb(dev->rx_q.e[i].urb); } static int mt7601u_submit_rx_buf(struct mt7601u_dev *dev, struct mt7601u_dma_buf_rx *e, gfp_t gfp) { struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); u8 *buf = page_address(e->p); unsigned pipe; int ret; pipe = usb_rcvbulkpipe(usb_dev, dev->in_eps[MT_EP_IN_PKT_RX]); usb_fill_bulk_urb(e->urb, usb_dev, pipe, buf, MT_RX_URB_SIZE, mt7601u_complete_rx, dev); trace_mt_submit_urb(dev, e->urb); ret = usb_submit_urb(e->urb, gfp); if (ret) dev_err(dev->dev, "Error: submit RX URB failed:%d\n", ret); return ret; } static int mt7601u_submit_rx(struct mt7601u_dev *dev) { int i, ret; for (i = 0; i < dev->rx_q.entries; i++) { ret = mt7601u_submit_rx_buf(dev, &dev->rx_q.e[i], GFP_KERNEL); if (ret) return ret; } return 0; } static void mt7601u_free_rx(struct mt7601u_dev *dev) { int i; for (i = 0; i < dev->rx_q.entries; i++) { __free_pages(dev->rx_q.e[i].p, MT_RX_ORDER); usb_free_urb(dev->rx_q.e[i].urb); } } static int mt7601u_alloc_rx(struct mt7601u_dev *dev) { int i; memset(&dev->rx_q, 0, sizeof(dev->rx_q)); dev->rx_q.dev = dev; dev->rx_q.entries = N_RX_ENTRIES; for (i = 0; i < N_RX_ENTRIES; i++) { dev->rx_q.e[i].urb = usb_alloc_urb(0, GFP_KERNEL); dev->rx_q.e[i].p = dev_alloc_pages(MT_RX_ORDER); if (!dev->rx_q.e[i].urb || !dev->rx_q.e[i].p) return -ENOMEM; } return 0; } static void mt7601u_free_tx_queue(struct mt7601u_tx_queue *q) { int i; for (i = 0; i < q->entries; i++) { usb_poison_urb(q->e[i].urb); if (q->e[i].skb) mt7601u_tx_status(q->dev, q->e[i].skb); usb_free_urb(q->e[i].urb); } } static void mt7601u_free_tx(struct mt7601u_dev *dev) { int i; if (!dev->tx_q) return; for (i = 0; i < __MT_EP_OUT_MAX; i++) mt7601u_free_tx_queue(&dev->tx_q[i]); } static int mt7601u_alloc_tx_queue(struct mt7601u_dev *dev, struct mt7601u_tx_queue *q) { int i; q->dev = dev; q->entries = N_TX_ENTRIES; for (i = 0; i < N_TX_ENTRIES; i++) { q->e[i].urb = 
usb_alloc_urb(0, GFP_KERNEL); if (!q->e[i].urb) return -ENOMEM; } return 0; } static int mt7601u_alloc_tx(struct mt7601u_dev *dev) { int i; dev->tx_q = devm_kcalloc(dev->dev, __MT_EP_OUT_MAX, sizeof(*dev->tx_q), GFP_KERNEL); if (!dev->tx_q) return -ENOMEM; for (i = 0; i < __MT_EP_OUT_MAX; i++) if (mt7601u_alloc_tx_queue(dev, &dev->tx_q[i])) return -ENOMEM; return 0; } int mt7601u_dma_init(struct mt7601u_dev *dev) { int ret; tasklet_setup(&dev->tx_tasklet, mt7601u_tx_tasklet); tasklet_setup(&dev->rx_tasklet, mt7601u_rx_tasklet); ret = mt7601u_alloc_tx(dev); if (ret) goto err; ret = mt7601u_alloc_rx(dev); if (ret) goto err; ret = mt7601u_submit_rx(dev); if (ret) goto err; return 0; err: mt7601u_dma_cleanup(dev); return ret; } void mt7601u_dma_cleanup(struct mt7601u_dev *dev) { mt7601u_kill_rx(dev); tasklet_kill(&dev->rx_tasklet); mt7601u_free_rx(dev); mt7601u_free_tx(dev); tasklet_kill(&dev->tx_tasklet); }
linux-master
drivers/net/wireless/mediatek/mt7601u/dma.c
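A quick aside before the next file: the RX path in dma.c above treats each USB bulk transfer as a train of segments, each prefixed by a little-endian 16-bit DMA length and followed by an RXWI descriptor and an FCE info trailer, and mt7601u_rx_next_seg_len() rejects lengths that are zero, not 4-byte aligned, or that overrun the remaining buffer. The standalone sketch below walks a fabricated buffer with the same validation order; the header and descriptor sizes are illustrative placeholders rather than the real constants from dma.h, and next_seg_len()/main() are hypothetical helpers, not driver code.

#include <stdint.h>
#include <stdio.h>

#define DMA_HDR_LEN  4   /* placeholder: per-segment DMA header size */
#define RX_INFO_LEN  4   /* placeholder: RX info word size */
#define RXWI_LEN     16  /* placeholder: RXWI descriptor size */
#define FCE_INFO_LEN 4   /* placeholder: FCE trailer size */
#define DMA_HDRS     (DMA_HDR_LEN + RX_INFO_LEN)

static uint16_t get_le16(const uint8_t *p)
{
    return (uint16_t)(p[0] | (p[1] << 8));
}

/* Same validation order as mt7601u_rx_next_seg_len(); 0 means "stop walking". */
static unsigned int next_seg_len(const uint8_t *data, unsigned int data_len)
{
    unsigned int min_seg = DMA_HDR_LEN + RX_INFO_LEN + RXWI_LEN + FCE_INFO_LEN;
    uint16_t dma_len = get_le16(data);

    if (data_len < min_seg || !dma_len || (dma_len & 0x3) ||
        dma_len + DMA_HDRS > data_len || dma_len < min_seg)
        return 0;
    return DMA_HDRS + dma_len;
}

int main(void)
{
    uint8_t buf[256] = { 0 };
    unsigned int off = 0, seg, cnt = 0;
    unsigned int data_len = DMA_HDRS + 40 + DMA_HDRS + 32;

    /* Fabricate two back-to-back segments with payload lengths 40 and 32. */
    buf[0] = 40;               /* LE16 dma_len of segment 0 */
    buf[DMA_HDRS + 40] = 32;   /* LE16 dma_len of segment 1 */

    while ((seg = next_seg_len(buf + off, data_len - off))) {
        printf("segment %u: %u bytes at offset %u\n", cnt++, seg, off);
        off += seg;
    }
    return 0;
}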
// SPDX-License-Identifier: GPL-2.0-only /* * (c) Copyright 2002-2010, Ralink Technology, Inc. * Copyright (C) 2014 Felix Fietkau <[email protected]> * Copyright (C) 2015 Jakub Kicinski <[email protected]> */ #include "mt7601u.h" #include "mcu.h" #include "eeprom.h" #include "trace.h" #include "initvals_phy.h" #include <linux/etherdevice.h> static void mt7601u_agc_reset(struct mt7601u_dev *dev); static int mt7601u_rf_wr(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 value) { int ret = 0; if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) || WARN_ON(offset > 63)) return -EINVAL; if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) return 0; mutex_lock(&dev->reg_atomic_mutex); if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) { ret = -ETIMEDOUT; goto out; } mt7601u_wr(dev, MT_RF_CSR_CFG, FIELD_PREP(MT_RF_CSR_CFG_DATA, value) | FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) | FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) | MT_RF_CSR_CFG_WR | MT_RF_CSR_CFG_KICK); trace_rf_write(dev, bank, offset, value); out: mutex_unlock(&dev->reg_atomic_mutex); if (ret < 0) dev_err(dev->dev, "Error: RF write %02hhx:%02hhx failed:%d!!\n", bank, offset, ret); return ret; } static int mt7601u_rf_rr(struct mt7601u_dev *dev, u8 bank, u8 offset) { int ret = -ETIMEDOUT; u32 val; if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) || WARN_ON(offset > 63)) return -EINVAL; if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) return 0xff; mutex_lock(&dev->reg_atomic_mutex); if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) goto out; mt7601u_wr(dev, MT_RF_CSR_CFG, FIELD_PREP(MT_RF_CSR_CFG_REG_BANK, bank) | FIELD_PREP(MT_RF_CSR_CFG_REG_ID, offset) | MT_RF_CSR_CFG_KICK); if (!mt76_poll(dev, MT_RF_CSR_CFG, MT_RF_CSR_CFG_KICK, 0, 100)) goto out; val = mt7601u_rr(dev, MT_RF_CSR_CFG); if (FIELD_GET(MT_RF_CSR_CFG_REG_ID, val) == offset && FIELD_GET(MT_RF_CSR_CFG_REG_BANK, val) == bank) { ret = FIELD_GET(MT_RF_CSR_CFG_DATA, val); trace_rf_read(dev, bank, offset, ret); } out: mutex_unlock(&dev->reg_atomic_mutex); if (ret < 0) dev_err(dev->dev, "Error: RF read %02hhx:%02hhx failed:%d!!\n", bank, offset, ret); return ret; } static int mt7601u_rf_rmw(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask, u8 val) { int ret; ret = mt7601u_rf_rr(dev, bank, offset); if (ret < 0) return ret; val |= ret & ~mask; ret = mt7601u_rf_wr(dev, bank, offset, val); if (ret) return ret; return val; } static int mt7601u_rf_set(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 val) { return mt7601u_rf_rmw(dev, bank, offset, 0, val); } static int mt7601u_rf_clear(struct mt7601u_dev *dev, u8 bank, u8 offset, u8 mask) { return mt7601u_rf_rmw(dev, bank, offset, mask, 0); } static void mt7601u_bbp_wr(struct mt7601u_dev *dev, u8 offset, u8 val) { if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state)) || test_bit(MT7601U_STATE_REMOVED, &dev->state)) return; mutex_lock(&dev->reg_atomic_mutex); if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) { dev_err(dev->dev, "Error: BBP write %02hhx failed!!\n", offset); goto out; } mt7601u_wr(dev, MT_BBP_CSR_CFG, FIELD_PREP(MT_BBP_CSR_CFG_VAL, val) | FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) | MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY); trace_bbp_write(dev, offset, val); out: mutex_unlock(&dev->reg_atomic_mutex); } static int mt7601u_bbp_rr(struct mt7601u_dev *dev, u8 offset) { u32 val; int ret = -ETIMEDOUT; if (WARN_ON(!test_bit(MT7601U_STATE_WLAN_RUNNING, &dev->state))) return -EINVAL; if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) return 0xff; 
mutex_lock(&dev->reg_atomic_mutex); if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) goto out; mt7601u_wr(dev, MT_BBP_CSR_CFG, FIELD_PREP(MT_BBP_CSR_CFG_REG_NUM, offset) | MT_BBP_CSR_CFG_RW_MODE | MT_BBP_CSR_CFG_BUSY | MT_BBP_CSR_CFG_READ); if (!mt76_poll(dev, MT_BBP_CSR_CFG, MT_BBP_CSR_CFG_BUSY, 0, 1000)) goto out; val = mt7601u_rr(dev, MT_BBP_CSR_CFG); if (FIELD_GET(MT_BBP_CSR_CFG_REG_NUM, val) == offset) { ret = FIELD_GET(MT_BBP_CSR_CFG_VAL, val); trace_bbp_read(dev, offset, ret); } out: mutex_unlock(&dev->reg_atomic_mutex); if (ret < 0) dev_err(dev->dev, "Error: BBP read %02hhx failed:%d!!\n", offset, ret); return ret; } static int mt7601u_bbp_rmw(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val) { int ret; ret = mt7601u_bbp_rr(dev, offset); if (ret < 0) return ret; val |= ret & ~mask; mt7601u_bbp_wr(dev, offset, val); return val; } static u8 mt7601u_bbp_rmc(struct mt7601u_dev *dev, u8 offset, u8 mask, u8 val) { int ret; ret = mt7601u_bbp_rr(dev, offset); if (ret < 0) return ret; val |= ret & ~mask; if (ret != val) mt7601u_bbp_wr(dev, offset, val); return val; } int mt7601u_wait_bbp_ready(struct mt7601u_dev *dev) { int i = 20; u8 val; do { val = mt7601u_bbp_rr(dev, MT_BBP_REG_VERSION); if (val && val != 0xff) break; } while (--i); if (!i) { dev_err(dev->dev, "Error: BBP is not ready\n"); return -EIO; } return 0; } u32 mt7601u_bbp_set_ctrlch(struct mt7601u_dev *dev, bool below) { return mt7601u_bbp_rmc(dev, 3, 0x20, below ? 0x20 : 0); } int mt7601u_phy_get_rssi(struct mt7601u_dev *dev, struct mt7601u_rxwi *rxwi, u16 rate) { static const s8 lna[2][2][3] = { /* main LNA */ { /* bw20 */ { -2, 15, 33 }, /* bw40 */ { 0, 16, 34 } }, /* aux LNA */ { /* bw20 */ { -2, 15, 33 }, /* bw40 */ { -2, 16, 34 } } }; int bw = FIELD_GET(MT_RXWI_RATE_BW, rate); int aux_lna = FIELD_GET(MT_RXWI_ANT_AUX_LNA, rxwi->ant); int lna_id = FIELD_GET(MT_RXWI_GAIN_RSSI_LNA_ID, rxwi->gain); int val; if (lna_id) /* LNA id can be 0, 2, 3. 
*/ lna_id--; val = 8; val -= lna[aux_lna][bw][lna_id]; val -= FIELD_GET(MT_RXWI_GAIN_RSSI_VAL, rxwi->gain); val -= dev->ee->lna_gain; val -= dev->ee->rssi_offset[0]; return val; } static void mt7601u_vco_cal(struct mt7601u_dev *dev) { mt7601u_rf_wr(dev, 0, 4, 0x0a); mt7601u_rf_wr(dev, 0, 5, 0x20); mt7601u_rf_set(dev, 0, 4, BIT(7)); msleep(2); } static int mt7601u_set_bw_filter(struct mt7601u_dev *dev, bool cal) { u32 filter = 0; int ret; if (!cal) filter |= 0x10000; if (dev->bw != MT_BW_20) filter |= 0x00100; /* TX */ ret = mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter | 1); if (ret) return ret; /* RX */ return mt7601u_mcu_calibrate(dev, MCU_CAL_BW, filter); } static int mt7601u_load_bbp_temp_table_bw(struct mt7601u_dev *dev) { const struct reg_table *t; if (WARN_ON(dev->temp_mode > MT_TEMP_MODE_LOW)) return -EINVAL; t = &bbp_mode_table[dev->temp_mode][dev->bw]; return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t->regs, t->n); } static int mt7601u_bbp_temp(struct mt7601u_dev *dev, int mode, const char *name) { const struct reg_table *t; int ret; if (dev->temp_mode == mode) return 0; dev->temp_mode = mode; trace_temp_mode(dev, mode); t = bbp_mode_table[dev->temp_mode]; ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t[2].regs, t[2].n); if (ret) return ret; return mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, t[dev->bw].regs, t[dev->bw].n); } static void mt7601u_apply_ch14_fixup(struct mt7601u_dev *dev, int hw_chan) { struct mt7601u_rate_power *t = &dev->ee->power_rate_table; if (hw_chan != 14 || dev->bw != MT_BW_20) { mt7601u_bbp_rmw(dev, 4, 0x20, 0); mt7601u_bbp_wr(dev, 178, 0xff); t->cck[0].bw20 = dev->ee->real_cck_bw20[0]; t->cck[1].bw20 = dev->ee->real_cck_bw20[1]; } else { /* Apply CH14 OBW fixup */ mt7601u_bbp_wr(dev, 4, 0x60); mt7601u_bbp_wr(dev, 178, 0); /* Note: vendor code is buggy here for negative values */ t->cck[0].bw20 = dev->ee->real_cck_bw20[0] - 2; t->cck[1].bw20 = dev->ee->real_cck_bw20[1] - 2; } } static int __mt7601u_phy_set_channel(struct mt7601u_dev *dev, struct cfg80211_chan_def *chandef) { #define FREQ_PLAN_REGS 4 static const u8 freq_plan[14][FREQ_PLAN_REGS] = { { 0x99, 0x99, 0x09, 0x50 }, { 0x46, 0x44, 0x0a, 0x50 }, { 0xec, 0xee, 0x0a, 0x50 }, { 0x99, 0x99, 0x0b, 0x50 }, { 0x46, 0x44, 0x08, 0x51 }, { 0xec, 0xee, 0x08, 0x51 }, { 0x99, 0x99, 0x09, 0x51 }, { 0x46, 0x44, 0x0a, 0x51 }, { 0xec, 0xee, 0x0a, 0x51 }, { 0x99, 0x99, 0x0b, 0x51 }, { 0x46, 0x44, 0x08, 0x52 }, { 0xec, 0xee, 0x08, 0x52 }, { 0x99, 0x99, 0x09, 0x52 }, { 0x33, 0x33, 0x0b, 0x52 }, }; struct mt76_reg_pair channel_freq_plan[FREQ_PLAN_REGS] = { { 17, 0 }, { 18, 0 }, { 19, 0 }, { 20, 0 }, }; struct mt76_reg_pair bbp_settings[3] = { { 62, 0x37 - dev->ee->lna_gain }, { 63, 0x37 - dev->ee->lna_gain }, { 64, 0x37 - dev->ee->lna_gain }, }; struct ieee80211_channel *chan = chandef->chan; enum nl80211_channel_type chan_type = cfg80211_get_chandef_type(chandef); struct mt7601u_rate_power *t = &dev->ee->power_rate_table; int chan_idx; bool chan_ext_below; u8 bw; int i, ret; bw = MT_BW_20; chan_ext_below = (chan_type == NL80211_CHAN_HT40MINUS); chan_idx = chan->hw_value - 1; if (chandef->width == NL80211_CHAN_WIDTH_40) { bw = MT_BW_40; if (chan_idx > 1 && chan_type == NL80211_CHAN_HT40MINUS) chan_idx -= 2; else if (chan_idx < 12 && chan_type == NL80211_CHAN_HT40PLUS) chan_idx += 2; else dev_err(dev->dev, "Error: invalid 40MHz channel!!\n"); } if (bw != dev->bw || chan_ext_below != dev->chan_ext_below) { dev_dbg(dev->dev, "Info: switching HT mode bw:%d below:%d\n", bw, chan_ext_below); 
mt7601u_bbp_set_bw(dev, bw); mt7601u_bbp_set_ctrlch(dev, chan_ext_below); mt7601u_mac_set_ctrlch(dev, chan_ext_below); dev->chan_ext_below = chan_ext_below; } for (i = 0; i < FREQ_PLAN_REGS; i++) channel_freq_plan[i].value = freq_plan[chan_idx][i]; ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_RF, channel_freq_plan, FREQ_PLAN_REGS); if (ret) return ret; mt7601u_rmw(dev, MT_TX_ALC_CFG_0, 0x3f3f, dev->ee->chan_pwr[chan_idx] & 0x3f); ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, bbp_settings, ARRAY_SIZE(bbp_settings)); if (ret) return ret; mt7601u_vco_cal(dev); mt7601u_bbp_set_bw(dev, bw); ret = mt7601u_set_bw_filter(dev, false); if (ret) return ret; mt7601u_apply_ch14_fixup(dev, chan->hw_value); mt7601u_wr(dev, MT_TX_PWR_CFG_0, int_to_s6(t->ofdm[1].bw20) << 24 | int_to_s6(t->ofdm[0].bw20) << 16 | int_to_s6(t->cck[1].bw20) << 8 | int_to_s6(t->cck[0].bw20)); if (test_bit(MT7601U_STATE_SCANNING, &dev->state)) mt7601u_agc_reset(dev); dev->chandef = *chandef; return 0; } int mt7601u_phy_set_channel(struct mt7601u_dev *dev, struct cfg80211_chan_def *chandef) { int ret; cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->freq_cal.work); mutex_lock(&dev->hw_atomic_mutex); ret = __mt7601u_phy_set_channel(dev, chandef); mutex_unlock(&dev->hw_atomic_mutex); if (ret) return ret; if (test_bit(MT7601U_STATE_SCANNING, &dev->state)) return 0; ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, MT_CALIBRATE_INTERVAL); if (dev->freq_cal.enabled) ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, MT_FREQ_CAL_INIT_DELAY); return 0; } #define BBP_R47_FLAG GENMASK(2, 0) #define BBP_R47_F_TSSI 0 #define BBP_R47_F_PKT_T 1 #define BBP_R47_F_TX_RATE 2 #define BBP_R47_F_TEMP 4 /** * mt7601u_bbp_r47_get - read value through BBP R47/R49 pair * @dev: pointer to adapter structure * @reg: value of BBP R47 before the operation * @flag: one of the BBP_R47_F_* flags * * Convenience helper for reading values through BBP R47/R49 pair. * Takes old value of BBP R47 as @reg, because callers usually have it * cached already. * * Return: value of BBP R49. */ static u8 mt7601u_bbp_r47_get(struct mt7601u_dev *dev, u8 reg, u8 flag) { flag |= reg & ~BBP_R47_FLAG; mt7601u_bbp_wr(dev, 47, flag); usleep_range(500, 700); return mt7601u_bbp_rr(dev, 49); } static s8 mt7601u_read_bootup_temp(struct mt7601u_dev *dev) { u8 bbp_val, temp; u32 rf_bp, rf_set; int i; rf_set = mt7601u_rr(dev, MT_RF_SETTING_0); rf_bp = mt7601u_rr(dev, MT_RF_BYPASS_0); mt7601u_wr(dev, MT_RF_BYPASS_0, 0); mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000010); mt7601u_wr(dev, MT_RF_BYPASS_0, 0x00000010); bbp_val = mt7601u_bbp_rmw(dev, 47, 0, 0x10); mt7601u_bbp_wr(dev, 22, 0x40); for (i = 100; i && (bbp_val & 0x10); i--) bbp_val = mt7601u_bbp_rr(dev, 47); temp = mt7601u_bbp_r47_get(dev, bbp_val, BBP_R47_F_TEMP); mt7601u_bbp_wr(dev, 22, 0); bbp_val = mt7601u_bbp_rr(dev, 21); bbp_val |= 0x02; mt7601u_bbp_wr(dev, 21, bbp_val); bbp_val &= ~0x02; mt7601u_bbp_wr(dev, 21, bbp_val); mt7601u_wr(dev, MT_RF_BYPASS_0, 0); mt7601u_wr(dev, MT_RF_SETTING_0, rf_set); mt7601u_wr(dev, MT_RF_BYPASS_0, rf_bp); trace_read_temp(dev, temp); return temp; } static s8 mt7601u_read_temp(struct mt7601u_dev *dev) { int i; u8 val; s8 temp; val = mt7601u_bbp_rmw(dev, 47, 0x7f, 0x10); /* Note: this rarely succeeds, temp can change even if it fails. 
*/ for (i = 100; i && (val & 0x10); i--) val = mt7601u_bbp_rr(dev, 47); temp = mt7601u_bbp_r47_get(dev, val, BBP_R47_F_TEMP); trace_read_temp(dev, temp); return temp; } static void mt7601u_rxdc_cal(struct mt7601u_dev *dev) { static const struct mt76_reg_pair intro[] = { { 158, 0x8d }, { 159, 0xfc }, { 158, 0x8c }, { 159, 0x4c }, }, outro[] = { { 158, 0x8d }, { 159, 0xe0 }, }; u32 mac_ctrl; int i, ret; mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL); mt7601u_wr(dev, MT_MAC_SYS_CTRL, MT_MAC_SYS_CTRL_ENABLE_RX); ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, intro, ARRAY_SIZE(intro)); if (ret) dev_err(dev->dev, "%s intro failed:%d\n", __func__, ret); for (i = 20; i; i--) { usleep_range(300, 500); mt7601u_bbp_wr(dev, 158, 0x8c); if (mt7601u_bbp_rr(dev, 159) == 0x0c) break; } if (!i) dev_err(dev->dev, "%s timed out\n", __func__); mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0); ret = mt7601u_write_reg_pairs(dev, MT_MCU_MEMMAP_BBP, outro, ARRAY_SIZE(outro)); if (ret) dev_err(dev->dev, "%s outro failed:%d\n", __func__, ret); mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl); } void mt7601u_phy_recalibrate_after_assoc(struct mt7601u_dev *dev) { if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) return; mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->curr_temp); mt7601u_rxdc_cal(dev); } /* Note: function copied from vendor driver */ static s16 lin2dBd(u16 linear) { short exp = 0; unsigned int mantisa; int app, dBd; if (WARN_ON(!linear)) return -10000; mantisa = linear; exp = fls(mantisa) - 16; if (exp > 0) mantisa >>= exp; else mantisa <<= abs(exp); if (mantisa <= 0xb800) app = (mantisa + (mantisa >> 3) + (mantisa >> 4) - 0x9600); else app = (mantisa - (mantisa >> 3) - (mantisa >> 6) - 0x5a00); if (app < 0) app = 0; dBd = ((15 + exp) << 15) + app; dBd = (dBd << 2) + (dBd << 1) + (dBd >> 6) + (dBd >> 7); dBd = (dBd >> 10); return dBd; } static void mt7601u_set_initial_tssi(struct mt7601u_dev *dev, s16 tssi_db, s16 tssi_hvga_db) { struct tssi_data *d = &dev->ee->tssi_data; int init_offset; init_offset = -((tssi_db * d->slope + d->offset[1]) / 4096) + 10; mt76_rmw(dev, MT_TX_ALC_CFG_1, MT_TX_ALC_CFG_1_TEMP_COMP, int_to_s6(init_offset) & MT_TX_ALC_CFG_1_TEMP_COMP); } static void mt7601u_tssi_dc_gain_cal(struct mt7601u_dev *dev) { u8 rf_vga, rf_mixer, bbp_r47; int i, j; s8 res[4]; s16 tssi_init_db, tssi_init_hvga_db; mt7601u_wr(dev, MT_RF_SETTING_0, 0x00000030); mt7601u_wr(dev, MT_RF_BYPASS_0, 0x000c0030); mt7601u_wr(dev, MT_MAC_SYS_CTRL, 0); mt7601u_bbp_wr(dev, 58, 0); mt7601u_bbp_wr(dev, 241, 0x2); mt7601u_bbp_wr(dev, 23, 0x8); bbp_r47 = mt7601u_bbp_rr(dev, 47); /* Set VGA gain */ rf_vga = mt7601u_rf_rr(dev, 5, 3); mt7601u_rf_wr(dev, 5, 3, 8); /* Mixer disable */ rf_mixer = mt7601u_rf_rr(dev, 4, 39); mt7601u_rf_wr(dev, 4, 39, 0); for (i = 0; i < 4; i++) { mt7601u_rf_wr(dev, 4, 39, (i & 1) ? rf_mixer : 0); mt7601u_bbp_wr(dev, 23, (i < 2) ? 0x08 : 0x02); mt7601u_rf_wr(dev, 5, 3, (i < 2) ? 0x08 : 0x11); /* BBP TSSI initial and soft reset */ mt7601u_bbp_wr(dev, 22, 0); mt7601u_bbp_wr(dev, 244, 0); mt7601u_bbp_wr(dev, 21, 1); udelay(1); mt7601u_bbp_wr(dev, 21, 0); /* TSSI measurement */ mt7601u_bbp_wr(dev, 47, 0x50); mt7601u_bbp_wr(dev, (i & 1) ? 244 : 22, (i & 1) ? 
0x31 : 0x40); for (j = 20; j; j--) if (!(mt7601u_bbp_rr(dev, 47) & 0x10)) break; if (!j) dev_err(dev->dev, "%s timed out\n", __func__); /* TSSI read */ mt7601u_bbp_wr(dev, 47, 0x40); res[i] = mt7601u_bbp_rr(dev, 49); } tssi_init_db = lin2dBd((short)res[1] - res[0]); tssi_init_hvga_db = lin2dBd(((short)res[3] - res[2]) * 4); dev->tssi_init = res[0]; dev->tssi_init_hvga = res[2]; dev->tssi_init_hvga_offset_db = tssi_init_hvga_db - tssi_init_db; dev_dbg(dev->dev, "TSSI_init:%hhx db:%hx hvga:%hhx hvga_db:%hx off_db:%hx\n", dev->tssi_init, tssi_init_db, dev->tssi_init_hvga, tssi_init_hvga_db, dev->tssi_init_hvga_offset_db); mt7601u_bbp_wr(dev, 22, 0); mt7601u_bbp_wr(dev, 244, 0); mt7601u_bbp_wr(dev, 21, 1); udelay(1); mt7601u_bbp_wr(dev, 21, 0); mt7601u_wr(dev, MT_RF_BYPASS_0, 0); mt7601u_wr(dev, MT_RF_SETTING_0, 0); mt7601u_rf_wr(dev, 5, 3, rf_vga); mt7601u_rf_wr(dev, 4, 39, rf_mixer); mt7601u_bbp_wr(dev, 47, bbp_r47); mt7601u_set_initial_tssi(dev, tssi_init_db, tssi_init_hvga_db); } static int mt7601u_temp_comp(struct mt7601u_dev *dev, bool on) { int ret, temp, hi_temp = 400, lo_temp = -200; temp = (dev->raw_temp - dev->ee->ref_temp) * MT_EE_TEMPERATURE_SLOPE; dev->curr_temp = temp; /* DPD Calibration */ if (temp - dev->dpd_temp > 450 || temp - dev->dpd_temp < -450) { dev->dpd_temp = temp; ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp); if (ret) return ret; mt7601u_vco_cal(dev); dev_dbg(dev->dev, "Recalibrate DPD\n"); } /* PLL Lock Protect */ if (temp < -50 && !dev->pll_lock_protect) { /* < 20C */ dev->pll_lock_protect = true; mt7601u_rf_wr(dev, 4, 4, 6); mt7601u_rf_clear(dev, 4, 10, 0x30); dev_dbg(dev->dev, "PLL lock protect on - too cold\n"); } else if (temp > 50 && dev->pll_lock_protect) { /* > 30C */ dev->pll_lock_protect = false; mt7601u_rf_wr(dev, 4, 4, 0); mt7601u_rf_rmw(dev, 4, 10, 0x30, 0x10); dev_dbg(dev->dev, "PLL lock protect off\n"); } if (on) { hi_temp -= 50; lo_temp -= 50; } /* BBP CR for H, L, N temperature */ if (temp > hi_temp) return mt7601u_bbp_temp(dev, MT_TEMP_MODE_HIGH, "high"); else if (temp > lo_temp) return mt7601u_bbp_temp(dev, MT_TEMP_MODE_NORMAL, "normal"); else return mt7601u_bbp_temp(dev, MT_TEMP_MODE_LOW, "low"); } /* Note: this is used only with TSSI, we can just use trgt_pwr from eeprom. 
*/ static int mt7601u_current_tx_power(struct mt7601u_dev *dev) { return dev->ee->chan_pwr[dev->chandef.chan->hw_value - 1]; } static bool mt7601u_use_hvga(struct mt7601u_dev *dev) { return !(mt7601u_current_tx_power(dev) > 20); } static s16 mt7601u_phy_rf_pa_mode_val(struct mt7601u_dev *dev, int phy_mode, int tx_rate) { static const s16 decode_tb[] = { 0, 8847, -5734, -5734 }; u32 reg; switch (phy_mode) { case MT_PHY_TYPE_OFDM: tx_rate += 4; fallthrough; case MT_PHY_TYPE_CCK: reg = dev->rf_pa_mode[0]; break; default: reg = dev->rf_pa_mode[1]; break; } return decode_tb[(reg >> (tx_rate * 2)) & 0x3]; } static struct mt7601u_tssi_params mt7601u_tssi_params_get(struct mt7601u_dev *dev) { static const u8 ofdm_pkt2rate[8] = { 6, 4, 2, 0, 7, 5, 3, 1 }; static const int static_power[4] = { 0, -49152, -98304, 49152 }; struct mt7601u_tssi_params p; u8 bbp_r47, pkt_type, tx_rate; struct power_per_rate *rate_table; bbp_r47 = mt7601u_bbp_rr(dev, 47); p.tssi0 = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TSSI); dev->raw_temp = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TEMP); pkt_type = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_PKT_T); p.trgt_power = mt7601u_current_tx_power(dev); switch (pkt_type & 0x03) { case MT_PHY_TYPE_CCK: tx_rate = (pkt_type >> 4) & 0x03; rate_table = dev->ee->power_rate_table.cck; break; case MT_PHY_TYPE_OFDM: tx_rate = ofdm_pkt2rate[(pkt_type >> 4) & 0x07]; rate_table = dev->ee->power_rate_table.ofdm; break; default: tx_rate = mt7601u_bbp_r47_get(dev, bbp_r47, BBP_R47_F_TX_RATE); tx_rate &= 0x7f; rate_table = dev->ee->power_rate_table.ht; break; } if (dev->bw == MT_BW_20) p.trgt_power += rate_table[tx_rate / 2].bw20; else p.trgt_power += rate_table[tx_rate / 2].bw40; p.trgt_power <<= 12; dev_dbg(dev->dev, "tx_rate:%02hhx pwr:%08x\n", tx_rate, p.trgt_power); p.trgt_power += mt7601u_phy_rf_pa_mode_val(dev, pkt_type & 0x03, tx_rate); /* Channel 14, cck, bw20 */ if ((pkt_type & 0x03) == MT_PHY_TYPE_CCK) { if (mt7601u_bbp_rr(dev, 4) & 0x20) p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 18022 : 9830; else p.trgt_power += mt7601u_bbp_rr(dev, 178) ? 819 : 24576; } p.trgt_power += static_power[mt7601u_bbp_rr(dev, 1) & 0x03]; p.trgt_power += dev->ee->tssi_data.tx0_delta_offset; dev_dbg(dev->dev, "tssi:%02hhx t_power:%08x temp:%02hhx pkt_type:%02hhx\n", p.tssi0, p.trgt_power, dev->raw_temp, pkt_type); return p; } static bool mt7601u_tssi_read_ready(struct mt7601u_dev *dev) { return !(mt7601u_bbp_rr(dev, 47) & 0x10); } static int mt7601u_tssi_cal(struct mt7601u_dev *dev) { struct mt7601u_tssi_params params; int curr_pwr, diff_pwr; char tssi_offset; s8 tssi_init; s16 tssi_m_dc, tssi_db; bool hvga; u32 val; if (!dev->ee->tssi_enabled) return 0; hvga = mt7601u_use_hvga(dev); if (!dev->tssi_read_trig) return mt7601u_mcu_tssi_read_kick(dev, hvga); if (!mt7601u_tssi_read_ready(dev)) return 0; params = mt7601u_tssi_params_get(dev); tssi_init = (hvga ? 
dev->tssi_init_hvga : dev->tssi_init); tssi_m_dc = params.tssi0 - tssi_init; tssi_db = lin2dBd(tssi_m_dc); dev_dbg(dev->dev, "tssi dc:%04hx db:%04hx hvga:%d\n", tssi_m_dc, tssi_db, hvga); if (dev->chandef.chan->hw_value < 5) tssi_offset = dev->ee->tssi_data.offset[0]; else if (dev->chandef.chan->hw_value < 9) tssi_offset = dev->ee->tssi_data.offset[1]; else tssi_offset = dev->ee->tssi_data.offset[2]; if (hvga) tssi_db -= dev->tssi_init_hvga_offset_db; curr_pwr = tssi_db * dev->ee->tssi_data.slope + (tssi_offset << 9); diff_pwr = params.trgt_power - curr_pwr; dev_dbg(dev->dev, "Power curr:%08x diff:%08x\n", curr_pwr, diff_pwr); if (params.tssi0 > 126 && diff_pwr > 0) { dev_err(dev->dev, "Error: TSSI upper saturation\n"); diff_pwr = 0; } if (params.tssi0 - tssi_init < 1 && diff_pwr < 0) { dev_err(dev->dev, "Error: TSSI lower saturation\n"); diff_pwr = 0; } if ((dev->prev_pwr_diff ^ diff_pwr) < 0 && abs(diff_pwr) < 4096 && (abs(diff_pwr) > abs(dev->prev_pwr_diff) || (diff_pwr > 0 && diff_pwr == -dev->prev_pwr_diff))) diff_pwr = 0; else dev->prev_pwr_diff = diff_pwr; diff_pwr += (diff_pwr > 0) ? 2048 : -2048; diff_pwr /= 4096; dev_dbg(dev->dev, "final diff: %08x\n", diff_pwr); val = mt7601u_rr(dev, MT_TX_ALC_CFG_1); curr_pwr = s6_to_int(FIELD_GET(MT_TX_ALC_CFG_1_TEMP_COMP, val)); diff_pwr += curr_pwr; val = (val & ~MT_TX_ALC_CFG_1_TEMP_COMP) | int_to_s6(diff_pwr); mt7601u_wr(dev, MT_TX_ALC_CFG_1, val); return mt7601u_mcu_tssi_read_kick(dev, hvga); } static u8 mt7601u_agc_default(struct mt7601u_dev *dev) { return (dev->ee->lna_gain - 8) * 2 + 0x34; } static void mt7601u_agc_reset(struct mt7601u_dev *dev) { u8 agc = mt7601u_agc_default(dev); mt7601u_bbp_wr(dev, 66, agc); } void mt7601u_agc_save(struct mt7601u_dev *dev) { dev->agc_save = mt7601u_bbp_rr(dev, 66); } void mt7601u_agc_restore(struct mt7601u_dev *dev) { mt7601u_bbp_wr(dev, 66, dev->agc_save); } static void mt7601u_agc_tune(struct mt7601u_dev *dev) { u8 val = mt7601u_agc_default(dev); long avg_rssi; if (test_bit(MT7601U_STATE_SCANNING, &dev->state)) return; /* Note: only in STA mode and not dozing; perhaps do this only if * there is enough rssi updates since last run? * Rssi updates are only on beacons and U2M so should work... */ spin_lock_bh(&dev->con_mon_lock); avg_rssi = ewma_rssi_read(&dev->avg_rssi); spin_unlock_bh(&dev->con_mon_lock); if (avg_rssi == 0) return; avg_rssi = -avg_rssi; if (avg_rssi <= -70) val -= 0x20; else if (avg_rssi <= -60) val -= 0x10; if (val != mt7601u_bbp_rr(dev, 66)) mt7601u_bbp_wr(dev, 66, val); /* TODO: also if lost a lot of beacons try resetting * (see RTMPSetAGCInitValue() call in mlme.c). */ } static void mt7601u_phy_calibrate(struct work_struct *work) { struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, cal_work.work); mt7601u_agc_tune(dev); mt7601u_tssi_cal(dev); /* If TSSI calibration was run it already updated temperature. 
*/ if (!dev->ee->tssi_enabled) dev->raw_temp = mt7601u_read_temp(dev); mt7601u_temp_comp(dev, true); /* TODO: find right value for @on */ ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, MT_CALIBRATE_INTERVAL); } static unsigned long __mt7601u_phy_freq_cal(struct mt7601u_dev *dev, s8 last_offset, u8 phy_mode) { u8 activate_threshold, deactivate_threshold; trace_freq_cal_offset(dev, phy_mode, last_offset); /* No beacons received - reschedule soon */ if (last_offset == MT_FREQ_OFFSET_INVALID) return MT_FREQ_CAL_ADJ_INTERVAL; switch (phy_mode) { case MT_PHY_TYPE_CCK: activate_threshold = 19; deactivate_threshold = 5; break; case MT_PHY_TYPE_OFDM: activate_threshold = 102; deactivate_threshold = 32; break; case MT_PHY_TYPE_HT: case MT_PHY_TYPE_HT_GF: activate_threshold = 82; deactivate_threshold = 20; break; default: WARN_ON(1); return MT_FREQ_CAL_CHECK_INTERVAL; } if (abs(last_offset) >= activate_threshold) dev->freq_cal.adjusting = true; else if (abs(last_offset) <= deactivate_threshold) dev->freq_cal.adjusting = false; if (!dev->freq_cal.adjusting) return MT_FREQ_CAL_CHECK_INTERVAL; if (last_offset > deactivate_threshold) { if (dev->freq_cal.freq > 0) dev->freq_cal.freq--; else dev->freq_cal.adjusting = false; } else if (last_offset < -deactivate_threshold) { if (dev->freq_cal.freq < 0xbf) dev->freq_cal.freq++; else dev->freq_cal.adjusting = false; } trace_freq_cal_adjust(dev, dev->freq_cal.freq); mt7601u_rf_wr(dev, 0, 12, dev->freq_cal.freq); mt7601u_vco_cal(dev); return dev->freq_cal.adjusting ? MT_FREQ_CAL_ADJ_INTERVAL : MT_FREQ_CAL_CHECK_INTERVAL; } static void mt7601u_phy_freq_cal(struct work_struct *work) { struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, freq_cal.work.work); s8 last_offset; u8 phy_mode; unsigned long delay; spin_lock_bh(&dev->con_mon_lock); last_offset = dev->bcn_freq_off; phy_mode = dev->bcn_phy_mode; spin_unlock_bh(&dev->con_mon_lock); delay = __mt7601u_phy_freq_cal(dev, last_offset, phy_mode); ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, delay); spin_lock_bh(&dev->con_mon_lock); dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID; spin_unlock_bh(&dev->con_mon_lock); } void mt7601u_phy_con_cal_onoff(struct mt7601u_dev *dev, struct ieee80211_bss_conf *info) { struct ieee80211_vif *vif = container_of(info, struct ieee80211_vif, bss_conf); if (!vif->cfg.assoc) cancel_delayed_work_sync(&dev->freq_cal.work); /* Start/stop collecting beacon data */ spin_lock_bh(&dev->con_mon_lock); ether_addr_copy(dev->ap_bssid, info->bssid); ewma_rssi_init(&dev->avg_rssi); dev->bcn_freq_off = MT_FREQ_OFFSET_INVALID; spin_unlock_bh(&dev->con_mon_lock); dev->freq_cal.freq = dev->ee->rf_freq_off; dev->freq_cal.enabled = vif->cfg.assoc; dev->freq_cal.adjusting = false; if (vif->cfg.assoc) ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, MT_FREQ_CAL_INIT_DELAY); } static int mt7601u_init_cal(struct mt7601u_dev *dev) { u32 mac_ctrl; int ret; dev->raw_temp = mt7601u_read_bootup_temp(dev); dev->curr_temp = (dev->raw_temp - dev->ee->ref_temp) * MT_EE_TEMPERATURE_SLOPE; dev->dpd_temp = dev->curr_temp; mac_ctrl = mt7601u_rr(dev, MT_MAC_SYS_CTRL); ret = mt7601u_mcu_calibrate(dev, MCU_CAL_R, 0); if (ret) return ret; ret = mt7601u_rf_rr(dev, 0, 4); if (ret < 0) return ret; ret |= 0x80; ret = mt7601u_rf_wr(dev, 0, 4, ret); if (ret) return ret; msleep(2); ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXDCOC, 0); if (ret) return ret; mt7601u_rxdc_cal(dev); ret = mt7601u_set_bw_filter(dev, true); if (ret) return ret; ret = mt7601u_mcu_calibrate(dev, MCU_CAL_LOFT, 0); if 
(ret) return ret; ret = mt7601u_mcu_calibrate(dev, MCU_CAL_TXIQ, 0); if (ret) return ret; ret = mt7601u_mcu_calibrate(dev, MCU_CAL_RXIQ, 0); if (ret) return ret; ret = mt7601u_mcu_calibrate(dev, MCU_CAL_DPD, dev->dpd_temp); if (ret) return ret; mt7601u_rxdc_cal(dev); mt7601u_tssi_dc_gain_cal(dev); mt7601u_wr(dev, MT_MAC_SYS_CTRL, mac_ctrl); mt7601u_temp_comp(dev, true); return 0; } int mt7601u_bbp_set_bw(struct mt7601u_dev *dev, int bw) { u32 val, old; if (bw == dev->bw) { /* Vendor driver does the rmc even when no change is needed. */ mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10); return 0; } dev->bw = bw; /* Stop MAC for the time of bw change */ old = mt7601u_rr(dev, MT_MAC_SYS_CTRL); val = old & ~(MT_MAC_SYS_CTRL_ENABLE_TX | MT_MAC_SYS_CTRL_ENABLE_RX); mt7601u_wr(dev, MT_MAC_SYS_CTRL, val); mt76_poll(dev, MT_MAC_STATUS, MT_MAC_STATUS_TX | MT_MAC_STATUS_RX, 0, 500000); mt7601u_bbp_rmc(dev, 4, 0x18, bw == MT_BW_20 ? 0 : 0x10); mt7601u_wr(dev, MT_MAC_SYS_CTRL, old); return mt7601u_load_bbp_temp_table_bw(dev); } /** * mt7601u_set_rx_path - set rx path in BBP * @dev: pointer to adapter structure * @path: rx path to set values are 0-based */ void mt7601u_set_rx_path(struct mt7601u_dev *dev, u8 path) { mt7601u_bbp_rmw(dev, 3, 0x18, path << 3); } /** * mt7601u_set_tx_dac - set which tx DAC to use * @dev: pointer to adapter structure * @dac: DAC index, values are 0-based */ void mt7601u_set_tx_dac(struct mt7601u_dev *dev, u8 dac) { mt7601u_bbp_rmc(dev, 1, 0x18, dac << 3); } int mt7601u_phy_init(struct mt7601u_dev *dev) { int ret; dev->rf_pa_mode[0] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG0); dev->rf_pa_mode[1] = mt7601u_rr(dev, MT_RF_PA_MODE_CFG1); ret = mt7601u_rf_wr(dev, 0, 12, dev->ee->rf_freq_off); if (ret) return ret; ret = mt7601u_write_reg_pairs(dev, 0, rf_central, ARRAY_SIZE(rf_central)); if (ret) return ret; ret = mt7601u_write_reg_pairs(dev, 0, rf_channel, ARRAY_SIZE(rf_channel)); if (ret) return ret; ret = mt7601u_write_reg_pairs(dev, 0, rf_vga, ARRAY_SIZE(rf_vga)); if (ret) return ret; ret = mt7601u_init_cal(dev); if (ret) return ret; dev->prev_pwr_diff = 100; INIT_DELAYED_WORK(&dev->cal_work, mt7601u_phy_calibrate); INIT_DELAYED_WORK(&dev->freq_cal.work, mt7601u_phy_freq_cal); return 0; }
linux-master
drivers/net/wireless/mediatek/mt7601u/phy.c
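The per-beacon frequency calibration in phy.c above is a small hysteresis controller: adjustment only starts once the reported offset crosses a per-PHY-mode activate threshold, then nudges RF bank 0, register 12 by one step per run until the offset falls back under the deactivate threshold. The sketch below replays that logic on a made-up offset trace using the CCK thresholds visible in __mt7601u_phy_freq_cal(), and it ignores the work rescheduling; the starting register value, the offset trace and the freq_cal_step() helper are assumptions for illustration only.

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define ACTIVATE_THRESH    19    /* CCK activate threshold (from the code above) */
#define DEACTIVATE_THRESH  5     /* CCK deactivate threshold (from the code above) */
#define FREQ_MAX           0xbf  /* register saturates here in the code above */

struct freq_cal {
    int freq;        /* shadow of RF bank 0, register 12 */
    bool adjusting;
};

static void freq_cal_step(struct freq_cal *fc, int last_offset)
{
    if (abs(last_offset) >= ACTIVATE_THRESH)
        fc->adjusting = true;
    else if (abs(last_offset) <= DEACTIVATE_THRESH)
        fc->adjusting = false;

    if (!fc->adjusting)
        return;

    /* One step per run, saturating at the register limits. */
    if (last_offset > DEACTIVATE_THRESH) {
        if (fc->freq > 0)
            fc->freq--;
        else
            fc->adjusting = false;
    } else if (last_offset < -DEACTIVATE_THRESH) {
        if (fc->freq < FREQ_MAX)
            fc->freq++;
        else
            fc->adjusting = false;
    }
}

int main(void)
{
    /* Hypothetical starting register value and beacon offset trace. */
    struct freq_cal fc = { .freq = 0x60, .adjusting = false };
    const int offsets[] = { 25, 22, 18, 12, 7, 4, -3 };

    for (unsigned int i = 0; i < sizeof(offsets) / sizeof(offsets[0]); i++) {
        freq_cal_step(&fc, offsets[i]);
        printf("offset %4d -> freq 0x%02x adjusting:%d\n",
               offsets[i], (unsigned)fc.freq, fc.adjusting);
    }
    return 0;
}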
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 Felix Fietkau <[email protected]> * Copyright (C) 2015 Jakub Kicinski <[email protected]> */ #include "mt7601u.h" #include "mac.h" #include <linux/etherdevice.h> static int mt7601u_start(struct ieee80211_hw *hw) { struct mt7601u_dev *dev = hw->priv; int ret; mutex_lock(&dev->mutex); ret = mt7601u_mac_start(dev); if (ret) goto out; ieee80211_queue_delayed_work(dev->hw, &dev->mac_work, MT_CALIBRATE_INTERVAL); ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, MT_CALIBRATE_INTERVAL); out: mutex_unlock(&dev->mutex); return ret; } static void mt7601u_stop(struct ieee80211_hw *hw) { struct mt7601u_dev *dev = hw->priv; mutex_lock(&dev->mutex); cancel_delayed_work_sync(&dev->cal_work); cancel_delayed_work_sync(&dev->mac_work); mt7601u_mac_stop(dev); mutex_unlock(&dev->mutex); } static int mt7601u_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt7601u_dev *dev = hw->priv; struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; unsigned int idx = 0; unsigned int wcid = GROUP_WCID(idx); /* Note: for AP do the AP-STA things mt76 does: * - beacon offsets * - do mac address tricks * - shift vif idx */ mvif->idx = idx; if (!ether_addr_equal(dev->macaddr, vif->addr)) mt7601u_set_macaddr(dev, vif->addr); if (dev->wcid_mask[wcid / BITS_PER_LONG] & BIT(wcid % BITS_PER_LONG)) return -ENOSPC; dev->wcid_mask[wcid / BITS_PER_LONG] |= BIT(wcid % BITS_PER_LONG); mvif->group_wcid.idx = wcid; mvif->group_wcid.hw_key_idx = -1; return 0; } static void mt7601u_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt7601u_dev *dev = hw->priv; struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; unsigned int wcid = mvif->group_wcid.idx; dev->wcid_mask[wcid / BITS_PER_LONG] &= ~BIT(wcid % BITS_PER_LONG); } static int mt7601u_config(struct ieee80211_hw *hw, u32 changed) { struct mt7601u_dev *dev = hw->priv; int ret = 0; mutex_lock(&dev->mutex); if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { ieee80211_stop_queues(hw); ret = mt7601u_phy_set_channel(dev, &hw->conf.chandef); ieee80211_wake_queues(hw); } mutex_unlock(&dev->mutex); return ret; } static void mt76_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct mt7601u_dev *dev = hw->priv; u32 flags = 0; #define MT76_FILTER(_flag, _hw) do { \ flags |= *total_flags & FIF_##_flag; \ dev->rxfilter &= ~(_hw); \ dev->rxfilter |= !(flags & FIF_##_flag) * (_hw); \ } while (0) mutex_lock(&dev->mutex); dev->rxfilter &= ~MT_RX_FILTR_CFG_OTHER_BSS; MT76_FILTER(OTHER_BSS, MT_RX_FILTR_CFG_PROMISC); MT76_FILTER(FCSFAIL, MT_RX_FILTR_CFG_CRC_ERR); MT76_FILTER(PLCPFAIL, MT_RX_FILTR_CFG_PHY_ERR); MT76_FILTER(CONTROL, MT_RX_FILTR_CFG_ACK | MT_RX_FILTR_CFG_CTS | MT_RX_FILTR_CFG_CFEND | MT_RX_FILTR_CFG_CFACK | MT_RX_FILTR_CFG_BA | MT_RX_FILTR_CFG_CTRL_RSV); MT76_FILTER(PSPOLL, MT_RX_FILTR_CFG_PSPOLL); *total_flags = flags; mt76_wr(dev, MT_RX_FILTR_CFG, dev->rxfilter); mutex_unlock(&dev->mutex); } static void mt7601u_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed) { struct mt7601u_dev *dev = hw->priv; mutex_lock(&dev->mutex); if (changed & BSS_CHANGED_ASSOC) mt7601u_phy_con_cal_onoff(dev, info); if (changed & BSS_CHANGED_BSSID) { mt7601u_addr_wr(dev, MT_MAC_BSSID_DW0, info->bssid); /* Note: this is a hack because beacon_int is not changed * on leave nor is any more appropriate event generated. * rt2x00 doesn't seem to be bothered though. 
*/ if (is_zero_ether_addr(info->bssid)) mt7601u_mac_config_tsf(dev, false, 0); } if (changed & BSS_CHANGED_BASIC_RATES) { mt7601u_wr(dev, MT_LEGACY_BASIC_RATE, info->basic_rates); mt7601u_wr(dev, MT_HT_FBK_CFG0, 0x65432100); mt7601u_wr(dev, MT_HT_FBK_CFG1, 0xedcba980); mt7601u_wr(dev, MT_LG_FBK_CFG0, 0xedcba988); mt7601u_wr(dev, MT_LG_FBK_CFG1, 0x00002100); } if (changed & BSS_CHANGED_BEACON_INT) mt7601u_mac_config_tsf(dev, true, info->beacon_int); if (changed & BSS_CHANGED_HT || changed & BSS_CHANGED_ERP_CTS_PROT) mt7601u_mac_set_protection(dev, info->use_cts_prot, info->ht_operation_mode); if (changed & BSS_CHANGED_ERP_PREAMBLE) mt7601u_mac_set_short_preamble(dev, info->use_short_preamble); if (changed & BSS_CHANGED_ERP_SLOT) { int slottime = info->use_short_slot ? 9 : 20; mt76_rmw_field(dev, MT_BKOFF_SLOT_CFG, MT_BKOFF_SLOT_CFG_SLOTTIME, slottime); } if (changed & BSS_CHANGED_ASSOC) mt7601u_phy_recalibrate_after_assoc(dev); mutex_unlock(&dev->mutex); } static int mt76_wcid_alloc(struct mt7601u_dev *dev) { int i, idx = 0; for (i = 0; i < ARRAY_SIZE(dev->wcid_mask); i++) { idx = ffs(~dev->wcid_mask[i]); if (!idx) continue; idx--; dev->wcid_mask[i] |= BIT(idx); break; } idx = i * BITS_PER_LONG + idx; if (idx > 119) return -1; return idx; } static int mt7601u_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mt7601u_dev *dev = hw->priv; struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; int ret = 0; int idx = 0; mutex_lock(&dev->mutex); idx = mt76_wcid_alloc(dev); if (idx < 0) { ret = -ENOSPC; goto out; } msta->wcid.idx = idx; msta->wcid.hw_key_idx = -1; mt7601u_mac_wcid_setup(dev, idx, mvif->idx, sta->addr); mt76_clear(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx)); rcu_assign_pointer(dev->wcid[idx], &msta->wcid); mt7601u_mac_set_ampdu_factor(dev); out: mutex_unlock(&dev->mutex); return ret; } static int mt7601u_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mt7601u_dev *dev = hw->priv; struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; int idx = msta->wcid.idx; mutex_lock(&dev->mutex); rcu_assign_pointer(dev->wcid[idx], NULL); mt76_set(dev, MT_WCID_DROP(idx), MT_WCID_DROP_MASK(idx)); dev->wcid_mask[idx / BITS_PER_LONG] &= ~BIT(idx % BITS_PER_LONG); mt7601u_mac_wcid_setup(dev, idx, 0, NULL); mt7601u_mac_set_ampdu_factor(dev); mutex_unlock(&dev->mutex); return 0; } static void mt7601u_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { } static void mt7601u_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr) { struct mt7601u_dev *dev = hw->priv; mt7601u_agc_save(dev); set_bit(MT7601U_STATE_SCANNING, &dev->state); } static void mt7601u_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mt7601u_dev *dev = hw->priv; mt7601u_agc_restore(dev); clear_bit(MT7601U_STATE_SCANNING, &dev->state); ieee80211_queue_delayed_work(dev->hw, &dev->cal_work, MT_CALIBRATE_INTERVAL); if (dev->freq_cal.enabled) ieee80211_queue_delayed_work(dev->hw, &dev->freq_cal.work, MT_FREQ_CAL_INIT_DELAY); } static int mt7601u_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct mt7601u_dev *dev = hw->priv; struct mt76_vif *mvif = (struct mt76_vif *) vif->drv_priv; struct mt76_sta *msta = sta ? 
(struct mt76_sta *) sta->drv_priv : NULL; struct mt76_wcid *wcid = msta ? &msta->wcid : &mvif->group_wcid; int idx = key->keyidx; int ret; /* fall back to sw encryption for unsupported ciphers */ switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: break; default: return -EOPNOTSUPP; } if (cmd == SET_KEY) { key->hw_key_idx = wcid->idx; wcid->hw_key_idx = idx; } else { if (idx == wcid->hw_key_idx) wcid->hw_key_idx = -1; key = NULL; } if (!msta) { if (key || wcid->hw_key_idx == idx) { ret = mt76_mac_wcid_set_key(dev, wcid->idx, key); if (ret) return ret; } return mt76_mac_shared_key_setup(dev, mvif->idx, idx, key); } return mt76_mac_wcid_set_key(dev, msta->wcid.idx, key); } static int mt7601u_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct mt7601u_dev *dev = hw->priv; mt76_rmw_field(dev, MT_TX_RTS_CFG, MT_TX_RTS_CFG_THRESH, value); return 0; } static int mt76_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { struct mt7601u_dev *dev = hw->priv; struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; u16 ssn = params->ssn; struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; WARN_ON(msta->wcid.idx > GROUP_WCID(0)); switch (action) { case IEEE80211_AMPDU_RX_START: mt76_set(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); break; case IEEE80211_AMPDU_RX_STOP: mt76_clear(dev, MT_WCID_ADDR(msta->wcid.idx) + 4, BIT(16 + tid)); break; case IEEE80211_AMPDU_TX_OPERATIONAL: ieee80211_send_bar(vif, sta->addr, tid, msta->agg_ssn[tid]); break; case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: break; case IEEE80211_AMPDU_TX_START: msta->agg_ssn[tid] = ssn << 4; return IEEE80211_AMPDU_TX_START_IMMEDIATE; case IEEE80211_AMPDU_TX_STOP_CONT: ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; } return 0; } static void mt76_sta_rate_tbl_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct mt7601u_dev *dev = hw->priv; struct mt76_sta *msta = (struct mt76_sta *) sta->drv_priv; struct ieee80211_sta_rates *rates; struct ieee80211_tx_rate rate = {}; rcu_read_lock(); rates = rcu_dereference(sta->rates); if (!rates) goto out; rate.idx = rates->rate[0].idx; rate.flags = rates->rate[0].flags; mt76_mac_wcid_set_rate(dev, &msta->wcid, &rate); out: rcu_read_unlock(); } const struct ieee80211_ops mt7601u_ops = { .tx = mt7601u_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = mt7601u_start, .stop = mt7601u_stop, .add_interface = mt7601u_add_interface, .remove_interface = mt7601u_remove_interface, .config = mt7601u_config, .configure_filter = mt76_configure_filter, .bss_info_changed = mt7601u_bss_info_changed, .sta_add = mt7601u_sta_add, .sta_remove = mt7601u_sta_remove, .sta_notify = mt7601u_sta_notify, .set_key = mt7601u_set_key, .conf_tx = mt7601u_conf_tx, .sw_scan_start = mt7601u_sw_scan, .sw_scan_complete = mt7601u_sw_scan_complete, .ampdu_action = mt76_ampdu_action, .sta_rate_tbl_update = mt76_sta_rate_tbl_update, .set_rts_threshold = mt7601u_set_rts_threshold, };
linux-master
drivers/net/wireless/mediatek/mt7601u/main.c
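main.c hands out station indices with a small bitmap allocator: mt76_wcid_alloc() scans an array of longs for the first clear bit and refuses indices above 119, and mt7601u_sta_remove() simply clears the bit again. The sketch below mirrors that shape in userspace; the table size, the wcid_alloc()/wcid_free() names and the use of the GCC/Clang __builtin_ffsl() are assumptions for illustration, not the driver's code.

#include <stdio.h>

#define N_WCIDS        128                        /* assumed table size */
#define WCID_MAX       119                        /* last usable index, as above */
#define BITS_PER_LONG  (8 * (int)sizeof(unsigned long))
#define MASK_WORDS     (N_WCIDS / BITS_PER_LONG)

static unsigned long wcid_mask[MASK_WORDS];

static int wcid_alloc(void)
{
    int i, idx = 0;

    for (i = 0; i < MASK_WORDS; i++) {
        idx = __builtin_ffsl(~wcid_mask[i]);      /* 1-based, 0 if word is full */
        if (!idx)
            continue;
        idx--;
        wcid_mask[i] |= 1UL << idx;
        break;
    }

    idx = i * BITS_PER_LONG + idx;
    if (i == MASK_WORDS || idx > WCID_MAX)
        return -1;
    return idx;
}

static void wcid_free(int idx)
{
    wcid_mask[idx / BITS_PER_LONG] &= ~(1UL << (idx % BITS_PER_LONG));
}

int main(void)
{
    int a = wcid_alloc();    /* 0 */
    int b = wcid_alloc();    /* 1 */

    wcid_free(a);
    printf("a=%d b=%d reuse=%d\n", a, b, wcid_alloc());  /* reuse == 0 */
    return 0;
}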
// SPDX-License-Identifier: GPL-2.0-only /* * (c) Copyright 2002-2010, Ralink Technology, Inc. * Copyright (C) 2014 Felix Fietkau <[email protected]> * Copyright (C) 2015 Jakub Kicinski <[email protected]> */ #include <linux/kernel.h> #include <linux/firmware.h> #include <linux/delay.h> #include <linux/usb.h> #include <linux/skbuff.h> #include "mt7601u.h" #include "dma.h" #include "mcu.h" #include "usb.h" #include "trace.h" #define MCU_FW_URB_MAX_PAYLOAD 0x3800 #define MCU_FW_URB_SIZE (MCU_FW_URB_MAX_PAYLOAD + 12) #define MCU_RESP_URB_SIZE 1024 static inline int firmware_running(struct mt7601u_dev *dev) { return mt7601u_rr(dev, MT_MCU_COM_REG0) == 1; } static inline void skb_put_le32(struct sk_buff *skb, u32 val) { put_unaligned_le32(val, skb_put(skb, 4)); } static inline void mt7601u_dma_skb_wrap_cmd(struct sk_buff *skb, u8 seq, enum mcu_cmd cmd) { WARN_ON(mt7601u_dma_skb_wrap(skb, CPU_TX_PORT, DMA_COMMAND, FIELD_PREP(MT_TXD_CMD_INFO_SEQ, seq) | FIELD_PREP(MT_TXD_CMD_INFO_TYPE, cmd))); } static inline void trace_mt_mcu_msg_send_cs(struct mt7601u_dev *dev, struct sk_buff *skb, bool need_resp) { u32 i, csum = 0; for (i = 0; i < skb->len / 4; i++) csum ^= get_unaligned_le32(skb->data + i * 4); trace_mt_mcu_msg_send(dev, skb, csum, need_resp); } static struct sk_buff *mt7601u_mcu_msg_alloc(const void *data, int len) { struct sk_buff *skb; WARN_ON(len % 4); /* if length is not divisible by 4 we need to pad */ skb = alloc_skb(len + MT_DMA_HDR_LEN + 4, GFP_KERNEL); if (skb) { skb_reserve(skb, MT_DMA_HDR_LEN); skb_put_data(skb, data, len); } return skb; } static int mt7601u_mcu_wait_resp(struct mt7601u_dev *dev, u8 seq) { struct urb *urb = dev->mcu.resp.urb; u32 rxfce; int urb_status, ret, i = 5; while (i--) { if (!wait_for_completion_timeout(&dev->mcu.resp_cmpl, msecs_to_jiffies(300))) { dev_warn(dev->dev, "Warning: %s retrying\n", __func__); continue; } /* Make copies of important data before reusing the urb */ rxfce = get_unaligned_le32(dev->mcu.resp.buf); urb_status = urb->status * mt7601u_urb_has_error(urb); ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, &dev->mcu.resp, GFP_KERNEL, mt7601u_complete_urb, &dev->mcu.resp_cmpl); if (ret) return ret; if (urb_status) dev_err(dev->dev, "Error: MCU resp urb failed:%d\n", urb_status); if (FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce) == seq && FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce) == CMD_DONE) return 0; dev_err(dev->dev, "Error: MCU resp evt:%lx seq:%hhx-%lx!\n", FIELD_GET(MT_RXD_CMD_INFO_EVT_TYPE, rxfce), seq, FIELD_GET(MT_RXD_CMD_INFO_CMD_SEQ, rxfce)); } dev_err(dev->dev, "Error: %s timed out\n", __func__); return -ETIMEDOUT; } static int mt7601u_mcu_msg_send(struct mt7601u_dev *dev, struct sk_buff *skb, enum mcu_cmd cmd, bool wait_resp) { struct usb_device *usb_dev = mt7601u_to_usb_dev(dev); unsigned cmd_pipe = usb_sndbulkpipe(usb_dev, dev->out_eps[MT_EP_OUT_INBAND_CMD]); int sent, ret; u8 seq = 0; if (test_bit(MT7601U_STATE_REMOVED, &dev->state)) { consume_skb(skb); return 0; } mutex_lock(&dev->mcu.mutex); if (wait_resp) while (!seq) seq = ++dev->mcu.msg_seq & 0xf; mt7601u_dma_skb_wrap_cmd(skb, seq, cmd); if (dev->mcu.resp_cmpl.done) dev_err(dev->dev, "Error: MCU response pre-completed!\n"); trace_mt_mcu_msg_send_cs(dev, skb, wait_resp); trace_mt_submit_urb_sync(dev, cmd_pipe, skb->len); ret = usb_bulk_msg(usb_dev, cmd_pipe, skb->data, skb->len, &sent, 500); if (ret) { dev_err(dev->dev, "Error: send MCU cmd failed:%d\n", ret); goto out; } if (sent != skb->len) dev_err(dev->dev, "Error: %s sent != skb->len\n", __func__); if 
(wait_resp) ret = mt7601u_mcu_wait_resp(dev, seq); out: mutex_unlock(&dev->mcu.mutex); consume_skb(skb); return ret; } static int mt7601u_mcu_function_select(struct mt7601u_dev *dev, enum mcu_function func, u32 val) { struct sk_buff *skb; struct { __le32 id; __le32 value; } __packed __aligned(4) msg = { .id = cpu_to_le32(func), .value = cpu_to_le32(val), }; skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg)); if (!skb) return -ENOMEM; return mt7601u_mcu_msg_send(dev, skb, CMD_FUN_SET_OP, func == 5); } int mt7601u_mcu_tssi_read_kick(struct mt7601u_dev *dev, int use_hvga) { int ret; if (!test_bit(MT7601U_STATE_MCU_RUNNING, &dev->state)) return 0; ret = mt7601u_mcu_function_select(dev, ATOMIC_TSSI_SETTING, use_hvga); if (ret) { dev_warn(dev->dev, "Warning: MCU TSSI read kick failed\n"); return ret; } dev->tssi_read_trig = true; return 0; } int mt7601u_mcu_calibrate(struct mt7601u_dev *dev, enum mcu_calibrate cal, u32 val) { struct sk_buff *skb; struct { __le32 id; __le32 value; } __packed __aligned(4) msg = { .id = cpu_to_le32(cal), .value = cpu_to_le32(val), }; skb = mt7601u_mcu_msg_alloc(&msg, sizeof(msg)); if (!skb) return -ENOMEM; return mt7601u_mcu_msg_send(dev, skb, CMD_CALIBRATION_OP, true); } int mt7601u_write_reg_pairs(struct mt7601u_dev *dev, u32 base, const struct mt76_reg_pair *data, int n) { const int max_vals_per_cmd = INBAND_PACKET_MAX_LEN / 8; struct sk_buff *skb; int cnt, i, ret; if (!n) return 0; cnt = min(max_vals_per_cmd, n); skb = alloc_skb(cnt * 8 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); if (!skb) return -ENOMEM; skb_reserve(skb, MT_DMA_HDR_LEN); for (i = 0; i < cnt; i++) { skb_put_le32(skb, base + data[i].reg); skb_put_le32(skb, data[i].value); } ret = mt7601u_mcu_msg_send(dev, skb, CMD_RANDOM_WRITE, cnt == n); if (ret) return ret; return mt7601u_write_reg_pairs(dev, base, data + cnt, n - cnt); } int mt7601u_burst_write_regs(struct mt7601u_dev *dev, u32 offset, const u32 *data, int n) { const int max_regs_per_cmd = INBAND_PACKET_MAX_LEN / 4 - 1; struct sk_buff *skb; int cnt, i, ret; if (!n) return 0; cnt = min(max_regs_per_cmd, n); skb = alloc_skb(cnt * 4 + MT_DMA_HDR_LEN + 4, GFP_KERNEL); if (!skb) return -ENOMEM; skb_reserve(skb, MT_DMA_HDR_LEN); skb_put_le32(skb, MT_MCU_MEMMAP_WLAN + offset); for (i = 0; i < cnt; i++) skb_put_le32(skb, data[i]); ret = mt7601u_mcu_msg_send(dev, skb, CMD_BURST_WRITE, cnt == n); if (ret) return ret; return mt7601u_burst_write_regs(dev, offset + cnt * 4, data + cnt, n - cnt); } struct mt76_fw_header { __le32 ilm_len; __le32 dlm_len; __le16 build_ver; __le16 fw_ver; u8 pad[4]; char build_time[16]; }; struct mt76_fw { struct mt76_fw_header hdr; u8 ivb[MT_MCU_IVB_SIZE]; u8 ilm[]; }; static int __mt7601u_dma_fw(struct mt7601u_dev *dev, const struct mt7601u_dma_buf *dma_buf, const void *data, u32 len, u32 dst_addr) { DECLARE_COMPLETION_ONSTACK(cmpl); struct mt7601u_dma_buf buf = *dma_buf; /* we need to fake length */ __le32 reg; u32 val; int ret; reg = cpu_to_le32(FIELD_PREP(MT_TXD_INFO_TYPE, DMA_PACKET) | FIELD_PREP(MT_TXD_INFO_D_PORT, CPU_TX_PORT) | FIELD_PREP(MT_TXD_INFO_LEN, len)); memcpy(buf.buf, &reg, sizeof(reg)); memcpy(buf.buf + sizeof(reg), data, len); memset(buf.buf + sizeof(reg) + len, 0, 8); ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE, MT_FCE_DMA_ADDR, dst_addr); if (ret) return ret; len = roundup(len, 4); ret = mt7601u_vendor_single_wr(dev, MT_VEND_WRITE_FCE, MT_FCE_DMA_LEN, len << 16); if (ret) return ret; buf.len = MT_DMA_HDR_LEN + len + 4; ret = mt7601u_usb_submit_buf(dev, USB_DIR_OUT, MT_EP_OUT_INBAND_CMD, &buf, GFP_KERNEL, 
mt7601u_complete_urb, &cmpl); if (ret) return ret; if (!wait_for_completion_timeout(&cmpl, msecs_to_jiffies(1000))) { dev_err(dev->dev, "Error: firmware upload timed out\n"); usb_kill_urb(buf.urb); return -ETIMEDOUT; } if (mt7601u_urb_has_error(buf.urb)) { dev_err(dev->dev, "Error: firmware upload urb failed:%d\n", buf.urb->status); return buf.urb->status; } val = mt7601u_rr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX); val++; mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_CPU_DESC_IDX, val); return 0; } static int mt7601u_dma_fw(struct mt7601u_dev *dev, struct mt7601u_dma_buf *dma_buf, const void *data, int len, u32 dst_addr) { int n, ret; if (len == 0) return 0; n = min(MCU_FW_URB_MAX_PAYLOAD, len); ret = __mt7601u_dma_fw(dev, dma_buf, data, n, dst_addr); if (ret) return ret; if (!mt76_poll_msec(dev, MT_MCU_COM_REG1, BIT(31), BIT(31), 500)) return -ETIMEDOUT; return mt7601u_dma_fw(dev, dma_buf, data + n, len - n, dst_addr + n); } static int mt7601u_upload_firmware(struct mt7601u_dev *dev, const struct mt76_fw *fw) { struct mt7601u_dma_buf dma_buf; void *ivb; u32 ilm_len, dlm_len; int i, ret; ivb = kmemdup(fw->ivb, sizeof(fw->ivb), GFP_KERNEL); if (!ivb) return -ENOMEM; if (mt7601u_usb_alloc_buf(dev, MCU_FW_URB_SIZE, &dma_buf)) { ret = -ENOMEM; goto error; } ilm_len = le32_to_cpu(fw->hdr.ilm_len) - sizeof(fw->ivb); dev_dbg(dev->dev, "loading FW - ILM %u + IVB %zu\n", ilm_len, sizeof(fw->ivb)); ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm, ilm_len, sizeof(fw->ivb)); if (ret) goto error; dlm_len = le32_to_cpu(fw->hdr.dlm_len); dev_dbg(dev->dev, "loading FW - DLM %u\n", dlm_len); ret = mt7601u_dma_fw(dev, &dma_buf, fw->ilm + ilm_len, dlm_len, MT_MCU_DLM_OFFSET); if (ret) goto error; ret = mt7601u_vendor_request(dev, MT_VEND_DEV_MODE, USB_DIR_OUT, 0x12, 0, ivb, sizeof(fw->ivb)); if (ret < 0) goto error; ret = 0; for (i = 100; i && !firmware_running(dev); i--) msleep(10); if (!i) { ret = -ETIMEDOUT; goto error; } dev_dbg(dev->dev, "Firmware running!\n"); error: kfree(ivb); mt7601u_usb_free_buf(dev, &dma_buf); return ret; } static int mt7601u_load_firmware(struct mt7601u_dev *dev) { const struct firmware *fw; const struct mt76_fw_header *hdr; int len, ret; u32 val; mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN)); if (firmware_running(dev)) return firmware_request_cache(dev->dev, MT7601U_FIRMWARE); ret = request_firmware(&fw, MT7601U_FIRMWARE, dev->dev); if (ret) return ret; if (!fw || !fw->data || fw->size < sizeof(*hdr)) goto err_inv_fw; hdr = (const struct mt76_fw_header *) fw->data; if (le32_to_cpu(hdr->ilm_len) <= MT_MCU_IVB_SIZE) goto err_inv_fw; len = sizeof(*hdr); len += le32_to_cpu(hdr->ilm_len); len += le32_to_cpu(hdr->dlm_len); if (fw->size != len) goto err_inv_fw; val = le16_to_cpu(hdr->fw_ver); dev_info(dev->dev, "Firmware Version: %d.%d.%02d Build: %x Build time: %.16s\n", (val >> 12) & 0xf, (val >> 8) & 0xf, val & 0xf, le16_to_cpu(hdr->build_ver), hdr->build_time); len = le32_to_cpu(hdr->ilm_len); mt7601u_wr(dev, 0x94c, 0); mt7601u_wr(dev, MT_FCE_PSE_CTRL, 0); mt7601u_vendor_reset(dev); msleep(5); mt7601u_wr(dev, 0xa44, 0); mt7601u_wr(dev, 0x230, 0x84210); mt7601u_wr(dev, 0x400, 0x80c00); mt7601u_wr(dev, 0x800, 1); mt7601u_rmw(dev, MT_PBF_CFG, 0, (MT_PBF_CFG_TX0Q_EN | MT_PBF_CFG_TX1Q_EN | MT_PBF_CFG_TX2Q_EN | MT_PBF_CFG_TX3Q_EN)); mt7601u_wr(dev, MT_FCE_PSE_CTRL, 1); mt7601u_wr(dev, MT_USB_DMA_CFG, (MT_USB_DMA_CFG_RX_BULK_EN | MT_USB_DMA_CFG_TX_BULK_EN)); val = mt76_set(dev, MT_USB_DMA_CFG, MT_USB_DMA_CFG_TX_CLR); val &= ~MT_USB_DMA_CFG_TX_CLR; mt7601u_wr(dev, 
MT_USB_DMA_CFG, val); /* FCE tx_fs_base_ptr */ mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_BASE_PTR, 0x400230); /* FCE tx_fs_max_cnt */ mt7601u_wr(dev, MT_TX_CPU_FROM_FCE_MAX_COUNT, 1); /* FCE pdma enable */ mt7601u_wr(dev, MT_FCE_PDMA_GLOBAL_CONF, 0x44); /* FCE skip_fs_en */ mt7601u_wr(dev, MT_FCE_SKIP_FS, 3); ret = mt7601u_upload_firmware(dev, (const struct mt76_fw *)fw->data); release_firmware(fw); return ret; err_inv_fw: dev_err(dev->dev, "Invalid firmware image\n"); release_firmware(fw); return -ENOENT; } int mt7601u_mcu_init(struct mt7601u_dev *dev) { int ret; mutex_init(&dev->mcu.mutex); ret = mt7601u_load_firmware(dev); if (ret) return ret; set_bit(MT7601U_STATE_MCU_RUNNING, &dev->state); return 0; } int mt7601u_mcu_cmd_init(struct mt7601u_dev *dev) { int ret; ret = mt7601u_mcu_function_select(dev, Q_SELECT, 1); if (ret) return ret; init_completion(&dev->mcu.resp_cmpl); if (mt7601u_usb_alloc_buf(dev, MCU_RESP_URB_SIZE, &dev->mcu.resp)) { mt7601u_usb_free_buf(dev, &dev->mcu.resp); return -ENOMEM; } ret = mt7601u_usb_submit_buf(dev, USB_DIR_IN, MT_EP_IN_CMD_RESP, &dev->mcu.resp, GFP_KERNEL, mt7601u_complete_urb, &dev->mcu.resp_cmpl); if (ret) { mt7601u_usb_free_buf(dev, &dev->mcu.resp); return ret; } return 0; } void mt7601u_mcu_cmd_deinit(struct mt7601u_dev *dev) { usb_kill_urb(dev->mcu.resp.urb); mt7601u_usb_free_buf(dev, &dev->mcu.resp); }
linux-master
drivers/net/wireless/mediatek/mt7601u/mcu.c
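The MCU register writes in mcu.c above are batched: mt7601u_write_reg_pairs() serializes each (register, value) pair as two little-endian 32-bit words and recurses in chunks of INBAND_PACKET_MAX_LEN / 8 pairs, with only the final chunk waiting for the MCU response. The sketch below shows just the chunking and serialization, rewritten as a loop; the packet-size constant, the 0x80000000 base address and the build_cmd() helper are placeholders for illustration, not the driver's values.

#include <stdint.h>
#include <stdio.h>

#define INBAND_PACKET_MAX_LEN  64    /* assumed; the real value lives in the driver */
#define MAX_PAIRS_PER_CMD      (INBAND_PACKET_MAX_LEN / 8)

struct reg_pair {
    uint32_t reg;
    uint32_t value;
};

static void put_le32(uint8_t *p, uint32_t v)
{
    p[0] = (uint8_t)v;
    p[1] = (uint8_t)(v >> 8);
    p[2] = (uint8_t)(v >> 16);
    p[3] = (uint8_t)(v >> 24);
}

/* Serialize one command's worth of pairs; returns how many were consumed. */
static int build_cmd(uint8_t *payload, uint32_t base,
                     const struct reg_pair *data, int n)
{
    int cnt = n < MAX_PAIRS_PER_CMD ? n : MAX_PAIRS_PER_CMD;

    for (int i = 0; i < cnt; i++) {
        put_le32(payload + i * 8, base + data[i].reg);
        put_le32(payload + i * 8 + 4, data[i].value);
    }
    return cnt;
}

int main(void)
{
    struct reg_pair regs[11];
    uint8_t payload[INBAND_PACKET_MAX_LEN];
    int n = 11, sent = 0, cmds = 0;

    for (int i = 0; i < n; i++)
        regs[i] = (struct reg_pair){ .reg = 4 * i, .value = 0x1000 + i };

    /* Same shape as the driver's tail recursion, written as a loop. */
    while (sent < n) {
        int cnt = build_cmd(payload, 0x80000000u, regs + sent, n - sent);

        printf("cmd %d: %d pairs, %d payload bytes\n", cmds++, cnt, cnt * 8);
        sent += cnt;
    }
    return 0;
}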
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2014 Felix Fietkau <[email protected]> * Copyright (C) 2015 Jakub Kicinski <[email protected]> */ #include "mt7601u.h" #include "trace.h" enum mt76_txq_id { MT_TXQ_VO = IEEE80211_AC_VO, MT_TXQ_VI = IEEE80211_AC_VI, MT_TXQ_BE = IEEE80211_AC_BE, MT_TXQ_BK = IEEE80211_AC_BK, MT_TXQ_PSD, MT_TXQ_MCU, __MT_TXQ_MAX }; /* Hardware uses mirrored order of queues with Q0 having the highest priority */ static u8 q2hwq(u8 q) { return q ^ 0x3; } /* Take mac80211 Q id from the skb and translate it to hardware Q id */ static u8 skb2q(struct sk_buff *skb) { int qid = skb_get_queue_mapping(skb); if (WARN_ON(qid >= MT_TXQ_PSD)) { qid = MT_TXQ_BE; skb_set_queue_mapping(skb, qid); } return q2hwq(qid); } /* Note: TX retry reporting is a bit broken. * Retries are reported only once per AMPDU and often come a frame early * i.e. they are reported in the last status preceding the AMPDU. Apart * from the fact that it's hard to know the length of the AMPDU (which is * required to know to how many consecutive frames retries should be * applied), if status comes early on full FIFO it gets lost and retries * of the whole AMPDU become invisible. * As a work-around encode the desired rate in PKT_ID of TX descriptor * and based on that guess the retries (every rate is tried once). * Only downside here is that for MCS0 we have to rely solely on * transmission failures as no retries can ever be reported. * Not having to read EXT_FIFO has a nice effect of doubling the number * of reports which can be fetched. * Also the vendor driver never uses the EXT_FIFO register so it may be * undertested. */ static u8 mt7601u_tx_pktid_enc(struct mt7601u_dev *dev, u8 rate, bool is_probe) { u8 encoded = (rate + 1) + is_probe * 8; /* Because PKT_ID 0 disables status reporting only 15 values are * available but 16 are needed (8 MCS * 2 for encoding is_probe) * - we need to cram together two rates. MCS0 and MCS7 with is_probe * share PKT_ID 9. 
*/ if (is_probe && rate == 7) return encoded - 7; return encoded; } static void mt7601u_tx_pktid_dec(struct mt7601u_dev *dev, struct mt76_tx_status *stat) { u8 req_rate = stat->pktid; u8 eff_rate = stat->rate & 0x7; req_rate -= 1; if (req_rate > 7) { stat->is_probe = true; req_rate -= 8; /* Decide between MCS0 and MCS7 which share pktid 9 */ if (!req_rate && eff_rate) req_rate = 7; } stat->retry = req_rate - eff_rate; } static void mt7601u_tx_skb_remove_dma_overhead(struct sk_buff *skb, struct ieee80211_tx_info *info) { int pkt_len = (unsigned long)info->status.status_driver_data[0]; skb_pull(skb, sizeof(struct mt76_txwi) + 4); if (ieee80211_get_hdrlen_from_skb(skb) % 4) mt76_remove_hdr_pad(skb); skb_trim(skb, pkt_len); } void mt7601u_tx_status(struct mt7601u_dev *dev, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); mt7601u_tx_skb_remove_dma_overhead(skb, info); ieee80211_tx_info_clear_status(info); info->status.rates[0].idx = -1; info->flags |= IEEE80211_TX_STAT_ACK; spin_lock_bh(&dev->mac_lock); ieee80211_tx_status(dev->hw, skb); spin_unlock_bh(&dev->mac_lock); } static int mt7601u_skb_rooms(struct mt7601u_dev *dev, struct sk_buff *skb) { int hdr_len = ieee80211_get_hdrlen_from_skb(skb); u32 need_head; need_head = sizeof(struct mt76_txwi) + 4; if (hdr_len % 4) need_head += 2; return skb_cow(skb, need_head); } static struct mt76_txwi * mt7601u_push_txwi(struct mt7601u_dev *dev, struct sk_buff *skb, struct ieee80211_sta *sta, struct mt76_wcid *wcid, int pkt_len) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *rate = &info->control.rates[0]; struct mt76_txwi *txwi; unsigned long flags; bool is_probe; u32 pkt_id; u16 rate_ctl; u8 nss; txwi = skb_push(skb, sizeof(struct mt76_txwi)); memset(txwi, 0, sizeof(*txwi)); if (!wcid->tx_rate_set) ieee80211_get_tx_rates(info->control.vif, sta, skb, info->control.rates, 1); spin_lock_irqsave(&dev->lock, flags); if (rate->idx < 0 || !rate->count) rate_ctl = wcid->tx_rate; else rate_ctl = mt76_mac_tx_rate_val(dev, rate, &nss); spin_unlock_irqrestore(&dev->lock, flags); txwi->rate_ctl = cpu_to_le16(rate_ctl); if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) txwi->ack_ctl |= MT_TXWI_ACK_CTL_REQ; if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ) txwi->ack_ctl |= MT_TXWI_ACK_CTL_NSEQ; if ((info->flags & IEEE80211_TX_CTL_AMPDU) && sta) { u8 ba_size = IEEE80211_MIN_AMPDU_BUF; ba_size <<= sta->deflink.ht_cap.ampdu_factor; ba_size = min_t(int, 63, ba_size); if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) ba_size = 0; txwi->ack_ctl |= FIELD_PREP(MT_TXWI_ACK_CTL_BA_WINDOW, ba_size); txwi->flags = cpu_to_le16(MT_TXWI_FLAGS_AMPDU | FIELD_PREP(MT_TXWI_FLAGS_MPDU_DENSITY, sta->deflink.ht_cap.ampdu_density)); if (info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE) txwi->flags = 0; } txwi->wcid = wcid->idx; is_probe = !!(info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE); pkt_id = mt7601u_tx_pktid_enc(dev, rate_ctl & 0x7, is_probe); pkt_len |= FIELD_PREP(MT_TXWI_LEN_PKTID, pkt_id); txwi->len_ctl = cpu_to_le16(pkt_len); return txwi; } void mt7601u_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct mt7601u_dev *dev = hw->priv; struct ieee80211_vif *vif = info->control.vif; struct ieee80211_sta *sta = control->sta; struct mt76_sta *msta = NULL; struct mt76_wcid *wcid = dev->mon_wcid; struct mt76_txwi *txwi; int pkt_len = skb->len; int hw_q = skb2q(skb); BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1); 
info->status.status_driver_data[0] = (void *)(unsigned long)pkt_len; if (mt7601u_skb_rooms(dev, skb) || mt76_insert_hdr_pad(skb)) { ieee80211_free_txskb(dev->hw, skb); return; } if (sta) { msta = (struct mt76_sta *) sta->drv_priv; wcid = &msta->wcid; } else if (vif) { struct mt76_vif *mvif = (struct mt76_vif *)vif->drv_priv; wcid = &mvif->group_wcid; } txwi = mt7601u_push_txwi(dev, skb, sta, wcid, pkt_len); if (mt7601u_dma_enqueue_tx(dev, skb, wcid, hw_q)) return; trace_mt_tx(dev, skb, msta, txwi); } void mt7601u_tx_stat(struct work_struct *work) { struct mt7601u_dev *dev = container_of(work, struct mt7601u_dev, stat_work.work); struct mt76_tx_status stat; unsigned long flags; int cleaned = 0; while (!test_bit(MT7601U_STATE_REMOVED, &dev->state)) { stat = mt7601u_mac_fetch_tx_status(dev); if (!stat.valid) break; mt7601u_tx_pktid_dec(dev, &stat); mt76_send_tx_status(dev, &stat); cleaned++; } trace_mt_tx_status_cleaned(dev, cleaned); spin_lock_irqsave(&dev->tx_lock, flags); if (cleaned) queue_delayed_work(dev->stat_wq, &dev->stat_work, msecs_to_jiffies(10)); else if (test_and_clear_bit(MT7601U_STATE_MORE_STATS, &dev->state)) queue_delayed_work(dev->stat_wq, &dev->stat_work, msecs_to_jiffies(20)); else clear_bit(MT7601U_STATE_READING_STATS, &dev->state); spin_unlock_irqrestore(&dev->tx_lock, flags); } int mt7601u_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params) { struct mt7601u_dev *dev = hw->priv; u8 cw_min = 5, cw_max = 10, hw_q = q2hwq(queue); u32 val; /* TODO: should we do funny things with the parameters? * See what mt7601u_set_default_edca() used to do in init.c. */ if (params->cw_min) cw_min = fls(params->cw_min); if (params->cw_max) cw_max = fls(params->cw_max); WARN_ON(params->txop > 0xff); WARN_ON(params->aifs > 0xf); WARN_ON(cw_min > 0xf); WARN_ON(cw_max > 0xf); val = FIELD_PREP(MT_EDCA_CFG_AIFSN, params->aifs) | FIELD_PREP(MT_EDCA_CFG_CWMIN, cw_min) | FIELD_PREP(MT_EDCA_CFG_CWMAX, cw_max); /* TODO: based on user-controlled EnableTxBurst var vendor drv sets * a really long txop on AC0 (see connect.c:2009) but only on * connect? When not connected should be 0. */ if (!hw_q) val |= 0x60; else val |= FIELD_PREP(MT_EDCA_CFG_TXOP, params->txop); mt76_wr(dev, MT_EDCA_CFG_AC(hw_q), val); val = mt76_rr(dev, MT_WMM_TXOP(hw_q)); val &= ~(MT_WMM_TXOP_MASK << MT_WMM_TXOP_SHIFT(hw_q)); val |= params->txop << MT_WMM_TXOP_SHIFT(hw_q); mt76_wr(dev, MT_WMM_TXOP(hw_q), val); val = mt76_rr(dev, MT_WMM_AIFSN); val &= ~(MT_WMM_AIFSN_MASK << MT_WMM_AIFSN_SHIFT(hw_q)); val |= params->aifs << MT_WMM_AIFSN_SHIFT(hw_q); mt76_wr(dev, MT_WMM_AIFSN, val); val = mt76_rr(dev, MT_WMM_CWMIN); val &= ~(MT_WMM_CWMIN_MASK << MT_WMM_CWMIN_SHIFT(hw_q)); val |= cw_min << MT_WMM_CWMIN_SHIFT(hw_q); mt76_wr(dev, MT_WMM_CWMIN, val); val = mt76_rr(dev, MT_WMM_CWMAX); val &= ~(MT_WMM_CWMAX_MASK << MT_WMM_CWMAX_SHIFT(hw_q)); val |= cw_max << MT_WMM_CWMAX_SHIFT(hw_q); mt76_wr(dev, MT_WMM_CWMAX, val); return 0; }
linux-master
drivers/net/wireless/mediatek/mt7601u/tx.c
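The tx.c record above works around unreliable TX retry reporting by encoding the requested rate and a probe flag into the PKT_ID field of the TX descriptor: pktid = (rate + 1) + 8 * is_probe, with the out-of-range MCS7-probe value folded onto PKT_ID 9 (otherwise MCS0-probe) and disambiguated later via the effective rate in the status. The stand-alone sketch below mirrors mt7601u_tx_pktid_enc()/mt7601u_tx_pktid_dec() to show the round trip; the function names, the bare uint8_t interface and the self-test in main() are illustrative assumptions, not the driver's code.

/* Sketch of the PKT_ID rate-encoding trick described in tx.c.
 * Assumes 8 MCS values (0..7) and a PKT_ID where 0 means "no status
 * reporting", so only values 1..15 are usable for 16 combinations. */
#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

static uint8_t pktid_enc(uint8_t rate, bool is_probe)
{
        uint8_t encoded = (rate + 1) + is_probe * 8;

        /* MCS7 + probe would need PKT_ID 16; fold it onto 9, which is
         * otherwise MCS0 + probe. */
        if (is_probe && rate == 7)
                return encoded - 7;
        return encoded;
}

static void pktid_dec(uint8_t pktid, uint8_t eff_rate,
                      uint8_t *req_rate, bool *is_probe)
{
        uint8_t r = pktid - 1;

        *is_probe = false;
        if (r > 7) {
                *is_probe = true;
                r -= 8;
                /* PKT_ID 9 is shared: a non-zero effective rate means the
                 * request was really an MCS7 probe, not MCS0. */
                if (!r && eff_rate)
                        r = 7;
        }
        *req_rate = r;
}

int main(void)
{
        /* Every (rate, probe) pair round-trips here because the effective
         * rate matches the request; the shared PKT_ID 9 only becomes
         * ambiguous when an MCS7 probe falls all the way back to MCS0. */
        for (uint8_t rate = 0; rate < 8; rate++) {
                bool probe;
                uint8_t r;

                pktid_dec(pktid_enc(rate, false), rate, &r, &probe);
                assert(r == rate && !probe);

                pktid_dec(pktid_enc(rate, true), rate, &r, &probe);
                assert(r == rate && probe);
        }
        return 0;
}

The decoder's retry estimate in the driver is then simply the difference between the requested and effective rate, which is why MCS0 requests can never report retries.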
// SPDX-License-Identifier: GPL-2.0 /* ioctl() (mostly Linux Wireless Extensions) routines for Host AP driver */ #include <linux/slab.h> #include <linux/types.h> #include <linux/sched/signal.h> #include <linux/ethtool.h> #include <linux/if_arp.h> #include <linux/module.h> #include <linux/etherdevice.h> #include <net/lib80211.h> #include "hostap_wlan.h" #include "hostap.h" #include "hostap_ap.h" static struct iw_statistics *hostap_get_wireless_stats(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; struct iw_statistics *wstats; iface = netdev_priv(dev); local = iface->local; /* Why are we doing that ? Jean II */ if (iface->type != HOSTAP_INTERFACE_MAIN) return NULL; wstats = &local->wstats; wstats->status = 0; wstats->discard.code = local->comm_tallies.rx_discards_wep_undecryptable; wstats->discard.misc = local->comm_tallies.rx_fcs_errors + local->comm_tallies.rx_discards_no_buffer + local->comm_tallies.tx_discards_wrong_sa; wstats->discard.retries = local->comm_tallies.tx_retry_limit_exceeded; wstats->discard.fragment = local->comm_tallies.rx_message_in_bad_msg_fragments; if (local->iw_mode != IW_MODE_MASTER && local->iw_mode != IW_MODE_REPEAT) { if (prism2_update_comms_qual(dev) == 0) wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; wstats->qual.qual = local->comms_qual; wstats->qual.level = local->avg_signal; wstats->qual.noise = local->avg_noise; } else { wstats->qual.qual = 0; wstats->qual.level = 0; wstats->qual.noise = 0; wstats->qual.updated = IW_QUAL_ALL_INVALID; } return wstats; } static int prism2_get_datarates(struct net_device *dev, u8 *rates) { struct hostap_interface *iface; local_info_t *local; u8 buf[12]; int len; u16 val; iface = netdev_priv(dev); local = iface->local; len = local->func->get_rid(dev, HFA384X_RID_SUPPORTEDDATARATES, buf, sizeof(buf), 0); if (len < 2) return 0; val = le16_to_cpu(*(__le16 *) buf); /* string length */ if (len - 2 < val || val > 10) return 0; memcpy(rates, buf + 2, val); return val; } static int prism2_get_name(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u8 rates[10]; int len, i, over2 = 0; len = prism2_get_datarates(dev, rates); for (i = 0; i < len; i++) { if (rates[i] == 0x0b || rates[i] == 0x16) { over2 = 1; break; } } strcpy(wrqu->name, over2 ? 
"IEEE 802.11b" : "IEEE 802.11-DS"); return 0; } static int prism2_ioctl_siwencode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *keybuf) { struct iw_point *erq = &wrqu->encoding; struct hostap_interface *iface; local_info_t *local; int i; struct lib80211_crypt_data **crypt; iface = netdev_priv(dev); local = iface->local; i = erq->flags & IW_ENCODE_INDEX; if (i < 1 || i > 4) i = local->crypt_info.tx_keyidx; else i--; if (i < 0 || i >= WEP_KEYS) return -EINVAL; crypt = &local->crypt_info.crypt[i]; if (erq->flags & IW_ENCODE_DISABLED) { if (*crypt) lib80211_crypt_delayed_deinit(&local->crypt_info, crypt); goto done; } if (*crypt != NULL && (*crypt)->ops != NULL && strcmp((*crypt)->ops->name, "WEP") != 0) { /* changing to use WEP; deinit previously used algorithm */ lib80211_crypt_delayed_deinit(&local->crypt_info, crypt); } if (*crypt == NULL) { struct lib80211_crypt_data *new_crypt; /* take WEP into use */ new_crypt = kzalloc(sizeof(struct lib80211_crypt_data), GFP_KERNEL); if (new_crypt == NULL) return -ENOMEM; new_crypt->ops = lib80211_get_crypto_ops("WEP"); if (!new_crypt->ops) { request_module("lib80211_crypt_wep"); new_crypt->ops = lib80211_get_crypto_ops("WEP"); } if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) new_crypt->priv = new_crypt->ops->init(i); if (!new_crypt->ops || !new_crypt->priv) { kfree(new_crypt); new_crypt = NULL; printk(KERN_WARNING "%s: could not initialize WEP: " "load module hostap_crypt_wep.o\n", dev->name); return -EOPNOTSUPP; } *crypt = new_crypt; } if (erq->length > 0) { int len = erq->length <= 5 ? 5 : 13; int first = 1, j; if (len > erq->length) memset(keybuf + erq->length, 0, len - erq->length); (*crypt)->ops->set_key(keybuf, len, NULL, (*crypt)->priv); for (j = 0; j < WEP_KEYS; j++) { if (j != i && local->crypt_info.crypt[j]) { first = 0; break; } } if (first) local->crypt_info.tx_keyidx = i; } else { /* No key data - just set the default TX key index */ local->crypt_info.tx_keyidx = i; } done: local->open_wep = erq->flags & IW_ENCODE_OPEN; if (hostap_set_encryption(local)) { printk(KERN_DEBUG "%s: set_encryption failed\n", dev->name); return -EINVAL; } /* Do not reset port0 if card is in Managed mode since resetting will * generate new IEEE 802.11 authentication which may end up in looping * with IEEE 802.1X. Prism2 documentation seem to require port reset * after WEP configuration. However, keys are apparently changed at * least in Managed mode. 
*/ if (local->iw_mode != IW_MODE_INFRA && local->func->reset_port(dev)) { printk(KERN_DEBUG "%s: reset_port failed\n", dev->name); return -EINVAL; } return 0; } static int prism2_ioctl_giwencode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *key) { struct iw_point *erq = &wrqu->encoding; struct hostap_interface *iface; local_info_t *local; int i, len; u16 val; struct lib80211_crypt_data *crypt; iface = netdev_priv(dev); local = iface->local; i = erq->flags & IW_ENCODE_INDEX; if (i < 1 || i > 4) i = local->crypt_info.tx_keyidx; else i--; if (i < 0 || i >= WEP_KEYS) return -EINVAL; crypt = local->crypt_info.crypt[i]; erq->flags = i + 1; if (crypt == NULL || crypt->ops == NULL) { erq->length = 0; erq->flags |= IW_ENCODE_DISABLED; return 0; } if (strcmp(crypt->ops->name, "WEP") != 0) { /* only WEP is supported with wireless extensions, so just * report that encryption is used */ erq->length = 0; erq->flags |= IW_ENCODE_ENABLED; return 0; } /* Reads from HFA384X_RID_CNFDEFAULTKEY* return bogus values, so show * the keys from driver buffer */ len = crypt->ops->get_key(key, WEP_KEY_LEN, NULL, crypt->priv); erq->length = (len >= 0 ? len : 0); if (local->func->get_rid(dev, HFA384X_RID_CNFWEPFLAGS, &val, 2, 1) < 0) { printk("CNFWEPFLAGS reading failed\n"); return -EOPNOTSUPP; } le16_to_cpus(&val); if (val & HFA384X_WEPFLAGS_PRIVACYINVOKED) erq->flags |= IW_ENCODE_ENABLED; else erq->flags |= IW_ENCODE_DISABLED; if (val & HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED) erq->flags |= IW_ENCODE_RESTRICTED; else erq->flags |= IW_ENCODE_OPEN; return 0; } static int hostap_set_rate(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; int ret, basic_rates; iface = netdev_priv(dev); local = iface->local; basic_rates = local->basic_rates & local->tx_rate_control; if (!basic_rates || basic_rates != local->basic_rates) { printk(KERN_INFO "%s: updating basic rate set automatically " "to match with the new supported rate set\n", dev->name); if (!basic_rates) basic_rates = local->tx_rate_control; local->basic_rates = basic_rates; if (hostap_set_word(dev, HFA384X_RID_CNFBASICRATES, basic_rates)) printk(KERN_WARNING "%s: failed to set " "cnfBasicRates\n", dev->name); } ret = (hostap_set_word(dev, HFA384X_RID_TXRATECONTROL, local->tx_rate_control) || hostap_set_word(dev, HFA384X_RID_CNFSUPPORTEDRATES, local->tx_rate_control) || local->func->reset_port(dev)); if (ret) { printk(KERN_WARNING "%s: TXRateControl/cnfSupportedRates " "setting to 0x%x failed\n", dev->name, local->tx_rate_control); } /* Update TX rate configuration for all STAs based on new operational * rate set. 
*/ hostap_update_rates(local); return ret; } static int prism2_ioctl_siwrate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rrq = &wrqu->bitrate; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; if (rrq->fixed) { switch (rrq->value) { case 11000000: local->tx_rate_control = HFA384X_RATES_11MBPS; break; case 5500000: local->tx_rate_control = HFA384X_RATES_5MBPS; break; case 2000000: local->tx_rate_control = HFA384X_RATES_2MBPS; break; case 1000000: local->tx_rate_control = HFA384X_RATES_1MBPS; break; default: local->tx_rate_control = HFA384X_RATES_1MBPS | HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS | HFA384X_RATES_11MBPS; break; } } else { switch (rrq->value) { case 11000000: local->tx_rate_control = HFA384X_RATES_1MBPS | HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS | HFA384X_RATES_11MBPS; break; case 5500000: local->tx_rate_control = HFA384X_RATES_1MBPS | HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS; break; case 2000000: local->tx_rate_control = HFA384X_RATES_1MBPS | HFA384X_RATES_2MBPS; break; case 1000000: local->tx_rate_control = HFA384X_RATES_1MBPS; break; default: local->tx_rate_control = HFA384X_RATES_1MBPS | HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS | HFA384X_RATES_11MBPS; break; } } return hostap_set_rate(dev); } static int prism2_ioctl_giwrate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rrq = &wrqu->bitrate; u16 val; struct hostap_interface *iface; local_info_t *local; int ret = 0; iface = netdev_priv(dev); local = iface->local; if (local->func->get_rid(dev, HFA384X_RID_TXRATECONTROL, &val, 2, 1) < 0) return -EINVAL; if ((val & 0x1) && (val > 1)) rrq->fixed = 0; else rrq->fixed = 1; if (local->iw_mode == IW_MODE_MASTER && local->ap != NULL && !local->fw_tx_rate_control) { /* HFA384X_RID_CURRENTTXRATE seems to always be 2 Mbps in * Host AP mode, so use the recorded TX rate of the last sent * frame */ rrq->value = local->ap->last_tx_rate > 0 ? 
local->ap->last_tx_rate * 100000 : 11000000; return 0; } if (local->func->get_rid(dev, HFA384X_RID_CURRENTTXRATE, &val, 2, 1) < 0) return -EINVAL; switch (val) { case HFA384X_RATES_1MBPS: rrq->value = 1000000; break; case HFA384X_RATES_2MBPS: rrq->value = 2000000; break; case HFA384X_RATES_5MBPS: rrq->value = 5500000; break; case HFA384X_RATES_11MBPS: rrq->value = 11000000; break; default: /* should not happen */ rrq->value = 11000000; ret = -EINVAL; break; } return ret; } static int prism2_ioctl_siwsens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *sens = &wrqu->sens; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; /* Set the desired AP density */ if (sens->value < 1 || sens->value > 3) return -EINVAL; if (hostap_set_word(dev, HFA384X_RID_CNFSYSTEMSCALE, sens->value) || local->func->reset_port(dev)) return -EINVAL; return 0; } static int prism2_ioctl_giwsens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *sens = &wrqu->sens; struct hostap_interface *iface; local_info_t *local; __le16 val; iface = netdev_priv(dev); local = iface->local; /* Get the current AP density */ if (local->func->get_rid(dev, HFA384X_RID_CNFSYSTEMSCALE, &val, 2, 1) < 0) return -EINVAL; sens->value = le16_to_cpu(val); sens->fixed = 1; return 0; } /* Deprecated in new wireless extension API */ static int prism2_ioctl_giwaplist(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; struct sockaddr *addr; struct iw_quality *qual; iface = netdev_priv(dev); local = iface->local; if (local->iw_mode != IW_MODE_MASTER) { printk(KERN_DEBUG "SIOCGIWAPLIST is currently only supported " "in Host AP mode\n"); data->length = 0; return -EOPNOTSUPP; } addr = kmalloc_array(IW_MAX_AP, sizeof(struct sockaddr), GFP_KERNEL); qual = kmalloc_array(IW_MAX_AP, sizeof(struct iw_quality), GFP_KERNEL); if (addr == NULL || qual == NULL) { kfree(addr); kfree(qual); data->length = 0; return -ENOMEM; } data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); memcpy(extra, addr, sizeof(struct sockaddr) * data->length); data->flags = 1; /* has quality information */ memcpy(extra + sizeof(struct sockaddr) * data->length, qual, sizeof(struct iw_quality) * data->length); kfree(addr); kfree(qual); return 0; } static int prism2_ioctl_siwrts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rts = &wrqu->rts; struct hostap_interface *iface; local_info_t *local; __le16 val; iface = netdev_priv(dev); local = iface->local; if (rts->disabled) val = cpu_to_le16(2347); else if (rts->value < 0 || rts->value > 2347) return -EINVAL; else val = cpu_to_le16(rts->value); if (local->func->set_rid(dev, HFA384X_RID_RTSTHRESHOLD, &val, 2) || local->func->reset_port(dev)) return -EINVAL; local->rts_threshold = rts->value; return 0; } static int prism2_ioctl_giwrts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rts = &wrqu->rts; struct hostap_interface *iface; local_info_t *local; __le16 val; iface = netdev_priv(dev); local = iface->local; if (local->func->get_rid(dev, HFA384X_RID_RTSTHRESHOLD, &val, 2, 1) < 0) return -EINVAL; rts->value = le16_to_cpu(val); rts->disabled = (rts->value == 2347); rts->fixed = 1; return 0; } static int 
prism2_ioctl_siwfrag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rts = &wrqu->rts; struct hostap_interface *iface; local_info_t *local; __le16 val; iface = netdev_priv(dev); local = iface->local; if (rts->disabled) val = cpu_to_le16(2346); else if (rts->value < 256 || rts->value > 2346) return -EINVAL; else val = cpu_to_le16(rts->value & ~0x1); /* even numbers only */ local->fragm_threshold = rts->value & ~0x1; if (local->func->set_rid(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD, &val, 2) || local->func->reset_port(dev)) return -EINVAL; return 0; } static int prism2_ioctl_giwfrag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rts = &wrqu->rts; struct hostap_interface *iface; local_info_t *local; __le16 val; iface = netdev_priv(dev); local = iface->local; if (local->func->get_rid(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD, &val, 2, 1) < 0) return -EINVAL; rts->value = le16_to_cpu(val); rts->disabled = (rts->value == 2346); rts->fixed = 1; return 0; } #ifndef PRISM2_NO_STATION_MODES static int hostap_join_ap(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; struct hfa384x_join_request req; unsigned long flags; int i; struct hfa384x_hostscan_result *entry; iface = netdev_priv(dev); local = iface->local; memcpy(req.bssid, local->preferred_ap, ETH_ALEN); req.channel = 0; spin_lock_irqsave(&local->lock, flags); for (i = 0; i < local->last_scan_results_count; i++) { if (!local->last_scan_results) break; entry = &local->last_scan_results[i]; if (ether_addr_equal(local->preferred_ap, entry->bssid)) { req.channel = entry->chid; break; } } spin_unlock_irqrestore(&local->lock, flags); if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req, sizeof(req))) { printk(KERN_DEBUG "%s: JoinRequest %pM failed\n", dev->name, local->preferred_ap); return -1; } printk(KERN_DEBUG "%s: Trying to join BSSID %pM\n", dev->name, local->preferred_ap); return 0; } #endif /* PRISM2_NO_STATION_MODES */ static int prism2_ioctl_siwap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct sockaddr *ap_addr = &wrqu->ap_addr; #ifdef PRISM2_NO_STATION_MODES return -EOPNOTSUPP; #else /* PRISM2_NO_STATION_MODES */ struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; memcpy(local->preferred_ap, &ap_addr->sa_data, ETH_ALEN); if (local->host_roaming == 1 && local->iw_mode == IW_MODE_INFRA) { struct hfa384x_scan_request scan_req; memset(&scan_req, 0, sizeof(scan_req)); scan_req.channel_list = cpu_to_le16(0x3fff); scan_req.txrate = cpu_to_le16(HFA384X_RATES_1MBPS); if (local->func->set_rid(dev, HFA384X_RID_SCANREQUEST, &scan_req, sizeof(scan_req))) { printk(KERN_DEBUG "%s: ScanResults request failed - " "preferred AP delayed to next unsolicited " "scan\n", dev->name); } } else if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA) { if (hostap_join_ap(dev)) return -EINVAL; } else { printk(KERN_DEBUG "%s: Preferred AP (SIOCSIWAP) is used only " "in Managed mode when host_roaming is enabled\n", dev->name); } return 0; #endif /* PRISM2_NO_STATION_MODES */ } static int prism2_ioctl_giwap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct sockaddr *ap_addr = &wrqu->ap_addr; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; ap_addr->sa_family = ARPHRD_ETHER; switch (iface->type) { case 
HOSTAP_INTERFACE_AP: memcpy(&ap_addr->sa_data, dev->dev_addr, ETH_ALEN); break; case HOSTAP_INTERFACE_STA: memcpy(&ap_addr->sa_data, local->assoc_ap_addr, ETH_ALEN); break; case HOSTAP_INTERFACE_WDS: memcpy(&ap_addr->sa_data, iface->u.wds.remote_addr, ETH_ALEN); break; default: if (local->func->get_rid(dev, HFA384X_RID_CURRENTBSSID, &ap_addr->sa_data, ETH_ALEN, 1) < 0) return -EOPNOTSUPP; /* local->bssid is also updated in LinkStatus handler when in * station mode */ memcpy(local->bssid, &ap_addr->sa_data, ETH_ALEN); break; } return 0; } static int prism2_ioctl_siwnickn(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *nickname) { struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; memset(local->name, 0, sizeof(local->name)); memcpy(local->name, nickname, data->length); local->name_set = 1; if (hostap_set_string(dev, HFA384X_RID_CNFOWNNAME, local->name) || local->func->reset_port(dev)) return -EINVAL; return 0; } static int prism2_ioctl_giwnickn(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *nickname) { struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; int len; char name[MAX_NAME_LEN + 3]; u16 val; iface = netdev_priv(dev); local = iface->local; len = local->func->get_rid(dev, HFA384X_RID_CNFOWNNAME, &name, MAX_NAME_LEN + 2, 0); val = le16_to_cpu(*(__le16 *) name); if (len > MAX_NAME_LEN + 2 || len < 0 || val > MAX_NAME_LEN) return -EOPNOTSUPP; name[val + 2] = '\0'; data->length = val + 1; memcpy(nickname, name + 2, val + 1); return 0; } static int prism2_ioctl_siwfreq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_freq *freq = &wrqu->freq; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; /* freq => chan. 
*/ if (freq->e == 1 && freq->m / 100000 >= freq_list[0] && freq->m / 100000 <= freq_list[FREQ_COUNT - 1]) { int ch; int fr = freq->m / 100000; for (ch = 0; ch < FREQ_COUNT; ch++) { if (fr == freq_list[ch]) { freq->e = 0; freq->m = ch + 1; break; } } } if (freq->e != 0 || freq->m < 1 || freq->m > FREQ_COUNT || !(local->channel_mask & (1 << (freq->m - 1)))) return -EINVAL; local->channel = freq->m; /* channel is used in prism2_setup_rids() */ if (hostap_set_word(dev, HFA384X_RID_CNFOWNCHANNEL, local->channel) || local->func->reset_port(dev)) return -EINVAL; return 0; } static int prism2_ioctl_giwfreq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_freq *freq = &wrqu->freq; struct hostap_interface *iface; local_info_t *local; u16 val; iface = netdev_priv(dev); local = iface->local; if (local->func->get_rid(dev, HFA384X_RID_CURRENTCHANNEL, &val, 2, 1) < 0) return -EINVAL; le16_to_cpus(&val); if (val < 1 || val > FREQ_COUNT) return -EINVAL; freq->m = freq_list[val - 1] * 100000; freq->e = 1; return 0; } static void hostap_monitor_set_type(local_info_t *local) { struct net_device *dev = local->ddev; if (dev == NULL) return; if (local->monitor_type == PRISM2_MONITOR_PRISM || local->monitor_type == PRISM2_MONITOR_CAPHDR) { dev->type = ARPHRD_IEEE80211_PRISM; } else if (local->monitor_type == PRISM2_MONITOR_RADIOTAP) { dev->type = ARPHRD_IEEE80211_RADIOTAP; } else { dev->type = ARPHRD_IEEE80211; } } static int prism2_ioctl_siwessid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *ssid) { struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; if (iface->type == HOSTAP_INTERFACE_WDS) return -EOPNOTSUPP; if (data->flags == 0) ssid[0] = '\0'; /* ANY */ if (local->iw_mode == IW_MODE_MASTER && ssid[0] == '\0') { /* Setting SSID to empty string seems to kill the card in * Host AP mode */ printk(KERN_DEBUG "%s: Host AP mode does not support " "'Any' essid\n", dev->name); return -EINVAL; } memcpy(local->essid, ssid, data->length); local->essid[data->length] = '\0'; if ((!local->fw_ap && hostap_set_string(dev, HFA384X_RID_CNFDESIREDSSID, local->essid)) || hostap_set_string(dev, HFA384X_RID_CNFOWNSSID, local->essid) || local->func->reset_port(dev)) return -EINVAL; return 0; } static int prism2_ioctl_giwessid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *essid) { struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; u16 val; iface = netdev_priv(dev); local = iface->local; if (iface->type == HOSTAP_INTERFACE_WDS) return -EOPNOTSUPP; data->flags = 1; /* active */ if (local->iw_mode == IW_MODE_MASTER) { data->length = strlen(local->essid); memcpy(essid, local->essid, IW_ESSID_MAX_SIZE); } else { int len; char ssid[MAX_SSID_LEN + 2]; memset(ssid, 0, sizeof(ssid)); len = local->func->get_rid(dev, HFA384X_RID_CURRENTSSID, &ssid, MAX_SSID_LEN + 2, 0); val = le16_to_cpu(*(__le16 *) ssid); if (len > MAX_SSID_LEN + 2 || len < 0 || val > MAX_SSID_LEN) { return -EOPNOTSUPP; } data->length = val; memcpy(essid, ssid + 2, IW_ESSID_MAX_SIZE); } return 0; } static int prism2_ioctl_giwrange(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; struct iw_range *range = (struct iw_range *) extra; u8 rates[10]; u16 val; int i, len, over2; iface = 
netdev_priv(dev); local = iface->local; data->length = sizeof(struct iw_range); memset(range, 0, sizeof(struct iw_range)); /* TODO: could fill num_txpower and txpower array with * something; however, there are 128 different values.. */ range->txpower_capa = IW_TXPOW_DBM; if (local->iw_mode == IW_MODE_INFRA || local->iw_mode == IW_MODE_ADHOC) { range->min_pmp = 1 * 1024; range->max_pmp = 65535 * 1024; range->min_pmt = 1 * 1024; range->max_pmt = 1000 * 1024; range->pmp_flags = IW_POWER_PERIOD; range->pmt_flags = IW_POWER_TIMEOUT; range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_UNICAST_R | IW_POWER_ALL_R; } range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 18; range->retry_capa = IW_RETRY_LIMIT; range->retry_flags = IW_RETRY_LIMIT; range->min_retry = 0; range->max_retry = 255; range->num_channels = FREQ_COUNT; val = 0; for (i = 0; i < FREQ_COUNT; i++) { if (local->channel_mask & (1 << i)) { range->freq[val].i = i + 1; range->freq[val].m = freq_list[i] * 100000; range->freq[val].e = 1; val++; } if (val == IW_MAX_FREQUENCIES) break; } range->num_frequency = val; if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1)) { range->max_qual.qual = 70; /* what is correct max? This was not * documented exactly. At least * 69 has been observed. */ range->max_qual.level = 0; /* dB */ range->max_qual.noise = 0; /* dB */ /* What would be suitable values for "average/typical" qual? */ range->avg_qual.qual = 20; range->avg_qual.level = -60; range->avg_qual.noise = -95; } else { range->max_qual.qual = 92; /* 0 .. 92 */ range->max_qual.level = 154; /* 27 .. 154 */ range->max_qual.noise = 154; /* 27 .. 154 */ } range->sensitivity = 3; range->max_encoding_tokens = WEP_KEYS; range->num_encoding_sizes = 2; range->encoding_size[0] = 5; range->encoding_size[1] = 13; over2 = 0; len = prism2_get_datarates(dev, rates); range->num_bitrates = 0; for (i = 0; i < len; i++) { if (range->num_bitrates < IW_MAX_BITRATES) { range->bitrate[range->num_bitrates] = rates[i] * 500000; range->num_bitrates++; } if (rates[i] == 0x0b || rates[i] == 0x16) over2 = 1; } /* estimated maximum TCP throughput values (bps) */ range->throughput = over2 ? 
5500000 : 1500000; range->min_rts = 0; range->max_rts = 2347; range->min_frag = 256; range->max_frag = 2346; /* Event capability (kernel + driver) */ range->event_capa[0] = (IW_EVENT_CAPA_K_0 | IW_EVENT_CAPA_MASK(SIOCGIWTHRSPY) | IW_EVENT_CAPA_MASK(SIOCGIWAP) | IW_EVENT_CAPA_MASK(SIOCGIWSCAN)); range->event_capa[1] = IW_EVENT_CAPA_K_1; range->event_capa[4] = (IW_EVENT_CAPA_MASK(IWEVTXDROP) | IW_EVENT_CAPA_MASK(IWEVCUSTOM) | IW_EVENT_CAPA_MASK(IWEVREGISTERED) | IW_EVENT_CAPA_MASK(IWEVEXPIRED)); range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1)) range->scan_capa = IW_SCAN_CAPA_ESSID; return 0; } static int hostap_monitor_mode_enable(local_info_t *local) { struct net_device *dev = local->dev; printk(KERN_DEBUG "Enabling monitor mode\n"); hostap_monitor_set_type(local); if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE, HFA384X_PORTTYPE_PSEUDO_IBSS)) { printk(KERN_DEBUG "Port type setting for monitor mode " "failed\n"); return -EOPNOTSUPP; } /* Host decrypt is needed to get the IV and ICV fields; * however, monitor mode seems to remove WEP flag from frame * control field */ if (hostap_set_word(dev, HFA384X_RID_CNFWEPFLAGS, HFA384X_WEPFLAGS_HOSTENCRYPT | HFA384X_WEPFLAGS_HOSTDECRYPT)) { printk(KERN_DEBUG "WEP flags setting failed\n"); return -EOPNOTSUPP; } if (local->func->reset_port(dev) || local->func->cmd(dev, HFA384X_CMDCODE_TEST | (HFA384X_TEST_MONITOR << 8), 0, NULL, NULL)) { printk(KERN_DEBUG "Setting monitor mode failed\n"); return -EOPNOTSUPP; } return 0; } static int hostap_monitor_mode_disable(local_info_t *local) { struct net_device *dev = local->ddev; if (dev == NULL) return -1; printk(KERN_DEBUG "%s: Disabling monitor mode\n", dev->name); dev->type = ARPHRD_ETHER; if (local->func->cmd(dev, HFA384X_CMDCODE_TEST | (HFA384X_TEST_STOP << 8), 0, NULL, NULL)) return -1; return hostap_set_encryption(local); } static int prism2_ioctl_siwmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { __u32 *mode = &wrqu->mode; struct hostap_interface *iface; local_info_t *local; int double_reset = 0; iface = netdev_priv(dev); local = iface->local; if (*mode != IW_MODE_ADHOC && *mode != IW_MODE_INFRA && *mode != IW_MODE_MASTER && *mode != IW_MODE_REPEAT && *mode != IW_MODE_MONITOR) return -EOPNOTSUPP; #ifdef PRISM2_NO_STATION_MODES if (*mode == IW_MODE_ADHOC || *mode == IW_MODE_INFRA) return -EOPNOTSUPP; #endif /* PRISM2_NO_STATION_MODES */ if (*mode == local->iw_mode) return 0; if (*mode == IW_MODE_MASTER && local->essid[0] == '\0') { printk(KERN_WARNING "%s: empty SSID not allowed in Master " "mode\n", dev->name); return -EINVAL; } if (local->iw_mode == IW_MODE_MONITOR) hostap_monitor_mode_disable(local); if ((local->iw_mode == IW_MODE_ADHOC || local->iw_mode == IW_MODE_MONITOR) && *mode == IW_MODE_MASTER) { /* There seems to be a firmware bug in at least STA f/w v1.5.6 * that leaves beacon frames to use IBSS type when moving from * IBSS to Host AP mode. Doing double Port0 reset seems to be * enough to workaround this. 
*/ double_reset = 1; } printk(KERN_DEBUG "prism2: %s: operating mode changed " "%d -> %d\n", dev->name, local->iw_mode, *mode); local->iw_mode = *mode; if (local->iw_mode == IW_MODE_MONITOR) hostap_monitor_mode_enable(local); else if (local->iw_mode == IW_MODE_MASTER && !local->host_encrypt && !local->fw_encrypt_ok) { printk(KERN_DEBUG "%s: defaulting to host-based encryption as " "a workaround for firmware bug in Host AP mode WEP\n", dev->name); local->host_encrypt = 1; } if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE, hostap_get_porttype(local))) return -EOPNOTSUPP; if (local->func->reset_port(dev)) return -EINVAL; if (double_reset && local->func->reset_port(dev)) return -EINVAL; if (local->iw_mode != IW_MODE_INFRA && local->iw_mode != IW_MODE_ADHOC) { /* netif_carrier is used only in client modes for now, so make * sure carrier is on when moving to non-client modes. */ netif_carrier_on(local->dev); netif_carrier_on(local->ddev); } return 0; } static int prism2_ioctl_giwmode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { __u32 *mode = &wrqu->mode; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; switch (iface->type) { case HOSTAP_INTERFACE_STA: *mode = IW_MODE_INFRA; break; case HOSTAP_INTERFACE_WDS: *mode = IW_MODE_REPEAT; break; default: *mode = local->iw_mode; break; } return 0; } static int prism2_ioctl_siwpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *wrq = &wrqu->power; #ifdef PRISM2_NO_STATION_MODES return -EOPNOTSUPP; #else /* PRISM2_NO_STATION_MODES */ int ret = 0; if (wrq->disabled) return hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 0); switch (wrq->flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: ret = hostap_set_word(dev, HFA384X_RID_CNFMULTICASTRECEIVE, 0); if (ret) return ret; ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1); if (ret) return ret; break; case IW_POWER_ALL_R: ret = hostap_set_word(dev, HFA384X_RID_CNFMULTICASTRECEIVE, 1); if (ret) return ret; ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1); if (ret) return ret; break; case IW_POWER_ON: break; default: return -EINVAL; } if (wrq->flags & IW_POWER_TIMEOUT) { ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1); if (ret) return ret; ret = hostap_set_word(dev, HFA384X_RID_CNFPMHOLDOVERDURATION, wrq->value / 1024); if (ret) return ret; } if (wrq->flags & IW_POWER_PERIOD) { ret = hostap_set_word(dev, HFA384X_RID_CNFPMENABLED, 1); if (ret) return ret; ret = hostap_set_word(dev, HFA384X_RID_CNFMAXSLEEPDURATION, wrq->value / 1024); if (ret) return ret; } return ret; #endif /* PRISM2_NO_STATION_MODES */ } static int prism2_ioctl_giwpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rrq = &wrqu->power; #ifdef PRISM2_NO_STATION_MODES return -EOPNOTSUPP; #else /* PRISM2_NO_STATION_MODES */ struct hostap_interface *iface; local_info_t *local; __le16 enable, mcast; iface = netdev_priv(dev); local = iface->local; if (local->func->get_rid(dev, HFA384X_RID_CNFPMENABLED, &enable, 2, 1) < 0) return -EINVAL; if (!le16_to_cpu(enable)) { rrq->disabled = 1; return 0; } rrq->disabled = 0; if ((rrq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { __le16 timeout; if (local->func->get_rid(dev, HFA384X_RID_CNFPMHOLDOVERDURATION, &timeout, 2, 1) < 0) return -EINVAL; rrq->flags = IW_POWER_TIMEOUT; rrq->value = le16_to_cpu(timeout) * 1024; } else { __le16 period; if (local->func->get_rid(dev, 
HFA384X_RID_CNFMAXSLEEPDURATION, &period, 2, 1) < 0) return -EINVAL; rrq->flags = IW_POWER_PERIOD; rrq->value = le16_to_cpu(period) * 1024; } if (local->func->get_rid(dev, HFA384X_RID_CNFMULTICASTRECEIVE, &mcast, 2, 1) < 0) return -EINVAL; if (le16_to_cpu(mcast)) rrq->flags |= IW_POWER_ALL_R; else rrq->flags |= IW_POWER_UNICAST_R; return 0; #endif /* PRISM2_NO_STATION_MODES */ } static int prism2_ioctl_siwretry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rrq = &wrqu->retry; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; if (rrq->disabled) return -EINVAL; /* setting retry limits is not supported with the current station * firmware code; simulate this with alternative retry count for now */ if (rrq->flags == IW_RETRY_LIMIT) { if (rrq->value < 0) { /* disable manual retry count setting and use firmware * defaults */ local->manual_retry_count = -1; local->tx_control &= ~HFA384X_TX_CTRL_ALT_RTRY; } else { if (hostap_set_word(dev, HFA384X_RID_CNFALTRETRYCOUNT, rrq->value)) { printk(KERN_DEBUG "%s: Alternate retry count " "setting to %d failed\n", dev->name, rrq->value); return -EOPNOTSUPP; } local->manual_retry_count = rrq->value; local->tx_control |= HFA384X_TX_CTRL_ALT_RTRY; } return 0; } return -EOPNOTSUPP; #if 0 /* what could be done, if firmware would support this.. */ if (rrq->flags & IW_RETRY_LIMIT) { if (rrq->flags & IW_RETRY_LONG) HFA384X_RID_LONGRETRYLIMIT = rrq->value; else if (rrq->flags & IW_RETRY_SHORT) HFA384X_RID_SHORTRETRYLIMIT = rrq->value; else { HFA384X_RID_LONGRETRYLIMIT = rrq->value; HFA384X_RID_SHORTRETRYLIMIT = rrq->value; } } if (rrq->flags & IW_RETRY_LIFETIME) { HFA384X_RID_MAXTRANSMITLIFETIME = rrq->value / 1024; } return 0; #endif /* 0 */ } static int prism2_ioctl_giwretry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rrq = &wrqu->retry; struct hostap_interface *iface; local_info_t *local; __le16 shortretry, longretry, lifetime, altretry; iface = netdev_priv(dev); local = iface->local; if (local->func->get_rid(dev, HFA384X_RID_SHORTRETRYLIMIT, &shortretry, 2, 1) < 0 || local->func->get_rid(dev, HFA384X_RID_LONGRETRYLIMIT, &longretry, 2, 1) < 0 || local->func->get_rid(dev, HFA384X_RID_MAXTRANSMITLIFETIME, &lifetime, 2, 1) < 0) return -EINVAL; rrq->disabled = 0; if ((rrq->flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) { rrq->flags = IW_RETRY_LIFETIME; rrq->value = le16_to_cpu(lifetime) * 1024; } else { if (local->manual_retry_count >= 0) { rrq->flags = IW_RETRY_LIMIT; if (local->func->get_rid(dev, HFA384X_RID_CNFALTRETRYCOUNT, &altretry, 2, 1) >= 0) rrq->value = le16_to_cpu(altretry); else rrq->value = local->manual_retry_count; } else if ((rrq->flags & IW_RETRY_LONG)) { rrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG; rrq->value = le16_to_cpu(longretry); } else { rrq->flags = IW_RETRY_LIMIT; rrq->value = le16_to_cpu(shortretry); if (shortretry != longretry) rrq->flags |= IW_RETRY_SHORT; } } return 0; } /* Note! This TX power controlling is experimental and should not be used in * production use. It just sets raw power register and does not use any kind of * feedback information from the measured TX power (CR58). This is now * commented out to make sure that it is not used by accident. TX power * configuration will be enabled again after proper algorithm using feedback * has been implemented. */ #ifdef RAW_TXPOWER_SETTING /* Map HFA386x's CR31 to and from dBm with some sort of ad hoc mapping.. 
* This version assumes following mapping: * CR31 is 7-bit value with -64 to +63 range. * -64 is mapped into +20dBm and +63 into -43dBm. * This is certainly not an exact mapping for every card, but at least * increasing dBm value should correspond to increasing TX power. */ static int prism2_txpower_hfa386x_to_dBm(u16 val) { signed char tmp; if (val > 255) val = 255; tmp = val; tmp >>= 2; return -12 - tmp; } static u16 prism2_txpower_dBm_to_hfa386x(int val) { signed char tmp; if (val > 20) return 128; else if (val < -43) return 127; tmp = val; tmp = -12 - tmp; tmp <<= 2; return (unsigned char) tmp; } #endif /* RAW_TXPOWER_SETTING */ static int prism2_ioctl_siwtxpow(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rrq = &wrqu->txpower; struct hostap_interface *iface; local_info_t *local; #ifdef RAW_TXPOWER_SETTING char *tmp; #endif u16 val; int ret = 0; iface = netdev_priv(dev); local = iface->local; if (rrq->disabled) { if (local->txpower_type != PRISM2_TXPOWER_OFF) { val = 0xff; /* use all standby and sleep modes */ ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF, HFA386X_CR_A_D_TEST_MODES2, &val, NULL); printk(KERN_DEBUG "%s: Turning radio off: %s\n", dev->name, ret ? "failed" : "OK"); local->txpower_type = PRISM2_TXPOWER_OFF; } return (ret ? -EOPNOTSUPP : 0); } if (local->txpower_type == PRISM2_TXPOWER_OFF) { val = 0; /* disable all standby and sleep modes */ ret = local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF, HFA386X_CR_A_D_TEST_MODES2, &val, NULL); printk(KERN_DEBUG "%s: Turning radio on: %s\n", dev->name, ret ? "failed" : "OK"); local->txpower_type = PRISM2_TXPOWER_UNKNOWN; } #ifdef RAW_TXPOWER_SETTING if (!rrq->fixed && local->txpower_type != PRISM2_TXPOWER_AUTO) { printk(KERN_DEBUG "Setting ALC on\n"); val = HFA384X_TEST_CFG_BIT_ALC; local->func->cmd(dev, HFA384X_CMDCODE_TEST | (HFA384X_TEST_CFG_BITS << 8), 1, &val, NULL); local->txpower_type = PRISM2_TXPOWER_AUTO; return 0; } if (local->txpower_type != PRISM2_TXPOWER_FIXED) { printk(KERN_DEBUG "Setting ALC off\n"); val = HFA384X_TEST_CFG_BIT_ALC; local->func->cmd(dev, HFA384X_CMDCODE_TEST | (HFA384X_TEST_CFG_BITS << 8), 0, &val, NULL); local->txpower_type = PRISM2_TXPOWER_FIXED; } if (rrq->flags == IW_TXPOW_DBM) tmp = "dBm"; else if (rrq->flags == IW_TXPOW_MWATT) tmp = "mW"; else tmp = "UNKNOWN"; printk(KERN_DEBUG "Setting TX power to %d %s\n", rrq->value, tmp); if (rrq->flags != IW_TXPOW_DBM) { printk("SIOCSIWTXPOW with mW is not supported; use dBm\n"); return -EOPNOTSUPP; } local->txpower = rrq->value; val = prism2_txpower_dBm_to_hfa386x(local->txpower); if (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF, HFA386X_CR_MANUAL_TX_POWER, &val, NULL)) ret = -EOPNOTSUPP; #else /* RAW_TXPOWER_SETTING */ if (rrq->fixed) ret = -EOPNOTSUPP; #endif /* RAW_TXPOWER_SETTING */ return ret; } static int prism2_ioctl_giwtxpow(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { #ifdef RAW_TXPOWER_SETTING struct iw_param *rrq = &wrqu->txpower; struct hostap_interface *iface; local_info_t *local; u16 resp0; iface = netdev_priv(dev); local = iface->local; rrq->flags = IW_TXPOW_DBM; rrq->disabled = 0; rrq->fixed = 0; if (local->txpower_type == PRISM2_TXPOWER_AUTO) { if (local->func->cmd(dev, HFA384X_CMDCODE_READMIF, HFA386X_CR_MANUAL_TX_POWER, NULL, &resp0) == 0) { rrq->value = prism2_txpower_hfa386x_to_dBm(resp0); } else { /* Could not get real txpower; guess 15 dBm */ rrq->value = 15; } } else if (local->txpower_type == PRISM2_TXPOWER_OFF) { 
rrq->value = 0; rrq->disabled = 1; } else if (local->txpower_type == PRISM2_TXPOWER_FIXED) { rrq->value = local->txpower; rrq->fixed = 1; } else { printk("SIOCGIWTXPOW - unknown txpower_type=%d\n", local->txpower_type); } return 0; #else /* RAW_TXPOWER_SETTING */ return -EOPNOTSUPP; #endif /* RAW_TXPOWER_SETTING */ } #ifndef PRISM2_NO_STATION_MODES /* HostScan request works with and without host_roaming mode. In addition, it * does not break current association. However, it requires newer station * firmware version (>= 1.3.1) than scan request. */ static int prism2_request_hostscan(struct net_device *dev, u8 *ssid, u8 ssid_len) { struct hostap_interface *iface; local_info_t *local; struct hfa384x_hostscan_request scan_req; iface = netdev_priv(dev); local = iface->local; memset(&scan_req, 0, sizeof(scan_req)); scan_req.channel_list = cpu_to_le16(local->channel_mask & local->scan_channel_mask); scan_req.txrate = cpu_to_le16(HFA384X_RATES_1MBPS); if (ssid) { if (ssid_len > 32) return -EINVAL; scan_req.target_ssid_len = cpu_to_le16(ssid_len); memcpy(scan_req.target_ssid, ssid, ssid_len); } if (local->func->set_rid(dev, HFA384X_RID_HOSTSCAN, &scan_req, sizeof(scan_req))) { printk(KERN_DEBUG "%s: HOSTSCAN failed\n", dev->name); return -EINVAL; } return 0; } static int prism2_request_scan(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; struct hfa384x_scan_request scan_req; int ret = 0; iface = netdev_priv(dev); local = iface->local; memset(&scan_req, 0, sizeof(scan_req)); scan_req.channel_list = cpu_to_le16(local->channel_mask & local->scan_channel_mask); scan_req.txrate = cpu_to_le16(HFA384X_RATES_1MBPS); /* FIX: * It seems to be enough to set roaming mode for a short moment to * host-based and then setup scanrequest data and return the mode to * firmware-based. * * Master mode would need to drop to Managed mode for a short while * to make scanning work.. Or sweep through the different channels and * use passive scan based on beacons. */ if (!local->host_roaming) hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE, HFA384X_ROAMING_HOST); if (local->func->set_rid(dev, HFA384X_RID_SCANREQUEST, &scan_req, sizeof(scan_req))) { printk(KERN_DEBUG "SCANREQUEST failed\n"); ret = -EINVAL; } if (!local->host_roaming) hostap_set_word(dev, HFA384X_RID_CNFROAMINGMODE, HFA384X_ROAMING_FIRMWARE); return ret; } #else /* !PRISM2_NO_STATION_MODES */ static inline int prism2_request_hostscan(struct net_device *dev, u8 *ssid, u8 ssid_len) { return -EOPNOTSUPP; } static inline int prism2_request_scan(struct net_device *dev) { return -EOPNOTSUPP; } #endif /* !PRISM2_NO_STATION_MODES */ static int prism2_ioctl_siwscan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; int ret; u8 *ssid = NULL, ssid_len = 0; struct iw_scan_req *req = (struct iw_scan_req *) extra; iface = netdev_priv(dev); local = iface->local; if (data->length < sizeof(struct iw_scan_req)) req = NULL; if (local->iw_mode == IW_MODE_MASTER) { /* In master mode, we just return the results of our local * tables, so we don't need to start anything... 
* Jean II */ data->length = 0; return 0; } if (!local->dev_enabled) return -ENETDOWN; if (req && data->flags & IW_SCAN_THIS_ESSID) { ssid = req->essid; ssid_len = req->essid_len; if (ssid_len && ((local->iw_mode != IW_MODE_INFRA && local->iw_mode != IW_MODE_ADHOC) || (local->sta_fw_ver < PRISM2_FW_VER(1,3,1)))) return -EOPNOTSUPP; } if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1)) ret = prism2_request_hostscan(dev, ssid, ssid_len); else ret = prism2_request_scan(dev); if (ret == 0) local->scan_timestamp = jiffies; /* Could inquire F101, F103 or wait for SIOCGIWSCAN and read RID */ return ret; } #ifndef PRISM2_NO_STATION_MODES static char * __prism2_translate_scan(local_info_t *local, struct iw_request_info *info, struct hfa384x_hostscan_result *scan, struct hostap_bss_info *bss, char *current_ev, char *end_buf) { int i, chan; struct iw_event iwe; char *current_val; u16 capabilities; u8 *pos; u8 *ssid, *bssid; size_t ssid_len; char *buf; if (bss) { ssid = bss->ssid; ssid_len = bss->ssid_len; bssid = bss->bssid; } else { ssid = scan->ssid; ssid_len = le16_to_cpu(scan->ssid_len); bssid = scan->bssid; } if (ssid_len > 32) ssid_len = 32; /* First entry *MUST* be the AP MAC address */ memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, bssid, ETH_ALEN); current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_ADDR_LEN); /* Other entries will be displayed in the order we give them */ memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWESSID; iwe.u.data.length = ssid_len; iwe.u.data.flags = 1; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, ssid); memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWMODE; if (bss) { capabilities = bss->capab_info; } else { capabilities = le16_to_cpu(scan->capability); } if (capabilities & (WLAN_CAPABILITY_ESS | WLAN_CAPABILITY_IBSS)) { if (capabilities & WLAN_CAPABILITY_ESS) iwe.u.mode = IW_MODE_MASTER; else iwe.u.mode = IW_MODE_ADHOC; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_UINT_LEN); } memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWFREQ; if (scan) { chan = le16_to_cpu(scan->chid); } else if (bss) { chan = bss->chan; } else { chan = 0; } if (chan > 0) { iwe.u.freq.m = freq_list[chan - 1] * 100000; iwe.u.freq.e = 1; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); } if (scan) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVQUAL; if (local->last_scan_type == PRISM2_HOSTSCAN) { iwe.u.qual.level = le16_to_cpu(scan->sl); iwe.u.qual.noise = le16_to_cpu(scan->anl); } else { iwe.u.qual.level = HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->sl)); iwe.u.qual.noise = HFA384X_LEVEL_TO_dBm(le16_to_cpu(scan->anl)); } iwe.u.qual.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED | IW_QUAL_QUAL_INVALID | IW_QUAL_DBM; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); } memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWENCODE; if (capabilities & WLAN_CAPABILITY_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; iwe.u.data.length = 0; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, ""); /* TODO: add SuppRates into BSS table */ if (scan) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWRATE; current_val = current_ev + iwe_stream_lcp_len(info); pos = scan->sup_rates; for (i = 0; i < sizeof(scan->sup_rates); i++) { if (pos[i] == 0) break; /* Bit rate given in 500 kb/s units (+ 0x80) */ iwe.u.bitrate.value = ((pos[i] & 0x7f) * 
500000); current_val = iwe_stream_add_value( info, current_ev, current_val, end_buf, &iwe, IW_EV_PARAM_LEN); } /* Check if we added any event */ if ((current_val - current_ev) > iwe_stream_lcp_len(info)) current_ev = current_val; } /* TODO: add BeaconInt,resp_rate,atim into BSS table */ buf = kmalloc(MAX_WPA_IE_LEN * 2 + 30, GFP_ATOMIC); if (buf && scan) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; sprintf(buf, "bcn_int=%d", le16_to_cpu(scan->beacon_interval)); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, buf); memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; sprintf(buf, "resp_rate=%d", le16_to_cpu(scan->rate)); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, buf); if (local->last_scan_type == PRISM2_HOSTSCAN && (capabilities & WLAN_CAPABILITY_IBSS)) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; sprintf(buf, "atim=%d", le16_to_cpu(scan->atim)); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, buf); } } kfree(buf); if (bss && bss->wpa_ie_len > 0 && bss->wpa_ie_len <= MAX_WPA_IE_LEN) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVGENIE; iwe.u.data.length = bss->wpa_ie_len; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, bss->wpa_ie); } if (bss && bss->rsn_ie_len > 0 && bss->rsn_ie_len <= MAX_WPA_IE_LEN) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVGENIE; iwe.u.data.length = bss->rsn_ie_len; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, bss->rsn_ie); } return current_ev; } /* Translate scan data returned from the card to a card independent * format that the Wireless Tools will understand - Jean II */ static inline int prism2_translate_scan(local_info_t *local, struct iw_request_info *info, char *buffer, int buflen) { struct hfa384x_hostscan_result *scan; int entry; char *current_ev = buffer; char *end_buf = buffer + buflen; struct list_head *ptr; spin_lock_bh(&local->lock); list_for_each(ptr, &local->bss_list) { struct hostap_bss_info *bss; bss = list_entry(ptr, struct hostap_bss_info, list); bss->included = 0; } for (entry = 0; entry < local->last_scan_results_count; entry++) { int found = 0; scan = &local->last_scan_results[entry]; /* Report every SSID if the AP is using multiple SSIDs. If no * BSS record is found (e.g., when WPA mode is disabled), * report the AP once. */ list_for_each(ptr, &local->bss_list) { struct hostap_bss_info *bss; bss = list_entry(ptr, struct hostap_bss_info, list); if (ether_addr_equal(bss->bssid, scan->bssid)) { bss->included = 1; current_ev = __prism2_translate_scan( local, info, scan, bss, current_ev, end_buf); found++; } } if (!found) { current_ev = __prism2_translate_scan( local, info, scan, NULL, current_ev, end_buf); } /* Check if there is space for one more entry */ if ((end_buf - current_ev) <= IW_EV_ADDR_LEN) { /* Ask user space to try again with a bigger buffer */ spin_unlock_bh(&local->lock); return -E2BIG; } } /* Prism2 firmware has limits (32 at least in some versions) for number * of BSSes in scan results. Extend this limit by using local BSS list. 
*/ list_for_each(ptr, &local->bss_list) { struct hostap_bss_info *bss; bss = list_entry(ptr, struct hostap_bss_info, list); if (bss->included) continue; current_ev = __prism2_translate_scan(local, info, NULL, bss, current_ev, end_buf); /* Check if there is space for one more entry */ if ((end_buf - current_ev) <= IW_EV_ADDR_LEN) { /* Ask user space to try again with a bigger buffer */ spin_unlock_bh(&local->lock); return -E2BIG; } } spin_unlock_bh(&local->lock); return current_ev - buffer; } #endif /* PRISM2_NO_STATION_MODES */ static inline int prism2_ioctl_giwscan_sta(struct net_device *dev, struct iw_request_info *info, struct iw_point *data, char *extra) { #ifdef PRISM2_NO_STATION_MODES return -EOPNOTSUPP; #else /* PRISM2_NO_STATION_MODES */ struct hostap_interface *iface; local_info_t *local; int res; iface = netdev_priv(dev); local = iface->local; /* Wait until the scan is finished. We can probably do better * than that - Jean II */ if (local->scan_timestamp && time_before(jiffies, local->scan_timestamp + 3 * HZ)) { /* Important note : we don't want to block the caller * until results are ready for various reasons. * First, managing wait queues is complex and racy * (there may be multiple simultaneous callers). * Second, we grab some rtnetlink lock before coming * here (in dev_ioctl()). * Third, the caller can wait on the Wireless Event * - Jean II */ return -EAGAIN; } local->scan_timestamp = 0; res = prism2_translate_scan(local, info, extra, data->length); if (res >= 0) { data->length = res; return 0; } else { data->length = 0; return res; } #endif /* PRISM2_NO_STATION_MODES */ } static int prism2_ioctl_giwscan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *data = &wrqu->data; struct hostap_interface *iface; local_info_t *local; int res; iface = netdev_priv(dev); local = iface->local; if (local->iw_mode == IW_MODE_MASTER) { /* In MASTER mode, it doesn't make sense to go around * scanning the frequencies and make the stations we serve * wait when what the user is really interested about is the * list of stations and access points we are talking to. * So, just extract results from our cache... 
* Jean II */ /* Translate to WE format */ res = prism2_ap_translate_scan(dev, info, extra); if (res >= 0) { printk(KERN_DEBUG "Scan result translation succeeded " "(length=%d)\n", res); data->length = res; return 0; } else { printk(KERN_DEBUG "Scan result translation failed (res=%d)\n", res); data->length = 0; return res; } } else { /* Station mode */ return prism2_ioctl_giwscan_sta(dev, info, data, extra); } } static const struct iw_priv_args prism2_priv[] = { { PRISM2_IOCTL_MONITOR, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "monitor" }, { PRISM2_IOCTL_READMIF, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 1, "readmif" }, { PRISM2_IOCTL_WRITEMIF, IW_PRIV_TYPE_BYTE | IW_PRIV_SIZE_FIXED | 2, 0, "writemif" }, { PRISM2_IOCTL_RESET, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "reset" }, { PRISM2_IOCTL_INQUIRE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "inquire" }, { PRISM2_IOCTL_SET_RID_WORD, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "set_rid_word" }, { PRISM2_IOCTL_MACCMD, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "maccmd" }, { PRISM2_IOCTL_WDS_ADD, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "wds_add" }, { PRISM2_IOCTL_WDS_DEL, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "wds_del" }, { PRISM2_IOCTL_ADDMAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "addmac" }, { PRISM2_IOCTL_DELMAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "delmac" }, { PRISM2_IOCTL_KICKMAC, IW_PRIV_TYPE_ADDR | IW_PRIV_SIZE_FIXED | 1, 0, "kickmac" }, /* --- raw access to sub-ioctls --- */ { PRISM2_IOCTL_PRISM2_PARAM, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "prism2_param" }, { PRISM2_IOCTL_GET_PRISM2_PARAM, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getprism2_param" }, /* --- sub-ioctls handlers --- */ { PRISM2_IOCTL_PRISM2_PARAM, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "" }, { PRISM2_IOCTL_GET_PRISM2_PARAM, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "" }, /* --- sub-ioctls definitions --- */ { PRISM2_PARAM_TXRATECTRL, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "txratectrl" }, { PRISM2_PARAM_TXRATECTRL, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gettxratectrl" }, { PRISM2_PARAM_BEACON_INT, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "beacon_int" }, { PRISM2_PARAM_BEACON_INT, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbeacon_int" }, #ifndef PRISM2_NO_STATION_MODES { PRISM2_PARAM_PSEUDO_IBSS, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "pseudo_ibss" }, { PRISM2_PARAM_PSEUDO_IBSS, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getpseudo_ibss" }, #endif /* PRISM2_NO_STATION_MODES */ { PRISM2_PARAM_ALC, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "alc" }, { PRISM2_PARAM_ALC, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getalc" }, { PRISM2_PARAM_DUMP, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dump" }, { PRISM2_PARAM_DUMP, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdump" }, { PRISM2_PARAM_OTHER_AP_POLICY, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "other_ap_policy" }, { PRISM2_PARAM_OTHER_AP_POLICY, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getother_ap_pol" }, { PRISM2_PARAM_AP_MAX_INACTIVITY, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_inactivity" }, { PRISM2_PARAM_AP_MAX_INACTIVITY, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmax_inactivi" }, { PRISM2_PARAM_AP_BRIDGE_PACKETS, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bridge_packets" }, { PRISM2_PARAM_AP_BRIDGE_PACKETS, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbridge_packe" }, { 
PRISM2_PARAM_DTIM_PERIOD, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "dtim_period" }, { PRISM2_PARAM_DTIM_PERIOD, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdtim_period" }, { PRISM2_PARAM_AP_NULLFUNC_ACK, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "nullfunc_ack" }, { PRISM2_PARAM_AP_NULLFUNC_ACK, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getnullfunc_ack" }, { PRISM2_PARAM_MAX_WDS, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "max_wds" }, { PRISM2_PARAM_MAX_WDS, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmax_wds" }, { PRISM2_PARAM_AP_AUTOM_AP_WDS, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "autom_ap_wds" }, { PRISM2_PARAM_AP_AUTOM_AP_WDS, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getautom_ap_wds" }, { PRISM2_PARAM_AP_AUTH_ALGS, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_auth_algs" }, { PRISM2_PARAM_AP_AUTH_ALGS, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getap_auth_algs" }, { PRISM2_PARAM_MONITOR_ALLOW_FCSERR, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "allow_fcserr" }, { PRISM2_PARAM_MONITOR_ALLOW_FCSERR, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getallow_fcserr" }, { PRISM2_PARAM_HOST_ENCRYPT, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_encrypt" }, { PRISM2_PARAM_HOST_ENCRYPT, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_encrypt" }, { PRISM2_PARAM_HOST_DECRYPT, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_decrypt" }, { PRISM2_PARAM_HOST_DECRYPT, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_decrypt" }, #ifndef PRISM2_NO_STATION_MODES { PRISM2_PARAM_HOST_ROAMING, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "host_roaming" }, { PRISM2_PARAM_HOST_ROAMING, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethost_roaming" }, #endif /* PRISM2_NO_STATION_MODES */ { PRISM2_PARAM_BCRX_STA_KEY, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "bcrx_sta_key" }, { PRISM2_PARAM_BCRX_STA_KEY, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbcrx_sta_key" }, { PRISM2_PARAM_IEEE_802_1X, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ieee_802_1x" }, { PRISM2_PARAM_IEEE_802_1X, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getieee_802_1x" }, { PRISM2_PARAM_ANTSEL_TX, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "antsel_tx" }, { PRISM2_PARAM_ANTSEL_TX, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getantsel_tx" }, { PRISM2_PARAM_ANTSEL_RX, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "antsel_rx" }, { PRISM2_PARAM_ANTSEL_RX, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getantsel_rx" }, { PRISM2_PARAM_MONITOR_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "monitor_type" }, { PRISM2_PARAM_MONITOR_TYPE, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getmonitor_type" }, { PRISM2_PARAM_WDS_TYPE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wds_type" }, { PRISM2_PARAM_WDS_TYPE, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getwds_type" }, { PRISM2_PARAM_HOSTSCAN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostscan" }, { PRISM2_PARAM_HOSTSCAN, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostscan" }, { PRISM2_PARAM_AP_SCAN, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "ap_scan" }, { PRISM2_PARAM_AP_SCAN, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getap_scan" }, { PRISM2_PARAM_ENH_SEC, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "enh_sec" }, { PRISM2_PARAM_ENH_SEC, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getenh_sec" }, #ifdef PRISM2_IO_DEBUG { PRISM2_PARAM_IO_DEBUG, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "io_debug" }, { PRISM2_PARAM_IO_DEBUG, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 
"getio_debug" }, #endif /* PRISM2_IO_DEBUG */ { PRISM2_PARAM_BASIC_RATES, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "basic_rates" }, { PRISM2_PARAM_BASIC_RATES, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getbasic_rates" }, { PRISM2_PARAM_OPER_RATES, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "oper_rates" }, { PRISM2_PARAM_OPER_RATES, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getoper_rates" }, { PRISM2_PARAM_HOSTAPD, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostapd" }, { PRISM2_PARAM_HOSTAPD, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostapd" }, { PRISM2_PARAM_HOSTAPD_STA, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "hostapd_sta" }, { PRISM2_PARAM_HOSTAPD_STA, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gethostapd_sta" }, { PRISM2_PARAM_WPA, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "wpa" }, { PRISM2_PARAM_WPA, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getwpa" }, { PRISM2_PARAM_PRIVACY_INVOKED, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "privacy_invoked" }, { PRISM2_PARAM_PRIVACY_INVOKED, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getprivacy_invo" }, { PRISM2_PARAM_TKIP_COUNTERMEASURES, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "tkip_countermea" }, { PRISM2_PARAM_TKIP_COUNTERMEASURES, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "gettkip_counter" }, { PRISM2_PARAM_DROP_UNENCRYPTED, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "drop_unencrypte" }, { PRISM2_PARAM_DROP_UNENCRYPTED, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getdrop_unencry" }, { PRISM2_PARAM_SCAN_CHANNEL_MASK, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "scan_channels" }, { PRISM2_PARAM_SCAN_CHANNEL_MASK, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "getscan_channel" }, }; static int prism2_ioctl_priv_inquire(struct net_device *dev, int *i) { struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; if (local->func->cmd(dev, HFA384X_CMDCODE_INQUIRE, *i, NULL, NULL)) return -EOPNOTSUPP; return 0; } static int prism2_ioctl_priv_prism2_param(struct net_device *dev, struct iw_request_info *info, union iwreq_data *uwrq, char *extra) { struct hostap_interface *iface; local_info_t *local; int *i = (int *) extra; int param = *i; int value = *(i + 1); int ret = 0; u16 val; iface = netdev_priv(dev); local = iface->local; switch (param) { case PRISM2_PARAM_TXRATECTRL: local->fw_tx_rate_control = value; break; case PRISM2_PARAM_BEACON_INT: if (hostap_set_word(dev, HFA384X_RID_CNFBEACONINT, value) || local->func->reset_port(dev)) ret = -EINVAL; else local->beacon_int = value; break; #ifndef PRISM2_NO_STATION_MODES case PRISM2_PARAM_PSEUDO_IBSS: if (value == local->pseudo_adhoc) break; if (value != 0 && value != 1) { ret = -EINVAL; break; } printk(KERN_DEBUG "prism2: %s: pseudo IBSS change %d -> %d\n", dev->name, local->pseudo_adhoc, value); local->pseudo_adhoc = value; if (local->iw_mode != IW_MODE_ADHOC) break; if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE, hostap_get_porttype(local))) { ret = -EOPNOTSUPP; break; } if (local->func->reset_port(dev)) ret = -EINVAL; break; #endif /* PRISM2_NO_STATION_MODES */ case PRISM2_PARAM_ALC: printk(KERN_DEBUG "%s: %s ALC\n", dev->name, value == 0 ? "Disabling" : "Enabling"); val = HFA384X_TEST_CFG_BIT_ALC; local->func->cmd(dev, HFA384X_CMDCODE_TEST | (HFA384X_TEST_CFG_BITS << 8), value == 0 ? 
0 : 1, &val, NULL); break; case PRISM2_PARAM_DUMP: local->frame_dump = value; break; case PRISM2_PARAM_OTHER_AP_POLICY: if (value < 0 || value > 3) { ret = -EINVAL; break; } if (local->ap != NULL) local->ap->ap_policy = value; break; case PRISM2_PARAM_AP_MAX_INACTIVITY: if (value < 0 || value > 7 * 24 * 60 * 60) { ret = -EINVAL; break; } if (local->ap != NULL) local->ap->max_inactivity = value * HZ; break; case PRISM2_PARAM_AP_BRIDGE_PACKETS: if (local->ap != NULL) local->ap->bridge_packets = value; break; case PRISM2_PARAM_DTIM_PERIOD: if (value < 0 || value > 65535) { ret = -EINVAL; break; } if (hostap_set_word(dev, HFA384X_RID_CNFOWNDTIMPERIOD, value) || local->func->reset_port(dev)) ret = -EINVAL; else local->dtim_period = value; break; case PRISM2_PARAM_AP_NULLFUNC_ACK: if (local->ap != NULL) local->ap->nullfunc_ack = value; break; case PRISM2_PARAM_MAX_WDS: local->wds_max_connections = value; break; case PRISM2_PARAM_AP_AUTOM_AP_WDS: if (local->ap != NULL) { if (!local->ap->autom_ap_wds && value) { /* add WDS link to all APs in STA table */ hostap_add_wds_links(local); } local->ap->autom_ap_wds = value; } break; case PRISM2_PARAM_AP_AUTH_ALGS: local->auth_algs = value; if (hostap_set_auth_algs(local)) ret = -EINVAL; break; case PRISM2_PARAM_MONITOR_ALLOW_FCSERR: local->monitor_allow_fcserr = value; break; case PRISM2_PARAM_HOST_ENCRYPT: local->host_encrypt = value; if (hostap_set_encryption(local) || local->func->reset_port(dev)) ret = -EINVAL; break; case PRISM2_PARAM_HOST_DECRYPT: local->host_decrypt = value; if (hostap_set_encryption(local) || local->func->reset_port(dev)) ret = -EINVAL; break; #ifndef PRISM2_NO_STATION_MODES case PRISM2_PARAM_HOST_ROAMING: if (value < 0 || value > 2) { ret = -EINVAL; break; } local->host_roaming = value; if (hostap_set_roaming(local) || local->func->reset_port(dev)) ret = -EINVAL; break; #endif /* PRISM2_NO_STATION_MODES */ case PRISM2_PARAM_BCRX_STA_KEY: local->bcrx_sta_key = value; break; case PRISM2_PARAM_IEEE_802_1X: local->ieee_802_1x = value; break; case PRISM2_PARAM_ANTSEL_TX: if (value < 0 || value > HOSTAP_ANTSEL_HIGH) { ret = -EINVAL; break; } local->antsel_tx = value; hostap_set_antsel(local); break; case PRISM2_PARAM_ANTSEL_RX: if (value < 0 || value > HOSTAP_ANTSEL_HIGH) { ret = -EINVAL; break; } local->antsel_rx = value; hostap_set_antsel(local); break; case PRISM2_PARAM_MONITOR_TYPE: if (value != PRISM2_MONITOR_80211 && value != PRISM2_MONITOR_CAPHDR && value != PRISM2_MONITOR_PRISM && value != PRISM2_MONITOR_RADIOTAP) { ret = -EINVAL; break; } local->monitor_type = value; if (local->iw_mode == IW_MODE_MONITOR) hostap_monitor_set_type(local); break; case PRISM2_PARAM_WDS_TYPE: local->wds_type = value; break; case PRISM2_PARAM_HOSTSCAN: { struct hfa384x_hostscan_request scan_req; u16 rate; memset(&scan_req, 0, sizeof(scan_req)); scan_req.channel_list = cpu_to_le16(0x3fff); switch (value) { case 1: rate = HFA384X_RATES_1MBPS; break; case 2: rate = HFA384X_RATES_2MBPS; break; case 3: rate = HFA384X_RATES_5MBPS; break; case 4: rate = HFA384X_RATES_11MBPS; break; default: rate = HFA384X_RATES_1MBPS; break; } scan_req.txrate = cpu_to_le16(rate); /* leave SSID empty to accept all SSIDs */ if (local->iw_mode == IW_MODE_MASTER) { if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE, HFA384X_PORTTYPE_BSS) || local->func->reset_port(dev)) printk(KERN_DEBUG "Leaving Host AP mode " "for HostScan failed\n"); } if (local->func->set_rid(dev, HFA384X_RID_HOSTSCAN, &scan_req, sizeof(scan_req))) { printk(KERN_DEBUG "HOSTSCAN failed\n"); ret = -EINVAL; } 
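	/*
	 * In Host AP (MASTER) mode the port type was switched to BSS above so
	 * that the firmware accepts the HostScan command; below, wait up to
	 * one second for the scan-complete wakeup on hostscan_wq before
	 * restoring the HOSTAP port type and resetting the port.
	 */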
if (local->iw_mode == IW_MODE_MASTER) { wait_queue_entry_t __wait; init_waitqueue_entry(&__wait, current); add_wait_queue(&local->hostscan_wq, &__wait); set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(HZ); if (signal_pending(current)) ret = -EINTR; set_current_state(TASK_RUNNING); remove_wait_queue(&local->hostscan_wq, &__wait); if (hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE, HFA384X_PORTTYPE_HOSTAP) || local->func->reset_port(dev)) printk(KERN_DEBUG "Returning to Host AP mode " "after HostScan failed\n"); } break; } case PRISM2_PARAM_AP_SCAN: local->passive_scan_interval = value; if (timer_pending(&local->passive_scan_timer)) del_timer(&local->passive_scan_timer); if (value > 0 && value < INT_MAX / HZ) { local->passive_scan_timer.expires = jiffies + local->passive_scan_interval * HZ; add_timer(&local->passive_scan_timer); } break; case PRISM2_PARAM_ENH_SEC: if (value < 0 || value > 3) { ret = -EINVAL; break; } local->enh_sec = value; if (hostap_set_word(dev, HFA384X_RID_CNFENHSECURITY, local->enh_sec) || local->func->reset_port(dev)) { printk(KERN_INFO "%s: cnfEnhSecurity requires STA f/w " "1.6.3 or newer\n", dev->name); ret = -EOPNOTSUPP; } break; #ifdef PRISM2_IO_DEBUG case PRISM2_PARAM_IO_DEBUG: local->io_debug_enabled = value; break; #endif /* PRISM2_IO_DEBUG */ case PRISM2_PARAM_BASIC_RATES: if ((value & local->tx_rate_control) != value || value == 0) { printk(KERN_INFO "%s: invalid basic rate set - basic " "rates must be in supported rate set\n", dev->name); ret = -EINVAL; break; } local->basic_rates = value; if (hostap_set_word(dev, HFA384X_RID_CNFBASICRATES, local->basic_rates) || local->func->reset_port(dev)) ret = -EINVAL; break; case PRISM2_PARAM_OPER_RATES: local->tx_rate_control = value; if (hostap_set_rate(dev)) ret = -EINVAL; break; case PRISM2_PARAM_HOSTAPD: ret = hostap_set_hostapd(local, value, 1); break; case PRISM2_PARAM_HOSTAPD_STA: ret = hostap_set_hostapd_sta(local, value, 1); break; case PRISM2_PARAM_WPA: local->wpa = value; if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0)) ret = -EOPNOTSUPP; else if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, value ? 
1 : 0)) ret = -EINVAL; break; case PRISM2_PARAM_PRIVACY_INVOKED: local->privacy_invoked = value; if (hostap_set_encryption(local) || local->func->reset_port(dev)) ret = -EINVAL; break; case PRISM2_PARAM_TKIP_COUNTERMEASURES: local->tkip_countermeasures = value; break; case PRISM2_PARAM_DROP_UNENCRYPTED: local->drop_unencrypted = value; break; case PRISM2_PARAM_SCAN_CHANNEL_MASK: local->scan_channel_mask = value; break; default: printk(KERN_DEBUG "%s: prism2_param: unknown param %d\n", dev->name, param); ret = -EOPNOTSUPP; break; } return ret; } static int prism2_ioctl_priv_get_prism2_param(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct hostap_interface *iface; local_info_t *local; int *param = (int *) extra; int ret = 0; iface = netdev_priv(dev); local = iface->local; switch (*param) { case PRISM2_PARAM_TXRATECTRL: *param = local->fw_tx_rate_control; break; case PRISM2_PARAM_BEACON_INT: *param = local->beacon_int; break; case PRISM2_PARAM_PSEUDO_IBSS: *param = local->pseudo_adhoc; break; case PRISM2_PARAM_ALC: ret = -EOPNOTSUPP; /* FIX */ break; case PRISM2_PARAM_DUMP: *param = local->frame_dump; break; case PRISM2_PARAM_OTHER_AP_POLICY: if (local->ap != NULL) *param = local->ap->ap_policy; else ret = -EOPNOTSUPP; break; case PRISM2_PARAM_AP_MAX_INACTIVITY: if (local->ap != NULL) *param = local->ap->max_inactivity / HZ; else ret = -EOPNOTSUPP; break; case PRISM2_PARAM_AP_BRIDGE_PACKETS: if (local->ap != NULL) *param = local->ap->bridge_packets; else ret = -EOPNOTSUPP; break; case PRISM2_PARAM_DTIM_PERIOD: *param = local->dtim_period; break; case PRISM2_PARAM_AP_NULLFUNC_ACK: if (local->ap != NULL) *param = local->ap->nullfunc_ack; else ret = -EOPNOTSUPP; break; case PRISM2_PARAM_MAX_WDS: *param = local->wds_max_connections; break; case PRISM2_PARAM_AP_AUTOM_AP_WDS: if (local->ap != NULL) *param = local->ap->autom_ap_wds; else ret = -EOPNOTSUPP; break; case PRISM2_PARAM_AP_AUTH_ALGS: *param = local->auth_algs; break; case PRISM2_PARAM_MONITOR_ALLOW_FCSERR: *param = local->monitor_allow_fcserr; break; case PRISM2_PARAM_HOST_ENCRYPT: *param = local->host_encrypt; break; case PRISM2_PARAM_HOST_DECRYPT: *param = local->host_decrypt; break; case PRISM2_PARAM_HOST_ROAMING: *param = local->host_roaming; break; case PRISM2_PARAM_BCRX_STA_KEY: *param = local->bcrx_sta_key; break; case PRISM2_PARAM_IEEE_802_1X: *param = local->ieee_802_1x; break; case PRISM2_PARAM_ANTSEL_TX: *param = local->antsel_tx; break; case PRISM2_PARAM_ANTSEL_RX: *param = local->antsel_rx; break; case PRISM2_PARAM_MONITOR_TYPE: *param = local->monitor_type; break; case PRISM2_PARAM_WDS_TYPE: *param = local->wds_type; break; case PRISM2_PARAM_HOSTSCAN: ret = -EOPNOTSUPP; break; case PRISM2_PARAM_AP_SCAN: *param = local->passive_scan_interval; break; case PRISM2_PARAM_ENH_SEC: *param = local->enh_sec; break; #ifdef PRISM2_IO_DEBUG case PRISM2_PARAM_IO_DEBUG: *param = local->io_debug_enabled; break; #endif /* PRISM2_IO_DEBUG */ case PRISM2_PARAM_BASIC_RATES: *param = local->basic_rates; break; case PRISM2_PARAM_OPER_RATES: *param = local->tx_rate_control; break; case PRISM2_PARAM_HOSTAPD: *param = local->hostapd; break; case PRISM2_PARAM_HOSTAPD_STA: *param = local->hostapd_sta; break; case PRISM2_PARAM_WPA: if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0)) ret = -EOPNOTSUPP; *param = local->wpa; break; case PRISM2_PARAM_PRIVACY_INVOKED: *param = local->privacy_invoked; break; case PRISM2_PARAM_TKIP_COUNTERMEASURES: *param = local->tkip_countermeasures; break; case 
PRISM2_PARAM_DROP_UNENCRYPTED: *param = local->drop_unencrypted; break; case PRISM2_PARAM_SCAN_CHANNEL_MASK: *param = local->scan_channel_mask; break; default: printk(KERN_DEBUG "%s: get_prism2_param: unknown param %d\n", dev->name, *param); ret = -EOPNOTSUPP; break; } return ret; } static int prism2_ioctl_priv_readmif(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct hostap_interface *iface; local_info_t *local; u16 resp0; iface = netdev_priv(dev); local = iface->local; if (local->func->cmd(dev, HFA384X_CMDCODE_READMIF, *extra, NULL, &resp0)) return -EOPNOTSUPP; else *extra = resp0; return 0; } static int prism2_ioctl_priv_writemif(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct hostap_interface *iface; local_info_t *local; u16 cr, val; iface = netdev_priv(dev); local = iface->local; cr = *extra; val = *(extra + 1); if (local->func->cmd(dev, HFA384X_CMDCODE_WRITEMIF, cr, &val, NULL)) return -EOPNOTSUPP; return 0; } static int prism2_ioctl_priv_monitor(struct net_device *dev, int *i) { struct hostap_interface *iface; local_info_t *local; int ret = 0; union iwreq_data wrqu; iface = netdev_priv(dev); local = iface->local; printk(KERN_DEBUG "%s: process %d (%s) used deprecated iwpriv monitor " "- update software to use iwconfig mode monitor\n", dev->name, task_pid_nr(current), current->comm); /* Backward compatibility code - this can be removed at some point */ if (*i == 0) { /* Disable monitor mode - old mode was not saved, so go to * Master mode */ wrqu.mode = IW_MODE_MASTER; ret = prism2_ioctl_siwmode(dev, NULL, &wrqu, NULL); } else if (*i == 1) { /* netlink socket mode is not supported anymore since it did * not separate different devices from each other and was not * best method for delivering large amount of packets to * user space */ ret = -EOPNOTSUPP; } else if (*i == 2 || *i == 3) { switch (*i) { case 2: local->monitor_type = PRISM2_MONITOR_80211; break; case 3: local->monitor_type = PRISM2_MONITOR_PRISM; break; } wrqu.mode = IW_MODE_MONITOR; ret = prism2_ioctl_siwmode(dev, NULL, &wrqu, NULL); hostap_monitor_mode_enable(local); } else ret = -EINVAL; return ret; } static int prism2_ioctl_priv_reset(struct net_device *dev, int *i) { struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; printk(KERN_DEBUG "%s: manual reset request(%d)\n", dev->name, *i); switch (*i) { case 0: /* Disable and enable card */ local->func->hw_shutdown(dev, 1); local->func->hw_config(dev, 0); break; case 1: /* COR sreset */ local->func->hw_reset(dev); break; case 2: /* Disable and enable port 0 */ local->func->reset_port(dev); break; case 3: prism2_sta_deauth(local, WLAN_REASON_DEAUTH_LEAVING); if (local->func->cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL, NULL)) return -EINVAL; break; case 4: if (local->func->cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL, NULL)) return -EINVAL; break; default: printk(KERN_DEBUG "Unknown reset request %d\n", *i); return -EOPNOTSUPP; } return 0; } static int prism2_ioctl_priv_set_rid_word(struct net_device *dev, int *i) { int rid = *i; int value = *(i + 1); printk(KERN_DEBUG "%s: Set RID[0x%X] = %d\n", dev->name, rid, value); if (hostap_set_word(dev, rid, value)) return -EINVAL; return 0; } #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT static int ap_mac_cmd_ioctl(local_info_t *local, int *cmd) { int ret = 0; switch (*cmd) { case AP_MAC_CMD_POLICY_OPEN: local->ap->mac_restrictions.policy = MAC_POLICY_OPEN; break; case AP_MAC_CMD_POLICY_ALLOW: 
local->ap->mac_restrictions.policy = MAC_POLICY_ALLOW; break; case AP_MAC_CMD_POLICY_DENY: local->ap->mac_restrictions.policy = MAC_POLICY_DENY; break; case AP_MAC_CMD_FLUSH: ap_control_flush_macs(&local->ap->mac_restrictions); break; case AP_MAC_CMD_KICKALL: ap_control_kickall(local->ap); hostap_deauth_all_stas(local->dev, local->ap, 0); break; default: ret = -EOPNOTSUPP; break; } return ret; } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ #ifdef PRISM2_DOWNLOAD_SUPPORT static int prism2_ioctl_priv_download(local_info_t *local, struct iw_point *p) { struct prism2_download_param *param; int ret = 0; if (p->length < sizeof(struct prism2_download_param) || p->length > 1024 || !p->pointer) return -EINVAL; param = memdup_user(p->pointer, p->length); if (IS_ERR(param)) { return PTR_ERR(param); } if (p->length < sizeof(struct prism2_download_param) + param->num_areas * sizeof(struct prism2_download_area)) { ret = -EINVAL; goto out; } ret = local->func->download(local, param); out: kfree(param); return ret; } #endif /* PRISM2_DOWNLOAD_SUPPORT */ static int prism2_set_genericelement(struct net_device *dev, u8 *elem, size_t len) { struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; u8 *buf; /* * Add 16-bit length in the beginning of the buffer because Prism2 RID * includes it. */ buf = kmalloc(len + 2, GFP_KERNEL); if (buf == NULL) return -ENOMEM; *((__le16 *) buf) = cpu_to_le16(len); memcpy(buf + 2, elem, len); kfree(local->generic_elem); local->generic_elem = buf; local->generic_elem_len = len + 2; return local->func->set_rid(local->dev, HFA384X_RID_GENERICELEMENT, buf, len + 2); } static int prism2_ioctl_siwauth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *data = &wrqu->param; struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; switch (data->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: case IW_AUTH_KEY_MGMT: /* * Host AP driver does not use these parameters and allows * wpa_supplicant to control them internally. 
*/ break; case IW_AUTH_TKIP_COUNTERMEASURES: local->tkip_countermeasures = data->value; break; case IW_AUTH_DROP_UNENCRYPTED: local->drop_unencrypted = data->value; break; case IW_AUTH_80211_AUTH_ALG: local->auth_algs = data->value; break; case IW_AUTH_WPA_ENABLED: if (data->value == 0) { local->wpa = 0; if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0)) break; prism2_set_genericelement(dev, "", 0); local->host_roaming = 0; local->privacy_invoked = 0; if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, 0) || hostap_set_roaming(local) || hostap_set_encryption(local) || local->func->reset_port(dev)) return -EINVAL; break; } if (local->sta_fw_ver < PRISM2_FW_VER(1,7,0)) return -EOPNOTSUPP; local->host_roaming = 2; local->privacy_invoked = 1; local->wpa = 1; if (hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, 1) || hostap_set_roaming(local) || hostap_set_encryption(local) || local->func->reset_port(dev)) return -EINVAL; break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: local->ieee_802_1x = data->value; break; case IW_AUTH_PRIVACY_INVOKED: local->privacy_invoked = data->value; break; default: return -EOPNOTSUPP; } return 0; } static int prism2_ioctl_giwauth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *data = &wrqu->param; struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; switch (data->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: case IW_AUTH_KEY_MGMT: /* * Host AP driver does not use these parameters and allows * wpa_supplicant to control them internally. */ return -EOPNOTSUPP; case IW_AUTH_TKIP_COUNTERMEASURES: data->value = local->tkip_countermeasures; break; case IW_AUTH_DROP_UNENCRYPTED: data->value = local->drop_unencrypted; break; case IW_AUTH_80211_AUTH_ALG: data->value = local->auth_algs; break; case IW_AUTH_WPA_ENABLED: data->value = local->wpa; break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: data->value = local->ieee_802_1x; break; default: return -EOPNOTSUPP; } return 0; } static int prism2_ioctl_siwencodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *erq = &wrqu->encoding; struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; int i, ret = 0; struct lib80211_crypto_ops *ops; struct lib80211_crypt_data **crypt; void *sta_ptr; u8 *addr; const char *alg, *module; i = erq->flags & IW_ENCODE_INDEX; if (i > WEP_KEYS) return -EINVAL; if (i < 1 || i > WEP_KEYS) i = local->crypt_info.tx_keyidx; else i--; if (i < 0 || i >= WEP_KEYS) return -EINVAL; addr = ext->addr.sa_data; if (is_broadcast_ether_addr(addr)) { sta_ptr = NULL; crypt = &local->crypt_info.crypt[i]; } else { if (i != 0) return -EINVAL; sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt); if (sta_ptr == NULL) { if (local->iw_mode == IW_MODE_INFRA) { /* * TODO: add STA entry for the current AP so * that unicast key can be used. For now, this * is emulated by using default key idx 0. 
*/ i = 0; crypt = &local->crypt_info.crypt[i]; } else return -EINVAL; } } if ((erq->flags & IW_ENCODE_DISABLED) || ext->alg == IW_ENCODE_ALG_NONE) { if (*crypt) lib80211_crypt_delayed_deinit(&local->crypt_info, crypt); goto done; } switch (ext->alg) { case IW_ENCODE_ALG_WEP: alg = "WEP"; module = "lib80211_crypt_wep"; break; case IW_ENCODE_ALG_TKIP: alg = "TKIP"; module = "lib80211_crypt_tkip"; break; case IW_ENCODE_ALG_CCMP: alg = "CCMP"; module = "lib80211_crypt_ccmp"; break; default: printk(KERN_DEBUG "%s: unsupported algorithm %d\n", local->dev->name, ext->alg); ret = -EOPNOTSUPP; goto done; } ops = lib80211_get_crypto_ops(alg); if (ops == NULL) { request_module(module); ops = lib80211_get_crypto_ops(alg); } if (ops == NULL) { printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n", local->dev->name, alg); ret = -EOPNOTSUPP; goto done; } if (sta_ptr || ext->alg != IW_ENCODE_ALG_WEP) { /* * Per station encryption and other than WEP algorithms * require host-based encryption, so force them on * automatically. */ local->host_decrypt = local->host_encrypt = 1; } if (*crypt == NULL || (*crypt)->ops != ops) { struct lib80211_crypt_data *new_crypt; lib80211_crypt_delayed_deinit(&local->crypt_info, crypt); new_crypt = kzalloc(sizeof(struct lib80211_crypt_data), GFP_KERNEL); if (new_crypt == NULL) { ret = -ENOMEM; goto done; } new_crypt->ops = ops; if (new_crypt->ops && try_module_get(new_crypt->ops->owner)) new_crypt->priv = new_crypt->ops->init(i); if (new_crypt->priv == NULL) { kfree(new_crypt); ret = -EINVAL; goto done; } *crypt = new_crypt; } /* * TODO: if ext_flags does not have IW_ENCODE_EXT_RX_SEQ_VALID, the * existing seq# should not be changed. * TODO: if ext_flags has IW_ENCODE_EXT_TX_SEQ_VALID, next TX seq# * should be changed to something else than zero. */ if ((!(ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) || ext->key_len > 0) && (*crypt)->ops->set_key && (*crypt)->ops->set_key(ext->key, ext->key_len, ext->rx_seq, (*crypt)->priv) < 0) { printk(KERN_DEBUG "%s: key setting failed\n", local->dev->name); ret = -EINVAL; goto done; } if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { if (!sta_ptr) local->crypt_info.tx_keyidx = i; } if (sta_ptr == NULL && ext->key_len > 0) { int first = 1, j; for (j = 0; j < WEP_KEYS; j++) { if (j != i && local->crypt_info.crypt[j]) { first = 0; break; } } if (first) local->crypt_info.tx_keyidx = i; } done: if (sta_ptr) hostap_handle_sta_release(sta_ptr); local->open_wep = erq->flags & IW_ENCODE_OPEN; /* * Do not reset port0 if card is in Managed mode since resetting will * generate new IEEE 802.11 authentication which may end up in looping * with IEEE 802.1X. Prism2 documentation seem to require port reset * after WEP configuration. However, keys are apparently changed at * least in Managed mode. 
*/ if (ret == 0 && (hostap_set_encryption(local) || (local->iw_mode != IW_MODE_INFRA && local->func->reset_port(local->dev)))) ret = -EINVAL; return ret; } static int prism2_ioctl_giwencodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *erq = &wrqu->encoding; struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; struct lib80211_crypt_data **crypt; void *sta_ptr; int max_key_len, i; struct iw_encode_ext *ext = (struct iw_encode_ext *) extra; u8 *addr; max_key_len = erq->length - sizeof(*ext); if (max_key_len < 0) return -EINVAL; i = erq->flags & IW_ENCODE_INDEX; if (i < 1 || i > WEP_KEYS) i = local->crypt_info.tx_keyidx; else i--; addr = ext->addr.sa_data; if (is_broadcast_ether_addr(addr)) { sta_ptr = NULL; crypt = &local->crypt_info.crypt[i]; } else { i = 0; sta_ptr = ap_crypt_get_ptrs(local->ap, addr, 0, &crypt); if (sta_ptr == NULL) return -EINVAL; } erq->flags = i + 1; memset(ext, 0, sizeof(*ext)); if (*crypt == NULL || (*crypt)->ops == NULL) { ext->alg = IW_ENCODE_ALG_NONE; ext->key_len = 0; erq->flags |= IW_ENCODE_DISABLED; } else { if (strcmp((*crypt)->ops->name, "WEP") == 0) ext->alg = IW_ENCODE_ALG_WEP; else if (strcmp((*crypt)->ops->name, "TKIP") == 0) ext->alg = IW_ENCODE_ALG_TKIP; else if (strcmp((*crypt)->ops->name, "CCMP") == 0) ext->alg = IW_ENCODE_ALG_CCMP; else return -EINVAL; if ((*crypt)->ops->get_key) { ext->key_len = (*crypt)->ops->get_key(ext->key, max_key_len, ext->tx_seq, (*crypt)->priv); if (ext->key_len && (ext->alg == IW_ENCODE_ALG_TKIP || ext->alg == IW_ENCODE_ALG_CCMP)) ext->ext_flags |= IW_ENCODE_EXT_TX_SEQ_VALID; } } if (sta_ptr) hostap_handle_sta_release(sta_ptr); return 0; } static int prism2_ioctl_set_encryption(local_info_t *local, struct prism2_hostapd_param *param, int param_len) { int ret = 0; struct lib80211_crypto_ops *ops; struct lib80211_crypt_data **crypt; void *sta_ptr; param->u.crypt.err = 0; param->u.crypt.alg[HOSTAP_CRYPT_ALG_NAME_LEN - 1] = '\0'; if (param_len != (int) ((char *) param->u.crypt.key - (char *) param) + param->u.crypt.key_len) return -EINVAL; if (is_broadcast_ether_addr(param->sta_addr)) { if (param->u.crypt.idx >= WEP_KEYS) return -EINVAL; sta_ptr = NULL; crypt = &local->crypt_info.crypt[param->u.crypt.idx]; } else { if (param->u.crypt.idx) return -EINVAL; sta_ptr = ap_crypt_get_ptrs( local->ap, param->sta_addr, (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_PERMANENT), &crypt); if (sta_ptr == NULL) { param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR; return -EINVAL; } } if (strcmp(param->u.crypt.alg, "none") == 0) { if (crypt) lib80211_crypt_delayed_deinit(&local->crypt_info, crypt); goto done; } ops = lib80211_get_crypto_ops(param->u.crypt.alg); if (ops == NULL && strcmp(param->u.crypt.alg, "WEP") == 0) { request_module("lib80211_crypt_wep"); ops = lib80211_get_crypto_ops(param->u.crypt.alg); } else if (ops == NULL && strcmp(param->u.crypt.alg, "TKIP") == 0) { request_module("lib80211_crypt_tkip"); ops = lib80211_get_crypto_ops(param->u.crypt.alg); } else if (ops == NULL && strcmp(param->u.crypt.alg, "CCMP") == 0) { request_module("lib80211_crypt_ccmp"); ops = lib80211_get_crypto_ops(param->u.crypt.alg); } if (ops == NULL) { printk(KERN_DEBUG "%s: unknown crypto alg '%s'\n", local->dev->name, param->u.crypt.alg); param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ALG; ret = -EINVAL; goto done; } /* station based encryption and other than WEP algorithms require * host-based encryption, so force them on automatically */ local->host_decrypt 
= local->host_encrypt = 1; if (*crypt == NULL || (*crypt)->ops != ops) { struct lib80211_crypt_data *new_crypt; lib80211_crypt_delayed_deinit(&local->crypt_info, crypt); new_crypt = kzalloc(sizeof(struct lib80211_crypt_data), GFP_KERNEL); if (new_crypt == NULL) { ret = -ENOMEM; goto done; } new_crypt->ops = ops; new_crypt->priv = new_crypt->ops->init(param->u.crypt.idx); if (new_crypt->priv == NULL) { kfree(new_crypt); param->u.crypt.err = HOSTAP_CRYPT_ERR_CRYPT_INIT_FAILED; ret = -EINVAL; goto done; } *crypt = new_crypt; } if ((!(param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) || param->u.crypt.key_len > 0) && (*crypt)->ops->set_key && (*crypt)->ops->set_key(param->u.crypt.key, param->u.crypt.key_len, param->u.crypt.seq, (*crypt)->priv) < 0) { printk(KERN_DEBUG "%s: key setting failed\n", local->dev->name); param->u.crypt.err = HOSTAP_CRYPT_ERR_KEY_SET_FAILED; ret = -EINVAL; goto done; } if (param->u.crypt.flags & HOSTAP_CRYPT_FLAG_SET_TX_KEY) { if (!sta_ptr) local->crypt_info.tx_keyidx = param->u.crypt.idx; else if (param->u.crypt.idx) { printk(KERN_DEBUG "%s: TX key idx setting failed\n", local->dev->name); param->u.crypt.err = HOSTAP_CRYPT_ERR_TX_KEY_SET_FAILED; ret = -EINVAL; goto done; } } done: if (sta_ptr) hostap_handle_sta_release(sta_ptr); /* Do not reset port0 if card is in Managed mode since resetting will * generate new IEEE 802.11 authentication which may end up in looping * with IEEE 802.1X. Prism2 documentation seem to require port reset * after WEP configuration. However, keys are apparently changed at * least in Managed mode. */ if (ret == 0 && (hostap_set_encryption(local) || (local->iw_mode != IW_MODE_INFRA && local->func->reset_port(local->dev)))) { param->u.crypt.err = HOSTAP_CRYPT_ERR_CARD_CONF_FAILED; return -EINVAL; } return ret; } static int prism2_ioctl_get_encryption(local_info_t *local, struct prism2_hostapd_param *param, int param_len) { struct lib80211_crypt_data **crypt; void *sta_ptr; int max_key_len; param->u.crypt.err = 0; max_key_len = param_len - (int) ((char *) param->u.crypt.key - (char *) param); if (max_key_len < 0) return -EINVAL; if (is_broadcast_ether_addr(param->sta_addr)) { sta_ptr = NULL; if (param->u.crypt.idx >= WEP_KEYS) param->u.crypt.idx = local->crypt_info.tx_keyidx; crypt = &local->crypt_info.crypt[param->u.crypt.idx]; } else { param->u.crypt.idx = 0; sta_ptr = ap_crypt_get_ptrs(local->ap, param->sta_addr, 0, &crypt); if (sta_ptr == NULL) { param->u.crypt.err = HOSTAP_CRYPT_ERR_UNKNOWN_ADDR; return -EINVAL; } } if (*crypt == NULL || (*crypt)->ops == NULL) { memcpy(param->u.crypt.alg, "none", 5); param->u.crypt.key_len = 0; param->u.crypt.idx = 0xff; } else { strscpy(param->u.crypt.alg, (*crypt)->ops->name, HOSTAP_CRYPT_ALG_NAME_LEN); param->u.crypt.key_len = 0; memset(param->u.crypt.seq, 0, 8); if ((*crypt)->ops->get_key) { param->u.crypt.key_len = (*crypt)->ops->get_key(param->u.crypt.key, max_key_len, param->u.crypt.seq, (*crypt)->priv); } } if (sta_ptr) hostap_handle_sta_release(sta_ptr); return 0; } static int prism2_ioctl_get_rid(local_info_t *local, struct prism2_hostapd_param *param, int param_len) { int max_len, res; max_len = param_len - PRISM2_HOSTAPD_RID_HDR_LEN; if (max_len < 0) return -EINVAL; res = local->func->get_rid(local->dev, param->u.rid.rid, param->u.rid.data, param->u.rid.len, 0); if (res >= 0) { param->u.rid.len = res; return 0; } return res; } static int prism2_ioctl_set_rid(local_info_t *local, struct prism2_hostapd_param *param, int param_len) { int max_len; max_len = param_len - 
PRISM2_HOSTAPD_RID_HDR_LEN; if (max_len < 0 || max_len < param->u.rid.len) return -EINVAL; return local->func->set_rid(local->dev, param->u.rid.rid, param->u.rid.data, param->u.rid.len); } static int prism2_ioctl_set_assoc_ap_addr(local_info_t *local, struct prism2_hostapd_param *param, int param_len) { printk(KERN_DEBUG "%ssta: associated as client with AP %pM\n", local->dev->name, param->sta_addr); memcpy(local->assoc_ap_addr, param->sta_addr, ETH_ALEN); return 0; } static int prism2_ioctl_siwgenie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *data = &wrqu->data; return prism2_set_genericelement(dev, extra, data->length); } static int prism2_ioctl_giwgenie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *data = &wrqu->data; struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; int len = local->generic_elem_len - 2; if (len <= 0 || local->generic_elem == NULL) { data->length = 0; return 0; } if (data->length < len) return -E2BIG; data->length = len; memcpy(extra, local->generic_elem + 2, len); return 0; } static int prism2_ioctl_set_generic_element(local_info_t *local, struct prism2_hostapd_param *param, int param_len) { int max_len, len; len = param->u.generic_elem.len; max_len = param_len - PRISM2_HOSTAPD_GENERIC_ELEMENT_HDR_LEN; if (max_len < 0 || max_len < len) return -EINVAL; return prism2_set_genericelement(local->dev, param->u.generic_elem.data, len); } static int prism2_ioctl_siwmlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; struct iw_mlme *mlme = (struct iw_mlme *) extra; __le16 reason; reason = cpu_to_le16(mlme->reason_code); switch (mlme->cmd) { case IW_MLME_DEAUTH: return prism2_sta_send_mgmt(local, mlme->addr.sa_data, IEEE80211_STYPE_DEAUTH, (u8 *) &reason, 2); case IW_MLME_DISASSOC: return prism2_sta_send_mgmt(local, mlme->addr.sa_data, IEEE80211_STYPE_DISASSOC, (u8 *) &reason, 2); default: return -EOPNOTSUPP; } } static int prism2_ioctl_mlme(local_info_t *local, struct prism2_hostapd_param *param) { __le16 reason; reason = cpu_to_le16(param->u.mlme.reason_code); switch (param->u.mlme.cmd) { case MLME_STA_DEAUTH: return prism2_sta_send_mgmt(local, param->sta_addr, IEEE80211_STYPE_DEAUTH, (u8 *) &reason, 2); case MLME_STA_DISASSOC: return prism2_sta_send_mgmt(local, param->sta_addr, IEEE80211_STYPE_DISASSOC, (u8 *) &reason, 2); default: return -EOPNOTSUPP; } } static int prism2_ioctl_scan_req(local_info_t *local, struct prism2_hostapd_param *param) { #ifndef PRISM2_NO_STATION_MODES if ((local->iw_mode != IW_MODE_INFRA && local->iw_mode != IW_MODE_ADHOC) || (local->sta_fw_ver < PRISM2_FW_VER(1,3,1))) return -EOPNOTSUPP; if (!local->dev_enabled) return -ENETDOWN; return prism2_request_hostscan(local->dev, param->u.scan_req.ssid, param->u.scan_req.ssid_len); #else /* PRISM2_NO_STATION_MODES */ return -EOPNOTSUPP; #endif /* PRISM2_NO_STATION_MODES */ } static int prism2_ioctl_priv_hostapd(local_info_t *local, struct iw_point *p) { struct prism2_hostapd_param *param; int ret = 0; int ap_ioctl = 0; if (p->length < sizeof(struct prism2_hostapd_param) || p->length > PRISM2_HOSTAPD_MAX_BUF_SIZE || !p->pointer) return -EINVAL; param = memdup_user(p->pointer, p->length); if (IS_ERR(param)) { return PTR_ERR(param); } switch (param->cmd) { case PRISM2_SET_ENCRYPTION: ret = 
prism2_ioctl_set_encryption(local, param, p->length); break; case PRISM2_GET_ENCRYPTION: ret = prism2_ioctl_get_encryption(local, param, p->length); break; case PRISM2_HOSTAPD_GET_RID: ret = prism2_ioctl_get_rid(local, param, p->length); break; case PRISM2_HOSTAPD_SET_RID: ret = prism2_ioctl_set_rid(local, param, p->length); break; case PRISM2_HOSTAPD_SET_ASSOC_AP_ADDR: ret = prism2_ioctl_set_assoc_ap_addr(local, param, p->length); break; case PRISM2_HOSTAPD_SET_GENERIC_ELEMENT: ret = prism2_ioctl_set_generic_element(local, param, p->length); break; case PRISM2_HOSTAPD_MLME: ret = prism2_ioctl_mlme(local, param); break; case PRISM2_HOSTAPD_SCAN_REQ: ret = prism2_ioctl_scan_req(local, param); break; default: ret = prism2_hostapd(local->ap, param); ap_ioctl = 1; break; } if (ret == 1 || !ap_ioctl) { if (copy_to_user(p->pointer, param, p->length)) { ret = -EFAULT; goto out; } else if (ap_ioctl) ret = 0; } out: kfree(param); return ret; } static void prism2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; strscpy(info->driver, "hostap", sizeof(info->driver)); snprintf(info->fw_version, sizeof(info->fw_version), "%d.%d.%d", (local->sta_fw_ver >> 16) & 0xff, (local->sta_fw_ver >> 8) & 0xff, local->sta_fw_ver & 0xff); } const struct ethtool_ops prism2_ethtool_ops = { .get_drvinfo = prism2_get_drvinfo }; /* Structures to export the Wireless Handlers */ static const iw_handler prism2_handler[] = { IW_HANDLER(SIOCGIWNAME, prism2_get_name), IW_HANDLER(SIOCSIWFREQ, prism2_ioctl_siwfreq), IW_HANDLER(SIOCGIWFREQ, prism2_ioctl_giwfreq), IW_HANDLER(SIOCSIWMODE, prism2_ioctl_siwmode), IW_HANDLER(SIOCGIWMODE, prism2_ioctl_giwmode), IW_HANDLER(SIOCSIWSENS, prism2_ioctl_siwsens), IW_HANDLER(SIOCGIWSENS, prism2_ioctl_giwsens), IW_HANDLER(SIOCGIWRANGE, prism2_ioctl_giwrange), IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy), IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy), IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy), IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy), IW_HANDLER(SIOCSIWAP, prism2_ioctl_siwap), IW_HANDLER(SIOCGIWAP, prism2_ioctl_giwap), IW_HANDLER(SIOCSIWMLME, prism2_ioctl_siwmlme), IW_HANDLER(SIOCGIWAPLIST, prism2_ioctl_giwaplist), IW_HANDLER(SIOCSIWSCAN, prism2_ioctl_siwscan), IW_HANDLER(SIOCGIWSCAN, prism2_ioctl_giwscan), IW_HANDLER(SIOCSIWESSID, prism2_ioctl_siwessid), IW_HANDLER(SIOCGIWESSID, prism2_ioctl_giwessid), IW_HANDLER(SIOCSIWNICKN, prism2_ioctl_siwnickn), IW_HANDLER(SIOCGIWNICKN, prism2_ioctl_giwnickn), IW_HANDLER(SIOCSIWRATE, prism2_ioctl_siwrate), IW_HANDLER(SIOCGIWRATE, prism2_ioctl_giwrate), IW_HANDLER(SIOCSIWRTS, prism2_ioctl_siwrts), IW_HANDLER(SIOCGIWRTS, prism2_ioctl_giwrts), IW_HANDLER(SIOCSIWFRAG, prism2_ioctl_siwfrag), IW_HANDLER(SIOCGIWFRAG, prism2_ioctl_giwfrag), IW_HANDLER(SIOCSIWTXPOW, prism2_ioctl_siwtxpow), IW_HANDLER(SIOCGIWTXPOW, prism2_ioctl_giwtxpow), IW_HANDLER(SIOCSIWRETRY, prism2_ioctl_siwretry), IW_HANDLER(SIOCGIWRETRY, prism2_ioctl_giwretry), IW_HANDLER(SIOCSIWENCODE, prism2_ioctl_siwencode), IW_HANDLER(SIOCGIWENCODE, prism2_ioctl_giwencode), IW_HANDLER(SIOCSIWPOWER, prism2_ioctl_siwpower), IW_HANDLER(SIOCGIWPOWER, prism2_ioctl_giwpower), IW_HANDLER(SIOCSIWGENIE, prism2_ioctl_siwgenie), IW_HANDLER(SIOCGIWGENIE, prism2_ioctl_giwgenie), IW_HANDLER(SIOCSIWAUTH, prism2_ioctl_siwauth), IW_HANDLER(SIOCGIWAUTH, prism2_ioctl_giwauth), IW_HANDLER(SIOCSIWENCODEEXT, prism2_ioctl_siwencodeext), IW_HANDLER(SIOCGIWENCODEEXT, prism2_ioctl_giwencodeext), }; 
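/*
 * For reference only (a sketch, not part of this driver): the table above is
 * indexed by ioctl number relative to SIOCIWFIRST, which is what the
 * IW_HANDLER() initializers encode.  The wireless-extensions core in
 * net/wireless/wext-core.c resolves a handler roughly like this:
 *
 *	static iw_handler get_handler(struct net_device *dev, unsigned int cmd)
 *	{
 *		const struct iw_handler_def *h = dev->wireless_handlers;
 *		unsigned int idx = cmd - SIOCIWFIRST;
 *
 *		if (h && idx < h->num_standard)
 *			return h->standard[idx];
 *		return NULL;
 *	}
 *
 * so only the entries listed above are reachable; anything else is rejected
 * by the core before the driver is involved.
 */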
static const iw_handler prism2_private_handler[] = { /* SIOCIWFIRSTPRIV + */ prism2_ioctl_priv_prism2_param, /* 0 */ prism2_ioctl_priv_get_prism2_param, /* 1 */ prism2_ioctl_priv_writemif, /* 2 */ prism2_ioctl_priv_readmif, /* 3 */ }; const struct iw_handler_def hostap_iw_handler_def = { .num_standard = ARRAY_SIZE(prism2_handler), .num_private = ARRAY_SIZE(prism2_private_handler), .num_private_args = ARRAY_SIZE(prism2_priv), .standard = prism2_handler, .private = prism2_private_handler, .private_args = (struct iw_priv_args *) prism2_priv, .get_wireless_stats = hostap_get_wireless_stats, }; /* Private ioctls (iwpriv) that have not yet been converted * into new wireless extensions API */ int hostap_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct iwreq *wrq = (struct iwreq *) ifr; struct hostap_interface *iface; local_info_t *local; int ret = 0; iface = netdev_priv(dev); local = iface->local; switch (cmd) { case PRISM2_IOCTL_INQUIRE: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = prism2_ioctl_priv_inquire(dev, (int *) wrq->u.name); break; case PRISM2_IOCTL_MONITOR: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = prism2_ioctl_priv_monitor(dev, (int *) wrq->u.name); break; case PRISM2_IOCTL_RESET: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = prism2_ioctl_priv_reset(dev, (int *) wrq->u.name); break; case PRISM2_IOCTL_WDS_ADD: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = prism2_wds_add(local, wrq->u.ap_addr.sa_data, 1); break; case PRISM2_IOCTL_WDS_DEL: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = prism2_wds_del(local, wrq->u.ap_addr.sa_data, 1, 0); break; case PRISM2_IOCTL_SET_RID_WORD: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = prism2_ioctl_priv_set_rid_word(dev, (int *) wrq->u.name); break; #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT case PRISM2_IOCTL_MACCMD: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = ap_mac_cmd_ioctl(local, (int *) wrq->u.name); break; case PRISM2_IOCTL_ADDMAC: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = ap_control_add_mac(&local->ap->mac_restrictions, wrq->u.ap_addr.sa_data); break; case PRISM2_IOCTL_DELMAC: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = ap_control_del_mac(&local->ap->mac_restrictions, wrq->u.ap_addr.sa_data); break; case PRISM2_IOCTL_KICKMAC: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = ap_control_kick_mac(local->ap, local->dev, wrq->u.ap_addr.sa_data); break; #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ default: ret = -EOPNOTSUPP; break; } return ret; } /* Private ioctls that are not used with iwpriv; * in SIOCDEVPRIVATE range */ int hostap_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd) { struct iwreq *wrq = (struct iwreq *)ifr; struct hostap_interface *iface; local_info_t *local; int ret = 0; iface = netdev_priv(dev); local = iface->local; if (in_compat_syscall()) /* not implemented yet */ return -EOPNOTSUPP; switch (cmd) { #ifdef PRISM2_DOWNLOAD_SUPPORT case PRISM2_IOCTL_DOWNLOAD: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = prism2_ioctl_priv_download(local, &wrq->u.data); break; #endif /* PRISM2_DOWNLOAD_SUPPORT */ case PRISM2_IOCTL_HOSTAPD: if (!capable(CAP_NET_ADMIN)) ret = -EPERM; else ret = prism2_ioctl_priv_hostapd(local, &wrq->u.data); break; default: ret = -EOPNOTSUPP; break; } return ret; }
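/*
 * Hypothetical user-space sketch (a separate program, not part of this file):
 * issuing the prism2_param sub-ioctl registered above at SIOCIWFIRSTPRIV + 0.
 * Its arguments are declared in prism2_priv[] as two fixed ints, so the
 * wireless-extensions core passes them inline in the request union, which is
 * why prism2_ioctl_priv_prism2_param() reads them straight from "extra".
 * The PRISM2_PARAM_* numbers come from the driver's exported header.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <linux/wireless.h>

static int set_prism2_param(int sock, const char *ifname, int param, int value)
{
	struct iwreq wrq;
	int args[2] = { param, value };	/* sub-ioctl number, new value */

	memset(&wrq, 0, sizeof(wrq));
	strncpy(wrq.ifr_name, ifname, IFNAMSIZ - 1);
	memcpy(wrq.u.name, args, sizeof(args));	/* two ints fit in the union */

	return ioctl(sock, SIOCIWFIRSTPRIV + 0, &wrq);	/* "prism2_param" */
}

/*
 * Example call: set_prism2_param(socket(AF_INET, SOCK_DGRAM, 0), "wlan0",
 * PRISM2_PARAM_DUMP, 1); as a SET private ioctl it needs CAP_NET_ADMIN.
 */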
linux-master
drivers/net/wireless/intersil/hostap/hostap_ioctl.c
// SPDX-License-Identifier: GPL-2.0 /* * Intersil Prism2 driver with Host AP (software access point) support * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen * <[email protected]> * Copyright (c) 2002-2005, Jouni Malinen <[email protected]> * * This file is to be included into hostap.c when S/W AP functionality is * compiled. * * AP: FIX: * - if unicast Class 2 (assoc,reassoc,disassoc) frame received from * unauthenticated STA, send deauth. frame (8802.11: 5.5) * - if unicast Class 3 (data with to/from DS,deauth,pspoll) frame received * from authenticated, but unassoc STA, send disassoc frame (8802.11: 5.5) * - if unicast Class 3 received from unauthenticated STA, send deauth. frame * (8802.11: 5.5) */ #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/delay.h> #include <linux/random.h> #include <linux/if_arp.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/moduleparam.h> #include <linux/etherdevice.h> #include "hostap_wlan.h" #include "hostap.h" #include "hostap_ap.h" static int other_ap_policy[MAX_PARM_DEVICES] = { AP_OTHER_AP_SKIP_ALL, DEF_INTS }; module_param_array(other_ap_policy, int, NULL, 0444); MODULE_PARM_DESC(other_ap_policy, "Other AP beacon monitoring policy (0-3)"); static int ap_max_inactivity[MAX_PARM_DEVICES] = { AP_MAX_INACTIVITY_SEC, DEF_INTS }; module_param_array(ap_max_inactivity, int, NULL, 0444); MODULE_PARM_DESC(ap_max_inactivity, "AP timeout (in seconds) for station " "inactivity"); static int ap_bridge_packets[MAX_PARM_DEVICES] = { 1, DEF_INTS }; module_param_array(ap_bridge_packets, int, NULL, 0444); MODULE_PARM_DESC(ap_bridge_packets, "Bridge packets directly between " "stations"); static int autom_ap_wds[MAX_PARM_DEVICES] = { 0, DEF_INTS }; module_param_array(autom_ap_wds, int, NULL, 0444); MODULE_PARM_DESC(autom_ap_wds, "Add WDS connections to other APs " "automatically"); static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta); static void hostap_event_expired_sta(struct net_device *dev, struct sta_info *sta); static void handle_add_proc_queue(struct work_struct *work); #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT static void handle_wds_oper_queue(struct work_struct *work); static void prism2_send_mgmt(struct net_device *dev, u16 type_subtype, char *body, int body_len, u8 *addr, u16 tx_cb_idx); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ #if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS) static int ap_debug_proc_show(struct seq_file *m, void *v) { struct ap_data *ap = pde_data(file_inode(m->file)); seq_printf(m, "BridgedUnicastFrames=%u\n", ap->bridged_unicast); seq_printf(m, "BridgedMulticastFrames=%u\n", ap->bridged_multicast); seq_printf(m, "max_inactivity=%u\n", ap->max_inactivity / HZ); seq_printf(m, "bridge_packets=%u\n", ap->bridge_packets); seq_printf(m, "nullfunc_ack=%u\n", ap->nullfunc_ack); seq_printf(m, "autom_ap_wds=%u\n", ap->autom_ap_wds); seq_printf(m, "auth_algs=%u\n", ap->local->auth_algs); seq_printf(m, "tx_drop_nonassoc=%u\n", ap->tx_drop_nonassoc); return 0; } #endif static void ap_sta_hash_add(struct ap_data *ap, struct sta_info *sta) { sta->hnext = ap->sta_hash[STA_HASH(sta->addr)]; ap->sta_hash[STA_HASH(sta->addr)] = sta; } static void ap_sta_hash_del(struct ap_data *ap, struct sta_info *sta) { struct sta_info *s; s = ap->sta_hash[STA_HASH(sta->addr)]; if (s == NULL) return; if (ether_addr_equal(s->addr, sta->addr)) { ap->sta_hash[STA_HASH(sta->addr)] = s->hnext; return; } while (s->hnext != NULL && !ether_addr_equal(s->hnext->addr, sta->addr)) s = 
s->hnext; if (s->hnext != NULL) s->hnext = s->hnext->hnext; else printk("AP: could not remove STA %pM from hash table\n", sta->addr); } static void ap_free_sta(struct ap_data *ap, struct sta_info *sta) { if (sta->ap && sta->local) hostap_event_expired_sta(sta->local->dev, sta); if (ap->proc != NULL) { char name[20]; sprintf(name, "%pM", sta->addr); remove_proc_entry(name, ap->proc); } if (sta->crypt) { sta->crypt->ops->deinit(sta->crypt->priv); kfree(sta->crypt); sta->crypt = NULL; } skb_queue_purge(&sta->tx_buf); ap->num_sta--; #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT if (sta->aid > 0) ap->sta_aid[sta->aid - 1] = NULL; if (!sta->ap) kfree(sta->u.sta.challenge); timer_shutdown_sync(&sta->timer); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ kfree(sta); } static void hostap_set_tim(local_info_t *local, int aid, int set) { if (local->func->set_tim) local->func->set_tim(local->dev, aid, set); } static void hostap_event_new_sta(struct net_device *dev, struct sta_info *sta) { union iwreq_data wrqu; memset(&wrqu, 0, sizeof(wrqu)); memcpy(wrqu.addr.sa_data, sta->addr, ETH_ALEN); wrqu.addr.sa_family = ARPHRD_ETHER; wireless_send_event(dev, IWEVREGISTERED, &wrqu, NULL); } static void hostap_event_expired_sta(struct net_device *dev, struct sta_info *sta) { union iwreq_data wrqu; memset(&wrqu, 0, sizeof(wrqu)); memcpy(wrqu.addr.sa_data, sta->addr, ETH_ALEN); wrqu.addr.sa_family = ARPHRD_ETHER; wireless_send_event(dev, IWEVEXPIRED, &wrqu, NULL); } #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT static void ap_handle_timer(struct timer_list *t) { struct sta_info *sta = from_timer(sta, t, timer); local_info_t *local; struct ap_data *ap; unsigned long next_time = 0; int was_assoc; if (sta == NULL || sta->local == NULL || sta->local->ap == NULL) { PDEBUG(DEBUG_AP, "ap_handle_timer() called with NULL data\n"); return; } local = sta->local; ap = local->ap; was_assoc = sta->flags & WLAN_STA_ASSOC; if (atomic_read(&sta->users) != 0) next_time = jiffies + HZ; else if ((sta->flags & WLAN_STA_PERM) && !(sta->flags & WLAN_STA_AUTH)) next_time = jiffies + ap->max_inactivity; if (time_before(jiffies, sta->last_rx + ap->max_inactivity)) { /* station activity detected; reset timeout state */ sta->timeout_next = STA_NULLFUNC; next_time = sta->last_rx + ap->max_inactivity; } else if (sta->timeout_next == STA_DISASSOC && !(sta->flags & WLAN_STA_PENDING_POLL)) { /* STA ACKed data nullfunc frame poll */ sta->timeout_next = STA_NULLFUNC; next_time = jiffies + ap->max_inactivity; } if (next_time) { sta->timer.expires = next_time; add_timer(&sta->timer); return; } if (sta->ap) sta->timeout_next = STA_DEAUTH; if (sta->timeout_next == STA_DEAUTH && !(sta->flags & WLAN_STA_PERM)) { spin_lock(&ap->sta_table_lock); ap_sta_hash_del(ap, sta); list_del(&sta->list); spin_unlock(&ap->sta_table_lock); sta->flags &= ~(WLAN_STA_AUTH | WLAN_STA_ASSOC); } else if (sta->timeout_next == STA_DISASSOC) sta->flags &= ~WLAN_STA_ASSOC; if (was_assoc && !(sta->flags & WLAN_STA_ASSOC) && !sta->ap) hostap_event_expired_sta(local->dev, sta); if (sta->timeout_next == STA_DEAUTH && sta->aid > 0 && !skb_queue_empty(&sta->tx_buf)) { hostap_set_tim(local, sta->aid, 0); sta->flags &= ~WLAN_STA_TIM; } if (sta->ap) { if (ap->autom_ap_wds) { PDEBUG(DEBUG_AP, "%s: removing automatic WDS " "connection to AP %pM\n", local->dev->name, sta->addr); hostap_wds_link_oper(local, sta->addr, WDS_DEL); } } else if (sta->timeout_next == STA_NULLFUNC) { /* send data frame to poll STA and check whether this frame * is ACKed */ /* FIX: IEEE80211_STYPE_NULLFUNC would be more 
appropriate, but * it is apparently not retried so TX Exc events are not * received for it */ sta->flags |= WLAN_STA_PENDING_POLL; prism2_send_mgmt(local->dev, IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA, NULL, 0, sta->addr, ap->tx_callback_poll); } else { int deauth = sta->timeout_next == STA_DEAUTH; __le16 resp; PDEBUG(DEBUG_AP, "%s: sending %s info to STA %pM" "(last=%lu, jiffies=%lu)\n", local->dev->name, deauth ? "deauthentication" : "disassociation", sta->addr, sta->last_rx, jiffies); resp = cpu_to_le16(deauth ? WLAN_REASON_PREV_AUTH_NOT_VALID : WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY); prism2_send_mgmt(local->dev, IEEE80211_FTYPE_MGMT | (deauth ? IEEE80211_STYPE_DEAUTH : IEEE80211_STYPE_DISASSOC), (char *) &resp, 2, sta->addr, 0); } if (sta->timeout_next == STA_DEAUTH) { if (sta->flags & WLAN_STA_PERM) { PDEBUG(DEBUG_AP, "%s: STA %pM" " would have been removed, " "but it has 'perm' flag\n", local->dev->name, sta->addr); } else ap_free_sta(ap, sta); return; } if (sta->timeout_next == STA_NULLFUNC) { sta->timeout_next = STA_DISASSOC; sta->timer.expires = jiffies + AP_DISASSOC_DELAY; } else { sta->timeout_next = STA_DEAUTH; sta->timer.expires = jiffies + AP_DEAUTH_DELAY; } add_timer(&sta->timer); } void hostap_deauth_all_stas(struct net_device *dev, struct ap_data *ap, int resend) { u8 addr[ETH_ALEN]; __le16 resp; int i; PDEBUG(DEBUG_AP, "%s: Deauthenticate all stations\n", dev->name); eth_broadcast_addr(addr); resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID); /* deauth message sent; try to resend it few times; the message is * broadcast, so it may be delayed until next DTIM; there is not much * else we can do at this point since the driver is going to be shut * down */ for (i = 0; i < 5; i++) { prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH, (char *) &resp, 2, addr, 0); if (!resend || ap->num_sta <= 0) return; mdelay(50); } } static int ap_control_proc_show(struct seq_file *m, void *v) { struct ap_data *ap = pde_data(file_inode(m->file)); char *policy_txt; struct mac_entry *entry; if (v == SEQ_START_TOKEN) { switch (ap->mac_restrictions.policy) { case MAC_POLICY_OPEN: policy_txt = "open"; break; case MAC_POLICY_ALLOW: policy_txt = "allow"; break; case MAC_POLICY_DENY: policy_txt = "deny"; break; default: policy_txt = "unknown"; break; } seq_printf(m, "MAC policy: %s\n", policy_txt); seq_printf(m, "MAC entries: %u\n", ap->mac_restrictions.entries); seq_puts(m, "MAC list:\n"); return 0; } entry = v; seq_printf(m, "%pM\n", entry->addr); return 0; } static void *ap_control_proc_start(struct seq_file *m, loff_t *_pos) { struct ap_data *ap = pde_data(file_inode(m->file)); spin_lock_bh(&ap->mac_restrictions.lock); return seq_list_start_head(&ap->mac_restrictions.mac_list, *_pos); } static void *ap_control_proc_next(struct seq_file *m, void *v, loff_t *_pos) { struct ap_data *ap = pde_data(file_inode(m->file)); return seq_list_next(v, &ap->mac_restrictions.mac_list, _pos); } static void ap_control_proc_stop(struct seq_file *m, void *v) { struct ap_data *ap = pde_data(file_inode(m->file)); spin_unlock_bh(&ap->mac_restrictions.lock); } static const struct seq_operations ap_control_proc_seqops = { .start = ap_control_proc_start, .next = ap_control_proc_next, .stop = ap_control_proc_stop, .show = ap_control_proc_show, }; int ap_control_add_mac(struct mac_restrictions *mac_restrictions, u8 *mac) { struct mac_entry *entry; entry = kmalloc(sizeof(struct mac_entry), GFP_KERNEL); if (entry == NULL) return -ENOMEM; memcpy(entry->addr, mac, ETH_ALEN); 
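	/* Publish the new entry on the MAC access control list under the lock. */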
spin_lock_bh(&mac_restrictions->lock); list_add_tail(&entry->list, &mac_restrictions->mac_list); mac_restrictions->entries++; spin_unlock_bh(&mac_restrictions->lock); return 0; } int ap_control_del_mac(struct mac_restrictions *mac_restrictions, u8 *mac) { struct list_head *ptr; struct mac_entry *entry; spin_lock_bh(&mac_restrictions->lock); for (ptr = mac_restrictions->mac_list.next; ptr != &mac_restrictions->mac_list; ptr = ptr->next) { entry = list_entry(ptr, struct mac_entry, list); if (ether_addr_equal(entry->addr, mac)) { list_del(ptr); kfree(entry); mac_restrictions->entries--; spin_unlock_bh(&mac_restrictions->lock); return 0; } } spin_unlock_bh(&mac_restrictions->lock); return -1; } static int ap_control_mac_deny(struct mac_restrictions *mac_restrictions, u8 *mac) { struct mac_entry *entry; int found = 0; if (mac_restrictions->policy == MAC_POLICY_OPEN) return 0; spin_lock_bh(&mac_restrictions->lock); list_for_each_entry(entry, &mac_restrictions->mac_list, list) { if (ether_addr_equal(entry->addr, mac)) { found = 1; break; } } spin_unlock_bh(&mac_restrictions->lock); if (mac_restrictions->policy == MAC_POLICY_ALLOW) return !found; else return found; } void ap_control_flush_macs(struct mac_restrictions *mac_restrictions) { struct list_head *ptr, *n; struct mac_entry *entry; if (mac_restrictions->entries == 0) return; spin_lock_bh(&mac_restrictions->lock); for (ptr = mac_restrictions->mac_list.next, n = ptr->next; ptr != &mac_restrictions->mac_list; ptr = n, n = ptr->next) { entry = list_entry(ptr, struct mac_entry, list); list_del(ptr); kfree(entry); } mac_restrictions->entries = 0; spin_unlock_bh(&mac_restrictions->lock); } int ap_control_kick_mac(struct ap_data *ap, struct net_device *dev, u8 *mac) { struct sta_info *sta; __le16 resp; spin_lock_bh(&ap->sta_table_lock); sta = ap_get_sta(ap, mac); if (sta) { ap_sta_hash_del(ap, sta); list_del(&sta->list); } spin_unlock_bh(&ap->sta_table_lock); if (!sta) return -EINVAL; resp = cpu_to_le16(WLAN_REASON_PREV_AUTH_NOT_VALID); prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_DEAUTH, (char *) &resp, 2, sta->addr, 0); if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap) hostap_event_expired_sta(dev, sta); ap_free_sta(ap, sta); return 0; } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ void ap_control_kickall(struct ap_data *ap) { struct list_head *ptr, *n; struct sta_info *sta; spin_lock_bh(&ap->sta_table_lock); for (ptr = ap->sta_list.next, n = ptr->next; ptr != &ap->sta_list; ptr = n, n = ptr->next) { sta = list_entry(ptr, struct sta_info, list); ap_sta_hash_del(ap, sta); list_del(&sta->list); if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local) hostap_event_expired_sta(sta->local->dev, sta); ap_free_sta(ap, sta); } spin_unlock_bh(&ap->sta_table_lock); } #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT static int prism2_ap_proc_show(struct seq_file *m, void *v) { struct sta_info *sta = v; int i; if (v == SEQ_START_TOKEN) { seq_printf(m, "# BSSID CHAN SIGNAL NOISE RATE SSID FLAGS\n"); return 0; } if (!sta->ap) return 0; seq_printf(m, "%pM %d %d %d %d '", sta->addr, sta->u.ap.channel, sta->last_rx_signal, sta->last_rx_silence, sta->last_rx_rate); for (i = 0; i < sta->u.ap.ssid_len; i++) { if (sta->u.ap.ssid[i] >= 32 && sta->u.ap.ssid[i] < 127) seq_putc(m, sta->u.ap.ssid[i]); else seq_printf(m, "<%02x>", sta->u.ap.ssid[i]); } seq_putc(m, '\''); if (sta->capability & WLAN_CAPABILITY_ESS) seq_puts(m, " [ESS]"); if (sta->capability & WLAN_CAPABILITY_IBSS) seq_puts(m, " [IBSS]"); if (sta->capability & WLAN_CAPABILITY_PRIVACY) seq_puts(m, " 
[WEP]"); seq_putc(m, '\n'); return 0; } static void *prism2_ap_proc_start(struct seq_file *m, loff_t *_pos) { struct ap_data *ap = pde_data(file_inode(m->file)); spin_lock_bh(&ap->sta_table_lock); return seq_list_start_head(&ap->sta_list, *_pos); } static void *prism2_ap_proc_next(struct seq_file *m, void *v, loff_t *_pos) { struct ap_data *ap = pde_data(file_inode(m->file)); return seq_list_next(v, &ap->sta_list, _pos); } static void prism2_ap_proc_stop(struct seq_file *m, void *v) { struct ap_data *ap = pde_data(file_inode(m->file)); spin_unlock_bh(&ap->sta_table_lock); } static const struct seq_operations prism2_ap_proc_seqops = { .start = prism2_ap_proc_start, .next = prism2_ap_proc_next, .stop = prism2_ap_proc_stop, .show = prism2_ap_proc_show, }; #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ void hostap_check_sta_fw_version(struct ap_data *ap, int sta_fw_ver) { if (!ap) return; if (sta_fw_ver == PRISM2_FW_VER(0,8,0)) { PDEBUG(DEBUG_AP, "Using data::nullfunc ACK workaround - " "firmware upgrade recommended\n"); ap->nullfunc_ack = 1; } else ap->nullfunc_ack = 0; if (sta_fw_ver == PRISM2_FW_VER(1,4,2)) { printk(KERN_WARNING "%s: Warning: secondary station firmware " "version 1.4.2 does not seem to work in Host AP mode\n", ap->local->dev->name); } } /* Called only as a tasklet (software IRQ) */ static void hostap_ap_tx_cb(struct sk_buff *skb, int ok, void *data) { struct ap_data *ap = data; struct ieee80211_hdr *hdr; if (!ap->local->hostapd || !ap->local->apdev) { dev_kfree_skb(skb); return; } /* Pass the TX callback frame to the hostapd; use 802.11 header version * 1 to indicate failure (no ACK) and 2 success (frame ACKed) */ hdr = (struct ieee80211_hdr *) skb->data; hdr->frame_control &= cpu_to_le16(~IEEE80211_FCTL_VERS); hdr->frame_control |= cpu_to_le16(ok ? 
BIT(1) : BIT(0)); skb->dev = ap->local->apdev; skb_pull(skb, hostap_80211_get_hdrlen(hdr->frame_control)); skb->pkt_type = PACKET_OTHERHOST; skb->protocol = cpu_to_be16(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT /* Called only as a tasklet (software IRQ) */ static void hostap_ap_tx_cb_auth(struct sk_buff *skb, int ok, void *data) { struct ap_data *ap = data; struct net_device *dev = ap->local->dev; struct ieee80211_hdr *hdr; u16 auth_alg, auth_transaction, status; __le16 *pos; struct sta_info *sta = NULL; char *txt = NULL; if (ap->local->hostapd) { dev_kfree_skb(skb); return; } hdr = (struct ieee80211_hdr *) skb->data; if (!ieee80211_is_auth(hdr->frame_control) || skb->len < IEEE80211_MGMT_HDR_LEN + 6) { printk(KERN_DEBUG "%s: hostap_ap_tx_cb_auth received invalid " "frame\n", dev->name); dev_kfree_skb(skb); return; } pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN); auth_alg = le16_to_cpu(*pos++); auth_transaction = le16_to_cpu(*pos++); status = le16_to_cpu(*pos++); if (!ok) { txt = "frame was not ACKed"; goto done; } spin_lock(&ap->sta_table_lock); sta = ap_get_sta(ap, hdr->addr1); if (sta) atomic_inc(&sta->users); spin_unlock(&ap->sta_table_lock); if (!sta) { txt = "STA not found"; goto done; } if (status == WLAN_STATUS_SUCCESS && ((auth_alg == WLAN_AUTH_OPEN && auth_transaction == 2) || (auth_alg == WLAN_AUTH_SHARED_KEY && auth_transaction == 4))) { txt = "STA authenticated"; sta->flags |= WLAN_STA_AUTH; sta->last_auth = jiffies; } else if (status != WLAN_STATUS_SUCCESS) txt = "authentication failed"; done: if (sta) atomic_dec(&sta->users); if (txt) { PDEBUG(DEBUG_AP, "%s: %pM auth_cb - alg=%d " "trans#=%d status=%d - %s\n", dev->name, hdr->addr1, auth_alg, auth_transaction, status, txt); } dev_kfree_skb(skb); } /* Called only as a tasklet (software IRQ) */ static void hostap_ap_tx_cb_assoc(struct sk_buff *skb, int ok, void *data) { struct ap_data *ap = data; struct net_device *dev = ap->local->dev; struct ieee80211_hdr *hdr; u16 status; __le16 *pos; struct sta_info *sta = NULL; char *txt = NULL; if (ap->local->hostapd) { dev_kfree_skb(skb); return; } hdr = (struct ieee80211_hdr *) skb->data; if ((!ieee80211_is_assoc_resp(hdr->frame_control) && !ieee80211_is_reassoc_resp(hdr->frame_control)) || skb->len < IEEE80211_MGMT_HDR_LEN + 4) { printk(KERN_DEBUG "%s: hostap_ap_tx_cb_assoc received invalid " "frame\n", dev->name); dev_kfree_skb(skb); return; } if (!ok) { txt = "frame was not ACKed"; goto done; } spin_lock(&ap->sta_table_lock); sta = ap_get_sta(ap, hdr->addr1); if (sta) atomic_inc(&sta->users); spin_unlock(&ap->sta_table_lock); if (!sta) { txt = "STA not found"; goto done; } pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN); pos++; status = le16_to_cpu(*pos++); if (status == WLAN_STATUS_SUCCESS) { if (!(sta->flags & WLAN_STA_ASSOC)) hostap_event_new_sta(dev, sta); txt = "STA associated"; sta->flags |= WLAN_STA_ASSOC; sta->last_assoc = jiffies; } else txt = "association failed"; done: if (sta) atomic_dec(&sta->users); if (txt) { PDEBUG(DEBUG_AP, "%s: %pM assoc_cb - %s\n", dev->name, hdr->addr1, txt); } dev_kfree_skb(skb); } /* Called only as a tasklet (software IRQ); TX callback for poll frames used * in verifying whether the STA is still present. 
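 * If the poll frame is ACKed, the WLAN_STA_PENDING_POLL flag that was set
 * when the inactivity timer sent the activity poll is cleared here; if it is
 * not ACKed, the flag is left set and only a debug message is printed.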
*/ static void hostap_ap_tx_cb_poll(struct sk_buff *skb, int ok, void *data) { struct ap_data *ap = data; struct ieee80211_hdr *hdr; struct sta_info *sta; if (skb->len < 24) goto fail; hdr = (struct ieee80211_hdr *) skb->data; if (ok) { spin_lock(&ap->sta_table_lock); sta = ap_get_sta(ap, hdr->addr1); if (sta) sta->flags &= ~WLAN_STA_PENDING_POLL; spin_unlock(&ap->sta_table_lock); } else { PDEBUG(DEBUG_AP, "%s: STA %pM did not ACK activity poll frame\n", ap->local->dev->name, hdr->addr1); } fail: dev_kfree_skb(skb); } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ void hostap_init_data(local_info_t *local) { struct ap_data *ap = local->ap; if (ap == NULL) { printk(KERN_WARNING "hostap_init_data: ap == NULL\n"); return; } memset(ap, 0, sizeof(struct ap_data)); ap->local = local; ap->ap_policy = GET_INT_PARM(other_ap_policy, local->card_idx); ap->bridge_packets = GET_INT_PARM(ap_bridge_packets, local->card_idx); ap->max_inactivity = GET_INT_PARM(ap_max_inactivity, local->card_idx) * HZ; ap->autom_ap_wds = GET_INT_PARM(autom_ap_wds, local->card_idx); spin_lock_init(&ap->sta_table_lock); INIT_LIST_HEAD(&ap->sta_list); /* Initialize task queue structure for AP management */ INIT_WORK(&local->ap->add_sta_proc_queue, handle_add_proc_queue); ap->tx_callback_idx = hostap_tx_callback_register(local, hostap_ap_tx_cb, ap); if (ap->tx_callback_idx == 0) printk(KERN_WARNING "%s: failed to register TX callback for " "AP\n", local->dev->name); #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT INIT_WORK(&local->ap->wds_oper_queue, handle_wds_oper_queue); ap->tx_callback_auth = hostap_tx_callback_register(local, hostap_ap_tx_cb_auth, ap); ap->tx_callback_assoc = hostap_tx_callback_register(local, hostap_ap_tx_cb_assoc, ap); ap->tx_callback_poll = hostap_tx_callback_register(local, hostap_ap_tx_cb_poll, ap); if (ap->tx_callback_auth == 0 || ap->tx_callback_assoc == 0 || ap->tx_callback_poll == 0) printk(KERN_WARNING "%s: failed to register TX callback for " "AP\n", local->dev->name); spin_lock_init(&ap->mac_restrictions.lock); INIT_LIST_HEAD(&ap->mac_restrictions.mac_list); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ ap->initialized = 1; } void hostap_init_ap_proc(local_info_t *local) { struct ap_data *ap = local->ap; ap->proc = local->proc; if (ap->proc == NULL) return; #ifndef PRISM2_NO_PROCFS_DEBUG proc_create_single_data("ap_debug", 0, ap->proc, ap_debug_proc_show, ap); #endif /* PRISM2_NO_PROCFS_DEBUG */ #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT proc_create_seq_data("ap_control", 0, ap->proc, &ap_control_proc_seqops, ap); proc_create_seq_data("ap", 0, ap->proc, &prism2_ap_proc_seqops, ap); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ } void hostap_free_data(struct ap_data *ap) { struct sta_info *n, *sta; if (ap == NULL || !ap->initialized) { printk(KERN_DEBUG "hostap_free_data: ap has not yet been " "initialized - skip resource freeing\n"); return; } flush_work(&ap->add_sta_proc_queue); #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT flush_work(&ap->wds_oper_queue); if (ap->crypt) ap->crypt->deinit(ap->crypt_priv); ap->crypt = ap->crypt_priv = NULL; #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ list_for_each_entry_safe(sta, n, &ap->sta_list, list) { ap_sta_hash_del(ap, sta); list_del(&sta->list); if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local) hostap_event_expired_sta(sta->local->dev, sta); ap_free_sta(ap, sta); } #ifndef PRISM2_NO_PROCFS_DEBUG if (ap->proc != NULL) { remove_proc_entry("ap_debug", ap->proc); } #endif /* PRISM2_NO_PROCFS_DEBUG */ #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT if (ap->proc != NULL) { 
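/* Remove the "ap" and "ap_control" proc entries registered in
 * hostap_init_ap_proc(). */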
remove_proc_entry("ap", ap->proc); remove_proc_entry("ap_control", ap->proc); } ap_control_flush_macs(&ap->mac_restrictions); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ ap->initialized = 0; } /* caller should have mutex for AP STA list handling */ static struct sta_info* ap_get_sta(struct ap_data *ap, u8 *sta) { struct sta_info *s; s = ap->sta_hash[STA_HASH(sta)]; while (s != NULL && !ether_addr_equal(s->addr, sta)) s = s->hnext; return s; } #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT /* Called from timer handler and from scheduled AP queue handlers */ static void prism2_send_mgmt(struct net_device *dev, u16 type_subtype, char *body, int body_len, u8 *addr, u16 tx_cb_idx) { struct hostap_interface *iface; local_info_t *local; struct ieee80211_hdr *hdr; u16 fc; struct sk_buff *skb; struct hostap_skb_tx_data *meta; int hdrlen; iface = netdev_priv(dev); local = iface->local; dev = local->dev; /* always use master radio device */ iface = netdev_priv(dev); if (!(dev->flags & IFF_UP)) { PDEBUG(DEBUG_AP, "%s: prism2_send_mgmt - device is not UP - " "cannot send frame\n", dev->name); return; } skb = dev_alloc_skb(sizeof(*hdr) + body_len); if (skb == NULL) { PDEBUG(DEBUG_AP, "%s: prism2_send_mgmt failed to allocate " "skb\n", dev->name); return; } fc = type_subtype; hdrlen = hostap_80211_get_hdrlen(cpu_to_le16(type_subtype)); hdr = skb_put_zero(skb, hdrlen); if (body) skb_put_data(skb, body, body_len); /* FIX: ctrl::ack sending used special HFA384X_TX_CTRL_802_11 * tx_control instead of using local->tx_control */ memcpy(hdr->addr1, addr, ETH_ALEN); /* DA / RA */ if (ieee80211_is_data(hdr->frame_control)) { fc |= IEEE80211_FCTL_FROMDS; memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* BSSID */ memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* SA */ } else if (ieee80211_is_ctl(hdr->frame_control)) { /* control:ACK does not have addr2 or addr3 */ eth_zero_addr(hdr->addr2); eth_zero_addr(hdr->addr3); } else { memcpy(hdr->addr2, dev->dev_addr, ETH_ALEN); /* SA */ memcpy(hdr->addr3, dev->dev_addr, ETH_ALEN); /* BSSID */ } hdr->frame_control = cpu_to_le16(fc); meta = (struct hostap_skb_tx_data *) skb->cb; memset(meta, 0, sizeof(*meta)); meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; meta->iface = iface; meta->tx_cb_idx = tx_cb_idx; skb->dev = dev; skb_reset_mac_header(skb); skb_reset_network_header(skb); dev_queue_xmit(skb); } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ #ifdef CONFIG_PROC_FS static int prism2_sta_proc_show(struct seq_file *m, void *v) { struct sta_info *sta = m->private; int i; /* FIX: possible race condition.. the STA data could have just expired, * but proc entry was still here so that the read could have started; * some locking should be done here.. */ seq_printf(m, "%s=%pM\nusers=%d\naid=%d\n" "flags=0x%04x%s%s%s%s%s%s%s\n" "capability=0x%02x\nlisten_interval=%d\nsupported_rates=", sta->ap ? "AP" : "STA", sta->addr, atomic_read(&sta->users), sta->aid, sta->flags, sta->flags & WLAN_STA_AUTH ? " AUTH" : "", sta->flags & WLAN_STA_ASSOC ? " ASSOC" : "", sta->flags & WLAN_STA_PS ? " PS" : "", sta->flags & WLAN_STA_TIM ? " TIM" : "", sta->flags & WLAN_STA_PERM ? " PERM" : "", sta->flags & WLAN_STA_AUTHORIZED ? " AUTHORIZED" : "", sta->flags & WLAN_STA_PENDING_POLL ? " POLL" : "", sta->capability, sta->listen_interval); /* supported_rates: 500 kbit/s units with msb ignored */ for (i = 0; i < sizeof(sta->supported_rates); i++) if (sta->supported_rates[i] != 0) seq_printf(m, "%d%sMbps ", (sta->supported_rates[i] & 0x7f) / 2, sta->supported_rates[i] & 1 ? 
".5" : ""); seq_printf(m, "\njiffies=%lu\nlast_auth=%lu\nlast_assoc=%lu\n" "last_rx=%lu\nlast_tx=%lu\nrx_packets=%lu\n" "tx_packets=%lu\n" "rx_bytes=%lu\ntx_bytes=%lu\nbuffer_count=%d\n" "last_rx: silence=%d dBm signal=%d dBm rate=%d%s Mbps\n" "tx_rate=%d\ntx[1M]=%d\ntx[2M]=%d\ntx[5.5M]=%d\n" "tx[11M]=%d\n" "rx[1M]=%d\nrx[2M]=%d\nrx[5.5M]=%d\nrx[11M]=%d\n", jiffies, sta->last_auth, sta->last_assoc, sta->last_rx, sta->last_tx, sta->rx_packets, sta->tx_packets, sta->rx_bytes, sta->tx_bytes, skb_queue_len(&sta->tx_buf), sta->last_rx_silence, sta->last_rx_signal, sta->last_rx_rate / 10, sta->last_rx_rate % 10 ? ".5" : "", sta->tx_rate, sta->tx_count[0], sta->tx_count[1], sta->tx_count[2], sta->tx_count[3], sta->rx_count[0], sta->rx_count[1], sta->rx_count[2], sta->rx_count[3]); if (sta->crypt && sta->crypt->ops && sta->crypt->ops->print_stats) sta->crypt->ops->print_stats(m, sta->crypt->priv); #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT if (sta->ap) { if (sta->u.ap.channel >= 0) seq_printf(m, "channel=%d\n", sta->u.ap.channel); seq_puts(m, "ssid="); for (i = 0; i < sta->u.ap.ssid_len; i++) { if (sta->u.ap.ssid[i] >= 32 && sta->u.ap.ssid[i] < 127) seq_putc(m, sta->u.ap.ssid[i]); else seq_printf(m, "<%02x>", sta->u.ap.ssid[i]); } seq_putc(m, '\n'); } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ return 0; } #endif static void handle_add_proc_queue(struct work_struct *work) { struct ap_data *ap = container_of(work, struct ap_data, add_sta_proc_queue); struct sta_info *sta; char name[20]; struct add_sta_proc_data *entry, *prev; entry = ap->add_sta_proc_entries; ap->add_sta_proc_entries = NULL; while (entry) { spin_lock_bh(&ap->sta_table_lock); sta = ap_get_sta(ap, entry->addr); if (sta) atomic_inc(&sta->users); spin_unlock_bh(&ap->sta_table_lock); if (sta) { sprintf(name, "%pM", sta->addr); sta->proc = proc_create_single_data( name, 0, ap->proc, prism2_sta_proc_show, sta); atomic_dec(&sta->users); } prev = entry; entry = entry->next; kfree(prev); } } static struct sta_info * ap_add_sta(struct ap_data *ap, u8 *addr) { struct sta_info *sta; sta = kzalloc(sizeof(struct sta_info), GFP_ATOMIC); if (sta == NULL) { PDEBUG(DEBUG_AP, "AP: kmalloc failed\n"); return NULL; } /* initialize STA info data */ sta->local = ap->local; skb_queue_head_init(&sta->tx_buf); memcpy(sta->addr, addr, ETH_ALEN); atomic_inc(&sta->users); spin_lock_bh(&ap->sta_table_lock); list_add(&sta->list, &ap->sta_list); ap->num_sta++; ap_sta_hash_add(ap, sta); spin_unlock_bh(&ap->sta_table_lock); if (ap->proc) { struct add_sta_proc_data *entry; /* schedule a non-interrupt context process to add a procfs * entry for the STA since procfs code use GFP_KERNEL */ entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (entry) { memcpy(entry->addr, sta->addr, ETH_ALEN); entry->next = ap->add_sta_proc_entries; ap->add_sta_proc_entries = entry; schedule_work(&ap->add_sta_proc_queue); } else printk(KERN_DEBUG "Failed to add STA proc data\n"); } #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT timer_setup(&sta->timer, ap_handle_timer, 0); sta->timer.expires = jiffies + ap->max_inactivity; if (!ap->local->hostapd) add_timer(&sta->timer); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ return sta; } static int ap_tx_rate_ok(int rateidx, struct sta_info *sta, local_info_t *local) { if (rateidx > sta->tx_max_rate || !(sta->tx_supp_rates & (1 << rateidx))) return 0; if (local->tx_rate_control != 0 && !(local->tx_rate_control & (1 << rateidx))) return 0; return 1; } static void prism2_check_tx_rates(struct sta_info *sta) { int i; sta->tx_supp_rates = 0; for (i = 0; i < 
sizeof(sta->supported_rates); i++) { if ((sta->supported_rates[i] & 0x7f) == 2) sta->tx_supp_rates |= WLAN_RATE_1M; if ((sta->supported_rates[i] & 0x7f) == 4) sta->tx_supp_rates |= WLAN_RATE_2M; if ((sta->supported_rates[i] & 0x7f) == 11) sta->tx_supp_rates |= WLAN_RATE_5M5; if ((sta->supported_rates[i] & 0x7f) == 22) sta->tx_supp_rates |= WLAN_RATE_11M; } sta->tx_max_rate = sta->tx_rate = sta->tx_rate_idx = 0; if (sta->tx_supp_rates & WLAN_RATE_1M) { sta->tx_max_rate = 0; if (ap_tx_rate_ok(0, sta, sta->local)) { sta->tx_rate = 10; sta->tx_rate_idx = 0; } } if (sta->tx_supp_rates & WLAN_RATE_2M) { sta->tx_max_rate = 1; if (ap_tx_rate_ok(1, sta, sta->local)) { sta->tx_rate = 20; sta->tx_rate_idx = 1; } } if (sta->tx_supp_rates & WLAN_RATE_5M5) { sta->tx_max_rate = 2; if (ap_tx_rate_ok(2, sta, sta->local)) { sta->tx_rate = 55; sta->tx_rate_idx = 2; } } if (sta->tx_supp_rates & WLAN_RATE_11M) { sta->tx_max_rate = 3; if (ap_tx_rate_ok(3, sta, sta->local)) { sta->tx_rate = 110; sta->tx_rate_idx = 3; } } } #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT static void ap_crypt_init(struct ap_data *ap) { ap->crypt = lib80211_get_crypto_ops("WEP"); if (ap->crypt) { if (ap->crypt->init) { ap->crypt_priv = ap->crypt->init(0); if (ap->crypt_priv == NULL) ap->crypt = NULL; else { u8 key[WEP_KEY_LEN]; get_random_bytes(key, WEP_KEY_LEN); ap->crypt->set_key(key, WEP_KEY_LEN, NULL, ap->crypt_priv); } } } if (ap->crypt == NULL) { printk(KERN_WARNING "AP could not initialize WEP: load module " "lib80211_crypt_wep.ko\n"); } } /* Generate challenge data for shared key authentication. IEEE 802.11 specifies * that WEP algorithm is used for generating challenge. This should be unique, * but otherwise there is not really need for randomness etc. Initialize WEP * with pseudo random key and then use increasing IV to get unique challenge * streams. * * Called only as a scheduled task for pending AP frames. */ static char * ap_auth_make_challenge(struct ap_data *ap) { char *tmpbuf; struct sk_buff *skb; if (ap->crypt == NULL) { ap_crypt_init(ap); if (ap->crypt == NULL) return NULL; } tmpbuf = kmalloc(WLAN_AUTH_CHALLENGE_LEN, GFP_ATOMIC); if (tmpbuf == NULL) { PDEBUG(DEBUG_AP, "AP: kmalloc failed for challenge\n"); return NULL; } skb = dev_alloc_skb(WLAN_AUTH_CHALLENGE_LEN + ap->crypt->extra_mpdu_prefix_len + ap->crypt->extra_mpdu_postfix_len); if (skb == NULL) { kfree(tmpbuf); return NULL; } skb_reserve(skb, ap->crypt->extra_mpdu_prefix_len); skb_put_zero(skb, WLAN_AUTH_CHALLENGE_LEN); if (ap->crypt->encrypt_mpdu(skb, 0, ap->crypt_priv)) { dev_kfree_skb(skb); kfree(tmpbuf); return NULL; } skb_copy_from_linear_data_offset(skb, ap->crypt->extra_mpdu_prefix_len, tmpbuf, WLAN_AUTH_CHALLENGE_LEN); dev_kfree_skb(skb); return tmpbuf; } /* Called only as a scheduled task for pending AP frames. 
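 * handle_authen() parses the authentication algorithm, transaction number and
 * status code from the frame, enforces the MAC ACL and the configured
 * auth_algs, creates a sta_info entry for a new station when needed, and
 * replies with an authentication frame carrying the resulting status code via
 * prism2_send_mgmt().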
*/ static void handle_authen(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { struct net_device *dev = local->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; size_t hdrlen; struct ap_data *ap = local->ap; char body[8 + WLAN_AUTH_CHALLENGE_LEN], *challenge = NULL; int len, olen; u16 auth_alg, auth_transaction, status_code; __le16 *pos; u16 resp = WLAN_STATUS_SUCCESS; struct sta_info *sta = NULL; struct lib80211_crypt_data *crypt; char *txt = ""; len = skb->len - IEEE80211_MGMT_HDR_LEN; hdrlen = hostap_80211_get_hdrlen(hdr->frame_control); if (len < 6) { PDEBUG(DEBUG_AP, "%s: handle_authen - too short payload " "(len=%d) from %pM\n", dev->name, len, hdr->addr2); return; } spin_lock_bh(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr2); if (sta) atomic_inc(&sta->users); spin_unlock_bh(&local->ap->sta_table_lock); if (sta && sta->crypt) crypt = sta->crypt; else { int idx = 0; if (skb->len >= hdrlen + 3) idx = skb->data[hdrlen + 3] >> 6; crypt = local->crypt_info.crypt[idx]; } pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN); auth_alg = __le16_to_cpu(*pos); pos++; auth_transaction = __le16_to_cpu(*pos); pos++; status_code = __le16_to_cpu(*pos); pos++; if (ether_addr_equal(dev->dev_addr, hdr->addr2) || ap_control_mac_deny(&ap->mac_restrictions, hdr->addr2)) { txt = "authentication denied"; resp = WLAN_STATUS_UNSPECIFIED_FAILURE; goto fail; } if (((local->auth_algs & PRISM2_AUTH_OPEN) && auth_alg == WLAN_AUTH_OPEN) || ((local->auth_algs & PRISM2_AUTH_SHARED_KEY) && crypt && auth_alg == WLAN_AUTH_SHARED_KEY)) { } else { txt = "unsupported algorithm"; resp = WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG; goto fail; } if (len >= 8) { u8 *u = (u8 *) pos; if (*u == WLAN_EID_CHALLENGE) { if (*(u + 1) != WLAN_AUTH_CHALLENGE_LEN) { txt = "invalid challenge len"; resp = WLAN_STATUS_CHALLENGE_FAIL; goto fail; } if (len - 8 < WLAN_AUTH_CHALLENGE_LEN) { txt = "challenge underflow"; resp = WLAN_STATUS_CHALLENGE_FAIL; goto fail; } challenge = (char *) (u + 2); } } if (sta && sta->ap) { if (time_after(jiffies, sta->u.ap.last_beacon + (10 * sta->listen_interval * HZ) / 1024)) { PDEBUG(DEBUG_AP, "%s: no beacons received for a while," " assuming AP %pM is now STA\n", dev->name, sta->addr); sta->ap = 0; sta->flags = 0; sta->u.sta.challenge = NULL; } else { txt = "AP trying to authenticate?"; resp = WLAN_STATUS_UNSPECIFIED_FAILURE; goto fail; } } if ((auth_alg == WLAN_AUTH_OPEN && auth_transaction == 1) || (auth_alg == WLAN_AUTH_SHARED_KEY && (auth_transaction == 1 || (auth_transaction == 3 && sta != NULL && sta->u.sta.challenge != NULL)))) { } else { txt = "unknown authentication transaction number"; resp = WLAN_STATUS_UNKNOWN_AUTH_TRANSACTION; goto fail; } if (sta == NULL) { txt = "new STA"; if (local->ap->num_sta >= MAX_STA_COUNT) { /* FIX: might try to remove some old STAs first? */ txt = "no more room for new STAs"; resp = WLAN_STATUS_UNSPECIFIED_FAILURE; goto fail; } sta = ap_add_sta(local->ap, hdr->addr2); if (sta == NULL) { txt = "ap_add_sta failed"; resp = WLAN_STATUS_UNSPECIFIED_FAILURE; goto fail; } } switch (auth_alg) { case WLAN_AUTH_OPEN: txt = "authOK"; /* IEEE 802.11 standard is not completely clear about * whether STA is considered authenticated after * authentication OK frame has been send or after it * has been ACKed. In order to reduce interoperability * issues, mark the STA authenticated before ACK. 
*/ sta->flags |= WLAN_STA_AUTH; break; case WLAN_AUTH_SHARED_KEY: if (auth_transaction == 1) { if (sta->u.sta.challenge == NULL) { sta->u.sta.challenge = ap_auth_make_challenge(local->ap); if (sta->u.sta.challenge == NULL) { resp = WLAN_STATUS_UNSPECIFIED_FAILURE; goto fail; } } } else { if (sta->u.sta.challenge == NULL || challenge == NULL || memcmp(sta->u.sta.challenge, challenge, WLAN_AUTH_CHALLENGE_LEN) != 0 || !ieee80211_has_protected(hdr->frame_control)) { txt = "challenge response incorrect"; resp = WLAN_STATUS_CHALLENGE_FAIL; goto fail; } txt = "challenge OK - authOK"; /* IEEE 802.11 standard is not completely clear about * whether STA is considered authenticated after * authentication OK frame has been send or after it * has been ACKed. In order to reduce interoperability * issues, mark the STA authenticated before ACK. */ sta->flags |= WLAN_STA_AUTH; kfree(sta->u.sta.challenge); sta->u.sta.challenge = NULL; } break; } fail: pos = (__le16 *) body; *pos = cpu_to_le16(auth_alg); pos++; *pos = cpu_to_le16(auth_transaction + 1); pos++; *pos = cpu_to_le16(resp); /* status_code */ pos++; olen = 6; if (resp == WLAN_STATUS_SUCCESS && sta != NULL && sta->u.sta.challenge != NULL && auth_alg == WLAN_AUTH_SHARED_KEY && auth_transaction == 1) { u8 *tmp = (u8 *) pos; *tmp++ = WLAN_EID_CHALLENGE; *tmp++ = WLAN_AUTH_CHALLENGE_LEN; pos++; memcpy(pos, sta->u.sta.challenge, WLAN_AUTH_CHALLENGE_LEN); olen += 2 + WLAN_AUTH_CHALLENGE_LEN; } prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH, body, olen, hdr->addr2, ap->tx_callback_auth); if (sta) { sta->last_rx = jiffies; atomic_dec(&sta->users); } if (resp) { PDEBUG(DEBUG_AP, "%s: %pM auth (alg=%d " "trans#=%d stat=%d len=%d fc=%04x) ==> %d (%s)\n", dev->name, hdr->addr2, auth_alg, auth_transaction, status_code, len, le16_to_cpu(hdr->frame_control), resp, txt); } } /* Called only as a scheduled task for pending AP frames. */ static void handle_assoc(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats, int reassoc) { struct net_device *dev = local->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; char body[12], *p, *lpos; int len, left; __le16 *pos; u16 resp = WLAN_STATUS_SUCCESS; struct sta_info *sta = NULL; int send_deauth = 0; char __always_unused *txt = ""; u8 prev_ap[ETH_ALEN]; left = len = skb->len - IEEE80211_MGMT_HDR_LEN; if (len < (reassoc ? 
10 : 4)) { PDEBUG(DEBUG_AP, "%s: handle_assoc - too short payload " "(len=%d, reassoc=%d) from %pM\n", dev->name, len, reassoc, hdr->addr2); return; } spin_lock_bh(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr2); if (sta == NULL || (sta->flags & WLAN_STA_AUTH) == 0) { spin_unlock_bh(&local->ap->sta_table_lock); txt = "trying to associate before authentication"; send_deauth = 1; resp = WLAN_STATUS_UNSPECIFIED_FAILURE; sta = NULL; /* do not decrement sta->users */ goto fail; } atomic_inc(&sta->users); spin_unlock_bh(&local->ap->sta_table_lock); pos = (__le16 *) (skb->data + IEEE80211_MGMT_HDR_LEN); sta->capability = __le16_to_cpu(*pos); pos++; left -= 2; sta->listen_interval = __le16_to_cpu(*pos); pos++; left -= 2; if (reassoc) { memcpy(prev_ap, pos, ETH_ALEN); pos++; pos++; pos++; left -= 6; } else eth_zero_addr(prev_ap); if (left >= 2) { unsigned int ileft; unsigned char *u = (unsigned char *) pos; if (*u == WLAN_EID_SSID) { u++; left--; ileft = *u; u++; left--; if (ileft > left || ileft > MAX_SSID_LEN) { txt = "SSID overflow"; resp = WLAN_STATUS_UNSPECIFIED_FAILURE; goto fail; } if (ileft != strlen(local->essid) || memcmp(local->essid, u, ileft) != 0) { txt = "not our SSID"; resp = WLAN_STATUS_ASSOC_DENIED_UNSPEC; goto fail; } u += ileft; left -= ileft; } if (left >= 2 && *u == WLAN_EID_SUPP_RATES) { u++; left--; ileft = *u; u++; left--; if (ileft > left || ileft == 0 || ileft > WLAN_SUPP_RATES_MAX) { txt = "SUPP_RATES len error"; resp = WLAN_STATUS_UNSPECIFIED_FAILURE; goto fail; } memset(sta->supported_rates, 0, sizeof(sta->supported_rates)); memcpy(sta->supported_rates, u, ileft); prism2_check_tx_rates(sta); u += ileft; left -= ileft; } if (left > 0) { PDEBUG(DEBUG_AP, "%s: assoc from %pM" " with extra data (%d bytes) [", dev->name, hdr->addr2, left); while (left > 0) { PDEBUG2(DEBUG_AP, "<%02x>", *u); u++; left--; } PDEBUG2(DEBUG_AP, "]\n"); } } else { txt = "frame underflow"; resp = WLAN_STATUS_UNSPECIFIED_FAILURE; goto fail; } /* get a unique AID */ if (sta->aid > 0) txt = "OK, old AID"; else { spin_lock_bh(&local->ap->sta_table_lock); for (sta->aid = 1; sta->aid <= MAX_AID_TABLE_SIZE; sta->aid++) if (local->ap->sta_aid[sta->aid - 1] == NULL) break; if (sta->aid > MAX_AID_TABLE_SIZE) { sta->aid = 0; spin_unlock_bh(&local->ap->sta_table_lock); resp = WLAN_STATUS_AP_UNABLE_TO_HANDLE_NEW_STA; txt = "no room for more AIDs"; } else { local->ap->sta_aid[sta->aid - 1] = sta; spin_unlock_bh(&local->ap->sta_table_lock); txt = "OK, new AID"; } } fail: pos = (__le16 *) body; if (send_deauth) { *pos = cpu_to_le16(WLAN_REASON_STA_REQ_ASSOC_WITHOUT_AUTH); pos++; } else { /* FIX: CF-Pollable and CF-PollReq should be set to match the * values in beacons/probe responses */ /* FIX: how about privacy and WEP? */ /* capability */ *pos = cpu_to_le16(WLAN_CAPABILITY_ESS); pos++; /* status_code */ *pos = cpu_to_le16(resp); pos++; *pos = cpu_to_le16((sta && sta->aid > 0 ? sta->aid : 0) | BIT(14) | BIT(15)); /* AID */ pos++; /* Supported rates (Information element) */ p = (char *) pos; *p++ = WLAN_EID_SUPP_RATES; lpos = p; *p++ = 0; /* len */ if (local->tx_rate_control & WLAN_RATE_1M) { *p++ = local->basic_rates & WLAN_RATE_1M ? 0x82 : 0x02; (*lpos)++; } if (local->tx_rate_control & WLAN_RATE_2M) { *p++ = local->basic_rates & WLAN_RATE_2M ? 0x84 : 0x04; (*lpos)++; } if (local->tx_rate_control & WLAN_RATE_5M5) { *p++ = local->basic_rates & WLAN_RATE_5M5 ? 0x8b : 0x0b; (*lpos)++; } if (local->tx_rate_control & WLAN_RATE_11M) { *p++ = local->basic_rates & WLAN_RATE_11M ? 
0x96 : 0x16; (*lpos)++; } pos = (__le16 *) p; } prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | (send_deauth ? IEEE80211_STYPE_DEAUTH : (reassoc ? IEEE80211_STYPE_REASSOC_RESP : IEEE80211_STYPE_ASSOC_RESP)), body, (u8 *) pos - (u8 *) body, hdr->addr2, send_deauth ? 0 : local->ap->tx_callback_assoc); if (sta) { if (resp == WLAN_STATUS_SUCCESS) { sta->last_rx = jiffies; /* STA will be marked associated from TX callback, if * AssocResp is ACKed */ } atomic_dec(&sta->users); } #if 0 PDEBUG(DEBUG_AP, "%s: %pM %sassoc (len=%d " "prev_ap=%pM) => %d(%d) (%s)\n", dev->name, hdr->addr2, reassoc ? "re" : "", len, prev_ap, resp, send_deauth, txt); #endif } /* Called only as a scheduled task for pending AP frames. */ static void handle_deauth(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { struct net_device *dev = local->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; char *body = (char *) (skb->data + IEEE80211_MGMT_HDR_LEN); int len; u16 reason_code; __le16 *pos; struct sta_info *sta = NULL; len = skb->len - IEEE80211_MGMT_HDR_LEN; if (len < 2) { printk("handle_deauth - too short payload (len=%d)\n", len); return; } pos = (__le16 *) body; reason_code = le16_to_cpu(*pos); PDEBUG(DEBUG_AP, "%s: deauthentication: %pM len=%d, " "reason_code=%d\n", dev->name, hdr->addr2, len, reason_code); spin_lock_bh(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr2); if (sta != NULL) { if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap) hostap_event_expired_sta(local->dev, sta); sta->flags &= ~(WLAN_STA_AUTH | WLAN_STA_ASSOC); } spin_unlock_bh(&local->ap->sta_table_lock); if (sta == NULL) { printk("%s: deauthentication from %pM, " "reason_code=%d, but STA not authenticated\n", dev->name, hdr->addr2, reason_code); } } /* Called only as a scheduled task for pending AP frames. */ static void handle_disassoc(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { struct net_device *dev = local->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; char *body = skb->data + IEEE80211_MGMT_HDR_LEN; int len; u16 reason_code; __le16 *pos; struct sta_info *sta = NULL; len = skb->len - IEEE80211_MGMT_HDR_LEN; if (len < 2) { printk("handle_disassoc - too short payload (len=%d)\n", len); return; } pos = (__le16 *) body; reason_code = le16_to_cpu(*pos); PDEBUG(DEBUG_AP, "%s: disassociation: %pM len=%d, " "reason_code=%d\n", dev->name, hdr->addr2, len, reason_code); spin_lock_bh(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr2); if (sta != NULL) { if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap) hostap_event_expired_sta(local->dev, sta); sta->flags &= ~WLAN_STA_ASSOC; } spin_unlock_bh(&local->ap->sta_table_lock); if (sta == NULL) { printk("%s: disassociation from %pM, " "reason_code=%d, but STA not authenticated\n", dev->name, hdr->addr2, reason_code); } } /* Called only as a scheduled task for pending AP frames. */ static void ap_handle_data_nullfunc(local_info_t *local, struct ieee80211_hdr *hdr) { struct net_device *dev = local->dev; /* some STA f/w's seem to require control::ACK frame for * data::nullfunc, but at least Prism2 station f/w version 0.8.0 does * not send this.. * send control::ACK for the data::nullfunc */ printk(KERN_DEBUG "Sending control::ACK for data::nullfunc\n"); prism2_send_mgmt(dev, IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK, NULL, 0, hdr->addr2, 0); } /* Called only as a scheduled task for pending AP frames. 
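 * ap_handle_dropped_data() is used when a data frame is received from a
 * station that is not associated: it looks the sender up in the STA table
 * and, unless the station turns out to be associated after all, sends a
 * deauth/disassoc frame with reason code
 * WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA to speed up re-association.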
*/ static void ap_handle_dropped_data(local_info_t *local, struct ieee80211_hdr *hdr) { struct net_device *dev = local->dev; struct sta_info *sta; __le16 reason; spin_lock_bh(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr2); if (sta) atomic_inc(&sta->users); spin_unlock_bh(&local->ap->sta_table_lock); if (sta != NULL && (sta->flags & WLAN_STA_ASSOC)) { PDEBUG(DEBUG_AP, "ap_handle_dropped_data: STA is now okay?\n"); atomic_dec(&sta->users); return; } reason = cpu_to_le16(WLAN_REASON_CLASS3_FRAME_FROM_NONASSOC_STA); prism2_send_mgmt(dev, IEEE80211_FTYPE_MGMT | ((sta == NULL || !(sta->flags & WLAN_STA_ASSOC)) ? IEEE80211_STYPE_DEAUTH : IEEE80211_STYPE_DISASSOC), (char *) &reason, sizeof(reason), hdr->addr2, 0); if (sta) atomic_dec(&sta->users); } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ /* Called only as a scheduled task for pending AP frames. */ static void pspoll_send_buffered(local_info_t *local, struct sta_info *sta, struct sk_buff *skb) { struct hostap_skb_tx_data *meta; if (!(sta->flags & WLAN_STA_PS)) { /* Station has moved to non-PS mode, so send all buffered * frames using normal device queue. */ dev_queue_xmit(skb); return; } /* add a flag for hostap_handle_sta_tx() to know that this skb should * be passed through even though STA is using PS */ meta = (struct hostap_skb_tx_data *) skb->cb; meta->flags |= HOSTAP_TX_FLAGS_BUFFERED_FRAME; if (!skb_queue_empty(&sta->tx_buf)) { /* indicate to STA that more frames follow */ meta->flags |= HOSTAP_TX_FLAGS_ADD_MOREDATA; } dev_queue_xmit(skb); } /* Called only as a scheduled task for pending AP frames. */ static void handle_pspoll(local_info_t *local, struct ieee80211_hdr *hdr, struct hostap_80211_rx_status *rx_stats) { struct net_device *dev = local->dev; struct sta_info *sta; u16 aid; struct sk_buff *skb; PDEBUG(DEBUG_PS2, "handle_pspoll: BSSID=%pM, TA=%pM PWRMGT=%d\n", hdr->addr1, hdr->addr2, !!ieee80211_has_pm(hdr->frame_control)); if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) { PDEBUG(DEBUG_AP, "handle_pspoll - addr1(BSSID)=%pM not own MAC\n", hdr->addr1); return; } aid = le16_to_cpu(hdr->duration_id); if ((aid & (BIT(15) | BIT(14))) != (BIT(15) | BIT(14))) { PDEBUG(DEBUG_PS, " PSPOLL and AID[15:14] not set\n"); return; } aid &= ~(BIT(15) | BIT(14)); if (aid == 0 || aid > MAX_AID_TABLE_SIZE) { PDEBUG(DEBUG_PS, " invalid aid=%d\n", aid); return; } PDEBUG(DEBUG_PS2, " aid=%d\n", aid); spin_lock_bh(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr2); if (sta) atomic_inc(&sta->users); spin_unlock_bh(&local->ap->sta_table_lock); if (sta == NULL) { PDEBUG(DEBUG_PS, " STA not found\n"); return; } if (sta->aid != aid) { PDEBUG(DEBUG_PS, " received aid=%i does not match with " "assoc.aid=%d\n", aid, sta->aid); return; } /* FIX: todo: * - add timeout for buffering (clear aid in TIM vector if buffer timed * out (expiry time must be longer than ListenInterval for * the corresponding STA; "8802-11: 11.2.1.9 AP aging function" * - what to do, if buffered, pspolled, and sent frame is not ACKed by * sta; store buffer for later use and leave TIM aid bit set? use * TX event to check whether frame was ACKed? */ while ((skb = skb_dequeue(&sta->tx_buf)) != NULL) { /* send buffered frame .. 
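 * only one buffered frame is released per PS-Poll while the station remains
 * in PS mode; pspoll_send_buffered() tags it with
 * HOSTAP_TX_FLAGS_BUFFERED_FRAME (and HOSTAP_TX_FLAGS_ADD_MOREDATA if the
 * queue is not yet empty) so that hostap_handle_sta_tx() passes it through
 * instead of re-buffering it ..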
*/ PDEBUG(DEBUG_PS2, "Sending buffered frame to STA after PS POLL" " (buffer_count=%d)\n", skb_queue_len(&sta->tx_buf)); pspoll_send_buffered(local, sta, skb); if (sta->flags & WLAN_STA_PS) { /* send only one buffered packet per PS Poll */ /* FIX: should ignore further PS Polls until the * buffered packet that was just sent is acknowledged * (Tx or TxExc event) */ break; } } if (skb_queue_empty(&sta->tx_buf)) { /* try to clear aid from TIM */ if (!(sta->flags & WLAN_STA_TIM)) PDEBUG(DEBUG_PS2, "Re-unsetting TIM for aid %d\n", aid); hostap_set_tim(local, aid, 0); sta->flags &= ~WLAN_STA_TIM; } atomic_dec(&sta->users); } #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT static void handle_wds_oper_queue(struct work_struct *work) { struct ap_data *ap = container_of(work, struct ap_data, wds_oper_queue); local_info_t *local = ap->local; struct wds_oper_data *entry, *prev; spin_lock_bh(&local->lock); entry = local->ap->wds_oper_entries; local->ap->wds_oper_entries = NULL; spin_unlock_bh(&local->lock); while (entry) { PDEBUG(DEBUG_AP, "%s: %s automatic WDS connection " "to AP %pM\n", local->dev->name, entry->type == WDS_ADD ? "adding" : "removing", entry->addr); if (entry->type == WDS_ADD) prism2_wds_add(local, entry->addr, 0); else if (entry->type == WDS_DEL) prism2_wds_del(local, entry->addr, 0, 1); prev = entry; entry = entry->next; kfree(prev); } } /* Called only as a scheduled task for pending AP frames. */ static void handle_beacon(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; char *body = skb->data + IEEE80211_MGMT_HDR_LEN; int len, left; u16 beacon_int, capability; __le16 *pos; char *ssid = NULL; unsigned char *supp_rates = NULL; int ssid_len = 0, supp_rates_len = 0; struct sta_info *sta = NULL; int new_sta = 0, channel = -1; len = skb->len - IEEE80211_MGMT_HDR_LEN; if (len < 8 + 2 + 2) { printk(KERN_DEBUG "handle_beacon - too short payload " "(len=%d)\n", len); return; } pos = (__le16 *) body; left = len; /* Timestamp (8 octets) */ pos += 4; left -= 8; /* Beacon interval (2 octets) */ beacon_int = le16_to_cpu(*pos); pos++; left -= 2; /* Capability information (2 octets) */ capability = le16_to_cpu(*pos); pos++; left -= 2; if (local->ap->ap_policy != AP_OTHER_AP_EVEN_IBSS && capability & WLAN_CAPABILITY_IBSS) return; if (left >= 2) { unsigned int ileft; unsigned char *u = (unsigned char *) pos; if (*u == WLAN_EID_SSID) { u++; left--; ileft = *u; u++; left--; if (ileft > left || ileft > MAX_SSID_LEN) { PDEBUG(DEBUG_AP, "SSID: overflow\n"); return; } if (local->ap->ap_policy == AP_OTHER_AP_SAME_SSID && (ileft != strlen(local->essid) || memcmp(local->essid, u, ileft) != 0)) { /* not our SSID */ return; } ssid = u; ssid_len = ileft; u += ileft; left -= ileft; } if (*u == WLAN_EID_SUPP_RATES) { u++; left--; ileft = *u; u++; left--; if (ileft > left || ileft == 0 || ileft > 8) { PDEBUG(DEBUG_AP, " - SUPP_RATES len error\n"); return; } supp_rates = u; supp_rates_len = ileft; u += ileft; left -= ileft; } if (*u == WLAN_EID_DS_PARAMS) { u++; left--; ileft = *u; u++; left--; if (ileft > left || ileft != 1) { PDEBUG(DEBUG_AP, " - DS_PARAMS len error\n"); return; } channel = *u; u += ileft; left -= ileft; } } spin_lock_bh(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr2); if (sta != NULL) atomic_inc(&sta->users); spin_unlock_bh(&local->ap->sta_table_lock); if (sta == NULL) { /* add new AP */ new_sta = 1; sta = ap_add_sta(local->ap, hdr->addr2); if (sta == NULL) { printk(KERN_INFO "prism2: 
kmalloc failed for AP " "data structure\n"); return; } hostap_event_new_sta(local->dev, sta); /* mark APs authentication and associated for pseudo ad-hoc * style communication */ sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC; if (local->ap->autom_ap_wds) { hostap_wds_link_oper(local, sta->addr, WDS_ADD); } } sta->ap = 1; if (ssid) { sta->u.ap.ssid_len = ssid_len; memcpy(sta->u.ap.ssid, ssid, ssid_len); sta->u.ap.ssid[ssid_len] = '\0'; } else { sta->u.ap.ssid_len = 0; sta->u.ap.ssid[0] = '\0'; } sta->u.ap.channel = channel; sta->rx_packets++; sta->rx_bytes += len; sta->u.ap.last_beacon = sta->last_rx = jiffies; sta->capability = capability; sta->listen_interval = beacon_int; atomic_dec(&sta->users); if (new_sta) { memset(sta->supported_rates, 0, sizeof(sta->supported_rates)); memcpy(sta->supported_rates, supp_rates, supp_rates_len); prism2_check_tx_rates(sta); } } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ /* Called only as a tasklet. */ static void handle_ap_item(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT struct net_device *dev = local->dev; #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ u16 fc, type, stype; struct ieee80211_hdr *hdr; /* FIX: should give skb->len to handler functions and check that the * buffer is long enough */ hdr = (struct ieee80211_hdr *) skb->data; fc = le16_to_cpu(hdr->frame_control); type = fc & IEEE80211_FCTL_FTYPE; stype = fc & IEEE80211_FCTL_STYPE; #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT if (!local->hostapd && type == IEEE80211_FTYPE_DATA) { PDEBUG(DEBUG_AP, "handle_ap_item - data frame\n"); if (!(fc & IEEE80211_FCTL_TODS) || (fc & IEEE80211_FCTL_FROMDS)) { if (stype == IEEE80211_STYPE_NULLFUNC) { /* no ToDS nullfunc seems to be used to check * AP association; so send reject message to * speed up re-association */ ap_handle_dropped_data(local, hdr); goto done; } PDEBUG(DEBUG_AP, " not ToDS frame (fc=0x%04x)\n", fc); goto done; } if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) { PDEBUG(DEBUG_AP, "handle_ap_item - addr1(BSSID)=%pM" " not own MAC\n", hdr->addr1); goto done; } if (local->ap->nullfunc_ack && stype == IEEE80211_STYPE_NULLFUNC) ap_handle_data_nullfunc(local, hdr); else ap_handle_dropped_data(local, hdr); goto done; } if (type == IEEE80211_FTYPE_MGMT && stype == IEEE80211_STYPE_BEACON) { handle_beacon(local, skb, rx_stats); goto done; } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ if (type == IEEE80211_FTYPE_CTL && stype == IEEE80211_STYPE_PSPOLL) { handle_pspoll(local, hdr, rx_stats); goto done; } if (local->hostapd) { PDEBUG(DEBUG_AP, "Unknown frame in AP queue: type=0x%02x " "subtype=0x%02x\n", type, stype); goto done; } #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT if (type != IEEE80211_FTYPE_MGMT) { PDEBUG(DEBUG_AP, "handle_ap_item - not a management frame?\n"); goto done; } if (!ether_addr_equal(hdr->addr1, dev->dev_addr)) { PDEBUG(DEBUG_AP, "handle_ap_item - addr1(DA)=%pM" " not own MAC\n", hdr->addr1); goto done; } if (!ether_addr_equal(hdr->addr3, dev->dev_addr)) { PDEBUG(DEBUG_AP, "handle_ap_item - addr3(BSSID)=%pM" " not own MAC\n", hdr->addr3); goto done; } switch (stype) { case IEEE80211_STYPE_ASSOC_REQ: handle_assoc(local, skb, rx_stats, 0); break; case IEEE80211_STYPE_ASSOC_RESP: PDEBUG(DEBUG_AP, "==> ASSOC RESP (ignored)\n"); break; case IEEE80211_STYPE_REASSOC_REQ: handle_assoc(local, skb, rx_stats, 1); break; case IEEE80211_STYPE_REASSOC_RESP: PDEBUG(DEBUG_AP, "==> REASSOC RESP (ignored)\n"); break; case IEEE80211_STYPE_ATIM: PDEBUG(DEBUG_AP, "==> ATIM 
(ignored)\n"); break; case IEEE80211_STYPE_DISASSOC: handle_disassoc(local, skb, rx_stats); break; case IEEE80211_STYPE_AUTH: handle_authen(local, skb, rx_stats); break; case IEEE80211_STYPE_DEAUTH: handle_deauth(local, skb, rx_stats); break; default: PDEBUG(DEBUG_AP, "Unknown mgmt frame subtype 0x%02x\n", stype >> 4); break; } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ done: dev_kfree_skb(skb); } /* Called only as a tasklet (software IRQ) */ void hostap_rx(struct net_device *dev, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { struct hostap_interface *iface; local_info_t *local; struct ieee80211_hdr *hdr; iface = netdev_priv(dev); local = iface->local; if (skb->len < 16) goto drop; dev->stats.rx_packets++; hdr = (struct ieee80211_hdr *) skb->data; if (local->ap->ap_policy == AP_OTHER_AP_SKIP_ALL && ieee80211_is_beacon(hdr->frame_control)) goto drop; skb->protocol = cpu_to_be16(ETH_P_HOSTAP); handle_ap_item(local, skb, rx_stats); return; drop: dev_kfree_skb(skb); } /* Called only as a tasklet (software IRQ) */ static void schedule_packet_send(local_info_t *local, struct sta_info *sta) { struct sk_buff *skb; struct ieee80211_hdr *hdr; struct hostap_80211_rx_status rx_stats; if (skb_queue_empty(&sta->tx_buf)) return; skb = dev_alloc_skb(16); if (skb == NULL) { printk(KERN_DEBUG "%s: schedule_packet_send: skb alloc " "failed\n", local->dev->name); return; } hdr = skb_put(skb, 16); /* Generate a fake pspoll frame to start packet delivery */ hdr->frame_control = cpu_to_le16( IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); memcpy(hdr->addr1, local->dev->dev_addr, ETH_ALEN); memcpy(hdr->addr2, sta->addr, ETH_ALEN); hdr->duration_id = cpu_to_le16(sta->aid | BIT(15) | BIT(14)); PDEBUG(DEBUG_PS2, "%s: Scheduling buffered packet delivery for STA %pM\n", local->dev->name, sta->addr); skb->dev = local->dev; memset(&rx_stats, 0, sizeof(rx_stats)); hostap_rx(local->dev, skb, &rx_stats); } int prism2_ap_get_sta_qual(local_info_t *local, struct sockaddr addr[], struct iw_quality qual[], int buf_size, int aplist) { struct ap_data *ap = local->ap; struct list_head *ptr; int count = 0; spin_lock_bh(&ap->sta_table_lock); for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list; ptr = ptr->next) { struct sta_info *sta = (struct sta_info *) ptr; if (aplist && !sta->ap) continue; addr[count].sa_family = ARPHRD_ETHER; memcpy(addr[count].sa_data, sta->addr, ETH_ALEN); if (sta->last_rx_silence == 0) qual[count].qual = sta->last_rx_signal < 27 ? 
0 : (sta->last_rx_signal - 27) * 92 / 127; else qual[count].qual = sta->last_rx_signal - sta->last_rx_silence - 35; qual[count].level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal); qual[count].noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence); qual[count].updated = sta->last_rx_updated; sta->last_rx_updated = IW_QUAL_DBM; count++; if (count >= buf_size) break; } spin_unlock_bh(&ap->sta_table_lock); return count; } /* Translate our list of Access Points & Stations to a card independent * format that the Wireless Tools will understand - Jean II */ int prism2_ap_translate_scan(struct net_device *dev, struct iw_request_info *info, char *buffer) { struct hostap_interface *iface; local_info_t *local; struct ap_data *ap; struct list_head *ptr; struct iw_event iwe; char *current_ev = buffer; char *end_buf = buffer + IW_SCAN_MAX_DATA; #if !defined(PRISM2_NO_KERNEL_IEEE80211_MGMT) char buf[64]; #endif iface = netdev_priv(dev); local = iface->local; ap = local->ap; spin_lock_bh(&ap->sta_table_lock); for (ptr = ap->sta_list.next; ptr != NULL && ptr != &ap->sta_list; ptr = ptr->next) { struct sta_info *sta = (struct sta_info *) ptr; /* First entry *MUST* be the AP MAC address */ memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWAP; iwe.u.ap_addr.sa_family = ARPHRD_ETHER; memcpy(iwe.u.ap_addr.sa_data, sta->addr, ETH_ALEN); iwe.len = IW_EV_ADDR_LEN; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_ADDR_LEN); /* Use the mode to indicate if it's a station or * an Access Point */ memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWMODE; if (sta->ap) iwe.u.mode = IW_MODE_MASTER; else iwe.u.mode = IW_MODE_INFRA; iwe.len = IW_EV_UINT_LEN; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_UINT_LEN); /* Some quality */ memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVQUAL; if (sta->last_rx_silence == 0) iwe.u.qual.qual = sta->last_rx_signal < 27 ? 
0 : (sta->last_rx_signal - 27) * 92 / 127; else iwe.u.qual.qual = sta->last_rx_signal - sta->last_rx_silence - 35; iwe.u.qual.level = HFA384X_LEVEL_TO_dBm(sta->last_rx_signal); iwe.u.qual.noise = HFA384X_LEVEL_TO_dBm(sta->last_rx_silence); iwe.u.qual.updated = sta->last_rx_updated; iwe.len = IW_EV_QUAL_LEN; current_ev = iwe_stream_add_event(info, current_ev, end_buf, &iwe, IW_EV_QUAL_LEN); #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT if (sta->ap) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWESSID; iwe.u.data.length = sta->u.ap.ssid_len; iwe.u.data.flags = 1; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, sta->u.ap.ssid); memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWENCODE; if (sta->capability & WLAN_CAPABILITY_PRIVACY) iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY; else iwe.u.data.flags = IW_ENCODE_DISABLED; current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, sta->u.ap.ssid); if (sta->u.ap.channel > 0 && sta->u.ap.channel <= FREQ_COUNT) { memset(&iwe, 0, sizeof(iwe)); iwe.cmd = SIOCGIWFREQ; iwe.u.freq.m = freq_list[sta->u.ap.channel - 1] * 100000; iwe.u.freq.e = 1; current_ev = iwe_stream_add_event( info, current_ev, end_buf, &iwe, IW_EV_FREQ_LEN); } memset(&iwe, 0, sizeof(iwe)); iwe.cmd = IWEVCUSTOM; sprintf(buf, "beacon_interval=%d", sta->listen_interval); iwe.u.data.length = strlen(buf); current_ev = iwe_stream_add_point(info, current_ev, end_buf, &iwe, buf); } #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ sta->last_rx_updated = IW_QUAL_DBM; /* To be continued, we should make good use of IWEVCUSTOM */ } spin_unlock_bh(&ap->sta_table_lock); return current_ev - buffer; } static int prism2_hostapd_add_sta(struct ap_data *ap, struct prism2_hostapd_param *param) { struct sta_info *sta; spin_lock_bh(&ap->sta_table_lock); sta = ap_get_sta(ap, param->sta_addr); if (sta) atomic_inc(&sta->users); spin_unlock_bh(&ap->sta_table_lock); if (sta == NULL) { sta = ap_add_sta(ap, param->sta_addr); if (sta == NULL) return -1; } if (!(sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local) hostap_event_new_sta(sta->local->dev, sta); sta->flags |= WLAN_STA_AUTH | WLAN_STA_ASSOC; sta->last_rx = jiffies; sta->aid = param->u.add_sta.aid; sta->capability = param->u.add_sta.capability; sta->tx_supp_rates = param->u.add_sta.tx_supp_rates; if (sta->tx_supp_rates & WLAN_RATE_1M) sta->supported_rates[0] = 2; if (sta->tx_supp_rates & WLAN_RATE_2M) sta->supported_rates[1] = 4; if (sta->tx_supp_rates & WLAN_RATE_5M5) sta->supported_rates[2] = 11; if (sta->tx_supp_rates & WLAN_RATE_11M) sta->supported_rates[3] = 22; prism2_check_tx_rates(sta); atomic_dec(&sta->users); return 0; } static int prism2_hostapd_remove_sta(struct ap_data *ap, struct prism2_hostapd_param *param) { struct sta_info *sta; spin_lock_bh(&ap->sta_table_lock); sta = ap_get_sta(ap, param->sta_addr); if (sta) { ap_sta_hash_del(ap, sta); list_del(&sta->list); } spin_unlock_bh(&ap->sta_table_lock); if (!sta) return -ENOENT; if ((sta->flags & WLAN_STA_ASSOC) && !sta->ap && sta->local) hostap_event_expired_sta(sta->local->dev, sta); ap_free_sta(ap, sta); return 0; } static int prism2_hostapd_get_info_sta(struct ap_data *ap, struct prism2_hostapd_param *param) { struct sta_info *sta; spin_lock_bh(&ap->sta_table_lock); sta = ap_get_sta(ap, param->sta_addr); if (sta) atomic_inc(&sta->users); spin_unlock_bh(&ap->sta_table_lock); if (!sta) return -ENOENT; param->u.get_info_sta.inactive_sec = (jiffies - sta->last_rx) / HZ; atomic_dec(&sta->users); return 1; } static int prism2_hostapd_set_flags_sta(struct ap_data *ap, 
struct prism2_hostapd_param *param) { struct sta_info *sta; spin_lock_bh(&ap->sta_table_lock); sta = ap_get_sta(ap, param->sta_addr); if (sta) { sta->flags |= param->u.set_flags_sta.flags_or; sta->flags &= param->u.set_flags_sta.flags_and; } spin_unlock_bh(&ap->sta_table_lock); if (!sta) return -ENOENT; return 0; } static int prism2_hostapd_sta_clear_stats(struct ap_data *ap, struct prism2_hostapd_param *param) { struct sta_info *sta; int rate; spin_lock_bh(&ap->sta_table_lock); sta = ap_get_sta(ap, param->sta_addr); if (sta) { sta->rx_packets = sta->tx_packets = 0; sta->rx_bytes = sta->tx_bytes = 0; for (rate = 0; rate < WLAN_RATE_COUNT; rate++) { sta->tx_count[rate] = 0; sta->rx_count[rate] = 0; } } spin_unlock_bh(&ap->sta_table_lock); if (!sta) return -ENOENT; return 0; } int prism2_hostapd(struct ap_data *ap, struct prism2_hostapd_param *param) { switch (param->cmd) { case PRISM2_HOSTAPD_FLUSH: ap_control_kickall(ap); return 0; case PRISM2_HOSTAPD_ADD_STA: return prism2_hostapd_add_sta(ap, param); case PRISM2_HOSTAPD_REMOVE_STA: return prism2_hostapd_remove_sta(ap, param); case PRISM2_HOSTAPD_GET_INFO_STA: return prism2_hostapd_get_info_sta(ap, param); case PRISM2_HOSTAPD_SET_FLAGS_STA: return prism2_hostapd_set_flags_sta(ap, param); case PRISM2_HOSTAPD_STA_CLEAR_STATS: return prism2_hostapd_sta_clear_stats(ap, param); default: printk(KERN_WARNING "prism2_hostapd: unknown cmd=%d\n", param->cmd); return -EOPNOTSUPP; } } /* Update station info for host-based TX rate control and return current * TX rate */ static int ap_update_sta_tx_rate(struct sta_info *sta, struct net_device *dev) { int ret = sta->tx_rate; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; sta->tx_count[sta->tx_rate_idx]++; sta->tx_since_last_failure++; sta->tx_consecutive_exc = 0; if (sta->tx_since_last_failure >= WLAN_RATE_UPDATE_COUNT && sta->tx_rate_idx < sta->tx_max_rate) { /* use next higher rate */ int old_rate, new_rate; old_rate = new_rate = sta->tx_rate_idx; while (new_rate < sta->tx_max_rate) { new_rate++; if (ap_tx_rate_ok(new_rate, sta, local)) { sta->tx_rate_idx = new_rate; break; } } if (old_rate != sta->tx_rate_idx) { switch (sta->tx_rate_idx) { case 0: sta->tx_rate = 10; break; case 1: sta->tx_rate = 20; break; case 2: sta->tx_rate = 55; break; case 3: sta->tx_rate = 110; break; default: sta->tx_rate = 0; break; } PDEBUG(DEBUG_AP, "%s: STA %pM TX rate raised to %d\n", dev->name, sta->addr, sta->tx_rate); } sta->tx_since_last_failure = 0; } return ret; } /* Called only from software IRQ. Called for each TX frame prior possible * encryption and transmit. 
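 * The return value tells the caller how to proceed: AP_TX_CONTINUE (send the
 * frame normally), AP_TX_CONTINUE_NOT_AUTHORIZED (send, but the STA has not
 * been authorized yet), AP_TX_DROP (discard the frame) or AP_TX_BUFFERED (the
 * skb was queued for a power-saving STA and must not be sent now). A rough,
 * hypothetical caller sketch (variable names are illustrative only):
 *
 *	switch (hostap_handle_sta_tx(local, &tx)) {
 *	case AP_TX_DROP:
 *		dev_kfree_skb(tx.skb);
 *		return;
 *	case AP_TX_BUFFERED:
 *		return;
 *	default:
 *		break;
 *	}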
*/ ap_tx_ret hostap_handle_sta_tx(local_info_t *local, struct hostap_tx_data *tx) { struct sta_info *sta = NULL; struct sk_buff *skb = tx->skb; int set_tim, ret; struct ieee80211_hdr *hdr; struct hostap_skb_tx_data *meta; meta = (struct hostap_skb_tx_data *) skb->cb; ret = AP_TX_CONTINUE; if (local->ap == NULL || skb->len < 10 || meta->iface->type == HOSTAP_INTERFACE_STA) goto out; hdr = (struct ieee80211_hdr *) skb->data; if (hdr->addr1[0] & 0x01) { /* broadcast/multicast frame - no AP related processing */ if (local->ap->num_sta <= 0) ret = AP_TX_DROP; goto out; } /* unicast packet - check whether destination STA is associated */ spin_lock(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr1); if (sta) atomic_inc(&sta->users); spin_unlock(&local->ap->sta_table_lock); if (local->iw_mode == IW_MODE_MASTER && sta == NULL && !(meta->flags & HOSTAP_TX_FLAGS_WDS) && meta->iface->type != HOSTAP_INTERFACE_MASTER && meta->iface->type != HOSTAP_INTERFACE_AP) { #if 0 /* This can happen, e.g., when wlan0 is added to a bridge and * bridging code does not know which port is the correct target * for a unicast frame. In this case, the packet is send to all * ports of the bridge. Since this is a valid scenario, do not * print out any errors here. */ if (net_ratelimit()) { printk(KERN_DEBUG "AP: drop packet to non-associated " "STA %pM\n", hdr->addr1); } #endif local->ap->tx_drop_nonassoc++; ret = AP_TX_DROP; goto out; } if (sta == NULL) goto out; if (!(sta->flags & WLAN_STA_AUTHORIZED)) ret = AP_TX_CONTINUE_NOT_AUTHORIZED; /* Set tx_rate if using host-based TX rate control */ if (!local->fw_tx_rate_control) local->ap->last_tx_rate = meta->rate = ap_update_sta_tx_rate(sta, local->dev); if (local->iw_mode != IW_MODE_MASTER) goto out; if (!(sta->flags & WLAN_STA_PS)) goto out; if (meta->flags & HOSTAP_TX_FLAGS_ADD_MOREDATA) { /* indicate to STA that more frames follow */ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); } if (meta->flags & HOSTAP_TX_FLAGS_BUFFERED_FRAME) { /* packet was already buffered and now send due to * PS poll, so do not rebuffer it */ goto out; } if (skb_queue_len(&sta->tx_buf) >= STA_MAX_TX_BUFFER) { PDEBUG(DEBUG_PS, "%s: No more space in STA (%pM)'s" "PS mode buffer\n", local->dev->name, sta->addr); /* Make sure that TIM is set for the station (it might not be * after AP wlan hw reset). */ /* FIX: should fix hw reset to restore bits based on STA * buffer state.. 
*/ hostap_set_tim(local, sta->aid, 1); sta->flags |= WLAN_STA_TIM; ret = AP_TX_DROP; goto out; } /* STA in PS mode, buffer frame for later delivery */ set_tim = skb_queue_empty(&sta->tx_buf); skb_queue_tail(&sta->tx_buf, skb); /* FIX: could save RX time to skb and expire buffered frames after * some time if STA does not poll for them */ if (set_tim) { if (sta->flags & WLAN_STA_TIM) PDEBUG(DEBUG_PS2, "Re-setting TIM for aid %d\n", sta->aid); hostap_set_tim(local, sta->aid, 1); sta->flags |= WLAN_STA_TIM; } ret = AP_TX_BUFFERED; out: if (sta != NULL) { if (ret == AP_TX_CONTINUE || ret == AP_TX_CONTINUE_NOT_AUTHORIZED) { sta->tx_packets++; sta->tx_bytes += skb->len; sta->last_tx = jiffies; } if ((ret == AP_TX_CONTINUE || ret == AP_TX_CONTINUE_NOT_AUTHORIZED) && sta->crypt && tx->host_encrypt) { tx->crypt = sta->crypt; tx->sta_ptr = sta; /* hostap_handle_sta_release() will * be called to release sta info * later */ } else atomic_dec(&sta->users); } return ret; } void hostap_handle_sta_release(void *ptr) { struct sta_info *sta = ptr; atomic_dec(&sta->users); } /* Called only as a tasklet (software IRQ) */ void hostap_handle_sta_tx_exc(local_info_t *local, struct sk_buff *skb) { struct sta_info *sta; struct ieee80211_hdr *hdr; struct hostap_skb_tx_data *meta; hdr = (struct ieee80211_hdr *) skb->data; meta = (struct hostap_skb_tx_data *) skb->cb; spin_lock(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr1); if (!sta) { spin_unlock(&local->ap->sta_table_lock); PDEBUG(DEBUG_AP, "%s: Could not find STA %pM" " for this TX error (@%lu)\n", local->dev->name, hdr->addr1, jiffies); return; } sta->tx_since_last_failure = 0; sta->tx_consecutive_exc++; if (sta->tx_consecutive_exc >= WLAN_RATE_DECREASE_THRESHOLD && sta->tx_rate_idx > 0 && meta->rate <= sta->tx_rate) { /* use next lower rate */ int old, rate; old = rate = sta->tx_rate_idx; while (rate > 0) { rate--; if (ap_tx_rate_ok(rate, sta, local)) { sta->tx_rate_idx = rate; break; } } if (old != sta->tx_rate_idx) { switch (sta->tx_rate_idx) { case 0: sta->tx_rate = 10; break; case 1: sta->tx_rate = 20; break; case 2: sta->tx_rate = 55; break; case 3: sta->tx_rate = 110; break; default: sta->tx_rate = 0; break; } PDEBUG(DEBUG_AP, "%s: STA %pM TX rate lowered to %d\n", local->dev->name, sta->addr, sta->tx_rate); } sta->tx_consecutive_exc = 0; } spin_unlock(&local->ap->sta_table_lock); } static void hostap_update_sta_ps2(local_info_t *local, struct sta_info *sta, int pwrmgt, int type, int stype) { if (pwrmgt && !(sta->flags & WLAN_STA_PS)) { sta->flags |= WLAN_STA_PS; PDEBUG(DEBUG_PS2, "STA %pM changed to use PS " "mode (type=0x%02X, stype=0x%02X)\n", sta->addr, type >> 2, stype >> 4); } else if (!pwrmgt && (sta->flags & WLAN_STA_PS)) { sta->flags &= ~WLAN_STA_PS; PDEBUG(DEBUG_PS2, "STA %pM changed to not use " "PS mode (type=0x%02X, stype=0x%02X)\n", sta->addr, type >> 2, stype >> 4); if (type != IEEE80211_FTYPE_CTL || stype != IEEE80211_STYPE_PSPOLL) schedule_packet_send(local, sta); } } /* Called only as a tasklet (software IRQ). Called for each RX frame to update * STA power saving state. pwrmgt is a flag from 802.11 frame_control field. 
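 * Returns 0 when the sender is found in the STA table (and its PS state has
 * been updated from the PM bit) and -1 when the sender is unknown.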
*/ int hostap_update_sta_ps(local_info_t *local, struct ieee80211_hdr *hdr) { struct sta_info *sta; u16 fc; spin_lock(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr2); if (sta) atomic_inc(&sta->users); spin_unlock(&local->ap->sta_table_lock); if (!sta) return -1; fc = le16_to_cpu(hdr->frame_control); hostap_update_sta_ps2(local, sta, fc & IEEE80211_FCTL_PM, fc & IEEE80211_FCTL_FTYPE, fc & IEEE80211_FCTL_STYPE); atomic_dec(&sta->users); return 0; } /* Called only as a tasklet (software IRQ). Called for each RX frame after * getting RX header and payload from hardware. */ ap_rx_ret hostap_handle_sta_rx(local_info_t *local, struct net_device *dev, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats, int wds) { int ret; struct sta_info *sta; u16 fc, type, stype; struct ieee80211_hdr *hdr; if (local->ap == NULL) return AP_RX_CONTINUE; hdr = (struct ieee80211_hdr *) skb->data; fc = le16_to_cpu(hdr->frame_control); type = fc & IEEE80211_FCTL_FTYPE; stype = fc & IEEE80211_FCTL_STYPE; spin_lock(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr2); if (sta) atomic_inc(&sta->users); spin_unlock(&local->ap->sta_table_lock); if (sta && !(sta->flags & WLAN_STA_AUTHORIZED)) ret = AP_RX_CONTINUE_NOT_AUTHORIZED; else ret = AP_RX_CONTINUE; if (fc & IEEE80211_FCTL_TODS) { if (!wds && (sta == NULL || !(sta->flags & WLAN_STA_ASSOC))) { if (local->hostapd) { prism2_rx_80211(local->apdev, skb, rx_stats, PRISM2_RX_NON_ASSOC); #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT } else { printk(KERN_DEBUG "%s: dropped received packet" " from non-associated STA %pM" " (type=0x%02x, subtype=0x%02x)\n", dev->name, hdr->addr2, type >> 2, stype >> 4); hostap_rx(dev, skb, rx_stats); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ } ret = AP_RX_EXIT; goto out; } } else if (fc & IEEE80211_FCTL_FROMDS) { if (!wds) { /* FromDS frame - not for us; probably * broadcast/multicast in another BSS - drop */ if (ether_addr_equal(hdr->addr1, dev->dev_addr)) { printk(KERN_DEBUG "Odd.. FromDS packet " "received with own BSSID\n"); hostap_dump_rx_80211(dev->name, skb, rx_stats); } ret = AP_RX_DROP; goto out; } } else if (stype == IEEE80211_STYPE_NULLFUNC && sta == NULL && ether_addr_equal(hdr->addr1, dev->dev_addr)) { if (local->hostapd) { prism2_rx_80211(local->apdev, skb, rx_stats, PRISM2_RX_NON_ASSOC); #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT } else { /* At least Lucent f/w seems to send data::nullfunc * frames with no ToDS flag when the current AP returns * after being unavailable for some time. Speed up * re-association by informing the station about it not * being associated. */ printk(KERN_DEBUG "%s: rejected received nullfunc frame" " without ToDS from not associated STA %pM\n", dev->name, hdr->addr2); hostap_rx(dev, skb, rx_stats); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ } ret = AP_RX_EXIT; goto out; } else if (stype == IEEE80211_STYPE_NULLFUNC) { /* At least Lucent cards seem to send periodic nullfunc * frames with ToDS. Let these through to update SQ * stats and PS state. Nullfunc frames do not contain * any data and they will be dropped below. */ } else { /* If BSSID (Addr3) is foreign, this frame is a normal * broadcast frame from an IBSS network. Drop it silently. * If BSSID is own, report the dropping of this frame. 
*/ if (ether_addr_equal(hdr->addr3, dev->dev_addr)) { printk(KERN_DEBUG "%s: dropped received packet from %pM" " with no ToDS flag " "(type=0x%02x, subtype=0x%02x)\n", dev->name, hdr->addr2, type >> 2, stype >> 4); hostap_dump_rx_80211(dev->name, skb, rx_stats); } ret = AP_RX_DROP; goto out; } if (sta) { hostap_update_sta_ps2(local, sta, fc & IEEE80211_FCTL_PM, type, stype); sta->rx_packets++; sta->rx_bytes += skb->len; sta->last_rx = jiffies; } if (local->ap->nullfunc_ack && stype == IEEE80211_STYPE_NULLFUNC && fc & IEEE80211_FCTL_TODS) { if (local->hostapd) { prism2_rx_80211(local->apdev, skb, rx_stats, PRISM2_RX_NULLFUNC_ACK); #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT } else { /* some STA f/w's seem to require control::ACK frame * for data::nullfunc, but Prism2 f/w 0.8.0 (at least * from Compaq) does not send this.. Try to generate * ACK for these frames from the host driver to make * power saving work with, e.g., Lucent WaveLAN f/w */ hostap_rx(dev, skb, rx_stats); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ } ret = AP_RX_EXIT; goto out; } out: if (sta) atomic_dec(&sta->users); return ret; } /* Called only as a tasklet (software IRQ) */ int hostap_handle_sta_crypto(local_info_t *local, struct ieee80211_hdr *hdr, struct lib80211_crypt_data **crypt, void **sta_ptr) { struct sta_info *sta; spin_lock(&local->ap->sta_table_lock); sta = ap_get_sta(local->ap, hdr->addr2); if (sta) atomic_inc(&sta->users); spin_unlock(&local->ap->sta_table_lock); if (!sta) return -1; if (sta->crypt) { *crypt = sta->crypt; *sta_ptr = sta; /* hostap_handle_sta_release() will be called to release STA * info */ } else atomic_dec(&sta->users); return 0; } /* Called only as a tasklet (software IRQ) */ int hostap_is_sta_assoc(struct ap_data *ap, u8 *sta_addr) { struct sta_info *sta; int ret = 0; spin_lock(&ap->sta_table_lock); sta = ap_get_sta(ap, sta_addr); if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap) ret = 1; spin_unlock(&ap->sta_table_lock); return ret; } /* Called only as a tasklet (software IRQ) */ int hostap_is_sta_authorized(struct ap_data *ap, u8 *sta_addr) { struct sta_info *sta; int ret = 0; spin_lock(&ap->sta_table_lock); sta = ap_get_sta(ap, sta_addr); if (sta != NULL && (sta->flags & WLAN_STA_ASSOC) && !sta->ap && ((sta->flags & WLAN_STA_AUTHORIZED) || ap->local->ieee_802_1x == 0)) ret = 1; spin_unlock(&ap->sta_table_lock); return ret; } /* Called only as a tasklet (software IRQ) */ int hostap_add_sta(struct ap_data *ap, u8 *sta_addr) { struct sta_info *sta; int ret = 1; if (!ap) return -1; spin_lock(&ap->sta_table_lock); sta = ap_get_sta(ap, sta_addr); if (sta) ret = 0; spin_unlock(&ap->sta_table_lock); if (ret == 1) { sta = ap_add_sta(ap, sta_addr); if (!sta) return -1; sta->flags = WLAN_STA_AUTH | WLAN_STA_ASSOC; sta->ap = 1; memset(sta->supported_rates, 0, sizeof(sta->supported_rates)); /* No way of knowing which rates are supported since we did not * get supported rates element from beacon/assoc req. Assume * that remote end supports all 802.11b rates. 
*/ sta->supported_rates[0] = 0x82; sta->supported_rates[1] = 0x84; sta->supported_rates[2] = 0x0b; sta->supported_rates[3] = 0x16; sta->tx_supp_rates = WLAN_RATE_1M | WLAN_RATE_2M | WLAN_RATE_5M5 | WLAN_RATE_11M; sta->tx_rate = 110; sta->tx_max_rate = sta->tx_rate_idx = 3; } return ret; } /* Called only as a tasklet (software IRQ) */ int hostap_update_rx_stats(struct ap_data *ap, struct ieee80211_hdr *hdr, struct hostap_80211_rx_status *rx_stats) { struct sta_info *sta; if (!ap) return -1; spin_lock(&ap->sta_table_lock); sta = ap_get_sta(ap, hdr->addr2); if (sta) { sta->last_rx_silence = rx_stats->noise; sta->last_rx_signal = rx_stats->signal; sta->last_rx_rate = rx_stats->rate; sta->last_rx_updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; if (rx_stats->rate == 10) sta->rx_count[0]++; else if (rx_stats->rate == 20) sta->rx_count[1]++; else if (rx_stats->rate == 55) sta->rx_count[2]++; else if (rx_stats->rate == 110) sta->rx_count[3]++; } spin_unlock(&ap->sta_table_lock); return sta ? 0 : -1; } void hostap_update_rates(local_info_t *local) { struct sta_info *sta; struct ap_data *ap = local->ap; if (!ap) return; spin_lock_bh(&ap->sta_table_lock); list_for_each_entry(sta, &ap->sta_list, list) { prism2_check_tx_rates(sta); } spin_unlock_bh(&ap->sta_table_lock); } void * ap_crypt_get_ptrs(struct ap_data *ap, u8 *addr, int permanent, struct lib80211_crypt_data ***crypt) { struct sta_info *sta; spin_lock_bh(&ap->sta_table_lock); sta = ap_get_sta(ap, addr); if (sta) atomic_inc(&sta->users); spin_unlock_bh(&ap->sta_table_lock); if (!sta && permanent) sta = ap_add_sta(ap, addr); if (!sta) return NULL; if (permanent) sta->flags |= WLAN_STA_PERM; *crypt = &sta->crypt; return sta; } void hostap_add_wds_links(local_info_t *local) { struct ap_data *ap = local->ap; struct sta_info *sta; spin_lock_bh(&ap->sta_table_lock); list_for_each_entry(sta, &ap->sta_list, list) { if (sta->ap) hostap_wds_link_oper(local, sta->addr, WDS_ADD); } spin_unlock_bh(&ap->sta_table_lock); schedule_work(&local->ap->wds_oper_queue); } void hostap_wds_link_oper(local_info_t *local, u8 *addr, wds_oper_type type) { struct wds_oper_data *entry; entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return; memcpy(entry->addr, addr, ETH_ALEN); entry->type = type; spin_lock_bh(&local->lock); entry->next = local->ap->wds_oper_entries; local->ap->wds_oper_entries = entry; spin_unlock_bh(&local->lock); schedule_work(&local->ap->wds_oper_queue); } EXPORT_SYMBOL(hostap_init_data); EXPORT_SYMBOL(hostap_init_ap_proc); EXPORT_SYMBOL(hostap_free_data); EXPORT_SYMBOL(hostap_check_sta_fw_version); EXPORT_SYMBOL(hostap_handle_sta_tx_exc); #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */
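/*
 * Illustrative sketch (not part of the driver): a user-space model of the
 * TX-rate fallback step performed in hostap_handle_sta_tx_exc() above.
 * The threshold value, rate table and helper names below are assumptions
 * chosen for the example; only the control flow mirrors the driver logic
 * (count consecutive TX failures, then step down to the next lower rate
 * that the peer supports).
 */
#include <stdio.h>

#define EXC_THRESHOLD 2                            /* assumed, cf. WLAN_RATE_DECREASE_THRESHOLD */
static const int rate_tbl[] = { 10, 20, 55, 110 }; /* 802.11b rates in 100 kbit/s units */

struct model_sta {
	unsigned supp_mask;   /* bit i set => rate_tbl[i] supported by the peer */
	int rate_idx;         /* current TX rate index */
	int consecutive_exc;  /* consecutive excessive-retry events */
};

/* Record one TX failure; lower the rate once the threshold is reached. */
static void model_tx_exc(struct model_sta *sta)
{
	if (++sta->consecutive_exc < EXC_THRESHOLD)
		return;
	while (sta->rate_idx > 0) {
		sta->rate_idx--;
		if (sta->supp_mask & (1u << sta->rate_idx))
			break;    /* next lower supported rate found */
	}
	sta->consecutive_exc = 0;
}

int main(void)
{
	struct model_sta sta = { .supp_mask = 0xf, .rate_idx = 3 };

	for (int i = 0; i < 5; i++) {
		model_tx_exc(&sta);
		/* rate_tbl[] is in 100 kbit/s, so append "00" for kbit/s */
		printf("after exc %d: %d00 kbit/s\n", i + 1, rate_tbl[sta.rate_idx]);
	}
	return 0;
}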
linux-master
drivers/net/wireless/intersil/hostap/hostap_ap.c
// SPDX-License-Identifier: GPL-2.0 /* /proc routines for Host AP driver */ #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/export.h> #include <net/lib80211.h> #include "hostap_wlan.h" #include "hostap.h" #define PROC_LIMIT (PAGE_SIZE - 80) #if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS) static int prism2_debug_proc_show(struct seq_file *m, void *v) { local_info_t *local = m->private; int i; seq_printf(m, "next_txfid=%d next_alloc=%d\n", local->next_txfid, local->next_alloc); for (i = 0; i < PRISM2_TXFID_COUNT; i++) seq_printf(m, "FID: tx=%04X intransmit=%04X\n", local->txfid[i], local->intransmitfid[i]); seq_printf(m, "FW TX rate control: %d\n", local->fw_tx_rate_control); seq_printf(m, "beacon_int=%d\n", local->beacon_int); seq_printf(m, "dtim_period=%d\n", local->dtim_period); seq_printf(m, "wds_max_connections=%d\n", local->wds_max_connections); seq_printf(m, "dev_enabled=%d\n", local->dev_enabled); seq_printf(m, "sw_tick_stuck=%d\n", local->sw_tick_stuck); for (i = 0; i < WEP_KEYS; i++) { if (local->crypt_info.crypt[i] && local->crypt_info.crypt[i]->ops) { seq_printf(m, "crypt[%d]=%s\n", i, local->crypt_info.crypt[i]->ops->name); } } seq_printf(m, "pri_only=%d\n", local->pri_only); seq_printf(m, "pci=%d\n", local->func->hw_type == HOSTAP_HW_PCI); seq_printf(m, "sram_type=%d\n", local->sram_type); seq_printf(m, "no_pri=%d\n", local->no_pri); return 0; } #endif #ifdef CONFIG_PROC_FS static int prism2_stats_proc_show(struct seq_file *m, void *v) { local_info_t *local = m->private; struct comm_tallies_sums *sums = &local->comm_tallies; seq_printf(m, "TxUnicastFrames=%u\n", sums->tx_unicast_frames); seq_printf(m, "TxMulticastframes=%u\n", sums->tx_multicast_frames); seq_printf(m, "TxFragments=%u\n", sums->tx_fragments); seq_printf(m, "TxUnicastOctets=%u\n", sums->tx_unicast_octets); seq_printf(m, "TxMulticastOctets=%u\n", sums->tx_multicast_octets); seq_printf(m, "TxDeferredTransmissions=%u\n", sums->tx_deferred_transmissions); seq_printf(m, "TxSingleRetryFrames=%u\n", sums->tx_single_retry_frames); seq_printf(m, "TxMultipleRetryFrames=%u\n", sums->tx_multiple_retry_frames); seq_printf(m, "TxRetryLimitExceeded=%u\n", sums->tx_retry_limit_exceeded); seq_printf(m, "TxDiscards=%u\n", sums->tx_discards); seq_printf(m, "RxUnicastFrames=%u\n", sums->rx_unicast_frames); seq_printf(m, "RxMulticastFrames=%u\n", sums->rx_multicast_frames); seq_printf(m, "RxFragments=%u\n", sums->rx_fragments); seq_printf(m, "RxUnicastOctets=%u\n", sums->rx_unicast_octets); seq_printf(m, "RxMulticastOctets=%u\n", sums->rx_multicast_octets); seq_printf(m, "RxFCSErrors=%u\n", sums->rx_fcs_errors); seq_printf(m, "RxDiscardsNoBuffer=%u\n", sums->rx_discards_no_buffer); seq_printf(m, "TxDiscardsWrongSA=%u\n", sums->tx_discards_wrong_sa); seq_printf(m, "RxDiscardsWEPUndecryptable=%u\n", sums->rx_discards_wep_undecryptable); seq_printf(m, "RxMessageInMsgFragments=%u\n", sums->rx_message_in_msg_fragments); seq_printf(m, "RxMessageInBadMsgFragments=%u\n", sums->rx_message_in_bad_msg_fragments); /* FIX: this may grow too long for one page(?) 
*/ return 0; } #endif static int prism2_wds_proc_show(struct seq_file *m, void *v) { struct list_head *ptr = v; struct hostap_interface *iface; iface = list_entry(ptr, struct hostap_interface, list); if (iface->type == HOSTAP_INTERFACE_WDS) seq_printf(m, "%s\t%pM\n", iface->dev->name, iface->u.wds.remote_addr); return 0; } static void *prism2_wds_proc_start(struct seq_file *m, loff_t *_pos) { local_info_t *local = pde_data(file_inode(m->file)); read_lock_bh(&local->iface_lock); return seq_list_start(&local->hostap_interfaces, *_pos); } static void *prism2_wds_proc_next(struct seq_file *m, void *v, loff_t *_pos) { local_info_t *local = pde_data(file_inode(m->file)); return seq_list_next(v, &local->hostap_interfaces, _pos); } static void prism2_wds_proc_stop(struct seq_file *m, void *v) { local_info_t *local = pde_data(file_inode(m->file)); read_unlock_bh(&local->iface_lock); } static const struct seq_operations prism2_wds_proc_seqops = { .start = prism2_wds_proc_start, .next = prism2_wds_proc_next, .stop = prism2_wds_proc_stop, .show = prism2_wds_proc_show, }; static int prism2_bss_list_proc_show(struct seq_file *m, void *v) { local_info_t *local = pde_data(file_inode(m->file)); struct list_head *ptr = v; struct hostap_bss_info *bss; if (ptr == &local->bss_list) { seq_printf(m, "#BSSID\tlast_update\tcount\tcapab_info\tSSID(txt)\t" "SSID(hex)\tWPA IE\n"); return 0; } bss = list_entry(ptr, struct hostap_bss_info, list); seq_printf(m, "%pM\t%lu\t%u\t0x%x\t", bss->bssid, bss->last_update, bss->count, bss->capab_info); seq_printf(m, "%*pE", (int)bss->ssid_len, bss->ssid); seq_putc(m, '\t'); seq_printf(m, "%*phN", (int)bss->ssid_len, bss->ssid); seq_putc(m, '\t'); seq_printf(m, "%*phN", (int)bss->wpa_ie_len, bss->wpa_ie); seq_putc(m, '\n'); return 0; } static void *prism2_bss_list_proc_start(struct seq_file *m, loff_t *_pos) __acquires(&local->lock) { local_info_t *local = pde_data(file_inode(m->file)); spin_lock_bh(&local->lock); return seq_list_start_head(&local->bss_list, *_pos); } static void *prism2_bss_list_proc_next(struct seq_file *m, void *v, loff_t *_pos) { local_info_t *local = pde_data(file_inode(m->file)); return seq_list_next(v, &local->bss_list, _pos); } static void prism2_bss_list_proc_stop(struct seq_file *m, void *v) __releases(&local->lock) { local_info_t *local = pde_data(file_inode(m->file)); spin_unlock_bh(&local->lock); } static const struct seq_operations prism2_bss_list_proc_seqops = { .start = prism2_bss_list_proc_start, .next = prism2_bss_list_proc_next, .stop = prism2_bss_list_proc_stop, .show = prism2_bss_list_proc_show, }; #ifdef CONFIG_PROC_FS static int prism2_crypt_proc_show(struct seq_file *m, void *v) { local_info_t *local = m->private; int i; seq_printf(m, "tx_keyidx=%d\n", local->crypt_info.tx_keyidx); for (i = 0; i < WEP_KEYS; i++) { if (local->crypt_info.crypt[i] && local->crypt_info.crypt[i]->ops && local->crypt_info.crypt[i]->ops->print_stats) { local->crypt_info.crypt[i]->ops->print_stats( m, local->crypt_info.crypt[i]->priv); } } return 0; } #endif static ssize_t prism2_pda_proc_read(struct file *file, char __user *buf, size_t count, loff_t *_pos) { local_info_t *local = pde_data(file_inode(file)); size_t off; if (local->pda == NULL || *_pos >= PRISM2_PDA_SIZE) return 0; off = *_pos; if (count > PRISM2_PDA_SIZE - off) count = PRISM2_PDA_SIZE - off; if (copy_to_user(buf, local->pda + off, count) != 0) return -EFAULT; *_pos += count; return count; } static const struct proc_ops prism2_pda_proc_ops = { .proc_read = prism2_pda_proc_read, .proc_lseek = 
generic_file_llseek, }; static ssize_t prism2_aux_dump_proc_no_read(struct file *file, char __user *buf, size_t bufsize, loff_t *_pos) { return 0; } static const struct proc_ops prism2_aux_dump_proc_ops = { .proc_read = prism2_aux_dump_proc_no_read, .proc_lseek = default_llseek, }; #ifdef PRISM2_IO_DEBUG static int prism2_io_debug_proc_read(char *page, char **start, off_t off, int count, int *eof, void *data) { local_info_t *local = (local_info_t *) data; int head = local->io_debug_head; int start_bytes, left, copy; if (off + count > PRISM2_IO_DEBUG_SIZE * 4) { *eof = 1; if (off >= PRISM2_IO_DEBUG_SIZE * 4) return 0; count = PRISM2_IO_DEBUG_SIZE * 4 - off; } start_bytes = (PRISM2_IO_DEBUG_SIZE - head) * 4; left = count; if (off < start_bytes) { copy = start_bytes - off; if (copy > count) copy = count; memcpy(page, ((u8 *) &local->io_debug[head]) + off, copy); left -= copy; if (left > 0) memcpy(&page[copy], local->io_debug, left); } else { memcpy(page, ((u8 *) local->io_debug) + (off - start_bytes), left); } *start = page; return count; } #endif /* PRISM2_IO_DEBUG */ #ifndef PRISM2_NO_STATION_MODES static int prism2_scan_results_proc_show(struct seq_file *m, void *v) { local_info_t *local = pde_data(file_inode(m->file)); unsigned long entry; int i, len; struct hfa384x_hostscan_result *scanres; u8 *p; if (v == SEQ_START_TOKEN) { seq_printf(m, "CHID ANL SL BcnInt Capab Rate BSSID ATIM SupRates SSID\n"); return 0; } entry = (unsigned long)v - 2; scanres = &local->last_scan_results[entry]; seq_printf(m, "%d %d %d %d 0x%02x %d %pM %d ", le16_to_cpu(scanres->chid), (s16) le16_to_cpu(scanres->anl), (s16) le16_to_cpu(scanres->sl), le16_to_cpu(scanres->beacon_interval), le16_to_cpu(scanres->capability), le16_to_cpu(scanres->rate), scanres->bssid, le16_to_cpu(scanres->atim)); p = scanres->sup_rates; for (i = 0; i < sizeof(scanres->sup_rates); i++) { if (p[i] == 0) break; seq_printf(m, "<%02x>", p[i]); } seq_putc(m, ' '); p = scanres->ssid; len = le16_to_cpu(scanres->ssid_len); if (len > 32) len = 32; for (i = 0; i < len; i++) { unsigned char c = p[i]; if (c >= 32 && c < 127) seq_putc(m, c); else seq_printf(m, "<%02x>", c); } seq_putc(m, '\n'); return 0; } static void *prism2_scan_results_proc_start(struct seq_file *m, loff_t *_pos) { local_info_t *local = pde_data(file_inode(m->file)); spin_lock_bh(&local->lock); /* We have a header (pos 0) + N results to show (pos 1...N) */ if (*_pos > local->last_scan_results_count) return NULL; return (void *)(unsigned long)(*_pos + 1); /* 0 would be EOF */ } static void *prism2_scan_results_proc_next(struct seq_file *m, void *v, loff_t *_pos) { local_info_t *local = pde_data(file_inode(m->file)); ++*_pos; if (*_pos > local->last_scan_results_count) return NULL; return (void *)(unsigned long)(*_pos + 1); /* 0 would be EOF */ } static void prism2_scan_results_proc_stop(struct seq_file *m, void *v) { local_info_t *local = pde_data(file_inode(m->file)); spin_unlock_bh(&local->lock); } static const struct seq_operations prism2_scan_results_proc_seqops = { .start = prism2_scan_results_proc_start, .next = prism2_scan_results_proc_next, .stop = prism2_scan_results_proc_stop, .show = prism2_scan_results_proc_show, }; #endif /* PRISM2_NO_STATION_MODES */ void hostap_init_proc(local_info_t *local) { local->proc = NULL; if (hostap_proc == NULL) { printk(KERN_WARNING "%s: hostap proc directory not created\n", local->dev->name); return; } local->proc = proc_mkdir(local->ddev->name, hostap_proc); if (local->proc == NULL) { printk(KERN_INFO "/proc/net/hostap/%s creation 
failed\n", local->ddev->name); return; } #ifndef PRISM2_NO_PROCFS_DEBUG proc_create_single_data("debug", 0, local->proc, prism2_debug_proc_show, local); #endif /* PRISM2_NO_PROCFS_DEBUG */ proc_create_single_data("stats", 0, local->proc, prism2_stats_proc_show, local); proc_create_seq_data("wds", 0, local->proc, &prism2_wds_proc_seqops, local); proc_create_data("pda", 0, local->proc, &prism2_pda_proc_ops, local); proc_create_data("aux_dump", 0, local->proc, local->func->read_aux_proc_ops ?: &prism2_aux_dump_proc_ops, local); proc_create_seq_data("bss_list", 0, local->proc, &prism2_bss_list_proc_seqops, local); proc_create_single_data("crypt", 0, local->proc, prism2_crypt_proc_show, local); #ifdef PRISM2_IO_DEBUG proc_create_single_data("io_debug", 0, local->proc, prism2_debug_proc_show, local); #endif /* PRISM2_IO_DEBUG */ #ifndef PRISM2_NO_STATION_MODES proc_create_seq_data("scan_results", 0, local->proc, &prism2_scan_results_proc_seqops, local); #endif /* PRISM2_NO_STATION_MODES */ } void hostap_remove_proc(local_info_t *local) { proc_remove(local->proc); } EXPORT_SYMBOL(hostap_init_proc); EXPORT_SYMBOL(hostap_remove_proc);
linux-master
drivers/net/wireless/intersil/hostap/hostap_proc.c
// SPDX-License-Identifier: GPL-2.0-only #define PRISM2_PCI /* Host AP driver's support for Intersil Prism2.5 PCI cards is based on * driver patches from Reyk Floeter <[email protected]> and * Andy Warner <[email protected]> */ #include <linux/module.h> #include <linux/if.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/wireless.h> #include <net/iw_handler.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/io.h> #include "hostap_wlan.h" static char *dev_info = "hostap_pci"; MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Support for Intersil Prism2.5-based 802.11 wireless LAN " "PCI cards."); MODULE_LICENSE("GPL"); /* struct local_info::hw_priv */ struct hostap_pci_priv { void __iomem *mem_start; }; /* FIX: do we need mb/wmb/rmb with memory operations? */ static const struct pci_device_id prism2_pci_id_table[] = { /* Intersil Prism3 ISL3872 11Mb/s WLAN Controller */ { 0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID }, /* Intersil Prism2.5 ISL3874 11Mb/s WLAN Controller */ { 0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID }, /* Samsung MagicLAN SWL-2210P */ { 0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID }, { 0 } }; #ifdef PRISM2_IO_DEBUG static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v); writeb(v, hw_priv->mem_start + a); spin_unlock_irqrestore(&local->lock, flags); } static inline u8 hfa384x_inb_debug(struct net_device *dev, int a) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; u8 v; iface = netdev_priv(dev); local = iface->local; hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, flags); v = readb(hw_priv->mem_start + a); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v); spin_unlock_irqrestore(&local->lock, flags); return v; } static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v); writew(v, hw_priv->mem_start + a); spin_unlock_irqrestore(&local->lock, flags); } static inline u16 hfa384x_inw_debug(struct net_device *dev, int a) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; local_info_t *local; unsigned long flags; u16 v; iface = netdev_priv(dev); local = iface->local; hw_priv = local->hw_priv; spin_lock_irqsave(&local->lock, flags); v = readw(hw_priv->mem_start + a); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v); spin_unlock_irqrestore(&local->lock, flags); return v; } #define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v)) #define HFA384X_INB(a) hfa384x_inb_debug(dev, (a)) #define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v)) #define HFA384X_INW(a) hfa384x_inw_debug(dev, (a)) #define HFA384X_OUTW_DATA(v,a) hfa384x_outw_debug(dev, (a), le16_to_cpu((v))) #define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw_debug(dev, (a))) #else /* PRISM2_IO_DEBUG */ static inline void hfa384x_outb(struct net_device *dev, int a, u8 v) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; iface = netdev_priv(dev); 
hw_priv = iface->local->hw_priv; writeb(v, hw_priv->mem_start + a); } static inline u8 hfa384x_inb(struct net_device *dev, int a) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; iface = netdev_priv(dev); hw_priv = iface->local->hw_priv; return readb(hw_priv->mem_start + a); } static inline void hfa384x_outw(struct net_device *dev, int a, u16 v) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; iface = netdev_priv(dev); hw_priv = iface->local->hw_priv; writew(v, hw_priv->mem_start + a); } static inline u16 hfa384x_inw(struct net_device *dev, int a) { struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; iface = netdev_priv(dev); hw_priv = iface->local->hw_priv; return readw(hw_priv->mem_start + a); } #define HFA384X_OUTB(v,a) hfa384x_outb(dev, (a), (v)) #define HFA384X_INB(a) hfa384x_inb(dev, (a)) #define HFA384X_OUTW(v,a) hfa384x_outw(dev, (a), (v)) #define HFA384X_INW(a) hfa384x_inw(dev, (a)) #define HFA384X_OUTW_DATA(v,a) hfa384x_outw(dev, (a), le16_to_cpu((v))) #define HFA384X_INW_DATA(a) cpu_to_le16(hfa384x_inw(dev, (a))) #endif /* PRISM2_IO_DEBUG */ static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf, int len) { u16 d_off; __le16 *pos; d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF; pos = (__le16 *) buf; for ( ; len > 1; len -= 2) *pos++ = HFA384X_INW_DATA(d_off); if (len & 1) *((char *) pos) = HFA384X_INB(d_off); return 0; } static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len) { u16 d_off; __le16 *pos; d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF; pos = (__le16 *) buf; for ( ; len > 1; len -= 2) HFA384X_OUTW_DATA(*pos++, d_off); if (len & 1) HFA384X_OUTB(*((char *) pos), d_off); return 0; } /* FIX: This might change at some point.. */ #include "hostap_hw.c" static void prism2_pci_cor_sreset(local_info_t *local) { struct net_device *dev = local->dev; u16 reg; reg = HFA384X_INB(HFA384X_PCICOR_OFF); printk(KERN_DEBUG "%s: Original COR value: 0x%0x\n", dev->name, reg); /* linux-wlan-ng uses extremely long hold and settle times for * COR sreset. A comment in the driver code mentions that the long * delays appear to be necessary. However, at least IBM 22P6901 seems * to work fine with shorter delays. 
* * Longer delays can be configured by uncommenting following line: */ /* #define PRISM2_PCI_USE_LONG_DELAYS */ #ifdef PRISM2_PCI_USE_LONG_DELAYS int i; HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF); mdelay(250); HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF); mdelay(500); /* Wait for f/w to complete initialization (CMD:BUSY == 0) */ i = 2000000 / 10; while ((HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) && --i) udelay(10); #else /* PRISM2_PCI_USE_LONG_DELAYS */ HFA384X_OUTW(reg | 0x0080, HFA384X_PCICOR_OFF); mdelay(2); HFA384X_OUTW(reg & ~0x0080, HFA384X_PCICOR_OFF); mdelay(2); #endif /* PRISM2_PCI_USE_LONG_DELAYS */ if (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY) { printk(KERN_DEBUG "%s: COR sreset timeout\n", dev->name); } } static void prism2_pci_genesis_reset(local_info_t *local, int hcr) { struct net_device *dev = local->dev; HFA384X_OUTW(0x00C5, HFA384X_PCICOR_OFF); mdelay(10); HFA384X_OUTW(hcr, HFA384X_PCIHCR_OFF); mdelay(10); HFA384X_OUTW(0x0045, HFA384X_PCICOR_OFF); mdelay(10); } static struct prism2_helper_functions prism2_pci_funcs = { .card_present = NULL, .cor_sreset = prism2_pci_cor_sreset, .genesis_reset = prism2_pci_genesis_reset, .hw_type = HOSTAP_HW_PCI, }; static int prism2_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) { unsigned long phymem; void __iomem *mem = NULL; local_info_t *local = NULL; struct net_device *dev = NULL; static int cards_found /* = 0 */; int irq_registered = 0; struct hostap_interface *iface; struct hostap_pci_priv *hw_priv; hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL); if (hw_priv == NULL) return -ENOMEM; if (pci_enable_device(pdev)) goto err_out_free; phymem = pci_resource_start(pdev, 0); if (!request_mem_region(phymem, pci_resource_len(pdev, 0), "Prism2")) { printk(KERN_ERR "prism2: Cannot reserve PCI memory region\n"); goto err_out_disable; } mem = pci_ioremap_bar(pdev, 0); if (mem == NULL) { printk(KERN_ERR "prism2: Cannot remap PCI memory region\n") ; goto fail; } dev = prism2_init_local_data(&prism2_pci_funcs, cards_found, &pdev->dev); if (dev == NULL) goto fail; iface = netdev_priv(dev); local = iface->local; local->hw_priv = hw_priv; cards_found++; dev->irq = pdev->irq; hw_priv->mem_start = mem; dev->base_addr = (unsigned long) mem; prism2_pci_cor_sreset(local); pci_set_drvdata(pdev, dev); if (request_irq(dev->irq, prism2_interrupt, IRQF_SHARED, dev->name, dev)) { printk(KERN_WARNING "%s: request_irq failed\n", dev->name); goto fail; } else irq_registered = 1; if (!local->pri_only && prism2_hw_config(dev, 1)) { printk(KERN_DEBUG "%s: hardware initialization failed\n", dev_info); goto fail; } printk(KERN_INFO "%s: Intersil Prism2.5 PCI: " "mem=0x%lx, irq=%d\n", dev->name, phymem, dev->irq); return hostap_hw_ready(dev); fail: if (irq_registered && dev) free_irq(dev->irq, dev); if (mem) iounmap(mem); release_mem_region(phymem, pci_resource_len(pdev, 0)); err_out_disable: pci_disable_device(pdev); prism2_free_local_data(dev); err_out_free: kfree(hw_priv); return -ENODEV; } static void prism2_pci_remove(struct pci_dev *pdev) { struct net_device *dev; struct hostap_interface *iface; void __iomem *mem_start; struct hostap_pci_priv *hw_priv; dev = pci_get_drvdata(pdev); iface = netdev_priv(dev); hw_priv = iface->local->hw_priv; /* Reset the hardware, and ensure interrupts are disabled. 
*/ prism2_pci_cor_sreset(iface->local); hfa384x_disable_interrupts(dev); if (dev->irq) free_irq(dev->irq, dev); mem_start = hw_priv->mem_start; prism2_free_local_data(dev); kfree(hw_priv); iounmap(mem_start); release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); pci_disable_device(pdev); } static int __maybe_unused prism2_pci_suspend(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); if (netif_running(dev)) { netif_stop_queue(dev); netif_device_detach(dev); } prism2_suspend(dev); return 0; } static int __maybe_unused prism2_pci_resume(struct device *dev_d) { struct net_device *dev = dev_get_drvdata(dev_d); prism2_hw_config(dev, 0); if (netif_running(dev)) { netif_device_attach(dev); netif_start_queue(dev); } return 0; } MODULE_DEVICE_TABLE(pci, prism2_pci_id_table); static SIMPLE_DEV_PM_OPS(prism2_pci_pm_ops, prism2_pci_suspend, prism2_pci_resume); static struct pci_driver prism2_pci_driver = { .name = "hostap_pci", .id_table = prism2_pci_id_table, .probe = prism2_pci_probe, .remove = prism2_pci_remove, .driver.pm = &prism2_pci_pm_ops, }; module_pci_driver(prism2_pci_driver);
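/*
 * Illustrative sketch (not part of the driver): a user-space model of the
 * COR soft-reset sequence used by prism2_pci_cor_sreset() above.  The
 * register accessors, register indices and delay routine are stand-ins
 * (assumptions), not real hardware I/O; only the sequence -- set the reset
 * bit, wait, clear it, wait, then poll the BUSY flag with a bounded retry
 * count -- mirrors the driver.
 */
#include <stdio.h>

#define COR_SRESET_BIT 0x0080
#define CMD_BUSY_BIT   0x8000   /* assumed, cf. HFA384X_CMD_BUSY */

/* Stand-in register file so the example is self-contained. */
static unsigned short regs[2];                       /* [0] = COR, [1] = CMD */
static unsigned short reg_read(int r)                { return regs[r]; }
static void reg_write(int r, unsigned short v)       { regs[r] = v; }
static void delay_ms(int ms)                         { (void)ms; /* no-op in the model */ }

/* Returns 0 on success, -1 if the (modelled) firmware stays busy. */
static int model_cor_sreset(void)
{
	unsigned short cor = reg_read(0);
	int retries = 100;

	reg_write(0, cor | COR_SRESET_BIT);   /* assert soft reset */
	delay_ms(2);
	reg_write(0, cor & ~COR_SRESET_BIT);  /* release it */
	delay_ms(2);

	while ((reg_read(1) & CMD_BUSY_BIT) && --retries)
		delay_ms(10);                 /* wait for firmware init */
	return retries ? 0 : -1;
}

int main(void)
{
	regs[1] = 0;   /* pretend the firmware is already idle */
	printf("cor sreset: %s\n", model_cor_sreset() ? "timeout" : "ok");
	return 0;
}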
linux-master
drivers/net/wireless/intersil/hostap/hostap_pci.c
// SPDX-License-Identifier: GPL-2.0 #include <linux/etherdevice.h> #include <linux/slab.h> #include <linux/export.h> #include <net/lib80211.h> #include <linux/if_arp.h> #include "hostap_80211.h" #include "hostap.h" #include "hostap_ap.h" /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ static unsigned char rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ static unsigned char bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; /* No encapsulation header if EtherType < 0x600 (=length) */ void hostap_dump_rx_80211(const char *name, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { struct ieee80211_hdr *hdr; u16 fc; hdr = (struct ieee80211_hdr *) skb->data; printk(KERN_DEBUG "%s: RX signal=%d noise=%d rate=%d len=%d " "jiffies=%ld\n", name, rx_stats->signal, rx_stats->noise, rx_stats->rate, skb->len, jiffies); if (skb->len < 2) return; fc = le16_to_cpu(hdr->frame_control); printk(KERN_DEBUG " FC=0x%04x (type=%d:%d)%s%s", fc, (fc & IEEE80211_FCTL_FTYPE) >> 2, (fc & IEEE80211_FCTL_STYPE) >> 4, fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "", fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : ""); if (skb->len < IEEE80211_DATA_HDR3_LEN) { printk("\n"); return; } printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id), le16_to_cpu(hdr->seq_ctrl)); printk(KERN_DEBUG " A1=%pM", hdr->addr1); printk(" A2=%pM", hdr->addr2); printk(" A3=%pM", hdr->addr3); if (skb->len >= 30) printk(" A4=%pM", hdr->addr4); printk("\n"); } /* Send RX frame to netif with 802.11 (and possible prism) header. * Called from hardware or software IRQ context. */ int prism2_rx_80211(struct net_device *dev, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats, int type) { struct hostap_interface *iface; local_info_t *local; int hdrlen, phdrlen, head_need, tail_need; u16 fc; int prism_header, ret; struct ieee80211_hdr *fhdr; iface = netdev_priv(dev); local = iface->local; if (dev->type == ARPHRD_IEEE80211_PRISM) { if (local->monitor_type == PRISM2_MONITOR_PRISM) { prism_header = 1; phdrlen = sizeof(struct linux_wlan_ng_prism_hdr); } else { /* local->monitor_type == PRISM2_MONITOR_CAPHDR */ prism_header = 2; phdrlen = sizeof(struct linux_wlan_ng_cap_hdr); } } else if (dev->type == ARPHRD_IEEE80211_RADIOTAP) { prism_header = 3; phdrlen = sizeof(struct hostap_radiotap_rx); } else { prism_header = 0; phdrlen = 0; } fhdr = (struct ieee80211_hdr *) skb->data; fc = le16_to_cpu(fhdr->frame_control); if (type == PRISM2_RX_MGMT && (fc & IEEE80211_FCTL_VERS)) { printk(KERN_DEBUG "%s: dropped management frame with header " "version %d\n", dev->name, fc & IEEE80211_FCTL_VERS); dev_kfree_skb_any(skb); return 0; } hdrlen = hostap_80211_get_hdrlen(fhdr->frame_control); /* check if there is enough room for extra data; if not, expand skb * buffer to be large enough for the changes */ head_need = phdrlen; tail_need = 0; #ifdef PRISM2_ADD_BOGUS_CRC tail_need += 4; #endif /* PRISM2_ADD_BOGUS_CRC */ head_need -= skb_headroom(skb); tail_need -= skb_tailroom(skb); if (head_need > 0 || tail_need > 0) { if (pskb_expand_head(skb, head_need > 0 ? head_need : 0, tail_need > 0 ? 
tail_need : 0, GFP_ATOMIC)) { printk(KERN_DEBUG "%s: prism2_rx_80211 failed to " "reallocate skb buffer\n", dev->name); dev_kfree_skb_any(skb); return 0; } } /* We now have an skb with enough head and tail room, so just insert * the extra data */ #ifdef PRISM2_ADD_BOGUS_CRC memset(skb_put(skb, 4), 0xff, 4); /* Prism2 strips CRC */ #endif /* PRISM2_ADD_BOGUS_CRC */ if (prism_header == 1) { struct linux_wlan_ng_prism_hdr *hdr; hdr = skb_push(skb, phdrlen); memset(hdr, 0, phdrlen); hdr->msgcode = LWNG_CAP_DID_BASE; hdr->msglen = sizeof(*hdr); memcpy(hdr->devname, dev->name, sizeof(hdr->devname)); #define LWNG_SETVAL(f,i,s,l,d) \ hdr->f.did = LWNG_CAP_DID_BASE | (i << 12); \ hdr->f.status = s; hdr->f.len = l; hdr->f.data = d LWNG_SETVAL(hosttime, 1, 0, 4, jiffies); LWNG_SETVAL(mactime, 2, 0, 4, rx_stats->mac_time); LWNG_SETVAL(channel, 3, 1 /* no value */, 4, 0); LWNG_SETVAL(rssi, 4, 1 /* no value */, 4, 0); LWNG_SETVAL(sq, 5, 1 /* no value */, 4, 0); LWNG_SETVAL(signal, 6, 0, 4, rx_stats->signal); LWNG_SETVAL(noise, 7, 0, 4, rx_stats->noise); LWNG_SETVAL(rate, 8, 0, 4, rx_stats->rate / 5); LWNG_SETVAL(istx, 9, 0, 4, 0); LWNG_SETVAL(frmlen, 10, 0, 4, skb->len - phdrlen); #undef LWNG_SETVAL } else if (prism_header == 2) { struct linux_wlan_ng_cap_hdr *hdr; hdr = skb_push(skb, phdrlen); memset(hdr, 0, phdrlen); hdr->version = htonl(LWNG_CAPHDR_VERSION); hdr->length = htonl(phdrlen); hdr->mactime = __cpu_to_be64(rx_stats->mac_time); hdr->hosttime = __cpu_to_be64(jiffies); hdr->phytype = htonl(4); /* dss_dot11_b */ hdr->channel = htonl(local->channel); hdr->datarate = htonl(rx_stats->rate); hdr->antenna = htonl(0); /* unknown */ hdr->priority = htonl(0); /* unknown */ hdr->ssi_type = htonl(3); /* raw */ hdr->ssi_signal = htonl(rx_stats->signal); hdr->ssi_noise = htonl(rx_stats->noise); hdr->preamble = htonl(0); /* unknown */ hdr->encoding = htonl(1); /* cck */ } else if (prism_header == 3) { struct hostap_radiotap_rx *hdr; hdr = skb_push(skb, phdrlen); memset(hdr, 0, phdrlen); hdr->hdr.it_len = cpu_to_le16(phdrlen); hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_TSFT) | (1 << IEEE80211_RADIOTAP_CHANNEL) | (1 << IEEE80211_RADIOTAP_RATE) | (1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL) | (1 << IEEE80211_RADIOTAP_DBM_ANTNOISE)); hdr->tsft = cpu_to_le64(rx_stats->mac_time); hdr->chan_freq = cpu_to_le16(freq_list[local->channel - 1]); hdr->chan_flags = cpu_to_le16(IEEE80211_CHAN_CCK | IEEE80211_CHAN_2GHZ); hdr->rate = rx_stats->rate / 5; hdr->dbm_antsignal = rx_stats->signal; hdr->dbm_antnoise = rx_stats->noise; } ret = skb->len - phdrlen; skb->dev = dev; skb_reset_mac_header(skb); skb_pull(skb, hdrlen); if (prism_header) skb_pull(skb, phdrlen); skb->pkt_type = PACKET_OTHERHOST; skb->protocol = cpu_to_be16(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); return ret; } /* Called only as a tasklet (software IRQ) */ static void monitor_rx(struct net_device *dev, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { int len; len = prism2_rx_80211(dev, skb, rx_stats, PRISM2_RX_MONITOR); dev->stats.rx_packets++; dev->stats.rx_bytes += len; } /* Called only as a tasklet (software IRQ) */ static struct prism2_frag_entry * prism2_frag_cache_find(local_info_t *local, unsigned int seq, unsigned int frag, u8 *src, u8 *dst) { struct prism2_frag_entry *entry; int i; for (i = 0; i < PRISM2_FRAG_CACHE_LEN; i++) { entry = &local->frag_cache[i]; if (entry->skb != NULL && time_after(jiffies, entry->first_frag_time + 2 * HZ)) { printk(KERN_DEBUG "%s: expiring fragment cache entry " "seq=%u 
last_frag=%u\n", local->dev->name, entry->seq, entry->last_frag); dev_kfree_skb(entry->skb); entry->skb = NULL; } if (entry->skb != NULL && entry->seq == seq && (entry->last_frag + 1 == frag || frag == -1) && memcmp(entry->src_addr, src, ETH_ALEN) == 0 && memcmp(entry->dst_addr, dst, ETH_ALEN) == 0) return entry; } return NULL; } /* Called only as a tasklet (software IRQ) */ static struct sk_buff * prism2_frag_cache_get(local_info_t *local, struct ieee80211_hdr *hdr) { struct sk_buff *skb = NULL; u16 sc; unsigned int frag, seq; struct prism2_frag_entry *entry; sc = le16_to_cpu(hdr->seq_ctrl); frag = sc & IEEE80211_SCTL_FRAG; seq = (sc & IEEE80211_SCTL_SEQ) >> 4; if (frag == 0) { /* Reserve enough space to fit maximum frame length */ skb = dev_alloc_skb(local->dev->mtu + sizeof(struct ieee80211_hdr) + 8 /* LLC */ + 2 /* alignment */ + 8 /* WEP */ + ETH_ALEN /* WDS */); if (skb == NULL) return NULL; entry = &local->frag_cache[local->frag_next_idx]; local->frag_next_idx++; if (local->frag_next_idx >= PRISM2_FRAG_CACHE_LEN) local->frag_next_idx = 0; if (entry->skb != NULL) dev_kfree_skb(entry->skb); entry->first_frag_time = jiffies; entry->seq = seq; entry->last_frag = frag; entry->skb = skb; memcpy(entry->src_addr, hdr->addr2, ETH_ALEN); memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN); } else { /* received a fragment of a frame for which the head fragment * should have already been received */ entry = prism2_frag_cache_find(local, seq, frag, hdr->addr2, hdr->addr1); if (entry != NULL) { entry->last_frag = frag; skb = entry->skb; } } return skb; } /* Called only as a tasklet (software IRQ) */ static int prism2_frag_cache_invalidate(local_info_t *local, struct ieee80211_hdr *hdr) { u16 sc; unsigned int seq; struct prism2_frag_entry *entry; sc = le16_to_cpu(hdr->seq_ctrl); seq = (sc & IEEE80211_SCTL_SEQ) >> 4; entry = prism2_frag_cache_find(local, seq, -1, hdr->addr2, hdr->addr1); if (entry == NULL) { printk(KERN_DEBUG "%s: could not invalidate fragment cache " "entry (seq=%u)\n", local->dev->name, seq); return -1; } entry->skb = NULL; return 0; } static struct hostap_bss_info *__hostap_get_bss(local_info_t *local, u8 *bssid, u8 *ssid, size_t ssid_len) { struct list_head *ptr; struct hostap_bss_info *bss; list_for_each(ptr, &local->bss_list) { bss = list_entry(ptr, struct hostap_bss_info, list); if (memcmp(bss->bssid, bssid, ETH_ALEN) == 0 && (ssid == NULL || (ssid_len == bss->ssid_len && memcmp(ssid, bss->ssid, ssid_len) == 0))) { list_move(&bss->list, &local->bss_list); return bss; } } return NULL; } static struct hostap_bss_info *__hostap_add_bss(local_info_t *local, u8 *bssid, u8 *ssid, size_t ssid_len) { struct hostap_bss_info *bss; if (local->num_bss_info >= HOSTAP_MAX_BSS_COUNT) { bss = list_entry(local->bss_list.prev, struct hostap_bss_info, list); list_del(&bss->list); local->num_bss_info--; } else { bss = kmalloc(sizeof(*bss), GFP_ATOMIC); if (bss == NULL) return NULL; } memset(bss, 0, sizeof(*bss)); memcpy(bss->bssid, bssid, ETH_ALEN); memcpy(bss->ssid, ssid, ssid_len); bss->ssid_len = ssid_len; local->num_bss_info++; list_add(&bss->list, &local->bss_list); return bss; } static void __hostap_expire_bss(local_info_t *local) { struct hostap_bss_info *bss; while (local->num_bss_info > 0) { bss = list_entry(local->bss_list.prev, struct hostap_bss_info, list); if (!time_after(jiffies, bss->last_update + 60 * HZ)) break; list_del(&bss->list); local->num_bss_info--; kfree(bss); } } /* Both IEEE 802.11 Beacon and Probe Response frames have similar structure, so * the same routine can be used to 
parse both of them. */ static void hostap_rx_sta_beacon(local_info_t *local, struct sk_buff *skb, int stype) { struct hostap_ieee80211_mgmt *mgmt; int left, chan = 0; u8 *pos; u8 *ssid = NULL, *wpa = NULL, *rsn = NULL; size_t ssid_len = 0, wpa_len = 0, rsn_len = 0; struct hostap_bss_info *bss; if (skb->len < IEEE80211_MGMT_HDR_LEN + sizeof(mgmt->u.beacon)) return; mgmt = (struct hostap_ieee80211_mgmt *) skb->data; pos = mgmt->u.beacon.variable; left = skb->len - (pos - skb->data); while (left >= 2) { if (2 + pos[1] > left) return; /* parse failed */ switch (*pos) { case WLAN_EID_SSID: ssid = pos + 2; ssid_len = pos[1]; break; case WLAN_EID_VENDOR_SPECIFIC: if (pos[1] >= 4 && pos[2] == 0x00 && pos[3] == 0x50 && pos[4] == 0xf2 && pos[5] == 1) { wpa = pos; wpa_len = pos[1] + 2; } break; case WLAN_EID_RSN: rsn = pos; rsn_len = pos[1] + 2; break; case WLAN_EID_DS_PARAMS: if (pos[1] >= 1) chan = pos[2]; break; } left -= 2 + pos[1]; pos += 2 + pos[1]; } if (wpa_len > MAX_WPA_IE_LEN) wpa_len = MAX_WPA_IE_LEN; if (rsn_len > MAX_WPA_IE_LEN) rsn_len = MAX_WPA_IE_LEN; if (ssid_len > sizeof(bss->ssid)) ssid_len = sizeof(bss->ssid); spin_lock(&local->lock); bss = __hostap_get_bss(local, mgmt->bssid, ssid, ssid_len); if (bss == NULL) bss = __hostap_add_bss(local, mgmt->bssid, ssid, ssid_len); if (bss) { bss->last_update = jiffies; bss->count++; bss->capab_info = le16_to_cpu(mgmt->u.beacon.capab_info); if (wpa) { memcpy(bss->wpa_ie, wpa, wpa_len); bss->wpa_ie_len = wpa_len; } else bss->wpa_ie_len = 0; if (rsn) { memcpy(bss->rsn_ie, rsn, rsn_len); bss->rsn_ie_len = rsn_len; } else bss->rsn_ie_len = 0; bss->chan = chan; } __hostap_expire_bss(local); spin_unlock(&local->lock); } static int hostap_rx_frame_mgmt(local_info_t *local, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats, u16 type, u16 stype) { if (local->iw_mode == IW_MODE_MASTER) hostap_update_sta_ps(local, (struct ieee80211_hdr *) skb->data); if (local->hostapd && type == IEEE80211_FTYPE_MGMT) { if (stype == IEEE80211_STYPE_BEACON && local->iw_mode == IW_MODE_MASTER) { struct sk_buff *skb2; /* Process beacon frames also in kernel driver to * update STA(AP) table statistics */ skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) hostap_rx(skb2->dev, skb2, rx_stats); } /* send management frames to the user space daemon for * processing */ local->apdevstats.rx_packets++; local->apdevstats.rx_bytes += skb->len; if (local->apdev == NULL) return -1; prism2_rx_80211(local->apdev, skb, rx_stats, PRISM2_RX_MGMT); return 0; } if (local->iw_mode == IW_MODE_MASTER) { if (type != IEEE80211_FTYPE_MGMT && type != IEEE80211_FTYPE_CTL) { printk(KERN_DEBUG "%s: unknown management frame " "(type=0x%02x, stype=0x%02x) dropped\n", skb->dev->name, type >> 2, stype >> 4); return -1; } hostap_rx(skb->dev, skb, rx_stats); return 0; } else if (type == IEEE80211_FTYPE_MGMT && (stype == IEEE80211_STYPE_BEACON || stype == IEEE80211_STYPE_PROBE_RESP)) { hostap_rx_sta_beacon(local, skb, stype); return -1; } else if (type == IEEE80211_FTYPE_MGMT && (stype == IEEE80211_STYPE_ASSOC_RESP || stype == IEEE80211_STYPE_REASSOC_RESP)) { /* Ignore (Re)AssocResp silently since these are not currently * needed but are still received when WPA/RSN mode is enabled. 
*/ return -1; } else { printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: dropped unhandled" " management frame in non-Host AP mode (type=%d:%d)\n", skb->dev->name, type >> 2, stype >> 4); return -1; } } /* Called only as a tasklet (software IRQ) */ static struct net_device *prism2_rx_get_wds(local_info_t *local, u8 *addr) { struct hostap_interface *iface = NULL; struct list_head *ptr; read_lock_bh(&local->iface_lock); list_for_each(ptr, &local->hostap_interfaces) { iface = list_entry(ptr, struct hostap_interface, list); if (iface->type == HOSTAP_INTERFACE_WDS && memcmp(iface->u.wds.remote_addr, addr, ETH_ALEN) == 0) break; iface = NULL; } read_unlock_bh(&local->iface_lock); return iface ? iface->dev : NULL; } static int hostap_rx_frame_wds(local_info_t *local, struct ieee80211_hdr *hdr, u16 fc, struct net_device **wds) { /* FIX: is this really supposed to accept WDS frames only in Master * mode? What about Repeater or Managed with WDS frames? */ if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) != (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS) && (local->iw_mode != IW_MODE_MASTER || !(fc & IEEE80211_FCTL_TODS))) return 0; /* not a WDS frame */ /* Possible WDS frame: either IEEE 802.11 compliant (if FromDS) * or own non-standard frame with 4th address after payload */ if (!ether_addr_equal(hdr->addr1, local->dev->dev_addr) && (hdr->addr1[0] != 0xff || hdr->addr1[1] != 0xff || hdr->addr1[2] != 0xff || hdr->addr1[3] != 0xff || hdr->addr1[4] != 0xff || hdr->addr1[5] != 0xff)) { /* RA (or BSSID) is not ours - drop */ PDEBUG(DEBUG_EXTRA2, "%s: received WDS frame with " "not own or broadcast %s=%pM\n", local->dev->name, fc & IEEE80211_FCTL_FROMDS ? "RA" : "BSSID", hdr->addr1); return -1; } /* check if the frame came from a registered WDS connection */ *wds = prism2_rx_get_wds(local, hdr->addr2); if (*wds == NULL && fc & IEEE80211_FCTL_FROMDS && (local->iw_mode != IW_MODE_INFRA || !(local->wds_type & HOSTAP_WDS_AP_CLIENT) || memcmp(hdr->addr2, local->bssid, ETH_ALEN) != 0)) { /* require that WDS link has been registered with TA or the * frame is from current AP when using 'AP client mode' */ PDEBUG(DEBUG_EXTRA, "%s: received WDS[4 addr] frame " "from unknown TA=%pM\n", local->dev->name, hdr->addr2); if (local->ap && local->ap->autom_ap_wds) hostap_wds_link_oper(local, hdr->addr2, WDS_ADD); return -1; } if (*wds && !(fc & IEEE80211_FCTL_FROMDS) && local->ap && hostap_is_sta_assoc(local->ap, hdr->addr2)) { /* STA is actually associated with us even though it has a * registered WDS link. Assume it is in 'AP client' mode. * Since this is a 3-addr frame, assume it is not (bogus) WDS * frame and process it like any normal ToDS frame from * associated STA. 
*/ *wds = NULL; } return 0; } static int hostap_is_eapol_frame(local_info_t *local, struct sk_buff *skb) { struct net_device *dev = local->dev; u16 fc, ethertype; struct ieee80211_hdr *hdr; u8 *pos; if (skb->len < 24) return 0; hdr = (struct ieee80211_hdr *) skb->data; fc = le16_to_cpu(hdr->frame_control); /* check that the frame is unicast frame to us */ if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_TODS && ether_addr_equal(hdr->addr1, dev->dev_addr) && ether_addr_equal(hdr->addr3, dev->dev_addr)) { /* ToDS frame with own addr BSSID and DA */ } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS && ether_addr_equal(hdr->addr1, dev->dev_addr)) { /* FromDS frame with own addr as DA */ } else return 0; if (skb->len < 24 + 8) return 0; /* check for port access entity Ethernet type */ pos = skb->data + 24; ethertype = (pos[6] << 8) | pos[7]; if (ethertype == ETH_P_PAE) return 1; return 0; } /* Called only as a tasklet (software IRQ) */ static int hostap_rx_frame_decrypt(local_info_t *local, struct sk_buff *skb, struct lib80211_crypt_data *crypt) { struct ieee80211_hdr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) return 0; hdr = (struct ieee80211_hdr *) skb->data; hdrlen = hostap_80211_get_hdrlen(hdr->frame_control); if (local->tkip_countermeasures && strcmp(crypt->ops->name, "TKIP") == 0) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " "received packet from %pM\n", local->dev->name, hdr->addr2); } return -1; } atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_DEBUG "%s: decryption failed (SA=%pM) res=%d\n", local->dev->name, hdr->addr2, res); local->comm_tallies.rx_discards_wep_undecryptable++; return -1; } return res; } /* Called only as a tasklet (software IRQ) */ static int hostap_rx_frame_decrypt_msdu(local_info_t *local, struct sk_buff *skb, int keyidx, struct lib80211_crypt_data *crypt) { struct ieee80211_hdr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) return 0; hdr = (struct ieee80211_hdr *) skb->data; hdrlen = hostap_80211_get_hdrlen(hdr->frame_control); atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed" " (SA=%pM keyidx=%d)\n", local->dev->name, hdr->addr2, keyidx); return -1; } return 0; } /* All received frames are sent to this function. @skb contains the frame in * IEEE 802.11 format, i.e., in the format it was sent over air. * This function is called only as a tasklet (software IRQ). 
*/ void hostap_80211_rx(struct net_device *dev, struct sk_buff *skb, struct hostap_80211_rx_status *rx_stats) { struct hostap_interface *iface; local_info_t *local; struct ieee80211_hdr *hdr; size_t hdrlen; u16 fc, type, stype, sc; struct net_device *wds = NULL; unsigned int frag; u8 *payload; struct sk_buff *skb2 = NULL; u16 ethertype; int frame_authorized = 0; int from_assoc_ap = 0; u8 dst[ETH_ALEN]; u8 src[ETH_ALEN]; struct lib80211_crypt_data *crypt = NULL; void *sta = NULL; int keyidx = 0; iface = netdev_priv(dev); local = iface->local; iface->stats.rx_packets++; iface->stats.rx_bytes += skb->len; /* dev is the master radio device; change this to be the default * virtual interface (this may be changed to WDS device below) */ dev = local->ddev; iface = netdev_priv(dev); hdr = (struct ieee80211_hdr *) skb->data; if (skb->len < 10) goto rx_dropped; fc = le16_to_cpu(hdr->frame_control); type = fc & IEEE80211_FCTL_FTYPE; stype = fc & IEEE80211_FCTL_STYPE; sc = le16_to_cpu(hdr->seq_ctrl); frag = sc & IEEE80211_SCTL_FRAG; hdrlen = hostap_80211_get_hdrlen(hdr->frame_control); /* Put this code here so that we avoid duplicating it in all * Rx paths. - Jean II */ #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ /* If spy monitoring on */ if (iface->spy_data.spy_number > 0) { struct iw_quality wstats; wstats.level = rx_stats->signal; wstats.noise = rx_stats->noise; wstats.updated = IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED | IW_QUAL_QUAL_INVALID | IW_QUAL_DBM; /* Update spy records */ wireless_spy_update(dev, hdr->addr2, &wstats); } #endif /* IW_WIRELESS_SPY */ hostap_update_rx_stats(local->ap, hdr, rx_stats); if (local->iw_mode == IW_MODE_MONITOR) { monitor_rx(dev, skb, rx_stats); return; } if (local->host_decrypt) { int idx = 0; if (skb->len >= hdrlen + 3) idx = skb->data[hdrlen + 3] >> 6; crypt = local->crypt_info.crypt[idx]; sta = NULL; /* Use station specific key to override default keys if the * receiver address is a unicast address ("individual RA"). If * bcrx_sta_key parameter is set, station specific key is used * even with broad/multicast targets (this is against IEEE * 802.11, but makes it easier to use different keys with * stations that do not support WEP key mapping). */ if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key) (void) hostap_handle_sta_crypto(local, hdr, &crypt, &sta); /* allow NULL decrypt to indicate an station specific override * for default encryption */ if (crypt && (crypt->ops == NULL || crypt->ops->decrypt_mpdu == NULL)) crypt = NULL; if (!crypt && (fc & IEEE80211_FCTL_PROTECTED)) { #if 0 /* This seems to be triggered by some (multicast?) * frames from other than current BSS, so just drop the * frames silently instead of filling system log with * these reports. 
*/ printk(KERN_DEBUG "%s: WEP decryption failed (not set)" " (SA=%pM)\n", local->dev->name, hdr->addr2); #endif local->comm_tallies.rx_discards_wep_undecryptable++; goto rx_dropped; } } if (type != IEEE80211_FTYPE_DATA) { if (type == IEEE80211_FTYPE_MGMT && stype == IEEE80211_STYPE_AUTH && fc & IEEE80211_FCTL_PROTECTED && local->host_decrypt && (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0) { printk(KERN_DEBUG "%s: failed to decrypt mgmt::auth " "from %pM\n", dev->name, hdr->addr2); /* TODO: could inform hostapd about this so that it * could send auth failure report */ goto rx_dropped; } if (hostap_rx_frame_mgmt(local, skb, rx_stats, type, stype)) goto rx_dropped; else goto rx_exit; } /* Data frame - extract src/dst addresses */ if (skb->len < IEEE80211_DATA_HDR3_LEN) goto rx_dropped; switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { case IEEE80211_FCTL_FROMDS: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr3, ETH_ALEN); break; case IEEE80211_FCTL_TODS: memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); break; case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: if (skb->len < IEEE80211_DATA_HDR4_LEN) goto rx_dropped; memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr4, ETH_ALEN); break; default: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); break; } if (hostap_rx_frame_wds(local, hdr, fc, &wds)) goto rx_dropped; if (wds) skb->dev = dev = wds; if (local->iw_mode == IW_MODE_MASTER && !wds && (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS && local->stadev && memcmp(hdr->addr2, local->assoc_ap_addr, ETH_ALEN) == 0) { /* Frame from BSSID of the AP for which we are a client */ skb->dev = dev = local->stadev; from_assoc_ap = 1; } if ((local->iw_mode == IW_MODE_MASTER || local->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) { switch (hostap_handle_sta_rx(local, dev, skb, rx_stats, wds != NULL)) { case AP_RX_CONTINUE_NOT_AUTHORIZED: frame_authorized = 0; break; case AP_RX_CONTINUE: frame_authorized = 1; break; case AP_RX_DROP: goto rx_dropped; case AP_RX_EXIT: goto rx_exit; } } /* Nullfunc frames may have PS-bit set, so they must be passed to * hostap_handle_sta_rx() before being dropped here. 
*/ if (stype != IEEE80211_STYPE_DATA && stype != IEEE80211_STYPE_DATA_CFACK && stype != IEEE80211_STYPE_DATA_CFPOLL && stype != IEEE80211_STYPE_DATA_CFACKPOLL) { if (stype != IEEE80211_STYPE_NULLFUNC) printk(KERN_DEBUG "%s: RX: dropped data frame " "with no data (type=0x%02x, subtype=0x%02x)\n", dev->name, type >> 2, stype >> 4); goto rx_dropped; } /* skb: hdr + (possibly fragmented, possibly encrypted) payload */ if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && (keyidx = hostap_rx_frame_decrypt(local, skb, crypt)) < 0) goto rx_dropped; hdr = (struct ieee80211_hdr *) skb->data; /* skb: hdr + (possibly fragmented) plaintext payload */ if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && (frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) { int flen; struct sk_buff *frag_skb = prism2_frag_cache_get(local, hdr); if (!frag_skb) { printk(KERN_DEBUG "%s: Rx cannot get skb from " "fragment cache (morefrag=%d seq=%u frag=%u)\n", dev->name, (fc & IEEE80211_FCTL_MOREFRAGS) != 0, (sc & IEEE80211_SCTL_SEQ) >> 4, frag); goto rx_dropped; } flen = skb->len; if (frag != 0) flen -= hdrlen; if (frag_skb->tail + flen > frag_skb->end) { printk(KERN_WARNING "%s: host decrypted and " "reassembled frame did not fit skb\n", dev->name); prism2_frag_cache_invalidate(local, hdr); goto rx_dropped; } if (frag == 0) { /* copy first fragment (including full headers) into * beginning of the fragment cache skb */ skb_copy_from_linear_data(skb, skb_put(frag_skb, flen), flen); } else { /* append frame payload to the end of the fragment * cache skb */ skb_copy_from_linear_data_offset(skb, hdrlen, skb_put(frag_skb, flen), flen); } dev_kfree_skb(skb); skb = NULL; if (fc & IEEE80211_FCTL_MOREFRAGS) { /* more fragments expected - leave the skb in fragment * cache for now; it will be delivered to upper layers * after all fragments have been received */ goto rx_exit; } /* this was the last fragment and the frame will be * delivered, so remove skb from fragment cache */ skb = frag_skb; hdr = (struct ieee80211_hdr *) skb->data; prism2_frag_cache_invalidate(local, hdr); } /* skb: hdr + (possible reassembled) full MSDU payload; possibly still * encrypted/authenticated */ if (local->host_decrypt && (fc & IEEE80211_FCTL_PROTECTED) && hostap_rx_frame_decrypt_msdu(local, skb, keyidx, crypt)) goto rx_dropped; hdr = (struct ieee80211_hdr *) skb->data; if (crypt && !(fc & IEEE80211_FCTL_PROTECTED) && !local->open_wep) { if (local->ieee_802_1x && hostap_is_eapol_frame(local, skb)) { /* pass unencrypted EAPOL frames even if encryption is * configured */ PDEBUG(DEBUG_EXTRA2, "%s: RX: IEEE 802.1X - passing " "unencrypted EAPOL frame\n", local->dev->name); } else { printk(KERN_DEBUG "%s: encryption configured, but RX " "frame not encrypted (SA=%pM)\n", local->dev->name, hdr->addr2); goto rx_dropped; } } if (local->drop_unencrypted && !(fc & IEEE80211_FCTL_PROTECTED) && !hostap_is_eapol_frame(local, skb)) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: dropped unencrypted RX data " "frame from %pM (drop_unencrypted=1)\n", dev->name, hdr->addr2); } goto rx_dropped; } /* skb: hdr + (possible reassembled) full plaintext payload */ payload = skb->data + hdrlen; ethertype = (payload[6] << 8) | payload[7]; /* If IEEE 802.1X is used, check whether the port is authorized to send * the received frame. 
*/ if (local->ieee_802_1x && local->iw_mode == IW_MODE_MASTER) { if (ethertype == ETH_P_PAE) { PDEBUG(DEBUG_EXTRA2, "%s: RX: IEEE 802.1X frame\n", dev->name); if (local->hostapd && local->apdev) { /* Send IEEE 802.1X frames to the user * space daemon for processing */ prism2_rx_80211(local->apdev, skb, rx_stats, PRISM2_RX_MGMT); local->apdevstats.rx_packets++; local->apdevstats.rx_bytes += skb->len; goto rx_exit; } } else if (!frame_authorized) { printk(KERN_DEBUG "%s: dropped frame from " "unauthorized port (IEEE 802.1X): " "ethertype=0x%04x\n", dev->name, ethertype); goto rx_dropped; } } /* convert hdr + possible LLC headers into Ethernet header */ if (skb->len - hdrlen >= 8 && ((memcmp(payload, rfc1042_header, 6) == 0 && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || memcmp(payload, bridge_tunnel_header, 6) == 0)) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ skb_pull(skb, hdrlen + 6); memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); } else { __be16 len; /* Leave Ethernet header part of hdr and full payload */ skb_pull(skb, hdrlen); len = htons(skb->len); memcpy(skb_push(skb, 2), &len, 2); memcpy(skb_push(skb, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(skb, ETH_ALEN), dst, ETH_ALEN); } if (wds && ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_TODS) && skb->len >= ETH_HLEN + ETH_ALEN) { /* Non-standard frame: get addr4 from its bogus location after * the payload */ skb_copy_from_linear_data_offset(skb, skb->len - ETH_ALEN, skb->data + ETH_ALEN, ETH_ALEN); skb_trim(skb, skb->len - ETH_ALEN); } dev->stats.rx_packets++; dev->stats.rx_bytes += skb->len; if (local->iw_mode == IW_MODE_MASTER && !wds && local->ap->bridge_packets) { if (dst[0] & 0x01) { /* copy multicast frame both to the higher layers and * to the wireless media */ local->ap->bridged_multicast++; skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2 == NULL) printk(KERN_DEBUG "%s: skb_clone failed for " "multicast frame\n", dev->name); } else if (hostap_is_sta_authorized(local->ap, dst)) { /* send frame directly to the associated STA using * wireless media and not passing to higher layers */ local->ap->bridged_unicast++; skb2 = skb; skb = NULL; } } if (skb2 != NULL) { /* send to wireless media */ skb2->dev = dev; skb2->protocol = cpu_to_be16(ETH_P_802_3); skb_reset_mac_header(skb2); skb_reset_network_header(skb2); /* skb2->network_header += ETH_HLEN; */ dev_queue_xmit(skb2); } if (skb) { skb->protocol = eth_type_trans(skb, dev); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } rx_exit: if (sta) hostap_handle_sta_release(sta); return; rx_dropped: dev_kfree_skb(skb); dev->stats.rx_dropped++; goto rx_exit; } EXPORT_SYMBOL(hostap_80211_rx);
linux-master
drivers/net/wireless/intersil/hostap/hostap_80211_rx.c
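The tail of hostap_80211_rx() above converts the received 802.11 + LLC/SNAP headers back into an Ethernet header, stripping RFC1042 or Bridge-Tunnel encapsulation when present and otherwise treating the frame as raw 802.3 with a length field. As a rough illustration of that decapsulation decision, here is a standalone user-space sketch (not driver code; classify_payload and the enum names are invented for this example):

/*
 * Minimal standalone sketch of the RX decapsulation decision made at the
 * end of hostap_80211_rx(): if the payload starts with an RFC1042 or
 * Bridge-Tunnel SNAP header, the EtherType already follows it; otherwise
 * the frame is delivered as raw 802.3 with a length field prepended.
 */
#include <stdint.h>
#include <string.h>
#include <stdio.h>

static const uint8_t rfc1042_hdr[6]       = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const uint8_t bridge_tunnel_hdr[6] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

enum rx_decap_kind { DECAP_SNAP, DECAP_RAW_8023 };

/* payload points at the first byte after the 802.11 header */
static enum rx_decap_kind classify_payload(const uint8_t *payload, size_t len)
{
	uint16_t ethertype;

	if (len < 8)
		return DECAP_RAW_8023;

	ethertype = (payload[6] << 8) | payload[7];

	/* RFC1042 encapsulation, except AARP/IPX which use Bridge-Tunnel */
	if (memcmp(payload, rfc1042_hdr, 6) == 0 &&
	    ethertype != 0x80f3 /* ETH_P_AARP */ &&
	    ethertype != 0x8137 /* ETH_P_IPX */)
		return DECAP_SNAP;

	if (memcmp(payload, bridge_tunnel_hdr, 6) == 0)
		return DECAP_SNAP;

	return DECAP_RAW_8023;
}

int main(void)
{
	/* RFC1042 SNAP header followed by EtherType 0x0800 (IPv4) */
	const uint8_t ip_payload[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00, 0x08, 0x00 };

	printf("decap=%d\n", classify_payload(ip_payload, sizeof(ip_payload)));
	return 0;
}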
// SPDX-License-Identifier: GPL-2.0-only #define PRISM2_PLX /* Host AP driver's support for PC Cards on PCI adapters using PLX9052 is * based on: * - Host AP driver patch from [email protected] * - linux-wlan-ng driver, Copyright (C) AbsoluteValue Systems, Inc. */ #include <linux/module.h> #include <linux/if.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/wireless.h> #include <net/iw_handler.h> #include <linux/ioport.h> #include <linux/pci.h> #include <asm/io.h> #include "hostap_wlan.h" static char *dev_info = "hostap_plx"; MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN " "cards (PLX)."); MODULE_LICENSE("GPL"); static int ignore_cis; module_param(ignore_cis, int, 0444); MODULE_PARM_DESC(ignore_cis, "Do not verify manfid information in CIS"); /* struct local_info::hw_priv */ struct hostap_plx_priv { void __iomem *attr_mem; unsigned int cor_offset; }; #define PLX_MIN_ATTR_LEN 512 /* at least 2 x 256 is needed for CIS */ #define COR_SRESET 0x80 #define COR_LEVLREQ 0x40 #define COR_ENABLE_FUNC 0x01 /* PCI Configuration Registers */ #define PLX_PCIIPR 0x3d /* PCI Interrupt Pin */ /* Local Configuration Registers */ #define PLX_INTCSR 0x4c /* Interrupt Control/Status Register */ #define PLX_INTCSR_PCI_INTEN BIT(6) /* PCI Interrupt Enable */ #define PLX_CNTRL 0x50 #define PLX_CNTRL_SERIAL_EEPROM_PRESENT BIT(28) #define PLXDEV(vendor,dev,str) { vendor, dev, PCI_ANY_ID, PCI_ANY_ID } static const struct pci_device_id prism2_plx_id_table[] = { PLXDEV(0x10b7, 0x7770, "3Com AirConnect PCI 777A"), PLXDEV(0x111a, 0x1023, "Siemens SpeedStream SS1023"), PLXDEV(0x126c, 0x8030, "Nortel emobility"), PLXDEV(0x1562, 0x0001, "Symbol LA-4123"), PLXDEV(0x1385, 0x4100, "Netgear MA301"), PLXDEV(0x15e8, 0x0130, "National Datacomm NCP130 (PLX9052)"), PLXDEV(0x15e8, 0x0131, "National Datacomm NCP130 (TMD7160)"), PLXDEV(0x1638, 0x1100, "Eumitcom WL11000"), PLXDEV(0x16ab, 0x1100, "Global Sun Tech GL24110P"), PLXDEV(0x16ab, 0x1101, "Global Sun Tech GL24110P (?)"), PLXDEV(0x16ab, 0x1102, "Linksys WPC11 with WDT11"), PLXDEV(0x16ab, 0x1103, "Longshine 8031"), PLXDEV(0x16ec, 0x3685, "US Robotics USR2415"), PLXDEV(0xec80, 0xec00, "Belkin F5D6000"), { 0 } }; /* Array of known Prism2/2.5 PC Card manufactured ids. If your card's manfid * is not listed here, you will need to add it here to get the driver * initialized. */ static struct prism2_plx_manfid { u16 manfid1, manfid2; } prism2_plx_known_manfids[] = { { 0x000b, 0x7110 } /* D-Link DWL-650 Rev. P1 */, { 0x000b, 0x7300 } /* Philips 802.11b WLAN PCMCIA */, { 0x0101, 0x0777 } /* 3Com AirConnect PCI 777A */, { 0x0126, 0x8000 } /* Proxim RangeLAN */, { 0x0138, 0x0002 } /* Compaq WL100 */, { 0x0156, 0x0002 } /* Intersil Prism II Ref. 
Design (and others) */, { 0x026f, 0x030b } /* Buffalo WLI-CF-S11G */, { 0x0274, 0x1612 } /* Linksys WPC11 Ver 2.5 */, { 0x0274, 0x1613 } /* Linksys WPC11 Ver 3 */, { 0x028a, 0x0002 } /* D-Link DRC-650 */, { 0x0250, 0x0002 } /* Samsung SWL2000-N */, { 0xc250, 0x0002 } /* EMTAC A2424i */, { 0xd601, 0x0002 } /* Z-Com XI300 */, { 0xd601, 0x0005 } /* Zcomax XI-325H 200mW */, { 0, 0} }; #ifdef PRISM2_IO_DEBUG static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v); outb(v, dev->base_addr + a); spin_unlock_irqrestore(&local->lock, flags); } static inline u8 hfa384x_inb_debug(struct net_device *dev, int a) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; u8 v; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); v = inb(dev->base_addr + a); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v); spin_unlock_irqrestore(&local->lock, flags); return v; } static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v); outw(v, dev->base_addr + a); spin_unlock_irqrestore(&local->lock, flags); } static inline u16 hfa384x_inw_debug(struct net_device *dev, int a) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; u16 v; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); v = inw(dev->base_addr + a); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v); spin_unlock_irqrestore(&local->lock, flags); return v; } static inline void hfa384x_outsw_debug(struct net_device *dev, int a, u8 *buf, int wc) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc); outsw(dev->base_addr + a, buf, wc); spin_unlock_irqrestore(&local->lock, flags); } static inline void hfa384x_insw_debug(struct net_device *dev, int a, u8 *buf, int wc) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc); insw(dev->base_addr + a, buf, wc); spin_unlock_irqrestore(&local->lock, flags); } #define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v)) #define HFA384X_INB(a) hfa384x_inb_debug(dev, (a)) #define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v)) #define HFA384X_INW(a) hfa384x_inw_debug(dev, (a)) #define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc)) #define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, (a), (buf), (wc)) #else /* PRISM2_IO_DEBUG */ #define HFA384X_OUTB(v,a) outb((v), dev->base_addr + (a)) #define HFA384X_INB(a) inb(dev->base_addr + (a)) #define HFA384X_OUTW(v,a) outw((v), dev->base_addr + (a)) #define HFA384X_INW(a) inw(dev->base_addr + (a)) #define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc) #define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc) #endif /* PRISM2_IO_DEBUG */ static int hfa384x_from_bap(struct net_device *dev, u16 bap, void 
*buf, int len) { u16 d_off; u16 *pos; d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF; pos = (u16 *) buf; if (len / 2) HFA384X_INSW(d_off, buf, len / 2); pos += len / 2; if (len & 1) *((char *) pos) = HFA384X_INB(d_off); return 0; } static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len) { u16 d_off; u16 *pos; d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF; pos = (u16 *) buf; if (len / 2) HFA384X_OUTSW(d_off, buf, len / 2); pos += len / 2; if (len & 1) HFA384X_OUTB(*((char *) pos), d_off); return 0; } /* FIX: This might change at some point.. */ #include "hostap_hw.c" static void prism2_plx_cor_sreset(local_info_t *local) { unsigned char corsave; struct hostap_plx_priv *hw_priv = local->hw_priv; printk(KERN_DEBUG "%s: Doing reset via direct COR access.\n", dev_info); /* Set sreset bit of COR and clear it after hold time */ if (hw_priv->attr_mem == NULL) { /* TMD7160 - COR at card's first I/O addr */ corsave = inb(hw_priv->cor_offset); outb(corsave | COR_SRESET, hw_priv->cor_offset); mdelay(2); outb(corsave & ~COR_SRESET, hw_priv->cor_offset); mdelay(2); } else { /* PLX9052 */ corsave = readb(hw_priv->attr_mem + hw_priv->cor_offset); writeb(corsave | COR_SRESET, hw_priv->attr_mem + hw_priv->cor_offset); mdelay(2); writeb(corsave & ~COR_SRESET, hw_priv->attr_mem + hw_priv->cor_offset); mdelay(2); } } static void prism2_plx_genesis_reset(local_info_t *local, int hcr) { unsigned char corsave; struct hostap_plx_priv *hw_priv = local->hw_priv; if (hw_priv->attr_mem == NULL) { /* TMD7160 - COR at card's first I/O addr */ corsave = inb(hw_priv->cor_offset); outb(corsave | COR_SRESET, hw_priv->cor_offset); mdelay(10); outb(hcr, hw_priv->cor_offset + 2); mdelay(10); outb(corsave & ~COR_SRESET, hw_priv->cor_offset); mdelay(10); } else { /* PLX9052 */ corsave = readb(hw_priv->attr_mem + hw_priv->cor_offset); writeb(corsave | COR_SRESET, hw_priv->attr_mem + hw_priv->cor_offset); mdelay(10); writeb(hcr, hw_priv->attr_mem + hw_priv->cor_offset + 2); mdelay(10); writeb(corsave & ~COR_SRESET, hw_priv->attr_mem + hw_priv->cor_offset); mdelay(10); } } static struct prism2_helper_functions prism2_plx_funcs = { .card_present = NULL, .cor_sreset = prism2_plx_cor_sreset, .genesis_reset = prism2_plx_genesis_reset, .hw_type = HOSTAP_HW_PLX, }; static int prism2_plx_check_cis(void __iomem *attr_mem, int attr_len, unsigned int *cor_offset, unsigned int *cor_index) { #define CISTPL_CONFIG 0x1A #define CISTPL_MANFID 0x20 #define CISTPL_END 0xFF #define CIS_MAX_LEN 256 u8 *cis; int i, pos; unsigned int rmsz, rasz, manfid1, manfid2; struct prism2_plx_manfid *manfid; cis = kmalloc(CIS_MAX_LEN, GFP_KERNEL); if (cis == NULL) return -ENOMEM; /* read CIS; it is in even offsets in the beginning of attr_mem */ for (i = 0; i < CIS_MAX_LEN; i++) cis[i] = readb(attr_mem + 2 * i); printk(KERN_DEBUG "%s: CIS: %6ph ...\n", dev_info, cis); /* set reasonable defaults for Prism2 cards just in case CIS parsing * fails */ *cor_offset = 0x3e0; *cor_index = 0x01; manfid1 = manfid2 = 0; pos = 0; while (pos < CIS_MAX_LEN - 1 && cis[pos] != CISTPL_END) { if (pos + 2 + cis[pos + 1] > CIS_MAX_LEN) goto cis_error; switch (cis[pos]) { case CISTPL_CONFIG: if (cis[pos + 1] < 2) goto cis_error; rmsz = (cis[pos + 2] & 0x3c) >> 2; rasz = cis[pos + 2] & 0x03; if (4 + rasz + rmsz > cis[pos + 1]) goto cis_error; *cor_index = cis[pos + 3] & 0x3F; *cor_offset = 0; for (i = 0; i <= rasz; i++) *cor_offset += cis[pos + 4 + i] << (8 * i); printk(KERN_DEBUG "%s: cor_index=0x%x " "cor_offset=0x%x\n", dev_info, 
*cor_index, *cor_offset); if (*cor_offset > attr_len) { printk(KERN_ERR "%s: COR offset not within " "attr_mem\n", dev_info); kfree(cis); return -1; } break; case CISTPL_MANFID: if (cis[pos + 1] < 4) goto cis_error; manfid1 = cis[pos + 2] + (cis[pos + 3] << 8); manfid2 = cis[pos + 4] + (cis[pos + 5] << 8); printk(KERN_DEBUG "%s: manfid=0x%04x, 0x%04x\n", dev_info, manfid1, manfid2); break; } pos += cis[pos + 1] + 2; } if (pos >= CIS_MAX_LEN || cis[pos] != CISTPL_END) goto cis_error; for (manfid = prism2_plx_known_manfids; manfid->manfid1 != 0; manfid++) if (manfid1 == manfid->manfid1 && manfid2 == manfid->manfid2) { kfree(cis); return 0; } printk(KERN_INFO "%s: unknown manfid 0x%04x, 0x%04x - assuming this is" " not supported card\n", dev_info, manfid1, manfid2); goto fail; cis_error: printk(KERN_WARNING "%s: invalid CIS data\n", dev_info); fail: kfree(cis); if (ignore_cis) { printk(KERN_INFO "%s: ignore_cis parameter set - ignoring " "errors during CIS verification\n", dev_info); return 0; } return -1; } static int prism2_plx_probe(struct pci_dev *pdev, const struct pci_device_id *id) { unsigned int pccard_ioaddr, plx_ioaddr; unsigned long pccard_attr_mem; unsigned int pccard_attr_len; void __iomem *attr_mem = NULL; unsigned int cor_offset = 0, cor_index = 0; u32 reg; local_info_t *local = NULL; struct net_device *dev = NULL; struct hostap_interface *iface; static int cards_found /* = 0 */; int irq_registered = 0; int tmd7160; struct hostap_plx_priv *hw_priv; hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL); if (hw_priv == NULL) return -ENOMEM; if (pci_enable_device(pdev)) goto err_out_free; /* National Datacomm NCP130 based on TMD7160, not PLX9052. */ tmd7160 = (pdev->vendor == 0x15e8) && (pdev->device == 0x0131); plx_ioaddr = pci_resource_start(pdev, 1); pccard_ioaddr = pci_resource_start(pdev, tmd7160 ? 
2 : 3); if (tmd7160) { /* TMD7160 */ attr_mem = NULL; /* no access to PC Card attribute memory */ printk(KERN_INFO "TMD7160 PCI/PCMCIA adapter: io=0x%x, " "irq=%d, pccard_io=0x%x\n", plx_ioaddr, pdev->irq, pccard_ioaddr); cor_offset = plx_ioaddr; cor_index = 0x04; outb(cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, plx_ioaddr); mdelay(1); reg = inb(plx_ioaddr); if (reg != (cor_index | COR_LEVLREQ | COR_ENABLE_FUNC)) { printk(KERN_ERR "%s: Error setting COR (expected=" "0x%02x, was=0x%02x)\n", dev_info, cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, reg); goto fail; } } else { /* PLX9052 */ pccard_attr_mem = pci_resource_start(pdev, 2); pccard_attr_len = pci_resource_len(pdev, 2); if (pccard_attr_len < PLX_MIN_ATTR_LEN) goto fail; attr_mem = ioremap(pccard_attr_mem, pccard_attr_len); if (attr_mem == NULL) { printk(KERN_ERR "%s: cannot remap attr_mem\n", dev_info); goto fail; } printk(KERN_INFO "PLX9052 PCI/PCMCIA adapter: " "mem=0x%lx, plx_io=0x%x, irq=%d, pccard_io=0x%x\n", pccard_attr_mem, plx_ioaddr, pdev->irq, pccard_ioaddr); if (prism2_plx_check_cis(attr_mem, pccard_attr_len, &cor_offset, &cor_index)) { printk(KERN_INFO "Unknown PC Card CIS - not a " "Prism2/2.5 card?\n"); goto fail; } printk(KERN_DEBUG "Prism2/2.5 PC Card detected in PLX9052 " "adapter\n"); /* Write COR to enable PC Card */ writeb(cor_index | COR_LEVLREQ | COR_ENABLE_FUNC, attr_mem + cor_offset); /* Enable PCI interrupts if they are not already enabled */ reg = inl(plx_ioaddr + PLX_INTCSR); printk(KERN_DEBUG "PLX_INTCSR=0x%x\n", reg); if (!(reg & PLX_INTCSR_PCI_INTEN)) { outl(reg | PLX_INTCSR_PCI_INTEN, plx_ioaddr + PLX_INTCSR); if (!(inl(plx_ioaddr + PLX_INTCSR) & PLX_INTCSR_PCI_INTEN)) { printk(KERN_WARNING "%s: Could not enable " "Local Interrupts\n", dev_info); goto fail; } } reg = inl(plx_ioaddr + PLX_CNTRL); printk(KERN_DEBUG "PLX_CNTRL=0x%x (Serial EEPROM " "present=%d)\n", reg, (reg & PLX_CNTRL_SERIAL_EEPROM_PRESENT) != 0); /* should set PLX_PCIIPR to 0x01 (INTA#) if Serial EEPROM is * not present; but are there really such cards in use(?) */ } dev = prism2_init_local_data(&prism2_plx_funcs, cards_found, &pdev->dev); if (dev == NULL) goto fail; iface = netdev_priv(dev); local = iface->local; local->hw_priv = hw_priv; cards_found++; dev->irq = pdev->irq; dev->base_addr = pccard_ioaddr; hw_priv->attr_mem = attr_mem; hw_priv->cor_offset = cor_offset; pci_set_drvdata(pdev, dev); if (request_irq(dev->irq, prism2_interrupt, IRQF_SHARED, dev->name, dev)) { printk(KERN_WARNING "%s: request_irq failed\n", dev->name); goto fail; } else irq_registered = 1; if (prism2_hw_config(dev, 1)) { printk(KERN_DEBUG "%s: hardware initialization failed\n", dev_info); goto fail; } return hostap_hw_ready(dev); fail: if (irq_registered && dev) free_irq(dev->irq, dev); if (attr_mem) iounmap(attr_mem); pci_disable_device(pdev); prism2_free_local_data(dev); err_out_free: kfree(hw_priv); return -ENODEV; } static void prism2_plx_remove(struct pci_dev *pdev) { struct net_device *dev; struct hostap_interface *iface; struct hostap_plx_priv *hw_priv; dev = pci_get_drvdata(pdev); iface = netdev_priv(dev); hw_priv = iface->local->hw_priv; /* Reset the hardware, and ensure interrupts are disabled. 
*/ prism2_plx_cor_sreset(iface->local); hfa384x_disable_interrupts(dev); if (hw_priv->attr_mem) iounmap(hw_priv->attr_mem); if (dev->irq) free_irq(dev->irq, dev); prism2_free_local_data(dev); kfree(hw_priv); pci_disable_device(pdev); } MODULE_DEVICE_TABLE(pci, prism2_plx_id_table); static struct pci_driver prism2_plx_driver = { .name = "hostap_plx", .id_table = prism2_plx_id_table, .probe = prism2_plx_probe, .remove = prism2_plx_remove, }; module_pci_driver(prism2_plx_driver);
linux-master
drivers/net/wireless/intersil/hostap/hostap_plx.c
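prism2_plx_check_cis() above walks the PC Card CIS as a sequence of <code, length, data> tuples terminated by CISTPL_END, pulling the COR location from CISTPL_CONFIG and the manufacturer ID from CISTPL_MANFID. The following is a minimal standalone sketch of that tuple walk for the MANFID case only; find_manfid and the sample buffer are hypothetical and not driver code:

/*
 * Illustrative user-space sketch of the CIS tuple walk performed by
 * prism2_plx_check_cis(): each tuple is <code, length, data...>,
 * terminated by CISTPL_END (0xFF). Only the MANFID tuple is decoded here.
 */
#include <stdint.h>
#include <stdio.h>

#define CISTPL_MANFID 0x20
#define CISTPL_END    0xFF

static int find_manfid(const uint8_t *cis, int len, uint16_t *m1, uint16_t *m2)
{
	int pos = 0;

	while (pos < len - 1 && cis[pos] != CISTPL_END) {
		int tlen = cis[pos + 1];

		if (pos + 2 + tlen > len)
			return -1;		/* truncated tuple chain */

		if (cis[pos] == CISTPL_MANFID && tlen >= 4) {
			*m1 = cis[pos + 2] | (cis[pos + 3] << 8);
			*m2 = cis[pos + 4] | (cis[pos + 5] << 8);
			return 0;
		}
		pos += tlen + 2;
	}
	return -1;
}

int main(void)
{
	/* hypothetical CIS fragment: manfid 0x0156/0x0002 (Intersil reference design) */
	const uint8_t cis[] = { CISTPL_MANFID, 0x04, 0x56, 0x01, 0x02, 0x00, CISTPL_END };
	uint16_t m1, m2;

	if (find_manfid(cis, sizeof(cis), &m1, &m2) == 0)
		printf("manfid=0x%04x, 0x%04x\n", m1, m2);
	return 0;
}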
// SPDX-License-Identifier: GPL-2.0-only /* * Host AP (software wireless LAN access point) driver for * Intersil Prism2/2.5/3 - hostap.o module, common routines * * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen * <[email protected]> * Copyright (c) 2002-2005, Jouni Malinen <[email protected]> */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/proc_fs.h> #include <linux/if_arp.h> #include <linux/delay.h> #include <linux/random.h> #include <linux/workqueue.h> #include <linux/kmod.h> #include <linux/rtnetlink.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <net/net_namespace.h> #include <net/iw_handler.h> #include <net/lib80211.h> #include <linux/uaccess.h> #include "hostap_wlan.h" #include "hostap_80211.h" #include "hostap_ap.h" #include "hostap.h" MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Host AP common routines"); MODULE_LICENSE("GPL"); #define TX_TIMEOUT (2 * HZ) #define PRISM2_MAX_FRAME_SIZE 2304 #define PRISM2_MIN_MTU 256 /* FIX: */ #define PRISM2_MAX_MTU (PRISM2_MAX_FRAME_SIZE - (6 /* LLC */ + 8 /* WEP */)) struct net_device * hostap_add_interface(struct local_info *local, int type, int rtnl_locked, const char *prefix, const char *name) { struct net_device *dev, *mdev; struct hostap_interface *iface; int ret; dev = alloc_etherdev(sizeof(struct hostap_interface)); if (dev == NULL) return NULL; iface = netdev_priv(dev); iface->dev = dev; iface->local = local; iface->type = type; list_add(&iface->list, &local->hostap_interfaces); mdev = local->dev; eth_hw_addr_inherit(dev, mdev); dev->base_addr = mdev->base_addr; dev->irq = mdev->irq; dev->mem_start = mdev->mem_start; dev->mem_end = mdev->mem_end; hostap_setup_dev(dev, local, type); dev->needs_free_netdev = true; sprintf(dev->name, "%s%s", prefix, name); if (!rtnl_locked) rtnl_lock(); SET_NETDEV_DEV(dev, mdev->dev.parent); ret = register_netdevice(dev); if (!rtnl_locked) rtnl_unlock(); if (ret < 0) { printk(KERN_WARNING "%s: failed to add new netdevice!\n", dev->name); free_netdev(dev); return NULL; } printk(KERN_DEBUG "%s: registered netdevice %s\n", mdev->name, dev->name); return dev; } void hostap_remove_interface(struct net_device *dev, int rtnl_locked, int remove_from_list) { struct hostap_interface *iface; if (!dev) return; iface = netdev_priv(dev); if (remove_from_list) { list_del(&iface->list); } if (dev == iface->local->ddev) iface->local->ddev = NULL; else if (dev == iface->local->apdev) iface->local->apdev = NULL; else if (dev == iface->local->stadev) iface->local->stadev = NULL; if (rtnl_locked) unregister_netdevice(dev); else unregister_netdev(dev); /* 'dev->needs_free_netdev = true' implies device data, including * private data, will be freed when the device is removed */ } static inline int prism2_wds_special_addr(u8 *addr) { if (addr[0] || addr[1] || addr[2] || addr[3] || addr[4] || addr[5]) return 0; return 1; } int prism2_wds_add(local_info_t *local, u8 *remote_addr, int rtnl_locked) { struct net_device *dev; struct list_head *ptr; struct hostap_interface *iface, *empty, *match; empty = match = NULL; read_lock_bh(&local->iface_lock); list_for_each(ptr, &local->hostap_interfaces) { iface = list_entry(ptr, struct hostap_interface, list); if (iface->type != HOSTAP_INTERFACE_WDS) continue; if (prism2_wds_special_addr(iface->u.wds.remote_addr)) empty = iface; else if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) { match = iface; break; } } if (!match && empty && !prism2_wds_special_addr(remote_addr)) { /* take 
pre-allocated entry into use */ memcpy(empty->u.wds.remote_addr, remote_addr, ETH_ALEN); read_unlock_bh(&local->iface_lock); printk(KERN_DEBUG "%s: using pre-allocated WDS netdevice %s\n", local->dev->name, empty->dev->name); return 0; } read_unlock_bh(&local->iface_lock); if (!prism2_wds_special_addr(remote_addr)) { if (match) return -EEXIST; hostap_add_sta(local->ap, remote_addr); } if (local->wds_connections >= local->wds_max_connections) return -ENOBUFS; /* verify that there is room for wds# postfix in the interface name */ if (strlen(local->dev->name) >= IFNAMSIZ - 5) { printk(KERN_DEBUG "'%s' too long base device name\n", local->dev->name); return -EINVAL; } dev = hostap_add_interface(local, HOSTAP_INTERFACE_WDS, rtnl_locked, local->ddev->name, "wds%d"); if (dev == NULL) return -ENOMEM; iface = netdev_priv(dev); memcpy(iface->u.wds.remote_addr, remote_addr, ETH_ALEN); local->wds_connections++; return 0; } int prism2_wds_del(local_info_t *local, u8 *remote_addr, int rtnl_locked, int do_not_remove) { unsigned long flags; struct list_head *ptr; struct hostap_interface *iface, *selected = NULL; write_lock_irqsave(&local->iface_lock, flags); list_for_each(ptr, &local->hostap_interfaces) { iface = list_entry(ptr, struct hostap_interface, list); if (iface->type != HOSTAP_INTERFACE_WDS) continue; if (ether_addr_equal(iface->u.wds.remote_addr, remote_addr)) { selected = iface; break; } } if (selected && !do_not_remove) list_del(&selected->list); write_unlock_irqrestore(&local->iface_lock, flags); if (selected) { if (do_not_remove) eth_zero_addr(selected->u.wds.remote_addr); else { hostap_remove_interface(selected->dev, rtnl_locked, 0); local->wds_connections--; } } return selected ? 0 : -ENODEV; } u16 hostap_tx_callback_register(local_info_t *local, void (*func)(struct sk_buff *, int ok, void *), void *data) { unsigned long flags; struct hostap_tx_callback_info *entry; entry = kmalloc(sizeof(*entry), GFP_KERNEL); if (entry == NULL) return 0; entry->func = func; entry->data = data; spin_lock_irqsave(&local->lock, flags); entry->idx = local->tx_callback ? local->tx_callback->idx + 1 : 1; entry->next = local->tx_callback; local->tx_callback = entry; spin_unlock_irqrestore(&local->lock, flags); return entry->idx; } int hostap_tx_callback_unregister(local_info_t *local, u16 idx) { unsigned long flags; struct hostap_tx_callback_info *cb, *prev = NULL; spin_lock_irqsave(&local->lock, flags); cb = local->tx_callback; while (cb != NULL && cb->idx != idx) { prev = cb; cb = cb->next; } if (cb) { if (prev == NULL) local->tx_callback = cb->next; else prev->next = cb->next; kfree(cb); } spin_unlock_irqrestore(&local->lock, flags); return cb ? 
0 : -1; } /* val is in host byte order */ int hostap_set_word(struct net_device *dev, int rid, u16 val) { struct hostap_interface *iface; __le16 tmp = cpu_to_le16(val); iface = netdev_priv(dev); return iface->local->func->set_rid(dev, rid, &tmp, 2); } int hostap_set_string(struct net_device *dev, int rid, const char *val) { struct hostap_interface *iface; char buf[MAX_SSID_LEN + 2]; int len; iface = netdev_priv(dev); len = strlen(val); if (len > MAX_SSID_LEN) return -1; memset(buf, 0, sizeof(buf)); buf[0] = len; /* little endian 16 bit word */ memcpy(buf + 2, val, len); return iface->local->func->set_rid(dev, rid, &buf, MAX_SSID_LEN + 2); } u16 hostap_get_porttype(local_info_t *local) { if (local->iw_mode == IW_MODE_ADHOC && local->pseudo_adhoc) return HFA384X_PORTTYPE_PSEUDO_IBSS; if (local->iw_mode == IW_MODE_ADHOC) return HFA384X_PORTTYPE_IBSS; if (local->iw_mode == IW_MODE_INFRA) return HFA384X_PORTTYPE_BSS; if (local->iw_mode == IW_MODE_REPEAT) return HFA384X_PORTTYPE_WDS; if (local->iw_mode == IW_MODE_MONITOR) return HFA384X_PORTTYPE_PSEUDO_IBSS; return HFA384X_PORTTYPE_HOSTAP; } int hostap_set_encryption(local_info_t *local) { u16 val, old_val; int i, keylen, len, idx; char keybuf[WEP_KEY_LEN + 1]; enum { NONE, WEP, OTHER } encrypt_type; idx = local->crypt_info.tx_keyidx; if (local->crypt_info.crypt[idx] == NULL || local->crypt_info.crypt[idx]->ops == NULL) encrypt_type = NONE; else if (strcmp(local->crypt_info.crypt[idx]->ops->name, "WEP") == 0) encrypt_type = WEP; else encrypt_type = OTHER; if (local->func->get_rid(local->dev, HFA384X_RID_CNFWEPFLAGS, &val, 2, 1) < 0) { printk(KERN_DEBUG "Could not read current WEP flags.\n"); goto fail; } le16_to_cpus(&val); old_val = val; if (encrypt_type != NONE || local->privacy_invoked) val |= HFA384X_WEPFLAGS_PRIVACYINVOKED; else val &= ~HFA384X_WEPFLAGS_PRIVACYINVOKED; if (local->open_wep || encrypt_type == NONE || ((local->ieee_802_1x || local->wpa) && local->host_decrypt)) val &= ~HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED; else val |= HFA384X_WEPFLAGS_EXCLUDEUNENCRYPTED; if ((encrypt_type != NONE || local->privacy_invoked) && (encrypt_type == OTHER || local->host_encrypt)) val |= HFA384X_WEPFLAGS_HOSTENCRYPT; else val &= ~HFA384X_WEPFLAGS_HOSTENCRYPT; if ((encrypt_type != NONE || local->privacy_invoked) && (encrypt_type == OTHER || local->host_decrypt)) val |= HFA384X_WEPFLAGS_HOSTDECRYPT; else val &= ~HFA384X_WEPFLAGS_HOSTDECRYPT; if (val != old_val && hostap_set_word(local->dev, HFA384X_RID_CNFWEPFLAGS, val)) { printk(KERN_DEBUG "Could not write new WEP flags (0x%x)\n", val); goto fail; } if (encrypt_type != WEP) return 0; /* 104-bit support seems to require that all the keys are set to the * same keylen */ keylen = 6; /* first 5 octets */ len = local->crypt_info.crypt[idx]->ops->get_key(keybuf, sizeof(keybuf), NULL, local->crypt_info.crypt[idx]->priv); if (idx >= 0 && idx < WEP_KEYS && len > 5) keylen = WEP_KEY_LEN + 1; /* first 13 octets */ for (i = 0; i < WEP_KEYS; i++) { memset(keybuf, 0, sizeof(keybuf)); if (local->crypt_info.crypt[i]) { (void) local->crypt_info.crypt[i]->ops->get_key( keybuf, sizeof(keybuf), NULL, local->crypt_info.crypt[i]->priv); } if (local->func->set_rid(local->dev, HFA384X_RID_CNFDEFAULTKEY0 + i, keybuf, keylen)) { printk(KERN_DEBUG "Could not set key %d (len=%d)\n", i, keylen); goto fail; } } if (hostap_set_word(local->dev, HFA384X_RID_CNFWEPDEFAULTKEYID, idx)) { printk(KERN_DEBUG "Could not set default keyid %d\n", idx); goto fail; } return 0; fail: printk(KERN_DEBUG "%s: encryption setup failed\n", 
local->dev->name); return -1; } int hostap_set_antsel(local_info_t *local) { u16 val; int ret = 0; if (local->antsel_tx != HOSTAP_ANTSEL_DO_NOT_TOUCH && local->func->cmd(local->dev, HFA384X_CMDCODE_READMIF, HFA386X_CR_TX_CONFIGURE, NULL, &val) == 0) { val &= ~(BIT(2) | BIT(1)); switch (local->antsel_tx) { case HOSTAP_ANTSEL_DIVERSITY: val |= BIT(1); break; case HOSTAP_ANTSEL_LOW: break; case HOSTAP_ANTSEL_HIGH: val |= BIT(2); break; } if (local->func->cmd(local->dev, HFA384X_CMDCODE_WRITEMIF, HFA386X_CR_TX_CONFIGURE, &val, NULL)) { printk(KERN_INFO "%s: setting TX AntSel failed\n", local->dev->name); ret = -1; } } if (local->antsel_rx != HOSTAP_ANTSEL_DO_NOT_TOUCH && local->func->cmd(local->dev, HFA384X_CMDCODE_READMIF, HFA386X_CR_RX_CONFIGURE, NULL, &val) == 0) { val &= ~(BIT(1) | BIT(0)); switch (local->antsel_rx) { case HOSTAP_ANTSEL_DIVERSITY: break; case HOSTAP_ANTSEL_LOW: val |= BIT(0); break; case HOSTAP_ANTSEL_HIGH: val |= BIT(0) | BIT(1); break; } if (local->func->cmd(local->dev, HFA384X_CMDCODE_WRITEMIF, HFA386X_CR_RX_CONFIGURE, &val, NULL)) { printk(KERN_INFO "%s: setting RX AntSel failed\n", local->dev->name); ret = -1; } } return ret; } int hostap_set_roaming(local_info_t *local) { u16 val; switch (local->host_roaming) { case 1: val = HFA384X_ROAMING_HOST; break; case 2: val = HFA384X_ROAMING_DISABLED; break; case 0: default: val = HFA384X_ROAMING_FIRMWARE; break; } return hostap_set_word(local->dev, HFA384X_RID_CNFROAMINGMODE, val); } int hostap_set_auth_algs(local_info_t *local) { int val = local->auth_algs; /* At least STA f/w v0.6.2 seems to have issues with cnfAuthentication * set to include both Open and Shared Key flags. It tries to use * Shared Key authentication in that case even if WEP keys are not * configured.. STA f/w v0.7.6 is able to handle such configuration, * but it is unknown when this was fixed between 0.6.2 .. 0.7.6. */ if (local->sta_fw_ver < PRISM2_FW_VER(0,7,0) && val != PRISM2_AUTH_OPEN && val != PRISM2_AUTH_SHARED_KEY) val = PRISM2_AUTH_OPEN; if (hostap_set_word(local->dev, HFA384X_RID_CNFAUTHENTICATION, val)) { printk(KERN_INFO "%s: cnfAuthentication setting to 0x%x " "failed\n", local->dev->name, local->auth_algs); return -EINVAL; } return 0; } void hostap_dump_rx_header(const char *name, const struct hfa384x_rx_frame *rx) { u16 status, fc; status = __le16_to_cpu(rx->status); printk(KERN_DEBUG "%s: RX status=0x%04x (port=%d, type=%d, " "fcserr=%d) silence=%d signal=%d rate=%d rxflow=%d; " "jiffies=%ld\n", name, status, (status >> 8) & 0x07, status >> 13, status & 1, rx->silence, rx->signal, rx->rate, rx->rxflow, jiffies); fc = __le16_to_cpu(rx->frame_control); printk(KERN_DEBUG " FC=0x%04x (type=%d:%d) dur=0x%04x seq=0x%04x " "data_len=%d%s%s\n", fc, (fc & IEEE80211_FCTL_FTYPE) >> 2, (fc & IEEE80211_FCTL_STYPE) >> 4, __le16_to_cpu(rx->duration_id), __le16_to_cpu(rx->seq_ctrl), __le16_to_cpu(rx->data_len), fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "", fc & IEEE80211_FCTL_FROMDS ? 
" [FromDS]" : ""); printk(KERN_DEBUG " A1=%pM A2=%pM A3=%pM A4=%pM\n", rx->addr1, rx->addr2, rx->addr3, rx->addr4); printk(KERN_DEBUG " dst=%pM src=%pM len=%d\n", rx->dst_addr, rx->src_addr, __be16_to_cpu(rx->len)); } void hostap_dump_tx_header(const char *name, const struct hfa384x_tx_frame *tx) { u16 fc; printk(KERN_DEBUG "%s: TX status=0x%04x retry_count=%d tx_rate=%d " "tx_control=0x%04x; jiffies=%ld\n", name, __le16_to_cpu(tx->status), tx->retry_count, tx->tx_rate, __le16_to_cpu(tx->tx_control), jiffies); fc = __le16_to_cpu(tx->frame_control); printk(KERN_DEBUG " FC=0x%04x (type=%d:%d) dur=0x%04x seq=0x%04x " "data_len=%d%s%s\n", fc, (fc & IEEE80211_FCTL_FTYPE) >> 2, (fc & IEEE80211_FCTL_STYPE) >> 4, __le16_to_cpu(tx->duration_id), __le16_to_cpu(tx->seq_ctrl), __le16_to_cpu(tx->data_len), fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "", fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : ""); printk(KERN_DEBUG " A1=%pM A2=%pM A3=%pM A4=%pM\n", tx->addr1, tx->addr2, tx->addr3, tx->addr4); printk(KERN_DEBUG " dst=%pM src=%pM len=%d\n", tx->dst_addr, tx->src_addr, __be16_to_cpu(tx->len)); } static int hostap_80211_header_parse(const struct sk_buff *skb, unsigned char *haddr) { memcpy(haddr, skb_mac_header(skb) + 10, ETH_ALEN); /* addr2 */ return ETH_ALEN; } int hostap_80211_get_hdrlen(__le16 fc) { if (ieee80211_is_data(fc) && ieee80211_has_a4 (fc)) return 30; /* Addr4 */ else if (ieee80211_is_cts(fc) || ieee80211_is_ack(fc)) return 10; else if (ieee80211_is_ctl(fc)) return 16; return 24; } static int prism2_close(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; PDEBUG(DEBUG_FLOW, "%s: prism2_close\n", dev->name); iface = netdev_priv(dev); local = iface->local; if (dev == local->ddev) { prism2_sta_deauth(local, WLAN_REASON_DEAUTH_LEAVING); } #ifndef PRISM2_NO_KERNEL_IEEE80211_MGMT if (!local->hostapd && dev == local->dev && (!local->func->card_present || local->func->card_present(local)) && local->hw_ready && local->ap && local->iw_mode == IW_MODE_MASTER) hostap_deauth_all_stas(dev, local->ap, 1); #endif /* PRISM2_NO_KERNEL_IEEE80211_MGMT */ if (dev == local->dev) { local->func->hw_shutdown(dev, HOSTAP_HW_ENABLE_CMDCOMPL); } if (netif_running(dev)) { netif_stop_queue(dev); netif_device_detach(dev); } cancel_work_sync(&local->reset_queue); cancel_work_sync(&local->set_multicast_list_queue); cancel_work_sync(&local->set_tim_queue); #ifndef PRISM2_NO_STATION_MODES cancel_work_sync(&local->info_queue); #endif cancel_work_sync(&local->comms_qual_update); module_put(local->hw_module); local->num_dev_open--; if (dev != local->dev && local->dev->flags & IFF_UP && local->master_dev_auto_open && local->num_dev_open == 1) { /* Close master radio interface automatically if it was also * opened automatically and we are now closing the last * remaining non-master device. 
*/ dev_close(local->dev); } return 0; } static int prism2_open(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; PDEBUG(DEBUG_FLOW, "%s: prism2_open\n", dev->name); iface = netdev_priv(dev); local = iface->local; if (local->no_pri) { printk(KERN_DEBUG "%s: could not set interface UP - no PRI " "f/w\n", dev->name); return -ENODEV; } if ((local->func->card_present && !local->func->card_present(local)) || local->hw_downloading) return -ENODEV; if (!try_module_get(local->hw_module)) return -ENODEV; local->num_dev_open++; if (!local->dev_enabled && local->func->hw_enable(dev, 1)) { printk(KERN_WARNING "%s: could not enable MAC port\n", dev->name); prism2_close(dev); return -ENODEV; } if (!local->dev_enabled) prism2_callback(local, PRISM2_CALLBACK_ENABLE); local->dev_enabled = 1; if (dev != local->dev && !(local->dev->flags & IFF_UP)) { /* Master radio interface is needed for all operation, so open * it automatically when any virtual net_device is opened. */ local->master_dev_auto_open = 1; dev_open(local->dev, NULL); } netif_device_attach(dev); netif_start_queue(dev); return 0; } static int prism2_set_mac_address(struct net_device *dev, void *p) { struct hostap_interface *iface; local_info_t *local; struct list_head *ptr; struct sockaddr *addr = p; iface = netdev_priv(dev); local = iface->local; if (local->func->set_rid(dev, HFA384X_RID_CNFOWNMACADDR, addr->sa_data, ETH_ALEN) < 0 || local->func->reset_port(dev)) return -EINVAL; read_lock_bh(&local->iface_lock); list_for_each(ptr, &local->hostap_interfaces) { iface = list_entry(ptr, struct hostap_interface, list); eth_hw_addr_set(iface->dev, addr->sa_data); } eth_hw_addr_set(local->dev, addr->sa_data); read_unlock_bh(&local->iface_lock); return 0; } /* TODO: to be further implemented as soon as Prism2 fully supports * GroupAddresses and correct documentation is available */ void hostap_set_multicast_list_queue(struct work_struct *work) { local_info_t *local = container_of(work, local_info_t, set_multicast_list_queue); struct net_device *dev = local->dev; if (hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, local->is_promisc)) { printk(KERN_INFO "%s: %sabling promiscuous mode failed\n", dev->name, local->is_promisc ? "en" : "dis"); } } static void hostap_set_multicast_list(struct net_device *dev) { #if 0 /* FIX: promiscuous mode seems to be causing a lot of problems with * some station firmware versions (FCSErr frames, invalid MACPort, etc. * corrupted incoming frames). This code is now commented out while the * problems are investigated. */ struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; if ((dev->flags & IFF_ALLMULTI) || (dev->flags & IFF_PROMISC)) { local->is_promisc = 1; } else { local->is_promisc = 0; } schedule_work(&local->set_multicast_list_queue); #endif } static void prism2_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct hostap_interface *iface; local_info_t *local; struct hfa384x_regs regs; iface = netdev_priv(dev); local = iface->local; printk(KERN_WARNING "%s Tx timed out! 
Resetting card\n", dev->name); netif_stop_queue(local->dev); local->func->read_regs(dev, &regs); printk(KERN_DEBUG "%s: CMD=%04x EVSTAT=%04x " "OFFSET0=%04x OFFSET1=%04x SWSUPPORT0=%04x\n", dev->name, regs.cmd, regs.evstat, regs.offset0, regs.offset1, regs.swsupport0); local->func->schedule_reset(local); } const struct header_ops hostap_80211_ops = { .create = eth_header, .cache = eth_header_cache, .cache_update = eth_header_cache_update, .parse = hostap_80211_header_parse, }; EXPORT_SYMBOL(hostap_80211_ops); static const struct net_device_ops hostap_netdev_ops = { .ndo_start_xmit = hostap_data_start_xmit, .ndo_open = prism2_open, .ndo_stop = prism2_close, .ndo_do_ioctl = hostap_ioctl, .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, .ndo_tx_timeout = prism2_tx_timeout, .ndo_validate_addr = eth_validate_addr, }; static const struct net_device_ops hostap_mgmt_netdev_ops = { .ndo_start_xmit = hostap_mgmt_start_xmit, .ndo_open = prism2_open, .ndo_stop = prism2_close, .ndo_do_ioctl = hostap_ioctl, .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, .ndo_tx_timeout = prism2_tx_timeout, .ndo_validate_addr = eth_validate_addr, }; static const struct net_device_ops hostap_master_ops = { .ndo_start_xmit = hostap_master_start_xmit, .ndo_open = prism2_open, .ndo_stop = prism2_close, .ndo_do_ioctl = hostap_ioctl, .ndo_siocdevprivate = hostap_siocdevprivate, .ndo_set_mac_address = prism2_set_mac_address, .ndo_set_rx_mode = hostap_set_multicast_list, .ndo_tx_timeout = prism2_tx_timeout, .ndo_validate_addr = eth_validate_addr, }; void hostap_setup_dev(struct net_device *dev, local_info_t *local, int type) { struct hostap_interface *iface; iface = netdev_priv(dev); ether_setup(dev); dev->min_mtu = PRISM2_MIN_MTU; dev->max_mtu = PRISM2_MAX_MTU; dev->priv_flags &= ~IFF_TX_SKB_SHARING; /* kernel callbacks */ if (iface) { /* Currently, we point to the proper spy_data only on * the main_dev. This could be fixed. 
Jean II */ iface->wireless_data.spy_data = &iface->spy_data; dev->wireless_data = &iface->wireless_data; } dev->wireless_handlers = &hostap_iw_handler_def; dev->watchdog_timeo = TX_TIMEOUT; switch(type) { case HOSTAP_INTERFACE_AP: dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */ dev->netdev_ops = &hostap_mgmt_netdev_ops; dev->type = ARPHRD_IEEE80211; dev->header_ops = &hostap_80211_ops; break; case HOSTAP_INTERFACE_MASTER: dev->netdev_ops = &hostap_master_ops; break; default: dev->priv_flags |= IFF_NO_QUEUE; /* use main radio device queue */ dev->netdev_ops = &hostap_netdev_ops; } dev->mtu = local->mtu; dev->ethtool_ops = &prism2_ethtool_ops; } static int hostap_enable_hostapd(local_info_t *local, int rtnl_locked) { struct net_device *dev = local->dev; if (local->apdev) return -EEXIST; printk(KERN_DEBUG "%s: enabling hostapd mode\n", dev->name); local->apdev = hostap_add_interface(local, HOSTAP_INTERFACE_AP, rtnl_locked, local->ddev->name, "ap"); if (local->apdev == NULL) return -ENOMEM; return 0; } static int hostap_disable_hostapd(local_info_t *local, int rtnl_locked) { struct net_device *dev = local->dev; printk(KERN_DEBUG "%s: disabling hostapd mode\n", dev->name); hostap_remove_interface(local->apdev, rtnl_locked, 1); local->apdev = NULL; return 0; } static int hostap_enable_hostapd_sta(local_info_t *local, int rtnl_locked) { struct net_device *dev = local->dev; if (local->stadev) return -EEXIST; printk(KERN_DEBUG "%s: enabling hostapd STA mode\n", dev->name); local->stadev = hostap_add_interface(local, HOSTAP_INTERFACE_STA, rtnl_locked, local->ddev->name, "sta"); if (local->stadev == NULL) return -ENOMEM; return 0; } static int hostap_disable_hostapd_sta(local_info_t *local, int rtnl_locked) { struct net_device *dev = local->dev; printk(KERN_DEBUG "%s: disabling hostapd mode\n", dev->name); hostap_remove_interface(local->stadev, rtnl_locked, 1); local->stadev = NULL; return 0; } int hostap_set_hostapd(local_info_t *local, int val, int rtnl_locked) { int ret; if (val < 0 || val > 1) return -EINVAL; if (local->hostapd == val) return 0; if (val) { ret = hostap_enable_hostapd(local, rtnl_locked); if (ret == 0) local->hostapd = 1; } else { local->hostapd = 0; ret = hostap_disable_hostapd(local, rtnl_locked); if (ret != 0) local->hostapd = 1; } return ret; } int hostap_set_hostapd_sta(local_info_t *local, int val, int rtnl_locked) { int ret; if (val < 0 || val > 1) return -EINVAL; if (local->hostapd_sta == val) return 0; if (val) { ret = hostap_enable_hostapd_sta(local, rtnl_locked); if (ret == 0) local->hostapd_sta = 1; } else { local->hostapd_sta = 0; ret = hostap_disable_hostapd_sta(local, rtnl_locked); if (ret != 0) local->hostapd_sta = 1; } return ret; } int prism2_update_comms_qual(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; int ret = 0; struct hfa384x_comms_quality sq; iface = netdev_priv(dev); local = iface->local; if (!local->sta_fw_ver) ret = -1; else if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1)) { if (local->func->get_rid(local->dev, HFA384X_RID_DBMCOMMSQUALITY, &sq, sizeof(sq), 1) >= 0) { local->comms_qual = (s16) le16_to_cpu(sq.comm_qual); local->avg_signal = (s16) le16_to_cpu(sq.signal_level); local->avg_noise = (s16) le16_to_cpu(sq.noise_level); local->last_comms_qual_update = jiffies; } else ret = -1; } else { if (local->func->get_rid(local->dev, HFA384X_RID_COMMSQUALITY, &sq, sizeof(sq), 1) >= 0) { local->comms_qual = le16_to_cpu(sq.comm_qual); local->avg_signal = HFA384X_LEVEL_TO_dBm( le16_to_cpu(sq.signal_level)); 
local->avg_noise = HFA384X_LEVEL_TO_dBm( le16_to_cpu(sq.noise_level)); local->last_comms_qual_update = jiffies; } else ret = -1; } return ret; } int prism2_sta_send_mgmt(local_info_t *local, u8 *dst, u16 stype, u8 *body, size_t bodylen) { struct sk_buff *skb; struct hostap_ieee80211_mgmt *mgmt; struct hostap_skb_tx_data *meta; struct net_device *dev = local->dev; skb = dev_alloc_skb(IEEE80211_MGMT_HDR_LEN + bodylen); if (skb == NULL) return -ENOMEM; mgmt = skb_put_zero(skb, IEEE80211_MGMT_HDR_LEN); mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | stype); memcpy(mgmt->da, dst, ETH_ALEN); memcpy(mgmt->sa, dev->dev_addr, ETH_ALEN); memcpy(mgmt->bssid, dst, ETH_ALEN); if (body) skb_put_data(skb, body, bodylen); meta = (struct hostap_skb_tx_data *) skb->cb; memset(meta, 0, sizeof(*meta)); meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; meta->iface = netdev_priv(dev); skb->dev = dev; skb_reset_mac_header(skb); skb_reset_network_header(skb); dev_queue_xmit(skb); return 0; } int prism2_sta_deauth(local_info_t *local, u16 reason) { union iwreq_data wrqu; int ret; __le16 val = cpu_to_le16(reason); if (local->iw_mode != IW_MODE_INFRA || is_zero_ether_addr(local->bssid) || ether_addr_equal(local->bssid, "\x44\x44\x44\x44\x44\x44")) return 0; ret = prism2_sta_send_mgmt(local, local->bssid, IEEE80211_STYPE_DEAUTH, (u8 *) &val, 2); eth_zero_addr(wrqu.ap_addr.sa_data); wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL); return ret; } struct proc_dir_entry *hostap_proc; static int __init hostap_init(void) { if (init_net.proc_net != NULL) { hostap_proc = proc_mkdir("hostap", init_net.proc_net); if (!hostap_proc) printk(KERN_WARNING "Failed to mkdir " "/proc/net/hostap\n"); } else hostap_proc = NULL; return 0; } static void __exit hostap_exit(void) { if (hostap_proc != NULL) { hostap_proc = NULL; remove_proc_entry("hostap", init_net.proc_net); } } EXPORT_SYMBOL(hostap_set_word); EXPORT_SYMBOL(hostap_set_string); EXPORT_SYMBOL(hostap_get_porttype); EXPORT_SYMBOL(hostap_set_encryption); EXPORT_SYMBOL(hostap_set_antsel); EXPORT_SYMBOL(hostap_set_roaming); EXPORT_SYMBOL(hostap_set_auth_algs); EXPORT_SYMBOL(hostap_dump_rx_header); EXPORT_SYMBOL(hostap_dump_tx_header); EXPORT_SYMBOL(hostap_80211_get_hdrlen); EXPORT_SYMBOL(hostap_setup_dev); EXPORT_SYMBOL(hostap_set_multicast_list_queue); EXPORT_SYMBOL(hostap_set_hostapd); EXPORT_SYMBOL(hostap_set_hostapd_sta); EXPORT_SYMBOL(hostap_add_interface); EXPORT_SYMBOL(hostap_remove_interface); EXPORT_SYMBOL(prism2_update_comms_qual); module_init(hostap_init); module_exit(hostap_exit);
linux-master
drivers/net/wireless/intersil/hostap/hostap_main.c
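hostap_set_encryption() above derives the cnfWEPFlags RID from the configured cipher and the host_encrypt/host_decrypt/open_wep/802.1X settings before writing it back to the firmware. The pure-function sketch below restates that flag composition outside the driver; the struct, function name, and bit values are illustrative placeholders (the real bit positions come from the HFA384X_WEPFLAGS_* definitions in hostap_wlan.h):

/*
 * Sketch of the flag composition done in hostap_set_encryption().
 * The WEPFLAG_* bit values are placeholders, not the HFA384X ones.
 */
#include <stdint.h>
#include <stdio.h>

#define WEPFLAG_PRIVACY_INVOKED      0x0001	/* placeholder bit values */
#define WEPFLAG_EXCLUDE_UNENCRYPTED  0x0002
#define WEPFLAG_HOST_ENCRYPT         0x0010
#define WEPFLAG_HOST_DECRYPT         0x0080

enum crypt_kind { CRYPT_NONE, CRYPT_WEP, CRYPT_OTHER };

struct enc_state {
	enum crypt_kind kind;
	int privacy_invoked, open_wep, ieee_802_1x, wpa;
	int host_encrypt, host_decrypt;
};

static uint16_t compose_wepflags(uint16_t val, const struct enc_state *s)
{
	int privacy = s->kind != CRYPT_NONE || s->privacy_invoked;

	if (privacy)
		val |= WEPFLAG_PRIVACY_INVOKED;
	else
		val &= ~WEPFLAG_PRIVACY_INVOKED;

	/* unencrypted RX is allowed for open_wep, no cipher, or 802.1X/WPA
	 * with host decryption (EAPOL must get through) */
	if (s->open_wep || s->kind == CRYPT_NONE ||
	    ((s->ieee_802_1x || s->wpa) && s->host_decrypt))
		val &= ~WEPFLAG_EXCLUDE_UNENCRYPTED;
	else
		val |= WEPFLAG_EXCLUDE_UNENCRYPTED;

	/* non-WEP ciphers always need host en/decryption */
	if (privacy && (s->kind == CRYPT_OTHER || s->host_encrypt))
		val |= WEPFLAG_HOST_ENCRYPT;
	else
		val &= ~WEPFLAG_HOST_ENCRYPT;

	if (privacy && (s->kind == CRYPT_OTHER || s->host_decrypt))
		val |= WEPFLAG_HOST_DECRYPT;
	else
		val &= ~WEPFLAG_HOST_DECRYPT;

	return val;
}

int main(void)
{
	struct enc_state s = { .kind = CRYPT_WEP, .host_encrypt = 1, .host_decrypt = 1 };

	printf("wepflags=0x%04x\n", compose_wepflags(0, &s));
	return 0;
}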
// SPDX-License-Identifier: GPL-2.0 /* Host AP driver Info Frame processing (part of hostap.o module) */ #include <linux/if_arp.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/export.h> #include <linux/etherdevice.h> #include "hostap_wlan.h" #include "hostap.h" #include "hostap_ap.h" /* Called only as a tasklet (software IRQ) */ static void prism2_info_commtallies16(local_info_t *local, unsigned char *buf, int left) { struct hfa384x_comm_tallies *tallies; if (left < sizeof(struct hfa384x_comm_tallies)) { printk(KERN_DEBUG "%s: too short (len=%d) commtallies " "info frame\n", local->dev->name, left); return; } tallies = (struct hfa384x_comm_tallies *) buf; #define ADD_COMM_TALLIES(name) \ local->comm_tallies.name += le16_to_cpu(tallies->name) ADD_COMM_TALLIES(tx_unicast_frames); ADD_COMM_TALLIES(tx_multicast_frames); ADD_COMM_TALLIES(tx_fragments); ADD_COMM_TALLIES(tx_unicast_octets); ADD_COMM_TALLIES(tx_multicast_octets); ADD_COMM_TALLIES(tx_deferred_transmissions); ADD_COMM_TALLIES(tx_single_retry_frames); ADD_COMM_TALLIES(tx_multiple_retry_frames); ADD_COMM_TALLIES(tx_retry_limit_exceeded); ADD_COMM_TALLIES(tx_discards); ADD_COMM_TALLIES(rx_unicast_frames); ADD_COMM_TALLIES(rx_multicast_frames); ADD_COMM_TALLIES(rx_fragments); ADD_COMM_TALLIES(rx_unicast_octets); ADD_COMM_TALLIES(rx_multicast_octets); ADD_COMM_TALLIES(rx_fcs_errors); ADD_COMM_TALLIES(rx_discards_no_buffer); ADD_COMM_TALLIES(tx_discards_wrong_sa); ADD_COMM_TALLIES(rx_discards_wep_undecryptable); ADD_COMM_TALLIES(rx_message_in_msg_fragments); ADD_COMM_TALLIES(rx_message_in_bad_msg_fragments); #undef ADD_COMM_TALLIES } /* Called only as a tasklet (software IRQ) */ static void prism2_info_commtallies32(local_info_t *local, unsigned char *buf, int left) { struct hfa384x_comm_tallies32 *tallies; if (left < sizeof(struct hfa384x_comm_tallies32)) { printk(KERN_DEBUG "%s: too short (len=%d) commtallies32 " "info frame\n", local->dev->name, left); return; } tallies = (struct hfa384x_comm_tallies32 *) buf; #define ADD_COMM_TALLIES(name) \ local->comm_tallies.name += le32_to_cpu(tallies->name) ADD_COMM_TALLIES(tx_unicast_frames); ADD_COMM_TALLIES(tx_multicast_frames); ADD_COMM_TALLIES(tx_fragments); ADD_COMM_TALLIES(tx_unicast_octets); ADD_COMM_TALLIES(tx_multicast_octets); ADD_COMM_TALLIES(tx_deferred_transmissions); ADD_COMM_TALLIES(tx_single_retry_frames); ADD_COMM_TALLIES(tx_multiple_retry_frames); ADD_COMM_TALLIES(tx_retry_limit_exceeded); ADD_COMM_TALLIES(tx_discards); ADD_COMM_TALLIES(rx_unicast_frames); ADD_COMM_TALLIES(rx_multicast_frames); ADD_COMM_TALLIES(rx_fragments); ADD_COMM_TALLIES(rx_unicast_octets); ADD_COMM_TALLIES(rx_multicast_octets); ADD_COMM_TALLIES(rx_fcs_errors); ADD_COMM_TALLIES(rx_discards_no_buffer); ADD_COMM_TALLIES(tx_discards_wrong_sa); ADD_COMM_TALLIES(rx_discards_wep_undecryptable); ADD_COMM_TALLIES(rx_message_in_msg_fragments); ADD_COMM_TALLIES(rx_message_in_bad_msg_fragments); #undef ADD_COMM_TALLIES } /* Called only as a tasklet (software IRQ) */ static void prism2_info_commtallies(local_info_t *local, unsigned char *buf, int left) { if (local->tallies32) prism2_info_commtallies32(local, buf, left); else prism2_info_commtallies16(local, buf, left); } #ifndef PRISM2_NO_STATION_MODES #ifndef PRISM2_NO_DEBUG static const char* hfa384x_linkstatus_str(u16 linkstatus) { switch (linkstatus) { case HFA384X_LINKSTATUS_CONNECTED: return "Connected"; case HFA384X_LINKSTATUS_DISCONNECTED: return "Disconnected"; case HFA384X_LINKSTATUS_AP_CHANGE: return "Access point change"; case 
HFA384X_LINKSTATUS_AP_OUT_OF_RANGE: return "Access point out of range"; case HFA384X_LINKSTATUS_AP_IN_RANGE: return "Access point in range"; case HFA384X_LINKSTATUS_ASSOC_FAILED: return "Association failed"; default: return "Unknown"; } } #endif /* PRISM2_NO_DEBUG */ /* Called only as a tasklet (software IRQ) */ static void prism2_info_linkstatus(local_info_t *local, unsigned char *buf, int left) { u16 val; int non_sta_mode; /* Alloc new JoinRequests to occur since LinkStatus for the previous * has been received */ local->last_join_time = 0; if (left != 2) { printk(KERN_DEBUG "%s: invalid linkstatus info frame " "length %d\n", local->dev->name, left); return; } non_sta_mode = local->iw_mode == IW_MODE_MASTER || local->iw_mode == IW_MODE_REPEAT || local->iw_mode == IW_MODE_MONITOR; val = buf[0] | (buf[1] << 8); if (!non_sta_mode || val != HFA384X_LINKSTATUS_DISCONNECTED) { PDEBUG(DEBUG_EXTRA, "%s: LinkStatus=%d (%s)\n", local->dev->name, val, hfa384x_linkstatus_str(val)); } if (non_sta_mode) { netif_carrier_on(local->dev); netif_carrier_on(local->ddev); return; } /* Get current BSSID later in scheduled task */ set_bit(PRISM2_INFO_PENDING_LINKSTATUS, &local->pending_info); local->prev_link_status = val; schedule_work(&local->info_queue); } static void prism2_host_roaming(local_info_t *local) { struct hfa384x_join_request req; struct net_device *dev = local->dev; struct hfa384x_hostscan_result *selected, *entry; int i; unsigned long flags; if (local->last_join_time && time_before(jiffies, local->last_join_time + 10 * HZ)) { PDEBUG(DEBUG_EXTRA, "%s: last join request has not yet been " "completed - waiting for it before issuing new one\n", dev->name); return; } /* ScanResults are sorted: first ESS results in decreasing signal * quality then IBSS results in similar order. * Trivial roaming policy: just select the first entry. * This could probably be improved by adding hysteresis to limit * number of handoffs, etc. 
* * Could do periodic RID_SCANREQUEST or Inquire F101 to get new * ScanResults */ spin_lock_irqsave(&local->lock, flags); if (local->last_scan_results == NULL || local->last_scan_results_count == 0) { spin_unlock_irqrestore(&local->lock, flags); PDEBUG(DEBUG_EXTRA, "%s: no scan results for host roaming\n", dev->name); return; } selected = &local->last_scan_results[0]; if (local->preferred_ap[0] || local->preferred_ap[1] || local->preferred_ap[2] || local->preferred_ap[3] || local->preferred_ap[4] || local->preferred_ap[5]) { /* Try to find preferred AP */ PDEBUG(DEBUG_EXTRA, "%s: Preferred AP BSSID %pM\n", dev->name, local->preferred_ap); for (i = 0; i < local->last_scan_results_count; i++) { entry = &local->last_scan_results[i]; if (memcmp(local->preferred_ap, entry->bssid, 6) == 0) { PDEBUG(DEBUG_EXTRA, "%s: using preferred AP " "selection\n", dev->name); selected = entry; break; } } } memcpy(req.bssid, selected->bssid, ETH_ALEN); req.channel = selected->chid; spin_unlock_irqrestore(&local->lock, flags); PDEBUG(DEBUG_EXTRA, "%s: JoinRequest: BSSID=%pM" " channel=%d\n", dev->name, req.bssid, le16_to_cpu(req.channel)); if (local->func->set_rid(dev, HFA384X_RID_JOINREQUEST, &req, sizeof(req))) { printk(KERN_DEBUG "%s: JoinRequest failed\n", dev->name); } local->last_join_time = jiffies; } static void hostap_report_scan_complete(local_info_t *local) { union iwreq_data wrqu; /* Inform user space about new scan results (just empty event, * SIOCGIWSCAN can be used to fetch data */ wrqu.data.length = 0; wrqu.data.flags = 0; wireless_send_event(local->dev, SIOCGIWSCAN, &wrqu, NULL); /* Allow SIOCGIWSCAN handling to occur since we have received * scanning result */ local->scan_timestamp = 0; } /* Called only as a tasklet (software IRQ) */ static void prism2_info_scanresults(local_info_t *local, unsigned char *buf, int left) { u16 *pos; int new_count, i; unsigned long flags; struct hfa384x_scan_result *res; struct hfa384x_hostscan_result *results, *prev; if (left < 4) { printk(KERN_DEBUG "%s: invalid scanresult info frame " "length %d\n", local->dev->name, left); return; } pos = (u16 *) buf; pos++; pos++; left -= 4; new_count = left / sizeof(struct hfa384x_scan_result); results = kmalloc_array(new_count, sizeof(struct hfa384x_hostscan_result), GFP_ATOMIC); if (results == NULL) return; /* Convert to hostscan result format. 
*/ res = (struct hfa384x_scan_result *) pos; for (i = 0; i < new_count; i++) { memcpy(&results[i], &res[i], sizeof(struct hfa384x_scan_result)); results[i].atim = 0; } spin_lock_irqsave(&local->lock, flags); local->last_scan_type = PRISM2_SCAN; prev = local->last_scan_results; local->last_scan_results = results; local->last_scan_results_count = new_count; spin_unlock_irqrestore(&local->lock, flags); kfree(prev); hostap_report_scan_complete(local); /* Perform rest of ScanResults handling later in scheduled task */ set_bit(PRISM2_INFO_PENDING_SCANRESULTS, &local->pending_info); schedule_work(&local->info_queue); } /* Called only as a tasklet (software IRQ) */ static void prism2_info_hostscanresults(local_info_t *local, unsigned char *buf, int left) { int i, result_size, copy_len, new_count; struct hfa384x_hostscan_result *results, *prev; unsigned long flags; __le16 *pos; u8 *ptr; wake_up_interruptible(&local->hostscan_wq); if (left < 4) { printk(KERN_DEBUG "%s: invalid hostscanresult info frame " "length %d\n", local->dev->name, left); return; } pos = (__le16 *) buf; copy_len = result_size = le16_to_cpu(*pos); if (result_size == 0) { printk(KERN_DEBUG "%s: invalid result_size (0) in " "hostscanresults\n", local->dev->name); return; } if (copy_len > sizeof(struct hfa384x_hostscan_result)) copy_len = sizeof(struct hfa384x_hostscan_result); pos++; pos++; left -= 4; ptr = (u8 *) pos; new_count = left / result_size; results = kcalloc(new_count, sizeof(struct hfa384x_hostscan_result), GFP_ATOMIC); if (results == NULL) return; for (i = 0; i < new_count; i++) { memcpy(&results[i], ptr, copy_len); ptr += result_size; left -= result_size; } if (left) { printk(KERN_DEBUG "%s: short HostScan result entry (%d/%d)\n", local->dev->name, left, result_size); } spin_lock_irqsave(&local->lock, flags); local->last_scan_type = PRISM2_HOSTSCAN; prev = local->last_scan_results; local->last_scan_results = results; local->last_scan_results_count = new_count; spin_unlock_irqrestore(&local->lock, flags); kfree(prev); hostap_report_scan_complete(local); } #endif /* PRISM2_NO_STATION_MODES */ /* Called only as a tasklet (software IRQ) */ void hostap_info_process(local_info_t *local, struct sk_buff *skb) { struct hfa384x_info_frame *info; unsigned char *buf; int left; #ifndef PRISM2_NO_DEBUG int i; #endif /* PRISM2_NO_DEBUG */ info = (struct hfa384x_info_frame *) skb->data; buf = skb->data + sizeof(*info); left = skb->len - sizeof(*info); switch (le16_to_cpu(info->type)) { case HFA384X_INFO_COMMTALLIES: prism2_info_commtallies(local, buf, left); break; #ifndef PRISM2_NO_STATION_MODES case HFA384X_INFO_LINKSTATUS: prism2_info_linkstatus(local, buf, left); break; case HFA384X_INFO_SCANRESULTS: prism2_info_scanresults(local, buf, left); break; case HFA384X_INFO_HOSTSCANRESULTS: prism2_info_hostscanresults(local, buf, left); break; #endif /* PRISM2_NO_STATION_MODES */ #ifndef PRISM2_NO_DEBUG default: PDEBUG(DEBUG_EXTRA, "%s: INFO - len=%d type=0x%04x\n", local->dev->name, le16_to_cpu(info->len), le16_to_cpu(info->type)); PDEBUG(DEBUG_EXTRA, "Unknown info frame:"); for (i = 0; i < (left < 100 ? 
left : 100); i++) PDEBUG2(DEBUG_EXTRA, " %02x", buf[i]); PDEBUG2(DEBUG_EXTRA, "\n"); break; #endif /* PRISM2_NO_DEBUG */ } } #ifndef PRISM2_NO_STATION_MODES static void handle_info_queue_linkstatus(local_info_t *local) { int val = local->prev_link_status; int connected; union iwreq_data wrqu; connected = val == HFA384X_LINKSTATUS_CONNECTED || val == HFA384X_LINKSTATUS_AP_CHANGE || val == HFA384X_LINKSTATUS_AP_IN_RANGE; if (local->func->get_rid(local->dev, HFA384X_RID_CURRENTBSSID, local->bssid, ETH_ALEN, 1) < 0) { printk(KERN_DEBUG "%s: could not read CURRENTBSSID after " "LinkStatus event\n", local->dev->name); } else { PDEBUG(DEBUG_EXTRA, "%s: LinkStatus: BSSID=%pM\n", local->dev->name, (unsigned char *) local->bssid); if (local->wds_type & HOSTAP_WDS_AP_CLIENT) hostap_add_sta(local->ap, local->bssid); } /* Get BSSID if we have a valid AP address */ if (connected) { netif_carrier_on(local->dev); netif_carrier_on(local->ddev); memcpy(wrqu.ap_addr.sa_data, local->bssid, ETH_ALEN); } else { netif_carrier_off(local->dev); netif_carrier_off(local->ddev); eth_zero_addr(wrqu.ap_addr.sa_data); } wrqu.ap_addr.sa_family = ARPHRD_ETHER; /* * Filter out sequential disconnect events in order not to cause a * flood of SIOCGIWAP events that have a race condition with EAPOL * frames and can confuse wpa_supplicant about the current association * status. */ if (connected || local->prev_linkstatus_connected) wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL); local->prev_linkstatus_connected = connected; } static void handle_info_queue_scanresults(local_info_t *local) { if (local->host_roaming == 1 && local->iw_mode == IW_MODE_INFRA) prism2_host_roaming(local); if (local->host_roaming == 2 && local->iw_mode == IW_MODE_INFRA && !is_zero_ether_addr(local->preferred_ap)) { /* * Firmware seems to be getting into odd state in host_roaming * mode 2 when hostscan is used without join command, so try * to fix this by re-joining the current AP. This does not * actually trigger a new association if the current AP is * still in the scan results. */ prism2_host_roaming(local); } } /* Called only as scheduled task after receiving info frames (used to avoid * pending too much time in HW IRQ handler). */ static void handle_info_queue(struct work_struct *work) { local_info_t *local = container_of(work, local_info_t, info_queue); if (test_and_clear_bit(PRISM2_INFO_PENDING_LINKSTATUS, &local->pending_info)) handle_info_queue_linkstatus(local); if (test_and_clear_bit(PRISM2_INFO_PENDING_SCANRESULTS, &local->pending_info)) handle_info_queue_scanresults(local); } #endif /* PRISM2_NO_STATION_MODES */ void hostap_info_init(local_info_t *local) { skb_queue_head_init(&local->info_list); #ifndef PRISM2_NO_STATION_MODES INIT_WORK(&local->info_queue, handle_info_queue); #endif /* PRISM2_NO_STATION_MODES */ } EXPORT_SYMBOL(hostap_info_init); EXPORT_SYMBOL(hostap_info_process);
linux-master
drivers/net/wireless/intersil/hostap/hostap_info.c
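The HostScan result handler in the file above copies firmware records of a firmware-reported size into fixed-size host structures, clamping the per-record copy length so that longer records from newer firmware still parse. Below is a minimal user-space sketch of that parsing pattern; the record layout and struct my_scan_result are hypothetical stand-ins, not the driver's struct hfa384x_hostscan_result.

/* Sketch only: variable-size record parsing in the style of
 * prism2_info_hostscanresults(). All names here are illustrative. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct my_scan_result {		/* hypothetical fixed-size host copy */
	uint16_t chid;
	uint16_t anl;
	uint16_t sl;
};

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

/* Copy min(result_size, sizeof(*out)) bytes per record so that firmware
 * which appends unknown trailing fields is still handled. */
static int parse_results(const uint8_t *buf, size_t left,
			 struct my_scan_result **out, size_t *count)
{
	size_t result_size, copy_len, n, i;
	struct my_scan_result *res;
	const uint8_t *ptr;

	if (left < 4)
		return -1;			/* too short for the header */
	copy_len = result_size = get_le16(buf);
	if (result_size == 0)
		return -1;
	if (copy_len > sizeof(struct my_scan_result))
		copy_len = sizeof(struct my_scan_result);

	buf += 4;				/* skip result_size + reserved word */
	left -= 4;
	n = left / result_size;

	res = calloc(n, sizeof(*res));
	if (!res)
		return -1;
	ptr = buf;
	for (i = 0; i < n; i++) {
		memcpy(&res[i], ptr, copy_len);
		ptr += result_size;
	}
	*out = res;
	*count = n;
	return 0;
}

int main(void)
{
	/* one fake 8-byte record: chid=1, anl=2, sl=3, plus 2 unknown bytes */
	uint8_t frame[] = { 0x08, 0x00, 0x00, 0x00,
			    0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0xaa, 0xbb };
	struct my_scan_result *res;
	size_t count;

	if (parse_results(frame, sizeof(frame), &res, &count) == 0) {
		printf("parsed %zu record(s), chid=%u\n",
		       count, (unsigned)res[0].chid);
		free(res);
	}
	return 0;
}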
// SPDX-License-Identifier: GPL-2.0 #include <linux/slab.h> #include <linux/export.h> #include <linux/etherdevice.h> #include "hostap_80211.h" #include "hostap_common.h" #include "hostap_wlan.h" #include "hostap.h" #include "hostap_ap.h" /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ static unsigned char rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ static unsigned char bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; /* No encapsulation header if EtherType < 0x600 (=length) */ void hostap_dump_tx_80211(const char *name, struct sk_buff *skb) { struct ieee80211_hdr *hdr; u16 fc; hdr = (struct ieee80211_hdr *) skb->data; printk(KERN_DEBUG "%s: TX len=%d jiffies=%ld\n", name, skb->len, jiffies); if (skb->len < 2) return; fc = le16_to_cpu(hdr->frame_control); printk(KERN_DEBUG " FC=0x%04x (type=%d:%d)%s%s", fc, (fc & IEEE80211_FCTL_FTYPE) >> 2, (fc & IEEE80211_FCTL_STYPE) >> 4, fc & IEEE80211_FCTL_TODS ? " [ToDS]" : "", fc & IEEE80211_FCTL_FROMDS ? " [FromDS]" : ""); if (skb->len < IEEE80211_DATA_HDR3_LEN) { printk("\n"); return; } printk(" dur=0x%04x seq=0x%04x\n", le16_to_cpu(hdr->duration_id), le16_to_cpu(hdr->seq_ctrl)); printk(KERN_DEBUG " A1=%pM", hdr->addr1); printk(" A2=%pM", hdr->addr2); printk(" A3=%pM", hdr->addr3); if (skb->len >= 30) printk(" A4=%pM", hdr->addr4); printk("\n"); } /* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta) * Convert Ethernet header into a suitable IEEE 802.11 header depending on * device configuration. */ netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; int need_headroom, need_tailroom = 0; struct ieee80211_hdr hdr; u16 fc, ethertype = 0; enum { WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME } use_wds = WDS_NO; u8 *encaps_data; int hdr_len, encaps_len, skip_header_bytes; int to_assoc_ap = 0; struct hostap_skb_tx_data *meta; iface = netdev_priv(dev); local = iface->local; if (skb->len < ETH_HLEN) { printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb " "(len=%d)\n", dev->name, skb->len); kfree_skb(skb); return NETDEV_TX_OK; } if (local->ddev != dev) { use_wds = (local->iw_mode == IW_MODE_MASTER && !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ? 
WDS_OWN_FRAME : WDS_COMPLIANT_FRAME; if (dev == local->stadev) { to_assoc_ap = 1; use_wds = WDS_NO; } else if (dev == local->apdev) { printk(KERN_DEBUG "%s: prism2_tx: trying to use " "AP device with Ethernet net dev\n", dev->name); kfree_skb(skb); return NETDEV_TX_OK; } } else { if (local->iw_mode == IW_MODE_REPEAT) { printk(KERN_DEBUG "%s: prism2_tx: trying to use " "non-WDS link in Repeater mode\n", dev->name); kfree_skb(skb); return NETDEV_TX_OK; } else if (local->iw_mode == IW_MODE_INFRA && (local->wds_type & HOSTAP_WDS_AP_CLIENT) && !ether_addr_equal(skb->data + ETH_ALEN, dev->dev_addr)) { /* AP client mode: send frames with foreign src addr * using 4-addr WDS frames */ use_wds = WDS_COMPLIANT_FRAME; } } /* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload * ==> * Prism2 TX frame with 802.11 header: * txdesc (address order depending on used mode; includes dst_addr and * src_addr), possible encapsulation (RFC1042/Bridge-Tunnel; * proto[2], payload {, possible addr4[6]} */ ethertype = (skb->data[12] << 8) | skb->data[13]; memset(&hdr, 0, sizeof(hdr)); /* Length of data after IEEE 802.11 header */ encaps_data = NULL; encaps_len = 0; skip_header_bytes = ETH_HLEN; if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) { encaps_data = bridge_tunnel_header; encaps_len = sizeof(bridge_tunnel_header); skip_header_bytes -= 2; } else if (ethertype >= 0x600) { encaps_data = rfc1042_header; encaps_len = sizeof(rfc1042_header); skip_header_bytes -= 2; } fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; hdr_len = IEEE80211_DATA_HDR3_LEN; if (use_wds != WDS_NO) { /* Note! Prism2 station firmware has problems with sending real * 802.11 frames with four addresses; until these problems can * be fixed or worked around, 4-addr frames needed for WDS are * using incompatible format: FromDS flag is not set and the * fourth address is added after the frame payload; it is * assumed, that the receiving station knows how to handle this * frame format */ if (use_wds == WDS_COMPLIANT_FRAME) { fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS; /* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA, * Addr4 = SA */ skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr4, ETH_ALEN); hdr_len += ETH_ALEN; } else { /* bogus 4-addr format to workaround Prism2 station * f/w bug */ fc |= IEEE80211_FCTL_TODS; /* From DS: Addr1 = DA (used as RA), * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA), */ /* SA from skb->data + ETH_ALEN will be added after * frame payload; use hdr.addr4 as a temporary buffer */ skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr4, ETH_ALEN); need_tailroom += ETH_ALEN; } /* send broadcast and multicast frames to broadcast RA, if * configured; otherwise, use unicast RA of the WDS link */ if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) && is_multicast_ether_addr(skb->data)) eth_broadcast_addr(hdr.addr1); else if (iface->type == HOSTAP_INTERFACE_WDS) memcpy(&hdr.addr1, iface->u.wds.remote_addr, ETH_ALEN); else memcpy(&hdr.addr1, local->bssid, ETH_ALEN); memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); } else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) { fc |= IEEE80211_FCTL_FROMDS; /* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */ skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN); skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3, ETH_ALEN); } else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) { fc |= IEEE80211_FCTL_TODS; /* To DS: Addr1 = BSSID, Addr2 = 
SA, Addr3 = DA */ memcpy(&hdr.addr1, to_assoc_ap ? local->assoc_ap_addr : local->bssid, ETH_ALEN); skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, ETH_ALEN); skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN); } else if (local->iw_mode == IW_MODE_ADHOC) { /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN); skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2, ETH_ALEN); memcpy(&hdr.addr3, local->bssid, ETH_ALEN); } hdr.frame_control = cpu_to_le16(fc); skb_pull(skb, skip_header_bytes); need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len; if (skb_tailroom(skb) < need_tailroom) { skb = skb_unshare(skb, GFP_ATOMIC); if (skb == NULL) { iface->stats.tx_dropped++; return NETDEV_TX_OK; } if (pskb_expand_head(skb, need_headroom, need_tailroom, GFP_ATOMIC)) { kfree_skb(skb); iface->stats.tx_dropped++; return NETDEV_TX_OK; } } else if (skb_headroom(skb) < need_headroom) { struct sk_buff *tmp = skb; skb = skb_realloc_headroom(skb, need_headroom); kfree_skb(tmp); if (skb == NULL) { iface->stats.tx_dropped++; return NETDEV_TX_OK; } } else { skb = skb_unshare(skb, GFP_ATOMIC); if (skb == NULL) { iface->stats.tx_dropped++; return NETDEV_TX_OK; } } if (encaps_data) memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len); memcpy(skb_push(skb, hdr_len), &hdr, hdr_len); if (use_wds == WDS_OWN_FRAME) { skb_put_data(skb, &hdr.addr4, ETH_ALEN); } iface->stats.tx_packets++; iface->stats.tx_bytes += skb->len; skb_reset_mac_header(skb); meta = (struct hostap_skb_tx_data *) skb->cb; memset(meta, 0, sizeof(*meta)); meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; if (use_wds) meta->flags |= HOSTAP_TX_FLAGS_WDS; meta->ethertype = ethertype; meta->iface = iface; /* Send IEEE 802.11 encapsulated frame using the master radio device */ skb->dev = local->dev; dev_queue_xmit(skb); return NETDEV_TX_OK; } /* hard_start_xmit function for hostapd wlan#ap interfaces */ netdev_tx_t hostap_mgmt_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; struct hostap_skb_tx_data *meta; struct ieee80211_hdr *hdr; u16 fc; iface = netdev_priv(dev); local = iface->local; if (skb->len < 10) { printk(KERN_DEBUG "%s: hostap_mgmt_start_xmit: short skb " "(len=%d)\n", dev->name, skb->len); kfree_skb(skb); return NETDEV_TX_OK; } iface->stats.tx_packets++; iface->stats.tx_bytes += skb->len; meta = (struct hostap_skb_tx_data *) skb->cb; memset(meta, 0, sizeof(*meta)); meta->magic = HOSTAP_SKB_TX_DATA_MAGIC; meta->iface = iface; if (skb->len >= IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header) + 2) { hdr = (struct ieee80211_hdr *) skb->data; fc = le16_to_cpu(hdr->frame_control); if (ieee80211_is_data(hdr->frame_control) && (fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_DATA) { u8 *pos = &skb->data[IEEE80211_DATA_HDR3_LEN + sizeof(rfc1042_header)]; meta->ethertype = (pos[0] << 8) | pos[1]; } } /* Send IEEE 802.11 encapsulated frame using the master radio device */ skb->dev = local->dev; dev_queue_xmit(skb); return NETDEV_TX_OK; } /* Called only from software IRQ */ static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb, struct lib80211_crypt_data *crypt) { struct hostap_interface *iface; local_info_t *local; struct ieee80211_hdr *hdr; int prefix_len, postfix_len, hdr_len, res; iface = netdev_priv(skb->dev); local = iface->local; if (skb->len < IEEE80211_DATA_HDR3_LEN) { kfree_skb(skb); return NULL; } if (local->tkip_countermeasures && strcmp(crypt->ops->name, "TKIP") == 0) { hdr = (struct 
ieee80211_hdr *) skb->data; if (net_ratelimit()) { printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " "TX packet to %pM\n", local->dev->name, hdr->addr1); } kfree_skb(skb); return NULL; } skb = skb_unshare(skb, GFP_ATOMIC); if (skb == NULL) return NULL; prefix_len = crypt->ops->extra_mpdu_prefix_len + crypt->ops->extra_msdu_prefix_len; postfix_len = crypt->ops->extra_mpdu_postfix_len + crypt->ops->extra_msdu_postfix_len; if ((skb_headroom(skb) < prefix_len || skb_tailroom(skb) < postfix_len) && pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) { kfree_skb(skb); return NULL; } hdr = (struct ieee80211_hdr *) skb->data; hdr_len = hostap_80211_get_hdrlen(hdr->frame_control); /* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so * call both MSDU and MPDU encryption functions from here. */ atomic_inc(&crypt->refcnt); res = 0; if (crypt->ops->encrypt_msdu) res = crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv); if (res == 0 && crypt->ops->encrypt_mpdu) res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { kfree_skb(skb); return NULL; } return skb; } /* hard_start_xmit function for master radio interface wifi#. * AP processing (TX rate control, power save buffering, etc.). * Use hardware TX function to send the frame. */ netdev_tx_t hostap_master_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; netdev_tx_t ret = NETDEV_TX_BUSY; u16 fc; struct hostap_tx_data tx; ap_tx_ret tx_ret; struct hostap_skb_tx_data *meta; int no_encrypt = 0; struct ieee80211_hdr *hdr; iface = netdev_priv(dev); local = iface->local; tx.skb = skb; tx.sta_ptr = NULL; meta = (struct hostap_skb_tx_data *) skb->cb; if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) { printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, " "expected 0x%08x)\n", dev->name, meta->magic, HOSTAP_SKB_TX_DATA_MAGIC); ret = NETDEV_TX_OK; iface->stats.tx_dropped++; goto fail; } if (local->host_encrypt) { /* Set crypt to default algorithm and key; will be replaced in * AP code if STA has own alg/key */ tx.crypt = local->crypt_info.crypt[local->crypt_info.tx_keyidx]; tx.host_encrypt = 1; } else { tx.crypt = NULL; tx.host_encrypt = 0; } if (skb->len < 24) { printk(KERN_DEBUG "%s: hostap_master_start_xmit: short skb " "(len=%d)\n", dev->name, skb->len); ret = NETDEV_TX_OK; iface->stats.tx_dropped++; goto fail; } /* FIX (?): * Wi-Fi 802.11b test plan suggests that AP should ignore power save * bit in authentication and (re)association frames and assume tha * STA remains awake for the response. 
*/ tx_ret = hostap_handle_sta_tx(local, &tx); skb = tx.skb; meta = (struct hostap_skb_tx_data *) skb->cb; hdr = (struct ieee80211_hdr *) skb->data; fc = le16_to_cpu(hdr->frame_control); switch (tx_ret) { case AP_TX_CONTINUE: break; case AP_TX_CONTINUE_NOT_AUTHORIZED: if (local->ieee_802_1x && ieee80211_is_data(hdr->frame_control) && meta->ethertype != ETH_P_PAE && !(meta->flags & HOSTAP_TX_FLAGS_WDS)) { printk(KERN_DEBUG "%s: dropped frame to unauthorized " "port (IEEE 802.1X): ethertype=0x%04x\n", dev->name, meta->ethertype); hostap_dump_tx_80211(dev->name, skb); ret = NETDEV_TX_OK; /* drop packet */ iface->stats.tx_dropped++; goto fail; } break; case AP_TX_DROP: ret = NETDEV_TX_OK; /* drop packet */ iface->stats.tx_dropped++; goto fail; case AP_TX_RETRY: goto fail; case AP_TX_BUFFERED: /* do not free skb here, it will be freed when the * buffered frame is sent/timed out */ ret = NETDEV_TX_OK; goto tx_exit; } /* Request TX callback if protocol version is 2 in 802.11 header; * this version 2 is a special case used between hostapd and kernel * driver */ if (((fc & IEEE80211_FCTL_VERS) == BIT(1)) && local->ap && local->ap->tx_callback_idx && meta->tx_cb_idx == 0) { meta->tx_cb_idx = local->ap->tx_callback_idx; /* remove special version from the frame header */ fc &= ~IEEE80211_FCTL_VERS; hdr->frame_control = cpu_to_le16(fc); } if (!ieee80211_is_data(hdr->frame_control)) { no_encrypt = 1; tx.crypt = NULL; } if (local->ieee_802_1x && meta->ethertype == ETH_P_PAE && tx.crypt && !(fc & IEEE80211_FCTL_PROTECTED)) { no_encrypt = 1; PDEBUG(DEBUG_EXTRA2, "%s: TX: IEEE 802.1X - passing " "unencrypted EAPOL frame\n", dev->name); tx.crypt = NULL; /* no encryption for IEEE 802.1X frames */ } if (tx.crypt && (!tx.crypt->ops || !tx.crypt->ops->encrypt_mpdu)) tx.crypt = NULL; else if ((tx.crypt || local->crypt_info.crypt[local->crypt_info.tx_keyidx]) && !no_encrypt) { /* Add ISWEP flag both for firmware and host based encryption */ fc |= IEEE80211_FCTL_PROTECTED; hdr->frame_control = cpu_to_le16(fc); } else if (local->drop_unencrypted && ieee80211_is_data(hdr->frame_control) && meta->ethertype != ETH_P_PAE) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: dropped unencrypted TX data " "frame (drop_unencrypted=1)\n", dev->name); } iface->stats.tx_dropped++; ret = NETDEV_TX_OK; goto fail; } if (tx.crypt) { skb = hostap_tx_encrypt(skb, tx.crypt); if (skb == NULL) { printk(KERN_DEBUG "%s: TX - encryption failed\n", dev->name); ret = NETDEV_TX_OK; goto fail; } meta = (struct hostap_skb_tx_data *) skb->cb; if (meta->magic != HOSTAP_SKB_TX_DATA_MAGIC) { printk(KERN_DEBUG "%s: invalid skb->cb magic (0x%08x, " "expected 0x%08x) after hostap_tx_encrypt\n", dev->name, meta->magic, HOSTAP_SKB_TX_DATA_MAGIC); ret = NETDEV_TX_OK; iface->stats.tx_dropped++; goto fail; } } if (local->func->tx == NULL || local->func->tx(skb, dev)) { ret = NETDEV_TX_OK; iface->stats.tx_dropped++; } else { ret = NETDEV_TX_OK; iface->stats.tx_packets++; iface->stats.tx_bytes += skb->len; } fail: if (ret == NETDEV_TX_OK && skb) dev_kfree_skb(skb); tx_exit: if (tx.sta_ptr) hostap_handle_sta_release(tx.sta_ptr); return ret; } EXPORT_SYMBOL(hostap_master_start_xmit);
linux-master
drivers/net/wireless/intersil/hostap/hostap_80211_tx.c
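hostap_data_start_xmit() in the file above picks the IEEE 802.1H encapsulation for an outgoing frame from its EtherType: AppleTalk AARP and IPX take the bridge-tunnel SNAP header, any other real EtherType (>= 0x600) takes the RFC 1042 header, and 802.3 length values take none; when a SNAP header is used, two fewer bytes of the Ethernet header are stripped so the proto field follows it. A small stand-alone sketch of that selection; the helper name pick_encaps() is hypothetical, while the constants mirror their standard values.

/* Sketch only: EtherType-based LLC/SNAP selection, as in the driver's
 * TX path, pulled out into a self-contained helper. */
#include <stdio.h>
#include <stddef.h>

#define ETH_P_AARP 0x80F3
#define ETH_P_IPX  0x8137

static const unsigned char rfc1042_header[6] =
	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 };
static const unsigned char bridge_tunnel_header[6] =
	{ 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 };

/* Returns the 6-byte SNAP header to prepend (or NULL for raw 802.3
 * length frames) and reports how many Ethernet header bytes to strip:
 * all 14 with no SNAP header, 12 when the proto field is kept. */
static const unsigned char *pick_encaps(unsigned int ethertype,
					size_t *skip_header_bytes)
{
	*skip_header_bytes = 14;		/* ETH_HLEN */
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		*skip_header_bytes -= 2;	/* keep proto after the header */
		return bridge_tunnel_header;
	}
	if (ethertype >= 0x600) {		/* real EtherType, not a length */
		*skip_header_bytes -= 2;
		return rfc1042_header;
	}
	return NULL;				/* 802.3 length field: no SNAP */
}

int main(void)
{
	size_t skip;
	const unsigned char *h = pick_encaps(0x0800 /* IPv4 */, &skip);

	printf("IPv4: %s encapsulation, strip %zu header bytes\n",
	       h == rfc1042_header ? "RFC1042" : "other", skip);
	return 0;
}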
// SPDX-License-Identifier: GPL-2.0 static int prism2_enable_aux_port(struct net_device *dev, int enable) { u16 val, reg; int i, tries; unsigned long flags; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; if (local->no_pri) { if (enable) { PDEBUG(DEBUG_EXTRA2, "%s: no PRI f/w - assuming Aux " "port is already enabled\n", dev->name); } return 0; } spin_lock_irqsave(&local->cmdlock, flags); /* wait until busy bit is clear */ tries = HFA384X_CMD_BUSY_TIMEOUT; while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) { tries--; udelay(1); } if (tries == 0) { reg = HFA384X_INW(HFA384X_CMD_OFF); spin_unlock_irqrestore(&local->cmdlock, flags); printk("%s: prism2_enable_aux_port - timeout - reg=0x%04x\n", dev->name, reg); return -ETIMEDOUT; } val = HFA384X_INW(HFA384X_CONTROL_OFF); if (enable) { HFA384X_OUTW(HFA384X_AUX_MAGIC0, HFA384X_PARAM0_OFF); HFA384X_OUTW(HFA384X_AUX_MAGIC1, HFA384X_PARAM1_OFF); HFA384X_OUTW(HFA384X_AUX_MAGIC2, HFA384X_PARAM2_OFF); if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_DISABLED) printk("prism2_enable_aux_port: was not disabled!?\n"); val &= ~HFA384X_AUX_PORT_MASK; val |= HFA384X_AUX_PORT_ENABLE; } else { HFA384X_OUTW(0, HFA384X_PARAM0_OFF); HFA384X_OUTW(0, HFA384X_PARAM1_OFF); HFA384X_OUTW(0, HFA384X_PARAM2_OFF); if ((val & HFA384X_AUX_PORT_MASK) != HFA384X_AUX_PORT_ENABLED) printk("prism2_enable_aux_port: was not enabled!?\n"); val &= ~HFA384X_AUX_PORT_MASK; val |= HFA384X_AUX_PORT_DISABLE; } HFA384X_OUTW(val, HFA384X_CONTROL_OFF); udelay(5); i = 10000; while (i > 0) { val = HFA384X_INW(HFA384X_CONTROL_OFF); val &= HFA384X_AUX_PORT_MASK; if ((enable && val == HFA384X_AUX_PORT_ENABLED) || (!enable && val == HFA384X_AUX_PORT_DISABLED)) break; udelay(10); i--; } spin_unlock_irqrestore(&local->cmdlock, flags); if (i == 0) { printk("prism2_enable_aux_port(%d) timed out\n", enable); return -ETIMEDOUT; } return 0; } static int hfa384x_from_aux(struct net_device *dev, unsigned int addr, int len, void *buf) { u16 page, offset; if (addr & 1 || len & 1) return -1; page = addr >> 7; offset = addr & 0x7f; HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF); HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF); udelay(5); #ifdef PRISM2_PCI { __le16 *pos = (__le16 *) buf; while (len > 0) { *pos++ = HFA384X_INW_DATA(HFA384X_AUXDATA_OFF); len -= 2; } } #else /* PRISM2_PCI */ HFA384X_INSW(HFA384X_AUXDATA_OFF, buf, len / 2); #endif /* PRISM2_PCI */ return 0; } static int hfa384x_to_aux(struct net_device *dev, unsigned int addr, int len, void *buf) { u16 page, offset; if (addr & 1 || len & 1) return -1; page = addr >> 7; offset = addr & 0x7f; HFA384X_OUTW(page, HFA384X_AUXPAGE_OFF); HFA384X_OUTW(offset, HFA384X_AUXOFFSET_OFF); udelay(5); #ifdef PRISM2_PCI { __le16 *pos = (__le16 *) buf; while (len > 0) { HFA384X_OUTW_DATA(*pos++, HFA384X_AUXDATA_OFF); len -= 2; } } #else /* PRISM2_PCI */ HFA384X_OUTSW(HFA384X_AUXDATA_OFF, buf, len / 2); #endif /* PRISM2_PCI */ return 0; } static int prism2_pda_ok(u8 *buf) { __le16 *pda = (__le16 *) buf; int pos; u16 len, pdr; if (buf[0] == 0xff && buf[1] == 0x00 && buf[2] == 0xff && buf[3] == 0x00) return 0; pos = 0; while (pos + 1 < PRISM2_PDA_SIZE / 2) { len = le16_to_cpu(pda[pos]); pdr = le16_to_cpu(pda[pos + 1]); if (len == 0 || pos + len > PRISM2_PDA_SIZE / 2) return 0; if (pdr == 0x0000 && len == 2) { /* PDA end found */ return 1; } pos += len + 1; } return 0; } #define prism2_download_aux_dump_npages 65536 struct prism2_download_aux_dump { local_info_t *local; u16 page[0x80]; }; static int 
prism2_download_aux_dump_proc_show(struct seq_file *m, void *v) { struct prism2_download_aux_dump *ctx = m->private; hfa384x_from_aux(ctx->local->dev, (unsigned long)v - 1, 0x80, ctx->page); seq_write(m, ctx->page, 0x80); return 0; } static void *prism2_download_aux_dump_proc_start(struct seq_file *m, loff_t *_pos) { struct prism2_download_aux_dump *ctx = m->private; prism2_enable_aux_port(ctx->local->dev, 1); if (*_pos >= prism2_download_aux_dump_npages) return NULL; return (void *)((unsigned long)*_pos + 1); } static void *prism2_download_aux_dump_proc_next(struct seq_file *m, void *v, loff_t *_pos) { ++*_pos; if (*_pos >= prism2_download_aux_dump_npages) return NULL; return (void *)((unsigned long)*_pos + 1); } static void prism2_download_aux_dump_proc_stop(struct seq_file *m, void *v) { struct prism2_download_aux_dump *ctx = m->private; prism2_enable_aux_port(ctx->local->dev, 0); } static const struct seq_operations prism2_download_aux_dump_proc_seqops = { .start = prism2_download_aux_dump_proc_start, .next = prism2_download_aux_dump_proc_next, .stop = prism2_download_aux_dump_proc_stop, .show = prism2_download_aux_dump_proc_show, }; static int prism2_download_aux_dump_proc_open(struct inode *inode, struct file *file) { int ret = seq_open_private(file, &prism2_download_aux_dump_proc_seqops, sizeof(struct prism2_download_aux_dump)); if (ret == 0) { struct seq_file *m = file->private_data; m->private = pde_data(inode); } return ret; } static const struct proc_ops prism2_download_aux_dump_proc_ops = { .proc_open = prism2_download_aux_dump_proc_open, .proc_read = seq_read, .proc_lseek = seq_lseek, .proc_release = seq_release_private, }; static u8 * prism2_read_pda(struct net_device *dev) { u8 *buf; int res, i, found = 0; #define NUM_PDA_ADDRS 4 unsigned int pda_addr[NUM_PDA_ADDRS] = { 0x7f0000 /* others than HFA3841 */, 0x3f0000 /* HFA3841 */, 0x390000 /* apparently used in older cards */, 0x7f0002 /* Intel PRO/Wireless 2011B (PCI) */, }; buf = kmalloc(PRISM2_PDA_SIZE, GFP_KERNEL); if (buf == NULL) return NULL; /* Note: wlan card should be in initial state (just after init cmd) * and no other operations should be performed concurrently. 
*/ prism2_enable_aux_port(dev, 1); for (i = 0; i < NUM_PDA_ADDRS; i++) { PDEBUG(DEBUG_EXTRA2, "%s: trying to read PDA from 0x%08x", dev->name, pda_addr[i]); res = hfa384x_from_aux(dev, pda_addr[i], PRISM2_PDA_SIZE, buf); if (res) continue; if (res == 0 && prism2_pda_ok(buf)) { PDEBUG2(DEBUG_EXTRA2, ": OK\n"); found = 1; break; } else { PDEBUG2(DEBUG_EXTRA2, ": failed\n"); } } prism2_enable_aux_port(dev, 0); if (!found) { printk(KERN_DEBUG "%s: valid PDA not found\n", dev->name); kfree(buf); buf = NULL; } return buf; } static int prism2_download_volatile(local_info_t *local, struct prism2_download_data *param) { struct net_device *dev = local->dev; int ret = 0, i; u16 param0, param1; if (local->hw_downloading) { printk(KERN_WARNING "%s: Already downloading - aborting new " "request\n", dev->name); return -1; } local->hw_downloading = 1; if (local->pri_only) { hfa384x_disable_interrupts(dev); } else { prism2_hw_shutdown(dev, 0); if (prism2_hw_init(dev, 0)) { printk(KERN_WARNING "%s: Could not initialize card for" " download\n", dev->name); ret = -1; goto out; } } if (prism2_enable_aux_port(dev, 1)) { printk(KERN_WARNING "%s: Could not enable AUX port\n", dev->name); ret = -1; goto out; } param0 = param->start_addr & 0xffff; param1 = param->start_addr >> 16; HFA384X_OUTW(0, HFA384X_PARAM2_OFF); HFA384X_OUTW(param1, HFA384X_PARAM1_OFF); if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD | (HFA384X_PROGMODE_ENABLE_VOLATILE << 8), param0)) { printk(KERN_WARNING "%s: Download command execution failed\n", dev->name); ret = -1; goto out; } for (i = 0; i < param->num_areas; i++) { PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n", dev->name, param->data[i].len, param->data[i].addr); if (hfa384x_to_aux(dev, param->data[i].addr, param->data[i].len, param->data[i].data)) { printk(KERN_WARNING "%s: RAM download at 0x%08x " "(len=%d) failed\n", dev->name, param->data[i].addr, param->data[i].len); ret = -1; goto out; } } HFA384X_OUTW(param1, HFA384X_PARAM1_OFF); HFA384X_OUTW(0, HFA384X_PARAM2_OFF); if (hfa384x_cmd_no_wait(dev, HFA384X_CMDCODE_DOWNLOAD | (HFA384X_PROGMODE_DISABLE << 8), param0)) { printk(KERN_WARNING "%s: Download command execution failed\n", dev->name); ret = -1; goto out; } /* ProgMode disable causes the hardware to restart itself from the * given starting address. Give hw some time and ACK command just in * case restart did not happen. */ mdelay(5); HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF); if (prism2_enable_aux_port(dev, 0)) { printk(KERN_DEBUG "%s: Disabling AUX port failed\n", dev->name); /* continue anyway.. 
restart should have taken care of this */ } mdelay(5); local->hw_downloading = 0; if (prism2_hw_config(dev, 2)) { printk(KERN_WARNING "%s: Card configuration after RAM " "download failed\n", dev->name); ret = -1; goto out; } out: local->hw_downloading = 0; return ret; } static int prism2_enable_genesis(local_info_t *local, int hcr) { struct net_device *dev = local->dev; u8 initseq[4] = { 0x00, 0xe1, 0xa1, 0xff }; u8 readbuf[4]; printk(KERN_DEBUG "%s: test Genesis mode with HCR 0x%02x\n", dev->name, hcr); local->func->cor_sreset(local); hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq); local->func->genesis_reset(local, hcr); /* Readback test */ hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf); hfa384x_to_aux(dev, 0x7e0038, sizeof(initseq), initseq); hfa384x_from_aux(dev, 0x7e0038, sizeof(readbuf), readbuf); if (memcmp(initseq, readbuf, sizeof(initseq)) == 0) { printk(KERN_DEBUG "Readback test succeeded, HCR 0x%02x\n", hcr); return 0; } else { printk(KERN_DEBUG "Readback test failed, HCR 0x%02x write %4ph read %4ph\n", hcr, initseq, readbuf); return 1; } } static int prism2_get_ram_size(local_info_t *local) { int ret; /* Try to enable genesis mode; 0x1F for x8 SRAM or 0x0F for x16 SRAM */ if (prism2_enable_genesis(local, 0x1f) == 0) ret = 8; else if (prism2_enable_genesis(local, 0x0f) == 0) ret = 16; else ret = -1; /* Disable genesis mode */ local->func->genesis_reset(local, ret == 16 ? 0x07 : 0x17); return ret; } static int prism2_download_genesis(local_info_t *local, struct prism2_download_data *param) { struct net_device *dev = local->dev; int ram16 = 0, i; int ret = 0; if (local->hw_downloading) { printk(KERN_WARNING "%s: Already downloading - aborting new " "request\n", dev->name); return -EBUSY; } if (!local->func->genesis_reset || !local->func->cor_sreset) { printk(KERN_INFO "%s: Genesis mode downloading not supported " "with this hwmodel\n", dev->name); return -EOPNOTSUPP; } local->hw_downloading = 1; if (prism2_enable_aux_port(dev, 1)) { printk(KERN_DEBUG "%s: failed to enable AUX port\n", dev->name); ret = -EIO; goto out; } if (local->sram_type == -1) { /* 0x1F for x8 SRAM or 0x0F for x16 SRAM */ if (prism2_enable_genesis(local, 0x1f) == 0) { ram16 = 0; PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x8 " "SRAM\n", dev->name); } else if (prism2_enable_genesis(local, 0x0f) == 0) { ram16 = 1; PDEBUG(DEBUG_EXTRA2, "%s: Genesis mode OK using x16 " "SRAM\n", dev->name); } else { printk(KERN_DEBUG "%s: Could not initiate genesis " "mode\n", dev->name); ret = -EIO; goto out; } } else { if (prism2_enable_genesis(local, local->sram_type == 8 ? 0x1f : 0x0f)) { printk(KERN_DEBUG "%s: Failed to set Genesis " "mode (sram_type=%d)\n", dev->name, local->sram_type); ret = -EIO; goto out; } ram16 = local->sram_type != 8; } for (i = 0; i < param->num_areas; i++) { PDEBUG(DEBUG_EXTRA2, "%s: Writing %d bytes at 0x%08x\n", dev->name, param->data[i].len, param->data[i].addr); if (hfa384x_to_aux(dev, param->data[i].addr, param->data[i].len, param->data[i].data)) { printk(KERN_WARNING "%s: RAM download at 0x%08x " "(len=%d) failed\n", dev->name, param->data[i].addr, param->data[i].len); ret = -EIO; goto out; } } PDEBUG(DEBUG_EXTRA2, "Disable genesis mode\n"); local->func->genesis_reset(local, ram16 ? 
0x07 : 0x17); if (prism2_enable_aux_port(dev, 0)) { printk(KERN_DEBUG "%s: Failed to disable AUX port\n", dev->name); } mdelay(5); local->hw_downloading = 0; PDEBUG(DEBUG_EXTRA2, "Trying to initialize card\n"); /* * Make sure the INIT command does not generate a command completion * event by disabling interrupts. */ hfa384x_disable_interrupts(dev); if (prism2_hw_init(dev, 1)) { printk(KERN_DEBUG "%s: Initialization after genesis mode " "download failed\n", dev->name); ret = -EIO; goto out; } PDEBUG(DEBUG_EXTRA2, "Card initialized - running PRI only\n"); if (prism2_hw_init2(dev, 1)) { printk(KERN_DEBUG "%s: Initialization(2) after genesis mode " "download failed\n", dev->name); ret = -EIO; goto out; } out: local->hw_downloading = 0; return ret; } #ifdef PRISM2_NON_VOLATILE_DOWNLOAD /* Note! Non-volatile downloading functionality has not yet been tested * thoroughly and it may corrupt flash image and effectively kill the card that * is being updated. You have been warned. */ static inline int prism2_download_block(struct net_device *dev, u32 addr, u8 *data, u32 bufaddr, int rest_len) { u16 param0, param1; int block_len; block_len = rest_len < 4096 ? rest_len : 4096; param0 = addr & 0xffff; param1 = addr >> 16; HFA384X_OUTW(block_len, HFA384X_PARAM2_OFF); HFA384X_OUTW(param1, HFA384X_PARAM1_OFF); if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD | (HFA384X_PROGMODE_ENABLE_NON_VOLATILE << 8), param0)) { printk(KERN_WARNING "%s: Flash download command execution " "failed\n", dev->name); return -1; } if (hfa384x_to_aux(dev, bufaddr, block_len, data)) { printk(KERN_WARNING "%s: flash download at 0x%08x " "(len=%d) failed\n", dev->name, addr, block_len); return -1; } HFA384X_OUTW(0, HFA384X_PARAM2_OFF); HFA384X_OUTW(0, HFA384X_PARAM1_OFF); if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD | (HFA384X_PROGMODE_PROGRAM_NON_VOLATILE << 8), 0)) { printk(KERN_WARNING "%s: Flash write command execution " "failed\n", dev->name); return -1; } return block_len; } static int prism2_download_nonvolatile(local_info_t *local, struct prism2_download_data *dl) { struct net_device *dev = local->dev; int ret = 0, i; struct { __le16 page; __le16 offset; __le16 len; } dlbuffer; u32 bufaddr; if (local->hw_downloading) { printk(KERN_WARNING "%s: Already downloading - aborting new " "request\n", dev->name); return -1; } ret = local->func->get_rid(dev, HFA384X_RID_DOWNLOADBUFFER, &dlbuffer, 6, 0); if (ret < 0) { printk(KERN_WARNING "%s: Could not read download buffer " "parameters\n", dev->name); goto out; } printk(KERN_DEBUG "Download buffer: %d bytes at 0x%04x:0x%04x\n", le16_to_cpu(dlbuffer.len), le16_to_cpu(dlbuffer.page), le16_to_cpu(dlbuffer.offset)); bufaddr = (le16_to_cpu(dlbuffer.page) << 7) + le16_to_cpu(dlbuffer.offset); local->hw_downloading = 1; if (!local->pri_only) { prism2_hw_shutdown(dev, 0); if (prism2_hw_init(dev, 0)) { printk(KERN_WARNING "%s: Could not initialize card for" " download\n", dev->name); ret = -1; goto out; } } hfa384x_disable_interrupts(dev); if (prism2_enable_aux_port(dev, 1)) { printk(KERN_WARNING "%s: Could not enable AUX port\n", dev->name); ret = -1; goto out; } printk(KERN_DEBUG "%s: starting flash download\n", dev->name); for (i = 0; i < dl->num_areas; i++) { int rest_len = dl->data[i].len; int data_off = 0; while (rest_len > 0) { int block_len; block_len = prism2_download_block( dev, dl->data[i].addr + data_off, dl->data[i].data + data_off, bufaddr, rest_len); if (block_len < 0) { ret = -1; goto out; } rest_len -= block_len; data_off += block_len; } } HFA384X_OUTW(0, 
HFA384X_PARAM1_OFF); HFA384X_OUTW(0, HFA384X_PARAM2_OFF); if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_DOWNLOAD | (HFA384X_PROGMODE_DISABLE << 8), 0)) { printk(KERN_WARNING "%s: Download command execution failed\n", dev->name); ret = -1; goto out; } if (prism2_enable_aux_port(dev, 0)) { printk(KERN_DEBUG "%s: Disabling AUX port failed\n", dev->name); /* continue anyway.. restart should have taken care of this */ } mdelay(5); local->func->hw_reset(dev); local->hw_downloading = 0; if (prism2_hw_config(dev, 2)) { printk(KERN_WARNING "%s: Card configuration after flash " "download failed\n", dev->name); ret = -1; } else { printk(KERN_INFO "%s: Card initialized successfully after " "flash download\n", dev->name); } out: local->hw_downloading = 0; return ret; } #endif /* PRISM2_NON_VOLATILE_DOWNLOAD */ static void prism2_download_free_data(struct prism2_download_data *dl) { int i; if (dl == NULL) return; for (i = 0; i < dl->num_areas; i++) kfree(dl->data[i].data); kfree(dl); } static int prism2_download(local_info_t *local, struct prism2_download_param *param) { int ret = 0; int i; u32 total_len = 0; struct prism2_download_data *dl = NULL; printk(KERN_DEBUG "prism2_download: dl_cmd=%d start_addr=0x%08x " "num_areas=%d\n", param->dl_cmd, param->start_addr, param->num_areas); if (param->num_areas > 100) { ret = -EINVAL; goto out; } dl = kzalloc(sizeof(*dl) + param->num_areas * sizeof(struct prism2_download_data_area), GFP_KERNEL); if (dl == NULL) { ret = -ENOMEM; goto out; } dl->dl_cmd = param->dl_cmd; dl->start_addr = param->start_addr; dl->num_areas = param->num_areas; for (i = 0; i < param->num_areas; i++) { PDEBUG(DEBUG_EXTRA2, " area %d: addr=0x%08x len=%d ptr=0x%p\n", i, param->data[i].addr, param->data[i].len, param->data[i].ptr); dl->data[i].addr = param->data[i].addr; dl->data[i].len = param->data[i].len; total_len += param->data[i].len; if (param->data[i].len > PRISM2_MAX_DOWNLOAD_AREA_LEN || total_len > PRISM2_MAX_DOWNLOAD_LEN) { ret = -E2BIG; goto out; } dl->data[i].data = kmalloc(dl->data[i].len, GFP_KERNEL); if (dl->data[i].data == NULL) { ret = -ENOMEM; goto out; } if (copy_from_user(dl->data[i].data, param->data[i].ptr, param->data[i].len)) { ret = -EFAULT; goto out; } } switch (param->dl_cmd) { case PRISM2_DOWNLOAD_VOLATILE: case PRISM2_DOWNLOAD_VOLATILE_PERSISTENT: ret = prism2_download_volatile(local, dl); break; case PRISM2_DOWNLOAD_VOLATILE_GENESIS: case PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT: ret = prism2_download_genesis(local, dl); break; case PRISM2_DOWNLOAD_NON_VOLATILE: #ifdef PRISM2_NON_VOLATILE_DOWNLOAD ret = prism2_download_nonvolatile(local, dl); #else /* PRISM2_NON_VOLATILE_DOWNLOAD */ printk(KERN_INFO "%s: non-volatile downloading not enabled\n", local->dev->name); ret = -EOPNOTSUPP; #endif /* PRISM2_NON_VOLATILE_DOWNLOAD */ break; default: printk(KERN_DEBUG "%s: unsupported download command %d\n", local->dev->name, param->dl_cmd); ret = -EINVAL; break; } out: if (ret == 0 && dl && param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_GENESIS_PERSISTENT) { prism2_download_free_data(local->dl_pri); local->dl_pri = dl; } else if (ret == 0 && dl && param->dl_cmd == PRISM2_DOWNLOAD_VOLATILE_PERSISTENT) { prism2_download_free_data(local->dl_sec); local->dl_sec = dl; } else prism2_download_free_data(dl); return ret; }
linux-master
drivers/net/wireless/intersil/hostap/hostap_download.c
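prism2_read_pda() in the file above only accepts a candidate PDA when prism2_pda_ok() can walk its records to a terminating entry. Below is a user-space restatement of that walk under the same assumptions: the PDA is a sequence of little-endian 16-bit words, each record is a length word followed by a PDR code, and the end record has PDR 0x0000 with length 2. PDA_SIZE_WORDS is a placeholder for the driver's PRISM2_PDA_SIZE / 2.

/* Sketch only: PDA validity walk in the style of prism2_pda_ok(). */
#include <stdint.h>
#include <stdio.h>

#define PDA_SIZE_WORDS 512	/* placeholder for PRISM2_PDA_SIZE / 2 */

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)p[0] | ((uint16_t)p[1] << 8);
}

static int pda_ok(const uint8_t *buf)
{
	int pos = 0;

	/* A 0xff/0x00 pattern at the start means the AUX read was garbage. */
	if (buf[0] == 0xff && buf[1] == 0x00 &&
	    buf[2] == 0xff && buf[3] == 0x00)
		return 0;

	while (pos + 1 < PDA_SIZE_WORDS) {
		uint16_t len = get_le16(buf + 2 * pos);
		uint16_t pdr = get_le16(buf + 2 * (pos + 1));

		if (len == 0 || pos + len > PDA_SIZE_WORDS)
			return 0;
		if (pdr == 0x0000 && len == 2)
			return 1;	/* end-of-PDA record found */
		pos += len + 1;		/* len counts the PDR + data words */
	}
	return 0;
}

int main(void)
{
	uint8_t pda[2 * PDA_SIZE_WORDS] = {
		/* one dummy record: len=3, pdr=0x0101, two data words... */
		0x03, 0x00, 0x01, 0x01, 0x12, 0x34, 0x56, 0x78,
		/* ...then the end record: len=2, pdr=0x0000 */
		0x02, 0x00, 0x00, 0x00,
	};

	printf("pda_ok: %d\n", pda_ok(pda));
	return 0;
}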
// SPDX-License-Identifier: GPL-2.0-only #define PRISM2_PCCARD #include <linux/module.h> #include <linux/if.h> #include <linux/slab.h> #include <linux/wait.h> #include <linux/timer.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/workqueue.h> #include <linux/wireless.h> #include <net/iw_handler.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include <asm/io.h> #include "hostap_wlan.h" static char *dev_info = "hostap_cs"; MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Support for Intersil Prism2-based 802.11 wireless LAN " "cards (PC Card)."); MODULE_LICENSE("GPL"); static int ignore_cis_vcc; module_param(ignore_cis_vcc, int, 0444); MODULE_PARM_DESC(ignore_cis_vcc, "Ignore broken CIS VCC entry"); /* struct local_info::hw_priv */ struct hostap_cs_priv { struct pcmcia_device *link; int sandisk_connectplus; }; #ifdef PRISM2_IO_DEBUG static inline void hfa384x_outb_debug(struct net_device *dev, int a, u8 v) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTB, a, v); outb(v, dev->base_addr + a); spin_unlock_irqrestore(&local->lock, flags); } static inline u8 hfa384x_inb_debug(struct net_device *dev, int a) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; u8 v; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); v = inb(dev->base_addr + a); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INB, a, v); spin_unlock_irqrestore(&local->lock, flags); return v; } static inline void hfa384x_outw_debug(struct net_device *dev, int a, u16 v) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTW, a, v); outw(v, dev->base_addr + a); spin_unlock_irqrestore(&local->lock, flags); } static inline u16 hfa384x_inw_debug(struct net_device *dev, int a) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; u16 v; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); v = inw(dev->base_addr + a); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INW, a, v); spin_unlock_irqrestore(&local->lock, flags); return v; } static inline void hfa384x_outsw_debug(struct net_device *dev, int a, u8 *buf, int wc) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_OUTSW, a, wc); outsw(dev->base_addr + a, buf, wc); spin_unlock_irqrestore(&local->lock, flags); } static inline void hfa384x_insw_debug(struct net_device *dev, int a, u8 *buf, int wc) { struct hostap_interface *iface; local_info_t *local; unsigned long flags; iface = netdev_priv(dev); local = iface->local; spin_lock_irqsave(&local->lock, flags); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INSW, a, wc); insw(dev->base_addr + a, buf, wc); spin_unlock_irqrestore(&local->lock, flags); } #define HFA384X_OUTB(v,a) hfa384x_outb_debug(dev, (a), (v)) #define HFA384X_INB(a) hfa384x_inb_debug(dev, (a)) #define HFA384X_OUTW(v,a) hfa384x_outw_debug(dev, (a), (v)) #define HFA384X_INW(a) hfa384x_inw_debug(dev, (a)) #define HFA384X_OUTSW(a, buf, wc) hfa384x_outsw_debug(dev, (a), (buf), (wc)) #define HFA384X_INSW(a, buf, wc) hfa384x_insw_debug(dev, 
(a), (buf), (wc)) #else /* PRISM2_IO_DEBUG */ #define HFA384X_OUTB(v,a) outb((v), dev->base_addr + (a)) #define HFA384X_INB(a) inb(dev->base_addr + (a)) #define HFA384X_OUTW(v,a) outw((v), dev->base_addr + (a)) #define HFA384X_INW(a) inw(dev->base_addr + (a)) #define HFA384X_INSW(a, buf, wc) insw(dev->base_addr + (a), buf, wc) #define HFA384X_OUTSW(a, buf, wc) outsw(dev->base_addr + (a), buf, wc) #endif /* PRISM2_IO_DEBUG */ static int hfa384x_from_bap(struct net_device *dev, u16 bap, void *buf, int len) { u16 d_off; u16 *pos; d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF; pos = (u16 *) buf; if (len / 2) HFA384X_INSW(d_off, buf, len / 2); pos += len / 2; if (len & 1) *((char *) pos) = HFA384X_INB(d_off); return 0; } static int hfa384x_to_bap(struct net_device *dev, u16 bap, void *buf, int len) { u16 d_off; u16 *pos; d_off = (bap == 1) ? HFA384X_DATA1_OFF : HFA384X_DATA0_OFF; pos = (u16 *) buf; if (len / 2) HFA384X_OUTSW(d_off, buf, len / 2); pos += len / 2; if (len & 1) HFA384X_OUTB(*((char *) pos), d_off); return 0; } /* FIX: This might change at some point.. */ #include "hostap_hw.c" static void prism2_detach(struct pcmcia_device *p_dev); static void prism2_release(u_long arg); static int prism2_config(struct pcmcia_device *link); static int prism2_pccard_card_present(local_info_t *local) { struct hostap_cs_priv *hw_priv = local->hw_priv; if (hw_priv != NULL && hw_priv->link != NULL && pcmcia_dev_present(hw_priv->link)) return 1; return 0; } /* * SanDisk CompactFlash WLAN Flashcard - Product Manual v1.0 * Document No. 20-10-00058, January 2004 * http://www.sandisk.com/pdf/industrial/ProdManualCFWLANv1.0.pdf */ #define SANDISK_WLAN_ACTIVATION_OFF 0x40 #define SANDISK_HCR_OFF 0x42 static void sandisk_set_iobase(local_info_t *local) { int res; struct hostap_cs_priv *hw_priv = local->hw_priv; res = pcmcia_write_config_byte(hw_priv->link, 0x10, hw_priv->link->resource[0]->start & 0x00ff); if (res != 0) { printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 0 -" " res=%d\n", res); } udelay(10); res = pcmcia_write_config_byte(hw_priv->link, 0x12, (hw_priv->link->resource[0]->start >> 8) & 0x00ff); if (res != 0) { printk(KERN_DEBUG "Prism3 SanDisk - failed to set I/O base 1 -" " res=%d\n", res); } } static void sandisk_write_hcr(local_info_t *local, int hcr) { struct net_device *dev = local->dev; int i; HFA384X_OUTB(0x80, SANDISK_WLAN_ACTIVATION_OFF); udelay(50); for (i = 0; i < 10; i++) { HFA384X_OUTB(hcr, SANDISK_HCR_OFF); } udelay(55); HFA384X_OUTB(0x45, SANDISK_WLAN_ACTIVATION_OFF); } static int sandisk_enable_wireless(struct net_device *dev) { int res, ret = 0; struct hostap_interface *iface = netdev_priv(dev); local_info_t *local = iface->local; struct hostap_cs_priv *hw_priv = local->hw_priv; if (resource_size(hw_priv->link->resource[0]) < 0x42) { /* Not enough ports to be SanDisk multi-function card */ ret = -ENODEV; goto done; } if (hw_priv->link->manf_id != 0xd601 || hw_priv->link->card_id != 0x0101) { /* No SanDisk manfid found */ ret = -ENODEV; goto done; } if (hw_priv->link->socket->functions < 2) { /* No multi-function links found */ ret = -ENODEV; goto done; } printk(KERN_DEBUG "%s: Multi-function SanDisk ConnectPlus detected" " - using vendor-specific initialization\n", dev->name); hw_priv->sandisk_connectplus = 1; res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, COR_SOFT_RESET); if (res != 0) { printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n", dev->name, res); goto done; } mdelay(5); /* * Do not enable interrupts here to avoid some bogus 
events. Interrupts * will be enabled during the first cor_sreset call. */ res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, (COR_LEVEL_REQ | 0x8 | COR_ADDR_DECODE | COR_FUNC_ENA)); if (res != 0) { printk(KERN_DEBUG "%s: SanDisk - COR sreset failed (%d)\n", dev->name, res); goto done; } mdelay(5); sandisk_set_iobase(local); HFA384X_OUTB(0xc5, SANDISK_WLAN_ACTIVATION_OFF); udelay(10); HFA384X_OUTB(0x4b, SANDISK_WLAN_ACTIVATION_OFF); udelay(10); done: return ret; } static void prism2_pccard_cor_sreset(local_info_t *local) { int res; u8 val; struct hostap_cs_priv *hw_priv = local->hw_priv; if (!prism2_pccard_card_present(local)) return; res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &val); if (res != 0) { printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 1 (%d)\n", res); return; } printk(KERN_DEBUG "prism2_pccard_cor_sreset: original COR %02x\n", val); val |= COR_SOFT_RESET; res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, val); if (res != 0) { printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 2 (%d)\n", res); return; } mdelay(hw_priv->sandisk_connectplus ? 5 : 2); val &= ~COR_SOFT_RESET; if (hw_priv->sandisk_connectplus) val |= COR_IREQ_ENA; res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, val); if (res != 0) { printk(KERN_DEBUG "prism2_pccard_cor_sreset failed 3 (%d)\n", res); return; } mdelay(hw_priv->sandisk_connectplus ? 5 : 2); if (hw_priv->sandisk_connectplus) sandisk_set_iobase(local); } static void prism2_pccard_genesis_reset(local_info_t *local, int hcr) { int res; u8 old_cor; struct hostap_cs_priv *hw_priv = local->hw_priv; if (!prism2_pccard_card_present(local)) return; if (hw_priv->sandisk_connectplus) { sandisk_write_hcr(local, hcr); return; } res = pcmcia_read_config_byte(hw_priv->link, CISREG_COR, &old_cor); if (res != 0) { printk(KERN_DEBUG "%s failed 1 (%d)\n", __func__, res); return; } printk(KERN_DEBUG "%s: original COR %02x\n", __func__, old_cor); res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, old_cor | COR_SOFT_RESET); if (res != 0) { printk(KERN_DEBUG "%s failed 2 (%d)\n", __func__, res); return; } mdelay(10); /* Setup Genesis mode */ res = pcmcia_write_config_byte(hw_priv->link, CISREG_CCSR, hcr); if (res != 0) { printk(KERN_DEBUG "%s failed 3 (%d)\n", __func__, res); return; } mdelay(10); res = pcmcia_write_config_byte(hw_priv->link, CISREG_COR, old_cor & ~COR_SOFT_RESET); if (res != 0) { printk(KERN_DEBUG "%s failed 4 (%d)\n", __func__, res); return; } mdelay(10); } static struct prism2_helper_functions prism2_pccard_funcs = { .card_present = prism2_pccard_card_present, .cor_sreset = prism2_pccard_cor_sreset, .genesis_reset = prism2_pccard_genesis_reset, .hw_type = HOSTAP_HW_PCCARD, }; /* allocate local data and register with CardServices * initialize dev_link structure, but do not configure the card yet */ static int hostap_cs_probe(struct pcmcia_device *p_dev) { int ret; PDEBUG(DEBUG_HW, "%s: setting Vcc=33 (constant)\n", dev_info); ret = prism2_config(p_dev); if (ret) { PDEBUG(DEBUG_EXTRA, "prism2_config() failed\n"); } return ret; } static void prism2_detach(struct pcmcia_device *link) { PDEBUG(DEBUG_FLOW, "prism2_detach\n"); prism2_release((u_long)link); /* release net devices */ if (link->priv) { struct hostap_cs_priv *hw_priv; struct net_device *dev; struct hostap_interface *iface; dev = link->priv; iface = netdev_priv(dev); hw_priv = iface->local->hw_priv; prism2_free_local_data(dev); kfree(hw_priv); } } static int prism2_config_check(struct pcmcia_device *p_dev, void *priv_data) { if (p_dev->config_index == 0) 
return -EINVAL; return pcmcia_request_io(p_dev); } static int prism2_config(struct pcmcia_device *link) { struct net_device *dev; struct hostap_interface *iface; local_info_t *local; int ret; struct hostap_cs_priv *hw_priv; unsigned long flags; PDEBUG(DEBUG_FLOW, "prism2_config()\n"); hw_priv = kzalloc(sizeof(*hw_priv), GFP_KERNEL); if (hw_priv == NULL) { ret = -ENOMEM; goto failed; } /* Look for an appropriate configuration table entry in the CIS */ link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_AUDIO | CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_IO | CONF_ENABLE_IRQ; if (ignore_cis_vcc) link->config_flags &= ~CONF_AUTO_CHECK_VCC; ret = pcmcia_loop_config(link, prism2_config_check, NULL); if (ret) { if (!ignore_cis_vcc) printk(KERN_ERR "GetNextTuple(): No matching " "CIS configuration. Maybe you need the " "ignore_cis_vcc=1 parameter.\n"); goto failed; } /* Need to allocate net_device before requesting IRQ handler */ dev = prism2_init_local_data(&prism2_pccard_funcs, 0, &link->dev); if (!dev) { ret = -ENOMEM; goto failed; } link->priv = dev; iface = netdev_priv(dev); local = iface->local; local->hw_priv = hw_priv; hw_priv->link = link; /* * We enable IRQ here, but IRQ handler will not proceed * until dev->base_addr is set below. This protect us from * receive interrupts when driver is not initialized. */ ret = pcmcia_request_irq(link, prism2_interrupt); if (ret) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; spin_lock_irqsave(&local->irq_init_lock, flags); dev->irq = link->irq; dev->base_addr = link->resource[0]->start; spin_unlock_irqrestore(&local->irq_init_lock, flags); local->shutdown = 0; sandisk_enable_wireless(dev); ret = prism2_hw_config(dev, 1); if (!ret) ret = hostap_hw_ready(dev); return ret; failed: kfree(hw_priv); prism2_release((u_long)link); return ret; } static void prism2_release(u_long arg) { struct pcmcia_device *link = (struct pcmcia_device *)arg; PDEBUG(DEBUG_FLOW, "prism2_release\n"); if (link->priv) { struct net_device *dev = link->priv; struct hostap_interface *iface; iface = netdev_priv(dev); prism2_hw_shutdown(dev, 0); iface->local->shutdown = 1; } pcmcia_disable_device(link); PDEBUG(DEBUG_FLOW, "release - done\n"); } static int hostap_cs_suspend(struct pcmcia_device *link) { struct net_device *dev = (struct net_device *) link->priv; int dev_open = 0; struct hostap_interface *iface = NULL; if (!dev) return -ENODEV; iface = netdev_priv(dev); PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_SUSPEND\n", dev_info); if (iface && iface->local) dev_open = iface->local->num_dev_open > 0; if (dev_open) { netif_stop_queue(dev); netif_device_detach(dev); } prism2_suspend(dev); return 0; } static int hostap_cs_resume(struct pcmcia_device *link) { struct net_device *dev = (struct net_device *) link->priv; int dev_open = 0; struct hostap_interface *iface = NULL; if (!dev) return -ENODEV; iface = netdev_priv(dev); PDEBUG(DEBUG_EXTRA, "%s: CS_EVENT_PM_RESUME\n", dev_info); if (iface && iface->local) dev_open = iface->local->num_dev_open > 0; prism2_hw_shutdown(dev, 1); prism2_hw_config(dev, dev_open ? 
0 : 1); if (dev_open) { netif_device_attach(dev); netif_start_queue(dev); } return 0; } static const struct pcmcia_device_id hostap_cs_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), PCMCIA_DEVICE_MANF_CARD(0x01bf, 0x3301), PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030b), PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), PCMCIA_DEVICE_MANF_CARD(0x02d2, 0x0001), PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x0001), PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* PCMCIA_DEVICE_MANF_CARD(0xc00f, 0x0000), conflict with pcnet_cs */ PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0002), PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0010), PCMCIA_DEVICE_MANF_CARD(0x0126, 0x0002), PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0xd601, 0x0005, "ADLINK 345 CF", 0x2d858104), PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "INTERSIL", 0x74c5e40d), PCMCIA_DEVICE_MANF_CARD_PROD_ID1(0x0156, 0x0002, "Intersil", 0x4b801a17), PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.02", 0x4b74baa0), PCMCIA_MFC_DEVICE_PROD_ID12(0, "SanDisk", "ConnectPlus", 0x7a954bd9, 0x74be00c6), PCMCIA_DEVICE_PROD_ID123( "Addtron", "AWP-100 Wireless PCMCIA", "Version 01.02", 0xe6ec52ce, 0x08649af2, 0x4b74baa0), PCMCIA_DEVICE_PROD_ID123( "Canon", "Wireless LAN CF Card K30225", "Version 01.00", 0x96ef6fe2, 0x263fcbab, 0xa57adb8c), PCMCIA_DEVICE_PROD_ID123( "D", "Link DWL-650 11Mbps WLAN Card", "Version 01.02", 0x71b18589, 0xb6f1b0ab, 0x4b74baa0), PCMCIA_DEVICE_PROD_ID123( "Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0), PCMCIA_DEVICE_PROD_ID123( "SMC", "SMC2632W", "Version 01.02", 0xc4f8b18b, 0x474a1f2a, 0x4b74baa0), PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18), PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b), PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18), PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395), PCMCIA_DEVICE_PROD_ID12( "ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee), PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1), PCMCIA_DEVICE_PROD_ID123( "Pretec", "CompactWLAN Card 802.11b", "2.5", 0x1cadd3e5, 0xe697636c, 0x7a5bfcf1), PCMCIA_DEVICE_PROD_ID123( "U.S. Robotics", "IEEE 802.11b PC-CARD", "Version 01.02", 0xc7b8df9d, 0x1700d087, 0x4b74baa0), PCMCIA_DEVICE_PROD_ID123( "Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", "Ver. 1.00", 0x5cd01705, 0x4271660f, 0x9d08ee12), PCMCIA_DEVICE_PROD_ID123( "Wireless LAN" , "11Mbps PC Card", "Version 01.02", 0x4b8870ff, 0x70e946d1, 0x4b74baa0), PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092), PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2), PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b), PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39), PCMCIA_DEVICE_NULL }; MODULE_DEVICE_TABLE(pcmcia, hostap_cs_ids); static struct pcmcia_driver hostap_driver = { .name = "hostap_cs", .probe = hostap_cs_probe, .remove = prism2_detach, .owner = THIS_MODULE, .id_table = hostap_cs_ids, .suspend = hostap_cs_suspend, .resume = hostap_cs_resume, }; module_pcmcia_driver(hostap_driver);
linux-master
drivers/net/wireless/intersil/hostap/hostap_cs.c
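The BAP copy helpers in the file above move data to and from the card as 16-bit words and finish an odd-length transfer with a single byte access. The sketch below shows that word-then-trailing-byte pattern against a fake in-memory data register instead of real inw()/insw() port I/O; fake_reg_read16(), fake_reg_read8(), and the backing fifo buffer are invented stand-ins.

/* Sketch only: odd-length BAP read pattern, modelled in user space. */
#include <stdint.h>
#include <stdio.h>

static uint8_t fifo[16] = "Prism2 BAP data";
static size_t fifo_pos;

static uint16_t fake_reg_read16(void)	/* stands in for a 16-bit data register read */
{
	uint16_t v = (uint16_t)fifo[fifo_pos] |
		     ((uint16_t)fifo[fifo_pos + 1] << 8);
	fifo_pos += 2;
	return v;
}

static uint8_t fake_reg_read8(void)	/* stands in for an 8-bit data register read */
{
	return fifo[fifo_pos++];
}

/* Read len bytes: bulk-copy len/2 words, then fetch one extra byte when
 * len is odd, mirroring the driver's hfa384x_from_bap(). */
static void read_from_bap(void *buf, int len)
{
	uint8_t *pos = buf;
	int words = len / 2;

	while (words--) {
		uint16_t v = fake_reg_read16();
		*pos++ = (uint8_t)(v & 0xff);	/* low byte first, as insw() stores on LE */
		*pos++ = (uint8_t)(v >> 8);
	}
	if (len & 1)
		*pos = fake_reg_read8();	/* trailing byte of an odd-length read */
}

int main(void)
{
	char out[16] = { 0 };

	read_from_bap(out, 15);		/* odd length exercises the tail byte */
	printf("read: %s\n", out);
	return 0;
}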
// SPDX-License-Identifier: GPL-2.0-only /* * Host AP (software wireless LAN access point) driver for * Intersil Prism2/2.5/3. * * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen * <[email protected]> * Copyright (c) 2002-2005, Jouni Malinen <[email protected]> * * FIX: * - there is currently no way of associating TX packets to correct wds device * when TX Exc/OK event occurs, so all tx_packets and some * tx_errors/tx_dropped are added to the main netdevice; using sw_support * field in txdesc might be used to fix this (using Alloc event to increment * tx_packets would need some further info in txfid table) * * Buffer Access Path (BAP) usage: * Prism2 cards have two separate BAPs for accessing the card memory. These * should allow concurrent access to two different frames and the driver * previously used BAP0 for sending data and BAP1 for receiving data. * However, there seems to be number of issues with concurrent access and at * least one know hardware bug in using BAP0 and BAP1 concurrently with PCI * Prism2.5. Therefore, the driver now only uses BAP0 for moving data between * host and card memories. BAP0 accesses are protected with local->baplock * (spin_lock_bh) to prevent concurrent use. */ #include <asm/delay.h> #include <linux/uaccess.h> #include <linux/slab.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/if_arp.h> #include <linux/delay.h> #include <linux/random.h> #include <linux/wait.h> #include <linux/sched/signal.h> #include <linux/rtnetlink.h> #include <linux/wireless.h> #include <net/iw_handler.h> #include <net/lib80211.h> #include <asm/irq.h> #include "hostap_80211.h" #include "hostap.h" #include "hostap_ap.h" /* #define final_version */ static int mtu = 1500; module_param(mtu, int, 0444); MODULE_PARM_DESC(mtu, "Maximum transfer unit"); static int channel[MAX_PARM_DEVICES] = { 3, DEF_INTS }; module_param_array(channel, int, NULL, 0444); MODULE_PARM_DESC(channel, "Initial channel"); static char essid[33] = "test"; module_param_string(essid, essid, sizeof(essid), 0444); MODULE_PARM_DESC(essid, "Host AP's ESSID"); static int iw_mode[MAX_PARM_DEVICES] = { IW_MODE_MASTER, DEF_INTS }; module_param_array(iw_mode, int, NULL, 0444); MODULE_PARM_DESC(iw_mode, "Initial operation mode"); static int beacon_int[MAX_PARM_DEVICES] = { 100, DEF_INTS }; module_param_array(beacon_int, int, NULL, 0444); MODULE_PARM_DESC(beacon_int, "Beacon interval (1 = 1024 usec)"); static int dtim_period[MAX_PARM_DEVICES] = { 1, DEF_INTS }; module_param_array(dtim_period, int, NULL, 0444); MODULE_PARM_DESC(dtim_period, "DTIM period"); static char dev_template[16] = "wlan%d"; module_param_string(dev_template, dev_template, sizeof(dev_template), 0444); MODULE_PARM_DESC(dev_template, "Prefix for network device name (default: " "wlan%d)"); #ifdef final_version #define EXTRA_EVENTS_WTERR 0 #else /* check WTERR events (Wait Time-out) in development versions */ #define EXTRA_EVENTS_WTERR HFA384X_EV_WTERR #endif /* Events that will be using BAP0 */ #define HFA384X_BAP0_EVENTS \ (HFA384X_EV_TXEXC | HFA384X_EV_RX | HFA384X_EV_INFO | HFA384X_EV_TX) /* event mask, i.e., events that will result in an interrupt */ #define HFA384X_EVENT_MASK \ (HFA384X_BAP0_EVENTS | HFA384X_EV_ALLOC | HFA384X_EV_INFDROP | \ HFA384X_EV_CMD | HFA384X_EV_TICK | \ EXTRA_EVENTS_WTERR) /* Default TX control flags: use 802.11 headers and request interrupt for * failed transmits. 
Frames that request ACK callback, will add * _TX_OK flag and _ALT_RTRY flag may be used to select different retry policy. */ #define HFA384X_TX_CTRL_FLAGS \ (HFA384X_TX_CTRL_802_11 | HFA384X_TX_CTRL_TX_EX) /* ca. 1 usec */ #define HFA384X_CMD_BUSY_TIMEOUT 5000 #define HFA384X_BAP_BUSY_TIMEOUT 50000 /* ca. 10 usec */ #define HFA384X_CMD_COMPL_TIMEOUT 20000 #define HFA384X_DL_COMPL_TIMEOUT 1000000 /* Wait times for initialization; yield to other processes to avoid busy * waiting for long time. */ #define HFA384X_INIT_TIMEOUT (HZ / 2) /* 500 ms */ #define HFA384X_ALLOC_COMPL_TIMEOUT (HZ / 20) /* 50 ms */ static void prism2_hw_reset(struct net_device *dev); static void prism2_check_sta_fw_version(local_info_t *local); #ifdef PRISM2_DOWNLOAD_SUPPORT /* hostap_download.c */ static const struct proc_ops prism2_download_aux_dump_proc_ops; static u8 * prism2_read_pda(struct net_device *dev); static int prism2_download(local_info_t *local, struct prism2_download_param *param); static void prism2_download_free_data(struct prism2_download_data *dl); static int prism2_download_volatile(local_info_t *local, struct prism2_download_data *param); static int prism2_download_genesis(local_info_t *local, struct prism2_download_data *param); static int prism2_get_ram_size(local_info_t *local); #endif /* PRISM2_DOWNLOAD_SUPPORT */ #ifndef final_version /* magic value written to SWSUPPORT0 reg. for detecting whether card is still * present */ #define HFA384X_MAGIC 0x8A32 #endif static void hfa384x_read_regs(struct net_device *dev, struct hfa384x_regs *regs) { regs->cmd = HFA384X_INW(HFA384X_CMD_OFF); regs->evstat = HFA384X_INW(HFA384X_EVSTAT_OFF); regs->offset0 = HFA384X_INW(HFA384X_OFFSET0_OFF); regs->offset1 = HFA384X_INW(HFA384X_OFFSET1_OFF); regs->swsupport0 = HFA384X_INW(HFA384X_SWSUPPORT0_OFF); } /** * __hostap_cmd_queue_free - Free Prism2 command queue entry (private) * @local: pointer to private Host AP driver data * @entry: Prism2 command queue entry to be freed * @del_req: request the entry to be removed * * Internal helper function for freeing Prism2 command queue entries. * Caller must have acquired local->cmdlock before calling this function. */ static inline void __hostap_cmd_queue_free(local_info_t *local, struct hostap_cmd_queue *entry, int del_req) { if (del_req) { entry->del_req = 1; if (!list_empty(&entry->list)) { list_del_init(&entry->list); local->cmd_queue_len--; } } if (refcount_dec_and_test(&entry->usecnt) && entry->del_req) kfree(entry); } /** * hostap_cmd_queue_free - Free Prism2 command queue entry * @local: pointer to private Host AP driver data * @entry: Prism2 command queue entry to be freed * @del_req: request the entry to be removed * * Free a Prism2 command queue entry. 
*/ static inline void hostap_cmd_queue_free(local_info_t *local, struct hostap_cmd_queue *entry, int del_req) { unsigned long flags; spin_lock_irqsave(&local->cmdlock, flags); __hostap_cmd_queue_free(local, entry, del_req); spin_unlock_irqrestore(&local->cmdlock, flags); } /** * prism2_clear_cmd_queue - Free all pending Prism2 command queue entries * @local: pointer to private Host AP driver data */ static void prism2_clear_cmd_queue(local_info_t *local) { struct list_head *ptr, *n; unsigned long flags; struct hostap_cmd_queue *entry; spin_lock_irqsave(&local->cmdlock, flags); list_for_each_safe(ptr, n, &local->cmd_queue) { entry = list_entry(ptr, struct hostap_cmd_queue, list); refcount_inc(&entry->usecnt); printk(KERN_DEBUG "%s: removed pending cmd_queue entry " "(type=%d, cmd=0x%04x, param0=0x%04x)\n", local->dev->name, entry->type, entry->cmd, entry->param0); __hostap_cmd_queue_free(local, entry, 1); } if (local->cmd_queue_len) { /* This should not happen; print debug message and clear * queue length. */ printk(KERN_DEBUG "%s: cmd_queue_len (%d) not zero after " "flush\n", local->dev->name, local->cmd_queue_len); local->cmd_queue_len = 0; } spin_unlock_irqrestore(&local->cmdlock, flags); } /** * hfa384x_cmd_issue - Issue a Prism2 command to the hardware * @dev: pointer to net_device * @entry: Prism2 command queue entry to be issued */ static int hfa384x_cmd_issue(struct net_device *dev, struct hostap_cmd_queue *entry) { struct hostap_interface *iface; local_info_t *local; int tries; u16 reg; unsigned long flags; iface = netdev_priv(dev); local = iface->local; if (local->func->card_present && !local->func->card_present(local)) return -ENODEV; if (entry->issued) { printk(KERN_DEBUG "%s: driver bug - re-issuing command @%p\n", dev->name, entry); } /* wait until busy bit is clear; this should always be clear since the * commands are serialized */ tries = HFA384X_CMD_BUSY_TIMEOUT; while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) { tries--; udelay(1); } #ifndef final_version if (tries != HFA384X_CMD_BUSY_TIMEOUT) { prism2_io_debug_error(dev, 1); printk(KERN_DEBUG "%s: hfa384x_cmd_issue: cmd reg was busy " "for %d usec\n", dev->name, HFA384X_CMD_BUSY_TIMEOUT - tries); } #endif if (tries == 0) { reg = HFA384X_INW(HFA384X_CMD_OFF); prism2_io_debug_error(dev, 2); printk(KERN_DEBUG "%s: hfa384x_cmd_issue - timeout - " "reg=0x%04x\n", dev->name, reg); return -ETIMEDOUT; } /* write command */ spin_lock_irqsave(&local->cmdlock, flags); HFA384X_OUTW(entry->param0, HFA384X_PARAM0_OFF); HFA384X_OUTW(entry->param1, HFA384X_PARAM1_OFF); HFA384X_OUTW(entry->cmd, HFA384X_CMD_OFF); entry->issued = 1; spin_unlock_irqrestore(&local->cmdlock, flags); return 0; } /** * hfa384x_cmd - Issue a Prism2 command and wait (sleep) for completion * @dev: pointer to net_device * @cmd: Prism2 command code (HFA384X_CMD_CODE_*) * @param0: value for Param0 register * @param1: value for Param1 register (pointer; %NULL if not used) * @resp0: pointer for Resp0 data or %NULL if Resp0 is not needed * * Issue given command (possibly after waiting in command queue) and sleep * until the command is completed (or timed out or interrupted). This can be * called only from user process context. 
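 *
 * A minimal illustrative call, mirroring the port enable done in
 * prism2_hw_enable() later in this file:
 *
 *	if (hfa384x_cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL, NULL))
 *		printk("%s: MAC port 0 enabling failed\n", dev->name);
 *
 * The return value is the Prism2 result code taken from the Status
 * register (0 on success) or a negative value if the command could not be
 * issued or did not complete.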
*/ static int hfa384x_cmd(struct net_device *dev, u16 cmd, u16 param0, u16 *param1, u16 *resp0) { struct hostap_interface *iface; local_info_t *local; int err, res, issue, issued = 0; unsigned long flags; struct hostap_cmd_queue *entry; DECLARE_WAITQUEUE(wait, current); iface = netdev_priv(dev); local = iface->local; if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN) { printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n", dev->name); return -1; } if (signal_pending(current)) return -EINTR; entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; refcount_set(&entry->usecnt, 1); entry->type = CMD_SLEEP; entry->cmd = cmd; entry->param0 = param0; if (param1) entry->param1 = *param1; init_waitqueue_head(&entry->compl); /* prepare to wait for command completion event, but do not sleep yet */ add_wait_queue(&entry->compl, &wait); set_current_state(TASK_INTERRUPTIBLE); spin_lock_irqsave(&local->cmdlock, flags); issue = list_empty(&local->cmd_queue); if (issue) entry->issuing = 1; list_add_tail(&entry->list, &local->cmd_queue); local->cmd_queue_len++; spin_unlock_irqrestore(&local->cmdlock, flags); err = 0; if (!issue) goto wait_completion; if (signal_pending(current)) err = -EINTR; if (!err) { if (hfa384x_cmd_issue(dev, entry)) err = -ETIMEDOUT; else issued = 1; } wait_completion: if (!err && entry->type != CMD_COMPLETED) { /* sleep until command is completed or timed out */ res = schedule_timeout(2 * HZ); } else res = -1; if (!err && signal_pending(current)) err = -EINTR; if (err && issued) { /* the command was issued, so a CmdCompl event should occur * soon; however, there's a pending signal and * schedule_timeout() would be interrupted; wait a short period * of time to avoid removing entry from the list before * CmdCompl event */ udelay(300); } set_current_state(TASK_RUNNING); remove_wait_queue(&entry->compl, &wait); /* If entry->list is still in the list, it must be removed * first and in this case prism2_cmd_ev() does not yet have * local reference to it, and the data can be kfree()'d * here. If the command completion event is still generated, * it will be assigned to next (possibly) pending command, but * the driver will reset the card anyway due to timeout * * If the entry is not in the list prism2_cmd_ev() has a local * reference to it, but keeps cmdlock as long as the data is * needed, so the data can be kfree()'d here. */ /* FIX: if the entry->list is in the list, it has not been completed * yet, so removing it here is somewhat wrong.. this could cause * references to freed memory and next list_del() causing NULL pointer * dereference.. it would probably be better to leave the entry in the * list and the list should be emptied during hw reset */ spin_lock_irqsave(&local->cmdlock, flags); if (!list_empty(&entry->list)) { printk(KERN_DEBUG "%s: hfa384x_cmd: entry still in list? 
" "(entry=%p, type=%d, res=%d)\n", dev->name, entry, entry->type, res); list_del_init(&entry->list); local->cmd_queue_len--; } spin_unlock_irqrestore(&local->cmdlock, flags); if (err) { printk(KERN_DEBUG "%s: hfa384x_cmd: interrupted; err=%d\n", dev->name, err); res = err; goto done; } if (entry->type != CMD_COMPLETED) { u16 reg = HFA384X_INW(HFA384X_EVSTAT_OFF); printk(KERN_DEBUG "%s: hfa384x_cmd: command was not " "completed (res=%d, entry=%p, type=%d, cmd=0x%04x, " "param0=0x%04x, EVSTAT=%04x INTEN=%04x)\n", dev->name, res, entry, entry->type, entry->cmd, entry->param0, reg, HFA384X_INW(HFA384X_INTEN_OFF)); if (reg & HFA384X_EV_CMD) { /* Command completion event is pending, but the * interrupt was not delivered - probably an issue * with pcmcia-cs configuration. */ printk(KERN_WARNING "%s: interrupt delivery does not " "seem to work\n", dev->name); } prism2_io_debug_error(dev, 3); res = -ETIMEDOUT; goto done; } if (resp0 != NULL) *resp0 = entry->resp0; #ifndef final_version if (entry->res) { printk(KERN_DEBUG "%s: CMD=0x%04x => res=0x%02x, " "resp0=0x%04x\n", dev->name, cmd, entry->res, entry->resp0); } #endif /* final_version */ res = entry->res; done: hostap_cmd_queue_free(local, entry, 1); return res; } /** * hfa384x_cmd_callback - Issue a Prism2 command; callback when completed * @dev: pointer to net_device * @cmd: Prism2 command code (HFA384X_CMD_CODE_*) * @param0: value for Param0 register * @callback: command completion callback function (%NULL = no callback) * @context: context data to be given to the callback function * * Issue given command (possibly after waiting in command queue) and use * callback function to indicate command completion. This can be called both * from user and interrupt context. The callback function will be called in * hardware IRQ context. It can be %NULL, when no function is called when * command is completed. */ static int hfa384x_cmd_callback(struct net_device *dev, u16 cmd, u16 param0, void (*callback)(struct net_device *dev, long context, u16 resp0, u16 status), long context) { struct hostap_interface *iface; local_info_t *local; int issue, ret; unsigned long flags; struct hostap_cmd_queue *entry; iface = netdev_priv(dev); local = iface->local; if (local->cmd_queue_len >= HOSTAP_CMD_QUEUE_MAX_LEN + 2) { printk(KERN_DEBUG "%s: hfa384x_cmd: cmd_queue full\n", dev->name); return -1; } entry = kzalloc(sizeof(*entry), GFP_ATOMIC); if (entry == NULL) return -ENOMEM; refcount_set(&entry->usecnt, 1); entry->type = CMD_CALLBACK; entry->cmd = cmd; entry->param0 = param0; entry->callback = callback; entry->context = context; spin_lock_irqsave(&local->cmdlock, flags); issue = list_empty(&local->cmd_queue); if (issue) entry->issuing = 1; list_add_tail(&entry->list, &local->cmd_queue); local->cmd_queue_len++; spin_unlock_irqrestore(&local->cmdlock, flags); if (issue && hfa384x_cmd_issue(dev, entry)) ret = -ETIMEDOUT; else ret = 0; hostap_cmd_queue_free(local, entry, ret); return ret; } /** * __hfa384x_cmd_no_wait - Issue a Prism2 command (private) * @dev: pointer to net_device * @cmd: Prism2 command code (HFA384X_CMD_CODE_*) * @param0: value for Param0 register * @io_debug_num: I/O debug error number * * Shared helper function for hfa384x_cmd_wait() and hfa384x_cmd_no_wait(). 
*/ static int __hfa384x_cmd_no_wait(struct net_device *dev, u16 cmd, u16 param0, int io_debug_num) { int tries; u16 reg; /* wait until busy bit is clear; this should always be clear since the * commands are serialized */ tries = HFA384X_CMD_BUSY_TIMEOUT; while (HFA384X_INW(HFA384X_CMD_OFF) & HFA384X_CMD_BUSY && tries > 0) { tries--; udelay(1); } if (tries == 0) { reg = HFA384X_INW(HFA384X_CMD_OFF); prism2_io_debug_error(dev, io_debug_num); printk(KERN_DEBUG "%s: __hfa384x_cmd_no_wait(%d) - timeout - " "reg=0x%04x\n", dev->name, io_debug_num, reg); return -ETIMEDOUT; } /* write command */ HFA384X_OUTW(param0, HFA384X_PARAM0_OFF); HFA384X_OUTW(cmd, HFA384X_CMD_OFF); return 0; } /** * hfa384x_cmd_wait - Issue a Prism2 command and busy wait for completion * @dev: pointer to net_device * @cmd: Prism2 command code (HFA384X_CMD_CODE_*) * @param0: value for Param0 register */ static int hfa384x_cmd_wait(struct net_device *dev, u16 cmd, u16 param0) { int res, tries; u16 reg; res = __hfa384x_cmd_no_wait(dev, cmd, param0, 4); if (res) return res; /* wait for command completion */ if ((cmd & HFA384X_CMDCODE_MASK) == HFA384X_CMDCODE_DOWNLOAD) tries = HFA384X_DL_COMPL_TIMEOUT; else tries = HFA384X_CMD_COMPL_TIMEOUT; while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD) && tries > 0) { tries--; udelay(10); } if (tries == 0) { reg = HFA384X_INW(HFA384X_EVSTAT_OFF); prism2_io_debug_error(dev, 5); printk(KERN_DEBUG "%s: hfa384x_cmd_wait - timeout2 - " "reg=0x%04x\n", dev->name, reg); return -ETIMEDOUT; } res = (HFA384X_INW(HFA384X_STATUS_OFF) & (BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) | BIT(9) | BIT(8))) >> 8; #ifndef final_version if (res) { printk(KERN_DEBUG "%s: CMD=0x%04x => res=0x%02x\n", dev->name, cmd, res); } #endif HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF); return res; } /** * hfa384x_cmd_no_wait - Issue a Prism2 command; do not wait for completion * @dev: pointer to net_device * @cmd: Prism2 command code (HFA384X_CMD_CODE_*) * @param0: value for Param0 register */ static inline int hfa384x_cmd_no_wait(struct net_device *dev, u16 cmd, u16 param0) { return __hfa384x_cmd_no_wait(dev, cmd, param0, 6); } /** * prism2_cmd_ev - Prism2 command completion event handler * @dev: pointer to net_device * * Interrupt handler for command completion events. Called by the main * interrupt handler in hardware IRQ context. Read Resp0 and status registers * from the hardware and ACK the event. Depending on the issued command type * either wake up the sleeping process that is waiting for command completion * or call the callback function. Issue the next command, if one is pending. 
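 *
 * The completed entry is detached from local->cmd_queue under cmdlock with
 * its use count raised, and is released via hostap_cmd_queue_free() once
 * the completion has been delivered.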
*/ static void prism2_cmd_ev(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; struct hostap_cmd_queue *entry = NULL; iface = netdev_priv(dev); local = iface->local; spin_lock(&local->cmdlock); if (!list_empty(&local->cmd_queue)) { entry = list_entry(local->cmd_queue.next, struct hostap_cmd_queue, list); refcount_inc(&entry->usecnt); list_del_init(&entry->list); local->cmd_queue_len--; if (!entry->issued) { printk(KERN_DEBUG "%s: Command completion event, but " "cmd not issued\n", dev->name); __hostap_cmd_queue_free(local, entry, 1); entry = NULL; } } spin_unlock(&local->cmdlock); if (!entry) { HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF); printk(KERN_DEBUG "%s: Command completion event, but no " "pending commands\n", dev->name); return; } entry->resp0 = HFA384X_INW(HFA384X_RESP0_OFF); entry->res = (HFA384X_INW(HFA384X_STATUS_OFF) & (BIT(14) | BIT(13) | BIT(12) | BIT(11) | BIT(10) | BIT(9) | BIT(8))) >> 8; HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF); /* TODO: rest of the CmdEv handling could be moved to tasklet */ if (entry->type == CMD_SLEEP) { entry->type = CMD_COMPLETED; wake_up_interruptible(&entry->compl); } else if (entry->type == CMD_CALLBACK) { if (entry->callback) entry->callback(dev, entry->context, entry->resp0, entry->res); } else { printk(KERN_DEBUG "%s: Invalid command completion type %d\n", dev->name, entry->type); } hostap_cmd_queue_free(local, entry, 1); /* issue next command, if pending */ entry = NULL; spin_lock(&local->cmdlock); if (!list_empty(&local->cmd_queue)) { entry = list_entry(local->cmd_queue.next, struct hostap_cmd_queue, list); if (entry->issuing) { /* hfa384x_cmd() has already started issuing this * command, so do not start here */ entry = NULL; } if (entry) refcount_inc(&entry->usecnt); } spin_unlock(&local->cmdlock); if (entry) { /* issue next command; if command issuing fails, remove the * entry from cmd_queue */ int res = hfa384x_cmd_issue(dev, entry); spin_lock(&local->cmdlock); __hostap_cmd_queue_free(local, entry, res); spin_unlock(&local->cmdlock); } } static int hfa384x_wait_offset(struct net_device *dev, u16 o_off) { int tries = HFA384X_BAP_BUSY_TIMEOUT; int res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY; while (res && tries > 0) { tries--; udelay(1); res = HFA384X_INW(o_off) & HFA384X_OFFSET_BUSY; } return res; } /* Offset must be even */ static int hfa384x_setup_bap(struct net_device *dev, u16 bap, u16 id, int offset) { u16 o_off, s_off; int ret = 0; if (offset % 2 || bap > 1) return -EINVAL; if (bap == BAP1) { o_off = HFA384X_OFFSET1_OFF; s_off = HFA384X_SELECT1_OFF; } else { o_off = HFA384X_OFFSET0_OFF; s_off = HFA384X_SELECT0_OFF; } if (hfa384x_wait_offset(dev, o_off)) { prism2_io_debug_error(dev, 7); printk(KERN_DEBUG "%s: hfa384x_setup_bap - timeout before\n", dev->name); ret = -ETIMEDOUT; goto out; } HFA384X_OUTW(id, s_off); HFA384X_OUTW(offset, o_off); if (hfa384x_wait_offset(dev, o_off)) { prism2_io_debug_error(dev, 8); printk(KERN_DEBUG "%s: hfa384x_setup_bap - timeout after\n", dev->name); ret = -ETIMEDOUT; goto out; } #ifndef final_version if (HFA384X_INW(o_off) & HFA384X_OFFSET_ERR) { prism2_io_debug_error(dev, 9); printk(KERN_DEBUG "%s: hfa384x_setup_bap - offset error " "(%d,0x04%x,%d); reg=0x%04x\n", dev->name, bap, id, offset, HFA384X_INW(o_off)); ret = -EINVAL; } #endif out: return ret; } static int hfa384x_get_rid(struct net_device *dev, u16 rid, void *buf, int len, int exact_len) { struct hostap_interface *iface; local_info_t *local; int res, rlen = 0; struct hfa384x_rid_hdr rec; iface = 
netdev_priv(dev); local = iface->local; if (local->no_pri) { printk(KERN_DEBUG "%s: cannot get RID %04x (len=%d) - no PRI " "f/w\n", dev->name, rid, len); return -ENOTTY; /* Well.. not really correct, but return * something unique enough.. */ } if ((local->func->card_present && !local->func->card_present(local)) || local->hw_downloading) return -ENODEV; res = mutex_lock_interruptible(&local->rid_bap_mtx); if (res) return res; res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS, rid, NULL, NULL); if (res) { printk(KERN_DEBUG "%s: hfa384x_get_rid: CMDCODE_ACCESS failed " "(res=%d, rid=%04x, len=%d)\n", dev->name, res, rid, len); mutex_unlock(&local->rid_bap_mtx); return res; } spin_lock_bh(&local->baplock); res = hfa384x_setup_bap(dev, BAP0, rid, 0); if (res) goto unlock; res = hfa384x_from_bap(dev, BAP0, &rec, sizeof(rec)); if (res) goto unlock; if (le16_to_cpu(rec.len) == 0) { /* RID not available */ res = -ENODATA; goto unlock; } rlen = (le16_to_cpu(rec.len) - 1) * 2; if (exact_len && rlen != len) { printk(KERN_DEBUG "%s: hfa384x_get_rid - RID len mismatch: " "rid=0x%04x, len=%d (expected %d)\n", dev->name, rid, rlen, len); res = -ENODATA; } res = hfa384x_from_bap(dev, BAP0, buf, len); unlock: spin_unlock_bh(&local->baplock); mutex_unlock(&local->rid_bap_mtx); if (res) { if (res != -ENODATA) printk(KERN_DEBUG "%s: hfa384x_get_rid (rid=%04x, " "len=%d) - failed - res=%d\n", dev->name, rid, len, res); if (res == -ETIMEDOUT) prism2_hw_reset(dev); return res; } return rlen; } static int hfa384x_set_rid(struct net_device *dev, u16 rid, void *buf, int len) { struct hostap_interface *iface; local_info_t *local; struct hfa384x_rid_hdr rec; int res; iface = netdev_priv(dev); local = iface->local; if (local->no_pri) { printk(KERN_DEBUG "%s: cannot set RID %04x (len=%d) - no PRI " "f/w\n", dev->name, rid, len); return -ENOTTY; /* Well.. not really correct, but return * something unique enough.. 
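 * (this mirrors the equivalent no-PRI check in hfa384x_get_rid() above)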
*/ } if ((local->func->card_present && !local->func->card_present(local)) || local->hw_downloading) return -ENODEV; rec.rid = cpu_to_le16(rid); /* RID len in words and +1 for rec.rid */ rec.len = cpu_to_le16(len / 2 + len % 2 + 1); res = mutex_lock_interruptible(&local->rid_bap_mtx); if (res) return res; spin_lock_bh(&local->baplock); res = hfa384x_setup_bap(dev, BAP0, rid, 0); if (!res) res = hfa384x_to_bap(dev, BAP0, &rec, sizeof(rec)); if (!res) res = hfa384x_to_bap(dev, BAP0, buf, len); spin_unlock_bh(&local->baplock); if (res) { printk(KERN_DEBUG "%s: hfa384x_set_rid (rid=%04x, len=%d) - " "failed - res=%d\n", dev->name, rid, len, res); mutex_unlock(&local->rid_bap_mtx); return res; } res = hfa384x_cmd(dev, HFA384X_CMDCODE_ACCESS_WRITE, rid, NULL, NULL); mutex_unlock(&local->rid_bap_mtx); if (res) { printk(KERN_DEBUG "%s: hfa384x_set_rid: CMDCODE_ACCESS_WRITE " "failed (res=%d, rid=%04x, len=%d)\n", dev->name, res, rid, len); if (res == -ETIMEDOUT) prism2_hw_reset(dev); } return res; } static void hfa384x_disable_interrupts(struct net_device *dev) { /* disable interrupts and clear event status */ HFA384X_OUTW(0, HFA384X_INTEN_OFF); HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF); } static void hfa384x_enable_interrupts(struct net_device *dev) { /* ack pending events and enable interrupts from selected events */ HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF); HFA384X_OUTW(HFA384X_EVENT_MASK, HFA384X_INTEN_OFF); } static void hfa384x_events_no_bap0(struct net_device *dev) { HFA384X_OUTW(HFA384X_EVENT_MASK & ~HFA384X_BAP0_EVENTS, HFA384X_INTEN_OFF); } static void hfa384x_events_all(struct net_device *dev) { HFA384X_OUTW(HFA384X_EVENT_MASK, HFA384X_INTEN_OFF); } static void hfa384x_events_only_cmd(struct net_device *dev) { HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_INTEN_OFF); } static u16 hfa384x_allocate_fid(struct net_device *dev, int len) { u16 fid; unsigned long delay; /* FIX: this could be replace with hfa384x_cmd() if the Alloc event * below would be handled like CmdCompl event (sleep here, wake up from * interrupt handler */ if (hfa384x_cmd_wait(dev, HFA384X_CMDCODE_ALLOC, len)) { printk(KERN_DEBUG "%s: cannot allocate fid, len=%d\n", dev->name, len); return 0xffff; } delay = jiffies + HFA384X_ALLOC_COMPL_TIMEOUT; while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_ALLOC) && time_before(jiffies, delay)) yield(); if (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_ALLOC)) { printk("%s: fid allocate, len=%d - timeout\n", dev->name, len); return 0xffff; } fid = HFA384X_INW(HFA384X_ALLOCFID_OFF); HFA384X_OUTW(HFA384X_EV_ALLOC, HFA384X_EVACK_OFF); return fid; } static int prism2_reset_port(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; int res; iface = netdev_priv(dev); local = iface->local; if (!local->dev_enabled) return 0; res = hfa384x_cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL, NULL); if (res) printk(KERN_DEBUG "%s: reset port failed to disable port\n", dev->name); else { res = hfa384x_cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL, NULL); if (res) printk(KERN_DEBUG "%s: reset port failed to enable " "port\n", dev->name); } /* It looks like at least some STA firmware versions reset * fragmentation threshold back to 2346 after enable command. Restore * the configured value, if it differs from this default. 
*/ if (local->fragm_threshold != 2346 && hostap_set_word(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD, local->fragm_threshold)) { printk(KERN_DEBUG "%s: failed to restore fragmentation " "threshold (%d) after Port0 enable\n", dev->name, local->fragm_threshold); } /* Some firmwares lose antenna selection settings on reset */ (void) hostap_set_antsel(local); return res; } static int prism2_get_version_info(struct net_device *dev, u16 rid, const char *txt) { struct hfa384x_comp_ident comp; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; if (local->no_pri) { /* PRI f/w not yet available - cannot read RIDs */ return -1; } if (hfa384x_get_rid(dev, rid, &comp, sizeof(comp), 1) < 0) { printk(KERN_DEBUG "Could not get RID for component %s\n", txt); return -1; } printk(KERN_INFO "%s: %s: id=0x%02x v%d.%d.%d\n", dev->name, txt, __le16_to_cpu(comp.id), __le16_to_cpu(comp.major), __le16_to_cpu(comp.minor), __le16_to_cpu(comp.variant)); return 0; } static int prism2_setup_rids(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; __le16 tmp; int ret = 0; iface = netdev_priv(dev); local = iface->local; hostap_set_word(dev, HFA384X_RID_TICKTIME, 2000); if (!local->fw_ap) { u16 tmp1 = hostap_get_porttype(local); ret = hostap_set_word(dev, HFA384X_RID_CNFPORTTYPE, tmp1); if (ret) { printk("%s: Port type setting to %d failed\n", dev->name, tmp1); goto fail; } } /* Setting SSID to empty string seems to kill the card in Host AP mode */ if (local->iw_mode != IW_MODE_MASTER || local->essid[0] != '\0') { ret = hostap_set_string(dev, HFA384X_RID_CNFOWNSSID, local->essid); if (ret) { printk("%s: AP own SSID setting failed\n", dev->name); goto fail; } } ret = hostap_set_word(dev, HFA384X_RID_CNFMAXDATALEN, PRISM2_DATA_MAXLEN); if (ret) { printk("%s: MAC data length setting to %d failed\n", dev->name, PRISM2_DATA_MAXLEN); goto fail; } if (hfa384x_get_rid(dev, HFA384X_RID_CHANNELLIST, &tmp, 2, 1) < 0) { printk("%s: Channel list read failed\n", dev->name); ret = -EINVAL; goto fail; } local->channel_mask = le16_to_cpu(tmp); if (local->channel < 1 || local->channel > 14 || !(local->channel_mask & (1 << (local->channel - 1)))) { printk(KERN_WARNING "%s: Channel setting out of range " "(%d)!\n", dev->name, local->channel); ret = -EBUSY; goto fail; } ret = hostap_set_word(dev, HFA384X_RID_CNFOWNCHANNEL, local->channel); if (ret) { printk("%s: Channel setting to %d failed\n", dev->name, local->channel); goto fail; } ret = hostap_set_word(dev, HFA384X_RID_CNFBEACONINT, local->beacon_int); if (ret) { printk("%s: Beacon interval setting to %d failed\n", dev->name, local->beacon_int); /* this may fail with Symbol/Lucent firmware */ if (ret == -ETIMEDOUT) goto fail; } ret = hostap_set_word(dev, HFA384X_RID_CNFOWNDTIMPERIOD, local->dtim_period); if (ret) { printk("%s: DTIM period setting to %d failed\n", dev->name, local->dtim_period); /* this may fail with Symbol/Lucent firmware */ if (ret == -ETIMEDOUT) goto fail; } ret = hostap_set_word(dev, HFA384X_RID_PROMISCUOUSMODE, local->is_promisc); if (ret) printk(KERN_INFO "%s: Setting promiscuous mode (%d) failed\n", dev->name, local->is_promisc); if (!local->fw_ap) { ret = hostap_set_string(dev, HFA384X_RID_CNFDESIREDSSID, local->essid); if (ret) { printk("%s: Desired SSID setting failed\n", dev->name); goto fail; } } /* Setup TXRateControl, defaults to allow use of 1, 2, 5.5, and * 11 Mbps in automatic TX rate fallback and 1 and 2 Mbps as basic * rates */ if (local->tx_rate_control == 0) { 
local->tx_rate_control = HFA384X_RATES_1MBPS | HFA384X_RATES_2MBPS | HFA384X_RATES_5MBPS | HFA384X_RATES_11MBPS; } if (local->basic_rates == 0) local->basic_rates = HFA384X_RATES_1MBPS | HFA384X_RATES_2MBPS; if (!local->fw_ap) { ret = hostap_set_word(dev, HFA384X_RID_TXRATECONTROL, local->tx_rate_control); if (ret) { printk("%s: TXRateControl setting to %d failed\n", dev->name, local->tx_rate_control); goto fail; } ret = hostap_set_word(dev, HFA384X_RID_CNFSUPPORTEDRATES, local->tx_rate_control); if (ret) { printk("%s: cnfSupportedRates setting to %d failed\n", dev->name, local->tx_rate_control); } ret = hostap_set_word(dev, HFA384X_RID_CNFBASICRATES, local->basic_rates); if (ret) { printk("%s: cnfBasicRates setting to %d failed\n", dev->name, local->basic_rates); } ret = hostap_set_word(dev, HFA384X_RID_CREATEIBSS, 1); if (ret) { printk("%s: Create IBSS setting to 1 failed\n", dev->name); } } if (local->name_set) (void) hostap_set_string(dev, HFA384X_RID_CNFOWNNAME, local->name); if (hostap_set_encryption(local)) { printk(KERN_INFO "%s: could not configure encryption\n", dev->name); } (void) hostap_set_antsel(local); if (hostap_set_roaming(local)) { printk(KERN_INFO "%s: could not set host roaming\n", dev->name); } if (local->sta_fw_ver >= PRISM2_FW_VER(1,6,3) && hostap_set_word(dev, HFA384X_RID_CNFENHSECURITY, local->enh_sec)) printk(KERN_INFO "%s: cnfEnhSecurity setting to 0x%x failed\n", dev->name, local->enh_sec); /* 32-bit tallies were added in STA f/w 0.8.0, but they were apparently * not working correctly (last seven counters report bogus values). * This has been fixed in 0.8.2, so enable 32-bit tallies only * beginning with that firmware version. Another bug fix for 32-bit * tallies in 1.4.0; should 16-bit tallies be used for some other * versions, too? 
*/ if (local->sta_fw_ver >= PRISM2_FW_VER(0,8,2)) { if (hostap_set_word(dev, HFA384X_RID_CNFTHIRTY2TALLY, 1)) { printk(KERN_INFO "%s: cnfThirty2Tally setting " "failed\n", dev->name); local->tallies32 = 0; } else local->tallies32 = 1; } else local->tallies32 = 0; hostap_set_auth_algs(local); if (hostap_set_word(dev, HFA384X_RID_FRAGMENTATIONTHRESHOLD, local->fragm_threshold)) { printk(KERN_INFO "%s: setting FragmentationThreshold to %d " "failed\n", dev->name, local->fragm_threshold); } if (hostap_set_word(dev, HFA384X_RID_RTSTHRESHOLD, local->rts_threshold)) { printk(KERN_INFO "%s: setting RTSThreshold to %d failed\n", dev->name, local->rts_threshold); } if (local->manual_retry_count >= 0 && hostap_set_word(dev, HFA384X_RID_CNFALTRETRYCOUNT, local->manual_retry_count)) { printk(KERN_INFO "%s: setting cnfAltRetryCount to %d failed\n", dev->name, local->manual_retry_count); } if (local->sta_fw_ver >= PRISM2_FW_VER(1,3,1) && hfa384x_get_rid(dev, HFA384X_RID_CNFDBMADJUST, &tmp, 2, 1) == 2) { local->rssi_to_dBm = le16_to_cpu(tmp); } if (local->sta_fw_ver >= PRISM2_FW_VER(1,7,0) && local->wpa && hostap_set_word(dev, HFA384X_RID_SSNHANDLINGMODE, 1)) { printk(KERN_INFO "%s: setting ssnHandlingMode to 1 failed\n", dev->name); } if (local->sta_fw_ver >= PRISM2_FW_VER(1,7,0) && local->generic_elem && hfa384x_set_rid(dev, HFA384X_RID_GENERICELEMENT, local->generic_elem, local->generic_elem_len)) { printk(KERN_INFO "%s: setting genericElement failed\n", dev->name); } fail: return ret; } static int prism2_hw_init(struct net_device *dev, int initial) { struct hostap_interface *iface; local_info_t *local; int ret, first = 1; unsigned long start, delay; PDEBUG(DEBUG_FLOW, "prism2_hw_init()\n"); iface = netdev_priv(dev); local = iface->local; clear_bit(HOSTAP_BITS_TRANSMIT, &local->bits); init: /* initialize HFA 384x */ ret = hfa384x_cmd_no_wait(dev, HFA384X_CMDCODE_INIT, 0); if (ret) { printk(KERN_INFO "%s: first command failed - assuming card " "does not have primary firmware\n", dev_info); } if (first && (HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD)) { /* EvStat has Cmd bit set in some cases, so retry once if no * wait was needed */ HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF); printk(KERN_DEBUG "%s: init command completed too quickly - " "retrying\n", dev->name); first = 0; goto init; } start = jiffies; delay = jiffies + HFA384X_INIT_TIMEOUT; while (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD) && time_before(jiffies, delay)) yield(); if (!(HFA384X_INW(HFA384X_EVSTAT_OFF) & HFA384X_EV_CMD)) { printk(KERN_DEBUG "%s: assuming no Primary image in " "flash - card initialization not completed\n", dev_info); local->no_pri = 1; #ifdef PRISM2_DOWNLOAD_SUPPORT if (local->sram_type == -1) local->sram_type = prism2_get_ram_size(local); #endif /* PRISM2_DOWNLOAD_SUPPORT */ return 1; } local->no_pri = 0; printk(KERN_DEBUG "prism2_hw_init: initialized in %lu ms\n", (jiffies - start) * 1000 / HZ); HFA384X_OUTW(HFA384X_EV_CMD, HFA384X_EVACK_OFF); return 0; } static int prism2_hw_init2(struct net_device *dev, int initial) { struct hostap_interface *iface; local_info_t *local; int i; iface = netdev_priv(dev); local = iface->local; #ifdef PRISM2_DOWNLOAD_SUPPORT kfree(local->pda); if (local->no_pri) local->pda = NULL; else local->pda = prism2_read_pda(dev); #endif /* PRISM2_DOWNLOAD_SUPPORT */ hfa384x_disable_interrupts(dev); #ifndef final_version HFA384X_OUTW(HFA384X_MAGIC, HFA384X_SWSUPPORT0_OFF); if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != HFA384X_MAGIC) { printk("SWSUPPORT0 write/read failed: %04X != 
%04X\n", HFA384X_INW(HFA384X_SWSUPPORT0_OFF), HFA384X_MAGIC); goto failed; } #endif if (initial || local->pri_only) { hfa384x_events_only_cmd(dev); /* get card version information */ if (prism2_get_version_info(dev, HFA384X_RID_NICID, "NIC") || prism2_get_version_info(dev, HFA384X_RID_PRIID, "PRI")) { hfa384x_disable_interrupts(dev); goto failed; } if (prism2_get_version_info(dev, HFA384X_RID_STAID, "STA")) { printk(KERN_DEBUG "%s: Failed to read STA f/w version " "- only Primary f/w present\n", dev->name); local->pri_only = 1; return 0; } local->pri_only = 0; hfa384x_disable_interrupts(dev); } /* FIX: could convert allocate_fid to use sleeping CmdCompl wait and * enable interrupts before this. This would also require some sort of * sleeping AllocEv waiting */ /* allocate TX FIDs */ local->txfid_len = PRISM2_TXFID_LEN; for (i = 0; i < PRISM2_TXFID_COUNT; i++) { local->txfid[i] = hfa384x_allocate_fid(dev, local->txfid_len); if (local->txfid[i] == 0xffff && local->txfid_len > 1600) { local->txfid[i] = hfa384x_allocate_fid(dev, 1600); if (local->txfid[i] != 0xffff) { printk(KERN_DEBUG "%s: Using shorter TX FID " "(1600 bytes)\n", dev->name); local->txfid_len = 1600; } } if (local->txfid[i] == 0xffff) goto failed; local->intransmitfid[i] = PRISM2_TXFID_EMPTY; } hfa384x_events_only_cmd(dev); if (initial) { u8 addr[ETH_ALEN] = {}; struct list_head *ptr; prism2_check_sta_fw_version(local); if (hfa384x_get_rid(dev, HFA384X_RID_CNFOWNMACADDR, addr, ETH_ALEN, 1) < 0) { printk("%s: could not get own MAC address\n", dev->name); } eth_hw_addr_set(dev, addr); list_for_each(ptr, &local->hostap_interfaces) { iface = list_entry(ptr, struct hostap_interface, list); eth_hw_addr_inherit(iface->dev, dev); } } else if (local->fw_ap) prism2_check_sta_fw_version(local); prism2_setup_rids(dev); /* MAC is now configured, but port 0 is not yet enabled */ return 0; failed: if (!local->no_pri) printk(KERN_WARNING "%s: Initialization failed\n", dev_info); return 1; } static int prism2_hw_enable(struct net_device *dev, int initial) { struct hostap_interface *iface; local_info_t *local; int was_resetting; iface = netdev_priv(dev); local = iface->local; was_resetting = local->hw_resetting; if (hfa384x_cmd(dev, HFA384X_CMDCODE_ENABLE, 0, NULL, NULL)) { printk("%s: MAC port 0 enabling failed\n", dev->name); return 1; } local->hw_ready = 1; local->hw_reset_tries = 0; local->hw_resetting = 0; hfa384x_enable_interrupts(dev); /* at least D-Link DWL-650 seems to require additional port reset * before it starts acting as an AP, so reset port automatically * here just in case */ if (initial && prism2_reset_port(dev)) { printk("%s: MAC port 0 resetting failed\n", dev->name); return 1; } if (was_resetting && netif_queue_stopped(dev)) { /* If hw_reset() was called during pending transmit, netif * queue was stopped. Wake it up now since the wlan card has * been resetted. */ netif_wake_queue(dev); } return 0; } static int prism2_hw_config(struct net_device *dev, int initial) { struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; if (local->hw_downloading) return 1; if (prism2_hw_init(dev, initial)) { return local->no_pri ? 0 : 1; } if (prism2_hw_init2(dev, initial)) return 1; /* Enable firmware if secondary image is loaded and at least one of the * netdevices is up. 
*/ if (!local->pri_only && (initial == 0 || (initial == 2 && local->num_dev_open > 0))) { if (!local->dev_enabled) prism2_callback(local, PRISM2_CALLBACK_ENABLE); local->dev_enabled = 1; return prism2_hw_enable(dev, initial); } return 0; } static void prism2_hw_shutdown(struct net_device *dev, int no_disable) { struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; /* Allow only command completion events during disable */ hfa384x_events_only_cmd(dev); local->hw_ready = 0; if (local->dev_enabled) prism2_callback(local, PRISM2_CALLBACK_DISABLE); local->dev_enabled = 0; if (local->func->card_present && !local->func->card_present(local)) { printk(KERN_DEBUG "%s: card already removed or not configured " "during shutdown\n", dev->name); return; } if ((no_disable & HOSTAP_HW_NO_DISABLE) == 0 && hfa384x_cmd(dev, HFA384X_CMDCODE_DISABLE, 0, NULL, NULL)) printk(KERN_WARNING "%s: Shutdown failed\n", dev_info); hfa384x_disable_interrupts(dev); if (no_disable & HOSTAP_HW_ENABLE_CMDCOMPL) hfa384x_events_only_cmd(dev); else prism2_clear_cmd_queue(local); } static void prism2_hw_reset(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; #if 0 static long last_reset = 0; /* do not reset card more than once per second to avoid ending up in a * busy loop resetting the card */ if (time_before_eq(jiffies, last_reset + HZ)) return; last_reset = jiffies; #endif iface = netdev_priv(dev); local = iface->local; if (local->hw_downloading) return; if (local->hw_resetting) { printk(KERN_WARNING "%s: %s: already resetting card - " "ignoring reset request\n", dev_info, dev->name); return; } local->hw_reset_tries++; if (local->hw_reset_tries > 10) { printk(KERN_WARNING "%s: too many reset tries, skipping\n", dev->name); return; } printk(KERN_WARNING "%s: %s: resetting card\n", dev_info, dev->name); hfa384x_disable_interrupts(dev); local->hw_resetting = 1; if (local->func->cor_sreset) { /* Host system seems to hang in some cases with high traffic * load or shared interrupts during COR sreset. Disable shared * interrupts during reset to avoid these crashes. COS sreset * takes quite a long time, so it is unfortunate that this * seems to be needed. Anyway, I do not know of any better way * of avoiding the crash. 
*/ disable_irq(dev->irq); local->func->cor_sreset(local); enable_irq(dev->irq); } prism2_hw_shutdown(dev, 1); prism2_hw_config(dev, 0); local->hw_resetting = 0; #ifdef PRISM2_DOWNLOAD_SUPPORT if (local->dl_pri) { printk(KERN_DEBUG "%s: persistent download of primary " "firmware\n", dev->name); if (prism2_download_genesis(local, local->dl_pri) < 0) printk(KERN_WARNING "%s: download (PRI) failed\n", dev->name); } if (local->dl_sec) { printk(KERN_DEBUG "%s: persistent download of secondary " "firmware\n", dev->name); if (prism2_download_volatile(local, local->dl_sec) < 0) printk(KERN_WARNING "%s: download (SEC) failed\n", dev->name); } #endif /* PRISM2_DOWNLOAD_SUPPORT */ /* TODO: restore beacon TIM bits for STAs that have buffered frames */ } static void prism2_schedule_reset(local_info_t *local) { schedule_work(&local->reset_queue); } /* Called only as scheduled task after noticing card timeout in interrupt * context */ static void handle_reset_queue(struct work_struct *work) { local_info_t *local = container_of(work, local_info_t, reset_queue); printk(KERN_DEBUG "%s: scheduled card reset\n", local->dev->name); prism2_hw_reset(local->dev); if (netif_queue_stopped(local->dev)) { int i; for (i = 0; i < PRISM2_TXFID_COUNT; i++) if (local->intransmitfid[i] == PRISM2_TXFID_EMPTY) { PDEBUG(DEBUG_EXTRA, "prism2_tx_timeout: " "wake up queue\n"); netif_wake_queue(local->dev); break; } } } static int prism2_get_txfid_idx(local_info_t *local) { int idx, end; unsigned long flags; spin_lock_irqsave(&local->txfidlock, flags); end = idx = local->next_txfid; do { if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY) { local->intransmitfid[idx] = PRISM2_TXFID_RESERVED; spin_unlock_irqrestore(&local->txfidlock, flags); return idx; } idx++; if (idx >= PRISM2_TXFID_COUNT) idx = 0; } while (idx != end); spin_unlock_irqrestore(&local->txfidlock, flags); PDEBUG(DEBUG_EXTRA2, "prism2_get_txfid_idx: no room in txfid buf: " "packet dropped\n"); local->dev->stats.tx_dropped++; return -1; } /* Called only from hardware IRQ */ static void prism2_transmit_cb(struct net_device *dev, long context, u16 resp0, u16 res) { struct hostap_interface *iface; local_info_t *local; int idx = (int) context; iface = netdev_priv(dev); local = iface->local; if (res) { printk(KERN_DEBUG "%s: prism2_transmit_cb - res=0x%02x\n", dev->name, res); return; } if (idx < 0 || idx >= PRISM2_TXFID_COUNT) { printk(KERN_DEBUG "%s: prism2_transmit_cb called with invalid " "idx=%d\n", dev->name, idx); return; } if (!test_and_clear_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) { printk(KERN_DEBUG "%s: driver bug: prism2_transmit_cb called " "with no pending transmit\n", dev->name); } if (netif_queue_stopped(dev)) { /* ready for next TX, so wake up queue that was stopped in * prism2_transmit() */ netif_wake_queue(dev); } spin_lock(&local->txfidlock); /* With reclaim, Resp0 contains new txfid for transmit; the old txfid * will be automatically allocated for the next TX frame */ local->intransmitfid[idx] = resp0; PDEBUG(DEBUG_FID, "%s: prism2_transmit_cb: txfid[%d]=0x%04x, " "resp0=0x%04x, transmit_txfid=0x%04x\n", dev->name, idx, local->txfid[idx], resp0, local->intransmitfid[local->next_txfid]); idx++; if (idx >= PRISM2_TXFID_COUNT) idx = 0; local->next_txfid = idx; /* check if all TX buffers are occupied */ do { if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY) { spin_unlock(&local->txfidlock); return; } idx++; if (idx >= PRISM2_TXFID_COUNT) idx = 0; } while (idx != local->next_txfid); spin_unlock(&local->txfidlock); /* no empty TX buffers, stop queue */ 
netif_stop_queue(dev); } /* Called only from software IRQ if PCI bus master is not used (with bus master * this can be called both from software and hardware IRQ) */ static int prism2_transmit(struct net_device *dev, int idx) { struct hostap_interface *iface; local_info_t *local; int res; iface = netdev_priv(dev); local = iface->local; /* The driver tries to stop netif queue so that there would not be * more than one attempt to transmit frames going on; check that this * is really the case */ if (test_and_set_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) { printk(KERN_DEBUG "%s: driver bug - prism2_transmit() called " "when previous TX was pending\n", dev->name); return -1; } /* stop the queue for the time that transmit is pending */ netif_stop_queue(dev); /* transmit packet */ res = hfa384x_cmd_callback( dev, HFA384X_CMDCODE_TRANSMIT | HFA384X_CMD_TX_RECLAIM, local->txfid[idx], prism2_transmit_cb, (long) idx); if (res) { printk(KERN_DEBUG "%s: prism2_transmit: CMDCODE_TRANSMIT " "failed (res=%d)\n", dev->name, res); dev->stats.tx_dropped++; netif_wake_queue(dev); return -1; } netif_trans_update(dev); /* Since we did not wait for command completion, the card continues * to process on the background and we will finish handling when * command completion event is handled (prism2_cmd_ev() function) */ return 0; } /* Send IEEE 802.11 frame (convert the header into Prism2 TX descriptor and * send the payload with this descriptor) */ /* Called only from software IRQ */ static int prism2_tx_80211(struct sk_buff *skb, struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; struct hfa384x_tx_frame txdesc; struct hostap_skb_tx_data *meta; int hdr_len, data_len, idx, res, ret = -1; u16 tx_control; iface = netdev_priv(dev); local = iface->local; meta = (struct hostap_skb_tx_data *) skb->cb; prism2_callback(local, PRISM2_CALLBACK_TX_START); if ((local->func->card_present && !local->func->card_present(local)) || !local->hw_ready || local->hw_downloading || local->pri_only) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: prism2_tx_80211: hw not ready -" " skipping\n", dev->name); } goto fail; } memset(&txdesc, 0, sizeof(txdesc)); /* skb->data starts with txdesc->frame_control */ hdr_len = sizeof(txdesc.header); BUILD_BUG_ON(hdr_len != 24); skb_copy_from_linear_data(skb, &txdesc.header, hdr_len); if (ieee80211_is_data(txdesc.frame_control) && ieee80211_has_a4(txdesc.frame_control) && skb->len >= 30) { /* Addr4 */ skb_copy_from_linear_data_offset(skb, hdr_len, txdesc.addr4, ETH_ALEN); hdr_len += ETH_ALEN; } tx_control = local->tx_control; if (meta->tx_cb_idx) { tx_control |= HFA384X_TX_CTRL_TX_OK; txdesc.sw_support = cpu_to_le32(meta->tx_cb_idx); } txdesc.tx_control = cpu_to_le16(tx_control); txdesc.tx_rate = meta->rate; data_len = skb->len - hdr_len; txdesc.data_len = cpu_to_le16(data_len); txdesc.len = cpu_to_be16(data_len); idx = prism2_get_txfid_idx(local); if (idx < 0) goto fail; if (local->frame_dump & PRISM2_DUMP_TX_HDR) hostap_dump_tx_header(dev->name, &txdesc); spin_lock(&local->baplock); res = hfa384x_setup_bap(dev, BAP0, local->txfid[idx], 0); if (!res) res = hfa384x_to_bap(dev, BAP0, &txdesc, sizeof(txdesc)); if (!res) res = hfa384x_to_bap(dev, BAP0, skb->data + hdr_len, skb->len - hdr_len); spin_unlock(&local->baplock); if (!res) res = prism2_transmit(dev, idx); if (res) { printk(KERN_DEBUG "%s: prism2_tx_80211 - to BAP0 failed\n", dev->name); local->intransmitfid[idx] = PRISM2_TXFID_EMPTY; schedule_work(&local->reset_queue); goto fail; } ret = 0; fail: prism2_callback(local, 
PRISM2_CALLBACK_TX_END); return ret; } /* Some SMP systems have reported number of odd errors with hostap_pci. fid * register has changed values between consecutive reads for an unknown reason. * This should really not happen, so more debugging is needed. This test * version is a bit slower, but it will detect most of such register changes * and will try to get the correct fid eventually. */ #define EXTRA_FID_READ_TESTS static u16 prism2_read_fid_reg(struct net_device *dev, u16 reg) { #ifdef EXTRA_FID_READ_TESTS u16 val, val2, val3; int i; for (i = 0; i < 10; i++) { val = HFA384X_INW(reg); val2 = HFA384X_INW(reg); val3 = HFA384X_INW(reg); if (val == val2 && val == val3) return val; printk(KERN_DEBUG "%s: detected fid change (try=%d, reg=%04x):" " %04x %04x %04x\n", dev->name, i, reg, val, val2, val3); if ((val == val2 || val == val3) && val != 0) return val; if (val2 == val3 && val2 != 0) return val2; } printk(KERN_WARNING "%s: Uhhuh.. could not read good fid from reg " "%04x (%04x %04x %04x)\n", dev->name, reg, val, val2, val3); return val; #else /* EXTRA_FID_READ_TESTS */ return HFA384X_INW(reg); #endif /* EXTRA_FID_READ_TESTS */ } /* Called only as a tasklet (software IRQ) */ static void prism2_rx(local_info_t *local) { struct net_device *dev = local->dev; int res, rx_pending = 0; u16 len, hdr_len, rxfid, status, macport; struct hfa384x_rx_frame rxdesc; struct sk_buff *skb = NULL; prism2_callback(local, PRISM2_CALLBACK_RX_START); rxfid = prism2_read_fid_reg(dev, HFA384X_RXFID_OFF); #ifndef final_version if (rxfid == 0) { rxfid = HFA384X_INW(HFA384X_RXFID_OFF); printk(KERN_DEBUG "prism2_rx: rxfid=0 (next 0x%04x)\n", rxfid); if (rxfid == 0) { schedule_work(&local->reset_queue); goto rx_dropped; } /* try to continue with the new rxfid value */ } #endif spin_lock(&local->baplock); res = hfa384x_setup_bap(dev, BAP0, rxfid, 0); if (!res) res = hfa384x_from_bap(dev, BAP0, &rxdesc, sizeof(rxdesc)); if (res) { spin_unlock(&local->baplock); printk(KERN_DEBUG "%s: copy from BAP0 failed %d\n", dev->name, res); if (res == -ETIMEDOUT) { schedule_work(&local->reset_queue); } goto rx_dropped; } len = le16_to_cpu(rxdesc.data_len); hdr_len = sizeof(rxdesc); status = le16_to_cpu(rxdesc.status); macport = (status >> 8) & 0x07; /* Drop frames with too large reported payload length. Monitor mode * seems to sometimes pass frames (e.g., ctrl::ack) with signed and * negative value, so allow also values 65522 .. 65534 (-14 .. 
-2) for * macport 7 */ if (len > PRISM2_DATA_MAXLEN + 8 /* WEP */) { if (macport == 7 && local->iw_mode == IW_MODE_MONITOR) { if (len >= (u16) -14) { hdr_len -= 65535 - len; hdr_len--; } len = 0; } else { spin_unlock(&local->baplock); printk(KERN_DEBUG "%s: Received frame with invalid " "length 0x%04x\n", dev->name, len); hostap_dump_rx_header(dev->name, &rxdesc); goto rx_dropped; } } skb = dev_alloc_skb(len + hdr_len); if (!skb) { spin_unlock(&local->baplock); printk(KERN_DEBUG "%s: RX failed to allocate skb\n", dev->name); goto rx_dropped; } skb->dev = dev; skb_put_data(skb, &rxdesc, hdr_len); if (len > 0) res = hfa384x_from_bap(dev, BAP0, skb_put(skb, len), len); spin_unlock(&local->baplock); if (res) { printk(KERN_DEBUG "%s: RX failed to read " "frame data\n", dev->name); goto rx_dropped; } skb_queue_tail(&local->rx_list, skb); tasklet_schedule(&local->rx_tasklet); rx_exit: prism2_callback(local, PRISM2_CALLBACK_RX_END); if (!rx_pending) { HFA384X_OUTW(HFA384X_EV_RX, HFA384X_EVACK_OFF); } return; rx_dropped: dev->stats.rx_dropped++; if (skb) dev_kfree_skb(skb); goto rx_exit; } /* Called only as a tasklet (software IRQ) */ static void hostap_rx_skb(local_info_t *local, struct sk_buff *skb) { struct hfa384x_rx_frame *rxdesc; struct net_device *dev = skb->dev; struct hostap_80211_rx_status stats; int hdrlen, rx_hdrlen; rx_hdrlen = sizeof(*rxdesc); if (skb->len < sizeof(*rxdesc)) { /* Allow monitor mode to receive shorter frames */ if (local->iw_mode == IW_MODE_MONITOR && skb->len >= sizeof(*rxdesc) - 30) { rx_hdrlen = skb->len; } else { dev_kfree_skb(skb); return; } } rxdesc = (struct hfa384x_rx_frame *) skb->data; if (local->frame_dump & PRISM2_DUMP_RX_HDR && skb->len >= sizeof(*rxdesc)) hostap_dump_rx_header(dev->name, rxdesc); if (le16_to_cpu(rxdesc->status) & HFA384X_RX_STATUS_FCSERR && (!local->monitor_allow_fcserr || local->iw_mode != IW_MODE_MONITOR)) goto drop; if (skb->len > PRISM2_DATA_MAXLEN) { printk(KERN_DEBUG "%s: RX: len(%d) > MAX(%d)\n", dev->name, skb->len, PRISM2_DATA_MAXLEN); goto drop; } stats.mac_time = le32_to_cpu(rxdesc->time); stats.signal = rxdesc->signal - local->rssi_to_dBm; stats.noise = rxdesc->silence - local->rssi_to_dBm; stats.rate = rxdesc->rate; /* Convert Prism2 RX structure into IEEE 802.11 header */ hdrlen = hostap_80211_get_hdrlen(rxdesc->frame_control); if (hdrlen > rx_hdrlen) hdrlen = rx_hdrlen; memmove(skb_pull(skb, rx_hdrlen - hdrlen), &rxdesc->frame_control, hdrlen); hostap_80211_rx(dev, skb, &stats); return; drop: dev_kfree_skb(skb); } /* Called only as a tasklet (software IRQ) */ static void hostap_rx_tasklet(struct tasklet_struct *t) { local_info_t *local = from_tasklet(local, t, rx_tasklet); struct sk_buff *skb; while ((skb = skb_dequeue(&local->rx_list)) != NULL) hostap_rx_skb(local, skb); } /* Called only from hardware IRQ */ static void prism2_alloc_ev(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; int idx; u16 fid; iface = netdev_priv(dev); local = iface->local; fid = prism2_read_fid_reg(dev, HFA384X_ALLOCFID_OFF); PDEBUG(DEBUG_FID, "FID: interrupt: ALLOC - fid=0x%04x\n", fid); spin_lock(&local->txfidlock); idx = local->next_alloc; do { if (local->txfid[idx] == fid) { PDEBUG(DEBUG_FID, "FID: found matching txfid[%d]\n", idx); #ifndef final_version if (local->intransmitfid[idx] == PRISM2_TXFID_EMPTY) printk("Already released txfid found at idx " "%d\n", idx); if (local->intransmitfid[idx] == PRISM2_TXFID_RESERVED) printk("Already reserved txfid found at idx " "%d\n", idx); #endif local->intransmitfid[idx] = 
PRISM2_TXFID_EMPTY; idx++; local->next_alloc = idx >= PRISM2_TXFID_COUNT ? 0 : idx; if (!test_bit(HOSTAP_BITS_TRANSMIT, &local->bits) && netif_queue_stopped(dev)) netif_wake_queue(dev); spin_unlock(&local->txfidlock); return; } idx++; if (idx >= PRISM2_TXFID_COUNT) idx = 0; } while (idx != local->next_alloc); printk(KERN_WARNING "%s: could not find matching txfid (0x%04x, new " "read 0x%04x) for alloc event\n", dev->name, fid, HFA384X_INW(HFA384X_ALLOCFID_OFF)); printk(KERN_DEBUG "TXFIDs:"); for (idx = 0; idx < PRISM2_TXFID_COUNT; idx++) printk(" %04x[%04x]", local->txfid[idx], local->intransmitfid[idx]); printk("\n"); spin_unlock(&local->txfidlock); /* FIX: should probably schedule reset; reference to one txfid was lost * completely.. Bad things will happen if we run out of txfids * Actually, this will cause netdev watchdog to notice TX timeout and * then card reset after all txfids have been leaked. */ } /* Called only as a tasklet (software IRQ) */ static void hostap_tx_callback(local_info_t *local, struct hfa384x_tx_frame *txdesc, int ok, char *payload) { u16 sw_support, hdrlen, len; struct sk_buff *skb; struct hostap_tx_callback_info *cb; /* Make sure that frame was from us. */ if (!ether_addr_equal(txdesc->addr2, local->dev->dev_addr)) { printk(KERN_DEBUG "%s: TX callback - foreign frame\n", local->dev->name); return; } sw_support = le32_to_cpu(txdesc->sw_support); spin_lock(&local->lock); cb = local->tx_callback; while (cb != NULL && cb->idx != sw_support) cb = cb->next; spin_unlock(&local->lock); if (cb == NULL) { printk(KERN_DEBUG "%s: could not find TX callback (idx %d)\n", local->dev->name, sw_support); return; } hdrlen = hostap_80211_get_hdrlen(txdesc->frame_control); len = le16_to_cpu(txdesc->data_len); skb = dev_alloc_skb(hdrlen + len); if (skb == NULL) { printk(KERN_DEBUG "%s: hostap_tx_callback failed to allocate " "skb\n", local->dev->name); return; } skb_put_data(skb, (void *)&txdesc->frame_control, hdrlen); if (payload) skb_put_data(skb, payload, len); skb->dev = local->dev; skb_reset_mac_header(skb); cb->func(skb, ok, cb->data); } /* Called only as a tasklet (software IRQ) */ static int hostap_tx_compl_read(local_info_t *local, int error, struct hfa384x_tx_frame *txdesc, char **payload) { u16 fid, len; int res, ret = 0; struct net_device *dev = local->dev; fid = prism2_read_fid_reg(dev, HFA384X_TXCOMPLFID_OFF); PDEBUG(DEBUG_FID, "interrupt: TX (err=%d) - fid=0x%04x\n", fid, error); spin_lock(&local->baplock); res = hfa384x_setup_bap(dev, BAP0, fid, 0); if (!res) res = hfa384x_from_bap(dev, BAP0, txdesc, sizeof(*txdesc)); if (res) { PDEBUG(DEBUG_EXTRA, "%s: TX (err=%d) - fid=0x%04x - could not " "read txdesc\n", dev->name, error, fid); if (res == -ETIMEDOUT) { schedule_work(&local->reset_queue); } ret = -1; goto fail; } if (txdesc->sw_support) { len = le16_to_cpu(txdesc->data_len); if (len < PRISM2_DATA_MAXLEN) { *payload = kmalloc(len, GFP_ATOMIC); if (*payload == NULL || hfa384x_from_bap(dev, BAP0, *payload, len)) { PDEBUG(DEBUG_EXTRA, "%s: could not read TX " "frame payload\n", dev->name); kfree(*payload); *payload = NULL; ret = -1; goto fail; } } } fail: spin_unlock(&local->baplock); return ret; } /* Called only as a tasklet (software IRQ) */ static void prism2_tx_ev(local_info_t *local) { struct net_device *dev = local->dev; char *payload = NULL; struct hfa384x_tx_frame txdesc; if (hostap_tx_compl_read(local, 0, &txdesc, &payload)) goto fail; if (local->frame_dump & PRISM2_DUMP_TX_HDR) { PDEBUG(DEBUG_EXTRA, "%s: TX - status=0x%04x " "retry_count=%d tx_rate=%d 
seq_ctrl=%d " "duration_id=%d\n", dev->name, le16_to_cpu(txdesc.status), txdesc.retry_count, txdesc.tx_rate, le16_to_cpu(txdesc.seq_ctrl), le16_to_cpu(txdesc.duration_id)); } if (txdesc.sw_support) hostap_tx_callback(local, &txdesc, 1, payload); kfree(payload); fail: HFA384X_OUTW(HFA384X_EV_TX, HFA384X_EVACK_OFF); } /* Called only as a tasklet (software IRQ) */ static void hostap_sta_tx_exc_tasklet(struct tasklet_struct *t) { local_info_t *local = from_tasklet(local, t, sta_tx_exc_tasklet); struct sk_buff *skb; while ((skb = skb_dequeue(&local->sta_tx_exc_list)) != NULL) { struct hfa384x_tx_frame *txdesc = (struct hfa384x_tx_frame *) skb->data; if (skb->len >= sizeof(*txdesc)) { /* Convert Prism2 RX structure into IEEE 802.11 header */ int hdrlen = hostap_80211_get_hdrlen(txdesc->frame_control); memmove(skb_pull(skb, sizeof(*txdesc) - hdrlen), &txdesc->frame_control, hdrlen); hostap_handle_sta_tx_exc(local, skb); } dev_kfree_skb(skb); } } /* Called only as a tasklet (software IRQ) */ static void prism2_txexc(local_info_t *local) { struct net_device *dev = local->dev; u16 status, fc; int show_dump, res; char *payload = NULL; struct hfa384x_tx_frame txdesc; show_dump = local->frame_dump & PRISM2_DUMP_TXEXC_HDR; dev->stats.tx_errors++; res = hostap_tx_compl_read(local, 1, &txdesc, &payload); HFA384X_OUTW(HFA384X_EV_TXEXC, HFA384X_EVACK_OFF); if (res) return; status = le16_to_cpu(txdesc.status); /* We produce a TXDROP event only for retry or lifetime * exceeded, because that's the only status that really mean * that this particular node went away. * Other errors means that *we* screwed up. - Jean II */ if (status & (HFA384X_TX_STATUS_RETRYERR | HFA384X_TX_STATUS_AGEDERR)) { union iwreq_data wrqu; /* Copy 802.11 dest address. */ memcpy(wrqu.addr.sa_data, txdesc.addr1, ETH_ALEN); wrqu.addr.sa_family = ARPHRD_ETHER; wireless_send_event(dev, IWEVTXDROP, &wrqu, NULL); } else show_dump = 1; if (local->iw_mode == IW_MODE_MASTER || local->iw_mode == IW_MODE_REPEAT || local->wds_type & HOSTAP_WDS_AP_CLIENT) { struct sk_buff *skb; skb = dev_alloc_skb(sizeof(txdesc)); if (skb) { skb_put_data(skb, &txdesc, sizeof(txdesc)); skb_queue_tail(&local->sta_tx_exc_list, skb); tasklet_schedule(&local->sta_tx_exc_tasklet); } } if (txdesc.sw_support) hostap_tx_callback(local, &txdesc, 0, payload); kfree(payload); if (!show_dump) return; PDEBUG(DEBUG_EXTRA, "%s: TXEXC - status=0x%04x (%s%s%s%s)" " tx_control=%04x\n", dev->name, status, status & HFA384X_TX_STATUS_RETRYERR ? "[RetryErr]" : "", status & HFA384X_TX_STATUS_AGEDERR ? "[AgedErr]" : "", status & HFA384X_TX_STATUS_DISCON ? "[Discon]" : "", status & HFA384X_TX_STATUS_FORMERR ? "[FormErr]" : "", le16_to_cpu(txdesc.tx_control)); fc = le16_to_cpu(txdesc.frame_control); PDEBUG(DEBUG_EXTRA, " retry_count=%d tx_rate=%d fc=0x%04x " "(%s%s%s::%d%s%s)\n", txdesc.retry_count, txdesc.tx_rate, fc, ieee80211_is_mgmt(txdesc.frame_control) ? "Mgmt" : "", ieee80211_is_ctl(txdesc.frame_control) ? "Ctrl" : "", ieee80211_is_data(txdesc.frame_control) ? "Data" : "", (fc & IEEE80211_FCTL_STYPE) >> 4, ieee80211_has_tods(txdesc.frame_control) ? " ToDS" : "", ieee80211_has_fromds(txdesc.frame_control) ? 
" FromDS" : ""); PDEBUG(DEBUG_EXTRA, " A1=%pM A2=%pM A3=%pM A4=%pM\n", txdesc.addr1, txdesc.addr2, txdesc.addr3, txdesc.addr4); } /* Called only as a tasklet (software IRQ) */ static void hostap_info_tasklet(struct tasklet_struct *t) { local_info_t *local = from_tasklet(local, t, info_tasklet); struct sk_buff *skb; while ((skb = skb_dequeue(&local->info_list)) != NULL) { hostap_info_process(local, skb); dev_kfree_skb(skb); } } /* Called only as a tasklet (software IRQ) */ static void prism2_info(local_info_t *local) { struct net_device *dev = local->dev; u16 fid; int res, left; struct hfa384x_info_frame info; struct sk_buff *skb; fid = HFA384X_INW(HFA384X_INFOFID_OFF); spin_lock(&local->baplock); res = hfa384x_setup_bap(dev, BAP0, fid, 0); if (!res) res = hfa384x_from_bap(dev, BAP0, &info, sizeof(info)); if (res) { spin_unlock(&local->baplock); printk(KERN_DEBUG "Could not get info frame (fid=0x%04x)\n", fid); if (res == -ETIMEDOUT) { schedule_work(&local->reset_queue); } goto out; } left = (le16_to_cpu(info.len) - 1) * 2; if (info.len & cpu_to_le16(0x8000) || info.len == 0 || left > 2060) { /* data register seems to give 0x8000 in some error cases even * though busy bit is not set in offset register; * in addition, length must be at least 1 due to type field */ spin_unlock(&local->baplock); printk(KERN_DEBUG "%s: Received info frame with invalid " "length 0x%04x (type 0x%04x)\n", dev->name, le16_to_cpu(info.len), le16_to_cpu(info.type)); goto out; } skb = dev_alloc_skb(sizeof(info) + left); if (skb == NULL) { spin_unlock(&local->baplock); printk(KERN_DEBUG "%s: Could not allocate skb for info " "frame\n", dev->name); goto out; } skb_put_data(skb, &info, sizeof(info)); if (left > 0 && hfa384x_from_bap(dev, BAP0, skb_put(skb, left), left)) { spin_unlock(&local->baplock); printk(KERN_WARNING "%s: Info frame read failed (fid=0x%04x, " "len=0x%04x, type=0x%04x\n", dev->name, fid, le16_to_cpu(info.len), le16_to_cpu(info.type)); dev_kfree_skb(skb); goto out; } spin_unlock(&local->baplock); skb_queue_tail(&local->info_list, skb); tasklet_schedule(&local->info_tasklet); out: HFA384X_OUTW(HFA384X_EV_INFO, HFA384X_EVACK_OFF); } /* Called only as a tasklet (software IRQ) */ static void hostap_bap_tasklet(struct tasklet_struct *t) { local_info_t *local = from_tasklet(local, t, bap_tasklet); struct net_device *dev = local->dev; u16 ev; int frames = 30; if (local->func->card_present && !local->func->card_present(local)) return; set_bit(HOSTAP_BITS_BAP_TASKLET, &local->bits); /* Process all pending BAP events without generating new interrupts * for them */ while (frames-- > 0) { ev = HFA384X_INW(HFA384X_EVSTAT_OFF); if (ev == 0xffff || !(ev & HFA384X_BAP0_EVENTS)) break; if (ev & HFA384X_EV_RX) prism2_rx(local); if (ev & HFA384X_EV_INFO) prism2_info(local); if (ev & HFA384X_EV_TX) prism2_tx_ev(local); if (ev & HFA384X_EV_TXEXC) prism2_txexc(local); } set_bit(HOSTAP_BITS_BAP_TASKLET2, &local->bits); clear_bit(HOSTAP_BITS_BAP_TASKLET, &local->bits); /* Enable interrupts for new BAP events */ hfa384x_events_all(dev); clear_bit(HOSTAP_BITS_BAP_TASKLET2, &local->bits); } /* Called only from hardware IRQ */ static void prism2_infdrop(struct net_device *dev) { static unsigned long last_inquire = 0; PDEBUG(DEBUG_EXTRA, "%s: INFDROP event\n", dev->name); /* some firmware versions seem to get stuck with * full CommTallies in high traffic load cases; every * packet will then cause INFDROP event and CommTallies * info frame will not be sent automatically. Try to * get out of this state by inquiring CommTallies. 
*/ if (!last_inquire || time_after(jiffies, last_inquire + HZ)) { hfa384x_cmd_callback(dev, HFA384X_CMDCODE_INQUIRE, HFA384X_INFO_COMMTALLIES, NULL, 0); last_inquire = jiffies; } } /* Called only from hardware IRQ */ static void prism2_ev_tick(struct net_device *dev) { struct hostap_interface *iface; local_info_t *local; u16 evstat, inten; static int prev_stuck = 0; iface = netdev_priv(dev); local = iface->local; if (time_after(jiffies, local->last_tick_timer + 5 * HZ) && local->last_tick_timer) { evstat = HFA384X_INW(HFA384X_EVSTAT_OFF); inten = HFA384X_INW(HFA384X_INTEN_OFF); if (!prev_stuck) { printk(KERN_INFO "%s: SW TICK stuck? " "bits=0x%lx EvStat=%04x IntEn=%04x\n", dev->name, local->bits, evstat, inten); } local->sw_tick_stuck++; if ((evstat & HFA384X_BAP0_EVENTS) && (inten & HFA384X_BAP0_EVENTS)) { printk(KERN_INFO "%s: trying to recover from IRQ " "hang\n", dev->name); hfa384x_events_no_bap0(dev); } prev_stuck = 1; } else prev_stuck = 0; } /* Called only from hardware IRQ */ static void prism2_check_magic(local_info_t *local) { /* at least PCI Prism2.5 with bus mastering seems to sometimes * return 0x0000 in SWSUPPORT0 for unknown reason, but re-reading the * register once or twice seems to get the correct value.. PCI cards * cannot anyway be removed during normal operation, so there is not * really any need for this verification with them. */ #ifndef PRISM2_PCI #ifndef final_version static unsigned long last_magic_err = 0; struct net_device *dev = local->dev; if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != HFA384X_MAGIC) { if (!local->hw_ready) return; HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF); if (time_after(jiffies, last_magic_err + 10 * HZ)) { printk("%s: Interrupt, but SWSUPPORT0 does not match: " "%04X != %04X - card removed?\n", dev->name, HFA384X_INW(HFA384X_SWSUPPORT0_OFF), HFA384X_MAGIC); last_magic_err = jiffies; } else if (net_ratelimit()) { printk(KERN_DEBUG "%s: interrupt - SWSUPPORT0=%04x " "MAGIC=%04x\n", dev->name, HFA384X_INW(HFA384X_SWSUPPORT0_OFF), HFA384X_MAGIC); } if (HFA384X_INW(HFA384X_SWSUPPORT0_OFF) != 0xffff) schedule_work(&local->reset_queue); return; } #endif /* final_version */ #endif /* !PRISM2_PCI */ } /* Called only from hardware IRQ */ static irqreturn_t prism2_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct hostap_interface *iface; local_info_t *local; int events = 0; u16 ev; iface = netdev_priv(dev); local = iface->local; /* Detect early interrupt before driver is fully configured */ spin_lock(&local->irq_init_lock); if (!dev->base_addr) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: Interrupt, but dev not configured\n", dev->name); } spin_unlock(&local->irq_init_lock); return IRQ_HANDLED; } spin_unlock(&local->irq_init_lock); prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 0); if (local->func->card_present && !local->func->card_present(local)) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: Interrupt, but dev not OK\n", dev->name); } return IRQ_HANDLED; } prism2_check_magic(local); for (;;) { ev = HFA384X_INW(HFA384X_EVSTAT_OFF); if (ev == 0xffff) { if (local->shutdown) return IRQ_HANDLED; HFA384X_OUTW(0xffff, HFA384X_EVACK_OFF); printk(KERN_DEBUG "%s: prism2_interrupt: ev=0xffff\n", dev->name); return IRQ_HANDLED; } ev &= HFA384X_INW(HFA384X_INTEN_OFF); if (ev == 0) break; if (ev & HFA384X_EV_CMD) { prism2_cmd_ev(dev); } /* Above events are needed even before hw is ready, but other * events should be skipped during initialization. 
This may * change for AllocEv if allocate_fid is implemented without * busy waiting. */ if (!local->hw_ready || local->hw_resetting || !local->dev_enabled) { ev = HFA384X_INW(HFA384X_EVSTAT_OFF); if (ev & HFA384X_EV_CMD) goto next_event; if ((ev & HFA384X_EVENT_MASK) == 0) return IRQ_HANDLED; if (local->dev_enabled && (ev & ~HFA384X_EV_TICK) && net_ratelimit()) { printk(KERN_DEBUG "%s: prism2_interrupt: hw " "not ready; skipping events 0x%04x " "(IntEn=0x%04x)%s%s%s\n", dev->name, ev, HFA384X_INW(HFA384X_INTEN_OFF), !local->hw_ready ? " (!hw_ready)" : "", local->hw_resetting ? " (hw_resetting)" : "", !local->dev_enabled ? " (!dev_enabled)" : ""); } HFA384X_OUTW(ev, HFA384X_EVACK_OFF); return IRQ_HANDLED; } if (ev & HFA384X_EV_TICK) { prism2_ev_tick(dev); HFA384X_OUTW(HFA384X_EV_TICK, HFA384X_EVACK_OFF); } if (ev & HFA384X_EV_ALLOC) { prism2_alloc_ev(dev); HFA384X_OUTW(HFA384X_EV_ALLOC, HFA384X_EVACK_OFF); } /* Reading data from the card is quite time consuming, so do it * in tasklets. TX, TXEXC, RX, and INFO events will be ACKed * and unmasked after needed data has been read completely. */ if (ev & HFA384X_BAP0_EVENTS) { hfa384x_events_no_bap0(dev); tasklet_schedule(&local->bap_tasklet); } #ifndef final_version if (ev & HFA384X_EV_WTERR) { PDEBUG(DEBUG_EXTRA, "%s: WTERR event\n", dev->name); HFA384X_OUTW(HFA384X_EV_WTERR, HFA384X_EVACK_OFF); } #endif /* final_version */ if (ev & HFA384X_EV_INFDROP) { prism2_infdrop(dev); HFA384X_OUTW(HFA384X_EV_INFDROP, HFA384X_EVACK_OFF); } next_event: events++; if (events >= PRISM2_MAX_INTERRUPT_EVENTS) { PDEBUG(DEBUG_EXTRA, "prism2_interrupt: >%d events " "(EvStat=0x%04x)\n", PRISM2_MAX_INTERRUPT_EVENTS, HFA384X_INW(HFA384X_EVSTAT_OFF)); break; } } prism2_io_debug_add(dev, PRISM2_IO_DEBUG_CMD_INTERRUPT, 0, 1); return IRQ_RETVAL(events); } static void prism2_check_sta_fw_version(local_info_t *local) { struct hfa384x_comp_ident comp; int id, variant, major, minor; if (hfa384x_get_rid(local->dev, HFA384X_RID_STAID, &comp, sizeof(comp), 1) < 0) return; local->fw_ap = 0; id = le16_to_cpu(comp.id); if (id != HFA384X_COMP_ID_STA) { if (id == HFA384X_COMP_ID_FW_AP) local->fw_ap = 1; return; } major = __le16_to_cpu(comp.major); minor = __le16_to_cpu(comp.minor); variant = __le16_to_cpu(comp.variant); local->sta_fw_ver = PRISM2_FW_VER(major, minor, variant); /* Station firmware versions before 1.4.x seem to have a bug in * firmware-based WEP encryption when using Host AP mode, so use * host_encrypt as a default for them. Firmware version 1.4.9 is the * first one that has been seen to produce correct encryption, but the * bug might be fixed before that (although, at least 1.4.2 is broken). */ local->fw_encrypt_ok = local->sta_fw_ver >= PRISM2_FW_VER(1,4,9); if (local->iw_mode == IW_MODE_MASTER && !local->host_encrypt && !local->fw_encrypt_ok) { printk(KERN_DEBUG "%s: defaulting to host-based encryption as " "a workaround for firmware bug in Host AP mode WEP\n", local->dev->name); local->host_encrypt = 1; } /* IEEE 802.11 standard compliant WDS frames (4 addresses) were broken * in station firmware versions before 1.5.x. With these versions, the * driver uses a workaround with bogus frame format (4th address after * the payload). This is not compatible with other AP devices. Since * the firmware bug is fixed in the latest station firmware versions, * automatically enable standard compliant mode for cards using station * firmware version 1.5.0 or newer. 
*/ if (local->sta_fw_ver >= PRISM2_FW_VER(1,5,0)) local->wds_type |= HOSTAP_WDS_STANDARD_FRAME; else { printk(KERN_DEBUG "%s: defaulting to bogus WDS frame as a " "workaround for firmware bug in Host AP mode WDS\n", local->dev->name); } hostap_check_sta_fw_version(local->ap, local->sta_fw_ver); } static void hostap_passive_scan(struct timer_list *t) { local_info_t *local = from_timer(local, t, passive_scan_timer); struct net_device *dev = local->dev; u16 chan; if (local->passive_scan_interval <= 0) return; if (local->passive_scan_state == PASSIVE_SCAN_LISTEN) { int max_tries = 16; /* Even though host system does not really know when the WLAN * MAC is sending frames, try to avoid changing channels for * passive scanning when a host-generated frame is being * transmitted */ if (test_bit(HOSTAP_BITS_TRANSMIT, &local->bits)) { printk(KERN_DEBUG "%s: passive scan detected pending " "TX - delaying\n", dev->name); local->passive_scan_timer.expires = jiffies + HZ / 10; add_timer(&local->passive_scan_timer); return; } do { local->passive_scan_channel++; if (local->passive_scan_channel > 14) local->passive_scan_channel = 1; max_tries--; } while (!(local->channel_mask & (1 << (local->passive_scan_channel - 1))) && max_tries > 0); if (max_tries == 0) { printk(KERN_INFO "%s: no allowed passive scan channels" " found\n", dev->name); return; } printk(KERN_DEBUG "%s: passive scan channel %d\n", dev->name, local->passive_scan_channel); chan = local->passive_scan_channel; local->passive_scan_state = PASSIVE_SCAN_WAIT; local->passive_scan_timer.expires = jiffies + HZ / 10; } else { chan = local->channel; local->passive_scan_state = PASSIVE_SCAN_LISTEN; local->passive_scan_timer.expires = jiffies + local->passive_scan_interval * HZ; } if (hfa384x_cmd_callback(dev, HFA384X_CMDCODE_TEST | (HFA384X_TEST_CHANGE_CHANNEL << 8), chan, NULL, 0)) printk(KERN_ERR "%s: passive scan channel set %d " "failed\n", dev->name, chan); add_timer(&local->passive_scan_timer); } /* Called only as a scheduled task when communications quality values should * be updated. */ static void handle_comms_qual_update(struct work_struct *work) { local_info_t *local = container_of(work, local_info_t, comms_qual_update); prism2_update_comms_qual(local->dev); } /* Software watchdog - called as a timer. Hardware interrupt (Tick event) is * used to monitor that local->last_tick_timer is being updated. If not, * interrupt busy-loop is assumed and driver tries to recover by masking out * some events. */ static void hostap_tick_timer(struct timer_list *t) { static unsigned long last_inquire = 0; local_info_t *local = from_timer(local, t, tick_timer); local->last_tick_timer = jiffies; /* Inquire CommTallies every 10 seconds to keep the statistics updated * more often during low load and when using 32-bit tallies. 
*/ if ((!last_inquire || time_after(jiffies, last_inquire + 10 * HZ)) && !local->hw_downloading && local->hw_ready && !local->hw_resetting && local->dev_enabled) { hfa384x_cmd_callback(local->dev, HFA384X_CMDCODE_INQUIRE, HFA384X_INFO_COMMTALLIES, NULL, 0); last_inquire = jiffies; } if ((local->last_comms_qual_update == 0 || time_after(jiffies, local->last_comms_qual_update + 10 * HZ)) && (local->iw_mode == IW_MODE_INFRA || local->iw_mode == IW_MODE_ADHOC)) { schedule_work(&local->comms_qual_update); } local->tick_timer.expires = jiffies + 2 * HZ; add_timer(&local->tick_timer); } #if !defined(PRISM2_NO_PROCFS_DEBUG) && defined(CONFIG_PROC_FS) static u16 hfa384x_read_reg(struct net_device *dev, u16 reg) { return HFA384X_INW(reg); } static int prism2_registers_proc_show(struct seq_file *m, void *v) { local_info_t *local = m->private; #define SHOW_REG(n) \ seq_printf(m, #n "=%04x\n", hfa384x_read_reg(local->dev, HFA384X_##n##_OFF)) SHOW_REG(CMD); SHOW_REG(PARAM0); SHOW_REG(PARAM1); SHOW_REG(PARAM2); SHOW_REG(STATUS); SHOW_REG(RESP0); SHOW_REG(RESP1); SHOW_REG(RESP2); SHOW_REG(INFOFID); SHOW_REG(CONTROL); SHOW_REG(SELECT0); SHOW_REG(SELECT1); SHOW_REG(OFFSET0); SHOW_REG(OFFSET1); SHOW_REG(RXFID); SHOW_REG(ALLOCFID); SHOW_REG(TXCOMPLFID); SHOW_REG(SWSUPPORT0); SHOW_REG(SWSUPPORT1); SHOW_REG(SWSUPPORT2); SHOW_REG(EVSTAT); SHOW_REG(INTEN); SHOW_REG(EVACK); /* Do not read data registers, because they change the state of the * MAC (offset += 2) */ /* SHOW_REG(DATA0); */ /* SHOW_REG(DATA1); */ SHOW_REG(AUXPAGE); SHOW_REG(AUXOFFSET); /* SHOW_REG(AUXDATA); */ #ifdef PRISM2_PCI SHOW_REG(PCICOR); SHOW_REG(PCIHCR); SHOW_REG(PCI_M0_ADDRH); SHOW_REG(PCI_M0_ADDRL); SHOW_REG(PCI_M0_LEN); SHOW_REG(PCI_M0_CTL); SHOW_REG(PCI_STATUS); SHOW_REG(PCI_M1_ADDRH); SHOW_REG(PCI_M1_ADDRL); SHOW_REG(PCI_M1_LEN); SHOW_REG(PCI_M1_CTL); #endif /* PRISM2_PCI */ return 0; } #endif struct set_tim_data { struct list_head list; int aid; int set; }; static int prism2_set_tim(struct net_device *dev, int aid, int set) { struct list_head *ptr; struct set_tim_data *new_entry; struct hostap_interface *iface; local_info_t *local; iface = netdev_priv(dev); local = iface->local; new_entry = kzalloc(sizeof(*new_entry), GFP_ATOMIC); if (new_entry == NULL) return -ENOMEM; new_entry->aid = aid; new_entry->set = set; spin_lock_bh(&local->set_tim_lock); list_for_each(ptr, &local->set_tim_list) { struct set_tim_data *entry = list_entry(ptr, struct set_tim_data, list); if (entry->aid == aid) { PDEBUG(DEBUG_PS2, "%s: prism2_set_tim: aid=%d " "set=%d ==> %d\n", local->dev->name, aid, entry->set, set); entry->set = set; kfree(new_entry); new_entry = NULL; break; } } if (new_entry) list_add_tail(&new_entry->list, &local->set_tim_list); spin_unlock_bh(&local->set_tim_lock); schedule_work(&local->set_tim_queue); return 0; } static void handle_set_tim_queue(struct work_struct *work) { local_info_t *local = container_of(work, local_info_t, set_tim_queue); struct set_tim_data *entry; u16 val; for (;;) { entry = NULL; spin_lock_bh(&local->set_tim_lock); if (!list_empty(&local->set_tim_list)) { entry = list_entry(local->set_tim_list.next, struct set_tim_data, list); list_del(&entry->list); } spin_unlock_bh(&local->set_tim_lock); if (!entry) break; PDEBUG(DEBUG_PS2, "%s: handle_set_tim_queue: aid=%d set=%d\n", local->dev->name, entry->aid, entry->set); val = entry->aid; if (entry->set) val |= 0x8000; if (hostap_set_word(local->dev, HFA384X_RID_CNFTIMCTRL, val)) { printk(KERN_DEBUG "%s: set_tim failed (aid=%d " "set=%d)\n", local->dev->name, entry->aid, 
entry->set); } kfree(entry); } } static void prism2_clear_set_tim_queue(local_info_t *local) { struct list_head *ptr, *n; list_for_each_safe(ptr, n, &local->set_tim_list) { struct set_tim_data *entry; entry = list_entry(ptr, struct set_tim_data, list); list_del(&entry->list); kfree(entry); } } /* * HostAP uses two layers of net devices, where the inner * layer gets called all the time from the outer layer. * This is a natural nesting, which needs a split lock type. */ static struct lock_class_key hostap_netdev_xmit_lock_key; static struct lock_class_key hostap_netdev_addr_lock_key; static void prism2_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &hostap_netdev_xmit_lock_key); } static void prism2_set_lockdep_class(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &hostap_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, prism2_set_lockdep_class_one, NULL); } static struct net_device * prism2_init_local_data(struct prism2_helper_functions *funcs, int card_idx, struct device *sdev) { struct net_device *dev; struct hostap_interface *iface; struct local_info *local; int len, i, ret; if (funcs == NULL) return NULL; len = strlen(dev_template); if (len >= IFNAMSIZ || strstr(dev_template, "%d") == NULL) { printk(KERN_WARNING "hostap: Invalid dev_template='%s'\n", dev_template); return NULL; } len = sizeof(struct hostap_interface) + 3 + sizeof(struct local_info) + 3 + sizeof(struct ap_data); dev = alloc_etherdev(len); if (dev == NULL) return NULL; iface = netdev_priv(dev); local = (struct local_info *) ((((long) (iface + 1)) + 3) & ~3); local->ap = (struct ap_data *) ((((long) (local + 1)) + 3) & ~3); local->dev = iface->dev = dev; iface->local = local; iface->type = HOSTAP_INTERFACE_MASTER; INIT_LIST_HEAD(&local->hostap_interfaces); local->hw_module = THIS_MODULE; #ifdef PRISM2_IO_DEBUG local->io_debug_enabled = 1; #endif /* PRISM2_IO_DEBUG */ local->func = funcs; local->func->cmd = hfa384x_cmd; local->func->read_regs = hfa384x_read_regs; local->func->get_rid = hfa384x_get_rid; local->func->set_rid = hfa384x_set_rid; local->func->hw_enable = prism2_hw_enable; local->func->hw_config = prism2_hw_config; local->func->hw_reset = prism2_hw_reset; local->func->hw_shutdown = prism2_hw_shutdown; local->func->reset_port = prism2_reset_port; local->func->schedule_reset = prism2_schedule_reset; #ifdef PRISM2_DOWNLOAD_SUPPORT local->func->read_aux_proc_ops = &prism2_download_aux_dump_proc_ops; local->func->download = prism2_download; #endif /* PRISM2_DOWNLOAD_SUPPORT */ local->func->tx = prism2_tx_80211; local->func->set_tim = prism2_set_tim; local->func->need_tx_headroom = 0; /* no need to add txdesc in * skb->data (FIX: maybe for DMA bus * mastering? */ local->mtu = mtu; rwlock_init(&local->iface_lock); spin_lock_init(&local->txfidlock); spin_lock_init(&local->cmdlock); spin_lock_init(&local->baplock); spin_lock_init(&local->lock); spin_lock_init(&local->irq_init_lock); mutex_init(&local->rid_bap_mtx); if (card_idx < 0 || card_idx >= MAX_PARM_DEVICES) card_idx = 0; local->card_idx = card_idx; len = strlen(essid); memcpy(local->essid, essid, len > MAX_SSID_LEN ? 
MAX_SSID_LEN : len); local->essid[MAX_SSID_LEN] = '\0'; i = GET_INT_PARM(iw_mode, card_idx); if ((i >= IW_MODE_ADHOC && i <= IW_MODE_REPEAT) || i == IW_MODE_MONITOR) { local->iw_mode = i; } else { printk(KERN_WARNING "prism2: Unknown iw_mode %d; using " "IW_MODE_MASTER\n", i); local->iw_mode = IW_MODE_MASTER; } local->channel = GET_INT_PARM(channel, card_idx); local->beacon_int = GET_INT_PARM(beacon_int, card_idx); local->dtim_period = GET_INT_PARM(dtim_period, card_idx); local->wds_max_connections = 16; local->tx_control = HFA384X_TX_CTRL_FLAGS; local->manual_retry_count = -1; local->rts_threshold = 2347; local->fragm_threshold = 2346; local->rssi_to_dBm = 100; /* default; to be overriden by * cnfDbmAdjust, if available */ local->auth_algs = PRISM2_AUTH_OPEN | PRISM2_AUTH_SHARED_KEY; local->sram_type = -1; local->scan_channel_mask = 0xffff; local->monitor_type = PRISM2_MONITOR_RADIOTAP; /* Initialize task queue structures */ INIT_WORK(&local->reset_queue, handle_reset_queue); INIT_WORK(&local->set_multicast_list_queue, hostap_set_multicast_list_queue); INIT_WORK(&local->set_tim_queue, handle_set_tim_queue); INIT_LIST_HEAD(&local->set_tim_list); spin_lock_init(&local->set_tim_lock); INIT_WORK(&local->comms_qual_update, handle_comms_qual_update); /* Initialize tasklets for handling hardware IRQ related operations * outside hw IRQ handler */ tasklet_setup(&local->bap_tasklet, hostap_bap_tasklet); tasklet_setup(&local->info_tasklet, hostap_info_tasklet); hostap_info_init(local); tasklet_setup(&local->rx_tasklet, hostap_rx_tasklet); skb_queue_head_init(&local->rx_list); tasklet_setup(&local->sta_tx_exc_tasklet, hostap_sta_tx_exc_tasklet); skb_queue_head_init(&local->sta_tx_exc_list); INIT_LIST_HEAD(&local->cmd_queue); init_waitqueue_head(&local->hostscan_wq); lib80211_crypt_info_init(&local->crypt_info, dev->name, &local->lock); timer_setup(&local->passive_scan_timer, hostap_passive_scan, 0); timer_setup(&local->tick_timer, hostap_tick_timer, 0); local->tick_timer.expires = jiffies + 2 * HZ; add_timer(&local->tick_timer); INIT_LIST_HEAD(&local->bss_list); hostap_setup_dev(dev, local, HOSTAP_INTERFACE_MASTER); dev->type = ARPHRD_IEEE80211; dev->header_ops = &hostap_80211_ops; rtnl_lock(); ret = dev_alloc_name(dev, "wifi%d"); SET_NETDEV_DEV(dev, sdev); if (ret >= 0) ret = register_netdevice(dev); prism2_set_lockdep_class(dev); rtnl_unlock(); if (ret < 0) { printk(KERN_WARNING "%s: register netdevice failed!\n", dev_info); goto fail; } printk(KERN_INFO "%s: Registered netdevice %s\n", dev_info, dev->name); hostap_init_data(local); return dev; fail: free_netdev(dev); return NULL; } static int hostap_hw_ready(struct net_device *dev) { struct hostap_interface *iface; struct local_info *local; iface = netdev_priv(dev); local = iface->local; local->ddev = hostap_add_interface(local, HOSTAP_INTERFACE_MAIN, 0, "", dev_template); if (local->ddev) { if (local->iw_mode == IW_MODE_INFRA || local->iw_mode == IW_MODE_ADHOC) { netif_carrier_off(local->dev); netif_carrier_off(local->ddev); } hostap_init_proc(local); #ifndef PRISM2_NO_PROCFS_DEBUG proc_create_single_data("registers", 0, local->proc, prism2_registers_proc_show, local); #endif /* PRISM2_NO_PROCFS_DEBUG */ hostap_init_ap_proc(local); return 0; } return -1; } static void prism2_free_local_data(struct net_device *dev) { struct hostap_tx_callback_info *tx_cb, *tx_cb_prev; int i; struct hostap_interface *iface; struct local_info *local; struct list_head *ptr, *n; if (dev == NULL) return; iface = netdev_priv(dev); local = iface->local; /* Unregister all 
netdevs before freeing local data. */ list_for_each_safe(ptr, n, &local->hostap_interfaces) { iface = list_entry(ptr, struct hostap_interface, list); if (iface->type == HOSTAP_INTERFACE_MASTER) { /* special handling for this interface below */ continue; } hostap_remove_interface(iface->dev, 0, 1); } unregister_netdev(local->dev); flush_work(&local->reset_queue); flush_work(&local->set_multicast_list_queue); flush_work(&local->set_tim_queue); #ifndef PRISM2_NO_STATION_MODES flush_work(&local->info_queue); #endif flush_work(&local->comms_qual_update); lib80211_crypt_info_free(&local->crypt_info); if (timer_pending(&local->passive_scan_timer)) del_timer(&local->passive_scan_timer); if (timer_pending(&local->tick_timer)) del_timer(&local->tick_timer); prism2_clear_cmd_queue(local); skb_queue_purge(&local->info_list); skb_queue_purge(&local->rx_list); skb_queue_purge(&local->sta_tx_exc_list); if (local->dev_enabled) prism2_callback(local, PRISM2_CALLBACK_DISABLE); if (local->ap != NULL) hostap_free_data(local->ap); #ifndef PRISM2_NO_PROCFS_DEBUG if (local->proc != NULL) remove_proc_entry("registers", local->proc); #endif /* PRISM2_NO_PROCFS_DEBUG */ hostap_remove_proc(local); tx_cb = local->tx_callback; while (tx_cb != NULL) { tx_cb_prev = tx_cb; tx_cb = tx_cb->next; kfree(tx_cb_prev); } hostap_set_hostapd(local, 0, 0); hostap_set_hostapd_sta(local, 0, 0); for (i = 0; i < PRISM2_FRAG_CACHE_LEN; i++) { if (local->frag_cache[i].skb != NULL) dev_kfree_skb(local->frag_cache[i].skb); } #ifdef PRISM2_DOWNLOAD_SUPPORT prism2_download_free_data(local->dl_pri); prism2_download_free_data(local->dl_sec); #endif /* PRISM2_DOWNLOAD_SUPPORT */ prism2_clear_set_tim_queue(local); list_for_each_safe(ptr, n, &local->bss_list) { struct hostap_bss_info *bss = list_entry(ptr, struct hostap_bss_info, list); kfree(bss); } kfree(local->pda); kfree(local->last_scan_results); kfree(local->generic_elem); free_netdev(local->dev); } #if defined(PRISM2_PCI) || defined(PRISM2_PCCARD) static void __maybe_unused prism2_suspend(struct net_device *dev) { struct hostap_interface *iface; struct local_info *local; union iwreq_data wrqu; iface = netdev_priv(dev); local = iface->local; /* Send disconnect event, e.g., to trigger reassociation after resume * if wpa_supplicant is used. */ memset(&wrqu, 0, sizeof(wrqu)); wrqu.ap_addr.sa_family = ARPHRD_ETHER; wireless_send_event(local->dev, SIOCGIWAP, &wrqu, NULL); /* Disable hardware and firmware */ prism2_hw_shutdown(dev, 0); } #endif /* PRISM2_PCI || PRISM2_PCCARD */ /* These might at some point be compiled separately and used as separate * kernel modules or linked into one */ #ifdef PRISM2_DOWNLOAD_SUPPORT #include "hostap_download.c" #endif /* PRISM2_DOWNLOAD_SUPPORT */ #ifdef PRISM2_CALLBACK /* External hostap_callback.c file can be used to, e.g., blink activity led. * This can use platform specific code and must define prism2_callback() * function (if PRISM2_CALLBACK is not defined, these function calls are not * used. */ #include "hostap_callback.c" #endif /* PRISM2_CALLBACK */
linux-master
drivers/net/wireless/intersil/hostap/hostap_hw.c
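The hostap_hw.c record above defers TIM-bit updates from prism2_set_tim() to handle_set_tim_queue(): requests are queued under a lock, duplicates for the same association ID are coalesced in place, and a work item later applies only the latest value per AID. The following is a minimal user-space sketch of that coalesce-and-defer pattern, not part of the kernel sources in this record; the names tim_request, tim_queue_push and tim_queue_drain are hypothetical and exist only for this illustration, and locking is omitted for brevity.

/*
 * Illustrative sketch only (assumed names, single-threaded): model of the
 * coalescing TIM-update queue used by prism2_set_tim()/handle_set_tim_queue().
 */
#include <stdio.h>
#include <stdlib.h>

struct tim_request {
	struct tim_request *next;
	int aid;	/* association ID whose TIM bit should change */
	int set;	/* latest requested value: 1 = set, 0 = clear */
};

static struct tim_request *tim_queue;

/* Queue an update; if a request for this AID is already pending, overwrite it. */
static int tim_queue_push(int aid, int set)
{
	struct tim_request *r;

	for (r = tim_queue; r; r = r->next) {
		if (r->aid == aid) {
			r->set = set;	/* coalesce with the pending request */
			return 0;
		}
	}
	r = malloc(sizeof(*r));
	if (!r)
		return -1;
	r->aid = aid;
	r->set = set;
	r->next = tim_queue;
	tim_queue = r;
	return 0;
}

/* Drain the queue, applying each coalesced request exactly once. */
static void tim_queue_drain(void)
{
	while (tim_queue) {
		struct tim_request *r = tim_queue;

		tim_queue = r->next;
		printf("apply TIM: aid=%d set=%d\n", r->aid, r->set);
		free(r);
	}
}

int main(void)
{
	tim_queue_push(3, 1);
	tim_queue_push(3, 0);	/* overwrites the pending aid=3 request */
	tim_queue_push(7, 1);
	tim_queue_drain();	/* one applied update per AID */
	return 0;
}

Draining in a separate step mirrors why the driver does this: the RID write that applies the TIM bit cannot be issued from the contexts that request it, so updates are batched and the most recent value per station wins.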
/* * USB Orinoco driver * * Copyright (c) 2003 Manuel Estrada Sainz * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License * at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and * limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License version 2 (the "GPL"), in * which case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the MPL, indicate your decision by * deleting the provisions above and replace them with the notice and * other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file * under either the MPL or the GPL. * * Queueing code based on linux-wlan-ng 0.2.1-pre5 * * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. * * The license is the same as above. * * Initialy based on USB Skeleton driver - 0.7 * * Copyright (c) 2001 Greg Kroah-Hartman ([email protected]) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * NOTE: The original USB Skeleton driver is GPL, but all that code is * gone so MPL/GPL applies. */ #define DRIVER_NAME "orinoco_usb" #define PFX DRIVER_NAME ": " #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/poll.h> #include <linux/slab.h> #include <linux/fcntl.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/usb.h> #include <linux/timer.h> #include <linux/netdevice.h> #include <linux/if_arp.h> #include <linux/etherdevice.h> #include <linux/wireless.h> #include <linux/firmware.h> #include <linux/refcount.h> #include "mic.h" #include "orinoco.h" #ifndef URB_ASYNC_UNLINK #define URB_ASYNC_UNLINK 0 #endif struct header_struct { /* 802.3 */ u8 dest[ETH_ALEN]; u8 src[ETH_ALEN]; __be16 len; /* 802.2 */ u8 dsap; u8 ssap; u8 ctrl; /* SNAP */ u8 oui[3]; __be16 ethertype; } __packed; struct ez_usb_fw { u16 size; const u8 *code; }; static struct ez_usb_fw firmware = { .size = 0, .code = NULL, }; /* Debugging macros */ #undef err #define err(format, arg...) \ do { printk(KERN_ERR PFX format "\n", ## arg); } while (0) MODULE_FIRMWARE("orinoco_ezusb_fw"); /* * Under some conditions, the card gets stuck and stops paying attention * to the world (i.e. data communication stalls) until we do something to * it. Sending an INQ_TALLIES command seems to be enough and should be * harmless otherwise. This behaviour has been observed when using the * driver on a systemimager client during installation. In the past a * timer was used to send INQ_TALLIES commands when there was no other * activity, but it was troublesome and was removed. */ #define USB_COMPAQ_VENDOR_ID 0x049f /* Compaq Computer Corp. 
*/ #define USB_COMPAQ_WL215_ID 0x001f /* Compaq WL215 USB Adapter */ #define USB_COMPAQ_W200_ID 0x0076 /* Compaq W200 USB Adapter */ #define USB_HP_WL215_ID 0x0082 /* Compaq WL215 USB Adapter */ #define USB_MELCO_VENDOR_ID 0x0411 #define USB_BUFFALO_L11_ID 0x0006 /* BUFFALO WLI-USB-L11 */ #define USB_BUFFALO_L11G_WR_ID 0x000B /* BUFFALO WLI-USB-L11G-WR */ #define USB_BUFFALO_L11G_ID 0x000D /* BUFFALO WLI-USB-L11G */ #define USB_LUCENT_VENDOR_ID 0x047E /* Lucent Technologies */ #define USB_LUCENT_ORINOCO_ID 0x0300 /* Lucent/Agere Orinoco USB Client */ #define USB_AVAYA8_VENDOR_ID 0x0D98 #define USB_AVAYAE_VENDOR_ID 0x0D9E #define USB_AVAYA_WIRELESS_ID 0x0300 /* Avaya USB Wireless Card */ #define USB_AGERE_VENDOR_ID 0x0D4E /* Agere Systems */ #define USB_AGERE_MODEL0801_ID 0x1000 /* USB Wireless Card Model 0801 */ #define USB_AGERE_MODEL0802_ID 0x1001 /* USB Wireless Card Model 0802 */ #define USB_AGERE_REBRANDED_ID 0x047A /* USB WLAN Card */ #define USB_ELSA_VENDOR_ID 0x05CC #define USB_ELSA_AIRLANCER_ID 0x3100 /* ELSA AirLancer USB-11 */ #define USB_LEGEND_VENDOR_ID 0x0E7C #define USB_LEGEND_JOYNET_ID 0x0300 /* Joynet USB WLAN Card */ #define USB_SAMSUNG_VENDOR_ID 0x04E8 #define USB_SAMSUNG_SEW2001U1_ID 0x5002 /* Samsung SEW-2001u Card */ #define USB_SAMSUNG_SEW2001U2_ID 0x5B11 /* Samsung SEW-2001u Card */ #define USB_SAMSUNG_SEW2003U_ID 0x7011 /* Samsung SEW-2003U Card */ #define USB_IGATE_VENDOR_ID 0x0681 #define USB_IGATE_IGATE_11M_ID 0x0012 /* I-GATE 11M USB Card */ #define USB_FUJITSU_VENDOR_ID 0x0BF8 #define USB_FUJITSU_E1100_ID 0x1002 /* connect2AIR WLAN E-1100 USB */ #define USB_2WIRE_VENDOR_ID 0x1630 #define USB_2WIRE_WIRELESS_ID 0xff81 /* 2Wire USB Wireless adapter */ #define EZUSB_REQUEST_FW_TRANS 0xA0 #define EZUSB_REQUEST_TRIGGER 0xAA #define EZUSB_REQUEST_TRIG_AC 0xAC #define EZUSB_CPUCS_REG 0x7F92 #define EZUSB_RID_TX 0x0700 #define EZUSB_RID_RX 0x0701 #define EZUSB_RID_INIT1 0x0702 #define EZUSB_RID_ACK 0x0710 #define EZUSB_RID_READ_PDA 0x0800 #define EZUSB_RID_PROG_INIT 0x0852 #define EZUSB_RID_PROG_SET_ADDR 0x0853 #define EZUSB_RID_PROG_BYTES 0x0854 #define EZUSB_RID_PROG_END 0x0855 #define EZUSB_RID_DOCMD 0x0860 /* Recognize info frames */ #define EZUSB_IS_INFO(id) ((id >= 0xF000) && (id <= 0xF2FF)) #define EZUSB_MAGIC 0x0210 #define EZUSB_FRAME_DATA 1 #define EZUSB_FRAME_CONTROL 2 #define DEF_TIMEOUT (3 * HZ) #define BULK_BUF_SIZE 2048 #define MAX_DL_SIZE (BULK_BUF_SIZE - sizeof(struct ezusb_packet)) #define FW_BUF_SIZE 64 #define FW_VAR_OFFSET_PTR 0x359 #define FW_VAR_VALUE 0 #define FW_HOLE_START 0x100 #define FW_HOLE_END 0x300 struct ezusb_packet { __le16 magic; /* 0x0210 */ u8 req_reply_count; u8 ans_reply_count; __le16 frame_type; /* 0x01 for data frames, 0x02 otherwise */ __le16 size; /* transport size */ __le16 crc; /* CRC up to here */ __le16 hermes_len; __le16 hermes_rid; u8 data[]; } __packed; /* Table of devices that work or may work with this driver */ static const struct usb_device_id ezusb_table[] = { {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_WL215_ID)}, {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_HP_WL215_ID)}, {USB_DEVICE(USB_COMPAQ_VENDOR_ID, USB_COMPAQ_W200_ID)}, {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11_ID)}, {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_WR_ID)}, {USB_DEVICE(USB_MELCO_VENDOR_ID, USB_BUFFALO_L11G_ID)}, {USB_DEVICE(USB_LUCENT_VENDOR_ID, USB_LUCENT_ORINOCO_ID)}, {USB_DEVICE(USB_AVAYA8_VENDOR_ID, USB_AVAYA_WIRELESS_ID)}, {USB_DEVICE(USB_AVAYAE_VENDOR_ID, USB_AVAYA_WIRELESS_ID)}, {USB_DEVICE(USB_AGERE_VENDOR_ID, 
USB_AGERE_MODEL0801_ID)}, {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_MODEL0802_ID)}, {USB_DEVICE(USB_ELSA_VENDOR_ID, USB_ELSA_AIRLANCER_ID)}, {USB_DEVICE(USB_LEGEND_VENDOR_ID, USB_LEGEND_JOYNET_ID)}, {USB_DEVICE_VER(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U1_ID, 0, 0)}, {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2001U2_ID)}, {USB_DEVICE(USB_SAMSUNG_VENDOR_ID, USB_SAMSUNG_SEW2003U_ID)}, {USB_DEVICE(USB_IGATE_VENDOR_ID, USB_IGATE_IGATE_11M_ID)}, {USB_DEVICE(USB_FUJITSU_VENDOR_ID, USB_FUJITSU_E1100_ID)}, {USB_DEVICE(USB_2WIRE_VENDOR_ID, USB_2WIRE_WIRELESS_ID)}, {USB_DEVICE(USB_AGERE_VENDOR_ID, USB_AGERE_REBRANDED_ID)}, {} /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, ezusb_table); /* Structure to hold all of our device specific stuff */ struct ezusb_priv { struct usb_device *udev; struct net_device *dev; struct mutex mtx; spinlock_t req_lock; struct list_head req_pending; struct list_head req_active; spinlock_t reply_count_lock; u16 hermes_reg_fake[0x40]; u8 *bap_buf; struct urb *read_urb; int read_pipe; int write_pipe; u8 reply_count; }; enum ezusb_state { EZUSB_CTX_START, EZUSB_CTX_QUEUED, EZUSB_CTX_REQ_SUBMITTED, EZUSB_CTX_REQ_COMPLETE, EZUSB_CTX_RESP_RECEIVED, EZUSB_CTX_REQ_TIMEOUT, EZUSB_CTX_REQ_FAILED, EZUSB_CTX_RESP_TIMEOUT, EZUSB_CTX_REQSUBMIT_FAIL, EZUSB_CTX_COMPLETE, }; struct request_context { struct list_head list; refcount_t refcount; struct completion done; /* Signals that CTX is dead */ int killed; struct urb *outurb; /* OUT for req pkt */ struct ezusb_priv *upriv; struct ezusb_packet *buf; int buf_length; struct timer_list timer; /* Timeout handling */ enum ezusb_state state; /* Current state */ /* the RID that we will wait for */ u16 out_rid; u16 in_rid; }; /* Forward declarations */ static void ezusb_ctx_complete(struct request_context *ctx); static void ezusb_req_queue_run(struct ezusb_priv *upriv); static void ezusb_bulk_in_callback(struct urb *urb); static inline u8 ezusb_reply_inc(u8 count) { if (count < 0x7F) return count + 1; else return 1; } static void ezusb_request_context_put(struct request_context *ctx) { if (!refcount_dec_and_test(&ctx->refcount)) return; WARN_ON(!ctx->done.done); BUG_ON(ctx->outurb->status == -EINPROGRESS); BUG_ON(timer_pending(&ctx->timer)); usb_free_urb(ctx->outurb); kfree(ctx->buf); kfree(ctx); } static inline void ezusb_mod_timer(struct ezusb_priv *upriv, struct timer_list *timer, unsigned long expire) { if (!upriv->udev) return; mod_timer(timer, expire); } static void ezusb_request_timerfn(struct timer_list *t) { struct request_context *ctx = from_timer(ctx, t, timer); ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK; if (usb_unlink_urb(ctx->outurb) == -EINPROGRESS) { ctx->state = EZUSB_CTX_REQ_TIMEOUT; } else { ctx->state = EZUSB_CTX_RESP_TIMEOUT; dev_dbg(&ctx->outurb->dev->dev, "couldn't unlink\n"); refcount_inc(&ctx->refcount); ctx->killed = 1; ezusb_ctx_complete(ctx); ezusb_request_context_put(ctx); } }; static struct request_context *ezusb_alloc_ctx(struct ezusb_priv *upriv, u16 out_rid, u16 in_rid) { struct request_context *ctx; ctx = kzalloc(sizeof(*ctx), GFP_ATOMIC); if (!ctx) return NULL; ctx->buf = kmalloc(BULK_BUF_SIZE, GFP_ATOMIC); if (!ctx->buf) { kfree(ctx); return NULL; } ctx->outurb = usb_alloc_urb(0, GFP_ATOMIC); if (!ctx->outurb) { kfree(ctx->buf); kfree(ctx); return NULL; } ctx->upriv = upriv; ctx->state = EZUSB_CTX_START; ctx->out_rid = out_rid; ctx->in_rid = in_rid; refcount_set(&ctx->refcount, 1); init_completion(&ctx->done); timer_setup(&ctx->timer, ezusb_request_timerfn, 0); return ctx; } static void 
ezusb_ctx_complete(struct request_context *ctx) { struct ezusb_priv *upriv = ctx->upriv; unsigned long flags; spin_lock_irqsave(&upriv->req_lock, flags); list_del_init(&ctx->list); if (upriv->udev) { spin_unlock_irqrestore(&upriv->req_lock, flags); ezusb_req_queue_run(upriv); spin_lock_irqsave(&upriv->req_lock, flags); } switch (ctx->state) { case EZUSB_CTX_COMPLETE: case EZUSB_CTX_REQSUBMIT_FAIL: case EZUSB_CTX_REQ_FAILED: case EZUSB_CTX_REQ_TIMEOUT: case EZUSB_CTX_RESP_TIMEOUT: spin_unlock_irqrestore(&upriv->req_lock, flags); if ((ctx->out_rid == EZUSB_RID_TX) && upriv->dev) { struct net_device *dev = upriv->dev; struct net_device_stats *stats = &dev->stats; if (ctx->state != EZUSB_CTX_COMPLETE) stats->tx_errors++; else stats->tx_packets++; netif_wake_queue(dev); } complete_all(&ctx->done); ezusb_request_context_put(ctx); break; default: spin_unlock_irqrestore(&upriv->req_lock, flags); if (!upriv->udev) { /* This is normal, as all request contexts get flushed * when the device is disconnected */ err("Called, CTX not terminating, but device gone"); complete_all(&ctx->done); ezusb_request_context_put(ctx); break; } err("Called, CTX not in terminating state."); /* Things are really bad if this happens. Just leak * the CTX because it may still be linked to the * queue or the OUT urb may still be active. * Just leaking at least prevents an Oops or Panic. */ break; } } /* * ezusb_req_queue_run: * Description: * Note: Only one active CTX at any one time, because there's no * other (reliable) way to match the response URB to the correct * CTX. */ static void ezusb_req_queue_run(struct ezusb_priv *upriv) { unsigned long flags; struct request_context *ctx; int result; spin_lock_irqsave(&upriv->req_lock, flags); if (!list_empty(&upriv->req_active)) goto unlock; if (list_empty(&upriv->req_pending)) goto unlock; ctx = list_entry(upriv->req_pending.next, struct request_context, list); if (!ctx->upriv->udev) goto unlock; /* We need to split this off to avoid a race condition */ list_move_tail(&ctx->list, &upriv->req_active); if (ctx->state == EZUSB_CTX_QUEUED) { refcount_inc(&ctx->refcount); result = usb_submit_urb(ctx->outurb, GFP_ATOMIC); if (result) { ctx->state = EZUSB_CTX_REQSUBMIT_FAIL; spin_unlock_irqrestore(&upriv->req_lock, flags); err("Fatal, failed to submit command urb." 
" error=%d\n", result); ezusb_ctx_complete(ctx); ezusb_request_context_put(ctx); goto done; } ctx->state = EZUSB_CTX_REQ_SUBMITTED; ezusb_mod_timer(ctx->upriv, &ctx->timer, jiffies + DEF_TIMEOUT); } unlock: spin_unlock_irqrestore(&upriv->req_lock, flags); done: return; } static void ezusb_req_enqueue_run(struct ezusb_priv *upriv, struct request_context *ctx) { unsigned long flags; spin_lock_irqsave(&upriv->req_lock, flags); if (!ctx->upriv->udev) { spin_unlock_irqrestore(&upriv->req_lock, flags); goto done; } refcount_inc(&ctx->refcount); list_add_tail(&ctx->list, &upriv->req_pending); spin_unlock_irqrestore(&upriv->req_lock, flags); ctx->state = EZUSB_CTX_QUEUED; ezusb_req_queue_run(upriv); done: return; } static void ezusb_request_out_callback(struct urb *urb) { unsigned long flags; enum ezusb_state state; struct request_context *ctx = urb->context; struct ezusb_priv *upriv = ctx->upriv; spin_lock_irqsave(&upriv->req_lock, flags); del_timer(&ctx->timer); if (ctx->killed) { spin_unlock_irqrestore(&upriv->req_lock, flags); pr_warn("interrupt called with dead ctx\n"); goto out; } state = ctx->state; if (urb->status == 0) { switch (state) { case EZUSB_CTX_REQ_SUBMITTED: if (ctx->in_rid) { ctx->state = EZUSB_CTX_REQ_COMPLETE; /* reply URB still pending */ ezusb_mod_timer(upriv, &ctx->timer, jiffies + DEF_TIMEOUT); spin_unlock_irqrestore(&upriv->req_lock, flags); break; } fallthrough; case EZUSB_CTX_RESP_RECEIVED: /* IN already received before this OUT-ACK */ ctx->state = EZUSB_CTX_COMPLETE; spin_unlock_irqrestore(&upriv->req_lock, flags); ezusb_ctx_complete(ctx); break; default: spin_unlock_irqrestore(&upriv->req_lock, flags); err("Unexpected state(0x%x, %d) in OUT URB", state, urb->status); break; } } else { /* If someone cancels the OUT URB then its status * should be either -ECONNRESET or -ENOENT. */ switch (state) { case EZUSB_CTX_REQ_SUBMITTED: case EZUSB_CTX_RESP_RECEIVED: ctx->state = EZUSB_CTX_REQ_FAILED; fallthrough; case EZUSB_CTX_REQ_FAILED: case EZUSB_CTX_REQ_TIMEOUT: spin_unlock_irqrestore(&upriv->req_lock, flags); ezusb_ctx_complete(ctx); break; default: spin_unlock_irqrestore(&upriv->req_lock, flags); err("Unexpected state(0x%x, %d) in OUT URB", state, urb->status); break; } } out: ezusb_request_context_put(ctx); } static void ezusb_request_in_callback(struct ezusb_priv *upriv, struct urb *urb) { struct ezusb_packet *ans = urb->transfer_buffer; struct request_context *ctx = NULL; enum ezusb_state state; unsigned long flags; /* Find the CTX on the active queue that requested this URB */ spin_lock_irqsave(&upriv->req_lock, flags); if (upriv->udev) { struct list_head *item; list_for_each(item, &upriv->req_active) { struct request_context *c; int reply_count; c = list_entry(item, struct request_context, list); reply_count = ezusb_reply_inc(c->buf->req_reply_count); if ((ans->ans_reply_count == reply_count) && (le16_to_cpu(ans->hermes_rid) == c->in_rid)) { ctx = c; break; } netdev_dbg(upriv->dev, "Skipped (0x%x/0x%x) (%d/%d)\n", le16_to_cpu(ans->hermes_rid), c->in_rid, ans->ans_reply_count, reply_count); } } if (ctx == NULL) { spin_unlock_irqrestore(&upriv->req_lock, flags); err("%s: got unexpected RID: 0x%04X", __func__, le16_to_cpu(ans->hermes_rid)); ezusb_req_queue_run(upriv); return; } /* The data we want is in the in buffer, exchange */ urb->transfer_buffer = ctx->buf; ctx->buf = (void *) ans; ctx->buf_length = urb->actual_length; state = ctx->state; switch (state) { case EZUSB_CTX_REQ_SUBMITTED: /* We have received our response URB before * our request has been acknowledged. 
Do NOT * destroy our CTX yet, because our OUT URB * is still alive ... */ ctx->state = EZUSB_CTX_RESP_RECEIVED; spin_unlock_irqrestore(&upriv->req_lock, flags); /* Let the machine continue running. */ break; case EZUSB_CTX_REQ_COMPLETE: /* This is the usual path: our request * has already been acknowledged, and * we have now received the reply. */ ctx->state = EZUSB_CTX_COMPLETE; /* Stop the intimer */ del_timer(&ctx->timer); spin_unlock_irqrestore(&upriv->req_lock, flags); /* Call the completion handler */ ezusb_ctx_complete(ctx); break; default: spin_unlock_irqrestore(&upriv->req_lock, flags); pr_warn("Matched IN URB, unexpected context state(0x%x)\n", state); /* Throw this CTX away and try submitting another */ del_timer(&ctx->timer); ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK; usb_unlink_urb(ctx->outurb); ezusb_req_queue_run(upriv); break; } /* switch */ } typedef void (*ezusb_ctx_wait)(struct ezusb_priv *, struct request_context *); static void ezusb_req_ctx_wait_compl(struct ezusb_priv *upriv, struct request_context *ctx) { switch (ctx->state) { case EZUSB_CTX_QUEUED: case EZUSB_CTX_REQ_SUBMITTED: case EZUSB_CTX_REQ_COMPLETE: case EZUSB_CTX_RESP_RECEIVED: wait_for_completion(&ctx->done); break; default: /* Done or failed - nothing to wait for */ break; } } static void ezusb_req_ctx_wait_poll(struct ezusb_priv *upriv, struct request_context *ctx) { int msecs; switch (ctx->state) { case EZUSB_CTX_QUEUED: case EZUSB_CTX_REQ_SUBMITTED: case EZUSB_CTX_REQ_COMPLETE: case EZUSB_CTX_RESP_RECEIVED: /* If we get called from a timer or with our lock acquired, then * we can't wait for the completion and have to poll. This won't * happen if the USB controller completes the URB requests in * BH. */ msecs = DEF_TIMEOUT * (1000 / HZ); while (!try_wait_for_completion(&ctx->done) && msecs--) udelay(1000); break; default: /* Done or failed - nothing to wait for */ break; } } static void ezusb_req_ctx_wait_skip(struct ezusb_priv *upriv, struct request_context *ctx) { WARN(1, "Shouldn't be invoked for in_rid\n"); } static inline u16 build_crc(struct ezusb_packet *data) { u16 crc = 0; u8 *bytes = (u8 *)data; int i; for (i = 0; i < 8; i++) crc = (crc << 1) + bytes[i]; return crc; } /* * ezusb_fill_req: * * if data == NULL and length > 0 the data is assumed to be already in * the target buffer and only the header is filled. 
* */ static int ezusb_fill_req(struct ezusb_packet *req, u16 length, u16 rid, const void *data, u16 frame_type, u8 reply_count) { int total_size = sizeof(*req) + length; BUG_ON(total_size > BULK_BUF_SIZE); req->magic = cpu_to_le16(EZUSB_MAGIC); req->req_reply_count = reply_count; req->ans_reply_count = 0; req->frame_type = cpu_to_le16(frame_type); req->size = cpu_to_le16(length + 4); req->crc = cpu_to_le16(build_crc(req)); req->hermes_len = cpu_to_le16(HERMES_BYTES_TO_RECLEN(length)); req->hermes_rid = cpu_to_le16(rid); if (data) memcpy(req->data, data, length); return total_size; } static int ezusb_submit_in_urb(struct ezusb_priv *upriv) { int retval = 0; void *cur_buf = upriv->read_urb->transfer_buffer; if (upriv->read_urb->status == -EINPROGRESS) { netdev_dbg(upriv->dev, "urb busy, not resubmiting\n"); retval = -EBUSY; goto exit; } usb_fill_bulk_urb(upriv->read_urb, upriv->udev, upriv->read_pipe, cur_buf, BULK_BUF_SIZE, ezusb_bulk_in_callback, upriv); upriv->read_urb->transfer_flags = 0; retval = usb_submit_urb(upriv->read_urb, GFP_ATOMIC); if (retval) err("%s submit failed %d", __func__, retval); exit: return retval; } static inline int ezusb_8051_cpucs(struct ezusb_priv *upriv, int reset) { int ret; u8 *res_val = NULL; if (!upriv->udev) { err("%s: !upriv->udev", __func__); return -EFAULT; } res_val = kmalloc(sizeof(*res_val), GFP_KERNEL); if (!res_val) return -ENOMEM; *res_val = reset; /* avoid argument promotion */ ret = usb_control_msg(upriv->udev, usb_sndctrlpipe(upriv->udev, 0), EZUSB_REQUEST_FW_TRANS, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, EZUSB_CPUCS_REG, 0, res_val, sizeof(*res_val), DEF_TIMEOUT); kfree(res_val); return ret; } static int ezusb_firmware_download(struct ezusb_priv *upriv, struct ez_usb_fw *fw) { u8 *fw_buffer; int retval, addr; int variant_offset; fw_buffer = kmalloc(FW_BUF_SIZE, GFP_KERNEL); if (!fw_buffer) { printk(KERN_ERR PFX "Out of memory for firmware buffer.\n"); return -ENOMEM; } /* * This byte is 1 and should be replaced with 0. The offset is * 0x10AD in version 0.0.6. The byte in question should follow * the end of the code pointed to by the jump in the beginning * of the firmware. Also, it is read by code located at 0x358. 
*/ variant_offset = be16_to_cpup((__be16 *) &fw->code[FW_VAR_OFFSET_PTR]); if (variant_offset >= fw->size) { printk(KERN_ERR PFX "Invalid firmware variant offset: " "0x%04x\n", variant_offset); retval = -EINVAL; goto fail; } retval = ezusb_8051_cpucs(upriv, 1); if (retval < 0) goto fail; for (addr = 0; addr < fw->size; addr += FW_BUF_SIZE) { /* 0x100-0x300 should be left alone, it contains card * specific data, like USB enumeration information */ if ((addr >= FW_HOLE_START) && (addr < FW_HOLE_END)) continue; memcpy(fw_buffer, &fw->code[addr], FW_BUF_SIZE); if (variant_offset >= addr && variant_offset < addr + FW_BUF_SIZE) { netdev_dbg(upriv->dev, "Patching card_variant byte at 0x%04X\n", variant_offset); fw_buffer[variant_offset - addr] = FW_VAR_VALUE; } retval = usb_control_msg(upriv->udev, usb_sndctrlpipe(upriv->udev, 0), EZUSB_REQUEST_FW_TRANS, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, addr, 0x0, fw_buffer, FW_BUF_SIZE, DEF_TIMEOUT); if (retval < 0) goto fail; } retval = ezusb_8051_cpucs(upriv, 0); if (retval < 0) goto fail; goto exit; fail: printk(KERN_ERR PFX "Firmware download failed, error %d\n", retval); exit: kfree(fw_buffer); return retval; } static int ezusb_access_ltv(struct ezusb_priv *upriv, struct request_context *ctx, u16 length, const void *data, u16 frame_type, void *ans_buff, unsigned ans_size, u16 *ans_length, ezusb_ctx_wait ezusb_ctx_wait_func) { int req_size; int retval = 0; enum ezusb_state state; if (!upriv->udev) { retval = -ENODEV; goto exit; } if (upriv->read_urb->status != -EINPROGRESS) err("%s: in urb not pending", __func__); /* protect upriv->reply_count, guarantee sequential numbers */ spin_lock_bh(&upriv->reply_count_lock); req_size = ezusb_fill_req(ctx->buf, length, ctx->out_rid, data, frame_type, upriv->reply_count); usb_fill_bulk_urb(ctx->outurb, upriv->udev, upriv->write_pipe, ctx->buf, req_size, ezusb_request_out_callback, ctx); if (ctx->in_rid) upriv->reply_count = ezusb_reply_inc(upriv->reply_count); ezusb_req_enqueue_run(upriv, ctx); spin_unlock_bh(&upriv->reply_count_lock); if (ctx->in_rid) ezusb_ctx_wait_func(upriv, ctx); state = ctx->state; switch (state) { case EZUSB_CTX_COMPLETE: retval = ctx->outurb->status; break; case EZUSB_CTX_QUEUED: case EZUSB_CTX_REQ_SUBMITTED: if (!ctx->in_rid) break; fallthrough; default: err("%s: Unexpected context state %d", __func__, state); fallthrough; case EZUSB_CTX_REQ_TIMEOUT: case EZUSB_CTX_REQ_FAILED: case EZUSB_CTX_RESP_TIMEOUT: case EZUSB_CTX_REQSUBMIT_FAIL: printk(KERN_ERR PFX "Access failed, resetting (state %d," " reply_count %d)\n", state, upriv->reply_count); upriv->reply_count = 0; if (state == EZUSB_CTX_REQ_TIMEOUT || state == EZUSB_CTX_RESP_TIMEOUT) { printk(KERN_ERR PFX "ctx timed out\n"); retval = -ETIMEDOUT; } else { printk(KERN_ERR PFX "ctx failed\n"); retval = -EFAULT; } goto exit; } if (ctx->in_rid) { struct ezusb_packet *ans = ctx->buf; unsigned exp_len; if (ans->hermes_len != 0) exp_len = le16_to_cpu(ans->hermes_len) * 2 + 12; else exp_len = 14; if (exp_len != ctx->buf_length) { err("%s: length mismatch for RID 0x%04x: " "expected %d, got %d", __func__, ctx->in_rid, exp_len, ctx->buf_length); retval = -EIO; goto exit; } if (ans_buff) memcpy(ans_buff, ans->data, min(exp_len, ans_size)); if (ans_length) *ans_length = le16_to_cpu(ans->hermes_len); } exit: ezusb_request_context_put(ctx); return retval; } static int __ezusb_write_ltv(struct hermes *hw, int bap, u16 rid, u16 length, const void *data, ezusb_ctx_wait ezusb_ctx_wait_func) { struct ezusb_priv *upriv = hw->priv; u16 frame_type; 
struct request_context *ctx; if (length == 0) return -EINVAL; length = HERMES_RECLEN_TO_BYTES(length); /* On memory mapped devices HERMES_RID_CNFGROUPADDRESSES can be * set to be empty, but the USB bridge doesn't like it */ if (length == 0) return 0; ctx = ezusb_alloc_ctx(upriv, rid, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; if (rid == EZUSB_RID_TX) frame_type = EZUSB_FRAME_DATA; else frame_type = EZUSB_FRAME_CONTROL; return ezusb_access_ltv(upriv, ctx, length, data, frame_type, NULL, 0, NULL, ezusb_ctx_wait_func); } static int ezusb_write_ltv(struct hermes *hw, int bap, u16 rid, u16 length, const void *data) { return __ezusb_write_ltv(hw, bap, rid, length, data, ezusb_req_ctx_wait_poll); } static int __ezusb_read_ltv(struct hermes *hw, int bap, u16 rid, unsigned bufsize, u16 *length, void *buf, ezusb_ctx_wait ezusb_ctx_wait_func) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; if (bufsize % 2) return -EINVAL; ctx = ezusb_alloc_ctx(upriv, rid, rid); if (!ctx) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL, buf, bufsize, length, ezusb_req_ctx_wait_poll); } static int ezusb_read_ltv(struct hermes *hw, int bap, u16 rid, unsigned bufsize, u16 *length, void *buf) { return __ezusb_read_ltv(hw, bap, rid, bufsize, length, buf, ezusb_req_ctx_wait_poll); } static int ezusb_read_ltv_preempt(struct hermes *hw, int bap, u16 rid, unsigned bufsize, u16 *length, void *buf) { return __ezusb_read_ltv(hw, bap, rid, bufsize, length, buf, ezusb_req_ctx_wait_compl); } static int ezusb_doicmd_wait(struct hermes *hw, u16 cmd, u16 parm0, u16 parm1, u16 parm2, struct hermes_response *resp) { WARN_ON_ONCE(1); return -EINVAL; } static int __ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0, struct hermes_response *resp, ezusb_ctx_wait ezusb_ctx_wait_func) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; __le16 data[4] = { cpu_to_le16(cmd), cpu_to_le16(parm0), 0, 0, }; netdev_dbg(upriv->dev, "0x%04X, parm0 0x%04X\n", cmd, parm0); ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_DOCMD, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, EZUSB_FRAME_CONTROL, NULL, 0, NULL, ezusb_ctx_wait_func); } static int ezusb_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0, struct hermes_response *resp) { return __ezusb_docmd_wait(hw, cmd, parm0, resp, ezusb_req_ctx_wait_poll); } static int ezusb_bap_pread(struct hermes *hw, int bap, void *buf, int len, u16 id, u16 offset) { struct ezusb_priv *upriv = hw->priv; struct ezusb_packet *ans = (void *) upriv->read_urb->transfer_buffer; int actual_length = upriv->read_urb->actual_length; if (id == EZUSB_RID_RX) { if ((sizeof(*ans) + offset + len) > actual_length) { printk(KERN_ERR PFX "BAP read beyond buffer end " "in rx frame\n"); return -EINVAL; } memcpy(buf, ans->data + offset, len); return 0; } if (EZUSB_IS_INFO(id)) { /* Include 4 bytes for length/type */ if ((sizeof(*ans) + offset + len - 4) > actual_length) { printk(KERN_ERR PFX "BAP read beyond buffer end " "in info frame\n"); return -EFAULT; } memcpy(buf, ans->data + offset - 4, len); } else { printk(KERN_ERR PFX "Unexpected fid 0x%04x\n", id); return -EINVAL; } return 0; } static int ezusb_read_pda(struct hermes *hw, __le16 *pda, u32 pda_addr, u16 pda_len) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; __le16 data[] = { cpu_to_le16(pda_addr & 0xffff), cpu_to_le16(pda_len - 4) }; ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_READ_PDA, EZUSB_RID_READ_PDA); if (!ctx) return -ENOMEM; /* wl_lkm does not 
include PDA size in the PDA area. * We will pad the information into pda, so other routines * don't have to be modified */ pda[0] = cpu_to_le16(pda_len - 2); /* Includes CFG_PROD_DATA but not itself */ pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */ return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, EZUSB_FRAME_CONTROL, &pda[2], pda_len - 4, NULL, ezusb_req_ctx_wait_compl); } static int ezusb_program_init(struct hermes *hw, u32 entry_point) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; __le32 data = cpu_to_le32(entry_point); ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_INIT, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, sizeof(data), &data, EZUSB_FRAME_CONTROL, NULL, 0, NULL, ezusb_req_ctx_wait_compl); } static int ezusb_program_end(struct hermes *hw) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_END, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, 0, NULL, EZUSB_FRAME_CONTROL, NULL, 0, NULL, ezusb_req_ctx_wait_compl); } static int ezusb_program_bytes(struct hermes *hw, const char *buf, u32 addr, u32 len) { struct ezusb_priv *upriv = hw->priv; struct request_context *ctx; __le32 data = cpu_to_le32(addr); int err; ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_SET_ADDR, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; err = ezusb_access_ltv(upriv, ctx, sizeof(data), &data, EZUSB_FRAME_CONTROL, NULL, 0, NULL, ezusb_req_ctx_wait_compl); if (err) return err; ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_PROG_BYTES, EZUSB_RID_ACK); if (!ctx) return -ENOMEM; return ezusb_access_ltv(upriv, ctx, len, buf, EZUSB_FRAME_CONTROL, NULL, 0, NULL, ezusb_req_ctx_wait_compl); } static int ezusb_program(struct hermes *hw, const char *buf, u32 addr, u32 len) { u32 ch_addr; u32 ch_len; int err = 0; /* We can only send 2048 bytes out of the bulk xmit at a time, * so we have to split any programming into chunks of <2048 * bytes. */ ch_len = (len < MAX_DL_SIZE) ? len : MAX_DL_SIZE; ch_addr = addr; while (ch_addr < (addr + len)) { pr_debug("Programming subblock of length %d " "to address 0x%08x. Data @ %p\n", ch_len, ch_addr, &buf[ch_addr - addr]); err = ezusb_program_bytes(hw, &buf[ch_addr - addr], ch_addr, ch_len); if (err) break; ch_addr += ch_len; ch_len = ((addr + len - ch_addr) < MAX_DL_SIZE) ? (addr + len - ch_addr) : MAX_DL_SIZE; } return err; } static netdev_tx_t ezusb_xmit(struct sk_buff *skb, struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); struct net_device_stats *stats = &dev->stats; struct ezusb_priv *upriv = priv->card; u8 mic[MICHAEL_MIC_LEN + 1]; int err = 0; int tx_control; unsigned long flags; struct request_context *ctx; u8 *buf; int tx_size; if (!netif_running(dev)) { printk(KERN_ERR "%s: Tx on stopped device!\n", dev->name); return NETDEV_TX_BUSY; } if (netif_queue_stopped(dev)) { printk(KERN_DEBUG "%s: Tx while transmitter busy!\n", dev->name); return NETDEV_TX_BUSY; } if (orinoco_lock(priv, &flags) != 0) { printk(KERN_ERR "%s: ezusb_xmit() called while hw_unavailable\n", dev->name); return NETDEV_TX_BUSY; } if (!netif_carrier_ok(dev) || (priv->iw_mode == NL80211_IFTYPE_MONITOR)) { /* Oops, the firmware hasn't established a connection, silently drop the packet (this seems to be the safest approach). 
*/ goto drop; } /* Check packet length */ if (skb->len < ETH_HLEN) goto drop; tx_control = 0; err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control, &mic[0]); if (err) goto drop; ctx = ezusb_alloc_ctx(upriv, EZUSB_RID_TX, 0); if (!ctx) goto drop; memset(ctx->buf, 0, BULK_BUF_SIZE); buf = ctx->buf->data; { __le16 *tx_cntl = (__le16 *)buf; *tx_cntl = cpu_to_le16(tx_control); buf += sizeof(*tx_cntl); } memcpy(buf, skb->data, skb->len); buf += skb->len; if (tx_control & HERMES_TXCTRL_MIC) { u8 *m = mic; /* Mic has been offset so it can be copied to an even * address. We're copying eveything anyway, so we * don't need to copy that first byte. */ if (skb->len % 2) m++; memcpy(buf, m, MICHAEL_MIC_LEN); buf += MICHAEL_MIC_LEN; } /* Finally, we actually initiate the send */ netif_stop_queue(dev); /* The card may behave better if we send evenly sized usb transfers */ tx_size = ALIGN(buf - ctx->buf->data, 2); err = ezusb_access_ltv(upriv, ctx, tx_size, NULL, EZUSB_FRAME_DATA, NULL, 0, NULL, ezusb_req_ctx_wait_skip); if (err) { netif_start_queue(dev); if (net_ratelimit()) printk(KERN_ERR "%s: Error %d transmitting packet\n", dev->name, err); goto busy; } netif_trans_update(dev); stats->tx_bytes += skb->len; goto ok; drop: stats->tx_errors++; stats->tx_dropped++; ok: orinoco_unlock(priv, &flags); dev_kfree_skb(skb); return NETDEV_TX_OK; busy: orinoco_unlock(priv, &flags); return NETDEV_TX_BUSY; } static int ezusb_allocate(struct hermes *hw, u16 size, u16 *fid) { *fid = EZUSB_RID_TX; return 0; } static int ezusb_hard_reset(struct orinoco_private *priv) { struct ezusb_priv *upriv = priv->card; int retval = ezusb_8051_cpucs(upriv, 1); if (retval < 0) { err("Failed to reset"); return retval; } retval = ezusb_8051_cpucs(upriv, 0); if (retval < 0) { err("Failed to unreset"); return retval; } netdev_dbg(upriv->dev, "sending control message\n"); retval = usb_control_msg(upriv->udev, usb_sndctrlpipe(upriv->udev, 0), EZUSB_REQUEST_TRIGGER, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x0, 0x0, NULL, 0, DEF_TIMEOUT); if (retval < 0) { err("EZUSB_REQUEST_TRIGGER failed retval %d", retval); return retval; } #if 0 dbg("Sending EZUSB_REQUEST_TRIG_AC"); retval = usb_control_msg(upriv->udev, usb_sndctrlpipe(upriv->udev, 0), EZUSB_REQUEST_TRIG_AC, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, 0x00FA, 0x0, NULL, 0, DEF_TIMEOUT); if (retval < 0) { err("EZUSB_REQUEST_TRIG_AC failed retval %d", retval); return retval; } #endif return 0; } static int ezusb_init(struct hermes *hw) { struct ezusb_priv *upriv = hw->priv; int retval; if (!upriv) return -EINVAL; upriv->reply_count = 0; /* Write the MAGIC number on the simulated registers to keep * orinoco.c happy */ hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC); hermes_write_regn(hw, RXFID, EZUSB_RID_RX); usb_kill_urb(upriv->read_urb); ezusb_submit_in_urb(upriv); retval = __ezusb_write_ltv(hw, 0, EZUSB_RID_INIT1, HERMES_BYTES_TO_RECLEN(2), "\x10\x00", ezusb_req_ctx_wait_compl); if (retval < 0) { printk(KERN_ERR PFX "EZUSB_RID_INIT1 error %d\n", retval); return retval; } retval = __ezusb_docmd_wait(hw, HERMES_CMD_INIT, 0, NULL, ezusb_req_ctx_wait_compl); if (retval < 0) { printk(KERN_ERR PFX "HERMES_CMD_INIT error %d\n", retval); return retval; } return 0; } static void ezusb_bulk_in_callback(struct urb *urb) { struct ezusb_priv *upriv = (struct ezusb_priv *) urb->context; struct ezusb_packet *ans = urb->transfer_buffer; u16 crc; u16 hermes_rid; if (upriv->udev == NULL) return; if (urb->status == -ETIMEDOUT) { /* When a device gets unplugged we get this every time 
* we resubmit, flooding the logs. Since we don't use * USB timeouts, it shouldn't happen any other time*/ pr_warn("%s: urb timed out, not resubmitting\n", __func__); return; } if (urb->status == -ECONNABORTED) { pr_warn("%s: connection abort, resubmitting urb\n", __func__); goto resubmit; } if ((urb->status == -EILSEQ) || (urb->status == -ENOENT) || (urb->status == -ECONNRESET)) { netdev_dbg(upriv->dev, "status %d, not resubmiting\n", urb->status); return; } if (urb->status) netdev_dbg(upriv->dev, "status: %d length: %d\n", urb->status, urb->actual_length); if (urb->actual_length < sizeof(*ans)) { err("%s: short read, ignoring", __func__); goto resubmit; } crc = build_crc(ans); if (le16_to_cpu(ans->crc) != crc) { err("CRC error, ignoring packet"); goto resubmit; } hermes_rid = le16_to_cpu(ans->hermes_rid); if ((hermes_rid != EZUSB_RID_RX) && !EZUSB_IS_INFO(hermes_rid)) { ezusb_request_in_callback(upriv, urb); } else if (upriv->dev) { struct net_device *dev = upriv->dev; struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; if (hermes_rid == EZUSB_RID_RX) { __orinoco_ev_rx(dev, hw); } else { hermes_write_regn(hw, INFOFID, le16_to_cpu(ans->hermes_rid)); __orinoco_ev_info(dev, hw); } } resubmit: if (upriv->udev) ezusb_submit_in_urb(upriv); } static inline void ezusb_delete(struct ezusb_priv *upriv) { struct list_head *item; struct list_head *tmp_item; unsigned long flags; BUG_ON(!upriv); mutex_lock(&upriv->mtx); upriv->udev = NULL; /* No timer will be rearmed from here */ usb_kill_urb(upriv->read_urb); spin_lock_irqsave(&upriv->req_lock, flags); list_for_each_safe(item, tmp_item, &upriv->req_active) { struct request_context *ctx; int err; ctx = list_entry(item, struct request_context, list); refcount_inc(&ctx->refcount); ctx->outurb->transfer_flags |= URB_ASYNC_UNLINK; err = usb_unlink_urb(ctx->outurb); spin_unlock_irqrestore(&upriv->req_lock, flags); if (err == -EINPROGRESS) wait_for_completion(&ctx->done); del_timer_sync(&ctx->timer); /* FIXME: there is an slight chance for the irq handler to * be running */ if (!list_empty(&ctx->list)) ezusb_ctx_complete(ctx); ezusb_request_context_put(ctx); spin_lock_irqsave(&upriv->req_lock, flags); } spin_unlock_irqrestore(&upriv->req_lock, flags); list_for_each_safe(item, tmp_item, &upriv->req_pending) ezusb_ctx_complete(list_entry(item, struct request_context, list)); if (upriv->read_urb && upriv->read_urb->status == -EINPROGRESS) printk(KERN_ERR PFX "Some URB in progress\n"); mutex_unlock(&upriv->mtx); if (upriv->read_urb) { kfree(upriv->read_urb->transfer_buffer); usb_free_urb(upriv->read_urb); } kfree(upriv->bap_buf); if (upriv->dev) { struct orinoco_private *priv = ndev_priv(upriv->dev); orinoco_if_del(priv); wiphy_unregister(priv_to_wiphy(upriv)); free_orinocodev(priv); } } static void ezusb_lock_irqsave(spinlock_t *lock, unsigned long *flags) __acquires(lock) { spin_lock_bh(lock); } static void ezusb_unlock_irqrestore(spinlock_t *lock, unsigned long *flags) __releases(lock) { spin_unlock_bh(lock); } static void ezusb_lock_irq(spinlock_t *lock) __acquires(lock) { spin_lock_bh(lock); } static void ezusb_unlock_irq(spinlock_t *lock) __releases(lock) { spin_unlock_bh(lock); } static const struct hermes_ops ezusb_ops = { .init = ezusb_init, .cmd_wait = ezusb_docmd_wait, .init_cmd_wait = ezusb_doicmd_wait, .allocate = ezusb_allocate, .read_ltv = ezusb_read_ltv, .read_ltv_pr = ezusb_read_ltv_preempt, .write_ltv = ezusb_write_ltv, .bap_pread = ezusb_bap_pread, .read_pda = ezusb_read_pda, .program_init = ezusb_program_init, 
.program_end = ezusb_program_end, .program = ezusb_program, .lock_irqsave = ezusb_lock_irqsave, .unlock_irqrestore = ezusb_unlock_irqrestore, .lock_irq = ezusb_lock_irq, .unlock_irq = ezusb_unlock_irq, }; static const struct net_device_ops ezusb_netdev_ops = { .ndo_open = orinoco_open, .ndo_stop = orinoco_stop, .ndo_start_xmit = ezusb_xmit, .ndo_set_rx_mode = orinoco_set_multicast_list, .ndo_change_mtu = orinoco_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = orinoco_tx_timeout, }; static int ezusb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(interface); struct orinoco_private *priv; struct hermes *hw; struct ezusb_priv *upriv = NULL; struct usb_interface_descriptor *iface_desc; struct usb_endpoint_descriptor *ep; const struct firmware *fw_entry = NULL; int retval = 0; int i; priv = alloc_orinocodev(sizeof(*upriv), &udev->dev, ezusb_hard_reset, NULL); if (!priv) { err("Couldn't allocate orinocodev"); retval = -ENOMEM; goto exit; } hw = &priv->hw; upriv = priv->card; mutex_init(&upriv->mtx); spin_lock_init(&upriv->reply_count_lock); spin_lock_init(&upriv->req_lock); INIT_LIST_HEAD(&upriv->req_pending); INIT_LIST_HEAD(&upriv->req_active); upriv->udev = udev; hw->iobase = (void __force __iomem *) &upriv->hermes_reg_fake; hw->reg_spacing = HERMES_16BIT_REGSPACING; hw->priv = upriv; hw->ops = &ezusb_ops; /* set up the endpoint information */ /* check out the endpoints */ iface_desc = &interface->cur_altsetting->desc; for (i = 0; i < iface_desc->bNumEndpoints; ++i) { ep = &interface->cur_altsetting->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep)) { /* we found a bulk in endpoint */ if (upriv->read_urb != NULL) { pr_warn("Found a second bulk in ep, ignored\n"); continue; } upriv->read_urb = usb_alloc_urb(0, GFP_KERNEL); if (!upriv->read_urb) goto error; if (le16_to_cpu(ep->wMaxPacketSize) != 64) pr_warn("bulk in: wMaxPacketSize!= 64\n"); if (ep->bEndpointAddress != (2 | USB_DIR_IN)) pr_warn("bulk in: bEndpointAddress: %d\n", ep->bEndpointAddress); upriv->read_pipe = usb_rcvbulkpipe(udev, ep-> bEndpointAddress); upriv->read_urb->transfer_buffer = kmalloc(BULK_BUF_SIZE, GFP_KERNEL); if (!upriv->read_urb->transfer_buffer) { err("Couldn't allocate IN buffer"); goto error; } } if (usb_endpoint_is_bulk_out(ep)) { /* we found a bulk out endpoint */ if (upriv->bap_buf != NULL) { pr_warn("Found a second bulk out ep, ignored\n"); continue; } if (le16_to_cpu(ep->wMaxPacketSize) != 64) pr_warn("bulk out: wMaxPacketSize != 64\n"); if (ep->bEndpointAddress != 2) pr_warn("bulk out: bEndpointAddress: %d\n", ep->bEndpointAddress); upriv->write_pipe = usb_sndbulkpipe(udev, ep-> bEndpointAddress); upriv->bap_buf = kmalloc(BULK_BUF_SIZE, GFP_KERNEL); if (!upriv->bap_buf) { err("Couldn't allocate bulk_out_buffer"); goto error; } } } if (!upriv->bap_buf || !upriv->read_urb) { err("Didn't find the required bulk endpoints"); goto error; } if (request_firmware(&fw_entry, "orinoco_ezusb_fw", &interface->dev) == 0) { firmware.size = fw_entry->size; firmware.code = fw_entry->data; } if (firmware.size && firmware.code) { if (ezusb_firmware_download(upriv, &firmware) < 0) goto error; } else { err("No firmware to download"); goto error; } if (ezusb_hard_reset(priv) < 0) { err("Cannot reset the device"); goto error; } /* If the firmware is already downloaded orinoco.c will call * ezusb_init but if the firmware is not already there, that will make * the kernel very unstable, so we try 
initializing here and quit in * case of error */ if (ezusb_init(hw) < 0) { err("Couldn't initialize the device"); err("Firmware may not be downloaded or may be wrong."); goto error; } /* Initialise the main driver */ if (orinoco_init(priv) != 0) { err("orinoco_init() failed\n"); goto error; } if (orinoco_if_add(priv, 0, 0, &ezusb_netdev_ops) != 0) { upriv->dev = NULL; err("%s: orinoco_if_add() failed", __func__); wiphy_unregister(priv_to_wiphy(priv)); goto error; } upriv->dev = priv->ndev; goto exit; error: ezusb_delete(upriv); if (upriv->dev) { /* upriv->dev was 0, so ezusb_delete() didn't free it */ free_orinocodev(priv); } upriv = NULL; retval = -EFAULT; exit: if (fw_entry) { firmware.code = NULL; firmware.size = 0; release_firmware(fw_entry); } usb_set_intfdata(interface, upriv); return retval; } static void ezusb_disconnect(struct usb_interface *intf) { struct ezusb_priv *upriv = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); ezusb_delete(upriv); printk(KERN_INFO PFX "Disconnected\n"); } /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver orinoco_driver = { .name = DRIVER_NAME, .probe = ezusb_probe, .disconnect = ezusb_disconnect, .id_table = ezusb_table, .disable_hub_initiated_lpm = 1, }; module_usb_driver(orinoco_driver); MODULE_AUTHOR("Manuel Estrada Sainz"); MODULE_DESCRIPTION("Driver for Orinoco wireless LAN cards using EZUSB bridge"); MODULE_LICENSE("Dual MPL/GPL");
linux-master
drivers/net/wireless/intersil/orinoco/orinoco_usb.c
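The ezusb_probe() routine above walks the interface's endpoint descriptors, keeps the first bulk-IN and bulk-OUT endpoints it finds, and derives the read/write pipes from their addresses. The following minimal sketch (not part of the driver; example_find_bulk_pipes() and struct example_pipes are hypothetical names) shows that discovery pattern with the standard USB helpers and error handling reduced to the essentials.

#include <linux/usb.h>
#include <linux/errno.h>

struct example_pipes {          /* hypothetical container, for illustration only */
        unsigned int read_pipe;
        unsigned int write_pipe;
};

static int example_find_bulk_pipes(struct usb_interface *intf,
                                   struct example_pipes *p)
{
        struct usb_device *udev = interface_to_usbdev(intf);
        struct usb_host_interface *alt = intf->cur_altsetting;
        int i;

        p->read_pipe = 0;
        p->write_pipe = 0;

        for (i = 0; i < alt->desc.bNumEndpoints; i++) {
                struct usb_endpoint_descriptor *ep = &alt->endpoint[i].desc;

                /* First bulk-IN endpoint becomes the receive pipe */
                if (usb_endpoint_is_bulk_in(ep) && !p->read_pipe)
                        p->read_pipe = usb_rcvbulkpipe(udev,
                                                       ep->bEndpointAddress);
                /* First bulk-OUT endpoint becomes the transmit pipe */
                else if (usb_endpoint_is_bulk_out(ep) && !p->write_pipe)
                        p->write_pipe = usb_sndbulkpipe(udev,
                                                        ep->bEndpointAddress);
        }

        /* Both endpoints are required, as in the real probe routine */
        return (p->read_pipe && p->write_pipe) ? 0 : -ENODEV;
}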
/* orinoco_cs.c (formerly known as dldwd_cs.c) * * A driver for "Hermes" chipset based PCMCIA wireless adaptors, such * as the Lucent WavelanIEEE/Orinoco cards and their OEM (Cabletron/ * EnteraSys RoamAbout 802.11, ELSA Airlancer, Melco Buffalo and others). * It should also be usable on various Prism II based cards such as the * Linksys, D-Link and Farallon Skyline. It should also work on Symbol * cards such as the 3Com AirConnect and Ericsson WLAN. * * Copyright notice & release notes in file main.c */ #define DRIVER_NAME "orinoco_cs" #define PFX DRIVER_NAME ": " #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include "orinoco.h" /********************************************************************/ /* Module stuff */ /********************************************************************/ MODULE_AUTHOR("David Gibson <[email protected]>"); MODULE_DESCRIPTION("Driver for PCMCIA Lucent Orinoco," " Prism II based and similar wireless cards"); MODULE_LICENSE("Dual MPL/GPL"); /* Module parameters */ /* Some D-Link cards have buggy CIS. They do work at 5v properly, but * don't have any CIS entry for it. This workaround it... */ static int ignore_cis_vcc; /* = 0 */ module_param(ignore_cis_vcc, int, 0); MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket"); /********************************************************************/ /* Data structures */ /********************************************************************/ /* PCMCIA specific device information (goes in the card field of * struct orinoco_private */ struct orinoco_pccard { struct pcmcia_device *p_dev; /* Used to handle hard reset */ /* yuck, we need this hack to work around the insanity of the * PCMCIA layer */ unsigned long hard_reset_in_progress; }; /********************************************************************/ /* Function prototypes */ /********************************************************************/ static int orinoco_cs_config(struct pcmcia_device *link); static void orinoco_cs_release(struct pcmcia_device *link); static void orinoco_cs_detach(struct pcmcia_device *p_dev); /********************************************************************/ /* Device methods */ /********************************************************************/ static int orinoco_cs_hard_reset(struct orinoco_private *priv) { struct orinoco_pccard *card = priv->card; struct pcmcia_device *link = card->p_dev; int err; /* We need atomic ops here, because we're not holding the lock */ set_bit(0, &card->hard_reset_in_progress); err = pcmcia_reset_card(link->socket); if (err) return err; msleep(100); clear_bit(0, &card->hard_reset_in_progress); return 0; } /********************************************************************/ /* PCMCIA stuff */ /********************************************************************/ static int orinoco_cs_probe(struct pcmcia_device *link) { struct orinoco_private *priv; struct orinoco_pccard *card; int ret; priv = alloc_orinocodev(sizeof(*card), &link->dev, orinoco_cs_hard_reset, NULL); if (!priv) return -ENOMEM; card = priv->card; /* Link both structures together */ card->p_dev = link; link->priv = priv; ret = orinoco_cs_config(link); if (ret) goto err_free_orinocodev; return 0; err_free_orinocodev: free_orinocodev(priv); return ret; } static void orinoco_cs_detach(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; orinoco_if_del(priv); orinoco_cs_release(link); 
wiphy_unregister(priv_to_wiphy(priv)); free_orinocodev(priv); } /* orinoco_cs_detach */ static int orinoco_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) { if (p_dev->config_index == 0) return -EINVAL; return pcmcia_request_io(p_dev); }; static int orinoco_cs_config(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; struct hermes *hw = &priv->hw; int ret; void __iomem *mem; link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_IO | CONF_ENABLE_IRQ; if (ignore_cis_vcc) link->config_flags &= ~CONF_AUTO_CHECK_VCC; ret = pcmcia_loop_config(link, orinoco_cs_config_check, NULL); if (ret) { if (!ignore_cis_vcc) printk(KERN_ERR PFX "GetNextTuple(): No matching " "CIS configuration. Maybe you need the " "ignore_cis_vcc=1 parameter.\n"); goto failed; } mem = ioport_map(link->resource[0]->start, resource_size(link->resource[0])); if (!mem) goto failed; /* We initialize the hermes structure before completing PCMCIA * configuration just in case the interrupt handler gets * called. */ hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); ret = pcmcia_request_irq(link, orinoco_interrupt); if (ret) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; /* Initialise the main driver */ if (orinoco_init(priv) != 0) { printk(KERN_ERR PFX "orinoco_init() failed\n"); goto failed; } /* Register an interface with the stack */ if (orinoco_if_add(priv, link->resource[0]->start, link->irq, NULL) != 0) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); goto failed; } return 0; failed: orinoco_cs_release(link); return -ENODEV; } /* orinoco_cs_config */ static void orinoco_cs_release(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; unsigned long flags; /* We're committed to taking the device away now, so mark the * hardware as unavailable */ priv->hw.ops->lock_irqsave(&priv->lock, &flags); priv->hw_unavailable++; priv->hw.ops->unlock_irqrestore(&priv->lock, &flags); pcmcia_disable_device(link); if (priv->hw.iobase) ioport_unmap(priv->hw.iobase); } /* orinoco_cs_release */ static int orinoco_cs_suspend(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; struct orinoco_pccard *card = priv->card; /* This is probably racy, but I can't think of a better way, short of rewriting the PCMCIA layer to not suck :-( */ if (!test_bit(0, &card->hard_reset_in_progress)) orinoco_down(priv); return 0; } static int orinoco_cs_resume(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; struct orinoco_pccard *card = priv->card; int err = 0; if (!test_bit(0, &card->hard_reset_in_progress)) err = orinoco_up(priv); return err; } /********************************************************************/ /* Module initialization */ /********************************************************************/ static const struct pcmcia_device_id orinoco_cs_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x0101, 0x0777), /* 3Com AirConnect PCI 777A */ PCMCIA_DEVICE_MANF_CARD(0x016b, 0x0001), /* Ericsson WLAN Card C11 */ PCMCIA_DEVICE_MANF_CARD(0x01eb, 0x080a), /* Nortel Networks eMobility 802.11 Wireless Adapter */ PCMCIA_DEVICE_MANF_CARD(0x0261, 0x0002), /* AirWay 802.11 Adapter (PCMCIA) */ PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0001), /* ARtem Onair */ PCMCIA_DEVICE_MANF_CARD(0x0268, 0x0003), /* ARtem Onair Comcard 11 */ PCMCIA_DEVICE_MANF_CARD(0x026f, 0x0305), /* Buffalo WLI-PCM-S11 */ PCMCIA_DEVICE_MANF_CARD(0x02aa, 0x0002), /* ASUS SpaceLink WL-100 */ PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x0002), /* SpeedStream SS1021 Wireless Adapter 
*/ PCMCIA_DEVICE_MANF_CARD(0x02ac, 0x3021), /* SpeedStream Wireless Adapter */ PCMCIA_DEVICE_MANF_CARD(0x14ea, 0xb001), /* PLANEX RoadLannerWave GW-NS11H */ PCMCIA_DEVICE_PROD_ID12("3Com", "3CRWE737A AirConnect Wireless LAN PC Card", 0x41240e5b, 0x56010af3), PCMCIA_DEVICE_PROD_ID12("Allied Telesyn", "AT-WCL452 Wireless PCMCIA Radio", 0x5cd01705, 0x4271660f), PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11B_CF_CARD_25", 0x78fc06ee, 0x45a50c1e), PCMCIA_DEVICE_PROD_ID12("ASUS", "802_11b_PC_CARD_25", 0x78fc06ee, 0xdb9aa842), PCMCIA_DEVICE_PROD_ID12("Avaya Communication", "Avaya Wireless PC Card", 0xd8a43b78, 0x0d341169), PCMCIA_DEVICE_PROD_ID12("BENQ", "AWL100 PCMCIA ADAPTER", 0x35dadc74, 0x01f7fedb), PCMCIA_DEVICE_PROD_ID12("Cabletron", "RoamAbout 802.11 DS", 0x32d445f5, 0xedeffd90), PCMCIA_DEVICE_PROD_ID12("D-Link Corporation", "D-Link DWL-650H 11Mbps WLAN Adapter", 0xef544d24, 0xcd8ea916), PCMCIA_DEVICE_PROD_ID12("ELSA", "AirLancer MC-11", 0x4507a33a, 0xef54f0e3), PCMCIA_DEVICE_PROD_ID12("HyperLink", "Wireless PC Card 11Mbps", 0x56cc3f1a, 0x0bcf220c), PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless 2011 LAN PC Card", 0x816cc815, 0x07f58077), PCMCIA_DEVICE_PROD_ID12("LeArtery", "SYNCBYAIR 11Mbps Wireless LAN PC Card", 0x7e3b326a, 0x49893e92), PCMCIA_DEVICE_PROD_ID12("Lucent Technologies", "WaveLAN/IEEE", 0x23eb9949, 0xc562e72a), PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11", 0x481e0094, 0x7360e410), PCMCIA_DEVICE_PROD_ID12("MELCO", "WLI-PCM-L11G", 0x481e0094, 0xf57ca4b3), PCMCIA_DEVICE_PROD_ID12("NCR", "WaveLAN/IEEE", 0x24358cd4, 0xc562e72a), PCMCIA_DEVICE_PROD_ID12("Nortel Networks", "emobility 802.11 Wireless LAN PC Card", 0x2d617ea0, 0x88cd5767), PCMCIA_DEVICE_PROD_ID12("OTC", "Wireless AirEZY 2411-PCC WLAN Card", 0x4ac44287, 0x235a6bed), PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PC CARD HARMONY 80211B", 0xc6536a5e, 0x090c3cd9), PCMCIA_DEVICE_PROD_ID12("PROXIM", "LAN PCI CARD HARMONY 80211B", 0xc6536a5e, 0x9f494e26), PCMCIA_DEVICE_PROD_ID12("SAMSUNG", "11Mbps WLAN Card", 0x43d74cb4, 0x579bd91b), PCMCIA_DEVICE_PROD_ID12("Symbol Technologies", "LA4111 Spectrum24 Wireless LAN PC Card", 0x3f02b4d6, 0x3663cb0e), PCMCIA_DEVICE_MANF_CARD_PROD_ID3(0x0156, 0x0002, "Version 01.01", 0xd27deb1a), /* Lucent Orinoco */ #ifdef CONFIG_HERMES_PRISM /* Only entries that certainly identify Prism chipset */ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7100), /* SonicWALL Long Range Wireless Card */ PCMCIA_DEVICE_MANF_CARD(0x000b, 0x7300), /* Sohoware NCP110, Philips 802.11b */ PCMCIA_DEVICE_MANF_CARD(0x0089, 0x0002), /* AnyPoint(TM) Wireless II PC Card */ PCMCIA_DEVICE_MANF_CARD(0x0126, 0x8000), /* PROXIM RangeLAN-DS/LAN PC CARD */ PCMCIA_DEVICE_MANF_CARD(0x0138, 0x0002), /* Compaq WL100 11 Mbps Wireless Adapter */ PCMCIA_DEVICE_MANF_CARD(0x01ff, 0x0008), /* Intermec MobileLAN 11Mbps 802.11b WLAN Card */ PCMCIA_DEVICE_MANF_CARD(0x0250, 0x0002), /* Samsung SWL2000-N 11Mb/s WLAN Card */ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1612), /* Linksys WPC11 Version 2.5 */ PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1613), /* Linksys WPC11 Version 3 */ PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0002), /* Compaq HNW-100 11 Mbps Wireless Adapter */ PCMCIA_DEVICE_MANF_CARD(0x028a, 0x0673), /* Linksys WCF12 Wireless CompactFlash Card */ PCMCIA_DEVICE_MANF_CARD(0x50c2, 0x7300), /* Airvast WN-100 */ PCMCIA_DEVICE_MANF_CARD(0x9005, 0x0021), /* Adaptec Ultra Wireless ANW-8030 */ PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0008), /* CONTEC FLEXSCAN/FX-DDS110-PCC */ PCMCIA_DEVICE_MANF_CARD(0xc250, 0x0002), /* Conceptronic CON11Cpro, EMTAC A2424i */ PCMCIA_DEVICE_MANF_CARD(0xd601, 
0x0002), /* Safeway 802.11b, ZCOMAX AirRunner/XI-300 */ PCMCIA_DEVICE_MANF_CARD(0xd601, 0x0005), /* D-Link DCF660, Sandisk Connect SDWCFB-000 */ PCMCIA_DEVICE_PROD_ID123("Instant Wireless ", " Network PC CARD", "Version 01.02", 0x11d901af, 0x6e9bd926, 0x4b74baa0), PCMCIA_DEVICE_PROD_ID12("ACTIONTEC", "PRISM Wireless LAN PC Card", 0x393089da, 0xa71e69d5), PCMCIA_DEVICE_PROD_ID12("Addtron", "AWP-100 Wireless PCMCIA", 0xe6ec52ce, 0x08649af2), PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-CF-S11G", 0x2decece3, 0x82067c18), PCMCIA_DEVICE_PROD_ID12("BUFFALO", "WLI-PCM-L11G", 0x2decece3, 0xf57ca4b3), PCMCIA_DEVICE_PROD_ID12("Compaq", "WL200_11Mbps_Wireless_PCI_Card", 0x54f7c49c, 0x15a75e5b), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCC-11", 0x5261440f, 0xa6405584), PCMCIA_DEVICE_PROD_ID12("corega K.K.", "Wireless LAN PCCA-11", 0x5261440f, 0xdf6115f9), PCMCIA_DEVICE_PROD_ID12("corega_K.K.", "Wireless_LAN_PCCB-11", 0x29e33311, 0xee7a27ae), PCMCIA_DEVICE_PROD_ID12("Digital Data Communications", "WPC-0100", 0xfdd73470, 0xe0b6f146), PCMCIA_DEVICE_PROD_ID12("D", "Link DRC-650 11Mbps WLAN Card", 0x71b18589, 0xf144e3ac), PCMCIA_DEVICE_PROD_ID12("D", "Link DWL-650 11Mbps WLAN Card", 0x71b18589, 0xb6f1b0ab), PCMCIA_DEVICE_PROD_ID12(" ", "IEEE 802.11 Wireless LAN/PC Card", 0x3b6e20c8, 0xefccafe9), PCMCIA_DEVICE_PROD_ID12("INTERSIL", "HFA384x/IEEE", 0x74c5e40d, 0xdb472a18), PCMCIA_DEVICE_PROD_ID12("INTERSIL", "I-GATE 11M PC Card / PC Card plus", 0x74c5e40d, 0x8304ff77), PCMCIA_DEVICE_PROD_ID12("Intersil", "PRISM 2_5 PCMCIA ADAPTER", 0x4b801a17, 0x6345a0bf), PCMCIA_DEVICE_PROD_ID12("Linksys", "Wireless CompactFlash Card", 0x0733cc81, 0x0c52f395), PCMCIA_DEVICE_PROD_ID12("Microsoft", "Wireless Notebook Adapter MN-520", 0x5961bf85, 0x6eec8c01), PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401RA Wireless PC", "Card", 0x0306467f, 0x9762e8f1), PCMCIA_DEVICE_PROD_ID12("NETGEAR MA401 Wireless PC", "Card", 0xa37434e9, 0x9762e8f1), PCMCIA_DEVICE_PROD_ID12("OEM", "PRISM2 IEEE 802.11 PC-Card", 0xfea54c90, 0x48f2bdd6), PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-CF110", 0x209f40ab, 0xd9715264), PCMCIA_DEVICE_PROD_ID12("PLANEX", "GeoWave/GW-NS110", 0x209f40ab, 0x46263178), PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2532W-B EliteConnect Wireless Adapter", 0xc4f8b18b, 0x196bd757), PCMCIA_DEVICE_PROD_ID12("SMC", "SMC2632W", 0xc4f8b18b, 0x474a1f2a), PCMCIA_DEVICE_PROD_ID12("ZoomAir 11Mbps High", "Rate wireless Networking", 0x273fe3db, 0x32a1eaee), PCMCIA_DEVICE_PROD_ID3("HFA3863", 0x355cb092), PCMCIA_DEVICE_PROD_ID3("ISL37100P", 0x630d52b2), PCMCIA_DEVICE_PROD_ID3("ISL37101P-10", 0xdd97a26b), PCMCIA_DEVICE_PROD_ID3("ISL37300P", 0xc9049a39), /* This may be Agere or Intersil Firmware */ PCMCIA_DEVICE_MANF_CARD(0x0156, 0x0002), #endif PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, orinoco_cs_ids); static struct pcmcia_driver orinoco_driver = { .owner = THIS_MODULE, .name = DRIVER_NAME, .probe = orinoco_cs_probe, .remove = orinoco_cs_detach, .id_table = orinoco_cs_ids, .suspend = orinoco_cs_suspend, .resume = orinoco_cs_resume, }; module_pcmcia_driver(orinoco_driver);
linux-master
drivers/net/wireless/intersil/orinoco/orinoco_cs.c
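orinoco_cs_hard_reset() above bounces the PCMCIA socket, which causes the core to invoke the driver's suspend/resume handlers, so the reset is flagged with an atomic bit that those handlers test before running the normal down/up path. A minimal sketch of that coordination follows; the example_* names are hypothetical and the actual pcmcia_reset_card() call is only indicated in a comment.

#include <linux/bitops.h>
#include <linux/delay.h>

/* hypothetical state, for illustration */
static unsigned long example_hard_reset_in_progress;

static void example_hard_reset(void)
{
        /* Atomic ops: the suspend/resume callbacks may run concurrently */
        set_bit(0, &example_hard_reset_in_progress);
        /* ... pcmcia_reset_card(link->socket) would go here ... */
        msleep(100);
        clear_bit(0, &example_hard_reset_in_progress);
}

static int example_suspend(void)
{
        /* A "suspend" caused by our own socket reset must not tear the
         * interface down; a real suspend would call orinoco_down() here. */
        if (test_bit(0, &example_hard_reset_in_progress))
                return 0;

        /* ... normal suspend path ... */
        return 0;
}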
/* Encapsulate basic setting changes and retrieval on Hermes hardware * * See copyright notice in main.c */ #include <linux/kernel.h> #include <linux/device.h> #include <linux/if_arp.h> #include <linux/ieee80211.h> #include <linux/wireless.h> #include <net/cfg80211.h> #include "hermes.h" #include "hermes_rid.h" #include "orinoco.h" #include "hw.h" #define SYMBOL_MAX_VER_LEN (14) /* Symbol firmware has a bug allocating buffers larger than this */ #define TX_NICBUF_SIZE_BUG 1585 /********************************************************************/ /* Data tables */ /********************************************************************/ /* This tables gives the actual meanings of the bitrate IDs returned * by the firmware. */ static const struct { int bitrate; /* in 100s of kilobits */ int automatic; u16 agere_txratectrl; u16 intersil_txratectrl; } bitrate_table[] = { {110, 1, 3, 15}, /* Entry 0 is the default */ {10, 0, 1, 1}, {10, 1, 1, 1}, {20, 0, 2, 2}, {20, 1, 6, 3}, {55, 0, 4, 4}, {55, 1, 7, 7}, {110, 0, 5, 8}, }; #define BITRATE_TABLE_SIZE ARRAY_SIZE(bitrate_table) /* Firmware version encoding */ struct comp_id { u16 id, variant, major, minor; } __packed; static inline enum fwtype determine_firmware_type(struct comp_id *nic_id) { if (nic_id->id < 0x8000) return FIRMWARE_TYPE_AGERE; else if (nic_id->id == 0x8000 && nic_id->major == 0) return FIRMWARE_TYPE_SYMBOL; else return FIRMWARE_TYPE_INTERSIL; } /* Set priv->firmware type, determine firmware properties * This function can be called before we have registerred with netdev, * so all errors go out with dev_* rather than printk * * If non-NULL stores a firmware description in fw_name. * If non-NULL stores a HW version in hw_ver * * These are output via generic cfg80211 ethtool support. */ int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name, size_t fw_name_len, u32 *hw_ver) { struct device *dev = priv->dev; struct hermes *hw = &priv->hw; int err; struct comp_id nic_id, sta_id; unsigned int firmver; char tmp[SYMBOL_MAX_VER_LEN + 1] __attribute__((aligned(2))); /* Get the hardware version */ err = HERMES_READ_RECORD_PR(hw, USER_BAP, HERMES_RID_NICID, &nic_id); if (err) { dev_err(dev, "Cannot read hardware identity: error %d\n", err); return err; } le16_to_cpus(&nic_id.id); le16_to_cpus(&nic_id.variant); le16_to_cpus(&nic_id.major); le16_to_cpus(&nic_id.minor); dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n", nic_id.id, nic_id.variant, nic_id.major, nic_id.minor); if (hw_ver) *hw_ver = (((nic_id.id & 0xff) << 24) | ((nic_id.variant & 0xff) << 16) | ((nic_id.major & 0xff) << 8) | (nic_id.minor & 0xff)); priv->firmware_type = determine_firmware_type(&nic_id); /* Get the firmware version */ err = HERMES_READ_RECORD_PR(hw, USER_BAP, HERMES_RID_STAID, &sta_id); if (err) { dev_err(dev, "Cannot read station identity: error %d\n", err); return err; } le16_to_cpus(&sta_id.id); le16_to_cpus(&sta_id.variant); le16_to_cpus(&sta_id.major); le16_to_cpus(&sta_id.minor); dev_info(dev, "Station identity %04x:%04x:%04x:%04x\n", sta_id.id, sta_id.variant, sta_id.major, sta_id.minor); switch (sta_id.id) { case 0x15: dev_err(dev, "Primary firmware is active\n"); return -ENODEV; case 0x14b: dev_err(dev, "Tertiary firmware is active\n"); return -ENODEV; case 0x1f: /* Intersil, Agere, Symbol Spectrum24 */ case 0x21: /* Symbol Spectrum24 Trilogy */ break; default: dev_notice(dev, "Unknown station ID, please report\n"); break; } /* Default capabilities */ priv->has_sensitivity = 1; priv->has_mwo = 0; priv->has_preamble = 0; 
priv->has_port3 = 1; priv->has_ibss = 1; priv->has_wep = 0; priv->has_big_wep = 0; priv->has_alt_txcntl = 0; priv->has_ext_scan = 0; priv->has_wpa = 0; priv->do_fw_download = 0; /* Determine capabilities from the firmware version */ switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout, ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */ if (fw_name) snprintf(fw_name, fw_name_len, "Lucent/Agere %d.%02d", sta_id.major, sta_id.minor); firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor; priv->has_ibss = (firmver >= 0x60006); priv->has_wep = (firmver >= 0x40020); priv->has_big_wep = 1; /* FIXME: this is wrong - how do we tell Gold cards from the others? */ priv->has_mwo = (firmver >= 0x60000); priv->has_pm = (firmver >= 0x40020); /* Don't work in 7.52 ? */ priv->ibss_port = 1; priv->has_hostscan = (firmver >= 0x8000a); priv->do_fw_download = 1; priv->broken_monitor = (firmver >= 0x80000); priv->has_alt_txcntl = (firmver >= 0x90000); /* All 9.x ? */ priv->has_ext_scan = (firmver >= 0x90000); /* All 9.x ? */ priv->has_wpa = (firmver >= 0x9002a); /* Tested with Agere firmware : * 1.16 ; 4.08 ; 4.52 ; 6.04 ; 6.16 ; 7.28 => Jean II * Tested CableTron firmware : 4.32 => Anton */ break; case FIRMWARE_TYPE_SYMBOL: /* Symbol , 3Com AirConnect, Intel, Ericsson WLAN */ /* Intel MAC : 00:02:B3:* */ /* 3Com MAC : 00:50:DA:* */ memset(tmp, 0, sizeof(tmp)); /* Get the Symbol firmware version */ err = hw->ops->read_ltv_pr(hw, USER_BAP, HERMES_RID_SECONDARYVERSION_SYMBOL, SYMBOL_MAX_VER_LEN, NULL, &tmp); if (err) { dev_warn(dev, "Error %d reading Symbol firmware info. " "Wildly guessing capabilities...\n", err); firmver = 0; tmp[0] = '\0'; } else { /* The firmware revision is a string, the format is * something like : "V2.20-01". * Quick and dirty parsing... - Jean II */ firmver = ((tmp[1] - '0') << 16) | ((tmp[3] - '0') << 12) | ((tmp[4] - '0') << 8) | ((tmp[6] - '0') << 4) | (tmp[7] - '0'); tmp[SYMBOL_MAX_VER_LEN] = '\0'; } if (fw_name) snprintf(fw_name, fw_name_len, "Symbol %s", tmp); priv->has_ibss = (firmver >= 0x20000); priv->has_wep = (firmver >= 0x15012); priv->has_big_wep = (firmver >= 0x20000); priv->has_pm = (firmver >= 0x20000 && firmver < 0x22000) || (firmver >= 0x29000 && firmver < 0x30000) || firmver >= 0x31000; priv->has_preamble = (firmver >= 0x20000); priv->ibss_port = 4; /* Symbol firmware is found on various cards, but * there has been no attempt to check firmware * download on non-spectrum_cs based cards. * * Given that the Agere firmware download works * differently, we should avoid doing a firmware * download with the Symbol algorithm on non-spectrum * cards. * * For now we can identify a spectrum_cs based card * because it has a firmware reset function. */ priv->do_fw_download = (priv->stop_fw != NULL); priv->broken_disableport = (firmver == 0x25013) || (firmver >= 0x30000 && firmver <= 0x31000); priv->has_hostscan = (firmver >= 0x31001) || (firmver >= 0x29057 && firmver < 0x30000); /* Tested with Intel firmware : 0x20015 => Jean II */ /* Tested with 3Com firmware : 0x15012 & 0x22001 => Jean II */ break; case FIRMWARE_TYPE_INTERSIL: /* D-Link, Linksys, Adtron, ZoomAir, and many others... 
* Samsung, Compaq 100/200 and Proxim are slightly * different and less well tested */ /* D-Link MAC : 00:40:05:* */ /* Addtron MAC : 00:90:D1:* */ if (fw_name) snprintf(fw_name, fw_name_len, "Intersil %d.%d.%d", sta_id.major, sta_id.minor, sta_id.variant); firmver = ((unsigned long)sta_id.major << 16) | ((unsigned long)sta_id.minor << 8) | sta_id.variant; priv->has_ibss = (firmver >= 0x000700); /* FIXME */ priv->has_big_wep = priv->has_wep = (firmver >= 0x000800); priv->has_pm = (firmver >= 0x000700); priv->has_hostscan = (firmver >= 0x010301); if (firmver >= 0x000800) priv->ibss_port = 0; else { dev_notice(dev, "Intersil firmware earlier than v0.8.x" " - several features not supported\n"); priv->ibss_port = 1; } break; } if (fw_name) dev_info(dev, "Firmware determined as %s\n", fw_name); #ifndef CONFIG_HERMES_PRISM if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL) { dev_err(dev, "Support for Prism chipset is not enabled\n"); return -ENODEV; } #endif return 0; } /* Read settings from EEPROM into our private structure. * MAC address gets dropped into callers buffer * Can be called before netdev registration. */ int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr) { struct device *dev = priv->dev; struct hermes_idstring nickbuf; struct hermes *hw = &priv->hw; int len; int err; u16 reclen; /* Get the MAC address */ err = hw->ops->read_ltv_pr(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, ETH_ALEN, NULL, dev_addr); if (err) { dev_warn(dev, "Failed to read MAC address!\n"); goto out; } dev_dbg(dev, "MAC address %pM\n", dev_addr); /* Get the station name */ err = hw->ops->read_ltv_pr(hw, USER_BAP, HERMES_RID_CNFOWNNAME, sizeof(nickbuf), &reclen, &nickbuf); if (err) { dev_err(dev, "failed to read station name\n"); goto out; } if (nickbuf.len) len = min(IW_ESSID_MAX_SIZE, (int)le16_to_cpu(nickbuf.len)); else len = min(IW_ESSID_MAX_SIZE, 2 * reclen); memcpy(priv->nick, &nickbuf.val, len); priv->nick[len] = '\0'; dev_dbg(dev, "Station name \"%s\"\n", priv->nick); /* Get allowed channels */ err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CHANNELLIST, &priv->channel_mask); if (err) { dev_err(dev, "Failed to read channel list!\n"); goto out; } /* Get initial AP density */ err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE, &priv->ap_density); if (err || priv->ap_density < 1 || priv->ap_density > 3) priv->has_sensitivity = 0; /* Get initial RTS threshold */ err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD, &priv->rts_thresh); if (err) { dev_err(dev, "Failed to read RTS threshold!\n"); goto out; } /* Get initial fragmentation settings */ if (priv->has_mwo) err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFMWOROBUST_AGERE, &priv->mwo_robust); else err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD, &priv->frag_thresh); if (err) { dev_err(dev, "Failed to read fragmentation settings!\n"); goto out; } /* Power management setup */ if (priv->has_pm) { priv->pm_on = 0; priv->pm_mcast = 1; err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFMAXSLEEPDURATION, &priv->pm_period); if (err) { dev_err(dev, "Failed to read power management " "period!\n"); goto out; } err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFPMHOLDOVERDURATION, &priv->pm_timeout); if (err) { dev_err(dev, "Failed to read power management " "timeout!\n"); goto out; } } /* Preamble setup */ if (priv->has_preamble) { err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_CNFPREAMBLE_SYMBOL, &priv->preamble); if (err) { dev_err(dev, "Failed 
to read preamble setup\n"); goto out; } } /* Retry settings */ err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_SHORTRETRYLIMIT, &priv->short_retry_limit); if (err) { dev_err(dev, "Failed to read short retry limit\n"); goto out; } err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_LONGRETRYLIMIT, &priv->long_retry_limit); if (err) { dev_err(dev, "Failed to read long retry limit\n"); goto out; } err = hermes_read_wordrec_pr(hw, USER_BAP, HERMES_RID_MAXTRANSMITLIFETIME, &priv->retry_lifetime); if (err) { dev_err(dev, "Failed to read max retry lifetime\n"); goto out; } out: return err; } /* Can be called before netdev registration */ int orinoco_hw_allocate_fid(struct orinoco_private *priv) { struct device *dev = priv->dev; struct hermes *hw = &priv->hw; int err; err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid); if (err == -EIO && priv->nicbuf_size > TX_NICBUF_SIZE_BUG) { /* Try workaround for old Symbol firmware bug */ priv->nicbuf_size = TX_NICBUF_SIZE_BUG; err = hw->ops->allocate(hw, priv->nicbuf_size, &priv->txfid); dev_warn(dev, "Firmware ALLOC bug detected " "(old Symbol firmware?). Work around %s\n", err ? "failed!" : "ok."); } return err; } int orinoco_get_bitratemode(int bitrate, int automatic) { int ratemode = -1; int i; if ((bitrate != 10) && (bitrate != 20) && (bitrate != 55) && (bitrate != 110)) return ratemode; for (i = 0; i < BITRATE_TABLE_SIZE; i++) { if ((bitrate_table[i].bitrate == bitrate) && (bitrate_table[i].automatic == automatic)) { ratemode = i; break; } } return ratemode; } void orinoco_get_ratemode_cfg(int ratemode, int *bitrate, int *automatic) { BUG_ON((ratemode < 0) || (ratemode >= BITRATE_TABLE_SIZE)); *bitrate = bitrate_table[ratemode].bitrate * 100000; *automatic = bitrate_table[ratemode].automatic; } int orinoco_hw_program_rids(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; struct wireless_dev *wdev = netdev_priv(dev); struct hermes *hw = &priv->hw; int err; struct hermes_idstring idbuf; /* Set the MAC address */ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNMACADDR, HERMES_BYTES_TO_RECLEN(ETH_ALEN), dev->dev_addr); if (err) { printk(KERN_ERR "%s: Error %d setting MAC address\n", dev->name, err); return err; } /* Set up the link mode */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPORTTYPE, priv->port_type); if (err) { printk(KERN_ERR "%s: Error %d setting port type\n", dev->name, err); return err; } /* Set the channel/frequency */ if (priv->channel != 0 && priv->iw_mode != NL80211_IFTYPE_STATION) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFOWNCHANNEL, priv->channel); if (err) { printk(KERN_ERR "%s: Error %d setting channel %d\n", dev->name, err, priv->channel); return err; } } if (priv->has_ibss) { u16 createibss; if ((strlen(priv->desired_essid) == 0) && (priv->createibss)) { printk(KERN_WARNING "%s: This firmware requires an " "ESSID in IBSS-Ad-Hoc mode.\n", dev->name); /* With wvlan_cs, in this case, we would crash. * hopefully, this driver will behave better... 
* Jean II */ createibss = 0; } else { createibss = priv->createibss; } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFCREATEIBSS, createibss); if (err) { printk(KERN_ERR "%s: Error %d setting CREATEIBSS\n", dev->name, err); return err; } } /* Set the desired BSSID */ err = __orinoco_hw_set_wap(priv); if (err) { printk(KERN_ERR "%s: Error %d setting AP address\n", dev->name, err); return err; } /* Set the desired ESSID */ idbuf.len = cpu_to_le16(strlen(priv->desired_essid)); memcpy(&idbuf.val, priv->desired_essid, sizeof(idbuf.val)); /* WinXP wants partner to configure OWNSSID even in IBSS mode. (jimc) */ err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNSSID, HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid) + 2), &idbuf); if (err) { printk(KERN_ERR "%s: Error %d setting OWNSSID\n", dev->name, err); return err; } err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDESIREDSSID, HERMES_BYTES_TO_RECLEN(strlen(priv->desired_essid) + 2), &idbuf); if (err) { printk(KERN_ERR "%s: Error %d setting DESIREDSSID\n", dev->name, err); return err; } /* Set the station name */ idbuf.len = cpu_to_le16(strlen(priv->nick)); memcpy(&idbuf.val, priv->nick, sizeof(idbuf.val)); err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFOWNNAME, HERMES_BYTES_TO_RECLEN(strlen(priv->nick) + 2), &idbuf); if (err) { printk(KERN_ERR "%s: Error %d setting nickname\n", dev->name, err); return err; } /* Set AP density */ if (priv->has_sensitivity) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE, priv->ap_density); if (err) { printk(KERN_WARNING "%s: Error %d setting SYSTEMSCALE. " "Disabling sensitivity control\n", dev->name, err); priv->has_sensitivity = 0; } } /* Set RTS threshold */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFRTSTHRESHOLD, priv->rts_thresh); if (err) { printk(KERN_ERR "%s: Error %d setting RTS threshold\n", dev->name, err); return err; } /* Set fragmentation threshold or MWO robustness */ if (priv->has_mwo) err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFMWOROBUST_AGERE, priv->mwo_robust); else err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFFRAGMENTATIONTHRESHOLD, priv->frag_thresh); if (err) { printk(KERN_ERR "%s: Error %d setting fragmentation\n", dev->name, err); return err; } /* Set bitrate */ err = __orinoco_hw_set_bitrate(priv); if (err) { printk(KERN_ERR "%s: Error %d setting bitrate\n", dev->name, err); return err; } /* Set power management */ if (priv->has_pm) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPMENABLED, priv->pm_on); if (err) { printk(KERN_ERR "%s: Error %d setting up PM\n", dev->name, err); return err; } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFMULTICASTRECEIVE, priv->pm_mcast); if (err) { printk(KERN_ERR "%s: Error %d setting up PM\n", dev->name, err); return err; } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFMAXSLEEPDURATION, priv->pm_period); if (err) { printk(KERN_ERR "%s: Error %d setting up PM\n", dev->name, err); return err; } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPMHOLDOVERDURATION, priv->pm_timeout); if (err) { printk(KERN_ERR "%s: Error %d setting up PM\n", dev->name, err); return err; } } /* Set preamble - only for Symbol so far... 
*/ if (priv->has_preamble) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPREAMBLE_SYMBOL, priv->preamble); if (err) { printk(KERN_ERR "%s: Error %d setting preamble\n", dev->name, err); return err; } } /* Set up encryption */ if (priv->has_wep || priv->has_wpa) { err = __orinoco_hw_setup_enc(priv); if (err) { printk(KERN_ERR "%s: Error %d activating encryption\n", dev->name, err); return err; } } if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { /* Enable monitor mode */ dev->type = ARPHRD_IEEE80211; err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST | HERMES_TEST_MONITOR, 0, NULL); } else { /* Disable monitor mode */ dev->type = ARPHRD_ETHER; err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST | HERMES_TEST_STOP, 0, NULL); } if (err) return err; /* Reset promiscuity / multicast*/ priv->promiscuous = 0; priv->mc_count = 0; /* Record mode change */ wdev->iftype = priv->iw_mode; return 0; } /* Get tsc from the firmware */ int orinoco_hw_get_tkip_iv(struct orinoco_private *priv, int key, u8 *tsc) { struct hermes *hw = &priv->hw; int err = 0; u8 tsc_arr[4][ORINOCO_SEQ_LEN]; if ((key < 0) || (key >= 4)) return -EINVAL; err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_TKIP_IV, sizeof(tsc_arr), NULL, &tsc_arr); if (!err) memcpy(tsc, &tsc_arr[key][0], sizeof(tsc_arr[0])); return err; } int __orinoco_hw_set_bitrate(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; int ratemode = priv->bitratemode; int err = 0; if (ratemode >= BITRATE_TABLE_SIZE) { printk(KERN_ERR "%s: BUG: Invalid bitrate mode %d\n", priv->ndev->name, ratemode); return -EINVAL; } switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFTXRATECONTROL, bitrate_table[ratemode].agere_txratectrl); break; case FIRMWARE_TYPE_INTERSIL: case FIRMWARE_TYPE_SYMBOL: err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFTXRATECONTROL, bitrate_table[ratemode].intersil_txratectrl); break; default: BUG(); } return err; } int orinoco_hw_get_act_bitrate(struct orinoco_private *priv, int *bitrate) { struct hermes *hw = &priv->hw; int i; int err = 0; u16 val; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTTXRATE, &val); if (err) return err; switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: /* Lucent style rate */ /* Note : in Lucent firmware, the return value of * HERMES_RID_CURRENTTXRATE is the bitrate in Mb/s, * and therefore is totally different from the * encoding of HERMES_RID_CNFTXRATECONTROL. 
* Don't forget that 6Mb/s is really 5.5Mb/s */ if (val == 6) *bitrate = 5500000; else *bitrate = val * 1000000; break; case FIRMWARE_TYPE_INTERSIL: /* Intersil style rate */ case FIRMWARE_TYPE_SYMBOL: /* Symbol style rate */ for (i = 0; i < BITRATE_TABLE_SIZE; i++) if (bitrate_table[i].intersil_txratectrl == val) { *bitrate = bitrate_table[i].bitrate * 100000; break; } if (i >= BITRATE_TABLE_SIZE) { printk(KERN_INFO "%s: Unable to determine current bitrate (0x%04hx)\n", priv->ndev->name, val); err = -EIO; } break; default: BUG(); } return err; } /* Set fixed AP address */ int __orinoco_hw_set_wap(struct orinoco_private *priv) { int roaming_flag; int err = 0; struct hermes *hw = &priv->hw; switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: /* not supported */ break; case FIRMWARE_TYPE_INTERSIL: if (priv->bssid_fixed) roaming_flag = 2; else roaming_flag = 1; err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFROAMINGMODE, roaming_flag); break; case FIRMWARE_TYPE_SYMBOL: err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFMANDATORYBSSID_SYMBOL, &priv->desired_bssid); break; } return err; } /* Change the WEP keys and/or the current keys. Can be called * either from __orinoco_hw_setup_enc() or directly from * orinoco_ioctl_setiwencode(). In the later case the association * with the AP is not broken (if the firmware can handle it), * which is needed for 802.1x implementations. */ int __orinoco_hw_setup_wepkeys(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; int err = 0; int i; switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: { struct orinoco_key keys[ORINOCO_MAX_KEYS]; memset(&keys, 0, sizeof(keys)); for (i = 0; i < ORINOCO_MAX_KEYS; i++) { int len = min(priv->keys[i].key_len, ORINOCO_MAX_KEY_SIZE); memcpy(&keys[i].data, priv->keys[i].key, len); if (len > SMALL_KEY_SIZE) keys[i].len = cpu_to_le16(LARGE_KEY_SIZE); else if (len > 0) keys[i].len = cpu_to_le16(SMALL_KEY_SIZE); else keys[i].len = cpu_to_le16(0); } err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFWEPKEYS_AGERE, &keys); if (err) return err; err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFTXKEY_AGERE, priv->tx_key); if (err) return err; break; } case FIRMWARE_TYPE_INTERSIL: case FIRMWARE_TYPE_SYMBOL: { int keylen; /* Force uniform key length to work around * firmware bugs */ keylen = priv->keys[priv->tx_key].key_len; if (keylen > LARGE_KEY_SIZE) { printk(KERN_ERR "%s: BUG: Key %d has oversize length %d.\n", priv->ndev->name, priv->tx_key, keylen); return -E2BIG; } else if (keylen > SMALL_KEY_SIZE) keylen = LARGE_KEY_SIZE; else if (keylen > 0) keylen = SMALL_KEY_SIZE; else keylen = 0; /* Write all 4 keys */ for (i = 0; i < ORINOCO_MAX_KEYS; i++) { u8 key[LARGE_KEY_SIZE] = { 0 }; memcpy(key, priv->keys[i].key, priv->keys[i].key_len); err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFDEFAULTKEY0 + i, HERMES_BYTES_TO_RECLEN(keylen), key); if (err) return err; } /* Write the index of the key used in transmission */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFWEPDEFAULTKEYID, priv->tx_key); if (err) return err; } break; } return 0; } int __orinoco_hw_setup_enc(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; int err = 0; int master_wep_flag; int auth_flag; int enc_flag; /* Setup WEP keys */ if (priv->encode_alg == ORINOCO_ALG_WEP) __orinoco_hw_setup_wepkeys(priv); if (priv->wep_restrict) auth_flag = HERMES_AUTH_SHARED_KEY; else auth_flag = HERMES_AUTH_OPEN; if (priv->wpa_enabled) enc_flag = 2; else if (priv->encode_alg == ORINOCO_ALG_WEP) enc_flag = 1; else enc_flag = 
0; switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: /* Agere style WEP */ if (priv->encode_alg == ORINOCO_ALG_WEP) { /* Enable the shared-key authentication. */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFAUTHENTICATION_AGERE, auth_flag); if (err) return err; } err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFWEPENABLED_AGERE, enc_flag); if (err) return err; if (priv->has_wpa) { /* Set WPA key management */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFSETWPAAUTHMGMTSUITE_AGERE, priv->key_mgmt); if (err) return err; } break; case FIRMWARE_TYPE_INTERSIL: /* Intersil style WEP */ case FIRMWARE_TYPE_SYMBOL: /* Symbol style WEP */ if (priv->encode_alg == ORINOCO_ALG_WEP) { if (priv->wep_restrict || (priv->firmware_type == FIRMWARE_TYPE_SYMBOL)) master_wep_flag = HERMES_WEP_PRIVACY_INVOKED | HERMES_WEP_EXCL_UNENCRYPTED; else master_wep_flag = HERMES_WEP_PRIVACY_INVOKED; err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFAUTHENTICATION, auth_flag); if (err) return err; } else master_wep_flag = 0; if (priv->iw_mode == NL80211_IFTYPE_MONITOR) master_wep_flag |= HERMES_WEP_HOST_DECRYPT; /* Master WEP setting : on/off */ err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFWEPFLAGS_INTERSIL, master_wep_flag); if (err) return err; break; } return 0; } /* key must be 32 bytes, including the tx and rx MIC keys. * rsc must be NULL or up to 8 bytes * tsc must be NULL or up to 8 bytes */ int __orinoco_hw_set_tkip_key(struct orinoco_private *priv, int key_idx, int set_tx, const u8 *key, size_t key_len, const u8 *rsc, size_t rsc_len, const u8 *tsc, size_t tsc_len) { struct { __le16 idx; u8 rsc[ORINOCO_SEQ_LEN]; struct { u8 key[TKIP_KEYLEN]; u8 tx_mic[MIC_KEYLEN]; u8 rx_mic[MIC_KEYLEN]; } tkip; u8 tsc[ORINOCO_SEQ_LEN]; } __packed buf; struct hermes *hw = &priv->hw; int ret; int err; int k; u16 xmitting; key_idx &= 0x3; if (set_tx) key_idx |= 0x8000; buf.idx = cpu_to_le16(key_idx); if (key_len != sizeof(buf.tkip)) return -EINVAL; memcpy(&buf.tkip, key, sizeof(buf.tkip)); if (rsc_len > sizeof(buf.rsc)) rsc_len = sizeof(buf.rsc); if (tsc_len > sizeof(buf.tsc)) tsc_len = sizeof(buf.tsc); memset(buf.rsc, 0, sizeof(buf.rsc)); memset(buf.tsc, 0, sizeof(buf.tsc)); if (rsc != NULL) memcpy(buf.rsc, rsc, rsc_len); if (tsc != NULL) memcpy(buf.tsc, tsc, tsc_len); else buf.tsc[4] = 0x10; /* Wait up to 100ms for tx queue to empty */ for (k = 100; k > 0; k--) { udelay(1000); ret = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_TXQUEUEEMPTY, &xmitting); if (ret || !xmitting) break; } if (k == 0) ret = -ETIMEDOUT; err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFADDDEFAULTTKIPKEY_AGERE, &buf); return ret ? 
ret : err; } int orinoco_clear_tkip_key(struct orinoco_private *priv, int key_idx) { struct hermes *hw = &priv->hw; int err; err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFREMDEFAULTTKIPKEY_AGERE, key_idx); if (err) printk(KERN_WARNING "%s: Error %d clearing TKIP key %d\n", priv->ndev->name, err, key_idx); return err; } int __orinoco_hw_set_multicast_list(struct orinoco_private *priv, struct net_device *dev, int mc_count, int promisc) { struct hermes *hw = &priv->hw; int err = 0; if (promisc != priv->promiscuous) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFPROMISCUOUSMODE, promisc); if (err) { printk(KERN_ERR "%s: Error %d setting PROMISCUOUSMODE to 1.\n", priv->ndev->name, err); } else priv->promiscuous = promisc; } /* If we're not in promiscuous mode, then we need to set the * group address if either we want to multicast, or if we were * multicasting and want to stop */ if (!promisc && (mc_count || priv->mc_count)) { struct netdev_hw_addr *ha; struct hermes_multicast mclist; int i = 0; netdev_for_each_mc_addr(ha, dev) { if (i == mc_count) break; memcpy(mclist.addr[i++], ha->addr, ETH_ALEN); } err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFGROUPADDRESSES, HERMES_BYTES_TO_RECLEN(mc_count * ETH_ALEN), &mclist); if (err) printk(KERN_ERR "%s: Error %d setting multicast list.\n", priv->ndev->name, err); else priv->mc_count = mc_count; } return err; } /* Return : < 0 -> error code ; >= 0 -> length */ int orinoco_hw_get_essid(struct orinoco_private *priv, int *active, char buf[IW_ESSID_MAX_SIZE + 1]) { struct hermes *hw = &priv->hw; int err = 0; struct hermes_idstring essidbuf; char *p = (char *)(&essidbuf.val); int len; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; if (strlen(priv->desired_essid) > 0) { /* We read the desired SSID from the hardware rather than from priv->desired_essid, just in case the firmware is allowed to change it on us. I'm not sure about this */ /* My guess is that the OWNSSID should always be whatever * we set to the card, whereas CURRENT_SSID is the one that * may change... - Jean II */ u16 rid; *active = 1; rid = (priv->port_type == 3) ? HERMES_RID_CNFOWNSSID : HERMES_RID_CNFDESIREDSSID; err = hw->ops->read_ltv(hw, USER_BAP, rid, sizeof(essidbuf), NULL, &essidbuf); if (err) goto fail_unlock; } else { *active = 0; err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTSSID, sizeof(essidbuf), NULL, &essidbuf); if (err) goto fail_unlock; } len = le16_to_cpu(essidbuf.len); BUG_ON(len > IW_ESSID_MAX_SIZE); memset(buf, 0, IW_ESSID_MAX_SIZE); memcpy(buf, p, len); err = len; fail_unlock: orinoco_unlock(priv, &flags); return err; } int orinoco_hw_get_freq(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; int err = 0; u16 channel; int freq = 0; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CURRENTCHANNEL, &channel); if (err) goto out; /* Intersil firmware 1.3.5 returns 0 when the interface is down */ if (channel == 0) { err = -EBUSY; goto out; } if ((channel < 1) || (channel > NUM_CHANNELS)) { printk(KERN_WARNING "%s: Channel out of range (%d)!\n", priv->ndev->name, channel); err = -EBUSY; goto out; } freq = ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ); out: orinoco_unlock(priv, &flags); if (err > 0) err = -EBUSY; return err ? 
err : freq; } int orinoco_hw_get_bitratelist(struct orinoco_private *priv, int *numrates, s32 *rates, int max) { struct hermes *hw = &priv->hw; struct hermes_idstring list; unsigned char *p = (unsigned char *)&list.val; int err = 0; int num; int i; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_SUPPORTEDDATARATES, sizeof(list), NULL, &list); orinoco_unlock(priv, &flags); if (err) return err; num = le16_to_cpu(list.len); *numrates = num; num = min(num, max); for (i = 0; i < num; i++) rates[i] = (p[i] & 0x7f) * 500000; /* convert to bps */ return 0; } int orinoco_hw_trigger_scan(struct orinoco_private *priv, const struct cfg80211_ssid *ssid) { struct net_device *dev = priv->ndev; struct hermes *hw = &priv->hw; unsigned long flags; int err = 0; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; /* Scanning with port 0 disabled would fail */ if (!netif_running(dev)) { err = -ENETDOWN; goto out; } /* In monitor mode, the scan results are always empty. * Probe responses are passed to the driver as received * frames and could be processed in software. */ if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { err = -EOPNOTSUPP; goto out; } if (priv->has_hostscan) { switch (priv->firmware_type) { case FIRMWARE_TYPE_SYMBOL: err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFHOSTSCAN_SYMBOL, HERMES_HOSTSCAN_SYMBOL_ONCE | HERMES_HOSTSCAN_SYMBOL_BCAST); break; case FIRMWARE_TYPE_INTERSIL: { __le16 req[3]; req[0] = cpu_to_le16(0x3fff); /* All channels */ req[1] = cpu_to_le16(0x0001); /* rate 1 Mbps */ req[2] = 0; /* Any ESSID */ err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFHOSTSCAN, &req); break; } case FIRMWARE_TYPE_AGERE: if (ssid->ssid_len > 0) { struct hermes_idstring idbuf; size_t len = ssid->ssid_len; idbuf.len = cpu_to_le16(len); memcpy(idbuf.val, ssid->ssid, len); err = hw->ops->write_ltv(hw, USER_BAP, HERMES_RID_CNFSCANSSID_AGERE, HERMES_BYTES_TO_RECLEN(len + 2), &idbuf); } else err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFSCANSSID_AGERE, 0); /* Any ESSID */ if (err) break; if (priv->has_ext_scan) { err = hermes_write_wordrec(hw, USER_BAP, HERMES_RID_CNFSCANCHANNELS2GHZ, 0x7FFF); if (err) goto out; err = hermes_inquire(hw, HERMES_INQ_CHANNELINFO); } else err = hermes_inquire(hw, HERMES_INQ_SCAN); break; } } else err = hermes_inquire(hw, HERMES_INQ_SCAN); out: orinoco_unlock(priv, &flags); return err; } /* Disassociate from node with BSSID addr */ int orinoco_hw_disassociate(struct orinoco_private *priv, u8 *addr, u16 reason_code) { struct hermes *hw = &priv->hw; int err; struct { u8 addr[ETH_ALEN]; __le16 reason_code; } __packed buf; /* Currently only supported by WPA enabled Agere fw */ if (!priv->has_wpa) return -EOPNOTSUPP; memcpy(buf.addr, addr, ETH_ALEN); buf.reason_code = cpu_to_le16(reason_code); err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFDISASSOCIATE, &buf); return err; } int orinoco_hw_get_current_bssid(struct orinoco_private *priv, u8 *addr) { struct hermes *hw = &priv->hw; int err; err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID, ETH_ALEN, NULL, addr); return err; }
linux-master
drivers/net/wireless/intersil/orinoco/hw.c
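determine_fw_capabilities() above packs the station-identity numbers into a single integer before comparing against capability thresholds: (major << 16) | minor for Agere firmware, and a nibble-packed parse of version strings such as "V2.20-01" for Symbol firmware. A short standalone C illustration of those encodings, using sample versions taken from the comments in the code above (Agere 6.16, Symbol "V2.20-01"):

#include <stdio.h>

int main(void)
{
        /* Agere/Lucent: "major.minor" packs as (major << 16) | minor,
         * so firmware 6.16 becomes 0x60010. */
        unsigned long agere = (6UL << 16) | 16; /* 0x60010 */

        printf("Agere 6.16 -> 0x%lx, has_ibss=%d, has_hostscan=%d\n",
               agere, agere >= 0x60006, agere >= 0x8000a);

        /* Symbol: the digit characters of "V2.20-01" are nibble-packed,
         * giving 0x22001 (one of the tested values mentioned above). */
        const char *tmp = "V2.20-01";
        unsigned long symbol = ((unsigned long)(tmp[1] - '0') << 16) |
                               ((tmp[3] - '0') << 12) |
                               ((tmp[4] - '0') << 8) |
                               ((tmp[6] - '0') << 4) |
                               (tmp[7] - '0');

        printf("Symbol %s -> 0x%lx, has_wep=%d\n",
               tmp, symbol, symbol >= 0x15012);
        return 0;
}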
/* Wireless extensions support. * * See copyright notice in main.c */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/if_arp.h> #include <linux/wireless.h> #include <linux/ieee80211.h> #include <linux/etherdevice.h> #include <net/iw_handler.h> #include <net/cfg80211.h> #include <net/cfg80211-wext.h> #include "hermes.h" #include "hermes_rid.h" #include "orinoco.h" #include "hw.h" #include "mic.h" #include "scan.h" #include "main.h" #include "wext.h" #define MAX_RID_LEN 1024 /* Helper routine to record keys * It is called under orinoco_lock so it may not sleep */ static int orinoco_set_key(struct orinoco_private *priv, int index, enum orinoco_alg alg, const u8 *key, int key_len, const u8 *seq, int seq_len) { kfree_sensitive(priv->keys[index].key); kfree_sensitive(priv->keys[index].seq); if (key_len) { priv->keys[index].key = kzalloc(key_len, GFP_ATOMIC); if (!priv->keys[index].key) goto nomem; } else priv->keys[index].key = NULL; if (seq_len) { priv->keys[index].seq = kzalloc(seq_len, GFP_ATOMIC); if (!priv->keys[index].seq) goto free_key; } else priv->keys[index].seq = NULL; priv->keys[index].key_len = key_len; priv->keys[index].seq_len = seq_len; if (key_len) memcpy((void *)priv->keys[index].key, key, key_len); if (seq_len) memcpy((void *)priv->keys[index].seq, seq, seq_len); switch (alg) { case ORINOCO_ALG_TKIP: priv->keys[index].cipher = WLAN_CIPHER_SUITE_TKIP; break; case ORINOCO_ALG_WEP: priv->keys[index].cipher = (key_len > SMALL_KEY_SIZE) ? WLAN_CIPHER_SUITE_WEP104 : WLAN_CIPHER_SUITE_WEP40; break; case ORINOCO_ALG_NONE: default: priv->keys[index].cipher = 0; break; } return 0; free_key: kfree(priv->keys[index].key); priv->keys[index].key = NULL; nomem: priv->keys[index].key_len = 0; priv->keys[index].seq_len = 0; priv->keys[index].cipher = 0; return -ENOMEM; } static struct iw_statistics *orinoco_get_wireless_stats(struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; struct iw_statistics *wstats = &priv->wstats; int err; unsigned long flags; if (!netif_device_present(dev)) { printk(KERN_WARNING "%s: get_wireless_stats() called while device not present\n", dev->name); return NULL; /* FIXME: Can we do better than this? */ } /* If busy, return the old stats. Returning NULL may cause * the interface to disappear from /proc/net/wireless */ if (orinoco_lock(priv, &flags) != 0) return wstats; /* We can't really wait for the tallies inquiry command to * complete, so we just use the previous results and trigger * a new tallies inquiry command for next time - Jean II */ /* FIXME: Really we should wait for the inquiry to come back - * as it is the stats we give don't make a whole lot of sense. * Unfortunately, it's not clear how to do that within the * wireless extensions framework: I think we're in user * context, but a lock seems to be held by the time we get in * here so we're not safe to sleep here. 
*/ hermes_inquire(hw, HERMES_INQ_TALLIES); if (priv->iw_mode == NL80211_IFTYPE_ADHOC) { memset(&wstats->qual, 0, sizeof(wstats->qual)); /* If a spy address is defined, we report stats of the * first spy address - Jean II */ if (SPY_NUMBER(priv)) { wstats->qual.qual = priv->spy_data.spy_stat[0].qual; wstats->qual.level = priv->spy_data.spy_stat[0].level; wstats->qual.noise = priv->spy_data.spy_stat[0].noise; wstats->qual.updated = priv->spy_data.spy_stat[0].updated; } } else { struct { __le16 qual, signal, noise, unused; } __packed cq; err = HERMES_READ_RECORD(hw, USER_BAP, HERMES_RID_COMMSQUALITY, &cq); if (!err) { wstats->qual.qual = (int)le16_to_cpu(cq.qual); wstats->qual.level = (int)le16_to_cpu(cq.signal) - 0x95; wstats->qual.noise = (int)le16_to_cpu(cq.noise) - 0x95; wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; } } orinoco_unlock(priv, &flags); return wstats; } /********************************************************************/ /* Wireless extensions */ /********************************************************************/ static int orinoco_ioctl_setwap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct sockaddr *ap_addr = &wrqu->ap_addr; struct orinoco_private *priv = ndev_priv(dev); int err = -EINPROGRESS; /* Call commit handler */ unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; /* Enable automatic roaming - no sanity checks are needed */ if (is_zero_ether_addr(ap_addr->sa_data) || is_broadcast_ether_addr(ap_addr->sa_data)) { priv->bssid_fixed = 0; eth_zero_addr(priv->desired_bssid); /* "off" means keep existing connection */ if (ap_addr->sa_data[0] == 0) { __orinoco_hw_set_wap(priv); err = 0; } goto out; } if (priv->firmware_type == FIRMWARE_TYPE_AGERE) { printk(KERN_WARNING "%s: Lucent/Agere firmware doesn't " "support manual roaming\n", dev->name); err = -EOPNOTSUPP; goto out; } if (priv->iw_mode != NL80211_IFTYPE_STATION) { printk(KERN_WARNING "%s: Manual roaming supported only in " "managed mode\n", dev->name); err = -EOPNOTSUPP; goto out; } /* Intersil firmware hangs without Desired ESSID */ if (priv->firmware_type == FIRMWARE_TYPE_INTERSIL && strlen(priv->desired_essid) == 0) { printk(KERN_WARNING "%s: Desired ESSID must be set for " "manual roaming\n", dev->name); err = -EOPNOTSUPP; goto out; } /* Finally, enable manual roaming */ priv->bssid_fixed = 1; memcpy(priv->desired_bssid, &ap_addr->sa_data, ETH_ALEN); out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_getwap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct sockaddr *ap_addr = &wrqu->ap_addr; struct orinoco_private *priv = ndev_priv(dev); int err = 0; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; ap_addr->sa_family = ARPHRD_ETHER; err = orinoco_hw_get_current_bssid(priv, ap_addr->sa_data); orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_setiwencode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *keybuf) { struct iw_point *erq = &wrqu->encoding; struct orinoco_private *priv = ndev_priv(dev); int index = (erq->flags & IW_ENCODE_INDEX) - 1; int setindex = priv->tx_key; enum orinoco_alg encode_alg = priv->encode_alg; int restricted = priv->wep_restrict; int err = -EINPROGRESS; /* Call commit handler */ unsigned long flags; if (!priv->has_wep) return -EOPNOTSUPP; if (erq->pointer) { /* We actually have a key to set - check its length */ if (erq->length > LARGE_KEY_SIZE) return 
-E2BIG; if ((erq->length > SMALL_KEY_SIZE) && !priv->has_big_wep) return -E2BIG; } if (orinoco_lock(priv, &flags) != 0) return -EBUSY; /* Clear any TKIP key we have */ if ((priv->has_wpa) && (priv->encode_alg == ORINOCO_ALG_TKIP)) (void) orinoco_clear_tkip_key(priv, setindex); if (erq->length > 0) { if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) index = priv->tx_key; /* Switch on WEP if off */ if (encode_alg != ORINOCO_ALG_WEP) { setindex = index; encode_alg = ORINOCO_ALG_WEP; } } else { /* Important note : if the user do "iwconfig eth0 enc off", * we will arrive there with an index of -1. This is valid * but need to be taken care off... Jean II */ if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) { if ((index != -1) || (erq->flags == 0)) { err = -EINVAL; goto out; } } else { /* Set the index : Check that the key is valid */ if (priv->keys[index].key_len == 0) { err = -EINVAL; goto out; } setindex = index; } } if (erq->flags & IW_ENCODE_DISABLED) encode_alg = ORINOCO_ALG_NONE; if (erq->flags & IW_ENCODE_OPEN) restricted = 0; if (erq->flags & IW_ENCODE_RESTRICTED) restricted = 1; if (erq->pointer && erq->length > 0) { err = orinoco_set_key(priv, index, ORINOCO_ALG_WEP, keybuf, erq->length, NULL, 0); } priv->tx_key = setindex; /* Try fast key change if connected and only keys are changed */ if ((priv->encode_alg == encode_alg) && (priv->wep_restrict == restricted) && netif_carrier_ok(dev)) { err = __orinoco_hw_setup_wepkeys(priv); /* No need to commit if successful */ goto out; } priv->encode_alg = encode_alg; priv->wep_restrict = restricted; out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_getiwencode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *keybuf) { struct iw_point *erq = &wrqu->encoding; struct orinoco_private *priv = ndev_priv(dev); int index = (erq->flags & IW_ENCODE_INDEX) - 1; unsigned long flags; if (!priv->has_wep) return -EOPNOTSUPP; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; if ((index < 0) || (index >= ORINOCO_MAX_KEYS)) index = priv->tx_key; erq->flags = 0; if (!priv->encode_alg) erq->flags |= IW_ENCODE_DISABLED; erq->flags |= index + 1; if (priv->wep_restrict) erq->flags |= IW_ENCODE_RESTRICTED; else erq->flags |= IW_ENCODE_OPEN; erq->length = priv->keys[index].key_len; memcpy(keybuf, priv->keys[index].key, erq->length); orinoco_unlock(priv, &flags); return 0; } static int orinoco_ioctl_setessid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *essidbuf) { struct iw_point *erq = &wrqu->essid; struct orinoco_private *priv = ndev_priv(dev); unsigned long flags; /* Note : ESSID is ignored in Ad-Hoc demo mode, but we can set it * anyway... - Jean II */ /* Hum... Should not use Wireless Extension constant (may change), * should use our own... 
- Jean II */ if (erq->length > IW_ESSID_MAX_SIZE) return -E2BIG; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; /* NULL the string (for NULL termination & ESSID = ANY) - Jean II */ memset(priv->desired_essid, 0, sizeof(priv->desired_essid)); /* If not ANY, get the new ESSID */ if (erq->flags) memcpy(priv->desired_essid, essidbuf, erq->length); orinoco_unlock(priv, &flags); return -EINPROGRESS; /* Call commit handler */ } static int orinoco_ioctl_getessid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *essidbuf) { struct iw_point *erq = &wrqu->essid; struct orinoco_private *priv = ndev_priv(dev); int active; int err = 0; unsigned long flags; if (netif_running(dev)) { err = orinoco_hw_get_essid(priv, &active, essidbuf); if (err < 0) return err; erq->length = err; } else { if (orinoco_lock(priv, &flags) != 0) return -EBUSY; memcpy(essidbuf, priv->desired_essid, IW_ESSID_MAX_SIZE); erq->length = strlen(priv->desired_essid); orinoco_unlock(priv, &flags); } erq->flags = 1; return 0; } static int orinoco_ioctl_setfreq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_freq *frq = &wrqu->freq; struct orinoco_private *priv = ndev_priv(dev); int chan = -1; unsigned long flags; int err = -EINPROGRESS; /* Call commit handler */ /* In infrastructure mode the AP sets the channel */ if (priv->iw_mode == NL80211_IFTYPE_STATION) return -EBUSY; if ((frq->e == 0) && (frq->m <= 1000)) { /* Setting by channel number */ chan = frq->m; } else { /* Setting by frequency */ int denom = 1; int i; /* Calculate denominator to rescale to MHz */ for (i = 0; i < (6 - frq->e); i++) denom *= 10; chan = ieee80211_frequency_to_channel(frq->m / denom); } if ((chan < 1) || (chan > NUM_CHANNELS) || !(priv->channel_mask & (1 << (chan - 1)))) return -EINVAL; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; priv->channel = chan; if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { /* Fast channel change - no commit if successful */ struct hermes *hw = &priv->hw; err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST | HERMES_TEST_SET_CHANNEL, chan, NULL); } orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_getfreq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_freq *frq = &wrqu->freq; struct orinoco_private *priv = ndev_priv(dev); int tmp; /* Locking done in there */ tmp = orinoco_hw_get_freq(priv); if (tmp < 0) return tmp; frq->m = tmp * 100000; frq->e = 1; return 0; } static int orinoco_ioctl_getsens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *srq = &wrqu->sens; struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; u16 val; int err; unsigned long flags; if (!priv->has_sensitivity) return -EOPNOTSUPP; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFSYSTEMSCALE, &val); orinoco_unlock(priv, &flags); if (err) return err; srq->value = val; srq->fixed = 0; /* auto */ return 0; } static int orinoco_ioctl_setsens(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *srq = &wrqu->sens; struct orinoco_private *priv = ndev_priv(dev); int val = srq->value; unsigned long flags; if (!priv->has_sensitivity) return -EOPNOTSUPP; if ((val < 1) || (val > 3)) return -EINVAL; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; priv->ap_density = val; orinoco_unlock(priv, &flags); return 
-EINPROGRESS; /* Call commit handler */ } static int orinoco_ioctl_setrate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rrq = &wrqu->bitrate; struct orinoco_private *priv = ndev_priv(dev); int ratemode; int bitrate; /* 100s of kilobits */ unsigned long flags; /* As the user space doesn't know our highest rate, it uses -1 * to ask us to set the highest rate. Test it using "iwconfig * ethX rate auto" - Jean II */ if (rrq->value == -1) bitrate = 110; else { if (rrq->value % 100000) return -EINVAL; bitrate = rrq->value / 100000; } ratemode = orinoco_get_bitratemode(bitrate, !rrq->fixed); if (ratemode == -1) return -EINVAL; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; priv->bitratemode = ratemode; orinoco_unlock(priv, &flags); return -EINPROGRESS; } static int orinoco_ioctl_getrate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *rrq = &wrqu->bitrate; struct orinoco_private *priv = ndev_priv(dev); int err = 0; int bitrate, automatic; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; orinoco_get_ratemode_cfg(priv->bitratemode, &bitrate, &automatic); /* If the interface is running we try to find more about the current mode */ if (netif_running(dev)) { int act_bitrate; int lerr; /* Ignore errors if we can't get the actual bitrate */ lerr = orinoco_hw_get_act_bitrate(priv, &act_bitrate); if (!lerr) bitrate = act_bitrate; } orinoco_unlock(priv, &flags); rrq->value = bitrate; rrq->fixed = !automatic; rrq->disabled = 0; return err; } static int orinoco_ioctl_setpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *prq = &wrqu->power; struct orinoco_private *priv = ndev_priv(dev); int err = -EINPROGRESS; /* Call commit handler */ unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; if (prq->disabled) { priv->pm_on = 0; } else { switch (prq->flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: priv->pm_mcast = 0; priv->pm_on = 1; break; case IW_POWER_ALL_R: priv->pm_mcast = 1; priv->pm_on = 1; break; case IW_POWER_ON: /* No flags : but we may have a value - Jean II */ break; default: err = -EINVAL; goto out; } if (prq->flags & IW_POWER_TIMEOUT) { priv->pm_on = 1; priv->pm_timeout = prq->value / 1000; } if (prq->flags & IW_POWER_PERIOD) { priv->pm_on = 1; priv->pm_period = prq->value / 1000; } /* It's valid to not have a value if we are just toggling * the flags... 
Jean II */ if (!priv->pm_on) { err = -EINVAL; goto out; } } out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_getpower(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_param *prq = &wrqu->power; struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; int err = 0; u16 enable, period, timeout, mcast; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMENABLED, &enable); if (err) goto out; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFMAXSLEEPDURATION, &period); if (err) goto out; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFPMHOLDOVERDURATION, &timeout); if (err) goto out; err = hermes_read_wordrec(hw, USER_BAP, HERMES_RID_CNFMULTICASTRECEIVE, &mcast); if (err) goto out; prq->disabled = !enable; /* Note : by default, display the period */ if ((prq->flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { prq->flags = IW_POWER_TIMEOUT; prq->value = timeout * 1000; } else { prq->flags = IW_POWER_PERIOD; prq->value = period * 1000; } if (mcast) prq->flags |= IW_POWER_ALL_R; else prq->flags |= IW_POWER_UNICAST_R; out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_set_encodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int idx, alg = ext->alg, set_key = 1; unsigned long flags; int err = -EINVAL; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; /* Determine and validate the key index */ idx = encoding->flags & IW_ENCODE_INDEX; if (idx) { if ((idx < 1) || (idx > 4)) goto out; idx--; } else idx = priv->tx_key; if (encoding->flags & IW_ENCODE_DISABLED) alg = IW_ENCODE_ALG_NONE; if (priv->has_wpa && (alg != IW_ENCODE_ALG_TKIP)) { /* Clear any TKIP TX key we had */ (void) orinoco_clear_tkip_key(priv, priv->tx_key); } if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) { priv->tx_key = idx; set_key = ((alg == IW_ENCODE_ALG_TKIP) || (ext->key_len > 0)) ? 
1 : 0; } if (set_key) { /* Set the requested key first */ switch (alg) { case IW_ENCODE_ALG_NONE: priv->encode_alg = ORINOCO_ALG_NONE; err = orinoco_set_key(priv, idx, ORINOCO_ALG_NONE, NULL, 0, NULL, 0); break; case IW_ENCODE_ALG_WEP: if (ext->key_len <= 0) goto out; priv->encode_alg = ORINOCO_ALG_WEP; err = orinoco_set_key(priv, idx, ORINOCO_ALG_WEP, ext->key, ext->key_len, NULL, 0); break; case IW_ENCODE_ALG_TKIP: { u8 *tkip_iv = NULL; if (!priv->has_wpa || (ext->key_len > sizeof(struct orinoco_tkip_key))) goto out; priv->encode_alg = ORINOCO_ALG_TKIP; if (ext->ext_flags & IW_ENCODE_EXT_RX_SEQ_VALID) tkip_iv = &ext->rx_seq[0]; err = orinoco_set_key(priv, idx, ORINOCO_ALG_TKIP, ext->key, ext->key_len, tkip_iv, ORINOCO_SEQ_LEN); err = __orinoco_hw_set_tkip_key(priv, idx, ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY, priv->keys[idx].key, priv->keys[idx].key_len, tkip_iv, ORINOCO_SEQ_LEN, NULL, 0); if (err) printk(KERN_ERR "%s: Error %d setting TKIP key" "\n", dev->name, err); goto out; } default: goto out; } } err = -EINPROGRESS; out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_get_encodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct iw_point *encoding = &wrqu->encoding; struct iw_encode_ext *ext = (struct iw_encode_ext *)extra; int idx, max_key_len; unsigned long flags; int err; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = -EINVAL; max_key_len = encoding->length - sizeof(*ext); if (max_key_len < 0) goto out; idx = encoding->flags & IW_ENCODE_INDEX; if (idx) { if ((idx < 1) || (idx > 4)) goto out; idx--; } else idx = priv->tx_key; encoding->flags = idx + 1; memset(ext, 0, sizeof(*ext)); switch (priv->encode_alg) { case ORINOCO_ALG_NONE: ext->alg = IW_ENCODE_ALG_NONE; ext->key_len = 0; encoding->flags |= IW_ENCODE_DISABLED; break; case ORINOCO_ALG_WEP: ext->alg = IW_ENCODE_ALG_WEP; ext->key_len = min(priv->keys[idx].key_len, max_key_len); memcpy(ext->key, priv->keys[idx].key, ext->key_len); encoding->flags |= IW_ENCODE_ENABLED; break; case ORINOCO_ALG_TKIP: ext->alg = IW_ENCODE_ALG_TKIP; ext->key_len = min(priv->keys[idx].key_len, max_key_len); memcpy(ext->key, priv->keys[idx].key, ext->key_len); encoding->flags |= IW_ENCODE_ENABLED; break; } err = 0; out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_set_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; struct iw_param *param = &wrqu->param; unsigned long flags; int ret = -EINPROGRESS; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: case IW_AUTH_RX_UNENCRYPTED_EAPOL: case IW_AUTH_PRIVACY_INVOKED: case IW_AUTH_DROP_UNENCRYPTED: /* * orinoco does not use these parameters */ break; case IW_AUTH_MFP: /* Management Frame Protection not supported. * Only fail if set to required. */ if (param->value == IW_AUTH_MFP_REQUIRED) ret = -EINVAL; break; case IW_AUTH_KEY_MGMT: /* wl_lkm implies value 2 == PSK for Hermes I * which ties in with WEXT * no other hints tho :( */ priv->key_mgmt = param->value; break; case IW_AUTH_TKIP_COUNTERMEASURES: /* When countermeasures are enabled, shut down the * card; when disabled, re-enable the card. This must * take effect immediately. 
* * TODO: Make sure that the EAPOL message is getting * out before card disabled */ if (param->value) { priv->tkip_cm_active = 1; ret = hermes_disable_port(hw, 0); } else { priv->tkip_cm_active = 0; ret = hermes_enable_port(hw, 0); } break; case IW_AUTH_80211_AUTH_ALG: if (param->value & IW_AUTH_ALG_SHARED_KEY) priv->wep_restrict = 1; else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) priv->wep_restrict = 0; else ret = -EINVAL; break; case IW_AUTH_WPA_ENABLED: if (priv->has_wpa) { priv->wpa_enabled = param->value ? 1 : 0; } else { if (param->value) ret = -EOPNOTSUPP; /* else silently accept disable of WPA */ priv->wpa_enabled = 0; } break; default: ret = -EOPNOTSUPP; } orinoco_unlock(priv, &flags); return ret; } static int orinoco_ioctl_get_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct iw_param *param = &wrqu->param; unsigned long flags; int ret = 0; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_KEY_MGMT: param->value = priv->key_mgmt; break; case IW_AUTH_TKIP_COUNTERMEASURES: param->value = priv->tkip_cm_active; break; case IW_AUTH_80211_AUTH_ALG: if (priv->wep_restrict) param->value = IW_AUTH_ALG_SHARED_KEY; else param->value = IW_AUTH_ALG_OPEN_SYSTEM; break; case IW_AUTH_WPA_ENABLED: param->value = priv->wpa_enabled; break; default: ret = -EOPNOTSUPP; } orinoco_unlock(priv, &flags); return ret; } static int orinoco_ioctl_set_genie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); u8 *buf; unsigned long flags; /* cut off at IEEE80211_MAX_DATA_LEN */ if ((wrqu->data.length > IEEE80211_MAX_DATA_LEN) || (wrqu->data.length && (extra == NULL))) return -EINVAL; if (wrqu->data.length) { buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL); if (buf == NULL) return -ENOMEM; } else buf = NULL; if (orinoco_lock(priv, &flags) != 0) { kfree(buf); return -EBUSY; } kfree(priv->wpa_ie); priv->wpa_ie = buf; priv->wpa_ie_len = wrqu->data.length; if (priv->wpa_ie) { /* Looks like wl_lkm wants to check the auth alg, and * somehow pass it to the firmware. * Instead it just calls the key mgmt rid * - we do this in set auth. 
*/ } orinoco_unlock(priv, &flags); return 0; } static int orinoco_ioctl_get_genie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); unsigned long flags; int err = 0; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; if ((priv->wpa_ie_len == 0) || (priv->wpa_ie == NULL)) { wrqu->data.length = 0; goto out; } if (wrqu->data.length < priv->wpa_ie_len) { err = -E2BIG; goto out; } wrqu->data.length = priv->wpa_ie_len; memcpy(extra, priv->wpa_ie, priv->wpa_ie_len); out: orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_set_mlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); struct iw_mlme *mlme = (struct iw_mlme *)extra; unsigned long flags; int ret = 0; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; switch (mlme->cmd) { case IW_MLME_DEAUTH: /* silently ignore */ break; case IW_MLME_DISASSOC: ret = orinoco_hw_disassociate(priv, mlme->addr.sa_data, mlme->reason_code); break; default: ret = -EOPNOTSUPP; } orinoco_unlock(priv, &flags); return ret; } static int orinoco_ioctl_reset(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (info->cmd == (SIOCIWFIRSTPRIV + 0x1)) { printk(KERN_DEBUG "%s: Forcing reset!\n", dev->name); /* Firmware reset */ orinoco_reset(&priv->reset_work); } else { printk(KERN_DEBUG "%s: Force scheduling reset!\n", dev->name); schedule_work(&priv->reset_work); } return 0; } static int orinoco_ioctl_setibssport(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int val = *((int *) extra); unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; priv->ibss_port = val; /* Actually update the mode we are using */ set_port_type(priv); orinoco_unlock(priv, &flags); return -EINPROGRESS; /* Call commit handler */ } static int orinoco_ioctl_getibssport(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int *val = (int *) extra; *val = priv->ibss_port; return 0; } static int orinoco_ioctl_setport3(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int val = *((int *) extra); int err = 0; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; switch (val) { case 0: /* Try to do IEEE ad-hoc mode */ if (!priv->has_ibss) { err = -EINVAL; break; } priv->prefer_port3 = 0; break; case 1: /* Try to do Lucent proprietary ad-hoc mode */ if (!priv->has_port3) { err = -EINVAL; break; } priv->prefer_port3 = 1; break; default: err = -EINVAL; } if (!err) { /* Actually update the mode we are using */ set_port_type(priv); err = -EINPROGRESS; } orinoco_unlock(priv, &flags); return err; } static int orinoco_ioctl_getport3(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int *val = (int *) extra; *val = priv->prefer_port3; return 0; } static int orinoco_ioctl_setpreamble(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); unsigned long flags; int val; if (!priv->has_preamble) return -EOPNOTSUPP; /* 
802.11b has recently defined some short preamble. * Basically, the Phy header has been reduced in size. * This increase performance, especially at high rates * (the preamble is transmitted at 1Mb/s), unfortunately * this give compatibility troubles... - Jean II */ val = *((int *) extra); if (orinoco_lock(priv, &flags) != 0) return -EBUSY; if (val) priv->preamble = 1; else priv->preamble = 0; orinoco_unlock(priv, &flags); return -EINPROGRESS; /* Call commit handler */ } static int orinoco_ioctl_getpreamble(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); int *val = (int *) extra; if (!priv->has_preamble) return -EOPNOTSUPP; *val = priv->preamble; return 0; } /* ioctl interface to hermes_read_ltv() * To use with iwpriv, pass the RID as the token argument, e.g. * iwpriv get_rid [0xfc00] * At least Wireless Tools 25 is required to use iwpriv. * For Wireless Tools 25 and 26 append "dummy" are the end. */ static int orinoco_ioctl_getrid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct iw_point *data = &wrqu->data; struct orinoco_private *priv = ndev_priv(dev); struct hermes *hw = &priv->hw; int rid = data->flags; u16 length; int err; unsigned long flags; /* It's a "get" function, but we don't want users to access the * WEP key and other raw firmware data */ if (!capable(CAP_NET_ADMIN)) return -EPERM; if (rid < 0xfc00 || rid > 0xffff) return -EINVAL; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = hw->ops->read_ltv(hw, USER_BAP, rid, MAX_RID_LEN, &length, extra); if (err) goto out; data->length = min_t(u16, HERMES_RECLEN_TO_BYTES(length), MAX_RID_LEN); out: orinoco_unlock(priv, &flags); return err; } /* Commit handler, called after set operations */ static int orinoco_ioctl_commit(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct orinoco_private *priv = ndev_priv(dev); unsigned long flags; int err = 0; if (!priv->open) return 0; if (orinoco_lock(priv, &flags) != 0) return err; err = orinoco_commit(priv); orinoco_unlock(priv, &flags); return err; } static const struct iw_priv_args orinoco_privtab[] = { { SIOCIWFIRSTPRIV + 0x0, 0, 0, "force_reset" }, { SIOCIWFIRSTPRIV + 0x1, 0, 0, "card_reset" }, { SIOCIWFIRSTPRIV + 0x2, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_port3" }, { SIOCIWFIRSTPRIV + 0x3, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_port3" }, { SIOCIWFIRSTPRIV + 0x4, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_preamble" }, { SIOCIWFIRSTPRIV + 0x5, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_preamble" }, { SIOCIWFIRSTPRIV + 0x6, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_ibssport" }, { SIOCIWFIRSTPRIV + 0x7, 0, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, "get_ibssport" }, { SIOCIWFIRSTPRIV + 0x9, 0, IW_PRIV_TYPE_BYTE | MAX_RID_LEN, "get_rid" }, }; /* * Structures to export the Wireless Handlers */ static const iw_handler orinoco_handler[] = { IW_HANDLER(SIOCSIWCOMMIT, orinoco_ioctl_commit), IW_HANDLER(SIOCGIWNAME, cfg80211_wext_giwname), IW_HANDLER(SIOCSIWFREQ, orinoco_ioctl_setfreq), IW_HANDLER(SIOCGIWFREQ, orinoco_ioctl_getfreq), IW_HANDLER(SIOCSIWMODE, cfg80211_wext_siwmode), IW_HANDLER(SIOCGIWMODE, cfg80211_wext_giwmode), IW_HANDLER(SIOCSIWSENS, orinoco_ioctl_setsens), IW_HANDLER(SIOCGIWSENS, orinoco_ioctl_getsens), IW_HANDLER(SIOCGIWRANGE, cfg80211_wext_giwrange), IW_HANDLER(SIOCSIWSPY, iw_handler_set_spy), IW_HANDLER(SIOCGIWSPY, iw_handler_get_spy), 
IW_HANDLER(SIOCSIWTHRSPY, iw_handler_set_thrspy), IW_HANDLER(SIOCGIWTHRSPY, iw_handler_get_thrspy), IW_HANDLER(SIOCSIWAP, orinoco_ioctl_setwap), IW_HANDLER(SIOCGIWAP, orinoco_ioctl_getwap), IW_HANDLER(SIOCSIWSCAN, cfg80211_wext_siwscan), IW_HANDLER(SIOCGIWSCAN, cfg80211_wext_giwscan), IW_HANDLER(SIOCSIWESSID, orinoco_ioctl_setessid), IW_HANDLER(SIOCGIWESSID, orinoco_ioctl_getessid), IW_HANDLER(SIOCSIWRATE, orinoco_ioctl_setrate), IW_HANDLER(SIOCGIWRATE, orinoco_ioctl_getrate), IW_HANDLER(SIOCSIWRTS, cfg80211_wext_siwrts), IW_HANDLER(SIOCGIWRTS, cfg80211_wext_giwrts), IW_HANDLER(SIOCSIWFRAG, cfg80211_wext_siwfrag), IW_HANDLER(SIOCGIWFRAG, cfg80211_wext_giwfrag), IW_HANDLER(SIOCGIWRETRY, cfg80211_wext_giwretry), IW_HANDLER(SIOCSIWENCODE, orinoco_ioctl_setiwencode), IW_HANDLER(SIOCGIWENCODE, orinoco_ioctl_getiwencode), IW_HANDLER(SIOCSIWPOWER, orinoco_ioctl_setpower), IW_HANDLER(SIOCGIWPOWER, orinoco_ioctl_getpower), IW_HANDLER(SIOCSIWGENIE, orinoco_ioctl_set_genie), IW_HANDLER(SIOCGIWGENIE, orinoco_ioctl_get_genie), IW_HANDLER(SIOCSIWMLME, orinoco_ioctl_set_mlme), IW_HANDLER(SIOCSIWAUTH, orinoco_ioctl_set_auth), IW_HANDLER(SIOCGIWAUTH, orinoco_ioctl_get_auth), IW_HANDLER(SIOCSIWENCODEEXT, orinoco_ioctl_set_encodeext), IW_HANDLER(SIOCGIWENCODEEXT, orinoco_ioctl_get_encodeext), }; /* Added typecasting since we no longer use iwreq_data -- Moustafa */ static const iw_handler orinoco_private_handler[] = { [0] = orinoco_ioctl_reset, [1] = orinoco_ioctl_reset, [2] = orinoco_ioctl_setport3, [3] = orinoco_ioctl_getport3, [4] = orinoco_ioctl_setpreamble, [5] = orinoco_ioctl_getpreamble, [6] = orinoco_ioctl_setibssport, [7] = orinoco_ioctl_getibssport, [9] = orinoco_ioctl_getrid, }; const struct iw_handler_def orinoco_handler_def = { .num_standard = ARRAY_SIZE(orinoco_handler), .num_private = ARRAY_SIZE(orinoco_private_handler), .num_private_args = ARRAY_SIZE(orinoco_privtab), .standard = orinoco_handler, .private = orinoco_private_handler, .private_args = orinoco_privtab, .get_wireless_stats = orinoco_get_wireless_stats, };
linux-master
drivers/net/wireless/intersil/orinoco/wext.c
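A note on the wext.c excerpt above: orinoco_ioctl_setfreq() accepts either a raw channel number or a (mantissa, exponent) frequency from the wireless-extensions iw_freq structure. The sketch below is a minimal user-space rendering of that decision; the helper names iw_freq_to_channel() and freq_mhz_to_channel() are invented for the example, and the hard-coded 2.4 GHz frequency-to-channel rule merely stands in for the kernel's ieee80211_frequency_to_channel().

#include <stdio.h>

/* 2.4 GHz mapping only: channels 1-13 at 2412 + 5*(n-1) MHz, channel 14
 * at 2484 MHz.  Stand-in for ieee80211_frequency_to_channel(). */
static int freq_mhz_to_channel(int mhz)
{
	if (mhz == 2484)
		return 14;
	if (mhz >= 2412 && mhz <= 2472 && (mhz - 2412) % 5 == 0)
		return (mhz - 2412) / 5 + 1;
	return -1;			/* not a 2.4 GHz channel */
}

/* Same decision orinoco_ioctl_setfreq() makes on the iw_freq pair. */
static int iw_freq_to_channel(int m, int e)
{
	int denom = 1;
	int i;

	if (e == 0 && m <= 1000)
		return m;		/* already a channel number */

	/* Rescale the (mantissa, exponent) value to MHz, as the driver does. */
	for (i = 0; i < 6 - e; i++)
		denom *= 10;

	return freq_mhz_to_channel(m / denom);
}

int main(void)
{
	printf("m=6,    e=0 -> channel %d\n", iw_freq_to_channel(6, 0));
	printf("m=2437, e=6 -> channel %d\n", iw_freq_to_channel(2437, 6));
	printf("m=2484, e=6 -> channel %d\n", iw_freq_to_channel(2484, 6));
	return 0;
}

Running it maps (6, 0) straight to channel 6, while (2437, 6) and (2484, 6), i.e. 2437 MHz and 2484 MHz, come out as channels 6 and 14.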
/* cfg80211 support * * See copyright notice in main.c */ #include <linux/ieee80211.h> #include <net/cfg80211.h> #include "hw.h" #include "main.h" #include "orinoco.h" #include "cfg.h" /* Supported bitrates. Must agree with hw.c */ static struct ieee80211_rate orinoco_rates[] = { { .bitrate = 10 }, { .bitrate = 20 }, { .bitrate = 55 }, { .bitrate = 110 }, }; static const void * const orinoco_wiphy_privid = &orinoco_wiphy_privid; /* Called after orinoco_private is allocated. */ void orinoco_wiphy_init(struct wiphy *wiphy) { struct orinoco_private *priv = wiphy_priv(wiphy); wiphy->privid = orinoco_wiphy_privid; set_wiphy_dev(wiphy, priv->dev); } /* Called after firmware is initialised */ int orinoco_wiphy_register(struct wiphy *wiphy) { struct orinoco_private *priv = wiphy_priv(wiphy); int i, channels = 0; if (priv->firmware_type == FIRMWARE_TYPE_AGERE) wiphy->max_scan_ssids = 1; else wiphy->max_scan_ssids = 0; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); /* TODO: should we set if we only have demo ad-hoc? * (priv->has_port3) */ if (priv->has_ibss) wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); if (!priv->broken_monitor || force_monitor) wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR); priv->band.bitrates = orinoco_rates; priv->band.n_bitrates = ARRAY_SIZE(orinoco_rates); /* Only support channels allowed by the card EEPROM */ for (i = 0; i < NUM_CHANNELS; i++) { if (priv->channel_mask & (1 << i)) { priv->channels[i].center_freq = ieee80211_channel_to_frequency(i + 1, NL80211_BAND_2GHZ); channels++; } } priv->band.channels = priv->channels; priv->band.n_channels = channels; wiphy->bands[NL80211_BAND_2GHZ] = &priv->band; wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; i = 0; if (priv->has_wep) { priv->cipher_suites[i] = WLAN_CIPHER_SUITE_WEP40; i++; if (priv->has_big_wep) { priv->cipher_suites[i] = WLAN_CIPHER_SUITE_WEP104; i++; } } if (priv->has_wpa) { priv->cipher_suites[i] = WLAN_CIPHER_SUITE_TKIP; i++; } wiphy->cipher_suites = priv->cipher_suites; wiphy->n_cipher_suites = i; wiphy->rts_threshold = priv->rts_thresh; if (!priv->has_mwo) wiphy->frag_threshold = priv->frag_thresh + 1; wiphy->retry_short = priv->short_retry_limit; wiphy->retry_long = priv->long_retry_limit; return wiphy_register(wiphy); } static int orinoco_change_vif(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, struct vif_params *params) { struct orinoco_private *priv = wiphy_priv(wiphy); int err = 0; unsigned long lock; if (orinoco_lock(priv, &lock) != 0) return -EBUSY; switch (type) { case NL80211_IFTYPE_ADHOC: if (!priv->has_ibss && !priv->has_port3) err = -EINVAL; break; case NL80211_IFTYPE_STATION: break; case NL80211_IFTYPE_MONITOR: if (priv->broken_monitor && !force_monitor) { wiphy_warn(wiphy, "Monitor mode support is buggy in this firmware, not enabling\n"); err = -EINVAL; } break; default: err = -EINVAL; } if (!err) { priv->iw_mode = type; set_port_type(priv); err = orinoco_commit(priv); } orinoco_unlock(priv, &lock); return err; } static int orinoco_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct orinoco_private *priv = wiphy_priv(wiphy); int err; if (!request) return -EINVAL; if (priv->scan_request && priv->scan_request != request) return -EBUSY; priv->scan_request = request; err = orinoco_hw_trigger_scan(priv, request->ssids); /* On error the we aren't processing the request */ if (err) priv->scan_request = NULL; return err; } static int orinoco_set_monitor_channel(struct wiphy *wiphy, struct cfg80211_chan_def *chandef) { struct 
orinoco_private *priv = wiphy_priv(wiphy); int err = 0; unsigned long flags; int channel; if (!chandef->chan) return -EINVAL; if (cfg80211_get_chandef_type(chandef) != NL80211_CHAN_NO_HT) return -EINVAL; if (chandef->chan->band != NL80211_BAND_2GHZ) return -EINVAL; channel = ieee80211_frequency_to_channel(chandef->chan->center_freq); if ((channel < 1) || (channel > NUM_CHANNELS) || !(priv->channel_mask & (1 << (channel - 1)))) return -EINVAL; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; priv->channel = channel; if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { /* Fast channel change - no commit if successful */ struct hermes *hw = &priv->hw; err = hw->ops->cmd_wait(hw, HERMES_CMD_TEST | HERMES_TEST_SET_CHANNEL, channel, NULL); } orinoco_unlock(priv, &flags); return err; } static int orinoco_set_wiphy_params(struct wiphy *wiphy, u32 changed) { struct orinoco_private *priv = wiphy_priv(wiphy); int frag_value = -1; int rts_value = -1; int err = 0; if (changed & WIPHY_PARAM_RETRY_SHORT) { /* Setting short retry not supported */ err = -EINVAL; } if (changed & WIPHY_PARAM_RETRY_LONG) { /* Setting long retry not supported */ err = -EINVAL; } if (changed & WIPHY_PARAM_FRAG_THRESHOLD) { /* Set fragmentation */ if (priv->has_mwo) { if (wiphy->frag_threshold == -1) frag_value = 0; else { printk(KERN_WARNING "%s: Fixed fragmentation " "is not supported on this firmware. " "Using MWO robust instead.\n", priv->ndev->name); frag_value = 1; } } else { if (wiphy->frag_threshold == -1) frag_value = 2346; else if ((wiphy->frag_threshold < 257) || (wiphy->frag_threshold > 2347)) err = -EINVAL; else /* cfg80211 value is 257-2347 (odd only) * orinoco rid has range 256-2346 (even only) */ frag_value = wiphy->frag_threshold & ~0x1; } } if (changed & WIPHY_PARAM_RTS_THRESHOLD) { /* Set RTS. * * Prism documentation suggests default of 2432, * and a range of 0-3000. * * Current implementation uses 2347 as the default and * the upper limit. */ if (wiphy->rts_threshold == -1) rts_value = 2347; else if (wiphy->rts_threshold > 2347) err = -EINVAL; else rts_value = wiphy->rts_threshold; } if (!err) { unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; if (frag_value >= 0) { if (priv->has_mwo) priv->mwo_robust = frag_value; else priv->frag_thresh = frag_value; } if (rts_value >= 0) priv->rts_thresh = rts_value; err = orinoco_commit(priv); orinoco_unlock(priv, &flags); } return err; } const struct cfg80211_ops orinoco_cfg_ops = { .change_virtual_intf = orinoco_change_vif, .set_monitor_channel = orinoco_set_monitor_channel, .scan = orinoco_scan, .set_wiphy_params = orinoco_set_wiphy_params, };
linux-master
drivers/net/wireless/intersil/orinoco/cfg.c
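A detail in the cfg.c excerpt above that is easy to miss: for non-MWO firmware, orinoco_set_wiphy_params() translates the fragmentation threshold by mapping -1 to 2346, rejecting values outside 257..2347, and clearing bit 0 because cfg80211 uses odd values while the Hermes RID wants even ones. The sketch below replays that mapping on a few sample inputs; frag_to_rid() is a made-up name for the example, not a driver function, and it deliberately ignores the separate MWO-robust path.

#include <stdio.h>

/* Returns the value to program into the firmware RID, or -1 for input the
 * driver would reject with -EINVAL. */
static int frag_to_rid(int frag_threshold)
{
	if (frag_threshold == -1)
		return 2346;			/* fragmentation effectively off */
	if (frag_threshold < 257 || frag_threshold > 2347)
		return -1;			/* outside the cfg80211 range */
	return frag_threshold & ~0x1;		/* round down to the even RID value */
}

int main(void)
{
	const int in[] = { -1, 257, 1501, 2347, 5000 };
	unsigned int i;

	for (i = 0; i < sizeof(in) / sizeof(in[0]); i++)
		printf("cfg80211 frag %5d -> RID value %d\n",
		       in[i], frag_to_rid(in[i]));
	return 0;
}

The MWO path in the driver is different again: it only distinguishes "off" (0) from "MWO robust" (1), which this sketch does not cover.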
/* orinoco_tmd.c * * Driver for Prism II devices which would usually be driven by orinoco_cs, * but are connected to the PCI bus by a TMD7160. * * Copyright (C) 2003 Joerg Dorchain <joerg AT dorchain.net> * based heavily upon orinoco_plx.c Copyright (C) 2001 Daniel Barlow * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License * at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and * limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License version 2 (the "GPL"), in * which case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the MPL, indicate your decision by * deleting the provisions above and replace them with the notice and * other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file * under either the MPL or the GPL. * * The actual driving is done by main.c, this is just resource * allocation stuff. * * This driver is modeled after the orinoco_plx driver. The main * difference is that the TMD chip has only IO port ranges and doesn't * provide access to the PCMCIA attribute space. * * Pheecom sells cards with the TMD chip as "ASIC version" */ #define DRIVER_NAME "orinoco_tmd" #define PFX DRIVER_NAME ": " #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include <pcmcia/cisreg.h> #include "orinoco.h" #include "orinoco_pci.h" #define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ #define COR_RESET (0x80) /* reset bit in the COR register */ #define TMD_RESET_TIME (500) /* milliseconds */ /* * Do a soft reset of the card using the Configuration Option Register */ static int orinoco_tmd_cor_reset(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; struct orinoco_pci_card *card = priv->card; unsigned long timeout; u16 reg; iowrite8(COR_VALUE | COR_RESET, card->bridge_io); mdelay(1); iowrite8(COR_VALUE, card->bridge_io); mdelay(1); /* Just in case, wait more until the card is no longer busy */ timeout = jiffies + msecs_to_jiffies(TMD_RESET_TIME); reg = hermes_read_regn(hw, CMD); while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) { mdelay(1); reg = hermes_read_regn(hw, CMD); } /* Still busy? 
*/ if (reg & HERMES_CMD_BUSY) { printk(KERN_ERR PFX "Busy timeout\n"); return -ETIMEDOUT; } return 0; } static int orinoco_tmd_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; struct orinoco_private *priv; struct orinoco_pci_card *card; void __iomem *hermes_io, *bridge_io; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR PFX "Cannot enable PCI device\n"); return err; } err = pci_request_regions(pdev, DRIVER_NAME); if (err) { printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); goto fail_resources; } bridge_io = pci_iomap(pdev, 1, 0); if (!bridge_io) { printk(KERN_ERR PFX "Cannot map bridge registers\n"); err = -EIO; goto fail_map_bridge; } hermes_io = pci_iomap(pdev, 2, 0); if (!hermes_io) { printk(KERN_ERR PFX "Cannot map chipset registers\n"); err = -EIO; goto fail_map_hermes; } /* Allocate network device */ priv = alloc_orinocodev(sizeof(*card), &pdev->dev, orinoco_tmd_cor_reset, NULL); if (!priv) { printk(KERN_ERR PFX "Cannot allocate network device\n"); err = -ENOMEM; goto fail_alloc; } card = priv->card; card->bridge_io = bridge_io; hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED, DRIVER_NAME, priv); if (err) { printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq); err = -EBUSY; goto fail_irq; } err = orinoco_tmd_cor_reset(priv); if (err) { printk(KERN_ERR PFX "Initial reset failed\n"); goto fail; } err = orinoco_init(priv); if (err) { printk(KERN_ERR PFX "orinoco_init() failed\n"); goto fail; } err = orinoco_if_add(priv, 0, 0, NULL); if (err) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); goto fail; } pci_set_drvdata(pdev, priv); return 0; fail: free_irq(pdev->irq, priv); fail_irq: free_orinocodev(priv); fail_alloc: pci_iounmap(pdev, hermes_io); fail_map_hermes: pci_iounmap(pdev, bridge_io); fail_map_bridge: pci_release_regions(pdev); fail_resources: pci_disable_device(pdev); return err; } static void orinoco_tmd_remove_one(struct pci_dev *pdev) { struct orinoco_private *priv = pci_get_drvdata(pdev); struct orinoco_pci_card *card = priv->card; orinoco_if_del(priv); free_irq(pdev->irq, priv); free_orinocodev(priv); pci_iounmap(pdev, priv->hw.iobase); pci_iounmap(pdev, card->bridge_io); pci_release_regions(pdev); pci_disable_device(pdev); } static const struct pci_device_id orinoco_tmd_id_table[] = { {0x15e8, 0x0131, PCI_ANY_ID, PCI_ANY_ID,}, /* NDC and OEMs, e.g. pheecom */ {0,}, }; MODULE_DEVICE_TABLE(pci, orinoco_tmd_id_table); static struct pci_driver orinoco_tmd_driver = { .name = DRIVER_NAME, .id_table = orinoco_tmd_id_table, .probe = orinoco_tmd_init_one, .remove = orinoco_tmd_remove_one, .driver.pm = &orinoco_pci_pm_ops, }; static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION " (Joerg Dorchain <[email protected]>)"; MODULE_AUTHOR("Joerg Dorchain <[email protected]>"); MODULE_DESCRIPTION("Driver for wireless LAN cards using the TMD7160 PCI bridge"); MODULE_LICENSE("Dual MPL/GPL"); static int __init orinoco_tmd_init(void) { printk(KERN_DEBUG "%s\n", version); return pci_register_driver(&orinoco_tmd_driver); } static void __exit orinoco_tmd_exit(void) { pci_unregister_driver(&orinoco_tmd_driver); } module_init(orinoco_tmd_init); module_exit(orinoco_tmd_exit);
linux-master
drivers/net/wireless/intersil/orinoco/orinoco_tmd.c
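orinoco_tmd_cor_reset() above follows a classic pattern: pulse the reset bit in the COR, then poll a busy flag until it clears or a timeout expires. The stand-alone sketch below mirrors that shape with the hardware stubbed out so it can actually run; cor_reset(), read_cmd_reg() and the polls_until_ready counter are invented stand-ins, and each loop pass represents the driver's 1 ms mdelay() checked against a jiffies deadline.

#include <stdio.h>

#define CMD_BUSY	0x8000	/* stands in for HERMES_CMD_BUSY */
#define RESET_TIME_MS	500	/* stands in for TMD_RESET_TIME (ms) */

/* Fake device state: stays busy for a few polls after the reset pulse. */
static int polls_until_ready;

/* In the driver this is two iowrite8() calls to the COR register with a
 * 1 ms delay after each: COR_VALUE | COR_RESET, then COR_VALUE again. */
static void cor_reset(void)
{
	polls_until_ready = 3;
}

/* In the driver this is hermes_read_regn(hw, CMD). */
static unsigned int read_cmd_reg(void)
{
	if (polls_until_ready > 0) {
		polls_until_ready--;
		return CMD_BUSY;
	}
	return 0;
}

/* Poll the busy bit until it clears or the timeout expires. */
static int wait_not_busy(void)
{
	int elapsed_ms;

	for (elapsed_ms = 0; elapsed_ms < RESET_TIME_MS; elapsed_ms++) {
		if (!(read_cmd_reg() & CMD_BUSY))
			return 0;	/* card came back */
	}
	return -1;	/* orinoco_tmd_cor_reset() returns -ETIMEDOUT here */
}

int main(void)
{
	cor_reset();
	printf("COR reset %s\n", wait_not_busy() == 0 ? "completed" : "timed out");
	return 0;
}

On real hardware the poll keeps returning the busy bit until the card finishes its internal reset, which is why the driver bounds the loop with TMD_RESET_TIME.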
/* Helpers for managing scan queues * * See copyright notice in main.c */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/ieee80211.h> #include <net/cfg80211.h> #include "hermes.h" #include "orinoco.h" #include "main.h" #include "scan.h" #define ZERO_DBM_OFFSET 0x95 #define MAX_SIGNAL_LEVEL 0x8A #define MIN_SIGNAL_LEVEL 0x2F #define SIGNAL_TO_DBM(x) \ (clamp_t(s32, (x), MIN_SIGNAL_LEVEL, MAX_SIGNAL_LEVEL) \ - ZERO_DBM_OFFSET) #define SIGNAL_TO_MBM(x) (SIGNAL_TO_DBM(x) * 100) static int symbol_build_supp_rates(u8 *buf, const __le16 *rates) { int i; u8 rate; buf[0] = WLAN_EID_SUPP_RATES; for (i = 0; i < 5; i++) { rate = le16_to_cpu(rates[i]); /* NULL terminated */ if (rate == 0x0) break; buf[i + 2] = rate; } buf[1] = i; return i + 2; } static int prism_build_supp_rates(u8 *buf, const u8 *rates) { int i; buf[0] = WLAN_EID_SUPP_RATES; for (i = 0; i < 8; i++) { /* NULL terminated */ if (rates[i] == 0x0) break; buf[i + 2] = rates[i]; } buf[1] = i; /* We might still have another 2 rates, which need to go in * extended supported rates */ if (i == 8 && rates[i] > 0) { buf[10] = WLAN_EID_EXT_SUPP_RATES; for (; i < 10; i++) { /* NULL terminated */ if (rates[i] == 0x0) break; buf[i + 2] = rates[i]; } buf[11] = i - 8; } return (i < 8) ? i + 2 : i + 4; } static void orinoco_add_hostscan_result(struct orinoco_private *priv, const union hermes_scan_info *bss) { struct wiphy *wiphy = priv_to_wiphy(priv); struct ieee80211_channel *channel; struct cfg80211_bss *cbss; u8 *ie; u8 ie_buf[46]; u64 timestamp; s32 signal; u16 capability; u16 beacon_interval; int ie_len; int freq; int len; len = le16_to_cpu(bss->a.essid_len); /* Reconstruct SSID and bitrate IEs to pass up */ ie_buf[0] = WLAN_EID_SSID; ie_buf[1] = len; memcpy(&ie_buf[2], bss->a.essid, len); ie = ie_buf + len + 2; ie_len = ie_buf[1] + 2; switch (priv->firmware_type) { case FIRMWARE_TYPE_SYMBOL: ie_len += symbol_build_supp_rates(ie, bss->s.rates); break; case FIRMWARE_TYPE_INTERSIL: ie_len += prism_build_supp_rates(ie, bss->p.rates); break; case FIRMWARE_TYPE_AGERE: default: break; } freq = ieee80211_channel_to_frequency( le16_to_cpu(bss->a.channel), NL80211_BAND_2GHZ); channel = ieee80211_get_channel(wiphy, freq); if (!channel) { printk(KERN_DEBUG "Invalid channel designation %04X(%04X)", bss->a.channel, freq); return; /* Then ignore it for now */ } timestamp = 0; capability = le16_to_cpu(bss->a.capabilities); beacon_interval = le16_to_cpu(bss->a.beacon_interv); signal = SIGNAL_TO_MBM(le16_to_cpu(bss->a.level)); cbss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, bss->a.bssid, timestamp, capability, beacon_interval, ie_buf, ie_len, signal, GFP_KERNEL); cfg80211_put_bss(wiphy, cbss); } void orinoco_add_extscan_result(struct orinoco_private *priv, struct agere_ext_scan_info *bss, size_t len) { struct wiphy *wiphy = priv_to_wiphy(priv); struct ieee80211_channel *channel; struct cfg80211_bss *cbss; const u8 *ie; u64 timestamp; s32 signal; u16 capability; u16 beacon_interval; size_t ie_len; int chan, freq; ie_len = len - sizeof(*bss); ie = cfg80211_find_ie(WLAN_EID_DS_PARAMS, bss->data, ie_len); chan = ie ? 
ie[2] : 0; freq = ieee80211_channel_to_frequency(chan, NL80211_BAND_2GHZ); channel = ieee80211_get_channel(wiphy, freq); timestamp = le64_to_cpu(bss->timestamp); capability = le16_to_cpu(bss->capabilities); beacon_interval = le16_to_cpu(bss->beacon_interval); ie = bss->data; signal = SIGNAL_TO_MBM(bss->level); cbss = cfg80211_inform_bss(wiphy, channel, CFG80211_BSS_FTYPE_UNKNOWN, bss->bssid, timestamp, capability, beacon_interval, ie, ie_len, signal, GFP_KERNEL); cfg80211_put_bss(wiphy, cbss); } void orinoco_add_hostscan_results(struct orinoco_private *priv, unsigned char *buf, size_t len) { int offset; /* In the scan data */ size_t atom_len; bool abort = false; switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: atom_len = sizeof(struct agere_scan_apinfo); offset = 0; break; case FIRMWARE_TYPE_SYMBOL: /* Lack of documentation necessitates this hack. * Different firmwares have 68 or 76 byte long atoms. * We try modulo first. If the length divides by both, * we check what would be the channel in the second * frame for a 68-byte atom. 76-byte atoms have 0 there. * Valid channel cannot be 0. */ if (len % 76) atom_len = 68; else if (len % 68) atom_len = 76; else if (len >= 1292 && buf[68] == 0) atom_len = 76; else atom_len = 68; offset = 0; break; case FIRMWARE_TYPE_INTERSIL: offset = 4; if (priv->has_hostscan) { atom_len = le16_to_cpup((__le16 *)buf); /* Sanity check for atom_len */ if (atom_len < sizeof(struct prism2_scan_apinfo)) { printk(KERN_ERR "%s: Invalid atom_len in scan " "data: %zu\n", priv->ndev->name, atom_len); abort = true; goto scan_abort; } } else atom_len = offsetof(struct prism2_scan_apinfo, atim); break; default: abort = true; goto scan_abort; } /* Check that we got an whole number of atoms */ if ((len - offset) % atom_len) { printk(KERN_ERR "%s: Unexpected scan data length %zu, " "atom_len %zu, offset %d\n", priv->ndev->name, len, atom_len, offset); abort = true; goto scan_abort; } /* Process the entries one by one */ for (; offset + atom_len <= len; offset += atom_len) { union hermes_scan_info *atom; atom = (union hermes_scan_info *) (buf + offset); orinoco_add_hostscan_result(priv, atom); } scan_abort: if (priv->scan_request) { struct cfg80211_scan_info info = { .aborted = abort, }; cfg80211_scan_done(priv->scan_request, &info); priv->scan_request = NULL; } } void orinoco_scan_done(struct orinoco_private *priv, bool abort) { if (priv->scan_request) { struct cfg80211_scan_info info = { .aborted = abort, }; cfg80211_scan_done(priv->scan_request, &info); priv->scan_request = NULL; } }
linux-master
drivers/net/wireless/intersil/orinoco/scan.c
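The SIGNAL_TO_DBM()/SIGNAL_TO_MBM() macros at the top of the scan.c excerpt above clamp the raw Hermes level into a fixed window, subtract the 0x95 zero-dBm offset, and scale by 100 for cfg80211's mBm units. The sketch below repeats that conversion as plain user-space C; signal_to_dbm() and clamp_int() are names invented for the example, and the sample levels are arbitrary.

#include <stdio.h>

#define ZERO_DBM_OFFSET		0x95
#define MAX_SIGNAL_LEVEL	0x8A
#define MIN_SIGNAL_LEVEL	0x2F

/* Plain replacement for the kernel's clamp_t() used by the macro. */
static int clamp_int(int v, int lo, int hi)
{
	return v < lo ? lo : (v > hi ? hi : v);
}

/* Same arithmetic as SIGNAL_TO_DBM(): clamp, then shift by the offset. */
static int signal_to_dbm(int raw)
{
	return clamp_int(raw, MIN_SIGNAL_LEVEL, MAX_SIGNAL_LEVEL)
		- ZERO_DBM_OFFSET;
}

int main(void)
{
	const int raw[] = { 0x20, 0x2F, 0x60, 0x8A, 0xA0 };
	unsigned int i;

	for (i = 0; i < sizeof(raw) / sizeof(raw[0]); i++)
		printf("raw 0x%02x -> %d dBm (%d mBm)\n", raw[i],
		       signal_to_dbm(raw[i]), signal_to_dbm(raw[i]) * 100);
	return 0;
}

Levels below MIN_SIGNAL_LEVEL or above MAX_SIGNAL_LEVEL saturate at -102 dBm and -11 dBm respectively, matching the clamp_t() bounds in the macro.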
/* Firmware file reading and download helpers * * See copyright notice in main.c */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/device.h> #include <linux/module.h> #include "hermes.h" #include "hermes_dld.h" #include "orinoco.h" #include "fw.h" /* End markers (for Symbol firmware only) */ #define TEXT_END 0x1A /* End of text header */ struct fw_info { char *pri_fw; char *sta_fw; char *ap_fw; u32 pda_addr; u16 pda_size; }; static const struct fw_info orinoco_fw[] = { { NULL, "agere_sta_fw.bin", "agere_ap_fw.bin", 0x00390000, 1000 }, { NULL, "prism_sta_fw.bin", "prism_ap_fw.bin", 0, 1024 }, { "symbol_sp24t_prim_fw", "symbol_sp24t_sec_fw", NULL, 0x00003100, 512 } }; MODULE_FIRMWARE("agere_sta_fw.bin"); MODULE_FIRMWARE("agere_ap_fw.bin"); MODULE_FIRMWARE("prism_sta_fw.bin"); MODULE_FIRMWARE("prism_ap_fw.bin"); MODULE_FIRMWARE("symbol_sp24t_prim_fw"); MODULE_FIRMWARE("symbol_sp24t_sec_fw"); /* Structure used to access fields in FW * Make sure LE decoding macros are used */ struct orinoco_fw_header { char hdr_vers[6]; /* ASCII string for header version */ __le16 headersize; /* Total length of header */ __le32 entry_point; /* NIC entry point */ __le32 blocks; /* Number of blocks to program */ __le32 block_offset; /* Offset of block data from eof header */ __le32 pdr_offset; /* Offset to PDR data from eof header */ __le32 pri_offset; /* Offset to primary plug data */ __le32 compat_offset; /* Offset to compatibility data*/ char signature[]; /* FW signature length headersize-20 */ } __packed; /* Check the range of various header entries. Return a pointer to a * description of the problem, or NULL if everything checks out. */ static const char *validate_fw(const struct orinoco_fw_header *hdr, size_t len) { u16 hdrsize; if (len < sizeof(*hdr)) return "image too small"; if (memcmp(hdr->hdr_vers, "HFW", 3) != 0) return "format not recognised"; hdrsize = le16_to_cpu(hdr->headersize); if (hdrsize > len) return "bad headersize"; if ((hdrsize + le32_to_cpu(hdr->block_offset)) > len) return "bad block offset"; if ((hdrsize + le32_to_cpu(hdr->pdr_offset)) > len) return "bad PDR offset"; if ((hdrsize + le32_to_cpu(hdr->pri_offset)) > len) return "bad PRI offset"; if ((hdrsize + le32_to_cpu(hdr->compat_offset)) > len) return "bad compat offset"; /* TODO: consider adding a checksum or CRC to the firmware format */ return NULL; } #if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP) static inline const struct firmware * orinoco_cached_fw_get(struct orinoco_private *priv, bool primary) { if (primary) return priv->cached_pri_fw; else return priv->cached_fw; } #else #define orinoco_cached_fw_get(priv, primary) (NULL) #endif /* Download either STA or AP firmware into the card. 
*/ static int orinoco_dl_firmware(struct orinoco_private *priv, const struct fw_info *fw, int ap) { /* Plug Data Area (PDA) */ __le16 *pda; struct hermes *hw = &priv->hw; const struct firmware *fw_entry; const struct orinoco_fw_header *hdr; const unsigned char *first_block; const void *end; const char *firmware; const char *fw_err; struct device *dev = priv->dev; int err = 0; pda = kzalloc(fw->pda_size, GFP_KERNEL); if (!pda) return -ENOMEM; if (ap) firmware = fw->ap_fw; else firmware = fw->sta_fw; dev_dbg(dev, "Attempting to download firmware %s\n", firmware); /* Read current plug data */ err = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size); dev_dbg(dev, "Read PDA returned %d\n", err); if (err) goto free; if (!orinoco_cached_fw_get(priv, false)) { err = request_firmware(&fw_entry, firmware, priv->dev); if (err) { dev_err(dev, "Cannot find firmware %s\n", firmware); err = -ENOENT; goto free; } } else fw_entry = orinoco_cached_fw_get(priv, false); hdr = (const struct orinoco_fw_header *) fw_entry->data; fw_err = validate_fw(hdr, fw_entry->size); if (fw_err) { dev_warn(dev, "Invalid firmware image detected (%s). " "Aborting download\n", fw_err); err = -EINVAL; goto abort; } /* Enable aux port to allow programming */ err = hw->ops->program_init(hw, le32_to_cpu(hdr->entry_point)); dev_dbg(dev, "Program init returned %d\n", err); if (err != 0) goto abort; /* Program data */ first_block = (fw_entry->data + le16_to_cpu(hdr->headersize) + le32_to_cpu(hdr->block_offset)); end = fw_entry->data + fw_entry->size; err = hermes_program(hw, first_block, end); dev_dbg(dev, "Program returned %d\n", err); if (err != 0) goto abort; /* Update production data */ first_block = (fw_entry->data + le16_to_cpu(hdr->headersize) + le32_to_cpu(hdr->pdr_offset)); err = hermes_apply_pda_with_defaults(hw, first_block, end, pda, &pda[fw->pda_size / sizeof(*pda)]); dev_dbg(dev, "Apply PDA returned %d\n", err); if (err) goto abort; /* Tell card we've finished */ err = hw->ops->program_end(hw); dev_dbg(dev, "Program end returned %d\n", err); if (err != 0) goto abort; /* Check if we're running */ dev_dbg(dev, "hermes_present returned %d\n", hermes_present(hw)); abort: /* If we requested the firmware, release it. */ if (!orinoco_cached_fw_get(priv, false)) release_firmware(fw_entry); free: kfree(pda); return err; } /* * Process a firmware image - stop the card, load the firmware, reset * the card and make sure it responds. For the secondary firmware take * care of the PDA - read it and then write it on top of the firmware. 
*/ static int symbol_dl_image(struct orinoco_private *priv, const struct fw_info *fw, const unsigned char *image, const void *end, int secondary) { struct hermes *hw = &priv->hw; int ret = 0; const unsigned char *ptr; const unsigned char *first_block; /* Plug Data Area (PDA) */ __le16 *pda = NULL; /* Binary block begins after the 0x1A marker */ ptr = image; while (*ptr++ != TEXT_END); first_block = ptr; /* Read the PDA from EEPROM */ if (secondary) { pda = kzalloc(fw->pda_size, GFP_KERNEL); if (!pda) return -ENOMEM; ret = hw->ops->read_pda(hw, pda, fw->pda_addr, fw->pda_size); if (ret) goto free; } /* Stop the firmware, so that it can be safely rewritten */ if (priv->stop_fw) { ret = priv->stop_fw(priv, 1); if (ret) goto free; } /* Program the adapter with new firmware */ ret = hermes_program(hw, first_block, end); if (ret) goto free; /* Write the PDA to the adapter */ if (secondary) { size_t len = hermes_blocks_length(first_block, end); ptr = first_block + len; ret = hermes_apply_pda(hw, ptr, end, pda, &pda[fw->pda_size / sizeof(*pda)]); kfree(pda); if (ret) return ret; } /* Run the firmware */ if (priv->stop_fw) { ret = priv->stop_fw(priv, 0); if (ret) return ret; } /* Reset hermes chip and make sure it responds */ ret = hw->ops->init(hw); /* hermes_reset() should return 0 with the secondary firmware */ if (secondary && ret != 0) return -ENODEV; /* And this should work with any firmware */ if (!hermes_present(hw)) return -ENODEV; return 0; free: kfree(pda); return ret; } /* * Download the firmware into the card, this also does a PCMCIA soft * reset on the card, to make sure it's in a sane state. */ static int symbol_dl_firmware(struct orinoco_private *priv, const struct fw_info *fw) { struct device *dev = priv->dev; int ret; const struct firmware *fw_entry; if (!orinoco_cached_fw_get(priv, true)) { if (request_firmware(&fw_entry, fw->pri_fw, priv->dev) != 0) { dev_err(dev, "Cannot find firmware: %s\n", fw->pri_fw); return -ENOENT; } } else fw_entry = orinoco_cached_fw_get(priv, true); /* Load primary firmware */ ret = symbol_dl_image(priv, fw, fw_entry->data, fw_entry->data + fw_entry->size, 0); if (!orinoco_cached_fw_get(priv, true)) release_firmware(fw_entry); if (ret) { dev_err(dev, "Primary firmware download failed\n"); return ret; } if (!orinoco_cached_fw_get(priv, false)) { if (request_firmware(&fw_entry, fw->sta_fw, priv->dev) != 0) { dev_err(dev, "Cannot find firmware: %s\n", fw->sta_fw); return -ENOENT; } } else fw_entry = orinoco_cached_fw_get(priv, false); /* Load secondary firmware */ ret = symbol_dl_image(priv, fw, fw_entry->data, fw_entry->data + fw_entry->size, 1); if (!orinoco_cached_fw_get(priv, false)) release_firmware(fw_entry); if (ret) dev_err(dev, "Secondary firmware download failed\n"); return ret; } int orinoco_download(struct orinoco_private *priv) { int err = 0; /* Reload firmware */ switch (priv->firmware_type) { case FIRMWARE_TYPE_AGERE: /* case FIRMWARE_TYPE_INTERSIL: */ err = orinoco_dl_firmware(priv, &orinoco_fw[priv->firmware_type], 0); break; case FIRMWARE_TYPE_SYMBOL: err = symbol_dl_firmware(priv, &orinoco_fw[priv->firmware_type]); break; case FIRMWARE_TYPE_INTERSIL: break; } /* TODO: if we fail we probably need to reinitialise * the driver */ return err; } #if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP) void orinoco_cache_fw(struct orinoco_private *priv, int ap) { const struct firmware *fw_entry = NULL; const char *pri_fw; const char *fw; pri_fw = orinoco_fw[priv->firmware_type].pri_fw; if (ap) fw = 
orinoco_fw[priv->firmware_type].ap_fw; else fw = orinoco_fw[priv->firmware_type].sta_fw; if (pri_fw) { if (request_firmware(&fw_entry, pri_fw, priv->dev) == 0) priv->cached_pri_fw = fw_entry; } if (fw) { if (request_firmware(&fw_entry, fw, priv->dev) == 0) priv->cached_fw = fw_entry; } } void orinoco_uncache_fw(struct orinoco_private *priv) { release_firmware(priv->cached_pri_fw); release_firmware(priv->cached_fw); priv->cached_pri_fw = NULL; priv->cached_fw = NULL; } #endif
linux-master
drivers/net/wireless/intersil/orinoco/fw.c
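A side note on the Symbol download path in fw.c above: symbol_dl_image() locates the binary firmware block by scanning past the ASCII header until it hits the 0x1A (TEXT_END) marker, then hands everything after it to hermes_program(). Below is a minimal userspace sketch of that marker scan, with an explicit bounds check added; the TEXT_END value matches the driver, but the sample image and the NULL-on-missing-marker guard are assumptions made for the example, not part of the kernel code.

/* Illustrative userspace sketch (not kernel code): find the binary
 * block that follows the 0x1A text-end marker in a Symbol firmware
 * image. The in-kernel loop "while (*ptr++ != TEXT_END);" has no end
 * guard; the one here is an assumption added for safety in the demo.
 */
#include <stdio.h>
#include <stddef.h>

#define TEXT_END 0x1A	/* marker terminating the ASCII header */

/* Return a pointer to the first byte after the marker, or NULL if the
 * marker is missing from the image. */
static const unsigned char *first_binary_block(const unsigned char *image,
					       const unsigned char *end)
{
	const unsigned char *ptr = image;

	while (ptr < end) {
		if (*ptr++ == TEXT_END)
			return ptr;
	}
	return NULL;
}

int main(void)
{
	static const unsigned char image[] = {
		'H', 'D', 'R', TEXT_END, 0xde, 0xad, 0xbe, 0xef
	};
	const unsigned char *blk = first_binary_block(image,
						      image + sizeof(image));

	if (blk)
		printf("binary block starts at offset %zu\n",
		       (size_t)(blk - image));
	else
		printf("no TEXT_END marker found\n");
	return 0;
}

The driver can presumably omit the end check because request_firmware() hands it a complete, well-formed image; a standalone tool parsing arbitrary files cannot make that assumption.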
/* orinoco_plx.c * * Driver for Prism II devices which would usually be driven by orinoco_cs, * but are connected to the PCI bus by a PLX9052. * * Current maintainers are: * Pavel Roskin <proski AT gnu.org> * and David Gibson <hermes AT gibson.dropbear.id.au> * * (C) Copyright David Gibson, IBM Corp. 2001-2003. * Copyright (C) 2001 Daniel Barlow * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License * at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and * limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License version 2 (the "GPL"), in * which case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the MPL, indicate your decision by * deleting the provisions above and replace them with the notice and * other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file * under either the MPL or the GPL. * * Here's the general details on how the PLX9052 adapter works: * * - Two PCI I/O address spaces, one 0x80 long which contains the * PLX9052 registers, and one that's 0x40 long mapped to the PCMCIA * slot I/O address space. * * - One PCI memory address space, mapped to the PCMCIA attribute space * (containing the CIS). * * Using the later, you can read through the CIS data to make sure the * card is compatible with the driver. Keep in mind that the PCMCIA * spec specifies the CIS as the lower 8 bits of each word read from * the CIS, so to read the bytes of the CIS, read every other byte * (0,2,4,...). Passing that test, you need to enable the I/O address * space on the PCMCIA card via the PCMCIA COR register. This is the * first byte following the CIS. In my case (which may not have any * relation to what's on the PRISM2 cards), COR was at offset 0x800 * within the PCI memory space. Write 0x41 to the COR register to * enable I/O mode and to select level triggered interrupts. To * confirm you actually succeeded, read the COR register back and make * sure it actually got set to 0x41, in case you have an unexpected * card inserted. * * Following that, you can treat the second PCI I/O address space (the * one that's not 0x80 in length) as the PCMCIA I/O space. * * Note that in the Eumitcom's source for their drivers, they register * the interrupt as edge triggered when registering it with the * Windows kernel. I don't recall how to register edge triggered on * Linux (if it can be done at all). But in some experimentation, I * don't see much operational difference between using either * interrupt mode. Don't mess with the interrupt mode in the COR * register though, as the PLX9052 wants level triggers with the way * the serial EEPROM configures it on the WL11000. * * There's some other little quirks related to timing that I bumped * into, but I don't recall right now. Also, there's two variants of * the WL11000 I've seen, revision A1 and T2. These seem to differ * slightly in the timings configured in the wait-state generator in * the PLX9052. 
There have also been some comments from Eumitcom that * cards shouldn't be hot swapped, apparently due to risk of cooking * the PLX9052. I'm unsure why they believe this, as I can't see * anything in the design that would really cause a problem, except * for crashing drivers not written to expect it. And having developed * drivers for the WL11000, I'd say it's quite tricky to write code * that will successfully deal with a hot unplug. Very odd things * happen on the I/O side of things. But anyway, be warned. Despite * that, I've hot-swapped a number of times during debugging and * driver development for various reasons (stuck WAIT# line after the * radio card's firmware locks up). */ #define DRIVER_NAME "orinoco_plx" #define PFX DRIVER_NAME ": " #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include <pcmcia/cisreg.h> #include "orinoco.h" #include "orinoco_pci.h" #define COR_OFFSET (0x3e0) /* COR attribute offset of Prism2 PC card */ #define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ #define COR_RESET (0x80) /* reset bit in the COR register */ #define PLX_RESET_TIME (500) /* milliseconds */ #define PLX_INTCSR 0x4c /* Interrupt Control & Status Register */ #define PLX_INTCSR_INTEN (1 << 6) /* Interrupt Enable bit */ /* * Do a soft reset of the card using the Configuration Option Register */ static int orinoco_plx_cor_reset(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; struct orinoco_pci_card *card = priv->card; unsigned long timeout; u16 reg; iowrite8(COR_VALUE | COR_RESET, card->attr_io + COR_OFFSET); mdelay(1); iowrite8(COR_VALUE, card->attr_io + COR_OFFSET); mdelay(1); /* Just in case, wait more until the card is no longer busy */ timeout = jiffies + msecs_to_jiffies(PLX_RESET_TIME); reg = hermes_read_regn(hw, CMD); while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) { mdelay(1); reg = hermes_read_regn(hw, CMD); } /* Still busy? */ if (reg & HERMES_CMD_BUSY) { printk(KERN_ERR PFX "Busy timeout\n"); return -ETIMEDOUT; } return 0; } static int orinoco_plx_hw_init(struct orinoco_pci_card *card) { int i; u32 csr_reg; static const u8 cis_magic[] = { 0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67 }; printk(KERN_DEBUG PFX "CIS: "); for (i = 0; i < 16; i++) printk("%02X:", ioread8(card->attr_io + (i << 1))); printk("\n"); /* Verify whether a supported PC card is present */ /* FIXME: we probably need to be smarted about this */ for (i = 0; i < sizeof(cis_magic); i++) { if (cis_magic[i] != ioread8(card->attr_io + (i << 1))) { printk(KERN_ERR PFX "The CIS value of Prism2 PC " "card is unexpected\n"); return -ENODEV; } } /* bjoern: We need to tell the card to enable interrupts, in case the serial eprom didn't do this already. See the PLX9052 data book, p8-1 and 8-24 for reference. 
*/ csr_reg = ioread32(card->bridge_io + PLX_INTCSR); if (!(csr_reg & PLX_INTCSR_INTEN)) { csr_reg |= PLX_INTCSR_INTEN; iowrite32(csr_reg, card->bridge_io + PLX_INTCSR); csr_reg = ioread32(card->bridge_io + PLX_INTCSR); if (!(csr_reg & PLX_INTCSR_INTEN)) { printk(KERN_ERR PFX "Cannot enable interrupts\n"); return -EIO; } } return 0; } static int orinoco_plx_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; struct orinoco_private *priv; struct orinoco_pci_card *card; void __iomem *hermes_io, *attr_io, *bridge_io; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR PFX "Cannot enable PCI device\n"); return err; } err = pci_request_regions(pdev, DRIVER_NAME); if (err) { printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); goto fail_resources; } bridge_io = pci_iomap(pdev, 1, 0); if (!bridge_io) { printk(KERN_ERR PFX "Cannot map bridge registers\n"); err = -EIO; goto fail_map_bridge; } attr_io = pci_iomap(pdev, 2, 0); if (!attr_io) { printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n"); err = -EIO; goto fail_map_attr; } hermes_io = pci_iomap(pdev, 3, 0); if (!hermes_io) { printk(KERN_ERR PFX "Cannot map chipset registers\n"); err = -EIO; goto fail_map_hermes; } /* Allocate network device */ priv = alloc_orinocodev(sizeof(*card), &pdev->dev, orinoco_plx_cor_reset, NULL); if (!priv) { printk(KERN_ERR PFX "Cannot allocate network device\n"); err = -ENOMEM; goto fail_alloc; } card = priv->card; card->bridge_io = bridge_io; card->attr_io = attr_io; hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED, DRIVER_NAME, priv); if (err) { printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq); err = -EBUSY; goto fail_irq; } err = orinoco_plx_hw_init(card); if (err) { printk(KERN_ERR PFX "Hardware initialization failed\n"); goto fail; } err = orinoco_plx_cor_reset(priv); if (err) { printk(KERN_ERR PFX "Initial reset failed\n"); goto fail; } err = orinoco_init(priv); if (err) { printk(KERN_ERR PFX "orinoco_init() failed\n"); goto fail; } err = orinoco_if_add(priv, 0, 0, NULL); if (err) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); goto fail_wiphy; } pci_set_drvdata(pdev, priv); return 0; fail_wiphy: wiphy_unregister(priv_to_wiphy(priv)); fail: free_irq(pdev->irq, priv); fail_irq: free_orinocodev(priv); fail_alloc: pci_iounmap(pdev, hermes_io); fail_map_hermes: pci_iounmap(pdev, attr_io); fail_map_attr: pci_iounmap(pdev, bridge_io); fail_map_bridge: pci_release_regions(pdev); fail_resources: pci_disable_device(pdev); return err; } static void orinoco_plx_remove_one(struct pci_dev *pdev) { struct orinoco_private *priv = pci_get_drvdata(pdev); struct orinoco_pci_card *card = priv->card; orinoco_if_del(priv); wiphy_unregister(priv_to_wiphy(priv)); free_irq(pdev->irq, priv); free_orinocodev(priv); pci_iounmap(pdev, priv->hw.iobase); pci_iounmap(pdev, card->attr_io); pci_iounmap(pdev, card->bridge_io); pci_release_regions(pdev); pci_disable_device(pdev); } static const struct pci_device_id orinoco_plx_id_table[] = { {0x111a, 0x1023, PCI_ANY_ID, PCI_ANY_ID,}, /* Siemens SpeedStream SS1023 */ {0x1385, 0x4100, PCI_ANY_ID, PCI_ANY_ID,}, /* Netgear MA301 */ {0x15e8, 0x0130, PCI_ANY_ID, PCI_ANY_ID,}, /* Correga - does this work? 
*/ {0x1638, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* SMC EZConnect SMC2602W, Eumitcom PCI WL11000, Addtron AWA-100 */ {0x16ab, 0x1100, PCI_ANY_ID, PCI_ANY_ID,}, /* Global Sun Tech GL24110P */ {0x16ab, 0x1101, PCI_ANY_ID, PCI_ANY_ID,}, /* Reported working, but unknown */ {0x16ab, 0x1102, PCI_ANY_ID, PCI_ANY_ID,}, /* Linksys WDT11 */ {0x16ec, 0x3685, PCI_ANY_ID, PCI_ANY_ID,}, /* USR 2415 */ {0xec80, 0xec00, PCI_ANY_ID, PCI_ANY_ID,}, /* Belkin F5D6000 tested by Brendan W. McAdams <rit AT jacked-in.org> */ {0x10b7, 0x7770, PCI_ANY_ID, PCI_ANY_ID,}, /* 3Com AirConnect PCI tested by Damien Persohn <damien AT persohn.net> */ {0,}, }; MODULE_DEVICE_TABLE(pci, orinoco_plx_id_table); static struct pci_driver orinoco_plx_driver = { .name = DRIVER_NAME, .id_table = orinoco_plx_id_table, .probe = orinoco_plx_init_one, .remove = orinoco_plx_remove_one, .driver.pm = &orinoco_pci_pm_ops, }; static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION " (Pavel Roskin <[email protected]>," " David Gibson <[email protected]>," " Daniel Barlow <[email protected]>)"; MODULE_AUTHOR("Daniel Barlow <[email protected]>"); MODULE_DESCRIPTION("Driver for wireless LAN cards using the PLX9052 PCI bridge"); MODULE_LICENSE("Dual MPL/GPL"); static int __init orinoco_plx_init(void) { printk(KERN_DEBUG "%s\n", version); return pci_register_driver(&orinoco_plx_driver); } static void __exit orinoco_plx_exit(void) { pci_unregister_driver(&orinoco_plx_driver); } module_init(orinoco_plx_init); module_exit(orinoco_plx_exit);
linux-master
drivers/net/wireless/intersil/orinoco/orinoco_plx.c
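The long header comment in orinoco_plx.c explains that the CIS occupies only the low byte of each 16-bit word in the PCMCIA attribute space, so the driver reads every other byte (offsets 0, 2, 4, ...) and compares them against a fixed magic sequence before touching the card, which is exactly what orinoco_plx_hw_init() does with ioread8(card->attr_io + (i << 1)). Here is a minimal userspace sketch of that check; it reuses the cis_magic values from the driver, but the simulated attribute array stands in for the memory-mapped window and is an assumption for the example.

/* Illustrative userspace sketch (not kernel code): verify the Prism2
 * CIS magic by sampling every other byte of the attribute space, as
 * described in the orinoco_plx.c header comment.
 */
#include <stdio.h>
#include <stddef.h>

static const unsigned char cis_magic[] = {
	0x01, 0x03, 0x00, 0x00, 0xff, 0x17, 0x04, 0x67
};

/* CIS data sits in the low byte of each 16-bit word, so the valid
 * bytes live at even offsets 0, 2, 4, ... of the attribute window. */
static int cis_matches(const unsigned char *attr)
{
	size_t i;

	for (i = 0; i < sizeof(cis_magic); i++) {
		if (cis_magic[i] != attr[i << 1])
			return 0;
	}
	return 1;
}

int main(void)
{
	/* Even offsets carry the CIS bytes; odd offsets are don't-care. */
	static const unsigned char attr[] = {
		0x01, 0xff, 0x03, 0xff, 0x00, 0xff, 0x00, 0xff,
		0xff, 0xff, 0x17, 0xff, 0x04, 0xff, 0x67, 0xff
	};

	printf("CIS %s\n", cis_matches(attr) ? "matches" : "does not match");
	return 0;
}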
/* orinoco_pci.c * * Driver for Prism 2.5/3 devices that have a direct PCI interface * (i.e. these are not PCMCIA cards in a PCMCIA-to-PCI bridge). * The card contains only one PCI region, which contains all the usual * hermes registers, as well as the COR register. * * Current maintainers are: * Pavel Roskin <proski AT gnu.org> * and David Gibson <hermes AT gibson.dropbear.id.au> * * Some of this code is borrowed from orinoco_plx.c * Copyright (C) 2001 Daniel Barlow <dan AT telent.net> * Some of this code is "inspired" by linux-wlan-ng-0.1.10, but nothing * has been copied from it. linux-wlan-ng-0.1.10 is originally : * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. * This file originally written by: * Copyright (C) 2001 Jean Tourrilhes <jt AT hpl.hp.com> * And is now maintained by: * (C) Copyright David Gibson, IBM Corp. 2002-2003. * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License * at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and * limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License version 2 (the "GPL"), in * which case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the MPL, indicate your decision by * deleting the provisions above and replace them with the notice and * other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file * under either the MPL or the GPL. */ #define DRIVER_NAME "orinoco_pci" #define PFX DRIVER_NAME ": " #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include "orinoco.h" #include "orinoco_pci.h" /* Offset of the COR register of the PCI card */ #define HERMES_PCI_COR (0x26) /* Bitmask to reset the card */ #define HERMES_PCI_COR_MASK (0x0080) /* Magic timeouts for doing the reset. * Those times are straight from wlan-ng, and it is claimed that they * are necessary. Alan will kill me. Take your time and grab a coffee. */ #define HERMES_PCI_COR_ONT (250) /* ms */ #define HERMES_PCI_COR_OFFT (500) /* ms */ #define HERMES_PCI_COR_BUSYT (500) /* ms */ /* * Do a soft reset of the card using the Configuration Option Register * We need this to get going... * This is the part of the code that is strongly inspired from wlan-ng * * Note : This code is done with irq enabled. This mean that many * interrupts will occur while we are there. This is why we use the * jiffies to regulate time instead of a straight mdelay(). Usually we * need only around 245 iteration of the loop to do 250 ms delay. * * Note bis : Don't try to access HERMES_CMD during the reset phase. * It just won't work ! 
*/ static int orinoco_pci_cor_reset(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; unsigned long timeout; u16 reg; /* Assert the reset until the card notices */ hermes_write_regn(hw, PCI_COR, HERMES_PCI_COR_MASK); mdelay(HERMES_PCI_COR_ONT); /* Give time for the card to recover from this hard effort */ hermes_write_regn(hw, PCI_COR, 0x0000); mdelay(HERMES_PCI_COR_OFFT); /* The card is ready when it's no longer busy */ timeout = jiffies + msecs_to_jiffies(HERMES_PCI_COR_BUSYT); reg = hermes_read_regn(hw, CMD); while (time_before(jiffies, timeout) && (reg & HERMES_CMD_BUSY)) { mdelay(1); reg = hermes_read_regn(hw, CMD); } /* Still busy? */ if (reg & HERMES_CMD_BUSY) { printk(KERN_ERR PFX "Busy timeout\n"); return -ETIMEDOUT; } return 0; } static int orinoco_pci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; struct orinoco_private *priv; struct orinoco_pci_card *card; void __iomem *hermes_io; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR PFX "Cannot enable PCI device\n"); return err; } err = pci_request_regions(pdev, DRIVER_NAME); if (err) { printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); goto fail_resources; } hermes_io = pci_iomap(pdev, 0, 0); if (!hermes_io) { printk(KERN_ERR PFX "Cannot remap chipset registers\n"); err = -EIO; goto fail_map_hermes; } /* Allocate network device */ priv = alloc_orinocodev(sizeof(*card), &pdev->dev, orinoco_pci_cor_reset, NULL); if (!priv) { printk(KERN_ERR PFX "Cannot allocate network device\n"); err = -ENOMEM; goto fail_alloc; } card = priv->card; hermes_struct_init(&priv->hw, hermes_io, HERMES_32BIT_REGSPACING); err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED, DRIVER_NAME, priv); if (err) { printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq); err = -EBUSY; goto fail_irq; } err = orinoco_pci_cor_reset(priv); if (err) { printk(KERN_ERR PFX "Initial reset failed\n"); goto fail; } err = orinoco_init(priv); if (err) { printk(KERN_ERR PFX "orinoco_init() failed\n"); goto fail; } err = orinoco_if_add(priv, 0, 0, NULL); if (err) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); goto fail_wiphy; } pci_set_drvdata(pdev, priv); return 0; fail_wiphy: wiphy_unregister(priv_to_wiphy(priv)); fail: free_irq(pdev->irq, priv); fail_irq: free_orinocodev(priv); fail_alloc: pci_iounmap(pdev, hermes_io); fail_map_hermes: pci_release_regions(pdev); fail_resources: pci_disable_device(pdev); return err; } static void orinoco_pci_remove_one(struct pci_dev *pdev) { struct orinoco_private *priv = pci_get_drvdata(pdev); orinoco_if_del(priv); wiphy_unregister(priv_to_wiphy(priv)); free_irq(pdev->irq, priv); free_orinocodev(priv); pci_iounmap(pdev, priv->hw.iobase); pci_release_regions(pdev); pci_disable_device(pdev); } static const struct pci_device_id orinoco_pci_id_table[] = { /* Intersil Prism 3 */ {0x1260, 0x3872, PCI_ANY_ID, PCI_ANY_ID,}, /* Intersil Prism 2.5 */ {0x1260, 0x3873, PCI_ANY_ID, PCI_ANY_ID,}, /* Samsung MagicLAN SWL-2210P */ {0x167d, 0xa000, PCI_ANY_ID, PCI_ANY_ID,}, {0,}, }; MODULE_DEVICE_TABLE(pci, orinoco_pci_id_table); static struct pci_driver orinoco_pci_driver = { .name = DRIVER_NAME, .id_table = orinoco_pci_id_table, .probe = orinoco_pci_init_one, .remove = orinoco_pci_remove_one, .driver.pm = &orinoco_pci_pm_ops, }; static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION " (Pavel Roskin <[email protected]>," " David Gibson <[email protected]> &" " Jean Tourrilhes <[email protected]>)"; MODULE_AUTHOR("Pavel Roskin <[email protected]> &" " David Gibson <[email 
protected]>"); MODULE_DESCRIPTION("Driver for wireless LAN cards using direct PCI interface"); MODULE_LICENSE("Dual MPL/GPL"); static int __init orinoco_pci_init(void) { printk(KERN_DEBUG "%s\n", version); return pci_register_driver(&orinoco_pci_driver); } static void __exit orinoco_pci_exit(void) { pci_unregister_driver(&orinoco_pci_driver); } module_init(orinoco_pci_init); module_exit(orinoco_pci_exit);
linux-master
drivers/net/wireless/intersil/orinoco/orinoco_pci.c
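The comment above orinoco_pci_cor_reset() stresses that the reset runs with interrupts enabled, so the busy-wait is bounded by a jiffies deadline rather than a blind mdelay(). Below is a rough userspace sketch of that poll-until-ready-or-timeout pattern; read_cmd_reg() and CMD_BUSY are stand-ins (assumptions for the example) for hermes_read_regn(hw, CMD) and HERMES_CMD_BUSY, and the fake register simply "clears" after a few polls.

/* Illustrative userspace sketch (not kernel code): poll a busy bit
 * until it clears or a wall-clock deadline expires, mirroring the
 * jiffies/time_before() loop in orinoco_pci_cor_reset().
 */
#define _POSIX_C_SOURCE 200809L
#include <stdio.h>
#include <time.h>

#define CMD_BUSY	0x8000
#define BUSY_TIMEOUT_MS	500

/* Fake register read: pretend the card finishes after a few polls. */
static unsigned int read_cmd_reg(void)
{
	static int polls;

	return (++polls < 5) ? CMD_BUSY : 0;
}

static long elapsed_ms(const struct timespec *start)
{
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);
	return (now.tv_sec - start->tv_sec) * 1000 +
	       (now.tv_nsec - start->tv_nsec) / 1000000;
}

int main(void)
{
	struct timespec start;
	unsigned int reg;

	clock_gettime(CLOCK_MONOTONIC, &start);

	reg = read_cmd_reg();
	while (elapsed_ms(&start) < BUSY_TIMEOUT_MS && (reg & CMD_BUSY))
		reg = read_cmd_reg();

	if (reg & CMD_BUSY) {
		printf("busy timeout\n");
		return 1;
	}
	printf("card ready\n");
	return 0;
}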
/* airport.c * * A driver for "Hermes" chipset based Apple Airport wireless * card. * * Copyright notice & release notes in file main.c * * Note specific to airport stub: * * 0.05 : first version of the new split driver * 0.06 : fix possible hang on powerup, add sleep support */ #define DRIVER_NAME "airport" #define PFX DRIVER_NAME ": " #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/mod_devicetable.h> #include <asm/pmac_feature.h> #include "orinoco.h" #define AIRPORT_IO_LEN (0x1000) /* one page */ struct airport { struct macio_dev *mdev; void __iomem *vaddr; unsigned int irq; int irq_requested; int ndev_registered; }; static int airport_suspend(struct macio_dev *mdev, pm_message_t state) { struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev); struct net_device *dev = priv->ndev; struct airport *card = priv->card; unsigned long flags; int err; printk(KERN_DEBUG "%s: Airport entering sleep mode\n", dev->name); err = orinoco_lock(priv, &flags); if (err) { printk(KERN_ERR "%s: hw_unavailable on PBOOK_SLEEP_NOW\n", dev->name); return 0; } orinoco_down(priv); orinoco_unlock(priv, &flags); disable_irq(card->irq); pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 0); return 0; } static int airport_resume(struct macio_dev *mdev) { struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev); struct net_device *dev = priv->ndev; struct airport *card = priv->card; unsigned long flags; int err; printk(KERN_DEBUG "%s: Airport waking up\n", dev->name); pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 1); msleep(200); enable_irq(card->irq); priv->hw.ops->lock_irqsave(&priv->lock, &flags); err = orinoco_up(priv); priv->hw.ops->unlock_irqrestore(&priv->lock, &flags); return err; } static int airport_detach(struct macio_dev *mdev) { struct orinoco_private *priv = dev_get_drvdata(&mdev->ofdev.dev); struct airport *card = priv->card; if (card->ndev_registered) orinoco_if_del(priv); card->ndev_registered = 0; if (card->irq_requested) free_irq(card->irq, priv); card->irq_requested = 0; if (card->vaddr) iounmap(card->vaddr); card->vaddr = NULL; macio_release_resource(mdev, 0); pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 0); ssleep(1); macio_set_drvdata(mdev, NULL); free_orinocodev(priv); return 0; } static int airport_hard_reset(struct orinoco_private *priv) { /* It would be nice to power cycle the Airport for a real hard * reset, but for some reason although it appears to * re-initialize properly, it falls in a screaming heap * shortly afterwards. */ #if 0 struct airport *card = priv->card; /* Vitally important. If we don't do this it seems we get an * interrupt somewhere during the power cycle, since * hw_unavailable is already set it doesn't get ACKed, we get * into an interrupt loop and the PMU decides to turn us * off. 
*/ disable_irq(card->irq); pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(card->mdev), 0, 0); ssleep(1); pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(card->mdev), 0, 1); ssleep(1); enable_irq(card->irq); ssleep(1); #endif return 0; } static int airport_attach(struct macio_dev *mdev, const struct of_device_id *match) { struct orinoco_private *priv; struct airport *card; unsigned long phys_addr; struct hermes *hw; if (macio_resource_count(mdev) < 1 || macio_irq_count(mdev) < 1) { printk(KERN_ERR PFX "Wrong interrupt/addresses in OF tree\n"); return -ENODEV; } /* Allocate space for private device-specific data */ priv = alloc_orinocodev(sizeof(*card), &mdev->ofdev.dev, airport_hard_reset, NULL); if (!priv) { printk(KERN_ERR PFX "Cannot allocate network device\n"); return -ENODEV; } card = priv->card; hw = &priv->hw; card->mdev = mdev; if (macio_request_resource(mdev, 0, DRIVER_NAME)) { printk(KERN_ERR PFX "can't request IO resource !\n"); free_orinocodev(priv); return -EBUSY; } macio_set_drvdata(mdev, priv); /* Setup interrupts & base address */ card->irq = macio_irq(mdev, 0); phys_addr = macio_resource_start(mdev, 0); /* Physical address */ printk(KERN_DEBUG PFX "Physical address %lx\n", phys_addr); card->vaddr = ioremap(phys_addr, AIRPORT_IO_LEN); if (!card->vaddr) { printk(KERN_ERR PFX "ioremap() failed\n"); goto failed; } hermes_struct_init(hw, card->vaddr, HERMES_16BIT_REGSPACING); /* Power up card */ pmac_call_feature(PMAC_FTR_AIRPORT_ENABLE, macio_get_of_node(mdev), 0, 1); ssleep(1); /* Reset it before we get the interrupt */ hw->ops->init(hw); if (request_irq(card->irq, orinoco_interrupt, 0, DRIVER_NAME, priv)) { printk(KERN_ERR PFX "Couldn't get IRQ %d\n", card->irq); goto failed; } card->irq_requested = 1; /* Initialise the main driver */ if (orinoco_init(priv) != 0) { printk(KERN_ERR PFX "orinoco_init() failed\n"); goto failed; } /* Register an interface with the stack */ if (orinoco_if_add(priv, phys_addr, card->irq, NULL) != 0) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); goto failed; } card->ndev_registered = 1; return 0; failed: airport_detach(mdev); return -ENODEV; } /* airport_attach */ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION " (Benjamin Herrenschmidt <[email protected]>)"; MODULE_AUTHOR("Benjamin Herrenschmidt <[email protected]>"); MODULE_DESCRIPTION("Driver for the Apple Airport wireless card."); MODULE_LICENSE("Dual MPL/GPL"); static const struct of_device_id airport_match[] = { { .name = "radio", }, {}, }; MODULE_DEVICE_TABLE(of, airport_match); static struct macio_driver airport_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, .of_match_table = airport_match, }, .probe = airport_attach, .remove = airport_detach, .suspend = airport_suspend, .resume = airport_resume, }; static int __init init_airport(void) { printk(KERN_DEBUG "%s\n", version); return macio_register_driver(&airport_driver); } static void __exit exit_airport(void) { macio_unregister_driver(&airport_driver); } module_init(init_airport); module_exit(exit_airport);
linux-master
drivers/net/wireless/intersil/orinoco/airport.c
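One design point worth noting in airport.c: airport_attach() funnels every failure to a single label that calls airport_detach(), and detach releases only what the irq_requested and ndev_registered flags say was actually acquired, so the same routine serves both the error path and the normal remove path. The following is a small userspace sketch of that flag-guarded, idempotent teardown style; the resource names and printfs are invented purely for illustration.

/* Illustrative userspace sketch (not kernel code): one cleanup routine
 * shared by the error path and the remove path, guarded by flags that
 * record which resources were actually acquired.
 */
#include <stdio.h>
#include <stdbool.h>

struct card {
	bool irq_requested;
	bool ndev_registered;
};

static void detach(struct card *c)
{
	if (c->ndev_registered)
		printf("unregister net device\n");
	c->ndev_registered = false;

	if (c->irq_requested)
		printf("free irq\n");
	c->irq_requested = false;
}

static int attach(struct card *c, bool fail_midway)
{
	printf("request irq\n");
	c->irq_requested = true;

	if (fail_midway)
		goto failed;		/* partial setup: only the IRQ is held */

	printf("register net device\n");
	c->ndev_registered = true;
	return 0;

failed:
	detach(c);			/* releases exactly what was acquired */
	return -1;
}

int main(void)
{
	struct card c = { 0 };

	attach(&c, true);	/* error path exercises the guarded teardown */
	attach(&c, false);
	detach(&c);		/* normal remove path */
	return 0;
}

The trade-off is a couple of bookkeeping flags in struct airport in exchange for a single cleanup routine instead of a ladder of goto labels in the attach function.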
/* main.c - (formerly known as dldwd_cs.c, orinoco_cs.c and orinoco.c) * * A driver for Hermes or Prism 2 chipset based PCMCIA wireless * adaptors, with Lucent/Agere, Intersil or Symbol firmware. * * Current maintainers (as of 29 September 2003) are: * Pavel Roskin <proski AT gnu.org> * and David Gibson <hermes AT gibson.dropbear.id.au> * * (C) Copyright David Gibson, IBM Corporation 2001-2003. * Copyright (C) 2000 David Gibson, Linuxcare Australia. * With some help from : * Copyright (C) 2001 Jean Tourrilhes, HP Labs * Copyright (C) 2001 Benjamin Herrenschmidt * * Based on dummy_cs.c 1.27 2000/06/12 21:27:25 * * Portions based on wvlan_cs.c 1.0.6, Copyright Andreas Neuhaus <andy * AT fasta.fh-dortmund.de> * http://www.stud.fh-dortmund.de/~andy/wvlan/ * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License * at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and * limitations under the License. * * The initial developer of the original code is David A. Hinds * <dahinds AT users.sourceforge.net>. Portions created by David * A. Hinds are Copyright (C) 1999 David A. Hinds. All Rights * Reserved. * * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License version 2 (the "GPL"), in * which case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the MPL, indicate your decision by * deleting the provisions above and replace them with the notice and * other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file * under either the MPL or the GPL. */ /* * TODO * o Handle de-encapsulation within network layer, provide 802.11 * headers (patch from Thomas 'Dent' Mirlacher) * o Fix possible races in SPY handling. * o Disconnect wireless extensions from fundamental configuration. * o (maybe) Software WEP support (patch from Stano Meduna). * o (maybe) Use multiple Tx buffers - driver handling queue * rather than firmware. */ /* Locking and synchronization: * * The basic principle is that everything is serialized through a * single spinlock, priv->lock. The lock is used in user, bh and irq * context, so when taken outside hardirq context it should always be * taken with interrupts disabled. The lock protects both the * hardware and the struct orinoco_private. * * Another flag, priv->hw_unavailable indicates that the hardware is * unavailable for an extended period of time (e.g. suspended, or in * the middle of a hard reset). This flag is protected by the * spinlock. All code which touches the hardware should check the * flag after taking the lock, and if it is set, give up on whatever * they are doing and drop the lock again. The orinoco_lock() * function handles this (it unlocks and returns -EBUSY if * hw_unavailable is non-zero). 
*/ #define DRIVER_NAME "orinoco" #include <linux/module.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/device.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/suspend.h> #include <linux/if_arp.h> #include <linux/wireless.h> #include <linux/ieee80211.h> #include <net/iw_handler.h> #include <net/cfg80211.h> #include "hermes_rid.h" #include "hermes_dld.h" #include "hw.h" #include "scan.h" #include "mic.h" #include "fw.h" #include "wext.h" #include "cfg.h" #include "main.h" #include "orinoco.h" /********************************************************************/ /* Module information */ /********************************************************************/ MODULE_AUTHOR("Pavel Roskin <[email protected]> & " "David Gibson <[email protected]>"); MODULE_DESCRIPTION("Driver for Lucent Orinoco, Prism II based " "and similar wireless cards"); MODULE_LICENSE("Dual MPL/GPL"); /* Level of debugging. Used in the macros in orinoco.h */ #ifdef ORINOCO_DEBUG int orinoco_debug = ORINOCO_DEBUG; EXPORT_SYMBOL(orinoco_debug); module_param(orinoco_debug, int, 0644); MODULE_PARM_DESC(orinoco_debug, "Debug level"); #endif static bool suppress_linkstatus; /* = 0 */ module_param(suppress_linkstatus, bool, 0644); MODULE_PARM_DESC(suppress_linkstatus, "Don't log link status changes"); static int ignore_disconnect; /* = 0 */ module_param(ignore_disconnect, int, 0644); MODULE_PARM_DESC(ignore_disconnect, "Don't report lost link to the network layer"); int force_monitor; /* = 0 */ module_param(force_monitor, int, 0644); MODULE_PARM_DESC(force_monitor, "Allow monitor mode for all firmware versions"); /********************************************************************/ /* Internal constants */ /********************************************************************/ /* 802.2 LLC/SNAP header used for Ethernet encapsulation over 802.11 */ static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00}; #define ENCAPS_OVERHEAD (sizeof(encaps_hdr) + 2) #define ORINOCO_MIN_MTU 256 #define ORINOCO_MAX_MTU (IEEE80211_MAX_DATA_LEN - ENCAPS_OVERHEAD) #define MAX_IRQLOOPS_PER_IRQ 10 #define MAX_IRQLOOPS_PER_JIFFY (20000 / HZ) /* Based on a guestimate of * how many events the * device could * legitimately generate */ #define DUMMY_FID 0xFFFF /*#define MAX_MULTICAST(priv) (priv->firmware_type == FIRMWARE_TYPE_AGERE ? 
\ HERMES_MAX_MULTICAST : 0)*/ #define MAX_MULTICAST(priv) (HERMES_MAX_MULTICAST) #define ORINOCO_INTEN (HERMES_EV_RX | HERMES_EV_ALLOC \ | HERMES_EV_TX | HERMES_EV_TXEXC \ | HERMES_EV_WTERR | HERMES_EV_INFO \ | HERMES_EV_INFDROP) /********************************************************************/ /* Data types */ /********************************************************************/ /* Beginning of the Tx descriptor, used in TxExc handling */ struct hermes_txexc_data { struct hermes_tx_descriptor desc; __le16 frame_ctl; __le16 duration_id; u8 addr1[ETH_ALEN]; } __packed; /* Rx frame header except compatibility 802.3 header */ struct hermes_rx_descriptor { /* Control */ __le16 status; __le32 time; u8 silence; u8 signal; u8 rate; u8 rxflow; __le32 reserved; /* 802.11 header */ __le16 frame_ctl; __le16 duration_id; u8 addr1[ETH_ALEN]; u8 addr2[ETH_ALEN]; u8 addr3[ETH_ALEN]; __le16 seq_ctl; u8 addr4[ETH_ALEN]; /* Data length */ __le16 data_len; } __packed; struct orinoco_rx_data { struct hermes_rx_descriptor *desc; struct sk_buff *skb; struct list_head list; }; struct orinoco_scan_data { void *buf; size_t len; int type; struct list_head list; }; /********************************************************************/ /* Function prototypes */ /********************************************************************/ static int __orinoco_set_multicast_list(struct net_device *dev); static int __orinoco_up(struct orinoco_private *priv); static int __orinoco_down(struct orinoco_private *priv); static int __orinoco_commit(struct orinoco_private *priv); /********************************************************************/ /* Internal helper functions */ /********************************************************************/ void set_port_type(struct orinoco_private *priv) { switch (priv->iw_mode) { case NL80211_IFTYPE_STATION: priv->port_type = 1; priv->createibss = 0; break; case NL80211_IFTYPE_ADHOC: if (priv->prefer_port3) { priv->port_type = 3; priv->createibss = 0; } else { priv->port_type = priv->ibss_port; priv->createibss = 1; } break; case NL80211_IFTYPE_MONITOR: priv->port_type = 3; priv->createibss = 0; break; default: printk(KERN_ERR "%s: Invalid priv->iw_mode in set_port_type()\n", priv->ndev->name); } } /********************************************************************/ /* Device methods */ /********************************************************************/ int orinoco_open(struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); unsigned long flags; int err; if (orinoco_lock(priv, &flags) != 0) return -EBUSY; err = __orinoco_up(priv); if (!err) priv->open = 1; orinoco_unlock(priv, &flags); return err; } EXPORT_SYMBOL(orinoco_open); int orinoco_stop(struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); int err = 0; /* We mustn't use orinoco_lock() here, because we need to be able to close the interface even if hw_unavailable is set (e.g. 
as we're released after a PC Card removal) */ orinoco_lock_irq(priv); priv->open = 0; err = __orinoco_down(priv); orinoco_unlock_irq(priv); return err; } EXPORT_SYMBOL(orinoco_stop); void orinoco_set_multicast_list(struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); unsigned long flags; if (orinoco_lock(priv, &flags) != 0) { printk(KERN_DEBUG "%s: orinoco_set_multicast_list() " "called when hw_unavailable\n", dev->name); return; } __orinoco_set_multicast_list(dev); orinoco_unlock(priv, &flags); } EXPORT_SYMBOL(orinoco_set_multicast_list); int orinoco_change_mtu(struct net_device *dev, int new_mtu) { struct orinoco_private *priv = ndev_priv(dev); /* MTU + encapsulation + header length */ if ((new_mtu + ENCAPS_OVERHEAD + sizeof(struct ieee80211_hdr)) > (priv->nicbuf_size - ETH_HLEN)) return -EINVAL; dev->mtu = new_mtu; return 0; } EXPORT_SYMBOL(orinoco_change_mtu); /********************************************************************/ /* Tx path */ /********************************************************************/ /* Add encapsulation and MIC to the existing SKB. * The main xmit routine will then send the whole lot to the card. * Need 8 bytes headroom * Need 8 bytes tailroom * * With encapsulated ethernet II frame * -------- * 803.3 header (14 bytes) * dst[6] * -------- src[6] * 803.3 header (14 bytes) len[2] * dst[6] 803.2 header (8 bytes) * src[6] encaps[6] * len[2] <- leave alone -> len[2] * -------- -------- <-- 0 * Payload Payload * ... ... * * -------- -------- * MIC (8 bytes) * -------- * * returns 0 on success, -ENOMEM on error. */ int orinoco_process_xmit_skb(struct sk_buff *skb, struct net_device *dev, struct orinoco_private *priv, int *tx_control, u8 *mic_buf) { struct orinoco_tkip_key *key; struct ethhdr *eh; int do_mic; key = (struct orinoco_tkip_key *) priv->keys[priv->tx_key].key; do_mic = ((priv->encode_alg == ORINOCO_ALG_TKIP) && (key != NULL)); if (do_mic) *tx_control |= (priv->tx_key << HERMES_MIC_KEY_ID_SHIFT) | HERMES_TXCTRL_MIC; eh = (struct ethhdr *)skb->data; /* Encapsulate Ethernet-II frames */ if (ntohs(eh->h_proto) > ETH_DATA_LEN) { /* Ethernet-II frame */ struct header_struct { struct ethhdr eth; /* 802.3 header */ u8 encap[6]; /* 802.2 header */ } __packed hdr; int len = skb->len + sizeof(encaps_hdr) - (2 * ETH_ALEN); if (skb_headroom(skb) < ENCAPS_OVERHEAD) { if (net_ratelimit()) printk(KERN_ERR "%s: Not enough headroom for 802.2 headers %d\n", dev->name, skb_headroom(skb)); return -ENOMEM; } /* Fill in new header */ memcpy(&hdr.eth, eh, 2 * ETH_ALEN); hdr.eth.h_proto = htons(len); memcpy(hdr.encap, encaps_hdr, sizeof(encaps_hdr)); /* Make room for the new header, and copy it in */ eh = skb_push(skb, ENCAPS_OVERHEAD); memcpy(eh, &hdr, sizeof(hdr)); } /* Calculate Michael MIC */ if (do_mic) { size_t len = skb->len - ETH_HLEN; u8 *mic = &mic_buf[0]; /* Have to write to an even address, so copy the spare * byte across */ if (skb->len % 2) { *mic = skb->data[skb->len - 1]; mic++; } orinoco_mic(priv->tx_tfm_mic, key->tx_mic, eh->h_dest, eh->h_source, 0 /* priority */, skb->data + ETH_HLEN, len, mic); } return 0; } EXPORT_SYMBOL(orinoco_process_xmit_skb); static netdev_tx_t orinoco_xmit(struct sk_buff *skb, struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); struct net_device_stats *stats = &dev->stats; struct hermes *hw = &priv->hw; int err = 0; u16 txfid = priv->txfid; int tx_control; unsigned long flags; u8 mic_buf[MICHAEL_MIC_LEN + 1]; if (!netif_running(dev)) { printk(KERN_ERR "%s: Tx on stopped device!\n", 
dev->name); return NETDEV_TX_BUSY; } if (netif_queue_stopped(dev)) { printk(KERN_DEBUG "%s: Tx while transmitter busy!\n", dev->name); return NETDEV_TX_BUSY; } if (orinoco_lock(priv, &flags) != 0) { printk(KERN_ERR "%s: orinoco_xmit() called while hw_unavailable\n", dev->name); return NETDEV_TX_BUSY; } if (!netif_carrier_ok(dev) || (priv->iw_mode == NL80211_IFTYPE_MONITOR)) { /* Oops, the firmware hasn't established a connection, silently drop the packet (this seems to be the safest approach). */ goto drop; } /* Check packet length */ if (skb->len < ETH_HLEN) goto drop; tx_control = HERMES_TXCTRL_TX_OK | HERMES_TXCTRL_TX_EX; err = orinoco_process_xmit_skb(skb, dev, priv, &tx_control, &mic_buf[0]); if (err) goto drop; if (priv->has_alt_txcntl) { /* WPA enabled firmwares have tx_cntl at the end of * the 802.11 header. So write zeroed descriptor and * 802.11 header at the same time */ char desc[HERMES_802_3_OFFSET]; __le16 *txcntl = (__le16 *) &desc[HERMES_TXCNTL2_OFFSET]; memset(&desc, 0, sizeof(desc)); *txcntl = cpu_to_le16(tx_control); err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), txfid, 0); if (err) { if (net_ratelimit()) printk(KERN_ERR "%s: Error %d writing Tx " "descriptor to BAP\n", dev->name, err); goto busy; } } else { struct hermes_tx_descriptor desc; memset(&desc, 0, sizeof(desc)); desc.tx_control = cpu_to_le16(tx_control); err = hw->ops->bap_pwrite(hw, USER_BAP, &desc, sizeof(desc), txfid, 0); if (err) { if (net_ratelimit()) printk(KERN_ERR "%s: Error %d writing Tx " "descriptor to BAP\n", dev->name, err); goto busy; } /* Clear the 802.11 header and data length fields - some * firmwares (e.g. Lucent/Agere 8.xx) appear to get confused * if this isn't done. */ hermes_clear_words(hw, HERMES_DATA0, HERMES_802_3_OFFSET - HERMES_802_11_OFFSET); } err = hw->ops->bap_pwrite(hw, USER_BAP, skb->data, skb->len, txfid, HERMES_802_3_OFFSET); if (err) { printk(KERN_ERR "%s: Error %d writing packet to BAP\n", dev->name, err); goto busy; } if (tx_control & HERMES_TXCTRL_MIC) { size_t offset = HERMES_802_3_OFFSET + skb->len; size_t len = MICHAEL_MIC_LEN; if (offset % 2) { offset--; len++; } err = hw->ops->bap_pwrite(hw, USER_BAP, &mic_buf[0], len, txfid, offset); if (err) { printk(KERN_ERR "%s: Error %d writing MIC to BAP\n", dev->name, err); goto busy; } } /* Finally, we actually initiate the send */ netif_stop_queue(dev); err = hw->ops->cmd_wait(hw, HERMES_CMD_TX | HERMES_CMD_RECL, txfid, NULL); if (err) { netif_start_queue(dev); if (net_ratelimit()) printk(KERN_ERR "%s: Error %d transmitting packet\n", dev->name, err); goto busy; } stats->tx_bytes += HERMES_802_3_OFFSET + skb->len; goto ok; drop: stats->tx_errors++; stats->tx_dropped++; ok: orinoco_unlock(priv, &flags); dev_kfree_skb(skb); return NETDEV_TX_OK; busy: if (err == -EIO) schedule_work(&priv->reset_work); orinoco_unlock(priv, &flags); return NETDEV_TX_BUSY; } static void __orinoco_ev_alloc(struct net_device *dev, struct hermes *hw) { struct orinoco_private *priv = ndev_priv(dev); u16 fid = hermes_read_regn(hw, ALLOCFID); if (fid != priv->txfid) { if (fid != DUMMY_FID) printk(KERN_WARNING "%s: Allocate event on unexpected fid (%04X)\n", dev->name, fid); return; } hermes_write_regn(hw, ALLOCFID, DUMMY_FID); } static void __orinoco_ev_tx(struct net_device *dev, struct hermes *hw) { dev->stats.tx_packets++; netif_wake_queue(dev); hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID); } static void __orinoco_ev_txexc(struct net_device *dev, struct hermes *hw) { struct net_device_stats *stats = &dev->stats; u16 fid = 
hermes_read_regn(hw, TXCOMPLFID); u16 status; struct hermes_txexc_data hdr; int err = 0; if (fid == DUMMY_FID) return; /* Nothing's really happened */ /* Read part of the frame header - we need status and addr1 */ err = hw->ops->bap_pread(hw, IRQ_BAP, &hdr, sizeof(struct hermes_txexc_data), fid, 0); hermes_write_regn(hw, TXCOMPLFID, DUMMY_FID); stats->tx_errors++; if (err) { printk(KERN_WARNING "%s: Unable to read descriptor on Tx error " "(FID=%04X error %d)\n", dev->name, fid, err); return; } DEBUG(1, "%s: Tx error, err %d (FID=%04X)\n", dev->name, err, fid); /* We produce a TXDROP event only for retry or lifetime * exceeded, because that's the only status that really mean * that this particular node went away. * Other errors means that *we* screwed up. - Jean II */ status = le16_to_cpu(hdr.desc.status); if (status & (HERMES_TXSTAT_RETRYERR | HERMES_TXSTAT_AGEDERR)) { union iwreq_data wrqu; /* Copy 802.11 dest address. * We use the 802.11 header because the frame may * not be 802.3 or may be mangled... * In Ad-Hoc mode, it will be the node address. * In managed mode, it will be most likely the AP addr * User space will figure out how to convert it to * whatever it needs (IP address or else). * - Jean II */ memcpy(wrqu.addr.sa_data, hdr.addr1, ETH_ALEN); wrqu.addr.sa_family = ARPHRD_ETHER; /* Send event to user space */ wireless_send_event(dev, IWEVTXDROP, &wrqu, NULL); } netif_wake_queue(dev); } void orinoco_tx_timeout(struct net_device *dev, unsigned int txqueue) { struct orinoco_private *priv = ndev_priv(dev); struct net_device_stats *stats = &dev->stats; struct hermes *hw = &priv->hw; printk(KERN_WARNING "%s: Tx timeout! " "ALLOCFID=%04x, TXCOMPLFID=%04x, EVSTAT=%04x\n", dev->name, hermes_read_regn(hw, ALLOCFID), hermes_read_regn(hw, TXCOMPLFID), hermes_read_regn(hw, EVSTAT)); stats->tx_errors++; schedule_work(&priv->reset_work); } EXPORT_SYMBOL(orinoco_tx_timeout); /********************************************************************/ /* Rx path (data frames) */ /********************************************************************/ /* Does the frame have a SNAP header indicating it should be * de-encapsulated to Ethernet-II? */ static inline int is_ethersnap(void *_hdr) { u8 *hdr = _hdr; /* We de-encapsulate all packets which, a) have SNAP headers * (i.e. SSAP=DSAP=0xaa and CTRL=0x3 in the 802.2 LLC header * and where b) the OUI of the SNAP header is 00:00:00 or * 00:00:f8 - we need both because different APs appear to use * different OUIs for some reason */ return (memcmp(hdr, &encaps_hdr, 5) == 0) && ((hdr[5] == 0x00) || (hdr[5] == 0xf8)); } static inline void orinoco_spy_gather(struct net_device *dev, u_char *mac, int level, int noise) { struct iw_quality wstats; wstats.level = level - 0x95; wstats.noise = noise - 0x95; wstats.qual = (level > noise) ? (level - noise) : 0; wstats.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM; /* Update spy records */ wireless_spy_update(dev, mac, &wstats); } static void orinoco_stat_gather(struct net_device *dev, struct sk_buff *skb, struct hermes_rx_descriptor *desc) { struct orinoco_private *priv = ndev_priv(dev); /* Using spy support with lots of Rx packets, like in an * infrastructure (AP), will really slow down everything, because * the MAC address must be compared to each entry of the spy list. * If the user really asks for it (set some address in the * spy list), we do it, but he will pay the price. * Note that to get here, you need both WIRELESS_SPY * compiled in AND some addresses in the list !!! 
*/ /* Note : gcc will optimise the whole section away if * WIRELESS_SPY is not defined... - Jean II */ if (SPY_NUMBER(priv)) { orinoco_spy_gather(dev, skb_mac_header(skb) + ETH_ALEN, desc->signal, desc->silence); } } /* * orinoco_rx_monitor - handle received monitor frames. * * Arguments: * dev network device * rxfid received FID * desc rx descriptor of the frame * * Call context: interrupt */ static void orinoco_rx_monitor(struct net_device *dev, u16 rxfid, struct hermes_rx_descriptor *desc) { u32 hdrlen = 30; /* return full header by default */ u32 datalen = 0; u16 fc; int err; int len; struct sk_buff *skb; struct orinoco_private *priv = ndev_priv(dev); struct net_device_stats *stats = &dev->stats; struct hermes *hw = &priv->hw; len = le16_to_cpu(desc->data_len); /* Determine the size of the header and the data */ fc = le16_to_cpu(desc->frame_ctl); switch (fc & IEEE80211_FCTL_FTYPE) { case IEEE80211_FTYPE_DATA: if ((fc & IEEE80211_FCTL_TODS) && (fc & IEEE80211_FCTL_FROMDS)) hdrlen = 30; else hdrlen = 24; datalen = len; break; case IEEE80211_FTYPE_MGMT: hdrlen = 24; datalen = len; break; case IEEE80211_FTYPE_CTL: switch (fc & IEEE80211_FCTL_STYPE) { case IEEE80211_STYPE_PSPOLL: case IEEE80211_STYPE_RTS: case IEEE80211_STYPE_CFEND: case IEEE80211_STYPE_CFENDACK: hdrlen = 16; break; case IEEE80211_STYPE_CTS: case IEEE80211_STYPE_ACK: hdrlen = 10; break; } break; default: /* Unknown frame type */ break; } /* sanity check the length */ if (datalen > IEEE80211_MAX_DATA_LEN + 12) { printk(KERN_DEBUG "%s: oversized monitor frame, " "data length = %d\n", dev->name, datalen); stats->rx_length_errors++; goto update_stats; } skb = dev_alloc_skb(hdrlen + datalen); if (!skb) { printk(KERN_WARNING "%s: Cannot allocate skb for monitor frame\n", dev->name); goto update_stats; } /* Copy the 802.11 header to the skb */ skb_put_data(skb, &(desc->frame_ctl), hdrlen); skb_reset_mac_header(skb); /* If any, copy the data from the card to the skb */ if (datalen > 0) { err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, datalen), ALIGN(datalen, 2), rxfid, HERMES_802_2_OFFSET); if (err) { printk(KERN_ERR "%s: error %d reading monitor frame\n", dev->name, err); goto drop; } } skb->dev = dev; skb->ip_summed = CHECKSUM_NONE; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = cpu_to_be16(ETH_P_802_2); stats->rx_packets++; stats->rx_bytes += skb->len; netif_rx(skb); return; drop: dev_kfree_skb_irq(skb); update_stats: stats->rx_errors++; stats->rx_dropped++; } void __orinoco_ev_rx(struct net_device *dev, struct hermes *hw) { struct orinoco_private *priv = ndev_priv(dev); struct net_device_stats *stats = &dev->stats; struct iw_statistics *wstats = &priv->wstats; struct sk_buff *skb = NULL; u16 rxfid, status; int length; struct hermes_rx_descriptor *desc; struct orinoco_rx_data *rx_data; int err; desc = kmalloc(sizeof(*desc), GFP_ATOMIC); if (!desc) goto update_stats; rxfid = hermes_read_regn(hw, RXFID); err = hw->ops->bap_pread(hw, IRQ_BAP, desc, sizeof(*desc), rxfid, 0); if (err) { printk(KERN_ERR "%s: error %d reading Rx descriptor. " "Frame dropped.\n", dev->name, err); goto update_stats; } status = le16_to_cpu(desc->status); if (status & HERMES_RXSTAT_BADCRC) { DEBUG(1, "%s: Bad CRC on Rx. Frame dropped.\n", dev->name); stats->rx_crc_errors++; goto update_stats; } /* Handle frames in monitor mode */ if (priv->iw_mode == NL80211_IFTYPE_MONITOR) { orinoco_rx_monitor(dev, rxfid, desc); goto out; } if (status & HERMES_RXSTAT_UNDECRYPTABLE) { DEBUG(1, "%s: Undecryptable frame on Rx. 
Frame dropped.\n", dev->name); wstats->discard.code++; goto update_stats; } length = le16_to_cpu(desc->data_len); /* Sanity checks */ if (length < 3) { /* No for even an 802.2 LLC header */ /* At least on Symbol firmware with PCF we get quite a lot of these legitimately - Poll frames with no data. */ goto out; } if (length > IEEE80211_MAX_DATA_LEN) { printk(KERN_WARNING "%s: Oversized frame received (%d bytes)\n", dev->name, length); stats->rx_length_errors++; goto update_stats; } /* Payload size does not include Michael MIC. Increase payload * size to read it together with the data. */ if (status & HERMES_RXSTAT_MIC) length += MICHAEL_MIC_LEN; /* We need space for the packet data itself, plus an ethernet header, plus 2 bytes so we can align the IP header on a 32bit boundary, plus 1 byte so we can read in odd length packets from the card, which has an IO granularity of 16 bits */ skb = dev_alloc_skb(length + ETH_HLEN + 2 + 1); if (!skb) { printk(KERN_WARNING "%s: Can't allocate skb for Rx\n", dev->name); goto update_stats; } /* We'll prepend the header, so reserve space for it. The worst case is no decapsulation, when 802.3 header is prepended and nothing is removed. 2 is for aligning the IP header. */ skb_reserve(skb, ETH_HLEN + 2); err = hw->ops->bap_pread(hw, IRQ_BAP, skb_put(skb, length), ALIGN(length, 2), rxfid, HERMES_802_2_OFFSET); if (err) { printk(KERN_ERR "%s: error %d reading frame. " "Frame dropped.\n", dev->name, err); goto drop; } /* Add desc and skb to rx queue */ rx_data = kzalloc(sizeof(*rx_data), GFP_ATOMIC); if (!rx_data) goto drop; rx_data->desc = desc; rx_data->skb = skb; list_add_tail(&rx_data->list, &priv->rx_list); tasklet_schedule(&priv->rx_tasklet); return; drop: dev_kfree_skb_irq(skb); update_stats: stats->rx_errors++; stats->rx_dropped++; out: kfree(desc); } EXPORT_SYMBOL(__orinoco_ev_rx); static void orinoco_rx(struct net_device *dev, struct hermes_rx_descriptor *desc, struct sk_buff *skb) { struct orinoco_private *priv = ndev_priv(dev); struct net_device_stats *stats = &dev->stats; u16 status, fc; int length; struct ethhdr *hdr; status = le16_to_cpu(desc->status); length = le16_to_cpu(desc->data_len); fc = le16_to_cpu(desc->frame_ctl); /* Calculate and check MIC */ if (status & HERMES_RXSTAT_MIC) { struct orinoco_tkip_key *key; int key_id = ((status & HERMES_RXSTAT_MIC_KEY_ID) >> HERMES_MIC_KEY_ID_SHIFT); u8 mic[MICHAEL_MIC_LEN]; u8 *rxmic; u8 *src = (fc & IEEE80211_FCTL_FROMDS) ? desc->addr3 : desc->addr2; /* Extract Michael MIC from payload */ rxmic = skb->data + skb->len - MICHAEL_MIC_LEN; skb_trim(skb, skb->len - MICHAEL_MIC_LEN); length -= MICHAEL_MIC_LEN; key = (struct orinoco_tkip_key *) priv->keys[key_id].key; if (!key) { printk(KERN_WARNING "%s: Received encrypted frame from " "%pM using key %i, but key is not installed\n", dev->name, src, key_id); goto drop; } orinoco_mic(priv->rx_tfm_mic, key->rx_mic, desc->addr1, src, 0, /* priority or QoS? */ skb->data, skb->len, &mic[0]); if (memcmp(mic, rxmic, MICHAEL_MIC_LEN)) { union iwreq_data wrqu; struct iw_michaelmicfailure wxmic; printk(KERN_WARNING "%s: " "Invalid Michael MIC in data frame from %pM, " "using key %i\n", dev->name, src, key_id); /* TODO: update stats */ /* Notify userspace */ memset(&wxmic, 0, sizeof(wxmic)); wxmic.flags = key_id & IW_MICFAILURE_KEY_ID; wxmic.flags |= (desc->addr1[0] & 1) ? 
IW_MICFAILURE_GROUP : IW_MICFAILURE_PAIRWISE; wxmic.src_addr.sa_family = ARPHRD_ETHER; memcpy(wxmic.src_addr.sa_data, src, ETH_ALEN); (void) orinoco_hw_get_tkip_iv(priv, key_id, &wxmic.tsc[0]); memset(&wrqu, 0, sizeof(wrqu)); wrqu.data.length = sizeof(wxmic); wireless_send_event(dev, IWEVMICHAELMICFAILURE, &wrqu, (char *) &wxmic); goto drop; } } /* Handle decapsulation * In most cases, the firmware tell us about SNAP frames. * For some reason, the SNAP frames sent by LinkSys APs * are not properly recognised by most firmwares. * So, check ourselves */ if (length >= ENCAPS_OVERHEAD && (((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_1042) || ((status & HERMES_RXSTAT_MSGTYPE) == HERMES_RXSTAT_TUNNEL) || is_ethersnap(skb->data))) { /* These indicate a SNAP within 802.2 LLC within 802.11 frame which we'll need to de-encapsulate to the original EthernetII frame. */ hdr = skb_push(skb, ETH_HLEN - ENCAPS_OVERHEAD); } else { /* 802.3 frame - prepend 802.3 header as is */ hdr = skb_push(skb, ETH_HLEN); hdr->h_proto = htons(length); } memcpy(hdr->h_dest, desc->addr1, ETH_ALEN); if (fc & IEEE80211_FCTL_FROMDS) memcpy(hdr->h_source, desc->addr3, ETH_ALEN); else memcpy(hdr->h_source, desc->addr2, ETH_ALEN); skb->protocol = eth_type_trans(skb, dev); skb->ip_summed = CHECKSUM_NONE; if (fc & IEEE80211_FCTL_TODS) skb->pkt_type = PACKET_OTHERHOST; /* Process the wireless stats if needed */ orinoco_stat_gather(dev, skb, desc); /* Pass the packet to the networking stack */ netif_rx(skb); stats->rx_packets++; stats->rx_bytes += length; return; drop: dev_kfree_skb(skb); stats->rx_errors++; stats->rx_dropped++; } static void orinoco_rx_isr_tasklet(struct tasklet_struct *t) { struct orinoco_private *priv = from_tasklet(priv, t, rx_tasklet); struct net_device *dev = priv->ndev; struct orinoco_rx_data *rx_data, *temp; struct hermes_rx_descriptor *desc; struct sk_buff *skb; unsigned long flags; /* orinoco_rx requires the driver lock, and we also need to * protect priv->rx_list, so just hold the lock over the * lot. * * If orinoco_lock fails, we've unplugged the card. In this * case just abort. 
*/ if (orinoco_lock(priv, &flags) != 0) return; /* extract desc and skb from queue */ list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) { desc = rx_data->desc; skb = rx_data->skb; list_del(&rx_data->list); kfree(rx_data); orinoco_rx(dev, desc, skb); kfree(desc); } orinoco_unlock(priv, &flags); } /********************************************************************/ /* Rx path (info frames) */ /********************************************************************/ static void print_linkstatus(struct net_device *dev, u16 status) { char *s; if (suppress_linkstatus) return; switch (status) { case HERMES_LINKSTATUS_NOT_CONNECTED: s = "Not Connected"; break; case HERMES_LINKSTATUS_CONNECTED: s = "Connected"; break; case HERMES_LINKSTATUS_DISCONNECTED: s = "Disconnected"; break; case HERMES_LINKSTATUS_AP_CHANGE: s = "AP Changed"; break; case HERMES_LINKSTATUS_AP_OUT_OF_RANGE: s = "AP Out of Range"; break; case HERMES_LINKSTATUS_AP_IN_RANGE: s = "AP In Range"; break; case HERMES_LINKSTATUS_ASSOC_FAILED: s = "Association Failed"; break; default: s = "UNKNOWN"; } printk(KERN_DEBUG "%s: New link status: %s (%04x)\n", dev->name, s, status); } /* Search scan results for requested BSSID, join it if found */ static void orinoco_join_ap(struct work_struct *work) { struct orinoco_private *priv = container_of(work, struct orinoco_private, join_work); struct net_device *dev = priv->ndev; struct hermes *hw = &priv->hw; int err; unsigned long flags; struct join_req { u8 bssid[ETH_ALEN]; __le16 channel; } __packed req; const int atom_len = offsetof(struct prism2_scan_apinfo, atim); struct prism2_scan_apinfo *atom = NULL; int offset = 4; int found = 0; u8 *buf; u16 len; /* Allocate buffer for scan results */ buf = kmalloc(MAX_SCAN_LEN, GFP_KERNEL); if (!buf) return; if (orinoco_lock(priv, &flags) != 0) goto fail_lock; /* Sanity checks in case user changed something in the meantime */ if (!priv->bssid_fixed) goto out; if (strlen(priv->desired_essid) == 0) goto out; /* Read scan results from the firmware */ err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_SCANRESULTSTABLE, MAX_SCAN_LEN, &len, buf); if (err) { printk(KERN_ERR "%s: Cannot read scan results\n", dev->name); goto out; } len = HERMES_RECLEN_TO_BYTES(len); /* Go through the scan results looking for the channel of the AP * we were requested to join */ for (; offset + atom_len <= len; offset += atom_len) { atom = (struct prism2_scan_apinfo *) (buf + offset); if (memcmp(&atom->bssid, priv->desired_bssid, ETH_ALEN) == 0) { found = 1; break; } } if (!found) { DEBUG(1, "%s: Requested AP not found in scan results\n", dev->name); goto out; } memcpy(req.bssid, priv->desired_bssid, ETH_ALEN); req.channel = atom->channel; /* both are little-endian */ err = HERMES_WRITE_RECORD(hw, USER_BAP, HERMES_RID_CNFJOINREQUEST, &req); if (err) printk(KERN_ERR "%s: Error issuing join request\n", dev->name); out: orinoco_unlock(priv, &flags); fail_lock: kfree(buf); } /* Send new BSSID to userspace */ static void orinoco_send_bssid_wevent(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; struct hermes *hw = &priv->hw; union iwreq_data wrqu; int err; err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENTBSSID, ETH_ALEN, NULL, wrqu.ap_addr.sa_data); if (err != 0) return; wrqu.ap_addr.sa_family = ARPHRD_ETHER; /* Send event to user space */ wireless_send_event(dev, SIOCGIWAP, &wrqu, NULL); } static void orinoco_send_assocreqie_wevent(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; struct hermes *hw = &priv->hw; union iwreq_data 
wrqu; int err; u8 buf[88]; u8 *ie; if (!priv->has_wpa) return; err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_REQ_INFO, sizeof(buf), NULL, &buf); if (err != 0) return; ie = orinoco_get_wpa_ie(buf, sizeof(buf)); if (ie) { int rem = sizeof(buf) - (ie - &buf[0]); wrqu.data.length = ie[1] + 2; if (wrqu.data.length > rem) wrqu.data.length = rem; if (wrqu.data.length) /* Send event to user space */ wireless_send_event(dev, IWEVASSOCREQIE, &wrqu, ie); } } static void orinoco_send_assocrespie_wevent(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; struct hermes *hw = &priv->hw; union iwreq_data wrqu; int err; u8 buf[88]; /* TODO: verify max size or IW_GENERIC_IE_MAX */ u8 *ie; if (!priv->has_wpa) return; err = hw->ops->read_ltv(hw, USER_BAP, HERMES_RID_CURRENT_ASSOC_RESP_INFO, sizeof(buf), NULL, &buf); if (err != 0) return; ie = orinoco_get_wpa_ie(buf, sizeof(buf)); if (ie) { int rem = sizeof(buf) - (ie - &buf[0]); wrqu.data.length = ie[1] + 2; if (wrqu.data.length > rem) wrqu.data.length = rem; if (wrqu.data.length) /* Send event to user space */ wireless_send_event(dev, IWEVASSOCRESPIE, &wrqu, ie); } } static void orinoco_send_wevents(struct work_struct *work) { struct orinoco_private *priv = container_of(work, struct orinoco_private, wevent_work); unsigned long flags; if (orinoco_lock(priv, &flags) != 0) return; orinoco_send_assocreqie_wevent(priv); orinoco_send_assocrespie_wevent(priv); orinoco_send_bssid_wevent(priv); orinoco_unlock(priv, &flags); } static void qbuf_scan(struct orinoco_private *priv, void *buf, int len, int type) { struct orinoco_scan_data *sd; unsigned long flags; sd = kmalloc(sizeof(*sd), GFP_ATOMIC); if (!sd) return; sd->buf = buf; sd->len = len; sd->type = type; spin_lock_irqsave(&priv->scan_lock, flags); list_add_tail(&sd->list, &priv->scan_list); spin_unlock_irqrestore(&priv->scan_lock, flags); schedule_work(&priv->process_scan); } static void qabort_scan(struct orinoco_private *priv) { struct orinoco_scan_data *sd; unsigned long flags; sd = kmalloc(sizeof(*sd), GFP_ATOMIC); if (!sd) return; sd->len = -1; /* Abort */ spin_lock_irqsave(&priv->scan_lock, flags); list_add_tail(&sd->list, &priv->scan_list); spin_unlock_irqrestore(&priv->scan_lock, flags); schedule_work(&priv->process_scan); } static void orinoco_process_scan_results(struct work_struct *work) { struct orinoco_private *priv = container_of(work, struct orinoco_private, process_scan); struct orinoco_scan_data *sd, *temp; unsigned long flags; void *buf; int len; int type; spin_lock_irqsave(&priv->scan_lock, flags); list_for_each_entry_safe(sd, temp, &priv->scan_list, list) { buf = sd->buf; len = sd->len; type = sd->type; list_del(&sd->list); spin_unlock_irqrestore(&priv->scan_lock, flags); kfree(sd); if (len > 0) { if (type == HERMES_INQ_CHANNELINFO) orinoco_add_extscan_result(priv, buf, len); else orinoco_add_hostscan_results(priv, buf, len); kfree(buf); } else { /* Either abort or complete the scan */ orinoco_scan_done(priv, (len < 0)); } spin_lock_irqsave(&priv->scan_lock, flags); } spin_unlock_irqrestore(&priv->scan_lock, flags); } void __orinoco_ev_info(struct net_device *dev, struct hermes *hw) { struct orinoco_private *priv = ndev_priv(dev); u16 infofid; struct { __le16 len; __le16 type; } __packed info; int len, type; int err; /* This is an answer to an INQUIRE command that we did earlier, * or an information "event" generated by the card * The controller return to us a pseudo frame containing * the information in question - Jean II */ infofid = hermes_read_regn(hw, 
INFOFID); /* Read the info frame header - don't try too hard */ err = hw->ops->bap_pread(hw, IRQ_BAP, &info, sizeof(info), infofid, 0); if (err) { printk(KERN_ERR "%s: error %d reading info frame. " "Frame dropped.\n", dev->name, err); return; } len = HERMES_RECLEN_TO_BYTES(le16_to_cpu(info.len)); type = le16_to_cpu(info.type); switch (type) { case HERMES_INQ_TALLIES: { struct hermes_tallies_frame tallies; struct iw_statistics *wstats = &priv->wstats; if (len > sizeof(tallies)) { printk(KERN_WARNING "%s: Tallies frame too long (%d bytes)\n", dev->name, len); len = sizeof(tallies); } err = hw->ops->bap_pread(hw, IRQ_BAP, &tallies, len, infofid, sizeof(info)); if (err) break; /* Increment our various counters */ /* wstats->discard.nwid - no wrong BSSID stuff */ wstats->discard.code += le16_to_cpu(tallies.RxWEPUndecryptable); if (len == sizeof(tallies)) wstats->discard.code += le16_to_cpu(tallies.RxDiscards_WEPICVError) + le16_to_cpu(tallies.RxDiscards_WEPExcluded); wstats->discard.misc += le16_to_cpu(tallies.TxDiscardsWrongSA); wstats->discard.fragment += le16_to_cpu(tallies.RxMsgInBadMsgFragments); wstats->discard.retries += le16_to_cpu(tallies.TxRetryLimitExceeded); /* wstats->miss.beacon - no match */ } break; case HERMES_INQ_LINKSTATUS: { struct hermes_linkstatus linkstatus; u16 newstatus; int connected; if (priv->iw_mode == NL80211_IFTYPE_MONITOR) break; if (len != sizeof(linkstatus)) { printk(KERN_WARNING "%s: Unexpected size for linkstatus frame (%d bytes)\n", dev->name, len); break; } err = hw->ops->bap_pread(hw, IRQ_BAP, &linkstatus, len, infofid, sizeof(info)); if (err) break; newstatus = le16_to_cpu(linkstatus.linkstatus); /* Symbol firmware uses "out of range" to signal that * the hostscan frame can be requested. */ if (newstatus == HERMES_LINKSTATUS_AP_OUT_OF_RANGE && priv->firmware_type == FIRMWARE_TYPE_SYMBOL && priv->has_hostscan && priv->scan_request) { hermes_inquire(hw, HERMES_INQ_HOSTSCAN_SYMBOL); break; } connected = (newstatus == HERMES_LINKSTATUS_CONNECTED) || (newstatus == HERMES_LINKSTATUS_AP_CHANGE) || (newstatus == HERMES_LINKSTATUS_AP_IN_RANGE); if (connected) netif_carrier_on(dev); else if (!ignore_disconnect) netif_carrier_off(dev); if (newstatus != priv->last_linkstatus) { priv->last_linkstatus = newstatus; print_linkstatus(dev, newstatus); /* The info frame contains only one word which is the * status (see hermes.h). The status is pretty boring * in itself, that's why we export the new BSSID... * Jean II */ schedule_work(&priv->wevent_work); } } break; case HERMES_INQ_SCAN: if (!priv->scan_request && priv->bssid_fixed && priv->firmware_type == FIRMWARE_TYPE_INTERSIL) { schedule_work(&priv->join_work); break; } fallthrough; case HERMES_INQ_HOSTSCAN: case HERMES_INQ_HOSTSCAN_SYMBOL: { /* Result of a scanning. Contains information about * cells in the vicinity - Jean II */ unsigned char *buf; /* Sanity check */ if (len > 4096) { printk(KERN_WARNING "%s: Scan results too large (%d bytes)\n", dev->name, len); qabort_scan(priv); break; } /* Allocate buffer for results */ buf = kmalloc(len, GFP_ATOMIC); if (buf == NULL) { /* No memory, so can't printk()... 
*/ qabort_scan(priv); break; } /* Read scan data */ err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) buf, len, infofid, sizeof(info)); if (err) { kfree(buf); qabort_scan(priv); break; } #ifdef ORINOCO_DEBUG { int i; printk(KERN_DEBUG "Scan result [%02X", buf[0]); for (i = 1; i < (len * 2); i++) printk(":%02X", buf[i]); printk("]\n"); } #endif /* ORINOCO_DEBUG */ qbuf_scan(priv, buf, len, type); } break; case HERMES_INQ_CHANNELINFO: { struct agere_ext_scan_info *bss; if (!priv->scan_request) { printk(KERN_DEBUG "%s: Got chaninfo without scan, " "len=%d\n", dev->name, len); break; } /* An empty result indicates that the scan is complete */ if (len == 0) { qbuf_scan(priv, NULL, len, type); break; } /* Sanity check */ else if (len < (offsetof(struct agere_ext_scan_info, data) + 2)) { /* Drop this result now so we don't have to * keep checking later */ printk(KERN_WARNING "%s: Ext scan results too short (%d bytes)\n", dev->name, len); break; } bss = kmalloc(len, GFP_ATOMIC); if (bss == NULL) break; /* Read scan data */ err = hw->ops->bap_pread(hw, IRQ_BAP, (void *) bss, len, infofid, sizeof(info)); if (err) kfree(bss); else qbuf_scan(priv, bss, len, type); break; } case HERMES_INQ_SEC_STAT_AGERE: /* Security status (Agere specific) */ /* Ignore this frame for now */ if (priv->firmware_type == FIRMWARE_TYPE_AGERE) break; fallthrough; default: printk(KERN_DEBUG "%s: Unknown information frame received: " "type 0x%04x, length %d\n", dev->name, type, len); /* We don't actually do anything about it */ break; } } EXPORT_SYMBOL(__orinoco_ev_info); static void __orinoco_ev_infdrop(struct net_device *dev, struct hermes *hw) { if (net_ratelimit()) printk(KERN_DEBUG "%s: Information frame lost.\n", dev->name); } /********************************************************************/ /* Internal hardware control routines */ /********************************************************************/ static int __orinoco_up(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; struct hermes *hw = &priv->hw; int err; netif_carrier_off(dev); /* just to make sure */ err = __orinoco_commit(priv); if (err) { printk(KERN_ERR "%s: Error %d configuring card\n", dev->name, err); return err; } /* Fire things up again */ hermes_set_irqmask(hw, ORINOCO_INTEN); err = hermes_enable_port(hw, 0); if (err) { printk(KERN_ERR "%s: Error %d enabling MAC port\n", dev->name, err); return err; } netif_start_queue(dev); return 0; } static int __orinoco_down(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; struct hermes *hw = &priv->hw; int err; netif_stop_queue(dev); if (!priv->hw_unavailable) { if (!priv->broken_disableport) { err = hermes_disable_port(hw, 0); if (err) { /* Some firmwares (e.g. Intersil 1.3.x) seem * to have problems disabling the port, oh * well, too bad. 
*/ printk(KERN_WARNING "%s: Error %d disabling MAC port\n", dev->name, err); priv->broken_disableport = 1; } } hermes_set_irqmask(hw, 0); hermes_write_regn(hw, EVACK, 0xffff); } orinoco_scan_done(priv, true); /* firmware will have to reassociate */ netif_carrier_off(dev); priv->last_linkstatus = 0xffff; return 0; } static int orinoco_reinit_firmware(struct orinoco_private *priv) { struct hermes *hw = &priv->hw; int err; err = hw->ops->init(hw); if (priv->do_fw_download && !err) { err = orinoco_download(priv); if (err) priv->do_fw_download = 0; } if (!err) err = orinoco_hw_allocate_fid(priv); return err; } static int __orinoco_set_multicast_list(struct net_device *dev) { struct orinoco_private *priv = ndev_priv(dev); int err = 0; int promisc, mc_count; /* The Hermes doesn't seem to have an allmulti mode, so we go * into promiscuous mode and let the upper levels deal. */ if ((dev->flags & IFF_PROMISC) || (dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > MAX_MULTICAST(priv))) { promisc = 1; mc_count = 0; } else { promisc = 0; mc_count = netdev_mc_count(dev); } err = __orinoco_hw_set_multicast_list(priv, dev, mc_count, promisc); return err; } /* This must be called from user context, without locks held - use * schedule_work() */ void orinoco_reset(struct work_struct *work) { struct orinoco_private *priv = container_of(work, struct orinoco_private, reset_work); struct net_device *dev = priv->ndev; struct hermes *hw = &priv->hw; int err; unsigned long flags; if (orinoco_lock(priv, &flags) != 0) /* When the hardware becomes available again, whatever * detects that is responsible for re-initializing * it. So no need for anything further */ return; netif_stop_queue(dev); /* Shut off interrupts. Depending on what state the hardware * is in, this might not work, but we'll try anyway */ hermes_set_irqmask(hw, 0); hermes_write_regn(hw, EVACK, 0xffff); priv->hw_unavailable++; priv->last_linkstatus = 0xffff; /* firmware will have to reassociate */ netif_carrier_off(dev); orinoco_unlock(priv, &flags); /* Scanning support: Notify scan cancellation */ orinoco_scan_done(priv, true); if (priv->hard_reset) { err = (*priv->hard_reset)(priv); if (err) { printk(KERN_ERR "%s: orinoco_reset: Error %d " "performing hard reset\n", dev->name, err); goto disable; } } err = orinoco_reinit_firmware(priv); if (err) { printk(KERN_ERR "%s: orinoco_reset: Error %d re-initializing firmware\n", dev->name, err); goto disable; } /* This has to be called from user context */ orinoco_lock_irq(priv); priv->hw_unavailable--; /* priv->open or priv->hw_unavailable might have changed while * we dropped the lock */ if (priv->open && (!priv->hw_unavailable)) { err = __orinoco_up(priv); if (err) { printk(KERN_ERR "%s: orinoco_reset: Error %d reenabling card\n", dev->name, err); } else netif_trans_update(dev); } orinoco_unlock_irq(priv); return; disable: hermes_set_irqmask(hw, 0); netif_device_detach(dev); printk(KERN_ERR "%s: Device has been disabled!\n", dev->name); } static int __orinoco_commit(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; int err = 0; /* If we've called commit, we are reconfiguring or bringing the * interface up. Maintaining countermeasures across this would * be confusing, so note that we've disabled them. The port will * be enabled later in orinoco_commit or __orinoco_up. */ priv->tkip_cm_active = 0; err = orinoco_hw_program_rids(priv); /* FIXME: what about netif_tx_lock */ (void) __orinoco_set_multicast_list(dev); return err; } /* Ensures configuration changes are applied. 
May result in a reset. * The caller should hold priv->lock */ int orinoco_commit(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; struct hermes *hw = &priv->hw; int err; if (priv->broken_disableport) { schedule_work(&priv->reset_work); return 0; } err = hermes_disable_port(hw, 0); if (err) { printk(KERN_WARNING "%s: Unable to disable port " "while reconfiguring card\n", dev->name); priv->broken_disableport = 1; goto out; } err = __orinoco_commit(priv); if (err) { printk(KERN_WARNING "%s: Unable to reconfigure card\n", dev->name); goto out; } err = hermes_enable_port(hw, 0); if (err) { printk(KERN_WARNING "%s: Unable to enable port while reconfiguring card\n", dev->name); goto out; } out: if (err) { printk(KERN_WARNING "%s: Resetting instead...\n", dev->name); schedule_work(&priv->reset_work); err = 0; } return err; } /********************************************************************/ /* Interrupt handler */ /********************************************************************/ static void __orinoco_ev_tick(struct net_device *dev, struct hermes *hw) { printk(KERN_DEBUG "%s: TICK\n", dev->name); } static void __orinoco_ev_wterr(struct net_device *dev, struct hermes *hw) { /* This seems to happen a fair bit under load, but ignoring it seems to work fine...*/ printk(KERN_DEBUG "%s: MAC controller error (WTERR). Ignoring.\n", dev->name); } irqreturn_t orinoco_interrupt(int irq, void *dev_id) { struct orinoco_private *priv = dev_id; struct net_device *dev = priv->ndev; struct hermes *hw = &priv->hw; int count = MAX_IRQLOOPS_PER_IRQ; u16 evstat, events; /* These are used to detect a runaway interrupt situation. * * If we get more than MAX_IRQLOOPS_PER_JIFFY iterations in a jiffy, * we panic and shut down the hardware */ /* jiffies value the last time we were called */ static int last_irq_jiffy; /* = 0 */ static int loops_this_jiffy; /* = 0 */ unsigned long flags; if (orinoco_lock(priv, &flags) != 0) { /* If hw is unavailable - we don't know if the irq was * for us or not */ return IRQ_HANDLED; } evstat = hermes_read_regn(hw, EVSTAT); events = evstat & hw->inten; if (!events) { orinoco_unlock(priv, &flags); return IRQ_NONE; } if (jiffies != last_irq_jiffy) loops_this_jiffy = 0; last_irq_jiffy = jiffies; while (events && count--) { if (++loops_this_jiffy > MAX_IRQLOOPS_PER_JIFFY) { printk(KERN_WARNING "%s: IRQ handler is looping too " "much! 
Resetting.\n", dev->name); /* Disable interrupts for now */ hermes_set_irqmask(hw, 0); schedule_work(&priv->reset_work); break; } /* Check the card hasn't been removed */ if (!hermes_present(hw)) { DEBUG(0, "orinoco_interrupt(): card removed\n"); break; } if (events & HERMES_EV_TICK) __orinoco_ev_tick(dev, hw); if (events & HERMES_EV_WTERR) __orinoco_ev_wterr(dev, hw); if (events & HERMES_EV_INFDROP) __orinoco_ev_infdrop(dev, hw); if (events & HERMES_EV_INFO) __orinoco_ev_info(dev, hw); if (events & HERMES_EV_RX) __orinoco_ev_rx(dev, hw); if (events & HERMES_EV_TXEXC) __orinoco_ev_txexc(dev, hw); if (events & HERMES_EV_TX) __orinoco_ev_tx(dev, hw); if (events & HERMES_EV_ALLOC) __orinoco_ev_alloc(dev, hw); hermes_write_regn(hw, EVACK, evstat); evstat = hermes_read_regn(hw, EVSTAT); events = evstat & hw->inten; } orinoco_unlock(priv, &flags); return IRQ_HANDLED; } EXPORT_SYMBOL(orinoco_interrupt); /********************************************************************/ /* Power management */ /********************************************************************/ #if defined(CONFIG_PM_SLEEP) && !defined(CONFIG_HERMES_CACHE_FW_ON_INIT) static int orinoco_pm_notifier(struct notifier_block *notifier, unsigned long pm_event, void *unused) { struct orinoco_private *priv = container_of(notifier, struct orinoco_private, pm_notifier); /* All we need to do is cache the firmware before suspend, and * release it when we come out. * * Only need to do this if we're downloading firmware. */ if (!priv->do_fw_download) return NOTIFY_DONE; switch (pm_event) { case PM_HIBERNATION_PREPARE: case PM_SUSPEND_PREPARE: orinoco_cache_fw(priv, 0); break; case PM_POST_RESTORE: /* Restore from hibernation failed. We need to clean * up in exactly the same way, so fall through. */ case PM_POST_HIBERNATION: case PM_POST_SUSPEND: orinoco_uncache_fw(priv); break; case PM_RESTORE_PREPARE: default: break; } return NOTIFY_DONE; } static void orinoco_register_pm_notifier(struct orinoco_private *priv) { priv->pm_notifier.notifier_call = orinoco_pm_notifier; register_pm_notifier(&priv->pm_notifier); } static void orinoco_unregister_pm_notifier(struct orinoco_private *priv) { unregister_pm_notifier(&priv->pm_notifier); } #else /* !PM_SLEEP || HERMES_CACHE_FW_ON_INIT */ #define orinoco_register_pm_notifier(priv) do { } while (0) #define orinoco_unregister_pm_notifier(priv) do { } while (0) #endif /********************************************************************/ /* Initialization */ /********************************************************************/ int orinoco_init(struct orinoco_private *priv) { struct device *dev = priv->dev; struct wiphy *wiphy = priv_to_wiphy(priv); struct hermes *hw = &priv->hw; int err = 0; /* No need to lock, the hw_unavailable flag is already set in * alloc_orinocodev() */ priv->nicbuf_size = IEEE80211_MAX_FRAME_LEN + ETH_HLEN; /* Initialize the firmware */ err = hw->ops->init(hw); if (err != 0) { dev_err(dev, "Failed to initialize firmware (err = %d)\n", err); goto out; } err = determine_fw_capabilities(priv, wiphy->fw_version, sizeof(wiphy->fw_version), &wiphy->hw_version); if (err != 0) { dev_err(dev, "Incompatible firmware, aborting\n"); goto out; } if (priv->do_fw_download) { #ifdef CONFIG_HERMES_CACHE_FW_ON_INIT orinoco_cache_fw(priv, 0); #endif err = orinoco_download(priv); if (err) priv->do_fw_download = 0; /* Check firmware version again */ err = determine_fw_capabilities(priv, wiphy->fw_version, sizeof(wiphy->fw_version), &wiphy->hw_version); if (err != 0) { dev_err(dev, "Incompatible 
firmware, aborting\n"); goto out; } } if (priv->has_port3) dev_info(dev, "Ad-hoc demo mode supported\n"); if (priv->has_ibss) dev_info(dev, "IEEE standard IBSS ad-hoc mode supported\n"); if (priv->has_wep) dev_info(dev, "WEP supported, %s-bit key\n", priv->has_big_wep ? "104" : "40"); if (priv->has_wpa) { dev_info(dev, "WPA-PSK supported\n"); if (orinoco_mic_init(priv)) { dev_err(dev, "Failed to setup MIC crypto algorithm. " "Disabling WPA support\n"); priv->has_wpa = 0; } } err = orinoco_hw_read_card_settings(priv, wiphy->perm_addr); if (err) goto out; err = orinoco_hw_allocate_fid(priv); if (err) { dev_err(dev, "Failed to allocate NIC buffer!\n"); goto out; } /* Set up the default configuration */ priv->iw_mode = NL80211_IFTYPE_STATION; /* By default use IEEE/IBSS ad-hoc mode if we have it */ priv->prefer_port3 = priv->has_port3 && (!priv->has_ibss); set_port_type(priv); priv->channel = 0; /* use firmware default */ priv->promiscuous = 0; priv->encode_alg = ORINOCO_ALG_NONE; priv->tx_key = 0; priv->wpa_enabled = 0; priv->tkip_cm_active = 0; priv->key_mgmt = 0; priv->wpa_ie_len = 0; priv->wpa_ie = NULL; if (orinoco_wiphy_register(wiphy)) { err = -ENODEV; goto out; } /* Make the hardware available, as long as it hasn't been * removed elsewhere (e.g. by PCMCIA hot unplug) */ orinoco_lock_irq(priv); priv->hw_unavailable--; orinoco_unlock_irq(priv); dev_dbg(dev, "Ready\n"); out: return err; } EXPORT_SYMBOL(orinoco_init); static const struct net_device_ops orinoco_netdev_ops = { .ndo_open = orinoco_open, .ndo_stop = orinoco_stop, .ndo_start_xmit = orinoco_xmit, .ndo_set_rx_mode = orinoco_set_multicast_list, .ndo_change_mtu = orinoco_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, .ndo_tx_timeout = orinoco_tx_timeout, }; /* Allocate private data. 
* * This driver has a number of structures associated with it * netdev - Net device structure for each network interface * wiphy - structure associated with wireless phy * wireless_dev (wdev) - structure for each wireless interface * hw - structure for hermes chip info * card - card specific structure for use by the card driver * (airport, orinoco_cs) * priv - orinoco private data * device - generic linux device structure * * +---------+ +---------+ * | wiphy | | netdev | * | +-------+ | +-------+ * | | priv | | | wdev | * | | +-----+ +-+-------+ * | | | hw | * | +-+-----+ * | | card | * +-+-------+ * * priv has a link to netdev and device * wdev has a link to wiphy */ struct orinoco_private *alloc_orinocodev(int sizeof_card, struct device *device, int (*hard_reset)(struct orinoco_private *), int (*stop_fw)(struct orinoco_private *, int)) { struct orinoco_private *priv; struct wiphy *wiphy; /* allocate wiphy * NOTE: We only support a single virtual interface * but this may change when monitor mode is added */ wiphy = wiphy_new(&orinoco_cfg_ops, sizeof(struct orinoco_private) + sizeof_card); if (!wiphy) return NULL; priv = wiphy_priv(wiphy); priv->dev = device; if (sizeof_card) priv->card = (void *)((unsigned long)priv + sizeof(struct orinoco_private)); else priv->card = NULL; orinoco_wiphy_init(wiphy); #ifdef WIRELESS_SPY priv->wireless_data.spy_data = &priv->spy_data; #endif /* Set up default callbacks */ priv->hard_reset = hard_reset; priv->stop_fw = stop_fw; spin_lock_init(&priv->lock); priv->open = 0; priv->hw_unavailable = 1; /* orinoco_init() must clear this * before anything else touches the * hardware */ INIT_WORK(&priv->reset_work, orinoco_reset); INIT_WORK(&priv->join_work, orinoco_join_ap); INIT_WORK(&priv->wevent_work, orinoco_send_wevents); INIT_LIST_HEAD(&priv->rx_list); tasklet_setup(&priv->rx_tasklet, orinoco_rx_isr_tasklet); spin_lock_init(&priv->scan_lock); INIT_LIST_HEAD(&priv->scan_list); INIT_WORK(&priv->process_scan, orinoco_process_scan_results); priv->last_linkstatus = 0xffff; #if defined(CONFIG_HERMES_CACHE_FW_ON_INIT) || defined(CONFIG_PM_SLEEP) priv->cached_pri_fw = NULL; priv->cached_fw = NULL; #endif /* Register PM notifiers */ orinoco_register_pm_notifier(priv); return priv; } EXPORT_SYMBOL(alloc_orinocodev); /* We can only support a single interface. We provide a separate * function to set it up to distinguish between hardware * initialisation and interface setup. * * The base_addr and irq parameters are passed on to netdev for use * with SIOCGIFMAP. 
*/ int orinoco_if_add(struct orinoco_private *priv, unsigned long base_addr, unsigned int irq, const struct net_device_ops *ops) { struct wiphy *wiphy = priv_to_wiphy(priv); struct wireless_dev *wdev; struct net_device *dev; int ret; dev = alloc_etherdev(sizeof(struct wireless_dev)); if (!dev) return -ENOMEM; /* Initialise wireless_dev */ wdev = netdev_priv(dev); wdev->wiphy = wiphy; wdev->iftype = NL80211_IFTYPE_STATION; /* Setup / override net_device fields */ dev->ieee80211_ptr = wdev; dev->watchdog_timeo = HZ; /* 1 second timeout */ dev->wireless_handlers = &orinoco_handler_def; #ifdef WIRELESS_SPY dev->wireless_data = &priv->wireless_data; #endif /* Default to standard ops if not set */ if (ops) dev->netdev_ops = ops; else dev->netdev_ops = &orinoco_netdev_ops; /* we use the default eth_mac_addr for setting the MAC addr */ /* Reserve space in skb for the SNAP header */ dev->needed_headroom = ENCAPS_OVERHEAD; netif_carrier_off(dev); eth_hw_addr_set(dev, wiphy->perm_addr); dev->base_addr = base_addr; dev->irq = irq; dev->min_mtu = ORINOCO_MIN_MTU; dev->max_mtu = ORINOCO_MAX_MTU; SET_NETDEV_DEV(dev, priv->dev); ret = register_netdev(dev); if (ret) goto fail; priv->ndev = dev; /* Report what we've done */ dev_dbg(priv->dev, "Registered interface %s.\n", dev->name); return 0; fail: free_netdev(dev); return ret; } EXPORT_SYMBOL(orinoco_if_add); void orinoco_if_del(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; unregister_netdev(dev); free_netdev(dev); } EXPORT_SYMBOL(orinoco_if_del); void free_orinocodev(struct orinoco_private *priv) { struct wiphy *wiphy = priv_to_wiphy(priv); struct orinoco_rx_data *rx_data, *temp; struct orinoco_scan_data *sd, *sdtemp; /* If the tasklet is scheduled when we call tasklet_kill it * will run one final time. However the tasklet will only * drain priv->rx_list if the hw is still available. 
*/ tasklet_kill(&priv->rx_tasklet); /* Explicitly drain priv->rx_list */ list_for_each_entry_safe(rx_data, temp, &priv->rx_list, list) { list_del(&rx_data->list); dev_kfree_skb(rx_data->skb); kfree(rx_data->desc); kfree(rx_data); } cancel_work_sync(&priv->process_scan); /* Explicitly drain priv->scan_list */ list_for_each_entry_safe(sd, sdtemp, &priv->scan_list, list) { list_del(&sd->list); if (sd->len > 0) kfree(sd->buf); kfree(sd); } orinoco_unregister_pm_notifier(priv); orinoco_uncache_fw(priv); priv->wpa_ie_len = 0; kfree(priv->wpa_ie); orinoco_mic_free(priv); wiphy_free(wiphy); } EXPORT_SYMBOL(free_orinocodev); int orinoco_up(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; unsigned long flags; int err; priv->hw.ops->lock_irqsave(&priv->lock, &flags); err = orinoco_reinit_firmware(priv); if (err) { printk(KERN_ERR "%s: Error %d re-initializing firmware\n", dev->name, err); goto exit; } netif_device_attach(dev); priv->hw_unavailable--; if (priv->open && !priv->hw_unavailable) { err = __orinoco_up(priv); if (err) printk(KERN_ERR "%s: Error %d restarting card\n", dev->name, err); } exit: priv->hw.ops->unlock_irqrestore(&priv->lock, &flags); return 0; } EXPORT_SYMBOL(orinoco_up); void orinoco_down(struct orinoco_private *priv) { struct net_device *dev = priv->ndev; unsigned long flags; int err; priv->hw.ops->lock_irqsave(&priv->lock, &flags); err = __orinoco_down(priv); if (err) printk(KERN_WARNING "%s: Error %d downing interface\n", dev->name, err); netif_device_detach(dev); priv->hw_unavailable++; priv->hw.ops->unlock_irqrestore(&priv->lock, &flags); } EXPORT_SYMBOL(orinoco_down); /********************************************************************/ /* Module initialization */ /********************************************************************/ /* Can't be declared "const" or the whole __initdata section will * become const */ static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION " (David Gibson <[email protected]>, " "Pavel Roskin <[email protected]>, et al)"; static int __init init_orinoco(void) { printk(KERN_DEBUG "%s\n", version); return 0; } static void __exit exit_orinoco(void) { } module_init(init_orinoco); module_exit(exit_orinoco);
linux-master
drivers/net/wireless/intersil/orinoco/main.c
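The scan handling in main.c above defers all processing: qbuf_scan()/qabort_scan() only allocate a node and append it to priv->scan_list under scan_lock, and orinoco_process_scan_results() detaches entries under the lock but does the real work with the lock dropped. Below is a minimal userspace sketch of that producer/consumer pattern only; it is not kernel code, a pthread mutex stands in for the spinlock and a hand-rolled singly linked list for list_head, and the names scan_entry/queue_scan/process_scan_queue are made up for illustration.

/* Illustration only: queue-under-lock, process-outside-lock, as in main.c. */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct scan_entry {
	struct scan_entry *next;
	void *buf;
	int len;
	int type;
};

static struct scan_entry *scan_head;	/* stand-in for priv->scan_list */
static pthread_mutex_t scan_lock = PTHREAD_MUTEX_INITIALIZER;

/* Producer side, mirroring qbuf_scan(): allocate a node, append under the lock. */
static int queue_scan(void *buf, int len, int type)
{
	struct scan_entry *e = malloc(sizeof(*e));
	struct scan_entry **p;

	if (!e)
		return -1;
	e->next = NULL;
	e->buf = buf;
	e->len = len;
	e->type = type;

	pthread_mutex_lock(&scan_lock);
	for (p = &scan_head; *p; p = &(*p)->next)
		;
	*p = e;
	pthread_mutex_unlock(&scan_lock);
	return 0;
}

/* Consumer side, mirroring orinoco_process_scan_results(): detach one entry
 * under the lock, then do the potentially slow processing with the lock
 * dropped so producers are never held up. */
static void process_scan_queue(void)
{
	for (;;) {
		struct scan_entry *e;

		pthread_mutex_lock(&scan_lock);
		e = scan_head;
		if (e)
			scan_head = e->next;
		pthread_mutex_unlock(&scan_lock);

		if (!e)
			break;
		if (e->len > 0) {
			printf("scan result type %d, %d bytes\n", e->type, e->len);
			free(e->buf);
		} else {
			printf("scan %s\n", e->len < 0 ? "aborted" : "complete");
		}
		free(e);
	}
}

int main(void)
{
	queue_scan(strdup("dummy"), 5, 1);
	queue_scan(NULL, 0, 1);	/* empty result signals scan completion */
	process_scan_queue();
	return 0;
}

The design point this mirrors is that the info-frame handler runs in interrupt context and may only do GFP_ATOMIC allocation and a list append, while the heavier cfg80211 reporting happens later in process context.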
/* orinoco_nortel.c * * Driver for Prism II devices which would usually be driven by orinoco_cs, * but are connected to the PCI bus by a PCI-to-PCMCIA adapter used in * Nortel emobility, Symbol LA-4113 and Symbol LA-4123. * * Copyright (C) 2002 Tobias Hoffmann * (C) 2003 Christoph Jungegger <[email protected]> * * Some of this code is borrowed from orinoco_plx.c * Copyright (C) 2001 Daniel Barlow * Some of this code is borrowed from orinoco_pci.c * Copyright (C) 2001 Jean Tourrilhes * Some of this code is "inspired" by linux-wlan-ng-0.1.10, but nothing * has been copied from it. linux-wlan-ng-0.1.10 is originally : * Copyright (C) 1999 AbsoluteValue Systems, Inc. All Rights Reserved. * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License * at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and * limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License version 2 (the "GPL"), in * which case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the MPL, indicate your decision by * deleting the provisions above and replace them with the notice and * other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file * under either the MPL or the GPL. */ #define DRIVER_NAME "orinoco_nortel" #define PFX DRIVER_NAME ": " #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pci.h> #include <pcmcia/cisreg.h> #include "orinoco.h" #include "orinoco_pci.h" #define COR_OFFSET (0xe0) /* COR attribute offset of Prism2 PC card */ #define COR_VALUE (COR_LEVEL_REQ | COR_FUNC_ENA) /* Enable PC card with interrupt in level trigger */ /* * Do a soft reset of the card using the Configuration Option Register * We need this to get going... * This is the part of the code that is strongly inspired from wlan-ng * * Note bis : Don't try to access HERMES_CMD during the reset phase. * It just won't work ! 
*/ static int orinoco_nortel_cor_reset(struct orinoco_private *priv) { struct orinoco_pci_card *card = priv->card; /* Assert the reset until the card notices */ iowrite16(8, card->bridge_io + 2); ioread16(card->attr_io + COR_OFFSET); iowrite16(0x80, card->attr_io + COR_OFFSET); mdelay(1); /* Give time for the card to recover from this hard effort */ iowrite16(0, card->attr_io + COR_OFFSET); iowrite16(0, card->attr_io + COR_OFFSET); mdelay(1); /* Set COR as usual */ iowrite16(COR_VALUE, card->attr_io + COR_OFFSET); iowrite16(COR_VALUE, card->attr_io + COR_OFFSET); mdelay(1); iowrite16(0x228, card->bridge_io + 2); return 0; } static int orinoco_nortel_hw_init(struct orinoco_pci_card *card) { int i; u32 reg; /* Setup bridge */ if (ioread16(card->bridge_io) & 1) { printk(KERN_ERR PFX "brg1 answer1 wrong\n"); return -EBUSY; } iowrite16(0x118, card->bridge_io + 2); iowrite16(0x108, card->bridge_io + 2); mdelay(30); iowrite16(0x8, card->bridge_io + 2); for (i = 0; i < 30; i++) { mdelay(30); if (ioread16(card->bridge_io) & 0x10) break; } if (i == 30) { printk(KERN_ERR PFX "brg1 timed out\n"); return -EBUSY; } if (ioread16(card->attr_io + COR_OFFSET) & 1) { printk(KERN_ERR PFX "brg2 answer1 wrong\n"); return -EBUSY; } if (ioread16(card->attr_io + COR_OFFSET + 2) & 1) { printk(KERN_ERR PFX "brg2 answer2 wrong\n"); return -EBUSY; } if (ioread16(card->attr_io + COR_OFFSET + 4) & 1) { printk(KERN_ERR PFX "brg2 answer3 wrong\n"); return -EBUSY; } /* Set the PCMCIA COR register */ iowrite16(COR_VALUE, card->attr_io + COR_OFFSET); mdelay(1); reg = ioread16(card->attr_io + COR_OFFSET); if (reg != COR_VALUE) { printk(KERN_ERR PFX "Error setting COR value (reg=%x)\n", reg); return -EBUSY; } /* Set LEDs */ iowrite16(1, card->bridge_io + 10); return 0; } static int orinoco_nortel_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { int err; struct orinoco_private *priv; struct orinoco_pci_card *card; void __iomem *hermes_io, *bridge_io, *attr_io; err = pci_enable_device(pdev); if (err) { printk(KERN_ERR PFX "Cannot enable PCI device\n"); return err; } err = pci_request_regions(pdev, DRIVER_NAME); if (err) { printk(KERN_ERR PFX "Cannot obtain PCI resources\n"); goto fail_resources; } bridge_io = pci_iomap(pdev, 0, 0); if (!bridge_io) { printk(KERN_ERR PFX "Cannot map bridge registers\n"); err = -EIO; goto fail_map_bridge; } attr_io = pci_iomap(pdev, 1, 0); if (!attr_io) { printk(KERN_ERR PFX "Cannot map PCMCIA attributes\n"); err = -EIO; goto fail_map_attr; } hermes_io = pci_iomap(pdev, 2, 0); if (!hermes_io) { printk(KERN_ERR PFX "Cannot map chipset registers\n"); err = -EIO; goto fail_map_hermes; } /* Allocate network device */ priv = alloc_orinocodev(sizeof(*card), &pdev->dev, orinoco_nortel_cor_reset, NULL); if (!priv) { printk(KERN_ERR PFX "Cannot allocate network device\n"); err = -ENOMEM; goto fail_alloc; } card = priv->card; card->bridge_io = bridge_io; card->attr_io = attr_io; hermes_struct_init(&priv->hw, hermes_io, HERMES_16BIT_REGSPACING); err = request_irq(pdev->irq, orinoco_interrupt, IRQF_SHARED, DRIVER_NAME, priv); if (err) { printk(KERN_ERR PFX "Cannot allocate IRQ %d\n", pdev->irq); err = -EBUSY; goto fail_irq; } err = orinoco_nortel_hw_init(card); if (err) { printk(KERN_ERR PFX "Hardware initialization failed\n"); goto fail; } err = orinoco_nortel_cor_reset(priv); if (err) { printk(KERN_ERR PFX "Initial reset failed\n"); goto fail; } err = orinoco_init(priv); if (err) { printk(KERN_ERR PFX "orinoco_init() failed\n"); goto fail; } err = orinoco_if_add(priv, 0, 0, NULL); if (err) { 
printk(KERN_ERR PFX "orinoco_if_add() failed\n"); goto fail_wiphy; } pci_set_drvdata(pdev, priv); return 0; fail_wiphy: wiphy_unregister(priv_to_wiphy(priv)); fail: free_irq(pdev->irq, priv); fail_irq: free_orinocodev(priv); fail_alloc: pci_iounmap(pdev, hermes_io); fail_map_hermes: pci_iounmap(pdev, attr_io); fail_map_attr: pci_iounmap(pdev, bridge_io); fail_map_bridge: pci_release_regions(pdev); fail_resources: pci_disable_device(pdev); return err; } static void orinoco_nortel_remove_one(struct pci_dev *pdev) { struct orinoco_private *priv = pci_get_drvdata(pdev); struct orinoco_pci_card *card = priv->card; /* Clear LEDs */ iowrite16(0, card->bridge_io + 10); orinoco_if_del(priv); wiphy_unregister(priv_to_wiphy(priv)); free_irq(pdev->irq, priv); free_orinocodev(priv); pci_iounmap(pdev, priv->hw.iobase); pci_iounmap(pdev, card->attr_io); pci_iounmap(pdev, card->bridge_io); pci_release_regions(pdev); pci_disable_device(pdev); } static const struct pci_device_id orinoco_nortel_id_table[] = { /* Nortel emobility PCI */ {0x126c, 0x8030, PCI_ANY_ID, PCI_ANY_ID,}, /* Symbol LA-4123 PCI */ {0x1562, 0x0001, PCI_ANY_ID, PCI_ANY_ID,}, {0,}, }; MODULE_DEVICE_TABLE(pci, orinoco_nortel_id_table); static struct pci_driver orinoco_nortel_driver = { .name = DRIVER_NAME, .id_table = orinoco_nortel_id_table, .probe = orinoco_nortel_init_one, .remove = orinoco_nortel_remove_one, .driver.pm = &orinoco_pci_pm_ops, }; static char version[] __initdata = DRIVER_NAME " " DRIVER_VERSION " (Tobias Hoffmann & Christoph Jungegger <[email protected]>)"; MODULE_AUTHOR("Christoph Jungegger <[email protected]>"); MODULE_DESCRIPTION("Driver for wireless LAN cards using the Nortel PCI bridge"); MODULE_LICENSE("Dual MPL/GPL"); static int __init orinoco_nortel_init(void) { printk(KERN_DEBUG "%s\n", version); return pci_register_driver(&orinoco_nortel_driver); } static void __exit orinoco_nortel_exit(void) { pci_unregister_driver(&orinoco_nortel_driver); } module_init(orinoco_nortel_init); module_exit(orinoco_nortel_exit);
linux-master
drivers/net/wireless/intersil/orinoco/orinoco_nortel.c
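orinoco_nortel_hw_init() above brings the PCI-to-PCMCIA bridge up by writing a register and then polling a status bit a bounded number of times (up to 30 polls with mdelay(30) between them) before declaring -EBUSY. The sketch below shows only that bounded-poll shape in plain userspace C; read_bridge_status() is a hypothetical stand-in for ioread16(card->bridge_io), and the function names are invented for illustration.

/* Illustration only: bounded polling with a failure path, as in the bridge init. */
#include <stdint.h>
#include <stdio.h>

/* Hypothetical register read; the real driver uses ioread16() on mapped I/O. */
static uint16_t read_bridge_status(void)
{
	static int calls;
	return (++calls >= 3) ? 0x10 : 0x0;	/* "ready" bit appears on the 3rd poll */
}

/* Poll the ready bit at most max_polls times: 0 as soon as it is seen,
 * -1 if the loop budget runs out (mirrors the -EBUSY timeout path). */
static int wait_for_bridge_ready(int max_polls)
{
	int i;

	for (i = 0; i < max_polls; i++) {
		/* the real driver sleeps ~30 ms between polls (mdelay(30)) */
		if (read_bridge_status() & 0x10)
			return 0;
	}
	return -1;
}

int main(void)
{
	printf("bridge %s\n", wait_for_bridge_ready(30) ? "timed out" : "ready");
	return 0;
}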
/* * Driver for 802.11b cards using RAM-loadable Symbol firmware, such as * Symbol Wireless Networker LA4137, CompactFlash cards by Socket * Communications and Intel PRO/Wireless 2011B. * * The driver implements Symbol firmware download. The rest is handled * in hermes.c and main.c. * * Utilities for downloading the Symbol firmware are available at * http://sourceforge.net/projects/orinoco/ * * Copyright (C) 2002-2005 Pavel Roskin <[email protected]> * Portions based on orinoco_cs.c: * Copyright (C) David Gibson, Linuxcare Australia * Portions based on Spectrum24tDnld.c from original spectrum24 driver: * Copyright (C) Symbol Technologies. * * See copyright notice in file main.c. */ #define DRIVER_NAME "spectrum_cs" #define PFX DRIVER_NAME ": " #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include <pcmcia/cistpl.h> #include <pcmcia/cisreg.h> #include <pcmcia/ds.h> #include "orinoco.h" /********************************************************************/ /* Module stuff */ /********************************************************************/ MODULE_AUTHOR("Pavel Roskin <[email protected]>"); MODULE_DESCRIPTION("Driver for Symbol Spectrum24 Trilogy cards with firmware downloader"); MODULE_LICENSE("Dual MPL/GPL"); /* Module parameters */ /* Some D-Link cards have buggy CIS. They do work at 5v properly, but * don't have any CIS entry for it. This workaround it... */ static int ignore_cis_vcc; /* = 0 */ module_param(ignore_cis_vcc, int, 0); MODULE_PARM_DESC(ignore_cis_vcc, "Allow voltage mismatch between card and socket"); /********************************************************************/ /* Data structures */ /********************************************************************/ /* PCMCIA specific device information (goes in the card field of * struct orinoco_private */ struct orinoco_pccard { struct pcmcia_device *p_dev; }; /********************************************************************/ /* Function prototypes */ /********************************************************************/ static int spectrum_cs_config(struct pcmcia_device *link); static void spectrum_cs_release(struct pcmcia_device *link); /* Constants for the CISREG_CCSR register */ #define HCR_RUN 0x07 /* run firmware after reset */ #define HCR_IDLE 0x0E /* don't run firmware after reset */ #define HCR_MEM16 0x10 /* memory width bit, should be preserved */ /* * Reset the card using configuration registers COR and CCSR. * If IDLE is 1, stop the firmware, so that it can be safely rewritten. */ static int spectrum_reset(struct pcmcia_device *link, int idle) { int ret; u8 save_cor; u8 ccsr; /* Doing it if hardware is gone is guaranteed crash */ if (!pcmcia_dev_present(link)) return -ENODEV; /* Save original COR value */ ret = pcmcia_read_config_byte(link, CISREG_COR, &save_cor); if (ret) goto failed; /* Soft-Reset card */ ret = pcmcia_write_config_byte(link, CISREG_COR, (save_cor | COR_SOFT_RESET)); if (ret) goto failed; udelay(1000); /* Read CCSR */ ret = pcmcia_read_config_byte(link, CISREG_CCSR, &ccsr); if (ret) goto failed; /* * Start or stop the firmware. Memory width bit should be * preserved from the value we've just read. */ ccsr = (idle ? 
HCR_IDLE : HCR_RUN) | (ccsr & HCR_MEM16); ret = pcmcia_write_config_byte(link, CISREG_CCSR, ccsr); if (ret) goto failed; udelay(1000); /* Restore original COR configuration index */ ret = pcmcia_write_config_byte(link, CISREG_COR, (save_cor & ~COR_SOFT_RESET)); if (ret) goto failed; udelay(1000); return 0; failed: return -ENODEV; } /********************************************************************/ /* Device methods */ /********************************************************************/ static int spectrum_cs_hard_reset(struct orinoco_private *priv) { struct orinoco_pccard *card = priv->card; struct pcmcia_device *link = card->p_dev; /* Soft reset using COR and HCR */ spectrum_reset(link, 0); return 0; } static int spectrum_cs_stop_firmware(struct orinoco_private *priv, int idle) { struct orinoco_pccard *card = priv->card; struct pcmcia_device *link = card->p_dev; return spectrum_reset(link, idle); } /********************************************************************/ /* PCMCIA stuff */ /********************************************************************/ static int spectrum_cs_probe(struct pcmcia_device *link) { struct orinoco_private *priv; struct orinoco_pccard *card; int ret; priv = alloc_orinocodev(sizeof(*card), &link->dev, spectrum_cs_hard_reset, spectrum_cs_stop_firmware); if (!priv) return -ENOMEM; card = priv->card; /* Link both structures together */ card->p_dev = link; link->priv = priv; ret = spectrum_cs_config(link); if (ret) goto err_free_orinocodev; return 0; err_free_orinocodev: free_orinocodev(priv); return ret; } static void spectrum_cs_detach(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; orinoco_if_del(priv); spectrum_cs_release(link); free_orinocodev(priv); } /* spectrum_cs_detach */ static int spectrum_cs_config_check(struct pcmcia_device *p_dev, void *priv_data) { if (p_dev->config_index == 0) return -EINVAL; return pcmcia_request_io(p_dev); }; static int spectrum_cs_config(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; struct hermes *hw = &priv->hw; int ret; void __iomem *mem; link->config_flags |= CONF_AUTO_SET_VPP | CONF_AUTO_CHECK_VCC | CONF_AUTO_SET_IO | CONF_ENABLE_IRQ; if (ignore_cis_vcc) link->config_flags &= ~CONF_AUTO_CHECK_VCC; ret = pcmcia_loop_config(link, spectrum_cs_config_check, NULL); if (ret) { if (!ignore_cis_vcc) printk(KERN_ERR PFX "GetNextTuple(): No matching " "CIS configuration. Maybe you need the " "ignore_cis_vcc=1 parameter.\n"); goto failed; } mem = ioport_map(link->resource[0]->start, resource_size(link->resource[0])); if (!mem) goto failed; /* We initialize the hermes structure before completing PCMCIA * configuration just in case the interrupt handler gets * called. 
*/ hermes_struct_init(hw, mem, HERMES_16BIT_REGSPACING); hw->eeprom_pda = true; ret = pcmcia_request_irq(link, orinoco_interrupt); if (ret) goto failed; ret = pcmcia_enable_device(link); if (ret) goto failed; /* Reset card */ if (spectrum_cs_hard_reset(priv) != 0) goto failed; /* Initialise the main driver */ if (orinoco_init(priv) != 0) { printk(KERN_ERR PFX "orinoco_init() failed\n"); goto failed; } /* Register an interface with the stack */ if (orinoco_if_add(priv, link->resource[0]->start, link->irq, NULL) != 0) { printk(KERN_ERR PFX "orinoco_if_add() failed\n"); goto failed; } return 0; failed: spectrum_cs_release(link); return -ENODEV; } /* spectrum_cs_config */ static void spectrum_cs_release(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; unsigned long flags; /* We're committed to taking the device away now, so mark the * hardware as unavailable */ priv->hw.ops->lock_irqsave(&priv->lock, &flags); priv->hw_unavailable++; priv->hw.ops->unlock_irqrestore(&priv->lock, &flags); pcmcia_disable_device(link); if (priv->hw.iobase) ioport_unmap(priv->hw.iobase); } /* spectrum_cs_release */ static int spectrum_cs_suspend(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; /* Mark the device as stopped, to block IO until later */ orinoco_down(priv); return 0; } static int spectrum_cs_resume(struct pcmcia_device *link) { struct orinoco_private *priv = link->priv; int err = orinoco_up(priv); return err; } /********************************************************************/ /* Module initialization */ /********************************************************************/ static const struct pcmcia_device_id spectrum_cs_ids[] = { PCMCIA_DEVICE_MANF_CARD(0x026c, 0x0001), /* Symbol Spectrum24 LA4137 */ PCMCIA_DEVICE_MANF_CARD(0x0104, 0x0001), /* Socket Communications CF */ PCMCIA_DEVICE_PROD_ID12("Intel", "PRO/Wireless LAN PC Card", 0x816cc815, 0x6fbf459a), /* 2011B, not 2011 */ PCMCIA_DEVICE_NULL, }; MODULE_DEVICE_TABLE(pcmcia, spectrum_cs_ids); static struct pcmcia_driver orinoco_driver = { .owner = THIS_MODULE, .name = DRIVER_NAME, .probe = spectrum_cs_probe, .remove = spectrum_cs_detach, .suspend = spectrum_cs_suspend, .resume = spectrum_cs_resume, .id_table = spectrum_cs_ids, }; module_pcmcia_driver(orinoco_driver);
linux-master
drivers/net/wireless/intersil/orinoco/spectrum_cs.c
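spectrum_reset() above rewrites CISREG_CCSR to either start (HCR_RUN) or park (HCR_IDLE) the firmware, while explicitly preserving the memory-width bit (HCR_MEM16) from the value it just read. This small standalone sketch shows only that bit arithmetic; the constants are copied from spectrum_cs.c and the helper name is invented for illustration, it is not part of the driver.

/* Illustration only: compute the CCSR value spectrum_reset() writes back. */
#include <stdint.h>
#include <stdio.h>

#define HCR_RUN   0x07	/* run firmware after reset */
#define HCR_IDLE  0x0E	/* don't run firmware after reset */
#define HCR_MEM16 0x10	/* memory width bit, must be preserved */

static uint8_t spectrum_ccsr_value(uint8_t ccsr_read, int idle)
{
	/* pick RUN or IDLE, but carry over the memory-width bit as read */
	return (idle ? HCR_IDLE : HCR_RUN) | (ccsr_read & HCR_MEM16);
}

int main(void)
{
	printf("run, 16-bit mem: 0x%02x\n", spectrum_ccsr_value(0x10, 0)); /* 0x17 */
	printf("idle, 8-bit mem: 0x%02x\n", spectrum_ccsr_value(0x00, 1)); /* 0x0e */
	return 0;
}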
/* Orinoco MIC helpers * * See copyright notice in main.c */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/if_ether.h> #include <linux/scatterlist.h> #include <crypto/hash.h> #include "orinoco.h" #include "mic.h" /********************************************************************/ /* Michael MIC crypto setup */ /********************************************************************/ int orinoco_mic_init(struct orinoco_private *priv) { priv->tx_tfm_mic = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->tx_tfm_mic)) { printk(KERN_DEBUG "%s: could not allocate " "crypto API michael_mic\n", __func__); priv->tx_tfm_mic = NULL; return -ENOMEM; } priv->rx_tfm_mic = crypto_alloc_shash("michael_mic", 0, 0); if (IS_ERR(priv->rx_tfm_mic)) { printk(KERN_DEBUG "%s: could not allocate " "crypto API michael_mic\n", __func__); priv->rx_tfm_mic = NULL; return -ENOMEM; } return 0; } void orinoco_mic_free(struct orinoco_private *priv) { if (priv->tx_tfm_mic) crypto_free_shash(priv->tx_tfm_mic); if (priv->rx_tfm_mic) crypto_free_shash(priv->rx_tfm_mic); } int orinoco_mic(struct crypto_shash *tfm_michael, u8 *key, u8 *da, u8 *sa, u8 priority, u8 *data, size_t data_len, u8 *mic) { SHASH_DESC_ON_STACK(desc, tfm_michael); u8 hdr[ETH_HLEN + 2]; /* size of header + padding */ int err; if (tfm_michael == NULL) { printk(KERN_WARNING "%s: tfm_michael == NULL\n", __func__); return -1; } /* Copy header into buffer. We need the padding on the end zeroed */ memcpy(&hdr[0], da, ETH_ALEN); memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN); hdr[ETH_ALEN * 2] = priority; hdr[ETH_ALEN * 2 + 1] = 0; hdr[ETH_ALEN * 2 + 2] = 0; hdr[ETH_ALEN * 2 + 3] = 0; desc->tfm = tfm_michael; err = crypto_shash_setkey(tfm_michael, key, MIC_KEYLEN); if (err) return err; err = crypto_shash_init(desc); if (err) return err; err = crypto_shash_update(desc, hdr, sizeof(hdr)); if (err) return err; err = crypto_shash_update(desc, data, data_len); if (err) return err; err = crypto_shash_final(desc, mic); shash_desc_zero(desc); return err; }
linux-master
drivers/net/wireless/intersil/orinoco/mic.c
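orinoco_mic() above hashes a 16-byte pseudo-header (destination address, source address, priority, three bytes of zero padding) ahead of the payload. The sketch below only reproduces that header layout in standalone C so it can be inspected; the actual Michael MIC computation stays with the kernel crypto API in the real driver, and mic_build_hdr() is a name made up for this illustration.

/* Illustration only: the pseudo-header layout fed to michael_mic by orinoco_mic(). */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

static void mic_build_hdr(uint8_t hdr[ETH_ALEN * 2 + 4],
			  const uint8_t *da, const uint8_t *sa, uint8_t priority)
{
	memcpy(&hdr[0], da, ETH_ALEN);		/* destination */
	memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);	/* source */
	hdr[ETH_ALEN * 2] = priority;
	hdr[ETH_ALEN * 2 + 1] = 0;		/* zero padding, as in the driver */
	hdr[ETH_ALEN * 2 + 2] = 0;
	hdr[ETH_ALEN * 2 + 3] = 0;
}

int main(void)
{
	uint8_t da[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	uint8_t sa[ETH_ALEN] = { 0x66, 0x77, 0x88, 0x99, 0xaa, 0xbb };
	uint8_t hdr[ETH_ALEN * 2 + 4];
	int i;

	mic_build_hdr(hdr, da, sa, 0);
	for (i = 0; i < (int)sizeof(hdr); i++)
		printf("%02x%c", hdr[i], i + 1 == (int)sizeof(hdr) ? '\n' : ':');
	return 0;
}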
/* hermes.c * * Driver core for the "Hermes" wireless MAC controller, as used in * the Lucent Orinoco and Cabletron RoamAbout cards. It should also * work on the hfa3841 and hfa3842 MAC controller chips used in the * Prism II chipsets. * * This is not a complete driver, just low-level access routines for * the MAC controller itself. * * Based on the prism2 driver from Absolute Value Systems' linux-wlan * project, the Linux wvlan_cs driver, Lucent's HCF-Light * (wvlan_hcf.c) library, and the NetBSD wireless driver (in no * particular order). * * Copyright (C) 2000, David Gibson, Linuxcare Australia. * (C) Copyright David Gibson, IBM Corp. 2001-2003. * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License * at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and * limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License version 2 (the "GPL"), in * which case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the MPL, indicate your decision by * deleting the provisions above and replace them with the notice and * other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file * under either the MPL or the GPL. */ #include <linux/net.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/delay.h> #include "hermes.h" /* These are maximum timeouts. Most often, card wil react much faster */ #define CMD_BUSY_TIMEOUT (100) /* In iterations of ~1us */ #define CMD_INIT_TIMEOUT (50000) /* in iterations of ~10us */ #define CMD_COMPL_TIMEOUT (20000) /* in iterations of ~10us */ #define ALLOC_COMPL_TIMEOUT (1000) /* in iterations of ~10us */ /* * AUX port access. To unlock the AUX port write the access keys to the * PARAM0-2 registers, then write HERMES_AUX_ENABLE to the HERMES_CONTROL * register. Then read it and make sure it's HERMES_AUX_ENABLED. */ #define HERMES_AUX_ENABLE 0x8000 /* Enable auxiliary port access */ #define HERMES_AUX_DISABLE 0x4000 /* Disable to auxiliary port access */ #define HERMES_AUX_ENABLED 0xC000 /* Auxiliary port is open */ #define HERMES_AUX_DISABLED 0x0000 /* Auxiliary port is closed */ #define HERMES_AUX_PW0 0xFE01 #define HERMES_AUX_PW1 0xDC23 #define HERMES_AUX_PW2 0xBA45 /* HERMES_CMD_DOWNLD */ #define HERMES_PROGRAM_DISABLE (0x0000 | HERMES_CMD_DOWNLD) #define HERMES_PROGRAM_ENABLE_VOLATILE (0x0100 | HERMES_CMD_DOWNLD) #define HERMES_PROGRAM_ENABLE_NON_VOLATILE (0x0200 | HERMES_CMD_DOWNLD) #define HERMES_PROGRAM_NON_VOLATILE (0x0300 | HERMES_CMD_DOWNLD) /* * Debugging helpers */ #define DMSG(stuff...) do {printk(KERN_DEBUG "hermes @ %p: " , hw->iobase); \ printk(stuff); } while (0) #undef HERMES_DEBUG #ifdef HERMES_DEBUG #define DEBUG(lvl, stuff...) if ((lvl) <= HERMES_DEBUG) DMSG(stuff) #else /* ! HERMES_DEBUG */ #define DEBUG(lvl, stuff...) do { } while (0) #endif /* ! HERMES_DEBUG */ static const struct hermes_ops hermes_ops_local; /* * Internal functions */ /* Issue a command to the chip. 
Waiting for it to complete is the caller's problem. Returns -EBUSY if the command register is busy, 0 on success. Callable from any context. */ static int hermes_issue_cmd(struct hermes *hw, u16 cmd, u16 param0, u16 param1, u16 param2) { int k = CMD_BUSY_TIMEOUT; u16 reg; /* First wait for the command register to unbusy */ reg = hermes_read_regn(hw, CMD); while ((reg & HERMES_CMD_BUSY) && k) { k--; udelay(1); reg = hermes_read_regn(hw, CMD); } if (reg & HERMES_CMD_BUSY) return -EBUSY; hermes_write_regn(hw, PARAM2, param2); hermes_write_regn(hw, PARAM1, param1); hermes_write_regn(hw, PARAM0, param0); hermes_write_regn(hw, CMD, cmd); return 0; } /* * Function definitions */ /* For doing cmds that wipe the magic constant in SWSUPPORT0 */ static int hermes_doicmd_wait(struct hermes *hw, u16 cmd, u16 parm0, u16 parm1, u16 parm2, struct hermes_response *resp) { int err = 0; int k; u16 status, reg; err = hermes_issue_cmd(hw, cmd, parm0, parm1, parm2); if (err) return err; reg = hermes_read_regn(hw, EVSTAT); k = CMD_INIT_TIMEOUT; while ((!(reg & HERMES_EV_CMD)) && k) { k--; udelay(10); reg = hermes_read_regn(hw, EVSTAT); } hermes_write_regn(hw, SWSUPPORT0, HERMES_MAGIC); if (!hermes_present(hw)) { DEBUG(0, "hermes @ 0x%x: Card removed during reset.\n", hw->iobase); err = -ENODEV; goto out; } if (!(reg & HERMES_EV_CMD)) { printk(KERN_ERR "hermes @ %p: " "Timeout waiting for card to reset (reg=0x%04x)!\n", hw->iobase, reg); err = -ETIMEDOUT; goto out; } status = hermes_read_regn(hw, STATUS); if (resp) { resp->status = status; resp->resp0 = hermes_read_regn(hw, RESP0); resp->resp1 = hermes_read_regn(hw, RESP1); resp->resp2 = hermes_read_regn(hw, RESP2); } hermes_write_regn(hw, EVACK, HERMES_EV_CMD); if (status & HERMES_STATUS_RESULT) err = -EIO; out: return err; } void hermes_struct_init(struct hermes *hw, void __iomem *address, int reg_spacing) { hw->iobase = address; hw->reg_spacing = reg_spacing; hw->inten = 0x0; hw->eeprom_pda = false; hw->ops = &hermes_ops_local; } EXPORT_SYMBOL(hermes_struct_init); static int hermes_init(struct hermes *hw) { u16 reg; int err = 0; int k; /* We don't want to be interrupted while resetting the chipset */ hw->inten = 0x0; hermes_write_regn(hw, INTEN, 0); hermes_write_regn(hw, EVACK, 0xffff); /* Normally it's a "can't happen" for the command register to be busy when we go to issue a command because we are serializing all commands. However we want to have some chance of resetting the card even if it gets into a stupid state, so we actually wait to see if the command register will unbusy itself here. */ k = CMD_BUSY_TIMEOUT; reg = hermes_read_regn(hw, CMD); while (k && (reg & HERMES_CMD_BUSY)) { if (reg == 0xffff) /* Special case - the card has probably been removed, so don't wait for the timeout */ return -ENODEV; k--; udelay(1); reg = hermes_read_regn(hw, CMD); } /* No need to explicitly handle the timeout - if we've timed out hermes_issue_cmd() will probably return -EBUSY below */ /* According to the documentation, EVSTAT may contain obsolete event occurrence information. We have to acknowledge it by writing EVACK. */ reg = hermes_read_regn(hw, EVSTAT); hermes_write_regn(hw, EVACK, reg); /* We don't use hermes_docmd_wait here, because the reset wipes the magic constant in SWSUPPORT0 away, and it gets confused */ err = hermes_doicmd_wait(hw, HERMES_CMD_INIT, 0, 0, 0, NULL); return err; } /* Issue a command to the chip, and (busy!) wait for it to * complete. 
* * Returns: * < 0 on internal error * 0 on success * > 0 on error returned by the firmware * * Callable from any context, but locking is your problem. */ static int hermes_docmd_wait(struct hermes *hw, u16 cmd, u16 parm0, struct hermes_response *resp) { int err; int k; u16 reg; u16 status; err = hermes_issue_cmd(hw, cmd, parm0, 0, 0); if (err) { if (!hermes_present(hw)) { if (net_ratelimit()) printk(KERN_WARNING "hermes @ %p: " "Card removed while issuing command " "0x%04x.\n", hw->iobase, cmd); err = -ENODEV; } else if (net_ratelimit()) printk(KERN_ERR "hermes @ %p: " "Error %d issuing command 0x%04x.\n", hw->iobase, err, cmd); goto out; } reg = hermes_read_regn(hw, EVSTAT); k = CMD_COMPL_TIMEOUT; while ((!(reg & HERMES_EV_CMD)) && k) { k--; udelay(10); reg = hermes_read_regn(hw, EVSTAT); } if (!hermes_present(hw)) { printk(KERN_WARNING "hermes @ %p: Card removed " "while waiting for command 0x%04x completion.\n", hw->iobase, cmd); err = -ENODEV; goto out; } if (!(reg & HERMES_EV_CMD)) { printk(KERN_ERR "hermes @ %p: Timeout waiting for " "command 0x%04x completion.\n", hw->iobase, cmd); err = -ETIMEDOUT; goto out; } status = hermes_read_regn(hw, STATUS); if (resp) { resp->status = status; resp->resp0 = hermes_read_regn(hw, RESP0); resp->resp1 = hermes_read_regn(hw, RESP1); resp->resp2 = hermes_read_regn(hw, RESP2); } hermes_write_regn(hw, EVACK, HERMES_EV_CMD); if (status & HERMES_STATUS_RESULT) err = -EIO; out: return err; } static int hermes_allocate(struct hermes *hw, u16 size, u16 *fid) { int err = 0; int k; u16 reg; if ((size < HERMES_ALLOC_LEN_MIN) || (size > HERMES_ALLOC_LEN_MAX)) return -EINVAL; err = hermes_docmd_wait(hw, HERMES_CMD_ALLOC, size, NULL); if (err) return err; reg = hermes_read_regn(hw, EVSTAT); k = ALLOC_COMPL_TIMEOUT; while ((!(reg & HERMES_EV_ALLOC)) && k) { k--; udelay(10); reg = hermes_read_regn(hw, EVSTAT); } if (!hermes_present(hw)) { printk(KERN_WARNING "hermes @ %p: " "Card removed waiting for frame allocation.\n", hw->iobase); return -ENODEV; } if (!(reg & HERMES_EV_ALLOC)) { printk(KERN_ERR "hermes @ %p: " "Timeout waiting for frame allocation\n", hw->iobase); return -ETIMEDOUT; } *fid = hermes_read_regn(hw, ALLOCFID); hermes_write_regn(hw, EVACK, HERMES_EV_ALLOC); return 0; } /* Set up a BAP to read a particular chunk of data from card's internal buffer. * * Returns: * < 0 on internal failure (errno) * 0 on success * > 0 on error * from firmware * * Callable from any context */ static int hermes_bap_seek(struct hermes *hw, int bap, u16 id, u16 offset) { int sreg = bap ? HERMES_SELECT1 : HERMES_SELECT0; int oreg = bap ? HERMES_OFFSET1 : HERMES_OFFSET0; int k; u16 reg; /* Paranoia.. */ if ((offset > HERMES_BAP_OFFSET_MAX) || (offset % 2)) return -EINVAL; k = HERMES_BAP_BUSY_TIMEOUT; reg = hermes_read_reg(hw, oreg); while ((reg & HERMES_OFFSET_BUSY) && k) { k--; udelay(1); reg = hermes_read_reg(hw, oreg); } if (reg & HERMES_OFFSET_BUSY) return -ETIMEDOUT; /* Now we actually set up the transfer */ hermes_write_reg(hw, sreg, id); hermes_write_reg(hw, oreg, offset); /* Wait for the BAP to be ready */ k = HERMES_BAP_BUSY_TIMEOUT; reg = hermes_read_reg(hw, oreg); while ((reg & (HERMES_OFFSET_BUSY | HERMES_OFFSET_ERR)) && k) { k--; udelay(1); reg = hermes_read_reg(hw, oreg); } if (reg != offset) { printk(KERN_ERR "hermes @ %p: BAP%d offset %s: " "reg=0x%x id=0x%x offset=0x%x\n", hw->iobase, bap, (reg & HERMES_OFFSET_BUSY) ? 
"timeout" : "error", reg, id, offset); if (reg & HERMES_OFFSET_BUSY) return -ETIMEDOUT; return -EIO; /* error or wrong offset */ } return 0; } /* Read a block of data from the chip's buffer, via the * BAP. Synchronization/serialization is the caller's problem. len * must be even. * * Returns: * < 0 on internal failure (errno) * 0 on success * > 0 on error from firmware */ static int hermes_bap_pread(struct hermes *hw, int bap, void *buf, int len, u16 id, u16 offset) { int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; int err = 0; if ((len < 0) || (len % 2)) return -EINVAL; err = hermes_bap_seek(hw, bap, id, offset); if (err) goto out; /* Actually do the transfer */ hermes_read_words(hw, dreg, buf, len / 2); out: return err; } /* Write a block of data to the chip's buffer, via the * BAP. Synchronization/serialization is the caller's problem. * * Returns: * < 0 on internal failure (errno) * 0 on success * > 0 on error from firmware */ static int hermes_bap_pwrite(struct hermes *hw, int bap, const void *buf, int len, u16 id, u16 offset) { int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; int err = 0; if (len < 0) return -EINVAL; err = hermes_bap_seek(hw, bap, id, offset); if (err) goto out; /* Actually do the transfer */ hermes_write_bytes(hw, dreg, buf, len); out: return err; } /* Read a Length-Type-Value record from the card. * * If length is NULL, we ignore the length read from the card, and * read the entire buffer regardless. This is useful because some of * the configuration records appear to have incorrect lengths in * practice. * * Callable from user or bh context. */ static int hermes_read_ltv(struct hermes *hw, int bap, u16 rid, unsigned bufsize, u16 *length, void *buf) { int err = 0; int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; u16 rlength, rtype; unsigned nwords; if (bufsize % 2) return -EINVAL; err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS, rid, NULL); if (err) return err; err = hermes_bap_seek(hw, bap, rid, 0); if (err) return err; rlength = hermes_read_reg(hw, dreg); if (!rlength) return -ENODATA; rtype = hermes_read_reg(hw, dreg); if (length) *length = rlength; if (rtype != rid) printk(KERN_WARNING "hermes @ %p: %s(): " "rid (0x%04x) does not match type (0x%04x)\n", hw->iobase, __func__, rid, rtype); if (HERMES_RECLEN_TO_BYTES(rlength) > bufsize) printk(KERN_WARNING "hermes @ %p: " "Truncating LTV record from %d to %d bytes. " "(rid=0x%04x, len=0x%04x)\n", hw->iobase, HERMES_RECLEN_TO_BYTES(rlength), bufsize, rid, rlength); nwords = min((unsigned)rlength - 1, bufsize / 2); hermes_read_words(hw, dreg, buf, nwords); return 0; } static int hermes_write_ltv(struct hermes *hw, int bap, u16 rid, u16 length, const void *value) { int dreg = bap ? HERMES_DATA1 : HERMES_DATA0; int err = 0; unsigned count; if (length == 0) return -EINVAL; err = hermes_bap_seek(hw, bap, rid, 0); if (err) return err; hermes_write_reg(hw, dreg, length); hermes_write_reg(hw, dreg, rid); count = length - 1; hermes_write_bytes(hw, dreg, value, count << 1); err = hermes_docmd_wait(hw, HERMES_CMD_ACCESS | HERMES_CMD_WRITE, rid, NULL); return err; } /*** Hermes AUX control ***/ static inline void hermes_aux_setaddr(struct hermes *hw, u32 addr) { hermes_write_reg(hw, HERMES_AUXPAGE, (u16) (addr >> 7)); hermes_write_reg(hw, HERMES_AUXOFFSET, (u16) (addr & 0x7F)); } static inline int hermes_aux_control(struct hermes *hw, int enabled) { int desired_state = enabled ? HERMES_AUX_ENABLED : HERMES_AUX_DISABLED; int action = enabled ? HERMES_AUX_ENABLE : HERMES_AUX_DISABLE; int i; /* Already open? 
*/ if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state) return 0; hermes_write_reg(hw, HERMES_PARAM0, HERMES_AUX_PW0); hermes_write_reg(hw, HERMES_PARAM1, HERMES_AUX_PW1); hermes_write_reg(hw, HERMES_PARAM2, HERMES_AUX_PW2); hermes_write_reg(hw, HERMES_CONTROL, action); for (i = 0; i < 20; i++) { udelay(10); if (hermes_read_reg(hw, HERMES_CONTROL) == desired_state) return 0; } return -EBUSY; } /*** Hermes programming ***/ /* About to start programming data (Hermes I) * offset is the entry point * * Spectrum_cs' Symbol fw does not require this * wl_lkm Agere fw does * Don't know about intersil */ static int hermesi_program_init(struct hermes *hw, u32 offset) { int err; /* Disable interrupts?*/ /*hw->inten = 0x0;*/ /*hermes_write_regn(hw, INTEN, 0);*/ /*hermes_set_irqmask(hw, 0);*/ /* Acknowledge any outstanding command */ hermes_write_regn(hw, EVACK, 0xFFFF); /* Using init_cmd_wait rather than cmd_wait */ err = hw->ops->init_cmd_wait(hw, 0x0100 | HERMES_CMD_INIT, 0, 0, 0, NULL); if (err) return err; err = hw->ops->init_cmd_wait(hw, 0x0000 | HERMES_CMD_INIT, 0, 0, 0, NULL); if (err) return err; err = hermes_aux_control(hw, 1); pr_debug("AUX enable returned %d\n", err); if (err) return err; pr_debug("Enabling volatile, EP 0x%08x\n", offset); err = hw->ops->init_cmd_wait(hw, HERMES_PROGRAM_ENABLE_VOLATILE, offset & 0xFFFFu, offset >> 16, 0, NULL); pr_debug("PROGRAM_ENABLE returned %d\n", err); return err; } /* Done programming data (Hermes I) * * Spectrum_cs' Symbol fw does not require this * wl_lkm Agere fw does * Don't know about intersil */ static int hermesi_program_end(struct hermes *hw) { struct hermes_response resp; int rc = 0; int err; rc = hw->ops->cmd_wait(hw, HERMES_PROGRAM_DISABLE, 0, &resp); pr_debug("PROGRAM_DISABLE returned %d, " "r0 0x%04x, r1 0x%04x, r2 0x%04x\n", rc, resp.resp0, resp.resp1, resp.resp2); if ((rc == 0) && ((resp.status & HERMES_STATUS_CMDCODE) != HERMES_CMD_DOWNLD)) rc = -EIO; err = hermes_aux_control(hw, 0); pr_debug("AUX disable returned %d\n", err); /* Acknowledge any outstanding command */ hermes_write_regn(hw, EVACK, 0xFFFF); /* Reinitialise, ignoring return */ (void) hw->ops->init_cmd_wait(hw, 0x0000 | HERMES_CMD_INIT, 0, 0, 0, NULL); return rc ? rc : err; } static int hermes_program_bytes(struct hermes *hw, const char *data, u32 addr, u32 len) { /* wl lkm splits the programming into chunks of 2000 bytes. * This restriction appears to come from USB. The PCMCIA * adapters can program the whole lot in one go */ hermes_aux_setaddr(hw, addr); hermes_write_bytes(hw, HERMES_AUXDATA, data, len); return 0; } /* Read PDA from the adapter */ static int hermes_read_pda(struct hermes *hw, __le16 *pda, u32 pda_addr, u16 pda_len) { int ret; u16 pda_size; u16 data_len = pda_len; __le16 *data = pda; if (hw->eeprom_pda) { /* PDA of spectrum symbol is in eeprom */ /* Issue command to read EEPROM */ ret = hw->ops->cmd_wait(hw, HERMES_CMD_READMIF, 0, NULL); if (ret) return ret; } else { /* wl_lkm does not include PDA size in the PDA area. 
* We will pad the information into pda, so other routines * don't have to be modified */ pda[0] = cpu_to_le16(pda_len - 2); /* Includes CFG_PROD_DATA but not itself */ pda[1] = cpu_to_le16(0x0800); /* CFG_PROD_DATA */ data_len = pda_len - 4; data = pda + 2; } /* Open auxiliary port */ ret = hermes_aux_control(hw, 1); pr_debug("AUX enable returned %d\n", ret); if (ret) return ret; /* Read PDA */ hermes_aux_setaddr(hw, pda_addr); hermes_read_words(hw, HERMES_AUXDATA, data, data_len / 2); /* Close aux port */ ret = hermes_aux_control(hw, 0); pr_debug("AUX disable returned %d\n", ret); /* Check PDA length */ pda_size = le16_to_cpu(pda[0]); pr_debug("Actual PDA length %d, Max allowed %d\n", pda_size, pda_len); if (pda_size > pda_len) return -EINVAL; return 0; } static void hermes_lock_irqsave(spinlock_t *lock, unsigned long *flags) __acquires(lock) { spin_lock_irqsave(lock, *flags); } static void hermes_unlock_irqrestore(spinlock_t *lock, unsigned long *flags) __releases(lock) { spin_unlock_irqrestore(lock, *flags); } static void hermes_lock_irq(spinlock_t *lock) __acquires(lock) { spin_lock_irq(lock); } static void hermes_unlock_irq(spinlock_t *lock) __releases(lock) { spin_unlock_irq(lock); } /* Hermes operations for local buses */ static const struct hermes_ops hermes_ops_local = { .init = hermes_init, .cmd_wait = hermes_docmd_wait, .init_cmd_wait = hermes_doicmd_wait, .allocate = hermes_allocate, .read_ltv = hermes_read_ltv, .read_ltv_pr = hermes_read_ltv, .write_ltv = hermes_write_ltv, .bap_pread = hermes_bap_pread, .bap_pwrite = hermes_bap_pwrite, .read_pda = hermes_read_pda, .program_init = hermesi_program_init, .program_end = hermesi_program_end, .program = hermes_program_bytes, .lock_irqsave = hermes_lock_irqsave, .unlock_irqrestore = hermes_unlock_irqrestore, .lock_irq = hermes_lock_irq, .unlock_irq = hermes_unlock_irq, };
linux-master
drivers/net/wireless/intersil/orinoco/hermes.c
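Illustrative note (not part of the driver file above): the LTV record convention used by hermes_read_ltv() and hermes_write_ltv() counts the record length in 16-bit words, including the type/RID word but not the length word itself, so a record carrying N data bytes stores a length of N/2 + 1. Below is a minimal user-space sketch of that framing; RECLEN_TO_BYTES mirrors what HERMES_RECLEN_TO_BYTES is assumed to expand to, and the RID value is hypothetical.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RECLEN_TO_BYTES(n)  (2 * ((n) - 1))  /* assumed shape of HERMES_RECLEN_TO_BYTES */

struct ltv {
	uint16_t len;      /* in 16-bit words: type word + data words */
	uint16_t type;     /* record id (RID) */
	uint8_t  data[32];
};

/* Pack a data buffer into an LTV record using the driver's length rule. */
static void ltv_pack(struct ltv *rec, uint16_t rid, const void *buf, size_t nbytes)
{
	rec->len  = (uint16_t)(nbytes / 2 + 1);
	rec->type = rid;
	memcpy(rec->data, buf, nbytes);
}

int main(void)
{
	static const uint8_t payload[6] = { 1, 2, 3, 4, 5, 6 };
	struct ltv rec;

	ltv_pack(&rec, 0xfc0e /* hypothetical RID */, payload, sizeof(payload));
	printf("len=%d words, payload=%d bytes\n",
	       (int)rec.len, (int)RECLEN_TO_BYTES(rec.len));
	return 0;
}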
/* * Hermes download helper. * * This helper: * - is capable of writing to the volatile area of the hermes device * - is currently not capable of writing to non-volatile areas * - provide helpers to identify and update plugin data * - is not capable of interpreting a fw image directly. That is up to * the main card driver. * - deals with Hermes I devices. It can probably be modified to deal * with Hermes II devices * * Copyright (C) 2007, David Kilroy * * Plug data code slightly modified from spectrum_cs driver * Copyright (C) 2002-2005 Pavel Roskin <[email protected]> * Portions based on information in wl_lkm_718 Agere driver * COPYRIGHT (C) 2001-2004 by Agere Systems Inc. All Rights Reserved * * The contents of this file are subject to the Mozilla Public License * Version 1.1 (the "License"); you may not use this file except in * compliance with the License. You may obtain a copy of the License * at http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See * the License for the specific language governing rights and * limitations under the License. * * Alternatively, the contents of this file may be used under the * terms of the GNU General Public License version 2 (the "GPL"), in * which case the provisions of the GPL are applicable instead of the * above. If you wish to allow the use of your version of this file * only under the terms of the GPL and not to allow others to use your * version of this file under the MPL, indicate your decision by * deleting the provisions above and replace them with the notice and * other provisions required by the GPL. If you do not delete the * provisions above, a recipient may use your version of this file * under either the MPL or the GPL. */ #include <linux/module.h> #include <linux/delay.h> #include "hermes.h" #include "hermes_dld.h" #define PFX "hermes_dld: " /* End markers used in dblocks */ #define PDI_END 0x00000000 /* End of PDA */ #define BLOCK_END 0xFFFFFFFF /* Last image block */ #define TEXT_END 0x1A /* End of text header */ /* * The following structures have little-endian fields denoted by * the leading underscore. Don't access them directly - use inline * functions defined below. */ /* * The binary image to be downloaded consists of series of data blocks. * Each block has the following structure. */ struct dblock { __le32 addr; /* adapter address where to write the block */ __le16 len; /* length of the data only, in bytes */ char data[]; /* data to be written */ } __packed; /* * Plug Data References are located in the image after the last data * block. They refer to areas in the adapter memory where the plug data * items with matching ID should be written. */ struct pdr { __le32 id; /* record ID */ __le32 addr; /* adapter address where to write the data */ __le32 len; /* expected length of the data, in bytes */ char next[]; /* next PDR starts here */ } __packed; /* * Plug Data Items are located in the EEPROM read from the adapter by * primary firmware. They refer to the device-specific data that should * be plugged into the secondary firmware. 
*/ struct pdi { __le16 len; /* length of ID and data, in words */ __le16 id; /* record ID */ char data[]; /* plug data */ } __packed; /*** FW data block access functions ***/ static inline u32 dblock_addr(const struct dblock *blk) { return le32_to_cpu(blk->addr); } static inline u32 dblock_len(const struct dblock *blk) { return le16_to_cpu(blk->len); } /*** PDR Access functions ***/ static inline u32 pdr_id(const struct pdr *pdr) { return le32_to_cpu(pdr->id); } static inline u32 pdr_addr(const struct pdr *pdr) { return le32_to_cpu(pdr->addr); } static inline u32 pdr_len(const struct pdr *pdr) { return le32_to_cpu(pdr->len); } /*** PDI Access functions ***/ static inline u32 pdi_id(const struct pdi *pdi) { return le16_to_cpu(pdi->id); } /* Return length of the data only, in bytes */ static inline u32 pdi_len(const struct pdi *pdi) { return 2 * (le16_to_cpu(pdi->len) - 1); } /*** Plug Data Functions ***/ /* * Scan PDR for the record with the specified RECORD_ID. * If it's not found, return NULL. */ static const struct pdr * hermes_find_pdr(const struct pdr *first_pdr, u32 record_id, const void *end) { const struct pdr *pdr = first_pdr; end -= sizeof(struct pdr); while (((void *) pdr <= end) && (pdr_id(pdr) != PDI_END)) { /* * PDR area is currently not terminated by PDI_END. * It's followed by CRC records, which have the type * field where PDR has length. The type can be 0 or 1. */ if (pdr_len(pdr) < 2) return NULL; /* If the record ID matches, we are done */ if (pdr_id(pdr) == record_id) return pdr; pdr = (struct pdr *) pdr->next; } return NULL; } /* Scan production data items for a particular entry */ static const struct pdi * hermes_find_pdi(const struct pdi *first_pdi, u32 record_id, const void *end) { const struct pdi *pdi = first_pdi; end -= sizeof(struct pdi); while (((void *) pdi <= end) && (pdi_id(pdi) != PDI_END)) { /* If the record ID matches, we are done */ if (pdi_id(pdi) == record_id) return pdi; pdi = (struct pdi *) &pdi->data[pdi_len(pdi)]; } return NULL; } /* Process one Plug Data Item - find corresponding PDR and plug it */ static int hermes_plug_pdi(struct hermes *hw, const struct pdr *first_pdr, const struct pdi *pdi, const void *pdr_end) { const struct pdr *pdr; /* Find the PDR corresponding to this PDI */ pdr = hermes_find_pdr(first_pdr, pdi_id(pdi), pdr_end); /* No match is found, safe to ignore */ if (!pdr) return 0; /* Lengths of the data in PDI and PDR must match */ if (pdi_len(pdi) != pdr_len(pdr)) return -EINVAL; /* do the actual plugging */ hw->ops->program(hw, pdi->data, pdr_addr(pdr), pdi_len(pdi)); return 0; } /* Parse PDA and write the records into the adapter * * Attempt to write every records that is in the specified pda * which also has a valid production data record for the firmware. */ int hermes_apply_pda(struct hermes *hw, const char *first_pdr, const void *pdr_end, const __le16 *pda, const void *pda_end) { int ret; const struct pdi *pdi; const struct pdr *pdr; pdr = (const struct pdr *) first_pdr; pda_end -= sizeof(struct pdi); /* Go through every PDI and plug them into the adapter */ pdi = (const struct pdi *) (pda + 2); while (((void *) pdi <= pda_end) && (pdi_id(pdi) != PDI_END)) { ret = hermes_plug_pdi(hw, pdr, pdi, pdr_end); if (ret) return ret; /* Increment to the next PDI */ pdi = (const struct pdi *) &pdi->data[pdi_len(pdi)]; } return 0; } /* Identify the total number of bytes in all blocks * including the header data. 
*/ size_t hermes_blocks_length(const char *first_block, const void *end) { const struct dblock *blk = (const struct dblock *) first_block; int total_len = 0; int len; end -= sizeof(*blk); /* Skip all blocks to locate Plug Data References * (Spectrum CS) */ while (((void *) blk <= end) && (dblock_addr(blk) != BLOCK_END)) { len = dblock_len(blk); total_len += sizeof(*blk) + len; blk = (struct dblock *) &blk->data[len]; } return total_len; } /*** Hermes programming ***/ /* Program the data blocks */ int hermes_program(struct hermes *hw, const char *first_block, const void *end) { const struct dblock *blk; u32 blkaddr; u32 blklen; int err = 0; blk = (const struct dblock *) first_block; if ((void *) blk > (end - sizeof(*blk))) return -EIO; blkaddr = dblock_addr(blk); blklen = dblock_len(blk); while ((blkaddr != BLOCK_END) && (((void *) blk + blklen) <= end)) { pr_debug(PFX "Programming block of length %d " "to address 0x%08x\n", blklen, blkaddr); err = hw->ops->program(hw, blk->data, blkaddr, blklen); if (err) break; blk = (const struct dblock *) &blk->data[blklen]; if ((void *) blk > (end - sizeof(*blk))) return -EIO; blkaddr = dblock_addr(blk); blklen = dblock_len(blk); } return err; } /*** Default plugging data for Hermes I ***/ /* Values from wl_lkm_718/hcf/dhf.c */ #define DEFINE_DEFAULT_PDR(pid, length, data) \ static const struct { \ __le16 len; \ __le16 id; \ u8 val[length]; \ } __packed default_pdr_data_##pid = { \ cpu_to_le16((sizeof(default_pdr_data_##pid)/ \ sizeof(__le16)) - 1), \ cpu_to_le16(pid), \ data \ } #define DEFAULT_PDR(pid) default_pdr_data_##pid /* HWIF Compatibility */ DEFINE_DEFAULT_PDR(0x0005, 10, "\x00\x00\x06\x00\x01\x00\x01\x00\x01\x00"); /* PPPPSign */ DEFINE_DEFAULT_PDR(0x0108, 4, "\x00\x00\x00\x00"); /* PPPPProf */ DEFINE_DEFAULT_PDR(0x0109, 10, "\x00\x00\x00\x00\x03\x00\x00\x00\x00\x00"); /* Antenna diversity */ DEFINE_DEFAULT_PDR(0x0150, 2, "\x00\x3F"); /* Modem VCO band Set-up */ DEFINE_DEFAULT_PDR(0x0160, 28, "\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00\x00\x00\x00\x00" "\x00\x00\x00\x00"); /* Modem Rx Gain Table Values */ DEFINE_DEFAULT_PDR(0x0161, 256, "\x3F\x01\x3F\01\x3F\x01\x3F\x01" "\x3F\x01\x3F\01\x3F\x01\x3F\x01" "\x3F\x01\x3F\01\x3F\x01\x3F\x01" "\x3F\x01\x3F\01\x3F\x01\x3F\x01" "\x3F\x01\x3E\01\x3E\x01\x3D\x01" "\x3D\x01\x3C\01\x3C\x01\x3B\x01" "\x3B\x01\x3A\01\x3A\x01\x39\x01" "\x39\x01\x38\01\x38\x01\x37\x01" "\x37\x01\x36\01\x36\x01\x35\x01" "\x35\x01\x34\01\x34\x01\x33\x01" "\x33\x01\x32\x01\x32\x01\x31\x01" "\x31\x01\x30\x01\x30\x01\x7B\x01" "\x7B\x01\x7A\x01\x7A\x01\x79\x01" "\x79\x01\x78\x01\x78\x01\x77\x01" "\x77\x01\x76\x01\x76\x01\x75\x01" "\x75\x01\x74\x01\x74\x01\x73\x01" "\x73\x01\x72\x01\x72\x01\x71\x01" "\x71\x01\x70\x01\x70\x01\x68\x01" "\x68\x01\x67\x01\x67\x01\x66\x01" "\x66\x01\x65\x01\x65\x01\x57\x01" "\x57\x01\x56\x01\x56\x01\x55\x01" "\x55\x01\x54\x01\x54\x01\x53\x01" "\x53\x01\x52\x01\x52\x01\x51\x01" "\x51\x01\x50\x01\x50\x01\x48\x01" "\x48\x01\x47\x01\x47\x01\x46\x01" "\x46\x01\x45\x01\x45\x01\x44\x01" "\x44\x01\x43\x01\x43\x01\x42\x01" "\x42\x01\x41\x01\x41\x01\x40\x01" "\x40\x01\x40\x01\x40\x01\x40\x01" "\x40\x01\x40\x01\x40\x01\x40\x01" "\x40\x01\x40\x01\x40\x01\x40\x01" "\x40\x01\x40\x01\x40\x01\x40\x01"); /* Write PDA according to certain rules. * * For every production data record, look for a previous setting in * the pda, and use that. * * For certain records, use defaults if they are not found in pda. 
*/ int hermes_apply_pda_with_defaults(struct hermes *hw, const char *first_pdr, const void *pdr_end, const __le16 *pda, const void *pda_end) { const struct pdr *pdr = (const struct pdr *) first_pdr; const struct pdi *first_pdi = (const struct pdi *) &pda[2]; const struct pdi *pdi; const struct pdi *default_pdi = NULL; const struct pdi *outdoor_pdi; int record_id; pdr_end -= sizeof(struct pdr); while (((void *) pdr <= pdr_end) && (pdr_id(pdr) != PDI_END)) { /* * For spectrum_cs firmwares, * PDR area is currently not terminated by PDI_END. * It's followed by CRC records, which have the type * field where PDR has length. The type can be 0 or 1. */ if (pdr_len(pdr) < 2) break; record_id = pdr_id(pdr); pdi = hermes_find_pdi(first_pdi, record_id, pda_end); if (pdi) pr_debug(PFX "Found record 0x%04x at %p\n", record_id, pdi); switch (record_id) { case 0x110: /* Modem REFDAC values */ case 0x120: /* Modem VGDAC values */ outdoor_pdi = hermes_find_pdi(first_pdi, record_id + 1, pda_end); default_pdi = NULL; if (outdoor_pdi) { pdi = outdoor_pdi; pr_debug(PFX "Using outdoor record 0x%04x at %p\n", record_id + 1, pdi); } break; case 0x5: /* HWIF Compatibility */ default_pdi = (struct pdi *) &DEFAULT_PDR(0x0005); break; case 0x108: /* PPPPSign */ default_pdi = (struct pdi *) &DEFAULT_PDR(0x0108); break; case 0x109: /* PPPPProf */ default_pdi = (struct pdi *) &DEFAULT_PDR(0x0109); break; case 0x150: /* Antenna diversity */ default_pdi = (struct pdi *) &DEFAULT_PDR(0x0150); break; case 0x160: /* Modem VCO band Set-up */ default_pdi = (struct pdi *) &DEFAULT_PDR(0x0160); break; case 0x161: /* Modem Rx Gain Table Values */ default_pdi = (struct pdi *) &DEFAULT_PDR(0x0161); break; default: default_pdi = NULL; break; } if (!pdi && default_pdi) { /* Use default */ pdi = default_pdi; pr_debug(PFX "Using default record 0x%04x at %p\n", record_id, pdi); } if (pdi) { /* Lengths of the data in PDI and PDR must match */ if ((pdi_len(pdi) == pdr_len(pdr)) && ((void *) pdi->data + pdi_len(pdi) < pda_end)) { /* do the actual plugging */ hw->ops->program(hw, pdi->data, pdr_addr(pdr), pdi_len(pdi)); } } pdr++; } return 0; }
linux-master
drivers/net/wireless/intersil/orinoco/hermes_dld.c
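Illustrative note (not part of the file above): walking the Plug Data Items mirrors hermes_find_pdi()/hermes_apply_pda(): each PDI is laid out as <len><id><data>, with len counting 16-bit words (the id word plus the data words), so the payload is 2 * (len - 1) bytes (see pdi_len() above) and the list ends at a zero id. A minimal user-space sketch with two hypothetical records:

#include <stdint.h>
#include <stdio.h>

#define PDI_END 0x0000

/* Read a little-endian 16-bit field without relying on host byte order. */
static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

int main(void)
{
	/* Two hypothetical records followed by the end marker. */
	static const uint8_t pda[] = {
		0x03, 0x00,  0x01, 0x01,  0xaa, 0xbb, 0xcc, 0xdd, /* id 0x0101, 4 data bytes */
		0x02, 0x00,  0x50, 0x01,  0x3f, 0x00,             /* id 0x0150, 2 data bytes */
		0x00, 0x00,  0x00, 0x00,                          /* PDI_END */
	};
	const uint8_t *p = pda;

	while (get_le16(p + 2) != PDI_END) {
		uint16_t len = get_le16(p);
		uint16_t id = get_le16(p + 2);
		uint16_t nbytes = 2 * (len - 1);  /* data only, id word excluded */

		printf("record 0x%04x, %u data bytes\n", id, nbytes);
		p += 4 + nbytes;  /* skip header and payload to the next PDI */
	}
	return 0;
}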
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (C) 2008 Christian Lamparter <[email protected]> * Copyright 2008 Johannes Berg <[email protected]> * * This driver is a port from stlc45xx: * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/firmware.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/spi/spi.h> #include <linux/etherdevice.h> #include <linux/gpio.h> #include <linux/slab.h> #include "p54spi.h" #include "p54.h" #include "lmac.h" #ifdef CONFIG_P54_SPI_DEFAULT_EEPROM #include "p54spi_eeprom.h" #endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */ MODULE_FIRMWARE("3826.arm"); MODULE_FIRMWARE("3826.eeprom"); /* gpios should be handled in board files and provided via platform data, * but because it's currently impossible for p54spi to have a header file * in include/linux, let's use module paramaters for now */ static int p54spi_gpio_power = 97; module_param(p54spi_gpio_power, int, 0444); MODULE_PARM_DESC(p54spi_gpio_power, "gpio number for power line"); static int p54spi_gpio_irq = 87; module_param(p54spi_gpio_irq, int, 0444); MODULE_PARM_DESC(p54spi_gpio_irq, "gpio number for irq line"); static void p54spi_spi_read(struct p54s_priv *priv, u8 address, void *buf, size_t len) { struct spi_transfer t[2]; struct spi_message m; __le16 addr; /* We first push the address */ addr = cpu_to_le16(address << 8 | SPI_ADRS_READ_BIT_15); spi_message_init(&m); memset(t, 0, sizeof(t)); t[0].tx_buf = &addr; t[0].len = sizeof(addr); spi_message_add_tail(&t[0], &m); t[1].rx_buf = buf; t[1].len = len; spi_message_add_tail(&t[1], &m); spi_sync(priv->spi, &m); } static void p54spi_spi_write(struct p54s_priv *priv, u8 address, const void *buf, size_t len) { struct spi_transfer t[3]; struct spi_message m; __le16 addr; /* We first push the address */ addr = cpu_to_le16(address << 8); spi_message_init(&m); memset(t, 0, sizeof(t)); t[0].tx_buf = &addr; t[0].len = sizeof(addr); spi_message_add_tail(&t[0], &m); t[1].tx_buf = buf; t[1].len = len & ~1; spi_message_add_tail(&t[1], &m); if (len % 2) { __le16 last_word; last_word = cpu_to_le16(((u8 *)buf)[len - 1]); t[2].tx_buf = &last_word; t[2].len = sizeof(last_word); spi_message_add_tail(&t[2], &m); } spi_sync(priv->spi, &m); } static u32 p54spi_read32(struct p54s_priv *priv, u8 addr) { __le32 val; p54spi_spi_read(priv, addr, &val, sizeof(val)); return le32_to_cpu(val); } static inline void p54spi_write16(struct p54s_priv *priv, u8 addr, __le16 val) { p54spi_spi_write(priv, addr, &val, sizeof(val)); } static inline void p54spi_write32(struct p54s_priv *priv, u8 addr, __le32 val) { p54spi_spi_write(priv, addr, &val, sizeof(val)); } static int p54spi_wait_bit(struct p54s_priv *priv, u16 reg, u32 bits) { int i; for (i = 0; i < 2000; i++) { u32 buffer = p54spi_read32(priv, reg); if ((buffer & bits) == bits) return 1; } return 0; } static int p54spi_spi_write_dma(struct p54s_priv *priv, __le32 base, const void *buf, size_t len) { if (!p54spi_wait_bit(priv, SPI_ADRS_DMA_WRITE_CTRL, HOST_ALLOWED)) { dev_err(&priv->spi->dev, "spi_write_dma not allowed " "to DMA write.\n"); return -EAGAIN; } p54spi_write16(priv, SPI_ADRS_DMA_WRITE_CTRL, cpu_to_le16(SPI_DMA_WRITE_CTRL_ENABLE)); p54spi_write16(priv, SPI_ADRS_DMA_WRITE_LEN, cpu_to_le16(len)); p54spi_write32(priv, SPI_ADRS_DMA_WRITE_BASE, base); p54spi_spi_write(priv, SPI_ADRS_DMA_DATA, buf, len); return 0; } static int p54spi_request_firmware(struct ieee80211_hw *dev) { struct p54s_priv 
*priv = dev->priv; int ret; /* FIXME: should driver use it's own struct device? */ ret = request_firmware(&priv->firmware, "3826.arm", &priv->spi->dev); if (ret < 0) { dev_err(&priv->spi->dev, "request_firmware() failed: %d", ret); return ret; } ret = p54_parse_firmware(dev, priv->firmware); if (ret) { /* the firmware is released by the caller */ return ret; } return 0; } static int p54spi_request_eeprom(struct ieee80211_hw *dev) { struct p54s_priv *priv = dev->priv; const struct firmware *eeprom; int ret; /* allow users to customize their eeprom. */ ret = request_firmware_direct(&eeprom, "3826.eeprom", &priv->spi->dev); if (ret < 0) { #ifdef CONFIG_P54_SPI_DEFAULT_EEPROM dev_info(&priv->spi->dev, "loading default eeprom...\n"); ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom, sizeof(p54spi_eeprom)); #else dev_err(&priv->spi->dev, "Failed to request user eeprom\n"); #endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */ } else { dev_info(&priv->spi->dev, "loading user eeprom...\n"); ret = p54_parse_eeprom(dev, (void *) eeprom->data, (int)eeprom->size); release_firmware(eeprom); } return ret; } static int p54spi_upload_firmware(struct ieee80211_hw *dev) { struct p54s_priv *priv = dev->priv; unsigned long fw_len, _fw_len; unsigned int offset = 0; int err = 0; u8 *fw; fw_len = priv->firmware->size; fw = kmemdup(priv->firmware->data, fw_len, GFP_KERNEL); if (!fw) return -ENOMEM; /* stop the device */ p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET | SPI_CTRL_STAT_START_HALTED)); msleep(TARGET_BOOT_SLEEP); p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_START_HALTED)); msleep(TARGET_BOOT_SLEEP); while (fw_len > 0) { _fw_len = min_t(long, fw_len, SPI_MAX_PACKET_SIZE); err = p54spi_spi_write_dma(priv, cpu_to_le32( ISL38XX_DEV_FIRMWARE_ADDR + offset), (fw + offset), _fw_len); if (err < 0) goto out; fw_len -= _fw_len; offset += _fw_len; } BUG_ON(fw_len != 0); /* enable host interrupts */ p54spi_write32(priv, SPI_ADRS_HOST_INT_EN, cpu_to_le32(SPI_HOST_INTS_DEFAULT)); /* boot the device */ p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET | SPI_CTRL_STAT_RAM_BOOT)); msleep(TARGET_BOOT_SLEEP); p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_RAM_BOOT)); msleep(TARGET_BOOT_SLEEP); out: kfree(fw); return err; } static void p54spi_power_off(struct p54s_priv *priv) { disable_irq(gpio_to_irq(p54spi_gpio_irq)); gpio_set_value(p54spi_gpio_power, 0); } static void p54spi_power_on(struct p54s_priv *priv) { gpio_set_value(p54spi_gpio_power, 1); enable_irq(gpio_to_irq(p54spi_gpio_irq)); /* need to wait a while before device can be accessed, the length * is just a guess */ msleep(10); } static inline void p54spi_int_ack(struct p54s_priv *priv, u32 val) { p54spi_write32(priv, SPI_ADRS_HOST_INT_ACK, cpu_to_le32(val)); } static int p54spi_wakeup(struct p54s_priv *priv) { /* wake the chip */ p54spi_write32(priv, SPI_ADRS_ARM_INTERRUPTS, cpu_to_le32(SPI_TARGET_INT_WAKEUP)); /* And wait for the READY interrupt */ if (!p54spi_wait_bit(priv, SPI_ADRS_HOST_INTERRUPTS, SPI_HOST_INT_READY)) { dev_err(&priv->spi->dev, "INT_READY timeout\n"); return -EBUSY; } p54spi_int_ack(priv, SPI_HOST_INT_READY); return 0; } static inline void p54spi_sleep(struct p54s_priv *priv) { p54spi_write32(priv, SPI_ADRS_ARM_INTERRUPTS, cpu_to_le32(SPI_TARGET_INT_SLEEP)); } static void p54spi_int_ready(struct p54s_priv *priv) 
{ p54spi_write32(priv, SPI_ADRS_HOST_INT_EN, cpu_to_le32( SPI_HOST_INT_UPDATE | SPI_HOST_INT_SW_UPDATE)); switch (priv->fw_state) { case FW_STATE_BOOTING: priv->fw_state = FW_STATE_READY; complete(&priv->fw_comp); break; case FW_STATE_RESETTING: priv->fw_state = FW_STATE_READY; /* TODO: reinitialize state */ break; default: break; } } static int p54spi_rx(struct p54s_priv *priv) { struct sk_buff *skb; u16 len; u16 rx_head[2]; #define READAHEAD_SZ (sizeof(rx_head)-sizeof(u16)) if (p54spi_wakeup(priv) < 0) return -EBUSY; /* Read data size and first data word in one SPI transaction * This is workaround for firmware/DMA bug, * when first data word gets lost under high load. */ p54spi_spi_read(priv, SPI_ADRS_DMA_DATA, rx_head, sizeof(rx_head)); len = rx_head[0]; if (len == 0) { p54spi_sleep(priv); dev_err(&priv->spi->dev, "rx request of zero bytes\n"); return 0; } /* Firmware may insert up to 4 padding bytes after the lmac header, * but it does not amend the size of SPI data transfer. * Such packets has correct data size in header, thus referencing * past the end of allocated skb. Reserve extra 4 bytes for this case */ skb = dev_alloc_skb(len + 4); if (!skb) { p54spi_sleep(priv); dev_err(&priv->spi->dev, "could not alloc skb"); return -ENOMEM; } if (len <= READAHEAD_SZ) { skb_put_data(skb, rx_head + 1, len); } else { skb_put_data(skb, rx_head + 1, READAHEAD_SZ); p54spi_spi_read(priv, SPI_ADRS_DMA_DATA, skb_put(skb, len - READAHEAD_SZ), len - READAHEAD_SZ); } p54spi_sleep(priv); /* Put additional bytes to compensate for the possible * alignment-caused truncation */ skb_put(skb, 4); if (p54_rx(priv->hw, skb) == 0) dev_kfree_skb(skb); return 0; } static irqreturn_t p54spi_interrupt(int irq, void *config) { struct spi_device *spi = config; struct p54s_priv *priv = spi_get_drvdata(spi); ieee80211_queue_work(priv->hw, &priv->work); return IRQ_HANDLED; } static int p54spi_tx_frame(struct p54s_priv *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; int ret = 0; if (p54spi_wakeup(priv) < 0) return -EBUSY; ret = p54spi_spi_write_dma(priv, hdr->req_id, skb->data, skb->len); if (ret < 0) goto out; if (!p54spi_wait_bit(priv, SPI_ADRS_HOST_INTERRUPTS, SPI_HOST_INT_WR_READY)) { dev_err(&priv->spi->dev, "WR_READY timeout\n"); ret = -EAGAIN; goto out; } p54spi_int_ack(priv, SPI_HOST_INT_WR_READY); if (FREE_AFTER_TX(skb)) p54_free_skb(priv->hw, skb); out: p54spi_sleep(priv); return ret; } static int p54spi_wq_tx(struct p54s_priv *priv) { struct p54s_tx_info *entry; struct sk_buff *skb; struct ieee80211_tx_info *info; struct p54_tx_info *minfo; struct p54s_tx_info *dinfo; unsigned long flags; int ret = 0; spin_lock_irqsave(&priv->tx_lock, flags); while (!list_empty(&priv->tx_pending)) { entry = list_entry(priv->tx_pending.next, struct p54s_tx_info, tx_list); list_del_init(&entry->tx_list); spin_unlock_irqrestore(&priv->tx_lock, flags); dinfo = container_of((void *) entry, struct p54s_tx_info, tx_list); minfo = container_of((void *) dinfo, struct p54_tx_info, data); info = container_of((void *) minfo, struct ieee80211_tx_info, rate_driver_data); skb = container_of((void *) info, struct sk_buff, cb); ret = p54spi_tx_frame(priv, skb); if (ret < 0) { p54_free_skb(priv->hw, skb); return ret; } spin_lock_irqsave(&priv->tx_lock, flags); } spin_unlock_irqrestore(&priv->tx_lock, flags); return ret; } static void p54spi_op_tx(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54s_priv *priv = dev->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct p54_tx_info *mi = 
(struct p54_tx_info *) info->rate_driver_data; struct p54s_tx_info *di = (struct p54s_tx_info *) mi->data; unsigned long flags; BUILD_BUG_ON(sizeof(*di) > sizeof((mi->data))); spin_lock_irqsave(&priv->tx_lock, flags); list_add_tail(&di->tx_list, &priv->tx_pending); spin_unlock_irqrestore(&priv->tx_lock, flags); ieee80211_queue_work(priv->hw, &priv->work); } static void p54spi_work(struct work_struct *work) { struct p54s_priv *priv = container_of(work, struct p54s_priv, work); u32 ints; int ret; mutex_lock(&priv->mutex); if (priv->fw_state == FW_STATE_OFF) goto out; ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS); if (ints & SPI_HOST_INT_READY) { p54spi_int_ready(priv); p54spi_int_ack(priv, SPI_HOST_INT_READY); } if (priv->fw_state != FW_STATE_READY) goto out; if (ints & SPI_HOST_INT_UPDATE) { p54spi_int_ack(priv, SPI_HOST_INT_UPDATE); ret = p54spi_rx(priv); if (ret < 0) goto out; } if (ints & SPI_HOST_INT_SW_UPDATE) { p54spi_int_ack(priv, SPI_HOST_INT_SW_UPDATE); ret = p54spi_rx(priv); if (ret < 0) goto out; } ret = p54spi_wq_tx(priv); out: mutex_unlock(&priv->mutex); } static int p54spi_op_start(struct ieee80211_hw *dev) { struct p54s_priv *priv = dev->priv; unsigned long timeout; int ret = 0; if (mutex_lock_interruptible(&priv->mutex)) { ret = -EINTR; goto out; } priv->fw_state = FW_STATE_BOOTING; p54spi_power_on(priv); ret = p54spi_upload_firmware(dev); if (ret < 0) { p54spi_power_off(priv); goto out_unlock; } mutex_unlock(&priv->mutex); timeout = msecs_to_jiffies(2000); timeout = wait_for_completion_interruptible_timeout(&priv->fw_comp, timeout); if (!timeout) { dev_err(&priv->spi->dev, "firmware boot failed"); p54spi_power_off(priv); ret = -1; goto out; } if (mutex_lock_interruptible(&priv->mutex)) { ret = -EINTR; p54spi_power_off(priv); goto out; } WARN_ON(priv->fw_state != FW_STATE_READY); out_unlock: mutex_unlock(&priv->mutex); out: return ret; } static void p54spi_op_stop(struct ieee80211_hw *dev) { struct p54s_priv *priv = dev->priv; unsigned long flags; mutex_lock(&priv->mutex); WARN_ON(priv->fw_state != FW_STATE_READY); p54spi_power_off(priv); spin_lock_irqsave(&priv->tx_lock, flags); INIT_LIST_HEAD(&priv->tx_pending); spin_unlock_irqrestore(&priv->tx_lock, flags); priv->fw_state = FW_STATE_OFF; mutex_unlock(&priv->mutex); cancel_work_sync(&priv->work); } static int p54spi_probe(struct spi_device *spi) { struct p54s_priv *priv = NULL; struct ieee80211_hw *hw; int ret = -EINVAL; hw = p54_init_common(sizeof(*priv)); if (!hw) { dev_err(&spi->dev, "could not alloc ieee80211_hw"); return -ENOMEM; } priv = hw->priv; priv->hw = hw; spi_set_drvdata(spi, priv); priv->spi = spi; spi->bits_per_word = 16; spi->max_speed_hz = 24000000; ret = spi_setup(spi); if (ret < 0) { dev_err(&priv->spi->dev, "spi_setup failed"); goto err_free; } ret = gpio_request(p54spi_gpio_power, "p54spi power"); if (ret < 0) { dev_err(&priv->spi->dev, "power GPIO request failed: %d", ret); goto err_free; } ret = gpio_request(p54spi_gpio_irq, "p54spi irq"); if (ret < 0) { dev_err(&priv->spi->dev, "irq GPIO request failed: %d", ret); goto err_free_gpio_power; } gpio_direction_output(p54spi_gpio_power, 0); gpio_direction_input(p54spi_gpio_irq); ret = request_irq(gpio_to_irq(p54spi_gpio_irq), p54spi_interrupt, 0, "p54spi", priv->spi); if (ret < 0) { dev_err(&priv->spi->dev, "request_irq() failed"); goto err_free_gpio_irq; } irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING); disable_irq(gpio_to_irq(p54spi_gpio_irq)); INIT_WORK(&priv->work, p54spi_work); init_completion(&priv->fw_comp); 
INIT_LIST_HEAD(&priv->tx_pending); mutex_init(&priv->mutex); spin_lock_init(&priv->tx_lock); SET_IEEE80211_DEV(hw, &spi->dev); priv->common.open = p54spi_op_start; priv->common.stop = p54spi_op_stop; priv->common.tx = p54spi_op_tx; ret = p54spi_request_firmware(hw); if (ret < 0) goto err_free_common; ret = p54spi_request_eeprom(hw); if (ret) goto err_free_common; ret = p54_register_common(hw, &priv->spi->dev); if (ret) goto err_free_common; return 0; err_free_common: release_firmware(priv->firmware); free_irq(gpio_to_irq(p54spi_gpio_irq), spi); err_free_gpio_irq: gpio_free(p54spi_gpio_irq); err_free_gpio_power: gpio_free(p54spi_gpio_power); err_free: p54_free_common(priv->hw); return ret; } static void p54spi_remove(struct spi_device *spi) { struct p54s_priv *priv = spi_get_drvdata(spi); p54_unregister_common(priv->hw); free_irq(gpio_to_irq(p54spi_gpio_irq), spi); gpio_free(p54spi_gpio_power); gpio_free(p54spi_gpio_irq); release_firmware(priv->firmware); mutex_destroy(&priv->mutex); p54_free_common(priv->hw); } static struct spi_driver p54spi_driver = { .driver = { .name = "p54spi", }, .probe = p54spi_probe, .remove = p54spi_remove, }; module_spi_driver(p54spi_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Lamparter <[email protected]>"); MODULE_ALIAS("spi:cx3110x"); MODULE_ALIAS("spi:p54spi"); MODULE_ALIAS("spi:stlc45xx");
linux-master
drivers/net/wireless/intersil/p54/p54spi.c
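Illustrative note (not part of the file above): p54spi_spi_write() drives a 16-bit SPI bus, so an odd-length buffer is sent as a word-aligned prefix (len & ~1 bytes) plus one final little-endian word whose high byte is zero padding. A minimal user-space sketch of that split:

#include <stdint.h>
#include <stdio.h>
#include <stddef.h>

/* Show how an arbitrary byte buffer is carved up for a 16-bit SPI transfer. */
static void split_for_16bit_spi(const uint8_t *buf, size_t len)
{
	size_t even_len = len & ~(size_t)1;

	printf("word-aligned part: %zu bytes\n", even_len);
	if (len % 2) {
		uint16_t last_word = buf[len - 1];  /* LE: data byte, then 0x00 pad */
		printf("trailing word: 0x%04x\n", last_word);
	}
}

int main(void)
{
	static const uint8_t payload[5] = { 0x11, 0x22, 0x33, 0x44, 0x55 };

	split_for_16bit_spi(payload, sizeof(payload));
	return 0;
}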
// SPDX-License-Identifier: GPL-2.0-only /* * Common code for mac80211 Prism54 drivers * * Copyright (c) 2006, Michael Wu <[email protected]> * Copyright (c) 2007-2009, Christian Lamparter <[email protected]> * Copyright 2008, Johannes Berg <[email protected]> * * Based on: * - the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <[email protected]>, et al. * - stlc45xx driver * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). */ #include <linux/firmware.h> #include <linux/etherdevice.h> #include <net/mac80211.h> #ifdef CONFIG_P54_LEDS #include <linux/leds.h> #endif /* CONFIG_P54_LEDS */ #include "p54.h" #include "lmac.h" static void p54_update_leds(struct work_struct *work) { struct p54_common *priv = container_of(work, struct p54_common, led_work.work); int err, i, tmp, blink_delay = 400; bool rerun = false; /* Don't toggle the LED, when the device is down. */ if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) return ; for (i = 0; i < ARRAY_SIZE(priv->leds); i++) if (priv->leds[i].toggled) { priv->softled_state |= BIT(i); tmp = 70 + 200 / (priv->leds[i].toggled); if (tmp < blink_delay) blink_delay = tmp; if (priv->leds[i].led_dev.brightness == LED_OFF) rerun = true; priv->leds[i].toggled = !!priv->leds[i].led_dev.brightness; } else priv->softled_state &= ~BIT(i); err = p54_set_leds(priv); if (err && net_ratelimit()) wiphy_err(priv->hw->wiphy, "failed to update LEDs (%d).\n", err); if (rerun) ieee80211_queue_delayed_work(priv->hw, &priv->led_work, msecs_to_jiffies(blink_delay)); } static void p54_led_brightness_set(struct led_classdev *led_dev, enum led_brightness brightness) { struct p54_led_dev *led = container_of(led_dev, struct p54_led_dev, led_dev); struct ieee80211_hw *dev = led->hw_dev; struct p54_common *priv = dev->priv; if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) return ; if ((brightness) && (led->registered)) { led->toggled++; ieee80211_queue_delayed_work(priv->hw, &priv->led_work, HZ/10); } } static int p54_register_led(struct p54_common *priv, unsigned int led_index, char *name, const char *trigger) { struct p54_led_dev *led = &priv->leds[led_index]; int err; if (led->registered) return -EEXIST; snprintf(led->name, sizeof(led->name), "p54-%s::%s", wiphy_name(priv->hw->wiphy), name); led->hw_dev = priv->hw; led->index = led_index; led->led_dev.name = led->name; led->led_dev.default_trigger = trigger; led->led_dev.brightness_set = p54_led_brightness_set; err = led_classdev_register(wiphy_dev(priv->hw->wiphy), &led->led_dev); if (err) wiphy_err(priv->hw->wiphy, "Failed to register %s LED.\n", name); else led->registered = 1; return err; } int p54_init_leds(struct p54_common *priv) { int err; /* * TODO: * Figure out if the EEPROM contains some hints about the number * of available/programmable LEDs of the device. 
*/ INIT_DELAYED_WORK(&priv->led_work, p54_update_leds); err = p54_register_led(priv, 0, "assoc", ieee80211_get_assoc_led_name(priv->hw)); if (err) return err; err = p54_register_led(priv, 1, "tx", ieee80211_get_tx_led_name(priv->hw)); if (err) return err; err = p54_register_led(priv, 2, "rx", ieee80211_get_rx_led_name(priv->hw)); if (err) return err; err = p54_register_led(priv, 3, "radio", ieee80211_get_radio_led_name(priv->hw)); if (err) return err; err = p54_set_leds(priv); return err; } void p54_unregister_leds(struct p54_common *priv) { int i; for (i = 0; i < ARRAY_SIZE(priv->leds); i++) { if (priv->leds[i].registered) { priv->leds[i].registered = false; priv->leds[i].toggled = 0; led_classdev_unregister(&priv->leds[i].led_dev); } } cancel_delayed_work_sync(&priv->led_work); }
linux-master
drivers/net/wireless/intersil/p54/led.c
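Illustrative note (not part of the file above): p54_update_leds() rearms itself with the shortest per-LED proposal of 70 + 200/toggle_count milliseconds, capped at a 400 ms default. A minimal user-space sketch of that heuristic with hypothetical activity counters:

#include <stdio.h>

/* Shortest blink interval proposed by the active LEDs, 400 ms default. */
static int blink_delay_ms(const unsigned int *toggled, int nleds)
{
	int i, tmp, delay = 400;

	for (i = 0; i < nleds; i++) {
		if (!toggled[i])
			continue;
		tmp = 70 + 200 / (int)toggled[i];
		if (tmp < delay)
			delay = tmp;
	}
	return delay;
}

int main(void)
{
	unsigned int toggled[4] = { 0, 3, 1, 0 };  /* hypothetical per-LED activity */

	printf("blink delay: %d ms\n", blink_delay_ms(toggled, 4));
	return 0;
}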
// SPDX-License-Identifier: GPL-2.0-only /* * Linux device driver for USB based Prism54 * * Copyright (c) 2006, Michael Wu <[email protected]> * * Based on the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <[email protected]>, et al. */ #include <linux/usb.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/crc32.h> #include <linux/module.h> #include <net/mac80211.h> #include "p54.h" #include "lmac.h" #include "p54usb.h" MODULE_AUTHOR("Michael Wu <[email protected]>"); MODULE_DESCRIPTION("Prism54 USB wireless driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("prism54usb"); MODULE_FIRMWARE("isl3886usb"); MODULE_FIRMWARE("isl3887usb"); static struct usb_driver p54u_driver; /* * Note: * * Always update our wiki's device list (located at: * http://wireless.wiki.kernel.org/en/users/Drivers/p54/devices ), * whenever you add a new device. */ static const struct usb_device_id p54u_table[] = { /* Version 1 devices (pci chip + net2280) */ {USB_DEVICE(0x0411, 0x0050)}, /* Buffalo WLI2-USB2-G54 */ {USB_DEVICE(0x045e, 0x00c2)}, /* Microsoft MN-710 */ {USB_DEVICE(0x0506, 0x0a11)}, /* 3COM 3CRWE254G72 */ {USB_DEVICE(0x0675, 0x0530)}, /* DrayTek Vigor 530 */ {USB_DEVICE(0x06b9, 0x0120)}, /* Thomson SpeedTouch 120g */ {USB_DEVICE(0x0707, 0xee06)}, /* SMC 2862W-G */ {USB_DEVICE(0x07aa, 0x001c)}, /* Corega CG-WLUSB2GT */ {USB_DEVICE(0x083a, 0x4501)}, /* Accton 802.11g WN4501 USB */ {USB_DEVICE(0x083a, 0x4502)}, /* Siemens Gigaset USB Adapter */ {USB_DEVICE(0x083a, 0x5501)}, /* Phillips CPWUA054 */ {USB_DEVICE(0x0846, 0x4200)}, /* Netgear WG121 */ {USB_DEVICE(0x0846, 0x4210)}, /* Netgear WG121 the second ? */ {USB_DEVICE(0x0846, 0x4220)}, /* Netgear WG111 */ {USB_DEVICE(0x09aa, 0x1000)}, /* Spinnaker Proto board */ {USB_DEVICE(0x0bf8, 0x1007)}, /* Fujitsu E-5400 USB */ {USB_DEVICE(0x0cde, 0x0006)}, /* Medion 40900, Roper Europe */ {USB_DEVICE(0x0db0, 0x6826)}, /* MSI UB54G (MS-6826) */ {USB_DEVICE(0x107b, 0x55f2)}, /* Gateway WGU-210 (Gemtek) */ {USB_DEVICE(0x124a, 0x4023)}, /* Shuttle PN15, Airvast WM168g, IOGear GWU513 */ {USB_DEVICE(0x124a, 0x4026)}, /* AirVasT USB wireless device */ {USB_DEVICE(0x1435, 0x0210)}, /* Inventel UR054G */ {USB_DEVICE(0x15a9, 0x0002)}, /* Gemtek WUBI-100GW 802.11g */ {USB_DEVICE(0x1630, 0x0005)}, /* 2Wire 802.11g USB (v1) / Z-Com */ {USB_DEVICE(0x182d, 0x096b)}, /* Sitecom WL-107 */ {USB_DEVICE(0x1915, 0x2234)}, /* Linksys WUSB54G OEM */ {USB_DEVICE(0x1915, 0x2235)}, /* Linksys WUSB54G Portable OEM */ {USB_DEVICE(0x2001, 0x3701)}, /* DLink DWL-G120 Spinnaker */ {USB_DEVICE(0x2001, 0x3703)}, /* DLink DWL-G122 */ {USB_DEVICE(0x2001, 0x3762)}, /* Conceptronic C54U */ {USB_DEVICE(0x5041, 0x2234)}, /* Linksys WUSB54G */ {USB_DEVICE(0x5041, 0x2235)}, /* Linksys WUSB54G Portable */ /* Version 2 devices (3887) */ {USB_DEVICE(0x0471, 0x1230)}, /* Philips CPWUA054/00 */ {USB_DEVICE(0x050d, 0x7050)}, /* Belkin F5D7050 ver 1000 */ {USB_DEVICE(0x0572, 0x2000)}, /* Cohiba Proto board */ {USB_DEVICE(0x0572, 0x2002)}, /* Cohiba Proto board */ {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ 
{USB_DEVICE(0x083a, 0xc501)}, /* Zoom Wireless-G 4410 */ {USB_DEVICE(0x083a, 0xf503)}, /* Accton FD7050E ver 1010ec */ {USB_DEVICE(0x0846, 0x4240)}, /* Netgear WG111 (v2) */ {USB_DEVICE(0x0915, 0x2000)}, /* Cohiba Proto board */ {USB_DEVICE(0x0915, 0x2002)}, /* Cohiba Proto board */ {USB_DEVICE(0x0baf, 0x0118)}, /* U.S. Robotics U5 802.11g Adapter*/ {USB_DEVICE(0x0bf8, 0x1009)}, /* FUJITSU E-5400 USB D1700*/ /* {USB_DEVICE(0x0cde, 0x0006)}, * Medion MD40900 already listed above, * just noting it here for clarity */ {USB_DEVICE(0x0cde, 0x0008)}, /* Sagem XG703A */ {USB_DEVICE(0x0cde, 0x0015)}, /* Zcomax XG-705A */ {USB_DEVICE(0x0d8e, 0x3762)}, /* DLink DWL-G120 Cohiba */ {USB_DEVICE(0x124a, 0x4025)}, /* IOGear GWU513 (GW3887IK chip) */ {USB_DEVICE(0x1260, 0xee22)}, /* SMC 2862W-G version 2 */ {USB_DEVICE(0x13b1, 0x000a)}, /* Linksys WUSB54G ver 2 */ {USB_DEVICE(0x13B1, 0x000C)}, /* Linksys WUSB54AG */ {USB_DEVICE(0x1413, 0x5400)}, /* Telsey 802.11g USB2.0 Adapter */ {USB_DEVICE(0x1435, 0x0427)}, /* Inventel UR054G */ /* {USB_DEVICE(0x15a9, 0x0002)}, * Also SparkLAN WL-682 with 3887 */ {USB_DEVICE(0x1668, 0x1050)}, /* Actiontec 802UIG-1 */ {USB_DEVICE(0x1740, 0x1000)}, /* Senao NUB-350 */ {USB_DEVICE(0x2001, 0x3704)}, /* DLink DWL-G122 rev A2 */ {USB_DEVICE(0x2001, 0x3705)}, /* D-Link DWL-G120 rev C1 */ {USB_DEVICE(0x413c, 0x5513)}, /* Dell WLA3310 USB Wireless Adapter */ {USB_DEVICE(0x413c, 0x8102)}, /* Spinnaker DUT */ {USB_DEVICE(0x413c, 0x8104)}, /* Cohiba Proto board */ {} }; MODULE_DEVICE_TABLE(usb, p54u_table); static const struct { u32 intf; enum p54u_hw_type type; const char *fw; char hw[20]; } p54u_fwlist[__NUM_P54U_HWTYPES] = { { .type = P54U_NET2280, .intf = FW_LM86, .fw = "isl3886usb", .hw = "ISL3886 + net2280", }, { .type = P54U_3887, .intf = FW_LM87, .fw = "isl3887usb", .hw = "ISL3887", }, }; static void p54u_rx_cb(struct urb *urb) { struct sk_buff *skb = (struct sk_buff *) urb->context; struct p54u_rx_info *info = (struct p54u_rx_info *)skb->cb; struct ieee80211_hw *dev = info->dev; struct p54u_priv *priv = dev->priv; skb_unlink(skb, &priv->rx_queue); if (unlikely(urb->status)) { dev_kfree_skb_irq(skb); return; } skb_put(skb, urb->actual_length); if (priv->hw_type == P54U_NET2280) skb_pull(skb, priv->common.tx_hdr_len); if (priv->common.fw_interface == FW_LM87) { skb_pull(skb, 4); skb_put(skb, 4); } if (p54_rx(dev, skb)) { skb = dev_alloc_skb(priv->common.rx_mtu + 32); if (unlikely(!skb)) { /* TODO check rx queue length and refill *somewhere* */ return; } info = (struct p54u_rx_info *) skb->cb; info->urb = urb; info->dev = dev; urb->transfer_buffer = skb_tail_pointer(skb); urb->context = skb; } else { if (priv->hw_type == P54U_NET2280) skb_push(skb, priv->common.tx_hdr_len); if (priv->common.fw_interface == FW_LM87) { skb_push(skb, 4); skb_put(skb, 4); } skb_reset_tail_pointer(skb); skb_trim(skb, 0); urb->transfer_buffer = skb_tail_pointer(skb); } skb_queue_tail(&priv->rx_queue, skb); usb_anchor_urb(urb, &priv->submitted); if (usb_submit_urb(urb, GFP_ATOMIC)) { skb_unlink(skb, &priv->rx_queue); usb_unanchor_urb(urb); dev_kfree_skb_irq(skb); } } static void p54u_tx_cb(struct urb *urb) { struct sk_buff *skb = urb->context; struct ieee80211_hw *dev = usb_get_intfdata(usb_ifnum_to_if(urb->dev, 0)); p54_free_skb(dev, skb); } static void p54u_tx_dummy_cb(struct urb *urb) { } static void p54u_free_urbs(struct ieee80211_hw *dev) { struct p54u_priv *priv = dev->priv; usb_kill_anchored_urbs(&priv->submitted); } static void p54u_stop(struct ieee80211_hw *dev) { /* * TODO: figure out how 
to reliably stop the 3887 and net2280 so * the hardware is still usable next time we want to start it. * until then, we just stop listening to the hardware.. */ p54u_free_urbs(dev); } static int p54u_init_urbs(struct ieee80211_hw *dev) { struct p54u_priv *priv = dev->priv; struct urb *entry = NULL; struct sk_buff *skb; struct p54u_rx_info *info; int ret = 0; while (skb_queue_len(&priv->rx_queue) < 32) { skb = __dev_alloc_skb(priv->common.rx_mtu + 32, GFP_KERNEL); if (!skb) { ret = -ENOMEM; goto err; } entry = usb_alloc_urb(0, GFP_KERNEL); if (!entry) { ret = -ENOMEM; goto err; } usb_fill_bulk_urb(entry, priv->udev, usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), skb_tail_pointer(skb), priv->common.rx_mtu + 32, p54u_rx_cb, skb); info = (struct p54u_rx_info *) skb->cb; info->urb = entry; info->dev = dev; skb_queue_tail(&priv->rx_queue, skb); usb_anchor_urb(entry, &priv->submitted); ret = usb_submit_urb(entry, GFP_KERNEL); if (ret) { skb_unlink(skb, &priv->rx_queue); usb_unanchor_urb(entry); goto err; } usb_free_urb(entry); entry = NULL; } return 0; err: usb_free_urb(entry); kfree_skb(skb); p54u_free_urbs(dev); return ret; } static int p54u_open(struct ieee80211_hw *dev) { /* * TODO: Because we don't know how to reliably stop the 3887 and * the isl3886+net2280, other than brutally cut off all * communications. We have to reinitialize the urbs on every start. */ return p54u_init_urbs(dev); } static __le32 p54u_lm87_chksum(const __le32 *data, size_t length) { u32 chk = 0; length >>= 2; while (length--) { chk ^= le32_to_cpu(*data++); chk = (chk >> 5) ^ (chk << 3); } return cpu_to_le32(chk); } static void p54u_tx_lm87(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54u_priv *priv = dev->priv; struct urb *data_urb; struct lm87_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr); data_urb = usb_alloc_urb(0, GFP_ATOMIC); if (!data_urb) { p54_free_skb(dev, skb); return; } hdr->chksum = p54u_lm87_chksum((__le32 *)skb->data, skb->len); hdr->device_addr = ((struct p54_hdr *)skb->data)->req_id; usb_fill_bulk_urb(data_urb, priv->udev, usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ? p54u_tx_cb : p54u_tx_dummy_cb, skb); data_urb->transfer_flags |= URB_ZERO_PACKET; usb_anchor_urb(data_urb, &priv->submitted); if (usb_submit_urb(data_urb, GFP_ATOMIC)) { usb_unanchor_urb(data_urb); p54_free_skb(dev, skb); } usb_free_urb(data_urb); } static void p54u_tx_net2280(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54u_priv *priv = dev->priv; struct urb *int_urb = NULL, *data_urb = NULL; struct net2280_tx_hdr *hdr = (void *)skb->data - sizeof(*hdr); struct net2280_reg_write *reg = NULL; int err = -ENOMEM; reg = kmalloc(sizeof(*reg), GFP_ATOMIC); if (!reg) goto out; int_urb = usb_alloc_urb(0, GFP_ATOMIC); if (!int_urb) goto out; data_urb = usb_alloc_urb(0, GFP_ATOMIC); if (!data_urb) goto out; reg->port = cpu_to_le16(NET2280_DEV_U32); reg->addr = cpu_to_le32(P54U_DEV_BASE); reg->val = cpu_to_le32(ISL38XX_DEV_INT_DATA); memset(hdr, 0, sizeof(*hdr)); hdr->len = cpu_to_le16(skb->len); hdr->device_addr = ((struct p54_hdr *) skb->data)->req_id; usb_fill_bulk_urb(int_urb, priv->udev, usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV), reg, sizeof(*reg), p54u_tx_dummy_cb, dev); /* * URB_FREE_BUFFER triggers a code path in the USB subsystem that will * free what is inside the transfer_buffer after the last reference to * the int_urb is dropped. 
*/ int_urb->transfer_flags |= URB_FREE_BUFFER | URB_ZERO_PACKET; reg = NULL; usb_fill_bulk_urb(data_urb, priv->udev, usb_sndbulkpipe(priv->udev, P54U_PIPE_DATA), hdr, skb->len + sizeof(*hdr), FREE_AFTER_TX(skb) ? p54u_tx_cb : p54u_tx_dummy_cb, skb); data_urb->transfer_flags |= URB_ZERO_PACKET; usb_anchor_urb(int_urb, &priv->submitted); err = usb_submit_urb(int_urb, GFP_ATOMIC); if (err) { usb_unanchor_urb(int_urb); goto out; } usb_anchor_urb(data_urb, &priv->submitted); err = usb_submit_urb(data_urb, GFP_ATOMIC); if (err) { usb_unanchor_urb(data_urb); goto out; } out: usb_free_urb(int_urb); usb_free_urb(data_urb); if (err) { kfree(reg); p54_free_skb(dev, skb); } } static int p54u_write(struct p54u_priv *priv, struct net2280_reg_write *buf, enum net2280_op_type type, __le32 addr, __le32 val) { unsigned int ep; int alen; if (type & 0x0800) ep = usb_sndbulkpipe(priv->udev, P54U_PIPE_DEV); else ep = usb_sndbulkpipe(priv->udev, P54U_PIPE_BRG); buf->port = cpu_to_le16(type); buf->addr = addr; buf->val = val; return usb_bulk_msg(priv->udev, ep, buf, sizeof(*buf), &alen, 1000); } static int p54u_read(struct p54u_priv *priv, void *buf, enum net2280_op_type type, __le32 addr, __le32 *val) { struct net2280_reg_read *read = buf; __le32 *reg = buf; unsigned int ep; int alen, err; if (type & 0x0800) ep = P54U_PIPE_DEV; else ep = P54U_PIPE_BRG; read->port = cpu_to_le16(type); read->addr = addr; err = usb_bulk_msg(priv->udev, usb_sndbulkpipe(priv->udev, ep), read, sizeof(*read), &alen, 1000); if (err) return err; err = usb_bulk_msg(priv->udev, usb_rcvbulkpipe(priv->udev, ep), reg, sizeof(*reg), &alen, 1000); if (err) return err; *val = *reg; return 0; } static int p54u_bulk_msg(struct p54u_priv *priv, unsigned int ep, void *data, size_t len) { int alen; return usb_bulk_msg(priv->udev, usb_sndbulkpipe(priv->udev, ep), data, len, &alen, 2000); } static int p54u_device_reset(struct ieee80211_hw *dev) { struct p54u_priv *priv = dev->priv; int ret, lock = (priv->intf->condition != USB_INTERFACE_BINDING); if (lock) { ret = usb_lock_device_for_reset(priv->udev, priv->intf); if (ret < 0) { dev_err(&priv->udev->dev, "(p54usb) unable to lock " "device for reset (%d)!\n", ret); return ret; } } ret = usb_reset_device(priv->udev); if (lock) usb_unlock_device(priv->udev); if (ret) dev_err(&priv->udev->dev, "(p54usb) unable to reset " "device (%d)!\n", ret); return ret; } static const char p54u_romboot_3887[] = "~~~~"; static int p54u_firmware_reset_3887(struct ieee80211_hw *dev) { struct p54u_priv *priv = dev->priv; u8 *buf; int ret; buf = kmemdup(p54u_romboot_3887, 4, GFP_KERNEL); if (!buf) return -ENOMEM; ret = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 4); kfree(buf); if (ret) dev_err(&priv->udev->dev, "(p54usb) unable to jump to " "boot ROM (%d)!\n", ret); return ret; } static const char p54u_firmware_upload_3887[] = "<\r"; static int p54u_upload_firmware_3887(struct ieee80211_hw *dev) { struct p54u_priv *priv = dev->priv; int err, alen; u8 carry = 0; u8 *buf, *tmp; const u8 *data; unsigned int left, remains, block_size; struct x2_header *hdr; unsigned long timeout; err = p54u_firmware_reset_3887(dev); if (err) return err; tmp = buf = kmalloc(P54U_FW_BLOCK, GFP_KERNEL); if (!buf) return -ENOMEM; left = block_size = min_t(size_t, P54U_FW_BLOCK, priv->fw->size); strcpy(buf, p54u_firmware_upload_3887); left -= strlen(p54u_firmware_upload_3887); tmp += strlen(p54u_firmware_upload_3887); data = priv->fw->data; remains = priv->fw->size; hdr = (struct x2_header *)(buf + strlen(p54u_firmware_upload_3887)); 
memcpy(hdr->signature, X2_SIGNATURE, X2_SIGNATURE_SIZE); hdr->fw_load_addr = cpu_to_le32(ISL38XX_DEV_FIRMWARE_ADDR); hdr->fw_length = cpu_to_le32(priv->fw->size); hdr->crc = cpu_to_le32(~crc32_le(~0, (void *)&hdr->fw_load_addr, sizeof(u32)*2)); left -= sizeof(*hdr); tmp += sizeof(*hdr); while (remains) { while (left--) { if (carry) { *tmp++ = carry; carry = 0; remains--; continue; } switch (*data) { case '~': *tmp++ = '}'; carry = '^'; break; case '}': *tmp++ = '}'; carry = ']'; break; default: *tmp++ = *data; remains--; break; } data++; } err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_size); if (err) { dev_err(&priv->udev->dev, "(p54usb) firmware " "upload failed!\n"); goto err_upload_failed; } tmp = buf; left = block_size = min((unsigned int)P54U_FW_BLOCK, remains); } *((__le32 *)buf) = cpu_to_le32(~crc32_le(~0, priv->fw->data, priv->fw->size)); err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, sizeof(u32)); if (err) { dev_err(&priv->udev->dev, "(p54usb) firmware upload failed!\n"); goto err_upload_failed; } timeout = jiffies + msecs_to_jiffies(1000); while (!(err = usb_bulk_msg(priv->udev, usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), buf, 128, &alen, 1000))) { if (alen > 2 && !memcmp(buf, "OK", 2)) break; if (alen > 5 && !memcmp(buf, "ERROR", 5)) { err = -EINVAL; break; } if (time_after(jiffies, timeout)) { dev_err(&priv->udev->dev, "(p54usb) firmware boot " "timed out!\n"); err = -ETIMEDOUT; break; } } if (err) { dev_err(&priv->udev->dev, "(p54usb) firmware upload failed!\n"); goto err_upload_failed; } buf[0] = 'g'; buf[1] = '\r'; err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, 2); if (err) { dev_err(&priv->udev->dev, "(p54usb) firmware boot failed!\n"); goto err_upload_failed; } timeout = jiffies + msecs_to_jiffies(1000); while (!(err = usb_bulk_msg(priv->udev, usb_rcvbulkpipe(priv->udev, P54U_PIPE_DATA), buf, 128, &alen, 1000))) { if (alen > 0 && buf[0] == 'g') break; if (time_after(jiffies, timeout)) { err = -ETIMEDOUT; break; } } if (err) goto err_upload_failed; err_upload_failed: kfree(buf); return err; } static int p54u_upload_firmware_net2280(struct ieee80211_hw *dev) { struct p54u_priv *priv = dev->priv; const struct p54p_csr *devreg = (const struct p54p_csr *) P54U_DEV_BASE; int err, alen; void *buf; __le32 reg; unsigned int remains, offset; const u8 *data; buf = kmalloc(512, GFP_KERNEL); if (!buf) return -ENOMEM; #define P54U_WRITE(type, addr, data) \ do {\ err = p54u_write(priv, buf, type,\ cpu_to_le32((u32)(unsigned long)addr), data);\ if (err) \ goto fail;\ } while (0) #define P54U_READ(type, addr) \ do {\ err = p54u_read(priv, buf, type,\ cpu_to_le32((u32)(unsigned long)addr), &reg);\ if (err)\ goto fail;\ } while (0) /* power down net2280 bridge */ P54U_READ(NET2280_BRG_U32, NET2280_GPIOCTL); reg |= cpu_to_le32(P54U_BRG_POWER_DOWN); reg &= cpu_to_le32(~P54U_BRG_POWER_UP); P54U_WRITE(NET2280_BRG_U32, NET2280_GPIOCTL, reg); mdelay(100); /* power up bridge */ reg |= cpu_to_le32(P54U_BRG_POWER_UP); reg &= cpu_to_le32(~P54U_BRG_POWER_DOWN); P54U_WRITE(NET2280_BRG_U32, NET2280_GPIOCTL, reg); mdelay(100); P54U_WRITE(NET2280_BRG_U32, NET2280_DEVINIT, cpu_to_le32(NET2280_CLK_30Mhz | NET2280_PCI_ENABLE | NET2280_PCI_SOFT_RESET)); mdelay(20); P54U_WRITE(NET2280_BRG_CFG_U16, PCI_COMMAND, cpu_to_le32(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER)); P54U_WRITE(NET2280_BRG_CFG_U32, PCI_BASE_ADDRESS_0, cpu_to_le32(NET2280_BASE)); P54U_READ(NET2280_BRG_CFG_U16, PCI_STATUS); reg |= cpu_to_le32(PCI_STATUS_REC_MASTER_ABORT); P54U_WRITE(NET2280_BRG_CFG_U16, PCI_STATUS, reg); // TODO: we really 
need this? P54U_READ(NET2280_BRG_U32, NET2280_RELNUM); P54U_WRITE(NET2280_BRG_U32, NET2280_EPA_RSP, cpu_to_le32(NET2280_CLEAR_NAK_OUT_PACKETS_MODE)); P54U_WRITE(NET2280_BRG_U32, NET2280_EPC_RSP, cpu_to_le32(NET2280_CLEAR_NAK_OUT_PACKETS_MODE)); P54U_WRITE(NET2280_BRG_CFG_U32, PCI_BASE_ADDRESS_2, cpu_to_le32(NET2280_BASE2)); /* finally done setting up the bridge */ P54U_WRITE(NET2280_DEV_CFG_U16, 0x10000 | PCI_COMMAND, cpu_to_le32(PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER)); P54U_WRITE(NET2280_DEV_CFG_U16, 0x10000 | 0x40 /* TRDY timeout */, 0); P54U_WRITE(NET2280_DEV_CFG_U32, 0x10000 | PCI_BASE_ADDRESS_0, cpu_to_le32(P54U_DEV_BASE)); P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, 0); P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); /* do romboot */ P54U_WRITE(NET2280_DEV_U32, &devreg->int_enable, 0); P54U_READ(NET2280_DEV_U32, &devreg->ctrl_stat); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); mdelay(20); reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); mdelay(20); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); mdelay(100); P54U_READ(NET2280_DEV_U32, &devreg->int_ident); P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); /* finally, we can upload firmware now! */ remains = priv->fw->size; data = priv->fw->data; offset = ISL38XX_DEV_FIRMWARE_ADDR; while (remains) { unsigned int block_len = min(remains, (unsigned int)512); memcpy(buf, data, block_len); err = p54u_bulk_msg(priv, P54U_PIPE_DATA, buf, block_len); if (err) { dev_err(&priv->udev->dev, "(p54usb) firmware block " "upload failed\n"); goto fail; } P54U_WRITE(NET2280_DEV_U32, &devreg->direct_mem_base, cpu_to_le32(0xc0000f00)); P54U_WRITE(NET2280_DEV_U32, 0x0020 | (unsigned long)&devreg->direct_mem_win, 0); P54U_WRITE(NET2280_DEV_U32, 0x0020 | (unsigned long)&devreg->direct_mem_win, cpu_to_le32(1)); P54U_WRITE(NET2280_DEV_U32, 0x0024 | (unsigned long)&devreg->direct_mem_win, cpu_to_le32(block_len)); P54U_WRITE(NET2280_DEV_U32, 0x0028 | (unsigned long)&devreg->direct_mem_win, cpu_to_le32(offset)); P54U_WRITE(NET2280_DEV_U32, &devreg->dma_addr, cpu_to_le32(NET2280_EPA_FIFO_PCI_ADDR)); P54U_WRITE(NET2280_DEV_U32, &devreg->dma_len, cpu_to_le32(block_len >> 2)); P54U_WRITE(NET2280_DEV_U32, &devreg->dma_ctrl, cpu_to_le32(ISL38XX_DMA_MASTER_CONTROL_TRIGGER)); mdelay(10); P54U_READ(NET2280_DEV_U32, 0x002C | (unsigned long)&devreg->direct_mem_win); if (!(reg & cpu_to_le32(ISL38XX_DMA_STATUS_DONE)) || !(reg & cpu_to_le32(ISL38XX_DMA_STATUS_READY))) { dev_err(&priv->udev->dev, "(p54usb) firmware DMA " "transfer failed\n"); goto fail; } P54U_WRITE(NET2280_BRG_U32, NET2280_EPA_STAT, cpu_to_le32(NET2280_FIFO_FLUSH)); remains -= block_len; data += block_len; offset += block_len; } /* do ramboot */ P54U_READ(NET2280_DEV_U32, &devreg->ctrl_stat); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT); P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); mdelay(20); reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); P54U_WRITE(NET2280_DEV_U32, &devreg->ctrl_stat, reg); mdelay(100); P54U_READ(NET2280_DEV_U32, &devreg->int_ident); P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); /* start up 
the firmware */ P54U_WRITE(NET2280_DEV_U32, &devreg->int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT)); P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, cpu_to_le32(NET2280_PCI_INTA_INTERRUPT_ENABLE | NET2280_USB_INTERRUPT_ENABLE)); P54U_WRITE(NET2280_DEV_U32, &devreg->dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); err = usb_interrupt_msg(priv->udev, usb_rcvbulkpipe(priv->udev, P54U_PIPE_INT), buf, sizeof(__le32), &alen, 1000); if (err || alen != sizeof(__le32)) goto fail; P54U_READ(NET2280_DEV_U32, &devreg->int_ident); P54U_WRITE(NET2280_DEV_U32, &devreg->int_ack, reg); if (!(reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT))) err = -EINVAL; P54U_WRITE(NET2280_BRG_U32, NET2280_USBIRQENB1, 0); P54U_WRITE(NET2280_BRG_U32, NET2280_IRQSTAT1, cpu_to_le32(NET2280_PCI_INTA_INTERRUPT)); #undef P54U_WRITE #undef P54U_READ fail: kfree(buf); return err; } static int p54_find_type(struct p54u_priv *priv) { int i; for (i = 0; i < __NUM_P54U_HWTYPES; i++) if (p54u_fwlist[i].type == priv->hw_type) break; if (i == __NUM_P54U_HWTYPES) return -EOPNOTSUPP; return i; } static int p54u_start_ops(struct p54u_priv *priv) { struct ieee80211_hw *dev = priv->common.hw; int ret; ret = p54_parse_firmware(dev, priv->fw); if (ret) goto err_out; ret = p54_find_type(priv); if (ret < 0) goto err_out; if (priv->common.fw_interface != p54u_fwlist[ret].intf) { dev_err(&priv->udev->dev, "wrong firmware, please get " "a firmware for \"%s\" and try again.\n", p54u_fwlist[ret].hw); ret = -ENODEV; goto err_out; } ret = priv->upload_fw(dev); if (ret) goto err_out; ret = p54u_open(dev); if (ret) goto err_out; ret = p54_read_eeprom(dev); if (ret) goto err_stop; p54u_stop(dev); ret = p54_register_common(dev, &priv->udev->dev); if (ret) goto err_stop; return 0; err_stop: p54u_stop(dev); err_out: /* * p54u_disconnect will do the rest of the * cleanup */ return ret; } static void p54u_load_firmware_cb(const struct firmware *firmware, void *context) { struct p54u_priv *priv = context; struct usb_device *udev = priv->udev; struct usb_interface *intf = priv->intf; int err; if (firmware) { priv->fw = firmware; err = p54u_start_ops(priv); } else { err = -ENOENT; dev_err(&udev->dev, "Firmware not found.\n"); } complete(&priv->fw_wait_load); /* * At this point p54u_disconnect may have already freed * the "priv" context. Do not use it anymore! 
*/ priv = NULL; if (err) { dev_err(&intf->dev, "failed to initialize device (%d)\n", err); usb_lock_device(udev); usb_driver_release_interface(&p54u_driver, intf); usb_unlock_device(udev); } usb_put_intf(intf); } static int p54u_load_firmware(struct ieee80211_hw *dev, struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct p54u_priv *priv = dev->priv; struct device *device = &udev->dev; int err, i; BUILD_BUG_ON(ARRAY_SIZE(p54u_fwlist) != __NUM_P54U_HWTYPES); init_completion(&priv->fw_wait_load); i = p54_find_type(priv); if (i < 0) return i; dev_info(&priv->udev->dev, "Loading firmware file %s\n", p54u_fwlist[i].fw); usb_get_intf(intf); err = request_firmware_nowait(THIS_MODULE, 1, p54u_fwlist[i].fw, device, GFP_KERNEL, priv, p54u_load_firmware_cb); if (err) { dev_err(&priv->udev->dev, "(p54usb) cannot load firmware %s " "(%d)!\n", p54u_fwlist[i].fw, err); usb_put_intf(intf); } return err; } static int p54u_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct ieee80211_hw *dev; struct p54u_priv *priv; int err; unsigned int i, recognized_pipes; dev = p54_init_common(sizeof(*priv)); if (!dev) { dev_err(&udev->dev, "(p54usb) ieee80211 alloc failed\n"); return -ENOMEM; } priv = dev->priv; priv->hw_type = P54U_INVALID_HW; SET_IEEE80211_DEV(dev, &intf->dev); usb_set_intfdata(intf, dev); priv->udev = udev; priv->intf = intf; skb_queue_head_init(&priv->rx_queue); init_usb_anchor(&priv->submitted); /* really lazy and simple way of figuring out if we're a 3887 */ /* TODO: should just stick the identification in the device table */ i = intf->altsetting->desc.bNumEndpoints; recognized_pipes = 0; while (i--) { switch (intf->altsetting->endpoint[i].desc.bEndpointAddress) { case P54U_PIPE_DATA: case P54U_PIPE_MGMT: case P54U_PIPE_BRG: case P54U_PIPE_DEV: case P54U_PIPE_DATA | USB_DIR_IN: case P54U_PIPE_MGMT | USB_DIR_IN: case P54U_PIPE_BRG | USB_DIR_IN: case P54U_PIPE_DEV | USB_DIR_IN: case P54U_PIPE_INT | USB_DIR_IN: recognized_pipes++; } } priv->common.open = p54u_open; priv->common.stop = p54u_stop; if (recognized_pipes < P54U_PIPE_NUMBER) { #ifdef CONFIG_PM /* ISL3887 needs a full reset on resume */ udev->reset_resume = 1; #endif /* CONFIG_PM */ err = p54u_device_reset(dev); priv->hw_type = P54U_3887; dev->extra_tx_headroom += sizeof(struct lm87_tx_hdr); priv->common.tx_hdr_len = sizeof(struct lm87_tx_hdr); priv->common.tx = p54u_tx_lm87; priv->upload_fw = p54u_upload_firmware_3887; } else { priv->hw_type = P54U_NET2280; dev->extra_tx_headroom += sizeof(struct net2280_tx_hdr); priv->common.tx_hdr_len = sizeof(struct net2280_tx_hdr); priv->common.tx = p54u_tx_net2280; priv->upload_fw = p54u_upload_firmware_net2280; } err = p54u_load_firmware(dev, intf); if (err) p54_free_common(dev); return err; } static void p54u_disconnect(struct usb_interface *intf) { struct ieee80211_hw *dev = usb_get_intfdata(intf); struct p54u_priv *priv; if (!dev) return; priv = dev->priv; wait_for_completion(&priv->fw_wait_load); p54_unregister_common(dev); release_firmware(priv->fw); p54_free_common(dev); } static int p54u_pre_reset(struct usb_interface *intf) { struct ieee80211_hw *dev = usb_get_intfdata(intf); if (!dev) return -ENODEV; p54u_stop(dev); return 0; } static int p54u_resume(struct usb_interface *intf) { struct ieee80211_hw *dev = usb_get_intfdata(intf); struct p54u_priv *priv; if (!dev) return -ENODEV; priv = dev->priv; if (unlikely(!(priv->upload_fw && priv->fw))) return 0; return priv->upload_fw(dev); } 
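/*
 * A USB reset wipes the firmware from the device, so p54u_post_reset()
 * below first re-uploads it through p54u_resume() and only then, if an
 * interface was configured, asks mac80211 to rebuild the previous state
 * via ieee80211_restart_hw().
 */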
static int p54u_post_reset(struct usb_interface *intf) { struct ieee80211_hw *dev = usb_get_intfdata(intf); struct p54u_priv *priv; int err; err = p54u_resume(intf); if (err) return err; /* reinitialize old device state */ priv = dev->priv; if (priv->common.mode != NL80211_IFTYPE_UNSPECIFIED) ieee80211_restart_hw(dev); return 0; } #ifdef CONFIG_PM static int p54u_suspend(struct usb_interface *intf, pm_message_t message) { return p54u_pre_reset(intf); } #endif /* CONFIG_PM */ static struct usb_driver p54u_driver = { .name = "p54usb", .id_table = p54u_table, .probe = p54u_probe, .disconnect = p54u_disconnect, .pre_reset = p54u_pre_reset, .post_reset = p54u_post_reset, #ifdef CONFIG_PM .suspend = p54u_suspend, .resume = p54u_resume, .reset_resume = p54u_resume, #endif /* CONFIG_PM */ .soft_unbind = 1, .disable_hub_initiated_lpm = 1, }; module_usb_driver(p54u_driver);
linux-master
drivers/net/wireless/intersil/p54/p54usb.c
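p54u_probe() above never blocks on the firmware image: it queues an asynchronous request with request_firmware_nowait() and lets p54u_load_firmware_cb() finish bring-up, while p54u_disconnect() waits on fw_wait_load so the callback cannot race with teardown. The fragment below is a minimal, hedged sketch of that pattern only; the foo_* names and the "foo/fw.bin" path are invented for illustration and are not part of the driver.

#include <linux/completion.h>
#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/module.h>

struct foo_dev {				/* hypothetical driver context */
	struct device *dev;
	struct completion fw_done;
	const struct firmware *fw;
};

static void foo_fw_cb(const struct firmware *fw, void *context)
{
	struct foo_dev *foo = context;

	foo->fw = fw;			/* NULL if the image was not found */
	/* ...parse/upload the image and register the device here... */
	complete(&foo->fw_done);	/* unblocks a pending foo_teardown() */
}

static int foo_request_fw(struct foo_dev *foo)
{
	init_completion(&foo->fw_done);
	/* returns immediately; foo_fw_cb() runs once the file is resolved */
	return request_firmware_nowait(THIS_MODULE, true, "foo/fw.bin",
				       foo->dev, GFP_KERNEL, foo, foo_fw_cb);
}

static void foo_teardown(struct foo_dev *foo)
{
	wait_for_completion(&foo->fw_done);	/* serialize with the callback */
	release_firmware(foo->fw);		/* safe to call with NULL */
}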
// SPDX-License-Identifier: GPL-2.0-only /* * Firmware I/O code for mac80211 Prism54 drivers * * Copyright (c) 2006, Michael Wu <[email protected]> * Copyright (c) 2007-2009, Christian Lamparter <[email protected]> * Copyright 2008, Johannes Berg <[email protected]> * * Based on: * - the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <[email protected]>, et al. * - stlc45xx driver * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). */ #include <linux/slab.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/export.h> #include <net/mac80211.h> #include "p54.h" #include "eeprom.h" #include "lmac.h" int p54_parse_firmware(struct ieee80211_hw *dev, const struct firmware *fw) { struct p54_common *priv = dev->priv; struct exp_if *exp_if; struct bootrec *bootrec; u32 *data = (u32 *)fw->data; u32 *end_data = (u32 *)fw->data + (fw->size >> 2); u8 *fw_version = NULL; size_t len; int i; int maxlen; if (priv->rx_start) return 0; while (data < end_data && *data) data++; while (data < end_data && !*data) data++; bootrec = (struct bootrec *) data; while (bootrec->data <= end_data && (bootrec->data + (len = le32_to_cpu(bootrec->len))) <= end_data) { u32 code = le32_to_cpu(bootrec->code); switch (code) { case BR_CODE_COMPONENT_ID: priv->fw_interface = be32_to_cpup((__be32 *) bootrec->data); switch (priv->fw_interface) { case FW_LM86: case FW_LM20: case FW_LM87: { char *iftype = (char *)bootrec->data; wiphy_info(priv->hw->wiphy, "p54 detected a LM%c%c firmware\n", iftype[2], iftype[3]); break; } case FW_FMAC: default: wiphy_err(priv->hw->wiphy, "unsupported firmware\n"); return -ENODEV; } break; case BR_CODE_COMPONENT_VERSION: /* 24 bytes should be enough for all firmwares */ if (strnlen((unsigned char *) bootrec->data, 24) < 24) fw_version = (unsigned char *) bootrec->data; break; case BR_CODE_DESCR: { struct bootrec_desc *desc = (struct bootrec_desc *)bootrec->data; priv->rx_start = le32_to_cpu(desc->rx_start); /* FIXME add sanity checking */ priv->rx_end = le32_to_cpu(desc->rx_end) - 0x3500; priv->headroom = desc->headroom; priv->tailroom = desc->tailroom; priv->privacy_caps = desc->privacy_caps; priv->rx_keycache_size = desc->rx_keycache_size; if (le32_to_cpu(bootrec->len) == 11) priv->rx_mtu = le16_to_cpu(desc->rx_mtu); else priv->rx_mtu = (size_t) 0x620 - priv->tx_hdr_len; maxlen = priv->tx_hdr_len + /* USB devices */ sizeof(struct p54_rx_data) + 4 + /* rx alignment */ IEEE80211_MAX_FRAG_THRESHOLD; if (priv->rx_mtu > maxlen && PAGE_SIZE == 4096) { printk(KERN_INFO "p54: rx_mtu reduced from %d " "to %d\n", priv->rx_mtu, maxlen); priv->rx_mtu = maxlen; } break; } case BR_CODE_EXPOSED_IF: exp_if = (struct exp_if *) bootrec->data; for (i = 0; i < (len * sizeof(*exp_if) / 4); i++) if (exp_if[i].if_id == cpu_to_le16(IF_ID_LMAC)) priv->fw_var = le16_to_cpu(exp_if[i].variant); break; case BR_CODE_DEPENDENT_IF: break; case BR_CODE_END_OF_BRA: case LEGACY_BR_CODE_END_OF_BRA: end_data = NULL; break; default: break; } bootrec = (struct bootrec *)&bootrec->data[len]; } if (fw_version) { wiphy_info(priv->hw->wiphy, "FW rev %s - Softmac protocol %x.%x\n", fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); snprintf(dev->wiphy->fw_version, sizeof(dev->wiphy->fw_version), "%s - %x.%x", fw_version, priv->fw_var >> 8, priv->fw_var & 0xff); } if (priv->fw_var < 0x500) wiphy_info(priv->hw->wiphy, "you are using an obsolete firmware. 
" "visit http://wireless.wiki.kernel.org/en/users/Drivers/p54 " "and grab one for \"kernel >= 2.6.28\"!\n"); if (priv->fw_var >= 0x300) { /* Firmware supports QoS, use it! */ if (priv->fw_var >= 0x500) { priv->tx_stats[P54_QUEUE_AC_VO].limit = 16; priv->tx_stats[P54_QUEUE_AC_VI].limit = 16; priv->tx_stats[P54_QUEUE_AC_BE].limit = 16; priv->tx_stats[P54_QUEUE_AC_BK].limit = 16; } else { priv->tx_stats[P54_QUEUE_AC_VO].limit = 3; priv->tx_stats[P54_QUEUE_AC_VI].limit = 4; priv->tx_stats[P54_QUEUE_AC_BE].limit = 3; priv->tx_stats[P54_QUEUE_AC_BK].limit = 2; } priv->hw->queues = P54_QUEUE_AC_NUM; } wiphy_info(priv->hw->wiphy, "cryptographic accelerator WEP:%s, TKIP:%s, CCMP:%s\n", (priv->privacy_caps & BR_DESC_PRIV_CAP_WEP) ? "YES" : "no", (priv->privacy_caps & (BR_DESC_PRIV_CAP_TKIP | BR_DESC_PRIV_CAP_MICHAEL)) ? "YES" : "no", (priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP) ? "YES" : "no"); if (priv->rx_keycache_size) { /* * NOTE: * * The firmware provides at most 255 (0 - 254) slots * for keys which are then used to offload decryption. * As a result the 255 entry (aka 0xff) can be used * safely by the driver to mark keys that didn't fit * into the full cache. This trick saves us from * keeping a extra list for uploaded keys. */ priv->used_rxkeys = bitmap_zalloc(priv->rx_keycache_size, GFP_KERNEL); if (!priv->used_rxkeys) return -ENOMEM; } return 0; } EXPORT_SYMBOL_GPL(p54_parse_firmware); static struct sk_buff *p54_alloc_skb(struct p54_common *priv, u16 hdr_flags, u16 payload_len, u16 type, gfp_t memflags) { struct p54_hdr *hdr; struct sk_buff *skb; size_t frame_len = sizeof(*hdr) + payload_len; if (frame_len > P54_MAX_CTRL_FRAME_LEN) return NULL; if (unlikely(skb_queue_len(&priv->tx_pending) > 64)) return NULL; skb = __dev_alloc_skb(priv->tx_hdr_len + frame_len, memflags); if (!skb) return NULL; skb_reserve(skb, priv->tx_hdr_len); hdr = skb_put(skb, sizeof(*hdr)); hdr->flags = cpu_to_le16(hdr_flags); hdr->len = cpu_to_le16(payload_len); hdr->type = cpu_to_le16(type); hdr->tries = hdr->rts_tries = 0; return skb; } int p54_download_eeprom(struct p54_common *priv, void *buf, u16 offset, u16 len) { struct p54_eeprom_lm86 *eeprom_hdr; struct sk_buff *skb; size_t eeprom_hdr_size; int ret = 0; long timeout; if (priv->fw_var >= 0x509) eeprom_hdr_size = sizeof(*eeprom_hdr); else eeprom_hdr_size = 0x4; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL, eeprom_hdr_size + len, P54_CONTROL_TYPE_EEPROM_READBACK, GFP_KERNEL); if (unlikely(!skb)) return -ENOMEM; mutex_lock(&priv->eeprom_mutex); priv->eeprom = buf; eeprom_hdr = skb_put(skb, eeprom_hdr_size + len); if (priv->fw_var < 0x509) { eeprom_hdr->v1.offset = cpu_to_le16(offset); eeprom_hdr->v1.len = cpu_to_le16(len); } else { eeprom_hdr->v2.offset = cpu_to_le32(offset); eeprom_hdr->v2.len = cpu_to_le16(len); eeprom_hdr->v2.magic2 = 0xf; memcpy(eeprom_hdr->v2.magic, (const char *)"LOCK", 4); } p54_tx(priv, skb); timeout = wait_for_completion_interruptible_timeout( &priv->eeprom_comp, HZ); if (timeout <= 0) { wiphy_err(priv->hw->wiphy, "device does not respond or signal received!\n"); ret = -EBUSY; } priv->eeprom = NULL; mutex_unlock(&priv->eeprom_mutex); return ret; } int p54_update_beacon_tim(struct p54_common *priv, u16 aid, bool set) { struct sk_buff *skb; struct p54_tim *tim; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*tim), P54_CONTROL_TYPE_TIM, GFP_ATOMIC); if (unlikely(!skb)) return -ENOMEM; tim = skb_put(skb, sizeof(*tim)); tim->count = 1; tim->entry[0] = cpu_to_le16(set ? 
(aid | 0x8000) : aid); p54_tx(priv, skb); return 0; } int p54_sta_unlock(struct p54_common *priv, u8 *addr) { struct sk_buff *skb; struct p54_sta_unlock *sta; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*sta), P54_CONTROL_TYPE_PSM_STA_UNLOCK, GFP_ATOMIC); if (unlikely(!skb)) return -ENOMEM; sta = skb_put(skb, sizeof(*sta)); memcpy(sta->addr, addr, ETH_ALEN); p54_tx(priv, skb); return 0; } int p54_tx_cancel(struct p54_common *priv, __le32 req_id) { struct sk_buff *skb; struct p54_txcancel *cancel; u32 _req_id = le32_to_cpu(req_id); if (unlikely(_req_id < priv->rx_start || _req_id > priv->rx_end)) return -EINVAL; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*cancel), P54_CONTROL_TYPE_TXCANCEL, GFP_ATOMIC); if (unlikely(!skb)) return -ENOMEM; cancel = skb_put(skb, sizeof(*cancel)); cancel->req_id = req_id; p54_tx(priv, skb); return 0; } int p54_setup_mac(struct p54_common *priv) { struct sk_buff *skb; struct p54_setup_mac *setup; u16 mode; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*setup), P54_CONTROL_TYPE_SETUP, GFP_ATOMIC); if (!skb) return -ENOMEM; setup = skb_put(skb, sizeof(*setup)); if (!(priv->hw->conf.flags & IEEE80211_CONF_IDLE)) { switch (priv->mode) { case NL80211_IFTYPE_STATION: mode = P54_FILTER_TYPE_STATION; break; case NL80211_IFTYPE_AP: mode = P54_FILTER_TYPE_AP; break; case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: mode = P54_FILTER_TYPE_IBSS; break; case NL80211_IFTYPE_MONITOR: mode = P54_FILTER_TYPE_PROMISCUOUS; break; default: mode = P54_FILTER_TYPE_HIBERNATE; break; } /* * "TRANSPARENT and PROMISCUOUS are mutually exclusive" * STSW45X0C LMAC API - page 12 */ if (priv->filter_flags & FIF_OTHER_BSS && (mode != P54_FILTER_TYPE_PROMISCUOUS)) mode |= P54_FILTER_TYPE_TRANSPARENT; } else { mode = P54_FILTER_TYPE_HIBERNATE; } setup->mac_mode = cpu_to_le16(mode); memcpy(setup->mac_addr, priv->mac_addr, ETH_ALEN); memcpy(setup->bssid, priv->bssid, ETH_ALEN); setup->rx_antenna = 2 & priv->rx_diversity_mask; /* automatic */ setup->rx_align = 0; if (priv->fw_var < 0x500) { setup->v1.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask); memset(setup->v1.rts_rates, 0, 8); setup->v1.rx_addr = cpu_to_le32(priv->rx_end); setup->v1.max_rx = cpu_to_le16(priv->rx_mtu); setup->v1.rxhw = cpu_to_le16(priv->rxhw); setup->v1.wakeup_timer = cpu_to_le16(priv->wakeup_timer); setup->v1.unalloc0 = cpu_to_le16(0); } else { setup->v2.rx_addr = cpu_to_le32(priv->rx_end); setup->v2.max_rx = cpu_to_le16(priv->rx_mtu); setup->v2.rxhw = cpu_to_le16(priv->rxhw); setup->v2.timer = cpu_to_le16(priv->wakeup_timer); setup->v2.truncate = cpu_to_le16(48896); setup->v2.basic_rate_mask = cpu_to_le32(priv->basic_rate_mask); setup->v2.sbss_offset = 0; setup->v2.mcast_window = 0; setup->v2.rx_rssi_threshold = 0; setup->v2.rx_ed_threshold = 0; setup->v2.ref_clock = cpu_to_le32(644245094); setup->v2.lpf_bandwidth = cpu_to_le16(65535); setup->v2.osc_start_delay = cpu_to_le16(65535); } p54_tx(priv, skb); priv->phy_idle = mode == P54_FILTER_TYPE_HIBERNATE; return 0; } int p54_scan(struct p54_common *priv, u16 mode, u16 dwell) { struct sk_buff *skb; struct p54_hdr *hdr; struct p54_scan_head *head; struct p54_iq_autocal_entry *iq_autocal; union p54_scan_body_union *body; struct p54_scan_tail_rate *rate; struct pda_rssi_cal_entry *rssi; struct p54_rssi_db_entry *rssi_data; unsigned int i; void *entry; __le16 freq = cpu_to_le16(priv->hw->conf.chandef.chan->center_freq); skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*head) + 2 + sizeof(*iq_autocal) + 
sizeof(*body) + sizeof(*rate) + 2 * sizeof(*rssi), P54_CONTROL_TYPE_SCAN, GFP_ATOMIC); if (!skb) return -ENOMEM; head = skb_put(skb, sizeof(*head)); memset(head->scan_params, 0, sizeof(head->scan_params)); head->mode = cpu_to_le16(mode); head->dwell = cpu_to_le16(dwell); head->freq = freq; if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { __le16 *pa_power_points = skb_put(skb, 2); *pa_power_points = cpu_to_le16(0x0c); } iq_autocal = skb_put(skb, sizeof(*iq_autocal)); for (i = 0; i < priv->iq_autocal_len; i++) { if (priv->iq_autocal[i].freq != freq) continue; memcpy(iq_autocal, &priv->iq_autocal[i].params, sizeof(struct p54_iq_autocal_entry)); break; } if (i == priv->iq_autocal_len) goto err; if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) body = skb_put(skb, sizeof(body->longbow)); else body = skb_put(skb, sizeof(body->normal)); for (i = 0; i < priv->output_limit->entries; i++) { __le16 *entry_freq = (void *) (priv->output_limit->data + priv->output_limit->entry_size * i); if (*entry_freq != freq) continue; if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { memcpy(&body->longbow.power_limits, (void *) entry_freq + sizeof(__le16), priv->output_limit->entry_size); } else { struct pda_channel_output_limit *limits = (void *) entry_freq; body->normal.val_barker = 0x38; body->normal.val_bpsk = body->normal.dup_bpsk = limits->val_bpsk; body->normal.val_qpsk = body->normal.dup_qpsk = limits->val_qpsk; body->normal.val_16qam = body->normal.dup_16qam = limits->val_16qam; body->normal.val_64qam = body->normal.dup_64qam = limits->val_64qam; } break; } if (i == priv->output_limit->entries) goto err; entry = (void *)(priv->curve_data->data + priv->curve_data->offset); for (i = 0; i < priv->curve_data->entries; i++) { if (*((__le16 *)entry) != freq) { entry += priv->curve_data->entry_size; continue; } if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { memcpy(&body->longbow.curve_data, entry + sizeof(__le16), priv->curve_data->entry_size); } else { struct p54_scan_body *chan = &body->normal; struct pda_pa_curve_data *curve_data = (void *) priv->curve_data->data; entry += sizeof(__le16); chan->pa_points_per_curve = 8; memset(chan->curve_data, 0, sizeof(chan->curve_data)); memcpy(chan->curve_data, entry, sizeof(struct p54_pa_curve_data_sample) * min((u8)8, curve_data->points_per_channel)); } break; } if (i == priv->curve_data->entries) goto err; if ((priv->fw_var >= 0x500) && (priv->fw_var < 0x509)) { rate = skb_put(skb, sizeof(*rate)); rate->basic_rate_mask = cpu_to_le32(priv->basic_rate_mask); for (i = 0; i < sizeof(rate->rts_rates); i++) rate->rts_rates[i] = i; } rssi = skb_put(skb, sizeof(*rssi)); rssi_data = p54_rssi_find(priv, le16_to_cpu(freq)); rssi->mul = cpu_to_le16(rssi_data->mul); rssi->add = cpu_to_le16(rssi_data->add); if (priv->rxhw == PDR_SYNTH_FRONTEND_LONGBOW) { /* Longbow frontend needs ever more */ rssi = skb_put(skb, sizeof(*rssi)); rssi->mul = cpu_to_le16(rssi_data->longbow_unkn); rssi->add = cpu_to_le16(rssi_data->longbow_unk2); } if (priv->fw_var >= 0x509) { rate = skb_put(skb, sizeof(*rate)); rate->basic_rate_mask = cpu_to_le32(priv->basic_rate_mask); for (i = 0; i < sizeof(rate->rts_rates); i++) rate->rts_rates[i] = i; } hdr = (struct p54_hdr *) skb->data; hdr->len = cpu_to_le16(skb->len - sizeof(*hdr)); p54_tx(priv, skb); priv->cur_rssi = rssi_data; return 0; err: wiphy_err(priv->hw->wiphy, "frequency change to channel %d failed.\n", ieee80211_frequency_to_channel( priv->hw->conf.chandef.chan->center_freq)); dev_kfree_skb_any(skb); return -EINVAL; } int p54_set_leds(struct p54_common *priv) 
{ struct sk_buff *skb; struct p54_led *led; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*led), P54_CONTROL_TYPE_LED, GFP_ATOMIC); if (unlikely(!skb)) return -ENOMEM; led = skb_put(skb, sizeof(*led)); led->flags = cpu_to_le16(0x0003); led->mask[0] = led->mask[1] = cpu_to_le16(priv->softled_state); led->delay[0] = cpu_to_le16(1); led->delay[1] = cpu_to_le16(0); p54_tx(priv, skb); return 0; } int p54_set_edcf(struct p54_common *priv) { struct sk_buff *skb; struct p54_edcf *edcf; u8 rtd; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*edcf), P54_CONTROL_TYPE_DCFINIT, GFP_ATOMIC); if (unlikely(!skb)) return -ENOMEM; edcf = skb_put(skb, sizeof(*edcf)); if (priv->use_short_slot) { edcf->slottime = 9; edcf->sifs = 0x10; edcf->eofpad = 0x00; } else { edcf->slottime = 20; edcf->sifs = 0x0a; edcf->eofpad = 0x06; } /* * calculate the extra round trip delay according to the * formula from 802.11-2007 17.3.8.6. */ rtd = 3 * priv->coverage_class; edcf->slottime += rtd; edcf->round_trip_delay = cpu_to_le16(rtd); /* (see prism54/isl_oid.h for further details) */ edcf->frameburst = cpu_to_le16(0); edcf->flags = 0; memset(edcf->mapping, 0, sizeof(edcf->mapping)); memcpy(edcf->queue, priv->qos_params, sizeof(edcf->queue)); p54_tx(priv, skb); return 0; } int p54_set_ps(struct p54_common *priv) { struct sk_buff *skb; struct p54_psm *psm; unsigned int i; u16 mode; if (priv->hw->conf.flags & IEEE80211_CONF_PS && !priv->powersave_override) mode = P54_PSM | P54_PSM_BEACON_TIMEOUT | P54_PSM_DTIM | P54_PSM_CHECKSUM | P54_PSM_MCBC; else mode = P54_PSM_CAM; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*psm), P54_CONTROL_TYPE_PSM, GFP_ATOMIC); if (!skb) return -ENOMEM; psm = skb_put(skb, sizeof(*psm)); psm->mode = cpu_to_le16(mode); psm->aid = cpu_to_le16(priv->aid); for (i = 0; i < ARRAY_SIZE(psm->intervals); i++) { psm->intervals[i].interval = cpu_to_le16(priv->hw->conf.listen_interval); psm->intervals[i].periods = cpu_to_le16(1); } psm->beacon_rssi_skip_max = 200; psm->rssi_delta_threshold = 0; psm->nr = 1; psm->exclude[0] = WLAN_EID_TIM; p54_tx(priv, skb); priv->phy_ps = mode != P54_PSM_CAM; return 0; } int p54_init_xbow_synth(struct p54_common *priv) { struct sk_buff *skb; struct p54_xbow_synth *xbow; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*xbow), P54_CONTROL_TYPE_XBOW_SYNTH_CFG, GFP_KERNEL); if (unlikely(!skb)) return -ENOMEM; xbow = skb_put(skb, sizeof(*xbow)); xbow->magic1 = cpu_to_le16(0x1); xbow->magic2 = cpu_to_le16(0x2); xbow->freq = cpu_to_le16(5390); memset(xbow->padding, 0, sizeof(xbow->padding)); p54_tx(priv, skb); return 0; } int p54_upload_key(struct p54_common *priv, u8 algo, int slot, u8 idx, u8 len, u8 *addr, u8* key) { struct sk_buff *skb; struct p54_keycache *rxkey; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*rxkey), P54_CONTROL_TYPE_RX_KEYCACHE, GFP_KERNEL); if (unlikely(!skb)) return -ENOMEM; rxkey = skb_put(skb, sizeof(*rxkey)); rxkey->entry = slot; rxkey->key_id = idx; rxkey->key_type = algo; if (addr) memcpy(rxkey->mac, addr, ETH_ALEN); else eth_broadcast_addr(rxkey->mac); switch (algo) { case P54_CRYPTO_WEP: case P54_CRYPTO_AESCCMP: rxkey->key_len = min_t(u8, 16, len); memcpy(rxkey->key, key, rxkey->key_len); break; case P54_CRYPTO_TKIPMICHAEL: rxkey->key_len = 24; memcpy(rxkey->key, key, 16); memcpy(&(rxkey->key[16]), &(key [NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY]), 8); break; case P54_CRYPTO_NONE: rxkey->key_len = 0; memset(rxkey->key, 0, sizeof(rxkey->key)); break; default: wiphy_err(priv->hw->wiphy, 
"invalid cryptographic algorithm: %d\n", algo); dev_kfree_skb(skb); return -EINVAL; } p54_tx(priv, skb); return 0; } int p54_fetch_statistics(struct p54_common *priv) { struct ieee80211_tx_info *txinfo; struct p54_tx_info *p54info; struct sk_buff *skb; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL, sizeof(struct p54_statistics), P54_CONTROL_TYPE_STAT_READBACK, GFP_KERNEL); if (!skb) return -ENOMEM; /* * The statistic feedback causes some extra headaches here, if it * is not to crash/corrupt the firmware data structures. * * Unlike all other Control Get OIDs we can not use helpers like * skb_put to reserve the space for the data we're requesting. * Instead the extra frame length -which will hold the results later- * will only be told to the p54_assign_address, so that following * frames won't be placed into the allegedly empty area. */ txinfo = IEEE80211_SKB_CB(skb); p54info = (void *) txinfo->rate_driver_data; p54info->extra_len = sizeof(struct p54_statistics); p54_tx(priv, skb); return 0; } int p54_set_groupfilter(struct p54_common *priv) { struct p54_group_address_table *grp; struct sk_buff *skb; bool on = false; skb = p54_alloc_skb(priv, P54_HDR_FLAG_CONTROL_OPSET, sizeof(*grp), P54_CONTROL_TYPE_GROUP_ADDRESS_TABLE, GFP_KERNEL); if (!skb) return -ENOMEM; grp = skb_put(skb, sizeof(*grp)); on = !(priv->filter_flags & FIF_ALLMULTI) && (priv->mc_maclist_num > 0 && priv->mc_maclist_num <= MC_FILTER_ADDRESS_NUM); if (on) { grp->filter_enable = cpu_to_le16(1); grp->num_address = cpu_to_le16(priv->mc_maclist_num); memcpy(grp->mac_list, priv->mc_maclist, sizeof(grp->mac_list)); } else { grp->filter_enable = cpu_to_le16(0); grp->num_address = cpu_to_le16(0); memset(grp->mac_list, 0, sizeof(grp->mac_list)); } p54_tx(priv, skb); return 0; }
linux-master
drivers/net/wireless/intersil/p54/fwio.c
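Nearly every command builder in fwio.c has the same shape: reserve headroom for the bus-specific TX header, append a p54_hdr plus the command payload with skb_put(), then queue the frame with p54_tx(). The sketch below mirrors only that shape with invented foo_* types and a made-up payload; it is an illustration of the pattern, not the driver's own helper.

#include <asm/byteorder.h>
#include <linux/skbuff.h>
#include <linux/types.h>

struct foo_hdr {			/* stands in for struct p54_hdr */
	__le16 flags;
	__le16 len;
	__le16 type;
};

struct foo_cmd {			/* hypothetical control payload */
	__le16 value;
};

/* build "bus headroom + command header + payload" the way the callers above do */
static struct sk_buff *foo_build_cmd(unsigned int tx_hdr_len, u16 type,
				     u16 value, gfp_t gfp)
{
	struct sk_buff *skb;
	struct foo_hdr *hdr;
	struct foo_cmd *cmd;

	skb = __dev_alloc_skb(tx_hdr_len + sizeof(*hdr) + sizeof(*cmd), gfp);
	if (!skb)
		return NULL;

	skb_reserve(skb, tx_hdr_len);		/* headroom for the bus header */
	hdr = skb_put(skb, sizeof(*hdr));	/* firmware command header */
	hdr->flags = cpu_to_le16(0);
	hdr->len = cpu_to_le16(sizeof(*cmd));
	hdr->type = cpu_to_le16(type);

	cmd = skb_put(skb, sizeof(*cmd));	/* command-specific payload */
	cmd->value = cpu_to_le16(value);
	return skb;				/* caller hands it to the TX path */
}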
// SPDX-License-Identifier: GPL-2.0-only /* * Linux device driver for PCI based Prism54 * * Copyright (c) 2006, Michael Wu <[email protected]> * Copyright (c) 2008, Christian Lamparter <[email protected]> * * Based on the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <[email protected]>, et al. */ #include <linux/pci.h> #include <linux/slab.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/completion.h> #include <linux/module.h> #include <net/mac80211.h> #include "p54.h" #include "lmac.h" #include "p54pci.h" MODULE_AUTHOR("Michael Wu <[email protected]>"); MODULE_DESCRIPTION("Prism54 PCI wireless driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("prism54pci"); MODULE_FIRMWARE("isl3886pci"); static const struct pci_device_id p54p_table[] = { /* Intersil PRISM Duette/Prism GT Wireless LAN adapter */ { PCI_DEVICE(0x1260, 0x3890) }, /* 3COM 3CRWE154G72 Wireless LAN adapter */ { PCI_DEVICE(0x10b7, 0x6001) }, /* Intersil PRISM Indigo Wireless LAN adapter */ { PCI_DEVICE(0x1260, 0x3877) }, /* Intersil PRISM Javelin/Xbow Wireless LAN adapter */ { PCI_DEVICE(0x1260, 0x3886) }, /* Intersil PRISM Xbow Wireless LAN adapter (Symbol AP-300) */ { PCI_DEVICE(0x1260, 0xffff) }, { }, }; MODULE_DEVICE_TABLE(pci, p54p_table); static int p54p_upload_firmware(struct ieee80211_hw *dev) { struct p54p_priv *priv = dev->priv; __le32 reg; int err; __le32 *data; u32 remains, left, device_addr; P54P_WRITE(int_enable, cpu_to_le32(0)); P54P_READ(int_enable); udelay(10); reg = P54P_READ(ctrl_stat); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RAMBOOT); P54P_WRITE(ctrl_stat, reg); P54P_READ(ctrl_stat); udelay(10); reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); P54P_WRITE(ctrl_stat, reg); wmb(); udelay(10); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); P54P_WRITE(ctrl_stat, reg); wmb(); /* wait for the firmware to reset properly */ mdelay(10); err = p54_parse_firmware(dev, priv->firmware); if (err) return err; if (priv->common.fw_interface != FW_LM86) { dev_err(&priv->pdev->dev, "wrong firmware, " "please get a LM86(PCI) firmware a try again.\n"); return -EINVAL; } data = (__le32 *) priv->firmware->data; remains = priv->firmware->size; device_addr = ISL38XX_DEV_FIRMWARE_ADDR; while (remains) { u32 i = 0; left = min((u32)0x1000, remains); P54P_WRITE(direct_mem_base, cpu_to_le32(device_addr)); P54P_READ(int_enable); device_addr += 0x1000; while (i < left) { P54P_WRITE(direct_mem_win[i], *data++); i += sizeof(u32); } remains -= left; P54P_READ(int_enable); } reg = P54P_READ(ctrl_stat); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_CLKRUN); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RAMBOOT); P54P_WRITE(ctrl_stat, reg); P54P_READ(ctrl_stat); udelay(10); reg |= cpu_to_le32(ISL38XX_CTRL_STAT_RESET); P54P_WRITE(ctrl_stat, reg); wmb(); udelay(10); reg &= cpu_to_le32(~ISL38XX_CTRL_STAT_RESET); P54P_WRITE(ctrl_stat, reg); wmb(); udelay(10); /* wait for the firmware to boot properly */ mdelay(100); return 0; } static void p54p_refill_rx_ring(struct ieee80211_hw *dev, int ring_index, struct p54p_desc *ring, u32 ring_limit, struct sk_buff **rx_buf, u32 index) { struct p54p_priv *priv = dev->priv; struct p54p_ring_control *ring_control = priv->ring_control; u32 limit, idx, i; idx = le32_to_cpu(ring_control->host_idx[ring_index]); limit = idx; limit -= index; limit = ring_limit - limit; i = idx % ring_limit; while (limit-- > 1) { struct p54p_desc *desc = &ring[i]; if (!desc->host_addr) 
{ struct sk_buff *skb; dma_addr_t mapping; skb = dev_alloc_skb(priv->common.rx_mtu + 32); if (!skb) break; mapping = dma_map_single(&priv->pdev->dev, skb_tail_pointer(skb), priv->common.rx_mtu + 32, DMA_FROM_DEVICE); if (dma_mapping_error(&priv->pdev->dev, mapping)) { dev_kfree_skb_any(skb); dev_err(&priv->pdev->dev, "RX DMA Mapping error\n"); break; } desc->host_addr = cpu_to_le32(mapping); desc->device_addr = 0; // FIXME: necessary? desc->len = cpu_to_le16(priv->common.rx_mtu + 32); desc->flags = 0; rx_buf[i] = skb; } i++; idx++; i %= ring_limit; } wmb(); ring_control->host_idx[ring_index] = cpu_to_le32(idx); } static void p54p_check_rx_ring(struct ieee80211_hw *dev, u32 *index, int ring_index, struct p54p_desc *ring, u32 ring_limit, struct sk_buff **rx_buf) { struct p54p_priv *priv = dev->priv; struct p54p_ring_control *ring_control = priv->ring_control; struct p54p_desc *desc; u32 idx, i; i = (*index) % ring_limit; (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]); idx %= ring_limit; while (i != idx) { u16 len; struct sk_buff *skb; dma_addr_t dma_addr; desc = &ring[i]; len = le16_to_cpu(desc->len); skb = rx_buf[i]; if (!skb) { i++; i %= ring_limit; continue; } if (unlikely(len > priv->common.rx_mtu)) { if (net_ratelimit()) dev_err(&priv->pdev->dev, "rx'd frame size " "exceeds length threshold.\n"); len = priv->common.rx_mtu; } dma_addr = le32_to_cpu(desc->host_addr); dma_sync_single_for_cpu(&priv->pdev->dev, dma_addr, priv->common.rx_mtu + 32, DMA_FROM_DEVICE); skb_put(skb, len); if (p54_rx(dev, skb)) { dma_unmap_single(&priv->pdev->dev, dma_addr, priv->common.rx_mtu + 32, DMA_FROM_DEVICE); rx_buf[i] = NULL; desc->host_addr = cpu_to_le32(0); } else { skb_trim(skb, 0); dma_sync_single_for_device(&priv->pdev->dev, dma_addr, priv->common.rx_mtu + 32, DMA_FROM_DEVICE); desc->len = cpu_to_le16(priv->common.rx_mtu + 32); } i++; i %= ring_limit; } p54p_refill_rx_ring(dev, ring_index, ring, ring_limit, rx_buf, *index); } static void p54p_check_tx_ring(struct ieee80211_hw *dev, u32 *index, int ring_index, struct p54p_desc *ring, u32 ring_limit, struct sk_buff **tx_buf) { struct p54p_priv *priv = dev->priv; struct p54p_ring_control *ring_control = priv->ring_control; struct p54p_desc *desc; struct sk_buff *skb; u32 idx, i; i = (*index) % ring_limit; (*index) = idx = le32_to_cpu(ring_control->device_idx[ring_index]); idx %= ring_limit; while (i != idx) { desc = &ring[i]; skb = tx_buf[i]; tx_buf[i] = NULL; dma_unmap_single(&priv->pdev->dev, le32_to_cpu(desc->host_addr), le16_to_cpu(desc->len), DMA_TO_DEVICE); desc->host_addr = 0; desc->device_addr = 0; desc->len = 0; desc->flags = 0; if (skb && FREE_AFTER_TX(skb)) p54_free_skb(dev, skb); i++; i %= ring_limit; } } static void p54p_tasklet(struct tasklet_struct *t) { struct p54p_priv *priv = from_tasklet(priv, t, tasklet); struct ieee80211_hw *dev = pci_get_drvdata(priv->pdev); struct p54p_ring_control *ring_control = priv->ring_control; p54p_check_tx_ring(dev, &priv->tx_idx_mgmt, 3, ring_control->tx_mgmt, ARRAY_SIZE(ring_control->tx_mgmt), priv->tx_buf_mgmt); p54p_check_tx_ring(dev, &priv->tx_idx_data, 1, ring_control->tx_data, ARRAY_SIZE(ring_control->tx_data), priv->tx_buf_data); p54p_check_rx_ring(dev, &priv->rx_idx_mgmt, 2, ring_control->rx_mgmt, ARRAY_SIZE(ring_control->rx_mgmt), priv->rx_buf_mgmt); p54p_check_rx_ring(dev, &priv->rx_idx_data, 0, ring_control->rx_data, ARRAY_SIZE(ring_control->rx_data), priv->rx_buf_data); wmb(); P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); } static irqreturn_t p54p_interrupt(int 
irq, void *dev_id) { struct ieee80211_hw *dev = dev_id; struct p54p_priv *priv = dev->priv; __le32 reg; reg = P54P_READ(int_ident); if (unlikely(reg == cpu_to_le32(0xFFFFFFFF))) { goto out; } P54P_WRITE(int_ack, reg); reg &= P54P_READ(int_enable); if (reg & cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)) tasklet_schedule(&priv->tasklet); else if (reg & cpu_to_le32(ISL38XX_INT_IDENT_INIT)) complete(&priv->boot_comp); out: return reg ? IRQ_HANDLED : IRQ_NONE; } static void p54p_tx(struct ieee80211_hw *dev, struct sk_buff *skb) { unsigned long flags; struct p54p_priv *priv = dev->priv; struct p54p_ring_control *ring_control = priv->ring_control; struct p54p_desc *desc; dma_addr_t mapping; u32 idx, i; __le32 device_addr; spin_lock_irqsave(&priv->lock, flags); idx = le32_to_cpu(ring_control->host_idx[1]); i = idx % ARRAY_SIZE(ring_control->tx_data); device_addr = ((struct p54_hdr *)skb->data)->req_id; mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len, DMA_TO_DEVICE); if (dma_mapping_error(&priv->pdev->dev, mapping)) { spin_unlock_irqrestore(&priv->lock, flags); p54_free_skb(dev, skb); dev_err(&priv->pdev->dev, "TX DMA mapping error\n"); return ; } priv->tx_buf_data[i] = skb; desc = &ring_control->tx_data[i]; desc->host_addr = cpu_to_le32(mapping); desc->device_addr = device_addr; desc->len = cpu_to_le16(skb->len); desc->flags = 0; wmb(); ring_control->host_idx[1] = cpu_to_le32(idx + 1); spin_unlock_irqrestore(&priv->lock, flags); P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); P54P_READ(dev_int); } static void p54p_stop(struct ieee80211_hw *dev) { struct p54p_priv *priv = dev->priv; struct p54p_ring_control *ring_control = priv->ring_control; unsigned int i; struct p54p_desc *desc; P54P_WRITE(int_enable, cpu_to_le32(0)); P54P_READ(int_enable); udelay(10); free_irq(priv->pdev->irq, dev); tasklet_kill(&priv->tasklet); P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); for (i = 0; i < ARRAY_SIZE(priv->rx_buf_data); i++) { desc = &ring_control->rx_data[i]; if (desc->host_addr) dma_unmap_single(&priv->pdev->dev, le32_to_cpu(desc->host_addr), priv->common.rx_mtu + 32, DMA_FROM_DEVICE); kfree_skb(priv->rx_buf_data[i]); priv->rx_buf_data[i] = NULL; } for (i = 0; i < ARRAY_SIZE(priv->rx_buf_mgmt); i++) { desc = &ring_control->rx_mgmt[i]; if (desc->host_addr) dma_unmap_single(&priv->pdev->dev, le32_to_cpu(desc->host_addr), priv->common.rx_mtu + 32, DMA_FROM_DEVICE); kfree_skb(priv->rx_buf_mgmt[i]); priv->rx_buf_mgmt[i] = NULL; } for (i = 0; i < ARRAY_SIZE(priv->tx_buf_data); i++) { desc = &ring_control->tx_data[i]; if (desc->host_addr) dma_unmap_single(&priv->pdev->dev, le32_to_cpu(desc->host_addr), le16_to_cpu(desc->len), DMA_TO_DEVICE); p54_free_skb(dev, priv->tx_buf_data[i]); priv->tx_buf_data[i] = NULL; } for (i = 0; i < ARRAY_SIZE(priv->tx_buf_mgmt); i++) { desc = &ring_control->tx_mgmt[i]; if (desc->host_addr) dma_unmap_single(&priv->pdev->dev, le32_to_cpu(desc->host_addr), le16_to_cpu(desc->len), DMA_TO_DEVICE); p54_free_skb(dev, priv->tx_buf_mgmt[i]); priv->tx_buf_mgmt[i] = NULL; } memset(ring_control, 0, sizeof(*ring_control)); } static int p54p_open(struct ieee80211_hw *dev) { struct p54p_priv *priv = dev->priv; int err; long timeout; init_completion(&priv->boot_comp); err = request_irq(priv->pdev->irq, p54p_interrupt, IRQF_SHARED, "p54pci", dev); if (err) { dev_err(&priv->pdev->dev, "failed to register IRQ handler\n"); return err; } memset(priv->ring_control, 0, sizeof(*priv->ring_control)); err = p54p_upload_firmware(dev); if (err) { free_irq(priv->pdev->irq, dev); return 
err; } priv->rx_idx_data = priv->tx_idx_data = 0; priv->rx_idx_mgmt = priv->tx_idx_mgmt = 0; p54p_refill_rx_ring(dev, 0, priv->ring_control->rx_data, ARRAY_SIZE(priv->ring_control->rx_data), priv->rx_buf_data, 0); p54p_refill_rx_ring(dev, 2, priv->ring_control->rx_mgmt, ARRAY_SIZE(priv->ring_control->rx_mgmt), priv->rx_buf_mgmt, 0); P54P_WRITE(ring_control_base, cpu_to_le32(priv->ring_control_dma)); P54P_READ(ring_control_base); wmb(); udelay(10); P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_INIT)); P54P_READ(int_enable); wmb(); udelay(10); P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_RESET)); P54P_READ(dev_int); timeout = wait_for_completion_interruptible_timeout( &priv->boot_comp, HZ); if (timeout <= 0) { wiphy_err(dev->wiphy, "Cannot boot firmware!\n"); p54p_stop(dev); return timeout ? -ERESTARTSYS : -ETIMEDOUT; } P54P_WRITE(int_enable, cpu_to_le32(ISL38XX_INT_IDENT_UPDATE)); P54P_READ(int_enable); wmb(); udelay(10); P54P_WRITE(dev_int, cpu_to_le32(ISL38XX_DEV_INT_UPDATE)); P54P_READ(dev_int); wmb(); udelay(10); return 0; } static void p54p_firmware_step2(const struct firmware *fw, void *context) { struct p54p_priv *priv = context; struct ieee80211_hw *dev = priv->common.hw; struct pci_dev *pdev = priv->pdev; int err; if (!fw) { dev_err(&pdev->dev, "Cannot find firmware (isl3886pci)\n"); err = -ENOENT; goto out; } priv->firmware = fw; err = p54p_open(dev); if (err) goto out; err = p54_read_eeprom(dev); p54p_stop(dev); if (err) goto out; err = p54_register_common(dev, &pdev->dev); if (err) goto out; out: complete(&priv->fw_loaded); if (err) { struct device *parent = pdev->dev.parent; if (parent) device_lock(parent); /* * This will indirectly result in a call to p54p_remove. * Hence, we don't need to bother with freeing any * allocated ressources at all. 
*/ device_release_driver(&pdev->dev); if (parent) device_unlock(parent); } pci_dev_put(pdev); } static int p54p_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct p54p_priv *priv; struct ieee80211_hw *dev; unsigned long mem_addr, mem_len; int err; pci_dev_get(pdev); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable new PCI device\n"); goto err_put; } mem_addr = pci_resource_start(pdev, 0); mem_len = pci_resource_len(pdev, 0); if (mem_len < sizeof(struct p54p_csr)) { dev_err(&pdev->dev, "Too short PCI resources\n"); err = -ENODEV; goto err_disable_dev; } err = pci_request_regions(pdev, "p54pci"); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources\n"); goto err_disable_dev; } err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)); if (!err) err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No suitable DMA available\n"); goto err_free_reg; } pci_set_master(pdev); pci_try_set_mwi(pdev); pci_write_config_byte(pdev, 0x40, 0); pci_write_config_byte(pdev, 0x41, 0); dev = p54_init_common(sizeof(*priv)); if (!dev) { dev_err(&pdev->dev, "ieee80211 alloc failed\n"); err = -ENOMEM; goto err_free_reg; } priv = dev->priv; priv->pdev = pdev; init_completion(&priv->fw_loaded); SET_IEEE80211_DEV(dev, &pdev->dev); pci_set_drvdata(pdev, dev); priv->map = ioremap(mem_addr, mem_len); if (!priv->map) { dev_err(&pdev->dev, "Cannot map device memory\n"); err = -ENOMEM; goto err_free_dev; } priv->ring_control = dma_alloc_coherent(&pdev->dev, sizeof(*priv->ring_control), &priv->ring_control_dma, GFP_KERNEL); if (!priv->ring_control) { dev_err(&pdev->dev, "Cannot allocate rings\n"); err = -ENOMEM; goto err_iounmap; } priv->common.open = p54p_open; priv->common.stop = p54p_stop; priv->common.tx = p54p_tx; spin_lock_init(&priv->lock); tasklet_setup(&priv->tasklet, p54p_tasklet); err = request_firmware_nowait(THIS_MODULE, 1, "isl3886pci", &priv->pdev->dev, GFP_KERNEL, priv, p54p_firmware_step2); if (!err) return 0; dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control), priv->ring_control, priv->ring_control_dma); err_iounmap: iounmap(priv->map); err_free_dev: p54_free_common(dev); err_free_reg: pci_release_regions(pdev); err_disable_dev: pci_disable_device(pdev); err_put: pci_dev_put(pdev); return err; } static void p54p_remove(struct pci_dev *pdev) { struct ieee80211_hw *dev = pci_get_drvdata(pdev); struct p54p_priv *priv; if (!dev) return; priv = dev->priv; wait_for_completion(&priv->fw_loaded); p54_unregister_common(dev); release_firmware(priv->firmware); dma_free_coherent(&pdev->dev, sizeof(*priv->ring_control), priv->ring_control, priv->ring_control_dma); iounmap(priv->map); pci_release_regions(pdev); pci_disable_device(pdev); p54_free_common(dev); } #ifdef CONFIG_PM_SLEEP static int p54p_suspend(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); pci_save_state(pdev); pci_set_power_state(pdev, PCI_D3hot); pci_disable_device(pdev); return 0; } static int p54p_resume(struct device *device) { struct pci_dev *pdev = to_pci_dev(device); int err; err = pci_reenable_device(pdev); if (err) return err; return pci_set_power_state(pdev, PCI_D0); } static SIMPLE_DEV_PM_OPS(p54pci_pm_ops, p54p_suspend, p54p_resume); #define P54P_PM_OPS (&p54pci_pm_ops) #else #define P54P_PM_OPS (NULL) #endif /* CONFIG_PM_SLEEP */ static struct pci_driver p54p_driver = { .name = "p54pci", .id_table = p54p_table, .probe = p54p_probe, .remove = p54p_remove, .driver.pm = P54P_PM_OPS, }; module_pci_driver(p54p_driver);
linux-master
drivers/net/wireless/intersil/p54/p54pci.c
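p54p_refill_rx_ring() above works with free-running producer/consumer indices: the number of descriptors it may still post is the ring size minus the in-flight count, and its loop stops one slot short, the usual way to keep a completely full ring distinguishable from an empty one. The arithmetic is easier to check in isolation; the following is plain user-space C with made-up numbers, not driver code.

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define RING_LIMIT 8u	/* hypothetical ring size */

/* how many descriptors may still be posted, keeping one slot unused */
static uint32_t refillable(uint32_t host_idx, uint32_t device_idx)
{
	uint32_t in_flight = host_idx - device_idx;	/* wraps safely on u32 */
	uint32_t free_slots = RING_LIMIT - in_flight;

	return free_slots ? free_slots - 1 : 0;
}

int main(void)
{
	/* empty ring: everything but the reserved slot can be filled */
	printf("%" PRIu32 "\n", refillable(0, 0));		/* 7 */
	/* five buffers already posted, none consumed yet */
	printf("%" PRIu32 "\n", refillable(5, 0));		/* 2 */
	/* indices may wrap around UINT32_MAX without special casing */
	printf("%" PRIu32 "\n", refillable(3, UINT32_MAX - 1));	/* 2 */
	return 0;
}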
// SPDX-License-Identifier: GPL-2.0-only /* * EEPROM parser code for mac80211 Prism54 drivers * * Copyright (c) 2006, Michael Wu <[email protected]> * Copyright (c) 2007-2009, Christian Lamparter <[email protected]> * Copyright 2008, Johannes Berg <[email protected]> * * Based on: * - the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <[email protected]>, et al. * - stlc45xx driver * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). */ #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/sort.h> #include <linux/slab.h> #include <net/mac80211.h> #include <linux/crc-ccitt.h> #include <linux/export.h> #include "p54.h" #include "eeprom.h" #include "lmac.h" static struct ieee80211_rate p54_bgrates[] = { { .bitrate = 10, .hw_value = 0, }, { .bitrate = 20, .hw_value = 1, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .hw_value = 2, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .hw_value = 3, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 60, .hw_value = 4, }, { .bitrate = 90, .hw_value = 5, }, { .bitrate = 120, .hw_value = 6, }, { .bitrate = 180, .hw_value = 7, }, { .bitrate = 240, .hw_value = 8, }, { .bitrate = 360, .hw_value = 9, }, { .bitrate = 480, .hw_value = 10, }, { .bitrate = 540, .hw_value = 11, }, }; static struct ieee80211_rate p54_arates[] = { { .bitrate = 60, .hw_value = 4, }, { .bitrate = 90, .hw_value = 5, }, { .bitrate = 120, .hw_value = 6, }, { .bitrate = 180, .hw_value = 7, }, { .bitrate = 240, .hw_value = 8, }, { .bitrate = 360, .hw_value = 9, }, { .bitrate = 480, .hw_value = 10, }, { .bitrate = 540, .hw_value = 11, }, }; static struct p54_rssi_db_entry p54_rssi_default = { /* * The defaults are taken from usb-logs of the * vendor driver. So, they should be safe to * use in case we can't get a match from the * rssi <-> dBm conversion database. 
*/ .mul = 130, .add = -398, }; #define CHAN_HAS_CAL BIT(0) #define CHAN_HAS_LIMIT BIT(1) #define CHAN_HAS_CURVE BIT(2) #define CHAN_HAS_ALL (CHAN_HAS_CAL | CHAN_HAS_LIMIT | CHAN_HAS_CURVE) struct p54_channel_entry { u16 freq; u16 data; int index; int max_power; enum nl80211_band band; }; struct p54_channel_list { struct p54_channel_entry *channels; size_t entries; size_t max_entries; size_t band_channel_num[NUM_NL80211_BANDS]; }; static int p54_get_band_from_freq(u16 freq) { /* FIXME: sync these values with the 802.11 spec */ if ((freq >= 2412) && (freq <= 2484)) return NL80211_BAND_2GHZ; if ((freq >= 4920) && (freq <= 5825)) return NL80211_BAND_5GHZ; return -1; } static int same_band(u16 freq, u16 freq2) { return p54_get_band_from_freq(freq) == p54_get_band_from_freq(freq2); } static int p54_compare_channels(const void *_a, const void *_b) { const struct p54_channel_entry *a = _a; const struct p54_channel_entry *b = _b; return a->freq - b->freq; } static int p54_compare_rssichan(const void *_a, const void *_b) { const struct p54_rssi_db_entry *a = _a; const struct p54_rssi_db_entry *b = _b; return a->freq - b->freq; } static int p54_fill_band_bitrates(struct ieee80211_hw *dev, struct ieee80211_supported_band *band_entry, enum nl80211_band band) { /* TODO: generate rate array dynamically */ switch (band) { case NL80211_BAND_2GHZ: band_entry->bitrates = p54_bgrates; band_entry->n_bitrates = ARRAY_SIZE(p54_bgrates); break; case NL80211_BAND_5GHZ: band_entry->bitrates = p54_arates; band_entry->n_bitrates = ARRAY_SIZE(p54_arates); break; default: return -EINVAL; } return 0; } static int p54_generate_band(struct ieee80211_hw *dev, struct p54_channel_list *list, unsigned int *chan_num, enum nl80211_band band) { struct p54_common *priv = dev->priv; struct ieee80211_supported_band *tmp, *old; unsigned int i, j; int ret = -ENOMEM; if ((!list->entries) || (!list->band_channel_num[band])) return -EINVAL; tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); if (!tmp) goto err_out; tmp->channels = kcalloc(list->band_channel_num[band], sizeof(struct ieee80211_channel), GFP_KERNEL); if (!tmp->channels) goto err_out; ret = p54_fill_band_bitrates(dev, tmp, band); if (ret) goto err_out; for (i = 0, j = 0; (j < list->band_channel_num[band]) && (i < list->entries); i++) { struct p54_channel_entry *chan = &list->channels[i]; struct ieee80211_channel *dest = &tmp->channels[j]; if (chan->band != band) continue; if (chan->data != CHAN_HAS_ALL) { wiphy_err(dev->wiphy, "%s%s%s is/are missing for " "channel:%d [%d MHz].\n", (chan->data & CHAN_HAS_CAL ? "" : " [iqauto calibration data]"), (chan->data & CHAN_HAS_LIMIT ? "" : " [output power limits]"), (chan->data & CHAN_HAS_CURVE ? "" : " [curve data]"), chan->index, chan->freq); continue; } dest->band = chan->band; dest->center_freq = chan->freq; dest->max_power = chan->max_power; priv->survey[*chan_num].channel = &tmp->channels[j]; priv->survey[*chan_num].filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY | SURVEY_INFO_TIME_TX; dest->hw_value = (*chan_num); j++; (*chan_num)++; } if (j == 0) { wiphy_err(dev->wiphy, "Disabling totally damaged %d GHz band\n", (band == NL80211_BAND_2GHZ) ? 
2 : 5); ret = -ENODATA; goto err_out; } tmp->n_channels = j; old = priv->band_table[band]; priv->band_table[band] = tmp; if (old) { kfree(old->channels); kfree(old); } return 0; err_out: if (tmp) { kfree(tmp->channels); kfree(tmp); } return ret; } static struct p54_channel_entry *p54_update_channel_param(struct p54_channel_list *list, u16 freq, u16 data) { int i; struct p54_channel_entry *entry = NULL; /* * usually all lists in the eeprom are mostly sorted. * so it's very likely that the entry we are looking for * is right at the end of the list */ for (i = list->entries; i >= 0; i--) { if (freq == list->channels[i].freq) { entry = &list->channels[i]; break; } } if ((i < 0) && (list->entries < list->max_entries)) { /* entry does not exist yet. Initialize a new one. */ int band = p54_get_band_from_freq(freq); /* * filter out frequencies which don't belong into * any supported band. */ if (band >= 0) { i = list->entries++; list->band_channel_num[band]++; entry = &list->channels[i]; entry->freq = freq; entry->band = band; entry->index = ieee80211_frequency_to_channel(freq); entry->max_power = 0; entry->data = 0; } } if (entry) entry->data |= data; return entry; } static int p54_get_maxpower(struct p54_common *priv, void *data) { switch (priv->rxhw & PDR_SYNTH_FRONTEND_MASK) { case PDR_SYNTH_FRONTEND_LONGBOW: { struct pda_channel_output_limit_longbow *pda = data; int j; u16 rawpower = 0; pda = data; for (j = 0; j < ARRAY_SIZE(pda->point); j++) { struct pda_channel_output_limit_point_longbow *point = &pda->point[j]; rawpower = max_t(u16, rawpower, le16_to_cpu(point->val_qpsk)); rawpower = max_t(u16, rawpower, le16_to_cpu(point->val_bpsk)); rawpower = max_t(u16, rawpower, le16_to_cpu(point->val_16qam)); rawpower = max_t(u16, rawpower, le16_to_cpu(point->val_64qam)); } /* longbow seems to use 1/16 dBm units */ return rawpower / 16; } case PDR_SYNTH_FRONTEND_DUETTE3: case PDR_SYNTH_FRONTEND_DUETTE2: case PDR_SYNTH_FRONTEND_FRISBEE: case PDR_SYNTH_FRONTEND_XBOW: { struct pda_channel_output_limit *pda = data; u8 rawpower = 0; rawpower = max(rawpower, pda->val_qpsk); rawpower = max(rawpower, pda->val_bpsk); rawpower = max(rawpower, pda->val_16qam); rawpower = max(rawpower, pda->val_64qam); /* raw values are in 1/4 dBm units */ return rawpower / 4; } default: return 20; } } static int p54_generate_channel_lists(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; struct p54_channel_list *list; unsigned int i, j, k, max_channel_num; int ret = 0; u16 freq; if ((priv->iq_autocal_len != priv->curve_data->entries) || (priv->iq_autocal_len != priv->output_limit->entries)) wiphy_err(dev->wiphy, "Unsupported or damaged EEPROM detected. 
" "You may not be able to use all channels.\n"); max_channel_num = max_t(unsigned int, priv->output_limit->entries, priv->iq_autocal_len); max_channel_num = max_t(unsigned int, max_channel_num, priv->curve_data->entries); list = kzalloc(sizeof(*list), GFP_KERNEL); if (!list) { ret = -ENOMEM; goto free; } priv->chan_num = max_channel_num; priv->survey = kcalloc(max_channel_num, sizeof(struct survey_info), GFP_KERNEL); if (!priv->survey) { ret = -ENOMEM; goto free; } list->max_entries = max_channel_num; list->channels = kcalloc(max_channel_num, sizeof(struct p54_channel_entry), GFP_KERNEL); if (!list->channels) { ret = -ENOMEM; goto free; } for (i = 0; i < max_channel_num; i++) { if (i < priv->iq_autocal_len) { freq = le16_to_cpu(priv->iq_autocal[i].freq); p54_update_channel_param(list, freq, CHAN_HAS_CAL); } if (i < priv->output_limit->entries) { struct p54_channel_entry *tmp; void *data = (void *) ((unsigned long) i * priv->output_limit->entry_size + priv->output_limit->offset + priv->output_limit->data); freq = le16_to_cpup((__le16 *) data); tmp = p54_update_channel_param(list, freq, CHAN_HAS_LIMIT); if (tmp) { tmp->max_power = p54_get_maxpower(priv, data); } } if (i < priv->curve_data->entries) { freq = le16_to_cpup((__le16 *) (i * priv->curve_data->entry_size + priv->curve_data->offset + priv->curve_data->data)); p54_update_channel_param(list, freq, CHAN_HAS_CURVE); } } /* sort the channel list by frequency */ sort(list->channels, list->entries, sizeof(struct p54_channel_entry), p54_compare_channels, NULL); k = 0; for (i = 0, j = 0; i < NUM_NL80211_BANDS; i++) { if (p54_generate_band(dev, list, &k, i) == 0) j++; } if (j == 0) { /* no useable band available. */ ret = -EINVAL; } free: if (list) { kfree(list->channels); kfree(list); } if (ret) { kfree(priv->survey); priv->survey = NULL; } return ret; } static int p54_convert_rev0(struct ieee80211_hw *dev, struct pda_pa_curve_data *curve_data) { struct p54_common *priv = dev->priv; struct p54_pa_curve_data_sample *dst; struct pda_pa_curve_data_sample_rev0 *src; size_t cd_len = sizeof(*curve_data) + (curve_data->points_per_channel*sizeof(*dst) + 2) * curve_data->channels; unsigned int i, j; void *source, *target; priv->curve_data = kmalloc(sizeof(*priv->curve_data) + cd_len, GFP_KERNEL); if (!priv->curve_data) return -ENOMEM; priv->curve_data->entries = curve_data->channels; priv->curve_data->entry_size = sizeof(__le16) + sizeof(*dst) * curve_data->points_per_channel; priv->curve_data->offset = offsetof(struct pda_pa_curve_data, data); priv->curve_data->len = cd_len; memcpy(priv->curve_data->data, curve_data, sizeof(*curve_data)); source = curve_data->data; target = ((struct pda_pa_curve_data *) priv->curve_data->data)->data; for (i = 0; i < curve_data->channels; i++) { __le16 *freq = source; source += sizeof(__le16); *((__le16 *)target) = *freq; target += sizeof(__le16); for (j = 0; j < curve_data->points_per_channel; j++) { dst = target; src = source; dst->rf_power = src->rf_power; dst->pa_detector = src->pa_detector; dst->data_64qam = src->pcv; /* "invent" the points for the other modulations */ #define SUB(x, y) (u8)(((x) - (y)) > (x) ? 
0 : (x) - (y)) dst->data_16qam = SUB(src->pcv, 12); dst->data_qpsk = SUB(dst->data_16qam, 12); dst->data_bpsk = SUB(dst->data_qpsk, 12); dst->data_barker = SUB(dst->data_bpsk, 14); #undef SUB target += sizeof(*dst); source += sizeof(*src); } } return 0; } static int p54_convert_rev1(struct ieee80211_hw *dev, struct pda_pa_curve_data *curve_data) { struct p54_common *priv = dev->priv; struct p54_pa_curve_data_sample *dst; struct pda_pa_curve_data_sample_rev1 *src; size_t cd_len = sizeof(*curve_data) + (curve_data->points_per_channel*sizeof(*dst) + 2) * curve_data->channels; unsigned int i, j; void *source, *target; priv->curve_data = kzalloc(cd_len + sizeof(*priv->curve_data), GFP_KERNEL); if (!priv->curve_data) return -ENOMEM; priv->curve_data->entries = curve_data->channels; priv->curve_data->entry_size = sizeof(__le16) + sizeof(*dst) * curve_data->points_per_channel; priv->curve_data->offset = offsetof(struct pda_pa_curve_data, data); priv->curve_data->len = cd_len; memcpy(priv->curve_data->data, curve_data, sizeof(*curve_data)); source = curve_data->data; target = ((struct pda_pa_curve_data *) priv->curve_data->data)->data; for (i = 0; i < curve_data->channels; i++) { __le16 *freq = source; source += sizeof(__le16); *((__le16 *)target) = *freq; target += sizeof(__le16); for (j = 0; j < curve_data->points_per_channel; j++) { memcpy(target, source, sizeof(*src)); target += sizeof(*dst); source += sizeof(*src); } source++; } return 0; } static const char *p54_rf_chips[] = { "INVALID-0", "Duette3", "Duette2", "Frisbee", "Xbow", "Longbow", "INVALID-6", "INVALID-7" }; static int p54_parse_rssical(struct ieee80211_hw *dev, u8 *data, int len, u16 type) { struct p54_common *priv = dev->priv; struct p54_rssi_db_entry *entry; size_t db_len, entries; int offset = 0, i; if (type != PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) { entries = (type == PDR_RSSI_LINEAR_APPROXIMATION) ? 1 : 2; if (len != sizeof(struct pda_rssi_cal_entry) * entries) { wiphy_err(dev->wiphy, "rssical size mismatch.\n"); goto err_data; } } else { /* * Some devices (Dell 1450 USB, Xbow 5GHz card, etc...) * have an empty two byte header. 
*/ if (*((__le16 *)&data[offset]) == cpu_to_le16(0)) offset += 2; entries = (len - offset) / sizeof(struct pda_rssi_cal_ext_entry); if (len < offset || (len - offset) % sizeof(struct pda_rssi_cal_ext_entry) || entries == 0) { wiphy_err(dev->wiphy, "invalid rssi database.\n"); goto err_data; } } db_len = sizeof(*entry) * entries; priv->rssi_db = kzalloc(db_len + sizeof(*priv->rssi_db), GFP_KERNEL); if (!priv->rssi_db) return -ENOMEM; priv->rssi_db->offset = 0; priv->rssi_db->entries = entries; priv->rssi_db->entry_size = sizeof(*entry); priv->rssi_db->len = db_len; entry = (void *)((unsigned long)priv->rssi_db->data + priv->rssi_db->offset); if (type == PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED) { struct pda_rssi_cal_ext_entry *cal = (void *) &data[offset]; for (i = 0; i < entries; i++) { entry[i].freq = le16_to_cpu(cal[i].freq); entry[i].mul = (s16) le16_to_cpu(cal[i].mul); entry[i].add = (s16) le16_to_cpu(cal[i].add); } } else { struct pda_rssi_cal_entry *cal = (void *) &data[offset]; for (i = 0; i < entries; i++) { u16 freq = 0; switch (i) { case NL80211_BAND_2GHZ: freq = 2437; break; case NL80211_BAND_5GHZ: freq = 5240; break; } entry[i].freq = freq; entry[i].mul = (s16) le16_to_cpu(cal[i].mul); entry[i].add = (s16) le16_to_cpu(cal[i].add); } } /* sort the list by channel frequency */ sort(entry, entries, sizeof(*entry), p54_compare_rssichan, NULL); return 0; err_data: wiphy_err(dev->wiphy, "rssi calibration data packing type:(%x) len:%d.\n", type, len); print_hex_dump_bytes("rssical:", DUMP_PREFIX_NONE, data, len); wiphy_err(dev->wiphy, "please report this issue.\n"); return -EINVAL; } struct p54_rssi_db_entry *p54_rssi_find(struct p54_common *priv, const u16 freq) { struct p54_rssi_db_entry *entry; int i, found = -1; if (!priv->rssi_db) return &p54_rssi_default; entry = (void *)(priv->rssi_db->data + priv->rssi_db->offset); for (i = 0; i < priv->rssi_db->entries; i++) { if (!same_band(freq, entry[i].freq)) continue; if (found == -1) { found = i; continue; } /* nearest match */ if (abs(freq - entry[i].freq) < abs(freq - entry[found].freq)) { found = i; continue; } else { break; } } return found < 0 ? &p54_rssi_default : &entry[found]; } static void p54_parse_default_country(struct ieee80211_hw *dev, void *data, int len) { struct pda_country *country; if (len != sizeof(*country)) { wiphy_err(dev->wiphy, "found possible invalid default country eeprom entry. (entry size: %d)\n", len); print_hex_dump_bytes("country:", DUMP_PREFIX_NONE, data, len); wiphy_err(dev->wiphy, "please report this issue.\n"); return; } country = (struct pda_country *) data; if (country->flags == PDR_COUNTRY_CERT_CODE_PSEUDO) regulatory_hint(dev->wiphy, country->alpha2); else { /* TODO: * write a shared/common function that converts * "Regulatory domain codes" (802.11-2007 14.8.2.2) * into ISO/IEC 3166-1 alpha2 for regulatory_hint. 
*/ } } static int p54_convert_output_limits(struct ieee80211_hw *dev, u8 *data, size_t len) { struct p54_common *priv = dev->priv; if (len < 2) return -EINVAL; if (data[0] != 0) { wiphy_err(dev->wiphy, "unknown output power db revision:%x\n", data[0]); return -EINVAL; } if (2 + data[1] * sizeof(struct pda_channel_output_limit) > len) return -EINVAL; priv->output_limit = kmalloc(data[1] * sizeof(struct pda_channel_output_limit) + sizeof(*priv->output_limit), GFP_KERNEL); if (!priv->output_limit) return -ENOMEM; priv->output_limit->offset = 0; priv->output_limit->entries = data[1]; priv->output_limit->entry_size = sizeof(struct pda_channel_output_limit); priv->output_limit->len = priv->output_limit->entry_size * priv->output_limit->entries + priv->output_limit->offset; memcpy(priv->output_limit->data, &data[2], data[1] * sizeof(struct pda_channel_output_limit)); return 0; } static struct p54_cal_database *p54_convert_db(struct pda_custom_wrapper *src, size_t total_len) { struct p54_cal_database *dst; size_t payload_len, entries, entry_size, offset; payload_len = le16_to_cpu(src->len); entries = le16_to_cpu(src->entries); entry_size = le16_to_cpu(src->entry_size); offset = le16_to_cpu(src->offset); if (((entries * entry_size + offset) != payload_len) || (payload_len + sizeof(*src) != total_len)) return NULL; dst = kmalloc(sizeof(*dst) + payload_len, GFP_KERNEL); if (!dst) return NULL; dst->entries = entries; dst->entry_size = entry_size; dst->offset = offset; dst->len = payload_len; memcpy(dst->data, src->data, payload_len); return dst; } int p54_parse_eeprom(struct ieee80211_hw *dev, void *eeprom, int len) { struct p54_common *priv = dev->priv; struct eeprom_pda_wrap *wrap; struct pda_entry *entry; unsigned int data_len, entry_len; void *tmp; int err; u8 *end = (u8 *)eeprom + len; u16 synth = 0; u16 crc16 = ~0; wrap = (struct eeprom_pda_wrap *) eeprom; entry = (void *)wrap->data + le16_to_cpu(wrap->len); /* verify that at least the entry length/code fits */ while ((u8 *)entry <= end - sizeof(*entry)) { entry_len = le16_to_cpu(entry->len); data_len = ((entry_len - 1) << 1); /* abort if entry exceeds whole structure */ if ((u8 *)entry + sizeof(*entry) + data_len > end) break; switch (le16_to_cpu(entry->code)) { case PDR_MAC_ADDRESS: if (data_len != ETH_ALEN) break; SET_IEEE80211_PERM_ADDR(dev, entry->data); break; case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS: if (priv->output_limit) break; err = p54_convert_output_limits(dev, entry->data, data_len); if (err) goto err; break; case PDR_PRISM_PA_CAL_CURVE_DATA: { struct pda_pa_curve_data *curve_data = (struct pda_pa_curve_data *)entry->data; if (data_len < sizeof(*curve_data)) { err = -EINVAL; goto err; } switch (curve_data->cal_method_rev) { case 0: err = p54_convert_rev0(dev, curve_data); break; case 1: err = p54_convert_rev1(dev, curve_data); break; default: wiphy_err(dev->wiphy, "unknown curve data revision %d\n", curve_data->cal_method_rev); err = -ENODEV; break; } if (err) goto err; } break; case PDR_PRISM_ZIF_TX_IQ_CALIBRATION: priv->iq_autocal = kmemdup(entry->data, data_len, GFP_KERNEL); if (!priv->iq_autocal) { err = -ENOMEM; goto err; } priv->iq_autocal_len = data_len / sizeof(struct pda_iq_autocal_entry); break; case PDR_DEFAULT_COUNTRY: p54_parse_default_country(dev, entry->data, data_len); break; case PDR_INTERFACE_LIST: tmp = entry->data; while ((u8 *)tmp < entry->data + data_len) { struct exp_if *exp_if = tmp; if (exp_if->if_id == cpu_to_le16(IF_ID_ISL39000)) synth = le16_to_cpu(exp_if->variant); tmp += sizeof(*exp_if); } break; case 
PDR_HARDWARE_PLATFORM_COMPONENT_ID: if (data_len < 2) break; priv->version = *(u8 *)(entry->data + 1); break; case PDR_RSSI_LINEAR_APPROXIMATION: case PDR_RSSI_LINEAR_APPROXIMATION_DUAL_BAND: case PDR_RSSI_LINEAR_APPROXIMATION_EXTENDED: err = p54_parse_rssical(dev, entry->data, data_len, le16_to_cpu(entry->code)); if (err) goto err; break; case PDR_RSSI_LINEAR_APPROXIMATION_CUSTOMV2: { struct pda_custom_wrapper *pda = (void *) entry->data; __le16 *src; u16 *dst; int i; if (priv->rssi_db || data_len < sizeof(*pda)) break; priv->rssi_db = p54_convert_db(pda, data_len); if (!priv->rssi_db) break; src = (void *) priv->rssi_db->data; dst = (void *) priv->rssi_db->data; for (i = 0; i < priv->rssi_db->entries; i++) *(dst++) = (s16) le16_to_cpu(*(src++)); } break; case PDR_PRISM_PA_CAL_OUTPUT_POWER_LIMITS_CUSTOM: { struct pda_custom_wrapper *pda = (void *) entry->data; if (priv->output_limit || data_len < sizeof(*pda)) break; priv->output_limit = p54_convert_db(pda, data_len); } break; case PDR_PRISM_PA_CAL_CURVE_DATA_CUSTOM: { struct pda_custom_wrapper *pda = (void *) entry->data; if (priv->curve_data || data_len < sizeof(*pda)) break; priv->curve_data = p54_convert_db(pda, data_len); } break; case PDR_END: crc16 = ~crc_ccitt(crc16, (u8 *) entry, sizeof(*entry)); if (crc16 != le16_to_cpup((__le16 *)entry->data)) { wiphy_err(dev->wiphy, "eeprom failed checksum " "test!\n"); err = -ENOMSG; goto err; } else { goto good_eeprom; } break; default: break; } crc16 = crc_ccitt(crc16, (u8 *)entry, (entry_len + 1) * 2); entry = (void *)entry + (entry_len + 1) * 2; } wiphy_err(dev->wiphy, "unexpected end of eeprom data.\n"); err = -ENODATA; goto err; good_eeprom: if (!synth || !priv->iq_autocal || !priv->output_limit || !priv->curve_data) { wiphy_err(dev->wiphy, "not all required entries found in eeprom!\n"); err = -EINVAL; goto err; } priv->rxhw = synth & PDR_SYNTH_FRONTEND_MASK; err = p54_generate_channel_lists(dev); if (err) goto err; if (priv->rxhw == PDR_SYNTH_FRONTEND_XBOW) p54_init_xbow_synth(priv); if (!(synth & PDR_SYNTH_24_GHZ_DISABLED)) dev->wiphy->bands[NL80211_BAND_2GHZ] = priv->band_table[NL80211_BAND_2GHZ]; if (!(synth & PDR_SYNTH_5_GHZ_DISABLED)) dev->wiphy->bands[NL80211_BAND_5GHZ] = priv->band_table[NL80211_BAND_5GHZ]; if ((synth & PDR_SYNTH_RX_DIV_MASK) == PDR_SYNTH_RX_DIV_SUPPORTED) priv->rx_diversity_mask = 3; if ((synth & PDR_SYNTH_TX_DIV_MASK) == PDR_SYNTH_TX_DIV_SUPPORTED) priv->tx_diversity_mask = 3; if (!is_valid_ether_addr(dev->wiphy->perm_addr)) { u8 perm_addr[ETH_ALEN]; wiphy_warn(dev->wiphy, "Invalid hwaddr! 
Using randomly generated MAC addr\n"); eth_random_addr(perm_addr); SET_IEEE80211_PERM_ADDR(dev, perm_addr); } priv->cur_rssi = &p54_rssi_default; wiphy_info(dev->wiphy, "hwaddr %pM, MAC:isl38%02x RF:%s\n", dev->wiphy->perm_addr, priv->version, p54_rf_chips[priv->rxhw]); return 0; err: kfree(priv->iq_autocal); kfree(priv->output_limit); kfree(priv->curve_data); kfree(priv->rssi_db); kfree(priv->survey); priv->iq_autocal = NULL; priv->output_limit = NULL; priv->curve_data = NULL; priv->rssi_db = NULL; priv->survey = NULL; wiphy_err(dev->wiphy, "eeprom parse failed!\n"); return err; } EXPORT_SYMBOL_GPL(p54_parse_eeprom); int p54_read_eeprom(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; size_t eeprom_size = 0x2020, offset = 0, blocksize, maxblocksize; int ret = -ENOMEM; void *eeprom; maxblocksize = EEPROM_READBACK_LEN; if (priv->fw_var >= 0x509) maxblocksize -= 0xc; else maxblocksize -= 0x4; eeprom = kzalloc(eeprom_size, GFP_KERNEL); if (unlikely(!eeprom)) goto free; while (eeprom_size) { blocksize = min(eeprom_size, maxblocksize); ret = p54_download_eeprom(priv, eeprom + offset, offset, blocksize); if (unlikely(ret)) goto free; offset += blocksize; eeprom_size -= blocksize; } ret = p54_parse_eeprom(dev, eeprom, offset); free: kfree(eeprom); return ret; } EXPORT_SYMBOL_GPL(p54_read_eeprom);
linux-master
drivers/net/wireless/intersil/p54/eeprom.c
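/*
 * Illustration only: a userspace sketch of the PDA entry walk performed by
 * p54_parse_eeprom() above.  Each entry is [u16 len][u16 code][payload]
 * (little-endian; the buffer below is made-up test data).  The point is the
 * length arithmetic: len counts 16-bit words for the code word plus the
 * payload, so the payload is (len - 1) * 2 bytes and the next entry starts
 * (len + 1) * 2 bytes further on, since the len word itself is not counted
 * in len.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	/* two fake entries: code 0x0101 with 4 payload bytes, then PDR_END */
	const uint8_t pda[] = {
		0x03, 0x00,  0x01, 0x01,  0xde, 0xad, 0xbe, 0xef,
		0x02, 0x00,  0x00, 0x00,  0x34, 0x12,	/* PDR_END + crc16 */
	};
	const uint8_t *pos = pda, *end = pda + sizeof(pda);

	while (end - pos >= 4) {
		uint16_t len, code;
		size_t data_len;

		memcpy(&len, pos, 2);			/* little-endian host assumed */
		memcpy(&code, pos + 2, 2);
		data_len = (len - 1) * 2;
		if (4 + data_len > (size_t)(end - pos))
			break;				/* truncated entry */
		printf("code 0x%04x, %zu payload bytes\n", code, data_len);
		if (code == 0x0000)			/* PDR_END */
			break;
		pos += (len + 1) * 2;			/* len word + code word + payload */
	}
	return 0;
}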
// SPDX-License-Identifier: GPL-2.0-only /* * mac80211 glue code for mac80211 Prism54 drivers * * Copyright (c) 2006, Michael Wu <[email protected]> * Copyright (c) 2007-2009, Christian Lamparter <[email protected]> * Copyright 2008, Johannes Berg <[email protected]> * * Based on: * - the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <[email protected]>, et al. * - stlc45xx driver * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). */ #include <linux/slab.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/module.h> #include <net/mac80211.h> #include "p54.h" #include "lmac.h" static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); MODULE_AUTHOR("Michael Wu <[email protected]>"); MODULE_DESCRIPTION("Softmac Prism54 common code"); MODULE_LICENSE("GPL"); MODULE_ALIAS("prism54common"); static int p54_sta_add_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct p54_common *priv = hw->priv; /* * Notify the firmware that we don't want or we don't * need to buffer frames for this station anymore. */ p54_sta_unlock(priv, sta->addr); return 0; } static void p54_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif, enum sta_notify_cmd notify_cmd, struct ieee80211_sta *sta) { struct p54_common *priv = dev->priv; switch (notify_cmd) { case STA_NOTIFY_AWAKE: /* update the firmware's filter table */ p54_sta_unlock(priv, sta->addr); break; default: break; } } static int p54_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta, bool set) { struct p54_common *priv = dev->priv; return p54_update_beacon_tim(priv, sta->aid, set); } u8 *p54_find_ie(struct sk_buff *skb, u8 ie) { struct ieee80211_mgmt *mgmt = (void *)skb->data; u8 *pos, *end; if (skb->len <= sizeof(mgmt)) return NULL; pos = (u8 *)mgmt->u.beacon.variable; end = skb->data + skb->len; while (pos < end) { if (pos + 2 + pos[1] > end) return NULL; if (pos[0] == ie) return pos; pos += 2 + pos[1]; } return NULL; } static int p54_beacon_format_ie_tim(struct sk_buff *skb) { /* * the good excuse for this mess is ... the firmware. * The dummy TIM MUST be at the end of the beacon frame, * because it'll be overwritten! */ u8 *tim; u8 dtim_len; u8 dtim_period; u8 *next; tim = p54_find_ie(skb, WLAN_EID_TIM); if (!tim) return 0; dtim_len = tim[1]; dtim_period = tim[3]; next = tim + 2 + dtim_len; if (dtim_len < 3) return -EINVAL; memmove(tim, next, skb_tail_pointer(skb) - next); tim = skb_tail_pointer(skb) - (dtim_len + 2); /* add the dummy at the end */ tim[0] = WLAN_EID_TIM; tim[1] = 3; tim[2] = 0; tim[3] = dtim_period; tim[4] = 0; if (dtim_len > 3) skb_trim(skb, skb->len - (dtim_len - 3)); return 0; } static int p54_beacon_update(struct p54_common *priv, struct ieee80211_vif *vif) { struct ieee80211_tx_control control = { }; struct sk_buff *beacon; int ret; beacon = ieee80211_beacon_get(priv->hw, vif, 0); if (!beacon) return -ENOMEM; ret = p54_beacon_format_ie_tim(beacon); if (ret) return ret; /* * During operation, the firmware takes care of beaconing. * The driver only needs to upload a new beacon template, once * the template was changed by the stack or userspace. * * LMAC API 3.2.2 also specifies that the driver does not need * to cancel the old beacon template by hand, instead the firmware * will release the previous one through the feedback mechanism. 
*/ p54_tx_80211(priv->hw, &control, beacon); priv->tsf_high32 = 0; priv->tsf_low32 = 0; return 0; } static int p54_start(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; int err; mutex_lock(&priv->conf_mutex); err = priv->open(dev); if (err) goto out; P54_SET_QUEUE(priv->qos_params[0], 0x0002, 0x0003, 0x0007, 47); P54_SET_QUEUE(priv->qos_params[1], 0x0002, 0x0007, 0x000f, 94); P54_SET_QUEUE(priv->qos_params[2], 0x0003, 0x000f, 0x03ff, 0); P54_SET_QUEUE(priv->qos_params[3], 0x0007, 0x000f, 0x03ff, 0); err = p54_set_edcf(priv); if (err) goto out; eth_broadcast_addr(priv->bssid); priv->mode = NL80211_IFTYPE_MONITOR; err = p54_setup_mac(priv); if (err) { priv->mode = NL80211_IFTYPE_UNSPECIFIED; goto out; } ieee80211_queue_delayed_work(dev, &priv->work, 0); priv->softled_state = 0; err = p54_set_leds(priv); out: mutex_unlock(&priv->conf_mutex); return err; } static void p54_stop(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; int i; priv->mode = NL80211_IFTYPE_UNSPECIFIED; priv->softled_state = 0; cancel_delayed_work_sync(&priv->work); mutex_lock(&priv->conf_mutex); p54_set_leds(priv); priv->stop(dev); skb_queue_purge(&priv->tx_pending); skb_queue_purge(&priv->tx_queue); for (i = 0; i < P54_QUEUE_NUM; i++) { priv->tx_stats[i].count = 0; priv->tx_stats[i].len = 0; } priv->beacon_req_id = cpu_to_le32(0); priv->tsf_high32 = priv->tsf_low32 = 0; mutex_unlock(&priv->conf_mutex); } static int p54_add_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { struct p54_common *priv = dev->priv; int err; vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER; mutex_lock(&priv->conf_mutex); if (priv->mode != NL80211_IFTYPE_MONITOR) { mutex_unlock(&priv->conf_mutex); return -EOPNOTSUPP; } priv->vif = vif; switch (vif->type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: priv->mode = vif->type; break; default: mutex_unlock(&priv->conf_mutex); return -EOPNOTSUPP; } memcpy(priv->mac_addr, vif->addr, ETH_ALEN); err = p54_setup_mac(priv); mutex_unlock(&priv->conf_mutex); return err; } static void p54_remove_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { struct p54_common *priv = dev->priv; mutex_lock(&priv->conf_mutex); priv->vif = NULL; /* * LMAC API 3.2.2 states that any active beacon template must be * canceled by the driver before attempting a mode transition. */ if (le32_to_cpu(priv->beacon_req_id) != 0) { p54_tx_cancel(priv, priv->beacon_req_id); wait_for_completion_interruptible_timeout(&priv->beacon_comp, HZ); } priv->mode = NL80211_IFTYPE_MONITOR; eth_zero_addr(priv->mac_addr); eth_zero_addr(priv->bssid); p54_setup_mac(priv); mutex_unlock(&priv->conf_mutex); } static int p54_wait_for_stats(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; int ret; priv->update_stats = true; ret = p54_fetch_statistics(priv); if (ret) return ret; ret = wait_for_completion_interruptible_timeout(&priv->stat_comp, HZ); if (ret == 0) return -ETIMEDOUT; return 0; } static void p54_reset_stats(struct p54_common *priv) { struct ieee80211_channel *chan = priv->curchan; if (chan) { struct survey_info *info = &priv->survey[chan->hw_value]; /* only reset channel statistics, don't touch .filled, etc. 
*/ info->time = 0; info->time_busy = 0; info->time_tx = 0; } priv->update_stats = true; priv->survey_raw.active = 0; priv->survey_raw.cca = 0; priv->survey_raw.tx = 0; } static int p54_config(struct ieee80211_hw *dev, u32 changed) { int ret = 0; struct p54_common *priv = dev->priv; struct ieee80211_conf *conf = &dev->conf; mutex_lock(&priv->conf_mutex); if (changed & IEEE80211_CONF_CHANGE_POWER) priv->output_power = conf->power_level << 2; if (changed & IEEE80211_CONF_CHANGE_CHANNEL) { struct ieee80211_channel *oldchan; WARN_ON(p54_wait_for_stats(dev)); oldchan = priv->curchan; priv->curchan = NULL; ret = p54_scan(priv, P54_SCAN_EXIT, 0); if (ret) { priv->curchan = oldchan; goto out; } /* * TODO: Use the LM_SCAN_TRAP to determine the current * operating channel. */ priv->curchan = priv->hw->conf.chandef.chan; p54_reset_stats(priv); WARN_ON(p54_fetch_statistics(priv)); } if (changed & IEEE80211_CONF_CHANGE_PS) { WARN_ON(p54_wait_for_stats(dev)); ret = p54_set_ps(priv); if (ret) goto out; WARN_ON(p54_wait_for_stats(dev)); } if (changed & IEEE80211_CONF_CHANGE_IDLE) { WARN_ON(p54_wait_for_stats(dev)); ret = p54_setup_mac(priv); if (ret) goto out; WARN_ON(p54_wait_for_stats(dev)); } out: mutex_unlock(&priv->conf_mutex); return ret; } static u64 p54_prepare_multicast(struct ieee80211_hw *dev, struct netdev_hw_addr_list *mc_list) { struct p54_common *priv = dev->priv; struct netdev_hw_addr *ha; int i; BUILD_BUG_ON(ARRAY_SIZE(priv->mc_maclist) != ARRAY_SIZE(((struct p54_group_address_table *)NULL)->mac_list)); /* * The first entry is reserved for the global broadcast MAC. * Otherwise the firmware will drop it and ARP will no longer work. */ i = 1; priv->mc_maclist_num = netdev_hw_addr_list_count(mc_list) + i; netdev_hw_addr_list_for_each(ha, mc_list) { memcpy(&priv->mc_maclist[i], ha->addr, ETH_ALEN); i++; if (i >= ARRAY_SIZE(priv->mc_maclist)) break; } return 1; /* update */ } static void p54_configure_filter(struct ieee80211_hw *dev, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct p54_common *priv = dev->priv; *total_flags &= FIF_ALLMULTI | FIF_OTHER_BSS; priv->filter_flags = *total_flags; if (changed_flags & FIF_OTHER_BSS) p54_setup_mac(priv); if (changed_flags & FIF_ALLMULTI || multicast) p54_set_groupfilter(priv); } static int p54_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params) { struct p54_common *priv = dev->priv; int ret; mutex_lock(&priv->conf_mutex); P54_SET_QUEUE(priv->qos_params[queue], params->aifs, params->cw_min, params->cw_max, params->txop); ret = p54_set_edcf(priv); mutex_unlock(&priv->conf_mutex); return ret; } static void p54_work(struct work_struct *work) { struct p54_common *priv = container_of(work, struct p54_common, work.work); if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) return ; /* * TODO: walk through tx_queue and do the following tasks * 1. initiate bursts. * 2. cancel stuck frames / reset the device if necessary. 
*/ mutex_lock(&priv->conf_mutex); WARN_ON_ONCE(p54_fetch_statistics(priv)); mutex_unlock(&priv->conf_mutex); } static int p54_get_stats(struct ieee80211_hw *dev, struct ieee80211_low_level_stats *stats) { struct p54_common *priv = dev->priv; memcpy(stats, &priv->stats, sizeof(*stats)); return 0; } static void p54_bss_info_changed(struct ieee80211_hw *dev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed) { struct p54_common *priv = dev->priv; mutex_lock(&priv->conf_mutex); if (changed & BSS_CHANGED_BSSID) { memcpy(priv->bssid, info->bssid, ETH_ALEN); p54_setup_mac(priv); } if (changed & BSS_CHANGED_BEACON) { p54_scan(priv, P54_SCAN_EXIT, 0); p54_setup_mac(priv); p54_beacon_update(priv, vif); p54_set_edcf(priv); } if (changed & (BSS_CHANGED_ERP_SLOT | BSS_CHANGED_BEACON)) { priv->use_short_slot = info->use_short_slot; p54_set_edcf(priv); } if (changed & BSS_CHANGED_BASIC_RATES) { if (dev->conf.chandef.chan->band == NL80211_BAND_5GHZ) priv->basic_rate_mask = (info->basic_rates << 4); else priv->basic_rate_mask = info->basic_rates; p54_setup_mac(priv); if (priv->fw_var >= 0x500) p54_scan(priv, P54_SCAN_EXIT, 0); } if (changed & BSS_CHANGED_ASSOC) { if (vif->cfg.assoc) { priv->aid = vif->cfg.aid; priv->wakeup_timer = info->beacon_int * info->dtim_period * 5; p54_setup_mac(priv); } else { priv->wakeup_timer = 500; priv->aid = 0; } } mutex_unlock(&priv->conf_mutex); } static int p54_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct p54_common *priv = dev->priv; int slot, ret = 0; u8 algo = 0; u8 *addr = NULL; if (modparam_nohwcrypt) return -EOPNOTSUPP; if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT) { /* * Unfortunately most/all firmwares are trying to decrypt * incoming management frames if a suitable key can be found. * However, in doing so the data in these frames gets * corrupted. So, we can't have firmware supported crypto * offload in this case. */ return -EOPNOTSUPP; } mutex_lock(&priv->conf_mutex); if (cmd == SET_KEY) { switch (key->cipher) { case WLAN_CIPHER_SUITE_TKIP: if (!(priv->privacy_caps & (BR_DESC_PRIV_CAP_MICHAEL | BR_DESC_PRIV_CAP_TKIP))) { ret = -EOPNOTSUPP; goto out_unlock; } key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; algo = P54_CRYPTO_TKIPMICHAEL; break; case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_WEP)) { ret = -EOPNOTSUPP; goto out_unlock; } key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; algo = P54_CRYPTO_WEP; break; case WLAN_CIPHER_SUITE_CCMP: if (!(priv->privacy_caps & BR_DESC_PRIV_CAP_AESCCMP)) { ret = -EOPNOTSUPP; goto out_unlock; } key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; algo = P54_CRYPTO_AESCCMP; break; default: ret = -EOPNOTSUPP; goto out_unlock; } slot = bitmap_find_free_region(priv->used_rxkeys, priv->rx_keycache_size, 0); if (slot < 0) { /* * The device supports the chosen algorithm, but the * firmware does not provide enough key slots to store * all of them. * But encryption offload for outgoing frames is always * possible, so we just pretend that the upload was * successful and do the decryption in software. */ /* mark the key as invalid. */ key->hw_key_idx = 0xff; goto out_unlock; } key->flags |= IEEE80211_KEY_FLAG_RESERVE_TAILROOM; } else { slot = key->hw_key_idx; if (slot == 0xff) { /* This key was not uploaded into the rx key cache. 
*/ goto out_unlock; } bitmap_release_region(priv->used_rxkeys, slot, 0); algo = 0; } if (sta) addr = sta->addr; ret = p54_upload_key(priv, algo, slot, key->keyidx, key->keylen, addr, key->key); if (ret) { bitmap_release_region(priv->used_rxkeys, slot, 0); ret = -EOPNOTSUPP; goto out_unlock; } key->hw_key_idx = slot; out_unlock: mutex_unlock(&priv->conf_mutex); return ret; } static int p54_get_survey(struct ieee80211_hw *dev, int idx, struct survey_info *survey) { struct p54_common *priv = dev->priv; struct ieee80211_channel *chan; int err, tries; bool in_use = false; if (idx >= priv->chan_num) return -ENOENT; #define MAX_TRIES 1 for (tries = 0; tries < MAX_TRIES; tries++) { chan = priv->curchan; if (chan && chan->hw_value == idx) { mutex_lock(&priv->conf_mutex); err = p54_wait_for_stats(dev); mutex_unlock(&priv->conf_mutex); if (err) return err; in_use = true; } memcpy(survey, &priv->survey[idx], sizeof(*survey)); if (in_use) { /* test if the reported statistics are valid. */ if (survey->time != 0) { survey->filled |= SURVEY_INFO_IN_USE; } else { /* * hw/fw has not accumulated enough sample sets. * Wait for 100ms, this ought to be enough to * get at least one non-null set of channel * usage statistics. */ msleep(100); continue; } } return 0; } return -ETIMEDOUT; #undef MAX_TRIES } static unsigned int p54_flush_count(struct p54_common *priv) { unsigned int total = 0, i; BUILD_BUG_ON(P54_QUEUE_NUM > ARRAY_SIZE(priv->tx_stats)); /* * Because the firmware has the sole control over any frames * in the P54_QUEUE_BEACON or P54_QUEUE_SCAN queues, they * don't really count as pending or active. */ for (i = P54_QUEUE_MGMT; i < P54_QUEUE_NUM; i++) total += priv->tx_stats[i].len; return total; } static void p54_flush(struct ieee80211_hw *dev, struct ieee80211_vif *vif, u32 queues, bool drop) { struct p54_common *priv = dev->priv; unsigned int total, i; /* * Currently, it wouldn't really matter if we wait for one second * or 15 minutes. But once someone gets around and completes the * TODOs [ancel stuck frames / reset device] in p54_work, it will * suddenly make sense to wait that long. */ i = P54_STATISTICS_UPDATE * 2 / 20; /* * In this case no locking is required because as we speak the * queues have already been stopped and no new frames can sneak * up from behind. 
*/ while ((total = p54_flush_count(priv)) && i--) { /* waste time */ msleep(20); } WARN(total, "tx flush timeout, unresponsive firmware"); } static void p54_set_coverage_class(struct ieee80211_hw *dev, s16 coverage_class) { struct p54_common *priv = dev->priv; mutex_lock(&priv->conf_mutex); /* support all coverage class values as in 802.11-2007 Table 7-27 */ priv->coverage_class = clamp_t(u8, coverage_class, 0, 31); p54_set_edcf(priv); mutex_unlock(&priv->conf_mutex); } static const struct ieee80211_ops p54_ops = { .tx = p54_tx_80211, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = p54_start, .stop = p54_stop, .add_interface = p54_add_interface, .remove_interface = p54_remove_interface, .set_tim = p54_set_tim, .sta_notify = p54_sta_notify, .sta_add = p54_sta_add_remove, .sta_remove = p54_sta_add_remove, .set_key = p54_set_key, .config = p54_config, .flush = p54_flush, .bss_info_changed = p54_bss_info_changed, .prepare_multicast = p54_prepare_multicast, .configure_filter = p54_configure_filter, .conf_tx = p54_conf_tx, .get_stats = p54_get_stats, .get_survey = p54_get_survey, .set_coverage_class = p54_set_coverage_class, }; struct ieee80211_hw *p54_init_common(size_t priv_data_len) { struct ieee80211_hw *dev; struct p54_common *priv; dev = ieee80211_alloc_hw(priv_data_len, &p54_ops); if (!dev) return NULL; priv = dev->priv; priv->hw = dev; priv->mode = NL80211_IFTYPE_UNSPECIFIED; priv->basic_rate_mask = 0x15f; spin_lock_init(&priv->tx_stats_lock); skb_queue_head_init(&priv->tx_queue); skb_queue_head_init(&priv->tx_pending); ieee80211_hw_set(dev, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(dev, MFP_CAPABLE); ieee80211_hw_set(dev, PS_NULLFUNC_STACK); ieee80211_hw_set(dev, SUPPORTS_PS); ieee80211_hw_set(dev, RX_INCLUDES_FCS); ieee80211_hw_set(dev, SIGNAL_DBM); dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT); priv->beacon_req_id = cpu_to_le32(0); priv->tx_stats[P54_QUEUE_BEACON].limit = 1; priv->tx_stats[P54_QUEUE_FWSCAN].limit = 1; priv->tx_stats[P54_QUEUE_MGMT].limit = 3; priv->tx_stats[P54_QUEUE_CAB].limit = 3; priv->tx_stats[P54_QUEUE_DATA].limit = 5; dev->queues = 1; priv->noise = -94; /* * We support at most 8 tries no matter which rate they're at, * we cannot support max_rates * max_rate_tries as we set it * here, but setting it correctly to 4/2 or so would limit us * artificially if the RC algorithm wants just two rates, so * let's say 4/7, we'll redistribute it at TX time, see the * comments there. */ dev->max_rates = 4; dev->max_rate_tries = 7; dev->extra_tx_headroom = sizeof(struct p54_hdr) + 4 + sizeof(struct p54_tx_data); /* * For now, disable PS by default because it affects * link stability significantly. 
*/ dev->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; mutex_init(&priv->conf_mutex); mutex_init(&priv->eeprom_mutex); init_completion(&priv->stat_comp); init_completion(&priv->eeprom_comp); init_completion(&priv->beacon_comp); INIT_DELAYED_WORK(&priv->work, p54_work); eth_broadcast_addr(priv->mc_maclist[0]); priv->curchan = NULL; p54_reset_stats(priv); return dev; } EXPORT_SYMBOL_GPL(p54_init_common); int p54_register_common(struct ieee80211_hw *dev, struct device *pdev) { struct p54_common __maybe_unused *priv = dev->priv; int err; err = ieee80211_register_hw(dev); if (err) { dev_err(pdev, "Cannot register device (%d).\n", err); return err; } priv->registered = true; #ifdef CONFIG_P54_LEDS err = p54_init_leds(priv); if (err) { p54_unregister_common(dev); return err; } #endif /* CONFIG_P54_LEDS */ dev_info(pdev, "is registered as '%s'\n", wiphy_name(dev->wiphy)); return 0; } EXPORT_SYMBOL_GPL(p54_register_common); void p54_free_common(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; unsigned int i; for (i = 0; i < NUM_NL80211_BANDS; i++) kfree(priv->band_table[i]); kfree(priv->iq_autocal); kfree(priv->output_limit); kfree(priv->curve_data); kfree(priv->rssi_db); bitmap_free(priv->used_rxkeys); kfree(priv->survey); priv->iq_autocal = NULL; priv->output_limit = NULL; priv->curve_data = NULL; priv->rssi_db = NULL; priv->used_rxkeys = NULL; priv->survey = NULL; ieee80211_free_hw(dev); } EXPORT_SYMBOL_GPL(p54_free_common); void p54_unregister_common(struct ieee80211_hw *dev) { struct p54_common *priv = dev->priv; if (priv->registered) { priv->registered = false; #ifdef CONFIG_P54_LEDS p54_unregister_leds(priv); #endif /* CONFIG_P54_LEDS */ ieee80211_unregister_hw(dev); } mutex_destroy(&priv->conf_mutex); mutex_destroy(&priv->eeprom_mutex); } EXPORT_SYMBOL_GPL(p54_unregister_common);
linux-master
drivers/net/wireless/intersil/p54/main.c
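/*
 * Illustration only: the information-element walk that p54_find_ie() above
 * performs on a beacon body, redone over a plain byte buffer so it compiles
 * standalone.  The buffer is made-up test data; 0x05 is WLAN_EID_TIM.  Each
 * IE is [id][len][len bytes of payload], and the length byte has to be
 * bounds-checked before it is trusted.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static const uint8_t *find_ie(const uint8_t *ies, size_t ies_len, uint8_t id)
{
	const uint8_t *pos = ies, *end = ies + ies_len;

	while (end - pos >= 2) {
		if ((size_t)(end - pos) < 2u + pos[1])
			return NULL;			/* truncated IE */
		if (pos[0] == id)
			return pos;
		pos += 2 + pos[1];
	}
	return NULL;
}

int main(void)
{
	const uint8_t ies[] = {
		0x00, 0x04, 't', 'e', 's', 't',		/* SSID "test" */
		0x05, 0x04, 0x00, 0x02, 0x00, 0x01,	/* TIM, dtim_period = 2 */
	};
	const uint8_t *tim = find_ie(ies, sizeof(ies), 0x05);

	if (tim)
		printf("TIM: len=%u dtim_count=%u dtim_period=%u\n",
		       tim[1], tim[2], tim[3]);
	return 0;
}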
// SPDX-License-Identifier: GPL-2.0-only /* * Common code for mac80211 Prism54 drivers * * Copyright (c) 2006, Michael Wu <[email protected]> * Copyright (c) 2007-2009, Christian Lamparter <[email protected]> * Copyright 2008, Johannes Berg <[email protected]> * * Based on: * - the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <[email protected]>, et al. * - stlc45xx driver * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). */ #include <linux/export.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <asm/div64.h> #include <net/mac80211.h> #include "p54.h" #include "lmac.h" #ifdef P54_MM_DEBUG static void p54_dump_tx_queue(struct p54_common *priv) { unsigned long flags; struct ieee80211_tx_info *info; struct p54_tx_info *range; struct sk_buff *skb; struct p54_hdr *hdr; unsigned int i = 0; u32 prev_addr; u32 largest_hole = 0, free; spin_lock_irqsave(&priv->tx_queue.lock, flags); wiphy_debug(priv->hw->wiphy, "/ --- tx queue dump (%d entries) ---\n", skb_queue_len(&priv->tx_queue)); prev_addr = priv->rx_start; skb_queue_walk(&priv->tx_queue, skb) { info = IEEE80211_SKB_CB(skb); range = (void *) info->rate_driver_data; hdr = (void *) skb->data; free = range->start_addr - prev_addr; wiphy_debug(priv->hw->wiphy, "| [%02d] => [skb:%p skb_len:0x%04x " "hdr:{flags:%02x len:%04x req_id:%04x type:%02x} " "mem:{start:%04x end:%04x, free:%d}]\n", i++, skb, skb->len, le16_to_cpu(hdr->flags), le16_to_cpu(hdr->len), le32_to_cpu(hdr->req_id), le16_to_cpu(hdr->type), range->start_addr, range->end_addr, free); prev_addr = range->end_addr; largest_hole = max(largest_hole, free); } free = priv->rx_end - prev_addr; largest_hole = max(largest_hole, free); wiphy_debug(priv->hw->wiphy, "\\ --- [free: %d], largest free block: %d ---\n", free, largest_hole); spin_unlock_irqrestore(&priv->tx_queue.lock, flags); } #endif /* P54_MM_DEBUG */ /* * So, the firmware is somewhat stupid and doesn't know what places in its * memory incoming data should go to. By poking around in the firmware, we * can find some unused memory to upload our packets to. However, data that we * want the card to TX needs to stay intact until the card has told us that * it is done with it. This function finds empty places we can upload to and * marks allocated areas as reserved if necessary. p54_find_and_unlink_skb or * p54_free_skb frees allocated areas. */ static int p54_assign_address(struct p54_common *priv, struct sk_buff *skb) { struct sk_buff *entry, *target_skb = NULL; struct ieee80211_tx_info *info; struct p54_tx_info *range; struct p54_hdr *data = (void *) skb->data; unsigned long flags; u32 last_addr = priv->rx_start; u32 target_addr = priv->rx_start; u16 len = priv->headroom + skb->len + priv->tailroom + 3; info = IEEE80211_SKB_CB(skb); range = (void *) info->rate_driver_data; len = (range->extra_len + len) & ~0x3; spin_lock_irqsave(&priv->tx_queue.lock, flags); if (unlikely(skb_queue_len(&priv->tx_queue) == 32)) { /* * The tx_queue is now really full. * * TODO: check if the device has crashed and reset it. 
*/ spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return -EBUSY; } skb_queue_walk(&priv->tx_queue, entry) { u32 hole_size; info = IEEE80211_SKB_CB(entry); range = (void *) info->rate_driver_data; hole_size = range->start_addr - last_addr; if (!target_skb && hole_size >= len) { target_skb = entry->prev; hole_size -= len; target_addr = last_addr; break; } last_addr = range->end_addr; } if (unlikely(!target_skb)) { if (priv->rx_end - last_addr >= len) { target_skb = skb_peek_tail(&priv->tx_queue); if (target_skb) { info = IEEE80211_SKB_CB(target_skb); range = (void *)info->rate_driver_data; target_addr = range->end_addr; } } else { spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return -ENOSPC; } } info = IEEE80211_SKB_CB(skb); range = (void *) info->rate_driver_data; range->start_addr = target_addr; range->end_addr = target_addr + len; data->req_id = cpu_to_le32(target_addr + priv->headroom); if (IS_DATA_FRAME(skb) && unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON)) priv->beacon_req_id = data->req_id; if (target_skb) __skb_queue_after(&priv->tx_queue, target_skb, skb); else __skb_queue_head(&priv->tx_queue, skb); spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return 0; } static void p54_tx_pending(struct p54_common *priv) { struct sk_buff *skb; int ret; skb = skb_dequeue(&priv->tx_pending); if (unlikely(!skb)) return ; ret = p54_assign_address(priv, skb); if (unlikely(ret)) skb_queue_head(&priv->tx_pending, skb); else priv->tx(priv->hw, skb); } static void p54_wake_queues(struct p54_common *priv) { unsigned long flags; unsigned int i; if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) return ; p54_tx_pending(priv); spin_lock_irqsave(&priv->tx_stats_lock, flags); for (i = 0; i < priv->hw->queues; i++) { if (priv->tx_stats[i + P54_QUEUE_DATA].len < priv->tx_stats[i + P54_QUEUE_DATA].limit) ieee80211_wake_queue(priv->hw, i); } spin_unlock_irqrestore(&priv->tx_stats_lock, flags); } static int p54_tx_qos_accounting_alloc(struct p54_common *priv, struct sk_buff *skb, const u16 p54_queue) { struct p54_tx_queue_stats *queue; unsigned long flags; if (WARN_ON(p54_queue >= P54_QUEUE_NUM)) return -EINVAL; queue = &priv->tx_stats[p54_queue]; spin_lock_irqsave(&priv->tx_stats_lock, flags); if (unlikely(queue->len >= queue->limit && IS_QOS_QUEUE(p54_queue))) { spin_unlock_irqrestore(&priv->tx_stats_lock, flags); return -ENOSPC; } queue->len++; queue->count++; if (unlikely(queue->len == queue->limit && IS_QOS_QUEUE(p54_queue))) { u16 ac_queue = p54_queue - P54_QUEUE_DATA; ieee80211_stop_queue(priv->hw, ac_queue); } spin_unlock_irqrestore(&priv->tx_stats_lock, flags); return 0; } static void p54_tx_qos_accounting_free(struct p54_common *priv, struct sk_buff *skb) { if (IS_DATA_FRAME(skb)) { unsigned long flags; spin_lock_irqsave(&priv->tx_stats_lock, flags); priv->tx_stats[GET_HW_QUEUE(skb)].len--; spin_unlock_irqrestore(&priv->tx_stats_lock, flags); if (unlikely(GET_HW_QUEUE(skb) == P54_QUEUE_BEACON)) { if (priv->beacon_req_id == GET_REQ_ID(skb)) { /* this is the active beacon set anymore */ priv->beacon_req_id = 0; } complete(&priv->beacon_comp); } } p54_wake_queues(priv); } void p54_free_skb(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54_common *priv = dev->priv; if (unlikely(!skb)) return ; skb_unlink(skb, &priv->tx_queue); p54_tx_qos_accounting_free(priv, skb); ieee80211_free_txskb(dev, skb); } EXPORT_SYMBOL_GPL(p54_free_skb); static struct sk_buff *p54_find_and_unlink_skb(struct p54_common *priv, const __le32 req_id) { struct sk_buff *entry; unsigned long flags; 
spin_lock_irqsave(&priv->tx_queue.lock, flags); skb_queue_walk(&priv->tx_queue, entry) { struct p54_hdr *hdr = (struct p54_hdr *) entry->data; if (hdr->req_id == req_id) { __skb_unlink(entry, &priv->tx_queue); spin_unlock_irqrestore(&priv->tx_queue.lock, flags); p54_tx_qos_accounting_free(priv, entry); return entry; } } spin_unlock_irqrestore(&priv->tx_queue.lock, flags); return NULL; } void p54_tx(struct p54_common *priv, struct sk_buff *skb) { skb_queue_tail(&priv->tx_pending, skb); p54_tx_pending(priv); } static int p54_rssi_to_dbm(struct p54_common *priv, int rssi) { if (priv->rxhw != 5) { return ((rssi * priv->cur_rssi->mul) / 64 + priv->cur_rssi->add) / 4; } else { /* * TODO: find the correct formula */ return rssi / 2 - 110; } } /* * Even if the firmware is capable of dealing with incoming traffic, * while dozing, we have to prepared in case mac80211 uses PS-POLL * to retrieve outstanding frames from our AP. * (see comment in net/mac80211/mlme.c @ line 1993) */ static void p54_pspoll_workaround(struct p54_common *priv, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *) skb->data; struct ieee80211_tim_ie *tim_ie; u8 *tim; u8 tim_len; bool new_psm; /* only beacons have a TIM IE */ if (!ieee80211_is_beacon(hdr->frame_control)) return; if (!priv->aid) return; /* only consider beacons from the associated BSSID */ if (!ether_addr_equal_64bits(hdr->addr3, priv->bssid)) return; tim = p54_find_ie(skb, WLAN_EID_TIM); if (!tim) return; tim_len = tim[1]; tim_ie = (struct ieee80211_tim_ie *) &tim[2]; new_psm = ieee80211_check_tim(tim_ie, tim_len, priv->aid); if (new_psm != priv->powersave_override) { priv->powersave_override = new_psm; p54_set_ps(priv); } } static int p54_rx_data(struct p54_common *priv, struct sk_buff *skb) { struct p54_rx_data *hdr = (struct p54_rx_data *) skb->data; struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); u16 freq = le16_to_cpu(hdr->freq); size_t header_len = sizeof(*hdr); u32 tsf32; __le16 fc; u8 rate = hdr->rate & 0xf; /* * If the device is in a unspecified state we have to * ignore all data frames. Else we could end up with a * nasty crash. */ if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) return 0; if (!(hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_IN_FCS_GOOD))) return 0; if (hdr->decrypt_status == P54_DECRYPT_OK) rx_status->flag |= RX_FLAG_DECRYPTED; if ((hdr->decrypt_status == P54_DECRYPT_FAIL_MICHAEL) || (hdr->decrypt_status == P54_DECRYPT_FAIL_TKIP)) rx_status->flag |= RX_FLAG_MMIC_ERROR; rx_status->signal = p54_rssi_to_dbm(priv, hdr->rssi); if (hdr->rate & 0x10) rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (priv->hw->conf.chandef.chan->band == NL80211_BAND_5GHZ) rx_status->rate_idx = (rate < 4) ? 
0 : rate - 4; else rx_status->rate_idx = rate; rx_status->freq = freq; rx_status->band = priv->hw->conf.chandef.chan->band; rx_status->antenna = hdr->antenna; tsf32 = le32_to_cpu(hdr->tsf32); if (tsf32 < priv->tsf_low32) priv->tsf_high32++; rx_status->mactime = ((u64)priv->tsf_high32) << 32 | tsf32; priv->tsf_low32 = tsf32; /* LMAC API Page 10/29 - s_lm_data_in - clock * "usec accurate timestamp of hardware clock * at end of frame (before OFDM SIFS EOF padding" */ rx_status->flag |= RX_FLAG_MACTIME_END; if (hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) header_len += hdr->align[0]; skb_pull(skb, header_len); skb_trim(skb, le16_to_cpu(hdr->len)); fc = ((struct ieee80211_hdr *)skb->data)->frame_control; if (ieee80211_is_probe_resp(fc) || ieee80211_is_beacon(fc)) rx_status->boottime_ns = ktime_get_boottime_ns(); if (unlikely(priv->hw->conf.flags & IEEE80211_CONF_PS)) p54_pspoll_workaround(priv, skb); ieee80211_rx_irqsafe(priv->hw, skb); ieee80211_queue_delayed_work(priv->hw, &priv->work, msecs_to_jiffies(P54_STATISTICS_UPDATE)); return -1; } static void p54_rx_frame_sent(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_frame_sent *payload = (struct p54_frame_sent *) hdr->data; struct ieee80211_tx_info *info; struct p54_hdr *entry_hdr; struct p54_tx_data *entry_data; struct sk_buff *entry; unsigned int pad = 0, frame_len; int count, idx; entry = p54_find_and_unlink_skb(priv, hdr->req_id); if (unlikely(!entry)) return ; frame_len = entry->len; info = IEEE80211_SKB_CB(entry); entry_hdr = (struct p54_hdr *) entry->data; entry_data = (struct p54_tx_data *) entry_hdr->data; priv->stats.dot11ACKFailureCount += payload->tries - 1; /* * Frames in P54_QUEUE_FWSCAN and P54_QUEUE_BEACON are * generated by the driver. Therefore tx_status is bogus * and we don't want to confuse the mac80211 stack. */ if (unlikely(entry_data->hw_queue < P54_QUEUE_FWSCAN)) { dev_kfree_skb_any(entry); return ; } /* * Clear manually, ieee80211_tx_info_clear_status would * clear the counts too and we need them. */ memset_after(&info->status, 0, rates); if (entry_hdr->flags & cpu_to_le16(P54_HDR_FLAG_DATA_ALIGN)) pad = entry_data->align[0]; /* walk through the rates array and adjust the counts */ count = payload->tries; for (idx = 0; idx < 4; idx++) { if (count >= info->status.rates[idx].count) { count -= info->status.rates[idx].count; } else if (count > 0) { info->status.rates[idx].count = count; count = 0; } else { info->status.rates[idx].idx = -1; info->status.rates[idx].count = 0; } } if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) && !(payload->status & P54_TX_FAILED)) info->flags |= IEEE80211_TX_STAT_ACK; if (payload->status & P54_TX_PSM_CANCELLED) info->flags |= IEEE80211_TX_STAT_TX_FILTERED; info->status.ack_signal = p54_rssi_to_dbm(priv, (int)payload->ack_rssi); /* Undo all changes to the frame. */ switch (entry_data->key_type) { case P54_CRYPTO_TKIPMICHAEL: { u8 *iv = (u8 *)(entry_data->align + pad + entry_data->crypt_offset); /* Restore the original TKIP IV. 
*/ iv[2] = iv[0]; iv[0] = iv[1]; iv[1] = (iv[0] | 0x20) & 0x7f; /* WEPSeed - 8.3.2.2 */ frame_len -= 12; /* remove TKIP_MMIC + TKIP_ICV */ break; } case P54_CRYPTO_AESCCMP: frame_len -= 8; /* remove CCMP_MIC */ break; case P54_CRYPTO_WEP: frame_len -= 4; /* remove WEP_ICV */ break; } skb_trim(entry, frame_len); skb_pull(entry, sizeof(*hdr) + pad + sizeof(*entry_data)); ieee80211_tx_status_irqsafe(priv->hw, entry); } static void p54_rx_eeprom_readback(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_eeprom_lm86 *eeprom = (struct p54_eeprom_lm86 *) hdr->data; struct sk_buff *tmp; if (!priv->eeprom) return ; if (priv->fw_var >= 0x509) { memcpy(priv->eeprom, eeprom->v2.data, le16_to_cpu(eeprom->v2.len)); } else { memcpy(priv->eeprom, eeprom->v1.data, le16_to_cpu(eeprom->v1.len)); } priv->eeprom = NULL; tmp = p54_find_and_unlink_skb(priv, hdr->req_id); dev_kfree_skb_any(tmp); complete(&priv->eeprom_comp); } static void p54_rx_stats(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_statistics *stats = (struct p54_statistics *) hdr->data; struct sk_buff *tmp; struct ieee80211_channel *chan; unsigned int i, rssi, tx, cca, dtime, dtotal, dcca, dtx, drssi, unit; u32 tsf32; if (unlikely(priv->mode == NL80211_IFTYPE_UNSPECIFIED)) return ; tsf32 = le32_to_cpu(stats->tsf32); if (tsf32 < priv->tsf_low32) priv->tsf_high32++; priv->tsf_low32 = tsf32; priv->stats.dot11RTSFailureCount = le32_to_cpu(stats->rts_fail); priv->stats.dot11RTSSuccessCount = le32_to_cpu(stats->rts_success); priv->stats.dot11FCSErrorCount = le32_to_cpu(stats->rx_bad_fcs); priv->noise = p54_rssi_to_dbm(priv, le32_to_cpu(stats->noise)); /* * STSW450X LMAC API page 26 - 3.8 Statistics * "The exact measurement period can be derived from the * timestamp member". */ dtime = tsf32 - priv->survey_raw.timestamp; /* * STSW450X LMAC API page 26 - 3.8.1 Noise histogram * The LMAC samples RSSI, CCA and transmit state at regular * periods (typically 8 times per 1k [as in 1024] usec). */ cca = le32_to_cpu(stats->sample_cca); tx = le32_to_cpu(stats->sample_tx); rssi = 0; for (i = 0; i < ARRAY_SIZE(stats->sample_noise); i++) rssi += le32_to_cpu(stats->sample_noise[i]); dcca = cca - priv->survey_raw.cached_cca; drssi = rssi - priv->survey_raw.cached_rssi; dtx = tx - priv->survey_raw.cached_tx; dtotal = dcca + drssi + dtx; /* * update statistics when more than a second is over since the * last call, or when a update is badly needed. 
*/ if (dtotal && (priv->update_stats || dtime >= USEC_PER_SEC) && dtime >= dtotal) { priv->survey_raw.timestamp = tsf32; priv->update_stats = false; unit = dtime / dtotal; if (dcca) { priv->survey_raw.cca += dcca * unit; priv->survey_raw.cached_cca = cca; } if (dtx) { priv->survey_raw.tx += dtx * unit; priv->survey_raw.cached_tx = tx; } if (drssi) { priv->survey_raw.rssi += drssi * unit; priv->survey_raw.cached_rssi = rssi; } /* 1024 usec / 8 times = 128 usec / time */ if (!(priv->phy_ps || priv->phy_idle)) priv->survey_raw.active += dtotal * unit; else priv->survey_raw.active += (dcca + dtx) * unit; } chan = priv->curchan; if (chan) { struct survey_info *survey = &priv->survey[chan->hw_value]; survey->noise = clamp(priv->noise, -128, 127); survey->time = priv->survey_raw.active; survey->time_tx = priv->survey_raw.tx; survey->time_busy = priv->survey_raw.tx + priv->survey_raw.cca; do_div(survey->time, 1024); do_div(survey->time_tx, 1024); do_div(survey->time_busy, 1024); } tmp = p54_find_and_unlink_skb(priv, hdr->req_id); dev_kfree_skb_any(tmp); complete(&priv->stat_comp); } static void p54_rx_trap(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; struct p54_trap *trap = (struct p54_trap *) hdr->data; u16 event = le16_to_cpu(trap->event); u16 freq = le16_to_cpu(trap->frequency); switch (event) { case P54_TRAP_BEACON_TX: break; case P54_TRAP_RADAR: wiphy_info(priv->hw->wiphy, "radar (freq:%d MHz)\n", freq); break; case P54_TRAP_NO_BEACON: if (priv->vif) ieee80211_beacon_loss(priv->vif); break; case P54_TRAP_SCAN: break; case P54_TRAP_TBTT: break; case P54_TRAP_TIMER: break; case P54_TRAP_FAA_RADIO_OFF: wiphy_rfkill_set_hw_state(priv->hw->wiphy, true); break; case P54_TRAP_FAA_RADIO_ON: wiphy_rfkill_set_hw_state(priv->hw->wiphy, false); break; default: wiphy_info(priv->hw->wiphy, "received event:%x freq:%d\n", event, freq); break; } } static int p54_rx_control(struct p54_common *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; switch (le16_to_cpu(hdr->type)) { case P54_CONTROL_TYPE_TXDONE: p54_rx_frame_sent(priv, skb); break; case P54_CONTROL_TYPE_TRAP: p54_rx_trap(priv, skb); break; case P54_CONTROL_TYPE_BBP: break; case P54_CONTROL_TYPE_STAT_READBACK: p54_rx_stats(priv, skb); break; case P54_CONTROL_TYPE_EEPROM_READBACK: p54_rx_eeprom_readback(priv, skb); break; default: wiphy_debug(priv->hw->wiphy, "not handling 0x%02x type control frame\n", le16_to_cpu(hdr->type)); break; } return 0; } /* returns zero if skb can be reused */ int p54_rx(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54_common *priv = dev->priv; u16 type = le16_to_cpu(*((__le16 *)skb->data)); if (type & P54_HDR_FLAG_CONTROL) return p54_rx_control(priv, skb); else return p54_rx_data(priv, skb); } EXPORT_SYMBOL_GPL(p54_rx); static void p54_tx_80211_header(struct p54_common *priv, struct sk_buff *skb, struct ieee80211_tx_info *info, struct ieee80211_sta *sta, u8 *queue, u32 *extra_len, u16 *flags, u16 *aid, bool *burst_possible) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; if (ieee80211_is_data_qos(hdr->frame_control)) *burst_possible = true; else *burst_possible = false; if (!(info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) *flags |= P54_HDR_FLAG_DATA_OUT_SEQNR; if (info->flags & IEEE80211_TX_CTL_NO_PS_BUFFER) *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL; if (info->flags & IEEE80211_TX_CTL_CLEAR_PS_FILT) *flags |= P54_HDR_FLAG_DATA_OUT_NOCANCEL; *queue = skb_get_queue_mapping(skb) + P54_QUEUE_DATA; switch (priv->mode) { 
case NL80211_IFTYPE_MONITOR: /* * We have to set P54_HDR_FLAG_DATA_OUT_PROMISC for * every frame in promiscuous/monitor mode. * see STSW45x0C LMAC API - page 12. */ *aid = 0; *flags |= P54_HDR_FLAG_DATA_OUT_PROMISC; break; case NL80211_IFTYPE_STATION: *aid = 1; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) { *aid = 0; *queue = P54_QUEUE_CAB; return; } if (unlikely(ieee80211_is_mgmt(hdr->frame_control))) { if (ieee80211_is_probe_resp(hdr->frame_control)) { *aid = 0; *flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP | P54_HDR_FLAG_DATA_OUT_NOCANCEL; return; } else if (ieee80211_is_beacon(hdr->frame_control)) { *aid = 0; if (info->flags & IEEE80211_TX_CTL_INJECTED) { /* * Injecting beacons on top of a AP is * not a good idea... nevertheless, * it should be doable. */ return; } *flags |= P54_HDR_FLAG_DATA_OUT_TIMESTAMP; *queue = P54_QUEUE_BEACON; *extra_len = IEEE80211_MAX_TIM_LEN; return; } } if (sta) *aid = sta->aid; break; } } static u8 p54_convert_algo(u32 cipher) { switch (cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: return P54_CRYPTO_WEP; case WLAN_CIPHER_SUITE_TKIP: return P54_CRYPTO_TKIPMICHAEL; case WLAN_CIPHER_SUITE_CCMP: return P54_CRYPTO_AESCCMP; default: return 0; } } void p54_tx_80211(struct ieee80211_hw *dev, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct p54_common *priv = dev->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct p54_tx_info *p54info; struct p54_hdr *hdr; struct p54_tx_data *txhdr; unsigned int padding, len, extra_len = 0; int i, j, ridx; u16 hdr_flags = 0, aid = 0; u8 rate, queue = 0, crypt_offset = 0; u8 cts_rate = 0x20; u8 rc_flags; u8 calculated_tries[4]; u8 nrates = 0, nremaining = 8; bool burst_allowed = false; p54_tx_80211_header(priv, skb, info, control->sta, &queue, &extra_len, &hdr_flags, &aid, &burst_allowed); if (p54_tx_qos_accounting_alloc(priv, skb, queue)) { ieee80211_free_txskb(dev, skb); return; } padding = (unsigned long)(skb->data - (sizeof(*hdr) + sizeof(*txhdr))) & 3; len = skb->len; if (info->control.hw_key) { crypt_offset = ieee80211_get_hdrlen_from_skb(skb); if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { u8 *iv = (u8 *)(skb->data + crypt_offset); /* * The firmware excepts that the IV has to have * this special format */ iv[1] = iv[0]; iv[0] = iv[2]; iv[2] = 0; } } txhdr = skb_push(skb, sizeof(*txhdr) + padding); hdr = skb_push(skb, sizeof(*hdr)); if (padding) hdr_flags |= P54_HDR_FLAG_DATA_ALIGN; hdr->type = cpu_to_le16(aid); hdr->rts_tries = info->control.rates[0].count; /* * we register the rates in perfect order, and * RTS/CTS won't happen on 5 GHz */ cts_rate = info->control.rts_cts_rate_idx; memset(&txhdr->rateset, 0, sizeof(txhdr->rateset)); /* see how many rates got used */ for (i = 0; i < dev->max_rates; i++) { if (info->control.rates[i].idx < 0) break; nrates++; } /* limit tries to 8/nrates per rate */ for (i = 0; i < nrates; i++) { /* * The magic expression here is equivalent to 8/nrates for * all values that matter, but avoids division and jumps. * Note that nrates can only take the values 1 through 4. 
*/ calculated_tries[i] = min_t(int, ((15 >> nrates) | 1) + 1, info->control.rates[i].count); nremaining -= calculated_tries[i]; } /* if there are tries left, distribute from back to front */ for (i = nrates - 1; nremaining > 0 && i >= 0; i--) { int tmp = info->control.rates[i].count - calculated_tries[i]; if (tmp <= 0) continue; /* RC requested more tries at this rate */ tmp = min_t(int, tmp, nremaining); calculated_tries[i] += tmp; nremaining -= tmp; } ridx = 0; for (i = 0; i < nrates && ridx < 8; i++) { /* we register the rates in perfect order */ rate = info->control.rates[i].idx; if (info->band == NL80211_BAND_5GHZ) rate += 4; /* store the count we actually calculated for TX status */ info->control.rates[i].count = calculated_tries[i]; rc_flags = info->control.rates[i].flags; if (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) { rate |= 0x10; cts_rate |= 0x10; } if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { burst_allowed = false; rate |= 0x40; } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) { rate |= 0x20; burst_allowed = false; } for (j = 0; j < calculated_tries[i] && ridx < 8; j++) { txhdr->rateset[ridx] = rate; ridx++; } } if (burst_allowed) hdr_flags |= P54_HDR_FLAG_DATA_OUT_BURST; /* TODO: enable bursting */ hdr->flags = cpu_to_le16(hdr_flags); hdr->tries = ridx; txhdr->rts_rate_idx = 0; if (info->control.hw_key) { txhdr->key_type = p54_convert_algo(info->control.hw_key->cipher); txhdr->key_len = min((u8)16, info->control.hw_key->keylen); memcpy(txhdr->key, info->control.hw_key->key, txhdr->key_len); if (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) { /* reserve space for the MIC key */ len += 8; skb_put_data(skb, &(info->control.hw_key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY]), 8); } /* reserve some space for ICV */ len += info->control.hw_key->icv_len; skb_put_zero(skb, info->control.hw_key->icv_len); } else { txhdr->key_type = 0; txhdr->key_len = 0; } txhdr->crypt_offset = crypt_offset; txhdr->hw_queue = queue; txhdr->backlog = priv->tx_stats[queue].len - 1; memset(txhdr->durations, 0, sizeof(txhdr->durations)); txhdr->tx_antenna = 2 & priv->tx_diversity_mask; if (priv->rxhw == 5) { txhdr->longbow.cts_rate = cts_rate; txhdr->longbow.output_power = cpu_to_le16(priv->output_power); } else { txhdr->normal.output_power = priv->output_power; txhdr->normal.cts_rate = cts_rate; } if (padding) txhdr->align[0] = padding; hdr->len = cpu_to_le16(len); /* modifies skb->cb and with it info, so must be last! */ p54info = (void *) info->rate_driver_data; p54info->extra_len = extra_len; p54_tx(priv, skb); }
linux-master
drivers/net/wireless/intersil/p54/txrx.c
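/*
 * Illustration only: a standalone check of the per-rate retry budget used
 * in p54_tx_80211() above.  The driver wants roughly 8/nrates tries per
 * rate but avoids the division; ((15 >> nrates) | 1) + 1 produces the same
 * values for every possible input, nrates = 1..4.
 */
#include <stdio.h>

int main(void)
{
	int nrates;

	for (nrates = 1; nrates <= 4; nrates++)
		printf("nrates=%d: ((15 >> n) | 1) + 1 = %d, 8/n = %d\n",
		       nrates, ((15 >> nrates) | 1) + 1, 8 / nrates);
	return 0;
}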
// SPDX-License-Identifier: GPL-2.0-only /* * Device handling thread implementation for mac80211 ST-Ericsson CW1200 drivers * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> * * Based on: * ST-Ericsson UMAC CW1200 driver, which is * Copyright (c) 2010, ST-Ericsson * Author: Ajitpal Singh <[email protected]> */ #include <linux/module.h> #include <net/mac80211.h> #include <linux/kthread.h> #include <linux/timer.h> #include "cw1200.h" #include "bh.h" #include "hwio.h" #include "wsm.h" #include "hwbus.h" #include "debug.h" #include "fwio.h" static int cw1200_bh(void *arg); #define DOWNLOAD_BLOCK_SIZE_WR (0x1000 - 4) /* an SPI message cannot be bigger than (2"12-1)*2 bytes * "*2" to cvt to bytes */ #define MAX_SZ_RD_WR_BUFFERS (DOWNLOAD_BLOCK_SIZE_WR*2) #define PIGGYBACK_CTRL_REG (2) #define EFFECTIVE_BUF_SIZE (MAX_SZ_RD_WR_BUFFERS - PIGGYBACK_CTRL_REG) /* Suspend state privates */ enum cw1200_bh_pm_state { CW1200_BH_RESUMED = 0, CW1200_BH_SUSPEND, CW1200_BH_SUSPENDED, CW1200_BH_RESUME, }; static void cw1200_bh_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, bh_work); cw1200_bh(priv); } int cw1200_register_bh(struct cw1200_common *priv) { int err = 0; /* Realtime workqueue */ priv->bh_workqueue = alloc_workqueue("cw1200_bh", WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_CPU_INTENSIVE, 1); if (!priv->bh_workqueue) return -ENOMEM; INIT_WORK(&priv->bh_work, cw1200_bh_work); pr_debug("[BH] register.\n"); atomic_set(&priv->bh_rx, 0); atomic_set(&priv->bh_tx, 0); atomic_set(&priv->bh_term, 0); atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); priv->bh_error = 0; priv->hw_bufs_used = 0; priv->buf_id_tx = 0; priv->buf_id_rx = 0; init_waitqueue_head(&priv->bh_wq); init_waitqueue_head(&priv->bh_evt_wq); err = !queue_work(priv->bh_workqueue, &priv->bh_work); WARN_ON(err); return err; } void cw1200_unregister_bh(struct cw1200_common *priv) { atomic_inc(&priv->bh_term); wake_up(&priv->bh_wq); destroy_workqueue(priv->bh_workqueue); priv->bh_workqueue = NULL; pr_debug("[BH] unregistered.\n"); } void cw1200_irq_handler(struct cw1200_common *priv) { pr_debug("[BH] irq.\n"); /* Disable Interrupts! */ /* NOTE: hwbus_ops->lock already held */ __cw1200_irq_enable(priv, 0); if (/* WARN_ON */(priv->bh_error)) return; if (atomic_inc_return(&priv->bh_rx) == 1) wake_up(&priv->bh_wq); } EXPORT_SYMBOL_GPL(cw1200_irq_handler); void cw1200_bh_wakeup(struct cw1200_common *priv) { pr_debug("[BH] wakeup.\n"); if (priv->bh_error) { pr_err("[BH] wakeup failed (BH error)\n"); return; } if (atomic_inc_return(&priv->bh_tx) == 1) wake_up(&priv->bh_wq); } int cw1200_bh_suspend(struct cw1200_common *priv) { pr_debug("[BH] suspend.\n"); if (priv->bh_error) { wiphy_warn(priv->hw->wiphy, "BH error -- can't suspend\n"); return -EINVAL; } atomic_set(&priv->bh_suspend, CW1200_BH_SUSPEND); wake_up(&priv->bh_wq); return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || (CW1200_BH_SUSPENDED == atomic_read(&priv->bh_suspend)), 1 * HZ) ? 0 : -ETIMEDOUT; } int cw1200_bh_resume(struct cw1200_common *priv) { pr_debug("[BH] resume.\n"); if (priv->bh_error) { wiphy_warn(priv->hw->wiphy, "BH error -- can't resume\n"); return -EINVAL; } atomic_set(&priv->bh_suspend, CW1200_BH_RESUME); wake_up(&priv->bh_wq); return wait_event_timeout(priv->bh_evt_wq, priv->bh_error || (CW1200_BH_RESUMED == atomic_read(&priv->bh_suspend)), 1 * HZ) ? 
0 : -ETIMEDOUT; } static inline void wsm_alloc_tx_buffer(struct cw1200_common *priv) { ++priv->hw_bufs_used; } int wsm_release_tx_buffer(struct cw1200_common *priv, int count) { int ret = 0; int hw_bufs_used = priv->hw_bufs_used; priv->hw_bufs_used -= count; if (WARN_ON(priv->hw_bufs_used < 0)) ret = -1; else if (hw_bufs_used >= priv->wsm_caps.input_buffers) ret = 1; if (!priv->hw_bufs_used) wake_up(&priv->bh_evt_wq); return ret; } static int cw1200_bh_read_ctrl_reg(struct cw1200_common *priv, u16 *ctrl_reg) { int ret; ret = cw1200_reg_read_16(priv, ST90TDS_CONTROL_REG_ID, ctrl_reg); if (ret) { ret = cw1200_reg_read_16(priv, ST90TDS_CONTROL_REG_ID, ctrl_reg); if (ret) pr_err("[BH] Failed to read control register.\n"); } return ret; } static int cw1200_device_wakeup(struct cw1200_common *priv) { u16 ctrl_reg; int ret; pr_debug("[BH] Device wakeup.\n"); /* First, set the dpll register */ ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, cw1200_dpll_from_clk(priv->hw_refclk)); if (WARN_ON(ret)) return ret; /* To force the device to be always-on, the host sets WLAN_UP to 1 */ ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, ST90TDS_CONT_WUP_BIT); if (WARN_ON(ret)) return ret; ret = cw1200_bh_read_ctrl_reg(priv, &ctrl_reg); if (WARN_ON(ret)) return ret; /* If the device returns WLAN_RDY as 1, the device is active and will * remain active. */ if (ctrl_reg & ST90TDS_CONT_RDY_BIT) { pr_debug("[BH] Device awake.\n"); return 1; } return 0; } /* Must be called from BH thraed. */ void cw1200_enable_powersave(struct cw1200_common *priv, bool enable) { pr_debug("[BH] Powerave is %s.\n", enable ? "enabled" : "disabled"); priv->powersave_enabled = enable; } static int cw1200_bh_rx_helper(struct cw1200_common *priv, uint16_t *ctrl_reg, int *tx) { size_t read_len = 0; struct sk_buff *skb_rx = NULL; struct wsm_hdr *wsm; size_t wsm_len; u16 wsm_id; u8 wsm_seq; int rx_resync = 1; size_t alloc_len; u8 *data; read_len = (*ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) * 2; if (!read_len) return 0; /* No more work */ if (WARN_ON((read_len < sizeof(struct wsm_hdr)) || (read_len > EFFECTIVE_BUF_SIZE))) { pr_debug("Invalid read len: %zu (%04x)", read_len, *ctrl_reg); goto err; } /* Add SIZE of PIGGYBACK reg (CONTROL Reg) * to the NEXT Message length + 2 Bytes for SKB */ read_len = read_len + 2; alloc_len = priv->hwbus_ops->align_size( priv->hwbus_priv, read_len); /* Check if not exceeding CW1200 capabilities */ if (WARN_ON_ONCE(alloc_len > EFFECTIVE_BUF_SIZE)) { pr_debug("Read aligned len: %zu\n", alloc_len); } skb_rx = dev_alloc_skb(alloc_len); if (WARN_ON(!skb_rx)) goto err; skb_trim(skb_rx, 0); skb_put(skb_rx, read_len); data = skb_rx->data; if (WARN_ON(!data)) goto err; if (WARN_ON(cw1200_data_read(priv, data, alloc_len))) { pr_err("rx blew up, len %zu\n", alloc_len); goto err; } /* Piggyback */ *ctrl_reg = __le16_to_cpu( ((__le16 *)data)[alloc_len / 2 - 1]); wsm = (struct wsm_hdr *)data; wsm_len = __le16_to_cpu(wsm->len); if (WARN_ON(wsm_len > read_len)) goto err; if (priv->wsm_enable_wsm_dumps) print_hex_dump_bytes("<-- ", DUMP_PREFIX_NONE, data, wsm_len); wsm_id = __le16_to_cpu(wsm->id) & 0xFFF; wsm_seq = (__le16_to_cpu(wsm->id) >> 13) & 7; skb_trim(skb_rx, wsm_len); if (wsm_id == 0x0800) { wsm_handle_exception(priv, &data[sizeof(*wsm)], wsm_len - sizeof(*wsm)); goto err; } else if (!rx_resync) { if (WARN_ON(wsm_seq != priv->wsm_rx_seq)) goto err; } priv->wsm_rx_seq = (wsm_seq + 1) & 7; rx_resync = 0; if (wsm_id & 0x0400) { int rc = wsm_release_tx_buffer(priv, 1); if (WARN_ON(rc < 0)) return rc; 
else if (rc > 0) *tx = 1; } /* cw1200_wsm_rx takes care on SKB livetime */ if (WARN_ON(wsm_handle_rx(priv, wsm_id, wsm, &skb_rx))) goto err; dev_kfree_skb(skb_rx); return 0; err: dev_kfree_skb(skb_rx); return -1; } static int cw1200_bh_tx_helper(struct cw1200_common *priv, int *pending_tx, int *tx_burst) { size_t tx_len; u8 *data; int ret; struct wsm_hdr *wsm; if (priv->device_can_sleep) { ret = cw1200_device_wakeup(priv); if (WARN_ON(ret < 0)) { /* Error in wakeup */ *pending_tx = 1; return 0; } else if (ret) { /* Woke up */ priv->device_can_sleep = false; } else { /* Did not awake */ *pending_tx = 1; return 0; } } wsm_alloc_tx_buffer(priv); ret = wsm_get_tx(priv, &data, &tx_len, tx_burst); if (ret <= 0) { wsm_release_tx_buffer(priv, 1); if (WARN_ON(ret < 0)) return ret; /* Error */ return 0; /* No work */ } wsm = (struct wsm_hdr *)data; BUG_ON(tx_len < sizeof(*wsm)); BUG_ON(__le16_to_cpu(wsm->len) != tx_len); atomic_inc(&priv->bh_tx); tx_len = priv->hwbus_ops->align_size( priv->hwbus_priv, tx_len); /* Check if not exceeding CW1200 capabilities */ if (WARN_ON_ONCE(tx_len > EFFECTIVE_BUF_SIZE)) pr_debug("Write aligned len: %zu\n", tx_len); wsm->id &= __cpu_to_le16(0xffff ^ WSM_TX_SEQ(WSM_TX_SEQ_MAX)); wsm->id |= __cpu_to_le16(WSM_TX_SEQ(priv->wsm_tx_seq)); if (WARN_ON(cw1200_data_write(priv, data, tx_len))) { pr_err("tx blew up, len %zu\n", tx_len); wsm_release_tx_buffer(priv, 1); return -1; /* Error */ } if (priv->wsm_enable_wsm_dumps) print_hex_dump_bytes("--> ", DUMP_PREFIX_NONE, data, __le16_to_cpu(wsm->len)); wsm_txed(priv, data); priv->wsm_tx_seq = (priv->wsm_tx_seq + 1) & WSM_TX_SEQ_MAX; if (*tx_burst > 1) { cw1200_debug_tx_burst(priv); return 1; /* Work remains */ } return 0; } static int cw1200_bh(void *arg) { struct cw1200_common *priv = arg; int rx, tx, term, suspend; u16 ctrl_reg = 0; int tx_allowed; int pending_tx = 0; int tx_burst; long status; u32 dummy; int ret; for (;;) { if (!priv->hw_bufs_used && priv->powersave_enabled && !priv->device_can_sleep && !atomic_read(&priv->recent_scan)) { status = 1 * HZ; pr_debug("[BH] Device wakedown. No data.\n"); cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0); priv->device_can_sleep = true; } else if (priv->hw_bufs_used) { /* Interrupt loss detection */ status = 1 * HZ; } else { status = MAX_SCHEDULE_TIMEOUT; } /* Dummy Read for SDIO retry mechanism*/ if ((priv->hw_type != -1) && (atomic_read(&priv->bh_rx) == 0) && (atomic_read(&priv->bh_tx) == 0)) cw1200_reg_read(priv, ST90TDS_CONFIG_REG_ID, &dummy, sizeof(dummy)); pr_debug("[BH] waiting ...\n"); status = wait_event_interruptible_timeout(priv->bh_wq, ({ rx = atomic_xchg(&priv->bh_rx, 0); tx = atomic_xchg(&priv->bh_tx, 0); term = atomic_xchg(&priv->bh_term, 0); suspend = pending_tx ? 0 : atomic_read(&priv->bh_suspend); (rx || tx || term || suspend || priv->bh_error); }), status); pr_debug("[BH] - rx: %d, tx: %d, term: %d, bh_err: %d, suspend: %d, status: %ld\n", rx, tx, term, suspend, priv->bh_error, status); /* Did an error occur? */ if ((status < 0 && status != -ERESTARTSYS) || term || priv->bh_error) { break; } if (!status) { /* wait_event timed out */ unsigned long timestamp = jiffies; long timeout; int pending = 0; int i; /* Check to see if we have any outstanding frames */ if (priv->hw_bufs_used && (!rx || !tx)) { wiphy_warn(priv->hw->wiphy, "Missed interrupt? 
(%d frames outstanding)\n", priv->hw_bufs_used); rx = 1; /* Get a timestamp of "oldest" frame */ for (i = 0; i < 4; ++i) pending += cw1200_queue_get_xmit_timestamp( &priv->tx_queue[i], &timestamp, priv->pending_frame_id); /* Check if frame transmission is timed out. * Add an extra second with respect to possible * interrupt loss. */ timeout = timestamp + WSM_CMD_LAST_CHANCE_TIMEOUT + 1 * HZ - jiffies; /* And terminate BH thread if the frame is "stuck" */ if (pending && timeout < 0) { wiphy_warn(priv->hw->wiphy, "Timeout waiting for TX confirm (%d/%d pending, %ld vs %lu).\n", priv->hw_bufs_used, pending, timestamp, jiffies); break; } } else if (!priv->device_can_sleep && !atomic_read(&priv->recent_scan)) { pr_debug("[BH] Device wakedown. Timeout.\n"); cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0); priv->device_can_sleep = true; } goto done; } else if (suspend) { pr_debug("[BH] Device suspend.\n"); if (priv->powersave_enabled) { pr_debug("[BH] Device wakedown. Suspend.\n"); cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, 0); priv->device_can_sleep = true; } atomic_set(&priv->bh_suspend, CW1200_BH_SUSPENDED); wake_up(&priv->bh_evt_wq); status = wait_event_interruptible(priv->bh_wq, CW1200_BH_RESUME == atomic_read(&priv->bh_suspend)); if (status < 0) { wiphy_err(priv->hw->wiphy, "Failed to wait for resume: %ld.\n", status); break; } pr_debug("[BH] Device resume.\n"); atomic_set(&priv->bh_suspend, CW1200_BH_RESUMED); wake_up(&priv->bh_evt_wq); atomic_inc(&priv->bh_rx); goto done; } rx: tx += pending_tx; pending_tx = 0; if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) break; /* Don't bother trying to rx unless we have data to read */ if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); if (ret < 0) break; /* Double up here if there's more data.. */ if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) { ret = cw1200_bh_rx_helper(priv, &ctrl_reg, &tx); if (ret < 0) break; } } tx: if (tx) { tx = 0; BUG_ON(priv->hw_bufs_used > priv->wsm_caps.input_buffers); tx_burst = priv->wsm_caps.input_buffers - priv->hw_bufs_used; tx_allowed = tx_burst > 0; if (!tx_allowed) { /* Buffers full. Ensure we process tx * after we handle rx.. */ pending_tx = tx; goto done_rx; } ret = cw1200_bh_tx_helper(priv, &pending_tx, &tx_burst); if (ret < 0) break; if (ret > 0) /* More to transmit */ tx = ret; /* Re-read ctrl reg */ if (cw1200_bh_read_ctrl_reg(priv, &ctrl_reg)) break; } done_rx: if (priv->bh_error) break; if (ctrl_reg & ST90TDS_CONT_NEXT_LEN_MASK) goto rx; if (tx) goto tx; done: /* Re-enable device interrupts */ priv->hwbus_ops->lock(priv->hwbus_priv); __cw1200_irq_enable(priv, 1); priv->hwbus_ops->unlock(priv->hwbus_priv); } /* Explicitly disable device interrupts */ priv->hwbus_ops->lock(priv->hwbus_priv); __cw1200_irq_enable(priv, 0); priv->hwbus_ops->unlock(priv->hwbus_priv); if (!term) { pr_err("[BH] Fatal error, exiting.\n"); priv->bh_error = 1; /* TODO: schedule_work(recovery) */ } return 0; }
linux-master
drivers/net/wireless/st/cw1200/bh.c
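Editor's note: a minimal standalone sketch of the WSM header decode and the control-word "piggyback" trick used by the BH receive path above. The 12-bit id / 3-bit sequence split and the length-in-16-bit-words rule are taken from the driver code; the mask value, the struct layout and the sample numbers are assumptions made up for the demo, not constants from hwio.h.

#include <stdint.h>
#include <stdio.h>

#define DEMO_NEXT_LEN_MASK 0x0FFFu	/* assumed layout of the control word */

struct demo_wsm_hdr {
	uint16_t len;	/* message length in bytes */
	uint16_t id;	/* bits 0..11: message id, bits 13..15: sequence */
};

static unsigned demo_wsm_id(const struct demo_wsm_hdr *h)  { return h->id & 0x0FFFu; }
static unsigned demo_wsm_seq(const struct demo_wsm_hdr *h) { return (h->id >> 13) & 7u; }

int main(void)
{
	/* A confirmation-class message (bit 10 set) carrying sequence 5. */
	struct demo_wsm_hdr h = { .len = 24, .id = (5u << 13) | 0x0404u };
	/* Piggybacked control word read back in the same bus transfer. */
	unsigned ctrl = 0x2010u;

	/* The low bits of the control word give the size of the *next*
	 * pending message, counted in 16-bit words, hence the "* 2". */
	printf("id=0x%03x seq=%u next_len=%u bytes\n",
	       demo_wsm_id(&h), demo_wsm_seq(&h),
	       (ctrl & DEMO_NEXT_LEN_MASK) * 2u);
	return 0;
}

Compiled and run, this prints "id=0x404 seq=5 next_len=32 bytes", mirroring how the helper decides whether another read is pending.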
// SPDX-License-Identifier: GPL-2.0-only /* * Firmware I/O code for mac80211 ST-Ericsson CW1200 drivers * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> * * Based on: * ST-Ericsson UMAC CW1200 driver which is * Copyright (c) 2010, ST-Ericsson * Author: Ajitpal Singh <[email protected]> */ #include <linux/vmalloc.h> #include <linux/sched.h> #include <linux/firmware.h> #include "cw1200.h" #include "fwio.h" #include "hwio.h" #include "hwbus.h" #include "bh.h" static int cw1200_get_hw_type(u32 config_reg_val, int *major_revision) { int hw_type = -1; u32 silicon_type = (config_reg_val >> 24) & 0x7; u32 silicon_vers = (config_reg_val >> 31) & 0x1; switch (silicon_type) { case 0x00: *major_revision = 1; hw_type = HIF_9000_SILICON_VERSATILE; break; case 0x01: case 0x02: /* CW1x00 */ case 0x04: /* CW1x60 */ *major_revision = silicon_type; if (silicon_vers) hw_type = HIF_8601_VERSATILE; else hw_type = HIF_8601_SILICON; break; default: break; } return hw_type; } static int cw1200_load_firmware_cw1200(struct cw1200_common *priv) { int ret, block, num_blocks; unsigned i; u32 val32; u32 put = 0, get = 0; u8 *buf = NULL; const char *fw_path; const struct firmware *firmware = NULL; /* Macroses are local. */ #define APB_WRITE(reg, val) \ do { \ ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \ if (ret < 0) \ goto exit; \ } while (0) #define APB_WRITE2(reg, val) \ do { \ ret = cw1200_apb_write_32(priv, CW1200_APB(reg), (val)); \ if (ret < 0) \ goto free_buffer; \ } while (0) #define APB_READ(reg, val) \ do { \ ret = cw1200_apb_read_32(priv, CW1200_APB(reg), &(val)); \ if (ret < 0) \ goto free_buffer; \ } while (0) #define REG_WRITE(reg, val) \ do { \ ret = cw1200_reg_write_32(priv, (reg), (val)); \ if (ret < 0) \ goto exit; \ } while (0) #define REG_READ(reg, val) \ do { \ ret = cw1200_reg_read_32(priv, (reg), &(val)); \ if (ret < 0) \ goto exit; \ } while (0) switch (priv->hw_revision) { case CW1200_HW_REV_CUT10: fw_path = FIRMWARE_CUT10; if (!priv->sdd_path) priv->sdd_path = SDD_FILE_10; break; case CW1200_HW_REV_CUT11: fw_path = FIRMWARE_CUT11; if (!priv->sdd_path) priv->sdd_path = SDD_FILE_11; break; case CW1200_HW_REV_CUT20: fw_path = FIRMWARE_CUT20; if (!priv->sdd_path) priv->sdd_path = SDD_FILE_20; break; case CW1200_HW_REV_CUT22: fw_path = FIRMWARE_CUT22; if (!priv->sdd_path) priv->sdd_path = SDD_FILE_22; break; case CW1X60_HW_REV: fw_path = FIRMWARE_CW1X60; if (!priv->sdd_path) priv->sdd_path = SDD_FILE_CW1X60; break; default: pr_err("Invalid silicon revision %d.\n", priv->hw_revision); return -EINVAL; } /* Initialize common registers */ APB_WRITE(DOWNLOAD_IMAGE_SIZE_REG, DOWNLOAD_ARE_YOU_HERE); APB_WRITE(DOWNLOAD_PUT_REG, 0); APB_WRITE(DOWNLOAD_GET_REG, 0); APB_WRITE(DOWNLOAD_STATUS_REG, DOWNLOAD_PENDING); APB_WRITE(DOWNLOAD_FLAGS_REG, 0); /* Write the NOP Instruction */ REG_WRITE(ST90TDS_SRAM_BASE_ADDR_REG_ID, 0xFFF20000); REG_WRITE(ST90TDS_AHB_DPORT_REG_ID, 0xEAFFFFFE); /* Release CPU from RESET */ REG_READ(ST90TDS_CONFIG_REG_ID, val32); val32 &= ~ST90TDS_CONFIG_CPU_RESET_BIT; REG_WRITE(ST90TDS_CONFIG_REG_ID, val32); /* Enable Clock */ val32 &= ~ST90TDS_CONFIG_CPU_CLK_DIS_BIT; REG_WRITE(ST90TDS_CONFIG_REG_ID, val32); /* Load a firmware file */ ret = request_firmware(&firmware, fw_path, priv->pdev); if (ret) { pr_err("Can't load firmware file %s.\n", fw_path); goto exit; } buf = kmalloc(DOWNLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); if (!buf) { pr_err("Can't allocate firmware load buffer.\n"); ret = -ENOMEM; goto firmware_release; } /* Check if the 
bootloader is ready */ for (i = 0; i < 100; i += 1 + i / 2) { APB_READ(DOWNLOAD_IMAGE_SIZE_REG, val32); if (val32 == DOWNLOAD_I_AM_HERE) break; mdelay(i); } /* End of for loop */ if (val32 != DOWNLOAD_I_AM_HERE) { pr_err("Bootloader is not ready.\n"); ret = -ETIMEDOUT; goto free_buffer; } /* Calculcate number of download blocks */ num_blocks = (firmware->size - 1) / DOWNLOAD_BLOCK_SIZE + 1; /* Updating the length in Download Ctrl Area */ val32 = firmware->size; /* Explicit cast from size_t to u32 */ APB_WRITE2(DOWNLOAD_IMAGE_SIZE_REG, val32); /* Firmware downloading loop */ for (block = 0; block < num_blocks; block++) { size_t tx_size; size_t block_size; /* check the download status */ APB_READ(DOWNLOAD_STATUS_REG, val32); if (val32 != DOWNLOAD_PENDING) { pr_err("Bootloader reported error %d.\n", val32); ret = -EIO; goto free_buffer; } /* loop until put - get <= 24K */ for (i = 0; i < 100; i++) { APB_READ(DOWNLOAD_GET_REG, get); if ((put - get) <= (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) break; mdelay(i); } if ((put - get) > (DOWNLOAD_FIFO_SIZE - DOWNLOAD_BLOCK_SIZE)) { pr_err("Timeout waiting for FIFO.\n"); ret = -ETIMEDOUT; goto free_buffer; } /* calculate the block size */ tx_size = block_size = min_t(size_t, firmware->size - put, DOWNLOAD_BLOCK_SIZE); memcpy(buf, &firmware->data[put], block_size); if (block_size < DOWNLOAD_BLOCK_SIZE) { memset(&buf[block_size], 0, DOWNLOAD_BLOCK_SIZE - block_size); tx_size = DOWNLOAD_BLOCK_SIZE; } /* send the block to sram */ ret = cw1200_apb_write(priv, CW1200_APB(DOWNLOAD_FIFO_OFFSET + (put & (DOWNLOAD_FIFO_SIZE - 1))), buf, tx_size); if (ret < 0) { pr_err("Can't write firmware block @ %d!\n", put & (DOWNLOAD_FIFO_SIZE - 1)); goto free_buffer; } /* update the put register */ put += block_size; APB_WRITE2(DOWNLOAD_PUT_REG, put); } /* End of firmware download loop */ /* Wait for the download completion */ for (i = 0; i < 300; i += 1 + i / 2) { APB_READ(DOWNLOAD_STATUS_REG, val32); if (val32 != DOWNLOAD_PENDING) break; mdelay(i); } if (val32 != DOWNLOAD_SUCCESS) { pr_err("Wait for download completion failed: 0x%.8X\n", val32); ret = -ETIMEDOUT; goto free_buffer; } else { pr_info("Firmware download completed.\n"); ret = 0; } free_buffer: kfree(buf); firmware_release: release_firmware(firmware); exit: return ret; #undef APB_WRITE #undef APB_WRITE2 #undef APB_READ #undef REG_WRITE #undef REG_READ } static int config_reg_read(struct cw1200_common *priv, u32 *val) { switch (priv->hw_type) { case HIF_9000_SILICON_VERSATILE: { u16 val16; int ret = cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16); if (ret < 0) return ret; *val = val16; return 0; } case HIF_8601_VERSATILE: case HIF_8601_SILICON: default: cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, val); break; } return 0; } static int config_reg_write(struct cw1200_common *priv, u32 val) { switch (priv->hw_type) { case HIF_9000_SILICON_VERSATILE: return cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID, (u16)val); case HIF_8601_VERSATILE: case HIF_8601_SILICON: default: return cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val); } return 0; } int cw1200_load_firmware(struct cw1200_common *priv) { int ret; int i; u32 val32; u16 val16; int major_revision = -1; /* Read CONFIG Register */ ret = cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); if (ret < 0) { pr_err("Can't read config register.\n"); goto out; } if (val32 == 0 || val32 == 0xffffffff) { pr_err("Bad config register value (0x%08x)\n", val32); ret = -EIO; goto out; } ret = cw1200_get_hw_type(val32, &major_revision); if (ret < 0) { 
pr_err("Can't deduce hardware type.\n"); goto out; } priv->hw_type = ret; /* Set DPLL Reg value, and read back to confirm writes work */ ret = cw1200_reg_write_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, cw1200_dpll_from_clk(priv->hw_refclk)); if (ret < 0) { pr_err("Can't write DPLL register.\n"); goto out; } msleep(20); ret = cw1200_reg_read_32(priv, ST90TDS_TSET_GEN_R_W_REG_ID, &val32); if (ret < 0) { pr_err("Can't read DPLL register.\n"); goto out; } if (val32 != cw1200_dpll_from_clk(priv->hw_refclk)) { pr_err("Unable to initialise DPLL register. Wrote 0x%.8X, Read 0x%.8X.\n", cw1200_dpll_from_clk(priv->hw_refclk), val32); ret = -EIO; goto out; } /* Set wakeup bit in device */ ret = cw1200_reg_read_16(priv, ST90TDS_CONTROL_REG_ID, &val16); if (ret < 0) { pr_err("set_wakeup: can't read control register.\n"); goto out; } ret = cw1200_reg_write_16(priv, ST90TDS_CONTROL_REG_ID, val16 | ST90TDS_CONT_WUP_BIT); if (ret < 0) { pr_err("set_wakeup: can't write control register.\n"); goto out; } /* Wait for wakeup */ for (i = 0; i < 300; i += (1 + i / 2)) { ret = cw1200_reg_read_16(priv, ST90TDS_CONTROL_REG_ID, &val16); if (ret < 0) { pr_err("wait_for_wakeup: can't read control register.\n"); goto out; } if (val16 & ST90TDS_CONT_RDY_BIT) break; msleep(i); } if ((val16 & ST90TDS_CONT_RDY_BIT) == 0) { pr_err("wait_for_wakeup: device is not responding.\n"); ret = -ETIMEDOUT; goto out; } switch (major_revision) { case 1: /* CW1200 Hardware detection logic : Check for CUT1.1 */ ret = cw1200_ahb_read_32(priv, CW1200_CUT_ID_ADDR, &val32); if (ret) { pr_err("HW detection: can't read CUT ID.\n"); goto out; } switch (val32) { case CW1200_CUT_11_ID_STR: pr_info("CW1x00 Cut 1.1 silicon detected.\n"); priv->hw_revision = CW1200_HW_REV_CUT11; break; default: pr_info("CW1x00 Cut 1.0 silicon detected.\n"); priv->hw_revision = CW1200_HW_REV_CUT10; break; } /* According to ST-E, CUT<2.0 has busted BA TID0-3. Just disable it entirely... 
*/ priv->ba_rx_tid_mask = 0; priv->ba_tx_tid_mask = 0; break; case 2: { u32 ar1, ar2, ar3; ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR, &ar1); if (ret) { pr_err("(1) HW detection: can't read CUT ID\n"); goto out; } ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 4, &ar2); if (ret) { pr_err("(2) HW detection: can't read CUT ID.\n"); goto out; } ret = cw1200_ahb_read_32(priv, CW1200_CUT2_ID_ADDR + 8, &ar3); if (ret) { pr_err("(3) HW detection: can't read CUT ID.\n"); goto out; } if (ar1 == CW1200_CUT_22_ID_STR1 && ar2 == CW1200_CUT_22_ID_STR2 && ar3 == CW1200_CUT_22_ID_STR3) { pr_info("CW1x00 Cut 2.2 silicon detected.\n"); priv->hw_revision = CW1200_HW_REV_CUT22; } else { pr_info("CW1x00 Cut 2.0 silicon detected.\n"); priv->hw_revision = CW1200_HW_REV_CUT20; } break; } case 4: pr_info("CW1x60 silicon detected.\n"); priv->hw_revision = CW1X60_HW_REV; break; default: pr_err("Unsupported silicon major revision %d.\n", major_revision); ret = -ENOTSUPP; goto out; } /* Checking for access mode */ ret = config_reg_read(priv, &val32); if (ret < 0) { pr_err("Can't read config register.\n"); goto out; } if (!(val32 & ST90TDS_CONFIG_ACCESS_MODE_BIT)) { pr_err("Device is already in QUEUE mode!\n"); ret = -EINVAL; goto out; } switch (priv->hw_type) { case HIF_8601_SILICON: if (priv->hw_revision == CW1X60_HW_REV) { pr_err("Can't handle CW1160/1260 firmware load yet.\n"); ret = -ENOTSUPP; goto out; } ret = cw1200_load_firmware_cw1200(priv); break; default: pr_err("Can't perform firmware load for hw type %d.\n", priv->hw_type); ret = -ENOTSUPP; goto out; } if (ret < 0) { pr_err("Firmware load error.\n"); goto out; } /* Enable interrupt signalling */ priv->hwbus_ops->lock(priv->hwbus_priv); ret = __cw1200_irq_enable(priv, 1); priv->hwbus_ops->unlock(priv->hwbus_priv); if (ret < 0) goto unsubscribe; /* Configure device for MESSSAGE MODE */ ret = config_reg_read(priv, &val32); if (ret < 0) { pr_err("Can't read config register.\n"); goto unsubscribe; } ret = config_reg_write(priv, val32 & ~ST90TDS_CONFIG_ACCESS_MODE_BIT); if (ret < 0) { pr_err("Can't write config register.\n"); goto unsubscribe; } /* Unless we read the CONFIG Register we are * not able to get an interrupt */ mdelay(10); config_reg_read(priv, &val32); out: return ret; unsubscribe: /* Disable interrupt signalling */ priv->hwbus_ops->lock(priv->hwbus_priv); ret = __cw1200_irq_enable(priv, 0); priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; }
linux-master
drivers/net/wireless/st/cw1200/fwio.c
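Editor's note: a hedged sketch of the bootloader FIFO flow control in the firmware download loop above. The rule itself (write a block only while put - get leaves room for one more block, and place it at put modulo the FIFO size) mirrors the driver code; DEMO_FIFO_SIZE and DEMO_BLOCK_SIZE are invented demo values, not the real DOWNLOAD_* constants.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define DEMO_FIFO_SIZE  0x4000u	/* assumed for the demo */
#define DEMO_BLOCK_SIZE 1024u	/* assumed for the demo */

/* put/get are free-running counters; unsigned subtraction keeps the
 * in-flight distance correct even after put wraps around. */
static bool demo_fifo_has_room(uint32_t put, uint32_t get)
{
	return (put - get) <= (DEMO_FIFO_SIZE - DEMO_BLOCK_SIZE);
}

int main(void)
{
	uint32_t put = 0xFFFFFC00u;	/* host counter, about to wrap */
	uint32_t get = 0xFFFFE000u;	/* device counter */

	printf("in flight: %u bytes, room for next block: %s\n",
	       put - get, demo_fifo_has_room(put, get) ? "yes" : "no");

	/* The block lands at put modulo the FIFO size inside the window. */
	printf("write offset: 0x%x\n", put & (DEMO_FIFO_SIZE - 1));
	return 0;
}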
// SPDX-License-Identifier: GPL-2.0-only /* * Scan implementation for ST-Ericsson CW1200 mac80211 drivers * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> */ #include <linux/sched.h> #include "cw1200.h" #include "scan.h" #include "sta.h" #include "pm.h" static void cw1200_scan_restart_delayed(struct cw1200_common *priv); static int cw1200_scan_start(struct cw1200_common *priv, struct wsm_scan *scan) { int ret, i; int tmo = 2000; switch (priv->join_status) { case CW1200_JOIN_STATUS_PRE_STA: case CW1200_JOIN_STATUS_JOINING: return -EBUSY; default: break; } wiphy_dbg(priv->hw->wiphy, "[SCAN] hw req, type %d, %d channels, flags: 0x%x.\n", scan->type, scan->num_channels, scan->flags); for (i = 0; i < scan->num_channels; ++i) tmo += scan->ch[i].max_chan_time + 10; cancel_delayed_work_sync(&priv->clear_recent_scan_work); atomic_set(&priv->scan.in_progress, 1); atomic_set(&priv->recent_scan, 1); cw1200_pm_stay_awake(&priv->pm_state, msecs_to_jiffies(tmo)); queue_delayed_work(priv->workqueue, &priv->scan.timeout, msecs_to_jiffies(tmo)); ret = wsm_scan(priv, scan); if (ret) { atomic_set(&priv->scan.in_progress, 0); cancel_delayed_work_sync(&priv->scan.timeout); cw1200_scan_restart_delayed(priv); } return ret; } int cw1200_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { struct cw1200_common *priv = hw->priv; struct cfg80211_scan_request *req = &hw_req->req; struct wsm_template_frame frame = { .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST, }; int i, ret; if (!priv->vif) return -EINVAL; /* Scan when P2P_GO corrupt firmware MiniAP mode */ if (priv->join_status == CW1200_JOIN_STATUS_AP) return -EOPNOTSUPP; if (req->n_ssids == 1 && !req->ssids[0].ssid_len) req->n_ssids = 0; wiphy_dbg(hw->wiphy, "[SCAN] Scan request for %d SSIDs.\n", req->n_ssids); if (req->n_ssids > WSM_SCAN_MAX_NUM_OF_SSIDS) return -EINVAL; frame.skb = ieee80211_probereq_get(hw, priv->vif->addr, NULL, 0, req->ie_len); if (!frame.skb) return -ENOMEM; if (req->ie_len) skb_put_data(frame.skb, req->ie, req->ie_len); /* will be unlocked in cw1200_scan_work() */ down(&priv->scan.lock); mutex_lock(&priv->conf_mutex); ret = wsm_set_template_frame(priv, &frame); if (!ret) { /* Host want to be the probe responder. */ ret = wsm_set_probe_responder(priv, true); } if (ret) { mutex_unlock(&priv->conf_mutex); up(&priv->scan.lock); dev_kfree_skb(frame.skb); return ret; } wsm_lock_tx(priv); BUG_ON(priv->scan.req); priv->scan.req = req; priv->scan.n_ssids = 0; priv->scan.status = 0; priv->scan.begin = &req->channels[0]; priv->scan.curr = priv->scan.begin; priv->scan.end = &req->channels[req->n_channels]; priv->scan.output_power = priv->output_power; for (i = 0; i < req->n_ssids; ++i) { struct wsm_ssid *dst = &priv->scan.ssids[priv->scan.n_ssids]; memcpy(&dst->ssid[0], req->ssids[i].ssid, sizeof(dst->ssid)); dst->length = req->ssids[i].ssid_len; ++priv->scan.n_ssids; } mutex_unlock(&priv->conf_mutex); dev_kfree_skb(frame.skb); queue_work(priv->workqueue, &priv->scan.work); return 0; } void cw1200_scan_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, scan.work); struct ieee80211_channel **it; struct wsm_scan scan = { .type = WSM_SCAN_TYPE_FOREGROUND, .flags = WSM_SCAN_FLAG_SPLIT_METHOD, }; bool first_run = (priv->scan.begin == priv->scan.curr && priv->scan.begin != priv->scan.end); int i; if (first_run) { /* Firmware gets crazy if scan request is sent * when STA is joined but not yet associated. 
* Force unjoin in this case. */ if (cancel_delayed_work_sync(&priv->join_timeout) > 0) cw1200_join_timeout(&priv->join_timeout.work); } mutex_lock(&priv->conf_mutex); if (first_run) { if (priv->join_status == CW1200_JOIN_STATUS_STA && !(priv->powersave_mode.mode & WSM_PSM_PS)) { struct wsm_set_pm pm = priv->powersave_mode; pm.mode = WSM_PSM_PS; cw1200_set_pm(priv, &pm); } else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { /* FW bug: driver has to restart p2p-dev mode * after scan */ cw1200_disable_listening(priv); } } if (!priv->scan.req || (priv->scan.curr == priv->scan.end)) { struct cfg80211_scan_info info = { .aborted = priv->scan.status ? 1 : 0, }; if (priv->scan.output_power != priv->output_power) wsm_set_output_power(priv, priv->output_power * 10); if (priv->join_status == CW1200_JOIN_STATUS_STA && !(priv->powersave_mode.mode & WSM_PSM_PS)) cw1200_set_pm(priv, &priv->powersave_mode); if (priv->scan.status < 0) wiphy_warn(priv->hw->wiphy, "[SCAN] Scan failed (%d).\n", priv->scan.status); else if (priv->scan.req) wiphy_dbg(priv->hw->wiphy, "[SCAN] Scan completed.\n"); else wiphy_dbg(priv->hw->wiphy, "[SCAN] Scan canceled.\n"); priv->scan.req = NULL; cw1200_scan_restart_delayed(priv); wsm_unlock_tx(priv); mutex_unlock(&priv->conf_mutex); ieee80211_scan_completed(priv->hw, &info); up(&priv->scan.lock); return; } else { struct ieee80211_channel *first = *priv->scan.curr; for (it = priv->scan.curr + 1, i = 1; it != priv->scan.end && i < WSM_SCAN_MAX_NUM_OF_CHANNELS; ++it, ++i) { if ((*it)->band != first->band) break; if (((*it)->flags ^ first->flags) & IEEE80211_CHAN_NO_IR) break; if (!(first->flags & IEEE80211_CHAN_NO_IR) && (*it)->max_power != first->max_power) break; } scan.band = first->band; if (priv->scan.req->no_cck) scan.max_tx_rate = WSM_TRANSMIT_RATE_6; else scan.max_tx_rate = WSM_TRANSMIT_RATE_1; scan.num_probes = (first->flags & IEEE80211_CHAN_NO_IR) ? 0 : 2; scan.num_ssids = priv->scan.n_ssids; scan.ssids = &priv->scan.ssids[0]; scan.num_channels = it - priv->scan.curr; /* TODO: Is it optimal? */ scan.probe_delay = 100; /* It is not stated in WSM specification, however * FW team says that driver may not use FG scan * when joined. */ if (priv->join_status == CW1200_JOIN_STATUS_STA) { scan.type = WSM_SCAN_TYPE_BACKGROUND; scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND; } scan.ch = kcalloc(it - priv->scan.curr, sizeof(struct wsm_scan_ch), GFP_KERNEL); if (!scan.ch) { priv->scan.status = -ENOMEM; goto fail; } for (i = 0; i < scan.num_channels; ++i) { scan.ch[i].number = priv->scan.curr[i]->hw_value; if (priv->scan.curr[i]->flags & IEEE80211_CHAN_NO_IR) { scan.ch[i].min_chan_time = 50; scan.ch[i].max_chan_time = 100; } else { scan.ch[i].min_chan_time = 10; scan.ch[i].max_chan_time = 25; } } if (!(first->flags & IEEE80211_CHAN_NO_IR) && priv->scan.output_power != first->max_power) { priv->scan.output_power = first->max_power; wsm_set_output_power(priv, priv->scan.output_power * 10); } priv->scan.status = cw1200_scan_start(priv, &scan); kfree(scan.ch); if (priv->scan.status) goto fail; priv->scan.curr = it; } mutex_unlock(&priv->conf_mutex); return; fail: priv->scan.curr = priv->scan.end; mutex_unlock(&priv->conf_mutex); queue_work(priv->workqueue, &priv->scan.work); return; } static void cw1200_scan_restart_delayed(struct cw1200_common *priv) { /* FW bug: driver has to restart p2p-dev mode after scan. 
*/ if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { cw1200_enable_listening(priv); cw1200_update_filtering(priv); } if (priv->delayed_unjoin) { priv->delayed_unjoin = false; if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) wsm_unlock_tx(priv); } else if (priv->delayed_link_loss) { wiphy_dbg(priv->hw->wiphy, "[CQM] Requeue BSS loss.\n"); priv->delayed_link_loss = 0; cw1200_cqm_bssloss_sm(priv, 1, 0, 0); } } static void cw1200_scan_complete(struct cw1200_common *priv) { queue_delayed_work(priv->workqueue, &priv->clear_recent_scan_work, HZ); if (priv->scan.direct_probe) { wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe complete.\n"); cw1200_scan_restart_delayed(priv); priv->scan.direct_probe = 0; up(&priv->scan.lock); wsm_unlock_tx(priv); } else { cw1200_scan_work(&priv->scan.work); } } void cw1200_scan_failed_cb(struct cw1200_common *priv) { if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) /* STA is stopped. */ return; if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) { priv->scan.status = -EIO; queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0); } } void cw1200_scan_complete_cb(struct cw1200_common *priv, struct wsm_scan_complete *arg) { if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) /* STA is stopped. */ return; if (cancel_delayed_work_sync(&priv->scan.timeout) > 0) { priv->scan.status = 1; queue_delayed_work(priv->workqueue, &priv->scan.timeout, 0); } } void cw1200_clear_recent_scan_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, clear_recent_scan_work.work); atomic_xchg(&priv->recent_scan, 0); } void cw1200_scan_timeout(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, scan.timeout.work); if (atomic_xchg(&priv->scan.in_progress, 0)) { if (priv->scan.status > 0) { priv->scan.status = 0; } else if (!priv->scan.status) { wiphy_warn(priv->hw->wiphy, "Timeout waiting for scan complete notification.\n"); priv->scan.status = -ETIMEDOUT; priv->scan.curr = priv->scan.end; wsm_stop_scan(priv); } cw1200_scan_complete(priv); } } void cw1200_probe_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, scan.probe_work.work); u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id); struct cw1200_queue *queue = &priv->tx_queue[queue_id]; const struct cw1200_txpriv *txpriv; struct wsm_tx *wsm; struct wsm_template_frame frame = { .frame_type = WSM_FRAME_TYPE_PROBE_REQUEST, }; struct wsm_ssid ssids[1] = {{ .length = 0, } }; struct wsm_scan_ch ch[1] = {{ .min_chan_time = 0, .max_chan_time = 10, } }; struct wsm_scan scan = { .type = WSM_SCAN_TYPE_FOREGROUND, .num_probes = 1, .probe_delay = 0, .num_channels = 1, .ssids = ssids, .ch = ch, }; u8 *ies; size_t ies_len; int ret; wiphy_dbg(priv->hw->wiphy, "[SCAN] Direct probe work.\n"); mutex_lock(&priv->conf_mutex); if (down_trylock(&priv->scan.lock)) { /* Scan is already in progress. Requeue self. */ schedule(); queue_delayed_work(priv->workqueue, &priv->scan.probe_work, msecs_to_jiffies(100)); mutex_unlock(&priv->conf_mutex); return; } /* Make sure we still have a pending probe req */ if (cw1200_queue_get_skb(queue, priv->pending_frame_id, &frame.skb, &txpriv)) { up(&priv->scan.lock); mutex_unlock(&priv->conf_mutex); wsm_unlock_tx(priv); return; } wsm = (struct wsm_tx *)frame.skb->data; scan.max_tx_rate = wsm->max_tx_rate; scan.band = (priv->channel->band == NL80211_BAND_5GHZ) ? 
WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; if (priv->join_status == CW1200_JOIN_STATUS_STA || priv->join_status == CW1200_JOIN_STATUS_IBSS) { scan.type = WSM_SCAN_TYPE_BACKGROUND; scan.flags = WSM_SCAN_FLAG_FORCE_BACKGROUND; } ch[0].number = priv->channel->hw_value; skb_pull(frame.skb, txpriv->offset); ies = &frame.skb->data[sizeof(struct ieee80211_hdr_3addr)]; ies_len = frame.skb->len - sizeof(struct ieee80211_hdr_3addr); if (ies_len) { u8 *ssidie = (u8 *)cfg80211_find_ie(WLAN_EID_SSID, ies, ies_len); if (ssidie && ssidie[1] && ssidie[1] <= sizeof(ssids[0].ssid)) { u8 *nextie = &ssidie[2 + ssidie[1]]; /* Remove SSID from the IE list. It has to be provided * as a separate argument in cw1200_scan_start call */ /* Store SSID localy */ ssids[0].length = ssidie[1]; memcpy(ssids[0].ssid, &ssidie[2], ssids[0].length); scan.num_ssids = 1; /* Remove SSID from IE list */ ssidie[1] = 0; memmove(&ssidie[2], nextie, &ies[ies_len] - nextie); skb_trim(frame.skb, frame.skb->len - ssids[0].length); } } /* FW bug: driver has to restart p2p-dev mode after scan */ if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) cw1200_disable_listening(priv); ret = wsm_set_template_frame(priv, &frame); priv->scan.direct_probe = 1; if (!ret) { wsm_flush_tx(priv); ret = cw1200_scan_start(priv, &scan); } mutex_unlock(&priv->conf_mutex); skb_push(frame.skb, txpriv->offset); if (!ret) IEEE80211_SKB_CB(frame.skb)->flags |= IEEE80211_TX_STAT_ACK; BUG_ON(cw1200_queue_remove(queue, priv->pending_frame_id)); if (ret) { priv->scan.direct_probe = 0; up(&priv->scan.lock); wsm_unlock_tx(priv); } return; }
linux-master
drivers/net/wireless/st/cw1200/scan.c
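Editor's note: the channel-batching predicate from cw1200_scan_work() above, extracted into a runnable standalone form. The three checks (same band, same no-IR status, and the same TX power limit unless the scan is passive) follow the driver; the struct and flag constant here are simplified stand-ins for the mac80211 types.

#include <stdbool.h>
#include <stdio.h>

#define DEMO_CHAN_NO_IR 0x1	/* stand-in for IEEE80211_CHAN_NO_IR */

struct demo_channel {
	int band;
	unsigned flags;
	int max_power;
};

/* May channel c share one hardware scan request with the first channel
 * of the current batch? */
static bool demo_same_scan_group(const struct demo_channel *first,
				 const struct demo_channel *c)
{
	if (c->band != first->band)
		return false;
	if ((c->flags ^ first->flags) & DEMO_CHAN_NO_IR)
		return false;
	if (!(first->flags & DEMO_CHAN_NO_IR) &&
	    c->max_power != first->max_power)
		return false;
	return true;
}

int main(void)
{
	struct demo_channel a = { .band = 0, .flags = 0, .max_power = 20 };
	struct demo_channel b = { .band = 0, .flags = 0, .max_power = 17 };
	struct demo_channel c = { .band = 0, .flags = DEMO_CHAN_NO_IR, .max_power = 20 };

	printf("a+b same group: %d\n", demo_same_scan_group(&a, &b)); /* 0: power differs */
	printf("a+c same group: %d\n", demo_same_scan_group(&a, &c)); /* 0: NO_IR differs */
	return 0;
}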
// SPDX-License-Identifier: GPL-2.0-only /* * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> */ #include <net/mac80211.h> #include <linux/sched.h> #include <linux/jiffies.h> #include "queue.h" #include "cw1200.h" #include "debug.h" /* private */ struct cw1200_queue_item { struct list_head head; struct sk_buff *skb; u32 packet_id; unsigned long queue_timestamp; unsigned long xmit_timestamp; struct cw1200_txpriv txpriv; u8 generation; }; static inline void __cw1200_queue_lock(struct cw1200_queue *queue) { struct cw1200_queue_stats *stats = queue->stats; if (queue->tx_locked_cnt++ == 0) { pr_debug("[TX] Queue %d is locked.\n", queue->queue_id); ieee80211_stop_queue(stats->priv->hw, queue->queue_id); } } static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) { struct cw1200_queue_stats *stats = queue->stats; BUG_ON(!queue->tx_locked_cnt); if (--queue->tx_locked_cnt == 0) { pr_debug("[TX] Queue %d is unlocked.\n", queue->queue_id); ieee80211_wake_queue(stats->priv->hw, queue->queue_id); } } static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation, u8 *queue_id, u8 *item_generation, u8 *item_id) { *item_id = (packet_id >> 0) & 0xFF; *item_generation = (packet_id >> 8) & 0xFF; *queue_id = (packet_id >> 16) & 0xFF; *queue_generation = (packet_id >> 24) & 0xFF; } static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id, u8 item_generation, u8 item_id) { return ((u32)item_id << 0) | ((u32)item_generation << 8) | ((u32)queue_id << 16) | ((u32)queue_generation << 24); } static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats, struct list_head *gc_list) { struct cw1200_queue_item *item, *tmp; list_for_each_entry_safe(item, tmp, gc_list, head) { list_del(&item->head); stats->skb_dtor(stats->priv, item->skb, &item->txpriv); kfree(item); } } static void cw1200_queue_register_post_gc(struct list_head *gc_list, struct cw1200_queue_item *item) { struct cw1200_queue_item *gc_item; gc_item = kmemdup(item, sizeof(struct cw1200_queue_item), GFP_ATOMIC); BUG_ON(!gc_item); list_add_tail(&gc_item->head, gc_list); } static void __cw1200_queue_gc(struct cw1200_queue *queue, struct list_head *head, bool unlock) { struct cw1200_queue_stats *stats = queue->stats; struct cw1200_queue_item *item = NULL, *iter, *tmp; bool wakeup_stats = false; list_for_each_entry_safe(iter, tmp, &queue->queue, head) { if (time_is_after_jiffies(iter->queue_timestamp + queue->ttl)) { item = iter; break; } --queue->num_queued; --queue->link_map_cache[iter->txpriv.link_id]; spin_lock_bh(&stats->lock); --stats->num_queued; if (!--stats->link_map_cache[iter->txpriv.link_id]) wakeup_stats = true; spin_unlock_bh(&stats->lock); cw1200_debug_tx_ttl(stats->priv); cw1200_queue_register_post_gc(head, iter); iter->skb = NULL; list_move_tail(&iter->head, &queue->free_pool); } if (wakeup_stats) wake_up(&stats->wait_link_id_empty); if (queue->overfull) { if (queue->num_queued <= (queue->capacity >> 1)) { queue->overfull = false; if (unlock) __cw1200_queue_unlock(queue); } else if (item) { unsigned long tmo = item->queue_timestamp + queue->ttl; mod_timer(&queue->gc, tmo); cw1200_pm_stay_awake(&stats->priv->pm_state, tmo - jiffies); } } } static void cw1200_queue_gc(struct timer_list *t) { LIST_HEAD(list); struct cw1200_queue *queue = from_timer(queue, t, gc); spin_lock_bh(&queue->lock); __cw1200_queue_gc(queue, &list, true); spin_unlock_bh(&queue->lock); cw1200_queue_post_gc(queue->stats, 
&list); } int cw1200_queue_stats_init(struct cw1200_queue_stats *stats, size_t map_capacity, cw1200_queue_skb_dtor_t skb_dtor, struct cw1200_common *priv) { memset(stats, 0, sizeof(*stats)); stats->map_capacity = map_capacity; stats->skb_dtor = skb_dtor; stats->priv = priv; spin_lock_init(&stats->lock); init_waitqueue_head(&stats->wait_link_id_empty); stats->link_map_cache = kcalloc(map_capacity, sizeof(int), GFP_KERNEL); if (!stats->link_map_cache) return -ENOMEM; return 0; } int cw1200_queue_init(struct cw1200_queue *queue, struct cw1200_queue_stats *stats, u8 queue_id, size_t capacity, unsigned long ttl) { size_t i; memset(queue, 0, sizeof(*queue)); queue->stats = stats; queue->capacity = capacity; queue->queue_id = queue_id; queue->ttl = ttl; INIT_LIST_HEAD(&queue->queue); INIT_LIST_HEAD(&queue->pending); INIT_LIST_HEAD(&queue->free_pool); spin_lock_init(&queue->lock); timer_setup(&queue->gc, cw1200_queue_gc, 0); queue->pool = kcalloc(capacity, sizeof(struct cw1200_queue_item), GFP_KERNEL); if (!queue->pool) return -ENOMEM; queue->link_map_cache = kcalloc(stats->map_capacity, sizeof(int), GFP_KERNEL); if (!queue->link_map_cache) { kfree(queue->pool); queue->pool = NULL; return -ENOMEM; } for (i = 0; i < capacity; ++i) list_add_tail(&queue->pool[i].head, &queue->free_pool); return 0; } int cw1200_queue_clear(struct cw1200_queue *queue) { int i; LIST_HEAD(gc_list); struct cw1200_queue_stats *stats = queue->stats; struct cw1200_queue_item *item, *tmp; spin_lock_bh(&queue->lock); queue->generation++; list_splice_tail_init(&queue->queue, &queue->pending); list_for_each_entry_safe(item, tmp, &queue->pending, head) { WARN_ON(!item->skb); cw1200_queue_register_post_gc(&gc_list, item); item->skb = NULL; list_move_tail(&item->head, &queue->free_pool); } queue->num_queued = 0; queue->num_pending = 0; spin_lock_bh(&stats->lock); for (i = 0; i < stats->map_capacity; ++i) { stats->num_queued -= queue->link_map_cache[i]; stats->link_map_cache[i] -= queue->link_map_cache[i]; queue->link_map_cache[i] = 0; } spin_unlock_bh(&stats->lock); if (queue->overfull) { queue->overfull = false; __cw1200_queue_unlock(queue); } spin_unlock_bh(&queue->lock); wake_up(&stats->wait_link_id_empty); cw1200_queue_post_gc(stats, &gc_list); return 0; } void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats) { kfree(stats->link_map_cache); stats->link_map_cache = NULL; } void cw1200_queue_deinit(struct cw1200_queue *queue) { cw1200_queue_clear(queue); del_timer_sync(&queue->gc); INIT_LIST_HEAD(&queue->free_pool); kfree(queue->pool); kfree(queue->link_map_cache); queue->pool = NULL; queue->link_map_cache = NULL; queue->capacity = 0; } size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue, u32 link_id_map) { size_t ret; int i, bit; size_t map_capacity = queue->stats->map_capacity; if (!link_id_map) return 0; spin_lock_bh(&queue->lock); if (link_id_map == (u32)-1) { ret = queue->num_queued - queue->num_pending; } else { ret = 0; for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) { if (link_id_map & bit) ret += queue->link_map_cache[i]; } } spin_unlock_bh(&queue->lock); return ret; } int cw1200_queue_put(struct cw1200_queue *queue, struct sk_buff *skb, struct cw1200_txpriv *txpriv) { int ret = 0; struct cw1200_queue_stats *stats = queue->stats; if (txpriv->link_id >= queue->stats->map_capacity) return -EINVAL; spin_lock_bh(&queue->lock); if (!WARN_ON(list_empty(&queue->free_pool))) { struct cw1200_queue_item *item = list_first_entry( &queue->free_pool, struct cw1200_queue_item, head); BUG_ON(item->skb); 
list_move_tail(&item->head, &queue->queue); item->skb = skb; item->txpriv = *txpriv; item->generation = 0; item->packet_id = cw1200_queue_mk_packet_id(queue->generation, queue->queue_id, item->generation, item - queue->pool); item->queue_timestamp = jiffies; ++queue->num_queued; ++queue->link_map_cache[txpriv->link_id]; spin_lock_bh(&stats->lock); ++stats->num_queued; ++stats->link_map_cache[txpriv->link_id]; spin_unlock_bh(&stats->lock); /* TX may happen in parallel sometimes. * Leave extra queue slots so we don't overflow. */ if (queue->overfull == false && queue->num_queued >= (queue->capacity - (num_present_cpus() - 1))) { queue->overfull = true; __cw1200_queue_lock(queue); mod_timer(&queue->gc, jiffies); } } else { ret = -ENOENT; } spin_unlock_bh(&queue->lock); return ret; } int cw1200_queue_get(struct cw1200_queue *queue, u32 link_id_map, struct wsm_tx **tx, struct ieee80211_tx_info **tx_info, const struct cw1200_txpriv **txpriv) { int ret = -ENOENT; struct cw1200_queue_item *item; struct cw1200_queue_stats *stats = queue->stats; bool wakeup_stats = false; spin_lock_bh(&queue->lock); list_for_each_entry(item, &queue->queue, head) { if (link_id_map & BIT(item->txpriv.link_id)) { ret = 0; break; } } if (!WARN_ON(ret)) { *tx = (struct wsm_tx *)item->skb->data; *tx_info = IEEE80211_SKB_CB(item->skb); *txpriv = &item->txpriv; (*tx)->packet_id = item->packet_id; list_move_tail(&item->head, &queue->pending); ++queue->num_pending; --queue->link_map_cache[item->txpriv.link_id]; item->xmit_timestamp = jiffies; spin_lock_bh(&stats->lock); --stats->num_queued; if (!--stats->link_map_cache[item->txpriv.link_id]) wakeup_stats = true; spin_unlock_bh(&stats->lock); } spin_unlock_bh(&queue->lock); if (wakeup_stats) wake_up(&stats->wait_link_id_empty); return ret; } int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id) { int ret = 0; u8 queue_generation, queue_id, item_generation, item_id; struct cw1200_queue_item *item; struct cw1200_queue_stats *stats = queue->stats; cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, &item_generation, &item_id); item = &queue->pool[item_id]; spin_lock_bh(&queue->lock); BUG_ON(queue_id != queue->queue_id); if (queue_generation != queue->generation) { ret = -ENOENT; } else if (item_id >= (unsigned) queue->capacity) { WARN_ON(1); ret = -EINVAL; } else if (item->generation != item_generation) { WARN_ON(1); ret = -ENOENT; } else { --queue->num_pending; ++queue->link_map_cache[item->txpriv.link_id]; spin_lock_bh(&stats->lock); ++stats->num_queued; ++stats->link_map_cache[item->txpriv.link_id]; spin_unlock_bh(&stats->lock); item->generation = ++item_generation; item->packet_id = cw1200_queue_mk_packet_id(queue_generation, queue_id, item_generation, item_id); list_move(&item->head, &queue->queue); } spin_unlock_bh(&queue->lock); return ret; } int cw1200_queue_requeue_all(struct cw1200_queue *queue) { struct cw1200_queue_item *item, *tmp; struct cw1200_queue_stats *stats = queue->stats; spin_lock_bh(&queue->lock); list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) { --queue->num_pending; ++queue->link_map_cache[item->txpriv.link_id]; spin_lock_bh(&stats->lock); ++stats->num_queued; ++stats->link_map_cache[item->txpriv.link_id]; spin_unlock_bh(&stats->lock); ++item->generation; item->packet_id = cw1200_queue_mk_packet_id(queue->generation, queue->queue_id, item->generation, item - queue->pool); list_move(&item->head, &queue->queue); } spin_unlock_bh(&queue->lock); return 0; } int cw1200_queue_remove(struct cw1200_queue *queue, u32 
packet_id) { int ret = 0; u8 queue_generation, queue_id, item_generation, item_id; struct cw1200_queue_item *item; struct cw1200_queue_stats *stats = queue->stats; struct sk_buff *gc_skb = NULL; struct cw1200_txpriv gc_txpriv; cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, &item_generation, &item_id); item = &queue->pool[item_id]; spin_lock_bh(&queue->lock); BUG_ON(queue_id != queue->queue_id); if (queue_generation != queue->generation) { ret = -ENOENT; } else if (item_id >= (unsigned) queue->capacity) { WARN_ON(1); ret = -EINVAL; } else if (item->generation != item_generation) { WARN_ON(1); ret = -ENOENT; } else { gc_txpriv = item->txpriv; gc_skb = item->skb; item->skb = NULL; --queue->num_pending; --queue->num_queued; ++queue->num_sent; ++item->generation; /* Do not use list_move_tail here, but list_move: * try to utilize cache row. */ list_move(&item->head, &queue->free_pool); if (queue->overfull && (queue->num_queued <= (queue->capacity >> 1))) { queue->overfull = false; __cw1200_queue_unlock(queue); } } spin_unlock_bh(&queue->lock); if (gc_skb) stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv); return ret; } int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id, struct sk_buff **skb, const struct cw1200_txpriv **txpriv) { int ret = 0; u8 queue_generation, queue_id, item_generation, item_id; struct cw1200_queue_item *item; cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, &item_generation, &item_id); item = &queue->pool[item_id]; spin_lock_bh(&queue->lock); BUG_ON(queue_id != queue->queue_id); if (queue_generation != queue->generation) { ret = -ENOENT; } else if (item_id >= (unsigned) queue->capacity) { WARN_ON(1); ret = -EINVAL; } else if (item->generation != item_generation) { WARN_ON(1); ret = -ENOENT; } else { *skb = item->skb; *txpriv = &item->txpriv; } spin_unlock_bh(&queue->lock); return ret; } void cw1200_queue_lock(struct cw1200_queue *queue) { spin_lock_bh(&queue->lock); __cw1200_queue_lock(queue); spin_unlock_bh(&queue->lock); } void cw1200_queue_unlock(struct cw1200_queue *queue) { spin_lock_bh(&queue->lock); __cw1200_queue_unlock(queue); spin_unlock_bh(&queue->lock); } bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue, unsigned long *timestamp, u32 pending_frame_id) { struct cw1200_queue_item *item; bool ret; spin_lock_bh(&queue->lock); ret = !list_empty(&queue->pending); if (ret) { list_for_each_entry(item, &queue->pending, head) { if (item->packet_id != pending_frame_id) if (time_before(item->xmit_timestamp, *timestamp)) *timestamp = item->xmit_timestamp; } } spin_unlock_bh(&queue->lock); return ret; } bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats, u32 link_id_map) { bool empty = true; spin_lock_bh(&stats->lock); if (link_id_map == (u32)-1) { empty = stats->num_queued == 0; } else { int i; for (i = 0; i < stats->map_capacity; ++i) { if (link_id_map & BIT(i)) { if (stats->link_map_cache[i]) { empty = false; break; } } } } spin_unlock_bh(&stats->lock); return empty; }
linux-master
drivers/net/wireless/st/cw1200/queue.c
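Editor's note: a self-contained round-trip of the 32-bit packet_id layout used by queue.c above: item index in byte 0, item generation in byte 1, queue id in byte 2, queue generation in byte 3. Only the helper names are changed for the demo; the bit layout is exactly the one in cw1200_queue_mk_packet_id() / cw1200_queue_parse_id().

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t demo_mk_packet_id(uint8_t qgen, uint8_t qid,
				  uint8_t igen, uint8_t iid)
{
	return ((uint32_t)iid << 0) | ((uint32_t)igen << 8) |
	       ((uint32_t)qid << 16) | ((uint32_t)qgen << 24);
}

static void demo_parse_id(uint32_t id, uint8_t *qgen, uint8_t *qid,
			  uint8_t *igen, uint8_t *iid)
{
	*iid  = (id >> 0) & 0xFF;
	*igen = (id >> 8) & 0xFF;
	*qid  = (id >> 16) & 0xFF;
	*qgen = (id >> 24) & 0xFF;
}

int main(void)
{
	uint8_t qgen, qid, igen, iid;
	uint32_t id = demo_mk_packet_id(3, 2, 7, 42);

	demo_parse_id(id, &qgen, &qid, &igen, &iid);
	assert(qgen == 3 && qid == 2 && igen == 7 && iid == 42);
	printf("packet_id=0x%08x -> queue %u (gen %u), item %u (gen %u)\n",
	       id, qid, qgen, iid, igen);
	return 0;
}

The generation bytes are what let the driver detect stale ids: a requeued or flushed frame bumps its item generation, so an old confirm carrying the previous id is rejected with -ENOENT instead of touching a reused pool slot.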
// SPDX-License-Identifier: GPL-2.0-only /* * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers * DebugFS code * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> */ #include <linux/module.h> #include <linux/debugfs.h> #include <linux/seq_file.h> #include "cw1200.h" #include "debug.h" #include "fwio.h" /* join_status */ static const char * const cw1200_debug_join_status[] = { "passive", "monitor", "station (joining)", "station (not authenticated yet)", "station", "adhoc", "access point", }; /* WSM_JOIN_PREAMBLE_... */ static const char * const cw1200_debug_preamble[] = { "long", "short", "long on 1 and 2 Mbps", }; static const char * const cw1200_debug_link_id[] = { "OFF", "REQ", "SOFT", "HARD", "RESET", "RESET_REMAP", }; static const char *cw1200_debug_mode(int mode) { switch (mode) { case NL80211_IFTYPE_UNSPECIFIED: return "unspecified"; case NL80211_IFTYPE_MONITOR: return "monitor"; case NL80211_IFTYPE_STATION: return "station"; case NL80211_IFTYPE_ADHOC: return "adhoc"; case NL80211_IFTYPE_MESH_POINT: return "mesh point"; case NL80211_IFTYPE_AP: return "access point"; case NL80211_IFTYPE_P2P_CLIENT: return "p2p client"; case NL80211_IFTYPE_P2P_GO: return "p2p go"; default: return "unsupported"; } } static void cw1200_queue_status_show(struct seq_file *seq, struct cw1200_queue *q) { int i; seq_printf(seq, "Queue %d:\n", q->queue_id); seq_printf(seq, " capacity: %zu\n", q->capacity); seq_printf(seq, " queued: %zu\n", q->num_queued); seq_printf(seq, " pending: %zu\n", q->num_pending); seq_printf(seq, " sent: %zu\n", q->num_sent); seq_printf(seq, " locked: %s\n", q->tx_locked_cnt ? "yes" : "no"); seq_printf(seq, " overfull: %s\n", q->overfull ? "yes" : "no"); seq_puts(seq, " link map: 0-> "); for (i = 0; i < q->stats->map_capacity; ++i) seq_printf(seq, "%.2d ", q->link_map_cache[i]); seq_printf(seq, "<-%zu\n", q->stats->map_capacity); } static void cw1200_debug_print_map(struct seq_file *seq, struct cw1200_common *priv, const char *label, u32 map) { int i; seq_printf(seq, "%s0-> ", label); for (i = 0; i < priv->tx_queue_stats.map_capacity; ++i) seq_printf(seq, "%s ", (map & BIT(i)) ? "**" : ".."); seq_printf(seq, "<-%zu\n", priv->tx_queue_stats.map_capacity - 1); } static int cw1200_status_show(struct seq_file *seq, void *v) { int i; struct list_head *item; struct cw1200_common *priv = seq->private; struct cw1200_debug_priv *d = priv->debug; seq_puts(seq, "CW1200 Wireless LAN driver status\n"); seq_printf(seq, "Hardware: %d.%d\n", priv->wsm_caps.hw_id, priv->wsm_caps.hw_subid); seq_printf(seq, "Firmware: %s %d.%d\n", cw1200_fw_types[priv->wsm_caps.fw_type], priv->wsm_caps.fw_ver, priv->wsm_caps.fw_build); seq_printf(seq, "FW API: %d\n", priv->wsm_caps.fw_api); seq_printf(seq, "FW caps: 0x%.4X\n", priv->wsm_caps.fw_cap); seq_printf(seq, "FW label: '%s'\n", priv->wsm_caps.fw_label); seq_printf(seq, "Mode: %s%s\n", cw1200_debug_mode(priv->mode), priv->listening ? " (listening)" : ""); seq_printf(seq, "Join state: %s\n", cw1200_debug_join_status[priv->join_status]); if (priv->channel) seq_printf(seq, "Channel: %d%s\n", priv->channel->hw_value, priv->channel_switch_in_progress ? 
" (switching)" : ""); if (priv->rx_filter.promiscuous) seq_puts(seq, "Filter: promisc\n"); else if (priv->rx_filter.fcs) seq_puts(seq, "Filter: fcs\n"); if (priv->rx_filter.bssid) seq_puts(seq, "Filter: bssid\n"); if (!priv->disable_beacon_filter) seq_puts(seq, "Filter: beacons\n"); if (priv->enable_beacon || priv->mode == NL80211_IFTYPE_AP || priv->mode == NL80211_IFTYPE_ADHOC || priv->mode == NL80211_IFTYPE_MESH_POINT || priv->mode == NL80211_IFTYPE_P2P_GO) seq_printf(seq, "Beaconing: %s\n", priv->enable_beacon ? "enabled" : "disabled"); for (i = 0; i < 4; ++i) seq_printf(seq, "EDCA(%d): %d, %d, %d, %d, %d\n", i, priv->edca.params[i].cwmin, priv->edca.params[i].cwmax, priv->edca.params[i].aifns, priv->edca.params[i].txop_limit, priv->edca.params[i].max_rx_lifetime); if (priv->join_status == CW1200_JOIN_STATUS_STA) { static const char *pm_mode = "unknown"; switch (priv->powersave_mode.mode) { case WSM_PSM_ACTIVE: pm_mode = "off"; break; case WSM_PSM_PS: pm_mode = "on"; break; case WSM_PSM_FAST_PS: pm_mode = "dynamic"; break; } seq_printf(seq, "Preamble: %s\n", cw1200_debug_preamble[priv->association_mode.preamble]); seq_printf(seq, "AMPDU spcn: %d\n", priv->association_mode.mpdu_start_spacing); seq_printf(seq, "Basic rate: 0x%.8X\n", le32_to_cpu(priv->association_mode.basic_rate_set)); seq_printf(seq, "Bss lost: %d beacons\n", priv->bss_params.beacon_lost_count); seq_printf(seq, "AID: %d\n", priv->bss_params.aid); seq_printf(seq, "Rates: 0x%.8X\n", priv->bss_params.operational_rate_set); seq_printf(seq, "Powersave: %s\n", pm_mode); } seq_printf(seq, "HT: %s\n", cw1200_is_ht(&priv->ht_info) ? "on" : "off"); if (cw1200_is_ht(&priv->ht_info)) { seq_printf(seq, "Greenfield: %s\n", cw1200_ht_greenfield(&priv->ht_info) ? "yes" : "no"); seq_printf(seq, "AMPDU dens: %d\n", cw1200_ht_ampdu_density(&priv->ht_info)); } seq_printf(seq, "RSSI thold: %d\n", priv->cqm_rssi_thold); seq_printf(seq, "RSSI hyst: %d\n", priv->cqm_rssi_hyst); seq_printf(seq, "Long retr: %d\n", priv->long_frame_max_tx_count); seq_printf(seq, "Short retr: %d\n", priv->short_frame_max_tx_count); spin_lock_bh(&priv->tx_policy_cache.lock); i = 0; list_for_each(item, &priv->tx_policy_cache.used) ++i; spin_unlock_bh(&priv->tx_policy_cache.lock); seq_printf(seq, "RC in use: %d\n", i); seq_puts(seq, "\n"); for (i = 0; i < 4; ++i) { cw1200_queue_status_show(seq, &priv->tx_queue[i]); seq_puts(seq, "\n"); } cw1200_debug_print_map(seq, priv, "Link map: ", priv->link_id_map); cw1200_debug_print_map(seq, priv, "Asleep map: ", priv->sta_asleep_mask); cw1200_debug_print_map(seq, priv, "PSPOLL map: ", priv->pspoll_mask); seq_puts(seq, "\n"); for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { if (priv->link_id_db[i].status) { seq_printf(seq, "Link %d: %s, %pM\n", i + 1, cw1200_debug_link_id[priv->link_id_db[i].status], priv->link_id_db[i].mac); } } seq_puts(seq, "\n"); seq_printf(seq, "BH status: %s\n", atomic_read(&priv->bh_term) ? "terminated" : "alive"); seq_printf(seq, "Pending RX: %d\n", atomic_read(&priv->bh_rx)); seq_printf(seq, "Pending TX: %d\n", atomic_read(&priv->bh_tx)); if (priv->bh_error) seq_printf(seq, "BH errcode: %d\n", priv->bh_error); seq_printf(seq, "TX bufs: %d x %d bytes\n", priv->wsm_caps.input_buffers, priv->wsm_caps.input_buffer_size); seq_printf(seq, "Used bufs: %d\n", priv->hw_bufs_used); seq_printf(seq, "Powermgmt: %s\n", priv->powersave_enabled ? "on" : "off"); seq_printf(seq, "Device: %s\n", priv->device_can_sleep ? 
"asleep" : "awake"); spin_lock(&priv->wsm_cmd.lock); seq_printf(seq, "WSM status: %s\n", priv->wsm_cmd.done ? "idle" : "active"); seq_printf(seq, "WSM cmd: 0x%.4X (%td bytes)\n", priv->wsm_cmd.cmd, priv->wsm_cmd.len); seq_printf(seq, "WSM retval: %d\n", priv->wsm_cmd.ret); spin_unlock(&priv->wsm_cmd.lock); seq_printf(seq, "Datapath: %s\n", atomic_read(&priv->tx_lock) ? "locked" : "unlocked"); if (atomic_read(&priv->tx_lock)) seq_printf(seq, "TXlock cnt: %d\n", atomic_read(&priv->tx_lock)); seq_printf(seq, "TXed: %d\n", d->tx); seq_printf(seq, "AGG TXed: %d\n", d->tx_agg); seq_printf(seq, "MULTI TXed: %d (%d)\n", d->tx_multi, d->tx_multi_frames); seq_printf(seq, "RXed: %d\n", d->rx); seq_printf(seq, "AGG RXed: %d\n", d->rx_agg); seq_printf(seq, "TX miss: %d\n", d->tx_cache_miss); seq_printf(seq, "TX align: %d\n", d->tx_align); seq_printf(seq, "TX burst: %d\n", d->tx_burst); seq_printf(seq, "TX TTL: %d\n", d->tx_ttl); seq_printf(seq, "Scan: %s\n", atomic_read(&priv->scan.in_progress) ? "active" : "idle"); return 0; } DEFINE_SHOW_ATTRIBUTE(cw1200_status); static int cw1200_counters_show(struct seq_file *seq, void *v) { int ret; struct cw1200_common *priv = seq->private; struct wsm_mib_counters_table counters; ret = wsm_get_counters_table(priv, &counters); if (ret) return ret; #define PUT_COUNTER(tab, name) \ seq_printf(seq, "%s:" tab "%d\n", #name, \ __le32_to_cpu(counters.name)) PUT_COUNTER("\t\t", plcp_errors); PUT_COUNTER("\t\t", fcs_errors); PUT_COUNTER("\t\t", tx_packets); PUT_COUNTER("\t\t", rx_packets); PUT_COUNTER("\t\t", rx_packet_errors); PUT_COUNTER("\t", rx_decryption_failures); PUT_COUNTER("\t\t", rx_mic_failures); PUT_COUNTER("\t", rx_no_key_failures); PUT_COUNTER("\t", tx_multicast_frames); PUT_COUNTER("\t", tx_frames_success); PUT_COUNTER("\t", tx_frame_failures); PUT_COUNTER("\t", tx_frames_retried); PUT_COUNTER("\t", tx_frames_multi_retried); PUT_COUNTER("\t", rx_frame_duplicates); PUT_COUNTER("\t\t", rts_success); PUT_COUNTER("\t\t", rts_failures); PUT_COUNTER("\t\t", ack_failures); PUT_COUNTER("\t", rx_multicast_frames); PUT_COUNTER("\t", rx_frames_success); PUT_COUNTER("\t", rx_cmac_icv_errors); PUT_COUNTER("\t\t", rx_cmac_replays); PUT_COUNTER("\t", rx_mgmt_ccmp_replays); #undef PUT_COUNTER return 0; } DEFINE_SHOW_ATTRIBUTE(cw1200_counters); static ssize_t cw1200_wsm_dumps(struct file *file, const char __user *user_buf, size_t count, loff_t *ppos) { struct cw1200_common *priv = file->private_data; char buf[1]; if (!count) return -EINVAL; if (copy_from_user(buf, user_buf, 1)) return -EFAULT; if (buf[0] == '1') priv->wsm_enable_wsm_dumps = 1; else priv->wsm_enable_wsm_dumps = 0; return count; } static const struct file_operations fops_wsm_dumps = { .open = simple_open, .write = cw1200_wsm_dumps, .llseek = default_llseek, }; int cw1200_debug_init(struct cw1200_common *priv) { int ret = -ENOMEM; struct cw1200_debug_priv *d = kzalloc(sizeof(struct cw1200_debug_priv), GFP_KERNEL); priv->debug = d; if (!d) return ret; d->debugfs_phy = debugfs_create_dir("cw1200", priv->hw->wiphy->debugfsdir); debugfs_create_file("status", 0400, d->debugfs_phy, priv, &cw1200_status_fops); debugfs_create_file("counters", 0400, d->debugfs_phy, priv, &cw1200_counters_fops); debugfs_create_file("wsm_dumps", 0200, d->debugfs_phy, priv, &fops_wsm_dumps); return 0; } void cw1200_debug_release(struct cw1200_common *priv) { struct cw1200_debug_priv *d = priv->debug; if (d) { debugfs_remove_recursive(d->debugfs_phy); priv->debug = NULL; kfree(d); } }
linux-master
drivers/net/wireless/st/cw1200/debug.c
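Editor's note: a tiny userspace rendering of the link/asleep/PSPOLL bitmap format printed by cw1200_debug_print_map() above ("**" for a set link id, ".." for a clear one). The capacity and sample map are made up for the demo.

#include <stdio.h>

static void demo_print_map(const char *label, unsigned map, int capacity)
{
	int i;

	printf("%s0-> ", label);
	for (i = 0; i < capacity; ++i)
		printf("%s ", (map & (1u << i)) ? "**" : "..");
	printf("<-%d\n", capacity - 1);
}

int main(void)
{
	demo_print_map("Link map:   ", 0x0Du, 8);	/* links 0, 2 and 3 set */
	return 0;
}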
// SPDX-License-Identifier: GPL-2.0-only /* * Mac80211 power management API for ST-Ericsson CW1200 drivers * * Copyright (c) 2011, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> */ #include <linux/module.h> #include <linux/if_ether.h> #include "cw1200.h" #include "pm.h" #include "sta.h" #include "bh.h" #include "hwbus.h" #define CW1200_BEACON_SKIPPING_MULTIPLIER 3 struct cw1200_udp_port_filter { struct wsm_udp_port_filter_hdr hdr; /* Up to 4 filters are allowed. */ struct wsm_udp_port_filter filters[WSM_MAX_FILTER_ELEMENTS]; } __packed; struct cw1200_ether_type_filter { struct wsm_ether_type_filter_hdr hdr; /* Up to 4 filters are allowed. */ struct wsm_ether_type_filter filters[WSM_MAX_FILTER_ELEMENTS]; } __packed; static struct cw1200_udp_port_filter cw1200_udp_port_filter_on = { .hdr.num = 2, .filters = { [0] = { .action = WSM_FILTER_ACTION_FILTER_OUT, .type = WSM_FILTER_PORT_TYPE_DST, .port = __cpu_to_le16(67), /* DHCP Bootps */ }, [1] = { .action = WSM_FILTER_ACTION_FILTER_OUT, .type = WSM_FILTER_PORT_TYPE_DST, .port = __cpu_to_le16(68), /* DHCP Bootpc */ }, } }; static struct wsm_udp_port_filter_hdr cw1200_udp_port_filter_off = { .num = 0, }; #ifndef ETH_P_WAPI #define ETH_P_WAPI 0x88B4 #endif static struct cw1200_ether_type_filter cw1200_ether_type_filter_on = { .hdr.num = 4, .filters = { [0] = { .action = WSM_FILTER_ACTION_FILTER_IN, .type = __cpu_to_le16(ETH_P_IP), }, [1] = { .action = WSM_FILTER_ACTION_FILTER_IN, .type = __cpu_to_le16(ETH_P_PAE), }, [2] = { .action = WSM_FILTER_ACTION_FILTER_IN, .type = __cpu_to_le16(ETH_P_WAPI), }, [3] = { .action = WSM_FILTER_ACTION_FILTER_IN, .type = __cpu_to_le16(ETH_P_ARP), }, }, }; static struct wsm_ether_type_filter_hdr cw1200_ether_type_filter_off = { .num = 0, }; /* private */ struct cw1200_suspend_state { unsigned long bss_loss_tmo; unsigned long join_tmo; unsigned long direct_probe; unsigned long link_id_gc; bool beacon_skipping; u8 prev_ps_mode; }; static void cw1200_pm_stay_awake_tmo(struct timer_list *unused) { /* XXX what's the point of this ? 
*/ } int cw1200_pm_init(struct cw1200_pm_state *pm, struct cw1200_common *priv) { spin_lock_init(&pm->lock); timer_setup(&pm->stay_awake, cw1200_pm_stay_awake_tmo, 0); return 0; } void cw1200_pm_deinit(struct cw1200_pm_state *pm) { del_timer_sync(&pm->stay_awake); } void cw1200_pm_stay_awake(struct cw1200_pm_state *pm, unsigned long tmo) { long cur_tmo; spin_lock_bh(&pm->lock); cur_tmo = pm->stay_awake.expires - jiffies; if (!timer_pending(&pm->stay_awake) || cur_tmo < (long)tmo) mod_timer(&pm->stay_awake, jiffies + tmo); spin_unlock_bh(&pm->lock); } static long cw1200_suspend_work(struct delayed_work *work) { int ret = cancel_delayed_work(work); long tmo; if (ret > 0) { /* Timer is pending */ tmo = work->timer.expires - jiffies; if (tmo < 0) tmo = 0; } else { tmo = -1; } return tmo; } static int cw1200_resume_work(struct cw1200_common *priv, struct delayed_work *work, unsigned long tmo) { if ((long)tmo < 0) return 1; return queue_delayed_work(priv->workqueue, work, tmo); } int cw1200_can_suspend(struct cw1200_common *priv) { if (atomic_read(&priv->bh_rx)) { wiphy_dbg(priv->hw->wiphy, "Suspend interrupted.\n"); return 0; } return 1; } EXPORT_SYMBOL_GPL(cw1200_can_suspend); int cw1200_wow_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan) { struct cw1200_common *priv = hw->priv; struct cw1200_pm_state *pm_state = &priv->pm_state; struct cw1200_suspend_state *state; int ret; spin_lock_bh(&pm_state->lock); ret = timer_pending(&pm_state->stay_awake); spin_unlock_bh(&pm_state->lock); if (ret) return -EAGAIN; /* Do not suspend when datapath is not idle */ if (priv->tx_queue_stats.num_queued) return -EBUSY; /* Make sure there is no configuration requests in progress. */ if (!mutex_trylock(&priv->conf_mutex)) return -EBUSY; /* Ensure pending operations are done. * Note also that wow_suspend must return in ~2.5sec, before * watchdog is triggered. */ if (priv->channel_switch_in_progress) goto revert1; /* Do not suspend when join is pending */ if (priv->join_pending) goto revert1; /* Do not suspend when scanning */ if (down_trylock(&priv->scan.lock)) goto revert1; /* Lock TX. */ wsm_lock_tx_async(priv); /* Wait to avoid possible race with bh code. * But do not wait too long... */ if (wait_event_timeout(priv->bh_evt_wq, !priv->hw_bufs_used, HZ / 10) <= 0) goto revert2; /* Set UDP filter */ wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_on.hdr); /* Set ethernet frame type filter */ wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_on.hdr); /* Allocate state */ state = kzalloc(sizeof(struct cw1200_suspend_state), GFP_KERNEL); if (!state) goto revert3; /* Change to legacy PS while going to suspend */ if (!priv->vif->p2p && priv->join_status == CW1200_JOIN_STATUS_STA && priv->powersave_mode.mode != WSM_PSM_PS) { state->prev_ps_mode = priv->powersave_mode.mode; priv->powersave_mode.mode = WSM_PSM_PS; cw1200_set_pm(priv, &priv->powersave_mode); if (wait_event_interruptible_timeout(priv->ps_mode_switch_done, !priv->ps_mode_switch_in_progress, 1*HZ) <= 0) { goto revert4; } } /* Store delayed work states. 
*/ state->bss_loss_tmo = cw1200_suspend_work(&priv->bss_loss_work); state->join_tmo = cw1200_suspend_work(&priv->join_timeout); state->direct_probe = cw1200_suspend_work(&priv->scan.probe_work); state->link_id_gc = cw1200_suspend_work(&priv->link_id_gc_work); cancel_delayed_work_sync(&priv->clear_recent_scan_work); atomic_set(&priv->recent_scan, 0); /* Enable beacon skipping */ if (priv->join_status == CW1200_JOIN_STATUS_STA && priv->join_dtim_period && !priv->has_multicast_subscription) { state->beacon_skipping = true; wsm_set_beacon_wakeup_period(priv, priv->join_dtim_period, CW1200_BEACON_SKIPPING_MULTIPLIER * priv->join_dtim_period); } /* Stop serving thread */ if (cw1200_bh_suspend(priv)) goto revert5; ret = timer_pending(&priv->mcast_timeout); if (ret) goto revert6; /* Store suspend state */ pm_state->suspend_state = state; /* Enable IRQ wake */ ret = priv->hwbus_ops->power_mgmt(priv->hwbus_priv, true); if (ret) { wiphy_err(priv->hw->wiphy, "PM request failed: %d. WoW is disabled.\n", ret); cw1200_wow_resume(hw); return -EBUSY; } /* Force resume if event is coming from the device. */ if (atomic_read(&priv->bh_rx)) { cw1200_wow_resume(hw); return -EAGAIN; } return 0; revert6: WARN_ON(cw1200_bh_resume(priv)); revert5: cw1200_resume_work(priv, &priv->bss_loss_work, state->bss_loss_tmo); cw1200_resume_work(priv, &priv->join_timeout, state->join_tmo); cw1200_resume_work(priv, &priv->scan.probe_work, state->direct_probe); cw1200_resume_work(priv, &priv->link_id_gc_work, state->link_id_gc); revert4: kfree(state); revert3: wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off); wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off); revert2: wsm_unlock_tx(priv); up(&priv->scan.lock); revert1: mutex_unlock(&priv->conf_mutex); return -EBUSY; } int cw1200_wow_resume(struct ieee80211_hw *hw) { struct cw1200_common *priv = hw->priv; struct cw1200_pm_state *pm_state = &priv->pm_state; struct cw1200_suspend_state *state; state = pm_state->suspend_state; pm_state->suspend_state = NULL; /* Disable IRQ wake */ priv->hwbus_ops->power_mgmt(priv->hwbus_priv, false); /* Scan.lock must be released before BH is resumed other way * in case when BSS_LOST command arrived the processing of the * command will be delayed. */ up(&priv->scan.lock); /* Resume BH thread */ WARN_ON(cw1200_bh_resume(priv)); /* Restores previous PS mode */ if (!priv->vif->p2p && priv->join_status == CW1200_JOIN_STATUS_STA) { priv->powersave_mode.mode = state->prev_ps_mode; cw1200_set_pm(priv, &priv->powersave_mode); } if (state->beacon_skipping) { wsm_set_beacon_wakeup_period(priv, priv->beacon_int * priv->join_dtim_period > MAX_BEACON_SKIP_TIME_MS ? 1 : priv->join_dtim_period, 0); state->beacon_skipping = false; } /* Resume delayed work */ cw1200_resume_work(priv, &priv->bss_loss_work, state->bss_loss_tmo); cw1200_resume_work(priv, &priv->join_timeout, state->join_tmo); cw1200_resume_work(priv, &priv->scan.probe_work, state->direct_probe); cw1200_resume_work(priv, &priv->link_id_gc_work, state->link_id_gc); /* Remove UDP port filter */ wsm_set_udp_port_filter(priv, &cw1200_udp_port_filter_off); /* Remove ethernet frame type filter */ wsm_set_ether_type_filter(priv, &cw1200_ether_type_filter_off); /* Unlock datapath */ wsm_unlock_tx(priv); /* Unlock configuration mutex */ mutex_unlock(&priv->conf_mutex); /* Free memory */ kfree(state); return 0; }
linux-master
drivers/net/wireless/st/cw1200/pm.c
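The pm.c record above saves the remaining timeout of each delayed work item before suspend (cw1200_suspend_work) and re-arms it with that remainder on resume (cw1200_resume_work). The listing below is a minimal, self-contained userspace sketch of that bookkeeping pattern, not the kernel code itself: fake_delayed_work, suspend_work, resume_work and the tick values are illustrative stand-ins for the kernel's delayed_work, jiffies and workqueue API.

#include <stdio.h>

struct fake_delayed_work {
	int pending;	/* 1 if the work is currently scheduled */
	long expires;	/* absolute expiry time, in ticks */
};

/* Cancel the work and remember how much of its timeout was left.
 * Returns the remaining timeout clamped to 0, or -1 if it was not pending. */
static long suspend_work(struct fake_delayed_work *work, long now)
{
	long tmo = -1;

	if (work->pending) {
		tmo = work->expires - now;
		if (tmo < 0)
			tmo = 0;
		work->pending = 0;
	}
	return tmo;
}

/* Re-arm the work with the remembered remainder; a negative value means
 * "was not pending", so nothing is rescheduled. */
static int resume_work(struct fake_delayed_work *work, long now, long tmo)
{
	if (tmo < 0)
		return 1;
	work->pending = 1;
	work->expires = now + tmo;
	return 0;
}

int main(void)
{
	struct fake_delayed_work w = { .pending = 1, .expires = 150 };
	long saved = suspend_work(&w, 100);	/* 50 ticks left */

	printf("saved remainder: %ld\n", saved);
	resume_work(&w, 400, saved);		/* re-armed to fire at 450 */
	printf("re-armed, expires at %ld\n", w.expires);
	return 0;
}

The key design point is that a negative remainder encodes "was not pending", so resume only re-queues work that was actually cancelled mid-flight, mirroring how the driver restores bss_loss_work, join_timeout, probe_work and link_id_gc_work after a wakeup.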
// SPDX-License-Identifier: GPL-2.0-only /* * mac80211 glue code for mac80211 ST-Ericsson CW1200 drivers * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> * * Based on: * Copyright (c) 2006, Michael Wu <[email protected]> * Copyright (c) 2007-2009, Christian Lamparter <[email protected]> * Copyright 2008, Johannes Berg <[email protected]> * * Based on: * - the islsm (softmac prism54) driver, which is: * Copyright 2004-2006 Jean-Baptiste Note <[email protected]>, et al. * - stlc45xx driver * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). */ #include <linux/module.h> #include <linux/firmware.h> #include <linux/etherdevice.h> #include <linux/vmalloc.h> #include <linux/random.h> #include <linux/sched.h> #include <net/mac80211.h> #include "cw1200.h" #include "txrx.h" #include "hwbus.h" #include "fwio.h" #include "hwio.h" #include "bh.h" #include "sta.h" #include "scan.h" #include "debug.h" #include "pm.h" MODULE_AUTHOR("Dmitry Tarnyagin <[email protected]>"); MODULE_DESCRIPTION("Softmac ST-Ericsson CW1200 common code"); MODULE_LICENSE("GPL"); MODULE_ALIAS("cw1200_core"); /* Accept MAC address of the form macaddr=0x00,0x80,0xE1,0x30,0x40,0x50 */ static u8 cw1200_mac_template[ETH_ALEN] = {0x02, 0x80, 0xe1, 0x00, 0x00, 0x00}; module_param_array_named(macaddr, cw1200_mac_template, byte, NULL, 0444); MODULE_PARM_DESC(macaddr, "Override platform_data MAC address"); static char *cw1200_sdd_path; module_param(cw1200_sdd_path, charp, 0644); MODULE_PARM_DESC(cw1200_sdd_path, "Override platform_data SDD file"); static int cw1200_refclk; module_param(cw1200_refclk, int, 0644); MODULE_PARM_DESC(cw1200_refclk, "Override platform_data reference clock"); int cw1200_power_mode = wsm_power_mode_quiescent; module_param(cw1200_power_mode, int, 0644); MODULE_PARM_DESC(cw1200_power_mode, "WSM power mode. 
0 == active, 1 == doze, 2 == quiescent (default)"); #define RATETAB_ENT(_rate, _rateid, _flags) \ { \ .bitrate = (_rate), \ .hw_value = (_rateid), \ .flags = (_flags), \ } static struct ieee80211_rate cw1200_rates[] = { RATETAB_ENT(10, 0, 0), RATETAB_ENT(20, 1, 0), RATETAB_ENT(55, 2, 0), RATETAB_ENT(110, 3, 0), RATETAB_ENT(60, 6, 0), RATETAB_ENT(90, 7, 0), RATETAB_ENT(120, 8, 0), RATETAB_ENT(180, 9, 0), RATETAB_ENT(240, 10, 0), RATETAB_ENT(360, 11, 0), RATETAB_ENT(480, 12, 0), RATETAB_ENT(540, 13, 0), }; static struct ieee80211_rate cw1200_mcs_rates[] = { RATETAB_ENT(65, 14, IEEE80211_TX_RC_MCS), RATETAB_ENT(130, 15, IEEE80211_TX_RC_MCS), RATETAB_ENT(195, 16, IEEE80211_TX_RC_MCS), RATETAB_ENT(260, 17, IEEE80211_TX_RC_MCS), RATETAB_ENT(390, 18, IEEE80211_TX_RC_MCS), RATETAB_ENT(520, 19, IEEE80211_TX_RC_MCS), RATETAB_ENT(585, 20, IEEE80211_TX_RC_MCS), RATETAB_ENT(650, 21, IEEE80211_TX_RC_MCS), }; #define cw1200_a_rates (cw1200_rates + 4) #define cw1200_a_rates_size (ARRAY_SIZE(cw1200_rates) - 4) #define cw1200_g_rates (cw1200_rates + 0) #define cw1200_g_rates_size (ARRAY_SIZE(cw1200_rates)) #define cw1200_n_rates (cw1200_mcs_rates) #define cw1200_n_rates_size (ARRAY_SIZE(cw1200_mcs_rates)) #define CHAN2G(_channel, _freq, _flags) { \ .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } #define CHAN5G(_channel, _flags) { \ .band = NL80211_BAND_5GHZ, \ .center_freq = 5000 + (5 * (_channel)), \ .hw_value = (_channel), \ .flags = (_flags), \ .max_antenna_gain = 0, \ .max_power = 30, \ } static struct ieee80211_channel cw1200_2ghz_chantable[] = { CHAN2G(1, 2412, 0), CHAN2G(2, 2417, 0), CHAN2G(3, 2422, 0), CHAN2G(4, 2427, 0), CHAN2G(5, 2432, 0), CHAN2G(6, 2437, 0), CHAN2G(7, 2442, 0), CHAN2G(8, 2447, 0), CHAN2G(9, 2452, 0), CHAN2G(10, 2457, 0), CHAN2G(11, 2462, 0), CHAN2G(12, 2467, 0), CHAN2G(13, 2472, 0), CHAN2G(14, 2484, 0), }; static struct ieee80211_channel cw1200_5ghz_chantable[] = { CHAN5G(34, 0), CHAN5G(36, 0), CHAN5G(38, 0), CHAN5G(40, 0), CHAN5G(42, 0), CHAN5G(44, 0), CHAN5G(46, 0), CHAN5G(48, 0), CHAN5G(52, 0), CHAN5G(56, 0), CHAN5G(60, 0), CHAN5G(64, 0), CHAN5G(100, 0), CHAN5G(104, 0), CHAN5G(108, 0), CHAN5G(112, 0), CHAN5G(116, 0), CHAN5G(120, 0), CHAN5G(124, 0), CHAN5G(128, 0), CHAN5G(132, 0), CHAN5G(136, 0), CHAN5G(140, 0), CHAN5G(149, 0), CHAN5G(153, 0), CHAN5G(157, 0), CHAN5G(161, 0), CHAN5G(165, 0), CHAN5G(184, 0), CHAN5G(188, 0), CHAN5G(192, 0), CHAN5G(196, 0), CHAN5G(200, 0), CHAN5G(204, 0), CHAN5G(208, 0), CHAN5G(212, 0), CHAN5G(216, 0), }; static struct ieee80211_supported_band cw1200_band_2ghz = { .channels = cw1200_2ghz_chantable, .n_channels = ARRAY_SIZE(cw1200_2ghz_chantable), .bitrates = cw1200_g_rates, .n_bitrates = cw1200_g_rates_size, .ht_cap = { .cap = IEEE80211_HT_CAP_GRN_FLD | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | IEEE80211_HT_CAP_MAX_AMSDU, .ht_supported = 1, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, .ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE, .mcs = { .rx_mask[0] = 0xFF, .rx_highest = __cpu_to_le16(0x41), .tx_params = IEEE80211_HT_MCS_TX_DEFINED, }, }, }; static struct ieee80211_supported_band cw1200_band_5ghz = { .channels = cw1200_5ghz_chantable, .n_channels = ARRAY_SIZE(cw1200_5ghz_chantable), .bitrates = cw1200_a_rates, .n_bitrates = cw1200_a_rates_size, .ht_cap = { .cap = IEEE80211_HT_CAP_GRN_FLD | (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT) | IEEE80211_HT_CAP_MAX_AMSDU, .ht_supported = 1, .ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K, .ampdu_density = 
IEEE80211_HT_MPDU_DENSITY_NONE, .mcs = { .rx_mask[0] = 0xFF, .rx_highest = __cpu_to_le16(0x41), .tx_params = IEEE80211_HT_MCS_TX_DEFINED, }, }, }; static const unsigned long cw1200_ttl[] = { 1 * HZ, /* VO */ 2 * HZ, /* VI */ 5 * HZ, /* BE */ 10 * HZ /* BK */ }; static const struct ieee80211_ops cw1200_ops = { .start = cw1200_start, .stop = cw1200_stop, .add_interface = cw1200_add_interface, .remove_interface = cw1200_remove_interface, .change_interface = cw1200_change_interface, .tx = cw1200_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .hw_scan = cw1200_hw_scan, .set_tim = cw1200_set_tim, .sta_notify = cw1200_sta_notify, .sta_add = cw1200_sta_add, .sta_remove = cw1200_sta_remove, .set_key = cw1200_set_key, .set_rts_threshold = cw1200_set_rts_threshold, .config = cw1200_config, .bss_info_changed = cw1200_bss_info_changed, .prepare_multicast = cw1200_prepare_multicast, .configure_filter = cw1200_configure_filter, .conf_tx = cw1200_conf_tx, .get_stats = cw1200_get_stats, .ampdu_action = cw1200_ampdu_action, .flush = cw1200_flush, #ifdef CONFIG_PM .suspend = cw1200_wow_suspend, .resume = cw1200_wow_resume, #endif /* Intentionally not offloaded: */ /*.channel_switch = cw1200_channel_switch, */ /*.remain_on_channel = cw1200_remain_on_channel, */ /*.cancel_remain_on_channel = cw1200_cancel_remain_on_channel, */ }; static int cw1200_ba_rx_tids = -1; static int cw1200_ba_tx_tids = -1; module_param(cw1200_ba_rx_tids, int, 0644); module_param(cw1200_ba_tx_tids, int, 0644); MODULE_PARM_DESC(cw1200_ba_rx_tids, "Block ACK RX TIDs"); MODULE_PARM_DESC(cw1200_ba_tx_tids, "Block ACK TX TIDs"); #ifdef CONFIG_PM static const struct wiphy_wowlan_support cw1200_wowlan_support = { /* Support only for limited wowlan functionalities */ .flags = WIPHY_WOWLAN_ANY | WIPHY_WOWLAN_DISCONNECT, }; #endif static struct ieee80211_hw *cw1200_init_common(const u8 *macaddr, const bool have_5ghz) { int i, band; struct ieee80211_hw *hw; struct cw1200_common *priv; hw = ieee80211_alloc_hw(sizeof(struct cw1200_common), &cw1200_ops); if (!hw) return NULL; priv = hw->priv; priv->hw = hw; priv->hw_type = -1; priv->mode = NL80211_IFTYPE_UNSPECIFIED; priv->rates = cw1200_rates; /* TODO: fetch from FW */ priv->mcs_rates = cw1200_n_rates; if (cw1200_ba_rx_tids != -1) priv->ba_rx_tid_mask = cw1200_ba_rx_tids; else priv->ba_rx_tid_mask = 0xFF; /* Enable RX BLKACK for all TIDs */ if (cw1200_ba_tx_tids != -1) priv->ba_tx_tid_mask = cw1200_ba_tx_tids; else priv->ba_tx_tid_mask = 0xff; /* Enable TX BLKACK for all TIDs */ ieee80211_hw_set(hw, NEED_DTIM_BEFORE_ASSOC); ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SUPPORTS_PS); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MESH_POINT) | BIT(NL80211_IFTYPE_P2P_CLIENT) | BIT(NL80211_IFTYPE_P2P_GO); #ifdef CONFIG_PM hw->wiphy->wowlan = &cw1200_wowlan_support; #endif hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; hw->queues = 4; priv->rts_threshold = -1; hw->max_rates = 8; hw->max_rate_tries = 15; hw->extra_tx_headroom = WSM_TX_EXTRA_HEADROOM + 8; /* TKIP IV */ hw->sta_data_size = sizeof(struct cw1200_sta_priv); hw->wiphy->bands[NL80211_BAND_2GHZ] = &cw1200_band_2ghz; if (have_5ghz) hw->wiphy->bands[NL80211_BAND_5GHZ] = &cw1200_band_5ghz; /* Channel params have to be cleared before registering 
wiphy again */ for (band = 0; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband = hw->wiphy->bands[band]; if (!sband) continue; for (i = 0; i < sband->n_channels; i++) { sband->channels[i].flags = 0; sband->channels[i].max_antenna_gain = 0; sband->channels[i].max_power = 30; } } hw->wiphy->max_scan_ssids = 2; hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; if (macaddr) SET_IEEE80211_PERM_ADDR(hw, (u8 *)macaddr); else SET_IEEE80211_PERM_ADDR(hw, cw1200_mac_template); /* Fix up mac address if necessary */ if (hw->wiphy->perm_addr[3] == 0 && hw->wiphy->perm_addr[4] == 0 && hw->wiphy->perm_addr[5] == 0) { get_random_bytes(&hw->wiphy->perm_addr[3], 3); } mutex_init(&priv->wsm_cmd_mux); mutex_init(&priv->conf_mutex); priv->workqueue = create_singlethread_workqueue("cw1200_wq"); if (!priv->workqueue) { ieee80211_free_hw(hw); return NULL; } sema_init(&priv->scan.lock, 1); INIT_WORK(&priv->scan.work, cw1200_scan_work); INIT_DELAYED_WORK(&priv->scan.probe_work, cw1200_probe_work); INIT_DELAYED_WORK(&priv->scan.timeout, cw1200_scan_timeout); INIT_DELAYED_WORK(&priv->clear_recent_scan_work, cw1200_clear_recent_scan_work); INIT_DELAYED_WORK(&priv->join_timeout, cw1200_join_timeout); INIT_WORK(&priv->unjoin_work, cw1200_unjoin_work); INIT_WORK(&priv->join_complete_work, cw1200_join_complete_work); INIT_WORK(&priv->wep_key_work, cw1200_wep_key_work); INIT_WORK(&priv->tx_policy_upload_work, tx_policy_upload_work); spin_lock_init(&priv->event_queue_lock); INIT_LIST_HEAD(&priv->event_queue); INIT_WORK(&priv->event_handler, cw1200_event_handler); INIT_DELAYED_WORK(&priv->bss_loss_work, cw1200_bss_loss_work); INIT_WORK(&priv->bss_params_work, cw1200_bss_params_work); spin_lock_init(&priv->bss_loss_lock); spin_lock_init(&priv->ps_state_lock); INIT_WORK(&priv->set_cts_work, cw1200_set_cts_work); INIT_WORK(&priv->set_tim_work, cw1200_set_tim_work); INIT_WORK(&priv->multicast_start_work, cw1200_multicast_start_work); INIT_WORK(&priv->multicast_stop_work, cw1200_multicast_stop_work); INIT_WORK(&priv->link_id_work, cw1200_link_id_work); INIT_DELAYED_WORK(&priv->link_id_gc_work, cw1200_link_id_gc_work); INIT_WORK(&priv->linkid_reset_work, cw1200_link_id_reset); INIT_WORK(&priv->update_filtering_work, cw1200_update_filtering_work); INIT_WORK(&priv->set_beacon_wakeup_period_work, cw1200_set_beacon_wakeup_period_work); timer_setup(&priv->mcast_timeout, cw1200_mcast_timeout, 0); if (cw1200_queue_stats_init(&priv->tx_queue_stats, CW1200_LINK_ID_MAX, cw1200_skb_dtor, priv)) { destroy_workqueue(priv->workqueue); ieee80211_free_hw(hw); return NULL; } for (i = 0; i < 4; ++i) { if (cw1200_queue_init(&priv->tx_queue[i], &priv->tx_queue_stats, i, 16, cw1200_ttl[i])) { for (; i > 0; i--) cw1200_queue_deinit(&priv->tx_queue[i - 1]); cw1200_queue_stats_deinit(&priv->tx_queue_stats); destroy_workqueue(priv->workqueue); ieee80211_free_hw(hw); return NULL; } } init_waitqueue_head(&priv->channel_switch_done); init_waitqueue_head(&priv->wsm_cmd_wq); init_waitqueue_head(&priv->wsm_startup_done); init_waitqueue_head(&priv->ps_mode_switch_done); wsm_buf_init(&priv->wsm_cmd_buf); spin_lock_init(&priv->wsm_cmd.lock); priv->wsm_cmd.done = 1; tx_policy_init(priv); return hw; } static int cw1200_register_common(struct ieee80211_hw *dev) { struct cw1200_common *priv = dev->priv; int err; #ifdef CONFIG_PM err = cw1200_pm_init(&priv->pm_state, priv); if (err) { pr_err("Cannot init PM. 
(%d).\n", err); return err; } #endif err = ieee80211_register_hw(dev); if (err) { pr_err("Cannot register device (%d).\n", err); #ifdef CONFIG_PM cw1200_pm_deinit(&priv->pm_state); #endif return err; } cw1200_debug_init(priv); pr_info("Registered as '%s'\n", wiphy_name(dev->wiphy)); return 0; } static void cw1200_free_common(struct ieee80211_hw *dev) { ieee80211_free_hw(dev); } static void cw1200_unregister_common(struct ieee80211_hw *dev) { struct cw1200_common *priv = dev->priv; int i; ieee80211_unregister_hw(dev); del_timer_sync(&priv->mcast_timeout); cw1200_unregister_bh(priv); cw1200_debug_release(priv); mutex_destroy(&priv->conf_mutex); wsm_buf_deinit(&priv->wsm_cmd_buf); destroy_workqueue(priv->workqueue); priv->workqueue = NULL; if (priv->sdd) { release_firmware(priv->sdd); priv->sdd = NULL; } for (i = 0; i < 4; ++i) cw1200_queue_deinit(&priv->tx_queue[i]); cw1200_queue_stats_deinit(&priv->tx_queue_stats); #ifdef CONFIG_PM cw1200_pm_deinit(&priv->pm_state); #endif } /* Clock is in KHz */ u32 cw1200_dpll_from_clk(u16 clk_khz) { switch (clk_khz) { case 0x32C8: /* 13000 KHz */ return 0x1D89D241; case 0x3E80: /* 16000 KHz */ return 0x000001E1; case 0x41A0: /* 16800 KHz */ return 0x124931C1; case 0x4B00: /* 19200 KHz */ return 0x00000191; case 0x5DC0: /* 24000 KHz */ return 0x00000141; case 0x6590: /* 26000 KHz */ return 0x0EC4F121; case 0x8340: /* 33600 KHz */ return 0x092490E1; case 0x9600: /* 38400 KHz */ return 0x100010C1; case 0x9C40: /* 40000 KHz */ return 0x000000C1; case 0xBB80: /* 48000 KHz */ return 0x000000A1; case 0xCB20: /* 52000 KHz */ return 0x07627091; default: pr_err("Unknown Refclk freq (0x%04x), using 26000KHz\n", clk_khz); return 0x0EC4F121; } } int cw1200_core_probe(const struct hwbus_ops *hwbus_ops, struct hwbus_priv *hwbus, struct device *pdev, struct cw1200_common **core, int ref_clk, const u8 *macaddr, const char *sdd_path, bool have_5ghz) { int err = -EINVAL; struct ieee80211_hw *dev; struct cw1200_common *priv; struct wsm_operational_mode mode = { .power_mode = cw1200_power_mode, .disable_more_flag_usage = true, }; dev = cw1200_init_common(macaddr, have_5ghz); if (!dev) goto err; priv = dev->priv; priv->hw_refclk = ref_clk; if (cw1200_refclk) priv->hw_refclk = cw1200_refclk; priv->sdd_path = (char *)sdd_path; if (cw1200_sdd_path) priv->sdd_path = cw1200_sdd_path; priv->hwbus_ops = hwbus_ops; priv->hwbus_priv = hwbus; priv->pdev = pdev; SET_IEEE80211_DEV(priv->hw, pdev); /* Pass struct cw1200_common back up */ *core = priv; err = cw1200_register_bh(priv); if (err) goto err1; err = cw1200_load_firmware(priv); if (err) goto err2; if (wait_event_interruptible_timeout(priv->wsm_startup_done, priv->firmware_ready, 3*HZ) <= 0) { /* TODO: Need to find how to reset device in QUEUE mode properly. */ pr_err("Timeout waiting on device startup\n"); err = -ETIMEDOUT; goto err2; } /* Set low-power mode. */ wsm_set_operational_mode(priv, &mode); /* Enable multi-TX confirmation */ wsm_use_multi_tx_conf(priv, true); err = cw1200_register_common(dev); if (err) goto err2; return err; err2: cw1200_unregister_bh(priv); err1: cw1200_free_common(dev); err: *core = NULL; return err; } EXPORT_SYMBOL_GPL(cw1200_core_probe); void cw1200_core_release(struct cw1200_common *self) { /* Disable device interrupts */ self->hwbus_ops->lock(self->hwbus_priv); __cw1200_irq_enable(self, 0); self->hwbus_ops->unlock(self->hwbus_priv); /* And then clean up */ cw1200_unregister_common(self->hw); cw1200_free_common(self->hw); return; } EXPORT_SYMBOL_GPL(cw1200_core_release);
linux-master
drivers/net/wireless/st/cw1200/main.c
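cw1200_dpll_from_clk() in the main.c record above is a pure table lookup from the reference-clock frequency (in kHz, matched against its hex value) to the DPLL configuration word, falling back to the 26 MHz setting for unknown clocks. Below is a compilable userspace mirror of that table; dpll_from_clk and the main() driver are illustrative names added here, while the constant pairs are taken directly from the source.

#include <stdint.h>
#include <stdio.h>

/* Reference clock in kHz -> DPLL word, 26 MHz setting as the fallback. */
static uint32_t dpll_from_clk(uint16_t clk_khz)
{
	switch (clk_khz) {
	case 0x32C8: return 0x1D89D241;	/* 13000 kHz */
	case 0x3E80: return 0x000001E1;	/* 16000 kHz */
	case 0x41A0: return 0x124931C1;	/* 16800 kHz */
	case 0x4B00: return 0x00000191;	/* 19200 kHz */
	case 0x5DC0: return 0x00000141;	/* 24000 kHz */
	case 0x6590: return 0x0EC4F121;	/* 26000 kHz */
	case 0x8340: return 0x092490E1;	/* 33600 kHz */
	case 0x9600: return 0x100010C1;	/* 38400 kHz */
	case 0x9C40: return 0x000000C1;	/* 40000 kHz */
	case 0xBB80: return 0x000000A1;	/* 48000 kHz */
	case 0xCB20: return 0x07627091;	/* 52000 kHz */
	default:
		fprintf(stderr, "unknown refclk 0x%04x, assuming 26000 kHz\n",
			clk_khz);
		return 0x0EC4F121;
	}
}

int main(void)
{
	printf("26 MHz   -> 0x%08X\n", dpll_from_clk(0x6590));
	printf("38.4 MHz -> 0x%08X\n", dpll_from_clk(0x9600));
	return 0;
}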
// SPDX-License-Identifier: GPL-2.0-only /* * Mac80211 SPI driver for ST-Ericsson CW1200 device * * Copyright (c) 2011, Sagrad Inc. * Author: Solomon Peachy <[email protected]> * * Based on cw1200_sdio.c * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> */ #include <linux/module.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <net/mac80211.h> #include <linux/spi/spi.h> #include <linux/device.h> #include "cw1200.h" #include "hwbus.h" #include <linux/platform_data/net-cw1200.h> #include "hwio.h" MODULE_AUTHOR("Solomon Peachy <[email protected]>"); MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SPI driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:cw1200_wlan_spi"); /* #define SPI_DEBUG */ struct hwbus_priv { struct spi_device *func; struct cw1200_common *core; const struct cw1200_platform_data_spi *pdata; spinlock_t lock; /* Serialize all bus operations */ wait_queue_head_t wq; int claimed; }; #define SDIO_TO_SPI_ADDR(addr) ((addr & 0x1f)>>2) #define SET_WRITE 0x7FFF /* usage: and operation */ #define SET_READ 0x8000 /* usage: or operation */ /* Notes on byte ordering: LE: B0 B1 B2 B3 BE: B3 B2 B1 B0 Hardware expects 32-bit data to be written as 16-bit BE words: B1 B0 B3 B2 */ static int cw1200_spi_memcpy_fromio(struct hwbus_priv *self, unsigned int addr, void *dst, int count) { int ret, i; u16 regaddr; struct spi_message m; struct spi_transfer t_addr = { .tx_buf = &regaddr, .len = sizeof(regaddr), }; struct spi_transfer t_msg = { .rx_buf = dst, .len = count, }; regaddr = (SDIO_TO_SPI_ADDR(addr))<<12; regaddr |= SET_READ; regaddr |= (count>>1); #ifdef SPI_DEBUG pr_info("READ : %04d from 0x%02x (%04x)\n", count, addr, regaddr); #endif /* Header is LE16 */ regaddr = cpu_to_le16(regaddr); /* We have to byteswap if the SPI bus is limited to 8b operation or we are running on a Big Endian system */ #if defined(__LITTLE_ENDIAN) if (self->func->bits_per_word == 8) #endif regaddr = swab16(regaddr); spi_message_init(&m); spi_message_add_tail(&t_addr, &m); spi_message_add_tail(&t_msg, &m); ret = spi_sync(self->func, &m); #ifdef SPI_DEBUG pr_info("READ : "); for (i = 0; i < t_addr.len; i++) printk("%02x ", ((u8 *)t_addr.tx_buf)[i]); printk(" : "); for (i = 0; i < t_msg.len; i++) printk("%02x ", ((u8 *)t_msg.rx_buf)[i]); printk("\n"); #endif /* We have to byteswap if the SPI bus is limited to 8b operation or we are running on a Big Endian system */ #if defined(__LITTLE_ENDIAN) if (self->func->bits_per_word == 8) #endif { uint16_t *buf = (uint16_t *)dst; for (i = 0; i < ((count + 1) >> 1); i++) buf[i] = swab16(buf[i]); } return ret; } static int cw1200_spi_memcpy_toio(struct hwbus_priv *self, unsigned int addr, const void *src, int count) { int rval, i; u16 regaddr; struct spi_transfer t_addr = { .tx_buf = &regaddr, .len = sizeof(regaddr), }; struct spi_transfer t_msg = { .tx_buf = src, .len = count, }; struct spi_message m; regaddr = (SDIO_TO_SPI_ADDR(addr))<<12; regaddr &= SET_WRITE; regaddr |= (count>>1); #ifdef SPI_DEBUG pr_info("WRITE: %04d to 0x%02x (%04x)\n", count, addr, regaddr); #endif /* Header is LE16 */ regaddr = cpu_to_le16(regaddr); /* We have to byteswap if the SPI bus is limited to 8b operation or we are running on a Big Endian system */ #if defined(__LITTLE_ENDIAN) if (self->func->bits_per_word == 8) #endif { uint16_t *buf = (uint16_t *)src; regaddr = swab16(regaddr); for (i = 0; i < ((count + 1) >> 1); i++) buf[i] = swab16(buf[i]); } #ifdef SPI_DEBUG pr_info("WRITE: "); for (i = 0; i < 
t_addr.len; i++) printk("%02x ", ((u8 *)t_addr.tx_buf)[i]); printk(" : "); for (i = 0; i < t_msg.len; i++) printk("%02x ", ((u8 *)t_msg.tx_buf)[i]); printk("\n"); #endif spi_message_init(&m); spi_message_add_tail(&t_addr, &m); spi_message_add_tail(&t_msg, &m); rval = spi_sync(self->func, &m); #ifdef SPI_DEBUG pr_info("WROTE: %d\n", m.actual_length); #endif #if defined(__LITTLE_ENDIAN) /* We have to byteswap if the SPI bus is limited to 8b operation */ if (self->func->bits_per_word == 8) #endif { uint16_t *buf = (uint16_t *)src; for (i = 0; i < ((count + 1) >> 1); i++) buf[i] = swab16(buf[i]); } return rval; } static void cw1200_spi_lock(struct hwbus_priv *self) { unsigned long flags; DECLARE_WAITQUEUE(wait, current); might_sleep(); add_wait_queue(&self->wq, &wait); spin_lock_irqsave(&self->lock, flags); while (1) { set_current_state(TASK_UNINTERRUPTIBLE); if (!self->claimed) break; spin_unlock_irqrestore(&self->lock, flags); schedule(); spin_lock_irqsave(&self->lock, flags); } set_current_state(TASK_RUNNING); self->claimed = 1; spin_unlock_irqrestore(&self->lock, flags); remove_wait_queue(&self->wq, &wait); return; } static void cw1200_spi_unlock(struct hwbus_priv *self) { unsigned long flags; spin_lock_irqsave(&self->lock, flags); self->claimed = 0; spin_unlock_irqrestore(&self->lock, flags); wake_up(&self->wq); return; } static irqreturn_t cw1200_spi_irq_handler(int irq, void *dev_id) { struct hwbus_priv *self = dev_id; if (self->core) { cw1200_spi_lock(self); cw1200_irq_handler(self->core); cw1200_spi_unlock(self); return IRQ_HANDLED; } else { return IRQ_NONE; } } static int cw1200_spi_irq_subscribe(struct hwbus_priv *self) { int ret; pr_debug("SW IRQ subscribe\n"); ret = request_threaded_irq(self->func->irq, NULL, cw1200_spi_irq_handler, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "cw1200_wlan_irq", self); if (WARN_ON(ret < 0)) goto exit; ret = enable_irq_wake(self->func->irq); if (WARN_ON(ret)) goto free_irq; return 0; free_irq: free_irq(self->func->irq, self); exit: return ret; } static void cw1200_spi_irq_unsubscribe(struct hwbus_priv *self) { pr_debug("SW IRQ unsubscribe\n"); disable_irq_wake(self->func->irq); free_irq(self->func->irq, self); } static int cw1200_spi_off(const struct cw1200_platform_data_spi *pdata) { if (pdata->reset) { gpio_set_value(pdata->reset, 0); msleep(30); /* Min is 2 * CLK32K cycles */ gpio_free(pdata->reset); } if (pdata->power_ctrl) pdata->power_ctrl(pdata, false); if (pdata->clk_ctrl) pdata->clk_ctrl(pdata, false); return 0; } static int cw1200_spi_on(const struct cw1200_platform_data_spi *pdata) { /* Ensure I/Os are pulled low */ if (pdata->reset) { gpio_request(pdata->reset, "cw1200_wlan_reset"); gpio_direction_output(pdata->reset, 0); } if (pdata->powerup) { gpio_request(pdata->powerup, "cw1200_wlan_powerup"); gpio_direction_output(pdata->powerup, 0); } if (pdata->reset || pdata->powerup) msleep(10); /* Settle time? */ /* Enable 3v3 and 1v8 to hardware */ if (pdata->power_ctrl) { if (pdata->power_ctrl(pdata, true)) { pr_err("power_ctrl() failed!\n"); return -1; } } /* Enable CLK32K */ if (pdata->clk_ctrl) { if (pdata->clk_ctrl(pdata, true)) { pr_err("clk_ctrl() failed!\n"); return -1; } msleep(10); /* Delay until clock is stable for 2 cycles */ } /* Enable POWERUP signal */ if (pdata->powerup) { gpio_set_value(pdata->powerup, 1); msleep(250); /* or more..? */ } /* Enable RSTn signal */ if (pdata->reset) { gpio_set_value(pdata->reset, 1); msleep(50); /* Or more..? 
*/ } return 0; } static size_t cw1200_spi_align_size(struct hwbus_priv *self, size_t size) { return size & 1 ? size + 1 : size; } static int cw1200_spi_pm(struct hwbus_priv *self, bool suspend) { return irq_set_irq_wake(self->func->irq, suspend); } static const struct hwbus_ops cw1200_spi_hwbus_ops = { .hwbus_memcpy_fromio = cw1200_spi_memcpy_fromio, .hwbus_memcpy_toio = cw1200_spi_memcpy_toio, .lock = cw1200_spi_lock, .unlock = cw1200_spi_unlock, .align_size = cw1200_spi_align_size, .power_mgmt = cw1200_spi_pm, }; /* Probe Function to be called by SPI stack when device is discovered */ static int cw1200_spi_probe(struct spi_device *func) { const struct cw1200_platform_data_spi *plat_data = dev_get_platdata(&func->dev); struct hwbus_priv *self; int status; /* Sanity check speed */ if (func->max_speed_hz > 52000000) func->max_speed_hz = 52000000; if (func->max_speed_hz < 1000000) func->max_speed_hz = 1000000; /* Fix up transfer size */ if (plat_data->spi_bits_per_word) func->bits_per_word = plat_data->spi_bits_per_word; if (!func->bits_per_word) func->bits_per_word = 16; /* And finally.. */ func->mode = SPI_MODE_0; pr_info("cw1200_wlan_spi: Probe called (CS %d M %d BPW %d CLK %d)\n", spi_get_chipselect(func, 0), func->mode, func->bits_per_word, func->max_speed_hz); if (cw1200_spi_on(plat_data)) { pr_err("spi_on() failed!\n"); return -1; } if (spi_setup(func)) { pr_err("spi_setup() failed!\n"); return -1; } self = devm_kzalloc(&func->dev, sizeof(*self), GFP_KERNEL); if (!self) { pr_err("Can't allocate SPI hwbus_priv."); return -ENOMEM; } self->pdata = plat_data; self->func = func; spin_lock_init(&self->lock); spi_set_drvdata(func, self); init_waitqueue_head(&self->wq); status = cw1200_spi_irq_subscribe(self); status = cw1200_core_probe(&cw1200_spi_hwbus_ops, self, &func->dev, &self->core, self->pdata->ref_clk, self->pdata->macaddr, self->pdata->sdd_file, self->pdata->have_5ghz); if (status) { cw1200_spi_irq_unsubscribe(self); cw1200_spi_off(plat_data); } return status; } /* Disconnect Function to be called by SPI stack when device is disconnected */ static void cw1200_spi_disconnect(struct spi_device *func) { struct hwbus_priv *self = spi_get_drvdata(func); if (self) { cw1200_spi_irq_unsubscribe(self); if (self->core) { cw1200_core_release(self->core); self->core = NULL; } } cw1200_spi_off(dev_get_platdata(&func->dev)); } static int __maybe_unused cw1200_spi_suspend(struct device *dev) { struct hwbus_priv *self = spi_get_drvdata(to_spi_device(dev)); if (!cw1200_can_suspend(self->core)) return -EAGAIN; /* XXX notify host that we have to keep CW1200 powered on? */ return 0; } static SIMPLE_DEV_PM_OPS(cw1200_pm_ops, cw1200_spi_suspend, NULL); static struct spi_driver spi_driver = { .probe = cw1200_spi_probe, .remove = cw1200_spi_disconnect, .driver = { .name = "cw1200_wlan_spi", .pm = IS_ENABLED(CONFIG_PM) ? &cw1200_pm_ops : NULL, }, }; module_spi_driver(spi_driver);
linux-master
drivers/net/wireless/st/cw1200/cw1200_spi.c
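The SPI glue above prefixes every transfer with a 16-bit command header (register address shifted into the upper bits, a read/write flag, and the payload length in 16-bit words) and byte-swaps each halfword when the bus is limited to 8-bit operation on a little-endian host, so the chip sees big-endian halfwords (B1 B0 B3 B2). The standalone sketch below models that header and byte-swap arithmetic under those assumptions; spi_header, swap_payload and the sample payload are invented for illustration, while SDIO_TO_SPI_ADDR, SET_READ and SET_WRITE match the macros in the source.

#include <stdint.h>
#include <stdio.h>

#define SDIO_TO_SPI_ADDR(addr)	(((addr) & 0x1f) >> 2)
#define SET_READ	0x8000	/* OR'ed in for reads   */
#define SET_WRITE	0x7FFF	/* AND'ed in for writes */

static uint16_t swab16(uint16_t v)
{
	return (uint16_t)((v << 8) | (v >> 8));
}

/* Build the 16-bit command header: register address in the upper bits,
 * read/write flag, and the payload length in 16-bit words. */
static uint16_t spi_header(unsigned int addr, int count_bytes, int is_read)
{
	uint16_t regaddr = SDIO_TO_SPI_ADDR(addr) << 12;

	if (is_read)
		regaddr |= SET_READ;
	else
		regaddr &= SET_WRITE;
	regaddr |= (uint16_t)(count_bytes >> 1);
	return regaddr;
}

/* Swap every 16-bit word, as the driver does for 8-bit buses on LE hosts. */
static void swap_payload(uint16_t *buf, int count_bytes)
{
	int i;

	for (i = 0; i < ((count_bytes + 1) >> 1); i++)
		buf[i] = swab16(buf[i]);
}

int main(void)
{
	uint16_t payload[2] = { 0x1122, 0x3344 };
	uint16_t hdr = spi_header(0x08, sizeof(payload), 1);

	swap_payload(payload, sizeof(payload));
	printf("header 0x%04x, payload %04x %04x\n",
	       hdr, payload[0], payload[1]);
	return 0;
}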
// SPDX-License-Identifier: GPL-2.0-only /* * Datapath implementation for ST-Ericsson CW1200 mac80211 drivers * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> */ #include <net/mac80211.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include "cw1200.h" #include "wsm.h" #include "bh.h" #include "sta.h" #include "debug.h" #define CW1200_INVALID_RATE_ID (0xFF) static int cw1200_handle_action_rx(struct cw1200_common *priv, struct sk_buff *skb); static const struct ieee80211_rate * cw1200_get_tx_rate(const struct cw1200_common *priv, const struct ieee80211_tx_rate *rate); /* ******************************************************************** */ /* TX queue lock / unlock */ static inline void cw1200_tx_queues_lock(struct cw1200_common *priv) { int i; for (i = 0; i < 4; ++i) cw1200_queue_lock(&priv->tx_queue[i]); } static inline void cw1200_tx_queues_unlock(struct cw1200_common *priv) { int i; for (i = 0; i < 4; ++i) cw1200_queue_unlock(&priv->tx_queue[i]); } /* ******************************************************************** */ /* TX policy cache implementation */ static void tx_policy_dump(struct tx_policy *policy) { pr_debug("[TX policy] %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X %.1X%.1X%.1X%.1X%.1X%.1X%.1X%.1X: %d\n", policy->raw[0] & 0x0F, policy->raw[0] >> 4, policy->raw[1] & 0x0F, policy->raw[1] >> 4, policy->raw[2] & 0x0F, policy->raw[2] >> 4, policy->raw[3] & 0x0F, policy->raw[3] >> 4, policy->raw[4] & 0x0F, policy->raw[4] >> 4, policy->raw[5] & 0x0F, policy->raw[5] >> 4, policy->raw[6] & 0x0F, policy->raw[6] >> 4, policy->raw[7] & 0x0F, policy->raw[7] >> 4, policy->raw[8] & 0x0F, policy->raw[8] >> 4, policy->raw[9] & 0x0F, policy->raw[9] >> 4, policy->raw[10] & 0x0F, policy->raw[10] >> 4, policy->raw[11] & 0x0F, policy->raw[11] >> 4, policy->defined); } static void tx_policy_build(const struct cw1200_common *priv, /* [out] */ struct tx_policy *policy, struct ieee80211_tx_rate *rates, size_t count) { int i, j; unsigned limit = priv->short_frame_max_tx_count; unsigned total = 0; BUG_ON(rates[0].idx < 0); memset(policy, 0, sizeof(*policy)); /* Sort rates in descending order. */ for (i = 1; i < count; ++i) { if (rates[i].idx < 0) { count = i; break; } if (rates[i].idx > rates[i - 1].idx) { struct ieee80211_tx_rate tmp = rates[i - 1]; rates[i - 1] = rates[i]; rates[i] = tmp; } } /* Eliminate duplicates. */ total = rates[0].count; for (i = 0, j = 1; j < count; ++j) { if (rates[j].idx == rates[i].idx) { rates[i].count += rates[j].count; } else if (rates[j].idx > rates[i].idx) { break; } else { ++i; if (i != j) rates[i] = rates[j]; } total += rates[j].count; } count = i + 1; /* Re-fill policy trying to keep every requested rate and with * respect to the global max tx retransmission count. */ if (limit < count) limit = count; if (total > limit) { for (i = 0; i < count; ++i) { int left = count - i - 1; if (rates[i].count > limit - left) rates[i].count = limit - left; limit -= rates[i].count; } } /* HACK!!! Device has problems (at least) switching from * 54Mbps CTS to 1Mbps. This switch takes enormous amount * of time (100-200 ms), leading to valuable throughput drop. * As a workaround, additional g-rates are injected to the * policy. */ if (count == 2 && !(rates[0].flags & IEEE80211_TX_RC_MCS) && rates[0].idx > 4 && rates[0].count > 2 && rates[1].idx < 2) { int mid_rate = (rates[0].idx + 4) >> 1; /* Decrease number of retries for the initial rate */ rates[0].count -= 2; if (mid_rate != 4) { /* Keep fallback rate at 1Mbps. 
*/ rates[3] = rates[1]; /* Inject 1 transmission on lowest g-rate */ rates[2].idx = 4; rates[2].count = 1; rates[2].flags = rates[1].flags; /* Inject 1 transmission on mid-rate */ rates[1].idx = mid_rate; rates[1].count = 1; /* Fallback to 1 Mbps is a really bad thing, * so let's try to increase probability of * successful transmission on the lowest g rate * even more */ if (rates[0].count >= 3) { --rates[0].count; ++rates[2].count; } /* Adjust amount of rates defined */ count += 2; } else { /* Keep fallback rate at 1Mbps. */ rates[2] = rates[1]; /* Inject 2 transmissions on lowest g-rate */ rates[1].idx = 4; rates[1].count = 2; /* Adjust amount of rates defined */ count += 1; } } policy->defined = cw1200_get_tx_rate(priv, &rates[0])->hw_value + 1; for (i = 0; i < count; ++i) { register unsigned rateid, off, shift, retries; rateid = cw1200_get_tx_rate(priv, &rates[i])->hw_value; off = rateid >> 3; /* eq. rateid / 8 */ shift = (rateid & 0x07) << 2; /* eq. (rateid % 8) * 4 */ retries = rates[i].count; if (retries > 0x0F) { rates[i].count = 0x0f; retries = 0x0F; } policy->tbl[off] |= __cpu_to_le32(retries << shift); policy->retry_count += retries; } pr_debug("[TX policy] Policy (%zu): %d:%d, %d:%d, %d:%d, %d:%d\n", count, rates[0].idx, rates[0].count, rates[1].idx, rates[1].count, rates[2].idx, rates[2].count, rates[3].idx, rates[3].count); } static inline bool tx_policy_is_equal(const struct tx_policy *wanted, const struct tx_policy *cached) { size_t count = wanted->defined >> 1; if (wanted->defined > cached->defined) return false; if (count) { if (memcmp(wanted->raw, cached->raw, count)) return false; } if (wanted->defined & 1) { if ((wanted->raw[count] & 0x0F) != (cached->raw[count] & 0x0F)) return false; } return true; } static int tx_policy_find(struct tx_policy_cache *cache, const struct tx_policy *wanted) { /* O(n) complexity. Not so good, but there's only 8 entries in * the cache. * Also lru helps to reduce search time. 
*/ struct tx_policy_cache_entry *it; /* First search for policy in "used" list */ list_for_each_entry(it, &cache->used, link) { if (tx_policy_is_equal(wanted, &it->policy)) return it - cache->cache; } /* Then - in "free list" */ list_for_each_entry(it, &cache->free, link) { if (tx_policy_is_equal(wanted, &it->policy)) return it - cache->cache; } return -1; } static inline void tx_policy_use(struct tx_policy_cache *cache, struct tx_policy_cache_entry *entry) { ++entry->policy.usage_count; list_move(&entry->link, &cache->used); } static inline int tx_policy_release(struct tx_policy_cache *cache, struct tx_policy_cache_entry *entry) { int ret = --entry->policy.usage_count; if (!ret) list_move(&entry->link, &cache->free); return ret; } void tx_policy_clean(struct cw1200_common *priv) { int idx, locked; struct tx_policy_cache *cache = &priv->tx_policy_cache; struct tx_policy_cache_entry *entry; cw1200_tx_queues_lock(priv); spin_lock_bh(&cache->lock); locked = list_empty(&cache->free); for (idx = 0; idx < TX_POLICY_CACHE_SIZE; idx++) { entry = &cache->cache[idx]; /* Policy usage count should be 0 at this time as all queues should be empty */ if (WARN_ON(entry->policy.usage_count)) { entry->policy.usage_count = 0; list_move(&entry->link, &cache->free); } memset(&entry->policy, 0, sizeof(entry->policy)); } if (locked) cw1200_tx_queues_unlock(priv); cw1200_tx_queues_unlock(priv); spin_unlock_bh(&cache->lock); } /* ******************************************************************** */ /* External TX policy cache API */ void tx_policy_init(struct cw1200_common *priv) { struct tx_policy_cache *cache = &priv->tx_policy_cache; int i; memset(cache, 0, sizeof(*cache)); spin_lock_init(&cache->lock); INIT_LIST_HEAD(&cache->used); INIT_LIST_HEAD(&cache->free); for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) list_add(&cache->cache[i].link, &cache->free); } static int tx_policy_get(struct cw1200_common *priv, struct ieee80211_tx_rate *rates, size_t count, bool *renew) { int idx; struct tx_policy_cache *cache = &priv->tx_policy_cache; struct tx_policy wanted; tx_policy_build(priv, &wanted, rates, count); spin_lock_bh(&cache->lock); if (WARN_ON_ONCE(list_empty(&cache->free))) { spin_unlock_bh(&cache->lock); return CW1200_INVALID_RATE_ID; } idx = tx_policy_find(cache, &wanted); if (idx >= 0) { pr_debug("[TX policy] Used TX policy: %d\n", idx); *renew = false; } else { struct tx_policy_cache_entry *entry; *renew = true; /* If policy is not found create a new one * using the oldest entry in "free" list */ entry = list_entry(cache->free.prev, struct tx_policy_cache_entry, link); entry->policy = wanted; idx = entry - cache->cache; pr_debug("[TX policy] New TX policy: %d\n", idx); tx_policy_dump(&entry->policy); } tx_policy_use(cache, &cache->cache[idx]); if (list_empty(&cache->free)) { /* Lock TX queues. */ cw1200_tx_queues_lock(priv); } spin_unlock_bh(&cache->lock); return idx; } static void tx_policy_put(struct cw1200_common *priv, int idx) { int usage, locked; struct tx_policy_cache *cache = &priv->tx_policy_cache; spin_lock_bh(&cache->lock); locked = list_empty(&cache->free); usage = tx_policy_release(cache, &cache->cache[idx]); if (locked && !usage) { /* Unlock TX queues. */ cw1200_tx_queues_unlock(priv); } spin_unlock_bh(&cache->lock); } static int tx_policy_upload(struct cw1200_common *priv) { struct tx_policy_cache *cache = &priv->tx_policy_cache; int i; struct wsm_set_tx_rate_retry_policy arg = { .num = 0, }; spin_lock_bh(&cache->lock); /* Upload only modified entries. 
*/ for (i = 0; i < TX_POLICY_CACHE_SIZE; ++i) { struct tx_policy *src = &cache->cache[i].policy; if (src->retry_count && !src->uploaded) { struct wsm_tx_rate_retry_policy *dst = &arg.tbl[arg.num]; dst->index = i; dst->short_retries = priv->short_frame_max_tx_count; dst->long_retries = priv->long_frame_max_tx_count; dst->flags = WSM_TX_RATE_POLICY_FLAG_TERMINATE_WHEN_FINISHED | WSM_TX_RATE_POLICY_FLAG_COUNT_INITIAL_TRANSMIT; memcpy(dst->rate_count_indices, src->tbl, sizeof(dst->rate_count_indices)); src->uploaded = 1; ++arg.num; } } spin_unlock_bh(&cache->lock); cw1200_debug_tx_cache_miss(priv); pr_debug("[TX policy] Upload %d policies\n", arg.num); return wsm_set_tx_rate_retry_policy(priv, &arg); } void tx_policy_upload_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, tx_policy_upload_work); pr_debug("[TX] TX policy upload.\n"); tx_policy_upload(priv); wsm_unlock_tx(priv); cw1200_tx_queues_unlock(priv); } /* ******************************************************************** */ /* cw1200 TX implementation */ struct cw1200_txinfo { struct sk_buff *skb; unsigned queue; struct ieee80211_tx_info *tx_info; const struct ieee80211_rate *rate; struct ieee80211_hdr *hdr; size_t hdrlen; const u8 *da; struct cw1200_sta_priv *sta_priv; struct ieee80211_sta *sta; struct cw1200_txpriv txpriv; }; u32 cw1200_rate_mask_to_wsm(struct cw1200_common *priv, u32 rates) { u32 ret = 0; int i; for (i = 0; i < 32; ++i) { if (rates & BIT(i)) ret |= BIT(priv->rates[i].hw_value); } return ret; } static const struct ieee80211_rate * cw1200_get_tx_rate(const struct cw1200_common *priv, const struct ieee80211_tx_rate *rate) { if (rate->idx < 0) return NULL; if (rate->flags & IEEE80211_TX_RC_MCS) return &priv->mcs_rates[rate->idx]; return &priv->hw->wiphy->bands[priv->channel->band]-> bitrates[rate->idx]; } static int cw1200_tx_h_calc_link_ids(struct cw1200_common *priv, struct cw1200_txinfo *t) { if (t->sta && t->sta_priv->link_id) t->txpriv.raw_link_id = t->txpriv.link_id = t->sta_priv->link_id; else if (priv->mode != NL80211_IFTYPE_AP) t->txpriv.raw_link_id = t->txpriv.link_id = 0; else if (is_multicast_ether_addr(t->da)) { if (priv->enable_beacon) { t->txpriv.raw_link_id = 0; t->txpriv.link_id = CW1200_LINK_ID_AFTER_DTIM; } else { t->txpriv.raw_link_id = 0; t->txpriv.link_id = 0; } } else { t->txpriv.link_id = cw1200_find_link_id(priv, t->da); if (!t->txpriv.link_id) t->txpriv.link_id = cw1200_alloc_link_id(priv, t->da); if (!t->txpriv.link_id) { wiphy_err(priv->hw->wiphy, "No more link IDs available.\n"); return -ENOENT; } t->txpriv.raw_link_id = t->txpriv.link_id; } if (t->txpriv.raw_link_id) priv->link_id_db[t->txpriv.raw_link_id - 1].timestamp = jiffies; if (t->sta && (t->sta->uapsd_queues & BIT(t->queue))) t->txpriv.link_id = CW1200_LINK_ID_UAPSD; return 0; } static void cw1200_tx_h_pm(struct cw1200_common *priv, struct cw1200_txinfo *t) { if (ieee80211_is_auth(t->hdr->frame_control)) { u32 mask = ~BIT(t->txpriv.raw_link_id); spin_lock_bh(&priv->ps_state_lock); priv->sta_asleep_mask &= mask; priv->pspoll_mask &= mask; spin_unlock_bh(&priv->ps_state_lock); } } static void cw1200_tx_h_calc_tid(struct cw1200_common *priv, struct cw1200_txinfo *t) { if (ieee80211_is_data_qos(t->hdr->frame_control)) { u8 *qos = ieee80211_get_qos_ctl(t->hdr); t->txpriv.tid = qos[0] & IEEE80211_QOS_CTL_TID_MASK; } else if (ieee80211_is_data(t->hdr->frame_control)) { t->txpriv.tid = 0; } } static int cw1200_tx_h_crypt(struct cw1200_common *priv, struct cw1200_txinfo *t) { if 
(!t->tx_info->control.hw_key || !ieee80211_has_protected(t->hdr->frame_control)) return 0; t->hdrlen += t->tx_info->control.hw_key->iv_len; skb_put(t->skb, t->tx_info->control.hw_key->icv_len); if (t->tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) skb_put(t->skb, 8); /* MIC space */ return 0; } static int cw1200_tx_h_align(struct cw1200_common *priv, struct cw1200_txinfo *t, u8 *flags) { size_t offset = (size_t)t->skb->data & 3; if (!offset) return 0; if (offset & 1) { wiphy_err(priv->hw->wiphy, "Bug: attempt to transmit a frame with wrong alignment: %zu\n", offset); return -EINVAL; } if (skb_headroom(t->skb) < offset) { wiphy_err(priv->hw->wiphy, "Bug: no space allocated for DMA alignment. headroom: %d\n", skb_headroom(t->skb)); return -ENOMEM; } skb_push(t->skb, offset); t->hdrlen += offset; t->txpriv.offset += offset; *flags |= WSM_TX_2BYTES_SHIFT; cw1200_debug_tx_align(priv); return 0; } static int cw1200_tx_h_action(struct cw1200_common *priv, struct cw1200_txinfo *t) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)t->hdr; if (ieee80211_is_action(t->hdr->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_BACK) return 1; else return 0; } /* Add WSM header */ static struct wsm_tx * cw1200_tx_h_wsm(struct cw1200_common *priv, struct cw1200_txinfo *t) { struct wsm_tx *wsm; if (skb_headroom(t->skb) < sizeof(struct wsm_tx)) { wiphy_err(priv->hw->wiphy, "Bug: no space allocated for WSM header. headroom: %d\n", skb_headroom(t->skb)); return NULL; } wsm = skb_push(t->skb, sizeof(struct wsm_tx)); t->txpriv.offset += sizeof(struct wsm_tx); memset(wsm, 0, sizeof(*wsm)); wsm->hdr.len = __cpu_to_le16(t->skb->len); wsm->hdr.id = __cpu_to_le16(0x0004); wsm->queue_id = wsm_queue_id_to_wsm(t->queue); return wsm; } /* BT Coex specific handling */ static void cw1200_tx_h_bt(struct cw1200_common *priv, struct cw1200_txinfo *t, struct wsm_tx *wsm) { u8 priority = 0; if (!priv->bt_present) return; if (ieee80211_is_nullfunc(t->hdr->frame_control)) { priority = WSM_EPTA_PRIORITY_MGT; } else if (ieee80211_is_data(t->hdr->frame_control)) { /* Skip LLC SNAP header (+6) */ u8 *payload = &t->skb->data[t->hdrlen]; __be16 *ethertype = (__be16 *)&payload[6]; if (be16_to_cpu(*ethertype) == ETH_P_PAE) priority = WSM_EPTA_PRIORITY_EAPOL; } else if (ieee80211_is_assoc_req(t->hdr->frame_control) || ieee80211_is_reassoc_req(t->hdr->frame_control)) { struct ieee80211_mgmt *mgt_frame = (struct ieee80211_mgmt *)t->hdr; if (le16_to_cpu(mgt_frame->u.assoc_req.listen_interval) < priv->listen_interval) { pr_debug("Modified Listen Interval to %d from %d\n", priv->listen_interval, mgt_frame->u.assoc_req.listen_interval); /* Replace listen interval derieved from * the one read from SDD */ mgt_frame->u.assoc_req.listen_interval = cpu_to_le16(priv->listen_interval); } } if (!priority) { if (ieee80211_is_action(t->hdr->frame_control)) priority = WSM_EPTA_PRIORITY_ACTION; else if (ieee80211_is_mgmt(t->hdr->frame_control)) priority = WSM_EPTA_PRIORITY_MGT; else if (wsm->queue_id == WSM_QUEUE_VOICE) priority = WSM_EPTA_PRIORITY_VOICE; else if (wsm->queue_id == WSM_QUEUE_VIDEO) priority = WSM_EPTA_PRIORITY_VIDEO; else priority = WSM_EPTA_PRIORITY_DATA; } pr_debug("[TX] EPTA priority %d.\n", priority); wsm->flags |= priority << 1; } static int cw1200_tx_h_rate_policy(struct cw1200_common *priv, struct cw1200_txinfo *t, struct wsm_tx *wsm) { bool tx_policy_renew = false; t->txpriv.rate_id = tx_policy_get(priv, t->tx_info->control.rates, IEEE80211_TX_MAX_RATES, &tx_policy_renew); if (t->txpriv.rate_id == 
CW1200_INVALID_RATE_ID) return -EFAULT; wsm->flags |= t->txpriv.rate_id << 4; t->rate = cw1200_get_tx_rate(priv, &t->tx_info->control.rates[0]); wsm->max_tx_rate = t->rate->hw_value; if (t->rate->flags & IEEE80211_TX_RC_MCS) { if (cw1200_ht_greenfield(&priv->ht_info)) wsm->ht_tx_parameters |= __cpu_to_le32(WSM_HT_TX_GREENFIELD); else wsm->ht_tx_parameters |= __cpu_to_le32(WSM_HT_TX_MIXED); } if (tx_policy_renew) { pr_debug("[TX] TX policy renew.\n"); /* It's not so optimal to stop TX queues every now and then. * Better to reimplement task scheduling with * a counter. TODO. */ wsm_lock_tx_async(priv); cw1200_tx_queues_lock(priv); if (queue_work(priv->workqueue, &priv->tx_policy_upload_work) <= 0) { cw1200_tx_queues_unlock(priv); wsm_unlock_tx(priv); } } return 0; } static bool cw1200_tx_h_pm_state(struct cw1200_common *priv, struct cw1200_txinfo *t) { int was_buffered = 1; if (t->txpriv.link_id == CW1200_LINK_ID_AFTER_DTIM && !priv->buffered_multicasts) { priv->buffered_multicasts = true; if (priv->sta_asleep_mask) queue_work(priv->workqueue, &priv->multicast_start_work); } if (t->txpriv.raw_link_id && t->txpriv.tid < CW1200_MAX_TID) was_buffered = priv->link_id_db[t->txpriv.raw_link_id - 1].buffered[t->txpriv.tid]++; return !was_buffered; } /* ******************************************************************** */ void cw1200_tx(struct ieee80211_hw *dev, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct cw1200_common *priv = dev->priv; struct cw1200_txinfo t = { .skb = skb, .queue = skb_get_queue_mapping(skb), .tx_info = IEEE80211_SKB_CB(skb), .hdr = (struct ieee80211_hdr *)skb->data, .txpriv.tid = CW1200_MAX_TID, .txpriv.rate_id = CW1200_INVALID_RATE_ID, }; struct ieee80211_sta *sta; struct wsm_tx *wsm; bool tid_update = false; u8 flags = 0; int ret; if (priv->bh_error) goto drop; t.hdrlen = ieee80211_hdrlen(t.hdr->frame_control); t.da = ieee80211_get_DA(t.hdr); if (control) { t.sta = control->sta; t.sta_priv = (struct cw1200_sta_priv *)&t.sta->drv_priv; } if (WARN_ON(t.queue >= 4)) goto drop; ret = cw1200_tx_h_calc_link_ids(priv, &t); if (ret) goto drop; pr_debug("[TX] TX %d bytes (queue: %d, link_id: %d (%d)).\n", skb->len, t.queue, t.txpriv.link_id, t.txpriv.raw_link_id); cw1200_tx_h_pm(priv, &t); cw1200_tx_h_calc_tid(priv, &t); ret = cw1200_tx_h_crypt(priv, &t); if (ret) goto drop; ret = cw1200_tx_h_align(priv, &t, &flags); if (ret) goto drop; ret = cw1200_tx_h_action(priv, &t); if (ret) goto drop; wsm = cw1200_tx_h_wsm(priv, &t); if (!wsm) { ret = -ENOMEM; goto drop; } wsm->flags |= flags; cw1200_tx_h_bt(priv, &t, wsm); ret = cw1200_tx_h_rate_policy(priv, &t, wsm); if (ret) goto drop; sta = t.sta; spin_lock_bh(&priv->ps_state_lock); { tid_update = cw1200_tx_h_pm_state(priv, &t); BUG_ON(cw1200_queue_put(&priv->tx_queue[t.queue], t.skb, &t.txpriv)); } spin_unlock_bh(&priv->ps_state_lock); if (tid_update && sta) ieee80211_sta_set_buffered(sta, t.txpriv.tid, true); cw1200_bh_wakeup(priv); return; drop: cw1200_skb_dtor(priv, skb, &t.txpriv); return; } /* ******************************************************************** */ static int cw1200_handle_action_rx(struct cw1200_common *priv, struct sk_buff *skb) { struct ieee80211_mgmt *mgmt = (void *)skb->data; /* Filter block ACK negotiation: fully controlled by firmware */ if (mgmt->u.action.category == WLAN_CATEGORY_BACK) return 1; return 0; } static int cw1200_handle_pspoll(struct cw1200_common *priv, struct sk_buff *skb) { struct ieee80211_sta *sta; struct ieee80211_pspoll *pspoll = (struct ieee80211_pspoll *)skb->data; 
int link_id = 0; u32 pspoll_mask = 0; int drop = 1; int i; if (priv->join_status != CW1200_JOIN_STATUS_AP) goto done; if (memcmp(priv->vif->addr, pspoll->bssid, ETH_ALEN)) goto done; rcu_read_lock(); sta = ieee80211_find_sta(priv->vif, pspoll->ta); if (sta) { struct cw1200_sta_priv *sta_priv; sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv; link_id = sta_priv->link_id; pspoll_mask = BIT(sta_priv->link_id); } rcu_read_unlock(); if (!link_id) goto done; priv->pspoll_mask |= pspoll_mask; drop = 0; /* Do not report pspols if data for given link id is queued already. */ for (i = 0; i < 4; ++i) { if (cw1200_queue_get_num_queued(&priv->tx_queue[i], pspoll_mask)) { cw1200_bh_wakeup(priv); drop = 1; break; } } pr_debug("[RX] PSPOLL: %s\n", drop ? "local" : "fwd"); done: return drop; } /* ******************************************************************** */ void cw1200_tx_confirm_cb(struct cw1200_common *priv, int link_id, struct wsm_tx_confirm *arg) { u8 queue_id = cw1200_queue_get_queue_id(arg->packet_id); struct cw1200_queue *queue = &priv->tx_queue[queue_id]; struct sk_buff *skb; const struct cw1200_txpriv *txpriv; pr_debug("[TX] TX confirm: %d, %d.\n", arg->status, arg->ack_failures); if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { /* STA is stopped. */ return; } if (WARN_ON(queue_id >= 4)) return; if (arg->status) pr_debug("TX failed: %d.\n", arg->status); if ((arg->status == WSM_REQUEUE) && (arg->flags & WSM_TX_STATUS_REQUEUE)) { /* "Requeue" means "implicit suspend" */ struct wsm_suspend_resume suspend = { .link_id = link_id, .stop = 1, .multicast = !link_id, }; cw1200_suspend_resume(priv, &suspend); wiphy_warn(priv->hw->wiphy, "Requeue for link_id %d (try %d). STAs asleep: 0x%.8X\n", link_id, cw1200_queue_get_generation(arg->packet_id) + 1, priv->sta_asleep_mask); cw1200_queue_requeue(queue, arg->packet_id); spin_lock_bh(&priv->ps_state_lock); if (!link_id) { priv->buffered_multicasts = true; if (priv->sta_asleep_mask) { queue_work(priv->workqueue, &priv->multicast_start_work); } } spin_unlock_bh(&priv->ps_state_lock); } else if (!cw1200_queue_get_skb(queue, arg->packet_id, &skb, &txpriv)) { struct ieee80211_tx_info *tx = IEEE80211_SKB_CB(skb); int tx_count = arg->ack_failures; u8 ht_flags = 0; int i; if (cw1200_ht_greenfield(&priv->ht_info)) ht_flags |= IEEE80211_TX_RC_GREEN_FIELD; spin_lock(&priv->bss_loss_lock); if (priv->bss_loss_state && arg->packet_id == priv->bss_loss_confirm_id) { if (arg->status) { /* Recovery failed */ __cw1200_cqm_bssloss_sm(priv, 0, 0, 1); } else { /* Recovery succeeded */ __cw1200_cqm_bssloss_sm(priv, 0, 1, 0); } } spin_unlock(&priv->bss_loss_lock); if (!arg->status) { tx->flags |= IEEE80211_TX_STAT_ACK; ++tx_count; cw1200_debug_txed(priv); if (arg->flags & WSM_TX_STATUS_AGGREGATION) { /* Do not report aggregation to mac80211: * it confuses minstrel a lot. 
*/ /* tx->flags |= IEEE80211_TX_STAT_AMPDU; */ cw1200_debug_txed_agg(priv); } } else { if (tx_count) ++tx_count; } for (i = 0; i < IEEE80211_TX_MAX_RATES; ++i) { if (tx->status.rates[i].count >= tx_count) { tx->status.rates[i].count = tx_count; break; } tx_count -= tx->status.rates[i].count; if (tx->status.rates[i].flags & IEEE80211_TX_RC_MCS) tx->status.rates[i].flags |= ht_flags; } for (++i; i < IEEE80211_TX_MAX_RATES; ++i) { tx->status.rates[i].count = 0; tx->status.rates[i].idx = -1; } /* Pull off any crypto trailers that we added on */ if (tx->control.hw_key) { skb_trim(skb, skb->len - tx->control.hw_key->icv_len); if (tx->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) skb_trim(skb, skb->len - 8); /* MIC space */ } cw1200_queue_remove(queue, arg->packet_id); } /* XXX TODO: Only wake if there are pending transmits.. */ cw1200_bh_wakeup(priv); } static void cw1200_notify_buffered_tx(struct cw1200_common *priv, struct sk_buff *skb, int link_id, int tid) { struct ieee80211_sta *sta; struct ieee80211_hdr *hdr; u8 *buffered; u8 still_buffered = 0; if (link_id && tid < CW1200_MAX_TID) { buffered = priv->link_id_db [link_id - 1].buffered; spin_lock_bh(&priv->ps_state_lock); if (!WARN_ON(!buffered[tid])) still_buffered = --buffered[tid]; spin_unlock_bh(&priv->ps_state_lock); if (!still_buffered && tid < CW1200_MAX_TID) { hdr = (struct ieee80211_hdr *)skb->data; rcu_read_lock(); sta = ieee80211_find_sta(priv->vif, hdr->addr1); if (sta) ieee80211_sta_set_buffered(sta, tid, false); rcu_read_unlock(); } } } void cw1200_skb_dtor(struct cw1200_common *priv, struct sk_buff *skb, const struct cw1200_txpriv *txpriv) { skb_pull(skb, txpriv->offset); if (txpriv->rate_id != CW1200_INVALID_RATE_ID) { cw1200_notify_buffered_tx(priv, skb, txpriv->raw_link_id, txpriv->tid); tx_policy_put(priv, txpriv->rate_id); } ieee80211_tx_status(priv->hw, skb); } void cw1200_rx_cb(struct cw1200_common *priv, struct wsm_rx *arg, int link_id, struct sk_buff **skb_p) { struct sk_buff *skb = *skb_p; struct ieee80211_rx_status *hdr = IEEE80211_SKB_RXCB(skb); struct ieee80211_hdr *frame = (struct ieee80211_hdr *)skb->data; struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; struct cw1200_link_entry *entry = NULL; unsigned long grace_period; bool early_data = false; bool p2p = priv->vif && priv->vif->p2p; size_t hdrlen; hdr->flag = 0; if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { /* STA is stopped. */ goto drop; } if (link_id && link_id <= CW1200_MAX_STA_IN_AP_MODE) { entry = &priv->link_id_db[link_id - 1]; if (entry->status == CW1200_LINK_SOFT && ieee80211_is_data(frame->frame_control)) early_data = true; entry->timestamp = jiffies; } else if (p2p && ieee80211_is_action(frame->frame_control) && (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) { pr_debug("[RX] Going to MAP&RESET link ID\n"); WARN_ON(work_pending(&priv->linkid_reset_work)); memcpy(&priv->action_frame_sa[0], ieee80211_get_SA(frame), ETH_ALEN); priv->action_linkid = 0; schedule_work(&priv->linkid_reset_work); } if (link_id && p2p && ieee80211_is_action(frame->frame_control) && (mgmt->u.action.category == WLAN_CATEGORY_PUBLIC)) { /* Link ID already exists for the ACTION frame. 
* Reset and Remap */ WARN_ON(work_pending(&priv->linkid_reset_work)); memcpy(&priv->action_frame_sa[0], ieee80211_get_SA(frame), ETH_ALEN); priv->action_linkid = link_id; schedule_work(&priv->linkid_reset_work); } if (arg->status) { if (arg->status == WSM_STATUS_MICFAILURE) { pr_debug("[RX] MIC failure.\n"); hdr->flag |= RX_FLAG_MMIC_ERROR; } else if (arg->status == WSM_STATUS_NO_KEY_FOUND) { pr_debug("[RX] No key found.\n"); goto drop; } else { pr_debug("[RX] Receive failure: %d.\n", arg->status); goto drop; } } if (skb->len < sizeof(struct ieee80211_pspoll)) { wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. Size is lesser than IEEE header.\n"); goto drop; } if (ieee80211_is_pspoll(frame->frame_control)) if (cw1200_handle_pspoll(priv, skb)) goto drop; hdr->band = ((arg->channel_number & 0xff00) || (arg->channel_number > 14)) ? NL80211_BAND_5GHZ : NL80211_BAND_2GHZ; hdr->freq = ieee80211_channel_to_frequency( arg->channel_number, hdr->band); if (arg->rx_rate >= 14) { hdr->encoding = RX_ENC_HT; hdr->rate_idx = arg->rx_rate - 14; } else if (arg->rx_rate >= 4) { hdr->rate_idx = arg->rx_rate - 2; } else { hdr->rate_idx = arg->rx_rate; } hdr->signal = (s8)arg->rcpi_rssi; hdr->antenna = 0; hdrlen = ieee80211_hdrlen(frame->frame_control); if (WSM_RX_STATUS_ENCRYPTION(arg->flags)) { size_t iv_len = 0, icv_len = 0; hdr->flag |= RX_FLAG_DECRYPTED | RX_FLAG_IV_STRIPPED; /* Oops... There is no fast way to ask mac80211 about * IV/ICV lengths. Even defineas are not exposed. */ switch (WSM_RX_STATUS_ENCRYPTION(arg->flags)) { case WSM_RX_STATUS_WEP: iv_len = 4 /* WEP_IV_LEN */; icv_len = 4 /* WEP_ICV_LEN */; break; case WSM_RX_STATUS_TKIP: iv_len = 8 /* TKIP_IV_LEN */; icv_len = 4 /* TKIP_ICV_LEN */ + 8 /*MICHAEL_MIC_LEN*/; hdr->flag |= RX_FLAG_MMIC_STRIPPED; break; case WSM_RX_STATUS_AES: iv_len = 8 /* CCMP_HDR_LEN */; icv_len = 8 /* CCMP_MIC_LEN */; break; case WSM_RX_STATUS_WAPI: iv_len = 18 /* WAPI_HDR_LEN */; icv_len = 16 /* WAPI_MIC_LEN */; break; default: pr_warn("Unknown encryption type %d\n", WSM_RX_STATUS_ENCRYPTION(arg->flags)); goto drop; } /* Firmware strips ICV in case of MIC failure. */ if (arg->status == WSM_STATUS_MICFAILURE) icv_len = 0; if (skb->len < hdrlen + iv_len + icv_len) { wiphy_warn(priv->hw->wiphy, "Malformed SDU rx'ed. 
Size is lesser than crypto headers.\n"); goto drop; } /* Remove IV, ICV and MIC */ skb_trim(skb, skb->len - icv_len); memmove(skb->data + iv_len, skb->data, hdrlen); skb_pull(skb, iv_len); } /* Remove TSF from the end of frame */ if (arg->flags & WSM_RX_STATUS_TSF_INCLUDED) { hdr->mactime = get_unaligned_le64(skb->data + skb->len - 8); if (skb->len >= 8) skb_trim(skb, skb->len - 8); } else { hdr->mactime = 0; } cw1200_debug_rxed(priv); if (arg->flags & WSM_RX_STATUS_AGGREGATE) cw1200_debug_rxed_agg(priv); if (ieee80211_is_action(frame->frame_control) && (arg->flags & WSM_RX_STATUS_ADDRESS1)) { if (cw1200_handle_action_rx(priv, skb)) return; } else if (ieee80211_is_beacon(frame->frame_control) && !arg->status && priv->vif && ether_addr_equal(ieee80211_get_SA(frame), priv->vif->bss_conf.bssid)) { const u8 *tim_ie; u8 *ies = ((struct ieee80211_mgmt *) (skb->data))->u.beacon.variable; size_t ies_len = skb->len - (ies - (u8 *)(skb->data)); tim_ie = cfg80211_find_ie(WLAN_EID_TIM, ies, ies_len); if (tim_ie) { struct ieee80211_tim_ie *tim = (struct ieee80211_tim_ie *)&tim_ie[2]; if (priv->join_dtim_period != tim->dtim_period) { priv->join_dtim_period = tim->dtim_period; queue_work(priv->workqueue, &priv->set_beacon_wakeup_period_work); } } /* Disable beacon filter once we're associated... */ if (priv->disable_beacon_filter && (priv->vif->cfg.assoc || priv->vif->cfg.ibss_joined)) { priv->disable_beacon_filter = false; queue_work(priv->workqueue, &priv->update_filtering_work); } } /* Stay awake after frame is received to give * userspace chance to react and acquire appropriate * wakelock. */ if (ieee80211_is_auth(frame->frame_control)) grace_period = 5 * HZ; else if (ieee80211_is_deauth(frame->frame_control)) grace_period = 5 * HZ; else grace_period = 1 * HZ; cw1200_pm_stay_awake(&priv->pm_state, grace_period); if (early_data) { spin_lock_bh(&priv->ps_state_lock); /* Double-check status with lock held */ if (entry->status == CW1200_LINK_SOFT) skb_queue_tail(&entry->rx_queue, skb); else ieee80211_rx_irqsafe(priv->hw, skb); spin_unlock_bh(&priv->ps_state_lock); } else { ieee80211_rx_irqsafe(priv->hw, skb); } *skb_p = NULL; return; drop: /* TODO: update failure counters */ return; } /* ******************************************************************** */ /* Security */ int cw1200_alloc_key(struct cw1200_common *priv) { int idx; idx = ffs(~priv->key_map) - 1; if (idx < 0 || idx > WSM_KEY_MAX_INDEX) return -1; priv->key_map |= BIT(idx); priv->keys[idx].index = idx; return idx; } void cw1200_free_key(struct cw1200_common *priv, int idx) { BUG_ON(!(priv->key_map & BIT(idx))); memset(&priv->keys[idx], 0, sizeof(priv->keys[idx])); priv->key_map &= ~BIT(idx); } void cw1200_free_keys(struct cw1200_common *priv) { memset(&priv->keys, 0, sizeof(priv->keys)); priv->key_map = 0; } int cw1200_upload_keys(struct cw1200_common *priv) { int idx, ret = 0; for (idx = 0; idx <= WSM_KEY_MAX_INDEX; ++idx) if (priv->key_map & BIT(idx)) { ret = wsm_add_key(priv, &priv->keys[idx]); if (ret < 0) break; } return ret; } /* Workaround for WFD test case 6.1.10 */ void cw1200_link_id_reset(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, linkid_reset_work); int temp_linkid; if (!priv->action_linkid) { /* In GO mode we can receive ACTION frames without a linkID */ temp_linkid = cw1200_alloc_link_id(priv, &priv->action_frame_sa[0]); WARN_ON(!temp_linkid); if (temp_linkid) { /* Make sure we execute the WQ */ flush_workqueue(priv->workqueue); /* Release the link ID */ 
spin_lock_bh(&priv->ps_state_lock); priv->link_id_db[temp_linkid - 1].prev_status = priv->link_id_db[temp_linkid - 1].status; priv->link_id_db[temp_linkid - 1].status = CW1200_LINK_RESET; spin_unlock_bh(&priv->ps_state_lock); wsm_lock_tx_async(priv); if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) wsm_unlock_tx(priv); } } else { spin_lock_bh(&priv->ps_state_lock); priv->link_id_db[priv->action_linkid - 1].prev_status = priv->link_id_db[priv->action_linkid - 1].status; priv->link_id_db[priv->action_linkid - 1].status = CW1200_LINK_RESET_REMAP; spin_unlock_bh(&priv->ps_state_lock); wsm_lock_tx_async(priv); if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) wsm_unlock_tx(priv); flush_workqueue(priv->workqueue); } } int cw1200_find_link_id(struct cw1200_common *priv, const u8 *mac) { int i, ret = 0; spin_lock_bh(&priv->ps_state_lock); for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { if (!memcmp(mac, priv->link_id_db[i].mac, ETH_ALEN) && priv->link_id_db[i].status) { priv->link_id_db[i].timestamp = jiffies; ret = i + 1; break; } } spin_unlock_bh(&priv->ps_state_lock); return ret; } int cw1200_alloc_link_id(struct cw1200_common *priv, const u8 *mac) { int i, ret = 0; unsigned long max_inactivity = 0; unsigned long now = jiffies; spin_lock_bh(&priv->ps_state_lock); for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { if (!priv->link_id_db[i].status) { ret = i + 1; break; } else if (priv->link_id_db[i].status != CW1200_LINK_HARD && !priv->tx_queue_stats.link_map_cache[i + 1]) { unsigned long inactivity = now - priv->link_id_db[i].timestamp; if (inactivity < max_inactivity) continue; max_inactivity = inactivity; ret = i + 1; } } if (ret) { struct cw1200_link_entry *entry = &priv->link_id_db[ret - 1]; pr_debug("[AP] STA added, link_id: %d\n", ret); entry->status = CW1200_LINK_RESERVE; memcpy(&entry->mac, mac, ETH_ALEN); memset(&entry->buffered, 0, CW1200_MAX_TID); skb_queue_head_init(&entry->rx_queue); wsm_lock_tx_async(priv); if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) wsm_unlock_tx(priv); } else { wiphy_info(priv->hw->wiphy, "[AP] Early: no more link IDs available.\n"); } spin_unlock_bh(&priv->ps_state_lock); return ret; } void cw1200_link_id_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, link_id_work); wsm_flush_tx(priv); cw1200_link_id_gc_work(&priv->link_id_gc_work.work); wsm_unlock_tx(priv); } void cw1200_link_id_gc_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, link_id_gc_work.work); struct wsm_reset reset = { .reset_statistics = false, }; struct wsm_map_link map_link = { .link_id = 0, }; unsigned long now = jiffies; unsigned long next_gc = -1; long ttl; bool need_reset; u32 mask; int i; if (priv->join_status != CW1200_JOIN_STATUS_AP) return; wsm_lock_tx(priv); spin_lock_bh(&priv->ps_state_lock); for (i = 0; i < CW1200_MAX_STA_IN_AP_MODE; ++i) { need_reset = false; mask = BIT(i + 1); if (priv->link_id_db[i].status == CW1200_LINK_RESERVE || (priv->link_id_db[i].status == CW1200_LINK_HARD && !(priv->link_id_map & mask))) { if (priv->link_id_map & mask) { priv->sta_asleep_mask &= ~mask; priv->pspoll_mask &= ~mask; need_reset = true; } priv->link_id_map |= mask; if (priv->link_id_db[i].status != CW1200_LINK_HARD) priv->link_id_db[i].status = CW1200_LINK_SOFT; memcpy(map_link.mac_addr, priv->link_id_db[i].mac, ETH_ALEN); spin_unlock_bh(&priv->ps_state_lock); if (need_reset) { reset.link_id = i + 1; wsm_reset(priv, &reset); } map_link.link_id = i + 1; 
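	/* Commit the MAC <-> link-ID binding to firmware; if the slot was
	 * being recycled, a wsm_reset for it was issued just above before
	 * re-mapping.
	 */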
wsm_map_link(priv, &map_link); next_gc = min(next_gc, CW1200_LINK_ID_GC_TIMEOUT); spin_lock_bh(&priv->ps_state_lock); } else if (priv->link_id_db[i].status == CW1200_LINK_SOFT) { ttl = priv->link_id_db[i].timestamp - now + CW1200_LINK_ID_GC_TIMEOUT; if (ttl <= 0) { need_reset = true; priv->link_id_db[i].status = CW1200_LINK_OFF; priv->link_id_map &= ~mask; priv->sta_asleep_mask &= ~mask; priv->pspoll_mask &= ~mask; eth_zero_addr(map_link.mac_addr); spin_unlock_bh(&priv->ps_state_lock); reset.link_id = i + 1; wsm_reset(priv, &reset); spin_lock_bh(&priv->ps_state_lock); } else { next_gc = min_t(unsigned long, next_gc, ttl); } } else if (priv->link_id_db[i].status == CW1200_LINK_RESET || priv->link_id_db[i].status == CW1200_LINK_RESET_REMAP) { int status = priv->link_id_db[i].status; priv->link_id_db[i].status = priv->link_id_db[i].prev_status; priv->link_id_db[i].timestamp = now; reset.link_id = i + 1; spin_unlock_bh(&priv->ps_state_lock); wsm_reset(priv, &reset); if (status == CW1200_LINK_RESET_REMAP) { memcpy(map_link.mac_addr, priv->link_id_db[i].mac, ETH_ALEN); map_link.link_id = i + 1; wsm_map_link(priv, &map_link); next_gc = min(next_gc, CW1200_LINK_ID_GC_TIMEOUT); } spin_lock_bh(&priv->ps_state_lock); } if (need_reset) { skb_queue_purge(&priv->link_id_db[i].rx_queue); pr_debug("[AP] STA removed, link_id: %d\n", reset.link_id); } } spin_unlock_bh(&priv->ps_state_lock); if (next_gc != -1) queue_delayed_work(priv->workqueue, &priv->link_id_gc_work, next_gc); wsm_unlock_tx(priv); }
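/*
 * Editor's aside, not part of the driver: a minimal user-space sketch of the
 * buffer surgery the RX path above performs when the firmware reports an
 * encrypted frame.  The ICV/MIC is trimmed from the tail, the 802.11 header
 * is slid forward over the IV, and the front is pulled, mirroring the
 * skb_trim()/memmove()/skb_pull() sequence.  Lengths below assume the WEP
 * case (4-byte IV, 4-byte ICV); strip_crypto_headers() is an illustrative
 * name, not a cw1200 symbol.
 */
#include <stdio.h>
#include <string.h>

static size_t strip_crypto_headers(unsigned char *buf, size_t len,
				   size_t hdrlen, size_t iv_len,
				   size_t icv_len)
{
	if (len < hdrlen + iv_len + icv_len)
		return 0;				/* malformed: would be dropped */
	len -= icv_len;					/* skb_trim(): drop ICV/MIC */
	memmove(buf + iv_len, buf, hdrlen);		/* slide header over the IV */
	memmove(buf, buf + iv_len, len - iv_len);	/* skb_pull(): drop the gap */
	return len - iv_len;
}

int main(void)
{
	/* 24-byte header 'H', 4-byte IV 'i', payload, 4-byte ICV 'c' */
	unsigned char frame[] = "HHHHHHHHHHHHHHHHHHHHHHHHiiiiPAYLOADcccc";
	size_t n = strip_crypto_headers(frame, sizeof(frame) - 1, 24, 4, 4);

	printf("%zu bytes after stripping: %.*s\n", n, (int)n, frame);
	return 0;
}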
linux-master
drivers/net/wireless/st/cw1200/txrx.c
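/*
 * Editor's aside, between the two files: the RX path above passes the raw
 * rcpi_rssi byte straight through as hdr->signal, while the WSM_EVENT_RCPI_RSSI
 * handling in the STA code that follows converts RCPI (unsigned, half-dB steps)
 * to a signed dBm figure with RSSI = RCPI / 2 - 110.  The sketch below only
 * tabulates that mapping; rcpi_to_dbm() is an illustrative helper, not a
 * driver symbol.
 */
#include <stdio.h>

static int rcpi_to_dbm(unsigned int rcpi)
{
	return (int)rcpi / 2 - 110;	/* RCPI counts half-dB steps above -110 dBm */
}

int main(void)
{
	static const unsigned int samples[] = { 0, 60, 120, 220 };
	size_t i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("RCPI %3u -> %4d dBm\n", samples[i], rcpi_to_dbm(samples[i]));
	return 0;
}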
// SPDX-License-Identifier: GPL-2.0-only /* * Mac80211 STA API for ST-Ericsson CW1200 drivers * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> */ #include <linux/vmalloc.h> #include <linux/sched.h> #include <linux/firmware.h> #include <linux/module.h> #include <linux/etherdevice.h> #include "cw1200.h" #include "sta.h" #include "fwio.h" #include "bh.h" #include "debug.h" #ifndef ERP_INFO_BYTE_OFFSET #define ERP_INFO_BYTE_OFFSET 2 #endif static void cw1200_do_join(struct cw1200_common *priv); static void cw1200_do_unjoin(struct cw1200_common *priv); static int cw1200_upload_beacon(struct cw1200_common *priv); static int cw1200_upload_pspoll(struct cw1200_common *priv); static int cw1200_upload_null(struct cw1200_common *priv); static int cw1200_upload_qosnull(struct cw1200_common *priv); static int cw1200_start_ap(struct cw1200_common *priv); static int cw1200_update_beaconing(struct cw1200_common *priv); static int cw1200_enable_beaconing(struct cw1200_common *priv, bool enable); static void __cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif, enum sta_notify_cmd notify_cmd, int link_id); static int __cw1200_flush(struct cw1200_common *priv, bool drop); static inline void __cw1200_free_event_queue(struct list_head *list) { struct cw1200_wsm_event *event, *tmp; list_for_each_entry_safe(event, tmp, list, link) { list_del(&event->link); kfree(event); } } /* ******************************************************************** */ /* STA API */ int cw1200_start(struct ieee80211_hw *dev) { struct cw1200_common *priv = dev->priv; int ret = 0; cw1200_pm_stay_awake(&priv->pm_state, HZ); mutex_lock(&priv->conf_mutex); /* default EDCA */ WSM_EDCA_SET(&priv->edca, 0, 0x0002, 0x0003, 0x0007, 47, 0xc8, false); WSM_EDCA_SET(&priv->edca, 1, 0x0002, 0x0007, 0x000f, 94, 0xc8, false); WSM_EDCA_SET(&priv->edca, 2, 0x0003, 0x000f, 0x03ff, 0, 0xc8, false); WSM_EDCA_SET(&priv->edca, 3, 0x0007, 0x000f, 0x03ff, 0, 0xc8, false); ret = wsm_set_edca_params(priv, &priv->edca); if (ret) goto out; ret = cw1200_set_uapsd_param(priv, &priv->edca); if (ret) goto out; priv->setbssparams_done = false; memcpy(priv->mac_addr, dev->wiphy->perm_addr, ETH_ALEN); priv->mode = NL80211_IFTYPE_MONITOR; priv->wep_default_key_id = -1; priv->cqm_beacon_loss_count = 10; ret = cw1200_setup_mac(priv); if (ret) goto out; out: mutex_unlock(&priv->conf_mutex); return ret; } void cw1200_stop(struct ieee80211_hw *dev) { struct cw1200_common *priv = dev->priv; LIST_HEAD(list); int i; wsm_lock_tx(priv); while (down_trylock(&priv->scan.lock)) { /* Scan is in progress. Force it to stop. */ priv->scan.req = NULL; schedule(); } up(&priv->scan.lock); cancel_delayed_work_sync(&priv->scan.probe_work); cancel_delayed_work_sync(&priv->scan.timeout); cancel_delayed_work_sync(&priv->clear_recent_scan_work); cancel_delayed_work_sync(&priv->join_timeout); cw1200_cqm_bssloss_sm(priv, 0, 0, 0); cancel_work_sync(&priv->unjoin_work); cancel_delayed_work_sync(&priv->link_id_gc_work); flush_workqueue(priv->workqueue); del_timer_sync(&priv->mcast_timeout); mutex_lock(&priv->conf_mutex); priv->mode = NL80211_IFTYPE_UNSPECIFIED; priv->listening = false; spin_lock(&priv->event_queue_lock); list_splice_init(&priv->event_queue, &list); spin_unlock(&priv->event_queue_lock); __cw1200_free_event_queue(&list); priv->join_status = CW1200_JOIN_STATUS_PASSIVE; priv->join_pending = false; for (i = 0; i < 4; i++) cw1200_queue_clear(&priv->tx_queue[i]); mutex_unlock(&priv->conf_mutex); tx_policy_clean(priv); /* HACK! 
*/ if (atomic_xchg(&priv->tx_lock, 1) != 1) pr_debug("[STA] TX is force-unlocked due to stop request.\n"); wsm_unlock_tx(priv); atomic_xchg(&priv->tx_lock, 0); /* for recovery to work */ } static int cw1200_bssloss_mitigation = 1; module_param(cw1200_bssloss_mitigation, int, 0644); MODULE_PARM_DESC(cw1200_bssloss_mitigation, "BSS Loss mitigation. 0 == disabled, 1 == enabled (default)"); void __cw1200_cqm_bssloss_sm(struct cw1200_common *priv, int init, int good, int bad) { int tx = 0; priv->delayed_link_loss = 0; cancel_work_sync(&priv->bss_params_work); pr_debug("[STA] CQM BSSLOSS_SM: state: %d init %d good %d bad: %d txlock: %d uj: %d\n", priv->bss_loss_state, init, good, bad, atomic_read(&priv->tx_lock), priv->delayed_unjoin); /* If we have a pending unjoin */ if (priv->delayed_unjoin) return; if (init) { queue_delayed_work(priv->workqueue, &priv->bss_loss_work, HZ); priv->bss_loss_state = 0; /* Skip the confirmation procedure in P2P case */ if (!priv->vif->p2p && !atomic_read(&priv->tx_lock)) tx = 1; } else if (good) { cancel_delayed_work_sync(&priv->bss_loss_work); priv->bss_loss_state = 0; queue_work(priv->workqueue, &priv->bss_params_work); } else if (bad) { /* XXX Should we just keep going until we time out? */ if (priv->bss_loss_state < 3) tx = 1; } else { cancel_delayed_work_sync(&priv->bss_loss_work); priv->bss_loss_state = 0; } /* Bypass mitigation if it's disabled */ if (!cw1200_bssloss_mitigation) tx = 0; /* Spit out a NULL packet to our AP if necessary */ if (tx) { struct sk_buff *skb; priv->bss_loss_state++; skb = ieee80211_nullfunc_get(priv->hw, priv->vif, -1, false); WARN_ON(!skb); if (skb) cw1200_tx(priv->hw, NULL, skb); } } int cw1200_add_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { int ret; struct cw1200_common *priv = dev->priv; /* __le32 auto_calibration_mode = __cpu_to_le32(1); */ vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | IEEE80211_VIF_SUPPORTS_UAPSD | IEEE80211_VIF_SUPPORTS_CQM_RSSI; mutex_lock(&priv->conf_mutex); if (priv->mode != NL80211_IFTYPE_MONITOR) { mutex_unlock(&priv->conf_mutex); return -EOPNOTSUPP; } switch (vif->type) { case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: case NL80211_IFTYPE_MESH_POINT: case NL80211_IFTYPE_AP: priv->mode = vif->type; break; default: mutex_unlock(&priv->conf_mutex); return -EOPNOTSUPP; } priv->vif = vif; memcpy(priv->mac_addr, vif->addr, ETH_ALEN); ret = cw1200_setup_mac(priv); /* Enable auto-calibration */ /* Exception in subsequent channel switch; disabled. 
* wsm_write_mib(priv, WSM_MIB_ID_SET_AUTO_CALIBRATION_MODE, * &auto_calibration_mode, sizeof(auto_calibration_mode)); */ mutex_unlock(&priv->conf_mutex); return ret; } void cw1200_remove_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif) { struct cw1200_common *priv = dev->priv; struct wsm_reset reset = { .reset_statistics = true, }; int i; mutex_lock(&priv->conf_mutex); switch (priv->join_status) { case CW1200_JOIN_STATUS_JOINING: case CW1200_JOIN_STATUS_PRE_STA: case CW1200_JOIN_STATUS_STA: case CW1200_JOIN_STATUS_IBSS: wsm_lock_tx(priv); if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) wsm_unlock_tx(priv); break; case CW1200_JOIN_STATUS_AP: for (i = 0; priv->link_id_map; ++i) { if (priv->link_id_map & BIT(i)) { reset.link_id = i; wsm_reset(priv, &reset); priv->link_id_map &= ~BIT(i); } } memset(priv->link_id_db, 0, sizeof(priv->link_id_db)); priv->sta_asleep_mask = 0; priv->enable_beacon = false; priv->tx_multicast = false; priv->aid0_bit_set = false; priv->buffered_multicasts = false; priv->pspoll_mask = 0; reset.link_id = 0; wsm_reset(priv, &reset); break; case CW1200_JOIN_STATUS_MONITOR: cw1200_update_listening(priv, false); break; default: break; } priv->vif = NULL; priv->mode = NL80211_IFTYPE_MONITOR; eth_zero_addr(priv->mac_addr); memset(&priv->p2p_ps_modeinfo, 0, sizeof(priv->p2p_ps_modeinfo)); cw1200_free_keys(priv); cw1200_setup_mac(priv); priv->listening = false; priv->join_status = CW1200_JOIN_STATUS_PASSIVE; if (!__cw1200_flush(priv, true)) wsm_unlock_tx(priv); mutex_unlock(&priv->conf_mutex); } int cw1200_change_interface(struct ieee80211_hw *dev, struct ieee80211_vif *vif, enum nl80211_iftype new_type, bool p2p) { int ret = 0; pr_debug("change_interface new: %d (%d), old: %d (%d)\n", new_type, p2p, vif->type, vif->p2p); if (new_type != vif->type || vif->p2p != p2p) { cw1200_remove_interface(dev, vif); vif->type = new_type; vif->p2p = p2p; ret = cw1200_add_interface(dev, vif); } return ret; } int cw1200_config(struct ieee80211_hw *dev, u32 changed) { int ret = 0; struct cw1200_common *priv = dev->priv; struct ieee80211_conf *conf = &dev->conf; pr_debug("CONFIG CHANGED: %08x\n", changed); down(&priv->scan.lock); mutex_lock(&priv->conf_mutex); /* TODO: IEEE80211_CONF_CHANGE_QOS */ /* TODO: IEEE80211_CONF_CHANGE_LISTEN_INTERVAL */ if (changed & IEEE80211_CONF_CHANGE_POWER) { priv->output_power = conf->power_level; pr_debug("[STA] TX power: %d\n", priv->output_power); wsm_set_output_power(priv, priv->output_power * 10); } if ((changed & IEEE80211_CONF_CHANGE_CHANNEL) && (priv->channel != conf->chandef.chan)) { struct ieee80211_channel *ch = conf->chandef.chan; struct wsm_switch_channel channel = { .channel_number = ch->hw_value, }; pr_debug("[STA] Freq %d (wsm ch: %d).\n", ch->center_freq, ch->hw_value); /* __cw1200_flush() implicitly locks tx, if successful */ if (!__cw1200_flush(priv, false)) { if (!wsm_switch_channel(priv, &channel)) { ret = wait_event_timeout(priv->channel_switch_done, !priv->channel_switch_in_progress, 3 * HZ); if (ret) { /* Already unlocks if successful */ priv->channel = ch; ret = 0; } else { ret = -ETIMEDOUT; } } else { /* Unlock if switch channel fails */ wsm_unlock_tx(priv); } } } if (changed & IEEE80211_CONF_CHANGE_PS) { if (!(conf->flags & IEEE80211_CONF_PS)) priv->powersave_mode.mode = WSM_PSM_ACTIVE; else if (conf->dynamic_ps_timeout <= 0) priv->powersave_mode.mode = WSM_PSM_PS; else priv->powersave_mode.mode = WSM_PSM_FAST_PS; /* Firmware requires that value for this 1-byte field must * be specified in units of 500us. 
Values above the 128ms * threshold are not supported. */ if (conf->dynamic_ps_timeout >= 0x80) priv->powersave_mode.fast_psm_idle_period = 0xFF; else priv->powersave_mode.fast_psm_idle_period = conf->dynamic_ps_timeout << 1; if (priv->join_status == CW1200_JOIN_STATUS_STA && priv->bss_params.aid) cw1200_set_pm(priv, &priv->powersave_mode); } if (changed & IEEE80211_CONF_CHANGE_MONITOR) { /* TBD: It looks like it's transparent * there's a monitor interface present -- use this * to determine for example whether to calculate * timestamps for packets or not, do not use instead * of filter flags! */ } if (changed & IEEE80211_CONF_CHANGE_IDLE) { struct wsm_operational_mode mode = { .power_mode = cw1200_power_mode, .disable_more_flag_usage = true, }; wsm_lock_tx(priv); /* Disable p2p-dev mode forced by TX request */ if ((priv->join_status == CW1200_JOIN_STATUS_MONITOR) && (conf->flags & IEEE80211_CONF_IDLE) && !priv->listening) { cw1200_disable_listening(priv); priv->join_status = CW1200_JOIN_STATUS_PASSIVE; } wsm_set_operational_mode(priv, &mode); wsm_unlock_tx(priv); } if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) { pr_debug("[STA] Retry limits: %d (long), %d (short).\n", conf->long_frame_max_tx_count, conf->short_frame_max_tx_count); spin_lock_bh(&priv->tx_policy_cache.lock); priv->long_frame_max_tx_count = conf->long_frame_max_tx_count; priv->short_frame_max_tx_count = (conf->short_frame_max_tx_count < 0x0F) ? conf->short_frame_max_tx_count : 0x0F; priv->hw->max_rate_tries = priv->short_frame_max_tx_count; spin_unlock_bh(&priv->tx_policy_cache.lock); } mutex_unlock(&priv->conf_mutex); up(&priv->scan.lock); return ret; } void cw1200_update_filtering(struct cw1200_common *priv) { int ret; bool bssid_filtering = !priv->rx_filter.bssid; bool is_p2p = priv->vif && priv->vif->p2p; bool is_sta = priv->vif && NL80211_IFTYPE_STATION == priv->vif->type; static struct wsm_beacon_filter_control bf_ctrl; static struct wsm_mib_beacon_filter_table bf_tbl = { .entry[0].ie_id = WLAN_EID_VENDOR_SPECIFIC, .entry[0].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED | WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT | WSM_BEACON_FILTER_IE_HAS_APPEARED, .entry[0].oui[0] = 0x50, .entry[0].oui[1] = 0x6F, .entry[0].oui[2] = 0x9A, .entry[1].ie_id = WLAN_EID_HT_OPERATION, .entry[1].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED | WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT | WSM_BEACON_FILTER_IE_HAS_APPEARED, .entry[2].ie_id = WLAN_EID_ERP_INFO, .entry[2].flags = WSM_BEACON_FILTER_IE_HAS_CHANGED | WSM_BEACON_FILTER_IE_NO_LONGER_PRESENT | WSM_BEACON_FILTER_IE_HAS_APPEARED, }; if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE) return; else if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) bssid_filtering = false; if (priv->disable_beacon_filter) { bf_ctrl.enabled = 0; bf_ctrl.bcn_count = 1; bf_tbl.num = __cpu_to_le32(0); } else if (is_p2p || !is_sta) { bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE | WSM_BEACON_FILTER_AUTO_ERP; bf_ctrl.bcn_count = 0; bf_tbl.num = __cpu_to_le32(2); } else { bf_ctrl.enabled = WSM_BEACON_FILTER_ENABLE; bf_ctrl.bcn_count = 0; bf_tbl.num = __cpu_to_le32(3); } /* When acting as p2p client being connected to p2p GO, in order to * receive frames from a different p2p device, turn off bssid filter. * * WARNING: FW dependency! * This can only be used with FW WSM371 and its successors. * In that FW version even with bssid filter turned off, * device will block most of the unwanted frames. 
*/ if (is_p2p) bssid_filtering = false; ret = wsm_set_rx_filter(priv, &priv->rx_filter); if (!ret) ret = wsm_set_beacon_filter_table(priv, &bf_tbl); if (!ret) ret = wsm_beacon_filter_control(priv, &bf_ctrl); if (!ret) ret = wsm_set_bssid_filtering(priv, bssid_filtering); if (!ret) ret = wsm_set_multicast_filter(priv, &priv->multicast_filter); if (ret) wiphy_err(priv->hw->wiphy, "Update filtering failed: %d.\n", ret); return; } void cw1200_update_filtering_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, update_filtering_work); cw1200_update_filtering(priv); } void cw1200_set_beacon_wakeup_period_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, set_beacon_wakeup_period_work); wsm_set_beacon_wakeup_period(priv, priv->beacon_int * priv->join_dtim_period > MAX_BEACON_SKIP_TIME_MS ? 1 : priv->join_dtim_period, 0); } u64 cw1200_prepare_multicast(struct ieee80211_hw *hw, struct netdev_hw_addr_list *mc_list) { static u8 broadcast_ipv6[ETH_ALEN] = { 0x33, 0x33, 0x00, 0x00, 0x00, 0x01 }; static u8 broadcast_ipv4[ETH_ALEN] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 }; struct cw1200_common *priv = hw->priv; struct netdev_hw_addr *ha; int count = 0; /* Disable multicast filtering */ priv->has_multicast_subscription = false; memset(&priv->multicast_filter, 0x00, sizeof(priv->multicast_filter)); if (netdev_hw_addr_list_count(mc_list) > WSM_MAX_GRP_ADDRTABLE_ENTRIES) return 0; /* Enable if requested */ netdev_hw_addr_list_for_each(ha, mc_list) { pr_debug("[STA] multicast: %pM\n", ha->addr); memcpy(&priv->multicast_filter.macaddrs[count], ha->addr, ETH_ALEN); if (!ether_addr_equal(ha->addr, broadcast_ipv4) && !ether_addr_equal(ha->addr, broadcast_ipv6)) priv->has_multicast_subscription = true; count++; } if (count) { priv->multicast_filter.enable = __cpu_to_le32(1); priv->multicast_filter.num_addrs = __cpu_to_le32(count); } return netdev_hw_addr_list_count(mc_list); } void cw1200_configure_filter(struct ieee80211_hw *dev, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct cw1200_common *priv = dev->priv; bool listening = !!(*total_flags & (FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC | FIF_PROBE_REQ)); *total_flags &= FIF_OTHER_BSS | FIF_FCSFAIL | FIF_BCN_PRBRESP_PROMISC | FIF_PROBE_REQ; down(&priv->scan.lock); mutex_lock(&priv->conf_mutex); priv->rx_filter.promiscuous = 0; priv->rx_filter.bssid = (*total_flags & (FIF_OTHER_BSS | FIF_PROBE_REQ)) ? 1 : 0; priv->rx_filter.fcs = (*total_flags & FIF_FCSFAIL) ? 
1 : 0; priv->disable_beacon_filter = !(*total_flags & (FIF_BCN_PRBRESP_PROMISC | FIF_PROBE_REQ)); if (priv->listening != listening) { priv->listening = listening; wsm_lock_tx(priv); cw1200_update_listening(priv, listening); wsm_unlock_tx(priv); } cw1200_update_filtering(priv); mutex_unlock(&priv->conf_mutex); up(&priv->scan.lock); } int cw1200_conf_tx(struct ieee80211_hw *dev, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params) { struct cw1200_common *priv = dev->priv; int ret = 0; /* To prevent re-applying PM request OID again and again*/ bool old_uapsd_flags; mutex_lock(&priv->conf_mutex); if (queue < dev->queues) { old_uapsd_flags = le16_to_cpu(priv->uapsd_info.uapsd_flags); WSM_TX_QUEUE_SET(&priv->tx_queue_params, queue, 0, 0, 0); ret = wsm_set_tx_queue_params(priv, &priv->tx_queue_params.params[queue], queue); if (ret) { ret = -EINVAL; goto out; } WSM_EDCA_SET(&priv->edca, queue, params->aifs, params->cw_min, params->cw_max, params->txop, 0xc8, params->uapsd); ret = wsm_set_edca_params(priv, &priv->edca); if (ret) { ret = -EINVAL; goto out; } if (priv->mode == NL80211_IFTYPE_STATION) { ret = cw1200_set_uapsd_param(priv, &priv->edca); if (!ret && priv->setbssparams_done && (priv->join_status == CW1200_JOIN_STATUS_STA) && (old_uapsd_flags != le16_to_cpu(priv->uapsd_info.uapsd_flags))) ret = cw1200_set_pm(priv, &priv->powersave_mode); } } else { ret = -EINVAL; } out: mutex_unlock(&priv->conf_mutex); return ret; } int cw1200_get_stats(struct ieee80211_hw *dev, struct ieee80211_low_level_stats *stats) { struct cw1200_common *priv = dev->priv; memcpy(stats, &priv->stats, sizeof(*stats)); return 0; } int cw1200_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg) { struct wsm_set_pm pm = *arg; if (priv->uapsd_info.uapsd_flags != 0) pm.mode &= ~WSM_PSM_FAST_PS_FLAG; if (memcmp(&pm, &priv->firmware_ps_mode, sizeof(struct wsm_set_pm))) { priv->firmware_ps_mode = pm; return wsm_set_pm(priv, &pm); } else { return 0; } } int cw1200_set_key(struct ieee80211_hw *dev, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { int ret = -EOPNOTSUPP; struct cw1200_common *priv = dev->priv; struct ieee80211_key_seq seq; mutex_lock(&priv->conf_mutex); if (cmd == SET_KEY) { u8 *peer_addr = NULL; int pairwise = (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) ? 
1 : 0; int idx = cw1200_alloc_key(priv); struct wsm_add_key *wsm_key = &priv->keys[idx]; if (idx < 0) { ret = -EINVAL; goto finally; } if (sta) peer_addr = sta->addr; key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE | IEEE80211_KEY_FLAG_RESERVE_TAILROOM; switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: case WLAN_CIPHER_SUITE_WEP104: if (key->keylen > 16) { cw1200_free_key(priv, idx); ret = -EINVAL; goto finally; } if (pairwise) { wsm_key->type = WSM_KEY_TYPE_WEP_PAIRWISE; memcpy(wsm_key->wep_pairwise.peer, peer_addr, ETH_ALEN); memcpy(wsm_key->wep_pairwise.keydata, &key->key[0], key->keylen); wsm_key->wep_pairwise.keylen = key->keylen; } else { wsm_key->type = WSM_KEY_TYPE_WEP_DEFAULT; memcpy(wsm_key->wep_group.keydata, &key->key[0], key->keylen); wsm_key->wep_group.keylen = key->keylen; wsm_key->wep_group.keyid = key->keyidx; } break; case WLAN_CIPHER_SUITE_TKIP: ieee80211_get_key_rx_seq(key, 0, &seq); if (pairwise) { wsm_key->type = WSM_KEY_TYPE_TKIP_PAIRWISE; memcpy(wsm_key->tkip_pairwise.peer, peer_addr, ETH_ALEN); memcpy(wsm_key->tkip_pairwise.keydata, &key->key[0], 16); memcpy(wsm_key->tkip_pairwise.tx_mic_key, &key->key[16], 8); memcpy(wsm_key->tkip_pairwise.rx_mic_key, &key->key[24], 8); } else { size_t mic_offset = (priv->mode == NL80211_IFTYPE_AP) ? 16 : 24; wsm_key->type = WSM_KEY_TYPE_TKIP_GROUP; memcpy(wsm_key->tkip_group.keydata, &key->key[0], 16); memcpy(wsm_key->tkip_group.rx_mic_key, &key->key[mic_offset], 8); wsm_key->tkip_group.rx_seqnum[0] = seq.tkip.iv16 & 0xff; wsm_key->tkip_group.rx_seqnum[1] = (seq.tkip.iv16 >> 8) & 0xff; wsm_key->tkip_group.rx_seqnum[2] = seq.tkip.iv32 & 0xff; wsm_key->tkip_group.rx_seqnum[3] = (seq.tkip.iv32 >> 8) & 0xff; wsm_key->tkip_group.rx_seqnum[4] = (seq.tkip.iv32 >> 16) & 0xff; wsm_key->tkip_group.rx_seqnum[5] = (seq.tkip.iv32 >> 24) & 0xff; wsm_key->tkip_group.rx_seqnum[6] = 0; wsm_key->tkip_group.rx_seqnum[7] = 0; wsm_key->tkip_group.keyid = key->keyidx; } break; case WLAN_CIPHER_SUITE_CCMP: ieee80211_get_key_rx_seq(key, 0, &seq); if (pairwise) { wsm_key->type = WSM_KEY_TYPE_AES_PAIRWISE; memcpy(wsm_key->aes_pairwise.peer, peer_addr, ETH_ALEN); memcpy(wsm_key->aes_pairwise.keydata, &key->key[0], 16); } else { wsm_key->type = WSM_KEY_TYPE_AES_GROUP; memcpy(wsm_key->aes_group.keydata, &key->key[0], 16); wsm_key->aes_group.rx_seqnum[0] = seq.ccmp.pn[5]; wsm_key->aes_group.rx_seqnum[1] = seq.ccmp.pn[4]; wsm_key->aes_group.rx_seqnum[2] = seq.ccmp.pn[3]; wsm_key->aes_group.rx_seqnum[3] = seq.ccmp.pn[2]; wsm_key->aes_group.rx_seqnum[4] = seq.ccmp.pn[1]; wsm_key->aes_group.rx_seqnum[5] = seq.ccmp.pn[0]; wsm_key->aes_group.rx_seqnum[6] = 0; wsm_key->aes_group.rx_seqnum[7] = 0; wsm_key->aes_group.keyid = key->keyidx; } break; case WLAN_CIPHER_SUITE_SMS4: if (pairwise) { wsm_key->type = WSM_KEY_TYPE_WAPI_PAIRWISE; memcpy(wsm_key->wapi_pairwise.peer, peer_addr, ETH_ALEN); memcpy(wsm_key->wapi_pairwise.keydata, &key->key[0], 16); memcpy(wsm_key->wapi_pairwise.mic_key, &key->key[16], 16); wsm_key->wapi_pairwise.keyid = key->keyidx; } else { wsm_key->type = WSM_KEY_TYPE_WAPI_GROUP; memcpy(wsm_key->wapi_group.keydata, &key->key[0], 16); memcpy(wsm_key->wapi_group.mic_key, &key->key[16], 16); wsm_key->wapi_group.keyid = key->keyidx; } break; default: pr_warn("Unhandled key type %d\n", key->cipher); cw1200_free_key(priv, idx); ret = -EOPNOTSUPP; goto finally; } ret = wsm_add_key(priv, wsm_key); if (!ret) key->hw_key_idx = idx; else cw1200_free_key(priv, idx); } else if (cmd == DISABLE_KEY) { struct wsm_remove_key wsm_key = { .index = key->hw_key_idx, }; 
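	/* hw_key_idx holds the slot handed out by cw1200_alloc_key() in the
	 * SET_KEY path above; validate it before freeing the slot and asking
	 * firmware to remove the key.
	 */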
if (wsm_key.index > WSM_KEY_MAX_INDEX) { ret = -EINVAL; goto finally; } cw1200_free_key(priv, wsm_key.index); ret = wsm_remove_key(priv, &wsm_key); } else { pr_warn("Unhandled key command %d\n", cmd); } finally: mutex_unlock(&priv->conf_mutex); return ret; } void cw1200_wep_key_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, wep_key_work); u8 queue_id = cw1200_queue_get_queue_id(priv->pending_frame_id); struct cw1200_queue *queue = &priv->tx_queue[queue_id]; __le32 wep_default_key_id = __cpu_to_le32( priv->wep_default_key_id); pr_debug("[STA] Setting default WEP key: %d\n", priv->wep_default_key_id); wsm_flush_tx(priv); wsm_write_mib(priv, WSM_MIB_ID_DOT11_WEP_DEFAULT_KEY_ID, &wep_default_key_id, sizeof(wep_default_key_id)); cw1200_queue_requeue(queue, priv->pending_frame_id); wsm_unlock_tx(priv); } int cw1200_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { int ret = 0; __le32 val32; struct cw1200_common *priv = hw->priv; if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) return 0; if (value != (u32) -1) val32 = __cpu_to_le32(value); else val32 = 0; /* disabled */ if (priv->rts_threshold == value) goto out; pr_debug("[STA] Setting RTS threshold: %d\n", priv->rts_threshold); /* mutex_lock(&priv->conf_mutex); */ ret = wsm_write_mib(priv, WSM_MIB_ID_DOT11_RTS_THRESHOLD, &val32, sizeof(val32)); if (!ret) priv->rts_threshold = value; /* mutex_unlock(&priv->conf_mutex); */ out: return ret; } /* If successful, LOCKS the TX queue! */ static int __cw1200_flush(struct cw1200_common *priv, bool drop) { int i, ret; for (;;) { /* TODO: correct flush handling is required when dev_stop. * Temporary workaround: 2s */ if (drop) { for (i = 0; i < 4; ++i) cw1200_queue_clear(&priv->tx_queue[i]); } else { ret = wait_event_timeout( priv->tx_queue_stats.wait_link_id_empty, cw1200_queue_stats_is_empty( &priv->tx_queue_stats, -1), 2 * HZ); } if (!drop && ret <= 0) { ret = -ETIMEDOUT; break; } else { ret = 0; } wsm_lock_tx(priv); if (!cw1200_queue_stats_is_empty(&priv->tx_queue_stats, -1)) { /* Highly unlikely: WSM requeued frames. */ wsm_unlock_tx(priv); continue; } break; } return ret; } void cw1200_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct cw1200_common *priv = hw->priv; switch (priv->mode) { case NL80211_IFTYPE_MONITOR: drop = true; break; case NL80211_IFTYPE_AP: if (!priv->enable_beacon) drop = true; break; } if (!__cw1200_flush(priv, drop)) wsm_unlock_tx(priv); return; } /* ******************************************************************** */ /* WSM callbacks */ void cw1200_free_event_queue(struct cw1200_common *priv) { LIST_HEAD(list); spin_lock(&priv->event_queue_lock); list_splice_init(&priv->event_queue, &list); spin_unlock(&priv->event_queue_lock); __cw1200_free_event_queue(&list); } void cw1200_event_handler(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, event_handler); struct cw1200_wsm_event *event; LIST_HEAD(list); spin_lock(&priv->event_queue_lock); list_splice_init(&priv->event_queue, &list); spin_unlock(&priv->event_queue_lock); list_for_each_entry(event, &list, link) { switch (event->evt.id) { case WSM_EVENT_ERROR: pr_err("Unhandled WSM Error from LMAC\n"); break; case WSM_EVENT_BSS_LOST: pr_debug("[CQM] BSS lost.\n"); cancel_work_sync(&priv->unjoin_work); if (!down_trylock(&priv->scan.lock)) { cw1200_cqm_bssloss_sm(priv, 1, 0, 0); up(&priv->scan.lock); } else { /* Scan is in progress. Delay reporting. 
* Scan complete will trigger bss_loss_work */ priv->delayed_link_loss = 1; /* Also start a watchdog. */ queue_delayed_work(priv->workqueue, &priv->bss_loss_work, 5*HZ); } break; case WSM_EVENT_BSS_REGAINED: pr_debug("[CQM] BSS regained.\n"); cw1200_cqm_bssloss_sm(priv, 0, 0, 0); cancel_work_sync(&priv->unjoin_work); break; case WSM_EVENT_RADAR_DETECTED: wiphy_info(priv->hw->wiphy, "radar pulse detected\n"); break; case WSM_EVENT_RCPI_RSSI: { /* RSSI: signed Q8.0, RCPI: unsigned Q7.1 * RSSI = RCPI / 2 - 110 */ int rcpi_rssi = (int)(event->evt.data & 0xFF); int cqm_evt; if (priv->cqm_use_rssi) rcpi_rssi = (s8)rcpi_rssi; else rcpi_rssi = rcpi_rssi / 2 - 110; cqm_evt = (rcpi_rssi <= priv->cqm_rssi_thold) ? NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW : NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH; pr_debug("[CQM] RSSI event: %d.\n", rcpi_rssi); ieee80211_cqm_rssi_notify(priv->vif, cqm_evt, rcpi_rssi, GFP_KERNEL); break; } case WSM_EVENT_BT_INACTIVE: pr_warn("Unhandled BT INACTIVE from LMAC\n"); break; case WSM_EVENT_BT_ACTIVE: pr_warn("Unhandled BT ACTIVE from LMAC\n"); break; } } __cw1200_free_event_queue(&list); } void cw1200_bss_loss_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, bss_loss_work.work); pr_debug("[CQM] Reporting connection loss.\n"); wsm_lock_tx(priv); if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) wsm_unlock_tx(priv); } void cw1200_bss_params_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, bss_params_work); mutex_lock(&priv->conf_mutex); priv->bss_params.reset_beacon_loss = 1; wsm_set_bss_params(priv, &priv->bss_params); priv->bss_params.reset_beacon_loss = 0; mutex_unlock(&priv->conf_mutex); } /* ******************************************************************** */ /* Internal API */ /* This function is called to Parse the SDD file * to extract listen_interval and PTA related information * sdd is a TLV: u8 id, u8 len, u8 data[] */ static int cw1200_parse_sdd_file(struct cw1200_common *priv) { const u8 *p = priv->sdd->data; int ret = 0; while (p + 2 <= priv->sdd->data + priv->sdd->size) { if (p + p[1] + 2 > priv->sdd->data + priv->sdd->size) { pr_warn("Malformed sdd structure\n"); return -1; } switch (p[0]) { case SDD_PTA_CFG_ELT_ID: { u16 v; if (p[1] < 4) { pr_warn("SDD_PTA_CFG_ELT_ID malformed\n"); ret = -1; break; } v = le16_to_cpu(*((__le16 *)(p + 2))); if (!v) /* non-zero means this is enabled */ break; v = le16_to_cpu(*((__le16 *)(p + 4))); priv->conf_listen_interval = (v >> 7) & 0x1F; pr_debug("PTA found; Listen Interval %d\n", priv->conf_listen_interval); break; } case SDD_REFERENCE_FREQUENCY_ELT_ID: { u16 clk = le16_to_cpu(*((__le16 *)(p + 2))); if (clk != priv->hw_refclk) pr_warn("SDD file doesn't match configured refclk (%d vs %d)\n", clk, priv->hw_refclk); break; } default: break; } p += p[1] + 2; } if (!priv->bt_present) { pr_debug("PTA element NOT found.\n"); priv->conf_listen_interval = 0; } return ret; } int cw1200_setup_mac(struct cw1200_common *priv) { int ret = 0; /* NOTE: There is a bug in FW: it reports signal * as RSSI if RSSI subscription is enabled. * It's not enough to set WSM_RCPI_RSSI_USE_RSSI. 
* * NOTE2: RSSI based reports have been switched to RCPI, since * FW has a bug and RSSI reported values are not stable, * which can lead to signal level oscillations in user-end applications */ struct wsm_rcpi_rssi_threshold threshold = { .rssiRcpiMode = WSM_RCPI_RSSI_THRESHOLD_ENABLE | WSM_RCPI_RSSI_DONT_USE_UPPER | WSM_RCPI_RSSI_DONT_USE_LOWER, .rollingAverageCount = 16, }; struct wsm_configuration cfg = { .dot11StationId = &priv->mac_addr[0], }; /* Remember the decision here to make sure we will handle * the RCPI/RSSI value correctly on WSM_EVENT_RCPI_RSSI */ if (threshold.rssiRcpiMode & WSM_RCPI_RSSI_USE_RSSI) priv->cqm_use_rssi = true; if (!priv->sdd) { ret = request_firmware(&priv->sdd, priv->sdd_path, priv->pdev); if (ret) { pr_err("Can't load sdd file %s.\n", priv->sdd_path); return ret; } cw1200_parse_sdd_file(priv); } cfg.dpdData = priv->sdd->data; cfg.dpdData_size = priv->sdd->size; ret = wsm_configuration(priv, &cfg); if (ret) return ret; /* Configure RSSI/SCPI reporting as RSSI. */ wsm_set_rcpi_rssi_threshold(priv, &threshold); return 0; } static void cw1200_join_complete(struct cw1200_common *priv) { pr_debug("[STA] Join complete (%d)\n", priv->join_complete_status); priv->join_pending = false; if (priv->join_complete_status) { priv->join_status = CW1200_JOIN_STATUS_PASSIVE; cw1200_update_listening(priv, priv->listening); cw1200_do_unjoin(priv); ieee80211_connection_loss(priv->vif); } else { if (priv->mode == NL80211_IFTYPE_ADHOC) priv->join_status = CW1200_JOIN_STATUS_IBSS; else priv->join_status = CW1200_JOIN_STATUS_PRE_STA; } wsm_unlock_tx(priv); /* Clearing the lock held before do_join() */ } void cw1200_join_complete_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, join_complete_work); mutex_lock(&priv->conf_mutex); cw1200_join_complete(priv); mutex_unlock(&priv->conf_mutex); } void cw1200_join_complete_cb(struct cw1200_common *priv, struct wsm_join_complete *arg) { pr_debug("[STA] cw1200_join_complete_cb called, status=%d.\n", arg->status); if (cancel_delayed_work(&priv->join_timeout)) { priv->join_complete_status = arg->status; queue_work(priv->workqueue, &priv->join_complete_work); } } /* MUST be called with tx_lock held! It will be unlocked for us. */ static void cw1200_do_join(struct cw1200_common *priv) { const u8 *bssid; struct ieee80211_bss_conf *conf = &priv->vif->bss_conf; struct cfg80211_bss *bss = NULL; struct wsm_protected_mgmt_policy mgmt_policy; struct wsm_join join = { .mode = priv->vif->cfg.ibss_joined ? WSM_JOIN_MODE_IBSS : WSM_JOIN_MODE_BSS, .preamble_type = WSM_JOIN_PREAMBLE_LONG, .probe_for_join = 1, .atim_window = 0, .basic_rate_set = cw1200_rate_mask_to_wsm(priv, conf->basic_rates), }; if (delayed_work_pending(&priv->join_timeout)) { pr_warn("[STA] - Join request already pending, skipping..\n"); wsm_unlock_tx(priv); return; } if (priv->join_status) cw1200_do_unjoin(priv); bssid = priv->vif->bss_conf.bssid; bss = cfg80211_get_bss(priv->hw->wiphy, priv->channel, bssid, NULL, 0, IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); if (!bss && !priv->vif->cfg.ibss_joined) { wsm_unlock_tx(priv); return; } mutex_lock(&priv->conf_mutex); /* Under the conf lock: check scan status and * bail out if it is in progress. 
*/ if (atomic_read(&priv->scan.in_progress)) { wsm_unlock_tx(priv); goto done_put; } priv->join_pending = true; /* Sanity check basic rates */ if (!join.basic_rate_set) join.basic_rate_set = 7; /* Sanity check beacon interval */ if (!priv->beacon_int) priv->beacon_int = 1; join.beacon_interval = priv->beacon_int; /* BT Coex related changes */ if (priv->bt_present) { if (((priv->conf_listen_interval * 100) % priv->beacon_int) == 0) priv->listen_interval = ((priv->conf_listen_interval * 100) / priv->beacon_int); else priv->listen_interval = ((priv->conf_listen_interval * 100) / priv->beacon_int + 1); } if (priv->hw->conf.ps_dtim_period) priv->join_dtim_period = priv->hw->conf.ps_dtim_period; join.dtim_period = priv->join_dtim_period; join.channel_number = priv->channel->hw_value; join.band = (priv->channel->band == NL80211_BAND_5GHZ) ? WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; memcpy(join.bssid, bssid, sizeof(join.bssid)); pr_debug("[STA] Join BSSID: %pM DTIM: %d, interval: %d\n", join.bssid, join.dtim_period, priv->beacon_int); if (!priv->vif->cfg.ibss_joined) { const u8 *ssidie; rcu_read_lock(); ssidie = ieee80211_bss_get_ie(bss, WLAN_EID_SSID); if (ssidie) { join.ssid_len = ssidie[1]; memcpy(join.ssid, &ssidie[2], join.ssid_len); } rcu_read_unlock(); } if (priv->vif->p2p) { join.flags |= WSM_JOIN_FLAGS_P2P_GO; join.basic_rate_set = cw1200_rate_mask_to_wsm(priv, 0xFF0); } /* Enable asynchronous join calls */ if (!priv->vif->cfg.ibss_joined) { join.flags |= WSM_JOIN_FLAGS_FORCE; join.flags |= WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND; } wsm_flush_tx(priv); /* Stay Awake for Join and Auth Timeouts and a bit more */ cw1200_pm_stay_awake(&priv->pm_state, CW1200_JOIN_TIMEOUT + CW1200_AUTH_TIMEOUT); cw1200_update_listening(priv, false); /* Turn on Block ACKs */ wsm_set_block_ack_policy(priv, priv->ba_tx_tid_mask, priv->ba_rx_tid_mask); /* Set up timeout */ if (join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND) { priv->join_status = CW1200_JOIN_STATUS_JOINING; queue_delayed_work(priv->workqueue, &priv->join_timeout, CW1200_JOIN_TIMEOUT); } /* 802.11w protected mgmt frames */ mgmt_policy.protectedMgmtEnable = 0; mgmt_policy.unprotectedMgmtFramesAllowed = 1; mgmt_policy.encryptionForAuthFrame = 1; wsm_set_protected_mgmt_policy(priv, &mgmt_policy); /* Perform actual join */ if (wsm_join(priv, &join)) { pr_err("[STA] cw1200_join_work: wsm_join failed!\n"); cancel_delayed_work_sync(&priv->join_timeout); cw1200_update_listening(priv, priv->listening); /* Tx lock still held, unjoin will clear it. */ if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) wsm_unlock_tx(priv); } else { if (!(join.flags & WSM_JOIN_FLAGS_FORCE_WITH_COMPLETE_IND)) cw1200_join_complete(priv); /* Will clear tx_lock */ /* Upload keys */ cw1200_upload_keys(priv); /* Due to beacon filtering it is possible that the * AP's beacon is not known for the mac80211 stack. 
* Disable filtering temporarily to make sure the stack * receives at least one */ priv->disable_beacon_filter = true; } cw1200_update_filtering(priv); done_put: mutex_unlock(&priv->conf_mutex); if (bss) cfg80211_put_bss(priv->hw->wiphy, bss); } void cw1200_join_timeout(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, join_timeout.work); pr_debug("[WSM] Join timed out.\n"); wsm_lock_tx(priv); if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) wsm_unlock_tx(priv); } static void cw1200_do_unjoin(struct cw1200_common *priv) { struct wsm_reset reset = { .reset_statistics = true, }; cancel_delayed_work_sync(&priv->join_timeout); mutex_lock(&priv->conf_mutex); priv->join_pending = false; if (atomic_read(&priv->scan.in_progress)) { if (priv->delayed_unjoin) wiphy_dbg(priv->hw->wiphy, "Delayed unjoin is already scheduled.\n"); else priv->delayed_unjoin = true; goto done; } priv->delayed_link_loss = false; if (!priv->join_status) goto done; if (priv->join_status == CW1200_JOIN_STATUS_AP) goto done; cancel_work_sync(&priv->update_filtering_work); cancel_work_sync(&priv->set_beacon_wakeup_period_work); priv->join_status = CW1200_JOIN_STATUS_PASSIVE; /* Unjoin is a reset. */ wsm_flush_tx(priv); wsm_keep_alive_period(priv, 0); wsm_reset(priv, &reset); wsm_set_output_power(priv, priv->output_power * 10); priv->join_dtim_period = 0; cw1200_setup_mac(priv); cw1200_free_event_queue(priv); cancel_work_sync(&priv->event_handler); cw1200_update_listening(priv, priv->listening); cw1200_cqm_bssloss_sm(priv, 0, 0, 0); /* Disable Block ACKs */ wsm_set_block_ack_policy(priv, 0, 0); priv->disable_beacon_filter = false; cw1200_update_filtering(priv); memset(&priv->association_mode, 0, sizeof(priv->association_mode)); memset(&priv->bss_params, 0, sizeof(priv->bss_params)); priv->setbssparams_done = false; memset(&priv->firmware_ps_mode, 0, sizeof(priv->firmware_ps_mode)); pr_debug("[STA] Unjoin completed.\n"); done: mutex_unlock(&priv->conf_mutex); } void cw1200_unjoin_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, unjoin_work); cw1200_do_unjoin(priv); /* Tell the stack we're dead */ ieee80211_connection_loss(priv->vif); wsm_unlock_tx(priv); } int cw1200_enable_listening(struct cw1200_common *priv) { struct wsm_start start = { .mode = WSM_START_MODE_P2P_DEV, .band = WSM_PHY_BAND_2_4G, .beacon_interval = 100, .dtim_period = 1, .probe_delay = 0, .basic_rate_set = 0x0F, }; if (priv->channel) { start.band = priv->channel->band == NL80211_BAND_5GHZ ? 
WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G; start.channel_number = priv->channel->hw_value; } else { start.band = WSM_PHY_BAND_2_4G; start.channel_number = 1; } return wsm_start(priv, &start); } int cw1200_disable_listening(struct cw1200_common *priv) { int ret; struct wsm_reset reset = { .reset_statistics = true, }; ret = wsm_reset(priv, &reset); return ret; } void cw1200_update_listening(struct cw1200_common *priv, bool enabled) { if (enabled) { if (priv->join_status == CW1200_JOIN_STATUS_PASSIVE) { if (!cw1200_enable_listening(priv)) priv->join_status = CW1200_JOIN_STATUS_MONITOR; wsm_set_probe_responder(priv, true); } } else { if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) { if (!cw1200_disable_listening(priv)) priv->join_status = CW1200_JOIN_STATUS_PASSIVE; wsm_set_probe_responder(priv, false); } } } int cw1200_set_uapsd_param(struct cw1200_common *priv, const struct wsm_edca_params *arg) { int ret; u16 uapsd_flags = 0; /* Here's the mapping AC [queue, bit] * VO [0,3], VI [1, 2], BE [2, 1], BK [3, 0] */ if (arg->uapsd_enable[0]) uapsd_flags |= 1 << 3; if (arg->uapsd_enable[1]) uapsd_flags |= 1 << 2; if (arg->uapsd_enable[2]) uapsd_flags |= 1 << 1; if (arg->uapsd_enable[3]) uapsd_flags |= 1; /* Currently pseudo U-APSD operation is not supported, so setting * MinAutoTriggerInterval, MaxAutoTriggerInterval and * AutoTriggerStep to 0 */ priv->uapsd_info.uapsd_flags = cpu_to_le16(uapsd_flags); priv->uapsd_info.min_auto_trigger_interval = 0; priv->uapsd_info.max_auto_trigger_interval = 0; priv->uapsd_info.auto_trigger_step = 0; ret = wsm_set_uapsd_info(priv, &priv->uapsd_info); return ret; } /* ******************************************************************** */ /* AP API */ int cw1200_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct cw1200_common *priv = hw->priv; struct cw1200_sta_priv *sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv; struct cw1200_link_entry *entry; struct sk_buff *skb; if (priv->mode != NL80211_IFTYPE_AP) return 0; sta_priv->link_id = cw1200_find_link_id(priv, sta->addr); if (WARN_ON(!sta_priv->link_id)) { wiphy_info(priv->hw->wiphy, "[AP] No more link IDs available.\n"); return -ENOENT; } entry = &priv->link_id_db[sta_priv->link_id - 1]; spin_lock_bh(&priv->ps_state_lock); if ((sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) == IEEE80211_WMM_IE_STA_QOSINFO_AC_MASK) priv->sta_asleep_mask |= BIT(sta_priv->link_id); entry->status = CW1200_LINK_HARD; while ((skb = skb_dequeue(&entry->rx_queue))) ieee80211_rx_irqsafe(priv->hw, skb); spin_unlock_bh(&priv->ps_state_lock); return 0; } int cw1200_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct cw1200_common *priv = hw->priv; struct cw1200_sta_priv *sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv; struct cw1200_link_entry *entry; if (priv->mode != NL80211_IFTYPE_AP || !sta_priv->link_id) return 0; entry = &priv->link_id_db[sta_priv->link_id - 1]; spin_lock_bh(&priv->ps_state_lock); entry->status = CW1200_LINK_RESERVE; entry->timestamp = jiffies; wsm_lock_tx_async(priv); if (queue_work(priv->workqueue, &priv->link_id_work) <= 0) wsm_unlock_tx(priv); spin_unlock_bh(&priv->ps_state_lock); flush_workqueue(priv->workqueue); return 0; } static void __cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif, enum sta_notify_cmd notify_cmd, int link_id) { struct cw1200_common *priv = dev->priv; u32 bit, prev; /* Zero link id means "for all link IDs" */ if (link_id) bit = BIT(link_id); else if 
(WARN_ON_ONCE(notify_cmd != STA_NOTIFY_AWAKE)) bit = 0; else bit = priv->link_id_map; prev = priv->sta_asleep_mask & bit; switch (notify_cmd) { case STA_NOTIFY_SLEEP: if (!prev) { if (priv->buffered_multicasts && !priv->sta_asleep_mask) queue_work(priv->workqueue, &priv->multicast_start_work); priv->sta_asleep_mask |= bit; } break; case STA_NOTIFY_AWAKE: if (prev) { priv->sta_asleep_mask &= ~bit; priv->pspoll_mask &= ~bit; if (priv->tx_multicast && link_id && !priv->sta_asleep_mask) queue_work(priv->workqueue, &priv->multicast_stop_work); cw1200_bh_wakeup(priv); } break; } } void cw1200_sta_notify(struct ieee80211_hw *dev, struct ieee80211_vif *vif, enum sta_notify_cmd notify_cmd, struct ieee80211_sta *sta) { struct cw1200_common *priv = dev->priv; struct cw1200_sta_priv *sta_priv = (struct cw1200_sta_priv *)&sta->drv_priv; spin_lock_bh(&priv->ps_state_lock); __cw1200_sta_notify(dev, vif, notify_cmd, sta_priv->link_id); spin_unlock_bh(&priv->ps_state_lock); } static void cw1200_ps_notify(struct cw1200_common *priv, int link_id, bool ps) { if (link_id > CW1200_MAX_STA_IN_AP_MODE) return; pr_debug("%s for LinkId: %d. STAs asleep: %.8X\n", ps ? "Stop" : "Start", link_id, priv->sta_asleep_mask); __cw1200_sta_notify(priv->hw, priv->vif, ps ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, link_id); } static int cw1200_set_tim_impl(struct cw1200_common *priv, bool aid0_bit_set) { struct sk_buff *skb; struct wsm_update_ie update_ie = { .what = WSM_UPDATE_IE_BEACON, .count = 1, }; u16 tim_offset, tim_length; pr_debug("[AP] mcast: %s.\n", aid0_bit_set ? "ena" : "dis"); skb = ieee80211_beacon_get_tim(priv->hw, priv->vif, &tim_offset, &tim_length, 0); if (!skb) { if (!__cw1200_flush(priv, true)) wsm_unlock_tx(priv); return -ENOENT; } if (tim_offset && tim_length >= 6) { /* Ignore DTIM count from mac80211: * firmware handles DTIM internally. */ skb->data[tim_offset + 2] = 0; /* Set/reset aid0 bit */ if (aid0_bit_set) skb->data[tim_offset + 4] |= 1; else skb->data[tim_offset + 4] &= ~1; } update_ie.ies = &skb->data[tim_offset]; update_ie.length = tim_length; wsm_update_ie(priv, &update_ie); dev_kfree_skb(skb); return 0; } void cw1200_set_tim_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, set_tim_work); (void)cw1200_set_tim_impl(priv, priv->aid0_bit_set); } int cw1200_set_tim(struct ieee80211_hw *dev, struct ieee80211_sta *sta, bool set) { struct cw1200_common *priv = dev->priv; queue_work(priv->workqueue, &priv->set_tim_work); return 0; } void cw1200_set_cts_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, set_cts_work); u8 erp_ie[3] = {WLAN_EID_ERP_INFO, 0x1, 0}; struct wsm_update_ie update_ie = { .what = WSM_UPDATE_IE_BEACON, .count = 1, .ies = erp_ie, .length = 3, }; u32 erp_info; __le32 use_cts_prot; mutex_lock(&priv->conf_mutex); erp_info = priv->erp_info; mutex_unlock(&priv->conf_mutex); use_cts_prot = erp_info & WLAN_ERP_USE_PROTECTION ? 
__cpu_to_le32(1) : 0; erp_ie[ERP_INFO_BYTE_OFFSET] = erp_info; pr_debug("[STA] ERP information 0x%x\n", erp_info); wsm_write_mib(priv, WSM_MIB_ID_NON_ERP_PROTECTION, &use_cts_prot, sizeof(use_cts_prot)); wsm_update_ie(priv, &update_ie); return; } static int cw1200_set_btcoexinfo(struct cw1200_common *priv) { struct wsm_override_internal_txrate arg; int ret = 0; if (priv->mode == NL80211_IFTYPE_STATION) { /* Plumb PSPOLL and NULL template */ cw1200_upload_pspoll(priv); cw1200_upload_null(priv); cw1200_upload_qosnull(priv); } else { return 0; } memset(&arg, 0, sizeof(struct wsm_override_internal_txrate)); if (!priv->vif->p2p) { /* STATION mode */ if (priv->bss_params.operational_rate_set & ~0xF) { pr_debug("[STA] STA has ERP rates\n"); /* G or BG mode */ arg.internalTxRate = (__ffs( priv->bss_params.operational_rate_set & ~0xF)); } else { pr_debug("[STA] STA has non ERP rates\n"); /* B only mode */ arg.internalTxRate = (__ffs(le32_to_cpu(priv->association_mode.basic_rate_set))); } arg.nonErpInternalTxRate = (__ffs(le32_to_cpu(priv->association_mode.basic_rate_set))); } else { /* P2P mode */ arg.internalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF)); arg.nonErpInternalTxRate = (__ffs(priv->bss_params.operational_rate_set & ~0xF)); } pr_debug("[STA] BTCOEX_INFO MODE %d, internalTxRate : %x, nonErpInternalTxRate: %x\n", priv->mode, arg.internalTxRate, arg.nonErpInternalTxRate); ret = wsm_write_mib(priv, WSM_MIB_ID_OVERRIDE_INTERNAL_TX_RATE, &arg, sizeof(arg)); return ret; } void cw1200_bss_info_changed(struct ieee80211_hw *dev, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed) { struct cw1200_common *priv = dev->priv; bool do_join = false; mutex_lock(&priv->conf_mutex); pr_debug("BSS CHANGED: %llx\n", changed); /* TODO: BSS_CHANGED_QOS */ /* TODO: BSS_CHANGED_TXPOWER */ if (changed & BSS_CHANGED_ARP_FILTER) { struct wsm_mib_arp_ipv4_filter filter = {0}; int i; pr_debug("[STA] BSS_CHANGED_ARP_FILTER cnt: %d\n", vif->cfg.arp_addr_cnt); /* Currently only one IP address is supported by firmware. * In case of more IPs arp filtering will be disabled. 
*/ if (vif->cfg.arp_addr_cnt > 0 && vif->cfg.arp_addr_cnt <= WSM_MAX_ARP_IP_ADDRTABLE_ENTRIES) { for (i = 0; i < vif->cfg.arp_addr_cnt; i++) { filter.ipv4addrs[i] = vif->cfg.arp_addr_list[i]; pr_debug("[STA] addr[%d]: 0x%X\n", i, filter.ipv4addrs[i]); } filter.enable = __cpu_to_le32(1); } pr_debug("[STA] arp ip filter enable: %d\n", __le32_to_cpu(filter.enable)); wsm_set_arp_ipv4_filter(priv, &filter); } if (changed & (BSS_CHANGED_BEACON | BSS_CHANGED_AP_PROBE_RESP | BSS_CHANGED_BSSID | BSS_CHANGED_SSID | BSS_CHANGED_IBSS)) { pr_debug("BSS_CHANGED_BEACON\n"); priv->beacon_int = info->beacon_int; cw1200_update_beaconing(priv); cw1200_upload_beacon(priv); } if (changed & BSS_CHANGED_BEACON_ENABLED) { pr_debug("BSS_CHANGED_BEACON_ENABLED (%d)\n", info->enable_beacon); if (priv->enable_beacon != info->enable_beacon) { cw1200_enable_beaconing(priv, info->enable_beacon); priv->enable_beacon = info->enable_beacon; } } if (changed & BSS_CHANGED_BEACON_INT) { pr_debug("CHANGED_BEACON_INT\n"); if (vif->cfg.ibss_joined) do_join = true; else if (priv->join_status == CW1200_JOIN_STATUS_AP) cw1200_update_beaconing(priv); } /* assoc/disassoc, or maybe AID changed */ if (changed & BSS_CHANGED_ASSOC) { wsm_lock_tx(priv); priv->wep_default_key_id = -1; wsm_unlock_tx(priv); } if (changed & BSS_CHANGED_BSSID) { pr_debug("BSS_CHANGED_BSSID\n"); do_join = true; } if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_BSSID | BSS_CHANGED_IBSS | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_HT)) { pr_debug("BSS_CHANGED_ASSOC\n"); if (vif->cfg.assoc) { if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) { ieee80211_connection_loss(vif); mutex_unlock(&priv->conf_mutex); return; } else if (priv->join_status == CW1200_JOIN_STATUS_PRE_STA) { priv->join_status = CW1200_JOIN_STATUS_STA; } } else { do_join = true; } if (vif->cfg.assoc || vif->cfg.ibss_joined) { struct ieee80211_sta *sta = NULL; __le32 htprot = 0; if (info->dtim_period) priv->join_dtim_period = info->dtim_period; priv->beacon_int = info->beacon_int; rcu_read_lock(); if (info->bssid && !vif->cfg.ibss_joined) sta = ieee80211_find_sta(vif, info->bssid); if (sta) { priv->ht_info.ht_cap = sta->deflink.ht_cap; priv->bss_params.operational_rate_set = cw1200_rate_mask_to_wsm(priv, sta->deflink.supp_rates[priv->channel->band]); priv->ht_info.channel_type = cfg80211_get_chandef_type(&dev->conf.chandef); priv->ht_info.operation_mode = info->ht_operation_mode; } else { memset(&priv->ht_info, 0, sizeof(priv->ht_info)); priv->bss_params.operational_rate_set = -1; } rcu_read_unlock(); /* Non Greenfield stations present */ if (priv->ht_info.operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT) htprot |= cpu_to_le32(WSM_NON_GREENFIELD_STA_PRESENT); /* Set HT protection method */ htprot |= cpu_to_le32((priv->ht_info.operation_mode & IEEE80211_HT_OP_MODE_PROTECTION) << 2); /* TODO: * STBC_param.dual_cts * STBC_param.LSIG_TXOP_FILL */ wsm_write_mib(priv, WSM_MIB_ID_SET_HT_PROTECTION, &htprot, sizeof(htprot)); priv->association_mode.greenfield = cw1200_ht_greenfield(&priv->ht_info); priv->association_mode.flags = WSM_ASSOCIATION_MODE_SNOOP_ASSOC_FRAMES | WSM_ASSOCIATION_MODE_USE_PREAMBLE_TYPE | WSM_ASSOCIATION_MODE_USE_HT_MODE | WSM_ASSOCIATION_MODE_USE_BASIC_RATE_SET | WSM_ASSOCIATION_MODE_USE_MPDU_START_SPACING; priv->association_mode.preamble = info->use_short_preamble ? 
WSM_JOIN_PREAMBLE_SHORT : WSM_JOIN_PREAMBLE_LONG; priv->association_mode.basic_rate_set = __cpu_to_le32( cw1200_rate_mask_to_wsm(priv, info->basic_rates)); priv->association_mode.mpdu_start_spacing = cw1200_ht_ampdu_density(&priv->ht_info); cw1200_cqm_bssloss_sm(priv, 0, 0, 0); cancel_work_sync(&priv->unjoin_work); priv->bss_params.beacon_lost_count = priv->cqm_beacon_loss_count; priv->bss_params.aid = vif->cfg.aid; if (priv->join_dtim_period < 1) priv->join_dtim_period = 1; pr_debug("[STA] DTIM %d, interval: %d\n", priv->join_dtim_period, priv->beacon_int); pr_debug("[STA] Preamble: %d, Greenfield: %d, Aid: %d, Rates: 0x%.8X, Basic: 0x%.8X\n", priv->association_mode.preamble, priv->association_mode.greenfield, priv->bss_params.aid, priv->bss_params.operational_rate_set, priv->association_mode.basic_rate_set); wsm_set_association_mode(priv, &priv->association_mode); if (!vif->cfg.ibss_joined) { wsm_keep_alive_period(priv, 30 /* sec */); wsm_set_bss_params(priv, &priv->bss_params); priv->setbssparams_done = true; cw1200_set_beacon_wakeup_period_work(&priv->set_beacon_wakeup_period_work); cw1200_set_pm(priv, &priv->powersave_mode); } if (priv->vif->p2p) { pr_debug("[STA] Setting p2p powersave configuration.\n"); wsm_set_p2p_ps_modeinfo(priv, &priv->p2p_ps_modeinfo); } if (priv->bt_present) cw1200_set_btcoexinfo(priv); } else { memset(&priv->association_mode, 0, sizeof(priv->association_mode)); memset(&priv->bss_params, 0, sizeof(priv->bss_params)); } } /* ERP Protection */ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE)) { u32 prev_erp_info = priv->erp_info; if (info->use_cts_prot) priv->erp_info |= WLAN_ERP_USE_PROTECTION; else if (!(prev_erp_info & WLAN_ERP_NON_ERP_PRESENT)) priv->erp_info &= ~WLAN_ERP_USE_PROTECTION; if (info->use_short_preamble) priv->erp_info |= WLAN_ERP_BARKER_PREAMBLE; else priv->erp_info &= ~WLAN_ERP_BARKER_PREAMBLE; pr_debug("[STA] ERP Protection: %x\n", priv->erp_info); if (prev_erp_info != priv->erp_info) queue_work(priv->workqueue, &priv->set_cts_work); } /* ERP Slottime */ if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_ERP_SLOT)) { __le32 slot_time = info->use_short_slot ? __cpu_to_le32(9) : __cpu_to_le32(20); pr_debug("[STA] Slot time: %d us.\n", __le32_to_cpu(slot_time)); wsm_write_mib(priv, WSM_MIB_ID_DOT11_SLOT_TIME, &slot_time, sizeof(slot_time)); } if (changed & (BSS_CHANGED_ASSOC | BSS_CHANGED_CQM)) { struct wsm_rcpi_rssi_threshold threshold = { .rollingAverageCount = 8, }; pr_debug("[CQM] RSSI threshold subscribe: %d +- %d\n", info->cqm_rssi_thold, info->cqm_rssi_hyst); priv->cqm_rssi_thold = info->cqm_rssi_thold; priv->cqm_rssi_hyst = info->cqm_rssi_hyst; if (info->cqm_rssi_thold || info->cqm_rssi_hyst) { /* RSSI subscription enabled */ /* TODO: It's not a correct way of setting threshold. * Upper and lower must be set equal here and adjusted * in callback. However, current implementation is much * more reliable and stable. */ /* RSSI: signed Q8.0, RCPI: unsigned Q7.1 * RSSI = RCPI / 2 - 110 */ if (priv->cqm_use_rssi) { threshold.upperThreshold = info->cqm_rssi_thold + info->cqm_rssi_hyst; threshold.lowerThreshold = info->cqm_rssi_thold; threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI; } else { threshold.upperThreshold = (info->cqm_rssi_thold + info->cqm_rssi_hyst + 110) * 2; threshold.lowerThreshold = (info->cqm_rssi_thold + 110) * 2; } threshold.rssiRcpiMode |= WSM_RCPI_RSSI_THRESHOLD_ENABLE; } else { /* There is a bug in FW, see sta.c. We have to enable * dummy subscription to get correct RSSI values. 
*/ threshold.rssiRcpiMode |= WSM_RCPI_RSSI_THRESHOLD_ENABLE | WSM_RCPI_RSSI_DONT_USE_UPPER | WSM_RCPI_RSSI_DONT_USE_LOWER; if (priv->cqm_use_rssi) threshold.rssiRcpiMode |= WSM_RCPI_RSSI_USE_RSSI; } wsm_set_rcpi_rssi_threshold(priv, &threshold); } mutex_unlock(&priv->conf_mutex); if (do_join) { wsm_lock_tx(priv); cw1200_do_join(priv); /* Will unlock it for us */ } } void cw1200_multicast_start_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, multicast_start_work); long tmo = priv->join_dtim_period * (priv->beacon_int + 20) * HZ / 1024; cancel_work_sync(&priv->multicast_stop_work); if (!priv->aid0_bit_set) { wsm_lock_tx(priv); cw1200_set_tim_impl(priv, true); priv->aid0_bit_set = true; mod_timer(&priv->mcast_timeout, jiffies + tmo); wsm_unlock_tx(priv); } } void cw1200_multicast_stop_work(struct work_struct *work) { struct cw1200_common *priv = container_of(work, struct cw1200_common, multicast_stop_work); if (priv->aid0_bit_set) { del_timer_sync(&priv->mcast_timeout); wsm_lock_tx(priv); priv->aid0_bit_set = false; cw1200_set_tim_impl(priv, false); wsm_unlock_tx(priv); } } void cw1200_mcast_timeout(struct timer_list *t) { struct cw1200_common *priv = from_timer(priv, t, mcast_timeout); wiphy_warn(priv->hw->wiphy, "Multicast delivery timeout.\n"); spin_lock_bh(&priv->ps_state_lock); priv->tx_multicast = priv->aid0_bit_set && priv->buffered_multicasts; if (priv->tx_multicast) cw1200_bh_wakeup(priv); spin_unlock_bh(&priv->ps_state_lock); } int cw1200_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { /* Aggregation is implemented fully in firmware, * including block ack negotiation. Do not allow * mac80211 stack to do anything: it interferes with * the firmware. */ /* Note that we still need this function stubbed. */ return -ENOTSUPP; } /* ******************************************************************** */ /* WSM callback */ void cw1200_suspend_resume(struct cw1200_common *priv, struct wsm_suspend_resume *arg) { pr_debug("[AP] %s: %s\n", arg->stop ? "stop" : "start", arg->multicast ? "broadcast" : "unicast"); if (arg->multicast) { bool cancel_tmo = false; spin_lock_bh(&priv->ps_state_lock); if (arg->stop) { priv->tx_multicast = false; } else { /* Firmware sends this indication every DTIM if there * is a STA in powersave connected. There is no reason * to suspend, following wakeup will consume much more * power than it could be saved. 
*/ cw1200_pm_stay_awake(&priv->pm_state, priv->join_dtim_period * (priv->beacon_int + 20) * HZ / 1024); priv->tx_multicast = (priv->aid0_bit_set && priv->buffered_multicasts); if (priv->tx_multicast) { cancel_tmo = true; cw1200_bh_wakeup(priv); } } spin_unlock_bh(&priv->ps_state_lock); if (cancel_tmo) del_timer_sync(&priv->mcast_timeout); } else { spin_lock_bh(&priv->ps_state_lock); cw1200_ps_notify(priv, arg->link_id, arg->stop); spin_unlock_bh(&priv->ps_state_lock); if (!arg->stop) cw1200_bh_wakeup(priv); } return; } /* ******************************************************************** */ /* AP privates */ static int cw1200_upload_beacon(struct cw1200_common *priv) { int ret = 0; struct ieee80211_mgmt *mgmt; struct wsm_template_frame frame = { .frame_type = WSM_FRAME_TYPE_BEACON, }; u16 tim_offset; u16 tim_len; if (priv->mode == NL80211_IFTYPE_STATION || priv->mode == NL80211_IFTYPE_MONITOR || priv->mode == NL80211_IFTYPE_UNSPECIFIED) goto done; if (priv->vif->p2p) frame.rate = WSM_TRANSMIT_RATE_6; frame.skb = ieee80211_beacon_get_tim(priv->hw, priv->vif, &tim_offset, &tim_len, 0); if (!frame.skb) return -ENOMEM; ret = wsm_set_template_frame(priv, &frame); if (ret) goto done; /* TODO: Distill probe resp; remove TIM * and any other beacon-specific IEs */ mgmt = (void *)frame.skb->data; mgmt->frame_control = __cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_PROBE_RESP); frame.frame_type = WSM_FRAME_TYPE_PROBE_RESPONSE; if (priv->vif->p2p) { ret = wsm_set_probe_responder(priv, true); } else { ret = wsm_set_template_frame(priv, &frame); wsm_set_probe_responder(priv, false); } done: dev_kfree_skb(frame.skb); return ret; } static int cw1200_upload_pspoll(struct cw1200_common *priv) { int ret = 0; struct wsm_template_frame frame = { .frame_type = WSM_FRAME_TYPE_PS_POLL, .rate = 0xFF, }; frame.skb = ieee80211_pspoll_get(priv->hw, priv->vif); if (!frame.skb) return -ENOMEM; ret = wsm_set_template_frame(priv, &frame); dev_kfree_skb(frame.skb); return ret; } static int cw1200_upload_null(struct cw1200_common *priv) { int ret = 0; struct wsm_template_frame frame = { .frame_type = WSM_FRAME_TYPE_NULL, .rate = 0xFF, }; frame.skb = ieee80211_nullfunc_get(priv->hw, priv->vif,-1, false); if (!frame.skb) return -ENOMEM; ret = wsm_set_template_frame(priv, &frame); dev_kfree_skb(frame.skb); return ret; } static int cw1200_upload_qosnull(struct cw1200_common *priv) { /* TODO: This needs to be implemented struct wsm_template_frame frame = { .frame_type = WSM_FRAME_TYPE_QOS_NULL, .rate = 0xFF, }; frame.skb = ieee80211_qosnullfunc_get(priv->hw, priv->vif); if (!frame.skb) return -ENOMEM; ret = wsm_set_template_frame(priv, &frame); dev_kfree_skb(frame.skb); */ return 0; } static int cw1200_enable_beaconing(struct cw1200_common *priv, bool enable) { struct wsm_beacon_transmit transmit = { .enable_beaconing = enable, }; return wsm_beacon_transmit(priv, &transmit); } static int cw1200_start_ap(struct cw1200_common *priv) { int ret; struct ieee80211_bss_conf *conf = &priv->vif->bss_conf; struct wsm_start start = { .mode = priv->vif->p2p ? WSM_START_MODE_P2P_GO : WSM_START_MODE_AP, .band = (priv->channel->band == NL80211_BAND_5GHZ) ? WSM_PHY_BAND_5G : WSM_PHY_BAND_2_4G, .channel_number = priv->channel->hw_value, .beacon_interval = conf->beacon_int, .dtim_period = conf->dtim_period, .preamble = conf->use_short_preamble ? 
WSM_JOIN_PREAMBLE_SHORT : WSM_JOIN_PREAMBLE_LONG, .probe_delay = 100, .basic_rate_set = cw1200_rate_mask_to_wsm(priv, conf->basic_rates), }; struct wsm_operational_mode mode = { .power_mode = cw1200_power_mode, .disable_more_flag_usage = true, }; memset(start.ssid, 0, sizeof(start.ssid)); if (!conf->hidden_ssid) { start.ssid_len = priv->vif->cfg.ssid_len; memcpy(start.ssid, priv->vif->cfg.ssid, start.ssid_len); } priv->beacon_int = conf->beacon_int; priv->join_dtim_period = conf->dtim_period; memset(&priv->link_id_db, 0, sizeof(priv->link_id_db)); pr_debug("[AP] ch: %d(%d), bcn: %d(%d), brt: 0x%.8X, ssid: %.*s.\n", start.channel_number, start.band, start.beacon_interval, start.dtim_period, start.basic_rate_set, start.ssid_len, start.ssid); ret = wsm_start(priv, &start); if (!ret) ret = cw1200_upload_keys(priv); if (!ret && priv->vif->p2p) { pr_debug("[AP] Setting p2p powersave configuration.\n"); wsm_set_p2p_ps_modeinfo(priv, &priv->p2p_ps_modeinfo); } if (!ret) { wsm_set_block_ack_policy(priv, 0, 0); priv->join_status = CW1200_JOIN_STATUS_AP; cw1200_update_filtering(priv); } wsm_set_operational_mode(priv, &mode); return ret; } static int cw1200_update_beaconing(struct cw1200_common *priv) { struct ieee80211_bss_conf *conf = &priv->vif->bss_conf; struct wsm_reset reset = { .link_id = 0, .reset_statistics = true, }; if (priv->mode == NL80211_IFTYPE_AP) { /* TODO: check if changed channel, band */ if (priv->join_status != CW1200_JOIN_STATUS_AP || priv->beacon_int != conf->beacon_int) { pr_debug("ap restarting\n"); wsm_lock_tx(priv); if (priv->join_status != CW1200_JOIN_STATUS_PASSIVE) wsm_reset(priv, &reset); priv->join_status = CW1200_JOIN_STATUS_PASSIVE; cw1200_start_ap(priv); wsm_unlock_tx(priv); } else pr_debug("ap started join_status: %d\n", priv->join_status); } return 0; }
linux-master
drivers/net/wireless/st/cw1200/sta.c
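The CQM branch of cw1200_bss_info_changed() above programs wsm_rcpi_rssi_threshold using the conversion stated in its comment (RSSI is signed dBm, RCPI is unsigned half-dB units, RSSI = RCPI / 2 - 110). The standalone user-space sketch below only restates that arithmetic for clarity; the helper names and the sample threshold values are illustrative assumptions, not driver APIs.

#include <stdio.h>

/* RCPI (unsigned, 0.5 dB steps) -> RSSI (signed dBm), as in wsm_receive_indication(). */
static int rcpi_to_rssi(unsigned int rcpi)
{
	return (int)(rcpi / 2) - 110;
}

/* RSSI (signed dBm) -> RCPI, as used when filling threshold.upper/lowerThreshold. */
static unsigned int rssi_to_rcpi(int rssi)
{
	return (unsigned int)((rssi + 110) * 2);
}

int main(void)
{
	int thold = -70, hyst = 4;	/* sample cqm_rssi_thold / cqm_rssi_hyst */

	printf("upperThreshold=%u lowerThreshold=%u\n",
	       rssi_to_rcpi(thold + hyst), rssi_to_rcpi(thold));
	printf("RCPI 80 -> %d dBm\n", rcpi_to_rssi(80));
	return 0;
}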
// SPDX-License-Identifier: GPL-2.0-only /* * WSM host interface (HI) implementation for * ST-Ericsson CW1200 mac80211 drivers. * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> */ #include <linux/skbuff.h> #include <linux/wait.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/random.h> #include "cw1200.h" #include "wsm.h" #include "bh.h" #include "sta.h" #include "debug.h" #define WSM_CMD_TIMEOUT (2 * HZ) /* With respect to interrupt loss */ #define WSM_CMD_START_TIMEOUT (7 * HZ) #define WSM_CMD_RESET_TIMEOUT (3 * HZ) /* 2 sec. timeout was observed. */ #define WSM_CMD_MAX_TIMEOUT (3 * HZ) #define WSM_SKIP(buf, size) \ do { \ if ((buf)->data + size > (buf)->end) \ goto underflow; \ (buf)->data += size; \ } while (0) #define WSM_GET(buf, ptr, size) \ do { \ if ((buf)->data + size > (buf)->end) \ goto underflow; \ memcpy(ptr, (buf)->data, size); \ (buf)->data += size; \ } while (0) #define __WSM_GET(buf, type, type2, cvt) \ ({ \ type val; \ if ((buf)->data + sizeof(type) > (buf)->end) \ goto underflow; \ val = cvt(*(type2 *)(buf)->data); \ (buf)->data += sizeof(type); \ val; \ }) #define WSM_GET8(buf) __WSM_GET(buf, u8, u8, (u8)) #define WSM_GET16(buf) __WSM_GET(buf, u16, __le16, __le16_to_cpu) #define WSM_GET32(buf) __WSM_GET(buf, u32, __le32, __le32_to_cpu) #define WSM_PUT(buf, ptr, size) \ do { \ if ((buf)->data + size > (buf)->end) \ if (wsm_buf_reserve((buf), size)) \ goto nomem; \ memcpy((buf)->data, ptr, size); \ (buf)->data += size; \ } while (0) #define __WSM_PUT(buf, val, type, type2, cvt) \ do { \ if ((buf)->data + sizeof(type) > (buf)->end) \ if (wsm_buf_reserve((buf), sizeof(type))) \ goto nomem; \ *(type2 *)(buf)->data = cvt(val); \ (buf)->data += sizeof(type); \ } while (0) #define WSM_PUT8(buf, val) __WSM_PUT(buf, val, u8, u8, (u8)) #define WSM_PUT16(buf, val) __WSM_PUT(buf, val, u16, __le16, __cpu_to_le16) #define WSM_PUT32(buf, val) __WSM_PUT(buf, val, u32, __le32, __cpu_to_le32) static void wsm_buf_reset(struct wsm_buf *buf); static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size); static int wsm_cmd_send(struct cw1200_common *priv, struct wsm_buf *buf, void *arg, u16 cmd, long tmo); #define wsm_cmd_lock(__priv) mutex_lock(&((__priv)->wsm_cmd_mux)) #define wsm_cmd_unlock(__priv) mutex_unlock(&((__priv)->wsm_cmd_mux)) /* ******************************************************************** */ /* WSM API implementation */ static int wsm_generic_confirm(struct cw1200_common *priv, void *arg, struct wsm_buf *buf) { u32 status = WSM_GET32(buf); if (status != WSM_STATUS_SUCCESS) return -EINVAL; return 0; underflow: WARN_ON(1); return -EINVAL; } int wsm_configuration(struct cw1200_common *priv, struct wsm_configuration *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); WSM_PUT32(buf, arg->dot11MaxTransmitMsduLifeTime); WSM_PUT32(buf, arg->dot11MaxReceiveLifeTime); WSM_PUT32(buf, arg->dot11RtsThreshold); /* DPD block. 
*/ WSM_PUT16(buf, arg->dpdData_size + 12); WSM_PUT16(buf, 1); /* DPD version */ WSM_PUT(buf, arg->dot11StationId, ETH_ALEN); WSM_PUT16(buf, 5); /* DPD flags */ WSM_PUT(buf, arg->dpdData, arg->dpdData_size); ret = wsm_cmd_send(priv, buf, arg, WSM_CONFIGURATION_REQ_ID, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } static int wsm_configuration_confirm(struct cw1200_common *priv, struct wsm_configuration *arg, struct wsm_buf *buf) { int i; int status; status = WSM_GET32(buf); if (WARN_ON(status != WSM_STATUS_SUCCESS)) return -EINVAL; WSM_GET(buf, arg->dot11StationId, ETH_ALEN); arg->dot11FrequencyBandsSupported = WSM_GET8(buf); WSM_SKIP(buf, 1); arg->supportedRateMask = WSM_GET32(buf); for (i = 0; i < 2; ++i) { arg->txPowerRange[i].min_power_level = WSM_GET32(buf); arg->txPowerRange[i].max_power_level = WSM_GET32(buf); arg->txPowerRange[i].stepping = WSM_GET32(buf); } return 0; underflow: WARN_ON(1); return -EINVAL; } /* ******************************************************************** */ int wsm_reset(struct cw1200_common *priv, const struct wsm_reset *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; u16 cmd = WSM_RESET_REQ_ID | WSM_TX_LINK_ID(arg->link_id); wsm_cmd_lock(priv); WSM_PUT32(buf, arg->reset_statistics ? 0 : 1); ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_RESET_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ struct wsm_mib { u16 mib_id; void *buf; size_t buf_size; }; int wsm_read_mib(struct cw1200_common *priv, u16 mib_id, void *_buf, size_t buf_size) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; struct wsm_mib mib_buf = { .mib_id = mib_id, .buf = _buf, .buf_size = buf_size, }; wsm_cmd_lock(priv); WSM_PUT16(buf, mib_id); WSM_PUT16(buf, 0); ret = wsm_cmd_send(priv, buf, &mib_buf, WSM_READ_MIB_REQ_ID, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } static int wsm_read_mib_confirm(struct cw1200_common *priv, struct wsm_mib *arg, struct wsm_buf *buf) { u16 size; if (WARN_ON(WSM_GET32(buf) != WSM_STATUS_SUCCESS)) return -EINVAL; if (WARN_ON(WSM_GET16(buf) != arg->mib_id)) return -EINVAL; size = WSM_GET16(buf); if (size > arg->buf_size) size = arg->buf_size; WSM_GET(buf, arg->buf, size); arg->buf_size = size; return 0; underflow: WARN_ON(1); return -EINVAL; } /* ******************************************************************** */ int wsm_write_mib(struct cw1200_common *priv, u16 mib_id, void *_buf, size_t buf_size) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; struct wsm_mib mib_buf = { .mib_id = mib_id, .buf = _buf, .buf_size = buf_size, }; wsm_cmd_lock(priv); WSM_PUT16(buf, mib_id); WSM_PUT16(buf, buf_size); WSM_PUT(buf, _buf, buf_size); ret = wsm_cmd_send(priv, buf, &mib_buf, WSM_WRITE_MIB_REQ_ID, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } static int wsm_write_mib_confirm(struct cw1200_common *priv, struct wsm_mib *arg, struct wsm_buf *buf) { int ret; ret = wsm_generic_confirm(priv, arg, buf); if (ret) return ret; if (arg->mib_id == WSM_MIB_ID_OPERATIONAL_POWER_MODE) { /* OperationalMode: update PM status. */ const char *p = arg->buf; cw1200_enable_powersave(priv, (p[0] & 0x0F) ? 
true : false); } return 0; } /* ******************************************************************** */ int wsm_scan(struct cw1200_common *priv, const struct wsm_scan *arg) { int i; int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; if (arg->num_channels > 48) return -EINVAL; if (arg->num_ssids > 2) return -EINVAL; if (arg->band > 1) return -EINVAL; wsm_cmd_lock(priv); WSM_PUT8(buf, arg->band); WSM_PUT8(buf, arg->type); WSM_PUT8(buf, arg->flags); WSM_PUT8(buf, arg->max_tx_rate); WSM_PUT32(buf, arg->auto_scan_interval); WSM_PUT8(buf, arg->num_probes); WSM_PUT8(buf, arg->num_channels); WSM_PUT8(buf, arg->num_ssids); WSM_PUT8(buf, arg->probe_delay); for (i = 0; i < arg->num_channels; ++i) { WSM_PUT16(buf, arg->ch[i].number); WSM_PUT16(buf, 0); WSM_PUT32(buf, arg->ch[i].min_chan_time); WSM_PUT32(buf, arg->ch[i].max_chan_time); WSM_PUT32(buf, 0); } for (i = 0; i < arg->num_ssids; ++i) { WSM_PUT32(buf, arg->ssids[i].length); WSM_PUT(buf, &arg->ssids[i].ssid[0], sizeof(arg->ssids[i].ssid)); } ret = wsm_cmd_send(priv, buf, NULL, WSM_START_SCAN_REQ_ID, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_stop_scan(struct cw1200_common *priv) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); ret = wsm_cmd_send(priv, buf, NULL, WSM_STOP_SCAN_REQ_ID, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; } static int wsm_tx_confirm(struct cw1200_common *priv, struct wsm_buf *buf, int link_id) { struct wsm_tx_confirm tx_confirm; tx_confirm.packet_id = WSM_GET32(buf); tx_confirm.status = WSM_GET32(buf); tx_confirm.tx_rate = WSM_GET8(buf); tx_confirm.ack_failures = WSM_GET8(buf); tx_confirm.flags = WSM_GET16(buf); tx_confirm.media_delay = WSM_GET32(buf); tx_confirm.tx_queue_delay = WSM_GET32(buf); cw1200_tx_confirm_cb(priv, link_id, &tx_confirm); return 0; underflow: WARN_ON(1); return -EINVAL; } static int wsm_multi_tx_confirm(struct cw1200_common *priv, struct wsm_buf *buf, int link_id) { int ret; int count; count = WSM_GET32(buf); if (WARN_ON(count <= 0)) return -EINVAL; if (count > 1) { /* We already released one buffer, now for the rest */ ret = wsm_release_tx_buffer(priv, count - 1); if (ret < 0) return ret; else if (ret > 0) cw1200_bh_wakeup(priv); } cw1200_debug_txed_multi(priv, count); do { ret = wsm_tx_confirm(priv, buf, link_id); } while (!ret && --count); return ret; underflow: WARN_ON(1); return -EINVAL; } /* ******************************************************************** */ static int wsm_join_confirm(struct cw1200_common *priv, struct wsm_join_cnf *arg, struct wsm_buf *buf) { arg->status = WSM_GET32(buf); if (WARN_ON(arg->status) != WSM_STATUS_SUCCESS) return -EINVAL; arg->min_power_level = WSM_GET32(buf); arg->max_power_level = WSM_GET32(buf); return 0; underflow: WARN_ON(1); return -EINVAL; } int wsm_join(struct cw1200_common *priv, struct wsm_join *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; struct wsm_join_cnf resp; wsm_cmd_lock(priv); WSM_PUT8(buf, arg->mode); WSM_PUT8(buf, arg->band); WSM_PUT16(buf, arg->channel_number); WSM_PUT(buf, &arg->bssid[0], sizeof(arg->bssid)); WSM_PUT16(buf, arg->atim_window); WSM_PUT8(buf, arg->preamble_type); WSM_PUT8(buf, arg->probe_for_join); WSM_PUT8(buf, arg->dtim_period); WSM_PUT8(buf, arg->flags); WSM_PUT32(buf, arg->ssid_len); WSM_PUT(buf, &arg->ssid[0], sizeof(arg->ssid)); WSM_PUT32(buf, arg->beacon_interval); WSM_PUT32(buf, arg->basic_rate_set); priv->tx_burst_idx = -1; ret = 
wsm_cmd_send(priv, buf, &resp, WSM_JOIN_REQ_ID, WSM_CMD_TIMEOUT); /* TODO: Update state based on resp.min|max_power_level */ priv->join_complete_status = resp.status; wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_set_bss_params(struct cw1200_common *priv, const struct wsm_set_bss_params *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); WSM_PUT8(buf, (arg->reset_beacon_loss ? 0x1 : 0)); WSM_PUT8(buf, arg->beacon_lost_count); WSM_PUT16(buf, arg->aid); WSM_PUT32(buf, arg->operational_rate_set); ret = wsm_cmd_send(priv, buf, NULL, WSM_SET_BSS_PARAMS_REQ_ID, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_add_key(struct cw1200_common *priv, const struct wsm_add_key *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); WSM_PUT(buf, arg, sizeof(*arg)); ret = wsm_cmd_send(priv, buf, NULL, WSM_ADD_KEY_REQ_ID, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_remove_key(struct cw1200_common *priv, const struct wsm_remove_key *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); WSM_PUT8(buf, arg->index); WSM_PUT8(buf, 0); WSM_PUT16(buf, 0); ret = wsm_cmd_send(priv, buf, NULL, WSM_REMOVE_KEY_REQ_ID, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_set_tx_queue_params(struct cw1200_common *priv, const struct wsm_set_tx_queue_params *arg, u8 id) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; static const u8 queue_id_to_wmm_aci[] = { 3, 2, 0, 1 }; wsm_cmd_lock(priv); WSM_PUT8(buf, queue_id_to_wmm_aci[id]); WSM_PUT8(buf, 0); WSM_PUT8(buf, arg->ackPolicy); WSM_PUT8(buf, 0); WSM_PUT32(buf, arg->maxTransmitLifetime); WSM_PUT16(buf, arg->allowedMediumTime); WSM_PUT16(buf, 0); ret = wsm_cmd_send(priv, buf, NULL, 0x0012, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_set_edca_params(struct cw1200_common *priv, const struct wsm_edca_params *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); /* Implemented according to specification. 
*/ WSM_PUT16(buf, arg->params[3].cwmin); WSM_PUT16(buf, arg->params[2].cwmin); WSM_PUT16(buf, arg->params[1].cwmin); WSM_PUT16(buf, arg->params[0].cwmin); WSM_PUT16(buf, arg->params[3].cwmax); WSM_PUT16(buf, arg->params[2].cwmax); WSM_PUT16(buf, arg->params[1].cwmax); WSM_PUT16(buf, arg->params[0].cwmax); WSM_PUT8(buf, arg->params[3].aifns); WSM_PUT8(buf, arg->params[2].aifns); WSM_PUT8(buf, arg->params[1].aifns); WSM_PUT8(buf, arg->params[0].aifns); WSM_PUT16(buf, arg->params[3].txop_limit); WSM_PUT16(buf, arg->params[2].txop_limit); WSM_PUT16(buf, arg->params[1].txop_limit); WSM_PUT16(buf, arg->params[0].txop_limit); WSM_PUT32(buf, arg->params[3].max_rx_lifetime); WSM_PUT32(buf, arg->params[2].max_rx_lifetime); WSM_PUT32(buf, arg->params[1].max_rx_lifetime); WSM_PUT32(buf, arg->params[0].max_rx_lifetime); ret = wsm_cmd_send(priv, buf, NULL, WSM_EDCA_PARAMS_REQ_ID, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_switch_channel(struct cw1200_common *priv, const struct wsm_switch_channel *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); WSM_PUT8(buf, arg->mode); WSM_PUT8(buf, arg->switch_count); WSM_PUT16(buf, arg->channel_number); priv->channel_switch_in_progress = 1; ret = wsm_cmd_send(priv, buf, NULL, WSM_SWITCH_CHANNEL_REQ_ID, WSM_CMD_TIMEOUT); if (ret) priv->channel_switch_in_progress = 0; wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_set_pm(struct cw1200_common *priv, const struct wsm_set_pm *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; priv->ps_mode_switch_in_progress = 1; wsm_cmd_lock(priv); WSM_PUT8(buf, arg->mode); WSM_PUT8(buf, arg->fast_psm_idle_period); WSM_PUT8(buf, arg->ap_psm_change_period); WSM_PUT8(buf, arg->min_auto_pspoll_period); ret = wsm_cmd_send(priv, buf, NULL, WSM_SET_PM_REQ_ID, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_start(struct cw1200_common *priv, const struct wsm_start *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); WSM_PUT8(buf, arg->mode); WSM_PUT8(buf, arg->band); WSM_PUT16(buf, arg->channel_number); WSM_PUT32(buf, arg->ct_window); WSM_PUT32(buf, arg->beacon_interval); WSM_PUT8(buf, arg->dtim_period); WSM_PUT8(buf, arg->preamble); WSM_PUT8(buf, arg->probe_delay); WSM_PUT8(buf, arg->ssid_len); WSM_PUT(buf, arg->ssid, sizeof(arg->ssid)); WSM_PUT32(buf, arg->basic_rate_set); priv->tx_burst_idx = -1; ret = wsm_cmd_send(priv, buf, NULL, WSM_START_REQ_ID, WSM_CMD_START_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_beacon_transmit(struct cw1200_common *priv, const struct wsm_beacon_transmit *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); WSM_PUT32(buf, arg->enable_beaconing ? 
1 : 0); ret = wsm_cmd_send(priv, buf, NULL, WSM_BEACON_TRANSMIT_REQ_ID, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_start_find(struct cw1200_common *priv) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); ret = wsm_cmd_send(priv, buf, NULL, 0x0019, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; } /* ******************************************************************** */ int wsm_stop_find(struct cw1200_common *priv) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); ret = wsm_cmd_send(priv, buf, NULL, 0x001A, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; } /* ******************************************************************** */ int wsm_map_link(struct cw1200_common *priv, const struct wsm_map_link *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; u16 cmd = 0x001C | WSM_TX_LINK_ID(arg->link_id); wsm_cmd_lock(priv); WSM_PUT(buf, &arg->mac_addr[0], sizeof(arg->mac_addr)); WSM_PUT16(buf, 0); ret = wsm_cmd_send(priv, buf, NULL, cmd, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_update_ie(struct cw1200_common *priv, const struct wsm_update_ie *arg) { int ret; struct wsm_buf *buf = &priv->wsm_cmd_buf; wsm_cmd_lock(priv); WSM_PUT16(buf, arg->what); WSM_PUT16(buf, arg->count); WSM_PUT(buf, arg->ies, arg->length); ret = wsm_cmd_send(priv, buf, NULL, 0x001B, WSM_CMD_TIMEOUT); wsm_cmd_unlock(priv); return ret; nomem: wsm_cmd_unlock(priv); return -ENOMEM; } /* ******************************************************************** */ int wsm_set_probe_responder(struct cw1200_common *priv, bool enable) { priv->rx_filter.probeResponder = enable; return wsm_set_rx_filter(priv, &priv->rx_filter); } /* ******************************************************************** */ /* WSM indication events implementation */ const char * const cw1200_fw_types[] = { "ETF", "WFM", "WSM", "HI test", "Platform test" }; static int wsm_startup_indication(struct cw1200_common *priv, struct wsm_buf *buf) { priv->wsm_caps.input_buffers = WSM_GET16(buf); priv->wsm_caps.input_buffer_size = WSM_GET16(buf); priv->wsm_caps.hw_id = WSM_GET16(buf); priv->wsm_caps.hw_subid = WSM_GET16(buf); priv->wsm_caps.status = WSM_GET16(buf); priv->wsm_caps.fw_cap = WSM_GET16(buf); priv->wsm_caps.fw_type = WSM_GET16(buf); priv->wsm_caps.fw_api = WSM_GET16(buf); priv->wsm_caps.fw_build = WSM_GET16(buf); priv->wsm_caps.fw_ver = WSM_GET16(buf); WSM_GET(buf, priv->wsm_caps.fw_label, sizeof(priv->wsm_caps.fw_label)); priv->wsm_caps.fw_label[sizeof(priv->wsm_caps.fw_label) - 1] = 0; /* Do not trust FW too much... 
*/ if (WARN_ON(priv->wsm_caps.status)) return -EINVAL; if (WARN_ON(priv->wsm_caps.fw_type > 4)) return -EINVAL; pr_info("CW1200 WSM init done.\n" " Input buffers: %d x %d bytes\n" " Hardware: %d.%d\n" " %s firmware [%s], ver: %d, build: %d," " api: %d, cap: 0x%.4X\n", priv->wsm_caps.input_buffers, priv->wsm_caps.input_buffer_size, priv->wsm_caps.hw_id, priv->wsm_caps.hw_subid, cw1200_fw_types[priv->wsm_caps.fw_type], priv->wsm_caps.fw_label, priv->wsm_caps.fw_ver, priv->wsm_caps.fw_build, priv->wsm_caps.fw_api, priv->wsm_caps.fw_cap); /* Disable unsupported frequency bands */ if (!(priv->wsm_caps.fw_cap & 0x1)) priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; if (!(priv->wsm_caps.fw_cap & 0x2)) priv->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; priv->firmware_ready = 1; wake_up(&priv->wsm_startup_done); return 0; underflow: WARN_ON(1); return -EINVAL; } static int wsm_receive_indication(struct cw1200_common *priv, int link_id, struct wsm_buf *buf, struct sk_buff **skb_p) { struct wsm_rx rx; struct ieee80211_hdr *hdr; size_t hdr_len; __le16 fctl; rx.status = WSM_GET32(buf); rx.channel_number = WSM_GET16(buf); rx.rx_rate = WSM_GET8(buf); rx.rcpi_rssi = WSM_GET8(buf); rx.flags = WSM_GET32(buf); /* FW Workaround: Drop probe resp or beacon when RSSI is 0 */ hdr = (struct ieee80211_hdr *)(*skb_p)->data; if (!rx.rcpi_rssi && (ieee80211_is_probe_resp(hdr->frame_control) || ieee80211_is_beacon(hdr->frame_control))) return 0; /* If no RSSI subscription has been made, * convert RCPI to RSSI here */ if (!priv->cqm_use_rssi) rx.rcpi_rssi = rx.rcpi_rssi / 2 - 110; fctl = *(__le16 *)buf->data; hdr_len = buf->data - buf->begin; skb_pull(*skb_p, hdr_len); if (!rx.status && ieee80211_is_deauth(fctl)) { if (priv->join_status == CW1200_JOIN_STATUS_STA) { /* Shedule unjoin work */ pr_debug("[WSM] Issue unjoin command (RX).\n"); wsm_lock_tx_async(priv); if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) wsm_unlock_tx(priv); } } cw1200_rx_cb(priv, &rx, link_id, skb_p); if (*skb_p) skb_push(*skb_p, hdr_len); return 0; underflow: return -EINVAL; } static int wsm_event_indication(struct cw1200_common *priv, struct wsm_buf *buf) { int first; struct cw1200_wsm_event *event; if (priv->mode == NL80211_IFTYPE_UNSPECIFIED) { /* STA is stopped. 
*/ return 0; } event = kzalloc(sizeof(struct cw1200_wsm_event), GFP_KERNEL); if (!event) return -ENOMEM; event->evt.id = WSM_GET32(buf); event->evt.data = WSM_GET32(buf); pr_debug("[WSM] Event: %d(%d)\n", event->evt.id, event->evt.data); spin_lock(&priv->event_queue_lock); first = list_empty(&priv->event_queue); list_add_tail(&event->link, &priv->event_queue); spin_unlock(&priv->event_queue_lock); if (first) queue_work(priv->workqueue, &priv->event_handler); return 0; underflow: kfree(event); return -EINVAL; } static int wsm_channel_switch_indication(struct cw1200_common *priv, struct wsm_buf *buf) { WARN_ON(WSM_GET32(buf)); priv->channel_switch_in_progress = 0; wake_up(&priv->channel_switch_done); wsm_unlock_tx(priv); return 0; underflow: return -EINVAL; } static int wsm_set_pm_indication(struct cw1200_common *priv, struct wsm_buf *buf) { /* TODO: Check buf (struct wsm_set_pm_complete) for validity */ if (priv->ps_mode_switch_in_progress) { priv->ps_mode_switch_in_progress = 0; wake_up(&priv->ps_mode_switch_done); } return 0; } static int wsm_scan_started(struct cw1200_common *priv, void *arg, struct wsm_buf *buf) { u32 status = WSM_GET32(buf); if (status != WSM_STATUS_SUCCESS) { cw1200_scan_failed_cb(priv); return -EINVAL; } return 0; underflow: WARN_ON(1); return -EINVAL; } static int wsm_scan_complete_indication(struct cw1200_common *priv, struct wsm_buf *buf) { struct wsm_scan_complete arg; arg.status = WSM_GET32(buf); arg.psm = WSM_GET8(buf); arg.num_channels = WSM_GET8(buf); cw1200_scan_complete_cb(priv, &arg); return 0; underflow: return -EINVAL; } static int wsm_join_complete_indication(struct cw1200_common *priv, struct wsm_buf *buf) { struct wsm_join_complete arg; arg.status = WSM_GET32(buf); pr_debug("[WSM] Join complete indication, status: %d\n", arg.status); cw1200_join_complete_cb(priv, &arg); return 0; underflow: return -EINVAL; } static int wsm_find_complete_indication(struct cw1200_common *priv, struct wsm_buf *buf) { pr_warn("Implement find_complete_indication\n"); return 0; } static int wsm_ba_timeout_indication(struct cw1200_common *priv, struct wsm_buf *buf) { u8 tid; u8 addr[ETH_ALEN]; WSM_GET32(buf); tid = WSM_GET8(buf); WSM_GET8(buf); WSM_GET(buf, addr, ETH_ALEN); pr_info("BlockACK timeout, tid %d, addr %pM\n", tid, addr); return 0; underflow: return -EINVAL; } static int wsm_suspend_resume_indication(struct cw1200_common *priv, int link_id, struct wsm_buf *buf) { u32 flags; struct wsm_suspend_resume arg; flags = WSM_GET32(buf); arg.link_id = link_id; arg.stop = !(flags & 1); arg.multicast = !!(flags & 8); arg.queue = (flags >> 1) & 3; cw1200_suspend_resume(priv, &arg); return 0; underflow: return -EINVAL; } /* ******************************************************************** */ /* WSM TX */ static int wsm_cmd_send(struct cw1200_common *priv, struct wsm_buf *buf, void *arg, u16 cmd, long tmo) { size_t buf_len = buf->data - buf->begin; int ret; /* Don't bother if we're dead. */ if (priv->bh_error) { ret = 0; goto done; } /* Block until the cmd buffer is completed. Tortuous. 
*/ spin_lock(&priv->wsm_cmd.lock); while (!priv->wsm_cmd.done) { spin_unlock(&priv->wsm_cmd.lock); spin_lock(&priv->wsm_cmd.lock); } priv->wsm_cmd.done = 0; spin_unlock(&priv->wsm_cmd.lock); if (cmd == WSM_WRITE_MIB_REQ_ID || cmd == WSM_READ_MIB_REQ_ID) pr_debug("[WSM] >>> 0x%.4X [MIB: 0x%.4X] (%zu)\n", cmd, __le16_to_cpu(((__le16 *)buf->begin)[2]), buf_len); else pr_debug("[WSM] >>> 0x%.4X (%zu)\n", cmd, buf_len); /* Due to buggy SPI on CW1200, we need to * pad the message by a few bytes to ensure * that it's completely received. */ buf_len += 4; /* Fill HI message header */ /* BH will add sequence number */ ((__le16 *)buf->begin)[0] = __cpu_to_le16(buf_len); ((__le16 *)buf->begin)[1] = __cpu_to_le16(cmd); spin_lock(&priv->wsm_cmd.lock); BUG_ON(priv->wsm_cmd.ptr); priv->wsm_cmd.ptr = buf->begin; priv->wsm_cmd.len = buf_len; priv->wsm_cmd.arg = arg; priv->wsm_cmd.cmd = cmd; spin_unlock(&priv->wsm_cmd.lock); cw1200_bh_wakeup(priv); /* Wait for command completion */ ret = wait_event_timeout(priv->wsm_cmd_wq, priv->wsm_cmd.done, tmo); if (!ret && !priv->wsm_cmd.done) { spin_lock(&priv->wsm_cmd.lock); priv->wsm_cmd.done = 1; priv->wsm_cmd.ptr = NULL; spin_unlock(&priv->wsm_cmd.lock); if (priv->bh_error) { /* Return ok to help system cleanup */ ret = 0; } else { pr_err("CMD req (0x%04x) stuck in firmware, killing BH\n", priv->wsm_cmd.cmd); print_hex_dump_bytes("REQDUMP: ", DUMP_PREFIX_NONE, buf->begin, buf_len); pr_err("Outstanding outgoing frames: %d\n", priv->hw_bufs_used); /* Kill BH thread to report the error to the top layer. */ atomic_inc(&priv->bh_term); wake_up(&priv->bh_wq); ret = -ETIMEDOUT; } } else { spin_lock(&priv->wsm_cmd.lock); BUG_ON(!priv->wsm_cmd.done); ret = priv->wsm_cmd.ret; spin_unlock(&priv->wsm_cmd.lock); } done: wsm_buf_reset(buf); return ret; } /* ******************************************************************** */ /* WSM TX port control */ void wsm_lock_tx(struct cw1200_common *priv) { wsm_cmd_lock(priv); if (atomic_inc_return(&priv->tx_lock) == 1) { if (wsm_flush_tx(priv)) pr_debug("[WSM] TX is locked.\n"); } wsm_cmd_unlock(priv); } void wsm_lock_tx_async(struct cw1200_common *priv) { if (atomic_inc_return(&priv->tx_lock) == 1) pr_debug("[WSM] TX is locked (async).\n"); } bool wsm_flush_tx(struct cw1200_common *priv) { unsigned long timestamp = jiffies; bool pending = false; long timeout; int i; /* Flush must be called with TX lock held. */ BUG_ON(!atomic_read(&priv->tx_lock)); /* First check if we really need to do something. * It is safe to use unprotected access, as hw_bufs_used * can only decrements. */ if (!priv->hw_bufs_used) return true; if (priv->bh_error) { /* In case of failure do not wait for magic. */ pr_err("[WSM] Fatal error occurred, will not flush TX.\n"); return false; } else { /* Get a timestamp of "oldest" frame */ for (i = 0; i < 4; ++i) pending |= cw1200_queue_get_xmit_timestamp( &priv->tx_queue[i], &timestamp, 0xffffffff); /* If there's nothing pending, we're good */ if (!pending) return true; timeout = timestamp + WSM_CMD_LAST_CHANCE_TIMEOUT - jiffies; if (timeout < 0 || wait_event_timeout(priv->bh_evt_wq, !priv->hw_bufs_used, timeout) <= 0) { /* Hmmm... Not good. Frame had stuck in firmware. */ priv->bh_error = 1; wiphy_err(priv->hw->wiphy, "[WSM] TX Frames (%d) stuck in firmware, killing BH\n", priv->hw_bufs_used); wake_up(&priv->bh_wq); return false; } /* Ok, everything is flushed. 
*/ return true; } } void wsm_unlock_tx(struct cw1200_common *priv) { int tx_lock; tx_lock = atomic_dec_return(&priv->tx_lock); BUG_ON(tx_lock < 0); if (tx_lock == 0) { if (!priv->bh_error) cw1200_bh_wakeup(priv); pr_debug("[WSM] TX is unlocked.\n"); } } /* ******************************************************************** */ /* WSM RX */ int wsm_handle_exception(struct cw1200_common *priv, u8 *data, size_t len) { struct wsm_buf buf; u32 reason; u32 reg[18]; char fname[48]; unsigned int i; static const char * const reason_str[] = { "undefined instruction", "prefetch abort", "data abort", "unknown error", }; buf.begin = buf.data = data; buf.end = &buf.begin[len]; reason = WSM_GET32(&buf); for (i = 0; i < ARRAY_SIZE(reg); ++i) reg[i] = WSM_GET32(&buf); WSM_GET(&buf, fname, sizeof(fname)); if (reason < 4) wiphy_err(priv->hw->wiphy, "Firmware exception: %s.\n", reason_str[reason]); else wiphy_err(priv->hw->wiphy, "Firmware assert at %.*s, line %d\n", (int) sizeof(fname), fname, reg[1]); for (i = 0; i < 12; i += 4) wiphy_err(priv->hw->wiphy, "R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X, R%d: 0x%.8X,\n", i + 0, reg[i + 0], i + 1, reg[i + 1], i + 2, reg[i + 2], i + 3, reg[i + 3]); wiphy_err(priv->hw->wiphy, "R12: 0x%.8X, SP: 0x%.8X, LR: 0x%.8X, PC: 0x%.8X,\n", reg[i + 0], reg[i + 1], reg[i + 2], reg[i + 3]); i += 4; wiphy_err(priv->hw->wiphy, "CPSR: 0x%.8X, SPSR: 0x%.8X\n", reg[i + 0], reg[i + 1]); print_hex_dump_bytes("R1: ", DUMP_PREFIX_NONE, fname, sizeof(fname)); return 0; underflow: wiphy_err(priv->hw->wiphy, "Firmware exception.\n"); print_hex_dump_bytes("Exception: ", DUMP_PREFIX_NONE, data, len); return -EINVAL; } int wsm_handle_rx(struct cw1200_common *priv, u16 id, struct wsm_hdr *wsm, struct sk_buff **skb_p) { int ret = 0; struct wsm_buf wsm_buf; int link_id = (id >> 6) & 0x0F; /* Strip link id. */ id &= ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX); wsm_buf.begin = (u8 *)&wsm[0]; wsm_buf.data = (u8 *)&wsm[1]; wsm_buf.end = &wsm_buf.begin[__le16_to_cpu(wsm->len)]; pr_debug("[WSM] <<< 0x%.4X (%td)\n", id, wsm_buf.end - wsm_buf.begin); if (id == WSM_TX_CONFIRM_IND_ID) { ret = wsm_tx_confirm(priv, &wsm_buf, link_id); } else if (id == WSM_MULTI_TX_CONFIRM_ID) { ret = wsm_multi_tx_confirm(priv, &wsm_buf, link_id); } else if (id & 0x0400) { void *wsm_arg; u16 wsm_cmd; /* Do not trust FW too much. Protection against repeated * response and race condition removal (see above). */ spin_lock(&priv->wsm_cmd.lock); wsm_arg = priv->wsm_cmd.arg; wsm_cmd = priv->wsm_cmd.cmd & ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX); priv->wsm_cmd.cmd = 0xFFFF; spin_unlock(&priv->wsm_cmd.lock); if (WARN_ON((id & ~0x0400) != wsm_cmd)) { /* Note that any non-zero is a fatal retcode. */ ret = -EINVAL; goto out; } /* Note that wsm_arg can be NULL in case of timeout in * wsm_cmd_send(). 
*/ switch (id) { case WSM_READ_MIB_RESP_ID: if (wsm_arg) ret = wsm_read_mib_confirm(priv, wsm_arg, &wsm_buf); break; case WSM_WRITE_MIB_RESP_ID: if (wsm_arg) ret = wsm_write_mib_confirm(priv, wsm_arg, &wsm_buf); break; case WSM_START_SCAN_RESP_ID: if (wsm_arg) ret = wsm_scan_started(priv, wsm_arg, &wsm_buf); break; case WSM_CONFIGURATION_RESP_ID: if (wsm_arg) ret = wsm_configuration_confirm(priv, wsm_arg, &wsm_buf); break; case WSM_JOIN_RESP_ID: if (wsm_arg) ret = wsm_join_confirm(priv, wsm_arg, &wsm_buf); break; case WSM_STOP_SCAN_RESP_ID: case WSM_RESET_RESP_ID: case WSM_ADD_KEY_RESP_ID: case WSM_REMOVE_KEY_RESP_ID: case WSM_SET_PM_RESP_ID: case WSM_SET_BSS_PARAMS_RESP_ID: case 0x0412: /* set_tx_queue_params */ case WSM_EDCA_PARAMS_RESP_ID: case WSM_SWITCH_CHANNEL_RESP_ID: case WSM_START_RESP_ID: case WSM_BEACON_TRANSMIT_RESP_ID: case 0x0419: /* start_find */ case 0x041A: /* stop_find */ case 0x041B: /* update_ie */ case 0x041C: /* map_link */ WARN_ON(wsm_arg != NULL); ret = wsm_generic_confirm(priv, wsm_arg, &wsm_buf); if (ret) { wiphy_warn(priv->hw->wiphy, "wsm_generic_confirm failed for request 0x%04x.\n", id & ~0x0400); /* often 0x407 and 0x410 occur, this means we're dead.. */ if (priv->join_status >= CW1200_JOIN_STATUS_JOINING) { wsm_lock_tx(priv); if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) wsm_unlock_tx(priv); } } break; default: wiphy_warn(priv->hw->wiphy, "Unrecognized confirmation 0x%04x\n", id & ~0x0400); } spin_lock(&priv->wsm_cmd.lock); priv->wsm_cmd.ret = ret; priv->wsm_cmd.done = 1; spin_unlock(&priv->wsm_cmd.lock); ret = 0; /* Error response from device should ne stop BH. */ wake_up(&priv->wsm_cmd_wq); } else if (id & 0x0800) { switch (id) { case WSM_STARTUP_IND_ID: ret = wsm_startup_indication(priv, &wsm_buf); break; case WSM_RECEIVE_IND_ID: ret = wsm_receive_indication(priv, link_id, &wsm_buf, skb_p); break; case 0x0805: ret = wsm_event_indication(priv, &wsm_buf); break; case WSM_SCAN_COMPLETE_IND_ID: ret = wsm_scan_complete_indication(priv, &wsm_buf); break; case 0x0808: ret = wsm_ba_timeout_indication(priv, &wsm_buf); break; case 0x0809: ret = wsm_set_pm_indication(priv, &wsm_buf); break; case 0x080A: ret = wsm_channel_switch_indication(priv, &wsm_buf); break; case 0x080B: ret = wsm_find_complete_indication(priv, &wsm_buf); break; case 0x080C: ret = wsm_suspend_resume_indication(priv, link_id, &wsm_buf); break; case 0x080F: ret = wsm_join_complete_indication(priv, &wsm_buf); break; default: pr_warn("Unrecognised WSM ID %04x\n", id); } } else { WARN_ON(1); ret = -EINVAL; } out: return ret; } static bool wsm_handle_tx_data(struct cw1200_common *priv, struct wsm_tx *wsm, const struct ieee80211_tx_info *tx_info, const struct cw1200_txpriv *txpriv, struct cw1200_queue *queue) { bool handled = false; const struct ieee80211_hdr *frame = (struct ieee80211_hdr *)&((u8 *)wsm)[txpriv->offset]; __le16 fctl = frame->frame_control; enum { do_probe, do_drop, do_wep, do_tx, } action = do_tx; switch (priv->mode) { case NL80211_IFTYPE_STATION: if (priv->join_status == CW1200_JOIN_STATUS_MONITOR) action = do_tx; else if (priv->join_status < CW1200_JOIN_STATUS_PRE_STA) action = do_drop; break; case NL80211_IFTYPE_AP: if (!priv->join_status) { action = do_drop; } else if (!(BIT(txpriv->raw_link_id) & (BIT(0) | priv->link_id_map))) { wiphy_warn(priv->hw->wiphy, "A frame with expired link id is dropped.\n"); action = do_drop; } if (cw1200_queue_get_generation(wsm->packet_id) > CW1200_MAX_REQUEUE_ATTEMPTS) { /* HACK!!! 
WSM324 firmware has tendency to requeue * multicast frames in a loop, causing performance * drop and high power consumption of the driver. * In this situation it is better just to drop * the problematic frame. */ wiphy_warn(priv->hw->wiphy, "Too many attempts to requeue a frame; dropped.\n"); action = do_drop; } break; case NL80211_IFTYPE_ADHOC: if (priv->join_status != CW1200_JOIN_STATUS_IBSS) action = do_drop; break; case NL80211_IFTYPE_MESH_POINT: action = do_tx; /* TODO: Test me! */ break; case NL80211_IFTYPE_MONITOR: default: action = do_drop; break; } if (action == do_tx) { if (ieee80211_is_nullfunc(fctl)) { spin_lock(&priv->bss_loss_lock); if (priv->bss_loss_state) { priv->bss_loss_confirm_id = wsm->packet_id; wsm->queue_id = WSM_QUEUE_VOICE; } spin_unlock(&priv->bss_loss_lock); } else if (ieee80211_is_probe_req(fctl)) { action = do_probe; } else if (ieee80211_is_deauth(fctl) && priv->mode != NL80211_IFTYPE_AP) { pr_debug("[WSM] Issue unjoin command due to tx deauth.\n"); wsm_lock_tx_async(priv); if (queue_work(priv->workqueue, &priv->unjoin_work) <= 0) wsm_unlock_tx(priv); } else if (ieee80211_has_protected(fctl) && tx_info->control.hw_key && tx_info->control.hw_key->keyidx != priv->wep_default_key_id && (tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP40 || tx_info->control.hw_key->cipher == WLAN_CIPHER_SUITE_WEP104)) { action = do_wep; } } switch (action) { case do_probe: /* An interesting FW "feature". Device filters probe responses. * The easiest way to get it back is to convert * probe request into WSM start_scan command. */ pr_debug("[WSM] Convert probe request to scan.\n"); wsm_lock_tx_async(priv); priv->pending_frame_id = wsm->packet_id; if (queue_delayed_work(priv->workqueue, &priv->scan.probe_work, 0) <= 0) wsm_unlock_tx(priv); handled = true; break; case do_drop: pr_debug("[WSM] Drop frame (0x%.4X).\n", fctl); BUG_ON(cw1200_queue_remove(queue, wsm->packet_id)); handled = true; break; case do_wep: pr_debug("[WSM] Issue set_default_wep_key.\n"); wsm_lock_tx_async(priv); priv->wep_default_key_id = tx_info->control.hw_key->keyidx; priv->pending_frame_id = wsm->packet_id; if (queue_work(priv->workqueue, &priv->wep_key_work) <= 0) wsm_unlock_tx(priv); handled = true; break; case do_tx: pr_debug("[WSM] Transmit frame.\n"); break; default: /* Do nothing */ break; } return handled; } static int cw1200_get_prio_queue(struct cw1200_common *priv, u32 link_id_map, int *total) { static const int urgent = BIT(CW1200_LINK_ID_AFTER_DTIM) | BIT(CW1200_LINK_ID_UAPSD); struct wsm_edca_queue_params *edca; unsigned score, best = -1; int winner = -1; int queued; int i; /* search for a winner using edca params */ for (i = 0; i < 4; ++i) { queued = cw1200_queue_get_num_queued(&priv->tx_queue[i], link_id_map); if (!queued) continue; *total += queued; edca = &priv->edca.params[i]; score = ((edca->aifns + edca->cwmin) << 16) + ((edca->cwmax - edca->cwmin) * get_random_u16()); if (score < best && (winner < 0 || i != 3)) { best = score; winner = i; } } /* override winner if bursting */ if (winner >= 0 && priv->tx_burst_idx >= 0 && winner != priv->tx_burst_idx && !cw1200_queue_get_num_queued( &priv->tx_queue[winner], link_id_map & urgent) && cw1200_queue_get_num_queued( &priv->tx_queue[priv->tx_burst_idx], link_id_map)) winner = priv->tx_burst_idx; return winner; } static int wsm_get_tx_queue_and_mask(struct cw1200_common *priv, struct cw1200_queue **queue_p, u32 *tx_allowed_mask_p, bool *more) { int idx; u32 tx_allowed_mask; int total = 0; /* Search for a queue with multicast frames buffered 
*/ if (priv->tx_multicast) { tx_allowed_mask = BIT(CW1200_LINK_ID_AFTER_DTIM); idx = cw1200_get_prio_queue(priv, tx_allowed_mask, &total); if (idx >= 0) { *more = total > 1; goto found; } } /* Search for unicast traffic */ tx_allowed_mask = ~priv->sta_asleep_mask; tx_allowed_mask |= BIT(CW1200_LINK_ID_UAPSD); if (priv->sta_asleep_mask) { tx_allowed_mask |= priv->pspoll_mask; tx_allowed_mask &= ~BIT(CW1200_LINK_ID_AFTER_DTIM); } else { tx_allowed_mask |= BIT(CW1200_LINK_ID_AFTER_DTIM); } idx = cw1200_get_prio_queue(priv, tx_allowed_mask, &total); if (idx < 0) return -ENOENT; found: *queue_p = &priv->tx_queue[idx]; *tx_allowed_mask_p = tx_allowed_mask; return 0; } int wsm_get_tx(struct cw1200_common *priv, u8 **data, size_t *tx_len, int *burst) { struct wsm_tx *wsm = NULL; struct ieee80211_tx_info *tx_info; struct cw1200_queue *queue = NULL; int queue_num; u32 tx_allowed_mask = 0; const struct cw1200_txpriv *txpriv = NULL; int count = 0; /* More is used only for broadcasts. */ bool more = false; if (priv->wsm_cmd.ptr) { /* CMD request */ ++count; spin_lock(&priv->wsm_cmd.lock); BUG_ON(!priv->wsm_cmd.ptr); *data = priv->wsm_cmd.ptr; *tx_len = priv->wsm_cmd.len; *burst = 1; spin_unlock(&priv->wsm_cmd.lock); } else { for (;;) { int ret; if (atomic_add_return(0, &priv->tx_lock)) break; spin_lock_bh(&priv->ps_state_lock); ret = wsm_get_tx_queue_and_mask(priv, &queue, &tx_allowed_mask, &more); queue_num = queue - priv->tx_queue; if (priv->buffered_multicasts && (ret || !more) && (priv->tx_multicast || !priv->sta_asleep_mask)) { priv->buffered_multicasts = false; if (priv->tx_multicast) { priv->tx_multicast = false; queue_work(priv->workqueue, &priv->multicast_stop_work); } } spin_unlock_bh(&priv->ps_state_lock); if (ret) break; if (cw1200_queue_get(queue, tx_allowed_mask, &wsm, &tx_info, &txpriv)) continue; if (wsm_handle_tx_data(priv, wsm, tx_info, txpriv, queue)) continue; /* Handled by WSM */ wsm->hdr.id &= __cpu_to_le16( ~WSM_TX_LINK_ID(WSM_TX_LINK_ID_MAX)); wsm->hdr.id |= cpu_to_le16( WSM_TX_LINK_ID(txpriv->raw_link_id)); priv->pspoll_mask &= ~BIT(txpriv->raw_link_id); *data = (u8 *)wsm; *tx_len = __le16_to_cpu(wsm->hdr.len); /* allow bursting if txop is set */ if (priv->edca.params[queue_num].txop_limit) *burst = min(*burst, (int)cw1200_queue_get_num_queued(queue, tx_allowed_mask) + 1); else *burst = 1; /* store index of bursting queue */ if (*burst > 1) priv->tx_burst_idx = queue_num; else priv->tx_burst_idx = -1; if (more) { struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) &((u8 *)wsm)[txpriv->offset]; /* more buffered multicast/broadcast frames * ==> set MoreData flag in IEEE 802.11 header * to inform PS STAs */ hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA); } pr_debug("[WSM] >>> 0x%.4X (%zu) %p %c\n", 0x0004, *tx_len, *data, wsm->more ? 'M' : ' '); ++count; break; } } return count; } void wsm_txed(struct cw1200_common *priv, u8 *data) { if (data == priv->wsm_cmd.ptr) { spin_lock(&priv->wsm_cmd.lock); priv->wsm_cmd.ptr = NULL; spin_unlock(&priv->wsm_cmd.lock); } } /* ******************************************************************** */ /* WSM buffer */ void wsm_buf_init(struct wsm_buf *buf) { BUG_ON(buf->begin); buf->begin = kmalloc(FWLOAD_BLOCK_SIZE, GFP_KERNEL | GFP_DMA); buf->end = buf->begin ? 
&buf->begin[FWLOAD_BLOCK_SIZE] : buf->begin; wsm_buf_reset(buf); } void wsm_buf_deinit(struct wsm_buf *buf) { kfree(buf->begin); buf->begin = buf->data = buf->end = NULL; } static void wsm_buf_reset(struct wsm_buf *buf) { if (buf->begin) { buf->data = &buf->begin[4]; *(u32 *)buf->begin = 0; } else { buf->data = buf->begin; } } static int wsm_buf_reserve(struct wsm_buf *buf, size_t extra_size) { size_t pos = buf->data - buf->begin; size_t size = pos + extra_size; u8 *tmp; size = round_up(size, FWLOAD_BLOCK_SIZE); tmp = krealloc(buf->begin, size, GFP_KERNEL | GFP_DMA); if (!tmp) { wsm_buf_deinit(buf); return -ENOMEM; } buf->begin = tmp; buf->data = &buf->begin[pos]; buf->end = &buf->begin[size]; return 0; }
linux-master
drivers/net/wireless/st/cw1200/wsm.c
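wsm_cmd_send() above frames every request with a small HI header: a little-endian 16-bit total length at offset 0 and a 16-bit message id at offset 2, with the peer link id folded into bits 6..9 (cf. WSM_TX_LINK_ID() and the "(id >> 6) & 0x0F" unpacking in wsm_handle_rx()), followed by the payload marshalled with the WSM_PUT* macros. A minimal user-space sketch of that framing follows; the command id value, the payload bytes, and the helper names are illustrative assumptions rather than values taken from wsm.h, and the real driver additionally pads the written length for its SPI workaround.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_le16(uint8_t *p, uint16_t v)
{
	p[0] = (uint8_t)(v & 0xff);
	p[1] = (uint8_t)(v >> 8);
}

/* Build [le16 len][le16 id | link_id << 6][payload...], as wsm_cmd_send() does. */
static size_t wsm_frame(uint8_t *buf, uint16_t cmd, unsigned int link_id,
			const void *payload, size_t payload_len)
{
	size_t len = 4 + payload_len;	/* the 4-byte header is counted in the length */

	put_le16(buf + 0, (uint16_t)len);
	put_le16(buf + 2, (uint16_t)(cmd | ((link_id & 0x0F) << 6)));
	memcpy(buf + 4, payload, payload_len);
	return len;
}

int main(void)
{
	uint8_t frame[32];
	uint8_t body[4] = { 0x09, 0x00, 0x00, 0x00 };	/* e.g. a __le32 value of 9 */
	size_t n = wsm_frame(frame, 0x0006 /* id value illustrative */, 0,
			     body, sizeof(body));

	for (size_t i = 0; i < n; i++)
		printf("%02x ", (unsigned int)frame[i]);
	printf("\n");
	return 0;
}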
// SPDX-License-Identifier: GPL-2.0-only /* * Low-level device IO routines for ST-Ericsson CW1200 drivers * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> * * Based on: * ST-Ericsson UMAC CW1200 driver, which is * Copyright (c) 2010, ST-Ericsson * Author: Ajitpal Singh <[email protected]> */ #include <linux/types.h> #include "cw1200.h" #include "hwio.h" #include "hwbus.h" /* Sdio addr is 4*spi_addr */ #define SPI_REG_ADDR_TO_SDIO(spi_reg_addr) ((spi_reg_addr) << 2) #define SDIO_ADDR17BIT(buf_id, mpf, rfu, reg_id_ofs) \ ((((buf_id) & 0x1F) << 7) \ | (((mpf) & 1) << 6) \ | (((rfu) & 1) << 5) \ | (((reg_id_ofs) & 0x1F) << 0)) #define MAX_RETRY 3 static int __cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf, size_t buf_len, int buf_id) { u16 addr_sdio; u32 sdio_reg_addr_17bit; /* Check if buffer is aligned to 4 byte boundary */ if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) { pr_err("buffer is not aligned.\n"); return -EINVAL; } /* Convert to SDIO Register Address */ addr_sdio = SPI_REG_ADDR_TO_SDIO(addr); sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio); return priv->hwbus_ops->hwbus_memcpy_fromio(priv->hwbus_priv, sdio_reg_addr_17bit, buf, buf_len); } static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf, size_t buf_len, int buf_id) { u16 addr_sdio; u32 sdio_reg_addr_17bit; /* Convert to SDIO Register Address */ addr_sdio = SPI_REG_ADDR_TO_SDIO(addr); sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio); return priv->hwbus_ops->hwbus_memcpy_toio(priv->hwbus_priv, sdio_reg_addr_17bit, buf, buf_len); } static inline int __cw1200_reg_read_32(struct cw1200_common *priv, u16 addr, u32 *val) { __le32 tmp; int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0); *val = le32_to_cpu(tmp); return i; } static inline int __cw1200_reg_write_32(struct cw1200_common *priv, u16 addr, u32 val) { __le32 tmp = cpu_to_le32(val); return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0); } static inline int __cw1200_reg_read_16(struct cw1200_common *priv, u16 addr, u16 *val) { __le16 tmp; int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0); *val = le16_to_cpu(tmp); return i; } static inline int __cw1200_reg_write_16(struct cw1200_common *priv, u16 addr, u16 val) { __le16 tmp = cpu_to_le16(val); return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0); } int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf, size_t buf_len) { int ret; priv->hwbus_ops->lock(priv->hwbus_priv); ret = __cw1200_reg_read(priv, addr, buf, buf_len, 0); priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; } int cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf, size_t buf_len) { int ret; priv->hwbus_ops->lock(priv->hwbus_priv); ret = __cw1200_reg_write(priv, addr, buf, buf_len, 0); priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; } int cw1200_data_read(struct cw1200_common *priv, void *buf, size_t buf_len) { int ret, retry = 1; int buf_id_rx = priv->buf_id_rx; priv->hwbus_ops->lock(priv->hwbus_priv); while (retry <= MAX_RETRY) { ret = __cw1200_reg_read(priv, ST90TDS_IN_OUT_QUEUE_REG_ID, buf, buf_len, buf_id_rx + 1); if (!ret) { buf_id_rx = (buf_id_rx + 1) & 3; priv->buf_id_rx = buf_id_rx; break; } else { retry++; mdelay(1); pr_err("error :[%d]\n", ret); } } priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; } int cw1200_data_write(struct cw1200_common *priv, const void *buf, size_t buf_len) { int ret, retry = 1; int buf_id_tx = priv->buf_id_tx; 
priv->hwbus_ops->lock(priv->hwbus_priv); while (retry <= MAX_RETRY) { ret = __cw1200_reg_write(priv, ST90TDS_IN_OUT_QUEUE_REG_ID, buf, buf_len, buf_id_tx); if (!ret) { buf_id_tx = (buf_id_tx + 1) & 31; priv->buf_id_tx = buf_id_tx; break; } else { retry++; mdelay(1); pr_err("error :[%d]\n", ret); } } priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; } int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf, size_t buf_len, u32 prefetch, u16 port_addr) { u32 val32 = 0; int i, ret; if ((buf_len / 2) >= 0x1000) { pr_err("Can't read more than 0xfff words.\n"); return -EINVAL; } priv->hwbus_ops->lock(priv->hwbus_priv); /* Write address */ ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr); if (ret < 0) { pr_err("Can't write address register.\n"); goto out; } /* Read CONFIG Register Value - We will read 32 bits */ ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); if (ret < 0) { pr_err("Can't read config register.\n"); goto out; } /* Set PREFETCH bit */ ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32 | prefetch); if (ret < 0) { pr_err("Can't write prefetch bit.\n"); goto out; } /* Check for PRE-FETCH bit to be cleared */ for (i = 0; i < 20; i++) { ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); if (ret < 0) { pr_err("Can't check prefetch bit.\n"); goto out; } if (!(val32 & prefetch)) break; mdelay(i); } if (val32 & prefetch) { pr_err("Prefetch bit is not cleared.\n"); goto out; } /* Read data port */ ret = __cw1200_reg_read(priv, port_addr, buf, buf_len, 0); if (ret < 0) { pr_err("Can't read data port.\n"); goto out; } out: priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; } int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf, size_t buf_len) { int ret; if ((buf_len / 2) >= 0x1000) { pr_err("Can't write more than 0xfff words.\n"); return -EINVAL; } priv->hwbus_ops->lock(priv->hwbus_priv); /* Write address */ ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr); if (ret < 0) { pr_err("Can't write address register.\n"); goto out; } /* Write data port */ ret = __cw1200_reg_write(priv, ST90TDS_SRAM_DPORT_REG_ID, buf, buf_len, 0); if (ret < 0) { pr_err("Can't write data port.\n"); goto out; } out: priv->hwbus_ops->unlock(priv->hwbus_priv); return ret; } int __cw1200_irq_enable(struct cw1200_common *priv, int enable) { u32 val32; u16 val16; int ret; if (HIF_8601_SILICON == priv->hw_type) { ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32); if (ret < 0) { pr_err("Can't read config register.\n"); return ret; } if (enable) val32 |= ST90TDS_CONF_IRQ_RDY_ENABLE; else val32 &= ~ST90TDS_CONF_IRQ_RDY_ENABLE; ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32); if (ret < 0) { pr_err("Can't write config register.\n"); return ret; } } else { ret = __cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16); if (ret < 0) { pr_err("Can't read control register.\n"); return ret; } if (enable) val16 |= ST90TDS_CONT_IRQ_RDY_ENABLE; else val16 &= ~ST90TDS_CONT_IRQ_RDY_ENABLE; ret = __cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID, val16); if (ret < 0) { pr_err("Can't write control register.\n"); return ret; } } return 0; }
linux-master
drivers/net/wireless/st/cw1200/hwio.c
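__cw1200_reg_read()/__cw1200_reg_write() above translate a CW1200 SPI register number into the 17-bit SDIO command address by shifting it left by two (SPI_REG_ADDR_TO_SDIO) and packing it with a 5-bit buffer id plus the MPF/RFU bits (SDIO_ADDR17BIT). The sketch below only restates that bit packing as plain functions; the register number used in main() is a made-up example, not a real ST90TDS_* value.

#include <stdint.h>
#include <stdio.h>

static uint32_t spi_reg_addr_to_sdio(uint16_t spi_reg_addr)
{
	return (uint32_t)spi_reg_addr << 2;	/* SDIO addr is 4 * SPI addr */
}

static uint32_t sdio_addr17bit(uint32_t buf_id, uint32_t mpf, uint32_t rfu,
			       uint32_t reg_id_ofs)
{
	return ((buf_id & 0x1F) << 7) |
	       ((mpf & 1) << 6) |
	       ((rfu & 1) << 5) |
	       (reg_id_ofs & 0x1F);
}

int main(void)
{
	uint32_t reg = spi_reg_addr_to_sdio(0x3);	/* hypothetical SPI register 0x3 */
	/* e.g. a queue access with rx buffer id 2 (+1, as in cw1200_data_read()) */
	uint32_t addr = sdio_addr17bit(2 + 1, 0, 0, reg);

	printf("17-bit SDIO address: 0x%05x\n", (unsigned int)addr);
	return 0;
}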
// SPDX-License-Identifier: GPL-2.0-only /* * Mac80211 SDIO driver for ST-Ericsson CW1200 device * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <[email protected]> */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/mmc/host.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/card.h> #include <linux/mmc/sdio.h> #include <linux/mmc/sdio_ids.h> #include <net/mac80211.h> #include "cw1200.h" #include "hwbus.h" #include <linux/platform_data/net-cw1200.h> #include "hwio.h" MODULE_AUTHOR("Dmitry Tarnyagin <[email protected]>"); MODULE_DESCRIPTION("mac80211 ST-Ericsson CW1200 SDIO driver"); MODULE_LICENSE("GPL"); #define SDIO_BLOCK_SIZE (512) /* Default platform data for Sagrad modules */ static struct cw1200_platform_data_sdio sagrad_109x_evk_platform_data = { .ref_clk = 38400, .have_5ghz = false, .sdd_file = "sdd_sagrad_1091_1098.bin", }; /* Allow platform data to be overridden */ static struct cw1200_platform_data_sdio *global_plat_data = &sagrad_109x_evk_platform_data; void __init cw1200_sdio_set_platform_data(struct cw1200_platform_data_sdio *pdata) { global_plat_data = pdata; } struct hwbus_priv { struct sdio_func *func; struct cw1200_common *core; const struct cw1200_platform_data_sdio *pdata; }; static const struct sdio_device_id cw1200_sdio_ids[] = { { SDIO_DEVICE(SDIO_VENDOR_ID_STE, SDIO_DEVICE_ID_STE_CW1200) }, { /* end: all zeroes */ }, }; MODULE_DEVICE_TABLE(sdio, cw1200_sdio_ids); /* hwbus_ops implementation */ static int cw1200_sdio_memcpy_fromio(struct hwbus_priv *self, unsigned int addr, void *dst, int count) { return sdio_memcpy_fromio(self->func, dst, addr, count); } static int cw1200_sdio_memcpy_toio(struct hwbus_priv *self, unsigned int addr, const void *src, int count) { return sdio_memcpy_toio(self->func, addr, (void *)src, count); } static void cw1200_sdio_lock(struct hwbus_priv *self) { sdio_claim_host(self->func); } static void cw1200_sdio_unlock(struct hwbus_priv *self) { sdio_release_host(self->func); } static void cw1200_sdio_irq_handler(struct sdio_func *func) { struct hwbus_priv *self = sdio_get_drvdata(func); /* note: sdio_host already claimed here. */ if (self->core) cw1200_irq_handler(self->core); } static irqreturn_t cw1200_gpio_hardirq(int irq, void *dev_id) { return IRQ_WAKE_THREAD; } static irqreturn_t cw1200_gpio_irq(int irq, void *dev_id) { struct hwbus_priv *self = dev_id; if (self->core) { cw1200_sdio_lock(self); cw1200_irq_handler(self->core); cw1200_sdio_unlock(self); return IRQ_HANDLED; } else { return IRQ_NONE; } } static int cw1200_request_irq(struct hwbus_priv *self) { int ret; u8 cccr; cccr = sdio_f0_readb(self->func, SDIO_CCCR_IENx, &ret); if (WARN_ON(ret)) goto err; /* Master interrupt enable ... 
for our function */ cccr |= BIT(self->func->num); sdio_f0_writeb(self->func, cccr, SDIO_CCCR_IENx, &ret); if (WARN_ON(ret)) goto err; ret = enable_irq_wake(self->pdata->irq); if (WARN_ON(ret)) goto err; /* Request the IRQ */ ret = request_threaded_irq(self->pdata->irq, cw1200_gpio_hardirq, cw1200_gpio_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT, "cw1200_wlan_irq", self); if (WARN_ON(ret)) goto err; return 0; err: return ret; } static int cw1200_sdio_irq_subscribe(struct hwbus_priv *self) { int ret = 0; pr_debug("SW IRQ subscribe\n"); sdio_claim_host(self->func); if (self->pdata->irq) ret = cw1200_request_irq(self); else ret = sdio_claim_irq(self->func, cw1200_sdio_irq_handler); sdio_release_host(self->func); return ret; } static int cw1200_sdio_irq_unsubscribe(struct hwbus_priv *self) { int ret = 0; pr_debug("SW IRQ unsubscribe\n"); if (self->pdata->irq) { disable_irq_wake(self->pdata->irq); free_irq(self->pdata->irq, self); } else { sdio_claim_host(self->func); ret = sdio_release_irq(self->func); sdio_release_host(self->func); } return ret; } static int cw1200_sdio_off(const struct cw1200_platform_data_sdio *pdata) { if (pdata->reset) { gpio_set_value(pdata->reset, 0); msleep(30); /* Min is 2 * CLK32K cycles */ gpio_free(pdata->reset); } if (pdata->power_ctrl) pdata->power_ctrl(pdata, false); if (pdata->clk_ctrl) pdata->clk_ctrl(pdata, false); return 0; } static int cw1200_sdio_on(const struct cw1200_platform_data_sdio *pdata) { /* Ensure I/Os are pulled low */ if (pdata->reset) { gpio_request(pdata->reset, "cw1200_wlan_reset"); gpio_direction_output(pdata->reset, 0); } if (pdata->powerup) { gpio_request(pdata->powerup, "cw1200_wlan_powerup"); gpio_direction_output(pdata->powerup, 0); } if (pdata->reset || pdata->powerup) msleep(10); /* Settle time? */ /* Enable 3v3 and 1v8 to hardware */ if (pdata->power_ctrl) { if (pdata->power_ctrl(pdata, true)) { pr_err("power_ctrl() failed!\n"); return -1; } } /* Enable CLK32K */ if (pdata->clk_ctrl) { if (pdata->clk_ctrl(pdata, true)) { pr_err("clk_ctrl() failed!\n"); return -1; } msleep(10); /* Delay until clock is stable for 2 cycles */ } /* Enable POWERUP signal */ if (pdata->powerup) { gpio_set_value(pdata->powerup, 1); msleep(250); /* or more..? */ } /* Enable RSTn signal */ if (pdata->reset) { gpio_set_value(pdata->reset, 1); msleep(50); /* Or more..? 
*/ } return 0; } static size_t cw1200_sdio_align_size(struct hwbus_priv *self, size_t size) { if (self->pdata->no_nptb) size = round_up(size, SDIO_BLOCK_SIZE); else size = sdio_align_size(self->func, size); return size; } static int cw1200_sdio_pm(struct hwbus_priv *self, bool suspend) { int ret = 0; if (self->pdata->irq) ret = irq_set_irq_wake(self->pdata->irq, suspend); return ret; } static const struct hwbus_ops cw1200_sdio_hwbus_ops = { .hwbus_memcpy_fromio = cw1200_sdio_memcpy_fromio, .hwbus_memcpy_toio = cw1200_sdio_memcpy_toio, .lock = cw1200_sdio_lock, .unlock = cw1200_sdio_unlock, .align_size = cw1200_sdio_align_size, .power_mgmt = cw1200_sdio_pm, }; /* Probe Function to be called by SDIO stack when device is discovered */ static int cw1200_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { struct hwbus_priv *self; int status; pr_info("cw1200_wlan_sdio: Probe called\n"); /* We are only able to handle the wlan function */ if (func->num != 0x01) return -ENODEV; self = kzalloc(sizeof(*self), GFP_KERNEL); if (!self) { pr_err("Can't allocate SDIO hwbus_priv.\n"); return -ENOMEM; } func->card->quirks |= MMC_QUIRK_LENIENT_FN0; self->pdata = global_plat_data; /* FIXME */ self->func = func; sdio_set_drvdata(func, self); sdio_claim_host(func); sdio_enable_func(func); sdio_release_host(func); status = cw1200_sdio_irq_subscribe(self); status = cw1200_core_probe(&cw1200_sdio_hwbus_ops, self, &func->dev, &self->core, self->pdata->ref_clk, self->pdata->macaddr, self->pdata->sdd_file, self->pdata->have_5ghz); if (status) { cw1200_sdio_irq_unsubscribe(self); sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); sdio_set_drvdata(func, NULL); kfree(self); } return status; } /* Disconnect Function to be called by SDIO stack when * device is disconnected */ static void cw1200_sdio_disconnect(struct sdio_func *func) { struct hwbus_priv *self = sdio_get_drvdata(func); if (self) { cw1200_sdio_irq_unsubscribe(self); if (self->core) { cw1200_core_release(self->core); self->core = NULL; } sdio_claim_host(func); sdio_disable_func(func); sdio_release_host(func); sdio_set_drvdata(func, NULL); kfree(self); } } #ifdef CONFIG_PM static int cw1200_sdio_suspend(struct device *dev) { int ret; struct sdio_func *func = dev_to_sdio_func(dev); struct hwbus_priv *self = sdio_get_drvdata(func); if (!cw1200_can_suspend(self->core)) return -EAGAIN; /* Notify SDIO that CW1200 will remain powered during suspend */ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); if (ret) pr_err("Error setting SDIO pm flags: %i\n", ret); return ret; } static int cw1200_sdio_resume(struct device *dev) { return 0; } static const struct dev_pm_ops cw1200_pm_ops = { .suspend = cw1200_sdio_suspend, .resume = cw1200_sdio_resume, }; #endif static struct sdio_driver sdio_driver = { .name = "cw1200_wlan_sdio", .id_table = cw1200_sdio_ids, .probe = cw1200_sdio_probe, .remove = cw1200_sdio_disconnect, #ifdef CONFIG_PM .drv = { .pm = &cw1200_pm_ops, } #endif }; /* Init Module function -> Called by insmod */ static int __init cw1200_sdio_init(void) { const struct cw1200_platform_data_sdio *pdata; int ret; /* FIXME -- this won't support multiple devices */ pdata = global_plat_data; if (cw1200_sdio_on(pdata)) { ret = -1; goto err; } ret = sdio_register_driver(&sdio_driver); if (ret) goto err; return 0; err: cw1200_sdio_off(pdata); return ret; } /* Called at Driver Unloading */ static void __exit cw1200_sdio_exit(void) { const struct cw1200_platform_data_sdio *pdata; /* FIXME -- this won't support multiple devices 
*/ pdata = global_plat_data; sdio_unregister_driver(&sdio_driver); cw1200_sdio_off(pdata); } module_init(cw1200_sdio_init); module_exit(cw1200_sdio_exit);
linux-master
drivers/net/wireless/st/cw1200/cw1200_sdio.c
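Two small pieces of arithmetic recur in the SDIO glue above: the CCCR interrupt-enable byte is built by OR-ing the master bit (bit 0) with one bit per function number, and cw1200_sdio_align_size() rounds the length up to a whole 512-byte block when byte-mode transfers are unavailable (no_nptb). A self-contained sketch of both, with illustrative names rather than the mmc core API:

/*
 * Sketch of the CCCR IENx byte (master enable plus per-function bit)
 * and the block-size round-up used when no_nptb is set.
 */
#include <stdio.h>
#include <stddef.h>

#define SDIO_BLOCK_SIZE 512
#define BIT(n) (1u << (n))

static unsigned char cccr_ien_enable(unsigned char cccr, unsigned func_num)
{
	cccr |= BIT(0);          /* master interrupt enable */
	cccr |= BIT(func_num);   /* enable IRQ for this function */
	return cccr;
}

static size_t block_align(size_t size)
{
	/* round up to the next multiple of SDIO_BLOCK_SIZE */
	return (size + SDIO_BLOCK_SIZE - 1) / SDIO_BLOCK_SIZE * SDIO_BLOCK_SIZE;
}

int main(void)
{
	printf("IENx for function 1: 0x%02x\n", cccr_ien_enable(0x00, 1));
	printf("align(100)  -> %zu\n", block_align(100));
	printf("align(512)  -> %zu\n", block_align(512));
	printf("align(1500) -> %zu\n", block_align(1500));
	return 0;
}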
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries. * All rights reserved. */ #include <linux/clk.h> #include <linux/mmc/sdio_func.h> #include <linux/mmc/sdio_ids.h> #include <linux/mmc/host.h> #include <linux/mmc/sdio.h> #include <linux/of_irq.h> #include "netdev.h" #include "cfg80211.h" #define SDIO_MODALIAS "wilc1000_sdio" static const struct sdio_device_id wilc_sdio_ids[] = { { SDIO_DEVICE(SDIO_VENDOR_ID_MICROCHIP_WILC, SDIO_DEVICE_ID_MICROCHIP_WILC1000) }, { }, }; MODULE_DEVICE_TABLE(sdio, wilc_sdio_ids); #define WILC_SDIO_BLOCK_SIZE 512 struct wilc_sdio { bool irq_gpio; u32 block_size; bool isinit; u8 *cmd53_buf; }; struct sdio_cmd52 { u32 read_write: 1; u32 function: 3; u32 raw: 1; u32 address: 17; u32 data: 8; }; struct sdio_cmd53 { u32 read_write: 1; u32 function: 3; u32 block_mode: 1; u32 increment: 1; u32 address: 17; u32 count: 9; u8 *buffer; u32 block_size; bool use_global_buf; }; static const struct wilc_hif_func wilc_hif_sdio; static void wilc_sdio_interrupt(struct sdio_func *func) { sdio_release_host(func); wilc_handle_isr(sdio_get_drvdata(func)); sdio_claim_host(func); } static int wilc_sdio_cmd52(struct wilc *wilc, struct sdio_cmd52 *cmd) { struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev); int ret; u8 data; sdio_claim_host(func); func->num = cmd->function; if (cmd->read_write) { /* write */ if (cmd->raw) { sdio_writeb(func, cmd->data, cmd->address, &ret); data = sdio_readb(func, cmd->address, &ret); cmd->data = data; } else { sdio_writeb(func, cmd->data, cmd->address, &ret); } } else { /* read */ data = sdio_readb(func, cmd->address, &ret); cmd->data = data; } sdio_release_host(func); if (ret) dev_err(&func->dev, "%s..failed, err(%d)\n", __func__, ret); return ret; } static int wilc_sdio_cmd53(struct wilc *wilc, struct sdio_cmd53 *cmd) { struct sdio_func *func = container_of(wilc->dev, struct sdio_func, dev); int size, ret; struct wilc_sdio *sdio_priv = wilc->bus_data; u8 *buf = cmd->buffer; sdio_claim_host(func); func->num = cmd->function; func->cur_blksize = cmd->block_size; if (cmd->block_mode) size = cmd->count * cmd->block_size; else size = cmd->count; if (cmd->use_global_buf) { if (size > sizeof(u32)) return -EINVAL; buf = sdio_priv->cmd53_buf; } if (cmd->read_write) { /* write */ if (cmd->use_global_buf) memcpy(buf, cmd->buffer, size); ret = sdio_memcpy_toio(func, cmd->address, buf, size); } else { /* read */ ret = sdio_memcpy_fromio(func, buf, cmd->address, size); if (cmd->use_global_buf) memcpy(cmd->buffer, buf, size); } sdio_release_host(func); if (ret) dev_err(&func->dev, "%s..failed, err(%d)\n", __func__, ret); return ret; } static int wilc_sdio_probe(struct sdio_func *func, const struct sdio_device_id *id) { struct wilc *wilc; int ret; struct wilc_sdio *sdio_priv; sdio_priv = kzalloc(sizeof(*sdio_priv), GFP_KERNEL); if (!sdio_priv) return -ENOMEM; sdio_priv->cmd53_buf = kzalloc(sizeof(u32), GFP_KERNEL); if (!sdio_priv->cmd53_buf) { ret = -ENOMEM; goto free; } ret = wilc_cfg80211_init(&wilc, &func->dev, WILC_HIF_SDIO, &wilc_hif_sdio); if (ret) goto free; if (IS_ENABLED(CONFIG_WILC1000_HW_OOB_INTR)) { struct device_node *np = func->card->dev.of_node; int irq_num = of_irq_get(np, 0); if (irq_num > 0) { wilc->dev_irq_num = irq_num; sdio_priv->irq_gpio = true; } } sdio_set_drvdata(func, wilc); wilc->bus_data = sdio_priv; wilc->dev = &func->dev; wilc->rtc_clk = devm_clk_get_optional(&func->card->dev, "rtc"); if (IS_ERR(wilc->rtc_clk)) { ret = PTR_ERR(wilc->rtc_clk); goto 
dispose_irq; } clk_prepare_enable(wilc->rtc_clk); dev_info(&func->dev, "Driver Initializing success\n"); return 0; dispose_irq: irq_dispose_mapping(wilc->dev_irq_num); wilc_netdev_cleanup(wilc); free: kfree(sdio_priv->cmd53_buf); kfree(sdio_priv); return ret; } static void wilc_sdio_remove(struct sdio_func *func) { struct wilc *wilc = sdio_get_drvdata(func); struct wilc_sdio *sdio_priv = wilc->bus_data; clk_disable_unprepare(wilc->rtc_clk); wilc_netdev_cleanup(wilc); kfree(sdio_priv->cmd53_buf); kfree(sdio_priv); } static int wilc_sdio_reset(struct wilc *wilc) { struct sdio_cmd52 cmd; int ret; struct sdio_func *func = dev_to_sdio_func(wilc->dev); cmd.read_write = 1; cmd.function = 0; cmd.raw = 0; cmd.address = SDIO_CCCR_ABORT; cmd.data = WILC_SDIO_CCCR_ABORT_RESET; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Fail cmd 52, reset cmd ...\n"); return ret; } return 0; } static bool wilc_sdio_is_init(struct wilc *wilc) { struct wilc_sdio *sdio_priv = wilc->bus_data; return sdio_priv->isinit; } static int wilc_sdio_suspend(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct wilc *wilc = sdio_get_drvdata(func); int ret; dev_info(dev, "sdio suspend\n"); chip_wakeup(wilc); if (!IS_ERR(wilc->rtc_clk)) clk_disable_unprepare(wilc->rtc_clk); if (wilc->suspend_event) { host_sleep_notify(wilc); chip_allow_sleep(wilc); } ret = wilc_sdio_reset(wilc); if (ret) { dev_err(&func->dev, "Fail reset sdio\n"); return ret; } sdio_claim_host(func); return 0; } static int wilc_sdio_enable_interrupt(struct wilc *dev) { struct sdio_func *func = container_of(dev->dev, struct sdio_func, dev); int ret = 0; sdio_claim_host(func); ret = sdio_claim_irq(func, wilc_sdio_interrupt); sdio_release_host(func); if (ret < 0) { dev_err(&func->dev, "can't claim sdio_irq, err(%d)\n", ret); ret = -EIO; } return ret; } static void wilc_sdio_disable_interrupt(struct wilc *dev) { struct sdio_func *func = container_of(dev->dev, struct sdio_func, dev); int ret; sdio_claim_host(func); ret = sdio_release_irq(func); if (ret < 0) dev_err(&func->dev, "can't release sdio_irq, err(%d)\n", ret); sdio_release_host(func); } /******************************************** * * Function 0 * ********************************************/ static int wilc_sdio_set_func0_csa_address(struct wilc *wilc, u32 adr) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct sdio_cmd52 cmd; int ret; /** * Review: BIG ENDIAN **/ cmd.read_write = 1; cmd.function = 0; cmd.raw = 0; cmd.address = WILC_SDIO_FBR_CSA_REG; cmd.data = (u8)adr; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Failed cmd52, set %04x data...\n", cmd.address); return ret; } cmd.address = WILC_SDIO_FBR_CSA_REG + 1; cmd.data = (u8)(adr >> 8); ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Failed cmd52, set %04x data...\n", cmd.address); return ret; } cmd.address = WILC_SDIO_FBR_CSA_REG + 2; cmd.data = (u8)(adr >> 16); ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Failed cmd52, set %04x data...\n", cmd.address); return ret; } return 0; } static int wilc_sdio_set_block_size(struct wilc *wilc, u8 func_num, u32 block_size) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct sdio_cmd52 cmd; int ret; cmd.read_write = 1; cmd.function = 0; cmd.raw = 0; cmd.address = SDIO_FBR_BASE(func_num) + SDIO_CCCR_BLKSIZE; cmd.data = (u8)block_size; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Failed cmd52, set %04x data...\n", cmd.address); return ret; } cmd.address = 
SDIO_FBR_BASE(func_num) + SDIO_CCCR_BLKSIZE + 1; cmd.data = (u8)(block_size >> 8); ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Failed cmd52, set %04x data...\n", cmd.address); return ret; } return 0; } /******************************************** * * Sdio interfaces * ********************************************/ static int wilc_sdio_write_reg(struct wilc *wilc, u32 addr, u32 data) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; int ret; cpu_to_le32s(&data); if (addr >= 0xf0 && addr <= 0xff) { /* only vendor specific registers */ struct sdio_cmd52 cmd; cmd.read_write = 1; cmd.function = 0; cmd.raw = 0; cmd.address = addr; cmd.data = data; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) dev_err(&func->dev, "Failed cmd 52, read reg (%08x) ...\n", addr); } else { struct sdio_cmd53 cmd; /** * set the AHB address **/ ret = wilc_sdio_set_func0_csa_address(wilc, addr); if (ret) return ret; cmd.read_write = 1; cmd.function = 0; cmd.address = WILC_SDIO_FBR_DATA_REG; cmd.block_mode = 0; cmd.increment = 1; cmd.count = sizeof(u32); cmd.buffer = (u8 *)&data; cmd.use_global_buf = true; cmd.block_size = sdio_priv->block_size; ret = wilc_sdio_cmd53(wilc, &cmd); if (ret) dev_err(&func->dev, "Failed cmd53, write reg (%08x)...\n", addr); } return ret; } static int wilc_sdio_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; u32 block_size = sdio_priv->block_size; struct sdio_cmd53 cmd; int nblk, nleft, ret; cmd.read_write = 1; if (addr > 0) { /** * func 0 access **/ cmd.function = 0; cmd.address = WILC_SDIO_FBR_DATA_REG; } else { /** * func 1 access **/ cmd.function = 1; cmd.address = WILC_SDIO_F1_DATA_REG; } size = ALIGN(size, 4); nblk = size / block_size; nleft = size % block_size; cmd.use_global_buf = false; if (nblk > 0) { cmd.block_mode = 1; cmd.increment = 1; cmd.count = nblk; cmd.buffer = buf; cmd.block_size = block_size; if (addr > 0) { ret = wilc_sdio_set_func0_csa_address(wilc, addr); if (ret) return ret; } ret = wilc_sdio_cmd53(wilc, &cmd); if (ret) { dev_err(&func->dev, "Failed cmd53 [%x], block send...\n", addr); return ret; } if (addr > 0) addr += nblk * block_size; buf += nblk * block_size; } if (nleft > 0) { cmd.block_mode = 0; cmd.increment = 1; cmd.count = nleft; cmd.buffer = buf; cmd.block_size = block_size; if (addr > 0) { ret = wilc_sdio_set_func0_csa_address(wilc, addr); if (ret) return ret; } ret = wilc_sdio_cmd53(wilc, &cmd); if (ret) { dev_err(&func->dev, "Failed cmd53 [%x], bytes send...\n", addr); return ret; } } return 0; } static int wilc_sdio_read_reg(struct wilc *wilc, u32 addr, u32 *data) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; int ret; if (addr >= 0xf0 && addr <= 0xff) { /* only vendor specific registers */ struct sdio_cmd52 cmd; cmd.read_write = 0; cmd.function = 0; cmd.raw = 0; cmd.address = addr; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Failed cmd 52, read reg (%08x) ...\n", addr); return ret; } *data = cmd.data; } else { struct sdio_cmd53 cmd; ret = wilc_sdio_set_func0_csa_address(wilc, addr); if (ret) return ret; cmd.read_write = 0; cmd.function = 0; cmd.address = WILC_SDIO_FBR_DATA_REG; cmd.block_mode = 0; cmd.increment = 1; cmd.count = sizeof(u32); cmd.buffer = (u8 *)data; cmd.use_global_buf = true; cmd.block_size = sdio_priv->block_size; ret = wilc_sdio_cmd53(wilc, &cmd); if (ret) { dev_err(&func->dev, 
"Failed cmd53, read reg (%08x)...\n", addr); return ret; } } le32_to_cpus(data); return 0; } static int wilc_sdio_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; u32 block_size = sdio_priv->block_size; struct sdio_cmd53 cmd; int nblk, nleft, ret; cmd.read_write = 0; if (addr > 0) { /** * func 0 access **/ cmd.function = 0; cmd.address = WILC_SDIO_FBR_DATA_REG; } else { /** * func 1 access **/ cmd.function = 1; cmd.address = WILC_SDIO_F1_DATA_REG; } size = ALIGN(size, 4); nblk = size / block_size; nleft = size % block_size; cmd.use_global_buf = false; if (nblk > 0) { cmd.block_mode = 1; cmd.increment = 1; cmd.count = nblk; cmd.buffer = buf; cmd.block_size = block_size; if (addr > 0) { ret = wilc_sdio_set_func0_csa_address(wilc, addr); if (ret) return ret; } ret = wilc_sdio_cmd53(wilc, &cmd); if (ret) { dev_err(&func->dev, "Failed cmd53 [%x], block read...\n", addr); return ret; } if (addr > 0) addr += nblk * block_size; buf += nblk * block_size; } /* if (nblk > 0) */ if (nleft > 0) { cmd.block_mode = 0; cmd.increment = 1; cmd.count = nleft; cmd.buffer = buf; cmd.block_size = block_size; if (addr > 0) { ret = wilc_sdio_set_func0_csa_address(wilc, addr); if (ret) return ret; } ret = wilc_sdio_cmd53(wilc, &cmd); if (ret) { dev_err(&func->dev, "Failed cmd53 [%x], bytes read...\n", addr); return ret; } } return 0; } /******************************************** * * Bus interfaces * ********************************************/ static int wilc_sdio_deinit(struct wilc *wilc) { struct wilc_sdio *sdio_priv = wilc->bus_data; sdio_priv->isinit = false; return 0; } static int wilc_sdio_init(struct wilc *wilc, bool resume) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; struct sdio_cmd52 cmd; int loop, ret; u32 chipid; /** * function 0 csa enable **/ cmd.read_write = 1; cmd.function = 0; cmd.raw = 1; cmd.address = SDIO_FBR_BASE(1); cmd.data = SDIO_FBR_ENABLE_CSA; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Fail cmd 52, enable csa...\n"); return ret; } /** * function 0 block size **/ ret = wilc_sdio_set_block_size(wilc, 0, WILC_SDIO_BLOCK_SIZE); if (ret) { dev_err(&func->dev, "Fail cmd 52, set func 0 block size...\n"); return ret; } sdio_priv->block_size = WILC_SDIO_BLOCK_SIZE; /** * enable func1 IO **/ cmd.read_write = 1; cmd.function = 0; cmd.raw = 1; cmd.address = SDIO_CCCR_IOEx; cmd.data = WILC_SDIO_CCCR_IO_EN_FUNC1; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Fail cmd 52, set IOE register...\n"); return ret; } /** * make sure func 1 is up **/ cmd.read_write = 0; cmd.function = 0; cmd.raw = 0; cmd.address = SDIO_CCCR_IORx; loop = 3; do { cmd.data = 0; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Fail cmd 52, get IOR register...\n"); return ret; } if (cmd.data == WILC_SDIO_CCCR_IO_EN_FUNC1) break; } while (loop--); if (loop <= 0) { dev_err(&func->dev, "Fail func 1 is not ready...\n"); return -EINVAL; } /** * func 1 is ready, set func 1 block size **/ ret = wilc_sdio_set_block_size(wilc, 1, WILC_SDIO_BLOCK_SIZE); if (ret) { dev_err(&func->dev, "Fail set func 1 block size...\n"); return ret; } /** * func 1 interrupt enable **/ cmd.read_write = 1; cmd.function = 0; cmd.raw = 1; cmd.address = SDIO_CCCR_IENx; cmd.data = WILC_SDIO_CCCR_IEN_MASTER | WILC_SDIO_CCCR_IEN_FUNC1; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Fail cmd 52, set IEN register...\n"); 
return ret; } /** * make sure can read back chip id correctly **/ if (!resume) { ret = wilc_sdio_read_reg(wilc, WILC_CHIPID, &chipid); if (ret) { dev_err(&func->dev, "Fail cmd read chip id...\n"); return ret; } dev_err(&func->dev, "chipid (%08x)\n", chipid); } sdio_priv->isinit = true; return 0; } static int wilc_sdio_read_size(struct wilc *wilc, u32 *size) { u32 tmp; struct sdio_cmd52 cmd; /** * Read DMA count in words **/ cmd.read_write = 0; cmd.function = 0; cmd.raw = 0; cmd.address = WILC_SDIO_INTERRUPT_DATA_SZ_REG; cmd.data = 0; wilc_sdio_cmd52(wilc, &cmd); tmp = cmd.data; cmd.address = WILC_SDIO_INTERRUPT_DATA_SZ_REG + 1; cmd.data = 0; wilc_sdio_cmd52(wilc, &cmd); tmp |= (cmd.data << 8); *size = tmp; return 0; } static int wilc_sdio_read_int(struct wilc *wilc, u32 *int_status) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; u32 tmp; u8 irq_flags; struct sdio_cmd52 cmd; wilc_sdio_read_size(wilc, &tmp); /** * Read IRQ flags **/ if (!sdio_priv->irq_gpio) { cmd.function = 1; cmd.address = WILC_SDIO_EXT_IRQ_FLAG_REG; } else { cmd.function = 0; cmd.address = WILC_SDIO_IRQ_FLAG_REG; } cmd.raw = 0; cmd.read_write = 0; cmd.data = 0; wilc_sdio_cmd52(wilc, &cmd); irq_flags = cmd.data; tmp |= FIELD_PREP(IRG_FLAGS_MASK, cmd.data); if (FIELD_GET(UNHANDLED_IRQ_MASK, irq_flags)) dev_err(&func->dev, "Unexpected interrupt (1) int=%lx\n", FIELD_GET(UNHANDLED_IRQ_MASK, irq_flags)); *int_status = tmp; return 0; } static int wilc_sdio_clear_int_ext(struct wilc *wilc, u32 val) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; int ret; u32 reg = 0; if (sdio_priv->irq_gpio) reg = val & (BIT(MAX_NUM_INT) - 1); /* select VMM table 0 */ if (val & SEL_VMM_TBL0) reg |= BIT(5); /* select VMM table 1 */ if (val & SEL_VMM_TBL1) reg |= BIT(6); /* enable VMM */ if (val & EN_VMM) reg |= BIT(7); if (reg) { struct sdio_cmd52 cmd; cmd.read_write = 1; cmd.function = 0; cmd.raw = 0; cmd.address = WILC_SDIO_IRQ_CLEAR_FLAG_REG; cmd.data = reg; ret = wilc_sdio_cmd52(wilc, &cmd); if (ret) { dev_err(&func->dev, "Failed cmd52, set (%02x) data (%d) ...\n", cmd.address, __LINE__); return ret; } } return 0; } static int wilc_sdio_sync_ext(struct wilc *wilc, int nint) { struct sdio_func *func = dev_to_sdio_func(wilc->dev); struct wilc_sdio *sdio_priv = wilc->bus_data; u32 reg; if (nint > MAX_NUM_INT) { dev_err(&func->dev, "Too many interrupts (%d)...\n", nint); return -EINVAL; } /** * Disable power sequencer **/ if (wilc_sdio_read_reg(wilc, WILC_MISC, &reg)) { dev_err(&func->dev, "Failed read misc reg...\n"); return -EINVAL; } reg &= ~BIT(8); if (wilc_sdio_write_reg(wilc, WILC_MISC, reg)) { dev_err(&func->dev, "Failed write misc reg...\n"); return -EINVAL; } if (sdio_priv->irq_gpio) { u32 reg; int ret, i; /** * interrupt pin mux select **/ ret = wilc_sdio_read_reg(wilc, WILC_PIN_MUX_0, &reg); if (ret) { dev_err(&func->dev, "Failed read reg (%08x)...\n", WILC_PIN_MUX_0); return ret; } reg |= BIT(8); ret = wilc_sdio_write_reg(wilc, WILC_PIN_MUX_0, reg); if (ret) { dev_err(&func->dev, "Failed write reg (%08x)...\n", WILC_PIN_MUX_0); return ret; } /** * interrupt enable **/ ret = wilc_sdio_read_reg(wilc, WILC_INTR_ENABLE, &reg); if (ret) { dev_err(&func->dev, "Failed read reg (%08x)...\n", WILC_INTR_ENABLE); return ret; } for (i = 0; (i < 5) && (nint > 0); i++, nint--) reg |= BIT((27 + i)); ret = wilc_sdio_write_reg(wilc, WILC_INTR_ENABLE, reg); if (ret) { dev_err(&func->dev, "Failed write reg (%08x)...\n", WILC_INTR_ENABLE); return 
ret; } if (nint) { ret = wilc_sdio_read_reg(wilc, WILC_INTR2_ENABLE, &reg); if (ret) { dev_err(&func->dev, "Failed read reg (%08x)...\n", WILC_INTR2_ENABLE); return ret; } for (i = 0; (i < 3) && (nint > 0); i++, nint--) reg |= BIT(i); ret = wilc_sdio_write_reg(wilc, WILC_INTR2_ENABLE, reg); if (ret) { dev_err(&func->dev, "Failed write reg (%08x)...\n", WILC_INTR2_ENABLE); return ret; } } } return 0; } /* Global sdio HIF function table */ static const struct wilc_hif_func wilc_hif_sdio = { .hif_init = wilc_sdio_init, .hif_deinit = wilc_sdio_deinit, .hif_read_reg = wilc_sdio_read_reg, .hif_write_reg = wilc_sdio_write_reg, .hif_block_rx = wilc_sdio_read, .hif_block_tx = wilc_sdio_write, .hif_read_int = wilc_sdio_read_int, .hif_clear_int_ext = wilc_sdio_clear_int_ext, .hif_read_size = wilc_sdio_read_size, .hif_block_tx_ext = wilc_sdio_write, .hif_block_rx_ext = wilc_sdio_read, .hif_sync_ext = wilc_sdio_sync_ext, .enable_interrupt = wilc_sdio_enable_interrupt, .disable_interrupt = wilc_sdio_disable_interrupt, .hif_reset = wilc_sdio_reset, .hif_is_init = wilc_sdio_is_init, }; static int wilc_sdio_resume(struct device *dev) { struct sdio_func *func = dev_to_sdio_func(dev); struct wilc *wilc = sdio_get_drvdata(func); dev_info(dev, "sdio resume\n"); sdio_release_host(func); chip_wakeup(wilc); wilc_sdio_init(wilc, true); if (wilc->suspend_event) host_wakeup_notify(wilc); chip_allow_sleep(wilc); return 0; } static const struct of_device_id wilc_of_match[] = { { .compatible = "microchip,wilc1000", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, wilc_of_match); static const struct dev_pm_ops wilc_sdio_pm_ops = { .suspend = wilc_sdio_suspend, .resume = wilc_sdio_resume, }; static struct sdio_driver wilc_sdio_driver = { .name = SDIO_MODALIAS, .id_table = wilc_sdio_ids, .probe = wilc_sdio_probe, .remove = wilc_sdio_remove, .drv = { .pm = &wilc_sdio_pm_ops, .of_match_table = wilc_of_match, } }; module_driver(wilc_sdio_driver, sdio_register_driver, sdio_unregister_driver); MODULE_LICENSE("GPL");
linux-master
drivers/net/wireless/microchip/wilc1000/sdio.c
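The CMD53 read and write paths above always pad the length to a 4-byte multiple, push as many whole 512-byte blocks as possible in block mode, and send the remainder in byte mode, reprogramming the function-0 CSA address before each chunk. The sketch below reproduces only that size split; do_cmd53() is a hypothetical printer, not the wilc1000 bus API.

/*
 * User-space sketch of the block/byte split used by the CMD53 paths
 * above: pad to 4 bytes, send full blocks in block mode, then the
 * remainder in byte mode.  do_cmd53() is a mock that prints the plan.
 */
#include <stdio.h>
#include <stdint.h>

#define WILC_SDIO_BLOCK_SIZE 512

static void do_cmd53(uint32_t addr, int block_mode, uint32_t count)
{
	printf("cmd53 addr=0x%06x %s count=%u (%u bytes)\n",
	       (unsigned)addr, block_mode ? "block" : "byte ", (unsigned)count,
	       (unsigned)(block_mode ? count * WILC_SDIO_BLOCK_SIZE : count));
}

static void sdio_transfer(uint32_t addr, uint32_t size)
{
	uint32_t nblk, nleft;

	size = (size + 3) & ~3u;                  /* ALIGN(size, 4) */
	nblk  = size / WILC_SDIO_BLOCK_SIZE;
	nleft = size % WILC_SDIO_BLOCK_SIZE;

	if (nblk) {
		do_cmd53(addr, 1, nblk);          /* full blocks, block mode */
		addr += nblk * WILC_SDIO_BLOCK_SIZE;
	}
	if (nleft)
		do_cmd53(addr, 0, nleft);         /* tail, byte mode */
}

int main(void)
{
	sdio_transfer(0x030000, 1600);  /* 3 blocks + 64-byte tail */
	sdio_transfer(0x030000, 100);   /* byte mode only */
	return 0;
}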
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries. * All rights reserved. */ #include <linux/if_ether.h> #include <linux/ip.h> #include <net/dsfield.h> #include "cfg80211.h" #include "wlan_cfg.h" #define WAKE_UP_TRIAL_RETRY 10000 static inline bool is_wilc1000(u32 id) { return (id & (~WILC_CHIP_REV_FIELD)) == WILC_1000_BASE_ID; } static inline void acquire_bus(struct wilc *wilc, enum bus_acquire acquire) { mutex_lock(&wilc->hif_cs); if (acquire == WILC_BUS_ACQUIRE_AND_WAKEUP && wilc->power_save_mode) chip_wakeup(wilc); } static inline void release_bus(struct wilc *wilc, enum bus_release release) { if (release == WILC_BUS_RELEASE_ALLOW_SLEEP && wilc->power_save_mode) chip_allow_sleep(wilc); mutex_unlock(&wilc->hif_cs); } static void wilc_wlan_txq_remove(struct wilc *wilc, u8 q_num, struct txq_entry_t *tqe) { list_del(&tqe->list); wilc->txq_entries -= 1; wilc->txq[q_num].count--; } static struct txq_entry_t * wilc_wlan_txq_remove_from_head(struct wilc *wilc, u8 q_num) { struct txq_entry_t *tqe = NULL; unsigned long flags; spin_lock_irqsave(&wilc->txq_spinlock, flags); if (!list_empty(&wilc->txq[q_num].txq_head.list)) { tqe = list_first_entry(&wilc->txq[q_num].txq_head.list, struct txq_entry_t, list); list_del(&tqe->list); wilc->txq_entries -= 1; wilc->txq[q_num].count--; } spin_unlock_irqrestore(&wilc->txq_spinlock, flags); return tqe; } static void wilc_wlan_txq_add_to_tail(struct net_device *dev, u8 q_num, struct txq_entry_t *tqe) { unsigned long flags; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; spin_lock_irqsave(&wilc->txq_spinlock, flags); list_add_tail(&tqe->list, &wilc->txq[q_num].txq_head.list); wilc->txq_entries += 1; wilc->txq[q_num].count++; spin_unlock_irqrestore(&wilc->txq_spinlock, flags); complete(&wilc->txq_event); } static void wilc_wlan_txq_add_to_head(struct wilc_vif *vif, u8 q_num, struct txq_entry_t *tqe) { unsigned long flags; struct wilc *wilc = vif->wilc; mutex_lock(&wilc->txq_add_to_head_cs); spin_lock_irqsave(&wilc->txq_spinlock, flags); list_add(&tqe->list, &wilc->txq[q_num].txq_head.list); wilc->txq_entries += 1; wilc->txq[q_num].count++; spin_unlock_irqrestore(&wilc->txq_spinlock, flags); mutex_unlock(&wilc->txq_add_to_head_cs); complete(&wilc->txq_event); } #define NOT_TCP_ACK (-1) static inline void add_tcp_session(struct wilc_vif *vif, u32 src_prt, u32 dst_prt, u32 seq) { struct tcp_ack_filter *f = &vif->ack_filter; if (f->tcp_session < 2 * MAX_TCP_SESSION) { f->ack_session_info[f->tcp_session].seq_num = seq; f->ack_session_info[f->tcp_session].bigger_ack_num = 0; f->ack_session_info[f->tcp_session].src_port = src_prt; f->ack_session_info[f->tcp_session].dst_port = dst_prt; f->tcp_session++; } } static inline void update_tcp_session(struct wilc_vif *vif, u32 index, u32 ack) { struct tcp_ack_filter *f = &vif->ack_filter; if (index < 2 * MAX_TCP_SESSION && ack > f->ack_session_info[index].bigger_ack_num) f->ack_session_info[index].bigger_ack_num = ack; } static inline void add_tcp_pending_ack(struct wilc_vif *vif, u32 ack, u32 session_index, struct txq_entry_t *txqe) { struct tcp_ack_filter *f = &vif->ack_filter; u32 i = f->pending_base + f->pending_acks_idx; if (i < MAX_PENDING_ACKS) { f->pending_acks[i].ack_num = ack; f->pending_acks[i].txqe = txqe; f->pending_acks[i].session_index = session_index; txqe->ack_idx = i; f->pending_acks_idx++; } } static inline void tcp_process(struct net_device *dev, struct txq_entry_t *tqe) { void *buffer = tqe->buffer; const struct 
ethhdr *eth_hdr_ptr = buffer; int i; unsigned long flags; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; struct tcp_ack_filter *f = &vif->ack_filter; const struct iphdr *ip_hdr_ptr; const struct tcphdr *tcp_hdr_ptr; u32 ihl, total_length, data_offset; spin_lock_irqsave(&wilc->txq_spinlock, flags); if (eth_hdr_ptr->h_proto != htons(ETH_P_IP)) goto out; ip_hdr_ptr = buffer + ETH_HLEN; if (ip_hdr_ptr->protocol != IPPROTO_TCP) goto out; ihl = ip_hdr_ptr->ihl << 2; tcp_hdr_ptr = buffer + ETH_HLEN + ihl; total_length = ntohs(ip_hdr_ptr->tot_len); data_offset = tcp_hdr_ptr->doff << 2; if (total_length == (ihl + data_offset)) { u32 seq_no, ack_no; seq_no = ntohl(tcp_hdr_ptr->seq); ack_no = ntohl(tcp_hdr_ptr->ack_seq); for (i = 0; i < f->tcp_session; i++) { u32 j = f->ack_session_info[i].seq_num; if (i < 2 * MAX_TCP_SESSION && j == seq_no) { update_tcp_session(vif, i, ack_no); break; } } if (i == f->tcp_session) add_tcp_session(vif, 0, 0, seq_no); add_tcp_pending_ack(vif, ack_no, i, tqe); } out: spin_unlock_irqrestore(&wilc->txq_spinlock, flags); } static void wilc_wlan_txq_filter_dup_tcp_ack(struct net_device *dev) { struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; struct tcp_ack_filter *f = &vif->ack_filter; u32 i = 0; u32 dropped = 0; unsigned long flags; spin_lock_irqsave(&wilc->txq_spinlock, flags); for (i = f->pending_base; i < (f->pending_base + f->pending_acks_idx); i++) { u32 index; u32 bigger_ack_num; if (i >= MAX_PENDING_ACKS) break; index = f->pending_acks[i].session_index; if (index >= 2 * MAX_TCP_SESSION) break; bigger_ack_num = f->ack_session_info[index].bigger_ack_num; if (f->pending_acks[i].ack_num < bigger_ack_num) { struct txq_entry_t *tqe; tqe = f->pending_acks[i].txqe; if (tqe) { wilc_wlan_txq_remove(wilc, tqe->q_num, tqe); tqe->status = 1; if (tqe->tx_complete_func) tqe->tx_complete_func(tqe->priv, tqe->status); kfree(tqe); dropped++; } } } f->pending_acks_idx = 0; f->tcp_session = 0; if (f->pending_base == 0) f->pending_base = MAX_TCP_SESSION; else f->pending_base = 0; spin_unlock_irqrestore(&wilc->txq_spinlock, flags); while (dropped > 0) { wait_for_completion_timeout(&wilc->txq_event, msecs_to_jiffies(1)); dropped--; } } void wilc_enable_tcp_ack_filter(struct wilc_vif *vif, bool value) { vif->ack_filter.enabled = value; } static int wilc_wlan_txq_add_cfg_pkt(struct wilc_vif *vif, u8 *buffer, u32 buffer_size) { struct txq_entry_t *tqe; struct wilc *wilc = vif->wilc; netdev_dbg(vif->ndev, "Adding config packet ...\n"); if (wilc->quit) { netdev_dbg(vif->ndev, "Return due to clear function\n"); complete(&wilc->cfg_event); return 0; } tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC); if (!tqe) { complete(&wilc->cfg_event); return 0; } tqe->type = WILC_CFG_PKT; tqe->buffer = buffer; tqe->buffer_size = buffer_size; tqe->tx_complete_func = NULL; tqe->priv = NULL; tqe->q_num = AC_VO_Q; tqe->ack_idx = NOT_TCP_ACK; tqe->vif = vif; wilc_wlan_txq_add_to_head(vif, AC_VO_Q, tqe); return 1; } static bool is_ac_q_limit(struct wilc *wl, u8 q_num) { u8 factors[NQUEUES] = {1, 1, 1, 1}; u16 i; unsigned long flags; struct wilc_tx_queue_status *q = &wl->tx_q_limit; u8 end_index; u8 q_limit; bool ret = false; spin_lock_irqsave(&wl->txq_spinlock, flags); if (!q->initialized) { for (i = 0; i < AC_BUFFER_SIZE; i++) q->buffer[i] = i % NQUEUES; for (i = 0; i < NQUEUES; i++) { q->cnt[i] = AC_BUFFER_SIZE * factors[i] / NQUEUES; q->sum += q->cnt[i]; } q->end_index = AC_BUFFER_SIZE - 1; q->initialized = 1; } end_index = q->end_index; q->cnt[q->buffer[end_index]] -= 
factors[q->buffer[end_index]]; q->cnt[q_num] += factors[q_num]; q->sum += (factors[q_num] - factors[q->buffer[end_index]]); q->buffer[end_index] = q_num; if (end_index > 0) q->end_index--; else q->end_index = AC_BUFFER_SIZE - 1; if (!q->sum) q_limit = 1; else q_limit = (q->cnt[q_num] * FLOW_CONTROL_UPPER_THRESHOLD / q->sum) + 1; if (wl->txq[q_num].count <= q_limit) ret = true; spin_unlock_irqrestore(&wl->txq_spinlock, flags); return ret; } static inline u8 ac_classify(struct wilc *wilc, struct sk_buff *skb) { u8 q_num = AC_BE_Q; u8 dscp; switch (skb->protocol) { case htons(ETH_P_IP): dscp = ipv4_get_dsfield(ip_hdr(skb)) & 0xfc; break; case htons(ETH_P_IPV6): dscp = ipv6_get_dsfield(ipv6_hdr(skb)) & 0xfc; break; default: return q_num; } switch (dscp) { case 0x08: case 0x20: case 0x40: q_num = AC_BK_Q; break; case 0x80: case 0xA0: case 0x28: q_num = AC_VI_Q; break; case 0xC0: case 0xD0: case 0xE0: case 0x88: case 0xB8: q_num = AC_VO_Q; break; } return q_num; } static inline int ac_balance(struct wilc *wl, u8 *ratio) { u8 i, max_count = 0; if (!ratio) return -EINVAL; for (i = 0; i < NQUEUES; i++) if (wl->txq[i].fw.count > max_count) max_count = wl->txq[i].fw.count; for (i = 0; i < NQUEUES; i++) ratio[i] = max_count - wl->txq[i].fw.count; return 0; } static inline void ac_update_fw_ac_pkt_info(struct wilc *wl, u32 reg) { wl->txq[AC_BK_Q].fw.count = FIELD_GET(BK_AC_COUNT_FIELD, reg); wl->txq[AC_BE_Q].fw.count = FIELD_GET(BE_AC_COUNT_FIELD, reg); wl->txq[AC_VI_Q].fw.count = FIELD_GET(VI_AC_COUNT_FIELD, reg); wl->txq[AC_VO_Q].fw.count = FIELD_GET(VO_AC_COUNT_FIELD, reg); wl->txq[AC_BK_Q].fw.acm = FIELD_GET(BK_AC_ACM_STAT_FIELD, reg); wl->txq[AC_BE_Q].fw.acm = FIELD_GET(BE_AC_ACM_STAT_FIELD, reg); wl->txq[AC_VI_Q].fw.acm = FIELD_GET(VI_AC_ACM_STAT_FIELD, reg); wl->txq[AC_VO_Q].fw.acm = FIELD_GET(VO_AC_ACM_STAT_FIELD, reg); } static inline u8 ac_change(struct wilc *wilc, u8 *ac) { do { if (wilc->txq[*ac].fw.acm == 0) return 0; (*ac)++; } while (*ac < NQUEUES); return 1; } int wilc_wlan_txq_add_net_pkt(struct net_device *dev, struct tx_complete_data *tx_data, u8 *buffer, u32 buffer_size, void (*tx_complete_fn)(void *, int)) { struct txq_entry_t *tqe; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc; u8 q_num; wilc = vif->wilc; if (wilc->quit) { tx_complete_fn(tx_data, 0); return 0; } if (!wilc->initialized) { tx_complete_fn(tx_data, 0); return 0; } tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC); if (!tqe) { tx_complete_fn(tx_data, 0); return 0; } tqe->type = WILC_NET_PKT; tqe->buffer = buffer; tqe->buffer_size = buffer_size; tqe->tx_complete_func = tx_complete_fn; tqe->priv = tx_data; tqe->vif = vif; q_num = ac_classify(wilc, tx_data->skb); tqe->q_num = q_num; if (ac_change(wilc, &q_num)) { tx_complete_fn(tx_data, 0); kfree(tqe); return 0; } if (is_ac_q_limit(wilc, q_num)) { tqe->ack_idx = NOT_TCP_ACK; if (vif->ack_filter.enabled) tcp_process(dev, tqe); wilc_wlan_txq_add_to_tail(dev, q_num, tqe); } else { tx_complete_fn(tx_data, 0); kfree(tqe); } return wilc->txq_entries; } int wilc_wlan_txq_add_mgmt_pkt(struct net_device *dev, void *priv, u8 *buffer, u32 buffer_size, void (*tx_complete_fn)(void *, int)) { struct txq_entry_t *tqe; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc; wilc = vif->wilc; if (wilc->quit) { tx_complete_fn(priv, 0); return 0; } if (!wilc->initialized) { tx_complete_fn(priv, 0); return 0; } tqe = kmalloc(sizeof(*tqe), GFP_ATOMIC); if (!tqe) { tx_complete_fn(priv, 0); return 0; } tqe->type = WILC_MGMT_PKT; tqe->buffer = buffer; tqe->buffer_size = buffer_size; 
tqe->tx_complete_func = tx_complete_fn; tqe->priv = priv; tqe->q_num = AC_BE_Q; tqe->ack_idx = NOT_TCP_ACK; tqe->vif = vif; wilc_wlan_txq_add_to_tail(dev, AC_VO_Q, tqe); return 1; } static struct txq_entry_t *wilc_wlan_txq_get_first(struct wilc *wilc, u8 q_num) { struct txq_entry_t *tqe = NULL; unsigned long flags; spin_lock_irqsave(&wilc->txq_spinlock, flags); if (!list_empty(&wilc->txq[q_num].txq_head.list)) tqe = list_first_entry(&wilc->txq[q_num].txq_head.list, struct txq_entry_t, list); spin_unlock_irqrestore(&wilc->txq_spinlock, flags); return tqe; } static struct txq_entry_t *wilc_wlan_txq_get_next(struct wilc *wilc, struct txq_entry_t *tqe, u8 q_num) { unsigned long flags; spin_lock_irqsave(&wilc->txq_spinlock, flags); if (!list_is_last(&tqe->list, &wilc->txq[q_num].txq_head.list)) tqe = list_next_entry(tqe, list); else tqe = NULL; spin_unlock_irqrestore(&wilc->txq_spinlock, flags); return tqe; } static void wilc_wlan_rxq_add(struct wilc *wilc, struct rxq_entry_t *rqe) { if (wilc->quit) return; mutex_lock(&wilc->rxq_cs); list_add_tail(&rqe->list, &wilc->rxq_head.list); mutex_unlock(&wilc->rxq_cs); } static struct rxq_entry_t *wilc_wlan_rxq_remove(struct wilc *wilc) { struct rxq_entry_t *rqe = NULL; mutex_lock(&wilc->rxq_cs); if (!list_empty(&wilc->rxq_head.list)) { rqe = list_first_entry(&wilc->rxq_head.list, struct rxq_entry_t, list); list_del(&rqe->list); } mutex_unlock(&wilc->rxq_cs); return rqe; } void chip_allow_sleep(struct wilc *wilc) { u32 reg = 0; const struct wilc_hif_func *hif_func = wilc->hif_func; u32 wakeup_reg, wakeup_bit; u32 to_host_from_fw_reg, to_host_from_fw_bit; u32 from_host_to_fw_reg, from_host_to_fw_bit; u32 trials = 100; int ret; if (wilc->io_type == WILC_HIF_SDIO) { wakeup_reg = WILC_SDIO_WAKEUP_REG; wakeup_bit = WILC_SDIO_WAKEUP_BIT; from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG; from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT; to_host_from_fw_reg = WILC_SDIO_FW_TO_HOST_REG; to_host_from_fw_bit = WILC_SDIO_FW_TO_HOST_BIT; } else { wakeup_reg = WILC_SPI_WAKEUP_REG; wakeup_bit = WILC_SPI_WAKEUP_BIT; from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG; from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT; to_host_from_fw_reg = WILC_SPI_FW_TO_HOST_REG; to_host_from_fw_bit = WILC_SPI_FW_TO_HOST_BIT; } while (--trials) { ret = hif_func->hif_read_reg(wilc, to_host_from_fw_reg, &reg); if (ret) return; if ((reg & to_host_from_fw_bit) == 0) break; } if (!trials) pr_warn("FW not responding\n"); /* Clear bit 1 */ ret = hif_func->hif_read_reg(wilc, wakeup_reg, &reg); if (ret) return; if (reg & wakeup_bit) { reg &= ~wakeup_bit; ret = hif_func->hif_write_reg(wilc, wakeup_reg, reg); if (ret) return; } ret = hif_func->hif_read_reg(wilc, from_host_to_fw_reg, &reg); if (ret) return; if (reg & from_host_to_fw_bit) { reg &= ~from_host_to_fw_bit; ret = hif_func->hif_write_reg(wilc, from_host_to_fw_reg, reg); if (ret) return; } } EXPORT_SYMBOL_GPL(chip_allow_sleep); void chip_wakeup(struct wilc *wilc) { u32 ret = 0; u32 clk_status_val = 0, trials = 0; u32 wakeup_reg, wakeup_bit; u32 clk_status_reg, clk_status_bit; u32 from_host_to_fw_reg, from_host_to_fw_bit; const struct wilc_hif_func *hif_func = wilc->hif_func; if (wilc->io_type == WILC_HIF_SDIO) { wakeup_reg = WILC_SDIO_WAKEUP_REG; wakeup_bit = WILC_SDIO_WAKEUP_BIT; clk_status_reg = WILC_SDIO_CLK_STATUS_REG; clk_status_bit = WILC_SDIO_CLK_STATUS_BIT; from_host_to_fw_reg = WILC_SDIO_HOST_TO_FW_REG; from_host_to_fw_bit = WILC_SDIO_HOST_TO_FW_BIT; } else { wakeup_reg = WILC_SPI_WAKEUP_REG; wakeup_bit = WILC_SPI_WAKEUP_BIT; clk_status_reg = 
WILC_SPI_CLK_STATUS_REG; clk_status_bit = WILC_SPI_CLK_STATUS_BIT; from_host_to_fw_reg = WILC_SPI_HOST_TO_FW_REG; from_host_to_fw_bit = WILC_SPI_HOST_TO_FW_BIT; } /* indicate host wakeup */ ret = hif_func->hif_write_reg(wilc, from_host_to_fw_reg, from_host_to_fw_bit); if (ret) return; /* Set wake-up bit */ ret = hif_func->hif_write_reg(wilc, wakeup_reg, wakeup_bit); if (ret) return; while (trials < WAKE_UP_TRIAL_RETRY) { ret = hif_func->hif_read_reg(wilc, clk_status_reg, &clk_status_val); if (ret) { pr_err("Bus error %d %x\n", ret, clk_status_val); return; } if (clk_status_val & clk_status_bit) break; trials++; } if (trials >= WAKE_UP_TRIAL_RETRY) { pr_err("Failed to wake-up the chip\n"); return; } /* Sometimes spi fail to read clock regs after reading * writing clockless registers */ if (wilc->io_type == WILC_HIF_SPI) wilc->hif_func->hif_reset(wilc); } EXPORT_SYMBOL_GPL(chip_wakeup); void host_wakeup_notify(struct wilc *wilc) { acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY); wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_2, 1); release_bus(wilc, WILC_BUS_RELEASE_ONLY); } EXPORT_SYMBOL_GPL(host_wakeup_notify); void host_sleep_notify(struct wilc *wilc) { acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY); wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_INTERRUPT_1, 1); release_bus(wilc, WILC_BUS_RELEASE_ONLY); } EXPORT_SYMBOL_GPL(host_sleep_notify); int wilc_wlan_handle_txq(struct wilc *wilc, u32 *txq_count) { int i, entries = 0; u8 k, ac; u32 sum; u32 reg; u8 ac_desired_ratio[NQUEUES] = {0, 0, 0, 0}; u8 ac_preserve_ratio[NQUEUES] = {1, 1, 1, 1}; u8 *num_pkts_to_add; u8 vmm_entries_ac[WILC_VMM_TBL_SIZE]; u32 offset = 0; bool max_size_over = 0, ac_exist = 0; int vmm_sz = 0; struct txq_entry_t *tqe_q[NQUEUES]; int ret = 0; int counter; int timeout; u32 *vmm_table = wilc->vmm_table; u8 ac_pkt_num_to_chip[NQUEUES] = {0, 0, 0, 0}; const struct wilc_hif_func *func; int srcu_idx; u8 *txb = wilc->tx_buffer; struct wilc_vif *vif; if (wilc->quit) goto out_update_cnt; if (ac_balance(wilc, ac_desired_ratio)) return -EINVAL; mutex_lock(&wilc->txq_add_to_head_cs); srcu_idx = srcu_read_lock(&wilc->srcu); list_for_each_entry_rcu(vif, &wilc->vif_list, list) wilc_wlan_txq_filter_dup_tcp_ack(vif->ndev); srcu_read_unlock(&wilc->srcu, srcu_idx); for (ac = 0; ac < NQUEUES; ac++) tqe_q[ac] = wilc_wlan_txq_get_first(wilc, ac); i = 0; sum = 0; max_size_over = 0; num_pkts_to_add = ac_desired_ratio; do { ac_exist = 0; for (ac = 0; (ac < NQUEUES) && (!max_size_over); ac++) { if (!tqe_q[ac]) continue; ac_exist = 1; for (k = 0; (k < num_pkts_to_add[ac]) && (!max_size_over) && tqe_q[ac]; k++) { if (i >= (WILC_VMM_TBL_SIZE - 1)) { max_size_over = 1; break; } if (tqe_q[ac]->type == WILC_CFG_PKT) vmm_sz = ETH_CONFIG_PKT_HDR_OFFSET; else if (tqe_q[ac]->type == WILC_NET_PKT) vmm_sz = ETH_ETHERNET_HDR_OFFSET; else vmm_sz = HOST_HDR_OFFSET; vmm_sz += tqe_q[ac]->buffer_size; vmm_sz = ALIGN(vmm_sz, 4); if ((sum + vmm_sz) > WILC_TX_BUFF_SIZE) { max_size_over = 1; break; } vmm_table[i] = vmm_sz / 4; if (tqe_q[ac]->type == WILC_CFG_PKT) vmm_table[i] |= BIT(10); cpu_to_le32s(&vmm_table[i]); vmm_entries_ac[i] = ac; i++; sum += vmm_sz; tqe_q[ac] = wilc_wlan_txq_get_next(wilc, tqe_q[ac], ac); } } num_pkts_to_add = ac_preserve_ratio; } while (!max_size_over && ac_exist); if (i == 0) goto out_unlock; vmm_table[i] = 0x0; acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP); counter = 0; func = wilc->hif_func; do { ret = func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, &reg); if (ret) break; if ((reg & 0x1) == 0) { ac_update_fw_ac_pkt_info(wilc, reg); 
break; } counter++; if (counter > 200) { counter = 0; ret = func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, 0); break; } } while (!wilc->quit); if (ret) goto out_release_bus; timeout = 200; do { ret = func->hif_block_tx(wilc, WILC_VMM_TBL_RX_SHADOW_BASE, (u8 *)vmm_table, ((i + 1) * 4)); if (ret) break; ret = func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x2); if (ret) break; do { ret = func->hif_read_reg(wilc, WILC_HOST_VMM_CTL, &reg); if (ret) break; if (FIELD_GET(WILC_VMM_ENTRY_AVAILABLE, reg)) { entries = FIELD_GET(WILC_VMM_ENTRY_COUNT, reg); break; } } while (--timeout); if (timeout <= 0) { ret = func->hif_write_reg(wilc, WILC_HOST_VMM_CTL, 0x0); break; } if (ret) break; if (entries == 0) { ret = func->hif_read_reg(wilc, WILC_HOST_TX_CTRL, &reg); if (ret) break; reg &= ~BIT(0); ret = func->hif_write_reg(wilc, WILC_HOST_TX_CTRL, reg); } } while (0); if (ret) goto out_release_bus; if (entries == 0) { /* * No VMM space available in firmware so retry to transmit * the packet from tx queue. */ ret = WILC_VMM_ENTRY_FULL_RETRY; goto out_release_bus; } release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); offset = 0; i = 0; do { struct txq_entry_t *tqe; u32 header, buffer_offset; char *bssid; u8 mgmt_ptk = 0; if (vmm_table[i] == 0 || vmm_entries_ac[i] >= NQUEUES) break; tqe = wilc_wlan_txq_remove_from_head(wilc, vmm_entries_ac[i]); if (!tqe) break; ac_pkt_num_to_chip[vmm_entries_ac[i]]++; vif = tqe->vif; le32_to_cpus(&vmm_table[i]); vmm_sz = FIELD_GET(WILC_VMM_BUFFER_SIZE, vmm_table[i]); vmm_sz *= 4; if (tqe->type == WILC_MGMT_PKT) mgmt_ptk = 1; header = (FIELD_PREP(WILC_VMM_HDR_TYPE, tqe->type) | FIELD_PREP(WILC_VMM_HDR_MGMT_FIELD, mgmt_ptk) | FIELD_PREP(WILC_VMM_HDR_PKT_SIZE, tqe->buffer_size) | FIELD_PREP(WILC_VMM_HDR_BUFF_SIZE, vmm_sz)); cpu_to_le32s(&header); memcpy(&txb[offset], &header, 4); if (tqe->type == WILC_CFG_PKT) { buffer_offset = ETH_CONFIG_PKT_HDR_OFFSET; } else if (tqe->type == WILC_NET_PKT) { int prio = tqe->q_num; bssid = tqe->vif->bssid; buffer_offset = ETH_ETHERNET_HDR_OFFSET; memcpy(&txb[offset + 4], &prio, sizeof(prio)); memcpy(&txb[offset + 8], bssid, 6); } else { buffer_offset = HOST_HDR_OFFSET; } memcpy(&txb[offset + buffer_offset], tqe->buffer, tqe->buffer_size); offset += vmm_sz; i++; tqe->status = 1; if (tqe->tx_complete_func) tqe->tx_complete_func(tqe->priv, tqe->status); if (tqe->ack_idx != NOT_TCP_ACK && tqe->ack_idx < MAX_PENDING_ACKS) vif->ack_filter.pending_acks[tqe->ack_idx].txqe = NULL; kfree(tqe); } while (--entries); for (i = 0; i < NQUEUES; i++) wilc->txq[i].fw.count += ac_pkt_num_to_chip[i]; acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP); ret = func->hif_clear_int_ext(wilc, ENABLE_TX_VMM); if (ret) goto out_release_bus; ret = func->hif_block_tx_ext(wilc, 0, txb, offset); out_release_bus: release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); out_unlock: mutex_unlock(&wilc->txq_add_to_head_cs); out_update_cnt: *txq_count = wilc->txq_entries; return ret; } static void wilc_wlan_handle_rx_buff(struct wilc *wilc, u8 *buffer, int size) { int offset = 0; u32 header; u32 pkt_len, pkt_offset, tp_len; int is_cfg_packet; u8 *buff_ptr; do { buff_ptr = buffer + offset; header = get_unaligned_le32(buff_ptr); is_cfg_packet = FIELD_GET(WILC_PKT_HDR_CONFIG_FIELD, header); pkt_offset = FIELD_GET(WILC_PKT_HDR_OFFSET_FIELD, header); tp_len = FIELD_GET(WILC_PKT_HDR_TOTAL_LEN_FIELD, header); pkt_len = FIELD_GET(WILC_PKT_HDR_LEN_FIELD, header); if (pkt_len == 0 || tp_len == 0) break; if (pkt_offset & IS_MANAGMEMENT) { buff_ptr += HOST_HDR_OFFSET; wilc_wfi_mgmt_rx(wilc, buff_ptr, pkt_len, 
pkt_offset & IS_MGMT_AUTH_PKT); } else { if (!is_cfg_packet) { wilc_frmw_to_host(wilc, buff_ptr, pkt_len, pkt_offset); } else { struct wilc_cfg_rsp rsp; buff_ptr += pkt_offset; wilc_wlan_cfg_indicate_rx(wilc, buff_ptr, pkt_len, &rsp); if (rsp.type == WILC_CFG_RSP) { if (wilc->cfg_seq_no == rsp.seq_no) complete(&wilc->cfg_event); } else if (rsp.type == WILC_CFG_RSP_STATUS) { wilc_mac_indicate(wilc); } } } offset += tp_len; } while (offset < size); } static void wilc_wlan_handle_rxq(struct wilc *wilc) { int size; u8 *buffer; struct rxq_entry_t *rqe; while (!wilc->quit) { rqe = wilc_wlan_rxq_remove(wilc); if (!rqe) break; buffer = rqe->buffer; size = rqe->buffer_size; wilc_wlan_handle_rx_buff(wilc, buffer, size); kfree(rqe); } if (wilc->quit) complete(&wilc->cfg_event); } static void wilc_unknown_isr_ext(struct wilc *wilc) { wilc->hif_func->hif_clear_int_ext(wilc, 0); } static void wilc_wlan_handle_isr_ext(struct wilc *wilc, u32 int_status) { u32 offset = wilc->rx_buffer_offset; u8 *buffer = NULL; u32 size; u32 retries = 0; int ret = 0; struct rxq_entry_t *rqe; size = FIELD_GET(WILC_INTERRUPT_DATA_SIZE, int_status) << 2; while (!size && retries < 10) { wilc->hif_func->hif_read_size(wilc, &size); size = FIELD_GET(WILC_INTERRUPT_DATA_SIZE, size) << 2; retries++; } if (size <= 0) return; if (WILC_RX_BUFF_SIZE - offset < size) offset = 0; buffer = &wilc->rx_buffer[offset]; wilc->hif_func->hif_clear_int_ext(wilc, DATA_INT_CLR | ENABLE_RX_VMM); ret = wilc->hif_func->hif_block_rx_ext(wilc, 0, buffer, size); if (ret) return; offset += size; wilc->rx_buffer_offset = offset; rqe = kmalloc(sizeof(*rqe), GFP_KERNEL); if (!rqe) return; rqe->buffer = buffer; rqe->buffer_size = size; wilc_wlan_rxq_add(wilc, rqe); wilc_wlan_handle_rxq(wilc); } void wilc_handle_isr(struct wilc *wilc) { u32 int_status; acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP); wilc->hif_func->hif_read_int(wilc, &int_status); if (int_status & DATA_INT_EXT) wilc_wlan_handle_isr_ext(wilc, int_status); if (!(int_status & (ALL_INT_EXT))) wilc_unknown_isr_ext(wilc); release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); } EXPORT_SYMBOL_GPL(wilc_handle_isr); int wilc_wlan_firmware_download(struct wilc *wilc, const u8 *buffer, u32 buffer_size) { u32 offset; u32 addr, size, size2, blksz; u8 *dma_buffer; int ret = 0; u32 reg = 0; blksz = BIT(12); dma_buffer = kmalloc(blksz, GFP_KERNEL); if (!dma_buffer) return -EIO; offset = 0; pr_debug("%s: Downloading firmware size = %d\n", __func__, buffer_size); acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP); wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg); reg &= ~BIT(10); ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg); wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg); if (reg & BIT(10)) pr_err("%s: Failed to reset\n", __func__); release_bus(wilc, WILC_BUS_RELEASE_ONLY); do { addr = get_unaligned_le32(&buffer[offset]); size = get_unaligned_le32(&buffer[offset + 4]); acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP); offset += 8; while (((int)size) && (offset < buffer_size)) { if (size <= blksz) size2 = size; else size2 = blksz; memcpy(dma_buffer, &buffer[offset], size2); ret = wilc->hif_func->hif_block_tx(wilc, addr, dma_buffer, size2); if (ret) break; addr += size2; offset += size2; size -= size2; } release_bus(wilc, WILC_BUS_RELEASE_ALLOW_SLEEP); if (ret) { pr_err("%s Bus error\n", __func__); goto fail; } pr_debug("%s Offset = %d\n", __func__, offset); } while (offset < buffer_size); fail: kfree(dma_buffer); return ret; } int wilc_wlan_start(struct wilc *wilc) { u32 reg = 0; 
int ret; u32 chipid; if (wilc->io_type == WILC_HIF_SDIO) { reg = 0; reg |= BIT(3); } else if (wilc->io_type == WILC_HIF_SPI) { reg = 1; } acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY); ret = wilc->hif_func->hif_write_reg(wilc, WILC_VMM_CORE_CFG, reg); if (ret) goto release; reg = 0; if (wilc->io_type == WILC_HIF_SDIO && wilc->dev_irq_num) reg |= WILC_HAVE_SDIO_IRQ_GPIO; ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_1, reg); if (ret) goto release; wilc->hif_func->hif_sync_ext(wilc, NUM_INT_EXT); ret = wilc->hif_func->hif_read_reg(wilc, WILC_CHIPID, &chipid); if (ret) goto release; wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg); if ((reg & BIT(10)) == BIT(10)) { reg &= ~BIT(10); wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg); wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg); } reg |= BIT(10); ret = wilc->hif_func->hif_write_reg(wilc, WILC_GLB_RESET_0, reg); wilc->hif_func->hif_read_reg(wilc, WILC_GLB_RESET_0, &reg); release: release_bus(wilc, WILC_BUS_RELEASE_ONLY); return ret; } int wilc_wlan_stop(struct wilc *wilc, struct wilc_vif *vif) { u32 reg = 0; int ret; acquire_bus(wilc, WILC_BUS_ACQUIRE_AND_WAKEUP); ret = wilc->hif_func->hif_read_reg(wilc, WILC_GP_REG_0, &reg); if (ret) { netdev_err(vif->ndev, "Error while reading reg\n"); goto release; } ret = wilc->hif_func->hif_write_reg(wilc, WILC_GP_REG_0, (reg | WILC_ABORT_REQ_BIT)); if (ret) { netdev_err(vif->ndev, "Error while writing reg\n"); goto release; } ret = wilc->hif_func->hif_read_reg(wilc, WILC_FW_HOST_COMM, &reg); if (ret) { netdev_err(vif->ndev, "Error while reading reg\n"); goto release; } reg = BIT(0); ret = wilc->hif_func->hif_write_reg(wilc, WILC_FW_HOST_COMM, reg); if (ret) { netdev_err(vif->ndev, "Error while writing reg\n"); goto release; } ret = 0; release: /* host comm is disabled - we can't issue sleep command anymore: */ release_bus(wilc, WILC_BUS_RELEASE_ONLY); return ret; } void wilc_wlan_cleanup(struct net_device *dev) { struct txq_entry_t *tqe; struct rxq_entry_t *rqe; u8 ac; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; wilc->quit = 1; for (ac = 0; ac < NQUEUES; ac++) { while ((tqe = wilc_wlan_txq_remove_from_head(wilc, ac))) { if (tqe->tx_complete_func) tqe->tx_complete_func(tqe->priv, 0); kfree(tqe); } } while ((rqe = wilc_wlan_rxq_remove(wilc))) kfree(rqe); kfree(wilc->vmm_table); wilc->vmm_table = NULL; kfree(wilc->rx_buffer); wilc->rx_buffer = NULL; kfree(wilc->tx_buffer); wilc->tx_buffer = NULL; wilc->hif_func->hif_deinit(wilc); } static int wilc_wlan_cfg_commit(struct wilc_vif *vif, int type, u32 drv_handler) { struct wilc *wilc = vif->wilc; struct wilc_cfg_frame *cfg = &wilc->cfg_frame; int t_len = wilc->cfg_frame_offset + sizeof(struct wilc_cfg_cmd_hdr); if (type == WILC_CFG_SET) cfg->hdr.cmd_type = 'W'; else cfg->hdr.cmd_type = 'Q'; cfg->hdr.seq_no = wilc->cfg_seq_no % 256; cfg->hdr.total_len = cpu_to_le16(t_len); cfg->hdr.driver_handler = cpu_to_le32(drv_handler); wilc->cfg_seq_no = cfg->hdr.seq_no; if (!wilc_wlan_txq_add_cfg_pkt(vif, (u8 *)&cfg->hdr, t_len)) return -1; return 0; } int wilc_wlan_cfg_set(struct wilc_vif *vif, int start, u16 wid, u8 *buffer, u32 buffer_size, int commit, u32 drv_handler) { u32 offset; int ret_size; struct wilc *wilc = vif->wilc; mutex_lock(&wilc->cfg_cmd_lock); if (start) wilc->cfg_frame_offset = 0; offset = wilc->cfg_frame_offset; ret_size = wilc_wlan_cfg_set_wid(wilc->cfg_frame.frame, offset, wid, buffer, buffer_size); offset += ret_size; wilc->cfg_frame_offset = offset; if (!commit) { 
mutex_unlock(&wilc->cfg_cmd_lock); return ret_size; } netdev_dbg(vif->ndev, "%s: seqno[%d]\n", __func__, wilc->cfg_seq_no); if (wilc_wlan_cfg_commit(vif, WILC_CFG_SET, drv_handler)) ret_size = 0; if (!wait_for_completion_timeout(&wilc->cfg_event, WILC_CFG_PKTS_TIMEOUT)) { netdev_dbg(vif->ndev, "%s: Timed Out\n", __func__); ret_size = 0; } wilc->cfg_frame_offset = 0; wilc->cfg_seq_no += 1; mutex_unlock(&wilc->cfg_cmd_lock); return ret_size; } int wilc_wlan_cfg_get(struct wilc_vif *vif, int start, u16 wid, int commit, u32 drv_handler) { u32 offset; int ret_size; struct wilc *wilc = vif->wilc; mutex_lock(&wilc->cfg_cmd_lock); if (start) wilc->cfg_frame_offset = 0; offset = wilc->cfg_frame_offset; ret_size = wilc_wlan_cfg_get_wid(wilc->cfg_frame.frame, offset, wid); offset += ret_size; wilc->cfg_frame_offset = offset; if (!commit) { mutex_unlock(&wilc->cfg_cmd_lock); return ret_size; } if (wilc_wlan_cfg_commit(vif, WILC_CFG_QUERY, drv_handler)) ret_size = 0; if (!wait_for_completion_timeout(&wilc->cfg_event, WILC_CFG_PKTS_TIMEOUT)) { netdev_dbg(vif->ndev, "%s: Timed Out\n", __func__); ret_size = 0; } wilc->cfg_frame_offset = 0; wilc->cfg_seq_no += 1; mutex_unlock(&wilc->cfg_cmd_lock); return ret_size; } int wilc_send_config_pkt(struct wilc_vif *vif, u8 mode, struct wid *wids, u32 count) { int i; int ret = 0; u32 drv = wilc_get_vif_idx(vif); if (mode == WILC_GET_CFG) { for (i = 0; i < count; i++) { if (!wilc_wlan_cfg_get(vif, !i, wids[i].id, (i == count - 1), drv)) { ret = -ETIMEDOUT; break; } } for (i = 0; i < count; i++) { wids[i].size = wilc_wlan_cfg_get_val(vif->wilc, wids[i].id, wids[i].val, wids[i].size); } } else if (mode == WILC_SET_CFG) { for (i = 0; i < count; i++) { if (!wilc_wlan_cfg_set(vif, !i, wids[i].id, wids[i].val, wids[i].size, (i == count - 1), drv)) { ret = -ETIMEDOUT; break; } } } return ret; } static int init_chip(struct net_device *dev) { u32 chipid; u32 reg; int ret = 0; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY); chipid = wilc_get_chipid(wilc, true); if ((chipid & 0xfff) != 0xa0) { ret = wilc->hif_func->hif_read_reg(wilc, WILC_CORTUS_RESET_MUX_SEL, &reg); if (ret) { netdev_err(dev, "fail read reg 0x1118\n"); goto release; } reg |= BIT(0); ret = wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_RESET_MUX_SEL, reg); if (ret) { netdev_err(dev, "fail write reg 0x1118\n"); goto release; } ret = wilc->hif_func->hif_write_reg(wilc, WILC_CORTUS_BOOT_REGISTER, WILC_CORTUS_BOOT_FROM_IRAM); if (ret) { netdev_err(dev, "fail write reg 0xc0000\n"); goto release; } } release: release_bus(wilc, WILC_BUS_RELEASE_ONLY); return ret; } u32 wilc_get_chipid(struct wilc *wilc, bool update) { u32 chipid = 0; u32 rfrevid = 0; if (wilc->chipid == 0 || update) { wilc->hif_func->hif_read_reg(wilc, WILC_CHIPID, &chipid); wilc->hif_func->hif_read_reg(wilc, WILC_RF_REVISION_ID, &rfrevid); if (!is_wilc1000(chipid)) { wilc->chipid = 0; return wilc->chipid; } if (chipid == WILC_1000_BASE_ID_2A) { /* 0x1002A0 */ if (rfrevid != 0x1) chipid = WILC_1000_BASE_ID_2A_REV1; } else if (chipid == WILC_1000_BASE_ID_2B) { /* 0x1002B0 */ if (rfrevid == 0x4) chipid = WILC_1000_BASE_ID_2B_REV1; else if (rfrevid != 0x3) chipid = WILC_1000_BASE_ID_2B_REV2; } wilc->chipid = chipid; } return wilc->chipid; } int wilc_wlan_init(struct net_device *dev) { int ret = 0; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc; wilc = vif->wilc; wilc->quit = 0; if (!wilc->hif_func->hif_is_init(wilc)) { acquire_bus(wilc, WILC_BUS_ACQUIRE_ONLY); ret = 
wilc->hif_func->hif_init(wilc, false); release_bus(wilc, WILC_BUS_RELEASE_ONLY); if (ret) goto fail; } if (!wilc->vmm_table) wilc->vmm_table = kzalloc(WILC_VMM_TBL_SIZE, GFP_KERNEL); if (!wilc->vmm_table) { ret = -ENOBUFS; goto fail; } if (!wilc->tx_buffer) wilc->tx_buffer = kmalloc(WILC_TX_BUFF_SIZE, GFP_KERNEL); if (!wilc->tx_buffer) { ret = -ENOBUFS; goto fail; } if (!wilc->rx_buffer) wilc->rx_buffer = kmalloc(WILC_RX_BUFF_SIZE, GFP_KERNEL); if (!wilc->rx_buffer) { ret = -ENOBUFS; goto fail; } if (init_chip(dev)) { ret = -EIO; goto fail; } return 0; fail: kfree(wilc->vmm_table); wilc->vmm_table = NULL; kfree(wilc->rx_buffer); wilc->rx_buffer = NULL; kfree(wilc->tx_buffer); wilc->tx_buffer = NULL; return ret; }
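The config path in wlan.c above prepends a small command header in wilc_wlan_cfg_commit() before the frame is queued with wilc_wlan_txq_add_cfg_pkt(). The standalone sketch below mirrors that framing on the host side; the 8-byte layout (cmd type, sequence number, le16 total length, le32 driver handler) is inferred from the assignments visible in that function, not from the struct wilc_cfg_cmd_hdr definition (which lives elsewhere), so treat the exact packing as an assumption.

/*
 * Standalone sketch (not kernel code), assuming the header is packed in the
 * order it is assigned in wilc_wlan_cfg_commit() above.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void put_le16(uint8_t *p, uint16_t v) { p[0] = v; p[1] = v >> 8; }
static void put_le32(uint8_t *p, uint32_t v)
{
	p[0] = v; p[1] = v >> 8; p[2] = v >> 16; p[3] = v >> 24;
}

/* type: 'W' for WILC_CFG_SET, 'Q' for WILC_CFG_QUERY */
static size_t mock_cfg_hdr(uint8_t *buf, char type, uint8_t seq_no,
			   uint16_t wid_payload_len, uint32_t drv_handler)
{
	uint16_t total_len = wid_payload_len + 8;	/* WID TLVs + header */

	buf[0] = (uint8_t)type;			/* cmd_type */
	buf[1] = seq_no;			/* cfg_seq_no % 256 */
	put_le16(&buf[2], total_len);		/* total_len */
	put_le32(&buf[4], drv_handler);		/* wilc_get_vif_idx(vif) */
	return 8;
}

int main(void)
{
	uint8_t hdr[8];
	size_t i, n = mock_cfg_hdr(hdr, 'W', 3, 12, 1);

	for (i = 0; i < n; i++)
		printf("%02x ", hdr[i]);
	printf("\n");
	return 0;
}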
linux-master
drivers/net/wireless/microchip/wilc1000/wlan.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries. * All rights reserved. */ #include <linux/clk.h> #include <linux/spi/spi.h> #include <linux/crc7.h> #include <linux/crc-itu-t.h> #include <linux/gpio/consumer.h> #include "netdev.h" #include "cfg80211.h" #define SPI_MODALIAS "wilc1000_spi" static bool enable_crc7; /* protect SPI commands with CRC7 */ module_param(enable_crc7, bool, 0644); MODULE_PARM_DESC(enable_crc7, "Enable CRC7 checksum to protect command transfers\n" "\t\t\tagainst corruption during the SPI transfer.\n" "\t\t\tCommand transfers are short and the CPU-cycle cost\n" "\t\t\tof enabling this is small."); static bool enable_crc16; /* protect SPI data with CRC16 */ module_param(enable_crc16, bool, 0644); MODULE_PARM_DESC(enable_crc16, "Enable CRC16 checksum to protect data transfers\n" "\t\t\tagainst corruption during the SPI transfer.\n" "\t\t\tData transfers can be large and the CPU-cycle cost\n" "\t\t\tof enabling this may be substantial."); /* * For CMD_SINGLE_READ and CMD_INTERNAL_READ, WILC may insert one or * more zero bytes between the command response and the DATA Start tag * (0xf3). This behavior appears to be undocumented in "ATWILC1000 * USER GUIDE" (https://tinyurl.com/4hhshdts) but we have observed 1-4 * zero bytes when the SPI bus operates at 48MHz and none when it * operates at 1MHz. */ #define WILC_SPI_RSP_HDR_EXTRA_DATA 8 struct wilc_spi { bool isinit; /* true if SPI protocol has been configured */ bool probing_crc; /* true if we're probing chip's CRC config */ bool crc7_enabled; /* true if crc7 is currently enabled */ bool crc16_enabled; /* true if crc16 is currently enabled */ struct wilc_gpios { struct gpio_desc *enable; /* ENABLE GPIO or NULL */ struct gpio_desc *reset; /* RESET GPIO or NULL */ } gpios; }; static const struct wilc_hif_func wilc_hif_spi; static int wilc_spi_reset(struct wilc *wilc); /******************************************** * * Spi protocol Function * ********************************************/ #define CMD_DMA_WRITE 0xc1 #define CMD_DMA_READ 0xc2 #define CMD_INTERNAL_WRITE 0xc3 #define CMD_INTERNAL_READ 0xc4 #define CMD_TERMINATE 0xc5 #define CMD_REPEAT 0xc6 #define CMD_DMA_EXT_WRITE 0xc7 #define CMD_DMA_EXT_READ 0xc8 #define CMD_SINGLE_WRITE 0xc9 #define CMD_SINGLE_READ 0xca #define CMD_RESET 0xcf #define SPI_RETRY_MAX_LIMIT 10 #define SPI_ENABLE_VMM_RETRY_LIMIT 2 /* SPI response fields (section 11.1.2 in ATWILC1000 User Guide): */ #define RSP_START_FIELD GENMASK(7, 4) #define RSP_TYPE_FIELD GENMASK(3, 0) /* SPI response values for the response fields: */ #define RSP_START_TAG 0xc #define RSP_TYPE_FIRST_PACKET 0x1 #define RSP_TYPE_INNER_PACKET 0x2 #define RSP_TYPE_LAST_PACKET 0x3 #define RSP_STATE_NO_ERROR 0x00 #define PROTOCOL_REG_PKT_SZ_MASK GENMASK(6, 4) #define PROTOCOL_REG_CRC16_MASK GENMASK(3, 3) #define PROTOCOL_REG_CRC7_MASK GENMASK(2, 2) /* * The SPI data packet size may be any integer power of two in the * range from 256 to 8192 bytes. */ #define DATA_PKT_LOG_SZ_MIN 8 /* 256 B */ #define DATA_PKT_LOG_SZ_MAX 13 /* 8 KiB */ /* * Select the data packet size (log2 of number of bytes): Use the * maximum data packet size. We only retransmit complete packets, so * there is no benefit from using smaller data packets. 
*/ #define DATA_PKT_LOG_SZ DATA_PKT_LOG_SZ_MAX #define DATA_PKT_SZ (1 << DATA_PKT_LOG_SZ) #define WILC_SPI_COMMAND_STAT_SUCCESS 0 #define WILC_GET_RESP_HDR_START(h) (((h) >> 4) & 0xf) struct wilc_spi_cmd { u8 cmd_type; union { struct { u8 addr[3]; u8 crc[]; } __packed simple_cmd; struct { u8 addr[3]; u8 size[2]; u8 crc[]; } __packed dma_cmd; struct { u8 addr[3]; u8 size[3]; u8 crc[]; } __packed dma_cmd_ext; struct { u8 addr[2]; __be32 data; u8 crc[]; } __packed internal_w_cmd; struct { u8 addr[3]; __be32 data; u8 crc[]; } __packed w_cmd; } u; } __packed; struct wilc_spi_read_rsp_data { u8 header; u8 data[4]; u8 crc[]; } __packed; struct wilc_spi_rsp_data { u8 rsp_cmd_type; u8 status; u8 data[]; } __packed; struct wilc_spi_special_cmd_rsp { u8 skip_byte; u8 rsp_cmd_type; u8 status; } __packed; static int wilc_parse_gpios(struct wilc *wilc) { struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; struct wilc_gpios *gpios = &spi_priv->gpios; /* get ENABLE pin and deassert it (if it is defined): */ gpios->enable = devm_gpiod_get_optional(&spi->dev, "enable", GPIOD_OUT_LOW); /* get RESET pin and assert it (if it is defined): */ if (gpios->enable) { /* if enable pin exists, reset must exist as well */ gpios->reset = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH); if (IS_ERR(gpios->reset)) { dev_err(&spi->dev, "missing reset gpio.\n"); return PTR_ERR(gpios->reset); } } else { gpios->reset = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_HIGH); } return 0; } static void wilc_wlan_power(struct wilc *wilc, bool on) { struct wilc_spi *spi_priv = wilc->bus_data; struct wilc_gpios *gpios = &spi_priv->gpios; if (on) { /* assert ENABLE: */ gpiod_set_value(gpios->enable, 1); mdelay(5); /* assert RESET: */ gpiod_set_value(gpios->reset, 1); } else { /* deassert RESET: */ gpiod_set_value(gpios->reset, 0); /* deassert ENABLE: */ gpiod_set_value(gpios->enable, 0); } } static int wilc_bus_probe(struct spi_device *spi) { int ret; struct wilc *wilc; struct wilc_spi *spi_priv; spi_priv = kzalloc(sizeof(*spi_priv), GFP_KERNEL); if (!spi_priv) return -ENOMEM; ret = wilc_cfg80211_init(&wilc, &spi->dev, WILC_HIF_SPI, &wilc_hif_spi); if (ret) goto free; spi_set_drvdata(spi, wilc); wilc->dev = &spi->dev; wilc->bus_data = spi_priv; wilc->dev_irq_num = spi->irq; ret = wilc_parse_gpios(wilc); if (ret < 0) goto netdev_cleanup; wilc->rtc_clk = devm_clk_get_optional(&spi->dev, "rtc"); if (IS_ERR(wilc->rtc_clk)) { ret = PTR_ERR(wilc->rtc_clk); goto netdev_cleanup; } clk_prepare_enable(wilc->rtc_clk); return 0; netdev_cleanup: wilc_netdev_cleanup(wilc); free: kfree(spi_priv); return ret; } static void wilc_bus_remove(struct spi_device *spi) { struct wilc *wilc = spi_get_drvdata(spi); struct wilc_spi *spi_priv = wilc->bus_data; clk_disable_unprepare(wilc->rtc_clk); wilc_netdev_cleanup(wilc); kfree(spi_priv); } static const struct of_device_id wilc_of_match[] = { { .compatible = "microchip,wilc1000", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, wilc_of_match); static const struct spi_device_id wilc_spi_id[] = { { "wilc1000", 0 }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(spi, wilc_spi_id); static struct spi_driver wilc_spi_driver = { .driver = { .name = SPI_MODALIAS, .of_match_table = wilc_of_match, }, .id_table = wilc_spi_id, .probe = wilc_bus_probe, .remove = wilc_bus_remove, }; module_spi_driver(wilc_spi_driver); MODULE_LICENSE("GPL"); static int wilc_spi_tx(struct wilc *wilc, u8 *b, u32 len) { struct spi_device *spi = to_spi_device(wilc->dev); int ret; struct spi_message 
msg; if (len > 0 && b) { struct spi_transfer tr = { .tx_buf = b, .len = len, .delay = { .value = 0, .unit = SPI_DELAY_UNIT_USECS }, }; char *r_buffer = kzalloc(len, GFP_KERNEL); if (!r_buffer) return -ENOMEM; tr.rx_buf = r_buffer; dev_dbg(&spi->dev, "Request writing %d bytes\n", len); memset(&msg, 0, sizeof(msg)); spi_message_init(&msg); msg.spi = spi; spi_message_add_tail(&tr, &msg); ret = spi_sync(spi, &msg); if (ret < 0) dev_err(&spi->dev, "SPI transaction failed\n"); kfree(r_buffer); } else { dev_err(&spi->dev, "can't write data with the following length: %d\n", len); ret = -EINVAL; } return ret; } static int wilc_spi_rx(struct wilc *wilc, u8 *rb, u32 rlen) { struct spi_device *spi = to_spi_device(wilc->dev); int ret; if (rlen > 0) { struct spi_message msg; struct spi_transfer tr = { .rx_buf = rb, .len = rlen, .delay = { .value = 0, .unit = SPI_DELAY_UNIT_USECS }, }; char *t_buffer = kzalloc(rlen, GFP_KERNEL); if (!t_buffer) return -ENOMEM; tr.tx_buf = t_buffer; memset(&msg, 0, sizeof(msg)); spi_message_init(&msg); msg.spi = spi; spi_message_add_tail(&tr, &msg); ret = spi_sync(spi, &msg); if (ret < 0) dev_err(&spi->dev, "SPI transaction failed\n"); kfree(t_buffer); } else { dev_err(&spi->dev, "can't read data with the following length: %u\n", rlen); ret = -EINVAL; } return ret; } static int wilc_spi_tx_rx(struct wilc *wilc, u8 *wb, u8 *rb, u32 rlen) { struct spi_device *spi = to_spi_device(wilc->dev); int ret; if (rlen > 0) { struct spi_message msg; struct spi_transfer tr = { .rx_buf = rb, .tx_buf = wb, .len = rlen, .bits_per_word = 8, .delay = { .value = 0, .unit = SPI_DELAY_UNIT_USECS }, }; memset(&msg, 0, sizeof(msg)); spi_message_init(&msg); msg.spi = spi; spi_message_add_tail(&tr, &msg); ret = spi_sync(spi, &msg); if (ret < 0) dev_err(&spi->dev, "SPI transaction failed\n"); } else { dev_err(&spi->dev, "can't read data with the following length: %u\n", rlen); ret = -EINVAL; } return ret; } static int spi_data_write(struct wilc *wilc, u8 *b, u32 sz) { struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; int ix, nbytes; int result = 0; u8 cmd, order, crc[2]; u16 crc_calc; /* * Data */ ix = 0; do { if (sz <= DATA_PKT_SZ) { nbytes = sz; order = 0x3; } else { nbytes = DATA_PKT_SZ; if (ix == 0) order = 0x1; else order = 0x02; } /* * Write command */ cmd = 0xf0; cmd |= order; if (wilc_spi_tx(wilc, &cmd, 1)) { dev_err(&spi->dev, "Failed data block cmd write, bus error...\n"); result = -EINVAL; break; } /* * Write data */ if (wilc_spi_tx(wilc, &b[ix], nbytes)) { dev_err(&spi->dev, "Failed data block write, bus error...\n"); result = -EINVAL; break; } /* * Write CRC */ if (spi_priv->crc16_enabled) { crc_calc = crc_itu_t(0xffff, &b[ix], nbytes); crc[0] = crc_calc >> 8; crc[1] = crc_calc; if (wilc_spi_tx(wilc, crc, 2)) { dev_err(&spi->dev, "Failed data block crc write, bus error...\n"); result = -EINVAL; break; } } /* * No need to wait for response */ ix += nbytes; sz -= nbytes; } while (sz); return result; } /******************************************** * * Spi Internal Read/Write Function * ********************************************/ static u8 wilc_get_crc7(u8 *buffer, u32 len) { return crc7_be(0xfe, buffer, len); } static int wilc_spi_single_read(struct wilc *wilc, u8 cmd, u32 adr, void *b, u8 clockless) { struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; u8 wb[32], rb[32]; int cmd_len, resp_len, i; u16 crc_calc, crc_recv; struct wilc_spi_cmd *c; struct wilc_spi_rsp_data *r; struct 
wilc_spi_read_rsp_data *r_data; memset(wb, 0x0, sizeof(wb)); memset(rb, 0x0, sizeof(rb)); c = (struct wilc_spi_cmd *)wb; c->cmd_type = cmd; if (cmd == CMD_SINGLE_READ) { c->u.simple_cmd.addr[0] = adr >> 16; c->u.simple_cmd.addr[1] = adr >> 8; c->u.simple_cmd.addr[2] = adr; } else if (cmd == CMD_INTERNAL_READ) { c->u.simple_cmd.addr[0] = adr >> 8; if (clockless == 1) c->u.simple_cmd.addr[0] |= BIT(7); c->u.simple_cmd.addr[1] = adr; c->u.simple_cmd.addr[2] = 0x0; } else { dev_err(&spi->dev, "cmd [%x] not supported\n", cmd); return -EINVAL; } cmd_len = offsetof(struct wilc_spi_cmd, u.simple_cmd.crc); resp_len = sizeof(*r) + sizeof(*r_data) + WILC_SPI_RSP_HDR_EXTRA_DATA; if (spi_priv->crc7_enabled) { c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); cmd_len += 1; resp_len += 2; } if (cmd_len + resp_len > ARRAY_SIZE(wb)) { dev_err(&spi->dev, "spi buffer size too small (%d) (%d) (%zu)\n", cmd_len, resp_len, ARRAY_SIZE(wb)); return -EINVAL; } if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) { dev_err(&spi->dev, "Failed cmd write, bus error...\n"); return -EINVAL; } r = (struct wilc_spi_rsp_data *)&rb[cmd_len]; if (r->rsp_cmd_type != cmd && !clockless) { if (!spi_priv->probing_crc) dev_err(&spi->dev, "Failed cmd, cmd (%02x), resp (%02x)\n", cmd, r->rsp_cmd_type); return -EINVAL; } if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS && !clockless) { dev_err(&spi->dev, "Failed cmd state response state (%02x)\n", r->status); return -EINVAL; } for (i = 0; i < WILC_SPI_RSP_HDR_EXTRA_DATA; ++i) if (WILC_GET_RESP_HDR_START(r->data[i]) == 0xf) break; if (i >= WILC_SPI_RSP_HDR_EXTRA_DATA) { dev_err(&spi->dev, "Error, data start missing\n"); return -EINVAL; } r_data = (struct wilc_spi_read_rsp_data *)&r->data[i]; if (b) memcpy(b, r_data->data, 4); if (!clockless && spi_priv->crc16_enabled) { crc_recv = (r_data->crc[0] << 8) | r_data->crc[1]; crc_calc = crc_itu_t(0xffff, r_data->data, 4); if (crc_recv != crc_calc) { dev_err(&spi->dev, "%s: bad CRC 0x%04x " "(calculated 0x%04x)\n", __func__, crc_recv, crc_calc); return -EINVAL; } } return 0; } static int wilc_spi_write_cmd(struct wilc *wilc, u8 cmd, u32 adr, u32 data, u8 clockless) { struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; u8 wb[32], rb[32]; int cmd_len, resp_len; struct wilc_spi_cmd *c; struct wilc_spi_rsp_data *r; memset(wb, 0x0, sizeof(wb)); memset(rb, 0x0, sizeof(rb)); c = (struct wilc_spi_cmd *)wb; c->cmd_type = cmd; if (cmd == CMD_INTERNAL_WRITE) { c->u.internal_w_cmd.addr[0] = adr >> 8; if (clockless == 1) c->u.internal_w_cmd.addr[0] |= BIT(7); c->u.internal_w_cmd.addr[1] = adr; c->u.internal_w_cmd.data = cpu_to_be32(data); cmd_len = offsetof(struct wilc_spi_cmd, u.internal_w_cmd.crc); if (spi_priv->crc7_enabled) c->u.internal_w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); } else if (cmd == CMD_SINGLE_WRITE) { c->u.w_cmd.addr[0] = adr >> 16; c->u.w_cmd.addr[1] = adr >> 8; c->u.w_cmd.addr[2] = adr; c->u.w_cmd.data = cpu_to_be32(data); cmd_len = offsetof(struct wilc_spi_cmd, u.w_cmd.crc); if (spi_priv->crc7_enabled) c->u.w_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); } else { dev_err(&spi->dev, "write cmd [%x] not supported\n", cmd); return -EINVAL; } if (spi_priv->crc7_enabled) cmd_len += 1; resp_len = sizeof(*r); if (cmd_len + resp_len > ARRAY_SIZE(wb)) { dev_err(&spi->dev, "spi buffer size too small (%d) (%d) (%zu)\n", cmd_len, resp_len, ARRAY_SIZE(wb)); return -EINVAL; } if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) { dev_err(&spi->dev, "Failed cmd write, bus error...\n"); return -EINVAL; } r 
= (struct wilc_spi_rsp_data *)&rb[cmd_len]; /* * Clockless registers operations might return unexptected responses, * even if successful. */ if (r->rsp_cmd_type != cmd && !clockless) { dev_err(&spi->dev, "Failed cmd response, cmd (%02x), resp (%02x)\n", cmd, r->rsp_cmd_type); return -EINVAL; } if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS && !clockless) { dev_err(&spi->dev, "Failed cmd state response state (%02x)\n", r->status); return -EINVAL; } return 0; } static int wilc_spi_dma_rw(struct wilc *wilc, u8 cmd, u32 adr, u8 *b, u32 sz) { struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; u16 crc_recv, crc_calc; u8 wb[32], rb[32]; int cmd_len, resp_len; int retry, ix = 0; u8 crc[2]; struct wilc_spi_cmd *c; struct wilc_spi_rsp_data *r; memset(wb, 0x0, sizeof(wb)); memset(rb, 0x0, sizeof(rb)); c = (struct wilc_spi_cmd *)wb; c->cmd_type = cmd; if (cmd == CMD_DMA_WRITE || cmd == CMD_DMA_READ) { c->u.dma_cmd.addr[0] = adr >> 16; c->u.dma_cmd.addr[1] = adr >> 8; c->u.dma_cmd.addr[2] = adr; c->u.dma_cmd.size[0] = sz >> 8; c->u.dma_cmd.size[1] = sz; cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd.crc); if (spi_priv->crc7_enabled) c->u.dma_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); } else if (cmd == CMD_DMA_EXT_WRITE || cmd == CMD_DMA_EXT_READ) { c->u.dma_cmd_ext.addr[0] = adr >> 16; c->u.dma_cmd_ext.addr[1] = adr >> 8; c->u.dma_cmd_ext.addr[2] = adr; c->u.dma_cmd_ext.size[0] = sz >> 16; c->u.dma_cmd_ext.size[1] = sz >> 8; c->u.dma_cmd_ext.size[2] = sz; cmd_len = offsetof(struct wilc_spi_cmd, u.dma_cmd_ext.crc); if (spi_priv->crc7_enabled) c->u.dma_cmd_ext.crc[0] = wilc_get_crc7(wb, cmd_len); } else { dev_err(&spi->dev, "dma read write cmd [%x] not supported\n", cmd); return -EINVAL; } if (spi_priv->crc7_enabled) cmd_len += 1; resp_len = sizeof(*r); if (cmd_len + resp_len > ARRAY_SIZE(wb)) { dev_err(&spi->dev, "spi buffer size too small (%d)(%d) (%zu)\n", cmd_len, resp_len, ARRAY_SIZE(wb)); return -EINVAL; } if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) { dev_err(&spi->dev, "Failed cmd write, bus error...\n"); return -EINVAL; } r = (struct wilc_spi_rsp_data *)&rb[cmd_len]; if (r->rsp_cmd_type != cmd) { dev_err(&spi->dev, "Failed cmd response, cmd (%02x), resp (%02x)\n", cmd, r->rsp_cmd_type); return -EINVAL; } if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) { dev_err(&spi->dev, "Failed cmd state response state (%02x)\n", r->status); return -EINVAL; } if (cmd == CMD_DMA_WRITE || cmd == CMD_DMA_EXT_WRITE) return 0; while (sz > 0) { int nbytes; u8 rsp; nbytes = min_t(u32, sz, DATA_PKT_SZ); /* * Data Response header */ retry = 100; do { if (wilc_spi_rx(wilc, &rsp, 1)) { dev_err(&spi->dev, "Failed resp read, bus err\n"); return -EINVAL; } if (WILC_GET_RESP_HDR_START(rsp) == 0xf) break; } while (retry--); /* * Read bytes */ if (wilc_spi_rx(wilc, &b[ix], nbytes)) { dev_err(&spi->dev, "Failed block read, bus err\n"); return -EINVAL; } /* * Read CRC */ if (spi_priv->crc16_enabled) { if (wilc_spi_rx(wilc, crc, 2)) { dev_err(&spi->dev, "Failed block CRC read, bus err\n"); return -EINVAL; } crc_recv = (crc[0] << 8) | crc[1]; crc_calc = crc_itu_t(0xffff, &b[ix], nbytes); if (crc_recv != crc_calc) { dev_err(&spi->dev, "%s: bad CRC 0x%04x " "(calculated 0x%04x)\n", __func__, crc_recv, crc_calc); return -EINVAL; } } ix += nbytes; sz -= nbytes; } return 0; } static int wilc_spi_special_cmd(struct wilc *wilc, u8 cmd) { struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; u8 wb[32], rb[32]; int cmd_len, resp_len = 0; struct 
wilc_spi_cmd *c; struct wilc_spi_special_cmd_rsp *r; if (cmd != CMD_TERMINATE && cmd != CMD_REPEAT && cmd != CMD_RESET) return -EINVAL; memset(wb, 0x0, sizeof(wb)); memset(rb, 0x0, sizeof(rb)); c = (struct wilc_spi_cmd *)wb; c->cmd_type = cmd; if (cmd == CMD_RESET) memset(c->u.simple_cmd.addr, 0xFF, 3); cmd_len = offsetof(struct wilc_spi_cmd, u.simple_cmd.crc); resp_len = sizeof(*r); if (spi_priv->crc7_enabled) { c->u.simple_cmd.crc[0] = wilc_get_crc7(wb, cmd_len); cmd_len += 1; } if (cmd_len + resp_len > ARRAY_SIZE(wb)) { dev_err(&spi->dev, "spi buffer size too small (%d) (%d) (%zu)\n", cmd_len, resp_len, ARRAY_SIZE(wb)); return -EINVAL; } if (wilc_spi_tx_rx(wilc, wb, rb, cmd_len + resp_len)) { dev_err(&spi->dev, "Failed cmd write, bus error...\n"); return -EINVAL; } r = (struct wilc_spi_special_cmd_rsp *)&rb[cmd_len]; if (r->rsp_cmd_type != cmd) { if (!spi_priv->probing_crc) dev_err(&spi->dev, "Failed cmd response, cmd (%02x), resp (%02x)\n", cmd, r->rsp_cmd_type); return -EINVAL; } if (r->status != WILC_SPI_COMMAND_STAT_SUCCESS) { dev_err(&spi->dev, "Failed cmd state response state (%02x)\n", r->status); return -EINVAL; } return 0; } static void wilc_spi_reset_cmd_sequence(struct wilc *wl, u8 attempt, u32 addr) { struct spi_device *spi = to_spi_device(wl->dev); struct wilc_spi *spi_priv = wl->bus_data; if (!spi_priv->probing_crc) dev_err(&spi->dev, "Reset and retry %d %x\n", attempt, addr); usleep_range(1000, 1100); wilc_spi_reset(wl); usleep_range(1000, 1100); } static int wilc_spi_read_reg(struct wilc *wilc, u32 addr, u32 *data) { struct spi_device *spi = to_spi_device(wilc->dev); int result; u8 cmd = CMD_SINGLE_READ; u8 clockless = 0; u8 i; if (addr <= WILC_SPI_CLOCKLESS_ADDR_LIMIT) { /* Clockless register */ cmd = CMD_INTERNAL_READ; clockless = 1; } for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { result = wilc_spi_single_read(wilc, cmd, addr, data, clockless); if (!result) { le32_to_cpus(data); return 0; } /* retry is not applicable for clockless registers */ if (clockless) break; dev_err(&spi->dev, "Failed cmd, read reg (%08x)...\n", addr); wilc_spi_reset_cmd_sequence(wilc, i, addr); } return result; } static int wilc_spi_read(struct wilc *wilc, u32 addr, u8 *buf, u32 size) { struct spi_device *spi = to_spi_device(wilc->dev); int result; u8 i; if (size <= 4) return -EINVAL; for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { result = wilc_spi_dma_rw(wilc, CMD_DMA_EXT_READ, addr, buf, size); if (!result) return 0; dev_err(&spi->dev, "Failed cmd, read block (%08x)...\n", addr); wilc_spi_reset_cmd_sequence(wilc, i, addr); } return result; } static int spi_internal_write(struct wilc *wilc, u32 adr, u32 dat) { struct spi_device *spi = to_spi_device(wilc->dev); int result; u8 i; for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { result = wilc_spi_write_cmd(wilc, CMD_INTERNAL_WRITE, adr, dat, 0); if (!result) return 0; dev_err(&spi->dev, "Failed internal write cmd...\n"); wilc_spi_reset_cmd_sequence(wilc, i, adr); } return result; } static int spi_internal_read(struct wilc *wilc, u32 adr, u32 *data) { struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; int result; u8 i; for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { result = wilc_spi_single_read(wilc, CMD_INTERNAL_READ, adr, data, 0); if (!result) { le32_to_cpus(data); return 0; } if (!spi_priv->probing_crc) dev_err(&spi->dev, "Failed internal read cmd...\n"); wilc_spi_reset_cmd_sequence(wilc, i, adr); } return result; } /******************************************** * * Spi interfaces * 
********************************************/ static int wilc_spi_write_reg(struct wilc *wilc, u32 addr, u32 data) { struct spi_device *spi = to_spi_device(wilc->dev); int result; u8 cmd = CMD_SINGLE_WRITE; u8 clockless = 0; u8 i; if (addr <= WILC_SPI_CLOCKLESS_ADDR_LIMIT) { /* Clockless register */ cmd = CMD_INTERNAL_WRITE; clockless = 1; } for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { result = wilc_spi_write_cmd(wilc, cmd, addr, data, clockless); if (!result) return 0; dev_err(&spi->dev, "Failed cmd, write reg (%08x)...\n", addr); if (clockless) break; wilc_spi_reset_cmd_sequence(wilc, i, addr); } return result; } static int spi_data_rsp(struct wilc *wilc, u8 cmd) { struct spi_device *spi = to_spi_device(wilc->dev); int result, i; u8 rsp[4]; /* * The response to data packets is two bytes long. For * efficiency's sake, wilc_spi_write() wisely ignores the * responses for all packets but the final one. The downside * of that optimization is that when the final data packet is * short, we may receive (part of) the response to the * second-to-last packet before the one for the final packet. * To handle this, we always read 4 bytes and then search for * the last byte that contains the "Response Start" code (0xc * in the top 4 bits). We then know that this byte is the * first response byte of the final data packet. */ result = wilc_spi_rx(wilc, rsp, sizeof(rsp)); if (result) { dev_err(&spi->dev, "Failed bus error...\n"); return result; } for (i = sizeof(rsp) - 2; i >= 0; --i) if (FIELD_GET(RSP_START_FIELD, rsp[i]) == RSP_START_TAG) break; if (i < 0) { dev_err(&spi->dev, "Data packet response missing (%02x %02x %02x %02x)\n", rsp[0], rsp[1], rsp[2], rsp[3]); return -1; } /* rsp[i] is the last response start byte */ if (FIELD_GET(RSP_TYPE_FIELD, rsp[i]) != RSP_TYPE_LAST_PACKET || rsp[i + 1] != RSP_STATE_NO_ERROR) { dev_err(&spi->dev, "Data response error (%02x %02x)\n", rsp[i], rsp[i + 1]); return -1; } return 0; } static int wilc_spi_write(struct wilc *wilc, u32 addr, u8 *buf, u32 size) { struct spi_device *spi = to_spi_device(wilc->dev); int result; u8 i; /* * has to be greated than 4 */ if (size <= 4) return -EINVAL; for (i = 0; i < SPI_RETRY_MAX_LIMIT; i++) { result = wilc_spi_dma_rw(wilc, CMD_DMA_EXT_WRITE, addr, NULL, size); if (result) { dev_err(&spi->dev, "Failed cmd, write block (%08x)...\n", addr); wilc_spi_reset_cmd_sequence(wilc, i, addr); continue; } /* * Data */ result = spi_data_write(wilc, buf, size); if (result) { dev_err(&spi->dev, "Failed block data write...\n"); wilc_spi_reset_cmd_sequence(wilc, i, addr); continue; } /* * Data response */ result = spi_data_rsp(wilc, CMD_DMA_EXT_WRITE); if (result) { dev_err(&spi->dev, "Failed block data rsp...\n"); wilc_spi_reset_cmd_sequence(wilc, i, addr); continue; } break; } return result; } /******************************************** * * Bus interfaces * ********************************************/ static int wilc_spi_reset(struct wilc *wilc) { struct spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; int result; result = wilc_spi_special_cmd(wilc, CMD_RESET); if (result && !spi_priv->probing_crc) dev_err(&spi->dev, "Failed cmd reset\n"); return result; } static bool wilc_spi_is_init(struct wilc *wilc) { struct wilc_spi *spi_priv = wilc->bus_data; return spi_priv->isinit; } static int wilc_spi_deinit(struct wilc *wilc) { struct wilc_spi *spi_priv = wilc->bus_data; spi_priv->isinit = false; wilc_wlan_power(wilc, false); return 0; } static int wilc_spi_init(struct wilc *wilc, bool resume) { struct 
spi_device *spi = to_spi_device(wilc->dev); struct wilc_spi *spi_priv = wilc->bus_data; u32 reg; u32 chipid; int ret, i; if (spi_priv->isinit) { /* Confirm we can read chipid register without error: */ ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid); if (ret == 0) return 0; dev_err(&spi->dev, "Fail cmd read chip id...\n"); } wilc_wlan_power(wilc, true); /* * configure protocol */ /* * Infer the CRC settings that are currently in effect. This * is necessary because we can't be sure that the chip has * been RESET (e.g, after module unload and reload). */ spi_priv->probing_crc = true; spi_priv->crc7_enabled = enable_crc7; spi_priv->crc16_enabled = false; /* don't check CRC16 during probing */ for (i = 0; i < 2; ++i) { ret = spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg); if (ret == 0) break; spi_priv->crc7_enabled = !enable_crc7; } if (ret) { dev_err(&spi->dev, "Failed with CRC7 on and off.\n"); return ret; } /* set up the desired CRC configuration: */ reg &= ~(PROTOCOL_REG_CRC7_MASK | PROTOCOL_REG_CRC16_MASK); if (enable_crc7) reg |= PROTOCOL_REG_CRC7_MASK; if (enable_crc16) reg |= PROTOCOL_REG_CRC16_MASK; /* set up the data packet size: */ BUILD_BUG_ON(DATA_PKT_LOG_SZ < DATA_PKT_LOG_SZ_MIN || DATA_PKT_LOG_SZ > DATA_PKT_LOG_SZ_MAX); reg &= ~PROTOCOL_REG_PKT_SZ_MASK; reg |= FIELD_PREP(PROTOCOL_REG_PKT_SZ_MASK, DATA_PKT_LOG_SZ - DATA_PKT_LOG_SZ_MIN); /* establish the new setup: */ ret = spi_internal_write(wilc, WILC_SPI_PROTOCOL_OFFSET, reg); if (ret) { dev_err(&spi->dev, "[wilc spi %d]: Failed internal write reg\n", __LINE__); return ret; } /* update our state to match new protocol settings: */ spi_priv->crc7_enabled = enable_crc7; spi_priv->crc16_enabled = enable_crc16; /* re-read to make sure new settings are in effect: */ spi_internal_read(wilc, WILC_SPI_PROTOCOL_OFFSET, &reg); spi_priv->probing_crc = false; /* * make sure can read chip id without protocol error */ ret = wilc_spi_read_reg(wilc, WILC_CHIPID, &chipid); if (ret) { dev_err(&spi->dev, "Fail cmd read chip id...\n"); return ret; } spi_priv->isinit = true; return 0; } static int wilc_spi_read_size(struct wilc *wilc, u32 *size) { int ret; ret = spi_internal_read(wilc, WILC_SPI_INT_STATUS - WILC_SPI_REG_BASE, size); *size = FIELD_GET(IRQ_DMA_WD_CNT_MASK, *size); return ret; } static int wilc_spi_read_int(struct wilc *wilc, u32 *int_status) { return spi_internal_read(wilc, WILC_SPI_INT_STATUS - WILC_SPI_REG_BASE, int_status); } static int wilc_spi_clear_int_ext(struct wilc *wilc, u32 val) { int ret; int retry = SPI_ENABLE_VMM_RETRY_LIMIT; u32 check; while (retry) { ret = spi_internal_write(wilc, WILC_SPI_INT_CLEAR - WILC_SPI_REG_BASE, val); if (ret) break; ret = spi_internal_read(wilc, WILC_SPI_INT_CLEAR - WILC_SPI_REG_BASE, &check); if (ret || ((check & EN_VMM) == (val & EN_VMM))) break; retry--; } return ret; } static int wilc_spi_sync_ext(struct wilc *wilc, int nint) { struct spi_device *spi = to_spi_device(wilc->dev); u32 reg; int ret, i; if (nint > MAX_NUM_INT) { dev_err(&spi->dev, "Too many interrupts (%d)...\n", nint); return -EINVAL; } /* * interrupt pin mux select */ ret = wilc_spi_read_reg(wilc, WILC_PIN_MUX_0, &reg); if (ret) { dev_err(&spi->dev, "Failed read reg (%08x)...\n", WILC_PIN_MUX_0); return ret; } reg |= BIT(8); ret = wilc_spi_write_reg(wilc, WILC_PIN_MUX_0, reg); if (ret) { dev_err(&spi->dev, "Failed write reg (%08x)...\n", WILC_PIN_MUX_0); return ret; } /* * interrupt enable */ ret = wilc_spi_read_reg(wilc, WILC_INTR_ENABLE, &reg); if (ret) { dev_err(&spi->dev, "Failed read reg (%08x)...\n", 
WILC_INTR_ENABLE); return ret; } for (i = 0; (i < 5) && (nint > 0); i++, nint--) reg |= (BIT((27 + i))); ret = wilc_spi_write_reg(wilc, WILC_INTR_ENABLE, reg); if (ret) { dev_err(&spi->dev, "Failed write reg (%08x)...\n", WILC_INTR_ENABLE); return ret; } if (nint) { ret = wilc_spi_read_reg(wilc, WILC_INTR2_ENABLE, &reg); if (ret) { dev_err(&spi->dev, "Failed read reg (%08x)...\n", WILC_INTR2_ENABLE); return ret; } for (i = 0; (i < 3) && (nint > 0); i++, nint--) reg |= BIT(i); ret = wilc_spi_write_reg(wilc, WILC_INTR2_ENABLE, reg); if (ret) { dev_err(&spi->dev, "Failed write reg (%08x)...\n", WILC_INTR2_ENABLE); return ret; } } return 0; } /* Global spi HIF function table */ static const struct wilc_hif_func wilc_hif_spi = { .hif_init = wilc_spi_init, .hif_deinit = wilc_spi_deinit, .hif_read_reg = wilc_spi_read_reg, .hif_write_reg = wilc_spi_write_reg, .hif_block_rx = wilc_spi_read, .hif_block_tx = wilc_spi_write, .hif_read_int = wilc_spi_read_int, .hif_clear_int_ext = wilc_spi_clear_int_ext, .hif_read_size = wilc_spi_read_size, .hif_block_tx_ext = wilc_spi_write, .hif_block_rx_ext = wilc_spi_read, .hif_sync_ext = wilc_spi_sync_ext, .hif_reset = wilc_spi_reset, .hif_is_init = wilc_spi_is_init, };
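wilc_spi_single_read() above shows how each register read is framed before it goes out on the bus: a command byte, then a 3-byte AHB address (or a 2-byte internal offset with the clockless bit set), optionally followed by a CRC7 byte. Below is a minimal host-side sketch of that layout; the CRC7 byte is produced by crc7_be(0xfe, wb, cmd_len) in the driver and is only noted as a placeholder here rather than re-implemented.

/*
 * Standalone sketch (not kernel code): the wire layout built by
 * wilc_spi_single_read() for CMD_SINGLE_READ and CMD_INTERNAL_READ.
 */
#include <stdint.h>
#include <stdio.h>

#define CMD_INTERNAL_READ	0xc4
#define CMD_SINGLE_READ		0xca

static int frame_read_cmd(uint8_t *wb, uint8_t cmd, uint32_t adr, int clockless)
{
	wb[0] = cmd;
	if (cmd == CMD_SINGLE_READ) {		/* 24-bit AHB address */
		wb[1] = adr >> 16;
		wb[2] = adr >> 8;
		wb[3] = adr;
	} else if (cmd == CMD_INTERNAL_READ) {	/* 16-bit register offset */
		wb[1] = (adr >> 8) | (clockless ? 0x80 : 0);
		wb[2] = adr;
		wb[3] = 0;
	} else {
		return -1;
	}
	/* wb[4] would carry the CRC7 byte when crc7 protection is enabled */
	return 4;
}

int main(void)
{
	uint8_t wb[8] = { 0 };
	int i, n = frame_read_cmd(wb, CMD_SINGLE_READ, 0x1000, 0);

	for (i = 0; i < n; i++)
		printf("%02x ", wb[i]);
	printf("\n");
	return 0;
}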
linux-master
drivers/net/wireless/microchip/wilc1000/spi.c
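The comment inside spi_data_rsp() in spi.c above explains why the driver always reads a fixed 4-byte window after the last data packet and scans it backwards for the response-start nibble: a short final packet may still have part of the previous packet's response sitting in front of it. A small standalone sketch of that scan, using the same tag and type values defined in the file (an illustration, not the driver's code path):

/*
 * Standalone sketch (not kernel code): the backward scan spi_data_rsp() uses
 * to locate the response for the final data packet in a 4-byte read window.
 */
#include <stdint.h>
#include <stdio.h>

#define RSP_START_TAG		0xc	/* upper nibble of a response byte */
#define RSP_TYPE_LAST_PACKET	0x3	/* lower nibble: last data packet */
#define RSP_STATE_NO_ERROR	0x00

static int check_data_rsp(const uint8_t rsp[4])
{
	int i;

	/* find the last byte whose upper nibble carries the start tag */
	for (i = 2; i >= 0; i--)
		if ((rsp[i] >> 4) == RSP_START_TAG)
			break;
	if (i < 0)
		return -1;		/* response start missing */

	/* it must announce the last packet, followed by a clean status byte */
	if ((rsp[i] & 0x0f) != RSP_TYPE_LAST_PACKET ||
	    rsp[i + 1] != RSP_STATE_NO_ERROR)
		return -1;
	return 0;
}

int main(void)
{
	/* leftover response byte from the previous packet, then the final one */
	const uint8_t rsp[4] = { 0xc2, 0x00, 0xc3, 0x00 };

	printf("data response %s\n", check_data_rsp(rsp) ? "bad" : "ok");
	return 0;
}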
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries. * All rights reserved. */ #include "cfg80211.h" #define GO_NEG_REQ 0x00 #define GO_NEG_RSP 0x01 #define GO_NEG_CONF 0x02 #define P2P_INV_REQ 0x03 #define P2P_INV_RSP 0x04 #define WILC_INVALID_CHANNEL 0 /* Operation at 2.4 GHz with channels 1-13 */ #define WILC_WLAN_OPERATING_CLASS_2_4GHZ 0x51 static const struct ieee80211_txrx_stypes wilc_wfi_cfg80211_mgmt_types[NUM_NL80211_IFTYPES] = { [NL80211_IFTYPE_STATION] = { .tx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4), .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) }, [NL80211_IFTYPE_AP] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) | BIT(IEEE80211_STYPE_ACTION >> 4) }, [NL80211_IFTYPE_P2P_CLIENT] = { .tx = 0xffff, .rx = BIT(IEEE80211_STYPE_ACTION >> 4) | BIT(IEEE80211_STYPE_PROBE_REQ >> 4) | BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) | BIT(IEEE80211_STYPE_DISASSOC >> 4) | BIT(IEEE80211_STYPE_AUTH >> 4) | BIT(IEEE80211_STYPE_DEAUTH >> 4) } }; #ifdef CONFIG_PM static const struct wiphy_wowlan_support wowlan_support = { .flags = WIPHY_WOWLAN_ANY }; #endif struct wilc_p2p_mgmt_data { int size; u8 *buff; }; struct wilc_p2p_pub_act_frame { u8 category; u8 action; u8 oui[3]; u8 oui_type; u8 oui_subtype; u8 dialog_token; u8 elem[]; } __packed; struct wilc_vendor_specific_ie { u8 tag_number; u8 tag_len; u8 oui[3]; u8 oui_type; u8 attr[]; } __packed; struct wilc_attr_entry { u8 attr_type; __le16 attr_len; u8 val[]; } __packed; struct wilc_attr_oper_ch { u8 attr_type; __le16 attr_len; u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; u8 op_class; u8 op_channel; } __packed; struct wilc_attr_ch_list { u8 attr_type; __le16 attr_len; u8 country_code[IEEE80211_COUNTRY_STRING_LEN]; u8 elem[]; } __packed; struct wilc_ch_list_elem { u8 op_class; u8 no_of_channels; u8 ch_list[]; } __packed; static void cfg_scan_result(enum scan_event scan_event, struct wilc_rcvd_net_info *info, void *user_void) { struct wilc_priv *priv = user_void; if (!priv->cfg_scanning) return; if (scan_event == SCAN_EVENT_NETWORK_FOUND) { s32 freq; struct ieee80211_channel *channel; struct cfg80211_bss *bss; struct wiphy *wiphy = priv->dev->ieee80211_ptr->wiphy; if (!wiphy || !info) return; freq = ieee80211_channel_to_frequency((s32)info->ch, NL80211_BAND_2GHZ); channel = ieee80211_get_channel(wiphy, freq); if (!channel) return; bss = cfg80211_inform_bss_frame(wiphy, channel, info->mgmt, info->frame_len, (s32)info->rssi * 100, GFP_KERNEL); cfg80211_put_bss(wiphy, bss); } else if (scan_event == SCAN_EVENT_DONE) { mutex_lock(&priv->scan_req_lock); if (priv->scan_req) { struct cfg80211_scan_info info = { .aborted = false, }; cfg80211_scan_done(priv->scan_req, &info); priv->cfg_scanning = false; priv->scan_req = NULL; } mutex_unlock(&priv->scan_req_lock); } else if (scan_event == SCAN_EVENT_ABORTED) { mutex_lock(&priv->scan_req_lock); if (priv->scan_req) { struct cfg80211_scan_info info = { .aborted = false, }; cfg80211_scan_done(priv->scan_req, &info); priv->cfg_scanning = false; priv->scan_req = NULL; } mutex_unlock(&priv->scan_req_lock); } } static void cfg_connect_result(enum conn_event conn_disconn_evt, u8 mac_status, void *priv_data) { struct wilc_priv *priv = 
priv_data; struct net_device *dev = priv->dev; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wl = vif->wilc; struct host_if_drv *wfi_drv = priv->hif_drv; struct wilc_conn_info *conn_info = &wfi_drv->conn_info; struct wiphy *wiphy = dev->ieee80211_ptr->wiphy; vif->connecting = false; if (conn_disconn_evt == CONN_DISCONN_EVENT_CONN_RESP) { u16 connect_status = conn_info->status; if (mac_status == WILC_MAC_STATUS_DISCONNECTED && connect_status == WLAN_STATUS_SUCCESS) { connect_status = WLAN_STATUS_UNSPECIFIED_FAILURE; wilc_wlan_set_bssid(priv->dev, NULL, WILC_STATION_MODE); if (vif->iftype != WILC_CLIENT_MODE) wl->sta_ch = WILC_INVALID_CHANNEL; netdev_err(dev, "Unspecified failure\n"); } if (connect_status == WLAN_STATUS_SUCCESS) memcpy(priv->associated_bss, conn_info->bssid, ETH_ALEN); cfg80211_ref_bss(wiphy, vif->bss); cfg80211_connect_bss(dev, conn_info->bssid, vif->bss, conn_info->req_ies, conn_info->req_ies_len, conn_info->resp_ies, conn_info->resp_ies_len, connect_status, GFP_KERNEL, NL80211_TIMEOUT_UNSPECIFIED); vif->bss = NULL; } else if (conn_disconn_evt == CONN_DISCONN_EVENT_DISCONN_NOTIF) { u16 reason = 0; eth_zero_addr(priv->associated_bss); wilc_wlan_set_bssid(priv->dev, NULL, WILC_STATION_MODE); if (vif->iftype != WILC_CLIENT_MODE) { wl->sta_ch = WILC_INVALID_CHANNEL; } else { if (wfi_drv->ifc_up) reason = 3; else reason = 1; } cfg80211_disconnected(dev, reason, NULL, 0, false, GFP_KERNEL); } } struct wilc_vif *wilc_get_wl_to_vif(struct wilc *wl) { struct wilc_vif *vif; vif = list_first_or_null_rcu(&wl->vif_list, typeof(*vif), list); if (!vif) return ERR_PTR(-EINVAL); return vif; } static int set_channel(struct wiphy *wiphy, struct cfg80211_chan_def *chandef) { struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; u32 channelnum; int result; int srcu_idx; srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { srcu_read_unlock(&wl->srcu, srcu_idx); return PTR_ERR(vif); } channelnum = ieee80211_frequency_to_channel(chandef->chan->center_freq); wl->op_ch = channelnum; result = wilc_set_mac_chnl_num(vif, channelnum); if (result) netdev_err(vif->ndev, "Error in setting channel\n"); srcu_read_unlock(&wl->srcu, srcu_idx); return result; } static int scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct wilc_vif *vif = netdev_priv(request->wdev->netdev); struct wilc_priv *priv = &vif->priv; u32 i; int ret = 0; u8 scan_ch_list[WILC_MAX_NUM_SCANNED_CH]; u8 scan_type; if (request->n_channels > WILC_MAX_NUM_SCANNED_CH) { netdev_err(vif->ndev, "Requested scanned channels over\n"); return -EINVAL; } priv->scan_req = request; priv->cfg_scanning = true; for (i = 0; i < request->n_channels; i++) { u16 freq = request->channels[i]->center_freq; scan_ch_list[i] = ieee80211_frequency_to_channel(freq); } if (request->n_ssids) scan_type = WILC_FW_ACTIVE_SCAN; else scan_type = WILC_FW_PASSIVE_SCAN; ret = wilc_scan(vif, WILC_FW_USER_SCAN, scan_type, scan_ch_list, request->n_channels, cfg_scan_result, (void *)priv, request); if (ret) { priv->scan_req = NULL; priv->cfg_scanning = false; } return ret; } static int connect(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_connect_params *sme) { struct wilc_vif *vif = netdev_priv(dev); struct wilc_priv *priv = &vif->priv; struct host_if_drv *wfi_drv = priv->hif_drv; int ret; u32 i; u8 security = WILC_FW_SEC_NO; enum mfptype mfp_type = WILC_FW_MFP_NONE; enum authtype auth_type = WILC_FW_AUTH_ANY; u32 cipher_group; struct cfg80211_bss *bss; void *join_params; u8 ch; vif->connecting = 
true; cipher_group = sme->crypto.cipher_group; if (cipher_group != 0) { if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2) { if (cipher_group == WLAN_CIPHER_SUITE_TKIP) security = WILC_FW_SEC_WPA2_TKIP; else security = WILC_FW_SEC_WPA2_AES; } else if (sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) { if (cipher_group == WLAN_CIPHER_SUITE_TKIP) security = WILC_FW_SEC_WPA_TKIP; else security = WILC_FW_SEC_WPA_AES; } else { ret = -ENOTSUPP; netdev_err(dev, "%s: Unsupported cipher\n", __func__); goto out_error; } } if ((sme->crypto.wpa_versions & NL80211_WPA_VERSION_1) || (sme->crypto.wpa_versions & NL80211_WPA_VERSION_2)) { for (i = 0; i < sme->crypto.n_ciphers_pairwise; i++) { u32 ciphers_pairwise = sme->crypto.ciphers_pairwise[i]; if (ciphers_pairwise == WLAN_CIPHER_SUITE_TKIP) security |= WILC_FW_TKIP; else security |= WILC_FW_AES; } } switch (sme->auth_type) { case NL80211_AUTHTYPE_OPEN_SYSTEM: auth_type = WILC_FW_AUTH_OPEN_SYSTEM; break; case NL80211_AUTHTYPE_SAE: auth_type = WILC_FW_AUTH_SAE; if (sme->ssid_len) { memcpy(vif->auth.ssid.ssid, sme->ssid, sme->ssid_len); vif->auth.ssid.ssid_len = sme->ssid_len; } vif->auth.key_mgmt_suite = cpu_to_be32(sme->crypto.akm_suites[0]); ether_addr_copy(vif->auth.bssid, sme->bssid); break; default: break; } if (sme->crypto.n_akm_suites) { if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_8021X) auth_type = WILC_FW_AUTH_IEEE8021; else if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_PSK_SHA256) auth_type = WILC_FW_AUTH_OPEN_SYSTEM_SHA256; else if (sme->crypto.akm_suites[0] == WLAN_AKM_SUITE_8021X_SHA256) auth_type = WILC_FW_AUTH_IEE8021X_SHA256; } if (wfi_drv->usr_scan_req.scan_result) { netdev_err(vif->ndev, "%s: Scan in progress\n", __func__); ret = -EBUSY; goto out_error; } bss = cfg80211_get_bss(wiphy, sme->channel, sme->bssid, sme->ssid, sme->ssid_len, IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY(sme->privacy)); if (!bss) { ret = -EINVAL; goto out_error; } if (ether_addr_equal_unaligned(vif->bssid, bss->bssid)) { ret = -EALREADY; goto out_put_bss; } join_params = wilc_parse_join_bss_param(bss, &sme->crypto); if (!join_params) { netdev_err(dev, "%s: failed to construct join param\n", __func__); ret = -EINVAL; goto out_put_bss; } ch = ieee80211_frequency_to_channel(bss->channel->center_freq); vif->wilc->op_ch = ch; if (vif->iftype != WILC_CLIENT_MODE) vif->wilc->sta_ch = ch; wilc_wlan_set_bssid(dev, bss->bssid, WILC_STATION_MODE); wfi_drv->conn_info.security = security; wfi_drv->conn_info.auth_type = auth_type; wfi_drv->conn_info.ch = ch; wfi_drv->conn_info.conn_result = cfg_connect_result; wfi_drv->conn_info.arg = priv; wfi_drv->conn_info.param = join_params; if (sme->mfp == NL80211_MFP_OPTIONAL) mfp_type = WILC_FW_MFP_OPTIONAL; else if (sme->mfp == NL80211_MFP_REQUIRED) mfp_type = WILC_FW_MFP_REQUIRED; wfi_drv->conn_info.mfp_type = mfp_type; ret = wilc_set_join_req(vif, bss->bssid, sme->ie, sme->ie_len); if (ret) { netdev_err(dev, "wilc_set_join_req(): Error\n"); ret = -ENOENT; if (vif->iftype != WILC_CLIENT_MODE) vif->wilc->sta_ch = WILC_INVALID_CHANNEL; wilc_wlan_set_bssid(dev, NULL, WILC_STATION_MODE); wfi_drv->conn_info.conn_result = NULL; kfree(join_params); goto out_put_bss; } kfree(join_params); vif->bss = bss; cfg80211_put_bss(wiphy, bss); return 0; out_put_bss: cfg80211_put_bss(wiphy, bss); out_error: vif->connecting = false; return ret; } static int disconnect(struct wiphy *wiphy, struct net_device *dev, u16 reason_code) { struct wilc_vif *vif = netdev_priv(dev); struct wilc_priv *priv = &vif->priv; struct wilc *wilc = vif->wilc; int 
ret; vif->connecting = false; if (!wilc) return -EIO; if (wilc->close) { /* already disconnected done */ cfg80211_disconnected(dev, 0, NULL, 0, true, GFP_KERNEL); return 0; } if (vif->iftype != WILC_CLIENT_MODE) wilc->sta_ch = WILC_INVALID_CHANNEL; wilc_wlan_set_bssid(priv->dev, NULL, WILC_STATION_MODE); priv->hif_drv->p2p_timeout = 0; ret = wilc_disconnect(vif); if (ret != 0) { netdev_err(priv->dev, "Error in disconnecting\n"); ret = -EINVAL; } vif->bss = NULL; return ret; } static int wilc_wfi_cfg_allocate_wpa_entry(struct wilc_priv *priv, u8 idx) { if (!priv->wilc_gtk[idx]) { priv->wilc_gtk[idx] = kzalloc(sizeof(*priv->wilc_gtk[idx]), GFP_KERNEL); if (!priv->wilc_gtk[idx]) return -ENOMEM; } if (!priv->wilc_ptk[idx]) { priv->wilc_ptk[idx] = kzalloc(sizeof(*priv->wilc_ptk[idx]), GFP_KERNEL); if (!priv->wilc_ptk[idx]) return -ENOMEM; } return 0; } static int wilc_wfi_cfg_allocate_wpa_igtk_entry(struct wilc_priv *priv, u8 idx) { idx -= 4; if (!priv->wilc_igtk[idx]) { priv->wilc_igtk[idx] = kzalloc(sizeof(*priv->wilc_igtk[idx]), GFP_KERNEL); if (!priv->wilc_igtk[idx]) return -ENOMEM; } return 0; } static int wilc_wfi_cfg_copy_wpa_info(struct wilc_wfi_key *key_info, struct key_params *params) { kfree(key_info->key); key_info->key = kmemdup(params->key, params->key_len, GFP_KERNEL); if (!key_info->key) return -ENOMEM; kfree(key_info->seq); if (params->seq_len > 0) { key_info->seq = kmemdup(params->seq, params->seq_len, GFP_KERNEL); if (!key_info->seq) return -ENOMEM; } key_info->cipher = params->cipher; key_info->key_len = params->key_len; key_info->seq_len = params->seq_len; return 0; } static int add_key(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr, struct key_params *params) { int ret = 0, keylen = params->key_len; const u8 *rx_mic = NULL; const u8 *tx_mic = NULL; u8 mode = WILC_FW_SEC_NO; u8 op_mode; struct wilc_vif *vif = netdev_priv(netdev); struct wilc_priv *priv = &vif->priv; struct wilc_wfi_key *key; switch (params->cipher) { case WLAN_CIPHER_SUITE_TKIP: case WLAN_CIPHER_SUITE_CCMP: if (priv->wdev.iftype == NL80211_IFTYPE_AP || priv->wdev.iftype == NL80211_IFTYPE_P2P_GO) { struct wilc_wfi_key *key; ret = wilc_wfi_cfg_allocate_wpa_entry(priv, key_index); if (ret) return -ENOMEM; if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) { tx_mic = params->key + 24; rx_mic = params->key + 16; keylen = params->key_len - 16; } if (!pairwise) { if (params->cipher == WLAN_CIPHER_SUITE_TKIP) mode = WILC_FW_SEC_WPA_TKIP; else mode = WILC_FW_SEC_WPA2_AES; priv->wilc_groupkey = mode; key = priv->wilc_gtk[key_index]; } else { if (params->cipher == WLAN_CIPHER_SUITE_TKIP) mode = WILC_FW_SEC_WPA_TKIP; else mode = priv->wilc_groupkey | WILC_FW_AES; key = priv->wilc_ptk[key_index]; } ret = wilc_wfi_cfg_copy_wpa_info(key, params); if (ret) return -ENOMEM; op_mode = WILC_AP_MODE; } else { if (params->key_len > 16 && params->cipher == WLAN_CIPHER_SUITE_TKIP) { rx_mic = params->key + 24; tx_mic = params->key + 16; keylen = params->key_len - 16; } op_mode = WILC_STATION_MODE; } if (!pairwise) ret = wilc_add_rx_gtk(vif, params->key, keylen, key_index, params->seq_len, params->seq, rx_mic, tx_mic, op_mode, mode); else ret = wilc_add_ptk(vif, params->key, keylen, mac_addr, rx_mic, tx_mic, op_mode, mode, key_index); break; case WLAN_CIPHER_SUITE_AES_CMAC: ret = wilc_wfi_cfg_allocate_wpa_igtk_entry(priv, key_index); if (ret) return -ENOMEM; key = priv->wilc_igtk[key_index - 4]; ret = wilc_wfi_cfg_copy_wpa_info(key, params); if (ret) 
return -ENOMEM; if (priv->wdev.iftype == NL80211_IFTYPE_AP || priv->wdev.iftype == NL80211_IFTYPE_P2P_GO) op_mode = WILC_AP_MODE; else op_mode = WILC_STATION_MODE; ret = wilc_add_igtk(vif, params->key, keylen, params->seq, params->seq_len, mac_addr, op_mode, key_index); break; default: netdev_err(netdev, "%s: Unsupported cipher\n", __func__); ret = -ENOTSUPP; } return ret; } static int del_key(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr) { struct wilc_vif *vif = netdev_priv(netdev); struct wilc_priv *priv = &vif->priv; if (!pairwise && (key_index == 4 || key_index == 5)) { key_index -= 4; if (priv->wilc_igtk[key_index]) { kfree(priv->wilc_igtk[key_index]->key); priv->wilc_igtk[key_index]->key = NULL; kfree(priv->wilc_igtk[key_index]->seq); priv->wilc_igtk[key_index]->seq = NULL; kfree(priv->wilc_igtk[key_index]); priv->wilc_igtk[key_index] = NULL; } } else { if (priv->wilc_gtk[key_index]) { kfree(priv->wilc_gtk[key_index]->key); priv->wilc_gtk[key_index]->key = NULL; kfree(priv->wilc_gtk[key_index]->seq); priv->wilc_gtk[key_index]->seq = NULL; kfree(priv->wilc_gtk[key_index]); priv->wilc_gtk[key_index] = NULL; } if (priv->wilc_ptk[key_index]) { kfree(priv->wilc_ptk[key_index]->key); priv->wilc_ptk[key_index]->key = NULL; kfree(priv->wilc_ptk[key_index]->seq); priv->wilc_ptk[key_index]->seq = NULL; kfree(priv->wilc_ptk[key_index]); priv->wilc_ptk[key_index] = NULL; } } return 0; } static int get_key(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool pairwise, const u8 *mac_addr, void *cookie, void (*callback)(void *cookie, struct key_params *)) { struct wilc_vif *vif = netdev_priv(netdev); struct wilc_priv *priv = &vif->priv; struct key_params key_params; if (!pairwise) { if (key_index == 4 || key_index == 5) { key_index -= 4; key_params.key = priv->wilc_igtk[key_index]->key; key_params.cipher = priv->wilc_igtk[key_index]->cipher; key_params.key_len = priv->wilc_igtk[key_index]->key_len; key_params.seq = priv->wilc_igtk[key_index]->seq; key_params.seq_len = priv->wilc_igtk[key_index]->seq_len; } else { key_params.key = priv->wilc_gtk[key_index]->key; key_params.cipher = priv->wilc_gtk[key_index]->cipher; key_params.key_len = priv->wilc_gtk[key_index]->key_len; key_params.seq = priv->wilc_gtk[key_index]->seq; key_params.seq_len = priv->wilc_gtk[key_index]->seq_len; } } else { key_params.key = priv->wilc_ptk[key_index]->key; key_params.cipher = priv->wilc_ptk[key_index]->cipher; key_params.key_len = priv->wilc_ptk[key_index]->key_len; key_params.seq = priv->wilc_ptk[key_index]->seq; key_params.seq_len = priv->wilc_ptk[key_index]->seq_len; } callback(cookie, &key_params); return 0; } /* wiphy_new_nm() will WARNON if not present */ static int set_default_key(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index, bool unicast, bool multicast) { return 0; } static int set_default_mgmt_key(struct wiphy *wiphy, struct net_device *netdev, int link_id, u8 key_index) { struct wilc_vif *vif = netdev_priv(netdev); return wilc_set_default_mgmt_key_index(vif, key_index); } static int get_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_info *sinfo) { struct wilc_vif *vif = netdev_priv(dev); struct wilc_priv *priv = &vif->priv; struct wilc *wilc = vif->wilc; u32 i = 0; u32 associatedsta = ~0; u32 inactive_time = 0; if (vif->iftype == WILC_AP_MODE || vif->iftype == WILC_GO_MODE) { for (i = 0; i < NUM_STA_ASSOCIATED; i++) { if (!(memcmp(mac, 
priv->assoc_stainfo.sta_associated_bss[i], ETH_ALEN))) { associatedsta = i; break; } } if (associatedsta == ~0) { netdev_err(dev, "sta required is not associated\n"); return -ENOENT; } sinfo->filled |= BIT_ULL(NL80211_STA_INFO_INACTIVE_TIME); wilc_get_inactive_time(vif, mac, &inactive_time); sinfo->inactive_time = 1000 * inactive_time; } else if (vif->iftype == WILC_STATION_MODE) { struct rf_info stats; if (!wilc->initialized) return -EBUSY; wilc_get_statistics(vif, &stats); sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL) | BIT_ULL(NL80211_STA_INFO_RX_PACKETS) | BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | BIT_ULL(NL80211_STA_INFO_TX_FAILED) | BIT_ULL(NL80211_STA_INFO_TX_BITRATE); sinfo->signal = stats.rssi; sinfo->rx_packets = stats.rx_cnt; sinfo->tx_packets = stats.tx_cnt + stats.tx_fail_cnt; sinfo->tx_failed = stats.tx_fail_cnt; sinfo->txrate.legacy = stats.link_speed * 10; if (stats.link_speed > TCP_ACK_FILTER_LINK_SPEED_THRESH && stats.link_speed != DEFAULT_LINK_SPEED) wilc_enable_tcp_ack_filter(vif, true); else if (stats.link_speed != DEFAULT_LINK_SPEED) wilc_enable_tcp_ack_filter(vif, false); } return 0; } static int change_bss(struct wiphy *wiphy, struct net_device *dev, struct bss_parameters *params) { return 0; } static int set_wiphy_params(struct wiphy *wiphy, u32 changed) { int ret = -EINVAL; struct cfg_param_attr cfg_param_val; struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; struct wilc_priv *priv; int srcu_idx; srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) goto out; priv = &vif->priv; cfg_param_val.flag = 0; if (changed & WIPHY_PARAM_RETRY_SHORT) { netdev_dbg(vif->ndev, "Setting WIPHY_PARAM_RETRY_SHORT %d\n", wiphy->retry_short); cfg_param_val.flag |= WILC_CFG_PARAM_RETRY_SHORT; cfg_param_val.short_retry_limit = wiphy->retry_short; } if (changed & WIPHY_PARAM_RETRY_LONG) { netdev_dbg(vif->ndev, "Setting WIPHY_PARAM_RETRY_LONG %d\n", wiphy->retry_long); cfg_param_val.flag |= WILC_CFG_PARAM_RETRY_LONG; cfg_param_val.long_retry_limit = wiphy->retry_long; } if (changed & WIPHY_PARAM_FRAG_THRESHOLD) { if (wiphy->frag_threshold > 255 && wiphy->frag_threshold < 7937) { netdev_dbg(vif->ndev, "Setting WIPHY_PARAM_FRAG_THRESHOLD %d\n", wiphy->frag_threshold); cfg_param_val.flag |= WILC_CFG_PARAM_FRAG_THRESHOLD; cfg_param_val.frag_threshold = wiphy->frag_threshold; } else { netdev_err(vif->ndev, "Fragmentation threshold out of range\n"); goto out; } } if (changed & WIPHY_PARAM_RTS_THRESHOLD) { if (wiphy->rts_threshold > 255) { netdev_dbg(vif->ndev, "Setting WIPHY_PARAM_RTS_THRESHOLD %d\n", wiphy->rts_threshold); cfg_param_val.flag |= WILC_CFG_PARAM_RTS_THRESHOLD; cfg_param_val.rts_threshold = wiphy->rts_threshold; } else { netdev_err(vif->ndev, "RTS threshold out of range\n"); goto out; } } ret = wilc_hif_set_cfg(vif, &cfg_param_val); if (ret) netdev_err(priv->dev, "Error in setting WIPHY PARAMS\n"); out: srcu_read_unlock(&wl->srcu, srcu_idx); return ret; } static int set_pmksa(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { struct wilc_vif *vif = netdev_priv(netdev); struct wilc_priv *priv = &vif->priv; u32 i; int ret = 0; u8 flag = 0; for (i = 0; i < priv->pmkid_list.numpmkid; i++) { if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid, ETH_ALEN)) { flag = PMKID_FOUND; break; } } if (i < WILC_MAX_NUM_PMKIDS) { memcpy(priv->pmkid_list.pmkidlist[i].bssid, pmksa->bssid, ETH_ALEN); memcpy(priv->pmkid_list.pmkidlist[i].pmkid, pmksa->pmkid, WLAN_PMKID_LEN); if (!(flag == PMKID_FOUND)) 
priv->pmkid_list.numpmkid++; } else { netdev_err(netdev, "Invalid PMKID index\n"); ret = -EINVAL; } if (!ret) ret = wilc_set_pmkid_info(vif, &priv->pmkid_list); return ret; } static int del_pmksa(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_pmksa *pmksa) { u32 i; struct wilc_vif *vif = netdev_priv(netdev); struct wilc_priv *priv = &vif->priv; for (i = 0; i < priv->pmkid_list.numpmkid; i++) { if (!memcmp(pmksa->bssid, priv->pmkid_list.pmkidlist[i].bssid, ETH_ALEN)) { memset(&priv->pmkid_list.pmkidlist[i], 0, sizeof(struct wilc_pmkid)); break; } } if (i == priv->pmkid_list.numpmkid) return -EINVAL; for (; i < (priv->pmkid_list.numpmkid - 1); i++) { memcpy(priv->pmkid_list.pmkidlist[i].bssid, priv->pmkid_list.pmkidlist[i + 1].bssid, ETH_ALEN); memcpy(priv->pmkid_list.pmkidlist[i].pmkid, priv->pmkid_list.pmkidlist[i + 1].pmkid, WLAN_PMKID_LEN); } priv->pmkid_list.numpmkid--; return 0; } static int flush_pmksa(struct wiphy *wiphy, struct net_device *netdev) { struct wilc_vif *vif = netdev_priv(netdev); memset(&vif->priv.pmkid_list, 0, sizeof(struct wilc_pmkid_attr)); return 0; } static inline void wilc_wfi_cfg_parse_ch_attr(u8 *buf, u32 len, u8 sta_ch) { struct wilc_attr_entry *e; struct wilc_attr_ch_list *ch_list; struct wilc_attr_oper_ch *op_ch; u32 index = 0; u8 ch_list_idx = 0; u8 op_ch_idx = 0; if (sta_ch == WILC_INVALID_CHANNEL) return; while (index + sizeof(*e) <= len) { u16 attr_size; e = (struct wilc_attr_entry *)&buf[index]; attr_size = le16_to_cpu(e->attr_len); if (index + sizeof(*e) + attr_size > len) return; if (e->attr_type == IEEE80211_P2P_ATTR_CHANNEL_LIST && attr_size >= (sizeof(struct wilc_attr_ch_list) - sizeof(*e))) ch_list_idx = index; else if (e->attr_type == IEEE80211_P2P_ATTR_OPER_CHANNEL && attr_size == (sizeof(struct wilc_attr_oper_ch) - sizeof(*e))) op_ch_idx = index; if (ch_list_idx && op_ch_idx) break; index += sizeof(*e) + attr_size; } if (ch_list_idx) { u16 elem_size; ch_list = (struct wilc_attr_ch_list *)&buf[ch_list_idx]; /* the number of bytes following the final 'elem' member */ elem_size = le16_to_cpu(ch_list->attr_len) - (sizeof(*ch_list) - sizeof(struct wilc_attr_entry)); for (unsigned int i = 0; i < elem_size;) { struct wilc_ch_list_elem *e; e = (struct wilc_ch_list_elem *)(ch_list->elem + i); i += sizeof(*e); if (i > elem_size) break; i += e->no_of_channels; if (i > elem_size) break; if (e->op_class == WILC_WLAN_OPERATING_CLASS_2_4GHZ) { memset(e->ch_list, sta_ch, e->no_of_channels); break; } } } if (op_ch_idx) { op_ch = (struct wilc_attr_oper_ch *)&buf[op_ch_idx]; op_ch->op_class = WILC_WLAN_OPERATING_CLASS_2_4GHZ; op_ch->op_channel = sta_ch; } } bool wilc_wfi_mgmt_frame_rx(struct wilc_vif *vif, u8 *buff, u32 size) { struct wilc *wl = vif->wilc; struct wilc_priv *priv = &vif->priv; int freq; freq = ieee80211_channel_to_frequency(wl->op_ch, NL80211_BAND_2GHZ); return cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0); } void wilc_wfi_p2p_rx(struct wilc_vif *vif, u8 *buff, u32 size) { struct wilc *wl = vif->wilc; struct wilc_priv *priv = &vif->priv; struct host_if_drv *wfi_drv = priv->hif_drv; struct ieee80211_mgmt *mgmt; struct wilc_vendor_specific_ie *p; struct wilc_p2p_pub_act_frame *d; int ie_offset = offsetof(struct ieee80211_mgmt, u) + sizeof(*d); const u8 *vendor_ie; u32 header, pkt_offset; s32 freq; header = get_unaligned_le32(buff - HOST_HDR_OFFSET); pkt_offset = FIELD_GET(WILC_PKT_HDR_OFFSET_FIELD, header); if (pkt_offset & IS_MANAGMEMENT_CALLBACK) { bool ack = false; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)buff; 
if (ieee80211_is_probe_resp(hdr->frame_control) || pkt_offset & IS_MGMT_STATUS_SUCCES) ack = true; cfg80211_mgmt_tx_status(&priv->wdev, priv->tx_cookie, buff, size, ack, GFP_KERNEL); return; } freq = ieee80211_channel_to_frequency(wl->op_ch, NL80211_BAND_2GHZ); mgmt = (struct ieee80211_mgmt *)buff; if (!ieee80211_is_action(mgmt->frame_control)) goto out_rx_mgmt; if (priv->cfg_scanning && time_after_eq(jiffies, (unsigned long)wfi_drv->p2p_timeout)) { netdev_dbg(vif->ndev, "Receiving action wrong ch\n"); return; } if (!ieee80211_is_public_action((struct ieee80211_hdr *)buff, size)) goto out_rx_mgmt; d = (struct wilc_p2p_pub_act_frame *)(&mgmt->u.action); if (d->oui_subtype != GO_NEG_REQ && d->oui_subtype != GO_NEG_RSP && d->oui_subtype != P2P_INV_REQ && d->oui_subtype != P2P_INV_RSP) goto out_rx_mgmt; vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, buff + ie_offset, size - ie_offset); if (!vendor_ie) goto out_rx_mgmt; p = (struct wilc_vendor_specific_ie *)vendor_ie; wilc_wfi_cfg_parse_ch_attr(p->attr, p->tag_len - 4, vif->wilc->sta_ch); out_rx_mgmt: cfg80211_rx_mgmt(&priv->wdev, freq, 0, buff, size, 0); } static void wilc_wfi_mgmt_tx_complete(void *priv, int status) { struct wilc_p2p_mgmt_data *pv_data = priv; kfree(pv_data->buff); kfree(pv_data); } static void wilc_wfi_remain_on_channel_expired(void *data, u64 cookie) { struct wilc_vif *vif = data; struct wilc_priv *priv = &vif->priv; struct wilc_wfi_p2p_listen_params *params = &priv->remain_on_ch_params; if (cookie != params->listen_cookie) return; priv->p2p_listen_state = false; cfg80211_remain_on_channel_expired(&priv->wdev, params->listen_cookie, params->listen_ch, GFP_KERNEL); } static int remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, struct ieee80211_channel *chan, unsigned int duration, u64 *cookie) { int ret = 0; struct wilc_vif *vif = netdev_priv(wdev->netdev); struct wilc_priv *priv = &vif->priv; u64 id; if (wdev->iftype == NL80211_IFTYPE_AP) { netdev_dbg(vif->ndev, "Required while in AP mode\n"); return ret; } id = ++priv->inc_roc_cookie; if (id == 0) id = ++priv->inc_roc_cookie; ret = wilc_remain_on_channel(vif, id, duration, chan->hw_value, wilc_wfi_remain_on_channel_expired, (void *)vif); if (ret) return ret; vif->wilc->op_ch = chan->hw_value; priv->remain_on_ch_params.listen_ch = chan; priv->remain_on_ch_params.listen_cookie = id; *cookie = id; priv->p2p_listen_state = true; priv->remain_on_ch_params.listen_duration = duration; cfg80211_ready_on_channel(wdev, *cookie, chan, duration, GFP_KERNEL); mod_timer(&vif->hif_drv->remain_on_ch_timer, jiffies + msecs_to_jiffies(duration + 1000)); return ret; } static int cancel_remain_on_channel(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie) { struct wilc_vif *vif = netdev_priv(wdev->netdev); struct wilc_priv *priv = &vif->priv; if (cookie != priv->remain_on_ch_params.listen_cookie) return -ENOENT; return wilc_listen_state_expired(vif, cookie); } static int mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev, struct cfg80211_mgmt_tx_params *params, u64 *cookie) { struct ieee80211_channel *chan = params->chan; unsigned int wait = params->wait; const u8 *buf = params->buf; size_t len = params->len; const struct ieee80211_mgmt *mgmt; struct wilc_p2p_mgmt_data *mgmt_tx; struct wilc_vif *vif = netdev_priv(wdev->netdev); struct wilc_priv *priv = &vif->priv; struct host_if_drv *wfi_drv = priv->hif_drv; struct wilc_vendor_specific_ie *p; struct wilc_p2p_pub_act_frame *d; int ie_offset = offsetof(struct ieee80211_mgmt, u) + sizeof(*d); 
const u8 *vendor_ie; int ret = 0; *cookie = get_random_u32(); priv->tx_cookie = *cookie; mgmt = (const struct ieee80211_mgmt *)buf; if (!ieee80211_is_mgmt(mgmt->frame_control)) goto out; mgmt_tx = kmalloc(sizeof(*mgmt_tx), GFP_KERNEL); if (!mgmt_tx) { ret = -ENOMEM; goto out; } mgmt_tx->buff = kmemdup(buf, len, GFP_KERNEL); if (!mgmt_tx->buff) { ret = -ENOMEM; kfree(mgmt_tx); goto out; } mgmt_tx->size = len; if (ieee80211_is_probe_resp(mgmt->frame_control)) { wilc_set_mac_chnl_num(vif, chan->hw_value); vif->wilc->op_ch = chan->hw_value; goto out_txq_add_pkt; } if (!ieee80211_is_public_action((struct ieee80211_hdr *)buf, len)) { if (chan) wilc_set_mac_chnl_num(vif, chan->hw_value); else wilc_set_mac_chnl_num(vif, vif->wilc->op_ch); goto out_set_timeout; } d = (struct wilc_p2p_pub_act_frame *)(&mgmt->u.action); if (d->oui_type != WLAN_OUI_TYPE_WFA_P2P || d->oui_subtype != GO_NEG_CONF) { wilc_set_mac_chnl_num(vif, chan->hw_value); vif->wilc->op_ch = chan->hw_value; } if (d->oui_subtype != P2P_INV_REQ && d->oui_subtype != P2P_INV_RSP) goto out_set_timeout; vendor_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, mgmt_tx->buff + ie_offset, len - ie_offset); if (!vendor_ie) goto out_set_timeout; p = (struct wilc_vendor_specific_ie *)vendor_ie; wilc_wfi_cfg_parse_ch_attr(p->attr, p->tag_len - 4, vif->wilc->sta_ch); out_set_timeout: wfi_drv->p2p_timeout = (jiffies + msecs_to_jiffies(wait)); out_txq_add_pkt: wilc_wlan_txq_add_mgmt_pkt(wdev->netdev, mgmt_tx, mgmt_tx->buff, mgmt_tx->size, wilc_wfi_mgmt_tx_complete); out: return ret; } static int mgmt_tx_cancel_wait(struct wiphy *wiphy, struct wireless_dev *wdev, u64 cookie) { struct wilc_vif *vif = netdev_priv(wdev->netdev); struct wilc_priv *priv = &vif->priv; struct host_if_drv *wfi_drv = priv->hif_drv; wfi_drv->p2p_timeout = jiffies; if (!priv->p2p_listen_state) { struct wilc_wfi_p2p_listen_params *params; params = &priv->remain_on_ch_params; cfg80211_remain_on_channel_expired(wdev, params->listen_cookie, params->listen_ch, GFP_KERNEL); } return 0; } void wilc_update_mgmt_frame_registrations(struct wiphy *wiphy, struct wireless_dev *wdev, struct mgmt_frame_regs *upd) { struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif = netdev_priv(wdev->netdev); u32 presp_bit = BIT(IEEE80211_STYPE_PROBE_REQ >> 4); u32 action_bit = BIT(IEEE80211_STYPE_ACTION >> 4); u32 pauth_bit = BIT(IEEE80211_STYPE_AUTH >> 4); if (wl->initialized) { bool prev = vif->mgmt_reg_stypes & presp_bit; bool now = upd->interface_stypes & presp_bit; if (now != prev) wilc_frame_register(vif, IEEE80211_STYPE_PROBE_REQ, now); prev = vif->mgmt_reg_stypes & action_bit; now = upd->interface_stypes & action_bit; if (now != prev) wilc_frame_register(vif, IEEE80211_STYPE_ACTION, now); prev = vif->mgmt_reg_stypes & pauth_bit; now = upd->interface_stypes & pauth_bit; if (now != prev) wilc_frame_register(vif, IEEE80211_STYPE_AUTH, now); } vif->mgmt_reg_stypes = upd->interface_stypes & (presp_bit | action_bit | pauth_bit); } static int external_auth(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_external_auth_params *auth) { struct wilc_vif *vif = netdev_priv(dev); if (auth->status == WLAN_STATUS_SUCCESS) wilc_set_external_auth_param(vif, auth); return 0; } static int set_cqm_rssi_config(struct wiphy *wiphy, struct net_device *dev, s32 rssi_thold, u32 rssi_hyst) { return 0; } static int dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { struct wilc_vif *vif = netdev_priv(dev); int ret; if (idx != 0) return 
-ENOENT; ret = wilc_get_rssi(vif, &sinfo->signal); if (ret) return ret; sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL); memcpy(mac, vif->priv.associated_bss, ETH_ALEN); return 0; } static int set_power_mgmt(struct wiphy *wiphy, struct net_device *dev, bool enabled, int timeout) { struct wilc_vif *vif = netdev_priv(dev); struct wilc_priv *priv = &vif->priv; if (!priv->hif_drv) return -EIO; wilc_set_power_mgmt(vif, enabled, timeout); return 0; } static int change_virtual_intf(struct wiphy *wiphy, struct net_device *dev, enum nl80211_iftype type, struct vif_params *params) { struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif = netdev_priv(dev); struct wilc_priv *priv = &vif->priv; switch (type) { case NL80211_IFTYPE_STATION: vif->connecting = false; dev->ieee80211_ptr->iftype = type; priv->wdev.iftype = type; vif->monitor_flag = 0; if (vif->iftype == WILC_AP_MODE || vif->iftype == WILC_GO_MODE) wilc_wfi_deinit_mon_interface(wl, true); vif->iftype = WILC_STATION_MODE; if (wl->initialized) wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), WILC_STATION_MODE, vif->idx); memset(priv->assoc_stainfo.sta_associated_bss, 0, WILC_MAX_NUM_STA * ETH_ALEN); break; case NL80211_IFTYPE_P2P_CLIENT: vif->connecting = false; dev->ieee80211_ptr->iftype = type; priv->wdev.iftype = type; vif->monitor_flag = 0; vif->iftype = WILC_CLIENT_MODE; if (wl->initialized) wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), WILC_STATION_MODE, vif->idx); break; case NL80211_IFTYPE_AP: dev->ieee80211_ptr->iftype = type; priv->wdev.iftype = type; vif->iftype = WILC_AP_MODE; if (wl->initialized) wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), WILC_AP_MODE, vif->idx); break; case NL80211_IFTYPE_P2P_GO: dev->ieee80211_ptr->iftype = type; priv->wdev.iftype = type; vif->iftype = WILC_GO_MODE; if (wl->initialized) wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), WILC_AP_MODE, vif->idx); break; default: netdev_err(dev, "Unknown interface type= %d\n", type); return -EINVAL; } return 0; } static int start_ap(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_ap_settings *settings) { struct wilc_vif *vif = netdev_priv(dev); int ret; ret = set_channel(wiphy, &settings->chandef); if (ret != 0) netdev_err(dev, "Error in setting channel\n"); wilc_wlan_set_bssid(dev, dev->dev_addr, WILC_AP_MODE); return wilc_add_beacon(vif, settings->beacon_interval, settings->dtim_period, &settings->beacon); } static int change_beacon(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_beacon_data *beacon) { struct wilc_vif *vif = netdev_priv(dev); return wilc_add_beacon(vif, 0, 0, beacon); } static int stop_ap(struct wiphy *wiphy, struct net_device *dev, unsigned int link_id) { int ret; struct wilc_vif *vif = netdev_priv(dev); wilc_wlan_set_bssid(dev, NULL, WILC_AP_MODE); ret = wilc_del_beacon(vif); if (ret) netdev_err(dev, "Host delete beacon fail\n"); return ret; } static int add_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params) { int ret = 0; struct wilc_vif *vif = netdev_priv(dev); struct wilc_priv *priv = &vif->priv; if (vif->iftype == WILC_AP_MODE || vif->iftype == WILC_GO_MODE) { memcpy(priv->assoc_stainfo.sta_associated_bss[params->aid], mac, ETH_ALEN); ret = wilc_add_station(vif, mac, params); if (ret) netdev_err(dev, "Host add station fail\n"); } return ret; } static int del_station(struct wiphy *wiphy, struct net_device *dev, struct station_del_parameters *params) { const u8 *mac = params->mac; int ret = 0; struct wilc_vif *vif = netdev_priv(dev); struct 
wilc_priv *priv = &vif->priv; struct sta_info *info; if (!(vif->iftype == WILC_AP_MODE || vif->iftype == WILC_GO_MODE)) return ret; info = &priv->assoc_stainfo; if (!mac) ret = wilc_del_allstation(vif, info->sta_associated_bss); ret = wilc_del_station(vif, mac); if (ret) netdev_err(dev, "Host delete station fail\n"); return ret; } static int change_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_parameters *params) { int ret = 0; struct wilc_vif *vif = netdev_priv(dev); if (vif->iftype == WILC_AP_MODE || vif->iftype == WILC_GO_MODE) { ret = wilc_edit_station(vif, mac, params); if (ret) netdev_err(dev, "Host edit station fail\n"); } return ret; } static struct wilc_vif *wilc_get_vif_from_type(struct wilc *wl, int type) { struct wilc_vif *vif; list_for_each_entry_rcu(vif, &wl->vif_list, list) { if (vif->iftype == type) return vif; } return NULL; } static struct wireless_dev *add_virtual_intf(struct wiphy *wiphy, const char *name, unsigned char name_assign_type, enum nl80211_iftype type, struct vif_params *params) { struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; struct wireless_dev *wdev; int iftype; if (type == NL80211_IFTYPE_MONITOR) { struct net_device *ndev; int srcu_idx; srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_vif_from_type(wl, WILC_AP_MODE); if (!vif) { vif = wilc_get_vif_from_type(wl, WILC_GO_MODE); if (!vif) { srcu_read_unlock(&wl->srcu, srcu_idx); goto validate_interface; } } if (vif->monitor_flag) { srcu_read_unlock(&wl->srcu, srcu_idx); goto validate_interface; } ndev = wilc_wfi_init_mon_interface(wl, name, vif->ndev); if (ndev) { vif->monitor_flag = 1; } else { srcu_read_unlock(&wl->srcu, srcu_idx); return ERR_PTR(-EINVAL); } wdev = &vif->priv.wdev; srcu_read_unlock(&wl->srcu, srcu_idx); return wdev; } validate_interface: mutex_lock(&wl->vif_mutex); if (wl->vif_num == WILC_NUM_CONCURRENT_IFC) { pr_err("Reached maximum number of interface\n"); mutex_unlock(&wl->vif_mutex); return ERR_PTR(-EINVAL); } mutex_unlock(&wl->vif_mutex); switch (type) { case NL80211_IFTYPE_STATION: iftype = WILC_STATION_MODE; break; case NL80211_IFTYPE_AP: iftype = WILC_AP_MODE; break; default: return ERR_PTR(-EOPNOTSUPP); } vif = wilc_netdev_ifc_init(wl, name, iftype, type, true); if (IS_ERR(vif)) return ERR_CAST(vif); return &vif->priv.wdev; } static int del_virtual_intf(struct wiphy *wiphy, struct wireless_dev *wdev) { struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; if (wdev->iftype == NL80211_IFTYPE_AP || wdev->iftype == NL80211_IFTYPE_P2P_GO) wilc_wfi_deinit_mon_interface(wl, true); vif = netdev_priv(wdev->netdev); cfg80211_stop_iface(wiphy, wdev, GFP_KERNEL); cfg80211_unregister_netdevice(vif->ndev); vif->monitor_flag = 0; wilc_set_operation_mode(vif, 0, 0, 0); mutex_lock(&wl->vif_mutex); list_del_rcu(&vif->list); wl->vif_num--; mutex_unlock(&wl->vif_mutex); synchronize_srcu(&wl->srcu); return 0; } static int wilc_suspend(struct wiphy *wiphy, struct cfg80211_wowlan *wow) { struct wilc *wl = wiphy_priv(wiphy); if (!wow && wilc_wlan_get_num_conn_ifcs(wl)) wl->suspend_event = true; else wl->suspend_event = false; return 0; } static int wilc_resume(struct wiphy *wiphy) { return 0; } static void wilc_set_wakeup(struct wiphy *wiphy, bool enabled) { struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; int srcu_idx; srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { srcu_read_unlock(&wl->srcu, srcu_idx); return; } netdev_info(vif->ndev, "cfg set wake up = %d\n", enabled); 
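	/*
	 * Push the WoWLAN trigger setting to the firmware while still holding
	 * the SRCU read lock, so the vif cannot be torn down while the config
	 * packet is in flight.
	 */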
wilc_set_wowlan_trigger(vif, enabled); srcu_read_unlock(&wl->srcu, srcu_idx); } static int set_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, enum nl80211_tx_power_setting type, int mbm) { int ret; int srcu_idx; s32 tx_power = MBM_TO_DBM(mbm); struct wilc *wl = wiphy_priv(wiphy); struct wilc_vif *vif; if (!wl->initialized) return -EIO; srcu_idx = srcu_read_lock(&wl->srcu); vif = wilc_get_wl_to_vif(wl); if (IS_ERR(vif)) { srcu_read_unlock(&wl->srcu, srcu_idx); return -EINVAL; } netdev_info(vif->ndev, "Setting tx power %d\n", tx_power); if (tx_power < 0) tx_power = 0; else if (tx_power > 18) tx_power = 18; ret = wilc_set_tx_power(vif, tx_power); if (ret) netdev_err(vif->ndev, "Failed to set tx power\n"); srcu_read_unlock(&wl->srcu, srcu_idx); return ret; } static int get_tx_power(struct wiphy *wiphy, struct wireless_dev *wdev, int *dbm) { int ret; struct wilc_vif *vif = netdev_priv(wdev->netdev); struct wilc *wl = vif->wilc; /* If firmware is not started, return. */ if (!wl->initialized) return -EIO; ret = wilc_get_tx_power(vif, (u8 *)dbm); if (ret) netdev_err(vif->ndev, "Failed to get tx power\n"); return ret; } static const struct cfg80211_ops wilc_cfg80211_ops = { .set_monitor_channel = set_channel, .scan = scan, .connect = connect, .disconnect = disconnect, .add_key = add_key, .del_key = del_key, .get_key = get_key, .set_default_key = set_default_key, .set_default_mgmt_key = set_default_mgmt_key, .add_virtual_intf = add_virtual_intf, .del_virtual_intf = del_virtual_intf, .change_virtual_intf = change_virtual_intf, .start_ap = start_ap, .change_beacon = change_beacon, .stop_ap = stop_ap, .add_station = add_station, .del_station = del_station, .change_station = change_station, .get_station = get_station, .dump_station = dump_station, .change_bss = change_bss, .set_wiphy_params = set_wiphy_params, .external_auth = external_auth, .set_pmksa = set_pmksa, .del_pmksa = del_pmksa, .flush_pmksa = flush_pmksa, .remain_on_channel = remain_on_channel, .cancel_remain_on_channel = cancel_remain_on_channel, .mgmt_tx_cancel_wait = mgmt_tx_cancel_wait, .mgmt_tx = mgmt_tx, .update_mgmt_frame_registrations = wilc_update_mgmt_frame_registrations, .set_power_mgmt = set_power_mgmt, .set_cqm_rssi_config = set_cqm_rssi_config, .suspend = wilc_suspend, .resume = wilc_resume, .set_wakeup = wilc_set_wakeup, .set_tx_power = set_tx_power, .get_tx_power = get_tx_power, }; static void wlan_init_locks(struct wilc *wl) { mutex_init(&wl->hif_cs); mutex_init(&wl->rxq_cs); mutex_init(&wl->cfg_cmd_lock); mutex_init(&wl->vif_mutex); mutex_init(&wl->deinit_lock); spin_lock_init(&wl->txq_spinlock); mutex_init(&wl->txq_add_to_head_cs); init_completion(&wl->txq_event); init_completion(&wl->cfg_event); init_completion(&wl->sync_event); init_completion(&wl->txq_thread_started); init_srcu_struct(&wl->srcu); } void wlan_deinit_locks(struct wilc *wilc) { mutex_destroy(&wilc->hif_cs); mutex_destroy(&wilc->rxq_cs); mutex_destroy(&wilc->cfg_cmd_lock); mutex_destroy(&wilc->txq_add_to_head_cs); mutex_destroy(&wilc->vif_mutex); mutex_destroy(&wilc->deinit_lock); cleanup_srcu_struct(&wilc->srcu); } int wilc_cfg80211_init(struct wilc **wilc, struct device *dev, int io_type, const struct wilc_hif_func *ops) { struct wilc *wl; struct wilc_vif *vif; int ret, i; wl = wilc_create_wiphy(dev); if (!wl) return -EINVAL; wlan_init_locks(wl); ret = wilc_wlan_cfg_init(wl); if (ret) goto free_wl; *wilc = wl; wl->io_type = io_type; wl->hif_func = ops; for (i = 0; i < NQUEUES; i++) INIT_LIST_HEAD(&wl->txq[i].txq_head.list); 
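	/*
	 * With the per-queue TX lists ready, initialise the RX queue head and
	 * the vif list, then create the default "wlan%d" station interface.
	 */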
INIT_LIST_HEAD(&wl->rxq_head.list); INIT_LIST_HEAD(&wl->vif_list); vif = wilc_netdev_ifc_init(wl, "wlan%d", WILC_STATION_MODE, NL80211_IFTYPE_STATION, false); if (IS_ERR(vif)) { ret = PTR_ERR(vif); goto free_cfg; } return 0; free_cfg: wilc_wlan_cfg_deinit(wl); free_wl: wlan_deinit_locks(wl); wiphy_unregister(wl->wiphy); wiphy_free(wl->wiphy); return ret; } EXPORT_SYMBOL_GPL(wilc_cfg80211_init); struct wilc *wilc_create_wiphy(struct device *dev) { struct wiphy *wiphy; struct wilc *wl; int ret; wiphy = wiphy_new(&wilc_cfg80211_ops, sizeof(*wl)); if (!wiphy) return NULL; wl = wiphy_priv(wiphy); memcpy(wl->bitrates, wilc_bitrates, sizeof(wilc_bitrates)); memcpy(wl->channels, wilc_2ghz_channels, sizeof(wilc_2ghz_channels)); wl->band.bitrates = wl->bitrates; wl->band.n_bitrates = ARRAY_SIZE(wl->bitrates); wl->band.channels = wl->channels; wl->band.n_channels = ARRAY_SIZE(wilc_2ghz_channels); wl->band.ht_cap.ht_supported = 1; wl->band.ht_cap.cap |= (1 << IEEE80211_HT_CAP_RX_STBC_SHIFT); wl->band.ht_cap.mcs.rx_mask[0] = 0xff; wl->band.ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K; wl->band.ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_NONE; wiphy->bands[NL80211_BAND_2GHZ] = &wl->band; wiphy->max_scan_ssids = WILC_MAX_NUM_PROBED_SSID; #ifdef CONFIG_PM wiphy->wowlan = &wowlan_support; #endif wiphy->max_num_pmkids = WILC_MAX_NUM_PMKIDS; wiphy->max_scan_ie_len = 1000; wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; memcpy(wl->cipher_suites, wilc_cipher_suites, sizeof(wilc_cipher_suites)); wiphy->cipher_suites = wl->cipher_suites; wiphy->n_cipher_suites = ARRAY_SIZE(wilc_cipher_suites); wiphy->mgmt_stypes = wilc_wfi_cfg80211_mgmt_types; wiphy->max_remain_on_channel_duration = 500; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP) | BIT(NL80211_IFTYPE_MONITOR) | BIT(NL80211_IFTYPE_P2P_GO) | BIT(NL80211_IFTYPE_P2P_CLIENT); wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; wiphy->features |= NL80211_FEATURE_SAE; set_wiphy_dev(wiphy, dev); wl->wiphy = wiphy; ret = wiphy_register(wiphy); if (ret) { wiphy_free(wiphy); return NULL; } return wl; } int wilc_init_host_int(struct net_device *net) { int ret; struct wilc_vif *vif = netdev_priv(net); struct wilc_priv *priv = &vif->priv; priv->p2p_listen_state = false; mutex_init(&priv->scan_req_lock); ret = wilc_init(net, &priv->hif_drv); if (ret) netdev_err(net, "Error while initializing hostinterface\n"); return ret; } void wilc_deinit_host_int(struct net_device *net) { int ret; struct wilc_vif *vif = netdev_priv(net); struct wilc_priv *priv = &vif->priv; priv->p2p_listen_state = false; flush_workqueue(vif->wilc->hif_workqueue); mutex_destroy(&priv->scan_req_lock); ret = wilc_deinit(vif); if (ret) netdev_err(net, "Error while deinitializing host interface\n"); }
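/*
 * Rough usage sketch for wilc_cfg80211_init() above (not part of this file):
 * a bus glue driver's probe routine is expected to bring this layer up with
 * a struct wilc_hif_func table for its bus.  The ops table name
 * "my_bus_hif_ops" and the WILC_HIF_SDIO io_type are illustrative
 * assumptions only.
 *
 *	struct wilc *wilc;
 *	int ret;
 *
 *	ret = wilc_cfg80211_init(&wilc, dev, WILC_HIF_SDIO, &my_bus_hif_ops);
 *	if (ret)
 *		return ret;
 */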
linux-master
drivers/net/wireless/microchip/wilc1000/cfg80211.c
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries. * All rights reserved. */ #include "netdev.h" #define WILC_HIF_SCAN_TIMEOUT_MS 5000 #define WILC_HIF_CONNECT_TIMEOUT_MS 9500 #define WILC_FALSE_FRMWR_CHANNEL 100 #define WILC_SCAN_WID_LIST_SIZE 6 struct wilc_rcvd_mac_info { u8 status; }; struct wilc_set_multicast { u32 enabled; u32 cnt; u8 *mc_list; }; struct host_if_wowlan_trigger { u8 wowlan_trigger; }; struct wilc_del_all_sta { u8 assoc_sta; u8 mac[WILC_MAX_NUM_STA][ETH_ALEN]; }; union wilc_message_body { struct wilc_rcvd_net_info net_info; struct wilc_rcvd_mac_info mac_info; struct wilc_set_multicast mc_info; struct wilc_remain_ch remain_on_ch; char *data; struct host_if_wowlan_trigger wow_trigger; }; struct host_if_msg { union wilc_message_body body; struct wilc_vif *vif; struct work_struct work; void (*fn)(struct work_struct *ws); struct completion work_comp; bool is_sync; }; /* 'msg' should be free by the caller for syc */ static struct host_if_msg* wilc_alloc_work(struct wilc_vif *vif, void (*work_fun)(struct work_struct *), bool is_sync) { struct host_if_msg *msg; if (!work_fun) return ERR_PTR(-EINVAL); msg = kzalloc(sizeof(*msg), GFP_ATOMIC); if (!msg) return ERR_PTR(-ENOMEM); msg->fn = work_fun; msg->vif = vif; msg->is_sync = is_sync; if (is_sync) init_completion(&msg->work_comp); return msg; } static int wilc_enqueue_work(struct host_if_msg *msg) { INIT_WORK(&msg->work, msg->fn); if (!msg->vif || !msg->vif->wilc || !msg->vif->wilc->hif_workqueue) return -EINVAL; if (!queue_work(msg->vif->wilc->hif_workqueue, &msg->work)) return -EINVAL; return 0; } /* The idx starts from 0 to (NUM_CONCURRENT_IFC - 1), but 0 index used as * special purpose in wilc device, so we add 1 to the index to starts from 1. * As a result, the returned index will be 1 to NUM_CONCURRENT_IFC. */ int wilc_get_vif_idx(struct wilc_vif *vif) { return vif->idx + 1; } /* We need to minus 1 from idx which is from wilc device to get real index * of wilc->vif[], because we add 1 when pass to wilc device in the function * wilc_get_vif_idx. * As a result, the index should be between 0 and (NUM_CONCURRENT_IFC - 1). 
*/ static struct wilc_vif *wilc_get_vif_from_idx(struct wilc *wilc, int idx) { int index = idx - 1; struct wilc_vif *vif; if (index < 0 || index >= WILC_NUM_CONCURRENT_IFC) return NULL; list_for_each_entry_rcu(vif, &wilc->vif_list, list) { if (vif->idx == index) return vif; } return NULL; } static int handle_scan_done(struct wilc_vif *vif, enum scan_event evt) { int result = 0; u8 abort_running_scan; struct wid wid; struct host_if_drv *hif_drv = vif->hif_drv; struct wilc_user_scan_req *scan_req; if (evt == SCAN_EVENT_ABORTED) { abort_running_scan = 1; wid.id = WID_ABORT_RUNNING_SCAN; wid.type = WID_CHAR; wid.val = (s8 *)&abort_running_scan; wid.size = sizeof(char); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) { netdev_err(vif->ndev, "Failed to set abort running\n"); result = -EFAULT; } } if (!hif_drv) { netdev_err(vif->ndev, "%s: hif driver is NULL\n", __func__); return result; } scan_req = &hif_drv->usr_scan_req; if (scan_req->scan_result) { scan_req->scan_result(evt, NULL, scan_req->arg); scan_req->scan_result = NULL; } return result; } int wilc_scan(struct wilc_vif *vif, u8 scan_source, u8 scan_type, u8 *ch_freq_list, u8 ch_list_len, void (*scan_result_fn)(enum scan_event, struct wilc_rcvd_net_info *, void *), void *user_arg, struct cfg80211_scan_request *request) { int result = 0; struct wid wid_list[WILC_SCAN_WID_LIST_SIZE]; u32 index = 0; u32 i, scan_timeout; u8 *buffer; u8 valuesize = 0; u8 *search_ssid_vals = NULL; struct host_if_drv *hif_drv = vif->hif_drv; if (hif_drv->hif_state >= HOST_IF_SCANNING && hif_drv->hif_state < HOST_IF_CONNECTED) { netdev_err(vif->ndev, "Already scan\n"); result = -EBUSY; goto error; } if (vif->connecting) { netdev_err(vif->ndev, "Don't do obss scan\n"); result = -EBUSY; goto error; } hif_drv->usr_scan_req.ch_cnt = 0; if (request->n_ssids) { for (i = 0; i < request->n_ssids; i++) valuesize += ((request->ssids[i].ssid_len) + 1); search_ssid_vals = kmalloc(valuesize + 1, GFP_KERNEL); if (search_ssid_vals) { wid_list[index].id = WID_SSID_PROBE_REQ; wid_list[index].type = WID_STR; wid_list[index].val = search_ssid_vals; buffer = wid_list[index].val; *buffer++ = request->n_ssids; for (i = 0; i < request->n_ssids; i++) { *buffer++ = request->ssids[i].ssid_len; memcpy(buffer, request->ssids[i].ssid, request->ssids[i].ssid_len); buffer += request->ssids[i].ssid_len; } wid_list[index].size = (s32)(valuesize + 1); index++; } } wid_list[index].id = WID_INFO_ELEMENT_PROBE; wid_list[index].type = WID_BIN_DATA; wid_list[index].val = (s8 *)request->ie; wid_list[index].size = request->ie_len; index++; wid_list[index].id = WID_SCAN_TYPE; wid_list[index].type = WID_CHAR; wid_list[index].size = sizeof(char); wid_list[index].val = (s8 *)&scan_type; index++; if (scan_type == WILC_FW_PASSIVE_SCAN && request->duration) { wid_list[index].id = WID_PASSIVE_SCAN_TIME; wid_list[index].type = WID_SHORT; wid_list[index].size = sizeof(u16); wid_list[index].val = (s8 *)&request->duration; index++; scan_timeout = (request->duration * ch_list_len) + 500; } else { scan_timeout = WILC_HIF_SCAN_TIMEOUT_MS; } wid_list[index].id = WID_SCAN_CHANNEL_LIST; wid_list[index].type = WID_BIN_DATA; if (ch_freq_list && ch_list_len > 0) { for (i = 0; i < ch_list_len; i++) { if (ch_freq_list[i] > 0) ch_freq_list[i] -= 1; } } wid_list[index].val = ch_freq_list; wid_list[index].size = ch_list_len; index++; wid_list[index].id = WID_START_SCAN_REQ; wid_list[index].type = WID_CHAR; wid_list[index].size = sizeof(char); wid_list[index].val = (s8 *)&scan_source; index++; 
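	/*
	 * The WID list built above (probe SSIDs, probe IEs, scan type,
	 * optional passive dwell time, channel list and the start-scan
	 * trigger) is sent to the firmware as a single config packet.
	 * Note that the channel numbers were converted to zero-based
	 * firmware indices before being handed over.
	 */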
hif_drv->usr_scan_req.scan_result = scan_result_fn; hif_drv->usr_scan_req.arg = user_arg; result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, index); if (result) { netdev_err(vif->ndev, "Failed to send scan parameters\n"); goto error; } hif_drv->scan_timer_vif = vif; mod_timer(&hif_drv->scan_timer, jiffies + msecs_to_jiffies(scan_timeout)); error: kfree(search_ssid_vals); return result; } static int wilc_send_connect_wid(struct wilc_vif *vif) { int result = 0; struct wid wid_list[5]; u32 wid_cnt = 0; struct host_if_drv *hif_drv = vif->hif_drv; struct wilc_conn_info *conn_attr = &hif_drv->conn_info; struct wilc_join_bss_param *bss_param = conn_attr->param; wid_list[wid_cnt].id = WID_SET_MFP; wid_list[wid_cnt].type = WID_CHAR; wid_list[wid_cnt].size = sizeof(char); wid_list[wid_cnt].val = (s8 *)&conn_attr->mfp_type; wid_cnt++; wid_list[wid_cnt].id = WID_INFO_ELEMENT_ASSOCIATE; wid_list[wid_cnt].type = WID_BIN_DATA; wid_list[wid_cnt].val = conn_attr->req_ies; wid_list[wid_cnt].size = conn_attr->req_ies_len; wid_cnt++; wid_list[wid_cnt].id = WID_11I_MODE; wid_list[wid_cnt].type = WID_CHAR; wid_list[wid_cnt].size = sizeof(char); wid_list[wid_cnt].val = (s8 *)&conn_attr->security; wid_cnt++; wid_list[wid_cnt].id = WID_AUTH_TYPE; wid_list[wid_cnt].type = WID_CHAR; wid_list[wid_cnt].size = sizeof(char); wid_list[wid_cnt].val = (s8 *)&conn_attr->auth_type; wid_cnt++; wid_list[wid_cnt].id = WID_JOIN_REQ_EXTENDED; wid_list[wid_cnt].type = WID_STR; wid_list[wid_cnt].size = sizeof(*bss_param); wid_list[wid_cnt].val = (u8 *)bss_param; wid_cnt++; result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, wid_cnt); if (result) { netdev_err(vif->ndev, "failed to send config packet\n"); goto error; } else { if (conn_attr->auth_type == WILC_FW_AUTH_SAE) hif_drv->hif_state = HOST_IF_EXTERNAL_AUTH; else hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP; } return 0; error: kfree(conn_attr->req_ies); conn_attr->req_ies = NULL; return result; } static void handle_connect_timeout(struct work_struct *work) { struct host_if_msg *msg = container_of(work, struct host_if_msg, work); struct wilc_vif *vif = msg->vif; int result; struct wid wid; u16 dummy_reason_code = 0; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { netdev_err(vif->ndev, "%s: hif driver is NULL\n", __func__); goto out; } hif_drv->hif_state = HOST_IF_IDLE; if (hif_drv->conn_info.conn_result) { hif_drv->conn_info.conn_result(CONN_DISCONN_EVENT_CONN_RESP, WILC_MAC_STATUS_DISCONNECTED, hif_drv->conn_info.arg); } else { netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__); } wid.id = WID_DISCONNECT; wid.type = WID_CHAR; wid.val = (s8 *)&dummy_reason_code; wid.size = sizeof(char); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to send disconnect\n"); hif_drv->conn_info.req_ies_len = 0; kfree(hif_drv->conn_info.req_ies); hif_drv->conn_info.req_ies = NULL; out: kfree(msg); } void *wilc_parse_join_bss_param(struct cfg80211_bss *bss, struct cfg80211_crypto_settings *crypto) { struct wilc_join_bss_param *param; struct ieee80211_p2p_noa_attr noa_attr; u8 rates_len = 0; const u8 *tim_elm, *ssid_elm, *rates_ie, *supp_rates_ie; const u8 *ht_ie, *wpa_ie, *wmm_ie, *rsn_ie; int ret; const struct cfg80211_bss_ies *ies = rcu_dereference(bss->ies); param = kzalloc(sizeof(*param), GFP_KERNEL); if (!param) return NULL; param->beacon_period = cpu_to_le16(bss->beacon_interval); param->cap_info = cpu_to_le16(bss->capability); param->bss_type = WILC_FW_BSS_TYPE_INFRA; param->ch = 
ieee80211_frequency_to_channel(bss->channel->center_freq); ether_addr_copy(param->bssid, bss->bssid); ssid_elm = cfg80211_find_ie(WLAN_EID_SSID, ies->data, ies->len); if (ssid_elm) { if (ssid_elm[1] <= IEEE80211_MAX_SSID_LEN) memcpy(param->ssid, ssid_elm + 2, ssid_elm[1]); } tim_elm = cfg80211_find_ie(WLAN_EID_TIM, ies->data, ies->len); if (tim_elm && tim_elm[1] >= 2) param->dtim_period = tim_elm[3]; memset(param->p_suites, 0xFF, 3); memset(param->akm_suites, 0xFF, 3); rates_ie = cfg80211_find_ie(WLAN_EID_SUPP_RATES, ies->data, ies->len); if (rates_ie) { rates_len = rates_ie[1]; if (rates_len > WILC_MAX_RATES_SUPPORTED) rates_len = WILC_MAX_RATES_SUPPORTED; param->supp_rates[0] = rates_len; memcpy(&param->supp_rates[1], rates_ie + 2, rates_len); } if (rates_len < WILC_MAX_RATES_SUPPORTED) { supp_rates_ie = cfg80211_find_ie(WLAN_EID_EXT_SUPP_RATES, ies->data, ies->len); if (supp_rates_ie) { u8 ext_rates = supp_rates_ie[1]; if (ext_rates > (WILC_MAX_RATES_SUPPORTED - rates_len)) param->supp_rates[0] = WILC_MAX_RATES_SUPPORTED; else param->supp_rates[0] += ext_rates; memcpy(&param->supp_rates[rates_len + 1], supp_rates_ie + 2, (param->supp_rates[0] - rates_len)); } } ht_ie = cfg80211_find_ie(WLAN_EID_HT_CAPABILITY, ies->data, ies->len); if (ht_ie) param->ht_capable = true; ret = cfg80211_get_p2p_attr(ies->data, ies->len, IEEE80211_P2P_ATTR_ABSENCE_NOTICE, (u8 *)&noa_attr, sizeof(noa_attr)); if (ret > 0) { param->tsf_lo = cpu_to_le32(ies->tsf); param->noa_enabled = 1; param->idx = noa_attr.index; if (noa_attr.oppps_ctwindow & IEEE80211_P2P_OPPPS_ENABLE_BIT) { param->opp_enabled = 1; param->opp_en.ct_window = noa_attr.oppps_ctwindow; param->opp_en.cnt = noa_attr.desc[0].count; param->opp_en.duration = noa_attr.desc[0].duration; param->opp_en.interval = noa_attr.desc[0].interval; param->opp_en.start_time = noa_attr.desc[0].start_time; } else { param->opp_enabled = 0; param->opp_dis.cnt = noa_attr.desc[0].count; param->opp_dis.duration = noa_attr.desc[0].duration; param->opp_dis.interval = noa_attr.desc[0].interval; param->opp_dis.start_time = noa_attr.desc[0].start_time; } } wmm_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WMM, ies->data, ies->len); if (wmm_ie) { struct ieee80211_wmm_param_ie *ie; ie = (struct ieee80211_wmm_param_ie *)wmm_ie; if ((ie->oui_subtype == 0 || ie->oui_subtype == 1) && ie->version == 1) { param->wmm_cap = true; if (ie->qos_info & BIT(7)) param->uapsd_cap = true; } } wpa_ie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, WLAN_OUI_TYPE_MICROSOFT_WPA, ies->data, ies->len); if (wpa_ie) { param->mode_802_11i = 1; param->rsn_found = true; } rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, ies->data, ies->len); if (rsn_ie) { int rsn_ie_len = sizeof(struct element) + rsn_ie[1]; int offset = 8; param->mode_802_11i = 2; param->rsn_found = true; /* extract RSN capabilities */ if (offset < rsn_ie_len) { /* skip over pairwise suites */ offset += (rsn_ie[offset] * 4) + 2; if (offset < rsn_ie_len) { /* skip over authentication suites */ offset += (rsn_ie[offset] * 4) + 2; if (offset + 1 < rsn_ie_len) memcpy(param->rsn_cap, &rsn_ie[offset], 2); } } } if (param->rsn_found) { int i; param->rsn_grp_policy = crypto->cipher_group & 0xFF; for (i = 0; i < crypto->n_ciphers_pairwise && i < 3; i++) param->p_suites[i] = crypto->ciphers_pairwise[i] & 0xFF; for (i = 0; i < crypto->n_akm_suites && i < 3; i++) param->akm_suites[i] = crypto->akm_suites[i] & 0xFF; } return (void *)param; } static void handle_rcvd_ntwrk_info(struct work_struct *work) { struct host_if_msg *msg = 
container_of(work, struct host_if_msg, work); struct wilc_rcvd_net_info *rcvd_info = &msg->body.net_info; struct wilc_user_scan_req *scan_req = &msg->vif->hif_drv->usr_scan_req; const u8 *ch_elm; u8 *ies; int ies_len; size_t offset; if (ieee80211_is_probe_resp(rcvd_info->mgmt->frame_control)) offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable); else if (ieee80211_is_beacon(rcvd_info->mgmt->frame_control)) offset = offsetof(struct ieee80211_mgmt, u.beacon.variable); else goto done; ies = rcvd_info->mgmt->u.beacon.variable; ies_len = rcvd_info->frame_len - offset; if (ies_len <= 0) goto done; ch_elm = cfg80211_find_ie(WLAN_EID_DS_PARAMS, ies, ies_len); if (ch_elm && ch_elm[1] > 0) rcvd_info->ch = ch_elm[2]; if (scan_req->scan_result) scan_req->scan_result(SCAN_EVENT_NETWORK_FOUND, rcvd_info, scan_req->arg); done: kfree(rcvd_info->mgmt); kfree(msg); } static void host_int_get_assoc_res_info(struct wilc_vif *vif, u8 *assoc_resp_info, u32 max_assoc_resp_info_len, u32 *rcvd_assoc_resp_info_len) { int result; struct wid wid; wid.id = WID_ASSOC_RES_INFO; wid.type = WID_STR; wid.val = assoc_resp_info; wid.size = max_assoc_resp_info_len; result = wilc_send_config_pkt(vif, WILC_GET_CFG, &wid, 1); if (result) { *rcvd_assoc_resp_info_len = 0; netdev_err(vif->ndev, "Failed to send association response\n"); return; } *rcvd_assoc_resp_info_len = wid.size; } static s32 wilc_parse_assoc_resp_info(u8 *buffer, u32 buffer_len, struct wilc_conn_info *ret_conn_info) { u8 *ies; u16 ies_len; struct wilc_assoc_resp *res = (struct wilc_assoc_resp *)buffer; ret_conn_info->status = le16_to_cpu(res->status_code); if (ret_conn_info->status == WLAN_STATUS_SUCCESS) { ies = &buffer[sizeof(*res)]; ies_len = buffer_len - sizeof(*res); ret_conn_info->resp_ies = kmemdup(ies, ies_len, GFP_KERNEL); if (!ret_conn_info->resp_ies) return -ENOMEM; ret_conn_info->resp_ies_len = ies_len; } return 0; } static inline void host_int_parse_assoc_resp_info(struct wilc_vif *vif, u8 mac_status) { struct host_if_drv *hif_drv = vif->hif_drv; struct wilc_conn_info *conn_info = &hif_drv->conn_info; if (mac_status == WILC_MAC_STATUS_CONNECTED) { u32 assoc_resp_info_len; memset(hif_drv->assoc_resp, 0, WILC_MAX_ASSOC_RESP_FRAME_SIZE); host_int_get_assoc_res_info(vif, hif_drv->assoc_resp, WILC_MAX_ASSOC_RESP_FRAME_SIZE, &assoc_resp_info_len); if (assoc_resp_info_len != 0) { s32 err = 0; err = wilc_parse_assoc_resp_info(hif_drv->assoc_resp, assoc_resp_info_len, conn_info); if (err) netdev_err(vif->ndev, "wilc_parse_assoc_resp_info() returned error %d\n", err); } } del_timer(&hif_drv->connect_timer); conn_info->conn_result(CONN_DISCONN_EVENT_CONN_RESP, mac_status, hif_drv->conn_info.arg); if (mac_status == WILC_MAC_STATUS_CONNECTED && conn_info->status == WLAN_STATUS_SUCCESS) { ether_addr_copy(hif_drv->assoc_bssid, conn_info->bssid); hif_drv->hif_state = HOST_IF_CONNECTED; } else { hif_drv->hif_state = HOST_IF_IDLE; } kfree(conn_info->resp_ies); conn_info->resp_ies = NULL; conn_info->resp_ies_len = 0; kfree(conn_info->req_ies); conn_info->req_ies = NULL; conn_info->req_ies_len = 0; } void wilc_handle_disconnect(struct wilc_vif *vif) { struct host_if_drv *hif_drv = vif->hif_drv; if (hif_drv->usr_scan_req.scan_result) { del_timer(&hif_drv->scan_timer); handle_scan_done(vif, SCAN_EVENT_ABORTED); } if (hif_drv->conn_info.conn_result) hif_drv->conn_info.conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, 0, hif_drv->conn_info.arg); eth_zero_addr(hif_drv->assoc_bssid); hif_drv->conn_info.req_ies_len = 0; kfree(hif_drv->conn_info.req_ies); 
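	/* Clear the stale pointer after freeing so it cannot be freed or reused later. */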
hif_drv->conn_info.req_ies = NULL; hif_drv->hif_state = HOST_IF_IDLE; } static void handle_rcvd_gnrl_async_info(struct work_struct *work) { struct host_if_msg *msg = container_of(work, struct host_if_msg, work); struct wilc_vif *vif = msg->vif; struct wilc_rcvd_mac_info *mac_info = &msg->body.mac_info; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { netdev_err(vif->ndev, "%s: hif driver is NULL\n", __func__); goto free_msg; } if (!hif_drv->conn_info.conn_result) { netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__); goto free_msg; } if (hif_drv->hif_state == HOST_IF_EXTERNAL_AUTH) { cfg80211_external_auth_request(vif->ndev, &vif->auth, GFP_KERNEL); hif_drv->hif_state = HOST_IF_WAITING_CONN_RESP; } else if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) { host_int_parse_assoc_resp_info(vif, mac_info->status); } else if (mac_info->status == WILC_MAC_STATUS_DISCONNECTED) { if (hif_drv->hif_state == HOST_IF_CONNECTED) { wilc_handle_disconnect(vif); } else if (hif_drv->usr_scan_req.scan_result) { del_timer(&hif_drv->scan_timer); handle_scan_done(vif, SCAN_EVENT_ABORTED); } } free_msg: kfree(msg); } int wilc_disconnect(struct wilc_vif *vif) { struct wid wid; struct host_if_drv *hif_drv = vif->hif_drv; struct wilc_user_scan_req *scan_req; struct wilc_conn_info *conn_info; int result; u16 dummy_reason_code = 0; wid.id = WID_DISCONNECT; wid.type = WID_CHAR; wid.val = (s8 *)&dummy_reason_code; wid.size = sizeof(char); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) { netdev_err(vif->ndev, "Failed to send disconnect\n"); return result; } scan_req = &hif_drv->usr_scan_req; conn_info = &hif_drv->conn_info; if (scan_req->scan_result) { del_timer(&hif_drv->scan_timer); scan_req->scan_result(SCAN_EVENT_ABORTED, NULL, scan_req->arg); scan_req->scan_result = NULL; } if (conn_info->conn_result) { if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP || hif_drv->hif_state == HOST_IF_EXTERNAL_AUTH) del_timer(&hif_drv->connect_timer); conn_info->conn_result(CONN_DISCONN_EVENT_DISCONN_NOTIF, 0, conn_info->arg); } else { netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__); } hif_drv->hif_state = HOST_IF_IDLE; eth_zero_addr(hif_drv->assoc_bssid); conn_info->req_ies_len = 0; kfree(conn_info->req_ies); conn_info->req_ies = NULL; return 0; } int wilc_get_statistics(struct wilc_vif *vif, struct rf_info *stats) { struct wid wid_list[5]; u32 wid_cnt = 0, result; wid_list[wid_cnt].id = WID_LINKSPEED; wid_list[wid_cnt].type = WID_CHAR; wid_list[wid_cnt].size = sizeof(char); wid_list[wid_cnt].val = (s8 *)&stats->link_speed; wid_cnt++; wid_list[wid_cnt].id = WID_RSSI; wid_list[wid_cnt].type = WID_CHAR; wid_list[wid_cnt].size = sizeof(char); wid_list[wid_cnt].val = (s8 *)&stats->rssi; wid_cnt++; wid_list[wid_cnt].id = WID_SUCCESS_FRAME_COUNT; wid_list[wid_cnt].type = WID_INT; wid_list[wid_cnt].size = sizeof(u32); wid_list[wid_cnt].val = (s8 *)&stats->tx_cnt; wid_cnt++; wid_list[wid_cnt].id = WID_RECEIVED_FRAGMENT_COUNT; wid_list[wid_cnt].type = WID_INT; wid_list[wid_cnt].size = sizeof(u32); wid_list[wid_cnt].val = (s8 *)&stats->rx_cnt; wid_cnt++; wid_list[wid_cnt].id = WID_FAILED_COUNT; wid_list[wid_cnt].type = WID_INT; wid_list[wid_cnt].size = sizeof(u32); wid_list[wid_cnt].val = (s8 *)&stats->tx_fail_cnt; wid_cnt++; result = wilc_send_config_pkt(vif, WILC_GET_CFG, wid_list, wid_cnt); if (result) { netdev_err(vif->ndev, "Failed to send scan parameters\n"); return result; } if (stats->link_speed > TCP_ACK_FILTER_LINK_SPEED_THRESH && stats->link_speed != DEFAULT_LINK_SPEED) 
wilc_enable_tcp_ack_filter(vif, true); else if (stats->link_speed != DEFAULT_LINK_SPEED) wilc_enable_tcp_ack_filter(vif, false); return result; } static void handle_get_statistics(struct work_struct *work) { struct host_if_msg *msg = container_of(work, struct host_if_msg, work); struct wilc_vif *vif = msg->vif; struct rf_info *stats = (struct rf_info *)msg->body.data; wilc_get_statistics(vif, stats); kfree(msg); } static void wilc_hif_pack_sta_param(u8 *cur_byte, const u8 *mac, struct station_parameters *params) { ether_addr_copy(cur_byte, mac); cur_byte += ETH_ALEN; put_unaligned_le16(params->aid, cur_byte); cur_byte += 2; *cur_byte++ = params->link_sta_params.supported_rates_len; if (params->link_sta_params.supported_rates_len > 0) memcpy(cur_byte, params->link_sta_params.supported_rates, params->link_sta_params.supported_rates_len); cur_byte += params->link_sta_params.supported_rates_len; if (params->link_sta_params.ht_capa) { *cur_byte++ = true; memcpy(cur_byte, params->link_sta_params.ht_capa, sizeof(struct ieee80211_ht_cap)); } else { *cur_byte++ = false; } cur_byte += sizeof(struct ieee80211_ht_cap); put_unaligned_le16(params->sta_flags_mask, cur_byte); cur_byte += 2; put_unaligned_le16(params->sta_flags_set, cur_byte); } static int handle_remain_on_chan(struct wilc_vif *vif, struct wilc_remain_ch *hif_remain_ch) { int result; u8 remain_on_chan_flag; struct wid wid; struct host_if_drv *hif_drv = vif->hif_drv; if (hif_drv->usr_scan_req.scan_result) return -EBUSY; if (hif_drv->hif_state == HOST_IF_WAITING_CONN_RESP) return -EBUSY; if (vif->connecting) return -EBUSY; remain_on_chan_flag = true; wid.id = WID_REMAIN_ON_CHAN; wid.type = WID_STR; wid.size = 2; wid.val = kmalloc(wid.size, GFP_KERNEL); if (!wid.val) return -ENOMEM; wid.val[0] = remain_on_chan_flag; wid.val[1] = (s8)hif_remain_ch->ch; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); kfree(wid.val); if (result) return -EBUSY; hif_drv->remain_on_ch.arg = hif_remain_ch->arg; hif_drv->remain_on_ch.expired = hif_remain_ch->expired; hif_drv->remain_on_ch.ch = hif_remain_ch->ch; hif_drv->remain_on_ch.cookie = hif_remain_ch->cookie; hif_drv->remain_on_ch_timer_vif = vif; return 0; } static int wilc_handle_roc_expired(struct wilc_vif *vif, u64 cookie) { u8 remain_on_chan_flag; struct wid wid; int result; struct host_if_drv *hif_drv = vif->hif_drv; if (vif->priv.p2p_listen_state) { remain_on_chan_flag = false; wid.id = WID_REMAIN_ON_CHAN; wid.type = WID_STR; wid.size = 2; wid.val = kmalloc(wid.size, GFP_KERNEL); if (!wid.val) return -ENOMEM; wid.val[0] = remain_on_chan_flag; wid.val[1] = WILC_FALSE_FRMWR_CHANNEL; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); kfree(wid.val); if (result != 0) { netdev_err(vif->ndev, "Failed to set remain channel\n"); return -EINVAL; } if (hif_drv->remain_on_ch.expired) { hif_drv->remain_on_ch.expired(hif_drv->remain_on_ch.arg, cookie); } } else { netdev_dbg(vif->ndev, "Not in listen state\n"); } return 0; } static void wilc_handle_listen_state_expired(struct work_struct *work) { struct host_if_msg *msg = container_of(work, struct host_if_msg, work); wilc_handle_roc_expired(msg->vif, msg->body.remain_on_ch.cookie); kfree(msg); } static void listen_timer_cb(struct timer_list *t) { struct host_if_drv *hif_drv = from_timer(hif_drv, t, remain_on_ch_timer); struct wilc_vif *vif = hif_drv->remain_on_ch_timer_vif; int result; struct host_if_msg *msg; del_timer(&vif->hif_drv->remain_on_ch_timer); msg = wilc_alloc_work(vif, wilc_handle_listen_state_expired, false); if (IS_ERR(msg)) return; 
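	/*
	 * Defer the actual teardown to the hif workqueue; the cookie is
	 * carried in the message so the expired callback is invoked with
	 * the cookie of the listen session that timed out.
	 */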
msg->body.remain_on_ch.cookie = vif->hif_drv->remain_on_ch.cookie; result = wilc_enqueue_work(msg); if (result) { netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__); kfree(msg); } } static void handle_set_mcast_filter(struct work_struct *work) { struct host_if_msg *msg = container_of(work, struct host_if_msg, work); struct wilc_vif *vif = msg->vif; struct wilc_set_multicast *set_mc = &msg->body.mc_info; int result; struct wid wid; u8 *cur_byte; wid.id = WID_SETUP_MULTICAST_FILTER; wid.type = WID_BIN; wid.size = sizeof(struct wilc_set_multicast) + (set_mc->cnt * ETH_ALEN); wid.val = kmalloc(wid.size, GFP_KERNEL); if (!wid.val) goto error; cur_byte = wid.val; put_unaligned_le32(set_mc->enabled, cur_byte); cur_byte += 4; put_unaligned_le32(set_mc->cnt, cur_byte); cur_byte += 4; if (set_mc->cnt > 0 && set_mc->mc_list) memcpy(cur_byte, set_mc->mc_list, set_mc->cnt * ETH_ALEN); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to send setup multicast\n"); error: kfree(set_mc->mc_list); kfree(wid.val); kfree(msg); } void wilc_set_wowlan_trigger(struct wilc_vif *vif, bool enabled) { int ret; struct wid wid; u8 wowlan_trigger = 0; if (enabled) wowlan_trigger = 1; wid.id = WID_WOWLAN_TRIGGER; wid.type = WID_CHAR; wid.val = &wowlan_trigger; wid.size = sizeof(char); ret = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (ret) pr_err("Failed to send wowlan trigger config packet\n"); } int wilc_set_external_auth_param(struct wilc_vif *vif, struct cfg80211_external_auth_params *auth) { int ret; struct wid wid; struct wilc_external_auth_param *param; wid.id = WID_EXTERNAL_AUTH_PARAM; wid.type = WID_BIN_DATA; wid.size = sizeof(*param); param = kzalloc(sizeof(*param), GFP_KERNEL); if (!param) return -EINVAL; wid.val = (u8 *)param; param->action = auth->action; ether_addr_copy(param->bssid, auth->bssid); memcpy(param->ssid, auth->ssid.ssid, auth->ssid.ssid_len); param->ssid_len = auth->ssid.ssid_len; ret = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); kfree(param); return ret; } static void handle_scan_timer(struct work_struct *work) { struct host_if_msg *msg = container_of(work, struct host_if_msg, work); handle_scan_done(msg->vif, SCAN_EVENT_ABORTED); kfree(msg); } static void handle_scan_complete(struct work_struct *work) { struct host_if_msg *msg = container_of(work, struct host_if_msg, work); del_timer(&msg->vif->hif_drv->scan_timer); handle_scan_done(msg->vif, SCAN_EVENT_DONE); kfree(msg); } static void timer_scan_cb(struct timer_list *t) { struct host_if_drv *hif_drv = from_timer(hif_drv, t, scan_timer); struct wilc_vif *vif = hif_drv->scan_timer_vif; struct host_if_msg *msg; int result; msg = wilc_alloc_work(vif, handle_scan_timer, false); if (IS_ERR(msg)) return; result = wilc_enqueue_work(msg); if (result) kfree(msg); } static void timer_connect_cb(struct timer_list *t) { struct host_if_drv *hif_drv = from_timer(hif_drv, t, connect_timer); struct wilc_vif *vif = hif_drv->connect_timer_vif; struct host_if_msg *msg; int result; msg = wilc_alloc_work(vif, handle_connect_timeout, false); if (IS_ERR(msg)) return; result = wilc_enqueue_work(msg); if (result) kfree(msg); } int wilc_add_ptk(struct wilc_vif *vif, const u8 *ptk, u8 ptk_key_len, const u8 *mac_addr, const u8 *rx_mic, const u8 *tx_mic, u8 mode, u8 cipher_mode, u8 index) { int result = 0; u8 t_key_len = ptk_key_len + WILC_RX_MIC_KEY_LEN + WILC_TX_MIC_KEY_LEN; if (mode == WILC_AP_MODE) { struct wid wid_list[2]; struct wilc_ap_wpa_ptk *key_buf; wid_list[0].id = WID_11I_MODE; 
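	/*
	 * In AP mode the cipher mode (WID_11I_MODE) and the packed PTK
	 * (WID_ADD_PTK) are sent to the firmware in one two-entry config
	 * packet.
	 */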
wid_list[0].type = WID_CHAR; wid_list[0].size = sizeof(char); wid_list[0].val = (s8 *)&cipher_mode; key_buf = kzalloc(sizeof(*key_buf) + t_key_len, GFP_KERNEL); if (!key_buf) return -ENOMEM; ether_addr_copy(key_buf->mac_addr, mac_addr); key_buf->index = index; key_buf->key_len = t_key_len; memcpy(&key_buf->key[0], ptk, ptk_key_len); if (rx_mic) memcpy(&key_buf->key[ptk_key_len], rx_mic, WILC_RX_MIC_KEY_LEN); if (tx_mic) memcpy(&key_buf->key[ptk_key_len + WILC_RX_MIC_KEY_LEN], tx_mic, WILC_TX_MIC_KEY_LEN); wid_list[1].id = WID_ADD_PTK; wid_list[1].type = WID_STR; wid_list[1].size = sizeof(*key_buf) + t_key_len; wid_list[1].val = (u8 *)key_buf; result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, ARRAY_SIZE(wid_list)); kfree(key_buf); } else if (mode == WILC_STATION_MODE) { struct wid wid; struct wilc_sta_wpa_ptk *key_buf; key_buf = kzalloc(sizeof(*key_buf) + t_key_len, GFP_KERNEL); if (!key_buf) return -ENOMEM; ether_addr_copy(key_buf->mac_addr, mac_addr); key_buf->key_len = t_key_len; memcpy(&key_buf->key[0], ptk, ptk_key_len); if (rx_mic) memcpy(&key_buf->key[ptk_key_len], rx_mic, WILC_RX_MIC_KEY_LEN); if (tx_mic) memcpy(&key_buf->key[ptk_key_len + WILC_RX_MIC_KEY_LEN], tx_mic, WILC_TX_MIC_KEY_LEN); wid.id = WID_ADD_PTK; wid.type = WID_STR; wid.size = sizeof(*key_buf) + t_key_len; wid.val = (s8 *)key_buf; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); kfree(key_buf); } return result; } int wilc_add_igtk(struct wilc_vif *vif, const u8 *igtk, u8 igtk_key_len, const u8 *pn, u8 pn_len, const u8 *mac_addr, u8 mode, u8 index) { int result = 0; u8 t_key_len = igtk_key_len; struct wid wid; struct wilc_wpa_igtk *key_buf; key_buf = kzalloc(sizeof(*key_buf) + t_key_len, GFP_KERNEL); if (!key_buf) return -ENOMEM; key_buf->index = index; memcpy(&key_buf->pn[0], pn, pn_len); key_buf->pn_len = pn_len; memcpy(&key_buf->key[0], igtk, igtk_key_len); key_buf->key_len = t_key_len; wid.id = WID_ADD_IGTK; wid.type = WID_STR; wid.size = sizeof(*key_buf) + t_key_len; wid.val = (s8 *)key_buf; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); kfree(key_buf); return result; } int wilc_add_rx_gtk(struct wilc_vif *vif, const u8 *rx_gtk, u8 gtk_key_len, u8 index, u32 key_rsc_len, const u8 *key_rsc, const u8 *rx_mic, const u8 *tx_mic, u8 mode, u8 cipher_mode) { int result = 0; struct wilc_gtk_key *gtk_key; int t_key_len = gtk_key_len + WILC_RX_MIC_KEY_LEN + WILC_TX_MIC_KEY_LEN; gtk_key = kzalloc(sizeof(*gtk_key) + t_key_len, GFP_KERNEL); if (!gtk_key) return -ENOMEM; /* fill bssid value only in station mode */ if (mode == WILC_STATION_MODE && vif->hif_drv->hif_state == HOST_IF_CONNECTED) memcpy(gtk_key->mac_addr, vif->hif_drv->assoc_bssid, ETH_ALEN); if (key_rsc) memcpy(gtk_key->rsc, key_rsc, 8); gtk_key->index = index; gtk_key->key_len = t_key_len; memcpy(&gtk_key->key[0], rx_gtk, gtk_key_len); if (rx_mic) memcpy(&gtk_key->key[gtk_key_len], rx_mic, WILC_RX_MIC_KEY_LEN); if (tx_mic) memcpy(&gtk_key->key[gtk_key_len + WILC_RX_MIC_KEY_LEN], tx_mic, WILC_TX_MIC_KEY_LEN); if (mode == WILC_AP_MODE) { struct wid wid_list[2]; wid_list[0].id = WID_11I_MODE; wid_list[0].type = WID_CHAR; wid_list[0].size = sizeof(char); wid_list[0].val = (s8 *)&cipher_mode; wid_list[1].id = WID_ADD_RX_GTK; wid_list[1].type = WID_STR; wid_list[1].size = sizeof(*gtk_key) + t_key_len; wid_list[1].val = (u8 *)gtk_key; result = wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, ARRAY_SIZE(wid_list)); } else if (mode == WILC_STATION_MODE) { struct wid wid; wid.id = WID_ADD_RX_GTK; wid.type = WID_STR; wid.size = 
sizeof(*gtk_key) + t_key_len; wid.val = (u8 *)gtk_key; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); } kfree(gtk_key); return result; } int wilc_set_pmkid_info(struct wilc_vif *vif, struct wilc_pmkid_attr *pmkid) { struct wid wid; wid.id = WID_PMKID_INFO; wid.type = WID_STR; wid.size = (pmkid->numpmkid * sizeof(struct wilc_pmkid)) + 1; wid.val = (u8 *)pmkid; return wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); } int wilc_get_mac_address(struct wilc_vif *vif, u8 *mac_addr) { int result; struct wid wid; wid.id = WID_MAC_ADDR; wid.type = WID_STR; wid.size = ETH_ALEN; wid.val = mac_addr; result = wilc_send_config_pkt(vif, WILC_GET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to get mac address\n"); return result; } int wilc_set_mac_address(struct wilc_vif *vif, u8 *mac_addr) { struct wid wid; int result; wid.id = WID_MAC_ADDR; wid.type = WID_STR; wid.size = ETH_ALEN; wid.val = mac_addr; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to set mac address\n"); return result; } int wilc_set_join_req(struct wilc_vif *vif, u8 *bssid, const u8 *ies, size_t ies_len) { int result; struct host_if_drv *hif_drv = vif->hif_drv; struct wilc_conn_info *conn_info = &hif_drv->conn_info; if (bssid) ether_addr_copy(conn_info->bssid, bssid); if (ies) { conn_info->req_ies_len = ies_len; conn_info->req_ies = kmemdup(ies, ies_len, GFP_KERNEL); if (!conn_info->req_ies) return -ENOMEM; } result = wilc_send_connect_wid(vif); if (result) goto free_ies; hif_drv->connect_timer_vif = vif; mod_timer(&hif_drv->connect_timer, jiffies + msecs_to_jiffies(WILC_HIF_CONNECT_TIMEOUT_MS)); return 0; free_ies: kfree(conn_info->req_ies); return result; } int wilc_set_mac_chnl_num(struct wilc_vif *vif, u8 channel) { struct wid wid; int result; wid.id = WID_CURRENT_CHANNEL; wid.type = WID_CHAR; wid.size = sizeof(char); wid.val = &channel; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to set channel\n"); return result; } int wilc_set_operation_mode(struct wilc_vif *vif, int index, u8 mode, u8 ifc_id) { struct wid wid; int result; struct wilc_drv_handler drv; wid.id = WID_SET_OPERATION_MODE; wid.type = WID_STR; wid.size = sizeof(drv); wid.val = (u8 *)&drv; drv.handler = cpu_to_le32(index); drv.mode = (ifc_id | (mode << 1)); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to set driver handler\n"); return result; } s32 wilc_get_inactive_time(struct wilc_vif *vif, const u8 *mac, u32 *out_val) { struct wid wid; s32 result; wid.id = WID_SET_STA_MAC_INACTIVE_TIME; wid.type = WID_STR; wid.size = ETH_ALEN; wid.val = kzalloc(wid.size, GFP_KERNEL); if (!wid.val) return -ENOMEM; ether_addr_copy(wid.val, mac); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); kfree(wid.val); if (result) { netdev_err(vif->ndev, "Failed to set inactive mac\n"); return result; } wid.id = WID_GET_INACTIVE_TIME; wid.type = WID_INT; wid.val = (s8 *)out_val; wid.size = sizeof(u32); result = wilc_send_config_pkt(vif, WILC_GET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to get inactive time\n"); return result; } int wilc_get_rssi(struct wilc_vif *vif, s8 *rssi_level) { struct wid wid; int result; if (!rssi_level) { netdev_err(vif->ndev, "%s: RSSI level is NULL\n", __func__); return -EFAULT; } wid.id = WID_RSSI; wid.type = WID_CHAR; wid.size = sizeof(char); wid.val = rssi_level; result = wilc_send_config_pkt(vif, WILC_GET_CFG, &wid, 1); if (result) 
netdev_err(vif->ndev, "Failed to get RSSI value\n"); return result; } static int wilc_get_stats_async(struct wilc_vif *vif, struct rf_info *stats) { int result; struct host_if_msg *msg; msg = wilc_alloc_work(vif, handle_get_statistics, false); if (IS_ERR(msg)) return PTR_ERR(msg); msg->body.data = (char *)stats; result = wilc_enqueue_work(msg); if (result) { netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__); kfree(msg); return result; } return result; } int wilc_hif_set_cfg(struct wilc_vif *vif, struct cfg_param_attr *param) { struct wid wid_list[4]; int i = 0; if (param->flag & WILC_CFG_PARAM_RETRY_SHORT) { wid_list[i].id = WID_SHORT_RETRY_LIMIT; wid_list[i].val = (s8 *)&param->short_retry_limit; wid_list[i].type = WID_SHORT; wid_list[i].size = sizeof(u16); i++; } if (param->flag & WILC_CFG_PARAM_RETRY_LONG) { wid_list[i].id = WID_LONG_RETRY_LIMIT; wid_list[i].val = (s8 *)&param->long_retry_limit; wid_list[i].type = WID_SHORT; wid_list[i].size = sizeof(u16); i++; } if (param->flag & WILC_CFG_PARAM_FRAG_THRESHOLD) { wid_list[i].id = WID_FRAG_THRESHOLD; wid_list[i].val = (s8 *)&param->frag_threshold; wid_list[i].type = WID_SHORT; wid_list[i].size = sizeof(u16); i++; } if (param->flag & WILC_CFG_PARAM_RTS_THRESHOLD) { wid_list[i].id = WID_RTS_THRESHOLD; wid_list[i].val = (s8 *)&param->rts_threshold; wid_list[i].type = WID_SHORT; wid_list[i].size = sizeof(u16); i++; } return wilc_send_config_pkt(vif, WILC_SET_CFG, wid_list, i); } static void get_periodic_rssi(struct timer_list *t) { struct wilc_vif *vif = from_timer(vif, t, periodic_rssi); if (!vif->hif_drv) { netdev_err(vif->ndev, "%s: hif driver is NULL", __func__); return; } if (vif->hif_drv->hif_state == HOST_IF_CONNECTED) wilc_get_stats_async(vif, &vif->periodic_stat); mod_timer(&vif->periodic_rssi, jiffies + msecs_to_jiffies(5000)); } int wilc_init(struct net_device *dev, struct host_if_drv **hif_drv_handler) { struct host_if_drv *hif_drv; struct wilc_vif *vif = netdev_priv(dev); hif_drv = kzalloc(sizeof(*hif_drv), GFP_KERNEL); if (!hif_drv) return -ENOMEM; *hif_drv_handler = hif_drv; vif->hif_drv = hif_drv; timer_setup(&vif->periodic_rssi, get_periodic_rssi, 0); mod_timer(&vif->periodic_rssi, jiffies + msecs_to_jiffies(5000)); timer_setup(&hif_drv->scan_timer, timer_scan_cb, 0); timer_setup(&hif_drv->connect_timer, timer_connect_cb, 0); timer_setup(&hif_drv->remain_on_ch_timer, listen_timer_cb, 0); hif_drv->hif_state = HOST_IF_IDLE; hif_drv->p2p_timeout = 0; return 0; } int wilc_deinit(struct wilc_vif *vif) { int result = 0; struct host_if_drv *hif_drv = vif->hif_drv; if (!hif_drv) { netdev_err(vif->ndev, "%s: hif driver is NULL", __func__); return -EFAULT; } mutex_lock(&vif->wilc->deinit_lock); timer_shutdown_sync(&hif_drv->scan_timer); timer_shutdown_sync(&hif_drv->connect_timer); del_timer_sync(&vif->periodic_rssi); timer_shutdown_sync(&hif_drv->remain_on_ch_timer); if (hif_drv->usr_scan_req.scan_result) { hif_drv->usr_scan_req.scan_result(SCAN_EVENT_ABORTED, NULL, hif_drv->usr_scan_req.arg); hif_drv->usr_scan_req.scan_result = NULL; } hif_drv->hif_state = HOST_IF_IDLE; kfree(hif_drv); vif->hif_drv = NULL; mutex_unlock(&vif->wilc->deinit_lock); return result; } void wilc_network_info_received(struct wilc *wilc, u8 *buffer, u32 length) { int result; struct host_if_msg *msg; int id; struct host_if_drv *hif_drv; struct wilc_vif *vif; id = get_unaligned_le32(&buffer[length - 4]); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) return; hif_drv = vif->hif_drv; if (!hif_drv) { netdev_err(vif->ndev, "driver not init[%p]\n", 
hif_drv); return; } msg = wilc_alloc_work(vif, handle_rcvd_ntwrk_info, false); if (IS_ERR(msg)) return; msg->body.net_info.frame_len = get_unaligned_le16(&buffer[6]) - 1; msg->body.net_info.rssi = buffer[8]; msg->body.net_info.mgmt = kmemdup(&buffer[9], msg->body.net_info.frame_len, GFP_KERNEL); if (!msg->body.net_info.mgmt) { kfree(msg); return; } result = wilc_enqueue_work(msg); if (result) { netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__); kfree(msg->body.net_info.mgmt); kfree(msg); } } void wilc_gnrl_async_info_received(struct wilc *wilc, u8 *buffer, u32 length) { int result; struct host_if_msg *msg; int id; struct host_if_drv *hif_drv; struct wilc_vif *vif; mutex_lock(&wilc->deinit_lock); id = get_unaligned_le32(&buffer[length - 4]); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) { mutex_unlock(&wilc->deinit_lock); return; } hif_drv = vif->hif_drv; if (!hif_drv) { mutex_unlock(&wilc->deinit_lock); return; } if (!hif_drv->conn_info.conn_result) { netdev_err(vif->ndev, "%s: conn_result is NULL\n", __func__); mutex_unlock(&wilc->deinit_lock); return; } msg = wilc_alloc_work(vif, handle_rcvd_gnrl_async_info, false); if (IS_ERR(msg)) { mutex_unlock(&wilc->deinit_lock); return; } msg->body.mac_info.status = buffer[7]; result = wilc_enqueue_work(msg); if (result) { netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__); kfree(msg); } mutex_unlock(&wilc->deinit_lock); } void wilc_scan_complete_received(struct wilc *wilc, u8 *buffer, u32 length) { int result; int id; struct host_if_drv *hif_drv; struct wilc_vif *vif; id = get_unaligned_le32(&buffer[length - 4]); vif = wilc_get_vif_from_idx(wilc, id); if (!vif) return; hif_drv = vif->hif_drv; if (!hif_drv) return; if (hif_drv->usr_scan_req.scan_result) { struct host_if_msg *msg; msg = wilc_alloc_work(vif, handle_scan_complete, false); if (IS_ERR(msg)) return; result = wilc_enqueue_work(msg); if (result) { netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__); kfree(msg); } } } int wilc_remain_on_channel(struct wilc_vif *vif, u64 cookie, u32 duration, u16 chan, void (*expired)(void *, u64), void *user_arg) { struct wilc_remain_ch roc; int result; roc.ch = chan; roc.expired = expired; roc.arg = user_arg; roc.duration = duration; roc.cookie = cookie; result = handle_remain_on_chan(vif, &roc); if (result) netdev_err(vif->ndev, "%s: failed to set remain on channel\n", __func__); return result; } int wilc_listen_state_expired(struct wilc_vif *vif, u64 cookie) { if (!vif->hif_drv) { netdev_err(vif->ndev, "%s: hif driver is NULL", __func__); return -EFAULT; } del_timer(&vif->hif_drv->remain_on_ch_timer); return wilc_handle_roc_expired(vif, cookie); } void wilc_frame_register(struct wilc_vif *vif, u16 frame_type, bool reg) { struct wid wid; int result; struct wilc_reg_frame reg_frame; wid.id = WID_REGISTER_FRAME; wid.type = WID_STR; wid.size = sizeof(reg_frame); wid.val = (u8 *)&reg_frame; memset(&reg_frame, 0x0, sizeof(reg_frame)); if (reg) reg_frame.reg = 1; switch (frame_type) { case IEEE80211_STYPE_ACTION: reg_frame.reg_id = WILC_FW_ACTION_FRM_IDX; break; case IEEE80211_STYPE_PROBE_REQ: reg_frame.reg_id = WILC_FW_PROBE_REQ_IDX; break; case IEEE80211_STYPE_AUTH: reg_frame.reg_id = WILC_FW_AUTH_REQ_IDX; break; default: break; } reg_frame.frame_type = cpu_to_le16(frame_type); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to frame register\n"); } int wilc_add_beacon(struct wilc_vif *vif, u32 interval, u32 dtim_period, struct cfg80211_beacon_data *params) { struct wid wid; 
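	/*
	 * The beacon is handed to the firmware as one binary blob: interval,
	 * DTIM period, head length, head data, tail length and tail data,
	 * with the four 32-bit fields accounting for the extra 16 bytes.
	 */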
int result; u8 *cur_byte; wid.id = WID_ADD_BEACON; wid.type = WID_BIN; wid.size = params->head_len + params->tail_len + 16; wid.val = kzalloc(wid.size, GFP_KERNEL); if (!wid.val) return -ENOMEM; cur_byte = wid.val; put_unaligned_le32(interval, cur_byte); cur_byte += 4; put_unaligned_le32(dtim_period, cur_byte); cur_byte += 4; put_unaligned_le32(params->head_len, cur_byte); cur_byte += 4; if (params->head_len > 0) memcpy(cur_byte, params->head, params->head_len); cur_byte += params->head_len; put_unaligned_le32(params->tail_len, cur_byte); cur_byte += 4; if (params->tail_len > 0) memcpy(cur_byte, params->tail, params->tail_len); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to send add beacon\n"); kfree(wid.val); return result; } int wilc_del_beacon(struct wilc_vif *vif) { int result; struct wid wid; u8 del_beacon = 0; wid.id = WID_DEL_BEACON; wid.type = WID_CHAR; wid.size = sizeof(char); wid.val = &del_beacon; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to send delete beacon\n"); return result; } int wilc_add_station(struct wilc_vif *vif, const u8 *mac, struct station_parameters *params) { struct wid wid; int result; u8 *cur_byte; wid.id = WID_ADD_STA; wid.type = WID_BIN; wid.size = WILC_ADD_STA_LENGTH + params->link_sta_params.supported_rates_len; wid.val = kmalloc(wid.size, GFP_KERNEL); if (!wid.val) return -ENOMEM; cur_byte = wid.val; wilc_hif_pack_sta_param(cur_byte, mac, params); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result != 0) netdev_err(vif->ndev, "Failed to send add station\n"); kfree(wid.val); return result; } int wilc_del_station(struct wilc_vif *vif, const u8 *mac_addr) { struct wid wid; int result; wid.id = WID_REMOVE_STA; wid.type = WID_BIN; wid.size = ETH_ALEN; wid.val = kzalloc(wid.size, GFP_KERNEL); if (!wid.val) return -ENOMEM; if (!mac_addr) eth_broadcast_addr(wid.val); else ether_addr_copy(wid.val, mac_addr); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to del station\n"); kfree(wid.val); return result; } int wilc_del_allstation(struct wilc_vif *vif, u8 mac_addr[][ETH_ALEN]) { struct wid wid; int result; int i; u8 assoc_sta = 0; struct wilc_del_all_sta del_sta; memset(&del_sta, 0x0, sizeof(del_sta)); for (i = 0; i < WILC_MAX_NUM_STA; i++) { if (!is_zero_ether_addr(mac_addr[i])) { assoc_sta++; ether_addr_copy(del_sta.mac[i], mac_addr[i]); } } if (!assoc_sta) return 0; del_sta.assoc_sta = assoc_sta; wid.id = WID_DEL_ALL_STA; wid.type = WID_STR; wid.size = (assoc_sta * ETH_ALEN) + 1; wid.val = (u8 *)&del_sta; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to send delete all station\n"); return result; } int wilc_edit_station(struct wilc_vif *vif, const u8 *mac, struct station_parameters *params) { struct wid wid; int result; u8 *cur_byte; wid.id = WID_EDIT_STA; wid.type = WID_BIN; wid.size = WILC_ADD_STA_LENGTH + params->link_sta_params.supported_rates_len; wid.val = kmalloc(wid.size, GFP_KERNEL); if (!wid.val) return -ENOMEM; cur_byte = wid.val; wilc_hif_pack_sta_param(cur_byte, mac, params); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to send edit station\n"); kfree(wid.val); return result; } int wilc_set_power_mgmt(struct wilc_vif *vif, bool enabled, u32 timeout) { struct wilc *wilc = vif->wilc; struct wid wid; int result; s8 power_mode; if (enabled) power_mode = 
WILC_FW_MIN_FAST_PS; else power_mode = WILC_FW_NO_POWERSAVE; wid.id = WID_POWER_MANAGEMENT; wid.val = &power_mode; wid.size = sizeof(char); result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to send power management\n"); else wilc->power_save_mode = enabled; return result; } int wilc_setup_multicast_filter(struct wilc_vif *vif, u32 enabled, u32 count, u8 *mc_list) { int result; struct host_if_msg *msg; msg = wilc_alloc_work(vif, handle_set_mcast_filter, false); if (IS_ERR(msg)) return PTR_ERR(msg); msg->body.mc_info.enabled = enabled; msg->body.mc_info.cnt = count; msg->body.mc_info.mc_list = mc_list; result = wilc_enqueue_work(msg); if (result) { netdev_err(vif->ndev, "%s: enqueue work failed\n", __func__); kfree(msg); } return result; } int wilc_set_tx_power(struct wilc_vif *vif, u8 tx_power) { struct wid wid; wid.id = WID_TX_POWER; wid.type = WID_CHAR; wid.val = &tx_power; wid.size = sizeof(char); return wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); } int wilc_get_tx_power(struct wilc_vif *vif, u8 *tx_power) { struct wid wid; wid.id = WID_TX_POWER; wid.type = WID_CHAR; wid.val = tx_power; wid.size = sizeof(char); return wilc_send_config_pkt(vif, WILC_GET_CFG, &wid, 1); } int wilc_set_default_mgmt_key_index(struct wilc_vif *vif, u8 index) { struct wid wid; int result; wid.id = WID_DEFAULT_MGMT_KEY_ID; wid.type = WID_CHAR; wid.size = sizeof(char); wid.val = &index; result = wilc_send_config_pkt(vif, WILC_SET_CFG, &wid, 1); if (result) netdev_err(vif->ndev, "Failed to send default mgmt key index\n"); return result; }
linux-master
drivers/net/wireless/microchip/wilc1000/hif.c
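A minimal userspace sketch, added here for illustration only, of the WID_ADD_BEACON buffer layout that wilc_add_beacon() in hif.c above builds (interval, DTIM period, head length, head bytes, tail length, tail bytes, lengths as little-endian 32-bit values). put_le32() and the example head/tail contents are stand-ins for the kernel helpers, not driver code:

/* Re-creates the wilc_add_beacon() packing in plain C so the layout can be
 * inspected standalone; buffer sizes and payload bytes are made up. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static void put_le32(uint32_t v, uint8_t *p)
{
	p[0] = v & 0xff;
	p[1] = (v >> 8) & 0xff;
	p[2] = (v >> 16) & 0xff;
	p[3] = (v >> 24) & 0xff;
}

static size_t pack_add_beacon(uint8_t *buf, uint32_t interval,
			      uint32_t dtim_period,
			      const uint8_t *head, uint32_t head_len,
			      const uint8_t *tail, uint32_t tail_len)
{
	uint8_t *cur = buf;

	put_le32(interval, cur);      cur += 4;
	put_le32(dtim_period, cur);   cur += 4;
	put_le32(head_len, cur);      cur += 4;
	memcpy(cur, head, head_len);  cur += head_len;
	put_le32(tail_len, cur);      cur += 4;
	memcpy(cur, tail, tail_len);  cur += tail_len;

	/* total equals head_len + tail_len + 16, i.e. the wid.size above */
	return (size_t)(cur - buf);
}

int main(void)
{
	uint8_t head[] = { 0x80, 0x00 };        /* fake beacon head */
	uint8_t tail[] = { 0xdd, 0x01, 0x00 };  /* fake tail IE */
	uint8_t buf[64];
	size_t len = pack_add_beacon(buf, 100, 2, head, sizeof(head),
				     tail, sizeof(tail));

	printf("packed %zu bytes\n", len);
	return 0;
}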
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries. * All rights reserved. */ #include "cfg80211.h" struct wilc_wfi_radiotap_hdr { struct ieee80211_radiotap_header hdr; u8 rate; } __packed; struct wilc_wfi_radiotap_cb_hdr { struct ieee80211_radiotap_header hdr; u8 rate; u8 dump; u16 tx_flags; } __packed; #define TX_RADIOTAP_PRESENT ((1 << IEEE80211_RADIOTAP_RATE) | \ (1 << IEEE80211_RADIOTAP_TX_FLAGS)) void wilc_wfi_monitor_rx(struct net_device *mon_dev, u8 *buff, u32 size) { u32 header, pkt_offset; struct sk_buff *skb = NULL; struct wilc_wfi_radiotap_hdr *hdr; struct wilc_wfi_radiotap_cb_hdr *cb_hdr; if (!mon_dev) return; if (!netif_running(mon_dev)) return; /* Get WILC header */ header = get_unaligned_le32(buff - HOST_HDR_OFFSET); /* * The packet offset field contain info about what type of management * the frame we are dealing with and ack status */ pkt_offset = FIELD_GET(WILC_PKT_HDR_OFFSET_FIELD, header); if (pkt_offset & IS_MANAGMEMENT_CALLBACK) { /* hostapd callback mgmt frame */ skb = dev_alloc_skb(size + sizeof(*cb_hdr)); if (!skb) return; skb_put_data(skb, buff, size); cb_hdr = skb_push(skb, sizeof(*cb_hdr)); memset(cb_hdr, 0, sizeof(*cb_hdr)); cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */ cb_hdr->hdr.it_len = cpu_to_le16(sizeof(*cb_hdr)); cb_hdr->hdr.it_present = cpu_to_le32(TX_RADIOTAP_PRESENT); cb_hdr->rate = 5; if (pkt_offset & IS_MGMT_STATUS_SUCCES) { /* success */ cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_RTS; } else { cb_hdr->tx_flags = IEEE80211_RADIOTAP_F_TX_FAIL; } } else { skb = dev_alloc_skb(size + sizeof(*hdr)); if (!skb) return; skb_put_data(skb, buff, size); hdr = skb_push(skb, sizeof(*hdr)); memset(hdr, 0, sizeof(struct wilc_wfi_radiotap_hdr)); hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */ hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); hdr->hdr.it_present = cpu_to_le32 (1 << IEEE80211_RADIOTAP_RATE); hdr->rate = 5; } skb->dev = mon_dev; skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } struct tx_complete_mon_data { int size; void *buff; }; static void mgmt_tx_complete(void *priv, int status) { struct tx_complete_mon_data *pv_data = priv; /* * in case of fully hosting mode, the freeing will be done * in response to the cfg packet */ kfree(pv_data->buff); kfree(pv_data); } static int mon_mgmt_tx(struct net_device *dev, const u8 *buf, size_t len) { struct tx_complete_mon_data *mgmt_tx = NULL; if (!dev) return -EFAULT; netif_stop_queue(dev); mgmt_tx = kmalloc(sizeof(*mgmt_tx), GFP_ATOMIC); if (!mgmt_tx) return -ENOMEM; mgmt_tx->buff = kmemdup(buf, len, GFP_ATOMIC); if (!mgmt_tx->buff) { kfree(mgmt_tx); return -ENOMEM; } mgmt_tx->size = len; wilc_wlan_txq_add_mgmt_pkt(dev, mgmt_tx, mgmt_tx->buff, mgmt_tx->size, mgmt_tx_complete); netif_wake_queue(dev); return 0; } static netdev_tx_t wilc_wfi_mon_xmit(struct sk_buff *skb, struct net_device *dev) { u32 rtap_len, ret = 0; struct wilc_wfi_mon_priv *mon_priv; struct sk_buff *skb2; struct wilc_wfi_radiotap_cb_hdr *cb_hdr; u8 srcadd[ETH_ALEN]; u8 bssid[ETH_ALEN]; mon_priv = netdev_priv(dev); if (!mon_priv) return -EFAULT; rtap_len = ieee80211_get_radiotap_len(skb->data); if (skb->len < rtap_len) return -1; skb_pull(skb, rtap_len); if (skb->data[0] == 0xc0 && is_broadcast_ether_addr(&skb->data[4])) { skb2 = dev_alloc_skb(skb->len + sizeof(*cb_hdr)); if (!skb2) return -ENOMEM; skb_put_data(skb2, skb->data, 
skb->len); cb_hdr = skb_push(skb2, sizeof(*cb_hdr)); memset(cb_hdr, 0, sizeof(struct wilc_wfi_radiotap_cb_hdr)); cb_hdr->hdr.it_version = 0; /* PKTHDR_RADIOTAP_VERSION; */ cb_hdr->hdr.it_len = cpu_to_le16(sizeof(*cb_hdr)); cb_hdr->hdr.it_present = cpu_to_le32(TX_RADIOTAP_PRESENT); cb_hdr->rate = 5; cb_hdr->tx_flags = 0x0004; skb2->dev = dev; skb_reset_mac_header(skb2); skb2->ip_summed = CHECKSUM_UNNECESSARY; skb2->pkt_type = PACKET_OTHERHOST; skb2->protocol = htons(ETH_P_802_2); memset(skb2->cb, 0, sizeof(skb2->cb)); netif_rx(skb2); return 0; } skb->dev = mon_priv->real_ndev; ether_addr_copy(srcadd, &skb->data[10]); ether_addr_copy(bssid, &skb->data[16]); /* * Identify if data or mgmt packet, if source address and bssid * fields are equal send it to mgmt frames handler */ if (!(memcmp(srcadd, bssid, 6))) { ret = mon_mgmt_tx(mon_priv->real_ndev, skb->data, skb->len); if (ret) netdev_err(dev, "fail to mgmt tx\n"); dev_kfree_skb(skb); } else { ret = wilc_mac_xmit(skb, mon_priv->real_ndev); } return ret; } static const struct net_device_ops wilc_wfi_netdev_ops = { .ndo_start_xmit = wilc_wfi_mon_xmit, }; struct net_device *wilc_wfi_init_mon_interface(struct wilc *wl, const char *name, struct net_device *real_dev) { struct wilc_wfi_mon_priv *priv; /* If monitor interface is already initialized, return it */ if (wl->monitor_dev) return wl->monitor_dev; wl->monitor_dev = alloc_etherdev(sizeof(struct wilc_wfi_mon_priv)); if (!wl->monitor_dev) return NULL; wl->monitor_dev->type = ARPHRD_IEEE80211_RADIOTAP; strscpy(wl->monitor_dev->name, name, IFNAMSIZ); wl->monitor_dev->netdev_ops = &wilc_wfi_netdev_ops; wl->monitor_dev->needs_free_netdev = true; if (register_netdevice(wl->monitor_dev)) { netdev_err(real_dev, "register_netdevice failed\n"); free_netdev(wl->monitor_dev); return NULL; } priv = netdev_priv(wl->monitor_dev); priv->real_ndev = real_dev; return wl->monitor_dev; } void wilc_wfi_deinit_mon_interface(struct wilc *wl, bool rtnl_locked) { if (!wl->monitor_dev) return; if (rtnl_locked) unregister_netdevice(wl->monitor_dev); else unregister_netdev(wl->monitor_dev); wl->monitor_dev = NULL; }
linux-master
drivers/net/wireless/microchip/wilc1000/mon.c
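A hand-rolled sketch, for illustration only, of the 12-byte radiotap framing that wilc_wfi_monitor_rx() in mon.c above prepends to hostapd callback frames: an 8-byte radiotap header followed by a rate byte, a pad byte and 16-bit TX flags. The struct below re-creates the layout instead of using the kernel's struct ieee80211_radiotap_header; the bit positions (RATE = bit 2, TX_FLAGS = bit 15) come from the radiotap specification:

/* Standalone radiotap-header layout demo; assumes a little-endian host so
 * the multi-byte fields can be assigned directly. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct rtap_cb_hdr {
	uint8_t  it_version;	/* always 0 */
	uint8_t  it_pad;
	uint16_t it_len;	/* total radiotap length, little endian */
	uint32_t it_present;	/* bitmap of present fields, little endian */
	uint8_t  rate;		/* IEEE80211_RADIOTAP_RATE, 500 kbps units */
	uint8_t  dump;		/* pad so tx_flags is 2-byte aligned */
	uint16_t tx_flags;	/* IEEE80211_RADIOTAP_TX_FLAGS */
} __attribute__((packed));

#define RTAP_RATE_BIT		(1u << 2)
#define RTAP_TXFLAGS_BIT	(1u << 15)

int main(void)
{
	struct rtap_cb_hdr hdr;

	memset(&hdr, 0, sizeof(hdr));
	hdr.it_len = sizeof(hdr);
	hdr.it_present = RTAP_RATE_BIT | RTAP_TXFLAGS_BIT;
	hdr.rate = 5;			/* 2.5 Mbps, as hard-coded in the driver */
	hdr.tx_flags = 0x0001;		/* example flag value only */

	printf("radiotap callback header is %zu bytes\n", sizeof(hdr));
	return 0;
}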
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries. * All rights reserved. */ #include <linux/bitfield.h> #include "wlan_if.h" #include "wlan.h" #include "wlan_cfg.h" #include "netdev.h" enum cfg_cmd_type { CFG_BYTE_CMD = 0, CFG_HWORD_CMD = 1, CFG_WORD_CMD = 2, CFG_STR_CMD = 3, CFG_BIN_CMD = 4 }; static const struct wilc_cfg_byte g_cfg_byte[] = { {WID_STATUS, 0}, {WID_RSSI, 0}, {WID_LINKSPEED, 0}, {WID_TX_POWER, 0}, {WID_WOWLAN_TRIGGER, 0}, {WID_NIL, 0} }; static const struct wilc_cfg_hword g_cfg_hword[] = { {WID_NIL, 0} }; static const struct wilc_cfg_word g_cfg_word[] = { {WID_FAILED_COUNT, 0}, {WID_RECEIVED_FRAGMENT_COUNT, 0}, {WID_SUCCESS_FRAME_COUNT, 0}, {WID_GET_INACTIVE_TIME, 0}, {WID_NIL, 0} }; static const struct wilc_cfg_str g_cfg_str[] = { {WID_FIRMWARE_VERSION, NULL}, {WID_MAC_ADDR, NULL}, {WID_ASSOC_RES_INFO, NULL}, {WID_NIL, NULL} }; #define WILC_RESP_MSG_TYPE_CONFIG_REPLY 'R' #define WILC_RESP_MSG_TYPE_STATUS_INFO 'I' #define WILC_RESP_MSG_TYPE_NETWORK_INFO 'N' #define WILC_RESP_MSG_TYPE_SCAN_COMPLETE 'S' /******************************************** * * Configuration Functions * ********************************************/ static int wilc_wlan_cfg_set_byte(u8 *frame, u32 offset, u16 id, u8 val8) { if ((offset + 4) >= WILC_MAX_CFG_FRAME_SIZE) return 0; put_unaligned_le16(id, &frame[offset]); put_unaligned_le16(1, &frame[offset + 2]); frame[offset + 4] = val8; return 5; } static int wilc_wlan_cfg_set_hword(u8 *frame, u32 offset, u16 id, u16 val16) { if ((offset + 5) >= WILC_MAX_CFG_FRAME_SIZE) return 0; put_unaligned_le16(id, &frame[offset]); put_unaligned_le16(2, &frame[offset + 2]); put_unaligned_le16(val16, &frame[offset + 4]); return 6; } static int wilc_wlan_cfg_set_word(u8 *frame, u32 offset, u16 id, u32 val32) { if ((offset + 7) >= WILC_MAX_CFG_FRAME_SIZE) return 0; put_unaligned_le16(id, &frame[offset]); put_unaligned_le16(4, &frame[offset + 2]); put_unaligned_le32(val32, &frame[offset + 4]); return 8; } static int wilc_wlan_cfg_set_str(u8 *frame, u32 offset, u16 id, u8 *str, u32 size) { if ((offset + size + 4) >= WILC_MAX_CFG_FRAME_SIZE) return 0; put_unaligned_le16(id, &frame[offset]); put_unaligned_le16(size, &frame[offset + 2]); if (str && size != 0) memcpy(&frame[offset + 4], str, size); return (size + 4); } static int wilc_wlan_cfg_set_bin(u8 *frame, u32 offset, u16 id, u8 *b, u32 size) { u32 i; u8 checksum = 0; if ((offset + size + 5) >= WILC_MAX_CFG_FRAME_SIZE) return 0; put_unaligned_le16(id, &frame[offset]); put_unaligned_le16(size, &frame[offset + 2]); if ((b) && size != 0) { memcpy(&frame[offset + 4], b, size); for (i = 0; i < size; i++) checksum += frame[offset + i + 4]; } frame[offset + size + 4] = checksum; return (size + 5); } /******************************************** * * Configuration Response Functions * ********************************************/ static void wilc_wlan_parse_response_frame(struct wilc *wl, u8 *info, int size) { u16 wid; u32 len = 0, i = 0; struct wilc_cfg *cfg = &wl->cfg; while (size > 0) { i = 0; wid = get_unaligned_le16(info); switch (FIELD_GET(WILC_WID_TYPE, wid)) { case WID_CHAR: while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid) i++; if (cfg->b[i].id == wid) cfg->b[i].val = info[4]; len = 3; break; case WID_SHORT: while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid) i++; if (cfg->hw[i].id == wid) cfg->hw[i].val = get_unaligned_le16(&info[4]); len = 4; break; case WID_INT: while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid) i++; if (cfg->w[i].id == 
wid) cfg->w[i].val = get_unaligned_le32(&info[4]); len = 6; break; case WID_STR: while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid) i++; if (cfg->s[i].id == wid) memcpy(cfg->s[i].str, &info[2], get_unaligned_le16(&info[2]) + 2); len = 2 + get_unaligned_le16(&info[2]); break; default: break; } size -= (2 + len); info += (2 + len); } } static void wilc_wlan_parse_info_frame(struct wilc *wl, u8 *info) { u32 wid, len; wid = get_unaligned_le16(info); len = info[2]; if (len == 1 && wid == WID_STATUS) { int i = 0; while (wl->cfg.b[i].id != WID_NIL && wl->cfg.b[i].id != wid) i++; if (wl->cfg.b[i].id == wid) wl->cfg.b[i].val = info[3]; } } /******************************************** * * Configuration Exported Functions * ********************************************/ int wilc_wlan_cfg_set_wid(u8 *frame, u32 offset, u16 id, u8 *buf, int size) { u8 type = FIELD_GET(WILC_WID_TYPE, id); int ret = 0; switch (type) { case CFG_BYTE_CMD: if (size >= 1) ret = wilc_wlan_cfg_set_byte(frame, offset, id, *buf); break; case CFG_HWORD_CMD: if (size >= 2) ret = wilc_wlan_cfg_set_hword(frame, offset, id, *((u16 *)buf)); break; case CFG_WORD_CMD: if (size >= 4) ret = wilc_wlan_cfg_set_word(frame, offset, id, *((u32 *)buf)); break; case CFG_STR_CMD: ret = wilc_wlan_cfg_set_str(frame, offset, id, buf, size); break; case CFG_BIN_CMD: ret = wilc_wlan_cfg_set_bin(frame, offset, id, buf, size); break; } return ret; } int wilc_wlan_cfg_get_wid(u8 *frame, u32 offset, u16 id) { if ((offset + 2) >= WILC_MAX_CFG_FRAME_SIZE) return 0; put_unaligned_le16(id, &frame[offset]); return 2; } int wilc_wlan_cfg_get_val(struct wilc *wl, u16 wid, u8 *buffer, u32 buffer_size) { u8 type = FIELD_GET(WILC_WID_TYPE, wid); int i, ret = 0; struct wilc_cfg *cfg = &wl->cfg; i = 0; if (type == CFG_BYTE_CMD) { while (cfg->b[i].id != WID_NIL && cfg->b[i].id != wid) i++; if (cfg->b[i].id == wid) { memcpy(buffer, &cfg->b[i].val, 1); ret = 1; } } else if (type == CFG_HWORD_CMD) { while (cfg->hw[i].id != WID_NIL && cfg->hw[i].id != wid) i++; if (cfg->hw[i].id == wid) { memcpy(buffer, &cfg->hw[i].val, 2); ret = 2; } } else if (type == CFG_WORD_CMD) { while (cfg->w[i].id != WID_NIL && cfg->w[i].id != wid) i++; if (cfg->w[i].id == wid) { memcpy(buffer, &cfg->w[i].val, 4); ret = 4; } } else if (type == CFG_STR_CMD) { while (cfg->s[i].id != WID_NIL && cfg->s[i].id != wid) i++; if (cfg->s[i].id == wid) { u16 size = get_unaligned_le16(cfg->s[i].str); if (buffer_size >= size) { memcpy(buffer, &cfg->s[i].str[2], size); ret = size; } } } return ret; } void wilc_wlan_cfg_indicate_rx(struct wilc *wilc, u8 *frame, int size, struct wilc_cfg_rsp *rsp) { u8 msg_type; u8 msg_id; msg_type = frame[0]; msg_id = frame[1]; /* seq no */ frame += 4; size -= 4; rsp->type = 0; switch (msg_type) { case WILC_RESP_MSG_TYPE_CONFIG_REPLY: wilc_wlan_parse_response_frame(wilc, frame, size); rsp->type = WILC_CFG_RSP; rsp->seq_no = msg_id; break; case WILC_RESP_MSG_TYPE_STATUS_INFO: wilc_wlan_parse_info_frame(wilc, frame); rsp->type = WILC_CFG_RSP_STATUS; rsp->seq_no = msg_id; /* call host interface info parse as well */ wilc_gnrl_async_info_received(wilc, frame - 4, size + 4); break; case WILC_RESP_MSG_TYPE_NETWORK_INFO: wilc_network_info_received(wilc, frame - 4, size + 4); break; case WILC_RESP_MSG_TYPE_SCAN_COMPLETE: wilc_scan_complete_received(wilc, frame - 4, size + 4); break; default: rsp->seq_no = msg_id; break; } } int wilc_wlan_cfg_init(struct wilc *wl) { struct wilc_cfg_str_vals *str_vals; int i = 0; wl->cfg.b = kmemdup(g_cfg_byte, sizeof(g_cfg_byte), GFP_KERNEL); if 
(!wl->cfg.b) return -ENOMEM; wl->cfg.hw = kmemdup(g_cfg_hword, sizeof(g_cfg_hword), GFP_KERNEL); if (!wl->cfg.hw) goto out_b; wl->cfg.w = kmemdup(g_cfg_word, sizeof(g_cfg_word), GFP_KERNEL); if (!wl->cfg.w) goto out_hw; wl->cfg.s = kmemdup(g_cfg_str, sizeof(g_cfg_str), GFP_KERNEL); if (!wl->cfg.s) goto out_w; str_vals = kzalloc(sizeof(*str_vals), GFP_KERNEL); if (!str_vals) goto out_s; wl->cfg.str_vals = str_vals; /* store the string cfg parameters */ wl->cfg.s[i].id = WID_FIRMWARE_VERSION; wl->cfg.s[i].str = str_vals->firmware_version; i++; wl->cfg.s[i].id = WID_MAC_ADDR; wl->cfg.s[i].str = str_vals->mac_address; i++; wl->cfg.s[i].id = WID_ASSOC_RES_INFO; wl->cfg.s[i].str = str_vals->assoc_rsp; i++; wl->cfg.s[i].id = WID_NIL; wl->cfg.s[i].str = NULL; return 0; out_s: kfree(wl->cfg.s); out_w: kfree(wl->cfg.w); out_hw: kfree(wl->cfg.hw); out_b: kfree(wl->cfg.b); return -ENOMEM; } void wilc_wlan_cfg_deinit(struct wilc *wl) { kfree(wl->cfg.b); kfree(wl->cfg.hw); kfree(wl->cfg.w); kfree(wl->cfg.s); kfree(wl->cfg.str_vals); }
linux-master
drivers/net/wireless/microchip/wilc1000/wlan_cfg.c
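A minimal userspace sketch, added for illustration, of the WID TLV encoding that wilc_wlan_cfg_set_byte() and its siblings in wlan_cfg.c above emit: a 16-bit little-endian WID id, a 16-bit little-endian length, then the value bytes. The WID id and the frame-size limit below are placeholders, not real firmware values:

/* Encodes one byte-sized WID TLV and reads it back, mirroring the bounds
 * check and field order of wilc_wlan_cfg_set_byte(). */
#include <stdint.h>
#include <stdio.h>

#define MAX_CFG_FRAME 1468	/* stand-in for WILC_MAX_CFG_FRAME_SIZE */

static void put_le16(uint16_t v, uint8_t *p)
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static uint16_t get_le16(const uint8_t *p)
{
	return (uint16_t)(p[0] | (p[1] << 8));
}

/* Returns bytes written, or 0 if the TLV would overflow the frame. */
static int cfg_set_byte(uint8_t *frame, uint32_t offset, uint16_t id, uint8_t val)
{
	if (offset + 4 >= MAX_CFG_FRAME)
		return 0;
	put_le16(id, &frame[offset]);
	put_le16(1, &frame[offset + 2]);	/* payload length */
	frame[offset + 4] = val;
	return 5;
}

int main(void)
{
	uint8_t frame[MAX_CFG_FRAME] = { 0 };
	uint16_t fake_wid = 0x0001;		/* hypothetical WID id */
	int n = cfg_set_byte(frame, 0, fake_wid, 0x2a);

	printf("wrote %d bytes: id=0x%04x len=%u val=0x%02x\n", n,
	       (unsigned)get_le16(&frame[0]), (unsigned)get_le16(&frame[2]),
	       frame[4]);
	return 0;
}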
// SPDX-License-Identifier: GPL-2.0 /* * Copyright (c) 2012 - 2018 Microchip Technology Inc., and its subsidiaries. * All rights reserved. */ #include <linux/irq.h> #include <linux/kthread.h> #include <linux/firmware.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include "cfg80211.h" #include "wlan_cfg.h" #define WILC_MULTICAST_TABLE_SIZE 8 #define WILC_MAX_FW_VERSION_STR_SIZE 50 /* latest API version supported */ #define WILC1000_API_VER 1 #define WILC1000_FW_PREFIX "atmel/wilc1000_wifi_firmware-" #define __WILC1000_FW(api) WILC1000_FW_PREFIX #api ".bin" #define WILC1000_FW(api) __WILC1000_FW(api) static irqreturn_t isr_uh_routine(int irq, void *user_data) { struct wilc *wilc = user_data; if (wilc->close) { pr_err("Can't handle UH interrupt\n"); return IRQ_HANDLED; } return IRQ_WAKE_THREAD; } static irqreturn_t isr_bh_routine(int irq, void *userdata) { struct wilc *wilc = userdata; if (wilc->close) { pr_err("Can't handle BH interrupt\n"); return IRQ_HANDLED; } wilc_handle_isr(wilc); return IRQ_HANDLED; } static int init_irq(struct net_device *dev) { struct wilc_vif *vif = netdev_priv(dev); struct wilc *wl = vif->wilc; int ret; ret = request_threaded_irq(wl->dev_irq_num, isr_uh_routine, isr_bh_routine, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, dev->name, wl); if (ret) { netdev_err(dev, "Failed to request IRQ [%d]\n", ret); return ret; } netdev_dbg(dev, "IRQ request succeeded IRQ-NUM= %d\n", wl->dev_irq_num); return 0; } static void deinit_irq(struct net_device *dev) { struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; /* Deinitialize IRQ */ if (wilc->dev_irq_num) free_irq(wilc->dev_irq_num, wilc); } void wilc_mac_indicate(struct wilc *wilc) { s8 status; wilc_wlan_cfg_get_val(wilc, WID_STATUS, &status, 1); if (wilc->mac_status == WILC_MAC_STATUS_INIT) { wilc->mac_status = status; complete(&wilc->sync_event); } else { wilc->mac_status = status; } } static struct net_device *get_if_handler(struct wilc *wilc, u8 *mac_header) { struct net_device *ndev = NULL; struct wilc_vif *vif; struct ieee80211_hdr *h = (struct ieee80211_hdr *)mac_header; list_for_each_entry_rcu(vif, &wilc->vif_list, list) { if (vif->iftype == WILC_STATION_MODE) if (ether_addr_equal_unaligned(h->addr2, vif->bssid)) { ndev = vif->ndev; goto out; } if (vif->iftype == WILC_AP_MODE) if (ether_addr_equal_unaligned(h->addr1, vif->bssid)) { ndev = vif->ndev; goto out; } } out: return ndev; } void wilc_wlan_set_bssid(struct net_device *wilc_netdev, const u8 *bssid, u8 mode) { struct wilc_vif *vif = netdev_priv(wilc_netdev); if (bssid) ether_addr_copy(vif->bssid, bssid); else eth_zero_addr(vif->bssid); vif->iftype = mode; } int wilc_wlan_get_num_conn_ifcs(struct wilc *wilc) { int srcu_idx; u8 ret_val = 0; struct wilc_vif *vif; srcu_idx = srcu_read_lock(&wilc->srcu); list_for_each_entry_rcu(vif, &wilc->vif_list, list) { if (!is_zero_ether_addr(vif->bssid)) ret_val++; } srcu_read_unlock(&wilc->srcu, srcu_idx); return ret_val; } static int wilc_txq_task(void *vp) { int ret; u32 txq_count; struct wilc *wl = vp; complete(&wl->txq_thread_started); while (1) { wait_for_completion(&wl->txq_event); if (wl->close) { complete(&wl->txq_thread_started); while (!kthread_should_stop()) schedule(); break; } do { ret = wilc_wlan_handle_txq(wl, &txq_count); if (txq_count < FLOW_CONTROL_LOWER_THRESHOLD) { int srcu_idx; struct wilc_vif *ifc; srcu_idx = srcu_read_lock(&wl->srcu); list_for_each_entry_rcu(ifc, &wl->vif_list, list) { if (ifc->mac_opened && ifc->ndev) netif_wake_queue(ifc->ndev); } srcu_read_unlock(&wl->srcu, 
srcu_idx); } } while (ret == WILC_VMM_ENTRY_FULL_RETRY && !wl->close); } return 0; } static int wilc_wlan_get_firmware(struct net_device *dev) { struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; int chip_id; const struct firmware *wilc_fw; int ret; chip_id = wilc_get_chipid(wilc, false); netdev_info(dev, "ChipID [%x] loading firmware [%s]\n", chip_id, WILC1000_FW(WILC1000_API_VER)); ret = request_firmware(&wilc_fw, WILC1000_FW(WILC1000_API_VER), wilc->dev); if (ret != 0) { netdev_err(dev, "%s - firmware not available\n", WILC1000_FW(WILC1000_API_VER)); return -EINVAL; } wilc->firmware = wilc_fw; return 0; } static int wilc_start_firmware(struct net_device *dev) { struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; int ret = 0; ret = wilc_wlan_start(wilc); if (ret) return ret; if (!wait_for_completion_timeout(&wilc->sync_event, msecs_to_jiffies(5000))) return -ETIME; return 0; } static int wilc1000_firmware_download(struct net_device *dev) { struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; int ret = 0; if (!wilc->firmware) { netdev_err(dev, "Firmware buffer is NULL\n"); return -ENOBUFS; } ret = wilc_wlan_firmware_download(wilc, wilc->firmware->data, wilc->firmware->size); if (ret) return ret; release_firmware(wilc->firmware); wilc->firmware = NULL; netdev_dbg(dev, "Download Succeeded\n"); return 0; } static int wilc_init_fw_config(struct net_device *dev, struct wilc_vif *vif) { struct wilc_priv *priv = &vif->priv; struct host_if_drv *hif_drv; u8 b; u16 hw; u32 w; netdev_dbg(dev, "Start configuring Firmware\n"); hif_drv = (struct host_if_drv *)priv->hif_drv; netdev_dbg(dev, "Host = %p\n", hif_drv); w = vif->iftype; cpu_to_le32s(&w); if (!wilc_wlan_cfg_set(vif, 1, WID_SET_OPERATION_MODE, (u8 *)&w, 4, 0, 0)) goto fail; b = WILC_FW_BSS_TYPE_INFRA; if (!wilc_wlan_cfg_set(vif, 0, WID_BSS_TYPE, &b, 1, 0, 0)) goto fail; b = WILC_FW_TX_RATE_AUTO; if (!wilc_wlan_cfg_set(vif, 0, WID_CURRENT_TX_RATE, &b, 1, 0, 0)) goto fail; b = WILC_FW_OPER_MODE_G_MIXED_11B_2; if (!wilc_wlan_cfg_set(vif, 0, WID_11G_OPERATING_MODE, &b, 1, 0, 0)) goto fail; b = WILC_FW_PREAMBLE_SHORT; if (!wilc_wlan_cfg_set(vif, 0, WID_PREAMBLE, &b, 1, 0, 0)) goto fail; b = WILC_FW_11N_PROT_AUTO; if (!wilc_wlan_cfg_set(vif, 0, WID_11N_PROT_MECH, &b, 1, 0, 0)) goto fail; b = WILC_FW_ACTIVE_SCAN; if (!wilc_wlan_cfg_set(vif, 0, WID_SCAN_TYPE, &b, 1, 0, 0)) goto fail; b = WILC_FW_SITE_SURVEY_OFF; if (!wilc_wlan_cfg_set(vif, 0, WID_SITE_SURVEY, &b, 1, 0, 0)) goto fail; hw = 0xffff; cpu_to_le16s(&hw); if (!wilc_wlan_cfg_set(vif, 0, WID_RTS_THRESHOLD, (u8 *)&hw, 2, 0, 0)) goto fail; hw = 2346; cpu_to_le16s(&hw); if (!wilc_wlan_cfg_set(vif, 0, WID_FRAG_THRESHOLD, (u8 *)&hw, 2, 0, 0)) goto fail; b = 0; if (!wilc_wlan_cfg_set(vif, 0, WID_BCAST_SSID, &b, 1, 0, 0)) goto fail; b = 1; if (!wilc_wlan_cfg_set(vif, 0, WID_QOS_ENABLE, &b, 1, 0, 0)) goto fail; b = WILC_FW_NO_POWERSAVE; if (!wilc_wlan_cfg_set(vif, 0, WID_POWER_MANAGEMENT, &b, 1, 0, 0)) goto fail; b = WILC_FW_SEC_NO; if (!wilc_wlan_cfg_set(vif, 0, WID_11I_MODE, &b, 1, 0, 0)) goto fail; b = WILC_FW_AUTH_OPEN_SYSTEM; if (!wilc_wlan_cfg_set(vif, 0, WID_AUTH_TYPE, &b, 1, 0, 0)) goto fail; b = 3; if (!wilc_wlan_cfg_set(vif, 0, WID_LISTEN_INTERVAL, &b, 1, 0, 0)) goto fail; b = 3; if (!wilc_wlan_cfg_set(vif, 0, WID_DTIM_PERIOD, &b, 1, 0, 0)) goto fail; b = WILC_FW_ACK_POLICY_NORMAL; if (!wilc_wlan_cfg_set(vif, 0, WID_ACK_POLICY, &b, 1, 0, 0)) goto fail; b = 0; if (!wilc_wlan_cfg_set(vif, 0, WID_USER_CONTROL_ON_TX_POWER, &b, 1, 0, 0)) 
goto fail; b = 48; if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11A, &b, 1, 0, 0)) goto fail; b = 28; if (!wilc_wlan_cfg_set(vif, 0, WID_TX_POWER_LEVEL_11B, &b, 1, 0, 0)) goto fail; hw = 100; cpu_to_le16s(&hw); if (!wilc_wlan_cfg_set(vif, 0, WID_BEACON_INTERVAL, (u8 *)&hw, 2, 0, 0)) goto fail; b = WILC_FW_REKEY_POLICY_DISABLE; if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_POLICY, &b, 1, 0, 0)) goto fail; w = 84600; cpu_to_le32s(&w); if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PERIOD, (u8 *)&w, 4, 0, 0)) goto fail; w = 500; cpu_to_le32s(&w); if (!wilc_wlan_cfg_set(vif, 0, WID_REKEY_PACKET_COUNT, (u8 *)&w, 4, 0, 0)) goto fail; b = 1; if (!wilc_wlan_cfg_set(vif, 0, WID_SHORT_SLOT_ALLOWED, &b, 1, 0, 0)) goto fail; b = WILC_FW_ERP_PROT_SELF_CTS; if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ERP_PROT_TYPE, &b, 1, 0, 0)) goto fail; b = 1; if (!wilc_wlan_cfg_set(vif, 0, WID_11N_ENABLE, &b, 1, 0, 0)) goto fail; b = WILC_FW_11N_OP_MODE_HT_MIXED; if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OPERATING_MODE, &b, 1, 0, 0)) goto fail; b = 1; if (!wilc_wlan_cfg_set(vif, 0, WID_11N_TXOP_PROT_DISABLE, &b, 1, 0, 0)) goto fail; b = WILC_FW_OBBS_NONHT_DETECT_PROTECT_REPORT; if (!wilc_wlan_cfg_set(vif, 0, WID_11N_OBSS_NONHT_DETECTION, &b, 1, 0, 0)) goto fail; b = WILC_FW_HT_PROT_RTS_CTS_NONHT; if (!wilc_wlan_cfg_set(vif, 0, WID_11N_HT_PROT_TYPE, &b, 1, 0, 0)) goto fail; b = 0; if (!wilc_wlan_cfg_set(vif, 0, WID_11N_RIFS_PROT_ENABLE, &b, 1, 0, 0)) goto fail; b = 7; if (!wilc_wlan_cfg_set(vif, 0, WID_11N_CURRENT_TX_MCS, &b, 1, 0, 0)) goto fail; b = 1; if (!wilc_wlan_cfg_set(vif, 0, WID_11N_IMMEDIATE_BA_ENABLED, &b, 1, 1, 1)) goto fail; return 0; fail: return -EINVAL; } static void wlan_deinitialize_threads(struct net_device *dev) { struct wilc_vif *vif = netdev_priv(dev); struct wilc *wl = vif->wilc; wl->close = 1; complete(&wl->txq_event); if (wl->txq_thread) { kthread_stop(wl->txq_thread); wl->txq_thread = NULL; } } static void wilc_wlan_deinitialize(struct net_device *dev) { struct wilc_vif *vif = netdev_priv(dev); struct wilc *wl = vif->wilc; if (!wl) { netdev_err(dev, "wl is NULL\n"); return; } if (wl->initialized) { netdev_info(dev, "Deinitializing wilc1000...\n"); if (!wl->dev_irq_num && wl->hif_func->disable_interrupt) { mutex_lock(&wl->hif_cs); wl->hif_func->disable_interrupt(wl); mutex_unlock(&wl->hif_cs); } complete(&wl->txq_event); wlan_deinitialize_threads(dev); deinit_irq(dev); wilc_wlan_stop(wl, vif); wilc_wlan_cleanup(dev); wl->initialized = false; netdev_dbg(dev, "wilc1000 deinitialization Done\n"); } else { netdev_dbg(dev, "wilc1000 is not initialized\n"); } } static int wlan_initialize_threads(struct net_device *dev) { struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; wilc->txq_thread = kthread_run(wilc_txq_task, (void *)wilc, "%s-tx", dev->name); if (IS_ERR(wilc->txq_thread)) { netdev_err(dev, "couldn't create TXQ thread\n"); wilc->close = 1; return PTR_ERR(wilc->txq_thread); } wait_for_completion(&wilc->txq_thread_started); return 0; } static int wilc_wlan_initialize(struct net_device *dev, struct wilc_vif *vif) { int ret = 0; struct wilc *wl = vif->wilc; if (!wl->initialized) { wl->mac_status = WILC_MAC_STATUS_INIT; wl->close = 0; ret = wilc_wlan_init(dev); if (ret) return ret; ret = wlan_initialize_threads(dev); if (ret) goto fail_wilc_wlan; if (wl->dev_irq_num && init_irq(dev)) { ret = -EIO; goto fail_threads; } if (!wl->dev_irq_num && wl->hif_func->enable_interrupt && wl->hif_func->enable_interrupt(wl)) { ret = -EIO; goto fail_irq_init; } ret = wilc_wlan_get_firmware(dev); if (ret) 
goto fail_irq_enable; ret = wilc1000_firmware_download(dev); if (ret) goto fail_irq_enable; ret = wilc_start_firmware(dev); if (ret) goto fail_irq_enable; if (wilc_wlan_cfg_get(vif, 1, WID_FIRMWARE_VERSION, 1, 0)) { int size; char firmware_ver[WILC_MAX_FW_VERSION_STR_SIZE]; size = wilc_wlan_cfg_get_val(wl, WID_FIRMWARE_VERSION, firmware_ver, sizeof(firmware_ver)); firmware_ver[size] = '\0'; netdev_dbg(dev, "Firmware Ver = %s\n", firmware_ver); } ret = wilc_init_fw_config(dev, vif); if (ret) { netdev_err(dev, "Failed to configure firmware\n"); goto fail_fw_start; } wl->initialized = true; return 0; fail_fw_start: wilc_wlan_stop(wl, vif); fail_irq_enable: if (!wl->dev_irq_num && wl->hif_func->disable_interrupt) wl->hif_func->disable_interrupt(wl); fail_irq_init: if (wl->dev_irq_num) deinit_irq(dev); fail_threads: wlan_deinitialize_threads(dev); fail_wilc_wlan: wilc_wlan_cleanup(dev); netdev_err(dev, "WLAN initialization FAILED\n"); } else { netdev_dbg(dev, "wilc1000 already initialized\n"); } return ret; } static int mac_init_fn(struct net_device *ndev) { netif_start_queue(ndev); netif_stop_queue(ndev); return 0; } static int wilc_mac_open(struct net_device *ndev) { struct wilc_vif *vif = netdev_priv(ndev); struct wilc *wl = vif->wilc; int ret = 0; struct mgmt_frame_regs mgmt_regs = {}; u8 addr[ETH_ALEN] __aligned(2); if (!wl || !wl->dev) { netdev_err(ndev, "device not ready\n"); return -ENODEV; } netdev_dbg(ndev, "MAC OPEN[%p]\n", ndev); ret = wilc_init_host_int(ndev); if (ret) return ret; ret = wilc_wlan_initialize(ndev, vif); if (ret) { wilc_deinit_host_int(ndev); return ret; } wilc_set_operation_mode(vif, wilc_get_vif_idx(vif), vif->iftype, vif->idx); if (is_valid_ether_addr(ndev->dev_addr)) { ether_addr_copy(addr, ndev->dev_addr); wilc_set_mac_address(vif, addr); } else { wilc_get_mac_address(vif, addr); eth_hw_addr_set(ndev, addr); } netdev_dbg(ndev, "Mac address: %pM\n", ndev->dev_addr); if (!is_valid_ether_addr(ndev->dev_addr)) { netdev_err(ndev, "Wrong MAC address\n"); wilc_deinit_host_int(ndev); wilc_wlan_deinitialize(ndev); return -EINVAL; } mgmt_regs.interface_stypes = vif->mgmt_reg_stypes; /* so we detect a change */ vif->mgmt_reg_stypes = 0; wilc_update_mgmt_frame_registrations(vif->ndev->ieee80211_ptr->wiphy, vif->ndev->ieee80211_ptr, &mgmt_regs); netif_wake_queue(ndev); wl->open_ifcs++; vif->mac_opened = 1; return 0; } static struct net_device_stats *mac_stats(struct net_device *dev) { struct wilc_vif *vif = netdev_priv(dev); return &vif->netstats; } static int wilc_set_mac_addr(struct net_device *dev, void *p) { int result; struct wilc_vif *vif = netdev_priv(dev); struct wilc *wilc = vif->wilc; struct sockaddr *addr = (struct sockaddr *)p; unsigned char mac_addr[ETH_ALEN]; struct wilc_vif *tmp_vif; int srcu_idx; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (!vif->mac_opened) { eth_commit_mac_addr_change(dev, p); return 0; } /* Verify MAC Address is not already in use: */ srcu_idx = srcu_read_lock(&wilc->srcu); list_for_each_entry_rcu(tmp_vif, &wilc->vif_list, list) { wilc_get_mac_address(tmp_vif, mac_addr); if (ether_addr_equal(addr->sa_data, mac_addr)) { if (vif != tmp_vif) { srcu_read_unlock(&wilc->srcu, srcu_idx); return -EADDRNOTAVAIL; } srcu_read_unlock(&wilc->srcu, srcu_idx); return 0; } } srcu_read_unlock(&wilc->srcu, srcu_idx); result = wilc_set_mac_address(vif, (u8 *)addr->sa_data); if (result) return result; eth_commit_mac_addr_change(dev, p); return result; } static void wilc_set_multicast_list(struct net_device *dev) { struct netdev_hw_addr 
*ha; struct wilc_vif *vif = netdev_priv(dev); int i; u8 *mc_list; u8 *cur_mc; if (dev->flags & IFF_PROMISC) return; if (dev->flags & IFF_ALLMULTI || dev->mc.count > WILC_MULTICAST_TABLE_SIZE) { wilc_setup_multicast_filter(vif, 0, 0, NULL); return; } if (dev->mc.count == 0) { wilc_setup_multicast_filter(vif, 1, 0, NULL); return; } mc_list = kmalloc_array(dev->mc.count, ETH_ALEN, GFP_ATOMIC); if (!mc_list) return; cur_mc = mc_list; i = 0; netdev_for_each_mc_addr(ha, dev) { memcpy(cur_mc, ha->addr, ETH_ALEN); netdev_dbg(dev, "Entry[%d]: %pM\n", i, cur_mc); i++; cur_mc += ETH_ALEN; } if (wilc_setup_multicast_filter(vif, 1, dev->mc.count, mc_list)) kfree(mc_list); } static void wilc_tx_complete(void *priv, int status) { struct tx_complete_data *pv_data = priv; dev_kfree_skb(pv_data->skb); kfree(pv_data); } netdev_tx_t wilc_mac_xmit(struct sk_buff *skb, struct net_device *ndev) { struct wilc_vif *vif = netdev_priv(ndev); struct wilc *wilc = vif->wilc; struct tx_complete_data *tx_data = NULL; int queue_count; if (skb->dev != ndev) { netdev_err(ndev, "Packet not destined to this device\n"); dev_kfree_skb(skb); return NETDEV_TX_OK; } tx_data = kmalloc(sizeof(*tx_data), GFP_ATOMIC); if (!tx_data) { dev_kfree_skb(skb); netif_wake_queue(ndev); return NETDEV_TX_OK; } tx_data->buff = skb->data; tx_data->size = skb->len; tx_data->skb = skb; vif->netstats.tx_packets++; vif->netstats.tx_bytes += tx_data->size; queue_count = wilc_wlan_txq_add_net_pkt(ndev, tx_data, tx_data->buff, tx_data->size, wilc_tx_complete); if (queue_count > FLOW_CONTROL_UPPER_THRESHOLD) { int srcu_idx; struct wilc_vif *vif; srcu_idx = srcu_read_lock(&wilc->srcu); list_for_each_entry_rcu(vif, &wilc->vif_list, list) { if (vif->mac_opened) netif_stop_queue(vif->ndev); } srcu_read_unlock(&wilc->srcu, srcu_idx); } return NETDEV_TX_OK; } static int wilc_mac_close(struct net_device *ndev) { struct wilc_vif *vif = netdev_priv(ndev); struct wilc *wl = vif->wilc; netdev_dbg(ndev, "Mac close\n"); if (wl->open_ifcs > 0) wl->open_ifcs--; else return 0; if (vif->ndev) { netif_stop_queue(vif->ndev); wilc_handle_disconnect(vif); wilc_deinit_host_int(vif->ndev); } if (wl->open_ifcs == 0) { netdev_dbg(ndev, "Deinitializing wilc1000\n"); wl->close = 1; wilc_wlan_deinitialize(ndev); } vif->mac_opened = 0; return 0; } void wilc_frmw_to_host(struct wilc *wilc, u8 *buff, u32 size, u32 pkt_offset) { unsigned int frame_len = 0; int stats; unsigned char *buff_to_send = NULL; struct sk_buff *skb; struct net_device *wilc_netdev; struct wilc_vif *vif; if (!wilc) return; wilc_netdev = get_if_handler(wilc, buff); if (!wilc_netdev) return; buff += pkt_offset; vif = netdev_priv(wilc_netdev); if (size > 0) { frame_len = size; buff_to_send = buff; skb = dev_alloc_skb(frame_len); if (!skb) return; skb->dev = wilc_netdev; skb_put_data(skb, buff_to_send, frame_len); skb->protocol = eth_type_trans(skb, wilc_netdev); vif->netstats.rx_packets++; vif->netstats.rx_bytes += frame_len; skb->ip_summed = CHECKSUM_UNNECESSARY; stats = netif_rx(skb); netdev_dbg(wilc_netdev, "netif_rx ret value is: %d\n", stats); } } void wilc_wfi_mgmt_rx(struct wilc *wilc, u8 *buff, u32 size, bool is_auth) { int srcu_idx; struct wilc_vif *vif; srcu_idx = srcu_read_lock(&wilc->srcu); list_for_each_entry_rcu(vif, &wilc->vif_list, list) { struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)buff; u16 type = le16_to_cpup((__le16 *)buff); u32 type_bit = BIT(type >> 4); u32 auth_bit = BIT(IEEE80211_STYPE_AUTH >> 4); if ((vif->mgmt_reg_stypes & auth_bit && ieee80211_is_auth(mgmt->frame_control)) && 
vif->iftype == WILC_STATION_MODE && is_auth) { wilc_wfi_mgmt_frame_rx(vif, buff, size); break; } if (vif->priv.p2p_listen_state && vif->mgmt_reg_stypes & type_bit) wilc_wfi_p2p_rx(vif, buff, size); if (vif->monitor_flag) wilc_wfi_monitor_rx(wilc->monitor_dev, buff, size); } srcu_read_unlock(&wilc->srcu, srcu_idx); } static const struct net_device_ops wilc_netdev_ops = { .ndo_init = mac_init_fn, .ndo_open = wilc_mac_open, .ndo_stop = wilc_mac_close, .ndo_set_mac_address = wilc_set_mac_addr, .ndo_start_xmit = wilc_mac_xmit, .ndo_get_stats = mac_stats, .ndo_set_rx_mode = wilc_set_multicast_list, }; void wilc_netdev_cleanup(struct wilc *wilc) { struct wilc_vif *vif; int srcu_idx, ifc_cnt = 0; if (!wilc) return; if (wilc->firmware) { release_firmware(wilc->firmware); wilc->firmware = NULL; } srcu_idx = srcu_read_lock(&wilc->srcu); list_for_each_entry_rcu(vif, &wilc->vif_list, list) { if (vif->ndev) unregister_netdev(vif->ndev); } srcu_read_unlock(&wilc->srcu, srcu_idx); wilc_wfi_deinit_mon_interface(wilc, false); destroy_workqueue(wilc->hif_workqueue); while (ifc_cnt < WILC_NUM_CONCURRENT_IFC) { mutex_lock(&wilc->vif_mutex); if (wilc->vif_num <= 0) { mutex_unlock(&wilc->vif_mutex); break; } vif = wilc_get_wl_to_vif(wilc); if (!IS_ERR(vif)) list_del_rcu(&vif->list); wilc->vif_num--; mutex_unlock(&wilc->vif_mutex); synchronize_srcu(&wilc->srcu); ifc_cnt++; } wilc_wlan_cfg_deinit(wilc); wlan_deinit_locks(wilc); wiphy_unregister(wilc->wiphy); wiphy_free(wilc->wiphy); } EXPORT_SYMBOL_GPL(wilc_netdev_cleanup); static u8 wilc_get_available_idx(struct wilc *wl) { int idx = 0; struct wilc_vif *vif; int srcu_idx; srcu_idx = srcu_read_lock(&wl->srcu); list_for_each_entry_rcu(vif, &wl->vif_list, list) { if (vif->idx == 0) idx = 1; else idx = 0; } srcu_read_unlock(&wl->srcu, srcu_idx); return idx; } struct wilc_vif *wilc_netdev_ifc_init(struct wilc *wl, const char *name, int vif_type, enum nl80211_iftype type, bool rtnl_locked) { struct net_device *ndev; struct wilc_vif *vif; int ret; ndev = alloc_etherdev(sizeof(*vif)); if (!ndev) return ERR_PTR(-ENOMEM); vif = netdev_priv(ndev); ndev->ieee80211_ptr = &vif->priv.wdev; strcpy(ndev->name, name); vif->wilc = wl; vif->ndev = ndev; ndev->ml_priv = vif; ndev->netdev_ops = &wilc_netdev_ops; SET_NETDEV_DEV(ndev, wiphy_dev(wl->wiphy)); vif->priv.wdev.wiphy = wl->wiphy; vif->priv.wdev.netdev = ndev; vif->priv.wdev.iftype = type; vif->priv.dev = ndev; if (rtnl_locked) ret = cfg80211_register_netdevice(ndev); else ret = register_netdev(ndev); if (ret) { ret = -EFAULT; goto error; } wl->hif_workqueue = alloc_ordered_workqueue("%s-wq", WQ_MEM_RECLAIM, ndev->name); if (!wl->hif_workqueue) { ret = -ENOMEM; goto unregister_netdev; } ndev->needs_free_netdev = true; vif->iftype = vif_type; vif->idx = wilc_get_available_idx(wl); vif->mac_opened = 0; mutex_lock(&wl->vif_mutex); list_add_tail_rcu(&vif->list, &wl->vif_list); wl->vif_num += 1; mutex_unlock(&wl->vif_mutex); synchronize_srcu(&wl->srcu); return vif; unregister_netdev: if (rtnl_locked) cfg80211_unregister_netdevice(ndev); else unregister_netdev(ndev); error: free_netdev(ndev); return ERR_PTR(ret); } MODULE_LICENSE("GPL"); MODULE_FIRMWARE(WILC1000_FW(WILC1000_API_VER));
linux-master
drivers/net/wireless/microchip/wilc1000/netdev.c
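A small standalone sketch, for illustration, of the two-level macro stringification that netdev.c above uses to build the firmware file name from WILC1000_API_VER: the extra indirection lets the numeric argument expand before # stringifies it, producing "atmel/wilc1000_wifi_firmware-1.bin" for API version 1. The prefix and version are copied from the driver; nothing else is assumed:

/* Demonstrates why WILC1000_FW() needs the __WILC1000_FW() indirection. */
#include <stdio.h>

#define FW_PREFIX  "atmel/wilc1000_wifi_firmware-"
#define __FW(api)  FW_PREFIX #api ".bin"
#define FW(api)    __FW(api)	/* extra level forces 'api' to expand first */

#define API_VER 1

int main(void)
{
	/* Without the indirection, stringifying API_VER directly would give
	 * "...-API_VER.bin" instead of "...-1.bin". */
	printf("%s\n", FW(API_VER));
	return 0;
}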
// SPDX-License-Identifier: GPL-2.0 /* drivers/net/wireless/virt_wifi.c * * A fake implementation of cfg80211_ops that can be tacked on to an ethernet * net_device to make it appear as a wireless connection. * * Copyright (C) 2018 Google, Inc. * * Author: [email protected] */ #include <net/cfg80211.h> #include <net/rtnetlink.h> #include <linux/etherdevice.h> #include <linux/math64.h> #include <linux/module.h> static struct wiphy *common_wiphy; struct virt_wifi_wiphy_priv { struct delayed_work scan_result; struct cfg80211_scan_request *scan_request; bool being_deleted; }; static struct ieee80211_channel channel_2ghz = { .band = NL80211_BAND_2GHZ, .center_freq = 2432, .hw_value = 2432, .max_power = 20, }; static struct ieee80211_rate bitrates_2ghz[] = { { .bitrate = 10 }, { .bitrate = 20 }, { .bitrate = 55 }, { .bitrate = 110 }, { .bitrate = 60 }, { .bitrate = 120 }, { .bitrate = 240 }, }; static struct ieee80211_supported_band band_2ghz = { .channels = &channel_2ghz, .bitrates = bitrates_2ghz, .band = NL80211_BAND_2GHZ, .n_channels = 1, .n_bitrates = ARRAY_SIZE(bitrates_2ghz), .ht_cap = { .ht_supported = true, .cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_DSSSCCK40, .ampdu_factor = 0x3, .ampdu_density = 0x6, .mcs = { .rx_mask = {0xff, 0xff}, .tx_params = IEEE80211_HT_MCS_TX_DEFINED, }, }, }; static struct ieee80211_channel channel_5ghz = { .band = NL80211_BAND_5GHZ, .center_freq = 5240, .hw_value = 5240, .max_power = 20, }; static struct ieee80211_rate bitrates_5ghz[] = { { .bitrate = 60 }, { .bitrate = 120 }, { .bitrate = 240 }, }; #define RX_MCS_MAP (IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 14) #define TX_MCS_MAP (IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | \ IEEE80211_VHT_MCS_SUPPORT_0_9 << 14) static struct ieee80211_supported_band band_5ghz = { .channels = &channel_5ghz, .bitrates = bitrates_5ghz, .band = NL80211_BAND_5GHZ, .n_channels = 1, .n_bitrates = ARRAY_SIZE(bitrates_5ghz), .ht_cap = { .ht_supported = true, .cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_DSSSCCK40, .ampdu_factor = 0x3, .ampdu_density = 0x6, .mcs = { .rx_mask = {0xff, 0xff}, .tx_params = IEEE80211_HT_MCS_TX_DEFINED, }, }, .vht_cap = { .vht_supported = true, .cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ | IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160 | IEEE80211_VHT_CAP_TXSTBC | IEEE80211_VHT_CAP_RXSTBC_1 | IEEE80211_VHT_CAP_RXSTBC_2 | IEEE80211_VHT_CAP_RXSTBC_3 | IEEE80211_VHT_CAP_RXSTBC_4 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK, .vht_mcs = { .rx_mcs_map = cpu_to_le16(RX_MCS_MAP), .tx_mcs_map = cpu_to_le16(TX_MCS_MAP), } }, }; /* Assigned at module init. Guaranteed locally-administered and unicast. 
*/ static u8 fake_router_bssid[ETH_ALEN] __ro_after_init = {}; static void virt_wifi_inform_bss(struct wiphy *wiphy) { u64 tsf = div_u64(ktime_get_boottime_ns(), 1000); struct cfg80211_bss *informed_bss; static const struct { u8 tag; u8 len; u8 ssid[8]; } __packed ssid = { .tag = WLAN_EID_SSID, .len = 8, .ssid = "VirtWifi", }; informed_bss = cfg80211_inform_bss(wiphy, &channel_5ghz, CFG80211_BSS_FTYPE_PRESP, fake_router_bssid, tsf, WLAN_CAPABILITY_ESS, 0, (void *)&ssid, sizeof(ssid), DBM_TO_MBM(-50), GFP_KERNEL); cfg80211_put_bss(wiphy, informed_bss); } /* Called with the rtnl lock held. */ static int virt_wifi_scan(struct wiphy *wiphy, struct cfg80211_scan_request *request) { struct virt_wifi_wiphy_priv *priv = wiphy_priv(wiphy); wiphy_debug(wiphy, "scan\n"); if (priv->scan_request || priv->being_deleted) return -EBUSY; priv->scan_request = request; schedule_delayed_work(&priv->scan_result, HZ * 2); return 0; } /* Acquires and releases the rdev BSS lock. */ static void virt_wifi_scan_result(struct work_struct *work) { struct virt_wifi_wiphy_priv *priv = container_of(work, struct virt_wifi_wiphy_priv, scan_result.work); struct wiphy *wiphy = priv_to_wiphy(priv); struct cfg80211_scan_info scan_info = { .aborted = false }; virt_wifi_inform_bss(wiphy); /* Schedules work which acquires and releases the rtnl lock. */ cfg80211_scan_done(priv->scan_request, &scan_info); priv->scan_request = NULL; } /* May acquire and release the rdev BSS lock. */ static void virt_wifi_cancel_scan(struct wiphy *wiphy) { struct virt_wifi_wiphy_priv *priv = wiphy_priv(wiphy); cancel_delayed_work_sync(&priv->scan_result); /* Clean up dangling callbacks if necessary. */ if (priv->scan_request) { struct cfg80211_scan_info scan_info = { .aborted = true }; /* Schedules work which acquires and releases the rtnl lock. */ cfg80211_scan_done(priv->scan_request, &scan_info); priv->scan_request = NULL; } } struct virt_wifi_netdev_priv { struct delayed_work connect; struct net_device *lowerdev; struct net_device *upperdev; u32 tx_packets; u32 tx_failed; u8 connect_requested_bss[ETH_ALEN]; bool is_up; bool is_connected; bool being_deleted; }; /* Called with the rtnl lock held. */ static int virt_wifi_connect(struct wiphy *wiphy, struct net_device *netdev, struct cfg80211_connect_params *sme) { struct virt_wifi_netdev_priv *priv = netdev_priv(netdev); bool could_schedule; if (priv->being_deleted || !priv->is_up) return -EBUSY; could_schedule = schedule_delayed_work(&priv->connect, HZ * 2); if (!could_schedule) return -EBUSY; if (sme->bssid) { ether_addr_copy(priv->connect_requested_bss, sme->bssid); } else { virt_wifi_inform_bss(wiphy); eth_zero_addr(priv->connect_requested_bss); } wiphy_debug(wiphy, "connect\n"); return 0; } /* Acquires and releases the rdev event lock. */ static void virt_wifi_connect_complete(struct work_struct *work) { struct virt_wifi_netdev_priv *priv = container_of(work, struct virt_wifi_netdev_priv, connect.work); u8 *requested_bss = priv->connect_requested_bss; bool right_addr = ether_addr_equal(requested_bss, fake_router_bssid); u16 status = WLAN_STATUS_SUCCESS; if (is_zero_ether_addr(requested_bss)) requested_bss = NULL; if (!priv->is_up || (requested_bss && !right_addr)) status = WLAN_STATUS_UNSPECIFIED_FAILURE; else priv->is_connected = true; /* Schedules an event that acquires the rtnl lock. */ cfg80211_connect_result(priv->upperdev, requested_bss, NULL, 0, NULL, 0, status, GFP_KERNEL); netif_carrier_on(priv->upperdev); } /* May acquire and release the rdev event lock. 
*/ static void virt_wifi_cancel_connect(struct net_device *netdev) { struct virt_wifi_netdev_priv *priv = netdev_priv(netdev); /* If there is work pending, clean up dangling callbacks. */ if (cancel_delayed_work_sync(&priv->connect)) { /* Schedules an event that acquires the rtnl lock. */ cfg80211_connect_result(priv->upperdev, priv->connect_requested_bss, NULL, 0, NULL, 0, WLAN_STATUS_UNSPECIFIED_FAILURE, GFP_KERNEL); } } /* Called with the rtnl lock held. Acquires the rdev event lock. */ static int virt_wifi_disconnect(struct wiphy *wiphy, struct net_device *netdev, u16 reason_code) { struct virt_wifi_netdev_priv *priv = netdev_priv(netdev); if (priv->being_deleted) return -EBUSY; wiphy_debug(wiphy, "disconnect\n"); virt_wifi_cancel_connect(netdev); cfg80211_disconnected(netdev, reason_code, NULL, 0, true, GFP_KERNEL); priv->is_connected = false; netif_carrier_off(netdev); return 0; } /* Called with the rtnl lock held. */ static int virt_wifi_get_station(struct wiphy *wiphy, struct net_device *dev, const u8 *mac, struct station_info *sinfo) { struct virt_wifi_netdev_priv *priv = netdev_priv(dev); wiphy_debug(wiphy, "get_station\n"); if (!priv->is_connected || !ether_addr_equal(mac, fake_router_bssid)) return -ENOENT; sinfo->filled = BIT_ULL(NL80211_STA_INFO_TX_PACKETS) | BIT_ULL(NL80211_STA_INFO_TX_FAILED) | BIT_ULL(NL80211_STA_INFO_SIGNAL) | BIT_ULL(NL80211_STA_INFO_TX_BITRATE); sinfo->tx_packets = priv->tx_packets; sinfo->tx_failed = priv->tx_failed; /* For CFG80211_SIGNAL_TYPE_MBM, value is expressed in _dBm_ */ sinfo->signal = -50; sinfo->txrate = (struct rate_info) { .legacy = 10, /* units are 100kbit/s */ }; return 0; } /* Called with the rtnl lock held. */ static int virt_wifi_dump_station(struct wiphy *wiphy, struct net_device *dev, int idx, u8 *mac, struct station_info *sinfo) { struct virt_wifi_netdev_priv *priv = netdev_priv(dev); wiphy_debug(wiphy, "dump_station\n"); if (idx != 0 || !priv->is_connected) return -ENOENT; ether_addr_copy(mac, fake_router_bssid); return virt_wifi_get_station(wiphy, dev, fake_router_bssid, sinfo); } static const struct cfg80211_ops virt_wifi_cfg80211_ops = { .scan = virt_wifi_scan, .connect = virt_wifi_connect, .disconnect = virt_wifi_disconnect, .get_station = virt_wifi_get_station, .dump_station = virt_wifi_dump_station, }; /* Acquires and releases the rtnl lock. */ static struct wiphy *virt_wifi_make_wiphy(void) { struct wiphy *wiphy; struct virt_wifi_wiphy_priv *priv; int err; wiphy = wiphy_new(&virt_wifi_cfg80211_ops, sizeof(*priv)); if (!wiphy) return NULL; wiphy->max_scan_ssids = 4; wiphy->max_scan_ie_len = 1000; wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM; wiphy->bands[NL80211_BAND_2GHZ] = &band_2ghz; wiphy->bands[NL80211_BAND_5GHZ] = &band_5ghz; wiphy->bands[NL80211_BAND_60GHZ] = NULL; wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); priv = wiphy_priv(wiphy); priv->being_deleted = false; priv->scan_request = NULL; INIT_DELAYED_WORK(&priv->scan_result, virt_wifi_scan_result); err = wiphy_register(wiphy); if (err < 0) { wiphy_free(wiphy); return NULL; } return wiphy; } /* Acquires and releases the rtnl lock. */ static void virt_wifi_destroy_wiphy(struct wiphy *wiphy) { struct virt_wifi_wiphy_priv *priv; WARN(!wiphy, "%s called with null wiphy", __func__); if (!wiphy) return; priv = wiphy_priv(wiphy); priv->being_deleted = true; virt_wifi_cancel_scan(wiphy); if (wiphy->registered) wiphy_unregister(wiphy); wiphy_free(wiphy); } /* Enters and exits a RCU-bh critical section. 
*/ static netdev_tx_t virt_wifi_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct virt_wifi_netdev_priv *priv = netdev_priv(dev); priv->tx_packets++; if (!priv->is_connected) { priv->tx_failed++; return NET_XMIT_DROP; } skb->dev = priv->lowerdev; return dev_queue_xmit(skb); } /* Called with rtnl lock held. */ static int virt_wifi_net_device_open(struct net_device *dev) { struct virt_wifi_netdev_priv *priv = netdev_priv(dev); priv->is_up = true; return 0; } /* Called with rtnl lock held. */ static int virt_wifi_net_device_stop(struct net_device *dev) { struct virt_wifi_netdev_priv *n_priv = netdev_priv(dev); n_priv->is_up = false; if (!dev->ieee80211_ptr) return 0; virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy); virt_wifi_cancel_connect(dev); netif_carrier_off(dev); return 0; } static int virt_wifi_net_device_get_iflink(const struct net_device *dev) { struct virt_wifi_netdev_priv *priv = netdev_priv(dev); return priv->lowerdev->ifindex; } static const struct net_device_ops virt_wifi_ops = { .ndo_start_xmit = virt_wifi_start_xmit, .ndo_open = virt_wifi_net_device_open, .ndo_stop = virt_wifi_net_device_stop, .ndo_get_iflink = virt_wifi_net_device_get_iflink, }; /* Invoked as part of rtnl lock release. */ static void virt_wifi_net_device_destructor(struct net_device *dev) { /* Delayed past dellink to allow nl80211 to react to the device being * deleted. */ kfree(dev->ieee80211_ptr); dev->ieee80211_ptr = NULL; } /* No lock interaction. */ static void virt_wifi_setup(struct net_device *dev) { ether_setup(dev); dev->netdev_ops = &virt_wifi_ops; dev->needs_free_netdev = true; } /* Called in a RCU read critical section from netif_receive_skb */ static rx_handler_result_t virt_wifi_rx_handler(struct sk_buff **pskb) { struct sk_buff *skb = *pskb; struct virt_wifi_netdev_priv *priv = rcu_dereference(skb->dev->rx_handler_data); if (!priv->is_connected) return RX_HANDLER_PASS; /* GFP_ATOMIC because this is a packet interrupt handler. */ skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) { dev_err(&priv->upperdev->dev, "can't skb_share_check\n"); return RX_HANDLER_CONSUMED; } *pskb = skb; skb->dev = priv->upperdev; skb->pkt_type = PACKET_HOST; return RX_HANDLER_ANOTHER; } /* Called with rtnl lock held. 
*/ static int virt_wifi_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], struct netlink_ext_ack *extack) { struct virt_wifi_netdev_priv *priv = netdev_priv(dev); int err; if (!tb[IFLA_LINK]) return -EINVAL; netif_carrier_off(dev); priv->upperdev = dev; priv->lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (!priv->lowerdev) return -ENODEV; if (!tb[IFLA_MTU]) dev->mtu = priv->lowerdev->mtu; else if (dev->mtu > priv->lowerdev->mtu) return -EINVAL; err = netdev_rx_handler_register(priv->lowerdev, virt_wifi_rx_handler, priv); if (err) { dev_err(&priv->lowerdev->dev, "can't netdev_rx_handler_register: %d\n", err); return err; } eth_hw_addr_inherit(dev, priv->lowerdev); netif_stacked_transfer_operstate(priv->lowerdev, dev); SET_NETDEV_DEV(dev, &priv->lowerdev->dev); dev->ieee80211_ptr = kzalloc(sizeof(*dev->ieee80211_ptr), GFP_KERNEL); if (!dev->ieee80211_ptr) { err = -ENOMEM; goto remove_handler; } dev->ieee80211_ptr->iftype = NL80211_IFTYPE_STATION; dev->ieee80211_ptr->wiphy = common_wiphy; err = register_netdevice(dev); if (err) { dev_err(&priv->lowerdev->dev, "can't register_netdevice: %d\n", err); goto free_wireless_dev; } err = netdev_upper_dev_link(priv->lowerdev, dev, extack); if (err) { dev_err(&priv->lowerdev->dev, "can't netdev_upper_dev_link: %d\n", err); goto unregister_netdev; } dev->priv_destructor = virt_wifi_net_device_destructor; priv->being_deleted = false; priv->is_connected = false; priv->is_up = false; INIT_DELAYED_WORK(&priv->connect, virt_wifi_connect_complete); __module_get(THIS_MODULE); return 0; unregister_netdev: unregister_netdevice(dev); free_wireless_dev: kfree(dev->ieee80211_ptr); dev->ieee80211_ptr = NULL; remove_handler: netdev_rx_handler_unregister(priv->lowerdev); return err; } /* Called with rtnl lock held. */ static void virt_wifi_dellink(struct net_device *dev, struct list_head *head) { struct virt_wifi_netdev_priv *priv = netdev_priv(dev); if (dev->ieee80211_ptr) virt_wifi_cancel_scan(dev->ieee80211_ptr->wiphy); priv->being_deleted = true; virt_wifi_cancel_connect(dev); netif_carrier_off(dev); netdev_rx_handler_unregister(priv->lowerdev); netdev_upper_dev_unlink(priv->lowerdev, dev); unregister_netdevice_queue(dev, head); module_put(THIS_MODULE); /* Deleting the wiphy is handled in the module destructor. */ } static struct rtnl_link_ops virt_wifi_link_ops = { .kind = "virt_wifi", .setup = virt_wifi_setup, .newlink = virt_wifi_newlink, .dellink = virt_wifi_dellink, .priv_size = sizeof(struct virt_wifi_netdev_priv), }; static bool netif_is_virt_wifi_dev(const struct net_device *dev) { return rcu_access_pointer(dev->rx_handler) == virt_wifi_rx_handler; } static int virt_wifi_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *lower_dev = netdev_notifier_info_to_dev(ptr); struct virt_wifi_netdev_priv *priv; struct net_device *upper_dev; LIST_HEAD(list_kill); if (!netif_is_virt_wifi_dev(lower_dev)) return NOTIFY_DONE; switch (event) { case NETDEV_UNREGISTER: priv = rtnl_dereference(lower_dev->rx_handler_data); if (!priv) return NOTIFY_DONE; upper_dev = priv->upperdev; upper_dev->rtnl_link_ops->dellink(upper_dev, &list_kill); unregister_netdevice_many(&list_kill); break; } return NOTIFY_DONE; } static struct notifier_block virt_wifi_notifier = { .notifier_call = virt_wifi_event, }; /* Acquires and releases the rtnl lock. */ static int __init virt_wifi_init_module(void) { int err; /* Guaranteed to be locally-administered and not multicast. 
*/ eth_random_addr(fake_router_bssid); err = register_netdevice_notifier(&virt_wifi_notifier); if (err) return err; err = -ENOMEM; common_wiphy = virt_wifi_make_wiphy(); if (!common_wiphy) goto notifier; err = rtnl_link_register(&virt_wifi_link_ops); if (err) goto destroy_wiphy; return 0; destroy_wiphy: virt_wifi_destroy_wiphy(common_wiphy); notifier: unregister_netdevice_notifier(&virt_wifi_notifier); return err; } /* Acquires and releases the rtnl lock. */ static void __exit virt_wifi_cleanup_module(void) { /* Will delete any devices that depend on the wiphy. */ rtnl_link_unregister(&virt_wifi_link_ops); virt_wifi_destroy_wiphy(common_wiphy); unregister_netdevice_notifier(&virt_wifi_notifier); } module_init(virt_wifi_init_module); module_exit(virt_wifi_cleanup_module); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Cody Schuffelen <[email protected]>"); MODULE_DESCRIPTION("Driver for a wireless wrapper of ethernet devices"); MODULE_ALIAS_RTNL_LINK("virt_wifi");
linux-master
drivers/net/wireless/virtual/virt_wifi.c
// SPDX-License-Identifier: GPL-2.0-only /* * mac80211_hwsim - software simulator of 802.11 radio(s) for mac80211 * Copyright (c) 2008, Jouni Malinen <[email protected]> * Copyright (c) 2011, Javier Lopez <[email protected]> * Copyright (c) 2016 - 2017 Intel Deutschland GmbH * Copyright (C) 2018 - 2023 Intel Corporation */ /* * TODO: * - Add TSF sync and fix IBSS beacon transmission by adding * competition for "air time" at TBTT * - RX filtering based on filter configuration (data->rx_filter) */ #include <linux/list.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <net/dst.h> #include <net/xfrm.h> #include <net/mac80211.h> #include <net/ieee80211_radiotap.h> #include <linux/if_arp.h> #include <linux/rtnetlink.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/module.h> #include <linux/ktime.h> #include <net/genetlink.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/rhashtable.h> #include <linux/nospec.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include "mac80211_hwsim.h" #define WARN_QUEUE 100 #define MAX_QUEUE 200 MODULE_AUTHOR("Jouni Malinen"); MODULE_DESCRIPTION("Software simulator of 802.11 radio(s) for mac80211"); MODULE_LICENSE("GPL"); static int radios = 2; module_param(radios, int, 0444); MODULE_PARM_DESC(radios, "Number of simulated radios"); static int channels = 1; module_param(channels, int, 0444); MODULE_PARM_DESC(channels, "Number of concurrent channels"); static bool paged_rx = false; module_param(paged_rx, bool, 0644); MODULE_PARM_DESC(paged_rx, "Use paged SKBs for RX instead of linear ones"); static bool rctbl = false; module_param(rctbl, bool, 0444); MODULE_PARM_DESC(rctbl, "Handle rate control table"); static bool support_p2p_device = true; module_param(support_p2p_device, bool, 0444); MODULE_PARM_DESC(support_p2p_device, "Support P2P-Device interface type"); static bool mlo; module_param(mlo, bool, 0444); MODULE_PARM_DESC(mlo, "Support MLO"); /** * enum hwsim_regtest - the type of regulatory tests we offer * * These are the different values you can use for the regtest * module parameter. This is useful to help test world roaming * and the driver regulatory_hint() call and combinations of these. * If you want to do specific alpha2 regulatory domain tests simply * use the userspace regulatory request as that will be respected as * well without the need of this module parameter. This is designed * only for testing the driver regulatory request, world roaming * and all possible combinations. * * @HWSIM_REGTEST_DISABLED: No regulatory tests are performed, * this is the default value. * @HWSIM_REGTEST_DRIVER_REG_FOLLOW: Used for testing the driver regulatory * hint, only one driver regulatory hint will be sent as such the * secondary radios are expected to follow. * @HWSIM_REGTEST_DRIVER_REG_ALL: Used for testing the driver regulatory * request with all radios reporting the same regulatory domain. * @HWSIM_REGTEST_DIFF_COUNTRY: Used for testing the drivers calling * different regulatory domains requests. Expected behaviour is for * an intersection to occur but each device will still use their * respective regulatory requested domains. Subsequent radios will * use the resulting intersection. * @HWSIM_REGTEST_WORLD_ROAM: Used for testing the world roaming. We accomplish * this by using a custom beacon-capable regulatory domain for the first * radio. All other device world roam. 
* @HWSIM_REGTEST_CUSTOM_WORLD: Used for testing the custom world regulatory * domain requests. All radios will adhere to this custom world regulatory * domain. * @HWSIM_REGTEST_CUSTOM_WORLD_2: Used for testing 2 custom world regulatory * domain requests. The first radio will adhere to the first custom world * regulatory domain, the second one to the second custom world regulatory * domain. All other devices will world roam. * @HWSIM_REGTEST_STRICT_FOLLOW: Used for testing strict regulatory domain * settings, only the first radio will send a regulatory domain request * and use strict settings. The rest of the radios are expected to follow. * @HWSIM_REGTEST_STRICT_ALL: Used for testing strict regulatory domain * settings. All radios will adhere to this. * @HWSIM_REGTEST_STRICT_AND_DRIVER_REG: Used for testing strict regulatory * domain settings, combined with secondary driver regulatory domain * settings. The first radio will get a strict regulatory domain setting * using the first driver regulatory request and the second radio will use * non-strict settings using the second driver regulatory request. All * other devices should follow the intersection created between the * first two. * @HWSIM_REGTEST_ALL: Used for testing every possible mix. You will need * at least 6 radios for a complete test. We will test in this order: * 1 - driver custom world regulatory domain * 2 - second custom world regulatory domain * 3 - first driver regulatory domain request * 4 - second driver regulatory domain request * 5 - strict regulatory domain settings using the third driver regulatory * domain request * 6 and on - should follow the intersection of the 3rd, 4rth and 5th radio * regulatory requests. */ enum hwsim_regtest { HWSIM_REGTEST_DISABLED = 0, HWSIM_REGTEST_DRIVER_REG_FOLLOW = 1, HWSIM_REGTEST_DRIVER_REG_ALL = 2, HWSIM_REGTEST_DIFF_COUNTRY = 3, HWSIM_REGTEST_WORLD_ROAM = 4, HWSIM_REGTEST_CUSTOM_WORLD = 5, HWSIM_REGTEST_CUSTOM_WORLD_2 = 6, HWSIM_REGTEST_STRICT_FOLLOW = 7, HWSIM_REGTEST_STRICT_ALL = 8, HWSIM_REGTEST_STRICT_AND_DRIVER_REG = 9, HWSIM_REGTEST_ALL = 10, }; /* Set to one of the HWSIM_REGTEST_* values above */ static int regtest = HWSIM_REGTEST_DISABLED; module_param(regtest, int, 0444); MODULE_PARM_DESC(regtest, "The type of regulatory test we want to run"); static const char *hwsim_alpha2s[] = { "FI", "AL", "US", "DE", "JP", "AL", }; static const struct ieee80211_regdomain hwsim_world_regdom_custom_01 = { .n_reg_rules = 5, .alpha2 = "99", .reg_rules = { REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), REG_RULE(2484-10, 2484+10, 40, 0, 20, 0), REG_RULE(5150-10, 5240+10, 40, 0, 30, 0), REG_RULE(5745-10, 5825+10, 40, 0, 30, 0), REG_RULE(5855-10, 5925+10, 40, 0, 33, 0), } }; static const struct ieee80211_regdomain hwsim_world_regdom_custom_02 = { .n_reg_rules = 3, .alpha2 = "99", .reg_rules = { REG_RULE(2412-10, 2462+10, 40, 0, 20, 0), REG_RULE(5725-10, 5850+10, 40, 0, 30, NL80211_RRF_NO_IR), REG_RULE(5855-10, 5925+10, 40, 0, 33, 0), } }; static const struct ieee80211_regdomain hwsim_world_regdom_custom_03 = { .n_reg_rules = 6, .alpha2 = "99", .reg_rules = { REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0), REG_RULE(2484 - 10, 2484 + 10, 40, 0, 20, 0), REG_RULE(5150 - 10, 5240 + 10, 40, 0, 30, 0), REG_RULE(5745 - 10, 5825 + 10, 40, 0, 30, 0), REG_RULE(5855 - 10, 5925 + 10, 40, 0, 33, 0), REG_RULE(5955 - 10, 7125 + 10, 320, 0, 33, 0), } }; static const struct ieee80211_regdomain *hwsim_world_regdom_custom[] = { &hwsim_world_regdom_custom_01, &hwsim_world_regdom_custom_02, &hwsim_world_regdom_custom_03, }; 
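/*
 * Decoding note for the tables above: the REG_RULE() arguments are, in
 * order, start and end frequency in MHz, maximum channel bandwidth in
 * MHz, maximum antenna gain in dBi, maximum EIRP in dBm, and rule flags.
 * For example, REG_RULE(2412-10, 2462+10, 40, 0, 20, 0) permits
 * 2.402-2.472 GHz with up to 40 MHz wide channels at 20 dBm, while
 * NL80211_RRF_NO_IR marks a range in which transmissions may not be
 * initiated. Which of these "99" (custom world) domains gets applied is
 * selected by the regtest module parameter documented above.
 */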
struct hwsim_vif_priv { u32 magic; u8 bssid[ETH_ALEN]; bool assoc; bool bcn_en; u16 aid; }; #define HWSIM_VIF_MAGIC 0x69537748 static inline void hwsim_check_magic(struct ieee80211_vif *vif) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; WARN(vp->magic != HWSIM_VIF_MAGIC, "Invalid VIF (%p) magic %#x, %pM, %d/%d\n", vif, vp->magic, vif->addr, vif->type, vif->p2p); } static inline void hwsim_set_magic(struct ieee80211_vif *vif) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; vp->magic = HWSIM_VIF_MAGIC; } static inline void hwsim_clear_magic(struct ieee80211_vif *vif) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; vp->magic = 0; } struct hwsim_sta_priv { u32 magic; unsigned int last_link; u16 active_links_rx; }; #define HWSIM_STA_MAGIC 0x6d537749 static inline void hwsim_check_sta_magic(struct ieee80211_sta *sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; WARN_ON(sp->magic != HWSIM_STA_MAGIC); } static inline void hwsim_set_sta_magic(struct ieee80211_sta *sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; sp->magic = HWSIM_STA_MAGIC; } static inline void hwsim_clear_sta_magic(struct ieee80211_sta *sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; sp->magic = 0; } struct hwsim_chanctx_priv { u32 magic; }; #define HWSIM_CHANCTX_MAGIC 0x6d53774a static inline void hwsim_check_chanctx_magic(struct ieee80211_chanctx_conf *c) { struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; WARN_ON(cp->magic != HWSIM_CHANCTX_MAGIC); } static inline void hwsim_set_chanctx_magic(struct ieee80211_chanctx_conf *c) { struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; cp->magic = HWSIM_CHANCTX_MAGIC; } static inline void hwsim_clear_chanctx_magic(struct ieee80211_chanctx_conf *c) { struct hwsim_chanctx_priv *cp = (void *)c->drv_priv; cp->magic = 0; } static unsigned int hwsim_net_id; static DEFINE_IDA(hwsim_netgroup_ida); struct hwsim_net { int netgroup; u32 wmediumd; }; static inline int hwsim_net_get_netgroup(struct net *net) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); return hwsim_net->netgroup; } static inline int hwsim_net_set_netgroup(struct net *net) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); hwsim_net->netgroup = ida_alloc(&hwsim_netgroup_ida, GFP_KERNEL); return hwsim_net->netgroup >= 0 ? 
0 : -ENOMEM; } static inline u32 hwsim_net_get_wmediumd(struct net *net) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); return hwsim_net->wmediumd; } static inline void hwsim_net_set_wmediumd(struct net *net, u32 portid) { struct hwsim_net *hwsim_net = net_generic(net, hwsim_net_id); hwsim_net->wmediumd = portid; } static struct class *hwsim_class; static struct net_device *hwsim_mon; /* global monitor netdev */ #define CHAN2G(_freq) { \ .band = NL80211_BAND_2GHZ, \ .center_freq = (_freq), \ .hw_value = (_freq), \ } #define CHAN5G(_freq) { \ .band = NL80211_BAND_5GHZ, \ .center_freq = (_freq), \ .hw_value = (_freq), \ } #define CHAN6G(_freq) { \ .band = NL80211_BAND_6GHZ, \ .center_freq = (_freq), \ .hw_value = (_freq), \ } static const struct ieee80211_channel hwsim_channels_2ghz[] = { CHAN2G(2412), /* Channel 1 */ CHAN2G(2417), /* Channel 2 */ CHAN2G(2422), /* Channel 3 */ CHAN2G(2427), /* Channel 4 */ CHAN2G(2432), /* Channel 5 */ CHAN2G(2437), /* Channel 6 */ CHAN2G(2442), /* Channel 7 */ CHAN2G(2447), /* Channel 8 */ CHAN2G(2452), /* Channel 9 */ CHAN2G(2457), /* Channel 10 */ CHAN2G(2462), /* Channel 11 */ CHAN2G(2467), /* Channel 12 */ CHAN2G(2472), /* Channel 13 */ CHAN2G(2484), /* Channel 14 */ }; static const struct ieee80211_channel hwsim_channels_5ghz[] = { CHAN5G(5180), /* Channel 36 */ CHAN5G(5200), /* Channel 40 */ CHAN5G(5220), /* Channel 44 */ CHAN5G(5240), /* Channel 48 */ CHAN5G(5260), /* Channel 52 */ CHAN5G(5280), /* Channel 56 */ CHAN5G(5300), /* Channel 60 */ CHAN5G(5320), /* Channel 64 */ CHAN5G(5500), /* Channel 100 */ CHAN5G(5520), /* Channel 104 */ CHAN5G(5540), /* Channel 108 */ CHAN5G(5560), /* Channel 112 */ CHAN5G(5580), /* Channel 116 */ CHAN5G(5600), /* Channel 120 */ CHAN5G(5620), /* Channel 124 */ CHAN5G(5640), /* Channel 128 */ CHAN5G(5660), /* Channel 132 */ CHAN5G(5680), /* Channel 136 */ CHAN5G(5700), /* Channel 140 */ CHAN5G(5745), /* Channel 149 */ CHAN5G(5765), /* Channel 153 */ CHAN5G(5785), /* Channel 157 */ CHAN5G(5805), /* Channel 161 */ CHAN5G(5825), /* Channel 165 */ CHAN5G(5845), /* Channel 169 */ CHAN5G(5855), /* Channel 171 */ CHAN5G(5860), /* Channel 172 */ CHAN5G(5865), /* Channel 173 */ CHAN5G(5870), /* Channel 174 */ CHAN5G(5875), /* Channel 175 */ CHAN5G(5880), /* Channel 176 */ CHAN5G(5885), /* Channel 177 */ CHAN5G(5890), /* Channel 178 */ CHAN5G(5895), /* Channel 179 */ CHAN5G(5900), /* Channel 180 */ CHAN5G(5905), /* Channel 181 */ CHAN5G(5910), /* Channel 182 */ CHAN5G(5915), /* Channel 183 */ CHAN5G(5920), /* Channel 184 */ CHAN5G(5925), /* Channel 185 */ }; static const struct ieee80211_channel hwsim_channels_6ghz[] = { CHAN6G(5955), /* Channel 1 */ CHAN6G(5975), /* Channel 5 */ CHAN6G(5995), /* Channel 9 */ CHAN6G(6015), /* Channel 13 */ CHAN6G(6035), /* Channel 17 */ CHAN6G(6055), /* Channel 21 */ CHAN6G(6075), /* Channel 25 */ CHAN6G(6095), /* Channel 29 */ CHAN6G(6115), /* Channel 33 */ CHAN6G(6135), /* Channel 37 */ CHAN6G(6155), /* Channel 41 */ CHAN6G(6175), /* Channel 45 */ CHAN6G(6195), /* Channel 49 */ CHAN6G(6215), /* Channel 53 */ CHAN6G(6235), /* Channel 57 */ CHAN6G(6255), /* Channel 61 */ CHAN6G(6275), /* Channel 65 */ CHAN6G(6295), /* Channel 69 */ CHAN6G(6315), /* Channel 73 */ CHAN6G(6335), /* Channel 77 */ CHAN6G(6355), /* Channel 81 */ CHAN6G(6375), /* Channel 85 */ CHAN6G(6395), /* Channel 89 */ CHAN6G(6415), /* Channel 93 */ CHAN6G(6435), /* Channel 97 */ CHAN6G(6455), /* Channel 181 */ CHAN6G(6475), /* Channel 105 */ CHAN6G(6495), /* Channel 109 */ CHAN6G(6515), /* Channel 113 */ 
CHAN6G(6535), /* Channel 117 */ CHAN6G(6555), /* Channel 121 */ CHAN6G(6575), /* Channel 125 */ CHAN6G(6595), /* Channel 129 */ CHAN6G(6615), /* Channel 133 */ CHAN6G(6635), /* Channel 137 */ CHAN6G(6655), /* Channel 141 */ CHAN6G(6675), /* Channel 145 */ CHAN6G(6695), /* Channel 149 */ CHAN6G(6715), /* Channel 153 */ CHAN6G(6735), /* Channel 157 */ CHAN6G(6755), /* Channel 161 */ CHAN6G(6775), /* Channel 165 */ CHAN6G(6795), /* Channel 169 */ CHAN6G(6815), /* Channel 173 */ CHAN6G(6835), /* Channel 177 */ CHAN6G(6855), /* Channel 181 */ CHAN6G(6875), /* Channel 185 */ CHAN6G(6895), /* Channel 189 */ CHAN6G(6915), /* Channel 193 */ CHAN6G(6935), /* Channel 197 */ CHAN6G(6955), /* Channel 201 */ CHAN6G(6975), /* Channel 205 */ CHAN6G(6995), /* Channel 209 */ CHAN6G(7015), /* Channel 213 */ CHAN6G(7035), /* Channel 217 */ CHAN6G(7055), /* Channel 221 */ CHAN6G(7075), /* Channel 225 */ CHAN6G(7095), /* Channel 229 */ CHAN6G(7115), /* Channel 233 */ }; #define NUM_S1G_CHANS_US 51 static struct ieee80211_channel hwsim_channels_s1g[NUM_S1G_CHANS_US]; static const struct ieee80211_sta_s1g_cap hwsim_s1g_cap = { .s1g = true, .cap = { S1G_CAP0_SGI_1MHZ | S1G_CAP0_SGI_2MHZ, 0, 0, S1G_CAP3_MAX_MPDU_LEN, 0, S1G_CAP5_AMPDU, 0, S1G_CAP7_DUP_1MHZ, S1G_CAP8_TWT_RESPOND | S1G_CAP8_TWT_REQUEST, 0}, .nss_mcs = { 0xfc | 1, /* MCS 7 for 1 SS */ /* RX Highest Supported Long GI Data Rate 0:7 */ 0, /* RX Highest Supported Long GI Data Rate 0:7 */ /* TX S1G MCS Map 0:6 */ 0xfa, /* TX S1G MCS Map :7 */ /* TX Highest Supported Long GI Data Rate 0:6 */ 0x80, /* TX Highest Supported Long GI Data Rate 7:8 */ /* Rx Single spatial stream and S1G-MCS Map for 1MHz */ /* Tx Single spatial stream and S1G-MCS Map for 1MHz */ 0 }, }; static void hwsim_init_s1g_channels(struct ieee80211_channel *chans) { int ch, freq; for (ch = 0; ch < NUM_S1G_CHANS_US; ch++) { freq = 902000 + (ch + 1) * 500; chans[ch].band = NL80211_BAND_S1GHZ; chans[ch].center_freq = KHZ_TO_MHZ(freq); chans[ch].freq_offset = freq % 1000; chans[ch].hw_value = ch + 1; } } static const struct ieee80211_rate hwsim_rates[] = { { .bitrate = 10 }, { .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 60 }, { .bitrate = 90 }, { .bitrate = 120 }, { .bitrate = 180 }, { .bitrate = 240 }, { .bitrate = 360 }, { .bitrate = 480 }, { .bitrate = 540 } }; #define DEFAULT_RX_RSSI -50 static const u32 hwsim_ciphers[] = { WLAN_CIPHER_SUITE_WEP40, WLAN_CIPHER_SUITE_WEP104, WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WLAN_CIPHER_SUITE_CCMP_256, WLAN_CIPHER_SUITE_GCMP, WLAN_CIPHER_SUITE_GCMP_256, WLAN_CIPHER_SUITE_AES_CMAC, WLAN_CIPHER_SUITE_BIP_CMAC_256, WLAN_CIPHER_SUITE_BIP_GMAC_128, WLAN_CIPHER_SUITE_BIP_GMAC_256, }; #define OUI_QCA 0x001374 #define QCA_NL80211_SUBCMD_TEST 1 enum qca_nl80211_vendor_subcmds { QCA_WLAN_VENDOR_ATTR_TEST = 8, QCA_WLAN_VENDOR_ATTR_MAX = QCA_WLAN_VENDOR_ATTR_TEST }; static const struct nla_policy hwsim_vendor_test_policy[QCA_WLAN_VENDOR_ATTR_MAX + 1] = { [QCA_WLAN_VENDOR_ATTR_MAX] = { .type = NLA_U32 }, }; static int mac80211_hwsim_vendor_cmd_test(struct wiphy *wiphy, struct wireless_dev *wdev, const void *data, int data_len) { struct sk_buff *skb; struct nlattr *tb[QCA_WLAN_VENDOR_ATTR_MAX + 1]; int err; u32 val; err = nla_parse_deprecated(tb, QCA_WLAN_VENDOR_ATTR_MAX, data, data_len, hwsim_vendor_test_policy, NULL); if (err) return err; if (!tb[QCA_WLAN_VENDOR_ATTR_TEST]) return -EINVAL; val = 
nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_TEST]); wiphy_dbg(wiphy, "%s: test=%u\n", __func__, val); /* Send a vendor event as a test. Note that this would not normally be * done within a command handler, but rather, based on some other * trigger. For simplicity, this command is used to trigger the event * here. * * event_idx = 0 (index in mac80211_hwsim_vendor_commands) */ skb = cfg80211_vendor_event_alloc(wiphy, wdev, 100, 0, GFP_KERNEL); if (skb) { /* skb_put() or nla_put() will fill up data within * NL80211_ATTR_VENDOR_DATA. */ /* Add vendor data */ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 1); /* Send the event - this will call nla_nest_end() */ cfg80211_vendor_event(skb, GFP_KERNEL); } /* Send a response to the command */ skb = cfg80211_vendor_cmd_alloc_reply_skb(wiphy, 10); if (!skb) return -ENOMEM; /* skb_put() or nla_put() will fill up data within * NL80211_ATTR_VENDOR_DATA */ nla_put_u32(skb, QCA_WLAN_VENDOR_ATTR_TEST, val + 2); return cfg80211_vendor_cmd_reply(skb); } static struct wiphy_vendor_command mac80211_hwsim_vendor_commands[] = { { .info = { .vendor_id = OUI_QCA, .subcmd = QCA_NL80211_SUBCMD_TEST }, .flags = WIPHY_VENDOR_CMD_NEED_NETDEV, .doit = mac80211_hwsim_vendor_cmd_test, .policy = hwsim_vendor_test_policy, .maxattr = QCA_WLAN_VENDOR_ATTR_MAX, } }; /* Advertise support vendor specific events */ static const struct nl80211_vendor_cmd_info mac80211_hwsim_vendor_events[] = { { .vendor_id = OUI_QCA, .subcmd = 1 }, }; static DEFINE_SPINLOCK(hwsim_radio_lock); static LIST_HEAD(hwsim_radios); static struct rhashtable hwsim_radios_rht; static int hwsim_radio_idx; static int hwsim_radios_generation = 1; static struct platform_driver mac80211_hwsim_driver = { .driver = { .name = "mac80211_hwsim", }, }; struct mac80211_hwsim_link_data { u32 link_id; u64 beacon_int /* beacon interval in us */; struct hrtimer beacon_timer; }; struct mac80211_hwsim_data { struct list_head list; struct rhash_head rht; struct ieee80211_hw *hw; struct device *dev; struct ieee80211_supported_band bands[NUM_NL80211_BANDS]; struct ieee80211_channel channels_2ghz[ARRAY_SIZE(hwsim_channels_2ghz)]; struct ieee80211_channel channels_5ghz[ARRAY_SIZE(hwsim_channels_5ghz)]; struct ieee80211_channel channels_6ghz[ARRAY_SIZE(hwsim_channels_6ghz)]; struct ieee80211_channel channels_s1g[ARRAY_SIZE(hwsim_channels_s1g)]; struct ieee80211_rate rates[ARRAY_SIZE(hwsim_rates)]; struct ieee80211_iface_combination if_combination; struct ieee80211_iface_limit if_limits[3]; int n_if_limits; u32 ciphers[ARRAY_SIZE(hwsim_ciphers)]; struct mac_address addresses[2]; int channels, idx; bool use_chanctx; bool destroy_on_close; u32 portid; char alpha2[2]; const struct ieee80211_regdomain *regd; struct ieee80211_channel *tmp_chan; struct ieee80211_channel *roc_chan; u32 roc_duration; struct delayed_work roc_start; struct delayed_work roc_done; struct delayed_work hw_scan; struct cfg80211_scan_request *hw_scan_request; struct ieee80211_vif *hw_scan_vif; int scan_chan_idx; u8 scan_addr[ETH_ALEN]; struct { struct ieee80211_channel *channel; unsigned long next_start, start, end; } survey_data[ARRAY_SIZE(hwsim_channels_2ghz) + ARRAY_SIZE(hwsim_channels_5ghz) + ARRAY_SIZE(hwsim_channels_6ghz)]; struct ieee80211_channel *channel; enum nl80211_chan_width bw; unsigned int rx_filter; bool started, idle, scanning; struct mutex mutex; enum ps_mode { PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL } ps; bool ps_poll_pending; struct dentry *debugfs; atomic_t pending_cookie; struct sk_buff_head pending; /* packets pending */ /* * Only 
radios in the same group can communicate together (the * channel has to match too). Each bit represents a group. A * radio can be in more than one group. */ u64 group; /* group shared by radios created in the same netns */ int netgroup; /* wmediumd portid responsible for netgroup of this radio */ u32 wmediumd; /* difference between this hw's clock and the real clock, in usecs */ s64 tsf_offset; s64 bcn_delta; /* absolute beacon transmission time. Used to cover up "tx" delay. */ u64 abs_bcn_ts; /* Stats */ u64 tx_pkts; u64 rx_pkts; u64 tx_bytes; u64 rx_bytes; u64 tx_dropped; u64 tx_failed; /* RSSI in rx status of the receiver */ int rx_rssi; /* only used when pmsr capability is supplied */ struct cfg80211_pmsr_capabilities pmsr_capa; struct cfg80211_pmsr_request *pmsr_request; struct wireless_dev *pmsr_request_wdev; struct mac80211_hwsim_link_data link_data[IEEE80211_MLD_MAX_NUM_LINKS]; }; static const struct rhashtable_params hwsim_rht_params = { .nelem_hint = 2, .automatic_shrinking = true, .key_len = ETH_ALEN, .key_offset = offsetof(struct mac80211_hwsim_data, addresses[1]), .head_offset = offsetof(struct mac80211_hwsim_data, rht), }; struct hwsim_radiotap_hdr { struct ieee80211_radiotap_header hdr; __le64 rt_tsft; u8 rt_flags; u8 rt_rate; __le16 rt_channel; __le16 rt_chbitmask; } __packed; struct hwsim_radiotap_ack_hdr { struct ieee80211_radiotap_header hdr; u8 rt_flags; u8 pad; __le16 rt_channel; __le16 rt_chbitmask; } __packed; static struct mac80211_hwsim_data *get_hwsim_data_ref_from_addr(const u8 *addr) { return rhashtable_lookup_fast(&hwsim_radios_rht, addr, hwsim_rht_params); } /* MAC80211_HWSIM netlink family */ static struct genl_family hwsim_genl_family; enum hwsim_multicast_groups { HWSIM_MCGRP_CONFIG, }; static const struct genl_multicast_group hwsim_mcgrps[] = { [HWSIM_MCGRP_CONFIG] = { .name = "config", }, }; /* MAC80211_HWSIM netlink policy */ static const struct nla_policy hwsim_rate_info_policy[HWSIM_RATE_INFO_ATTR_MAX + 1] = { [HWSIM_RATE_INFO_ATTR_FLAGS] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_MCS] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_LEGACY] = { .type = NLA_U16 }, [HWSIM_RATE_INFO_ATTR_NSS] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_BW] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_HE_GI] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_HE_DCM] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_HE_RU_ALLOC] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_N_BOUNDED_CH] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_EHT_GI] = { .type = NLA_U8 }, [HWSIM_RATE_INFO_ATTR_EHT_RU_ALLOC] = { .type = NLA_U8 }, }; static const struct nla_policy hwsim_ftm_result_policy[NL80211_PMSR_FTM_RESP_ATTR_MAX + 1] = { [NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX] = { .type = NLA_U16 }, [NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST] = { .type = NLA_U8 }, [NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_RESP_ATTR_TX_RATE] = NLA_POLICY_NESTED(hwsim_rate_info_policy), [NL80211_PMSR_FTM_RESP_ATTR_RX_RATE] = NLA_POLICY_NESTED(hwsim_rate_info_policy), [NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG] = { .type = NLA_U64 }, 
[NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE] = { .type = NLA_U64 }, [NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD] = { .type = NLA_U64 }, [NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG] = { .type = NLA_U64 }, [NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE] = { .type = NLA_U64 }, [NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD] = { .type = NLA_U64 }, [NL80211_PMSR_FTM_RESP_ATTR_LCI] = { .type = NLA_STRING }, [NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC] = { .type = NLA_STRING }, }; static const struct nla_policy hwsim_pmsr_resp_type_policy[NL80211_PMSR_TYPE_MAX + 1] = { [NL80211_PMSR_TYPE_FTM] = NLA_POLICY_NESTED(hwsim_ftm_result_policy), }; static const struct nla_policy hwsim_pmsr_resp_policy[NL80211_PMSR_RESP_ATTR_MAX + 1] = { [NL80211_PMSR_RESP_ATTR_STATUS] = { .type = NLA_U32 }, [NL80211_PMSR_RESP_ATTR_HOST_TIME] = { .type = NLA_U64 }, [NL80211_PMSR_RESP_ATTR_AP_TSF] = { .type = NLA_U64 }, [NL80211_PMSR_RESP_ATTR_FINAL] = { .type = NLA_FLAG }, [NL80211_PMSR_RESP_ATTR_DATA] = NLA_POLICY_NESTED(hwsim_pmsr_resp_type_policy), }; static const struct nla_policy hwsim_pmsr_peer_result_policy[NL80211_PMSR_PEER_ATTR_MAX + 1] = { [NL80211_PMSR_PEER_ATTR_ADDR] = NLA_POLICY_ETH_ADDR_COMPAT, [NL80211_PMSR_PEER_ATTR_CHAN] = { .type = NLA_REJECT }, [NL80211_PMSR_PEER_ATTR_REQ] = { .type = NLA_REJECT }, [NL80211_PMSR_PEER_ATTR_RESP] = NLA_POLICY_NESTED(hwsim_pmsr_resp_policy), }; static const struct nla_policy hwsim_pmsr_peers_result_policy[NL80211_PMSR_ATTR_MAX + 1] = { [NL80211_PMSR_ATTR_MAX_PEERS] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_REPORT_AP_TSF] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_TYPE_CAPA] = { .type = NLA_REJECT }, [NL80211_PMSR_ATTR_PEERS] = NLA_POLICY_NESTED_ARRAY(hwsim_pmsr_peer_result_policy), }; static const struct nla_policy hwsim_ftm_capa_policy[NL80211_PMSR_FTM_CAPA_ATTR_MAX + 1] = { [NL80211_PMSR_FTM_CAPA_ATTR_ASAP] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS] = { .type = NLA_U32 }, [NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT] = NLA_POLICY_MAX(NLA_U8, 15), [NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST] = NLA_POLICY_MAX(NLA_U8, 31), [NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED] = { .type = NLA_FLAG }, [NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED] = { .type = NLA_FLAG }, }; static const struct nla_policy hwsim_pmsr_capa_type_policy[NL80211_PMSR_TYPE_MAX + 1] = { [NL80211_PMSR_TYPE_FTM] = NLA_POLICY_NESTED(hwsim_ftm_capa_policy), }; static const struct nla_policy hwsim_pmsr_capa_policy[NL80211_PMSR_ATTR_MAX + 1] = { [NL80211_PMSR_ATTR_MAX_PEERS] = { .type = NLA_U32 }, [NL80211_PMSR_ATTR_REPORT_AP_TSF] = { .type = NLA_FLAG }, [NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR] = { .type = NLA_FLAG }, [NL80211_PMSR_ATTR_TYPE_CAPA] = NLA_POLICY_NESTED(hwsim_pmsr_capa_type_policy), [NL80211_PMSR_ATTR_PEERS] = { .type = NLA_REJECT }, // only for request. 
}; static const struct nla_policy hwsim_genl_policy[HWSIM_ATTR_MAX + 1] = { [HWSIM_ATTR_ADDR_RECEIVER] = NLA_POLICY_ETH_ADDR_COMPAT, [HWSIM_ATTR_ADDR_TRANSMITTER] = NLA_POLICY_ETH_ADDR_COMPAT, [HWSIM_ATTR_FRAME] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [HWSIM_ATTR_FLAGS] = { .type = NLA_U32 }, [HWSIM_ATTR_RX_RATE] = { .type = NLA_U32 }, [HWSIM_ATTR_SIGNAL] = { .type = NLA_U32 }, [HWSIM_ATTR_TX_INFO] = { .type = NLA_BINARY, .len = IEEE80211_TX_MAX_RATES * sizeof(struct hwsim_tx_rate)}, [HWSIM_ATTR_COOKIE] = { .type = NLA_U64 }, [HWSIM_ATTR_CHANNELS] = { .type = NLA_U32 }, [HWSIM_ATTR_RADIO_ID] = { .type = NLA_U32 }, [HWSIM_ATTR_REG_HINT_ALPHA2] = { .type = NLA_STRING, .len = 2 }, [HWSIM_ATTR_REG_CUSTOM_REG] = { .type = NLA_U32 }, [HWSIM_ATTR_REG_STRICT_REG] = { .type = NLA_FLAG }, [HWSIM_ATTR_SUPPORT_P2P_DEVICE] = { .type = NLA_FLAG }, [HWSIM_ATTR_USE_CHANCTX] = { .type = NLA_FLAG }, [HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE] = { .type = NLA_FLAG }, [HWSIM_ATTR_RADIO_NAME] = { .type = NLA_STRING }, [HWSIM_ATTR_NO_VIF] = { .type = NLA_FLAG }, [HWSIM_ATTR_FREQ] = { .type = NLA_U32 }, [HWSIM_ATTR_TX_INFO_FLAGS] = { .type = NLA_BINARY }, [HWSIM_ATTR_PERM_ADDR] = NLA_POLICY_ETH_ADDR_COMPAT, [HWSIM_ATTR_IFTYPE_SUPPORT] = { .type = NLA_U32 }, [HWSIM_ATTR_CIPHER_SUPPORT] = { .type = NLA_BINARY }, [HWSIM_ATTR_MLO_SUPPORT] = { .type = NLA_FLAG }, [HWSIM_ATTR_PMSR_SUPPORT] = NLA_POLICY_NESTED(hwsim_pmsr_capa_policy), [HWSIM_ATTR_PMSR_RESULT] = NLA_POLICY_NESTED(hwsim_pmsr_peers_result_policy), }; #if IS_REACHABLE(CONFIG_VIRTIO) /* MAC80211_HWSIM virtio queues */ static struct virtqueue *hwsim_vqs[HWSIM_NUM_VQS]; static bool hwsim_virtio_enabled; static DEFINE_SPINLOCK(hwsim_virtio_lock); static void hwsim_virtio_rx_work(struct work_struct *work); static DECLARE_WORK(hwsim_virtio_rx, hwsim_virtio_rx_work); static int hwsim_tx_virtio(struct mac80211_hwsim_data *data, struct sk_buff *skb) { struct scatterlist sg[1]; unsigned long flags; int err; spin_lock_irqsave(&hwsim_virtio_lock, flags); if (!hwsim_virtio_enabled) { err = -ENODEV; goto out_free; } sg_init_one(sg, skb->head, skb_end_offset(skb)); err = virtqueue_add_outbuf(hwsim_vqs[HWSIM_VQ_TX], sg, 1, skb, GFP_ATOMIC); if (err) goto out_free; virtqueue_kick(hwsim_vqs[HWSIM_VQ_TX]); spin_unlock_irqrestore(&hwsim_virtio_lock, flags); return 0; out_free: spin_unlock_irqrestore(&hwsim_virtio_lock, flags); nlmsg_free(skb); return err; } #else /* cause a linker error if this ends up being needed */ extern int hwsim_tx_virtio(struct mac80211_hwsim_data *data, struct sk_buff *skb); #define hwsim_virtio_enabled false #endif static int hwsim_get_chanwidth(enum nl80211_chan_width bw) { switch (bw) { case NL80211_CHAN_WIDTH_20_NOHT: case NL80211_CHAN_WIDTH_20: return 20; case NL80211_CHAN_WIDTH_40: return 40; case NL80211_CHAN_WIDTH_80: return 80; case NL80211_CHAN_WIDTH_80P80: case NL80211_CHAN_WIDTH_160: return 160; case NL80211_CHAN_WIDTH_320: return 320; case NL80211_CHAN_WIDTH_5: return 5; case NL80211_CHAN_WIDTH_10: return 10; case NL80211_CHAN_WIDTH_1: return 1; case NL80211_CHAN_WIDTH_2: return 2; case NL80211_CHAN_WIDTH_4: return 4; case NL80211_CHAN_WIDTH_8: return 8; case NL80211_CHAN_WIDTH_16: return 16; } return INT_MAX; } static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan); /* sysfs attributes */ static void hwsim_send_ps_poll(void *dat, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = dat; struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff 
*skb; struct ieee80211_pspoll *pspoll; if (!vp->assoc) return; wiphy_dbg(data->hw->wiphy, "%s: send PS-Poll to %pM for aid %d\n", __func__, vp->bssid, vp->aid); skb = dev_alloc_skb(sizeof(*pspoll)); if (!skb) return; pspoll = skb_put(skb, sizeof(*pspoll)); pspoll->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL | IEEE80211_FCTL_PM); pspoll->aid = cpu_to_le16(0xc000 | vp->aid); memcpy(pspoll->bssid, vp->bssid, ETH_ALEN); memcpy(pspoll->ta, mac, ETH_ALEN); rcu_read_lock(); mac80211_hwsim_tx_frame(data->hw, skb, rcu_dereference(vif->bss_conf.chanctx_conf)->def.chan); rcu_read_unlock(); } static void hwsim_send_nullfunc(struct mac80211_hwsim_data *data, u8 *mac, struct ieee80211_vif *vif, int ps) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct sk_buff *skb; struct ieee80211_hdr *hdr; struct ieee80211_tx_info *cb; if (!vp->assoc) return; wiphy_dbg(data->hw->wiphy, "%s: send data::nullfunc to %pM ps=%d\n", __func__, vp->bssid, ps); skb = dev_alloc_skb(sizeof(*hdr)); if (!skb) return; hdr = skb_put(skb, sizeof(*hdr) - ETH_ALEN); hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_NULLFUNC | IEEE80211_FCTL_TODS | (ps ? IEEE80211_FCTL_PM : 0)); hdr->duration_id = cpu_to_le16(0); memcpy(hdr->addr1, vp->bssid, ETH_ALEN); memcpy(hdr->addr2, mac, ETH_ALEN); memcpy(hdr->addr3, vp->bssid, ETH_ALEN); cb = IEEE80211_SKB_CB(skb); cb->control.rates[0].count = 1; cb->control.rates[1].idx = -1; rcu_read_lock(); mac80211_hwsim_tx_frame(data->hw, skb, rcu_dereference(vif->bss_conf.chanctx_conf)->def.chan); rcu_read_unlock(); } static void hwsim_send_nullfunc_ps(void *dat, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = dat; hwsim_send_nullfunc(data, mac, vif, 1); } static void hwsim_send_nullfunc_no_ps(void *dat, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = dat; hwsim_send_nullfunc(data, mac, vif, 0); } static int hwsim_fops_ps_read(void *dat, u64 *val) { struct mac80211_hwsim_data *data = dat; *val = data->ps; return 0; } static int hwsim_fops_ps_write(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; enum ps_mode old_ps; if (val != PS_DISABLED && val != PS_ENABLED && val != PS_AUTO_POLL && val != PS_MANUAL_POLL) return -EINVAL; if (val == PS_MANUAL_POLL) { if (data->ps != PS_ENABLED) return -EINVAL; local_bh_disable(); ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_ps_poll, data); local_bh_enable(); return 0; } old_ps = data->ps; data->ps = val; local_bh_disable(); if (old_ps == PS_DISABLED && val != PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_ps, data); } else if (old_ps != PS_DISABLED && val == PS_DISABLED) { ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, hwsim_send_nullfunc_no_ps, data); } local_bh_enable(); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_ps, hwsim_fops_ps_read, hwsim_fops_ps_write, "%llu\n"); static int hwsim_write_simulate_radar(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; ieee80211_radar_detected(data->hw); return 0; } DEFINE_DEBUGFS_ATTRIBUTE(hwsim_simulate_radar, NULL, hwsim_write_simulate_radar, "%llu\n"); static int hwsim_fops_group_read(void *dat, u64 *val) { struct mac80211_hwsim_data *data = dat; *val = data->group; return 0; } static int hwsim_fops_group_write(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; data->group = val; return 0; } 
DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_group, hwsim_fops_group_read, hwsim_fops_group_write, "%llx\n"); static int hwsim_fops_rx_rssi_read(void *dat, u64 *val) { struct mac80211_hwsim_data *data = dat; *val = data->rx_rssi; return 0; } static int hwsim_fops_rx_rssi_write(void *dat, u64 val) { struct mac80211_hwsim_data *data = dat; int rssi = (int)val; if (rssi >= 0 || rssi < -100) return -EINVAL; data->rx_rssi = rssi; return 0; } DEFINE_DEBUGFS_ATTRIBUTE(hwsim_fops_rx_rssi, hwsim_fops_rx_rssi_read, hwsim_fops_rx_rssi_write, "%lld\n"); static netdev_tx_t hwsim_mon_xmit(struct sk_buff *skb, struct net_device *dev) { /* TODO: allow packet injection */ dev_kfree_skb(skb); return NETDEV_TX_OK; } static inline u64 mac80211_hwsim_get_tsf_raw(void) { return ktime_to_us(ktime_get_real()); } static __le64 __mac80211_hwsim_get_tsf(struct mac80211_hwsim_data *data) { u64 now = mac80211_hwsim_get_tsf_raw(); return cpu_to_le64(now + data->tsf_offset); } static u64 mac80211_hwsim_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *data = hw->priv; return le64_to_cpu(__mac80211_hwsim_get_tsf(data)); } static void mac80211_hwsim_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 tsf) { struct mac80211_hwsim_data *data = hw->priv; u64 now = mac80211_hwsim_get_tsf(hw, vif); /* MLD not supported here */ u32 bcn_int = data->link_data[0].beacon_int; u64 delta = abs(tsf - now); /* adjust after beaconing with new timestamp at old TBTT */ if (tsf > now) { data->tsf_offset += delta; data->bcn_delta = do_div(delta, bcn_int); } else { data->tsf_offset -= delta; data->bcn_delta = -(s64)do_div(delta, bcn_int); } } static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw, struct sk_buff *tx_skb, struct ieee80211_channel *chan) { struct mac80211_hwsim_data *data = hw->priv; struct sk_buff *skb; struct hwsim_radiotap_hdr *hdr; u16 flags, bitrate; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(tx_skb); struct ieee80211_rate *txrate = ieee80211_get_tx_rate(hw, info); if (!txrate) bitrate = 0; else bitrate = txrate->bitrate; if (!netif_running(hwsim_mon)) return; skb = skb_copy_expand(tx_skb, sizeof(*hdr), 0, GFP_ATOMIC); if (skb == NULL) return; hdr = skb_push(skb, sizeof(*hdr)); hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; hdr->hdr.it_pad = 0; hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) | (1 << IEEE80211_RADIOTAP_RATE) | (1 << IEEE80211_RADIOTAP_TSFT) | (1 << IEEE80211_RADIOTAP_CHANNEL)); hdr->rt_tsft = __mac80211_hwsim_get_tsf(data); hdr->rt_flags = 0; hdr->rt_rate = bitrate / 5; hdr->rt_channel = cpu_to_le16(chan->center_freq); flags = IEEE80211_CHAN_2GHZ; if (txrate && txrate->flags & IEEE80211_RATE_ERP_G) flags |= IEEE80211_CHAN_OFDM; else flags |= IEEE80211_CHAN_CCK; hdr->rt_chbitmask = cpu_to_le16(flags); skb->dev = hwsim_mon; skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } static void mac80211_hwsim_monitor_ack(struct ieee80211_channel *chan, const u8 *addr) { struct sk_buff *skb; struct hwsim_radiotap_ack_hdr *hdr; u16 flags; struct ieee80211_hdr *hdr11; if (!netif_running(hwsim_mon)) return; skb = dev_alloc_skb(100); if (skb == NULL) return; hdr = skb_put(skb, sizeof(*hdr)); hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION; hdr->hdr.it_pad = 0; hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr)); hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) 
| (1 << IEEE80211_RADIOTAP_CHANNEL)); hdr->rt_flags = 0; hdr->pad = 0; hdr->rt_channel = cpu_to_le16(chan->center_freq); flags = IEEE80211_CHAN_2GHZ; hdr->rt_chbitmask = cpu_to_le16(flags); hdr11 = skb_put(skb, 10); hdr11->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_ACK); hdr11->duration_id = cpu_to_le16(0); memcpy(hdr11->addr1, addr, ETH_ALEN); skb->dev = hwsim_mon; skb_reset_mac_header(skb); skb->ip_summed = CHECKSUM_UNNECESSARY; skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_802_2); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } struct mac80211_hwsim_addr_match_data { u8 addr[ETH_ALEN]; bool ret; }; static void mac80211_hwsim_addr_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { int i; struct mac80211_hwsim_addr_match_data *md = data; if (memcmp(mac, md->addr, ETH_ALEN) == 0) { md->ret = true; return; } /* Match the link address */ for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) { struct ieee80211_bss_conf *conf; conf = rcu_dereference(vif->link_conf[i]); if (!conf) continue; if (memcmp(conf->addr, md->addr, ETH_ALEN) == 0) { md->ret = true; return; } } } static bool mac80211_hwsim_addr_match(struct mac80211_hwsim_data *data, const u8 *addr) { struct mac80211_hwsim_addr_match_data md = { .ret = false, }; if (data->scanning && memcmp(addr, data->scan_addr, ETH_ALEN) == 0) return true; memcpy(md.addr, addr, ETH_ALEN); ieee80211_iterate_active_interfaces_atomic(data->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_addr_iter, &md); return md.ret; } static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data, struct sk_buff *skb) { switch (data->ps) { case PS_DISABLED: return true; case PS_ENABLED: return false; case PS_AUTO_POLL: /* TODO: accept (some) Beacons by default and other frames only * if pending PS-Poll has been sent */ return true; case PS_MANUAL_POLL: /* Allow unicast frames to own address if there is a pending * PS-Poll */ if (data->ps_poll_pending && mac80211_hwsim_addr_match(data, skb->data + 4)) { data->ps_poll_pending = false; return true; } return false; } return true; } static int hwsim_unicast_netgroup(struct mac80211_hwsim_data *data, struct sk_buff *skb, int portid) { struct net *net; bool found = false; int res = -ENOENT; rcu_read_lock(); for_each_net_rcu(net) { if (data->netgroup == hwsim_net_get_netgroup(net)) { res = genlmsg_unicast(net, skb, portid); found = true; break; } } rcu_read_unlock(); if (!found) nlmsg_free(skb); return res; } static void mac80211_hwsim_config_mac_nl(struct ieee80211_hw *hw, const u8 *addr, bool add) { struct mac80211_hwsim_data *data = hw->priv; u32 _portid = READ_ONCE(data->wmediumd); struct sk_buff *skb; void *msg_head; WARN_ON(!is_valid_ether_addr(addr)); if (!_portid && !hwsim_virtio_enabled) return; skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) return; msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, add ? 
HWSIM_CMD_ADD_MAC_ADDR : HWSIM_CMD_DEL_MAC_ADDR); if (!msg_head) { pr_debug("mac80211_hwsim: problem with msg_head\n"); goto nla_put_failure; } if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, data->addresses[1].addr)) goto nla_put_failure; if (nla_put(skb, HWSIM_ATTR_ADDR_RECEIVER, ETH_ALEN, addr)) goto nla_put_failure; genlmsg_end(skb, msg_head); if (hwsim_virtio_enabled) hwsim_tx_virtio(data, skb); else hwsim_unicast_netgroup(data, skb, _portid); return; nla_put_failure: nlmsg_free(skb); } static inline u16 trans_tx_rate_flags_ieee2hwsim(struct ieee80211_tx_rate *rate) { u16 result = 0; if (rate->flags & IEEE80211_TX_RC_USE_RTS_CTS) result |= MAC80211_HWSIM_TX_RC_USE_RTS_CTS; if (rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT) result |= MAC80211_HWSIM_TX_RC_USE_CTS_PROTECT; if (rate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) result |= MAC80211_HWSIM_TX_RC_USE_SHORT_PREAMBLE; if (rate->flags & IEEE80211_TX_RC_MCS) result |= MAC80211_HWSIM_TX_RC_MCS; if (rate->flags & IEEE80211_TX_RC_GREEN_FIELD) result |= MAC80211_HWSIM_TX_RC_GREEN_FIELD; if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) result |= MAC80211_HWSIM_TX_RC_40_MHZ_WIDTH; if (rate->flags & IEEE80211_TX_RC_DUP_DATA) result |= MAC80211_HWSIM_TX_RC_DUP_DATA; if (rate->flags & IEEE80211_TX_RC_SHORT_GI) result |= MAC80211_HWSIM_TX_RC_SHORT_GI; if (rate->flags & IEEE80211_TX_RC_VHT_MCS) result |= MAC80211_HWSIM_TX_RC_VHT_MCS; if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) result |= MAC80211_HWSIM_TX_RC_80_MHZ_WIDTH; if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) result |= MAC80211_HWSIM_TX_RC_160_MHZ_WIDTH; return result; } static void mac80211_hwsim_tx_frame_nl(struct ieee80211_hw *hw, struct sk_buff *my_skb, int dst_portid, struct ieee80211_channel *channel) { struct sk_buff *skb; struct mac80211_hwsim_data *data = hw->priv; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) my_skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(my_skb); void *msg_head; unsigned int hwsim_flags = 0; int i; struct hwsim_tx_rate tx_attempts[IEEE80211_TX_MAX_RATES]; struct hwsim_tx_rate_flag tx_attempts_flags[IEEE80211_TX_MAX_RATES]; uintptr_t cookie; if (data->ps != PS_DISABLED) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); /* If the queue contains MAX_QUEUE skb's drop some */ if (skb_queue_len(&data->pending) >= MAX_QUEUE) { /* Dropping until WARN_QUEUE level */ while (skb_queue_len(&data->pending) >= WARN_QUEUE) { ieee80211_free_txskb(hw, skb_dequeue(&data->pending)); data->tx_dropped++; } } skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (skb == NULL) goto nla_put_failure; msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, HWSIM_CMD_FRAME); if (msg_head == NULL) { pr_debug("mac80211_hwsim: problem with msg_head\n"); goto nla_put_failure; } if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, data->addresses[1].addr)) goto nla_put_failure; /* We get the skb->data */ if (nla_put(skb, HWSIM_ATTR_FRAME, my_skb->len, my_skb->data)) goto nla_put_failure; /* We get the flags for this transmission, and we translate them to wmediumd flags */ if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) hwsim_flags |= HWSIM_TX_CTL_REQ_TX_STATUS; if (info->flags & IEEE80211_TX_CTL_NO_ACK) hwsim_flags |= HWSIM_TX_CTL_NO_ACK; if (nla_put_u32(skb, HWSIM_ATTR_FLAGS, hwsim_flags)) goto nla_put_failure; if (nla_put_u32(skb, HWSIM_ATTR_FREQ, channel->center_freq)) goto nla_put_failure; /* We get the tx control (rate and retries) info*/ for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { tx_attempts[i].idx = info->status.rates[i].idx; 
tx_attempts_flags[i].idx = info->status.rates[i].idx; tx_attempts[i].count = info->status.rates[i].count; tx_attempts_flags[i].flags = trans_tx_rate_flags_ieee2hwsim( &info->status.rates[i]); } if (nla_put(skb, HWSIM_ATTR_TX_INFO, sizeof(struct hwsim_tx_rate)*IEEE80211_TX_MAX_RATES, tx_attempts)) goto nla_put_failure; if (nla_put(skb, HWSIM_ATTR_TX_INFO_FLAGS, sizeof(struct hwsim_tx_rate_flag) * IEEE80211_TX_MAX_RATES, tx_attempts_flags)) goto nla_put_failure; /* We create a cookie to identify this skb */ cookie = atomic_inc_return(&data->pending_cookie); info->rate_driver_data[0] = (void *)cookie; if (nla_put_u64_64bit(skb, HWSIM_ATTR_COOKIE, cookie, HWSIM_ATTR_PAD)) goto nla_put_failure; genlmsg_end(skb, msg_head); if (hwsim_virtio_enabled) { if (hwsim_tx_virtio(data, skb)) goto err_free_txskb; } else { if (hwsim_unicast_netgroup(data, skb, dst_portid)) goto err_free_txskb; } /* Enqueue the packet */ skb_queue_tail(&data->pending, my_skb); data->tx_pkts++; data->tx_bytes += my_skb->len; return; nla_put_failure: nlmsg_free(skb); err_free_txskb: pr_debug("mac80211_hwsim: error occurred in %s\n", __func__); ieee80211_free_txskb(hw, my_skb); data->tx_failed++; } static bool hwsim_chans_compat(struct ieee80211_channel *c1, struct ieee80211_channel *c2) { if (!c1 || !c2) return false; return c1->center_freq == c2->center_freq; } struct tx_iter_data { struct ieee80211_channel *channel; bool receive; }; static void mac80211_hwsim_tx_iter(void *_data, u8 *addr, struct ieee80211_vif *vif) { struct tx_iter_data *data = _data; int i; for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) { struct ieee80211_bss_conf *conf; struct ieee80211_chanctx_conf *chanctx; conf = rcu_dereference(vif->link_conf[i]); if (!conf) continue; chanctx = rcu_dereference(conf->chanctx_conf); if (!chanctx) continue; if (!hwsim_chans_compat(data->channel, chanctx->def.chan)) continue; data->receive = true; return; } } static void mac80211_hwsim_add_vendor_rtap(struct sk_buff *skb) { /* * To enable this code, #define the HWSIM_RADIOTAP_OUI, * e.g. like this: * #define HWSIM_RADIOTAP_OUI "\x02\x00\x00" * (but you should use a valid OUI, not that) * * If anyone wants to 'donate' a radiotap OUI/subns code * please send a patch removing this #ifdef and changing * the values accordingly. */ #ifdef HWSIM_RADIOTAP_OUI struct ieee80211_radiotap_vendor_tlv *rtap; static const char vendor_data[8] = "ABCDEFGH"; // Make sure no padding is needed BUILD_BUG_ON(sizeof(vendor_data) % 4); /* this is last radiotap info before the mac header, so * skb_reset_mac_header for mac8022 to know the end of * the radiotap TLV/beginning of the 802.11 header */ skb_reset_mac_header(skb); /* * Note that this code requires the headroom in the SKB * that was allocated earlier. 
*/ rtap = skb_push(skb, sizeof(*rtap) + sizeof(vendor_data)); rtap->len = cpu_to_le16(sizeof(*rtap) - sizeof(struct ieee80211_radiotap_tlv) + sizeof(vendor_data)); rtap->type = cpu_to_le16(IEEE80211_RADIOTAP_VENDOR_NAMESPACE); rtap->content.oui[0] = HWSIM_RADIOTAP_OUI[0]; rtap->content.oui[1] = HWSIM_RADIOTAP_OUI[1]; rtap->content.oui[2] = HWSIM_RADIOTAP_OUI[2]; rtap->content.oui_subtype = 127; /* clear reserved field */ rtap->content.reserved = 0; rtap->content.vendor_type = 0; memcpy(rtap->content.data, vendor_data, sizeof(vendor_data)); IEEE80211_SKB_RXCB(skb)->flag |= RX_FLAG_RADIOTAP_TLV_AT_END; #endif } static void mac80211_hwsim_rx(struct mac80211_hwsim_data *data, struct ieee80211_rx_status *rx_status, struct sk_buff *skb) { struct ieee80211_hdr *hdr = (void *)skb->data; if (!ieee80211_has_morefrags(hdr->frame_control) && !is_multicast_ether_addr(hdr->addr1) && (ieee80211_is_mgmt(hdr->frame_control) || ieee80211_is_data(hdr->frame_control))) { struct ieee80211_sta *sta; unsigned int link_id; rcu_read_lock(); sta = ieee80211_find_sta_by_link_addrs(data->hw, hdr->addr2, hdr->addr1, &link_id); if (sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; if (ieee80211_has_pm(hdr->frame_control)) sp->active_links_rx &= ~BIT(link_id); else sp->active_links_rx |= BIT(link_id); } rcu_read_unlock(); } memcpy(IEEE80211_SKB_RXCB(skb), rx_status, sizeof(*rx_status)); mac80211_hwsim_add_vendor_rtap(skb); data->rx_pkts++; data->rx_bytes += skb->len; ieee80211_rx_irqsafe(data->hw, skb); } static bool mac80211_hwsim_tx_frame_no_nl(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan) { struct mac80211_hwsim_data *data = hw->priv, *data2; bool ack = false; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_rx_status rx_status; u64 now; memset(&rx_status, 0, sizeof(rx_status)); rx_status.flag |= RX_FLAG_MACTIME_START; rx_status.freq = chan->center_freq; rx_status.freq_offset = chan->freq_offset ? 1 : 0; rx_status.band = chan->band; if (info->control.rates[0].flags & IEEE80211_TX_RC_VHT_MCS) { rx_status.rate_idx = ieee80211_rate_get_vht_mcs(&info->control.rates[0]); rx_status.nss = ieee80211_rate_get_vht_nss(&info->control.rates[0]); rx_status.encoding = RX_ENC_VHT; } else { rx_status.rate_idx = info->control.rates[0].idx; if (info->control.rates[0].flags & IEEE80211_TX_RC_MCS) rx_status.encoding = RX_ENC_HT; } if (info->control.rates[0].flags & IEEE80211_TX_RC_40_MHZ_WIDTH) rx_status.bw = RATE_INFO_BW_40; else if (info->control.rates[0].flags & IEEE80211_TX_RC_80_MHZ_WIDTH) rx_status.bw = RATE_INFO_BW_80; else if (info->control.rates[0].flags & IEEE80211_TX_RC_160_MHZ_WIDTH) rx_status.bw = RATE_INFO_BW_160; else rx_status.bw = RATE_INFO_BW_20; if (info->control.rates[0].flags & IEEE80211_TX_RC_SHORT_GI) rx_status.enc_flags |= RX_ENC_FLAG_SHORT_GI; /* TODO: simulate optional packet loss */ rx_status.signal = data->rx_rssi; if (info->control.vif) rx_status.signal += info->control.vif->bss_conf.txpower; if (data->ps != PS_DISABLED) hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); /* release the skb's source info */ skb_orphan(skb); skb_dst_drop(skb); skb->mark = 0; skb_ext_reset(skb); nf_reset_ct(skb); /* * Get absolute mactime here so all HWs RX at the "same time", and * absolute TX time for beacon mactime so the timestamp matches. 
* Giving beacons a different mactime than non-beacons looks messy, but * it helps the Toffset be exact and a ~10us mactime discrepancy * probably doesn't really matter. */ if (ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control)) { rx_status.boottime_ns = ktime_get_boottime_ns(); now = data->abs_bcn_ts; } else { now = mac80211_hwsim_get_tsf_raw(); } /* Copy skb to all enabled radios that are on the current frequency */ spin_lock(&hwsim_radio_lock); list_for_each_entry(data2, &hwsim_radios, list) { struct sk_buff *nskb; struct tx_iter_data tx_iter_data = { .receive = false, .channel = chan, }; if (data == data2) continue; if (!data2->started || (data2->idle && !data2->tmp_chan) || !hwsim_ps_rx_ok(data2, skb)) continue; if (!(data->group & data2->group)) continue; if (data->netgroup != data2->netgroup) continue; if (!hwsim_chans_compat(chan, data2->tmp_chan) && !hwsim_chans_compat(chan, data2->channel)) { ieee80211_iterate_active_interfaces_atomic( data2->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_tx_iter, &tx_iter_data); if (!tx_iter_data.receive) continue; } /* * reserve some space for our vendor and the normal * radiotap header, since we're copying anyway */ if (skb->len < PAGE_SIZE && paged_rx) { struct page *page = alloc_page(GFP_ATOMIC); if (!page) continue; nskb = dev_alloc_skb(128); if (!nskb) { __free_page(page); continue; } memcpy(page_address(page), skb->data, skb->len); skb_add_rx_frag(nskb, 0, page, 0, skb->len, skb->len); } else { nskb = skb_copy(skb, GFP_ATOMIC); if (!nskb) continue; } if (mac80211_hwsim_addr_match(data2, hdr->addr1)) ack = true; rx_status.mactime = now + data2->tsf_offset; mac80211_hwsim_rx(data2, &rx_status, nskb); } spin_unlock(&hwsim_radio_lock); return ack; } static struct ieee80211_bss_conf * mac80211_hwsim_select_tx_link(struct mac80211_hwsim_data *data, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_hdr *hdr, struct ieee80211_link_sta **link_sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; int i; if (!ieee80211_vif_is_mld(vif)) return &vif->bss_conf; WARN_ON(is_multicast_ether_addr(hdr->addr1)); if (WARN_ON_ONCE(!sta || !sta->valid_links)) return &vif->bss_conf; for (i = 0; i < ARRAY_SIZE(vif->link_conf); i++) { struct ieee80211_bss_conf *bss_conf; unsigned int link_id; /* round-robin the available link IDs */ link_id = (sp->last_link + i + 1) % ARRAY_SIZE(vif->link_conf); if (!(vif->active_links & BIT(link_id))) continue; if (!(sp->active_links_rx & BIT(link_id))) continue; *link_sta = rcu_dereference(sta->link[link_id]); if (!*link_sta) continue; bss_conf = rcu_dereference(vif->link_conf[link_id]); if (WARN_ON_ONCE(!bss_conf)) continue; /* can happen while switching links */ if (!rcu_access_pointer(bss_conf->chanctx_conf)) continue; sp->last_link = link_id; return bss_conf; } return NULL; } static void mac80211_hwsim_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct mac80211_hwsim_data *data = hw->priv; struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (void *)skb->data; struct ieee80211_chanctx_conf *chanctx_conf; struct ieee80211_channel *channel; bool ack; enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT; u32 _portid, i; if (WARN_ON(skb->len < 10)) { /* Should not happen; just a sanity check for addr1 use */ ieee80211_free_txskb(hw, skb); return; } if (!data->use_chanctx) { channel = data->channel; confbw = data->bw; } else if (txi->hw_queue == 4) { channel = data->tmp_chan; } else { u8 
link = u32_get_bits(IEEE80211_SKB_CB(skb)->control.flags, IEEE80211_TX_CTRL_MLO_LINK); struct ieee80211_vif *vif = txi->control.vif; struct ieee80211_link_sta *link_sta = NULL; struct ieee80211_sta *sta = control->sta; struct ieee80211_bss_conf *bss_conf; if (link != IEEE80211_LINK_UNSPECIFIED) { bss_conf = rcu_dereference(txi->control.vif->link_conf[link]); if (sta) link_sta = rcu_dereference(sta->link[link]); } else { bss_conf = mac80211_hwsim_select_tx_link(data, vif, sta, hdr, &link_sta); } if (unlikely(!bss_conf)) { /* if it's an MLO STA, it might have deactivated all * links temporarily - but we don't handle real PS in * this code yet, so just drop the frame in that case */ WARN(link != IEEE80211_LINK_UNSPECIFIED || !sta || !sta->mlo, "link:%d, sta:%pM, sta->mlo:%d\n", link, sta ? sta->addr : NULL, sta ? sta->mlo : -1); ieee80211_free_txskb(hw, skb); return; } if (sta && sta->mlo) { if (WARN_ON(!link_sta)) { ieee80211_free_txskb(hw, skb); return; } /* address translation to link addresses on TX */ ether_addr_copy(hdr->addr1, link_sta->addr); ether_addr_copy(hdr->addr2, bss_conf->addr); /* translate A3 only if it's the BSSID */ if (!ieee80211_has_tods(hdr->frame_control) && !ieee80211_has_fromds(hdr->frame_control)) { if (ether_addr_equal(hdr->addr3, sta->addr)) ether_addr_copy(hdr->addr3, link_sta->addr); else if (ether_addr_equal(hdr->addr3, vif->addr)) ether_addr_copy(hdr->addr3, bss_conf->addr); } /* no need to look at A4, if present it's SA */ } chanctx_conf = rcu_dereference(bss_conf->chanctx_conf); if (chanctx_conf) { channel = chanctx_conf->def.chan; confbw = chanctx_conf->def.width; } else { channel = NULL; } } if (WARN(!channel, "TX w/o channel - queue = %d\n", txi->hw_queue)) { ieee80211_free_txskb(hw, skb); return; } if (data->idle && !data->tmp_chan) { wiphy_dbg(hw->wiphy, "Trying to TX when idle - reject\n"); ieee80211_free_txskb(hw, skb); return; } if (txi->control.vif) hwsim_check_magic(txi->control.vif); if (control->sta) hwsim_check_sta_magic(control->sta); if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) ieee80211_get_tx_rates(txi->control.vif, control->sta, skb, txi->control.rates, ARRAY_SIZE(txi->control.rates)); for (i = 0; i < ARRAY_SIZE(txi->control.rates); i++) { u16 rflags = txi->control.rates[i].flags; /* initialize to data->bw for 5/10 MHz handling */ enum nl80211_chan_width bw = data->bw; if (txi->control.rates[i].idx == -1) break; if (rflags & IEEE80211_TX_RC_40_MHZ_WIDTH) bw = NL80211_CHAN_WIDTH_40; else if (rflags & IEEE80211_TX_RC_80_MHZ_WIDTH) bw = NL80211_CHAN_WIDTH_80; else if (rflags & IEEE80211_TX_RC_160_MHZ_WIDTH) bw = NL80211_CHAN_WIDTH_160; if (WARN_ON(hwsim_get_chanwidth(bw) > hwsim_get_chanwidth(confbw))) return; } if (skb->len >= 24 + 8 && ieee80211_is_probe_resp(hdr->frame_control)) { /* fake header transmission time */ struct ieee80211_mgmt *mgmt; struct ieee80211_rate *txrate; /* TODO: get MCS */ int bitrate = 100; u64 ts; mgmt = (struct ieee80211_mgmt *)skb->data; txrate = ieee80211_get_tx_rate(hw, txi); if (txrate) bitrate = txrate->bitrate; ts = mac80211_hwsim_get_tsf_raw(); mgmt->u.probe_resp.timestamp = cpu_to_le64(ts + data->tsf_offset + 24 * 8 * 10 / bitrate); } mac80211_hwsim_monitor_rx(hw, skb, channel); /* wmediumd mode check */ _portid = READ_ONCE(data->wmediumd); if (_portid || hwsim_virtio_enabled) return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, channel); /* NO wmediumd detected, perfect medium simulation */ data->tx_pkts++; data->tx_bytes += skb->len; ack = mac80211_hwsim_tx_frame_no_nl(hw, skb, channel); if (ack && 
skb->len >= 16) mac80211_hwsim_monitor_ack(channel, hdr->addr2); ieee80211_tx_info_clear_status(txi); /* frame was transmitted at most favorable rate at first attempt */ txi->control.rates[0].count = 1; txi->control.rates[1].idx = -1; if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) && ack) txi->flags |= IEEE80211_TX_STAT_ACK; ieee80211_tx_status_irqsafe(hw, skb); } static int mac80211_hwsim_start(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *data = hw->priv; wiphy_dbg(hw->wiphy, "%s\n", __func__); data->started = true; return 0; } static void mac80211_hwsim_stop(struct ieee80211_hw *hw) { struct mac80211_hwsim_data *data = hw->priv; int i; data->started = false; for (i = 0; i < ARRAY_SIZE(data->link_data); i++) hrtimer_cancel(&data->link_data[i].beacon_timer); while (!skb_queue_empty(&data->pending)) ieee80211_free_txskb(hw, skb_dequeue(&data->pending)); wiphy_dbg(hw->wiphy, "%s\n", __func__); } static int mac80211_hwsim_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { wiphy_dbg(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", __func__, ieee80211_vif_type_p2p(vif), vif->addr); hwsim_set_magic(vif); if (vif->type != NL80211_IFTYPE_MONITOR) mac80211_hwsim_config_mac_nl(hw, vif->addr, true); vif->cab_queue = 0; vif->hw_queue[IEEE80211_AC_VO] = 0; vif->hw_queue[IEEE80211_AC_VI] = 1; vif->hw_queue[IEEE80211_AC_BE] = 2; vif->hw_queue[IEEE80211_AC_BK] = 3; return 0; } static int mac80211_hwsim_change_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum nl80211_iftype newtype, bool newp2p) { newtype = ieee80211_iftype_p2p(newtype, newp2p); wiphy_dbg(hw->wiphy, "%s (old type=%d, new type=%d, mac_addr=%pM)\n", __func__, ieee80211_vif_type_p2p(vif), newtype, vif->addr); hwsim_check_magic(vif); /* * interface may change from non-AP to AP in * which case this needs to be set up again */ vif->cab_queue = 0; return 0; } static void mac80211_hwsim_remove_interface( struct ieee80211_hw *hw, struct ieee80211_vif *vif) { wiphy_dbg(hw->wiphy, "%s (type=%d mac_addr=%pM)\n", __func__, ieee80211_vif_type_p2p(vif), vif->addr); hwsim_check_magic(vif); hwsim_clear_magic(vif); if (vif->type != NL80211_IFTYPE_MONITOR) mac80211_hwsim_config_mac_nl(hw, vif->addr, false); } static void mac80211_hwsim_tx_frame(struct ieee80211_hw *hw, struct sk_buff *skb, struct ieee80211_channel *chan) { struct mac80211_hwsim_data *data = hw->priv; u32 _portid = READ_ONCE(data->wmediumd); if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) { struct ieee80211_tx_info *txi = IEEE80211_SKB_CB(skb); ieee80211_get_tx_rates(txi->control.vif, NULL, skb, txi->control.rates, ARRAY_SIZE(txi->control.rates)); } mac80211_hwsim_monitor_rx(hw, skb, chan); if (_portid || hwsim_virtio_enabled) return mac80211_hwsim_tx_frame_nl(hw, skb, _portid, chan); data->tx_pkts++; data->tx_bytes += skb->len; mac80211_hwsim_tx_frame_no_nl(hw, skb, chan); dev_kfree_skb(skb); } static void __mac80211_hwsim_beacon_tx(struct ieee80211_bss_conf *link_conf, struct mac80211_hwsim_data *data, struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct sk_buff *skb) { struct ieee80211_tx_info *info; struct ieee80211_rate *txrate; struct ieee80211_mgmt *mgmt; /* TODO: get MCS */ int bitrate = 100; info = IEEE80211_SKB_CB(skb); if (ieee80211_hw_check(hw, SUPPORTS_RC_TABLE)) ieee80211_get_tx_rates(vif, NULL, skb, info->control.rates, ARRAY_SIZE(info->control.rates)); txrate = ieee80211_get_tx_rate(hw, info); if (txrate) bitrate = txrate->bitrate; mgmt = (struct ieee80211_mgmt *) skb->data; /* fake header transmission time */ data->abs_bcn_ts = 
mac80211_hwsim_get_tsf_raw(); if (ieee80211_is_s1g_beacon(mgmt->frame_control)) { struct ieee80211_ext *ext = (void *) mgmt; ext->u.s1g_beacon.timestamp = cpu_to_le32(data->abs_bcn_ts + data->tsf_offset + 10 * 8 * 10 / bitrate); } else { mgmt->u.beacon.timestamp = cpu_to_le64(data->abs_bcn_ts + data->tsf_offset + 24 * 8 * 10 / bitrate); } mac80211_hwsim_tx_frame(hw, skb, rcu_dereference(link_conf->chanctx_conf)->def.chan); } static void mac80211_hwsim_beacon_tx(void *arg, u8 *mac, struct ieee80211_vif *vif) { struct mac80211_hwsim_link_data *link_data = arg; u32 link_id = link_data->link_id; struct ieee80211_bss_conf *link_conf; struct mac80211_hwsim_data *data = container_of(link_data, struct mac80211_hwsim_data, link_data[link_id]); struct ieee80211_hw *hw = data->hw; struct sk_buff *skb; hwsim_check_magic(vif); link_conf = rcu_dereference(vif->link_conf[link_id]); if (!link_conf) return; if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_MESH_POINT && vif->type != NL80211_IFTYPE_ADHOC && vif->type != NL80211_IFTYPE_OCB) return; if (vif->mbssid_tx_vif && vif->mbssid_tx_vif != vif) return; if (vif->bss_conf.ema_ap) { struct ieee80211_ema_beacons *ema; u8 i = 0; ema = ieee80211_beacon_get_template_ema_list(hw, vif, link_id); if (!ema || !ema->cnt) return; for (i = 0; i < ema->cnt; i++) { __mac80211_hwsim_beacon_tx(link_conf, data, hw, vif, ema->bcn[i].skb); ema->bcn[i].skb = NULL; /* Already freed */ } ieee80211_beacon_free_ema_list(ema); } else { skb = ieee80211_beacon_get(hw, vif, link_id); if (!skb) return; __mac80211_hwsim_beacon_tx(link_conf, data, hw, vif, skb); } while ((skb = ieee80211_get_buffered_bc(hw, vif)) != NULL) { mac80211_hwsim_tx_frame(hw, skb, rcu_dereference(link_conf->chanctx_conf)->def.chan); } if (link_conf->csa_active && ieee80211_beacon_cntdwn_is_complete(vif)) ieee80211_csa_finish(vif); } static enum hrtimer_restart mac80211_hwsim_beacon(struct hrtimer *timer) { struct mac80211_hwsim_link_data *link_data = container_of(timer, struct mac80211_hwsim_link_data, beacon_timer); struct mac80211_hwsim_data *data = container_of(link_data, struct mac80211_hwsim_data, link_data[link_data->link_id]); struct ieee80211_hw *hw = data->hw; u64 bcn_int = link_data->beacon_int; if (!data->started) return HRTIMER_NORESTART; ieee80211_iterate_active_interfaces_atomic( hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_beacon_tx, link_data); /* beacon at new TBTT + beacon interval */ if (data->bcn_delta) { bcn_int -= data->bcn_delta; data->bcn_delta = 0; } hrtimer_forward_now(&link_data->beacon_timer, ns_to_ktime(bcn_int * NSEC_PER_USEC)); return HRTIMER_RESTART; } static const char * const hwsim_chanwidths[] = { [NL80211_CHAN_WIDTH_5] = "ht5", [NL80211_CHAN_WIDTH_10] = "ht10", [NL80211_CHAN_WIDTH_20_NOHT] = "noht", [NL80211_CHAN_WIDTH_20] = "ht20", [NL80211_CHAN_WIDTH_40] = "ht40", [NL80211_CHAN_WIDTH_80] = "vht80", [NL80211_CHAN_WIDTH_80P80] = "vht80p80", [NL80211_CHAN_WIDTH_160] = "vht160", [NL80211_CHAN_WIDTH_1] = "1MHz", [NL80211_CHAN_WIDTH_2] = "2MHz", [NL80211_CHAN_WIDTH_4] = "4MHz", [NL80211_CHAN_WIDTH_8] = "8MHz", [NL80211_CHAN_WIDTH_16] = "16MHz", }; static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed) { struct mac80211_hwsim_data *data = hw->priv; struct ieee80211_conf *conf = &hw->conf; static const char *smps_modes[IEEE80211_SMPS_NUM_MODES] = { [IEEE80211_SMPS_AUTOMATIC] = "auto", [IEEE80211_SMPS_OFF] = "off", [IEEE80211_SMPS_STATIC] = "static", [IEEE80211_SMPS_DYNAMIC] = "dynamic", }; int idx; if (conf->chandef.chan) wiphy_dbg(hw->wiphy, 
"%s (freq=%d(%d - %d)/%s idle=%d ps=%d smps=%s)\n", __func__, conf->chandef.chan->center_freq, conf->chandef.center_freq1, conf->chandef.center_freq2, hwsim_chanwidths[conf->chandef.width], !!(conf->flags & IEEE80211_CONF_IDLE), !!(conf->flags & IEEE80211_CONF_PS), smps_modes[conf->smps_mode]); else wiphy_dbg(hw->wiphy, "%s (freq=0 idle=%d ps=%d smps=%s)\n", __func__, !!(conf->flags & IEEE80211_CONF_IDLE), !!(conf->flags & IEEE80211_CONF_PS), smps_modes[conf->smps_mode]); data->idle = !!(conf->flags & IEEE80211_CONF_IDLE); WARN_ON(conf->chandef.chan && data->use_chanctx); mutex_lock(&data->mutex); if (data->scanning && conf->chandef.chan) { for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) { if (data->survey_data[idx].channel == data->channel) { data->survey_data[idx].start = data->survey_data[idx].next_start; data->survey_data[idx].end = jiffies; break; } } data->channel = conf->chandef.chan; data->bw = conf->chandef.width; for (idx = 0; idx < ARRAY_SIZE(data->survey_data); idx++) { if (data->survey_data[idx].channel && data->survey_data[idx].channel != data->channel) continue; data->survey_data[idx].channel = data->channel; data->survey_data[idx].next_start = jiffies; break; } } else { data->channel = conf->chandef.chan; data->bw = conf->chandef.width; } mutex_unlock(&data->mutex); for (idx = 0; idx < ARRAY_SIZE(data->link_data); idx++) { struct mac80211_hwsim_link_data *link_data = &data->link_data[idx]; if (!data->started || !link_data->beacon_int) { hrtimer_cancel(&link_data->beacon_timer); } else if (!hrtimer_is_queued(&link_data->beacon_timer)) { u64 tsf = mac80211_hwsim_get_tsf(hw, NULL); u32 bcn_int = link_data->beacon_int; u64 until_tbtt = bcn_int - do_div(tsf, bcn_int); hrtimer_start(&link_data->beacon_timer, ns_to_ktime(until_tbtt * NSEC_PER_USEC), HRTIMER_MODE_REL_SOFT); } } return 0; } static void mac80211_hwsim_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags,u64 multicast) { struct mac80211_hwsim_data *data = hw->priv; wiphy_dbg(hw->wiphy, "%s\n", __func__); data->rx_filter = 0; if (*total_flags & FIF_ALLMULTI) data->rx_filter |= FIF_ALLMULTI; if (*total_flags & FIF_MCAST_ACTION) data->rx_filter |= FIF_MCAST_ACTION; *total_flags = data->rx_filter; } static void mac80211_hwsim_bcn_en_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { unsigned int *count = data; struct hwsim_vif_priv *vp = (void *)vif->drv_priv; if (vp->bcn_en) (*count)++; } static void mac80211_hwsim_vif_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 changed) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; hwsim_check_magic(vif); wiphy_dbg(hw->wiphy, "%s(changed=0x%llx vif->addr=%pM)\n", __func__, changed, vif->addr); if (changed & BSS_CHANGED_ASSOC) { wiphy_dbg(hw->wiphy, " ASSOC: assoc=%d aid=%d\n", vif->cfg.assoc, vif->cfg.aid); vp->assoc = vif->cfg.assoc; vp->aid = vif->cfg.aid; } } static void mac80211_hwsim_link_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *info, u64 changed) { struct hwsim_vif_priv *vp = (void *)vif->drv_priv; struct mac80211_hwsim_data *data = hw->priv; unsigned int link_id = info->link_id; struct mac80211_hwsim_link_data *link_data = &data->link_data[link_id]; hwsim_check_magic(vif); wiphy_dbg(hw->wiphy, "%s(changed=0x%llx vif->addr=%pM, link id %u)\n", __func__, (unsigned long long)changed, vif->addr, link_id); if (changed & BSS_CHANGED_BSSID) { wiphy_dbg(hw->wiphy, "%s: BSSID changed: %pM\n", __func__, info->bssid); memcpy(vp->bssid, info->bssid, 
ETH_ALEN); } if (changed & BSS_CHANGED_BEACON_ENABLED) { wiphy_dbg(hw->wiphy, " BCN EN: %d (BI=%u)\n", info->enable_beacon, info->beacon_int); vp->bcn_en = info->enable_beacon; if (data->started && !hrtimer_is_queued(&link_data->beacon_timer) && info->enable_beacon) { u64 tsf, until_tbtt; u32 bcn_int; link_data->beacon_int = info->beacon_int * 1024; tsf = mac80211_hwsim_get_tsf(hw, vif); bcn_int = link_data->beacon_int; until_tbtt = bcn_int - do_div(tsf, bcn_int); hrtimer_start(&link_data->beacon_timer, ns_to_ktime(until_tbtt * NSEC_PER_USEC), HRTIMER_MODE_REL_SOFT); } else if (!info->enable_beacon) { unsigned int count = 0; ieee80211_iterate_active_interfaces_atomic( data->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_bcn_en_iter, &count); wiphy_dbg(hw->wiphy, " beaconing vifs remaining: %u", count); if (count == 0) { hrtimer_cancel(&link_data->beacon_timer); link_data->beacon_int = 0; } } } if (changed & BSS_CHANGED_ERP_CTS_PROT) { wiphy_dbg(hw->wiphy, " ERP_CTS_PROT: %d\n", info->use_cts_prot); } if (changed & BSS_CHANGED_ERP_PREAMBLE) { wiphy_dbg(hw->wiphy, " ERP_PREAMBLE: %d\n", info->use_short_preamble); } if (changed & BSS_CHANGED_ERP_SLOT) { wiphy_dbg(hw->wiphy, " ERP_SLOT: %d\n", info->use_short_slot); } if (changed & BSS_CHANGED_HT) { wiphy_dbg(hw->wiphy, " HT: op_mode=0x%x\n", info->ht_operation_mode); } if (changed & BSS_CHANGED_BASIC_RATES) { wiphy_dbg(hw->wiphy, " BASIC_RATES: 0x%llx\n", (unsigned long long) info->basic_rates); } if (changed & BSS_CHANGED_TXPOWER) wiphy_dbg(hw->wiphy, " TX Power: %d dBm\n", info->txpower); } static void mac80211_hwsim_sta_rc_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u32 changed) { struct mac80211_hwsim_data *data = hw->priv; u32 bw = U32_MAX; int link_id; rcu_read_lock(); for (link_id = 0; link_id < ARRAY_SIZE(vif->link_conf); link_id++) { enum nl80211_chan_width confbw = NL80211_CHAN_WIDTH_20_NOHT; struct ieee80211_bss_conf *vif_conf; struct ieee80211_link_sta *link_sta; link_sta = rcu_dereference(sta->link[link_id]); if (!link_sta) continue; switch (link_sta->bandwidth) { #define C(_bw) case IEEE80211_STA_RX_BW_##_bw: bw = _bw; break C(20); C(40); C(80); C(160); C(320); #undef C } if (!data->use_chanctx) { confbw = data->bw; } else { struct ieee80211_chanctx_conf *chanctx_conf; vif_conf = rcu_dereference(vif->link_conf[link_id]); if (WARN_ON(!vif_conf)) continue; chanctx_conf = rcu_dereference(vif_conf->chanctx_conf); if (!WARN_ON(!chanctx_conf)) confbw = chanctx_conf->def.width; } WARN(bw > hwsim_get_chanwidth(confbw), "intf %pM [link=%d]: bad STA %pM bandwidth %d MHz (%d) > channel config %d MHz (%d)\n", vif->addr, link_id, sta->addr, bw, sta->deflink.bandwidth, hwsim_get_chanwidth(data->bw), data->bw); } rcu_read_unlock(); } static int mac80211_hwsim_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; hwsim_check_magic(vif); hwsim_set_sta_magic(sta); mac80211_hwsim_sta_rc_update(hw, vif, sta, 0); if (sta->valid_links) { WARN(hweight16(sta->valid_links) > 1, "expect to add STA with single link, have 0x%x\n", sta->valid_links); sp->active_links_rx = sta->valid_links; } return 0; } static int mac80211_hwsim_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { hwsim_check_magic(vif); hwsim_clear_sta_magic(sta); return 0; } static int mac80211_hwsim_sta_state(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, enum ieee80211_sta_state old_state, 
enum ieee80211_sta_state new_state) { if (new_state == IEEE80211_STA_NOTEXIST) return mac80211_hwsim_sta_remove(hw, vif, sta); if (old_state == IEEE80211_STA_NOTEXIST) return mac80211_hwsim_sta_add(hw, vif, sta); /* * when client is authorized (AP station marked as such), * enable all links */ if (vif->type == NL80211_IFTYPE_STATION && new_state == IEEE80211_STA_AUTHORIZED && !sta->tdls) ieee80211_set_active_links_async(vif, ieee80211_vif_usable_links(vif)); return 0; } static void mac80211_hwsim_sta_notify(struct ieee80211_hw *hw, struct ieee80211_vif *vif, enum sta_notify_cmd cmd, struct ieee80211_sta *sta) { hwsim_check_magic(vif); switch (cmd) { case STA_NOTIFY_SLEEP: case STA_NOTIFY_AWAKE: /* TODO: make good use of these flags */ break; default: WARN(1, "Invalid sta notify: %d\n", cmd); break; } } static int mac80211_hwsim_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { hwsim_check_sta_magic(sta); return 0; } static int mac80211_hwsim_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params) { wiphy_dbg(hw->wiphy, "%s (queue=%d txop=%d cw_min=%d cw_max=%d aifs=%d)\n", __func__, queue, params->txop, params->cw_min, params->cw_max, params->aifs); return 0; } static int mac80211_hwsim_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey) { struct mac80211_hwsim_data *hwsim = hw->priv; if (idx < 0 || idx >= ARRAY_SIZE(hwsim->survey_data)) return -ENOENT; mutex_lock(&hwsim->mutex); survey->channel = hwsim->survey_data[idx].channel; if (!survey->channel) { mutex_unlock(&hwsim->mutex); return -ENOENT; } /* * Magically conjured dummy values --- this is only ok for simulated hardware. * * A real driver which cannot determine the real noise value MUST NOT * report any, especially not magically conjured ones :-) */ survey->filled = SURVEY_INFO_NOISE_DBM | SURVEY_INFO_TIME | SURVEY_INFO_TIME_BUSY; survey->noise = -92; survey->time = jiffies_to_msecs(hwsim->survey_data[idx].end - hwsim->survey_data[idx].start); /* report that 12.5% of the channel time is used */ survey->time_busy = survey->time/8; mutex_unlock(&hwsim->mutex); return 0; } #ifdef CONFIG_NL80211_TESTMODE /* * This section contains example code for using netlink * attributes with the testmode command in nl80211.
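 * * Illustrative summary (added comment, derived from the handler below): HWSIM_TM_ATTR_CMD selects the operation; HWSIM_TM_CMD_SET_PS additionally requires HWSIM_TM_ATTR_PS with the desired power-save value, HWSIM_TM_CMD_GET_PS replies with HWSIM_TM_ATTR_PS, and the STOP_QUEUES/WAKE_QUEUES commands take no extra attributes.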
*/ /* These enums need to be kept in sync with userspace */ enum hwsim_testmode_attr { __HWSIM_TM_ATTR_INVALID = 0, HWSIM_TM_ATTR_CMD = 1, HWSIM_TM_ATTR_PS = 2, /* keep last */ __HWSIM_TM_ATTR_AFTER_LAST, HWSIM_TM_ATTR_MAX = __HWSIM_TM_ATTR_AFTER_LAST - 1 }; enum hwsim_testmode_cmd { HWSIM_TM_CMD_SET_PS = 0, HWSIM_TM_CMD_GET_PS = 1, HWSIM_TM_CMD_STOP_QUEUES = 2, HWSIM_TM_CMD_WAKE_QUEUES = 3, }; static const struct nla_policy hwsim_testmode_policy[HWSIM_TM_ATTR_MAX + 1] = { [HWSIM_TM_ATTR_CMD] = { .type = NLA_U32 }, [HWSIM_TM_ATTR_PS] = { .type = NLA_U32 }, }; static int mac80211_hwsim_testmode_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif, void *data, int len) { struct mac80211_hwsim_data *hwsim = hw->priv; struct nlattr *tb[HWSIM_TM_ATTR_MAX + 1]; struct sk_buff *skb; int err, ps; err = nla_parse_deprecated(tb, HWSIM_TM_ATTR_MAX, data, len, hwsim_testmode_policy, NULL); if (err) return err; if (!tb[HWSIM_TM_ATTR_CMD]) return -EINVAL; switch (nla_get_u32(tb[HWSIM_TM_ATTR_CMD])) { case HWSIM_TM_CMD_SET_PS: if (!tb[HWSIM_TM_ATTR_PS]) return -EINVAL; ps = nla_get_u32(tb[HWSIM_TM_ATTR_PS]); return hwsim_fops_ps_write(hwsim, ps); case HWSIM_TM_CMD_GET_PS: skb = cfg80211_testmode_alloc_reply_skb(hw->wiphy, nla_total_size(sizeof(u32))); if (!skb) return -ENOMEM; if (nla_put_u32(skb, HWSIM_TM_ATTR_PS, hwsim->ps)) goto nla_put_failure; return cfg80211_testmode_reply(skb); case HWSIM_TM_CMD_STOP_QUEUES: ieee80211_stop_queues(hw); return 0; case HWSIM_TM_CMD_WAKE_QUEUES: ieee80211_wake_queues(hw); return 0; default: return -EOPNOTSUPP; } nla_put_failure: kfree_skb(skb); return -ENOBUFS; } #endif static int mac80211_hwsim_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; switch (action) { case IEEE80211_AMPDU_TX_START: return IEEE80211_AMPDU_TX_START_IMMEDIATE; case IEEE80211_AMPDU_TX_STOP_CONT: case IEEE80211_AMPDU_TX_STOP_FLUSH: case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); break; case IEEE80211_AMPDU_TX_OPERATIONAL: break; case IEEE80211_AMPDU_RX_START: case IEEE80211_AMPDU_RX_STOP: break; default: return -EOPNOTSUPP; } return 0; } static void mac80211_hwsim_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { /* Not implemented, queues only on kernel side */ } static void hw_scan_work(struct work_struct *work) { struct mac80211_hwsim_data *hwsim = container_of(work, struct mac80211_hwsim_data, hw_scan.work); struct cfg80211_scan_request *req = hwsim->hw_scan_request; int dwell, i; mutex_lock(&hwsim->mutex); if (hwsim->scan_chan_idx >= req->n_channels) { struct cfg80211_scan_info info = { .aborted = false, }; wiphy_dbg(hwsim->hw->wiphy, "hw scan complete\n"); ieee80211_scan_completed(hwsim->hw, &info); hwsim->hw_scan_request = NULL; hwsim->hw_scan_vif = NULL; hwsim->tmp_chan = NULL; mutex_unlock(&hwsim->mutex); mac80211_hwsim_config_mac_nl(hwsim->hw, hwsim->scan_addr, false); return; } wiphy_dbg(hwsim->hw->wiphy, "hw scan %d MHz\n", req->channels[hwsim->scan_chan_idx]->center_freq); hwsim->tmp_chan = req->channels[hwsim->scan_chan_idx]; if (hwsim->tmp_chan->flags & (IEEE80211_CHAN_NO_IR | IEEE80211_CHAN_RADAR) || !req->n_ssids) { dwell = 120; } else { dwell = 30; /* send probes */ for (i = 0; i < req->n_ssids; i++) { struct sk_buff *probe; struct ieee80211_mgmt *mgmt; probe = ieee80211_probereq_get(hwsim->hw, hwsim->scan_addr, 
req->ssids[i].ssid, req->ssids[i].ssid_len, req->ie_len); if (!probe) continue; mgmt = (struct ieee80211_mgmt *) probe->data; memcpy(mgmt->da, req->bssid, ETH_ALEN); memcpy(mgmt->bssid, req->bssid, ETH_ALEN); if (req->ie_len) skb_put_data(probe, req->ie, req->ie_len); rcu_read_lock(); if (!ieee80211_tx_prepare_skb(hwsim->hw, hwsim->hw_scan_vif, probe, hwsim->tmp_chan->band, NULL)) { rcu_read_unlock(); kfree_skb(probe); continue; } local_bh_disable(); mac80211_hwsim_tx_frame(hwsim->hw, probe, hwsim->tmp_chan); rcu_read_unlock(); local_bh_enable(); } } ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, msecs_to_jiffies(dwell)); hwsim->survey_data[hwsim->scan_chan_idx].channel = hwsim->tmp_chan; hwsim->survey_data[hwsim->scan_chan_idx].start = jiffies; hwsim->survey_data[hwsim->scan_chan_idx].end = jiffies + msecs_to_jiffies(dwell); hwsim->scan_chan_idx++; mutex_unlock(&hwsim->mutex); } static int mac80211_hwsim_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_scan_request *hw_req) { struct mac80211_hwsim_data *hwsim = hw->priv; struct cfg80211_scan_request *req = &hw_req->req; mutex_lock(&hwsim->mutex); if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) { mutex_unlock(&hwsim->mutex); return -EBUSY; } hwsim->hw_scan_request = req; hwsim->hw_scan_vif = vif; hwsim->scan_chan_idx = 0; if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) get_random_mask_addr(hwsim->scan_addr, hw_req->req.mac_addr, hw_req->req.mac_addr_mask); else memcpy(hwsim->scan_addr, vif->addr, ETH_ALEN); memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data)); mutex_unlock(&hwsim->mutex); mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, true); wiphy_dbg(hw->wiphy, "hwsim hw_scan request\n"); ieee80211_queue_delayed_work(hwsim->hw, &hwsim->hw_scan, 0); return 0; } static void mac80211_hwsim_cancel_hw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *hwsim = hw->priv; struct cfg80211_scan_info info = { .aborted = true, }; wiphy_dbg(hw->wiphy, "hwsim cancel_hw_scan\n"); cancel_delayed_work_sync(&hwsim->hw_scan); mutex_lock(&hwsim->mutex); ieee80211_scan_completed(hwsim->hw, &info); hwsim->tmp_chan = NULL; hwsim->hw_scan_request = NULL; hwsim->hw_scan_vif = NULL; mutex_unlock(&hwsim->mutex); } static void mac80211_hwsim_sw_scan(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr) { struct mac80211_hwsim_data *hwsim = hw->priv; mutex_lock(&hwsim->mutex); if (hwsim->scanning) { pr_debug("two hwsim sw_scans detected!\n"); goto out; } pr_debug("hwsim sw_scan request, prepping stuff\n"); memcpy(hwsim->scan_addr, mac_addr, ETH_ALEN); mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, true); hwsim->scanning = true; memset(hwsim->survey_data, 0, sizeof(hwsim->survey_data)); out: mutex_unlock(&hwsim->mutex); } static void mac80211_hwsim_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *hwsim = hw->priv; mutex_lock(&hwsim->mutex); pr_debug("hwsim sw_scan_complete\n"); hwsim->scanning = false; mac80211_hwsim_config_mac_nl(hw, hwsim->scan_addr, false); eth_zero_addr(hwsim->scan_addr); mutex_unlock(&hwsim->mutex); } static void hw_roc_start(struct work_struct *work) { struct mac80211_hwsim_data *hwsim = container_of(work, struct mac80211_hwsim_data, roc_start.work); mutex_lock(&hwsim->mutex); wiphy_dbg(hwsim->hw->wiphy, "hwsim ROC begins\n"); hwsim->tmp_chan = hwsim->roc_chan; ieee80211_ready_on_channel(hwsim->hw); ieee80211_queue_delayed_work(hwsim->hw, &hwsim->roc_done, 
msecs_to_jiffies(hwsim->roc_duration)); mutex_unlock(&hwsim->mutex); } static void hw_roc_done(struct work_struct *work) { struct mac80211_hwsim_data *hwsim = container_of(work, struct mac80211_hwsim_data, roc_done.work); mutex_lock(&hwsim->mutex); ieee80211_remain_on_channel_expired(hwsim->hw); hwsim->tmp_chan = NULL; mutex_unlock(&hwsim->mutex); wiphy_dbg(hwsim->hw->wiphy, "hwsim ROC expired\n"); } static int mac80211_hwsim_roc(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_channel *chan, int duration, enum ieee80211_roc_type type) { struct mac80211_hwsim_data *hwsim = hw->priv; mutex_lock(&hwsim->mutex); if (WARN_ON(hwsim->tmp_chan || hwsim->hw_scan_request)) { mutex_unlock(&hwsim->mutex); return -EBUSY; } hwsim->roc_chan = chan; hwsim->roc_duration = duration; mutex_unlock(&hwsim->mutex); wiphy_dbg(hw->wiphy, "hwsim ROC (%d MHz, %d ms)\n", chan->center_freq, duration); ieee80211_queue_delayed_work(hw, &hwsim->roc_start, HZ/50); return 0; } static int mac80211_hwsim_croc(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct mac80211_hwsim_data *hwsim = hw->priv; cancel_delayed_work_sync(&hwsim->roc_start); cancel_delayed_work_sync(&hwsim->roc_done); mutex_lock(&hwsim->mutex); hwsim->tmp_chan = NULL; mutex_unlock(&hwsim->mutex); wiphy_dbg(hw->wiphy, "hwsim ROC canceled\n"); return 0; } static int mac80211_hwsim_add_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { hwsim_set_chanctx_magic(ctx); wiphy_dbg(hw->wiphy, "add channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", ctx->def.chan->center_freq, ctx->def.width, ctx->def.center_freq1, ctx->def.center_freq2); return 0; } static void mac80211_hwsim_remove_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx) { wiphy_dbg(hw->wiphy, "remove channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", ctx->def.chan->center_freq, ctx->def.width, ctx->def.center_freq1, ctx->def.center_freq2); hwsim_check_chanctx_magic(ctx); hwsim_clear_chanctx_magic(ctx); } static void mac80211_hwsim_change_chanctx(struct ieee80211_hw *hw, struct ieee80211_chanctx_conf *ctx, u32 changed) { hwsim_check_chanctx_magic(ctx); wiphy_dbg(hw->wiphy, "change channel context control: %d MHz/width: %d/cfreqs:%d/%d MHz\n", ctx->def.chan->center_freq, ctx->def.width, ctx->def.center_freq1, ctx->def.center_freq2); } static int mac80211_hwsim_assign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { hwsim_check_magic(vif); hwsim_check_chanctx_magic(ctx); /* if we activate a link while already associated wake it up */ if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc) { struct sk_buff *skb; skb = ieee80211_nullfunc_get(hw, vif, link_conf->link_id, true); if (skb) { local_bh_disable(); mac80211_hwsim_tx_frame(hw, skb, ctx->def.chan); local_bh_enable(); } } return 0; } static void mac80211_hwsim_unassign_vif_chanctx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *link_conf, struct ieee80211_chanctx_conf *ctx) { hwsim_check_magic(vif); hwsim_check_chanctx_magic(ctx); /* if we deactivate a link while associated suspend it first */ if (vif->type == NL80211_IFTYPE_STATION && vif->cfg.assoc) { struct sk_buff *skb; skb = ieee80211_nullfunc_get(hw, vif, link_conf->link_id, true); if (skb) { struct ieee80211_hdr *hdr = (void *)skb->data; hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); local_bh_disable(); mac80211_hwsim_tx_frame(hw, skb, ctx->def.chan); local_bh_enable(); } } } 
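/*
 * Illustrative sketch only, not part of the upstream driver: the "fake
 * header transmission time" fudge used for the probe response and beacon
 * timestamps earlier in this file.  struct ieee80211_rate reports the
 * bitrate in units of 100 kbit/s, so the airtime of an hdrlen-byte header
 * in microseconds is
 *
 *	hdrlen * 8 bits / (bitrate / 10 Mbit/s) = hdrlen * 8 * 10 / bitrate
 *
 * e.g. 24 * 8 * 10 / 100 = 19 us for a 24-byte header at 10 Mbit/s,
 * matching the literal "24 * 8 * 10 / bitrate" (and "10 * 8 * 10" for the
 * shorter S1G beacon header) used above.  The helper name below is made up
 * purely for illustration.
 */
static inline unsigned int hwsim_hdr_airtime_us(unsigned int hdrlen,
						unsigned int bitrate)
{
	if (!bitrate)	/* callers above default to 100, i.e. 10 Mbit/s */
		bitrate = 100;

	return hdrlen * 8 * 10 / bitrate;
}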
static const char mac80211_hwsim_gstrings_stats[][ETH_GSTRING_LEN] = { "tx_pkts_nic", "tx_bytes_nic", "rx_pkts_nic", "rx_bytes_nic", "d_tx_dropped", "d_tx_failed", "d_ps_mode", "d_group", }; #define MAC80211_HWSIM_SSTATS_LEN ARRAY_SIZE(mac80211_hwsim_gstrings_stats) static void mac80211_hwsim_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) memcpy(data, *mac80211_hwsim_gstrings_stats, sizeof(mac80211_hwsim_gstrings_stats)); } static int mac80211_hwsim_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int sset) { if (sset == ETH_SS_STATS) return MAC80211_HWSIM_SSTATS_LEN; return 0; } static void mac80211_hwsim_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ethtool_stats *stats, u64 *data) { struct mac80211_hwsim_data *ar = hw->priv; int i = 0; data[i++] = ar->tx_pkts; data[i++] = ar->tx_bytes; data[i++] = ar->rx_pkts; data[i++] = ar->rx_bytes; data[i++] = ar->tx_dropped; data[i++] = ar->tx_failed; data[i++] = ar->ps; data[i++] = ar->group; WARN_ON(i != MAC80211_HWSIM_SSTATS_LEN); } static int mac80211_hwsim_tx_last_beacon(struct ieee80211_hw *hw) { return 1; } static int mac80211_hwsim_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { return -EOPNOTSUPP; } static int mac80211_hwsim_change_vif_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u16 old_links, u16 new_links, struct ieee80211_bss_conf *old[IEEE80211_MLD_MAX_NUM_LINKS]) { unsigned long rem = old_links & ~new_links; unsigned long add = new_links & ~old_links; int i; if (!old_links) rem |= BIT(0); if (!new_links) add |= BIT(0); for_each_set_bit(i, &rem, IEEE80211_MLD_MAX_NUM_LINKS) mac80211_hwsim_config_mac_nl(hw, old[i]->addr, false); for_each_set_bit(i, &add, IEEE80211_MLD_MAX_NUM_LINKS) { struct ieee80211_bss_conf *link_conf; link_conf = link_conf_dereference_protected(vif, i); if (WARN_ON(!link_conf)) continue; mac80211_hwsim_config_mac_nl(hw, link_conf->addr, true); } return 0; } static int mac80211_hwsim_change_sta_links(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 old_links, u16 new_links) { struct hwsim_sta_priv *sp = (void *)sta->drv_priv; hwsim_check_sta_magic(sta); if (vif->type == NL80211_IFTYPE_STATION) sp->active_links_rx = new_links; return 0; } static int mac80211_hwsim_send_pmsr_ftm_request_peer(struct sk_buff *msg, struct cfg80211_pmsr_ftm_request_peer *request) { struct nlattr *ftm; if (!request->requested) return -EINVAL; ftm = nla_nest_start(msg, NL80211_PMSR_TYPE_FTM); if (!ftm) return -ENOBUFS; if (nla_put_u32(msg, NL80211_PMSR_FTM_REQ_ATTR_PREAMBLE, request->preamble)) return -ENOBUFS; if (nla_put_u16(msg, NL80211_PMSR_FTM_REQ_ATTR_BURST_PERIOD, request->burst_period)) return -ENOBUFS; if (request->asap && nla_put_flag(msg, NL80211_PMSR_FTM_REQ_ATTR_ASAP)) return -ENOBUFS; if (request->request_lci && nla_put_flag(msg, NL80211_PMSR_FTM_REQ_ATTR_REQUEST_LCI)) return -ENOBUFS; if (request->request_civicloc && nla_put_flag(msg, NL80211_PMSR_FTM_REQ_ATTR_REQUEST_CIVICLOC)) return -ENOBUFS; if (request->trigger_based && nla_put_flag(msg, NL80211_PMSR_FTM_REQ_ATTR_TRIGGER_BASED)) return -ENOBUFS; if (request->non_trigger_based && nla_put_flag(msg, NL80211_PMSR_FTM_REQ_ATTR_NON_TRIGGER_BASED)) return -ENOBUFS; if (request->lmr_feedback && nla_put_flag(msg, NL80211_PMSR_FTM_REQ_ATTR_LMR_FEEDBACK)) return -ENOBUFS; if (nla_put_u8(msg, NL80211_PMSR_FTM_REQ_ATTR_NUM_BURSTS_EXP, request->num_bursts_exp)) return -ENOBUFS; if (nla_put_u8(msg, 
NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION, request->burst_duration)) return -ENOBUFS; if (nla_put_u8(msg, NL80211_PMSR_FTM_REQ_ATTR_FTMS_PER_BURST, request->ftms_per_burst)) return -ENOBUFS; if (nla_put_u8(msg, NL80211_PMSR_FTM_REQ_ATTR_NUM_FTMR_RETRIES, request->ftmr_retries)) return -ENOBUFS; if (nla_put_u8(msg, NL80211_PMSR_FTM_REQ_ATTR_BURST_DURATION, request->burst_duration)) return -ENOBUFS; if (nla_put_u8(msg, NL80211_PMSR_FTM_REQ_ATTR_BSS_COLOR, request->bss_color)) return -ENOBUFS; nla_nest_end(msg, ftm); return 0; } static int mac80211_hwsim_send_pmsr_request_peer(struct sk_buff *msg, struct cfg80211_pmsr_request_peer *request) { struct nlattr *peer, *chandef, *req, *data; int err; peer = nla_nest_start(msg, NL80211_PMSR_ATTR_PEERS); if (!peer) return -ENOBUFS; if (nla_put(msg, NL80211_PMSR_PEER_ATTR_ADDR, ETH_ALEN, request->addr)) return -ENOBUFS; chandef = nla_nest_start(msg, NL80211_PMSR_PEER_ATTR_CHAN); if (!chandef) return -ENOBUFS; err = nl80211_send_chandef(msg, &request->chandef); if (err) return err; nla_nest_end(msg, chandef); req = nla_nest_start(msg, NL80211_PMSR_PEER_ATTR_REQ); if (!req) return -ENOBUFS; if (request->report_ap_tsf && nla_put_flag(msg, NL80211_PMSR_REQ_ATTR_GET_AP_TSF)) return -ENOBUFS; data = nla_nest_start(msg, NL80211_PMSR_REQ_ATTR_DATA); if (!data) return -ENOBUFS; err = mac80211_hwsim_send_pmsr_ftm_request_peer(msg, &request->ftm); if (err) return err; nla_nest_end(msg, data); nla_nest_end(msg, req); nla_nest_end(msg, peer); return 0; } static int mac80211_hwsim_send_pmsr_request(struct sk_buff *msg, struct cfg80211_pmsr_request *request) { struct nlattr *pmsr; int err; pmsr = nla_nest_start(msg, NL80211_ATTR_PEER_MEASUREMENTS); if (!pmsr) return -ENOBUFS; if (nla_put_u32(msg, NL80211_ATTR_TIMEOUT, request->timeout)) return -ENOBUFS; if (!is_zero_ether_addr(request->mac_addr)) { if (nla_put(msg, NL80211_ATTR_MAC, ETH_ALEN, request->mac_addr)) return -ENOBUFS; if (nla_put(msg, NL80211_ATTR_MAC_MASK, ETH_ALEN, request->mac_addr_mask)) return -ENOBUFS; } for (int i = 0; i < request->n_peers; i++) { err = mac80211_hwsim_send_pmsr_request_peer(msg, &request->peers[i]); if (err) return err; } nla_nest_end(msg, pmsr); return 0; } static int mac80211_hwsim_start_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct cfg80211_pmsr_request *request) { struct mac80211_hwsim_data *data; struct sk_buff *skb = NULL; struct nlattr *pmsr; void *msg_head; u32 _portid; int err = 0; data = hw->priv; _portid = READ_ONCE(data->wmediumd); if (!_portid && !hwsim_virtio_enabled) return -EOPNOTSUPP; mutex_lock(&data->mutex); if (data->pmsr_request) { err = -EBUSY; goto out_free; } skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) { err = -ENOMEM; goto out_free; } msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, HWSIM_CMD_START_PMSR); if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, data->addresses[1].addr)) { err = -ENOMEM; goto out_free; } pmsr = nla_nest_start(skb, HWSIM_ATTR_PMSR_REQUEST); if (!pmsr) { err = -ENOMEM; goto out_free; } err = mac80211_hwsim_send_pmsr_request(skb, request); if (err) goto out_free; nla_nest_end(skb, pmsr); genlmsg_end(skb, msg_head); if (hwsim_virtio_enabled) hwsim_tx_virtio(data, skb); else hwsim_unicast_netgroup(data, skb, _portid); data->pmsr_request = request; data->pmsr_request_wdev = ieee80211_vif_to_wdev(vif); out_free: if (err && skb) nlmsg_free(skb); mutex_unlock(&data->mutex); return err; } static void mac80211_hwsim_abort_pmsr(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct 
cfg80211_pmsr_request *request) { struct mac80211_hwsim_data *data; struct sk_buff *skb = NULL; struct nlattr *pmsr; void *msg_head; u32 _portid; int err = 0; data = hw->priv; _portid = READ_ONCE(data->wmediumd); if (!_portid && !hwsim_virtio_enabled) return; mutex_lock(&data->mutex); if (data->pmsr_request != request) { err = -EINVAL; goto out; } skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) { err = -ENOMEM; goto out; } msg_head = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, HWSIM_CMD_ABORT_PMSR); if (nla_put(skb, HWSIM_ATTR_ADDR_TRANSMITTER, ETH_ALEN, data->addresses[1].addr)) goto out; pmsr = nla_nest_start(skb, HWSIM_ATTR_PMSR_REQUEST); if (!pmsr) { err = -ENOMEM; goto out; } err = mac80211_hwsim_send_pmsr_request(skb, request); if (err) goto out; err = nla_nest_end(skb, pmsr); if (err) goto out; genlmsg_end(skb, msg_head); if (hwsim_virtio_enabled) hwsim_tx_virtio(data, skb); else hwsim_unicast_netgroup(data, skb, _portid); out: if (err && skb) nlmsg_free(skb); mutex_unlock(&data->mutex); } static int mac80211_hwsim_parse_rate_info(struct nlattr *rateattr, struct rate_info *rate_info, struct genl_info *info) { struct nlattr *tb[HWSIM_RATE_INFO_ATTR_MAX + 1]; int ret; ret = nla_parse_nested(tb, HWSIM_RATE_INFO_ATTR_MAX, rateattr, hwsim_rate_info_policy, info->extack); if (ret) return ret; if (tb[HWSIM_RATE_INFO_ATTR_FLAGS]) rate_info->flags = nla_get_u8(tb[HWSIM_RATE_INFO_ATTR_FLAGS]); if (tb[HWSIM_RATE_INFO_ATTR_MCS]) rate_info->mcs = nla_get_u8(tb[HWSIM_RATE_INFO_ATTR_MCS]); if (tb[HWSIM_RATE_INFO_ATTR_LEGACY]) rate_info->legacy = nla_get_u16(tb[HWSIM_RATE_INFO_ATTR_LEGACY]); if (tb[HWSIM_RATE_INFO_ATTR_NSS]) rate_info->nss = nla_get_u8(tb[HWSIM_RATE_INFO_ATTR_NSS]); if (tb[HWSIM_RATE_INFO_ATTR_BW]) rate_info->bw = nla_get_u8(tb[HWSIM_RATE_INFO_ATTR_BW]); if (tb[HWSIM_RATE_INFO_ATTR_HE_GI]) rate_info->he_gi = nla_get_u8(tb[HWSIM_RATE_INFO_ATTR_HE_GI]); if (tb[HWSIM_RATE_INFO_ATTR_HE_DCM]) rate_info->he_dcm = nla_get_u8(tb[HWSIM_RATE_INFO_ATTR_HE_DCM]); if (tb[HWSIM_RATE_INFO_ATTR_HE_RU_ALLOC]) rate_info->he_ru_alloc = nla_get_u8(tb[HWSIM_RATE_INFO_ATTR_HE_RU_ALLOC]); if (tb[HWSIM_RATE_INFO_ATTR_N_BOUNDED_CH]) rate_info->n_bonded_ch = nla_get_u8(tb[HWSIM_RATE_INFO_ATTR_N_BOUNDED_CH]); if (tb[HWSIM_RATE_INFO_ATTR_EHT_GI]) rate_info->eht_gi = nla_get_u8(tb[HWSIM_RATE_INFO_ATTR_EHT_GI]); if (tb[HWSIM_RATE_INFO_ATTR_EHT_RU_ALLOC]) rate_info->eht_ru_alloc = nla_get_u8(tb[HWSIM_RATE_INFO_ATTR_EHT_RU_ALLOC]); return 0; } static int mac80211_hwsim_parse_ftm_result(struct nlattr *ftm, struct cfg80211_pmsr_ftm_result *result, struct genl_info *info) { struct nlattr *tb[NL80211_PMSR_FTM_RESP_ATTR_MAX + 1]; int ret; ret = nla_parse_nested(tb, NL80211_PMSR_FTM_RESP_ATTR_MAX, ftm, hwsim_ftm_result_policy, info->extack); if (ret) return ret; if (tb[NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON]) result->failure_reason = nla_get_u32(tb[NL80211_PMSR_FTM_RESP_ATTR_FAIL_REASON]); if (tb[NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX]) result->burst_index = nla_get_u16(tb[NL80211_PMSR_FTM_RESP_ATTR_BURST_INDEX]); if (tb[NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS]) { result->num_ftmr_attempts_valid = 1; result->num_ftmr_attempts = nla_get_u32(tb[NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_ATTEMPTS]); } if (tb[NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES]) { result->num_ftmr_successes_valid = 1; result->num_ftmr_successes = nla_get_u32(tb[NL80211_PMSR_FTM_RESP_ATTR_NUM_FTMR_SUCCESSES]); } if (tb[NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME]) result->busy_retry_time = 
nla_get_u8(tb[NL80211_PMSR_FTM_RESP_ATTR_BUSY_RETRY_TIME]); if (tb[NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP]) result->num_bursts_exp = nla_get_u8(tb[NL80211_PMSR_FTM_RESP_ATTR_NUM_BURSTS_EXP]); if (tb[NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION]) result->burst_duration = nla_get_u8(tb[NL80211_PMSR_FTM_RESP_ATTR_BURST_DURATION]); if (tb[NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST]) result->ftms_per_burst = nla_get_u8(tb[NL80211_PMSR_FTM_RESP_ATTR_FTMS_PER_BURST]); if (tb[NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG]) { result->rssi_avg_valid = 1; result->rssi_avg = nla_get_s32(tb[NL80211_PMSR_FTM_RESP_ATTR_RSSI_AVG]); } if (tb[NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD]) { result->rssi_spread_valid = 1; result->rssi_spread = nla_get_s32(tb[NL80211_PMSR_FTM_RESP_ATTR_RSSI_SPREAD]); } if (tb[NL80211_PMSR_FTM_RESP_ATTR_TX_RATE]) { result->tx_rate_valid = 1; ret = mac80211_hwsim_parse_rate_info(tb[NL80211_PMSR_FTM_RESP_ATTR_TX_RATE], &result->tx_rate, info); if (ret) return ret; } if (tb[NL80211_PMSR_FTM_RESP_ATTR_RX_RATE]) { result->rx_rate_valid = 1; ret = mac80211_hwsim_parse_rate_info(tb[NL80211_PMSR_FTM_RESP_ATTR_RX_RATE], &result->rx_rate, info); if (ret) return ret; } if (tb[NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG]) { result->rtt_avg_valid = 1; result->rtt_avg = nla_get_u64(tb[NL80211_PMSR_FTM_RESP_ATTR_RTT_AVG]); } if (tb[NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE]) { result->rtt_variance_valid = 1; result->rtt_variance = nla_get_u64(tb[NL80211_PMSR_FTM_RESP_ATTR_RTT_VARIANCE]); } if (tb[NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD]) { result->rtt_spread_valid = 1; result->rtt_spread = nla_get_u64(tb[NL80211_PMSR_FTM_RESP_ATTR_RTT_SPREAD]); } if (tb[NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG]) { result->dist_avg_valid = 1; result->dist_avg = nla_get_u64(tb[NL80211_PMSR_FTM_RESP_ATTR_DIST_AVG]); } if (tb[NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE]) { result->dist_variance_valid = 1; result->dist_variance = nla_get_u64(tb[NL80211_PMSR_FTM_RESP_ATTR_DIST_VARIANCE]); } if (tb[NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD]) { result->dist_spread_valid = 1; result->dist_spread = nla_get_u64(tb[NL80211_PMSR_FTM_RESP_ATTR_DIST_SPREAD]); } if (tb[NL80211_PMSR_FTM_RESP_ATTR_LCI]) { result->lci = nla_data(tb[NL80211_PMSR_FTM_RESP_ATTR_LCI]); result->lci_len = nla_len(tb[NL80211_PMSR_FTM_RESP_ATTR_LCI]); } if (tb[NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC]) { result->civicloc = nla_data(tb[NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC]); result->civicloc_len = nla_len(tb[NL80211_PMSR_FTM_RESP_ATTR_CIVICLOC]); } return 0; } static int mac80211_hwsim_parse_pmsr_resp(struct nlattr *resp, struct cfg80211_pmsr_result *result, struct genl_info *info) { struct nlattr *tb[NL80211_PMSR_RESP_ATTR_MAX + 1]; struct nlattr *pmsr; int rem; int ret; ret = nla_parse_nested(tb, NL80211_PMSR_RESP_ATTR_MAX, resp, hwsim_pmsr_resp_policy, info->extack); if (ret) return ret; if (tb[NL80211_PMSR_RESP_ATTR_STATUS]) result->status = nla_get_u32(tb[NL80211_PMSR_RESP_ATTR_STATUS]); if (tb[NL80211_PMSR_RESP_ATTR_HOST_TIME]) result->host_time = nla_get_u64(tb[NL80211_PMSR_RESP_ATTR_HOST_TIME]); if (tb[NL80211_PMSR_RESP_ATTR_AP_TSF]) { result->ap_tsf_valid = 1; result->ap_tsf = nla_get_u64(tb[NL80211_PMSR_RESP_ATTR_AP_TSF]); } result->final = !!tb[NL80211_PMSR_RESP_ATTR_FINAL]; if (!tb[NL80211_PMSR_RESP_ATTR_DATA]) return 0; nla_for_each_nested(pmsr, tb[NL80211_PMSR_RESP_ATTR_DATA], rem) { switch (nla_type(pmsr)) { case NL80211_PMSR_TYPE_FTM: result->type = NL80211_PMSR_TYPE_FTM; ret = mac80211_hwsim_parse_ftm_result(pmsr, &result->ftm, info); if (ret) return ret; break; default: 
NL_SET_ERR_MSG_ATTR(info->extack, pmsr, "Unknown pmsr resp type"); return -EINVAL; } } return 0; } static int mac80211_hwsim_parse_pmsr_result(struct nlattr *peer, struct cfg80211_pmsr_result *result, struct genl_info *info) { struct nlattr *tb[NL80211_PMSR_PEER_ATTR_MAX + 1]; int ret; if (!peer) return -EINVAL; ret = nla_parse_nested(tb, NL80211_PMSR_PEER_ATTR_MAX, peer, hwsim_pmsr_peer_result_policy, info->extack); if (ret) return ret; if (tb[NL80211_PMSR_PEER_ATTR_ADDR]) memcpy(result->addr, nla_data(tb[NL80211_PMSR_PEER_ATTR_ADDR]), ETH_ALEN); if (tb[NL80211_PMSR_PEER_ATTR_RESP]) { ret = mac80211_hwsim_parse_pmsr_resp(tb[NL80211_PMSR_PEER_ATTR_RESP], result, info); if (ret) return ret; } return 0; }; static int hwsim_pmsr_report_nl(struct sk_buff *msg, struct genl_info *info) { struct mac80211_hwsim_data *data; struct nlattr *peers, *peer; struct nlattr *reqattr; const u8 *src; int err; int rem; if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]) return -EINVAL; src = nla_data(info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]); data = get_hwsim_data_ref_from_addr(src); if (!data) return -EINVAL; mutex_lock(&data->mutex); if (!data->pmsr_request) { err = -EINVAL; goto out; } reqattr = info->attrs[HWSIM_ATTR_PMSR_RESULT]; if (!reqattr) { err = -EINVAL; goto out; } peers = nla_find_nested(reqattr, NL80211_PMSR_ATTR_PEERS); if (!peers) { err = -EINVAL; goto out; } nla_for_each_nested(peer, peers, rem) { struct cfg80211_pmsr_result result; err = mac80211_hwsim_parse_pmsr_result(peer, &result, info); if (err) goto out; cfg80211_pmsr_report(data->pmsr_request_wdev, data->pmsr_request, &result, GFP_KERNEL); } cfg80211_pmsr_complete(data->pmsr_request_wdev, data->pmsr_request, GFP_KERNEL); err = 0; out: data->pmsr_request = NULL; data->pmsr_request_wdev = NULL; mutex_unlock(&data->mutex); return err; } #define HWSIM_COMMON_OPS \ .tx = mac80211_hwsim_tx, \ .wake_tx_queue = ieee80211_handle_wake_tx_queue, \ .start = mac80211_hwsim_start, \ .stop = mac80211_hwsim_stop, \ .add_interface = mac80211_hwsim_add_interface, \ .change_interface = mac80211_hwsim_change_interface, \ .remove_interface = mac80211_hwsim_remove_interface, \ .config = mac80211_hwsim_config, \ .configure_filter = mac80211_hwsim_configure_filter, \ .vif_cfg_changed = mac80211_hwsim_vif_info_changed, \ .link_info_changed = mac80211_hwsim_link_info_changed, \ .tx_last_beacon = mac80211_hwsim_tx_last_beacon, \ .sta_notify = mac80211_hwsim_sta_notify, \ .sta_rc_update = mac80211_hwsim_sta_rc_update, \ .conf_tx = mac80211_hwsim_conf_tx, \ .get_survey = mac80211_hwsim_get_survey, \ CFG80211_TESTMODE_CMD(mac80211_hwsim_testmode_cmd) \ .ampdu_action = mac80211_hwsim_ampdu_action, \ .flush = mac80211_hwsim_flush, \ .get_et_sset_count = mac80211_hwsim_get_et_sset_count, \ .get_et_stats = mac80211_hwsim_get_et_stats, \ .get_et_strings = mac80211_hwsim_get_et_strings, \ .start_pmsr = mac80211_hwsim_start_pmsr, \ .abort_pmsr = mac80211_hwsim_abort_pmsr, #define HWSIM_NON_MLO_OPS \ .sta_add = mac80211_hwsim_sta_add, \ .sta_remove = mac80211_hwsim_sta_remove, \ .set_tim = mac80211_hwsim_set_tim, \ .get_tsf = mac80211_hwsim_get_tsf, \ .set_tsf = mac80211_hwsim_set_tsf, static const struct ieee80211_ops mac80211_hwsim_ops = { HWSIM_COMMON_OPS HWSIM_NON_MLO_OPS .sw_scan_start = mac80211_hwsim_sw_scan, .sw_scan_complete = mac80211_hwsim_sw_scan_complete, }; #define HWSIM_CHANCTX_OPS \ .hw_scan = mac80211_hwsim_hw_scan, \ .cancel_hw_scan = mac80211_hwsim_cancel_hw_scan, \ .remain_on_channel = mac80211_hwsim_roc, \ .cancel_remain_on_channel = mac80211_hwsim_croc, \ 
.add_chanctx = mac80211_hwsim_add_chanctx, \ .remove_chanctx = mac80211_hwsim_remove_chanctx, \ .change_chanctx = mac80211_hwsim_change_chanctx, \ .assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx,\ .unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx, static const struct ieee80211_ops mac80211_hwsim_mchan_ops = { HWSIM_COMMON_OPS HWSIM_NON_MLO_OPS HWSIM_CHANCTX_OPS }; static const struct ieee80211_ops mac80211_hwsim_mlo_ops = { HWSIM_COMMON_OPS HWSIM_CHANCTX_OPS .set_rts_threshold = mac80211_hwsim_set_rts_threshold, .change_vif_links = mac80211_hwsim_change_vif_links, .change_sta_links = mac80211_hwsim_change_sta_links, .sta_state = mac80211_hwsim_sta_state, }; struct hwsim_new_radio_params { unsigned int channels; const char *reg_alpha2; const struct ieee80211_regdomain *regd; bool reg_strict; bool p2p_device; bool use_chanctx; bool destroy_on_close; const char *hwname; bool no_vif; const u8 *perm_addr; u32 iftypes; u32 *ciphers; u8 n_ciphers; bool mlo; const struct cfg80211_pmsr_capabilities *pmsr_capa; }; static void hwsim_mcast_config_msg(struct sk_buff *mcast_skb, struct genl_info *info) { if (info) genl_notify(&hwsim_genl_family, mcast_skb, info, HWSIM_MCGRP_CONFIG, GFP_KERNEL); else genlmsg_multicast(&hwsim_genl_family, mcast_skb, 0, HWSIM_MCGRP_CONFIG, GFP_KERNEL); } static int append_radio_msg(struct sk_buff *skb, int id, struct hwsim_new_radio_params *param) { int ret; ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id); if (ret < 0) return ret; if (param->channels) { ret = nla_put_u32(skb, HWSIM_ATTR_CHANNELS, param->channels); if (ret < 0) return ret; } if (param->reg_alpha2) { ret = nla_put(skb, HWSIM_ATTR_REG_HINT_ALPHA2, 2, param->reg_alpha2); if (ret < 0) return ret; } if (param->regd) { int i; for (i = 0; i < ARRAY_SIZE(hwsim_world_regdom_custom); i++) { if (hwsim_world_regdom_custom[i] != param->regd) continue; ret = nla_put_u32(skb, HWSIM_ATTR_REG_CUSTOM_REG, i); if (ret < 0) return ret; break; } } if (param->reg_strict) { ret = nla_put_flag(skb, HWSIM_ATTR_REG_STRICT_REG); if (ret < 0) return ret; } if (param->p2p_device) { ret = nla_put_flag(skb, HWSIM_ATTR_SUPPORT_P2P_DEVICE); if (ret < 0) return ret; } if (param->use_chanctx) { ret = nla_put_flag(skb, HWSIM_ATTR_USE_CHANCTX); if (ret < 0) return ret; } if (param->hwname) { ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(param->hwname), param->hwname); if (ret < 0) return ret; } return 0; } static void hwsim_mcast_new_radio(int id, struct genl_info *info, struct hwsim_new_radio_params *param) { struct sk_buff *mcast_skb; void *data; mcast_skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!mcast_skb) return; data = genlmsg_put(mcast_skb, 0, 0, &hwsim_genl_family, 0, HWSIM_CMD_NEW_RADIO); if (!data) goto out_err; if (append_radio_msg(mcast_skb, id, param) < 0) goto out_err; genlmsg_end(mcast_skb, data); hwsim_mcast_config_msg(mcast_skb, info); return; out_err: nlmsg_free(mcast_skb); } static const struct ieee80211_sband_iftype_data sband_capa_2ghz[] = { { .types_mask = BIT(NL80211_IFTYPE_STATION), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[1] = 
IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, /* Leave all the other PHY capability bytes * unset, as DCM, beam forming, RU and PPE * threshold information are not supported */ }, .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xffff), .tx_mcs_160 = cpu_to_le16(0xffff), .rx_mcs_80p80 = cpu_to_le16(0xffff), .tx_mcs_80p80 = cpu_to_le16(0xffff), }, }, .eht_cap = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1, .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE, .phy_cap_info[3] = IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK, .phy_cap_info[4] = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI | IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK, .phy_cap_info[5] = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT | IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK | IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK, .phy_cap_info[6] = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK | IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK, .phy_cap_info[7] = IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW, }, /* For all MCS and bandwidth, set 8 NSS for both Tx and * Rx */ .eht_mcs_nss_supp = { /* * Since B0, B1, B2 and B3 are not set in * the supported channel width set field in the * HE PHY capabilities information field the * device is a 20MHz only device on 2.4GHz band. 
*/ .only_20mhz = { .rx_tx_mcs7_max_nss = 0x88, .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, }, /* PPE threshold information is not supported */ }, }, { .types_mask = BIT(NL80211_IFTYPE_AP), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, /* Leave all the other PHY capability bytes * unset, as DCM, beam forming, RU and PPE * threshold information are not supported */ }, .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xffff), .tx_mcs_160 = cpu_to_le16(0xffff), .rx_mcs_80p80 = cpu_to_le16(0xffff), .tx_mcs_80p80 = cpu_to_le16(0xffff), }, }, .eht_cap = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1, .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE, .phy_cap_info[3] = IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK, .phy_cap_info[4] = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI | IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK, .phy_cap_info[5] = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT | IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK | IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK, .phy_cap_info[6] = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK | IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK, .phy_cap_info[7] = IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW, }, /* For all MCS and bandwidth, set 8 NSS for both Tx and * Rx */ .eht_mcs_nss_supp = { /* * Since B0, B1, B2 and B3 are not set in * the supported channel width set field in the * HE PHY capabilities information field the * device is a 20MHz only device on 2.4GHz band. 
*/ .only_20mhz = { .rx_tx_mcs7_max_nss = 0x88, .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, }, /* PPE threshold information is not supported */ }, }, #ifdef CONFIG_MAC80211_MESH { .types_mask = BIT(NL80211_IFTYPE_MESH_POINT), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = 0, /* Leave all the other PHY capability bytes * unset, as DCM, beam forming, RU and PPE * threshold information are not supported */ }, .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xffff), .tx_mcs_160 = cpu_to_le16(0xffff), .rx_mcs_80p80 = cpu_to_le16(0xffff), .tx_mcs_80p80 = cpu_to_le16(0xffff), }, }, }, #endif }; static const struct ieee80211_sband_iftype_data sband_capa_5ghz[] = { { /* TODO: should we support other types, e.g., P2P? */ .types_mask = BIT(NL80211_IFTYPE_STATION), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, /* Leave all the other PHY capability bytes * unset, as DCM, beam forming, RU and PPE * threshold information are not supported */ }, .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xfffa), .tx_mcs_80p80 = cpu_to_le16(0xfffa), }, }, .eht_cap = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1, .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE | IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK, .phy_cap_info[1] = IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK | IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK, .phy_cap_info[2] = IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK 
| IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK, .phy_cap_info[3] = IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK, .phy_cap_info[4] = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI | IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK, .phy_cap_info[5] = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT | IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK | IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK, .phy_cap_info[6] = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK | IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK, .phy_cap_info[7] = IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ, }, /* For all MCS and bandwidth, set 8 NSS for both Tx and * Rx */ .eht_mcs_nss_supp = { /* * As B1 and B2 are set in the supported * channel width set field in the HE PHY * capabilities information field include all * the following MCS/NSS. */ .bw._80 = { .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, .bw._160 = { .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, }, /* PPE threshold information is not supported */ }, }, { .types_mask = BIT(NL80211_IFTYPE_AP), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, /* Leave all the other PHY capability bytes * unset, as DCM, beam forming, RU and PPE * threshold information are not supported */ }, .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xfffa), .tx_mcs_80p80 = cpu_to_le16(0xfffa), }, }, .eht_cap = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1, .phy_cap_info[0] = 
IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE | IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK, .phy_cap_info[1] = IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK | IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK, .phy_cap_info[2] = IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK | IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK, .phy_cap_info[3] = IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK, .phy_cap_info[4] = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI | IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK, .phy_cap_info[5] = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT | IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK | IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK, .phy_cap_info[6] = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK | IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK, .phy_cap_info[7] = IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ, }, /* For all MCS and bandwidth, set 8 NSS for both Tx and * Rx */ .eht_mcs_nss_supp = { /* * As B1 and B2 are set in the supported * channel width set field in the HE PHY * capabilities information field include all * the following MCS/NSS. 
*/ .bw._80 = { .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, .bw._160 = { .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, }, /* PPE threshold information is not supported */ }, }, #ifdef CONFIG_MAC80211_MESH { /* TODO: should we support other types, e.g., IBSS?*/ .types_mask = BIT(NL80211_IFTYPE_MESH_POINT), .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = 0, /* Leave all the other PHY capability bytes * unset, as DCM, beam forming, RU and PPE * threshold information are not supported */ }, .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xfffa), .tx_mcs_80p80 = cpu_to_le16(0xfffa), }, }, }, #endif }; static const struct ieee80211_sband_iftype_data sband_capa_6ghz[] = { { /* TODO: should we support other types, e.g., P2P? */ .types_mask = BIT(NL80211_IFTYPE_STATION), .he_6ghz_capa = { .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START | IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP | IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN | IEEE80211_HE_6GHZ_CAP_SM_PS | IEEE80211_HE_6GHZ_CAP_RD_RESPONDER | IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS | IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS), }, .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, /* Leave all the other PHY capability bytes * unset, as DCM, beam forming, RU and PPE * threshold information are not supported */ }, .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xfffa), .tx_mcs_80p80 = cpu_to_le16(0xfffa), }, }, .eht_cap = { .has_eht = true, 
.eht_cap_elem = { .mac_cap_info[0] = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1, .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ | IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE | IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK, .phy_cap_info[1] = IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK | IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK | IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK, .phy_cap_info[2] = IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK | IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK | IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK, .phy_cap_info[3] = IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK, .phy_cap_info[4] = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI | IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK, .phy_cap_info[5] = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT | IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK | IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK, .phy_cap_info[6] = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK | IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK | IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP, .phy_cap_info[7] = IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ, }, /* For all MCS and bandwidth, set 8 NSS for both Tx and * Rx */ .eht_mcs_nss_supp = { /* * As B1 and B2 are set in the supported * channel width set field in the HE PHY * capabilities information field and 320MHz in * 6GHz is supported include all the following * MCS/NSS. 
*/ .bw._80 = { .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, .bw._160 = { .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, .bw._320 = { .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, }, /* PPE threshold information is not supported */ }, }, { .types_mask = BIT(NL80211_IFTYPE_AP), .he_6ghz_capa = { .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START | IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP | IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN | IEEE80211_HE_6GHZ_CAP_SM_PS | IEEE80211_HE_6GHZ_CAP_RD_RESPONDER | IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS | IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS), }, .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_BSR | IEEE80211_HE_MAC_CAP2_MU_CASCADING | IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ | IEEE80211_HE_PHY_CAP2_UL_MU_FULL_MU_MIMO | IEEE80211_HE_PHY_CAP2_UL_MU_PARTIAL_MU_MIMO, /* Leave all the other PHY capability bytes * unset, as DCM, beam forming, RU and PPE * threshold information are not supported */ }, .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xfffa), .tx_mcs_80p80 = cpu_to_le16(0xfffa), }, }, .eht_cap = { .has_eht = true, .eht_cap_elem = { .mac_cap_info[0] = IEEE80211_EHT_MAC_CAP0_EPCS_PRIO_ACCESS | IEEE80211_EHT_MAC_CAP0_OM_CONTROL | IEEE80211_EHT_MAC_CAP0_TRIG_TXOP_SHARING_MODE1, .phy_cap_info[0] = IEEE80211_EHT_PHY_CAP0_320MHZ_IN_6GHZ | IEEE80211_EHT_PHY_CAP0_242_TONE_RU_GT20MHZ | IEEE80211_EHT_PHY_CAP0_NDP_4_EHT_LFT_32_GI | IEEE80211_EHT_PHY_CAP0_PARTIAL_BW_UL_MU_MIMO | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMER | IEEE80211_EHT_PHY_CAP0_SU_BEAMFORMEE | IEEE80211_EHT_PHY_CAP0_BEAMFORMEE_SS_80MHZ_MASK, .phy_cap_info[1] = IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_80MHZ_MASK | IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_160MHZ_MASK | IEEE80211_EHT_PHY_CAP1_BEAMFORMEE_SS_320MHZ_MASK, .phy_cap_info[2] = IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_80MHZ_MASK | IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_160MHZ_MASK | IEEE80211_EHT_PHY_CAP2_SOUNDING_DIM_320MHZ_MASK, .phy_cap_info[3] = IEEE80211_EHT_PHY_CAP3_NG_16_SU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_NG_16_MU_FEEDBACK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_4_2_SU_FDBK | IEEE80211_EHT_PHY_CAP3_CODEBOOK_7_5_MU_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_SU_BF_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_MU_BF_PART_BW_FDBK | IEEE80211_EHT_PHY_CAP3_TRIG_CQI_FDBK, .phy_cap_info[4] = IEEE80211_EHT_PHY_CAP4_PART_BW_DL_MU_MIMO | IEEE80211_EHT_PHY_CAP4_PSR_SR_SUPP | IEEE80211_EHT_PHY_CAP4_POWER_BOOST_FACT_SUPP | 
IEEE80211_EHT_PHY_CAP4_EHT_MU_PPDU_4_EHT_LTF_08_GI | IEEE80211_EHT_PHY_CAP4_MAX_NC_MASK, .phy_cap_info[5] = IEEE80211_EHT_PHY_CAP5_NON_TRIG_CQI_FEEDBACK | IEEE80211_EHT_PHY_CAP5_TX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_RX_LESS_242_TONE_RU_SUPP | IEEE80211_EHT_PHY_CAP5_PPE_THRESHOLD_PRESENT | IEEE80211_EHT_PHY_CAP5_COMMON_NOMINAL_PKT_PAD_MASK | IEEE80211_EHT_PHY_CAP5_MAX_NUM_SUPP_EHT_LTF_MASK, .phy_cap_info[6] = IEEE80211_EHT_PHY_CAP6_MAX_NUM_SUPP_EHT_LTF_MASK | IEEE80211_EHT_PHY_CAP6_MCS15_SUPP_MASK | IEEE80211_EHT_PHY_CAP6_EHT_DUP_6GHZ_SUPP, .phy_cap_info[7] = IEEE80211_EHT_PHY_CAP7_20MHZ_STA_RX_NDP_WIDER_BW | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_80MHZ | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_160MHZ | IEEE80211_EHT_PHY_CAP7_NON_OFDMA_UL_MU_MIMO_320MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_80MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_160MHZ | IEEE80211_EHT_PHY_CAP7_MU_BEAMFORMER_320MHZ, }, /* For all MCS and bandwidth, set 8 NSS for both Tx and * Rx */ .eht_mcs_nss_supp = { /* * As B1 and B2 are set in the supported * channel width set field in the HE PHY * capabilities information field and 320MHz in * 6GHz is supported include all the following * MCS/NSS. */ .bw._80 = { .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, .bw._160 = { .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, .bw._320 = { .rx_tx_mcs9_max_nss = 0x88, .rx_tx_mcs11_max_nss = 0x88, .rx_tx_mcs13_max_nss = 0x88, }, }, /* PPE threshold information is not supported */ }, }, #ifdef CONFIG_MAC80211_MESH { /* TODO: should we support other types, e.g., IBSS?*/ .types_mask = BIT(NL80211_IFTYPE_MESH_POINT), .he_6ghz_capa = { .capa = cpu_to_le16(IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START | IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP | IEEE80211_HE_6GHZ_CAP_MAX_MPDU_LEN | IEEE80211_HE_6GHZ_CAP_SM_PS | IEEE80211_HE_6GHZ_CAP_RD_RESPONDER | IEEE80211_HE_6GHZ_CAP_TX_ANTPAT_CONS | IEEE80211_HE_6GHZ_CAP_RX_ANTPAT_CONS), }, .he_cap = { .has_he = true, .he_cap_elem = { .mac_cap_info[0] = IEEE80211_HE_MAC_CAP0_HTC_HE, .mac_cap_info[1] = IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_RX_QOS_8, .mac_cap_info[2] = IEEE80211_HE_MAC_CAP2_ACK_EN, .mac_cap_info[3] = IEEE80211_HE_MAC_CAP3_OMI_CONTROL | IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_EXT_3, .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMSDU_IN_AMPDU, .phy_cap_info[0] = IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G | IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_80PLUS80_MHZ_IN_5G, .phy_cap_info[1] = IEEE80211_HE_PHY_CAP1_PREAMBLE_PUNC_RX_MASK | IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_TX_MAX_NSTS, .phy_cap_info[2] = 0, /* Leave all the other PHY capability bytes * unset, as DCM, beam forming, RU and PPE * threshold information are not supported */ }, .he_mcs_nss_supp = { .rx_mcs_80 = cpu_to_le16(0xfffa), .tx_mcs_80 = cpu_to_le16(0xfffa), .rx_mcs_160 = cpu_to_le16(0xfffa), .tx_mcs_160 = cpu_to_le16(0xfffa), .rx_mcs_80p80 = cpu_to_le16(0xfffa), .tx_mcs_80p80 = cpu_to_le16(0xfffa), }, }, }, #endif }; static void mac80211_hwsim_sband_capab(struct ieee80211_supported_band *sband) { u16 n_iftype_data; if (sband->band == NL80211_BAND_2GHZ) { n_iftype_data = ARRAY_SIZE(sband_capa_2ghz); sband->iftype_data = (struct ieee80211_sband_iftype_data *)sband_capa_2ghz; } else if (sband->band == NL80211_BAND_5GHZ) { n_iftype_data = ARRAY_SIZE(sband_capa_5ghz); sband->iftype_data = (struct 
ieee80211_sband_iftype_data *)sband_capa_5ghz; } else if (sband->band == NL80211_BAND_6GHZ) { n_iftype_data = ARRAY_SIZE(sband_capa_6ghz); sband->iftype_data = (struct ieee80211_sband_iftype_data *)sband_capa_6ghz; } else { return; } sband->n_iftype_data = n_iftype_data; } #ifdef CONFIG_MAC80211_MESH #define HWSIM_MESH_BIT BIT(NL80211_IFTYPE_MESH_POINT) #else #define HWSIM_MESH_BIT 0 #endif #define HWSIM_DEFAULT_IF_LIMIT \ (BIT(NL80211_IFTYPE_STATION) | \ BIT(NL80211_IFTYPE_P2P_CLIENT) | \ BIT(NL80211_IFTYPE_AP) | \ BIT(NL80211_IFTYPE_P2P_GO) | \ HWSIM_MESH_BIT) #define HWSIM_IFTYPE_SUPPORT_MASK \ (BIT(NL80211_IFTYPE_STATION) | \ BIT(NL80211_IFTYPE_AP) | \ BIT(NL80211_IFTYPE_P2P_CLIENT) | \ BIT(NL80211_IFTYPE_P2P_GO) | \ BIT(NL80211_IFTYPE_ADHOC) | \ BIT(NL80211_IFTYPE_MESH_POINT) | \ BIT(NL80211_IFTYPE_OCB)) static int mac80211_hwsim_new_radio(struct genl_info *info, struct hwsim_new_radio_params *param) { int err; u8 addr[ETH_ALEN]; struct mac80211_hwsim_data *data; struct ieee80211_hw *hw; enum nl80211_band band; const struct ieee80211_ops *ops = &mac80211_hwsim_ops; struct net *net; int idx, i; int n_limits = 0; if (WARN_ON(param->channels > 1 && !param->use_chanctx)) return -EINVAL; spin_lock_bh(&hwsim_radio_lock); idx = hwsim_radio_idx++; spin_unlock_bh(&hwsim_radio_lock); if (param->mlo) ops = &mac80211_hwsim_mlo_ops; else if (param->use_chanctx) ops = &mac80211_hwsim_mchan_ops; hw = ieee80211_alloc_hw_nm(sizeof(*data), ops, param->hwname); if (!hw) { pr_debug("mac80211_hwsim: ieee80211_alloc_hw failed\n"); err = -ENOMEM; goto failed; } /* ieee80211_alloc_hw_nm may have used a default name */ param->hwname = wiphy_name(hw->wiphy); if (info) net = genl_info_net(info); else net = &init_net; wiphy_net_set(hw->wiphy, net); data = hw->priv; data->hw = hw; data->dev = device_create(hwsim_class, NULL, 0, hw, "hwsim%d", idx); if (IS_ERR(data->dev)) { printk(KERN_DEBUG "mac80211_hwsim: device_create failed (%ld)\n", PTR_ERR(data->dev)); err = -ENOMEM; goto failed_drvdata; } data->dev->driver = &mac80211_hwsim_driver.driver; err = device_bind_driver(data->dev); if (err != 0) { pr_debug("mac80211_hwsim: device_bind_driver failed (%d)\n", err); goto failed_bind; } skb_queue_head_init(&data->pending); SET_IEEE80211_DEV(hw, data->dev); if (!param->perm_addr) { eth_zero_addr(addr); addr[0] = 0x02; addr[3] = idx >> 8; addr[4] = idx; memcpy(data->addresses[0].addr, addr, ETH_ALEN); /* Why need here second address ? 
*/ memcpy(data->addresses[1].addr, addr, ETH_ALEN); data->addresses[1].addr[0] |= 0x40; hw->wiphy->n_addresses = 2; hw->wiphy->addresses = data->addresses; /* possible address clash is checked at hash table insertion */ } else { memcpy(data->addresses[0].addr, param->perm_addr, ETH_ALEN); /* compatibility with automatically generated mac addr */ memcpy(data->addresses[1].addr, param->perm_addr, ETH_ALEN); hw->wiphy->n_addresses = 2; hw->wiphy->addresses = data->addresses; } data->channels = param->channels; data->use_chanctx = param->use_chanctx; data->idx = idx; data->destroy_on_close = param->destroy_on_close; if (info) data->portid = info->snd_portid; /* setup interface limits, only on interface types we support */ if (param->iftypes & BIT(NL80211_IFTYPE_ADHOC)) { data->if_limits[n_limits].max = 1; data->if_limits[n_limits].types = BIT(NL80211_IFTYPE_ADHOC); n_limits++; } if (param->iftypes & HWSIM_DEFAULT_IF_LIMIT) { data->if_limits[n_limits].max = 2048; /* * For this case, we may only support a subset of * HWSIM_DEFAULT_IF_LIMIT, therefore we only want to add the * bits that both param->iftype & HWSIM_DEFAULT_IF_LIMIT have. */ data->if_limits[n_limits].types = HWSIM_DEFAULT_IF_LIMIT & param->iftypes; n_limits++; } if (param->iftypes & BIT(NL80211_IFTYPE_P2P_DEVICE)) { data->if_limits[n_limits].max = 1; data->if_limits[n_limits].types = BIT(NL80211_IFTYPE_P2P_DEVICE); n_limits++; } if (data->use_chanctx) { hw->wiphy->max_scan_ssids = 255; hw->wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN; hw->wiphy->max_remain_on_channel_duration = 1000; data->if_combination.radar_detect_widths = 0; data->if_combination.num_different_channels = data->channels; } else { data->if_combination.num_different_channels = 1; data->if_combination.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_5) | BIT(NL80211_CHAN_WIDTH_10) | BIT(NL80211_CHAN_WIDTH_20_NOHT) | BIT(NL80211_CHAN_WIDTH_20) | BIT(NL80211_CHAN_WIDTH_40) | BIT(NL80211_CHAN_WIDTH_80) | BIT(NL80211_CHAN_WIDTH_160); } if (!n_limits) { err = -EINVAL; goto failed_hw; } data->if_combination.max_interfaces = 0; for (i = 0; i < n_limits; i++) data->if_combination.max_interfaces += data->if_limits[i].max; data->if_combination.n_limits = n_limits; data->if_combination.limits = data->if_limits; /* * If we actually were asked to support combinations, * advertise them - if there's only a single thing like * only IBSS then don't advertise it as combinations. 
*/ if (data->if_combination.max_interfaces > 1) { hw->wiphy->iface_combinations = &data->if_combination; hw->wiphy->n_iface_combinations = 1; } if (param->ciphers) { memcpy(data->ciphers, param->ciphers, param->n_ciphers * sizeof(u32)); hw->wiphy->cipher_suites = data->ciphers; hw->wiphy->n_cipher_suites = param->n_ciphers; } hw->wiphy->mbssid_max_interfaces = 8; hw->wiphy->ema_max_profile_periodicity = 3; data->rx_rssi = DEFAULT_RX_RSSI; INIT_DELAYED_WORK(&data->roc_start, hw_roc_start); INIT_DELAYED_WORK(&data->roc_done, hw_roc_done); INIT_DELAYED_WORK(&data->hw_scan, hw_scan_work); hw->queues = 5; hw->offchannel_tx_hw_queue = 4; ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); ieee80211_hw_set(hw, CHANCTX_STA_CSA); ieee80211_hw_set(hw, SUPPORTS_HT_CCK_RATES); ieee80211_hw_set(hw, QUEUE_CONTROL); ieee80211_hw_set(hw, WANT_MONITOR_VIF); ieee80211_hw_set(hw, AMPDU_AGGREGATION); ieee80211_hw_set(hw, MFP_CAPABLE); ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, SUPPORTS_PS); ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(hw, TDLS_WIDER_BW); ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); if (param->mlo) { hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_MLO; ieee80211_hw_set(hw, HAS_RATE_CONTROL); ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); ieee80211_hw_set(hw, CONNECTION_MONITOR); ieee80211_hw_set(hw, AP_LINK_PS); } else { ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); ieee80211_hw_set(hw, PS_NULLFUNC_STACK); if (rctbl) ieee80211_hw_set(hw, SUPPORTS_RC_TABLE); } hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS | WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL | WIPHY_FLAG_AP_UAPSD | WIPHY_FLAG_SUPPORTS_5_10_MHZ | WIPHY_FLAG_HAS_CHANNEL_SWITCH; hw->wiphy->features |= NL80211_FEATURE_ACTIVE_MONITOR | NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | NL80211_FEATURE_STATIC_SMPS | NL80211_FEATURE_DYNAMIC_SMPS | NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR; wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_PROTECTION); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_BEACON_RATE_LEGACY); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_SCAN_MIN_PREQ_CONTENT); hw->wiphy->interface_modes = param->iftypes; /* ask mac80211 to reserve space for magic */ hw->vif_data_size = sizeof(struct hwsim_vif_priv); hw->sta_data_size = sizeof(struct hwsim_sta_priv); hw->chanctx_data_size = sizeof(struct hwsim_chanctx_priv); memcpy(data->channels_2ghz, hwsim_channels_2ghz, sizeof(hwsim_channels_2ghz)); memcpy(data->channels_5ghz, hwsim_channels_5ghz, sizeof(hwsim_channels_5ghz)); memcpy(data->channels_6ghz, hwsim_channels_6ghz, sizeof(hwsim_channels_6ghz)); memcpy(data->channels_s1g, hwsim_channels_s1g, sizeof(hwsim_channels_s1g)); memcpy(data->rates, hwsim_rates, sizeof(hwsim_rates)); for (band = NL80211_BAND_2GHZ; band < NUM_NL80211_BANDS; band++) { struct ieee80211_supported_band *sband = &data->bands[band]; sband->band = band; switch (band) { case NL80211_BAND_2GHZ: sband->channels = data->channels_2ghz; sband->n_channels = ARRAY_SIZE(hwsim_channels_2ghz); sband->bitrates = data->rates; sband->n_bitrates = ARRAY_SIZE(hwsim_rates); break; case NL80211_BAND_5GHZ: sband->channels = data->channels_5ghz; sband->n_channels = ARRAY_SIZE(hwsim_channels_5ghz); sband->bitrates = data->rates + 4; sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4; 
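			/*
			 * The VHT supported-MCS map assembled just below is a
			 * 16-bit field holding one 2-bit subfield per spatial
			 * stream, at bit offset 2 * (nss - 1) for NSS 1..8.
			 * ORing IEEE80211_VHT_MCS_SUPPORT_0_9 into every
			 * subfield advertises MCS 0-9 on all eight streams
			 * (a subfield value of 3 would mean "stream not
			 * supported").  The struct members are __le16, hence
			 * the cpu_to_le16() conversion.  An equivalent loop
			 * form, shown only as a sketch of the same encoding:
			 *
			 *	u16 mcs_map = 0;
			 *	int nss;
			 *
			 *	for (nss = 1; nss <= 8; nss++)
			 *		mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 <<
			 *			   (2 * (nss - 1));
			 *	sband->vht_cap.vht_mcs.rx_mcs_map =
			 *		cpu_to_le16(mcs_map);
			 */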
sband->vht_cap.vht_supported = true; sband->vht_cap.cap = IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ | IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_SHORT_GI_80 | IEEE80211_VHT_CAP_SHORT_GI_160 | IEEE80211_VHT_CAP_TXSTBC | IEEE80211_VHT_CAP_RXSTBC_4 | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; sband->vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 << 0 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 2 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 4 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 6 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 8 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 10 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 12 | IEEE80211_VHT_MCS_SUPPORT_0_9 << 14); sband->vht_cap.vht_mcs.tx_mcs_map = sband->vht_cap.vht_mcs.rx_mcs_map; break; case NL80211_BAND_6GHZ: sband->channels = data->channels_6ghz; sband->n_channels = ARRAY_SIZE(hwsim_channels_6ghz); sband->bitrates = data->rates + 4; sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4; break; case NL80211_BAND_S1GHZ: memcpy(&sband->s1g_cap, &hwsim_s1g_cap, sizeof(sband->s1g_cap)); sband->channels = data->channels_s1g; sband->n_channels = ARRAY_SIZE(hwsim_channels_s1g); break; default: continue; } if (band != NL80211_BAND_6GHZ){ sband->ht_cap.ht_supported = true; sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_DSSSCCK40; sband->ht_cap.ampdu_factor = 0x3; sband->ht_cap.ampdu_density = 0x6; memset(&sband->ht_cap.mcs, 0, sizeof(sband->ht_cap.mcs)); sband->ht_cap.mcs.rx_mask[0] = 0xff; sband->ht_cap.mcs.rx_mask[1] = 0xff; sband->ht_cap.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; } mac80211_hwsim_sband_capab(sband); hw->wiphy->bands[band] = sband; } /* By default all radios belong to the first group */ data->group = 1; mutex_init(&data->mutex); data->netgroup = hwsim_net_get_netgroup(net); data->wmediumd = hwsim_net_get_wmediumd(net); /* Enable frame retransmissions for lossy channels */ hw->max_rates = 4; hw->max_rate_tries = 11; hw->wiphy->vendor_commands = mac80211_hwsim_vendor_commands; hw->wiphy->n_vendor_commands = ARRAY_SIZE(mac80211_hwsim_vendor_commands); hw->wiphy->vendor_events = mac80211_hwsim_vendor_events; hw->wiphy->n_vendor_events = ARRAY_SIZE(mac80211_hwsim_vendor_events); if (param->reg_strict) hw->wiphy->regulatory_flags |= REGULATORY_STRICT_REG; if (param->regd) { data->regd = param->regd; hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG; wiphy_apply_custom_regulatory(hw->wiphy, param->regd); /* give the regulatory workqueue a chance to run */ schedule_timeout_interruptible(1); } if (param->no_vif) ieee80211_hw_set(hw, NO_AUTO_VIF); wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); for (i = 0; i < ARRAY_SIZE(data->link_data); i++) { hrtimer_init(&data->link_data[i].beacon_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_SOFT); data->link_data[i].beacon_timer.function = mac80211_hwsim_beacon; data->link_data[i].link_id = i; } err = ieee80211_register_hw(hw); if (err < 0) { pr_debug("mac80211_hwsim: ieee80211_register_hw failed (%d)\n", err); goto failed_hw; } wiphy_dbg(hw->wiphy, "hwaddr %pM registered\n", hw->wiphy->perm_addr); if (param->reg_alpha2) { data->alpha2[0] = param->reg_alpha2[0]; data->alpha2[1] = param->reg_alpha2[1]; regulatory_hint(hw->wiphy, param->reg_alpha2); } data->debugfs = debugfs_create_dir("hwsim", hw->wiphy->debugfsdir); debugfs_create_file("ps", 0666, data->debugfs, data, &hwsim_fops_ps); debugfs_create_file("group", 0666, data->debugfs, data, 
&hwsim_fops_group); debugfs_create_file("rx_rssi", 0666, data->debugfs, data, &hwsim_fops_rx_rssi); if (!data->use_chanctx) debugfs_create_file("dfs_simulate_radar", 0222, data->debugfs, data, &hwsim_simulate_radar); if (param->pmsr_capa) { data->pmsr_capa = *param->pmsr_capa; hw->wiphy->pmsr_capa = &data->pmsr_capa; } spin_lock_bh(&hwsim_radio_lock); err = rhashtable_insert_fast(&hwsim_radios_rht, &data->rht, hwsim_rht_params); if (err < 0) { if (info) { GENL_SET_ERR_MSG(info, "perm addr already present"); NL_SET_BAD_ATTR(info->extack, info->attrs[HWSIM_ATTR_PERM_ADDR]); } spin_unlock_bh(&hwsim_radio_lock); goto failed_final_insert; } list_add_tail(&data->list, &hwsim_radios); hwsim_radios_generation++; spin_unlock_bh(&hwsim_radio_lock); hwsim_mcast_new_radio(idx, info, param); return idx; failed_final_insert: debugfs_remove_recursive(data->debugfs); ieee80211_unregister_hw(data->hw); failed_hw: device_release_driver(data->dev); failed_bind: device_unregister(data->dev); failed_drvdata: ieee80211_free_hw(hw); failed: return err; } static void hwsim_mcast_del_radio(int id, const char *hwname, struct genl_info *info) { struct sk_buff *skb; void *data; int ret; skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return; data = genlmsg_put(skb, 0, 0, &hwsim_genl_family, 0, HWSIM_CMD_DEL_RADIO); if (!data) goto error; ret = nla_put_u32(skb, HWSIM_ATTR_RADIO_ID, id); if (ret < 0) goto error; ret = nla_put(skb, HWSIM_ATTR_RADIO_NAME, strlen(hwname), hwname); if (ret < 0) goto error; genlmsg_end(skb, data); hwsim_mcast_config_msg(skb, info); return; error: nlmsg_free(skb); } static void mac80211_hwsim_del_radio(struct mac80211_hwsim_data *data, const char *hwname, struct genl_info *info) { hwsim_mcast_del_radio(data->idx, hwname, info); debugfs_remove_recursive(data->debugfs); ieee80211_unregister_hw(data->hw); device_release_driver(data->dev); device_unregister(data->dev); ieee80211_free_hw(data->hw); } static int mac80211_hwsim_get_radio(struct sk_buff *skb, struct mac80211_hwsim_data *data, u32 portid, u32 seq, struct netlink_callback *cb, int flags) { void *hdr; struct hwsim_new_radio_params param = { }; int res = -EMSGSIZE; hdr = genlmsg_put(skb, portid, seq, &hwsim_genl_family, flags, HWSIM_CMD_GET_RADIO); if (!hdr) return -EMSGSIZE; if (cb) genl_dump_check_consistent(cb, hdr); if (data->alpha2[0] && data->alpha2[1]) param.reg_alpha2 = data->alpha2; param.reg_strict = !!(data->hw->wiphy->regulatory_flags & REGULATORY_STRICT_REG); param.p2p_device = !!(data->hw->wiphy->interface_modes & BIT(NL80211_IFTYPE_P2P_DEVICE)); param.use_chanctx = data->use_chanctx; param.regd = data->regd; param.channels = data->channels; param.hwname = wiphy_name(data->hw->wiphy); param.pmsr_capa = &data->pmsr_capa; res = append_radio_msg(skb, data->idx, &param); if (res < 0) goto out_err; genlmsg_end(skb, hdr); return 0; out_err: genlmsg_cancel(skb, hdr); return res; } static void mac80211_hwsim_free(void) { struct mac80211_hwsim_data *data; spin_lock_bh(&hwsim_radio_lock); while ((data = list_first_entry_or_null(&hwsim_radios, struct mac80211_hwsim_data, list))) { list_del(&data->list); spin_unlock_bh(&hwsim_radio_lock); mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL); spin_lock_bh(&hwsim_radio_lock); } spin_unlock_bh(&hwsim_radio_lock); class_destroy(hwsim_class); } static const struct net_device_ops hwsim_netdev_ops = { .ndo_start_xmit = hwsim_mon_xmit, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; static void hwsim_mon_setup(struct net_device *dev) 
{ u8 addr[ETH_ALEN]; dev->netdev_ops = &hwsim_netdev_ops; dev->needs_free_netdev = true; ether_setup(dev); dev->priv_flags |= IFF_NO_QUEUE; dev->type = ARPHRD_IEEE80211_RADIOTAP; eth_zero_addr(addr); addr[0] = 0x12; eth_hw_addr_set(dev, addr); } static void hwsim_register_wmediumd(struct net *net, u32 portid) { struct mac80211_hwsim_data *data; hwsim_net_set_wmediumd(net, portid); spin_lock_bh(&hwsim_radio_lock); list_for_each_entry(data, &hwsim_radios, list) { if (data->netgroup == hwsim_net_get_netgroup(net)) data->wmediumd = portid; } spin_unlock_bh(&hwsim_radio_lock); } static int hwsim_tx_info_frame_received_nl(struct sk_buff *skb_2, struct genl_info *info) { struct ieee80211_hdr *hdr; struct mac80211_hwsim_data *data2; struct ieee80211_tx_info *txi; struct hwsim_tx_rate *tx_attempts; u64 ret_skb_cookie; struct sk_buff *skb, *tmp; const u8 *src; unsigned int hwsim_flags; int i; unsigned long flags; bool found = false; if (!info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER] || !info->attrs[HWSIM_ATTR_FLAGS] || !info->attrs[HWSIM_ATTR_COOKIE] || !info->attrs[HWSIM_ATTR_SIGNAL] || !info->attrs[HWSIM_ATTR_TX_INFO]) goto out; src = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_TRANSMITTER]); hwsim_flags = nla_get_u32(info->attrs[HWSIM_ATTR_FLAGS]); ret_skb_cookie = nla_get_u64(info->attrs[HWSIM_ATTR_COOKIE]); data2 = get_hwsim_data_ref_from_addr(src); if (!data2) goto out; if (!hwsim_virtio_enabled) { if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) goto out; if (info->snd_portid != data2->wmediumd) goto out; } /* look for the skb matching the cookie passed back from user */ spin_lock_irqsave(&data2->pending.lock, flags); skb_queue_walk_safe(&data2->pending, skb, tmp) { uintptr_t skb_cookie; txi = IEEE80211_SKB_CB(skb); skb_cookie = (uintptr_t)txi->rate_driver_data[0]; if (skb_cookie == ret_skb_cookie) { __skb_unlink(skb, &data2->pending); found = true; break; } } spin_unlock_irqrestore(&data2->pending.lock, flags); /* not found */ if (!found) goto out; /* Tx info received because the frame was broadcasted on user space, so we get all the necessary info: tx attempts and skb control buff */ tx_attempts = (struct hwsim_tx_rate *)nla_data( info->attrs[HWSIM_ATTR_TX_INFO]); /* now send back TX status */ txi = IEEE80211_SKB_CB(skb); ieee80211_tx_info_clear_status(txi); for (i = 0; i < IEEE80211_TX_MAX_RATES; i++) { txi->status.rates[i].idx = tx_attempts[i].idx; txi->status.rates[i].count = tx_attempts[i].count; } txi->status.ack_signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); if (!(hwsim_flags & HWSIM_TX_CTL_NO_ACK) && (hwsim_flags & HWSIM_TX_STAT_ACK)) { if (skb->len >= 16) { hdr = (struct ieee80211_hdr *) skb->data; mac80211_hwsim_monitor_ack(data2->channel, hdr->addr2); } txi->flags |= IEEE80211_TX_STAT_ACK; } if (hwsim_flags & HWSIM_TX_CTL_NO_ACK) txi->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED; ieee80211_tx_status_irqsafe(data2->hw, skb); return 0; out: return -EINVAL; } static int hwsim_cloned_frame_received_nl(struct sk_buff *skb_2, struct genl_info *info) { struct mac80211_hwsim_data *data2; struct ieee80211_rx_status rx_status; struct ieee80211_hdr *hdr; const u8 *dst; int frame_data_len; void *frame_data; struct sk_buff *skb = NULL; struct ieee80211_channel *channel = NULL; if (!info->attrs[HWSIM_ATTR_ADDR_RECEIVER] || !info->attrs[HWSIM_ATTR_FRAME] || !info->attrs[HWSIM_ATTR_RX_RATE] || !info->attrs[HWSIM_ATTR_SIGNAL]) goto out; dst = (void *)nla_data(info->attrs[HWSIM_ATTR_ADDR_RECEIVER]); frame_data_len = nla_len(info->attrs[HWSIM_ATTR_FRAME]); frame_data = (void 
*)nla_data(info->attrs[HWSIM_ATTR_FRAME]); if (frame_data_len < sizeof(struct ieee80211_hdr_3addr) || frame_data_len > IEEE80211_MAX_DATA_LEN) goto err; /* Allocate new skb here */ skb = alloc_skb(frame_data_len, GFP_KERNEL); if (skb == NULL) goto err; /* Copy the data */ skb_put_data(skb, frame_data, frame_data_len); data2 = get_hwsim_data_ref_from_addr(dst); if (!data2) goto out; if (data2->use_chanctx) { if (data2->tmp_chan) channel = data2->tmp_chan; } else { channel = data2->channel; } if (!hwsim_virtio_enabled) { if (hwsim_net_get_netgroup(genl_info_net(info)) != data2->netgroup) goto out; if (info->snd_portid != data2->wmediumd) goto out; } /* check if radio is configured properly */ if ((data2->idle && !data2->tmp_chan) || !data2->started) goto out; /* A frame is received from user space */ memset(&rx_status, 0, sizeof(rx_status)); if (info->attrs[HWSIM_ATTR_FREQ]) { struct tx_iter_data iter_data = {}; /* throw away off-channel packets, but allow both the temporary * ("hw" scan/remain-on-channel), regular channels and links, * since the internal datapath also allows this */ rx_status.freq = nla_get_u32(info->attrs[HWSIM_ATTR_FREQ]); iter_data.channel = ieee80211_get_channel(data2->hw->wiphy, rx_status.freq); if (!iter_data.channel) goto out; rx_status.band = iter_data.channel->band; mutex_lock(&data2->mutex); if (!hwsim_chans_compat(iter_data.channel, channel)) { ieee80211_iterate_active_interfaces_atomic( data2->hw, IEEE80211_IFACE_ITER_NORMAL, mac80211_hwsim_tx_iter, &iter_data); if (!iter_data.receive) { mutex_unlock(&data2->mutex); goto out; } } mutex_unlock(&data2->mutex); } else if (!channel) { goto out; } else { rx_status.freq = channel->center_freq; rx_status.band = channel->band; } rx_status.rate_idx = nla_get_u32(info->attrs[HWSIM_ATTR_RX_RATE]); if (rx_status.rate_idx >= data2->hw->wiphy->bands[rx_status.band]->n_bitrates) goto out; rx_status.signal = nla_get_u32(info->attrs[HWSIM_ATTR_SIGNAL]); hdr = (void *)skb->data; if (ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control)) rx_status.boottime_ns = ktime_get_boottime_ns(); mac80211_hwsim_rx(data2, &rx_status, skb); return 0; err: pr_debug("mac80211_hwsim: error occurred in %s\n", __func__); out: dev_kfree_skb(skb); return -EINVAL; } static int hwsim_register_received_nl(struct sk_buff *skb_2, struct genl_info *info) { struct net *net = genl_info_net(info); struct mac80211_hwsim_data *data; int chans = 1; spin_lock_bh(&hwsim_radio_lock); list_for_each_entry(data, &hwsim_radios, list) chans = max(chans, data->channels); spin_unlock_bh(&hwsim_radio_lock); /* In the future we should revise the userspace API and allow it * to set a flag that it does support multi-channel, then we can * let this pass conditionally on the flag. * For current userspace, prohibit it since it won't work right. 
*/ if (chans > 1) return -EOPNOTSUPP; if (hwsim_net_get_wmediumd(net)) return -EBUSY; hwsim_register_wmediumd(net, info->snd_portid); pr_debug("mac80211_hwsim: received a REGISTER, " "switching to wmediumd mode with pid %d\n", info->snd_portid); return 0; } /* ensures ciphers only include ciphers listed in 'hwsim_ciphers' array */ static bool hwsim_known_ciphers(const u32 *ciphers, int n_ciphers) { int i; for (i = 0; i < n_ciphers; i++) { int j; int found = 0; for (j = 0; j < ARRAY_SIZE(hwsim_ciphers); j++) { if (ciphers[i] == hwsim_ciphers[j]) { found = 1; break; } } if (!found) return false; } return true; } static int parse_ftm_capa(const struct nlattr *ftm_capa, struct cfg80211_pmsr_capabilities *out, struct genl_info *info) { struct nlattr *tb[NL80211_PMSR_FTM_CAPA_ATTR_MAX + 1]; int ret; ret = nla_parse_nested(tb, NL80211_PMSR_FTM_CAPA_ATTR_MAX, ftm_capa, hwsim_ftm_capa_policy, NULL); if (ret) { NL_SET_ERR_MSG_ATTR(info->extack, ftm_capa, "malformed FTM capability"); return -EINVAL; } out->ftm.supported = 1; if (tb[NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES]) out->ftm.preambles = nla_get_u32(tb[NL80211_PMSR_FTM_CAPA_ATTR_PREAMBLES]); if (tb[NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS]) out->ftm.bandwidths = nla_get_u32(tb[NL80211_PMSR_FTM_CAPA_ATTR_BANDWIDTHS]); if (tb[NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT]) out->ftm.max_bursts_exponent = nla_get_u8(tb[NL80211_PMSR_FTM_CAPA_ATTR_MAX_BURSTS_EXPONENT]); if (tb[NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST]) out->ftm.max_ftms_per_burst = nla_get_u8(tb[NL80211_PMSR_FTM_CAPA_ATTR_MAX_FTMS_PER_BURST]); out->ftm.asap = !!tb[NL80211_PMSR_FTM_CAPA_ATTR_ASAP]; out->ftm.non_asap = !!tb[NL80211_PMSR_FTM_CAPA_ATTR_NON_ASAP]; out->ftm.request_lci = !!tb[NL80211_PMSR_FTM_CAPA_ATTR_REQ_LCI]; out->ftm.request_civicloc = !!tb[NL80211_PMSR_FTM_CAPA_ATTR_REQ_CIVICLOC]; out->ftm.trigger_based = !!tb[NL80211_PMSR_FTM_CAPA_ATTR_TRIGGER_BASED]; out->ftm.non_trigger_based = !!tb[NL80211_PMSR_FTM_CAPA_ATTR_NON_TRIGGER_BASED]; return 0; } static int parse_pmsr_capa(const struct nlattr *pmsr_capa, struct cfg80211_pmsr_capabilities *out, struct genl_info *info) { struct nlattr *tb[NL80211_PMSR_ATTR_MAX + 1]; struct nlattr *nla; int size; int ret; ret = nla_parse_nested(tb, NL80211_PMSR_ATTR_MAX, pmsr_capa, hwsim_pmsr_capa_policy, NULL); if (ret) { NL_SET_ERR_MSG_ATTR(info->extack, pmsr_capa, "malformed PMSR capability"); return -EINVAL; } if (tb[NL80211_PMSR_ATTR_MAX_PEERS]) out->max_peers = nla_get_u32(tb[NL80211_PMSR_ATTR_MAX_PEERS]); out->report_ap_tsf = !!tb[NL80211_PMSR_ATTR_REPORT_AP_TSF]; out->randomize_mac_addr = !!tb[NL80211_PMSR_ATTR_RANDOMIZE_MAC_ADDR]; if (!tb[NL80211_PMSR_ATTR_TYPE_CAPA]) { NL_SET_ERR_MSG_ATTR(info->extack, tb[NL80211_PMSR_ATTR_TYPE_CAPA], "malformed PMSR type"); return -EINVAL; } nla_for_each_nested(nla, tb[NL80211_PMSR_ATTR_TYPE_CAPA], size) { switch (nla_type(nla)) { case NL80211_PMSR_TYPE_FTM: parse_ftm_capa(nla, out, info); break; default: NL_SET_ERR_MSG_ATTR(info->extack, nla, "unsupported measurement type"); return -EINVAL; } } return 0; } static int hwsim_new_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct hwsim_new_radio_params param = { 0 }; const char *hwname = NULL; int ret; param.reg_strict = info->attrs[HWSIM_ATTR_REG_STRICT_REG]; param.p2p_device = info->attrs[HWSIM_ATTR_SUPPORT_P2P_DEVICE]; param.channels = channels; param.destroy_on_close = info->attrs[HWSIM_ATTR_DESTROY_RADIO_ON_CLOSE]; if (info->attrs[HWSIM_ATTR_CHANNELS]) param.channels = nla_get_u32(info->attrs[HWSIM_ATTR_CHANNELS]); if 
(param.channels < 1) { GENL_SET_ERR_MSG(info, "must have at least one channel"); return -EINVAL; } if (info->attrs[HWSIM_ATTR_NO_VIF]) param.no_vif = true; if (info->attrs[HWSIM_ATTR_USE_CHANCTX]) param.use_chanctx = true; else param.use_chanctx = (param.channels > 1); if (info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]) param.reg_alpha2 = nla_data(info->attrs[HWSIM_ATTR_REG_HINT_ALPHA2]); if (info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]) { u32 idx = nla_get_u32(info->attrs[HWSIM_ATTR_REG_CUSTOM_REG]); if (idx >= ARRAY_SIZE(hwsim_world_regdom_custom)) return -EINVAL; idx = array_index_nospec(idx, ARRAY_SIZE(hwsim_world_regdom_custom)); param.regd = hwsim_world_regdom_custom[idx]; } if (info->attrs[HWSIM_ATTR_PERM_ADDR]) { if (!is_valid_ether_addr( nla_data(info->attrs[HWSIM_ATTR_PERM_ADDR]))) { GENL_SET_ERR_MSG(info,"MAC is no valid source addr"); NL_SET_BAD_ATTR(info->extack, info->attrs[HWSIM_ATTR_PERM_ADDR]); return -EINVAL; } param.perm_addr = nla_data(info->attrs[HWSIM_ATTR_PERM_ADDR]); } if (info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT]) { param.iftypes = nla_get_u32(info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT]); if (param.iftypes & ~HWSIM_IFTYPE_SUPPORT_MASK) { NL_SET_ERR_MSG_ATTR(info->extack, info->attrs[HWSIM_ATTR_IFTYPE_SUPPORT], "cannot support more iftypes than kernel"); return -EINVAL; } } else { param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK; } /* ensure both flag and iftype support is honored */ if (param.p2p_device || param.iftypes & BIT(NL80211_IFTYPE_P2P_DEVICE)) { param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE); param.p2p_device = true; } if (info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]) { u32 len = nla_len(info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]); param.ciphers = nla_data(info->attrs[HWSIM_ATTR_CIPHER_SUPPORT]); if (len % sizeof(u32)) { NL_SET_ERR_MSG_ATTR(info->extack, info->attrs[HWSIM_ATTR_CIPHER_SUPPORT], "bad cipher list length"); return -EINVAL; } param.n_ciphers = len / sizeof(u32); if (param.n_ciphers > ARRAY_SIZE(hwsim_ciphers)) { NL_SET_ERR_MSG_ATTR(info->extack, info->attrs[HWSIM_ATTR_CIPHER_SUPPORT], "too many ciphers specified"); return -EINVAL; } if (!hwsim_known_ciphers(param.ciphers, param.n_ciphers)) { NL_SET_ERR_MSG_ATTR(info->extack, info->attrs[HWSIM_ATTR_CIPHER_SUPPORT], "unsupported ciphers specified"); return -EINVAL; } } param.mlo = info->attrs[HWSIM_ATTR_MLO_SUPPORT]; if (param.mlo) param.use_chanctx = true; if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]), nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), GFP_KERNEL); if (!hwname) return -ENOMEM; param.hwname = hwname; } if (info->attrs[HWSIM_ATTR_PMSR_SUPPORT]) { struct cfg80211_pmsr_capabilities *pmsr_capa; pmsr_capa = kmalloc(sizeof(*pmsr_capa), GFP_KERNEL); if (!pmsr_capa) { ret = -ENOMEM; goto out_free; } param.pmsr_capa = pmsr_capa; ret = parse_pmsr_capa(info->attrs[HWSIM_ATTR_PMSR_SUPPORT], pmsr_capa, info); if (ret) goto out_free; } ret = mac80211_hwsim_new_radio(info, &param); out_free: kfree(hwname); kfree(param.pmsr_capa); return ret; } static int hwsim_del_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct mac80211_hwsim_data *data; s64 idx = -1; const char *hwname = NULL; if (info->attrs[HWSIM_ATTR_RADIO_ID]) { idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); } else if (info->attrs[HWSIM_ATTR_RADIO_NAME]) { hwname = kstrndup((char *)nla_data(info->attrs[HWSIM_ATTR_RADIO_NAME]), nla_len(info->attrs[HWSIM_ATTR_RADIO_NAME]), GFP_KERNEL); if (!hwname) return -ENOMEM; } else return -EINVAL; spin_lock_bh(&hwsim_radio_lock); list_for_each_entry(data, 
&hwsim_radios, list) { if (idx >= 0) { if (data->idx != idx) continue; } else { if (!hwname || strcmp(hwname, wiphy_name(data->hw->wiphy))) continue; } if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) continue; list_del(&data->list); rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, hwsim_rht_params); hwsim_radios_generation++; spin_unlock_bh(&hwsim_radio_lock); mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), info); kfree(hwname); return 0; } spin_unlock_bh(&hwsim_radio_lock); kfree(hwname); return -ENODEV; } static int hwsim_get_radio_nl(struct sk_buff *msg, struct genl_info *info) { struct mac80211_hwsim_data *data; struct sk_buff *skb; int idx, res = -ENODEV; if (!info->attrs[HWSIM_ATTR_RADIO_ID]) return -EINVAL; idx = nla_get_u32(info->attrs[HWSIM_ATTR_RADIO_ID]); spin_lock_bh(&hwsim_radio_lock); list_for_each_entry(data, &hwsim_radios, list) { if (data->idx != idx) continue; if (!net_eq(wiphy_net(data->hw->wiphy), genl_info_net(info))) continue; skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC); if (!skb) { res = -ENOMEM; goto out_err; } res = mac80211_hwsim_get_radio(skb, data, info->snd_portid, info->snd_seq, NULL, 0); if (res < 0) { nlmsg_free(skb); goto out_err; } res = genlmsg_reply(skb, info); break; } out_err: spin_unlock_bh(&hwsim_radio_lock); return res; } static int hwsim_dump_radio_nl(struct sk_buff *skb, struct netlink_callback *cb) { int last_idx = cb->args[0] - 1; struct mac80211_hwsim_data *data = NULL; int res = 0; void *hdr; spin_lock_bh(&hwsim_radio_lock); cb->seq = hwsim_radios_generation; if (last_idx >= hwsim_radio_idx-1) goto done; list_for_each_entry(data, &hwsim_radios, list) { if (data->idx <= last_idx) continue; if (!net_eq(wiphy_net(data->hw->wiphy), sock_net(skb->sk))) continue; res = mac80211_hwsim_get_radio(skb, data, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, cb, NLM_F_MULTI); if (res < 0) break; last_idx = data->idx; } cb->args[0] = last_idx + 1; /* list changed, but no new element sent, set interrupted flag */ if (skb->len == 0 && cb->prev_seq && cb->seq != cb->prev_seq) { hdr = genlmsg_put(skb, NETLINK_CB(cb->skb).portid, cb->nlh->nlmsg_seq, &hwsim_genl_family, NLM_F_MULTI, HWSIM_CMD_GET_RADIO); if (hdr) { genl_dump_check_consistent(cb, hdr); genlmsg_end(skb, hdr); } else { res = -EMSGSIZE; } } done: spin_unlock_bh(&hwsim_radio_lock); return res ?: skb->len; } /* Generic Netlink operations array */ static const struct genl_small_ops hwsim_ops[] = { { .cmd = HWSIM_CMD_REGISTER, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_register_received_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_FRAME, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_cloned_frame_received_nl, }, { .cmd = HWSIM_CMD_TX_INFO_FRAME, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_tx_info_frame_received_nl, }, { .cmd = HWSIM_CMD_NEW_RADIO, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_new_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_DEL_RADIO, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_del_radio_nl, .flags = GENL_UNS_ADMIN_PERM, }, { .cmd = HWSIM_CMD_GET_RADIO, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_get_radio_nl, .dumpit = hwsim_dump_radio_nl, }, { .cmd = HWSIM_CMD_REPORT_PMSR, .validate = GENL_DONT_VALIDATE_STRICT | GENL_DONT_VALIDATE_DUMP, .doit = hwsim_pmsr_report_nl, }, }; static struct genl_family hwsim_genl_family 
__ro_after_init = { .name = "MAC80211_HWSIM", .version = 1, .maxattr = HWSIM_ATTR_MAX, .policy = hwsim_genl_policy, .netnsok = true, .module = THIS_MODULE, .small_ops = hwsim_ops, .n_small_ops = ARRAY_SIZE(hwsim_ops), .resv_start_op = HWSIM_CMD_REPORT_PMSR + 1, // match with __HWSIM_CMD_MAX .mcgrps = hwsim_mcgrps, .n_mcgrps = ARRAY_SIZE(hwsim_mcgrps), }; static void remove_user_radios(u32 portid) { struct mac80211_hwsim_data *entry, *tmp; LIST_HEAD(list); spin_lock_bh(&hwsim_radio_lock); list_for_each_entry_safe(entry, tmp, &hwsim_radios, list) { if (entry->destroy_on_close && entry->portid == portid) { list_move(&entry->list, &list); rhashtable_remove_fast(&hwsim_radios_rht, &entry->rht, hwsim_rht_params); hwsim_radios_generation++; } } spin_unlock_bh(&hwsim_radio_lock); list_for_each_entry_safe(entry, tmp, &list, list) { list_del(&entry->list); mac80211_hwsim_del_radio(entry, wiphy_name(entry->hw->wiphy), NULL); } } static int mac80211_hwsim_netlink_notify(struct notifier_block *nb, unsigned long state, void *_notify) { struct netlink_notify *notify = _notify; if (state != NETLINK_URELEASE) return NOTIFY_DONE; remove_user_radios(notify->portid); if (notify->portid == hwsim_net_get_wmediumd(notify->net)) { printk(KERN_INFO "mac80211_hwsim: wmediumd released netlink" " socket, switching to perfect channel medium\n"); hwsim_register_wmediumd(notify->net, 0); } return NOTIFY_DONE; } static struct notifier_block hwsim_netlink_notifier = { .notifier_call = mac80211_hwsim_netlink_notify, }; static int __init hwsim_init_netlink(void) { int rc; printk(KERN_INFO "mac80211_hwsim: initializing netlink\n"); rc = genl_register_family(&hwsim_genl_family); if (rc) goto failure; rc = netlink_register_notifier(&hwsim_netlink_notifier); if (rc) { genl_unregister_family(&hwsim_genl_family); goto failure; } return 0; failure: pr_debug("mac80211_hwsim: error occurred in %s\n", __func__); return -EINVAL; } static __net_init int hwsim_init_net(struct net *net) { return hwsim_net_set_netgroup(net); } static void __net_exit hwsim_exit_net(struct net *net) { struct mac80211_hwsim_data *data, *tmp; LIST_HEAD(list); spin_lock_bh(&hwsim_radio_lock); list_for_each_entry_safe(data, tmp, &hwsim_radios, list) { if (!net_eq(wiphy_net(data->hw->wiphy), net)) continue; /* Radios created in init_net are returned to init_net. 
*/ if (data->netgroup == hwsim_net_get_netgroup(&init_net)) continue; list_move(&data->list, &list); rhashtable_remove_fast(&hwsim_radios_rht, &data->rht, hwsim_rht_params); hwsim_radios_generation++; } spin_unlock_bh(&hwsim_radio_lock); list_for_each_entry_safe(data, tmp, &list, list) { list_del(&data->list); mac80211_hwsim_del_radio(data, wiphy_name(data->hw->wiphy), NULL); } ida_free(&hwsim_netgroup_ida, hwsim_net_get_netgroup(net)); } static struct pernet_operations hwsim_net_ops = { .init = hwsim_init_net, .exit = hwsim_exit_net, .id = &hwsim_net_id, .size = sizeof(struct hwsim_net), }; static void hwsim_exit_netlink(void) { /* unregister the notifier */ netlink_unregister_notifier(&hwsim_netlink_notifier); /* unregister the family */ genl_unregister_family(&hwsim_genl_family); } #if IS_REACHABLE(CONFIG_VIRTIO) static void hwsim_virtio_tx_done(struct virtqueue *vq) { unsigned int len; struct sk_buff *skb; unsigned long flags; spin_lock_irqsave(&hwsim_virtio_lock, flags); while ((skb = virtqueue_get_buf(vq, &len))) dev_kfree_skb_irq(skb); spin_unlock_irqrestore(&hwsim_virtio_lock, flags); } static int hwsim_virtio_handle_cmd(struct sk_buff *skb) { struct nlmsghdr *nlh; struct genlmsghdr *gnlh; struct nlattr *tb[HWSIM_ATTR_MAX + 1]; struct genl_info info = {}; int err; nlh = nlmsg_hdr(skb); gnlh = nlmsg_data(nlh); if (skb->len < nlh->nlmsg_len) return -EINVAL; err = genlmsg_parse(nlh, &hwsim_genl_family, tb, HWSIM_ATTR_MAX, hwsim_genl_policy, NULL); if (err) { pr_err_ratelimited("hwsim: genlmsg_parse returned %d\n", err); return err; } info.attrs = tb; switch (gnlh->cmd) { case HWSIM_CMD_FRAME: hwsim_cloned_frame_received_nl(skb, &info); break; case HWSIM_CMD_TX_INFO_FRAME: hwsim_tx_info_frame_received_nl(skb, &info); break; case HWSIM_CMD_REPORT_PMSR: hwsim_pmsr_report_nl(skb, &info); break; default: pr_err_ratelimited("hwsim: invalid cmd: %d\n", gnlh->cmd); return -EPROTO; } return 0; } static void hwsim_virtio_rx_work(struct work_struct *work) { struct virtqueue *vq; unsigned int len; struct sk_buff *skb; struct scatterlist sg[1]; int err; unsigned long flags; spin_lock_irqsave(&hwsim_virtio_lock, flags); if (!hwsim_virtio_enabled) goto out_unlock; skb = virtqueue_get_buf(hwsim_vqs[HWSIM_VQ_RX], &len); if (!skb) goto out_unlock; spin_unlock_irqrestore(&hwsim_virtio_lock, flags); skb->data = skb->head; skb_reset_tail_pointer(skb); skb_put(skb, len); hwsim_virtio_handle_cmd(skb); spin_lock_irqsave(&hwsim_virtio_lock, flags); if (!hwsim_virtio_enabled) { dev_kfree_skb_irq(skb); goto out_unlock; } vq = hwsim_vqs[HWSIM_VQ_RX]; sg_init_one(sg, skb->head, skb_end_offset(skb)); err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_ATOMIC); if (WARN(err, "virtqueue_add_inbuf returned %d\n", err)) dev_kfree_skb_irq(skb); else virtqueue_kick(vq); schedule_work(&hwsim_virtio_rx); out_unlock: spin_unlock_irqrestore(&hwsim_virtio_lock, flags); } static void hwsim_virtio_rx_done(struct virtqueue *vq) { schedule_work(&hwsim_virtio_rx); } static int init_vqs(struct virtio_device *vdev) { vq_callback_t *callbacks[HWSIM_NUM_VQS] = { [HWSIM_VQ_TX] = hwsim_virtio_tx_done, [HWSIM_VQ_RX] = hwsim_virtio_rx_done, }; const char *names[HWSIM_NUM_VQS] = { [HWSIM_VQ_TX] = "tx", [HWSIM_VQ_RX] = "rx", }; return virtio_find_vqs(vdev, HWSIM_NUM_VQS, hwsim_vqs, callbacks, names, NULL); } static int fill_vq(struct virtqueue *vq) { int i, err; struct sk_buff *skb; struct scatterlist sg[1]; for (i = 0; i < virtqueue_get_vring_size(vq); i++) { skb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!skb) return -ENOMEM; 
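		/*
		 * Each pass of this loop pre-posts one empty netlink skb as a
		 * device-writable receive buffer: the skb's whole data area
		 * (head to end) is described by a single scatterlist entry and
		 * queued with virtqueue_add_inbuf(), using the skb pointer
		 * itself as the token that virtqueue_get_buf() hands back on
		 * completion.  A single virtqueue_kick() after the loop then
		 * notifies the device; hwsim_virtio_rx_work() re-posts each
		 * buffer after consuming the message it carried.
		 */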
sg_init_one(sg, skb->head, skb_end_offset(skb)); err = virtqueue_add_inbuf(vq, sg, 1, skb, GFP_KERNEL); if (err) { nlmsg_free(skb); return err; } } virtqueue_kick(vq); return 0; } static void remove_vqs(struct virtio_device *vdev) { int i; virtio_reset_device(vdev); for (i = 0; i < ARRAY_SIZE(hwsim_vqs); i++) { struct virtqueue *vq = hwsim_vqs[i]; struct sk_buff *skb; while ((skb = virtqueue_detach_unused_buf(vq))) nlmsg_free(skb); } vdev->config->del_vqs(vdev); } static int hwsim_virtio_probe(struct virtio_device *vdev) { int err; unsigned long flags; spin_lock_irqsave(&hwsim_virtio_lock, flags); if (hwsim_virtio_enabled) { spin_unlock_irqrestore(&hwsim_virtio_lock, flags); return -EEXIST; } spin_unlock_irqrestore(&hwsim_virtio_lock, flags); err = init_vqs(vdev); if (err) return err; virtio_device_ready(vdev); err = fill_vq(hwsim_vqs[HWSIM_VQ_RX]); if (err) goto out_remove; spin_lock_irqsave(&hwsim_virtio_lock, flags); hwsim_virtio_enabled = true; spin_unlock_irqrestore(&hwsim_virtio_lock, flags); schedule_work(&hwsim_virtio_rx); return 0; out_remove: remove_vqs(vdev); return err; } static void hwsim_virtio_remove(struct virtio_device *vdev) { hwsim_virtio_enabled = false; cancel_work_sync(&hwsim_virtio_rx); remove_vqs(vdev); } /* MAC80211_HWSIM virtio device id table */ static const struct virtio_device_id id_table[] = { { VIRTIO_ID_MAC80211_HWSIM, VIRTIO_DEV_ANY_ID }, { 0 } }; MODULE_DEVICE_TABLE(virtio, id_table); static struct virtio_driver virtio_hwsim = { .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = hwsim_virtio_probe, .remove = hwsim_virtio_remove, }; static int hwsim_register_virtio_driver(void) { return register_virtio_driver(&virtio_hwsim); } static void hwsim_unregister_virtio_driver(void) { unregister_virtio_driver(&virtio_hwsim); } #else static inline int hwsim_register_virtio_driver(void) { return 0; } static inline void hwsim_unregister_virtio_driver(void) { } #endif static int __init init_mac80211_hwsim(void) { int i, err; if (radios < 0 || radios > 100) return -EINVAL; if (channels < 1) return -EINVAL; err = rhashtable_init(&hwsim_radios_rht, &hwsim_rht_params); if (err) return err; err = register_pernet_device(&hwsim_net_ops); if (err) goto out_free_rht; err = platform_driver_register(&mac80211_hwsim_driver); if (err) goto out_unregister_pernet; err = hwsim_init_netlink(); if (err) goto out_unregister_driver; err = hwsim_register_virtio_driver(); if (err) goto out_exit_netlink; hwsim_class = class_create("mac80211_hwsim"); if (IS_ERR(hwsim_class)) { err = PTR_ERR(hwsim_class); goto out_exit_virtio; } hwsim_init_s1g_channels(hwsim_channels_s1g); for (i = 0; i < radios; i++) { struct hwsim_new_radio_params param = { 0 }; param.channels = channels; switch (regtest) { case HWSIM_REGTEST_DIFF_COUNTRY: if (i < ARRAY_SIZE(hwsim_alpha2s)) param.reg_alpha2 = hwsim_alpha2s[i]; break; case HWSIM_REGTEST_DRIVER_REG_FOLLOW: if (!i) param.reg_alpha2 = hwsim_alpha2s[0]; break; case HWSIM_REGTEST_STRICT_ALL: param.reg_strict = true; fallthrough; case HWSIM_REGTEST_DRIVER_REG_ALL: param.reg_alpha2 = hwsim_alpha2s[0]; break; case HWSIM_REGTEST_WORLD_ROAM: if (i == 0) param.regd = &hwsim_world_regdom_custom_01; break; case HWSIM_REGTEST_CUSTOM_WORLD: param.regd = &hwsim_world_regdom_custom_01; break; case HWSIM_REGTEST_CUSTOM_WORLD_2: if (i == 0) param.regd = &hwsim_world_regdom_custom_01; else if (i == 1) param.regd = &hwsim_world_regdom_custom_02; break; case HWSIM_REGTEST_STRICT_FOLLOW: if (i == 0) { param.reg_strict = true; param.reg_alpha2 
= hwsim_alpha2s[0]; } break; case HWSIM_REGTEST_STRICT_AND_DRIVER_REG: if (i == 0) { param.reg_strict = true; param.reg_alpha2 = hwsim_alpha2s[0]; } else if (i == 1) { param.reg_alpha2 = hwsim_alpha2s[1]; } break; case HWSIM_REGTEST_ALL: switch (i) { case 0: param.regd = &hwsim_world_regdom_custom_01; break; case 1: param.regd = &hwsim_world_regdom_custom_02; break; case 2: param.reg_alpha2 = hwsim_alpha2s[0]; break; case 3: param.reg_alpha2 = hwsim_alpha2s[1]; break; case 4: param.reg_strict = true; param.reg_alpha2 = hwsim_alpha2s[2]; break; } break; default: break; } param.p2p_device = support_p2p_device; param.mlo = mlo; param.use_chanctx = channels > 1 || mlo; param.iftypes = HWSIM_IFTYPE_SUPPORT_MASK; if (param.p2p_device) param.iftypes |= BIT(NL80211_IFTYPE_P2P_DEVICE); err = mac80211_hwsim_new_radio(NULL, &param); if (err < 0) goto out_free_radios; } hwsim_mon = alloc_netdev(0, "hwsim%d", NET_NAME_UNKNOWN, hwsim_mon_setup); if (hwsim_mon == NULL) { err = -ENOMEM; goto out_free_radios; } rtnl_lock(); err = dev_alloc_name(hwsim_mon, hwsim_mon->name); if (err < 0) { rtnl_unlock(); goto out_free_mon; } err = register_netdevice(hwsim_mon); if (err < 0) { rtnl_unlock(); goto out_free_mon; } rtnl_unlock(); return 0; out_free_mon: free_netdev(hwsim_mon); out_free_radios: mac80211_hwsim_free(); out_exit_virtio: hwsim_unregister_virtio_driver(); out_exit_netlink: hwsim_exit_netlink(); out_unregister_driver: platform_driver_unregister(&mac80211_hwsim_driver); out_unregister_pernet: unregister_pernet_device(&hwsim_net_ops); out_free_rht: rhashtable_destroy(&hwsim_radios_rht); return err; } module_init(init_mac80211_hwsim); static void __exit exit_mac80211_hwsim(void) { pr_debug("mac80211_hwsim: unregister radios\n"); hwsim_unregister_virtio_driver(); hwsim_exit_netlink(); mac80211_hwsim_free(); rhashtable_destroy(&hwsim_radios_rht); unregister_netdev(hwsim_mon); platform_driver_unregister(&mac80211_hwsim_driver); unregister_pernet_device(&hwsim_net_ops); } module_exit(exit_mac80211_hwsim);
linux-master
drivers/net/wireless/virtual/mac80211_hwsim.c
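init_mac80211_hwsim() above registers an rhashtable, pernet ops, a platform driver, the generic netlink family, a virtio driver and a device class, and unwinds them in strict reverse order when any step fails. Below is a minimal, self-contained sketch of that goto-unwind pattern only; register_a()/register_b()/register_c() and their unregister counterparts are hypothetical stand-ins for the real calls, not kernel APIs.

/* Sketch of the reverse-order error unwinding used by init_mac80211_hwsim(). */
#include <stdio.h>

static int register_a(void) { puts("A registered"); return 0; }
static int register_b(void) { puts("B registered"); return 0; }
static int register_c(void) { puts("C failed");     return -1; }

static void unregister_a(void) { puts("A unregistered"); }
static void unregister_b(void) { puts("B unregistered"); }

static int init_example(void)
{
        int err;

        err = register_a();
        if (err)
                return err;

        err = register_b();
        if (err)
                goto out_unregister_a;

        err = register_c();
        if (err)
                goto out_unregister_b;

        return 0;

out_unregister_b:
        unregister_b();         /* undo step B first... */
out_unregister_a:
        unregister_a();         /* ...then step A, mirroring registration order */
        return err;
}

int main(void)
{
        return init_example() ? 1 : 0;
}

The later a step sits in the init sequence, the earlier its label appears in the unwind ladder, which is exactly how the out_exit_virtio/out_exit_netlink/out_unregister_driver labels are ordered above.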
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00lib Abstract: rt2x00 generic link tuning routines. */ #include <linux/kernel.h> #include <linux/module.h> #include "rt2x00.h" #include "rt2x00lib.h" /* * When we lack RSSI information return something less then -80 to * tell the driver to tune the device to maximum sensitivity. */ #define DEFAULT_RSSI -128 static inline int rt2x00link_get_avg_rssi(struct ewma_rssi *ewma) { unsigned long avg; avg = ewma_rssi_read(ewma); if (avg) return -avg; return DEFAULT_RSSI; } static int rt2x00link_antenna_get_link_rssi(struct rt2x00_dev *rt2x00dev) { struct link_ant *ant = &rt2x00dev->link.ant; if (rt2x00dev->link.qual.rx_success) return rt2x00link_get_avg_rssi(&ant->rssi_ant); return DEFAULT_RSSI; } static int rt2x00link_antenna_get_rssi_history(struct rt2x00_dev *rt2x00dev) { struct link_ant *ant = &rt2x00dev->link.ant; if (ant->rssi_history) return ant->rssi_history; return DEFAULT_RSSI; } static void rt2x00link_antenna_update_rssi_history(struct rt2x00_dev *rt2x00dev, int rssi) { struct link_ant *ant = &rt2x00dev->link.ant; ant->rssi_history = rssi; } static void rt2x00link_antenna_reset(struct rt2x00_dev *rt2x00dev) { ewma_rssi_init(&rt2x00dev->link.ant.rssi_ant); } static void rt2x00lib_antenna_diversity_sample(struct rt2x00_dev *rt2x00dev) { struct link_ant *ant = &rt2x00dev->link.ant; struct antenna_setup new_ant; int other_antenna; int sample_current = rt2x00link_antenna_get_link_rssi(rt2x00dev); int sample_other = rt2x00link_antenna_get_rssi_history(rt2x00dev); memcpy(&new_ant, &ant->active, sizeof(new_ant)); /* * We are done sampling. Now we should evaluate the results. */ ant->flags &= ~ANTENNA_MODE_SAMPLE; /* * During the last period we have sampled the RSSI * from both antennas. It now is time to determine * which antenna demonstrated the best performance. * When we are already on the antenna with the best * performance, just create a good starting point * for the history and we are done. */ if (sample_current >= sample_other) { rt2x00link_antenna_update_rssi_history(rt2x00dev, sample_current); return; } other_antenna = (ant->active.rx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A; if (ant->flags & ANTENNA_RX_DIVERSITY) new_ant.rx = other_antenna; if (ant->flags & ANTENNA_TX_DIVERSITY) new_ant.tx = other_antenna; rt2x00lib_config_antenna(rt2x00dev, new_ant); } static void rt2x00lib_antenna_diversity_eval(struct rt2x00_dev *rt2x00dev) { struct link_ant *ant = &rt2x00dev->link.ant; struct antenna_setup new_ant; int rssi_curr; int rssi_old; memcpy(&new_ant, &ant->active, sizeof(new_ant)); /* * Get current RSSI value along with the historical value, * after that update the history with the current value. */ rssi_curr = rt2x00link_antenna_get_link_rssi(rt2x00dev); rssi_old = rt2x00link_antenna_get_rssi_history(rt2x00dev); rt2x00link_antenna_update_rssi_history(rt2x00dev, rssi_curr); /* * Legacy driver indicates that we should swap antenna's * when the difference in RSSI is greater that 5. This * also should be done when the RSSI was actually better * then the previous sample. * When the difference exceeds the threshold we should * sample the rssi from the other antenna to make a valid * comparison between the 2 antennas. */ if (abs(rssi_curr - rssi_old) < 5) return; ant->flags |= ANTENNA_MODE_SAMPLE; if (ant->flags & ANTENNA_RX_DIVERSITY) new_ant.rx = (new_ant.rx == ANTENNA_A) ? 
ANTENNA_B : ANTENNA_A; if (ant->flags & ANTENNA_TX_DIVERSITY) new_ant.tx = (new_ant.tx == ANTENNA_A) ? ANTENNA_B : ANTENNA_A; rt2x00lib_config_antenna(rt2x00dev, new_ant); } static bool rt2x00lib_antenna_diversity(struct rt2x00_dev *rt2x00dev) { struct link_ant *ant = &rt2x00dev->link.ant; /* * Determine if software diversity is enabled for * either the TX or RX antenna (or both). */ if (!(ant->flags & ANTENNA_RX_DIVERSITY) && !(ant->flags & ANTENNA_TX_DIVERSITY)) { ant->flags = 0; return true; } /* * If we have only sampled the data over the last period * we should now harvest the data. Otherwise just evaluate * the data. The latter should only be performed once * every 2 seconds. */ if (ant->flags & ANTENNA_MODE_SAMPLE) { rt2x00lib_antenna_diversity_sample(rt2x00dev); return true; } else if (rt2x00dev->link.count & 1) { rt2x00lib_antenna_diversity_eval(rt2x00dev); return true; } return false; } void rt2x00link_update_stats(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct rxdone_entry_desc *rxdesc) { struct link *link = &rt2x00dev->link; struct link_qual *qual = &rt2x00dev->link.qual; struct link_ant *ant = &rt2x00dev->link.ant; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; /* * No need to update the stats for !=STA interfaces */ if (!rt2x00dev->intf_sta_count) return; /* * Frame was received successfully since non-successful * frames would have been dropped by the hardware. */ qual->rx_success++; /* * We are only interested in quality statistics from * beacons which came from the BSS which we are * associated with. */ if (!ieee80211_is_beacon(hdr->frame_control) || !(rxdesc->dev_flags & RXDONE_MY_BSS)) return; /* * Update global RSSI */ ewma_rssi_add(&link->avg_rssi, -rxdesc->rssi); /* * Update antenna RSSI */ ewma_rssi_add(&ant->rssi_ant, -rxdesc->rssi); } void rt2x00link_start_tuner(struct rt2x00_dev *rt2x00dev) { struct link *link = &rt2x00dev->link; /* * Single monitor mode interfaces should never have * work with link tuners. */ if (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count) return; /* * While scanning, link tuning is disabled. By default * the most sensitive settings will be used to make sure * that all beacons and probe responses will be received * during the scan. */ if (test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) return; rt2x00link_reset_tuner(rt2x00dev, false); if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) ieee80211_queue_delayed_work(rt2x00dev->hw, &link->work, LINK_TUNE_INTERVAL); } void rt2x00link_stop_tuner(struct rt2x00_dev *rt2x00dev) { cancel_delayed_work_sync(&rt2x00dev->link.work); } void rt2x00link_reset_tuner(struct rt2x00_dev *rt2x00dev, bool antenna) { struct link_qual *qual = &rt2x00dev->link.qual; u8 vgc_level = qual->vgc_level_reg; if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; /* * Reset link information. * Both the currently active vgc level as well as * the link tuner counter should be reset. Resetting * the counter is important for devices where the * device should only perform link tuning during the * first minute after being enabled. */ rt2x00dev->link.count = 0; memset(qual, 0, sizeof(*qual)); ewma_rssi_init(&rt2x00dev->link.avg_rssi); /* * Restore the VGC level as stored in the registers, * the driver can use this to determine if the register * must be updated during reset or not. */ qual->vgc_level_reg = vgc_level; /* * Reset the link tuner. 
*/ rt2x00dev->ops->lib->reset_tuner(rt2x00dev, qual); if (antenna) rt2x00link_antenna_reset(rt2x00dev); } static void rt2x00link_reset_qual(struct rt2x00_dev *rt2x00dev) { struct link_qual *qual = &rt2x00dev->link.qual; qual->rx_success = 0; qual->rx_failed = 0; qual->tx_success = 0; qual->tx_failed = 0; } static void rt2x00link_tuner_sta(struct rt2x00_dev *rt2x00dev, struct link *link) { struct link_qual *qual = &rt2x00dev->link.qual; /* * Update statistics. */ rt2x00dev->ops->lib->link_stats(rt2x00dev, qual); rt2x00dev->low_level_stats.dot11FCSErrorCount += qual->rx_failed; /* * Update quality RSSI for link tuning, * when we have received some frames and we managed to * collect the RSSI data we could use this. Otherwise we * must fallback to the default RSSI value. */ if (!qual->rx_success) qual->rssi = DEFAULT_RSSI; else qual->rssi = rt2x00link_get_avg_rssi(&link->avg_rssi); /* * Check if link tuning is supported by the hardware, some hardware * do not support link tuning at all, while other devices can disable * the feature from the EEPROM. */ if (rt2x00_has_cap_link_tuning(rt2x00dev)) rt2x00dev->ops->lib->link_tuner(rt2x00dev, qual, link->count); /* * Send a signal to the led to update the led signal strength. */ rt2x00leds_led_quality(rt2x00dev, qual->rssi); /* * Evaluate antenna setup, make this the last step when * rt2x00lib_antenna_diversity made changes the quality * statistics will be reset. */ if (rt2x00lib_antenna_diversity(rt2x00dev)) rt2x00link_reset_qual(rt2x00dev); } static void rt2x00link_tuner(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, link.work.work); struct link *link = &rt2x00dev->link; /* * When the radio is shutting down we should * immediately cease all link tuning. */ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags) || test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) return; /* Do not race with rt2x00mac_config(). */ mutex_lock(&rt2x00dev->conf_mutex); if (rt2x00dev->intf_sta_count) rt2x00link_tuner_sta(rt2x00dev, link); if (rt2x00dev->ops->lib->gain_calibration && (link->count % (AGC_SECONDS / LINK_TUNE_SECONDS)) == 0) rt2x00dev->ops->lib->gain_calibration(rt2x00dev); if (rt2x00dev->ops->lib->vco_calibration && rt2x00_has_cap_vco_recalibration(rt2x00dev) && (link->count % (VCO_SECONDS / LINK_TUNE_SECONDS)) == 0) rt2x00dev->ops->lib->vco_calibration(rt2x00dev); mutex_unlock(&rt2x00dev->conf_mutex); /* * Increase tuner counter, and reschedule the next link tuner run. */ link->count++; if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) ieee80211_queue_delayed_work(rt2x00dev->hw, &link->work, LINK_TUNE_INTERVAL); } void rt2x00link_start_watchdog(struct rt2x00_dev *rt2x00dev) { struct link *link = &rt2x00dev->link; if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && rt2x00dev->ops->lib->watchdog && !link->watchdog_disabled) ieee80211_queue_delayed_work(rt2x00dev->hw, &link->watchdog_work, link->watchdog_interval); } void rt2x00link_stop_watchdog(struct rt2x00_dev *rt2x00dev) { cancel_delayed_work_sync(&rt2x00dev->link.watchdog_work); } static void rt2x00link_watchdog(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, link.watchdog_work.work); struct link *link = &rt2x00dev->link; /* * When the radio is shutting down we should * immediately cease the watchdog monitoring. 
*/ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; rt2x00dev->ops->lib->watchdog(rt2x00dev); if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) ieee80211_queue_delayed_work(rt2x00dev->hw, &link->watchdog_work, link->watchdog_interval); } void rt2x00link_register(struct rt2x00_dev *rt2x00dev) { struct link *link = &rt2x00dev->link; INIT_DELAYED_WORK(&link->work, rt2x00link_tuner); INIT_DELAYED_WORK(&link->watchdog_work, rt2x00link_watchdog); if (link->watchdog_interval == 0) link->watchdog_interval = WATCHDOG_INTERVAL; }
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00link.c
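rt2x00link stores each RSSI sample negated: RSSI is a negative dBm value and the kernel EWMA only holds unsigned numbers, so the driver adds -rssi and negates the average again on read, treating an average of 0 as "no samples yet" and falling back to DEFAULT_RSSI. A small userspace sketch of that sign convention follows; it uses a plain floating-point average instead of the kernel's DECLARE_EWMA() machinery, and the weight of 8 is picked for illustration rather than taken from the driver.

/* Sketch of the negated-RSSI averaging convention in rt2x00link.c. */
#include <stdio.h>

#define DEFAULT_RSSI -128

struct ewma_rssi {
        double avg;     /* holds -rssi, i.e. a positive magnitude */
};

static void ewma_rssi_add(struct ewma_rssi *e, double neg_rssi)
{
        e->avg = e->avg ? (e->avg * 7 + neg_rssi) / 8 : neg_rssi;
}

static int rssi_read(const struct ewma_rssi *e)
{
        /* 0 means nothing was ever added: report "tune for maximum sensitivity" */
        return e->avg ? (int)-e->avg : DEFAULT_RSSI;
}

int main(void)
{
        struct ewma_rssi e = { 0 };

        printf("no samples: %d\n", rssi_read(&e));      /* -128 */
        ewma_rssi_add(&e, -(-54.0));                    /* frame seen at -54 dBm */
        ewma_rssi_add(&e, -(-60.0));                    /* frame seen at -60 dBm */
        printf("average:    %d\n", rssi_read(&e));      /* about -54 */
        return 0;
}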
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2009 - 2010 Ivo van Doorn <[email protected]> * Copyright (C) 2009 Alban Browaeys <[email protected]> * Copyright (C) 2009 Felix Fietkau <[email protected]> * Copyright (C) 2009 Luis Correia <[email protected]> * Copyright (C) 2009 Mattias Nissler <[email protected]> * Copyright (C) 2009 Mark Asselstine <[email protected]> * Copyright (C) 2009 Xose Vazquez Perez <[email protected]> * Copyright (C) 2009 Bart Zolnierkiewicz <[email protected]> * <http://rt2x00.serialmonkey.com> */ /* Module: rt2800mmio * Abstract: rt2800 MMIO device routines. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/export.h> #include "rt2x00.h" #include "rt2x00mmio.h" #include "rt2800.h" #include "rt2800lib.h" #include "rt2800mmio.h" unsigned int rt2800mmio_get_dma_done(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; struct queue_entry *entry; int idx, qid; switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: qid = queue->qid; idx = rt2x00mmio_register_read(rt2x00dev, TX_DTX_IDX(qid)); break; case QID_MGMT: idx = rt2x00mmio_register_read(rt2x00dev, TX_DTX_IDX(5)); break; case QID_RX: entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE); idx = entry->entry_idx; break; default: WARN_ON_ONCE(1); idx = 0; break; } return idx; } EXPORT_SYMBOL_GPL(rt2800mmio_get_dma_done); /* * TX descriptor initialization */ __le32 *rt2800mmio_get_txwi(struct queue_entry *entry) { return (__le32 *) entry->skb->data; } EXPORT_SYMBOL_GPL(rt2800mmio_get_txwi); void rt2800mmio_write_tx_desc(struct queue_entry *entry, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); struct queue_entry_priv_mmio *entry_priv = entry->priv_data; __le32 *txd = entry_priv->desc; u32 word; const unsigned int txwi_size = entry->queue->winfo_size; /* * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1 * must contains a TXWI structure + 802.11 header + padding + 802.11 * data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11 * data. It means that LAST_SEC0 is always 0. */ /* * Initialize TX descriptor */ word = 0; rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma); rt2x00_desc_write(txd, 0, word); word = 0; rt2x00_set_field32(&word, TXD_W1_SD_LEN1, entry->skb->len); rt2x00_set_field32(&word, TXD_W1_LAST_SEC1, !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W1_BURST, test_bit(ENTRY_TXD_BURST, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W1_SD_LEN0, txwi_size); rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0); rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0); rt2x00_desc_write(txd, 1, word); word = 0; rt2x00_set_field32(&word, TXD_W2_SD_PTR1, skbdesc->skb_dma + txwi_size); rt2x00_desc_write(txd, 2, word); word = 0; rt2x00_set_field32(&word, TXD_W3_WIV, !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W3_QSEL, 2); rt2x00_desc_write(txd, 3, word); /* * Register descriptor details in skb frame descriptor. 
*/ skbdesc->desc = txd; skbdesc->desc_len = TXD_DESC_SIZE; } EXPORT_SYMBOL_GPL(rt2800mmio_write_tx_desc); /* * RX control handlers */ void rt2800mmio_fill_rxdone(struct queue_entry *entry, struct rxdone_entry_desc *rxdesc) { struct queue_entry_priv_mmio *entry_priv = entry->priv_data; __le32 *rxd = entry_priv->desc; u32 word; word = rt2x00_desc_read(rxd, 3); if (rt2x00_get_field32(word, RXD_W3_CRC_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; /* * Unfortunately we don't know the cipher type used during * decryption. This prevents us from correct providing * correct statistics through debugfs. */ rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W3_CIPHER_ERROR); if (rt2x00_get_field32(word, RXD_W3_DECRYPTED)) { /* * Hardware has stripped IV/EIV data from 802.11 frame during * decryption. Unfortunately the descriptor doesn't contain * any fields with the EIV/IV data either, so they can't * be restored by rt2x00lib. */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; /* * The hardware has already checked the Michael Mic and has * stripped it from the frame. Signal this to mac80211. */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) { rxdesc->flags |= RX_FLAG_DECRYPTED; } else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) { /* * In order to check the Michael Mic, the packet must have * been decrypted. Mac80211 doesnt check the MMIC failure * flag to initiate MMIC countermeasures if the decoded flag * has not been set. */ rxdesc->flags |= RX_FLAG_DECRYPTED; rxdesc->flags |= RX_FLAG_MMIC_ERROR; } } if (rt2x00_get_field32(word, RXD_W3_MY_BSS)) rxdesc->dev_flags |= RXDONE_MY_BSS; if (rt2x00_get_field32(word, RXD_W3_L2PAD)) rxdesc->dev_flags |= RXDONE_L2PAD; /* * Process the RXWI structure that is at the start of the buffer. */ rt2800_process_rxwi(entry, rxdesc); } EXPORT_SYMBOL_GPL(rt2800mmio_fill_rxdone); /* * Interrupt functions. */ static void rt2800mmio_wakeup(struct rt2x00_dev *rt2x00dev) { struct ieee80211_conf conf = { .flags = 0 }; struct rt2x00lib_conf libconf = { .conf = &conf }; rt2800_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } static inline void rt2800mmio_enable_interrupt(struct rt2x00_dev *rt2x00dev, struct rt2x00_field32 irq_field) { u32 reg; /* * Enable a single interrupt. The interrupt mask register * access needs locking. */ spin_lock_irq(&rt2x00dev->irqmask_lock); reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR); rt2x00_set_field32(&reg, irq_field, 1); rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg); spin_unlock_irq(&rt2x00dev->irqmask_lock); } void rt2800mmio_pretbtt_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, pretbtt_tasklet); rt2x00lib_pretbtt(rt2x00dev); if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_PRE_TBTT); } EXPORT_SYMBOL_GPL(rt2800mmio_pretbtt_tasklet); void rt2800mmio_tbtt_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet); struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u32 reg; rt2x00lib_beacondone(rt2x00dev); if (rt2x00dev->intf_ap_count) { /* * The rt2800pci hardware tbtt timer is off by 1us per tbtt * causing beacon skew and as a result causing problems with * some powersaving clients over time. Shorten the beacon * interval every 64 beacons by 64us to mitigate this effect. 
*/ if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 2)) { reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, (rt2x00dev->beacon_int * 16) - 1); rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg); } else if (drv_data->tbtt_tick == (BCN_TBTT_OFFSET - 1)) { reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, (rt2x00dev->beacon_int * 16)); rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg); } drv_data->tbtt_tick++; drv_data->tbtt_tick %= BCN_TBTT_OFFSET; } if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_TBTT); } EXPORT_SYMBOL_GPL(rt2800mmio_tbtt_tasklet); void rt2800mmio_rxdone_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, rxdone_tasklet); if (rt2x00mmio_rxdone(rt2x00dev)) tasklet_schedule(&rt2x00dev->rxdone_tasklet); else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_RX_DONE); } EXPORT_SYMBOL_GPL(rt2800mmio_rxdone_tasklet); void rt2800mmio_autowake_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, autowake_tasklet); rt2800mmio_wakeup(rt2x00dev); if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2800mmio_enable_interrupt(rt2x00dev, INT_MASK_CSR_AUTO_WAKEUP); } EXPORT_SYMBOL_GPL(rt2800mmio_autowake_tasklet); static void rt2800mmio_fetch_txstatus(struct rt2x00_dev *rt2x00dev) { u32 status; unsigned long flags; /* * The TX_FIFO_STATUS interrupt needs special care. We should * read TX_STA_FIFO but we should do it immediately as otherwise * the register can overflow and we would lose status reports. * * Hence, read the TX_STA_FIFO register and copy all tx status * reports into a kernel FIFO which is handled in the txstatus * tasklet. We use a tasklet to process the tx status reports * because we can schedule the tasklet multiple times (when the * interrupt fires again during tx status processing). * * We also read statuses from tx status timeout timer, use * lock to prevent concurent writes to fifo. */ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); while (!kfifo_is_full(&rt2x00dev->txstatus_fifo)) { status = rt2x00mmio_register_read(rt2x00dev, TX_STA_FIFO); if (!rt2x00_get_field32(status, TX_STA_FIFO_VALID)) break; kfifo_put(&rt2x00dev->txstatus_fifo, status); } spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); } void rt2800mmio_txstatus_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, txstatus_tasklet); rt2800_txdone(rt2x00dev, 16); if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo)) tasklet_schedule(&rt2x00dev->txstatus_tasklet); } EXPORT_SYMBOL_GPL(rt2800mmio_txstatus_tasklet); irqreturn_t rt2800mmio_interrupt(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; u32 reg, mask; /* Read status and ACK all interrupts */ reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR); rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg); if (!reg) return IRQ_NONE; if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return IRQ_HANDLED; /* * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits * for interrupts and interrupt masks we can just use the value of * INT_SOURCE_CSR to create the interrupt mask. 
*/ mask = ~reg; if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS)) { rt2x00_set_field32(&mask, INT_MASK_CSR_TX_FIFO_STATUS, 1); rt2800mmio_fetch_txstatus(rt2x00dev); if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo)) tasklet_schedule(&rt2x00dev->txstatus_tasklet); } if (rt2x00_get_field32(reg, INT_SOURCE_CSR_PRE_TBTT)) tasklet_hi_schedule(&rt2x00dev->pretbtt_tasklet); if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TBTT)) tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet); if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE)) tasklet_schedule(&rt2x00dev->rxdone_tasklet); if (rt2x00_get_field32(reg, INT_SOURCE_CSR_AUTO_WAKEUP)) tasklet_schedule(&rt2x00dev->autowake_tasklet); /* * Disable all interrupts for which a tasklet was scheduled right now, * the tasklet will reenable the appropriate interrupts. */ spin_lock(&rt2x00dev->irqmask_lock); reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR); reg &= mask; rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg); spin_unlock(&rt2x00dev->irqmask_lock); return IRQ_HANDLED; } EXPORT_SYMBOL_GPL(rt2800mmio_interrupt); void rt2800mmio_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u32 reg; unsigned long flags; /* * When interrupts are being enabled, the interrupt registers * should clear the register to assure a clean state. */ if (state == STATE_RADIO_IRQ_ON) { reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR); rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg); } spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); reg = 0; if (state == STATE_RADIO_IRQ_ON) { rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, 1); rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, 1); rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, 1); rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, 1); rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, 1); } rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg); spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); if (state == STATE_RADIO_IRQ_OFF) { /* * Wait for possibly running tasklets to finish. */ tasklet_kill(&rt2x00dev->txstatus_tasklet); tasklet_kill(&rt2x00dev->rxdone_tasklet); tasklet_kill(&rt2x00dev->autowake_tasklet); tasklet_kill(&rt2x00dev->tbtt_tasklet); tasklet_kill(&rt2x00dev->pretbtt_tasklet); } } EXPORT_SYMBOL_GPL(rt2800mmio_toggle_irq); /* * Queue handlers. 
*/ void rt2800mmio_start_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1); rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg); break; case QID_BEACON: reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG); rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1); rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg); reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN); rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 1); rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg); break; default: break; } } EXPORT_SYMBOL_GPL(rt2800mmio_start_queue); /* 200 ms */ #define TXSTATUS_TIMEOUT 200000000 void rt2800mmio_kick_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; struct queue_entry *entry; switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: WARN_ON_ONCE(rt2x00queue_empty(queue)); entry = rt2x00queue_get_entry(queue, Q_INDEX); rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(queue->qid), entry->entry_idx); hrtimer_start(&rt2x00dev->txstatus_timer, TXSTATUS_TIMEOUT, HRTIMER_MODE_REL); break; case QID_MGMT: entry = rt2x00queue_get_entry(queue, Q_INDEX); rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX(5), entry->entry_idx); break; default: break; } } EXPORT_SYMBOL_GPL(rt2800mmio_kick_queue); void rt2800mmio_flush_queue(struct data_queue *queue, bool drop) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; bool tx_queue = false; unsigned int i; switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: tx_queue = true; break; case QID_RX: break; default: return; } for (i = 0; i < 5; i++) { /* * Check if the driver is already done, otherwise we * have to sleep a little while to give the driver/hw * the oppurtunity to complete interrupt process itself. */ if (rt2x00queue_empty(queue)) break; /* * For TX queues schedule completion tasklet to catch * tx status timeouts, othewise just wait. */ if (tx_queue) queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); /* * Wait for a little while to give the driver * the oppurtunity to recover itself. */ msleep(50); } } EXPORT_SYMBOL_GPL(rt2800mmio_flush_queue); void rt2800mmio_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: reg = rt2x00mmio_register_read(rt2x00dev, MAC_SYS_CTRL); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0); rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, reg); break; case QID_BEACON: reg = rt2x00mmio_register_read(rt2x00dev, BCN_TIME_CFG); rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0); rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); rt2x00mmio_register_write(rt2x00dev, BCN_TIME_CFG, reg); reg = rt2x00mmio_register_read(rt2x00dev, INT_TIMER_EN); rt2x00_set_field32(&reg, INT_TIMER_EN_PRE_TBTT_TIMER, 0); rt2x00mmio_register_write(rt2x00dev, INT_TIMER_EN, reg); /* * Wait for current invocation to finish. The tasklet * won't be scheduled anymore afterwards since we disabled * the TBTT and PRE TBTT timer. 
*/ tasklet_kill(&rt2x00dev->tbtt_tasklet); tasklet_kill(&rt2x00dev->pretbtt_tasklet); break; default: break; } } EXPORT_SYMBOL_GPL(rt2800mmio_stop_queue); void rt2800mmio_queue_init(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; unsigned short txwi_size, rxwi_size; rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size); switch (queue->qid) { case QID_RX: queue->limit = 128; queue->data_size = AGGREGATION_SIZE; queue->desc_size = RXD_DESC_SIZE; queue->winfo_size = rxwi_size; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: queue->limit = 64; queue->data_size = AGGREGATION_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->winfo_size = txwi_size; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_BEACON: queue->limit = 8; queue->data_size = 0; /* No DMA required for beacons */ queue->desc_size = TXD_DESC_SIZE; queue->winfo_size = txwi_size; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_ATIM: default: BUG(); break; } } EXPORT_SYMBOL_GPL(rt2800mmio_queue_init); /* * Initialization functions. */ bool rt2800mmio_get_entry_state(struct queue_entry *entry) { struct queue_entry_priv_mmio *entry_priv = entry->priv_data; u32 word; if (entry->queue->qid == QID_RX) { word = rt2x00_desc_read(entry_priv->desc, 1); return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE)); } else { word = rt2x00_desc_read(entry_priv->desc, 1); return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE)); } } EXPORT_SYMBOL_GPL(rt2800mmio_get_entry_state); void rt2800mmio_clear_entry(struct queue_entry *entry) { struct queue_entry_priv_mmio *entry_priv = entry->priv_data; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; u32 word; if (entry->queue->qid == QID_RX) { word = rt2x00_desc_read(entry_priv->desc, 0); rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma); rt2x00_desc_write(entry_priv->desc, 0, word); word = rt2x00_desc_read(entry_priv->desc, 1); rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0); rt2x00_desc_write(entry_priv->desc, 1, word); /* * Set RX IDX in register to inform hardware that we have * handled this entry and it is available for reuse again. */ rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx); } else { word = rt2x00_desc_read(entry_priv->desc, 1); rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1); rt2x00_desc_write(entry_priv->desc, 1, word); /* If last entry stop txstatus timer */ if (entry->queue->length == 1) hrtimer_cancel(&rt2x00dev->txstatus_timer); } } EXPORT_SYMBOL_GPL(rt2800mmio_clear_entry); int rt2800mmio_init_queues(struct rt2x00_dev *rt2x00dev) { struct queue_entry_priv_mmio *entry_priv; /* * Initialize registers. 
*/ entry_priv = rt2x00dev->tx[0].entries[0].priv_data; rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR0, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT0, rt2x00dev->tx[0].limit); rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX0, 0); rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX0, 0); entry_priv = rt2x00dev->tx[1].entries[0].priv_data; rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR1, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT1, rt2x00dev->tx[1].limit); rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX1, 0); rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX1, 0); entry_priv = rt2x00dev->tx[2].entries[0].priv_data; rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR2, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT2, rt2x00dev->tx[2].limit); rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX2, 0); rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX2, 0); entry_priv = rt2x00dev->tx[3].entries[0].priv_data; rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR3, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT3, rt2x00dev->tx[3].limit); rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX3, 0); rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX3, 0); rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR4, 0); rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT4, 0); rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX4, 0); rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX4, 0); rt2x00mmio_register_write(rt2x00dev, TX_BASE_PTR5, 0); rt2x00mmio_register_write(rt2x00dev, TX_MAX_CNT5, 0); rt2x00mmio_register_write(rt2x00dev, TX_CTX_IDX5, 0); rt2x00mmio_register_write(rt2x00dev, TX_DTX_IDX5, 0); entry_priv = rt2x00dev->rx->entries[0].priv_data; rt2x00mmio_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, RX_MAX_CNT, rt2x00dev->rx[0].limit); rt2x00mmio_register_write(rt2x00dev, RX_CRX_IDX, rt2x00dev->rx[0].limit - 1); rt2x00mmio_register_write(rt2x00dev, RX_DRX_IDX, 0); rt2800_disable_wpdma(rt2x00dev); rt2x00mmio_register_write(rt2x00dev, DELAY_INT_CFG, 0); return 0; } EXPORT_SYMBOL_GPL(rt2800mmio_init_queues); int rt2800mmio_init_registers(struct rt2x00_dev *rt2x00dev) { u32 reg; /* * Reset DMA indexes */ reg = rt2x00mmio_register_read(rt2x00dev, WPDMA_RST_IDX); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1); rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1); rt2x00mmio_register_write(rt2x00dev, WPDMA_RST_IDX, reg); rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f); rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00); if (rt2x00_is_pcie(rt2x00dev) && (rt2x00_rt(rt2x00dev, RT3090) || rt2x00_rt(rt2x00dev, RT3390) || rt2x00_rt(rt2x00dev, RT3572) || rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392) || rt2x00_rt(rt2x00dev, RT5592))) { reg = rt2x00mmio_register_read(rt2x00dev, AUX_CTRL); rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1); rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1); rt2x00mmio_register_write(rt2x00dev, AUX_CTRL, reg); } rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); reg = 0; rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1); rt2x00mmio_register_write(rt2x00dev, 
MAC_SYS_CTRL, reg); rt2x00mmio_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); return 0; } EXPORT_SYMBOL_GPL(rt2800mmio_init_registers); /* * Device state switch handlers. */ int rt2800mmio_enable_radio(struct rt2x00_dev *rt2x00dev) { /* Wait for DMA, ignore error until we initialize queues. */ rt2800_wait_wpdma_ready(rt2x00dev); if (unlikely(rt2800mmio_init_queues(rt2x00dev))) return -EIO; return rt2800_enable_radio(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2800mmio_enable_radio); static void rt2800mmio_work_txdone(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, txdone_work); if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) || rt2800_txstatus_timeout(rt2x00dev)) { tasklet_disable(&rt2x00dev->txstatus_tasklet); rt2800_txdone(rt2x00dev, UINT_MAX); rt2800_txdone_nostatus(rt2x00dev); tasklet_enable(&rt2x00dev->txstatus_tasklet); } if (rt2800_txstatus_pending(rt2x00dev)) hrtimer_start(&rt2x00dev->txstatus_timer, TXSTATUS_TIMEOUT, HRTIMER_MODE_REL); } static enum hrtimer_restart rt2800mmio_tx_sta_fifo_timeout(struct hrtimer *timer) { struct rt2x00_dev *rt2x00dev = container_of(timer, struct rt2x00_dev, txstatus_timer); if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) goto out; if (!rt2800_txstatus_pending(rt2x00dev)) goto out; rt2800mmio_fetch_txstatus(rt2x00dev); if (!kfifo_is_empty(&rt2x00dev->txstatus_fifo)) tasklet_schedule(&rt2x00dev->txstatus_tasklet); else queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); out: return HRTIMER_NORESTART; } int rt2800mmio_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; retval = rt2800_probe_hw(rt2x00dev); if (retval) return retval; /* * Set txstatus timer function. */ rt2x00dev->txstatus_timer.function = rt2800mmio_tx_sta_fifo_timeout; /* * Overwrite TX done handler */ INIT_WORK(&rt2x00dev->txdone_work, rt2800mmio_work_txdone); return 0; } EXPORT_SYMBOL_GPL(rt2800mmio_probe_hw); MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("rt2800 MMIO library"); MODULE_LICENSE("GPL");
linux-master
drivers/net/wireless/ralink/rt2x00/rt2800mmio.c
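rt2800mmio_interrupt() above acks all pending sources, then builds a mask that disables every source that fired so the corresponding tasklet can re-enable it once it has run, with one exception: TX_FIFO_STATUS is drained synchronously in the handler, so it stays enabled. A standalone sketch of just that mask arithmetic follows; the register bits and names are invented for illustration and do not match the real INT_MASK_CSR layout.

/* Sketch of the interrupt-mask bookkeeping in rt2800mmio_interrupt(). */
#include <stdio.h>
#include <stdint.h>

#define INT_RX_DONE        (1u << 0)
#define INT_TBTT           (1u << 1)
#define INT_TX_FIFO_STATUS (1u << 2)

int main(void)
{
        uint32_t int_mask = INT_RX_DONE | INT_TBTT | INT_TX_FIFO_STATUS;
        uint32_t source   = INT_RX_DONE | INT_TX_FIFO_STATUS;  /* what fired */

        /* Start from "disable everything that fired"... */
        uint32_t mask = ~source;

        /* ...but keep TX_FIFO_STATUS on: the FIFO was drained in the handler. */
        if (source & INT_TX_FIFO_STATUS)
                mask |= INT_TX_FIFO_STATUS;

        int_mask &= mask;       /* RX_DONE stays off until its tasklet re-enables it */

        printf("new mask: 0x%08x\n", (unsigned)int_mask); /* TBTT | TX_FIFO_STATUS = 0x6 */
        return 0;
}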
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> Copyright (C) 2009 - 2010 Ivo van Doorn <[email protected]> Copyright (C) 2009 Mattias Nissler <[email protected]> Copyright (C) 2009 Felix Fietkau <[email protected]> Copyright (C) 2009 Xose Vazquez Perez <[email protected]> Copyright (C) 2009 Axel Kollhofer <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2800usb Abstract: rt2800usb device specific routines. Supported chipsets: RT2800U. */ #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/usb.h> #include "rt2x00.h" #include "rt2x00usb.h" #include "rt2800lib.h" #include "rt2800.h" #include "rt2800usb.h" /* * Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); static bool rt2800usb_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev) { return modparam_nohwcrypt; } /* * Queue handlers. */ static void rt2800usb_start_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: reg = rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1); rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg); break; case QID_BEACON: reg = rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG); rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1); rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg); break; default: break; } } static void rt2800usb_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: reg = rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0); rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg); break; case QID_BEACON: reg = rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG); rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0); rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg); break; default: break; } } #define TXSTATUS_READ_INTERVAL 1000000 static bool rt2800usb_tx_sta_fifo_read_completed(struct rt2x00_dev *rt2x00dev, int urb_status, u32 tx_status) { bool valid; if (urb_status) { rt2x00_warn(rt2x00dev, "TX status read failed %d\n", urb_status); goto stop_reading; } valid = rt2x00_get_field32(tx_status, TX_STA_FIFO_VALID); if (valid) { if (!kfifo_put(&rt2x00dev->txstatus_fifo, tx_status)) rt2x00_warn(rt2x00dev, "TX status FIFO overrun\n"); queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); /* Reschedule urb to read TX status again instantly */ return true; } /* Check if there is any entry that timedout waiting on TX status */ if (rt2800_txstatus_timeout(rt2x00dev)) queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); if (rt2800_txstatus_pending(rt2x00dev)) { /* Read register after 1 ms */ hrtimer_start(&rt2x00dev->txstatus_timer, TXSTATUS_READ_INTERVAL, HRTIMER_MODE_REL); return false; } stop_reading: clear_bit(TX_STATUS_READING, &rt2x00dev->flags); /* * There is small race window above, between txstatus pending check and * clear_bit someone could do rt2x00usb_interrupt_txdone, so recheck * here again if status reading is needed. 
*/ if (rt2800_txstatus_pending(rt2x00dev) && !test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags)) return true; else return false; } static void rt2800usb_async_read_tx_status(struct rt2x00_dev *rt2x00dev) { if (test_and_set_bit(TX_STATUS_READING, &rt2x00dev->flags)) return; /* Read TX_STA_FIFO register after 2 ms */ hrtimer_start(&rt2x00dev->txstatus_timer, 2 * TXSTATUS_READ_INTERVAL, HRTIMER_MODE_REL); } static void rt2800usb_tx_dma_done(struct queue_entry *entry) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; rt2800usb_async_read_tx_status(rt2x00dev); } static enum hrtimer_restart rt2800usb_tx_sta_fifo_timeout(struct hrtimer *timer) { struct rt2x00_dev *rt2x00dev = container_of(timer, struct rt2x00_dev, txstatus_timer); rt2x00usb_register_read_async(rt2x00dev, TX_STA_FIFO, rt2800usb_tx_sta_fifo_read_completed); return HRTIMER_NORESTART; } /* * Firmware functions */ static int rt2800usb_autorun_detect(struct rt2x00_dev *rt2x00dev) { __le32 *reg; u32 fw_mode; int ret; reg = kmalloc(sizeof(*reg), GFP_KERNEL); if (reg == NULL) return -ENOMEM; /* cannot use rt2x00usb_register_read here as it uses different * mode (MULTI_READ vs. DEVICE_MODE) and does not pass the * magic value USB_MODE_AUTORUN (0x11) to the device, thus the * returned value would be invalid. */ ret = rt2x00usb_vendor_request(rt2x00dev, USB_DEVICE_MODE, USB_VENDOR_REQUEST_IN, 0, USB_MODE_AUTORUN, reg, sizeof(*reg), REGISTER_TIMEOUT_FIRMWARE); fw_mode = le32_to_cpu(*reg); kfree(reg); if (ret < 0) return ret; if ((fw_mode & 0x00000003) == 2) return 1; return 0; } static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) { return FIRMWARE_RT2870; } static int rt2800usb_write_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { int status; u32 offset; u32 length; int retval; /* * Check which section of the firmware we need. */ if (rt2x00_rt(rt2x00dev, RT2860) || rt2x00_rt(rt2x00dev, RT2872) || rt2x00_rt(rt2x00dev, RT3070)) { offset = 0; length = 4096; } else { offset = 4096; length = 4096; } /* * Write firmware to device. */ retval = rt2800usb_autorun_detect(rt2x00dev); if (retval < 0) return retval; if (retval) { rt2x00_info(rt2x00dev, "Firmware loading not required - NIC in AutoRun mode\n"); __clear_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags); } else { rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, data + offset, length); } rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); /* * Send firmware request to device to load firmware, * we need to specify a long timeout time. */ status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, USB_MODE_FIRMWARE, REGISTER_TIMEOUT_FIRMWARE); if (status < 0) { rt2x00_err(rt2x00dev, "Failed to write Firmware to device\n"); return status; } msleep(10); rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); return 0; } /* * Device state switch handlers. */ static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev) { u32 reg; /* * Wait until BBP and RF are ready. 
*/ if (rt2800_wait_csr_ready(rt2x00dev)) return -EBUSY; reg = rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL); rt2x00usb_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000); reg = 0; rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1); rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1); rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg); rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, USB_MODE_RESET, REGISTER_TIMEOUT); rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); return 0; } static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev) { u32 reg = 0; if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev))) return -EIO; rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0); rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 0); rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_TIMEOUT, 128); /* * Total room for RX frames in kilobytes, PBF might still exceed * this limit so reduce the number to prevent errors. */ rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_LIMIT, ((rt2x00dev->rx->limit * DATA_FRAME_SIZE) / 1024) - 3); rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1); rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1); rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, reg); return rt2800_enable_radio(rt2x00dev); } static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev) { rt2800_disable_radio(rt2x00dev); } static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { if (state == STATE_AWAKE) rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 2); else rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0xff, 2); return 0; } static int rt2800usb_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: /* * Before the radio can be enabled, the device first has * to be woken up. After that it needs a bit of time * to be fully awake and then the radio can be enabled. */ rt2800usb_set_state(rt2x00dev, STATE_AWAKE); msleep(1); retval = rt2800usb_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: /* * After the radio has been disabled, the device should * be put to sleep for powersaving. */ rt2800usb_disable_radio(rt2x00dev); rt2800usb_set_state(rt2x00dev, STATE_SLEEP); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: retval = rt2800usb_set_state(rt2x00dev, state); break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n", state, retval); return retval; } static unsigned int rt2800usb_get_dma_done(struct data_queue *queue) { struct queue_entry *entry; entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE); return entry->entry_idx; } /* * TX descriptor initialization */ static __le32 *rt2800usb_get_txwi(struct queue_entry *entry) { if (entry->queue->qid == QID_BEACON) return (__le32 *) (entry->skb->data); else return (__le32 *) (entry->skb->data + TXINFO_DESC_SIZE); } static void rt2800usb_write_tx_desc(struct queue_entry *entry, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *txi = (__le32 *) entry->skb->data; u32 word; /* * Initialize TXINFO descriptor */ word = rt2x00_desc_read(txi, 0); /* * The size of TXINFO_W0_USB_DMA_TX_PKT_LEN is * TXWI + 802.11 header + L2 pad + payload + pad, * so need to decrease size of TXINFO. 
*/ rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_PKT_LEN, roundup(entry->skb->len, 4) - TXINFO_DESC_SIZE); rt2x00_set_field32(&word, TXINFO_W0_WIV, !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags)); rt2x00_set_field32(&word, TXINFO_W0_QSEL, 2); rt2x00_set_field32(&word, TXINFO_W0_SW_USE_LAST_ROUND, 0); rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_NEXT_VALID, 0); rt2x00_set_field32(&word, TXINFO_W0_USB_DMA_TX_BURST, test_bit(ENTRY_TXD_BURST, &txdesc->flags)); rt2x00_desc_write(txi, 0, word); /* * Register descriptor details in skb frame descriptor. */ skbdesc->flags |= SKBDESC_DESC_IN_SKB; skbdesc->desc = txi; skbdesc->desc_len = TXINFO_DESC_SIZE + entry->queue->winfo_size; } /* * TX data initialization */ static int rt2800usb_get_tx_data_len(struct queue_entry *entry) { /* * pad(1~3 bytes) is needed after each 802.11 payload. * USB end pad(4 bytes) is needed at each USB bulk out packet end. * TX frame format is : * | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad | * |<------------- tx_pkt_len ------------->| */ return roundup(entry->skb->len, 4) + 4; } /* * TX control handlers */ static void rt2800usb_work_txdone(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, txdone_work); while (!kfifo_is_empty(&rt2x00dev->txstatus_fifo) || rt2800_txstatus_timeout(rt2x00dev)) { rt2800_txdone(rt2x00dev, UINT_MAX); rt2800_txdone_nostatus(rt2x00dev); /* * The hw may delay sending the packet after DMA complete * if the medium is busy, thus the TX_STA_FIFO entry is * also delayed -> use a timer to retrieve it. */ if (rt2800_txstatus_pending(rt2x00dev)) rt2800usb_async_read_tx_status(rt2x00dev); } } /* * RX control handlers */ static void rt2800usb_fill_rxdone(struct queue_entry *entry, struct rxdone_entry_desc *rxdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *rxi = (__le32 *)entry->skb->data; __le32 *rxd; u32 word; int rx_pkt_len; /* * Copy descriptor to the skbdesc->desc buffer, making it safe from * moving of frame data in rt2x00usb. */ memcpy(skbdesc->desc, rxi, skbdesc->desc_len); /* * RX frame format is : * | RXINFO | RXWI | header | L2 pad | payload | pad | RXD | USB pad | * |<------------ rx_pkt_len -------------->| */ word = rt2x00_desc_read(rxi, 0); rx_pkt_len = rt2x00_get_field32(word, RXINFO_W0_USB_DMA_RX_PKT_LEN); /* * Remove the RXINFO structure from the sbk. */ skb_pull(entry->skb, RXINFO_DESC_SIZE); /* * Check for rx_pkt_len validity. Return if invalid, leaving * rxdesc->size zeroed out by the upper level. */ if (unlikely(rx_pkt_len == 0 || rx_pkt_len > entry->queue->data_size)) { rt2x00_err(entry->queue->rt2x00dev, "Bad frame size %d, forcing to 0\n", rx_pkt_len); return; } rxd = (__le32 *)(entry->skb->data + rx_pkt_len); /* * It is now safe to read the descriptor on all architectures. */ word = rt2x00_desc_read(rxd, 0); if (rt2x00_get_field32(word, RXD_W0_CRC_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; rxdesc->cipher_status = rt2x00_get_field32(word, RXD_W0_CIPHER_ERROR); if (rt2x00_get_field32(word, RXD_W0_DECRYPTED)) { /* * Hardware has stripped IV/EIV data from 802.11 frame during * decryption. Unfortunately the descriptor doesn't contain * any fields with the EIV/IV data either, so they can't * be restored by rt2x00lib. */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; /* * The hardware has already checked the Michael Mic and has * stripped it from the frame. Signal this to mac80211. 
*/ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) { rxdesc->flags |= RX_FLAG_DECRYPTED; } else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) { /* * In order to check the Michael Mic, the packet must have * been decrypted. Mac80211 doesnt check the MMIC failure * flag to initiate MMIC countermeasures if the decoded flag * has not been set. */ rxdesc->flags |= RX_FLAG_DECRYPTED; rxdesc->flags |= RX_FLAG_MMIC_ERROR; } } if (rt2x00_get_field32(word, RXD_W0_MY_BSS)) rxdesc->dev_flags |= RXDONE_MY_BSS; if (rt2x00_get_field32(word, RXD_W0_L2PAD)) rxdesc->dev_flags |= RXDONE_L2PAD; /* * Remove RXD descriptor from end of buffer. */ skb_trim(entry->skb, rx_pkt_len); /* * Process the RXWI structure. */ rt2800_process_rxwi(entry, rxdesc); } /* * Device probe functions. */ static int rt2800usb_efuse_detect(struct rt2x00_dev *rt2x00dev) { int retval; retval = rt2800usb_autorun_detect(rt2x00dev); if (retval < 0) return retval; if (retval) return 1; return rt2800_efuse_detect(rt2x00dev); } static int rt2800usb_read_eeprom(struct rt2x00_dev *rt2x00dev) { int retval; retval = rt2800usb_efuse_detect(rt2x00dev); if (retval < 0) return retval; if (retval) retval = rt2800_read_eeprom_efuse(rt2x00dev); else retval = rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); return retval; } static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; retval = rt2800_probe_hw(rt2x00dev); if (retval) return retval; /* * Set txstatus timer function. */ rt2x00dev->txstatus_timer.function = rt2800usb_tx_sta_fifo_timeout; /* * Overwrite TX done handler */ INIT_WORK(&rt2x00dev->txdone_work, rt2800usb_work_txdone); return 0; } static const struct ieee80211_ops rt2800usb_mac80211_ops = { .tx = rt2x00mac_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .set_tim = rt2x00mac_set_tim, .set_key = rt2x00mac_set_key, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .get_key_seq = rt2800_get_key_seq, .set_rts_threshold = rt2800_set_rts_threshold, .sta_add = rt2800_sta_add, .sta_remove = rt2800_sta_remove, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2800_conf_tx, .get_tsf = rt2800_get_tsf, .rfkill_poll = rt2x00mac_rfkill_poll, .ampdu_action = rt2800_ampdu_action, .flush = rt2x00mac_flush, .get_survey = rt2800_get_survey, .get_ringparam = rt2x00mac_get_ringparam, .tx_frames_pending = rt2x00mac_tx_frames_pending, .reconfig_complete = rt2x00mac_reconfig_complete, }; static const struct rt2800_ops rt2800usb_rt2800_ops = { .register_read = rt2x00usb_register_read, .register_read_lock = rt2x00usb_register_read_lock, .register_write = rt2x00usb_register_write, .register_write_lock = rt2x00usb_register_write_lock, .register_multiread = rt2x00usb_register_multiread, .register_multiwrite = rt2x00usb_register_multiwrite, .regbusy_read = rt2x00usb_regbusy_read, .read_eeprom = rt2800usb_read_eeprom, .hwcrypt_disabled = rt2800usb_hwcrypt_disabled, .drv_write_firmware = rt2800usb_write_firmware, .drv_init_registers = rt2800usb_init_registers, .drv_get_txwi = rt2800usb_get_txwi, .drv_get_dma_done = rt2800usb_get_dma_done, }; static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { .probe_hw = rt2800usb_probe_hw, .get_firmware_name = rt2800usb_get_firmware_name, 
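        /* Firmware image validation and upload below come from the shared
         * rt2800lib helpers; only the firmware name lookup and the actual
         * write (rt2800usb_write_firmware) are rt2800usb specific.
         */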
.check_firmware = rt2800_check_firmware, .load_firmware = rt2800_load_firmware, .initialize = rt2x00usb_initialize, .uninitialize = rt2x00usb_uninitialize, .clear_entry = rt2x00usb_clear_entry, .set_device_state = rt2800usb_set_device_state, .rfkill_poll = rt2800_rfkill_poll, .link_stats = rt2800_link_stats, .reset_tuner = rt2800_reset_tuner, .link_tuner = rt2800_link_tuner, .gain_calibration = rt2800_gain_calibration, .vco_calibration = rt2800_vco_calibration, .watchdog = rt2800_watchdog, .start_queue = rt2800usb_start_queue, .kick_queue = rt2x00usb_kick_queue, .stop_queue = rt2800usb_stop_queue, .flush_queue = rt2x00usb_flush_queue, .tx_dma_done = rt2800usb_tx_dma_done, .write_tx_desc = rt2800usb_write_tx_desc, .write_tx_data = rt2800_write_tx_data, .write_beacon = rt2800_write_beacon, .clear_beacon = rt2800_clear_beacon, .get_tx_data_len = rt2800usb_get_tx_data_len, .fill_rxdone = rt2800usb_fill_rxdone, .config_shared_key = rt2800_config_shared_key, .config_pairwise_key = rt2800_config_pairwise_key, .config_filter = rt2800_config_filter, .config_intf = rt2800_config_intf, .config_erp = rt2800_config_erp, .config_ant = rt2800_config_ant, .config = rt2800_config, .pre_reset_hw = rt2800_pre_reset_hw, }; static void rt2800usb_queue_init(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; unsigned short txwi_size, rxwi_size; rt2800_get_txwi_rxwi_size(rt2x00dev, &txwi_size, &rxwi_size); switch (queue->qid) { case QID_RX: queue->limit = 128; queue->data_size = AGGREGATION_SIZE; queue->desc_size = RXINFO_DESC_SIZE; queue->winfo_size = rxwi_size; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: queue->limit = 16; queue->data_size = AGGREGATION_SIZE; queue->desc_size = TXINFO_DESC_SIZE; queue->winfo_size = txwi_size; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_BEACON: queue->limit = 8; queue->data_size = MGMT_FRAME_SIZE; queue->desc_size = TXINFO_DESC_SIZE; queue->winfo_size = txwi_size; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_ATIM: default: BUG(); break; } } static const struct rt2x00_ops rt2800usb_ops = { .name = KBUILD_MODNAME, .drv_data_size = sizeof(struct rt2800_drv_data), .max_ap_intf = 8, .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, .queue_init = rt2800usb_queue_init, .lib = &rt2800usb_rt2x00_ops, .drv = &rt2800usb_rt2800_ops, .hw = &rt2800usb_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt2800_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ }; /* * rt2800usb module information. 
*/ static const struct usb_device_id rt2800usb_device_table[] = { /* Abocom */ { USB_DEVICE(0x07b8, 0x2870) }, { USB_DEVICE(0x07b8, 0x2770) }, { USB_DEVICE(0x07b8, 0x3070) }, { USB_DEVICE(0x07b8, 0x3071) }, { USB_DEVICE(0x07b8, 0x3072) }, { USB_DEVICE(0x1482, 0x3c09) }, /* AirTies */ { USB_DEVICE(0x1eda, 0x2012) }, { USB_DEVICE(0x1eda, 0x2210) }, { USB_DEVICE(0x1eda, 0x2310) }, /* Allwin */ { USB_DEVICE(0x8516, 0x2070) }, { USB_DEVICE(0x8516, 0x2770) }, { USB_DEVICE(0x8516, 0x2870) }, { USB_DEVICE(0x8516, 0x3070) }, { USB_DEVICE(0x8516, 0x3071) }, { USB_DEVICE(0x8516, 0x3072) }, /* Alpha Networks */ { USB_DEVICE(0x14b2, 0x3c06) }, { USB_DEVICE(0x14b2, 0x3c07) }, { USB_DEVICE(0x14b2, 0x3c09) }, { USB_DEVICE(0x14b2, 0x3c12) }, { USB_DEVICE(0x14b2, 0x3c23) }, { USB_DEVICE(0x14b2, 0x3c25) }, { USB_DEVICE(0x14b2, 0x3c27) }, { USB_DEVICE(0x14b2, 0x3c28) }, { USB_DEVICE(0x14b2, 0x3c2c) }, /* Amit */ { USB_DEVICE(0x15c5, 0x0008) }, /* Askey */ { USB_DEVICE(0x1690, 0x0740) }, /* ASUS */ { USB_DEVICE(0x0b05, 0x1731) }, { USB_DEVICE(0x0b05, 0x1732) }, { USB_DEVICE(0x0b05, 0x1742) }, { USB_DEVICE(0x0b05, 0x1784) }, { USB_DEVICE(0x1761, 0x0b05) }, /* AzureWave */ { USB_DEVICE(0x13d3, 0x3247) }, { USB_DEVICE(0x13d3, 0x3273) }, { USB_DEVICE(0x13d3, 0x3305) }, { USB_DEVICE(0x13d3, 0x3307) }, { USB_DEVICE(0x13d3, 0x3321) }, /* Belkin */ { USB_DEVICE(0x050d, 0x8053) }, { USB_DEVICE(0x050d, 0x805c) }, { USB_DEVICE(0x050d, 0x815c) }, { USB_DEVICE(0x050d, 0x825a) }, { USB_DEVICE(0x050d, 0x825b) }, { USB_DEVICE(0x050d, 0x935a) }, { USB_DEVICE(0x050d, 0x935b) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x00e8) }, { USB_DEVICE(0x0411, 0x0158) }, { USB_DEVICE(0x0411, 0x015d) }, { USB_DEVICE(0x0411, 0x016f) }, { USB_DEVICE(0x0411, 0x01a2) }, { USB_DEVICE(0x0411, 0x01ee) }, { USB_DEVICE(0x0411, 0x01a8) }, { USB_DEVICE(0x0411, 0x01fd) }, /* Corega */ { USB_DEVICE(0x07aa, 0x002f) }, { USB_DEVICE(0x07aa, 0x003c) }, { USB_DEVICE(0x07aa, 0x003f) }, { USB_DEVICE(0x18c5, 0x0012) }, /* D-Link */ { USB_DEVICE(0x07d1, 0x3c09) }, { USB_DEVICE(0x07d1, 0x3c0a) }, { USB_DEVICE(0x07d1, 0x3c0d) }, { USB_DEVICE(0x07d1, 0x3c0e) }, { USB_DEVICE(0x07d1, 0x3c0f) }, { USB_DEVICE(0x07d1, 0x3c11) }, { USB_DEVICE(0x07d1, 0x3c13) }, { USB_DEVICE(0x07d1, 0x3c15) }, { USB_DEVICE(0x07d1, 0x3c16) }, { USB_DEVICE(0x07d1, 0x3c17) }, { USB_DEVICE(0x2001, 0x3317) }, { USB_DEVICE(0x2001, 0x3c1b) }, { USB_DEVICE(0x2001, 0x3c25) }, /* Draytek */ { USB_DEVICE(0x07fa, 0x7712) }, /* DVICO */ { USB_DEVICE(0x0fe9, 0xb307) }, /* Edimax */ { USB_DEVICE(0x7392, 0x4085) }, { USB_DEVICE(0x7392, 0x7711) }, { USB_DEVICE(0x7392, 0x7717) }, { USB_DEVICE(0x7392, 0x7718) }, { USB_DEVICE(0x7392, 0x7722) }, /* Encore */ { USB_DEVICE(0x203d, 0x1480) }, { USB_DEVICE(0x203d, 0x14a9) }, /* EnGenius */ { USB_DEVICE(0x1740, 0x9701) }, { USB_DEVICE(0x1740, 0x9702) }, { USB_DEVICE(0x1740, 0x9703) }, { USB_DEVICE(0x1740, 0x9705) }, { USB_DEVICE(0x1740, 0x9706) }, { USB_DEVICE(0x1740, 0x9707) }, { USB_DEVICE(0x1740, 0x9708) }, { USB_DEVICE(0x1740, 0x9709) }, /* Gemtek */ { USB_DEVICE(0x15a9, 0x0012) }, /* Gigabyte */ { USB_DEVICE(0x1044, 0x800b) }, { USB_DEVICE(0x1044, 0x800d) }, /* Hawking */ { USB_DEVICE(0x0e66, 0x0001) }, { USB_DEVICE(0x0e66, 0x0003) }, { USB_DEVICE(0x0e66, 0x0009) }, { USB_DEVICE(0x0e66, 0x000b) }, { USB_DEVICE(0x0e66, 0x0013) }, { USB_DEVICE(0x0e66, 0x0017) }, { USB_DEVICE(0x0e66, 0x0018) }, /* I-O DATA */ { USB_DEVICE(0x04bb, 0x0945) }, { USB_DEVICE(0x04bb, 0x0947) }, { USB_DEVICE(0x04bb, 0x0948) }, /* Linksys */ { USB_DEVICE(0x13b1, 0x0031) }, { 
USB_DEVICE(0x1737, 0x0070) }, { USB_DEVICE(0x1737, 0x0071) }, { USB_DEVICE(0x1737, 0x0077) }, { USB_DEVICE(0x1737, 0x0078) }, /* Logitec */ { USB_DEVICE(0x0789, 0x0162) }, { USB_DEVICE(0x0789, 0x0163) }, { USB_DEVICE(0x0789, 0x0164) }, { USB_DEVICE(0x0789, 0x0166) }, /* Motorola */ { USB_DEVICE(0x100d, 0x9031) }, /* MSI */ { USB_DEVICE(0x0db0, 0x3820) }, { USB_DEVICE(0x0db0, 0x3821) }, { USB_DEVICE(0x0db0, 0x3822) }, { USB_DEVICE(0x0db0, 0x3870) }, { USB_DEVICE(0x0db0, 0x3871) }, { USB_DEVICE(0x0db0, 0x6899) }, { USB_DEVICE(0x0db0, 0x821a) }, { USB_DEVICE(0x0db0, 0x822a) }, { USB_DEVICE(0x0db0, 0x822b) }, { USB_DEVICE(0x0db0, 0x822c) }, { USB_DEVICE(0x0db0, 0x870a) }, { USB_DEVICE(0x0db0, 0x871a) }, { USB_DEVICE(0x0db0, 0x871b) }, { USB_DEVICE(0x0db0, 0x871c) }, { USB_DEVICE(0x0db0, 0x899a) }, /* Ovislink */ { USB_DEVICE(0x1b75, 0x3070) }, { USB_DEVICE(0x1b75, 0x3071) }, { USB_DEVICE(0x1b75, 0x3072) }, { USB_DEVICE(0x1b75, 0xa200) }, /* Para */ { USB_DEVICE(0x20b8, 0x8888) }, /* Pegatron */ { USB_DEVICE(0x1d4d, 0x0002) }, { USB_DEVICE(0x1d4d, 0x000c) }, { USB_DEVICE(0x1d4d, 0x000e) }, { USB_DEVICE(0x1d4d, 0x0011) }, /* Philips */ { USB_DEVICE(0x0471, 0x200f) }, /* Planex */ { USB_DEVICE(0x2019, 0x5201) }, { USB_DEVICE(0x2019, 0xab25) }, { USB_DEVICE(0x2019, 0xed06) }, /* Quanta */ { USB_DEVICE(0x1a32, 0x0304) }, /* Ralink */ { USB_DEVICE(0x148f, 0x2070) }, { USB_DEVICE(0x148f, 0x2770) }, { USB_DEVICE(0x148f, 0x2870) }, { USB_DEVICE(0x148f, 0x3070) }, { USB_DEVICE(0x148f, 0x3071) }, { USB_DEVICE(0x148f, 0x3072) }, /* Samsung */ { USB_DEVICE(0x04e8, 0x2018) }, /* Siemens */ { USB_DEVICE(0x129b, 0x1828) }, /* Sitecom */ { USB_DEVICE(0x0df6, 0x0017) }, { USB_DEVICE(0x0df6, 0x002b) }, { USB_DEVICE(0x0df6, 0x002c) }, { USB_DEVICE(0x0df6, 0x002d) }, { USB_DEVICE(0x0df6, 0x0039) }, { USB_DEVICE(0x0df6, 0x003b) }, { USB_DEVICE(0x0df6, 0x003d) }, { USB_DEVICE(0x0df6, 0x003e) }, { USB_DEVICE(0x0df6, 0x003f) }, { USB_DEVICE(0x0df6, 0x0040) }, { USB_DEVICE(0x0df6, 0x0042) }, { USB_DEVICE(0x0df6, 0x0047) }, { USB_DEVICE(0x0df6, 0x0048) }, { USB_DEVICE(0x0df6, 0x0051) }, { USB_DEVICE(0x0df6, 0x005f) }, { USB_DEVICE(0x0df6, 0x0060) }, /* SMC */ { USB_DEVICE(0x083a, 0x6618) }, { USB_DEVICE(0x083a, 0x7511) }, { USB_DEVICE(0x083a, 0x7512) }, { USB_DEVICE(0x083a, 0x7522) }, { USB_DEVICE(0x083a, 0x8522) }, { USB_DEVICE(0x083a, 0xa618) }, { USB_DEVICE(0x083a, 0xa701) }, { USB_DEVICE(0x083a, 0xa702) }, { USB_DEVICE(0x083a, 0xa703) }, { USB_DEVICE(0x083a, 0xb522) }, /* Sparklan */ { USB_DEVICE(0x15a9, 0x0006) }, /* Sweex */ { USB_DEVICE(0x177f, 0x0153) }, { USB_DEVICE(0x177f, 0x0164) }, { USB_DEVICE(0x177f, 0x0302) }, { USB_DEVICE(0x177f, 0x0313) }, { USB_DEVICE(0x177f, 0x0323) }, { USB_DEVICE(0x177f, 0x0324) }, { USB_DEVICE(0x177f, 0x1163) }, /* U-Media */ { USB_DEVICE(0x157e, 0x300e) }, { USB_DEVICE(0x157e, 0x3013) }, /* ZCOM */ { USB_DEVICE(0x0cde, 0x0022) }, { USB_DEVICE(0x0cde, 0x0025) }, /* Zinwell */ { USB_DEVICE(0x5a57, 0x0280) }, { USB_DEVICE(0x5a57, 0x0282) }, { USB_DEVICE(0x5a57, 0x0283) }, { USB_DEVICE(0x5a57, 0x5257) }, /* Zyxel */ { USB_DEVICE(0x0586, 0x3416) }, { USB_DEVICE(0x0586, 0x3418) }, { USB_DEVICE(0x0586, 0x341a) }, { USB_DEVICE(0x0586, 0x341e) }, { USB_DEVICE(0x0586, 0x343e) }, #ifdef CONFIG_RT2800USB_RT33XX /* Belkin */ { USB_DEVICE(0x050d, 0x945b) }, /* D-Link */ { USB_DEVICE(0x2001, 0x3c17) }, /* Panasonic */ { USB_DEVICE(0x083a, 0xb511) }, /* Accton/Arcadyan/Epson */ { USB_DEVICE(0x083a, 0xb512) }, /* Philips */ { USB_DEVICE(0x0471, 0x20dd) }, /* Ralink */ { USB_DEVICE(0x148f, 0x3370) 
}, { USB_DEVICE(0x148f, 0x8070) }, /* Sitecom */ { USB_DEVICE(0x0df6, 0x0050) }, /* Sweex */ { USB_DEVICE(0x177f, 0x0163) }, { USB_DEVICE(0x177f, 0x0165) }, #endif #ifdef CONFIG_RT2800USB_RT35XX /* Allwin */ { USB_DEVICE(0x8516, 0x3572) }, /* Askey */ { USB_DEVICE(0x1690, 0x0744) }, { USB_DEVICE(0x1690, 0x0761) }, { USB_DEVICE(0x1690, 0x0764) }, /* ASUS */ { USB_DEVICE(0x0b05, 0x179d) }, /* Cisco */ { USB_DEVICE(0x167b, 0x4001) }, /* EnGenius */ { USB_DEVICE(0x1740, 0x9801) }, /* I-O DATA */ { USB_DEVICE(0x04bb, 0x0944) }, /* Linksys */ { USB_DEVICE(0x13b1, 0x002f) }, { USB_DEVICE(0x1737, 0x0079) }, /* Logitec */ { USB_DEVICE(0x0789, 0x0170) }, /* Ralink */ { USB_DEVICE(0x148f, 0x3572) }, /* Sitecom */ { USB_DEVICE(0x0df6, 0x0041) }, { USB_DEVICE(0x0df6, 0x0062) }, { USB_DEVICE(0x0df6, 0x0065) }, { USB_DEVICE(0x0df6, 0x0066) }, { USB_DEVICE(0x0df6, 0x0068) }, /* Toshiba */ { USB_DEVICE(0x0930, 0x0a07) }, /* Zinwell */ { USB_DEVICE(0x5a57, 0x0284) }, #endif #ifdef CONFIG_RT2800USB_RT3573 /* AirLive */ { USB_DEVICE(0x1b75, 0x7733) }, /* ASUS */ { USB_DEVICE(0x0b05, 0x17bc) }, { USB_DEVICE(0x0b05, 0x17ad) }, /* Belkin */ { USB_DEVICE(0x050d, 0x1103) }, /* Cameo */ { USB_DEVICE(0x148f, 0xf301) }, /* D-Link */ { USB_DEVICE(0x2001, 0x3c1f) }, /* Edimax */ { USB_DEVICE(0x7392, 0x7733) }, /* Hawking */ { USB_DEVICE(0x0e66, 0x0020) }, { USB_DEVICE(0x0e66, 0x0021) }, /* I-O DATA */ { USB_DEVICE(0x04bb, 0x094e) }, /* Linksys */ { USB_DEVICE(0x13b1, 0x003b) }, /* Logitec */ { USB_DEVICE(0x0789, 0x016b) }, /* NETGEAR */ { USB_DEVICE(0x0846, 0x9012) }, { USB_DEVICE(0x0846, 0x9013) }, { USB_DEVICE(0x0846, 0x9019) }, /* Planex */ { USB_DEVICE(0x2019, 0xed14) }, { USB_DEVICE(0x2019, 0xed19) }, /* Ralink */ { USB_DEVICE(0x148f, 0x3573) }, /* Sitecom */ { USB_DEVICE(0x0df6, 0x0067) }, { USB_DEVICE(0x0df6, 0x006a) }, { USB_DEVICE(0x0df6, 0x006e) }, /* ZyXEL */ { USB_DEVICE(0x0586, 0x3421) }, #endif #ifdef CONFIG_RT2800USB_RT53XX /* Arcadyan */ { USB_DEVICE(0x043e, 0x7a12) }, /* ASUS */ { USB_DEVICE(0x0b05, 0x17e8) }, /* Azurewave */ { USB_DEVICE(0x13d3, 0x3329) }, { USB_DEVICE(0x13d3, 0x3365) }, /* D-Link */ { USB_DEVICE(0x2001, 0x3c15) }, { USB_DEVICE(0x2001, 0x3c19) }, { USB_DEVICE(0x2001, 0x3c1c) }, { USB_DEVICE(0x2001, 0x3c1d) }, { USB_DEVICE(0x2001, 0x3c1e) }, { USB_DEVICE(0x2001, 0x3c20) }, { USB_DEVICE(0x2001, 0x3c22) }, { USB_DEVICE(0x2001, 0x3c23) }, /* LG innotek */ { USB_DEVICE(0x043e, 0x7a22) }, { USB_DEVICE(0x043e, 0x7a42) }, /* Panasonic */ { USB_DEVICE(0x04da, 0x1801) }, { USB_DEVICE(0x04da, 0x1800) }, { USB_DEVICE(0x04da, 0x23f6) }, /* Philips */ { USB_DEVICE(0x0471, 0x2104) }, { USB_DEVICE(0x0471, 0x2126) }, { USB_DEVICE(0x0471, 0x2180) }, { USB_DEVICE(0x0471, 0x2181) }, { USB_DEVICE(0x0471, 0x2182) }, /* Ralink */ { USB_DEVICE(0x148f, 0x5370) }, { USB_DEVICE(0x148f, 0x5372) }, #endif #ifdef CONFIG_RT2800USB_RT55XX /* Arcadyan */ { USB_DEVICE(0x043e, 0x7a32) }, /* AVM GmbH */ { USB_DEVICE(0x057c, 0x8501) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x0241) }, { USB_DEVICE(0x0411, 0x0253) }, /* D-Link */ { USB_DEVICE(0x2001, 0x3c1a) }, { USB_DEVICE(0x2001, 0x3c21) }, /* Proware */ { USB_DEVICE(0x043e, 0x7a13) }, /* Ralink */ { USB_DEVICE(0x148f, 0x5572) }, /* TRENDnet */ { USB_DEVICE(0x20f4, 0x724a) }, #endif #ifdef CONFIG_RT2800USB_UNKNOWN /* * Unclear what kind of devices these are (they aren't supported by the * vendor linux driver). 
*/ /* Abocom */ { USB_DEVICE(0x07b8, 0x3073) }, { USB_DEVICE(0x07b8, 0x3074) }, /* Alpha Networks */ { USB_DEVICE(0x14b2, 0x3c08) }, { USB_DEVICE(0x14b2, 0x3c11) }, /* Amigo */ { USB_DEVICE(0x0e0b, 0x9031) }, { USB_DEVICE(0x0e0b, 0x9041) }, /* ASUS */ { USB_DEVICE(0x0b05, 0x166a) }, { USB_DEVICE(0x0b05, 0x1760) }, { USB_DEVICE(0x0b05, 0x1761) }, { USB_DEVICE(0x0b05, 0x1790) }, { USB_DEVICE(0x0b05, 0x17a7) }, /* AzureWave */ { USB_DEVICE(0x13d3, 0x3262) }, { USB_DEVICE(0x13d3, 0x3284) }, { USB_DEVICE(0x13d3, 0x3322) }, { USB_DEVICE(0x13d3, 0x3340) }, { USB_DEVICE(0x13d3, 0x3399) }, { USB_DEVICE(0x13d3, 0x3400) }, { USB_DEVICE(0x13d3, 0x3401) }, /* Belkin */ { USB_DEVICE(0x050d, 0x1003) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x012e) }, { USB_DEVICE(0x0411, 0x0148) }, { USB_DEVICE(0x0411, 0x0150) }, /* Corega */ { USB_DEVICE(0x07aa, 0x0041) }, { USB_DEVICE(0x07aa, 0x0042) }, { USB_DEVICE(0x18c5, 0x0008) }, /* D-Link */ { USB_DEVICE(0x07d1, 0x3c0b) }, /* Encore */ { USB_DEVICE(0x203d, 0x14a1) }, /* EnGenius */ { USB_DEVICE(0x1740, 0x0600) }, { USB_DEVICE(0x1740, 0x0602) }, /* Gemtek */ { USB_DEVICE(0x15a9, 0x0010) }, /* Gigabyte */ { USB_DEVICE(0x1044, 0x800c) }, /* Hercules */ { USB_DEVICE(0x06f8, 0xe036) }, /* Huawei */ { USB_DEVICE(0x148f, 0xf101) }, /* I-O DATA */ { USB_DEVICE(0x04bb, 0x094b) }, /* LevelOne */ { USB_DEVICE(0x1740, 0x0605) }, { USB_DEVICE(0x1740, 0x0615) }, /* Logitec */ { USB_DEVICE(0x0789, 0x0168) }, { USB_DEVICE(0x0789, 0x0169) }, /* Motorola */ { USB_DEVICE(0x100d, 0x9032) }, /* Pegatron */ { USB_DEVICE(0x05a6, 0x0101) }, { USB_DEVICE(0x1d4d, 0x0010) }, /* Planex */ { USB_DEVICE(0x2019, 0xab24) }, { USB_DEVICE(0x2019, 0xab29) }, /* Qcom */ { USB_DEVICE(0x18e8, 0x6259) }, /* RadioShack */ { USB_DEVICE(0x08b9, 0x1197) }, /* Sitecom */ { USB_DEVICE(0x0df6, 0x003c) }, { USB_DEVICE(0x0df6, 0x004a) }, { USB_DEVICE(0x0df6, 0x004d) }, { USB_DEVICE(0x0df6, 0x0053) }, { USB_DEVICE(0x0df6, 0x0069) }, { USB_DEVICE(0x0df6, 0x006f) }, { USB_DEVICE(0x0df6, 0x0078) }, /* SMC */ { USB_DEVICE(0x083a, 0xa512) }, { USB_DEVICE(0x083a, 0xc522) }, { USB_DEVICE(0x083a, 0xd522) }, { USB_DEVICE(0x083a, 0xf511) }, /* Sweex */ { USB_DEVICE(0x177f, 0x0254) }, /* TP-LINK */ { USB_DEVICE(0xf201, 0x5370) }, #endif { 0, } }; MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2800 USB Wireless LAN driver."); MODULE_DEVICE_TABLE(usb, rt2800usb_device_table); MODULE_FIRMWARE(FIRMWARE_RT2870); MODULE_LICENSE("GPL"); static int rt2800usb_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { return rt2x00usb_probe(usb_intf, &rt2800usb_ops); } static struct usb_driver rt2800usb_driver = { .name = KBUILD_MODNAME, .id_table = rt2800usb_device_table, .probe = rt2800usb_probe, .disconnect = rt2x00usb_disconnect, .suspend = rt2x00usb_suspend, .resume = rt2x00usb_resume, .reset_resume = rt2x00usb_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(rt2800usb_driver);
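/* * Editor's sketch (not part of the upstream driver; guarded by the hypothetical RT2800USB_DOC_SKETCH symbol so it is never built): rt2800usb_get_tx_data_len() above reports the USB bulk-out length as the 802.11 frame rounded up to a 4-byte boundary plus a 4-byte USB end pad. The stand-alone helper below mirrors that arithmetic in plain C so the padding rule can be checked in isolation; the function name is made up for illustration. */ #ifdef RT2800USB_DOC_SKETCH static unsigned int example_bulkout_len(unsigned int skb_len) { /* Round up to a multiple of 4 bytes, same result as roundup(skb_len, 4). */ unsigned int padded = (skb_len + 3) & ~3u; /* Append the 4-byte end pad required at the end of each bulk-out packet. */ return padded + 4; } /* Example: a 60-byte frame yields 64 bytes on the bus, a 61-byte frame yields 68. */ #endif /* RT2800USB_DOC_SKETCH */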
linux-master
drivers/net/wireless/ralink/rt2x00/rt2800usb.c
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> Copyright (C) 2010 Ivo van Doorn <[email protected]> Copyright (C) 2009 Bartlomiej Zolnierkiewicz <[email protected]> Copyright (C) 2009 Gertjan van Wingerde <[email protected]> Based on the original rt2800pci.c and rt2800usb.c. Copyright (C) 2009 Alban Browaeys <[email protected]> Copyright (C) 2009 Felix Fietkau <[email protected]> Copyright (C) 2009 Luis Correia <[email protected]> Copyright (C) 2009 Mattias Nissler <[email protected]> Copyright (C) 2009 Mark Asselstine <[email protected]> Copyright (C) 2009 Xose Vazquez Perez <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2800lib Abstract: rt2800 generic device routines. */ #include <linux/crc-ccitt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include "rt2x00.h" #include "rt2800lib.h" #include "rt2800.h" static bool modparam_watchdog; module_param_named(watchdog, modparam_watchdog, bool, S_IRUGO); MODULE_PARM_DESC(watchdog, "Enable watchdog to detect tx/rx hangs and reset hardware if detected"); /* * Register access. * All access to the CSR registers will go through the methods * rt2800_register_read and rt2800_register_write. * BBP and RF register require indirect register access, * and use the CSR registers BBPCSR and RFCSR to achieve this. * These indirect registers work with busy bits, * and we will try maximal REGISTER_BUSY_COUNT times to access * the register while taking a REGISTER_BUSY_DELAY us delay * between each attampt. When the busy bit is still set at that time, * the access attempt is considered to have failed, * and we will print an error. * The _lock versions must be used if you already hold the csr_mutex */ #define WAIT_FOR_BBP(__dev, __reg) \ rt2800_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg)) #define WAIT_FOR_RFCSR(__dev, __reg) \ rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg)) #define WAIT_FOR_RFCSR_MT7620(__dev, __reg) \ rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY_MT7620, \ (__reg)) #define WAIT_FOR_RF(__dev, __reg) \ rt2800_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg)) #define WAIT_FOR_MCU(__dev, __reg) \ rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \ H2M_MAILBOX_CSR_OWNER, (__reg)) static inline bool rt2800_is_305x_soc(struct rt2x00_dev *rt2x00dev) { /* check for rt2872 on SoC */ if (!rt2x00_is_soc(rt2x00dev) || !rt2x00_rt(rt2x00dev, RT2872)) return false; /* we know for sure that these rf chipsets are used on rt305x boards */ if (rt2x00_rf(rt2x00dev, RF3020) || rt2x00_rf(rt2x00dev, RF3021) || rt2x00_rf(rt2x00dev, RF3022)) return true; rt2x00_warn(rt2x00dev, "Unknown RF chipset on rt305x\n"); return false; } static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the new data into the register. 
*/ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value); rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word); rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1); rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0); rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1); rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg); } mutex_unlock(&rt2x00dev->csr_mutex); } static u8 rt2800_bbp_read(struct rt2x00_dev *rt2x00dev, const unsigned int word) { u32 reg; u8 value; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the read request into the register. * After the data has been written, we wait until hardware * returns the correct value, if at any time the register * doesn't become available in time, reg will be 0xffffffff * which means we return 0xff to the caller. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word); rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1); rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1); rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1); rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg); WAIT_FOR_BBP(rt2x00dev, &reg); } value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE); mutex_unlock(&rt2x00dev->csr_mutex); return value; } static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RFCSR becomes available, afterwards we * can safely write the new data into the register. */ switch (rt2x00dev->chip.rt) { case RT6352: if (WAIT_FOR_RFCSR_MT7620(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, RF_CSR_CFG_DATA_MT7620, value); rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM_MT7620, word); rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE_MT7620, 1); rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY_MT7620, 1); rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); } break; default: if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value); rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word); rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1); rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1); rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); } break; } mutex_unlock(&rt2x00dev->csr_mutex); } static void rt2800_rfcsr_write_bank(struct rt2x00_dev *rt2x00dev, const u8 bank, const unsigned int reg, const u8 value) { rt2800_rfcsr_write(rt2x00dev, (reg | (bank << 6)), value); } static void rt2800_rfcsr_write_chanreg(struct rt2x00_dev *rt2x00dev, const unsigned int reg, const u8 value) { rt2800_rfcsr_write_bank(rt2x00dev, 4, reg, value); rt2800_rfcsr_write_bank(rt2x00dev, 6, reg, value); } static void rt2800_rfcsr_write_dccal(struct rt2x00_dev *rt2x00dev, const unsigned int reg, const u8 value) { rt2800_rfcsr_write_bank(rt2x00dev, 5, reg, value); rt2800_rfcsr_write_bank(rt2x00dev, 7, reg, value); } static void rt2800_bbp_dcoc_write(struct rt2x00_dev *rt2x00dev, const u8 reg, const u8 value) { rt2800_bbp_write(rt2x00dev, 158, reg); rt2800_bbp_write(rt2x00dev, 159, value); } static u8 rt2800_bbp_dcoc_read(struct rt2x00_dev *rt2x00dev, const u8 reg) { rt2800_bbp_write(rt2x00dev, 158, reg); return rt2800_bbp_read(rt2x00dev, 159); } static void rt2800_bbp_glrt_write(struct rt2x00_dev *rt2x00dev, const u8 reg, const u8 value) { rt2800_bbp_write(rt2x00dev, 195, reg); rt2800_bbp_write(rt2x00dev, 196, value); } static u8 rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev, const unsigned int word) { u32 reg; u8 
value; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RFCSR becomes available, afterwards we * can safely write the read request into the register. * After the data has been written, we wait until hardware * returns the correct value, if at any time the register * doesn't become available in time, reg will be 0xffffffff * which means we return 0xff to the caller. */ switch (rt2x00dev->chip.rt) { case RT6352: if (WAIT_FOR_RFCSR_MT7620(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM_MT7620, word); rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE_MT7620, 0); rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY_MT7620, 1); rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); WAIT_FOR_RFCSR_MT7620(rt2x00dev, &reg); } value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA_MT7620); break; default: if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word); rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0); rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1); rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg); WAIT_FOR_RFCSR(rt2x00dev, &reg); } value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA); break; } mutex_unlock(&rt2x00dev->csr_mutex); return value; } static u8 rt2800_rfcsr_read_bank(struct rt2x00_dev *rt2x00dev, const u8 bank, const unsigned int reg) { return rt2800_rfcsr_read(rt2x00dev, (reg | (bank << 6))); } static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u32 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RF becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_RF(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, RF_CSR_CFG0_REG_VALUE_BW, value); rt2x00_set_field32(&reg, RF_CSR_CFG0_STANDBYMODE, 0); rt2x00_set_field32(&reg, RF_CSR_CFG0_SEL, 0); rt2x00_set_field32(&reg, RF_CSR_CFG0_BUSY, 1); rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG0, reg); rt2x00_rf_write(rt2x00dev, word, value); } mutex_unlock(&rt2x00dev->csr_mutex); } static const unsigned int rt2800_eeprom_map[EEPROM_WORD_COUNT] = { [EEPROM_CHIP_ID] = 0x0000, [EEPROM_VERSION] = 0x0001, [EEPROM_MAC_ADDR_0] = 0x0002, [EEPROM_MAC_ADDR_1] = 0x0003, [EEPROM_MAC_ADDR_2] = 0x0004, [EEPROM_NIC_CONF0] = 0x001a, [EEPROM_NIC_CONF1] = 0x001b, [EEPROM_FREQ] = 0x001d, [EEPROM_LED_AG_CONF] = 0x001e, [EEPROM_LED_ACT_CONF] = 0x001f, [EEPROM_LED_POLARITY] = 0x0020, [EEPROM_NIC_CONF2] = 0x0021, [EEPROM_LNA] = 0x0022, [EEPROM_RSSI_BG] = 0x0023, [EEPROM_RSSI_BG2] = 0x0024, [EEPROM_TXMIXER_GAIN_BG] = 0x0024, /* overlaps with RSSI_BG2 */ [EEPROM_RSSI_A] = 0x0025, [EEPROM_RSSI_A2] = 0x0026, [EEPROM_TXMIXER_GAIN_A] = 0x0026, /* overlaps with RSSI_A2 */ [EEPROM_EIRP_MAX_TX_POWER] = 0x0027, [EEPROM_TXPOWER_DELTA] = 0x0028, [EEPROM_TXPOWER_BG1] = 0x0029, [EEPROM_TXPOWER_BG2] = 0x0030, [EEPROM_TSSI_BOUND_BG1] = 0x0037, [EEPROM_TSSI_BOUND_BG2] = 0x0038, [EEPROM_TSSI_BOUND_BG3] = 0x0039, [EEPROM_TSSI_BOUND_BG4] = 0x003a, [EEPROM_TSSI_BOUND_BG5] = 0x003b, [EEPROM_TXPOWER_A1] = 0x003c, [EEPROM_TXPOWER_A2] = 0x0053, [EEPROM_TXPOWER_INIT] = 0x0068, [EEPROM_TSSI_BOUND_A1] = 0x006a, [EEPROM_TSSI_BOUND_A2] = 0x006b, [EEPROM_TSSI_BOUND_A3] = 0x006c, [EEPROM_TSSI_BOUND_A4] = 0x006d, [EEPROM_TSSI_BOUND_A5] = 0x006e, [EEPROM_TXPOWER_BYRATE] = 0x006f, [EEPROM_BBP_START] = 0x0078, }; static const unsigned int rt2800_eeprom_map_ext[EEPROM_WORD_COUNT] = { [EEPROM_CHIP_ID] = 0x0000, [EEPROM_VERSION] = 0x0001, [EEPROM_MAC_ADDR_0] = 0x0002, [EEPROM_MAC_ADDR_1] = 0x0003, [EEPROM_MAC_ADDR_2] = 0x0004, [EEPROM_NIC_CONF0] = 
0x001a, [EEPROM_NIC_CONF1] = 0x001b, [EEPROM_NIC_CONF2] = 0x001c, [EEPROM_EIRP_MAX_TX_POWER] = 0x0020, [EEPROM_FREQ] = 0x0022, [EEPROM_LED_AG_CONF] = 0x0023, [EEPROM_LED_ACT_CONF] = 0x0024, [EEPROM_LED_POLARITY] = 0x0025, [EEPROM_LNA] = 0x0026, [EEPROM_EXT_LNA2] = 0x0027, [EEPROM_RSSI_BG] = 0x0028, [EEPROM_RSSI_BG2] = 0x0029, [EEPROM_RSSI_A] = 0x002a, [EEPROM_RSSI_A2] = 0x002b, [EEPROM_TXPOWER_BG1] = 0x0030, [EEPROM_TXPOWER_BG2] = 0x0037, [EEPROM_EXT_TXPOWER_BG3] = 0x003e, [EEPROM_TSSI_BOUND_BG1] = 0x0045, [EEPROM_TSSI_BOUND_BG2] = 0x0046, [EEPROM_TSSI_BOUND_BG3] = 0x0047, [EEPROM_TSSI_BOUND_BG4] = 0x0048, [EEPROM_TSSI_BOUND_BG5] = 0x0049, [EEPROM_TXPOWER_A1] = 0x004b, [EEPROM_TXPOWER_A2] = 0x0065, [EEPROM_EXT_TXPOWER_A3] = 0x007f, [EEPROM_TSSI_BOUND_A1] = 0x009a, [EEPROM_TSSI_BOUND_A2] = 0x009b, [EEPROM_TSSI_BOUND_A3] = 0x009c, [EEPROM_TSSI_BOUND_A4] = 0x009d, [EEPROM_TSSI_BOUND_A5] = 0x009e, [EEPROM_TXPOWER_BYRATE] = 0x00a0, }; static unsigned int rt2800_eeprom_word_index(struct rt2x00_dev *rt2x00dev, const enum rt2800_eeprom_word word) { const unsigned int *map; unsigned int index; if (WARN_ONCE(word >= EEPROM_WORD_COUNT, "%s: invalid EEPROM word %d\n", wiphy_name(rt2x00dev->hw->wiphy), word)) return 0; if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) map = rt2800_eeprom_map_ext; else map = rt2800_eeprom_map; index = map[word]; /* Index 0 is valid only for EEPROM_CHIP_ID. * Otherwise it means that the offset of the * given word is not initialized in the map, * or that the field is not usable on the * actual chipset. */ WARN_ONCE(word != EEPROM_CHIP_ID && index == 0, "%s: invalid access of EEPROM word %d\n", wiphy_name(rt2x00dev->hw->wiphy), word); return index; } static void *rt2800_eeprom_addr(struct rt2x00_dev *rt2x00dev, const enum rt2800_eeprom_word word) { unsigned int index; index = rt2800_eeprom_word_index(rt2x00dev, word); return rt2x00_eeprom_addr(rt2x00dev, index); } static u16 rt2800_eeprom_read(struct rt2x00_dev *rt2x00dev, const enum rt2800_eeprom_word word) { unsigned int index; index = rt2800_eeprom_word_index(rt2x00dev, word); return rt2x00_eeprom_read(rt2x00dev, index); } static void rt2800_eeprom_write(struct rt2x00_dev *rt2x00dev, const enum rt2800_eeprom_word word, u16 data) { unsigned int index; index = rt2800_eeprom_word_index(rt2x00dev, word); rt2x00_eeprom_write(rt2x00dev, index, data); } static u16 rt2800_eeprom_read_from_array(struct rt2x00_dev *rt2x00dev, const enum rt2800_eeprom_word array, unsigned int offset) { unsigned int index; index = rt2800_eeprom_word_index(rt2x00dev, array); return rt2x00_eeprom_read(rt2x00dev, index + offset); } static int rt2800_enable_wlan_rt3290(struct rt2x00_dev *rt2x00dev) { u32 reg; int i, count; reg = rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL); rt2x00_set_field32(&reg, WLAN_GPIO_OUT_OE_BIT_ALL, 0xff); rt2x00_set_field32(&reg, FRC_WL_ANT_SET, 1); rt2x00_set_field32(&reg, WLAN_CLK_EN, 0); rt2x00_set_field32(&reg, WLAN_EN, 1); rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); udelay(REGISTER_BUSY_DELAY); count = 0; do { /* * Check PLL_LD & XTAL_RDY. 
*/ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { reg = rt2800_register_read(rt2x00dev, CMB_CTRL); if (rt2x00_get_field32(reg, PLL_LD) && rt2x00_get_field32(reg, XTAL_RDY)) break; udelay(REGISTER_BUSY_DELAY); } if (i >= REGISTER_BUSY_COUNT) { if (count >= 10) return -EIO; rt2800_register_write(rt2x00dev, 0x58, 0x018); udelay(REGISTER_BUSY_DELAY); rt2800_register_write(rt2x00dev, 0x58, 0x418); udelay(REGISTER_BUSY_DELAY); rt2800_register_write(rt2x00dev, 0x58, 0x618); udelay(REGISTER_BUSY_DELAY); count++; } else { count = 0; } reg = rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL); rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 0); rt2x00_set_field32(&reg, WLAN_CLK_EN, 1); rt2x00_set_field32(&reg, WLAN_RESET, 1); rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); udelay(10); rt2x00_set_field32(&reg, WLAN_RESET, 0); rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); udelay(10); rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, 0x7fffffff); } while (count != 0); return 0; } void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev, const u8 command, const u8 token, const u8 arg0, const u8 arg1) { u32 reg; /* * SOC devices don't support MCU requests. */ if (rt2x00_is_soc(rt2x00dev)) return; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the MCU becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_MCU(rt2x00dev, &reg)) { rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1); rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token); rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0); rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1); rt2800_register_write_lock(rt2x00dev, H2M_MAILBOX_CSR, reg); reg = 0; rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command); rt2800_register_write_lock(rt2x00dev, HOST_CMD_CSR, reg); } mutex_unlock(&rt2x00dev->csr_mutex); } EXPORT_SYMBOL_GPL(rt2800_mcu_request); int rt2800_wait_csr_ready(struct rt2x00_dev *rt2x00dev) { unsigned int i = 0; u32 reg; for (i = 0; i < REGISTER_BUSY_COUNT; i++) { reg = rt2800_register_read(rt2x00dev, MAC_CSR0); if (reg && reg != ~0) return 0; msleep(1); } rt2x00_err(rt2x00dev, "Unstable hardware\n"); return -EBUSY; } EXPORT_SYMBOL_GPL(rt2800_wait_csr_ready); int rt2800_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev) { unsigned int i; u32 reg; /* * Some devices are really slow to respond here. Wait a whole second * before timing out. 
*/ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { reg = rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG); if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) && !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY)) return 0; msleep(10); } rt2x00_err(rt2x00dev, "WPDMA TX/RX busy [0x%08x]\n", reg); return -EACCES; } EXPORT_SYMBOL_GPL(rt2800_wait_wpdma_ready); void rt2800_disable_wpdma(struct rt2x00_dev *rt2x00dev) { u32 reg; reg = rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); } EXPORT_SYMBOL_GPL(rt2800_disable_wpdma); void rt2800_get_txwi_rxwi_size(struct rt2x00_dev *rt2x00dev, unsigned short *txwi_size, unsigned short *rxwi_size) { switch (rt2x00dev->chip.rt) { case RT3593: case RT3883: *txwi_size = TXWI_DESC_SIZE_4WORDS; *rxwi_size = RXWI_DESC_SIZE_5WORDS; break; case RT5592: case RT6352: *txwi_size = TXWI_DESC_SIZE_5WORDS; *rxwi_size = RXWI_DESC_SIZE_6WORDS; break; default: *txwi_size = TXWI_DESC_SIZE_4WORDS; *rxwi_size = RXWI_DESC_SIZE_4WORDS; break; } } EXPORT_SYMBOL_GPL(rt2800_get_txwi_rxwi_size); static bool rt2800_check_firmware_crc(const u8 *data, const size_t len) { u16 fw_crc; u16 crc; /* * The last 2 bytes in the firmware array are the crc checksum itself, * this means that we should never pass those 2 bytes to the crc * algorithm. */ fw_crc = (data[len - 2] << 8 | data[len - 1]); /* * Use the crc ccitt algorithm. * This will return the same value as the legacy driver which * used bit ordering reversion on the both the firmware bytes * before input input as well as on the final output. * Obviously using crc ccitt directly is much more efficient. */ crc = crc_ccitt(~0, data, len - 2); /* * There is a small difference between the crc-itu-t + bitrev and * the crc-ccitt crc calculation. In the latter method the 2 bytes * will be swapped, use swab16 to convert the crc to the correct * value. */ crc = swab16(crc); return fw_crc == crc; } int rt2800_check_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { size_t offset = 0; size_t fw_len; bool multiple; /* * PCI(e) & SOC devices require firmware with a length * of 8kb. USB devices require firmware files with a length * of 4kb. Certain USB chipsets however require different firmware, * which Ralink only provides attached to the original firmware * file. Thus for USB devices, firmware files have a length * which is a multiple of 4kb. The firmware for rt3290 chip also * have a length which is a multiple of 4kb. */ if (rt2x00_is_usb(rt2x00dev) || rt2x00_rt(rt2x00dev, RT3290)) fw_len = 4096; else fw_len = 8192; multiple = true; /* * Validate the firmware length */ if (len != fw_len && (!multiple || (len % fw_len) != 0)) return FW_BAD_LENGTH; /* * Check if the chipset requires one of the upper parts * of the firmware. */ if (rt2x00_is_usb(rt2x00dev) && !rt2x00_rt(rt2x00dev, RT2860) && !rt2x00_rt(rt2x00dev, RT2872) && !rt2x00_rt(rt2x00dev, RT3070) && ((len / fw_len) == 1)) return FW_BAD_VERSION; /* * 8kb firmware files must be checked as if it were * 2 separate firmware files. 
*/ while (offset < len) { if (!rt2800_check_firmware_crc(data + offset, fw_len)) return FW_BAD_CRC; offset += fw_len; } return FW_OK; } EXPORT_SYMBOL_GPL(rt2800_check_firmware); int rt2800_load_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { unsigned int i; u32 reg; int retval; if (rt2x00_rt(rt2x00dev, RT3290)) { retval = rt2800_enable_wlan_rt3290(rt2x00dev); if (retval) return -EBUSY; } /* * If driver doesn't wake up firmware here, * rt2800_load_firmware will hang forever when interface is up again. */ rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000); /* * Wait for stable hardware. */ if (rt2800_wait_csr_ready(rt2x00dev)) return -EBUSY; if (rt2x00_is_pci(rt2x00dev)) { if (rt2x00_rt(rt2x00dev, RT3290) || rt2x00_rt(rt2x00dev, RT3572) || rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392)) { reg = rt2800_register_read(rt2x00dev, AUX_CTRL); rt2x00_set_field32(&reg, AUX_CTRL_FORCE_PCIE_CLK, 1); rt2x00_set_field32(&reg, AUX_CTRL_WAKE_PCIE_EN, 1); rt2800_register_write(rt2x00dev, AUX_CTRL, reg); } rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002); } rt2800_disable_wpdma(rt2x00dev); /* * Write firmware to the device. */ rt2800_drv_write_firmware(rt2x00dev, data, len); /* * Wait for device to stabilize. */ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { reg = rt2800_register_read(rt2x00dev, PBF_SYS_CTRL); if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY)) break; msleep(1); } if (i == REGISTER_BUSY_COUNT) { rt2x00_err(rt2x00dev, "PBF system register not ready\n"); return -EBUSY; } /* * Disable DMA, will be reenabled later when enabling * the radio. */ rt2800_disable_wpdma(rt2x00dev); /* * Initialize firmware. */ rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); if (rt2x00_is_usb(rt2x00dev)) { rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0); rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0); } msleep(1); return 0; } EXPORT_SYMBOL_GPL(rt2800_load_firmware); void rt2800_write_tx_data(struct queue_entry *entry, struct txentry_desc *txdesc) { __le32 *txwi = rt2800_drv_get_txwi(entry); u32 word; int i; /* * Initialize TX Info descriptor */ word = rt2x00_desc_read(txwi, 0); rt2x00_set_field32(&word, TXWI_W0_FRAG, test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, test_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags)); rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0); rt2x00_set_field32(&word, TXWI_W0_TS, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXWI_W0_AMPDU, test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags)); rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->u.ht.mpdu_density); rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->u.ht.txop); rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->u.ht.mcs); rt2x00_set_field32(&word, TXWI_W0_BW, test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags)); rt2x00_set_field32(&word, TXWI_W0_SHORT_GI, test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags)); rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->u.ht.stbc); rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode); rt2x00_desc_write(txwi, 0, word); word = rt2x00_desc_read(txwi, 1); rt2x00_set_field32(&word, TXWI_W1_ACK, test_bit(ENTRY_TXD_ACK, &txdesc->flags)); rt2x00_set_field32(&word, TXWI_W1_NSEQ, test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->u.ht.ba_size); rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID, test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ? 
txdesc->key_idx : txdesc->u.ht.wcid); rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT, txdesc->length); rt2x00_set_field32(&word, TXWI_W1_PACKETID_QUEUE, entry->queue->qid); rt2x00_set_field32(&word, TXWI_W1_PACKETID_ENTRY, (entry->entry_idx % 3) + 1); rt2x00_desc_write(txwi, 1, word); /* * Always write 0 to IV/EIV fields (word 2 and 3), hardware will insert * the IV from the IVEIV register when TXD_W3_WIV is set to 0. * When TXD_W3_WIV is set to 1 it will use the IV data * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which * crypto entry in the registers should be used to encrypt the frame. * * Nulify all remaining words as well, we don't know how to program them. */ for (i = 2; i < entry->queue->winfo_size / sizeof(__le32); i++) _rt2x00_desc_write(txwi, i, 0); } EXPORT_SYMBOL_GPL(rt2800_write_tx_data); static int rt2800_agc_to_rssi(struct rt2x00_dev *rt2x00dev, u32 rxwi_w2) { s8 rssi0 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI0); s8 rssi1 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI1); s8 rssi2 = rt2x00_get_field32(rxwi_w2, RXWI_W2_RSSI2); u16 eeprom; u8 offset0; u8 offset1; u8 offset2; if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG); offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET0); offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG_OFFSET1); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2); offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_OFFSET2); } else { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A); offset0 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET0); offset1 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A_OFFSET1); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2); offset2 = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_OFFSET2); } /* * Convert the value from the descriptor into the RSSI value * If the value in the descriptor is 0, it is considered invalid * and the default (extremely low) rssi value is assumed */ rssi0 = (rssi0) ? (-12 - offset0 - rt2x00dev->lna_gain - rssi0) : -128; rssi1 = (rssi1) ? (-12 - offset1 - rt2x00dev->lna_gain - rssi1) : -128; rssi2 = (rssi2) ? (-12 - offset2 - rt2x00dev->lna_gain - rssi2) : -128; /* * mac80211 only accepts a single RSSI value. Calculating the * average doesn't deliver a fair answer either since -60:-60 would * be considered equally good as -50:-70 while the second is the one * which gives less energy... */ rssi0 = max(rssi0, rssi1); return (int)max(rssi0, rssi2); } void rt2800_process_rxwi(struct queue_entry *entry, struct rxdone_entry_desc *rxdesc) { __le32 *rxwi = (__le32 *) entry->skb->data; u32 word; word = rt2x00_desc_read(rxwi, 0); rxdesc->cipher = rt2x00_get_field32(word, RXWI_W0_UDF); rxdesc->size = rt2x00_get_field32(word, RXWI_W0_MPDU_TOTAL_BYTE_COUNT); word = rt2x00_desc_read(rxwi, 1); if (rt2x00_get_field32(word, RXWI_W1_SHORT_GI)) rxdesc->enc_flags |= RX_ENC_FLAG_SHORT_GI; if (rt2x00_get_field32(word, RXWI_W1_BW)) rxdesc->bw = RATE_INFO_BW_40; /* * Detect RX rate, always use MCS as signal type. */ rxdesc->dev_flags |= RXDONE_SIGNAL_MCS; rxdesc->signal = rt2x00_get_field32(word, RXWI_W1_MCS); rxdesc->rate_mode = rt2x00_get_field32(word, RXWI_W1_PHYMODE); /* * Mask of 0x8 bit to remove the short preamble flag. */ if (rxdesc->rate_mode == RATE_MODE_CCK) rxdesc->signal &= ~0x8; word = rt2x00_desc_read(rxwi, 2); /* * Convert descriptor AGC value to RSSI value. */ rxdesc->rssi = rt2800_agc_to_rssi(entry->queue->rt2x00dev, word); /* * Remove RXWI descriptor from start of the buffer. 
*/ skb_pull(entry->skb, entry->queue->winfo_size); } EXPORT_SYMBOL_GPL(rt2800_process_rxwi); static void rt2800_rate_from_status(struct skb_frame_desc *skbdesc, u32 status, enum nl80211_band band) { u8 flags = 0; u8 idx = rt2x00_get_field32(status, TX_STA_FIFO_MCS); switch (rt2x00_get_field32(status, TX_STA_FIFO_PHYMODE)) { case RATE_MODE_HT_GREENFIELD: flags |= IEEE80211_TX_RC_GREEN_FIELD; fallthrough; case RATE_MODE_HT_MIX: flags |= IEEE80211_TX_RC_MCS; break; case RATE_MODE_OFDM: if (band == NL80211_BAND_2GHZ) idx += 4; break; case RATE_MODE_CCK: if (idx >= 8) idx -= 8; break; } if (rt2x00_get_field32(status, TX_STA_FIFO_BW)) flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; if (rt2x00_get_field32(status, TX_STA_FIFO_SGI)) flags |= IEEE80211_TX_RC_SHORT_GI; skbdesc->tx_rate_idx = idx; skbdesc->tx_rate_flags = flags; } static bool rt2800_txdone_entry_check(struct queue_entry *entry, u32 reg) { __le32 *txwi; u32 word; int wcid, ack, pid; int tx_wcid, tx_ack, tx_pid, is_agg; /* * This frame has returned with an IO error, * so the status report is not intended for this * frame. */ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) return false; wcid = rt2x00_get_field32(reg, TX_STA_FIFO_WCID); ack = rt2x00_get_field32(reg, TX_STA_FIFO_TX_ACK_REQUIRED); pid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE); is_agg = rt2x00_get_field32(reg, TX_STA_FIFO_TX_AGGRE); /* * Validate if this TX status report is intended for * this entry by comparing the WCID/ACK/PID fields. */ txwi = rt2800_drv_get_txwi(entry); word = rt2x00_desc_read(txwi, 1); tx_wcid = rt2x00_get_field32(word, TXWI_W1_WIRELESS_CLI_ID); tx_ack = rt2x00_get_field32(word, TXWI_W1_ACK); tx_pid = rt2x00_get_field32(word, TXWI_W1_PACKETID); if (wcid != tx_wcid || ack != tx_ack || (!is_agg && pid != tx_pid)) { rt2x00_dbg(entry->queue->rt2x00dev, "TX status report missed for queue %d entry %d\n", entry->queue->qid, entry->entry_idx); return false; } return true; } void rt2800_txdone_entry(struct queue_entry *entry, u32 status, __le32 *txwi, bool match) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); struct txdone_entry_desc txdesc; u32 word; u16 mcs, real_mcs; int aggr, ampdu, wcid, ack_req; /* * Obtain the status about this packet. */ txdesc.flags = 0; word = rt2x00_desc_read(txwi, 0); mcs = rt2x00_get_field32(word, TXWI_W0_MCS); ampdu = rt2x00_get_field32(word, TXWI_W0_AMPDU); real_mcs = rt2x00_get_field32(status, TX_STA_FIFO_MCS); aggr = rt2x00_get_field32(status, TX_STA_FIFO_TX_AGGRE); wcid = rt2x00_get_field32(status, TX_STA_FIFO_WCID); ack_req = rt2x00_get_field32(status, TX_STA_FIFO_TX_ACK_REQUIRED); /* * If a frame was meant to be sent as a single non-aggregated MPDU * but ended up in an aggregate the used tx rate doesn't correlate * with the one specified in the TXWI as the whole aggregate is sent * with the same rate. * * For example: two frames are sent to rt2x00, the first one sets * AMPDU=1 and requests MCS7 whereas the second frame sets AMPDU=0 * and requests MCS15. If the hw aggregates both frames into one * AMPDU the tx status for both frames will contain MCS7 although * the frame was sent successfully. * * Hence, replace the requested rate with the real tx rate to not * confuse the rate control algorithm by providing clearly wrong * data. * * FIXME: if we do not find a matching entry, we tell that the frame was * posted without any retries. We need to find a way to fix that * and provide the retry count. 
*/ if (unlikely((aggr == 1 && ampdu == 0 && real_mcs != mcs)) || !match) { rt2800_rate_from_status(skbdesc, status, rt2x00dev->curr_band); mcs = real_mcs; } if (aggr == 1 || ampdu == 1) __set_bit(TXDONE_AMPDU, &txdesc.flags); if (!ack_req) __set_bit(TXDONE_NO_ACK_REQ, &txdesc.flags); /* * Ralink has a retry mechanism using a global fallback * table. We setup this fallback table to try the immediate * lower rate for all rates. In the TX_STA_FIFO, the MCS field * always contains the MCS used for the last transmission, be * it successful or not. */ if (rt2x00_get_field32(status, TX_STA_FIFO_TX_SUCCESS)) { /* * Transmission succeeded. The number of retries is * mcs - real_mcs */ __set_bit(TXDONE_SUCCESS, &txdesc.flags); txdesc.retry = ((mcs > real_mcs) ? mcs - real_mcs : 0); } else { /* * Transmission failed. The number of retries is * always 7 in this case (for a total number of 8 * frames sent). */ __set_bit(TXDONE_FAILURE, &txdesc.flags); txdesc.retry = rt2x00dev->long_retry; } /* * the frame was retried at least once * -> hw used fallback rates */ if (txdesc.retry) __set_bit(TXDONE_FALLBACK, &txdesc.flags); if (!match) { /* RCU assures non-null sta will not be freed by mac80211. */ rcu_read_lock(); if (likely(wcid >= WCID_START && wcid <= WCID_END)) skbdesc->sta = drv_data->wcid_to_sta[wcid - WCID_START]; else skbdesc->sta = NULL; rt2x00lib_txdone_nomatch(entry, &txdesc); rcu_read_unlock(); } else { rt2x00lib_txdone(entry, &txdesc); } } EXPORT_SYMBOL_GPL(rt2800_txdone_entry); void rt2800_txdone(struct rt2x00_dev *rt2x00dev, unsigned int quota) { struct data_queue *queue; struct queue_entry *entry; u32 reg; u8 qid; bool match; while (quota-- > 0 && kfifo_get(&rt2x00dev->txstatus_fifo, &reg)) { /* * TX_STA_FIFO_PID_QUEUE is a 2-bit field, thus qid is * guaranteed to be one of the TX QIDs . 
*/ qid = rt2x00_get_field32(reg, TX_STA_FIFO_PID_QUEUE); queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); if (unlikely(rt2x00queue_empty(queue))) { rt2x00_dbg(rt2x00dev, "Got TX status for an empty queue %u, dropping\n", qid); break; } entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); if (unlikely(test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))) { rt2x00_warn(rt2x00dev, "Data pending for entry %u in queue %u\n", entry->entry_idx, qid); break; } match = rt2800_txdone_entry_check(entry, reg); rt2800_txdone_entry(entry, reg, rt2800_drv_get_txwi(entry), match); } } EXPORT_SYMBOL_GPL(rt2800_txdone); static inline bool rt2800_entry_txstatus_timeout(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry) { bool ret; unsigned long tout; if (!test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) return false; if (test_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags)) tout = msecs_to_jiffies(50); else tout = msecs_to_jiffies(2000); ret = time_after(jiffies, entry->last_action + tout); if (unlikely(ret)) rt2x00_dbg(entry->queue->rt2x00dev, "TX status timeout for entry %d in queue %d\n", entry->entry_idx, entry->queue->qid); return ret; } bool rt2800_txstatus_timeout(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; struct queue_entry *entry; tx_queue_for_each(rt2x00dev, queue) { entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); if (rt2800_entry_txstatus_timeout(rt2x00dev, entry)) return true; } return false; } EXPORT_SYMBOL_GPL(rt2800_txstatus_timeout); /* * test if there is an entry in any TX queue for which DMA is done * but the TX status has not been returned yet */ bool rt2800_txstatus_pending(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; tx_queue_for_each(rt2x00dev, queue) { if (rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE) != rt2x00queue_get_entry(queue, Q_INDEX_DONE)) return true; } return false; } EXPORT_SYMBOL_GPL(rt2800_txstatus_pending); void rt2800_txdone_nostatus(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; struct queue_entry *entry; /* * Process any trailing TX status reports for IO failures, * we loop until we find the first non-IO error entry. This * can either be a frame which is free, is being uploaded, * or has completed the upload but didn't have an entry * in the TX_STAT_FIFO register yet. 
*/ tx_queue_for_each(rt2x00dev, queue) { while (!rt2x00queue_empty(queue)) { entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) break; if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags) || rt2800_entry_txstatus_timeout(rt2x00dev, entry)) rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); else break; } } } EXPORT_SYMBOL_GPL(rt2800_txdone_nostatus); static int rt2800_check_hung(struct data_queue *queue) { unsigned int cur_idx = rt2800_drv_get_dma_done(queue); if (queue->wd_idx != cur_idx) queue->wd_count = 0; else queue->wd_count++; return queue->wd_count > 16; } static void rt2800_update_survey(struct rt2x00_dev *rt2x00dev) { struct ieee80211_channel *chan = rt2x00dev->hw->conf.chandef.chan; struct rt2x00_chan_survey *chan_survey = &rt2x00dev->chan_survey[chan->hw_value]; chan_survey->time_idle += rt2800_register_read(rt2x00dev, CH_IDLE_STA); chan_survey->time_busy += rt2800_register_read(rt2x00dev, CH_BUSY_STA); chan_survey->time_ext_busy += rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC); } void rt2800_watchdog(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; bool hung_tx = false; bool hung_rx = false; if (test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) return; rt2800_update_survey(rt2x00dev); queue_for_each(rt2x00dev, queue) { switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: case QID_MGMT: if (rt2x00queue_empty(queue)) continue; hung_tx = rt2800_check_hung(queue); break; case QID_RX: /* For station mode we should reactive at least * beacons. TODO: need to find good way detect * RX hung for AP mode. */ if (rt2x00dev->intf_sta_count == 0) continue; hung_rx = rt2800_check_hung(queue); break; default: break; } } if (hung_tx) rt2x00_warn(rt2x00dev, "Watchdog TX hung detected\n"); if (hung_rx) rt2x00_warn(rt2x00dev, "Watchdog RX hung detected\n"); if (hung_tx || hung_rx) ieee80211_restart_hw(rt2x00dev->hw); } EXPORT_SYMBOL_GPL(rt2800_watchdog); static unsigned int rt2800_hw_beacon_base(struct rt2x00_dev *rt2x00dev, unsigned int index) { return HW_BEACON_BASE(index); } static inline u8 rt2800_get_beacon_offset(struct rt2x00_dev *rt2x00dev, unsigned int index) { return BEACON_BASE_TO_OFFSET(rt2800_hw_beacon_base(rt2x00dev, index)); } static void rt2800_update_beacons_setup(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue = rt2x00dev->bcn; struct queue_entry *entry; int i, bcn_num = 0; u64 off, reg = 0; u32 bssid_dw1; /* * Setup offsets of all active beacons in BCN_OFFSET{0,1} registers. */ for (i = 0; i < queue->limit; i++) { entry = &queue->entries[i]; if (!test_bit(ENTRY_BCN_ENABLED, &entry->flags)) continue; off = rt2800_get_beacon_offset(rt2x00dev, entry->entry_idx); reg |= off << (8 * bcn_num); bcn_num++; } rt2800_register_write(rt2x00dev, BCN_OFFSET0, (u32) reg); rt2800_register_write(rt2x00dev, BCN_OFFSET1, (u32) (reg >> 32)); /* * H/W sends up to MAC_BSSID_DW1_BSS_BCN_NUM + 1 consecutive beacons. */ bssid_dw1 = rt2800_register_read(rt2x00dev, MAC_BSSID_DW1); rt2x00_set_field32(&bssid_dw1, MAC_BSSID_DW1_BSS_BCN_NUM, bcn_num > 0 ? 
bcn_num - 1 : 0); rt2800_register_write(rt2x00dev, MAC_BSSID_DW1, bssid_dw1); } void rt2800_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); unsigned int beacon_base; unsigned int padding_len; u32 orig_reg, reg; const int txwi_desc_size = entry->queue->winfo_size; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ reg = rt2800_register_read(rt2x00dev, BCN_TIME_CFG); orig_reg = reg; rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); /* * Add space for the TXWI in front of the skb. */ memset(skb_push(entry->skb, txwi_desc_size), 0, txwi_desc_size); /* * Register descriptor details in skb frame descriptor. */ skbdesc->flags |= SKBDESC_DESC_IN_SKB; skbdesc->desc = entry->skb->data; skbdesc->desc_len = txwi_desc_size; /* * Add the TXWI for the beacon to the skb. */ rt2800_write_tx_data(entry, txdesc); /* * Dump beacon to userspace through debugfs. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); /* * Write entire beacon with TXWI and padding to register. */ padding_len = roundup(entry->skb->len, 4) - entry->skb->len; if (padding_len && skb_pad(entry->skb, padding_len)) { rt2x00_err(rt2x00dev, "Failure padding beacon, aborting\n"); /* skb freed by skb_pad() on failure */ entry->skb = NULL; rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg); return; } beacon_base = rt2800_hw_beacon_base(rt2x00dev, entry->entry_idx); rt2800_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, entry->skb->len + padding_len); __set_bit(ENTRY_BCN_ENABLED, &entry->flags); /* * Change global beacons settings. */ rt2800_update_beacons_setup(rt2x00dev); /* * Restore beaconing state. */ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg); /* * Clean up beacon skb. */ dev_kfree_skb_any(entry->skb); entry->skb = NULL; } EXPORT_SYMBOL_GPL(rt2800_write_beacon); static inline void rt2800_clear_beacon_register(struct rt2x00_dev *rt2x00dev, unsigned int index) { int i; const int txwi_desc_size = rt2x00dev->bcn->winfo_size; unsigned int beacon_base; beacon_base = rt2800_hw_beacon_base(rt2x00dev, index); /* * For the Beacon base registers we only need to clear * the whole TXWI which (when set to 0) will invalidate * the entire beacon. */ for (i = 0; i < txwi_desc_size; i += sizeof(__le32)) rt2800_register_write(rt2x00dev, beacon_base + i, 0); } void rt2800_clear_beacon(struct queue_entry *entry) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; u32 orig_reg, reg; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ orig_reg = rt2800_register_read(rt2x00dev, BCN_TIME_CFG); reg = orig_reg; rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); /* * Clear beacon. */ rt2800_clear_beacon_register(rt2x00dev, entry->entry_idx); __clear_bit(ENTRY_BCN_ENABLED, &entry->flags); /* * Change global beacons settings. */ rt2800_update_beacons_setup(rt2x00dev); /* * Restore beaconing state. 
*/ rt2800_register_write(rt2x00dev, BCN_TIME_CFG, orig_reg); } EXPORT_SYMBOL_GPL(rt2800_clear_beacon); #ifdef CONFIG_RT2X00_LIB_DEBUGFS const struct rt2x00debug rt2800_rt2x00debug = { .owner = THIS_MODULE, .csr = { .read = rt2800_register_read, .write = rt2800_register_write, .flags = RT2X00DEBUGFS_OFFSET, .word_base = CSR_REG_BASE, .word_size = sizeof(u32), .word_count = CSR_REG_SIZE / sizeof(u32), }, .eeprom = { /* NOTE: The local EEPROM access functions can't * be used here, use the generic versions instead. */ .read = rt2x00_eeprom_read, .write = rt2x00_eeprom_write, .word_base = EEPROM_BASE, .word_size = sizeof(u16), .word_count = EEPROM_SIZE / sizeof(u16), }, .bbp = { .read = rt2800_bbp_read, .write = rt2800_bbp_write, .word_base = BBP_BASE, .word_size = sizeof(u8), .word_count = BBP_SIZE / sizeof(u8), }, .rf = { .read = rt2x00_rf_read, .write = rt2800_rf_write, .word_base = RF_BASE, .word_size = sizeof(u32), .word_count = RF_SIZE / sizeof(u32), }, .rfcsr = { .read = rt2800_rfcsr_read, .write = rt2800_rfcsr_write, .word_base = RFCSR_BASE, .word_size = sizeof(u8), .word_count = RFCSR_SIZE / sizeof(u8), }, }; EXPORT_SYMBOL_GPL(rt2800_rt2x00debug); #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev) { u32 reg; if (rt2x00_rt(rt2x00dev, RT3290)) { reg = rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL); return rt2x00_get_field32(reg, WLAN_GPIO_IN_BIT0); } else { reg = rt2800_register_read(rt2x00dev, GPIO_CTRL); return rt2x00_get_field32(reg, GPIO_CTRL_VAL2); } } EXPORT_SYMBOL_GPL(rt2800_rfkill_poll); #ifdef CONFIG_RT2X00_LIB_LEDS static void rt2800_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; unsigned int bg_mode = (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ); unsigned int polarity = rt2x00_get_field16(led->rt2x00dev->led_mcu_reg, EEPROM_FREQ_LED_POLARITY); unsigned int ledmode = rt2x00_get_field16(led->rt2x00dev->led_mcu_reg, EEPROM_FREQ_LED_MODE); u32 reg; /* Check for SoC (SOC devices don't support MCU requests) */ if (rt2x00_is_soc(led->rt2x00dev)) { reg = rt2800_register_read(led->rt2x00dev, LED_CFG); /* Set LED Polarity */ rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, polarity); /* Set LED Mode */ if (led->type == LED_TYPE_RADIO) { rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, enabled ? 3 : 0); } else if (led->type == LED_TYPE_ASSOC) { rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, enabled ? 3 : 0); } else if (led->type == LED_TYPE_QUALITY) { rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, enabled ? 3 : 0); } rt2800_register_write(led->rt2x00dev, LED_CFG, reg); } else { if (led->type == LED_TYPE_RADIO) { rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode, enabled ? 0x20 : 0); } else if (led->type == LED_TYPE_ASSOC) { rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode, enabled ? (bg_mode ? 
0x60 : 0xa0) : 0x20); } else if (led->type == LED_TYPE_QUALITY) { /* * The brightness is divided into 6 levels (0 - 5). * The specs tell us the following values for these levels: * 0, 1, 3, 7, 15, 31 * so the level can be determined with a simple bitshift: * (1 << level) - 1 */ rt2800_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff, (1 << brightness / (LED_FULL / 6)) - 1, polarity); } } } static void rt2800_init_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, enum led_type type) { led->rt2x00dev = rt2x00dev; led->type = type; led->led_dev.brightness_set = rt2800_brightness_set; led->flags = LED_INITIALIZED; } #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Configuration handlers. */ static void rt2800_config_wcid(struct rt2x00_dev *rt2x00dev, const u8 *address, int wcid) { struct mac_wcid_entry wcid_entry; u32 offset; offset = MAC_WCID_ENTRY(wcid); memset(&wcid_entry, 0xff, sizeof(wcid_entry)); if (address) memcpy(wcid_entry.mac, address, ETH_ALEN); rt2800_register_multiwrite(rt2x00dev, offset, &wcid_entry, sizeof(wcid_entry)); } static void rt2800_delete_wcid_attr(struct rt2x00_dev *rt2x00dev, int wcid) { u32 offset; offset = MAC_WCID_ATTR_ENTRY(wcid); rt2800_register_write(rt2x00dev, offset, 0); } static void rt2800_config_wcid_attr_bssidx(struct rt2x00_dev *rt2x00dev, int wcid, u32 bssidx) { u32 offset = MAC_WCID_ATTR_ENTRY(wcid); u32 reg; /* * The BSS Idx number is split into a main value of 3 bits * and an extended field that adds one additional bit to the value. */ reg = rt2800_register_read(rt2x00dev, offset); rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX, (bssidx & 0x7)); rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX_EXT, (bssidx & 0x8) >> 3); rt2800_register_write(rt2x00dev, offset, reg); } static void rt2800_config_wcid_attr_cipher(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { struct mac_iveiv_entry iveiv_entry; u32 offset; u32 reg; offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx); if (crypto->cmd == SET_KEY) { reg = rt2800_register_read(rt2x00dev, offset); rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB, !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)); /* * Both the cipher and the BSS Idx numbers are split into a main * value of 3 bits and an extended field that adds one additional * bit to the value.
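 * For example, a 4-bit cipher value c is stored as (c & 0x7) in the
 * CIPHER field and (c & 0x8) >> 3 in the CIPHER_EXT field below.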
*/ rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER, (crypto->cipher & 0x7)); rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER_EXT, (crypto->cipher & 0x8) >> 3); rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher); rt2800_register_write(rt2x00dev, offset, reg); } else { /* Delete the cipher without touching the bssidx */ reg = rt2800_register_read(rt2x00dev, offset); rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB, 0); rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER, 0); rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER_EXT, 0); rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, 0); rt2800_register_write(rt2x00dev, offset, reg); } if (test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags)) return; offset = MAC_IVEIV_ENTRY(key->hw_key_idx); memset(&iveiv_entry, 0, sizeof(iveiv_entry)); if ((crypto->cipher == CIPHER_TKIP) || (crypto->cipher == CIPHER_TKIP_NO_MIC) || (crypto->cipher == CIPHER_AES)) iveiv_entry.iv[3] |= 0x20; iveiv_entry.iv[3] |= key->keyidx << 6; rt2800_register_multiwrite(rt2x00dev, offset, &iveiv_entry, sizeof(iveiv_entry)); } int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { struct hw_key_entry key_entry; struct rt2x00_field32 field; u32 offset; u32 reg; if (crypto->cmd == SET_KEY) { key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx; memcpy(key_entry.key, crypto->key, sizeof(key_entry.key)); memcpy(key_entry.tx_mic, crypto->tx_mic, sizeof(key_entry.tx_mic)); memcpy(key_entry.rx_mic, crypto->rx_mic, sizeof(key_entry.rx_mic)); offset = SHARED_KEY_ENTRY(key->hw_key_idx); rt2800_register_multiwrite(rt2x00dev, offset, &key_entry, sizeof(key_entry)); } /* * The cipher types are stored over multiple registers starting * with SHARED_KEY_MODE_BASE; each 32-bit word holds the cipher * types for 2 bssidx. Using the field defines here would only * add overhead, so just calculate the correct offset directly. */ field.bit_offset = 4 * (key->hw_key_idx % 8); field.bit_mask = 0x7 << field.bit_offset; offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8); reg = rt2800_register_read(rt2x00dev, offset); rt2x00_set_field32(&reg, field, (crypto->cmd == SET_KEY) * crypto->cipher); rt2800_register_write(rt2x00dev, offset, reg); /* * Update WCID information */ rt2800_config_wcid(rt2x00dev, crypto->address, key->hw_key_idx); rt2800_config_wcid_attr_bssidx(rt2x00dev, key->hw_key_idx, crypto->bssidx); rt2800_config_wcid_attr_cipher(rt2x00dev, crypto, key); return 0; } EXPORT_SYMBOL_GPL(rt2800_config_shared_key); int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { struct hw_key_entry key_entry; u32 offset; if (crypto->cmd == SET_KEY) { /* * Allow key configuration only for STAs that are * known by the hw.
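 * A wcid above WCID_END means the STA was never uploaded to the
 * hardware by rt2800_sta_add(), so hardware key offload is refused.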
*/ if (crypto->wcid > WCID_END) return -ENOSPC; key->hw_key_idx = crypto->wcid; memcpy(key_entry.key, crypto->key, sizeof(key_entry.key)); memcpy(key_entry.tx_mic, crypto->tx_mic, sizeof(key_entry.tx_mic)); memcpy(key_entry.rx_mic, crypto->rx_mic, sizeof(key_entry.rx_mic)); offset = PAIRWISE_KEY_ENTRY(key->hw_key_idx); rt2800_register_multiwrite(rt2x00dev, offset, &key_entry, sizeof(key_entry)); } /* * Update WCID information */ rt2800_config_wcid_attr_cipher(rt2x00dev, crypto, key); return 0; } EXPORT_SYMBOL_GPL(rt2800_config_pairwise_key); static void rt2800_set_max_psdu_len(struct rt2x00_dev *rt2x00dev) { u8 i, max_psdu; u32 reg; struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; for (i = 0; i < 3; i++) if (drv_data->ampdu_factor_cnt[i] > 0) break; max_psdu = min(drv_data->max_psdu, i); reg = rt2800_register_read(rt2x00dev, MAX_LEN_CFG); rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, max_psdu); rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); } int rt2800_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); int wcid; /* * Limit the global maximum TX AMPDU length to the smallest value of * all connected stations. In AP mode this can be suboptimal, but we * do not have a choice if some connected STA is not capable of * receiving the same amount of data as the others. */ if (sta->deflink.ht_cap.ht_supported) { drv_data->ampdu_factor_cnt[sta->deflink.ht_cap.ampdu_factor & 3]++; rt2800_set_max_psdu_len(rt2x00dev); } /* * Search for the first free WCID entry and return the corresponding * index. */ wcid = find_first_zero_bit(drv_data->sta_ids, STA_IDS_SIZE) + WCID_START; /* * Store the selected wcid even if it is invalid so that we can * later decide whether the STA was uploaded into the hw. */ sta_priv->wcid = wcid; /* * No space left in the device, however, we can still communicate * with the STA -> No error. */ if (wcid > WCID_END) return 0; __set_bit(wcid - WCID_START, drv_data->sta_ids); drv_data->wcid_to_sta[wcid - WCID_START] = sta; /* * Clean up WCID attributes and write STA address to the device. */ rt2800_delete_wcid_attr(rt2x00dev, wcid); rt2800_config_wcid(rt2x00dev, sta->addr, wcid); rt2800_config_wcid_attr_bssidx(rt2x00dev, wcid, rt2x00lib_get_bssidx(rt2x00dev, vif)); return 0; } EXPORT_SYMBOL_GPL(rt2800_sta_add); int rt2800_sta_remove(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_sta *sta) { struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; struct rt2x00_sta *sta_priv = sta_to_rt2x00_sta(sta); int wcid = sta_priv->wcid; if (sta->deflink.ht_cap.ht_supported) { drv_data->ampdu_factor_cnt[sta->deflink.ht_cap.ampdu_factor & 3]--; rt2800_set_max_psdu_len(rt2x00dev); } if (wcid > WCID_END) return 0; /* * Remove WCID entry, no need to clean the attributes as they will * get renewed when the WCID is reused.
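 * Passing a NULL address below makes rt2800_config_wcid() fill the
 * entry with 0xff bytes, invalidating the stored MAC address.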
*/ rt2800_config_wcid(rt2x00dev, NULL, wcid); drv_data->wcid_to_sta[wcid - WCID_START] = NULL; __clear_bit(wcid - WCID_START, drv_data->sta_ids); return 0; } EXPORT_SYMBOL_GPL(rt2800_sta_remove); void rt2800_pre_reset_hw(struct rt2x00_dev *rt2x00dev) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; struct data_queue *queue = rt2x00dev->bcn; struct queue_entry *entry; int i, wcid; for (wcid = WCID_START; wcid < WCID_END; wcid++) { drv_data->wcid_to_sta[wcid - WCID_START] = NULL; __clear_bit(wcid - WCID_START, drv_data->sta_ids); } for (i = 0; i < queue->limit; i++) { entry = &queue->entries[i]; clear_bit(ENTRY_BCN_ASSIGNED, &entry->flags); } } EXPORT_SYMBOL_GPL(rt2800_pre_reset_hw); void rt2800_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags) { u32 reg; /* * Start configuration steps. * Note that the version error will always be dropped * and broadcast frames will always be accepted since * there is no filter for it at this time. */ reg = rt2800_register_read(rt2x00dev, RX_FILTER_CFG); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST, !(filter_flags & FIF_ALLMULTI)); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BROADCAST, 0); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_DUPLICATE, 1); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END_ACK, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_ACK, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CTS, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_RTS, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL, !(filter_flags & FIF_PSPOLL)); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 0); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL, !(filter_flags & FIF_CONTROL)); rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg); } EXPORT_SYMBOL_GPL(rt2800_config_filter); void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct rt2x00intf_conf *conf, const unsigned int flags) { u32 reg; bool update_bssid = false; if (flags & CONFIG_UPDATE_TYPE) { /* * Enable synchronisation. 
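 * conf->sync selects the TSF synchronisation mode; TSF_SYNC_AP_NONE
 * corresponds to AP-style beaconing (see the beacon queue tuning below).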
*/ reg = rt2800_register_read(rt2x00dev, BCN_TIME_CFG); rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync); rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); if (conf->sync == TSF_SYNC_AP_NONE) { /* * Tune beacon queue transmit parameters for AP mode */ reg = rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG); rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_CWMIN, 0); rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_AIFSN, 1); rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_EXP_WIN, 32); rt2x00_set_field32(&reg, TBTT_SYNC_CFG_TBTT_ADJUST, 0); rt2800_register_write(rt2x00dev, TBTT_SYNC_CFG, reg); } else { reg = rt2800_register_read(rt2x00dev, TBTT_SYNC_CFG); rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_CWMIN, 4); rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_AIFSN, 2); rt2x00_set_field32(&reg, TBTT_SYNC_CFG_BCN_EXP_WIN, 32); rt2x00_set_field32(&reg, TBTT_SYNC_CFG_TBTT_ADJUST, 16); rt2800_register_write(rt2x00dev, TBTT_SYNC_CFG, reg); } } if (flags & CONFIG_UPDATE_MAC) { if (flags & CONFIG_UPDATE_TYPE && conf->sync == TSF_SYNC_AP_NONE) { /* * The BSSID register has to be set to our own mac * address in AP mode. */ memcpy(conf->bssid, conf->mac, sizeof(conf->mac)); update_bssid = true; } if (!is_zero_ether_addr((const u8 *)conf->mac)) { reg = le32_to_cpu(conf->mac[1]); rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff); conf->mac[1] = cpu_to_le32(reg); } rt2800_register_multiwrite(rt2x00dev, MAC_ADDR_DW0, conf->mac, sizeof(conf->mac)); } if ((flags & CONFIG_UPDATE_BSSID) || update_bssid) { if (!is_zero_ether_addr((const u8 *)conf->bssid)) { reg = le32_to_cpu(conf->bssid[1]); rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 3); rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0); conf->bssid[1] = cpu_to_le32(reg); } rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0, conf->bssid, sizeof(conf->bssid)); } } EXPORT_SYMBOL_GPL(rt2800_config_intf); static void rt2800_config_ht_opmode(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp) { bool any_sta_nongf = !!(erp->ht_opmode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT); u8 protection = erp->ht_opmode & IEEE80211_HT_OP_MODE_PROTECTION; u8 mm20_mode, mm40_mode, gf20_mode, gf40_mode; u16 mm20_rate, mm40_rate, gf20_rate, gf40_rate; u32 reg; /* default protection rate for HT20: OFDM 24M */ mm20_rate = gf20_rate = 0x4004; /* default protection rate for HT40: duplicate OFDM 24M */ mm40_rate = gf40_rate = 0x4084; switch (protection) { case IEEE80211_HT_OP_MODE_PROTECTION_NONE: /* * All STAs in this BSS are HT20/40 but there might be * STAs not supporting greenfield mode. * => Disable protection for HT transmissions. */ mm20_mode = mm40_mode = gf20_mode = gf40_mode = 0; break; case IEEE80211_HT_OP_MODE_PROTECTION_20MHZ: /* * All STAs in this BSS are HT20 or HT20/40 but there * might be STAs not supporting greenfield mode. * => Protect all HT40 transmissions. */ mm20_mode = gf20_mode = 0; mm40_mode = gf40_mode = 1; break; case IEEE80211_HT_OP_MODE_PROTECTION_NONMEMBER: /* * Nonmember protection: * According to 802.11n we _should_ protect all * HT transmissions (but we don't have to). * * But if cts_protection is enabled we _shall_ protect * all HT transmissions using a CCK rate. * * And if any station is non GF we _shall_ protect * GF transmissions. * * We decide to protect everything * -> fall through to mixed mode. */ case IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED: /* * Legacy STAs are present * => Protect all HT transmissions. 
*/ mm20_mode = mm40_mode = gf20_mode = gf40_mode = 1; /* * If erp protection is needed we have to protect HT * transmissions with CCK 11M long preamble. */ if (erp->cts_protection) { /* don't duplicate RTS/CTS in CCK mode */ mm20_rate = mm40_rate = 0x0003; gf20_rate = gf40_rate = 0x0003; } break; } /* check for STAs not supporting greenfield mode */ if (any_sta_nongf) gf20_mode = gf40_mode = 1; /* Update HT protection config */ reg = rt2800_register_read(rt2x00dev, MM20_PROT_CFG); rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, mm20_rate); rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, mm20_mode); rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, MM40_PROT_CFG); rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, mm40_rate); rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, mm40_mode); rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, GF20_PROT_CFG); rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, gf20_rate); rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, gf20_mode); rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, GF40_PROT_CFG); rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, gf40_rate); rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, gf40_mode); rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg); } void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, u32 changed) { u32 reg; if (changed & BSS_CHANGED_ERP_PREAMBLE) { reg = rt2800_register_read(rt2x00dev, AUTO_RSP_CFG); rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE, !!erp->short_preamble); rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg); } if (changed & BSS_CHANGED_ERP_CTS_PROT) { reg = rt2800_register_read(rt2x00dev, OFDM_PROT_CFG); rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, erp->cts_protection ? 2 : 0); rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg); } if (changed & BSS_CHANGED_BASIC_RATES) { rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0xff0 | erp->basic_rates); rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003); } if (changed & BSS_CHANGED_ERP_SLOT) { reg = rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG); rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time); rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg); reg = rt2800_register_read(rt2x00dev, XIFS_TIME_CFG); rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs); rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg); } if (changed & BSS_CHANGED_BEACON_INT) { reg = rt2800_register_read(rt2x00dev, BCN_TIME_CFG); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, erp->beacon_int * 16); rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); } if (changed & BSS_CHANGED_HT) rt2800_config_ht_opmode(rt2x00dev, erp); } EXPORT_SYMBOL_GPL(rt2800_config_erp); static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev, const struct rt2x00_field32 mask) { unsigned int i; u32 reg; for (i = 0; i < REGISTER_BUSY_COUNT; i++) { reg = rt2800_register_read(rt2x00dev, MAC_STATUS_CFG); if (!rt2x00_get_field32(reg, mask)) return 0; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "BBP/RF register access failed, aborting\n"); return -EACCES; } static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) { unsigned int i; u8 value; /* * BBP was enabled after firmware was loaded, * but we need to reactivate it now. 
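 * A BBP register 0 readback of 0x00 or 0xff means the BBP is not
 * responding yet, so poll until another value is seen.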
*/ rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); msleep(1); for (i = 0; i < REGISTER_BUSY_COUNT; i++) { value = rt2800_bbp_read(rt2x00dev, 0); if ((value != 0xff) && (value != 0x00)) return 0; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n"); return -EACCES; } static void rt2800_config_3572bt_ant(struct rt2x00_dev *rt2x00dev) { u32 reg; u16 eeprom; u8 led_ctrl, led_g_mode, led_r_mode; reg = rt2800_register_read(rt2x00dev, GPIO_SWITCH); if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { rt2x00_set_field32(&reg, GPIO_SWITCH_0, 1); rt2x00_set_field32(&reg, GPIO_SWITCH_1, 1); } else { rt2x00_set_field32(&reg, GPIO_SWITCH_0, 0); rt2x00_set_field32(&reg, GPIO_SWITCH_1, 0); } rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg); reg = rt2800_register_read(rt2x00dev, LED_CFG); led_g_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 3 : 0; led_r_mode = rt2x00_get_field32(reg, LED_CFG_LED_POLAR) ? 0 : 3; if (led_g_mode != rt2x00_get_field32(reg, LED_CFG_G_LED_MODE) || led_r_mode != rt2x00_get_field32(reg, LED_CFG_R_LED_MODE)) { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ); led_ctrl = rt2x00_get_field16(eeprom, EEPROM_FREQ_LED_MODE); if (led_ctrl == 0 || led_ctrl > 0x40) { rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, led_g_mode); rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, led_r_mode); rt2800_register_write(rt2x00dev, LED_CFG, reg); } else { rt2800_mcu_request(rt2x00dev, MCU_BAND_SELECT, 0xff, (led_g_mode << 2) | led_r_mode, 1); } } } static void rt2800_set_ant_diversity(struct rt2x00_dev *rt2x00dev, enum antenna ant) { u32 reg; u8 eesk_pin = (ant == ANTENNA_A) ? 1 : 0; u8 gpio_bit3 = (ant == ANTENNA_A) ? 0 : 1; if (rt2x00_is_pci(rt2x00dev)) { reg = rt2800_register_read(rt2x00dev, E2PROM_CSR); rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, eesk_pin); rt2800_register_write(rt2x00dev, E2PROM_CSR, reg); } else if (rt2x00_is_usb(rt2x00dev)) rt2800_mcu_request(rt2x00dev, MCU_ANT_SELECT, 0xff, eesk_pin, 0); reg = rt2800_register_read(rt2x00dev, GPIO_CTRL); rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0); rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, gpio_bit3); rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); } void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r1; u8 r3; u16 eeprom; r1 = rt2800_bbp_read(rt2x00dev, 1); r3 = rt2800_bbp_read(rt2x00dev, 3); if (rt2x00_rt(rt2x00dev, RT3572) && rt2x00_has_cap_bt_coexist(rt2x00dev)) rt2800_config_3572bt_ant(rt2x00dev); /* * Configure the TX antenna. */ switch (ant->tx_chain_num) { case 1: rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0); break; case 2: if (rt2x00_rt(rt2x00dev, RT3572) && rt2x00_has_cap_bt_coexist(rt2x00dev)) rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 1); else rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2); break; case 3: rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2); break; } /* * Configure the RX antenna. 
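 * BBP3_RX_ANTENNA encodes the number of RX chains: 0 for one chain,
 * 1 for two chains and 2 for three chains (see the cases below).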
*/ switch (ant->rx_chain_num) { case 1: if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3090) || rt2x00_rt(rt2x00dev, RT3352) || rt2x00_rt(rt2x00dev, RT3390)) { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY)) rt2800_set_ant_diversity(rt2x00dev, rt2x00dev->default_ant.rx); } rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0); break; case 2: if (rt2x00_rt(rt2x00dev, RT3572) && rt2x00_has_cap_bt_coexist(rt2x00dev)) { rt2x00_set_field8(&r3, BBP3_RX_ADC, 1); rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, rt2x00dev->curr_band == NL80211_BAND_5GHZ); rt2800_set_ant_diversity(rt2x00dev, ANTENNA_B); } else { rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1); } break; case 3: rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2); break; } rt2800_bbp_write(rt2x00dev, 3, r3); rt2800_bbp_write(rt2x00dev, 1, r1); if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) { if (ant->rx_chain_num == 1) rt2800_bbp_write(rt2x00dev, 86, 0x00); else rt2800_bbp_write(rt2x00dev, 86, 0x46); } } EXPORT_SYMBOL_GPL(rt2800_config_ant); static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { u16 eeprom; short lna_gain; if (libconf->rf.channel <= 14) { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_LNA); lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG); } else if (libconf->rf.channel <= 64) { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_LNA); lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0); } else if (libconf->rf.channel <= 128) { if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2); lna_gain = rt2x00_get_field16(eeprom, EEPROM_EXT_LNA2_A1); } else { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2); lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1); } } else { if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2); lna_gain = rt2x00_get_field16(eeprom, EEPROM_EXT_LNA2_A2); } else { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2); lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2); } } rt2x00dev->lna_gain = lna_gain; } static inline bool rt2800_clk_is_20mhz(struct rt2x00_dev *rt2x00dev) { return clk_get_rate(rt2x00dev->clk) == 20000000; } #define FREQ_OFFSET_BOUND 0x5f static void rt2800_freq_cal_mode1(struct rt2x00_dev *rt2x00dev) { u8 freq_offset, prev_freq_offset; u8 rfcsr, prev_rfcsr; freq_offset = rt2x00_get_field8(rt2x00dev->freq_offset, RFCSR17_CODE); freq_offset = min_t(u8, freq_offset, FREQ_OFFSET_BOUND); rfcsr = rt2800_rfcsr_read(rt2x00dev, 17); prev_rfcsr = rfcsr; rt2x00_set_field8(&rfcsr, RFCSR17_CODE, freq_offset); if (rfcsr == prev_rfcsr) return; if (rt2x00_is_usb(rt2x00dev)) { rt2800_mcu_request(rt2x00dev, MCU_FREQ_OFFSET, 0xff, freq_offset, prev_rfcsr); return; } prev_freq_offset = rt2x00_get_field8(prev_rfcsr, RFCSR17_CODE); while (prev_freq_offset != freq_offset) { if (prev_freq_offset < freq_offset) prev_freq_offset++; else prev_freq_offset--; rt2x00_set_field8(&rfcsr, RFCSR17_CODE, prev_freq_offset); rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); usleep_range(1000, 1500); } } static void rt2800_config_channel_rf2xxx(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, struct channel_info *info) { rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); if (rt2x00dev->default_ant.tx_chain_num == 1) rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1); if 
(rt2x00dev->default_ant.rx_chain_num == 1) { rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1); rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1); } else if (rt2x00dev->default_ant.rx_chain_num == 2) rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1); if (rf->channel > 14) { /* * When TX power is below 0, we should increase it by 7 to * make it a positive value (Minimum value is -7). * However this means that values between 0 and 7 have * double meaning, and we should set a 7DBm boost flag. */ rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST, (info->default_power1 >= 0)); if (info->default_power1 < 0) info->default_power1 += 7; rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A, info->default_power1); rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST, (info->default_power2 >= 0)); if (info->default_power2 < 0) info->default_power2 += 7; rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A, info->default_power2); } else { rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G, info->default_power1); rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G, info->default_power2); } rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf)); rt2800_rf_write(rt2x00dev, 1, rf->rf1); rt2800_rf_write(rt2x00dev, 2, rf->rf2); rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); rt2800_rf_write(rt2x00dev, 4, rf->rf4); udelay(200); rt2800_rf_write(rt2x00dev, 1, rf->rf1); rt2800_rf_write(rt2x00dev, 2, rf->rf2); rt2800_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004); rt2800_rf_write(rt2x00dev, 4, rf->rf4); udelay(200); rt2800_rf_write(rt2x00dev, 1, rf->rf1); rt2800_rf_write(rt2x00dev, 2, rf->rf2); rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); rt2800_rf_write(rt2x00dev, 4, rf->rf4); } static void rt2800_config_channel_rf3xxx(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, struct channel_info *info) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u8 rfcsr, calib_tx, calib_rx; rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1); rfcsr = rt2800_rfcsr_read(rt2x00dev, 3); rt2x00_set_field8(&rfcsr, RFCSR3_K, rf->rf3); rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 6); rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2); rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 12); rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, info->default_power1); rt2800_rfcsr_write(rt2x00dev, 12, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 13); rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2); rt2800_rfcsr_write(rt2x00dev, 13, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 1); rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, rt2x00dev->default_ant.rx_chain_num <= 1); rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, rt2x00dev->default_ant.rx_chain_num <= 2); rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, rt2x00dev->default_ant.tx_chain_num <= 1); rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, rt2x00dev->default_ant.tx_chain_num <= 2); rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 23); rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset); rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); if (rt2x00_rt(rt2x00dev, RT3390)) { calib_tx = conf_is_ht40(conf) ? 0x68 : 0x4f; calib_rx = conf_is_ht40(conf) ? 
0x6f : 0x4f; } else { if (conf_is_ht40(conf)) { calib_tx = drv_data->calibration_bw40; calib_rx = drv_data->calibration_bw40; } else { calib_tx = drv_data->calibration_bw20; calib_rx = drv_data->calibration_bw20; } } rfcsr = rt2800_rfcsr_read(rt2x00dev, 24); rt2x00_set_field8(&rfcsr, RFCSR24_TX_CALIB, calib_tx); rt2800_rfcsr_write(rt2x00dev, 24, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 31); rt2x00_set_field8(&rfcsr, RFCSR31_RX_CALIB, calib_rx); rt2800_rfcsr_write(rt2x00dev, 31, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 7); rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 30); rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1); rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); usleep_range(1000, 1500); rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0); rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); } static void rt2800_config_channel_rf3052(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, struct channel_info *info) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u8 rfcsr; u32 reg; if (rf->channel <= 14) { rt2800_bbp_write(rt2x00dev, 25, drv_data->bbp25); rt2800_bbp_write(rt2x00dev, 26, drv_data->bbp26); } else { rt2800_bbp_write(rt2x00dev, 25, 0x09); rt2800_bbp_write(rt2x00dev, 26, 0xff); } rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1); rt2800_rfcsr_write(rt2x00dev, 3, rf->rf3); rfcsr = rt2800_rfcsr_read(rt2x00dev, 6); rt2x00_set_field8(&rfcsr, RFCSR6_R1, rf->rf2); if (rf->channel <= 14) rt2x00_set_field8(&rfcsr, RFCSR6_TXDIV, 2); else rt2x00_set_field8(&rfcsr, RFCSR6_TXDIV, 1); rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 5); if (rf->channel <= 14) rt2x00_set_field8(&rfcsr, RFCSR5_R1, 1); else rt2x00_set_field8(&rfcsr, RFCSR5_R1, 2); rt2800_rfcsr_write(rt2x00dev, 5, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 12); if (rf->channel <= 14) { rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 3); rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, info->default_power1); } else { rt2x00_set_field8(&rfcsr, RFCSR12_DR0, 7); rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER, (info->default_power1 & 0x3) | ((info->default_power1 & 0xC) << 1)); } rt2800_rfcsr_write(rt2x00dev, 12, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 13); if (rf->channel <= 14) { rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 3); rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, info->default_power2); } else { rt2x00_set_field8(&rfcsr, RFCSR13_DR0, 7); rt2x00_set_field8(&rfcsr, RFCSR13_TX_POWER, (info->default_power2 & 0x3) | ((info->default_power2 & 0xC) << 1)); } rt2800_rfcsr_write(rt2x00dev, 13, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 1); rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0); if (rt2x00_has_cap_bt_coexist(rt2x00dev)) { if (rf->channel <= 14) { rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); } rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1); rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1); } else { switch (rt2x00dev->default_ant.tx_chain_num) { case 1: rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1); fallthrough; case 2: rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1); break; } switch (rt2x00dev->default_ant.rx_chain_num) { case 1: rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1); fallthrough; case 2: rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1); break; } } 
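/* Commit the RX/TX chain power-down bits configured above to RFCSR 1 */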
rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 23); rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset); rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); if (conf_is_ht40(conf)) { rt2800_rfcsr_write(rt2x00dev, 24, drv_data->calibration_bw40); rt2800_rfcsr_write(rt2x00dev, 31, drv_data->calibration_bw40); } else { rt2800_rfcsr_write(rt2x00dev, 24, drv_data->calibration_bw20); rt2800_rfcsr_write(rt2x00dev, 31, drv_data->calibration_bw20); } if (rf->channel <= 14) { rt2800_rfcsr_write(rt2x00dev, 7, 0xd8); rt2800_rfcsr_write(rt2x00dev, 9, 0xc3); rt2800_rfcsr_write(rt2x00dev, 10, 0xf1); rt2800_rfcsr_write(rt2x00dev, 11, 0xb9); rt2800_rfcsr_write(rt2x00dev, 15, 0x53); rfcsr = 0x4c; rt2x00_set_field8(&rfcsr, RFCSR16_TXMIXER_GAIN, drv_data->txmixer_gain_24g); rt2800_rfcsr_write(rt2x00dev, 16, rfcsr); rt2800_rfcsr_write(rt2x00dev, 17, 0x23); rt2800_rfcsr_write(rt2x00dev, 19, 0x93); rt2800_rfcsr_write(rt2x00dev, 20, 0xb3); rt2800_rfcsr_write(rt2x00dev, 25, 0x15); rt2800_rfcsr_write(rt2x00dev, 26, 0x85); rt2800_rfcsr_write(rt2x00dev, 27, 0x00); rt2800_rfcsr_write(rt2x00dev, 29, 0x9b); } else { rfcsr = rt2800_rfcsr_read(rt2x00dev, 7); rt2x00_set_field8(&rfcsr, RFCSR7_BIT2, 1); rt2x00_set_field8(&rfcsr, RFCSR7_BIT3, 0); rt2x00_set_field8(&rfcsr, RFCSR7_BIT4, 1); rt2x00_set_field8(&rfcsr, RFCSR7_BITS67, 0); rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); rt2800_rfcsr_write(rt2x00dev, 9, 0xc0); rt2800_rfcsr_write(rt2x00dev, 10, 0xf1); rt2800_rfcsr_write(rt2x00dev, 11, 0x00); rt2800_rfcsr_write(rt2x00dev, 15, 0x43); rfcsr = 0x7a; rt2x00_set_field8(&rfcsr, RFCSR16_TXMIXER_GAIN, drv_data->txmixer_gain_5g); rt2800_rfcsr_write(rt2x00dev, 16, rfcsr); rt2800_rfcsr_write(rt2x00dev, 17, 0x23); if (rf->channel <= 64) { rt2800_rfcsr_write(rt2x00dev, 19, 0xb7); rt2800_rfcsr_write(rt2x00dev, 20, 0xf6); rt2800_rfcsr_write(rt2x00dev, 25, 0x3d); } else if (rf->channel <= 128) { rt2800_rfcsr_write(rt2x00dev, 19, 0x74); rt2800_rfcsr_write(rt2x00dev, 20, 0xf4); rt2800_rfcsr_write(rt2x00dev, 25, 0x01); } else { rt2800_rfcsr_write(rt2x00dev, 19, 0x72); rt2800_rfcsr_write(rt2x00dev, 20, 0xf3); rt2800_rfcsr_write(rt2x00dev, 25, 0x01); } rt2800_rfcsr_write(rt2x00dev, 26, 0x87); rt2800_rfcsr_write(rt2x00dev, 27, 0x01); rt2800_rfcsr_write(rt2x00dev, 29, 0x9f); } reg = rt2800_register_read(rt2x00dev, GPIO_CTRL); rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0); if (rf->channel <= 14) rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1); else rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 0); rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); rfcsr = rt2800_rfcsr_read(rt2x00dev, 7); rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); } static void rt2800_config_channel_rf3053(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, struct channel_info *info) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u8 txrx_agc_fc; u8 txrx_h20m; u8 rfcsr; u8 bbp; const bool txbf_enabled = false; /* TODO */ /* TODO: use TX{0,1,2}FinePowerControl values from EEPROM */ bbp = rt2800_bbp_read(rt2x00dev, 109); rt2x00_set_field8(&bbp, BBP109_TX0_POWER, 0); rt2x00_set_field8(&bbp, BBP109_TX1_POWER, 0); rt2800_bbp_write(rt2x00dev, 109, bbp); bbp = rt2800_bbp_read(rt2x00dev, 110); rt2x00_set_field8(&bbp, BBP110_TX2_POWER, 0); rt2800_bbp_write(rt2x00dev, 110, bbp); if (rf->channel <= 14) { /* Restore BBP 25 & 26 for 2.4 GHz */ rt2800_bbp_write(rt2x00dev, 25, drv_data->bbp25); rt2800_bbp_write(rt2x00dev, 26, drv_data->bbp26); } else { /* Hard code BBP 25 & 26 for 5GHz 
*/ /* Enable IQ Phase correction */ rt2800_bbp_write(rt2x00dev, 25, 0x09); /* Setup IQ Phase correction value */ rt2800_bbp_write(rt2x00dev, 26, 0xff); } rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1); rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3 & 0xf); rfcsr = rt2800_rfcsr_read(rt2x00dev, 11); rt2x00_set_field8(&rfcsr, RFCSR11_R, (rf->rf2 & 0x3)); rt2800_rfcsr_write(rt2x00dev, 11, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 11); rt2x00_set_field8(&rfcsr, RFCSR11_PLL_IDOH, 1); if (rf->channel <= 14) rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 1); else rt2x00_set_field8(&rfcsr, RFCSR11_PLL_MOD, 2); rt2800_rfcsr_write(rt2x00dev, 11, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 53); if (rf->channel <= 14) { rfcsr = 0; rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER, info->default_power1 & 0x1f); } else { if (rt2x00_is_usb(rt2x00dev)) rfcsr = 0x40; rt2x00_set_field8(&rfcsr, RFCSR53_TX_POWER, ((info->default_power1 & 0x18) << 1) | (info->default_power1 & 7)); } rt2800_rfcsr_write(rt2x00dev, 53, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 55); if (rf->channel <= 14) { rfcsr = 0; rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER, info->default_power2 & 0x1f); } else { if (rt2x00_is_usb(rt2x00dev)) rfcsr = 0x40; rt2x00_set_field8(&rfcsr, RFCSR55_TX_POWER, ((info->default_power2 & 0x18) << 1) | (info->default_power2 & 7)); } rt2800_rfcsr_write(rt2x00dev, 55, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 54); if (rf->channel <= 14) { rfcsr = 0; rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER, info->default_power3 & 0x1f); } else { if (rt2x00_is_usb(rt2x00dev)) rfcsr = 0x40; rt2x00_set_field8(&rfcsr, RFCSR54_TX_POWER, ((info->default_power3 & 0x18) << 1) | (info->default_power3 & 7)); } rt2800_rfcsr_write(rt2x00dev, 54, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 1); rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); switch (rt2x00dev->default_ant.tx_chain_num) { case 3: rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1); fallthrough; case 2: rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1); fallthrough; case 1: rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); break; } switch (rt2x00dev->default_ant.rx_chain_num) { case 3: rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1); fallthrough; case 2: rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1); fallthrough; case 1: rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); break; } rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); rt2800_freq_cal_mode1(rt2x00dev); if (conf_is_ht40(conf)) { txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw40, RFCSR24_TX_AGC_FC); txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw40, RFCSR24_TX_H20M); } else { txrx_agc_fc = rt2x00_get_field8(drv_data->calibration_bw20, RFCSR24_TX_AGC_FC); txrx_h20m = rt2x00_get_field8(drv_data->calibration_bw20, RFCSR24_TX_H20M); } /* NOTE: the reference driver does not writes the new value * back to RFCSR 32 */ rfcsr = rt2800_rfcsr_read(rt2x00dev, 32); rt2x00_set_field8(&rfcsr, RFCSR32_TX_AGC_FC, txrx_agc_fc); if (rf->channel <= 14) rfcsr = 0xa0; else rfcsr = 0x80; rt2800_rfcsr_write(rt2x00dev, 31, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 30); rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, txrx_h20m); rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, txrx_h20m); rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); /* Band selection */ rfcsr = 
rt2800_rfcsr_read(rt2x00dev, 36); if (rf->channel <= 14) rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 1); else rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 0); rt2800_rfcsr_write(rt2x00dev, 36, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 34); if (rf->channel <= 14) rfcsr = 0x3c; else rfcsr = 0x20; rt2800_rfcsr_write(rt2x00dev, 34, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 12); if (rf->channel <= 14) rfcsr = 0x1a; else rfcsr = 0x12; rt2800_rfcsr_write(rt2x00dev, 12, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 6); if (rf->channel >= 1 && rf->channel <= 14) rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1); else if (rf->channel >= 36 && rf->channel <= 64) rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2); else if (rf->channel >= 100 && rf->channel <= 128) rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 2); else rt2x00_set_field8(&rfcsr, RFCSR6_VCO_IC, 1); rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 30); rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2); rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); rt2800_rfcsr_write(rt2x00dev, 46, 0x60); if (rf->channel <= 14) { rt2800_rfcsr_write(rt2x00dev, 10, 0xd3); rt2800_rfcsr_write(rt2x00dev, 13, 0x12); } else { rt2800_rfcsr_write(rt2x00dev, 10, 0xd8); rt2800_rfcsr_write(rt2x00dev, 13, 0x23); } rfcsr = rt2800_rfcsr_read(rt2x00dev, 51); rt2x00_set_field8(&rfcsr, RFCSR51_BITS01, 1); rt2800_rfcsr_write(rt2x00dev, 51, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 51); if (rf->channel <= 14) { rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 5); rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 3); } else { rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, 4); rt2x00_set_field8(&rfcsr, RFCSR51_BITS57, 2); } rt2800_rfcsr_write(rt2x00dev, 51, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 49); if (rf->channel <= 14) rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 3); else rt2x00_set_field8(&rfcsr, RFCSR49_TX_LO1_IC, 2); if (txbf_enabled) rt2x00_set_field8(&rfcsr, RFCSR49_TX_DIV, 1); rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 50); rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO1_EN, 0); rt2800_rfcsr_write(rt2x00dev, 50, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 57); if (rf->channel <= 14) rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x1b); else rt2x00_set_field8(&rfcsr, RFCSR57_DRV_CC, 0x0f); rt2800_rfcsr_write(rt2x00dev, 57, rfcsr); if (rf->channel <= 14) { rt2800_rfcsr_write(rt2x00dev, 44, 0x93); rt2800_rfcsr_write(rt2x00dev, 52, 0x45); } else { rt2800_rfcsr_write(rt2x00dev, 44, 0x9b); rt2800_rfcsr_write(rt2x00dev, 52, 0x05); } /* Initiate VCO calibration */ rfcsr = rt2800_rfcsr_read(rt2x00dev, 3); if (rf->channel <= 14) { rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1); } else { rt2x00_set_field8(&rfcsr, RFCSR3_BIT1, 1); rt2x00_set_field8(&rfcsr, RFCSR3_BIT2, 1); rt2x00_set_field8(&rfcsr, RFCSR3_BIT3, 1); rt2x00_set_field8(&rfcsr, RFCSR3_BIT4, 1); rt2x00_set_field8(&rfcsr, RFCSR3_BIT5, 1); rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1); } rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); if (rf->channel >= 1 && rf->channel <= 14) { rfcsr = 0x23; if (txbf_enabled) rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1); rt2800_rfcsr_write(rt2x00dev, 39, rfcsr); rt2800_rfcsr_write(rt2x00dev, 45, 0xbb); } else if (rf->channel >= 36 && rf->channel <= 64) { rfcsr = 0x36; if (txbf_enabled) rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1); rt2800_rfcsr_write(rt2x00dev, 39, 0x36); rt2800_rfcsr_write(rt2x00dev, 45, 0xeb); } else if (rf->channel >= 100 && rf->channel <= 128) { rfcsr = 0x32; if (txbf_enabled) rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1); rt2800_rfcsr_write(rt2x00dev, 39, 
rfcsr); rt2800_rfcsr_write(rt2x00dev, 45, 0xb3); } else { rfcsr = 0x30; if (txbf_enabled) rt2x00_set_field8(&rfcsr, RFCSR39_RX_DIV, 1); rt2800_rfcsr_write(rt2x00dev, 39, rfcsr); rt2800_rfcsr_write(rt2x00dev, 45, 0x9b); } } static void rt2800_config_channel_rf3853(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, struct channel_info *info) { u8 rfcsr; u8 bbp; u8 pwr1, pwr2, pwr3; const bool txbf_enabled = false; /* TODO */ /* TODO: add band selection */ if (rf->channel <= 14) rt2800_rfcsr_write(rt2x00dev, 6, 0x40); else if (rf->channel < 132) rt2800_rfcsr_write(rt2x00dev, 6, 0x80); else rt2800_rfcsr_write(rt2x00dev, 6, 0x40); rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1); rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3); if (rf->channel <= 14) rt2800_rfcsr_write(rt2x00dev, 11, 0x46); else rt2800_rfcsr_write(rt2x00dev, 11, 0x48); if (rf->channel <= 14) rt2800_rfcsr_write(rt2x00dev, 12, 0x1a); else rt2800_rfcsr_write(rt2x00dev, 12, 0x52); rt2800_rfcsr_write(rt2x00dev, 13, 0x12); rfcsr = rt2800_rfcsr_read(rt2x00dev, 1); rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); switch (rt2x00dev->default_ant.tx_chain_num) { case 3: rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 1); fallthrough; case 2: rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1); fallthrough; case 1: rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); break; } switch (rt2x00dev->default_ant.rx_chain_num) { case 3: rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 1); fallthrough; case 2: rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1); fallthrough; case 1: rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); break; } rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); rt2800_freq_cal_mode1(rt2x00dev); rfcsr = rt2800_rfcsr_read(rt2x00dev, 30); if (!conf_is_ht40(conf)) rfcsr &= ~(0x06); else rfcsr |= 0x06; rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); if (rf->channel <= 14) rt2800_rfcsr_write(rt2x00dev, 31, 0xa0); else rt2800_rfcsr_write(rt2x00dev, 31, 0x80); if (conf_is_ht40(conf)) rt2800_rfcsr_write(rt2x00dev, 32, 0x80); else rt2800_rfcsr_write(rt2x00dev, 32, 0xd8); if (rf->channel <= 14) rt2800_rfcsr_write(rt2x00dev, 34, 0x3c); else rt2800_rfcsr_write(rt2x00dev, 34, 0x20); /* loopback RF_BS */ rfcsr = rt2800_rfcsr_read(rt2x00dev, 36); if (rf->channel <= 14) rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 1); else rt2x00_set_field8(&rfcsr, RFCSR36_RF_BS, 0); rt2800_rfcsr_write(rt2x00dev, 36, rfcsr); if (rf->channel <= 14) rfcsr = 0x23; else if (rf->channel < 100) rfcsr = 0x36; else if (rf->channel < 132) rfcsr = 0x32; else rfcsr = 0x30; if (txbf_enabled) rfcsr |= 0x40; rt2800_rfcsr_write(rt2x00dev, 39, rfcsr); if (rf->channel <= 14) rt2800_rfcsr_write(rt2x00dev, 44, 0x93); else rt2800_rfcsr_write(rt2x00dev, 44, 0x9b); if (rf->channel <= 14) rfcsr = 0xbb; else if (rf->channel < 100) rfcsr = 0xeb; else if (rf->channel < 132) rfcsr = 0xb3; else rfcsr = 0x9b; rt2800_rfcsr_write(rt2x00dev, 45, rfcsr); if (rf->channel <= 14) rfcsr = 0x8e; else rfcsr = 0x8a; if (txbf_enabled) rfcsr |= 0x20; rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); rt2800_rfcsr_write(rt2x00dev, 50, 0x86); rfcsr = rt2800_rfcsr_read(rt2x00dev, 51); if (rf->channel <= 14) rt2800_rfcsr_write(rt2x00dev, 51, 0x75); else rt2800_rfcsr_write(rt2x00dev, 51, 0x51); rfcsr = rt2800_rfcsr_read(rt2x00dev, 52); if 
(rf->channel <= 14) rt2800_rfcsr_write(rt2x00dev, 52, 0x45); else rt2800_rfcsr_write(rt2x00dev, 52, 0x05); if (rf->channel <= 14) { pwr1 = info->default_power1 & 0x1f; pwr2 = info->default_power2 & 0x1f; pwr3 = info->default_power3 & 0x1f; } else { pwr1 = 0x48 | ((info->default_power1 & 0x18) << 1) | (info->default_power1 & 0x7); pwr2 = 0x48 | ((info->default_power2 & 0x18) << 1) | (info->default_power2 & 0x7); pwr3 = 0x48 | ((info->default_power3 & 0x18) << 1) | (info->default_power3 & 0x7); } rt2800_rfcsr_write(rt2x00dev, 53, pwr1); rt2800_rfcsr_write(rt2x00dev, 54, pwr2); rt2800_rfcsr_write(rt2x00dev, 55, pwr3); rt2x00_dbg(rt2x00dev, "Channel:%d, pwr1:%02x, pwr2:%02x, pwr3:%02x\n", rf->channel, pwr1, pwr2, pwr3); bbp = (info->default_power1 >> 5) | ((info->default_power2 & 0xe0) >> 1); rt2800_bbp_write(rt2x00dev, 109, bbp); bbp = rt2800_bbp_read(rt2x00dev, 110); bbp &= 0x0f; bbp |= (info->default_power3 & 0xe0) >> 1; rt2800_bbp_write(rt2x00dev, 110, bbp); rfcsr = rt2800_rfcsr_read(rt2x00dev, 57); if (rf->channel <= 14) rt2800_rfcsr_write(rt2x00dev, 57, 0x6e); else rt2800_rfcsr_write(rt2x00dev, 57, 0x3e); /* Enable RF tuning */ rfcsr = rt2800_rfcsr_read(rt2x00dev, 3); rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); udelay(2000); bbp = rt2800_bbp_read(rt2x00dev, 49); /* clear update flag */ rt2800_bbp_write(rt2x00dev, 49, bbp & 0xfe); rt2800_bbp_write(rt2x00dev, 49, bbp); /* TODO: add calibration for TxBF */ } #define POWER_BOUND 0x27 #define POWER_BOUND_5G 0x2b static void rt2800_config_channel_rf3290(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, struct channel_info *info) { u8 rfcsr; rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1); rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3); rfcsr = rt2800_rfcsr_read(rt2x00dev, 11); rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2); rt2800_rfcsr_write(rt2x00dev, 11, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 49); if (info->default_power1 > POWER_BOUND) rt2x00_set_field8(&rfcsr, RFCSR49_TX, POWER_BOUND); else rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); rt2800_freq_cal_mode1(rt2x00dev); if (rf->channel <= 14) { if (rf->channel == 6) rt2800_bbp_write(rt2x00dev, 68, 0x0c); else rt2800_bbp_write(rt2x00dev, 68, 0x0b); if (rf->channel >= 1 && rf->channel <= 6) rt2800_bbp_write(rt2x00dev, 59, 0x0f); else if (rf->channel >= 7 && rf->channel <= 11) rt2800_bbp_write(rt2x00dev, 59, 0x0e); else if (rf->channel >= 12 && rf->channel <= 14) rt2800_bbp_write(rt2x00dev, 59, 0x0d); } } static void rt2800_config_channel_rf3322(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, struct channel_info *info) { u8 rfcsr; rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1); rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3); rt2800_rfcsr_write(rt2x00dev, 11, 0x42); rt2800_rfcsr_write(rt2x00dev, 12, 0x1c); rt2800_rfcsr_write(rt2x00dev, 13, 0x00); if (info->default_power1 > POWER_BOUND) rt2800_rfcsr_write(rt2x00dev, 47, POWER_BOUND); else rt2800_rfcsr_write(rt2x00dev, 47, info->default_power1); if (info->default_power2 > POWER_BOUND) rt2800_rfcsr_write(rt2x00dev, 48, POWER_BOUND); else rt2800_rfcsr_write(rt2x00dev, 48, info->default_power2); rt2800_freq_cal_mode1(rt2x00dev); rfcsr = rt2800_rfcsr_read(rt2x00dev, 1); rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); if ( rt2x00dev->default_ant.tx_chain_num == 2 ) rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1); else rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 0); 
if ( rt2x00dev->default_ant.rx_chain_num == 2 ) rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1); else rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0); rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); rt2800_rfcsr_write(rt2x00dev, 31, 80); } static void rt2800_config_channel_rf53xx(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, struct channel_info *info) { u8 rfcsr; int idx = rf->channel-1; rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1); rt2800_rfcsr_write(rt2x00dev, 9, rf->rf3); rfcsr = rt2800_rfcsr_read(rt2x00dev, 11); rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf2); rt2800_rfcsr_write(rt2x00dev, 11, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 49); if (info->default_power1 > POWER_BOUND) rt2x00_set_field8(&rfcsr, RFCSR49_TX, POWER_BOUND); else rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); if (rt2x00_rt(rt2x00dev, RT5392)) { rfcsr = rt2800_rfcsr_read(rt2x00dev, 50); if (info->default_power2 > POWER_BOUND) rt2x00_set_field8(&rfcsr, RFCSR50_TX, POWER_BOUND); else rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2); rt2800_rfcsr_write(rt2x00dev, 50, rfcsr); } rfcsr = rt2800_rfcsr_read(rt2x00dev, 1); if (rt2x00_rt(rt2x00dev, RT5392)) { rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1); rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1); } rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 1); rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 1); rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); rt2800_freq_cal_mode1(rt2x00dev); if (rt2x00_has_cap_bt_coexist(rt2x00dev)) { if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { /* r55/r59 value array of channel 1~14 */ static const u8 r55_bt_rev[] = {0x83, 0x83, 0x83, 0x73, 0x73, 0x63, 0x53, 0x53, 0x53, 0x43, 0x43, 0x43, 0x43, 0x43}; static const u8 r59_bt_rev[] = {0x0e, 0x0e, 0x0e, 0x0e, 0x0e, 0x0b, 0x0a, 0x09, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07}; rt2800_rfcsr_write(rt2x00dev, 55, r55_bt_rev[idx]); rt2800_rfcsr_write(rt2x00dev, 59, r59_bt_rev[idx]); } else { static const u8 r59_bt[] = {0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8b, 0x8a, 0x89, 0x88, 0x88, 0x86, 0x85, 0x84}; rt2800_rfcsr_write(rt2x00dev, 59, r59_bt[idx]); } } else { if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { static const u8 r55_nonbt_rev[] = {0x23, 0x23, 0x23, 0x23, 0x13, 0x13, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03}; static const u8 r59_nonbt_rev[] = {0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x07, 0x06, 0x05, 0x04, 0x04}; rt2800_rfcsr_write(rt2x00dev, 55, r55_nonbt_rev[idx]); rt2800_rfcsr_write(rt2x00dev, 59, r59_nonbt_rev[idx]); } else if (rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392) || rt2x00_rt(rt2x00dev, RT6352)) { static const u8 r59_non_bt[] = {0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8f, 0x8d, 0x8a, 0x88, 0x88, 0x87, 0x87, 0x86}; rt2800_rfcsr_write(rt2x00dev, 59, r59_non_bt[idx]); } else if (rt2x00_rt(rt2x00dev, RT5350)) { static const u8 r59_non_bt[] = {0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0b, 0x0a, 0x0a, 0x09, 0x08, 0x07, 0x07, 0x06}; rt2800_rfcsr_write(rt2x00dev, 59, r59_non_bt[idx]); } } } static void rt2800_config_channel_rf55xx(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, struct channel_info *info) { u8 rfcsr, ep_reg; u32 reg; int power_bound; /* TODO */ const bool is_11b = false; const bool is_type_ep = false; reg = rt2800_register_read(rt2x00dev, LDO_CFG0); 
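/* Raise the LDO core voltage level for 5 GHz channels and HT40 operation */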
rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, (rf->channel > 14 || conf_is_ht40(conf)) ? 5 : 0); rt2800_register_write(rt2x00dev, LDO_CFG0, reg); /* Order of values on rf_channel entry: N, K, mod, R */ rt2800_rfcsr_write(rt2x00dev, 8, rf->rf1 & 0xff); rfcsr = rt2800_rfcsr_read(rt2x00dev, 9); rt2x00_set_field8(&rfcsr, RFCSR9_K, rf->rf2 & 0xf); rt2x00_set_field8(&rfcsr, RFCSR9_N, (rf->rf1 & 0x100) >> 8); rt2x00_set_field8(&rfcsr, RFCSR9_MOD, ((rf->rf3 - 8) & 0x4) >> 2); rt2800_rfcsr_write(rt2x00dev, 9, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 11); rt2x00_set_field8(&rfcsr, RFCSR11_R, rf->rf4 - 1); rt2x00_set_field8(&rfcsr, RFCSR11_MOD, (rf->rf3 - 8) & 0x3); rt2800_rfcsr_write(rt2x00dev, 11, rfcsr); if (rf->channel <= 14) { rt2800_rfcsr_write(rt2x00dev, 10, 0x90); /* FIXME: RF11 overwrite? */ rt2800_rfcsr_write(rt2x00dev, 11, 0x4A); rt2800_rfcsr_write(rt2x00dev, 12, 0x52); rt2800_rfcsr_write(rt2x00dev, 13, 0x42); rt2800_rfcsr_write(rt2x00dev, 22, 0x40); rt2800_rfcsr_write(rt2x00dev, 24, 0x4A); rt2800_rfcsr_write(rt2x00dev, 25, 0x80); rt2800_rfcsr_write(rt2x00dev, 27, 0x42); rt2800_rfcsr_write(rt2x00dev, 36, 0x80); rt2800_rfcsr_write(rt2x00dev, 37, 0x08); rt2800_rfcsr_write(rt2x00dev, 38, 0x89); rt2800_rfcsr_write(rt2x00dev, 39, 0x1B); rt2800_rfcsr_write(rt2x00dev, 40, 0x0D); rt2800_rfcsr_write(rt2x00dev, 41, 0x9B); rt2800_rfcsr_write(rt2x00dev, 42, 0xD5); rt2800_rfcsr_write(rt2x00dev, 43, 0x72); rt2800_rfcsr_write(rt2x00dev, 44, 0x0E); rt2800_rfcsr_write(rt2x00dev, 45, 0xA2); rt2800_rfcsr_write(rt2x00dev, 46, 0x6B); rt2800_rfcsr_write(rt2x00dev, 48, 0x10); rt2800_rfcsr_write(rt2x00dev, 51, 0x3E); rt2800_rfcsr_write(rt2x00dev, 52, 0x48); rt2800_rfcsr_write(rt2x00dev, 54, 0x38); rt2800_rfcsr_write(rt2x00dev, 56, 0xA1); rt2800_rfcsr_write(rt2x00dev, 57, 0x00); rt2800_rfcsr_write(rt2x00dev, 58, 0x39); rt2800_rfcsr_write(rt2x00dev, 60, 0x45); rt2800_rfcsr_write(rt2x00dev, 61, 0x91); rt2800_rfcsr_write(rt2x00dev, 62, 0x39); /* TODO RF27 <- tssi */ rfcsr = rf->channel <= 10 ?
0x07 : 0x06; rt2800_rfcsr_write(rt2x00dev, 23, rfcsr); rt2800_rfcsr_write(rt2x00dev, 59, rfcsr); if (is_11b) { /* CCK */ rt2800_rfcsr_write(rt2x00dev, 31, 0xF8); rt2800_rfcsr_write(rt2x00dev, 32, 0xC0); if (is_type_ep) rt2800_rfcsr_write(rt2x00dev, 55, 0x06); else rt2800_rfcsr_write(rt2x00dev, 55, 0x47); } else { /* OFDM */ if (is_type_ep) rt2800_rfcsr_write(rt2x00dev, 55, 0x03); else rt2800_rfcsr_write(rt2x00dev, 55, 0x43); } power_bound = POWER_BOUND; ep_reg = 0x2; } else { rt2800_rfcsr_write(rt2x00dev, 10, 0x97); /* FIXME: RF11 overwrite */ rt2800_rfcsr_write(rt2x00dev, 11, 0x40); rt2800_rfcsr_write(rt2x00dev, 25, 0xBF); rt2800_rfcsr_write(rt2x00dev, 27, 0x42); rt2800_rfcsr_write(rt2x00dev, 36, 0x00); rt2800_rfcsr_write(rt2x00dev, 37, 0x04); rt2800_rfcsr_write(rt2x00dev, 38, 0x85); rt2800_rfcsr_write(rt2x00dev, 40, 0x42); rt2800_rfcsr_write(rt2x00dev, 41, 0xBB); rt2800_rfcsr_write(rt2x00dev, 42, 0xD7); rt2800_rfcsr_write(rt2x00dev, 45, 0x41); rt2800_rfcsr_write(rt2x00dev, 48, 0x00); rt2800_rfcsr_write(rt2x00dev, 57, 0x77); rt2800_rfcsr_write(rt2x00dev, 60, 0x05); rt2800_rfcsr_write(rt2x00dev, 61, 0x01); /* TODO RF27 <- tssi */ if (rf->channel >= 36 && rf->channel <= 64) { rt2800_rfcsr_write(rt2x00dev, 12, 0x2E); rt2800_rfcsr_write(rt2x00dev, 13, 0x22); rt2800_rfcsr_write(rt2x00dev, 22, 0x60); rt2800_rfcsr_write(rt2x00dev, 23, 0x7F); if (rf->channel <= 50) rt2800_rfcsr_write(rt2x00dev, 24, 0x09); else if (rf->channel >= 52) rt2800_rfcsr_write(rt2x00dev, 24, 0x07); rt2800_rfcsr_write(rt2x00dev, 39, 0x1C); rt2800_rfcsr_write(rt2x00dev, 43, 0x5B); rt2800_rfcsr_write(rt2x00dev, 44, 0x40); rt2800_rfcsr_write(rt2x00dev, 46, 0x00); rt2800_rfcsr_write(rt2x00dev, 51, 0xFE); rt2800_rfcsr_write(rt2x00dev, 52, 0x0C); rt2800_rfcsr_write(rt2x00dev, 54, 0xF8); if (rf->channel <= 50) { rt2800_rfcsr_write(rt2x00dev, 55, 0x06); rt2800_rfcsr_write(rt2x00dev, 56, 0xD3); } else if (rf->channel >= 52) { rt2800_rfcsr_write(rt2x00dev, 55, 0x04); rt2800_rfcsr_write(rt2x00dev, 56, 0xBB); } rt2800_rfcsr_write(rt2x00dev, 58, 0x15); rt2800_rfcsr_write(rt2x00dev, 59, 0x7F); rt2800_rfcsr_write(rt2x00dev, 62, 0x15); } else if (rf->channel >= 100 && rf->channel <= 165) { rt2800_rfcsr_write(rt2x00dev, 12, 0x0E); rt2800_rfcsr_write(rt2x00dev, 13, 0x42); rt2800_rfcsr_write(rt2x00dev, 22, 0x40); if (rf->channel <= 153) { rt2800_rfcsr_write(rt2x00dev, 23, 0x3C); rt2800_rfcsr_write(rt2x00dev, 24, 0x06); } else if (rf->channel >= 155) { rt2800_rfcsr_write(rt2x00dev, 23, 0x38); rt2800_rfcsr_write(rt2x00dev, 24, 0x05); } if (rf->channel <= 138) { rt2800_rfcsr_write(rt2x00dev, 39, 0x1A); rt2800_rfcsr_write(rt2x00dev, 43, 0x3B); rt2800_rfcsr_write(rt2x00dev, 44, 0x20); rt2800_rfcsr_write(rt2x00dev, 46, 0x18); } else if (rf->channel >= 140) { rt2800_rfcsr_write(rt2x00dev, 39, 0x18); rt2800_rfcsr_write(rt2x00dev, 43, 0x1B); rt2800_rfcsr_write(rt2x00dev, 44, 0x10); rt2800_rfcsr_write(rt2x00dev, 46, 0x08); } if (rf->channel <= 124) rt2800_rfcsr_write(rt2x00dev, 51, 0xFC); else if (rf->channel >= 126) rt2800_rfcsr_write(rt2x00dev, 51, 0xEC); if (rf->channel <= 138) rt2800_rfcsr_write(rt2x00dev, 52, 0x06); else if (rf->channel >= 140) rt2800_rfcsr_write(rt2x00dev, 52, 0x06); rt2800_rfcsr_write(rt2x00dev, 54, 0xEB); if (rf->channel <= 138) rt2800_rfcsr_write(rt2x00dev, 55, 0x01); else if (rf->channel >= 140) rt2800_rfcsr_write(rt2x00dev, 55, 0x00); if (rf->channel <= 128) rt2800_rfcsr_write(rt2x00dev, 56, 0xBB); else if (rf->channel >= 130) rt2800_rfcsr_write(rt2x00dev, 56, 0xAB); if (rf->channel <= 116) rt2800_rfcsr_write(rt2x00dev, 58,
0x1D); else if (rf->channel >= 118) rt2800_rfcsr_write(rt2x00dev, 58, 0x15); if (rf->channel <= 138) rt2800_rfcsr_write(rt2x00dev, 59, 0x3F); else if (rf->channel >= 140) rt2800_rfcsr_write(rt2x00dev, 59, 0x7C); if (rf->channel <= 116) rt2800_rfcsr_write(rt2x00dev, 62, 0x1D); else if (rf->channel >= 118) rt2800_rfcsr_write(rt2x00dev, 62, 0x15); } power_bound = POWER_BOUND_5G; ep_reg = 0x3; } rfcsr = rt2800_rfcsr_read(rt2x00dev, 49); if (info->default_power1 > power_bound) rt2x00_set_field8(&rfcsr, RFCSR49_TX, power_bound); else rt2x00_set_field8(&rfcsr, RFCSR49_TX, info->default_power1); if (is_type_ep) rt2x00_set_field8(&rfcsr, RFCSR49_EP, ep_reg); rt2800_rfcsr_write(rt2x00dev, 49, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 50); if (info->default_power2 > power_bound) rt2x00_set_field8(&rfcsr, RFCSR50_TX, power_bound); else rt2x00_set_field8(&rfcsr, RFCSR50_TX, info->default_power2); if (is_type_ep) rt2x00_set_field8(&rfcsr, RFCSR50_EP, ep_reg); rt2800_rfcsr_write(rt2x00dev, 50, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 1); rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, rt2x00dev->default_ant.tx_chain_num >= 1); rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, rt2x00dev->default_ant.tx_chain_num == 2); rt2x00_set_field8(&rfcsr, RFCSR1_TX2_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, rt2x00dev->default_ant.rx_chain_num >= 1); rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, rt2x00dev->default_ant.rx_chain_num == 2); rt2x00_set_field8(&rfcsr, RFCSR1_RX2_PD, 0); rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); rt2800_rfcsr_write(rt2x00dev, 6, 0xe4); if (conf_is_ht40(conf)) rt2800_rfcsr_write(rt2x00dev, 30, 0x16); else rt2800_rfcsr_write(rt2x00dev, 30, 0x10); if (!is_11b) { rt2800_rfcsr_write(rt2x00dev, 31, 0x80); rt2800_rfcsr_write(rt2x00dev, 32, 0x80); } /* TODO proper frequency adjustment */ rt2800_freq_cal_mode1(rt2x00dev); /* TODO merge with others */ rfcsr = rt2800_rfcsr_read(rt2x00dev, 3); rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); /* BBP settings */ rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 79, (rf->channel <= 14) ? 0x1C : 0x18); rt2800_bbp_write(rt2x00dev, 80, (rf->channel <= 14) ? 0x0E : 0x08); rt2800_bbp_write(rt2x00dev, 81, (rf->channel <= 14) ? 0x3A : 0x38); rt2800_bbp_write(rt2x00dev, 82, (rf->channel <= 14) ? 0x62 : 0x92); /* GLRT band configuration */ rt2800_bbp_write(rt2x00dev, 195, 128); rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0xE0 : 0xF0); rt2800_bbp_write(rt2x00dev, 195, 129); rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x1F : 0x1E); rt2800_bbp_write(rt2x00dev, 195, 130); rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x38 : 0x28); rt2800_bbp_write(rt2x00dev, 195, 131); rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x32 : 0x20); rt2800_bbp_write(rt2x00dev, 195, 133); rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 0x28 : 0x7F); rt2800_bbp_write(rt2x00dev, 195, 124); rt2800_bbp_write(rt2x00dev, 196, (rf->channel <= 14) ? 
0x19 : 0x7F); } static void rt2800_config_channel_rf7620(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, struct channel_info *info) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u8 rx_agc_fc, tx_agc_fc; u8 rfcsr; /* Frequeny plan setting */ /* Rdiv setting (set 0x03 if Xtal==20) * R13[1:0] */ rfcsr = rt2800_rfcsr_read(rt2x00dev, 13); rt2x00_set_field8(&rfcsr, RFCSR13_RDIV_MT7620, rt2800_clk_is_20mhz(rt2x00dev) ? 3 : 0); rt2800_rfcsr_write(rt2x00dev, 13, rfcsr); /* N setting * R20[7:0] in rf->rf1 * R21[0] always 0 */ rfcsr = rt2800_rfcsr_read(rt2x00dev, 20); rfcsr = (rf->rf1 & 0x00ff); rt2800_rfcsr_write(rt2x00dev, 20, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 21); rt2x00_set_field8(&rfcsr, RFCSR21_BIT1, 0); rt2800_rfcsr_write(rt2x00dev, 21, rfcsr); /* K setting (always 0) * R16[3:0] (RF PLL freq selection) */ rfcsr = rt2800_rfcsr_read(rt2x00dev, 16); rt2x00_set_field8(&rfcsr, RFCSR16_RF_PLL_FREQ_SEL_MT7620, 0); rt2800_rfcsr_write(rt2x00dev, 16, rfcsr); /* D setting (always 0) * R22[2:0] (D=15, R22[2:0]=<111>) */ rfcsr = rt2800_rfcsr_read(rt2x00dev, 22); rt2x00_set_field8(&rfcsr, RFCSR22_FREQPLAN_D_MT7620, 0); rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); /* Ksd setting * Ksd: R17<7:0> in rf->rf2 * R18<7:0> in rf->rf3 * R19<1:0> in rf->rf4 */ rfcsr = rt2800_rfcsr_read(rt2x00dev, 17); rfcsr = rf->rf2; rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 18); rfcsr = rf->rf3; rt2800_rfcsr_write(rt2x00dev, 18, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 19); rt2x00_set_field8(&rfcsr, RFCSR19_K, rf->rf4); rt2800_rfcsr_write(rt2x00dev, 19, rfcsr); /* Default: XO=20MHz , SDM mode */ rfcsr = rt2800_rfcsr_read(rt2x00dev, 16); rt2x00_set_field8(&rfcsr, RFCSR16_SDM_MODE_MT7620, 0x80); rt2800_rfcsr_write(rt2x00dev, 16, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 21); rt2x00_set_field8(&rfcsr, RFCSR21_BIT8, 1); rt2800_rfcsr_write(rt2x00dev, 21, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 1); rt2x00_set_field8(&rfcsr, RFCSR1_TX2_EN_MT7620, rt2x00dev->default_ant.tx_chain_num != 1); rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 2); rt2x00_set_field8(&rfcsr, RFCSR2_TX2_EN_MT7620, rt2x00dev->default_ant.tx_chain_num != 1); rt2x00_set_field8(&rfcsr, RFCSR2_RX2_EN_MT7620, rt2x00dev->default_ant.rx_chain_num != 1); rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 42); rt2x00_set_field8(&rfcsr, RFCSR42_TX2_EN_MT7620, rt2x00dev->default_ant.tx_chain_num != 1); rt2800_rfcsr_write(rt2x00dev, 42, rfcsr); /* RF for DC Cal BW */ if (conf_is_ht40(conf)) { rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x10); rt2800_rfcsr_write_dccal(rt2x00dev, 7, 0x10); rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x04); rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x10); rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x10); } else { rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x20); rt2800_rfcsr_write_dccal(rt2x00dev, 7, 0x20); rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x20); rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x20); } if (conf_is_ht40(conf)) { rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x08); rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x08); } else { rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x28); rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x28); } rfcsr = rt2800_rfcsr_read(rt2x00dev, 28); rt2x00_set_field8(&rfcsr, RFCSR28_CH11_HT40, conf_is_ht40(conf) && (rf->channel == 11)); rt2800_rfcsr_write(rt2x00dev, 28, rfcsr); if (!test_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags)) { if 
(conf_is_ht40(conf)) { rx_agc_fc = drv_data->rx_calibration_bw40; tx_agc_fc = drv_data->tx_calibration_bw40; } else { rx_agc_fc = drv_data->rx_calibration_bw20; tx_agc_fc = drv_data->tx_calibration_bw20; } rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 5, 6); rfcsr &= (~0x3F); rfcsr |= rx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rfcsr); rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 5, 7); rfcsr &= (~0x3F); rfcsr |= rx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rfcsr); rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 7, 6); rfcsr &= (~0x3F); rfcsr |= rx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 7, 6, rfcsr); rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 7, 7); rfcsr &= (~0x3F); rfcsr |= rx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 7, 7, rfcsr); rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 5, 58); rfcsr &= (~0x3F); rfcsr |= tx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rfcsr); rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 5, 59); rfcsr &= (~0x3F); rfcsr |= tx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rfcsr); rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 7, 58); rfcsr &= (~0x3F); rfcsr |= tx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 7, 58, rfcsr); rfcsr = rt2800_rfcsr_read_bank(rt2x00dev, 7, 59); rfcsr &= (~0x3F); rfcsr |= tx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 7, 59, rfcsr); } if (conf_is_ht40(conf)) { rt2800_bbp_glrt_write(rt2x00dev, 141, 0x10); rt2800_bbp_glrt_write(rt2x00dev, 157, 0x2f); } else { rt2800_bbp_glrt_write(rt2x00dev, 141, 0x1a); rt2800_bbp_glrt_write(rt2x00dev, 157, 0x40); } } static void rt2800_config_alc_rt6352(struct rt2x00_dev *rt2x00dev, struct ieee80211_channel *chan, int power_level) { int cur_channel = rt2x00dev->rf_channel; u16 eeprom, chan_power, rate_power, target_power; u16 tx_power[2]; s8 *power_group[2]; u32 mac_sys_ctrl; u32 cnt, reg; u8 bbp; if (WARN_ON(cur_channel < 1 || cur_channel > 14)) return; /* get per chain power, 2 chains in total, unit is 0.5dBm */ power_level = (power_level - 3) * 2; /* We can't get the accurate TX power. Based on some tests, the real * TX power is approximately equal to channel_power + (max)rate_power. * Usually max rate_power is the gain of the OFDM 6M rate. The antenna * gain and externel PA gain are not included as we are unable to * obtain these values. */ rate_power = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, 1); rate_power &= 0x3f; power_level -= rate_power; if (power_level < 1) power_level = 1; power_group[0] = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); power_group[1] = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); for (cnt = 0; cnt < 2; cnt++) { chan_power = power_group[cnt][cur_channel - 1]; if (chan_power >= 0x20 || chan_power == 0) chan_power = 0x10; tx_power[cnt] = power_level < chan_power ? 
power_level : chan_power; } reg = rt2800_register_read(rt2x00dev, TX_ALC_CFG_0); rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_0, tx_power[0]); rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_1, tx_power[1]); rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_0, 0x2f); rt2x00_set_field32(&reg, TX_ALC_CFG_0_LIMIT_1, 0x2f); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_INTERNAL_TX_ALC)) { /* init base power by eeprom target power */ target_power = rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_INIT); rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_0, target_power); rt2x00_set_field32(&reg, TX_ALC_CFG_0_CH_INIT_1, target_power); } rt2800_register_write(rt2x00dev, TX_ALC_CFG_0, reg); reg = rt2800_register_read(rt2x00dev, TX_ALC_CFG_1); rt2x00_set_field32(&reg, TX_ALC_CFG_1_TX_TEMP_COMP, 0); rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg); /* Save MAC SYS CTRL registers */ mac_sys_ctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); /* Disable Tx/Rx */ rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0); /* Check MAC Tx/Rx idle */ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY))) rt2x00_warn(rt2x00dev, "RF busy while configuring ALC\n"); if (chan->center_freq > 2457) { bbp = rt2800_bbp_read(rt2x00dev, 30); bbp = 0x40; rt2800_bbp_write(rt2x00dev, 30, bbp); rt2800_rfcsr_write(rt2x00dev, 39, 0); if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) rt2800_rfcsr_write(rt2x00dev, 42, 0xfb); else rt2800_rfcsr_write(rt2x00dev, 42, 0x7b); } else { bbp = rt2800_bbp_read(rt2x00dev, 30); bbp = 0x1f; rt2800_bbp_write(rt2x00dev, 30, bbp); rt2800_rfcsr_write(rt2x00dev, 39, 0x80); if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) rt2800_rfcsr_write(rt2x00dev, 42, 0xdb); else rt2800_rfcsr_write(rt2x00dev, 42, 0x5b); } rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, mac_sys_ctrl); rt2800_vco_calibration(rt2x00dev); } static void rt2800_bbp_write_with_rx_chain(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) { u8 chain, reg; for (chain = 0; chain < rt2x00dev->default_ant.rx_chain_num; chain++) { reg = rt2800_bbp_read(rt2x00dev, 27); rt2x00_set_field8(&reg, BBP27_RX_CHAIN_SEL, chain); rt2800_bbp_write(rt2x00dev, 27, reg); rt2800_bbp_write(rt2x00dev, word, value); } } static void rt2800_iq_calibrate(struct rt2x00_dev *rt2x00dev, int channel) { u8 cal; /* TX0 IQ Gain */ rt2800_bbp_write(rt2x00dev, 158, 0x2c); if (channel <= 14) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX0_2G); else if (channel >= 36 && channel <= 64) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX0_CH36_TO_CH64_5G); else if (channel >= 100 && channel <= 138) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX0_CH100_TO_CH138_5G); else if (channel >= 140 && channel <= 165) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX0_CH140_TO_CH165_5G); else cal = 0; rt2800_bbp_write(rt2x00dev, 159, cal); /* TX0 IQ Phase */ rt2800_bbp_write(rt2x00dev, 158, 0x2d); if (channel <= 14) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX0_2G); else if (channel >= 36 && channel <= 64) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX0_CH36_TO_CH64_5G); else if (channel >= 100 && channel <= 138) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX0_CH100_TO_CH138_5G); else if (channel >= 140 && channel <= 165) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX0_CH140_TO_CH165_5G); else cal = 0; rt2800_bbp_write(rt2x00dev, 159, cal); /* TX1 IQ Gain */ rt2800_bbp_write(rt2x00dev, 158, 0x4a); if (channel <= 14) cal 
= rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX1_2G); else if (channel >= 36 && channel <= 64) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX1_CH36_TO_CH64_5G); else if (channel >= 100 && channel <= 138) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX1_CH100_TO_CH138_5G); else if (channel >= 140 && channel <= 165) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_GAIN_CAL_TX1_CH140_TO_CH165_5G); else cal = 0; rt2800_bbp_write(rt2x00dev, 159, cal); /* TX1 IQ Phase */ rt2800_bbp_write(rt2x00dev, 158, 0x4b); if (channel <= 14) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX1_2G); else if (channel >= 36 && channel <= 64) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX1_CH36_TO_CH64_5G); else if (channel >= 100 && channel <= 138) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX1_CH100_TO_CH138_5G); else if (channel >= 140 && channel <= 165) cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_IQ_PHASE_CAL_TX1_CH140_TO_CH165_5G); else cal = 0; rt2800_bbp_write(rt2x00dev, 159, cal); /* FIXME: possible RX0, RX1 callibration ? */ /* RF IQ compensation control */ rt2800_bbp_write(rt2x00dev, 158, 0x04); cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_RF_IQ_COMPENSATION_CONTROL); rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0); /* RF IQ imbalance compensation control */ rt2800_bbp_write(rt2x00dev, 158, 0x03); cal = rt2x00_eeprom_byte(rt2x00dev, EEPROM_RF_IQ_IMBALANCE_COMPENSATION_CONTROL); rt2800_bbp_write(rt2x00dev, 159, cal != 0xff ? cal : 0); } static s8 rt2800_txpower_to_dev(struct rt2x00_dev *rt2x00dev, unsigned int channel, s8 txpower) { if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) txpower = rt2x00_get_field8(txpower, EEPROM_TXPOWER_ALC); if (channel <= 14) return clamp_t(s8, txpower, MIN_G_TXPOWER, MAX_G_TXPOWER); if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) return clamp_t(s8, txpower, MIN_A_TXPOWER_3593, MAX_A_TXPOWER_3593); else return clamp_t(s8, txpower, MIN_A_TXPOWER, MAX_A_TXPOWER); } static void rt3883_bbp_adjust(struct rt2x00_dev *rt2x00dev, struct rf_channel *rf) { u8 bbp; bbp = (rf->channel > 14) ? 
0x48 : 0x38; rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, bbp); rt2800_bbp_write(rt2x00dev, 69, 0x12); if (rf->channel <= 14) { rt2800_bbp_write(rt2x00dev, 70, 0x0a); } else { /* Disable CCK packet detection */ rt2800_bbp_write(rt2x00dev, 70, 0x00); } rt2800_bbp_write(rt2x00dev, 73, 0x10); if (rf->channel > 14) { rt2800_bbp_write(rt2x00dev, 62, 0x1d); rt2800_bbp_write(rt2x00dev, 63, 0x1d); rt2800_bbp_write(rt2x00dev, 64, 0x1d); } else { rt2800_bbp_write(rt2x00dev, 62, 0x2d); rt2800_bbp_write(rt2x00dev, 63, 0x2d); rt2800_bbp_write(rt2x00dev, 64, 0x2d); } } static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, struct rf_channel *rf, struct channel_info *info) { u32 reg; u32 tx_pin; u8 bbp, rfcsr; info->default_power1 = rt2800_txpower_to_dev(rt2x00dev, rf->channel, info->default_power1); info->default_power2 = rt2800_txpower_to_dev(rt2x00dev, rf->channel, info->default_power2); if (rt2x00dev->default_ant.tx_chain_num > 2) info->default_power3 = rt2800_txpower_to_dev(rt2x00dev, rf->channel, info->default_power3); switch (rt2x00dev->chip.rt) { case RT3883: rt3883_bbp_adjust(rt2x00dev, rf); break; } switch (rt2x00dev->chip.rf) { case RF2020: case RF3020: case RF3021: case RF3022: case RF3320: rt2800_config_channel_rf3xxx(rt2x00dev, conf, rf, info); break; case RF3052: rt2800_config_channel_rf3052(rt2x00dev, conf, rf, info); break; case RF3053: rt2800_config_channel_rf3053(rt2x00dev, conf, rf, info); break; case RF3290: rt2800_config_channel_rf3290(rt2x00dev, conf, rf, info); break; case RF3322: rt2800_config_channel_rf3322(rt2x00dev, conf, rf, info); break; case RF3853: rt2800_config_channel_rf3853(rt2x00dev, conf, rf, info); break; case RF3070: case RF5350: case RF5360: case RF5362: case RF5370: case RF5372: case RF5390: case RF5392: rt2800_config_channel_rf53xx(rt2x00dev, conf, rf, info); break; case RF5592: rt2800_config_channel_rf55xx(rt2x00dev, conf, rf, info); break; case RF7620: rt2800_config_channel_rf7620(rt2x00dev, conf, rf, info); break; default: rt2800_config_channel_rf2xxx(rt2x00dev, conf, rf, info); } if (rt2x00_rf(rt2x00dev, RF3070) || rt2x00_rf(rt2x00dev, RF3290) || rt2x00_rf(rt2x00dev, RF3322) || rt2x00_rf(rt2x00dev, RF5350) || rt2x00_rf(rt2x00dev, RF5360) || rt2x00_rf(rt2x00dev, RF5362) || rt2x00_rf(rt2x00dev, RF5370) || rt2x00_rf(rt2x00dev, RF5372) || rt2x00_rf(rt2x00dev, RF5390) || rt2x00_rf(rt2x00dev, RF5392)) { rfcsr = rt2800_rfcsr_read(rt2x00dev, 30); if (rt2x00_rf(rt2x00dev, RF3322)) { rt2x00_set_field8(&rfcsr, RF3322_RFCSR30_TX_H20M, conf_is_ht40(conf)); rt2x00_set_field8(&rfcsr, RF3322_RFCSR30_RX_H20M, conf_is_ht40(conf)); } else { rt2x00_set_field8(&rfcsr, RFCSR30_TX_H20M, conf_is_ht40(conf)); rt2x00_set_field8(&rfcsr, RFCSR30_RX_H20M, conf_is_ht40(conf)); } rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 3); rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); } /* * Change BBP settings */ if (rt2x00_rt(rt2x00dev, RT3352)) { rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 27, 0x0); rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 27, 0x20); rt2800_bbp_write(rt2x00dev, 66, 0x26 + rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 86, 0x38); rt2800_bbp_write(rt2x00dev, 83, 0x6a); } else if (rt2x00_rt(rt2x00dev, RT3593)) { if (rf->channel > 14) { /* Disable CCK Packet 
detection on 5GHz */ rt2800_bbp_write(rt2x00dev, 70, 0x00); } else { rt2800_bbp_write(rt2x00dev, 70, 0x0a); } if (conf_is_ht40(conf)) rt2800_bbp_write(rt2x00dev, 105, 0x04); else rt2800_bbp_write(rt2x00dev, 105, 0x34); rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 77, 0x98); } else if (rt2x00_rt(rt2x00dev, RT3883)) { rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); if (rt2x00dev->default_ant.rx_chain_num > 1) rt2800_bbp_write(rt2x00dev, 86, 0x46); else rt2800_bbp_write(rt2x00dev, 86, 0); } else { rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain); rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain); if (rt2x00_rt(rt2x00dev, RT6352)) rt2800_bbp_write(rt2x00dev, 86, 0x38); else rt2800_bbp_write(rt2x00dev, 86, 0); } if (rf->channel <= 14) { if (!rt2x00_rt(rt2x00dev, RT5390) && !rt2x00_rt(rt2x00dev, RT5392) && !rt2x00_rt(rt2x00dev, RT6352)) { if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 75, 0x46); } else { if (rt2x00_rt(rt2x00dev, RT3593)) rt2800_bbp_write(rt2x00dev, 82, 0x62); else rt2800_bbp_write(rt2x00dev, 82, 0x84); rt2800_bbp_write(rt2x00dev, 75, 0x50); } if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) rt2800_bbp_write(rt2x00dev, 83, 0x8a); } } else { if (rt2x00_rt(rt2x00dev, RT3572)) rt2800_bbp_write(rt2x00dev, 82, 0x94); else if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) rt2800_bbp_write(rt2x00dev, 82, 0x82); else if (!rt2x00_rt(rt2x00dev, RT6352)) rt2800_bbp_write(rt2x00dev, 82, 0xf2); if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) rt2800_bbp_write(rt2x00dev, 83, 0x9a); if (rt2x00_has_cap_external_lna_a(rt2x00dev)) rt2800_bbp_write(rt2x00dev, 75, 0x46); else rt2800_bbp_write(rt2x00dev, 75, 0x50); } reg = rt2800_register_read(rt2x00dev, TX_BAND_CFG); rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_MINUS, conf_is_ht40_minus(conf)); rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14); rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14); rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg); if (rt2x00_rt(rt2x00dev, RT3572)) rt2800_rfcsr_write(rt2x00dev, 8, 0); if (rt2x00_rt(rt2x00dev, RT6352)) { tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG); rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFRX_EN, 1); } else { tx_pin = 0; } switch (rt2x00dev->default_ant.tx_chain_num) { case 3: /* Turn on tertiary PAs */ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A2_EN, rf->channel > 14); rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN, rf->channel <= 14); fallthrough; case 2: /* Turn on secondary PAs */ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, rf->channel > 14); rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, rf->channel <= 14); fallthrough; case 1: /* Turn on primary PAs */ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14); if (rt2x00_has_cap_bt_coexist(rt2x00dev)) rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1); else rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14); break; } switch (rt2x00dev->default_ant.rx_chain_num) { case 3: /* Turn on tertiary LNAs */ rt2x00_set_field32(&tx_pin, 
TX_PIN_CFG_LNA_PE_A2_EN, 1); rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G2_EN, 1); fallthrough; case 2: /* Turn on secondary LNAs */ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1); rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1); fallthrough; case 1: /* Turn on primary LNAs */ rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1); rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1); break; } rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1); rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1); rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin); if (rt2x00_rt(rt2x00dev, RT3572)) { rt2800_rfcsr_write(rt2x00dev, 8, 0x80); /* AGC init */ if (rf->channel <= 14) reg = 0x1c + (2 * rt2x00dev->lna_gain); else reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3); rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg); } if (rt2x00_rt(rt2x00dev, RT3593)) { reg = rt2800_register_read(rt2x00dev, GPIO_CTRL); /* Band selection */ if (rt2x00_is_usb(rt2x00dev) || rt2x00_is_pcie(rt2x00dev)) { /* GPIO #8 controls all paths */ rt2x00_set_field32(&reg, GPIO_CTRL_DIR8, 0); if (rf->channel <= 14) rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 1); else rt2x00_set_field32(&reg, GPIO_CTRL_VAL8, 0); } /* LNA PE control. */ if (rt2x00_is_usb(rt2x00dev)) { /* GPIO #4 controls PE0 and PE1, * GPIO #7 controls PE2 */ rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0); rt2x00_set_field32(&reg, GPIO_CTRL_DIR7, 0); rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1); rt2x00_set_field32(&reg, GPIO_CTRL_VAL7, 1); } else if (rt2x00_is_pcie(rt2x00dev)) { /* GPIO #4 controls PE0, PE1 and PE2 */ rt2x00_set_field32(&reg, GPIO_CTRL_DIR4, 0); rt2x00_set_field32(&reg, GPIO_CTRL_VAL4, 1); } rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); /* AGC init */ if (rf->channel <= 14) reg = 0x1c + 2 * rt2x00dev->lna_gain; else reg = 0x22 + ((rt2x00dev->lna_gain * 5) / 3); rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg); usleep_range(1000, 1500); } if (rt2x00_rt(rt2x00dev, RT3883)) { if (!conf_is_ht40(conf)) rt2800_bbp_write(rt2x00dev, 105, 0x34); else rt2800_bbp_write(rt2x00dev, 105, 0x04); /* AGC init */ if (rf->channel <= 14) reg = 0x2e + rt2x00dev->lna_gain; else reg = 0x20 + ((rt2x00dev->lna_gain * 5) / 3); rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg); usleep_range(1000, 1500); } if (rt2x00_rt(rt2x00dev, RT5592) || rt2x00_rt(rt2x00dev, RT6352)) { reg = 0x10; if (!conf_is_ht40(conf)) { if (rt2x00_rt(rt2x00dev, RT6352) && rt2x00_has_cap_external_lna_bg(rt2x00dev)) { reg |= 0x5; } else { reg |= 0xa; } } rt2800_bbp_write(rt2x00dev, 195, 141); rt2800_bbp_write(rt2x00dev, 196, reg); /* AGC init. * Despite the vendor driver using different values here for * RT6352 chip, we use 0x1c for now. This may have to be changed * once TSSI got implemented. */ reg = (rf->channel <= 14 ? 
0x1c : 0x24) + 2*rt2x00dev->lna_gain; rt2800_bbp_write_with_rx_chain(rt2x00dev, 66, reg); if (rt2x00_rt(rt2x00dev, RT5592)) rt2800_iq_calibrate(rt2x00dev, rf->channel); } if (rt2x00_rt(rt2x00dev, RT6352)) { if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) { reg = rt2800_register_read(rt2x00dev, RF_CONTROL3); reg |= 0x00000101; rt2800_register_write(rt2x00dev, RF_CONTROL3, reg); reg = rt2800_register_read(rt2x00dev, RF_BYPASS3); reg |= 0x00000101; rt2800_register_write(rt2x00dev, RF_BYPASS3, reg); rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0x73); rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0x73); rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0x73); rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27); rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0xC8); rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xA4); rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x05); rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27); rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xC8); rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xA4); rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x05); rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x27); rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0xC8); rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xA4); rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x05); rt2800_rfcsr_write_dccal(rt2x00dev, 05, 0x00); rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT, 0x36303636); rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6C6C6B6C); rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN, 0x6C6C6B6C); } } bbp = rt2800_bbp_read(rt2x00dev, 4); rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf)); rt2800_bbp_write(rt2x00dev, 4, bbp); bbp = rt2800_bbp_read(rt2x00dev, 3); rt2x00_set_field8(&bbp, BBP3_HT40_MINUS, conf_is_ht40_minus(conf)); rt2800_bbp_write(rt2x00dev, 3, bbp); if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { if (conf_is_ht40(conf)) { rt2800_bbp_write(rt2x00dev, 69, 0x1a); rt2800_bbp_write(rt2x00dev, 70, 0x0a); rt2800_bbp_write(rt2x00dev, 73, 0x16); } else { rt2800_bbp_write(rt2x00dev, 69, 0x16); rt2800_bbp_write(rt2x00dev, 70, 0x08); rt2800_bbp_write(rt2x00dev, 73, 0x11); } } usleep_range(1000, 1500); /* * Clear channel statistic counters */ reg = rt2800_register_read(rt2x00dev, CH_IDLE_STA); reg = rt2800_register_read(rt2x00dev, CH_BUSY_STA); reg = rt2800_register_read(rt2x00dev, CH_BUSY_STA_SEC); /* * Clear update flag */ if (rt2x00_rt(rt2x00dev, RT3352) || rt2x00_rt(rt2x00dev, RT5350)) { bbp = rt2800_bbp_read(rt2x00dev, 49); rt2x00_set_field8(&bbp, BBP49_UPDATE_FLAG, 0); rt2800_bbp_write(rt2x00dev, 49, bbp); } } static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev) { u8 tssi_bounds[9]; u8 current_tssi; u16 eeprom; u8 step; int i; /* * First check if temperature compensation is supported. */ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC)) return 0; /* * Read TSSI boundaries for temperature compensation from * the EEPROM. 
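	 *
	 * For example, with the example bounds below and step = 1, a TSSI
	 * reading of 0xE0 falls between 0xF0 and 0xD0 (array index 1) and
	 * therefore yields a delta of (1 - 4) * step = -3, lowering the TX
	 * power, while readings below 0x45 yield positive deltas that raise
	 * it.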
* * Array idx 0 1 2 3 4 5 6 7 8 * Matching Delta value -4 -3 -2 -1 0 +1 +2 +3 +4 * Example TSSI bounds 0xF0 0xD0 0xB5 0xA0 0x88 0x45 0x25 0x15 0x00 */ if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG1); tssi_bounds[0] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG1_MINUS4); tssi_bounds[1] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG1_MINUS3); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG2); tssi_bounds[2] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG2_MINUS2); tssi_bounds[3] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG2_MINUS1); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG3); tssi_bounds[4] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG3_REF); tssi_bounds[5] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG3_PLUS1); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG4); tssi_bounds[6] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG4_PLUS2); tssi_bounds[7] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG4_PLUS3); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_BG5); tssi_bounds[8] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG5_PLUS4); step = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_BG5_AGC_STEP); } else { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A1); tssi_bounds[0] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A1_MINUS4); tssi_bounds[1] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A1_MINUS3); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A2); tssi_bounds[2] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A2_MINUS2); tssi_bounds[3] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A2_MINUS1); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A3); tssi_bounds[4] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A3_REF); tssi_bounds[5] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A3_PLUS1); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A4); tssi_bounds[6] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A4_PLUS2); tssi_bounds[7] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A4_PLUS3); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TSSI_BOUND_A5); tssi_bounds[8] = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A5_PLUS4); step = rt2x00_get_field16(eeprom, EEPROM_TSSI_BOUND_A5_AGC_STEP); } /* * Check if temperature compensation is supported. */ if (tssi_bounds[4] == 0xff || step == 0xff) return 0; /* * Read current TSSI (BBP 49). */ current_tssi = rt2800_bbp_read(rt2x00dev, 49); /* * Compare TSSI value (BBP49) with the compensation boundaries * from the EEPROM and increase or decrease tx power. */ for (i = 0; i <= 3; i++) { if (current_tssi > tssi_bounds[i]) break; } if (i == 4) { for (i = 8; i >= 5; i--) { if (current_tssi < tssi_bounds[i]) break; } } return (i - 4) * step; } static int rt2800_get_txpower_bw_comp(struct rt2x00_dev *rt2x00dev, enum nl80211_band band) { u16 eeprom; u8 comp_en; u8 comp_type; int comp_value = 0; eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_TXPOWER_DELTA); /* * HT40 compensation not required. 
	 */
	if (eeprom == 0xffff ||
	    !test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
		return 0;

	if (band == NL80211_BAND_2GHZ) {
		comp_en = rt2x00_get_field16(eeprom,
				 EEPROM_TXPOWER_DELTA_ENABLE_2G);
		if (comp_en) {
			comp_type = rt2x00_get_field16(eeprom,
					   EEPROM_TXPOWER_DELTA_TYPE_2G);
			comp_value = rt2x00_get_field16(eeprom,
					    EEPROM_TXPOWER_DELTA_VALUE_2G);
			if (!comp_type)
				comp_value = -comp_value;
		}
	} else {
		comp_en = rt2x00_get_field16(eeprom,
				 EEPROM_TXPOWER_DELTA_ENABLE_5G);
		if (comp_en) {
			comp_type = rt2x00_get_field16(eeprom,
					   EEPROM_TXPOWER_DELTA_TYPE_5G);
			comp_value = rt2x00_get_field16(eeprom,
					    EEPROM_TXPOWER_DELTA_VALUE_5G);
			if (!comp_type)
				comp_value = -comp_value;
		}
	}

	return comp_value;
}

static int rt2800_get_txpower_reg_delta(struct rt2x00_dev *rt2x00dev,
					int power_level, int max_power)
{
	int delta;

	if (rt2x00_has_cap_power_limit(rt2x00dev))
		return 0;

	/*
	 * XXX: We don't know the maximum transmit power of our hardware since
	 * the EEPROM doesn't expose it. We only know that we are calibrated
	 * to 100% tx power.
	 *
	 * Hence, we assume the regulatory limit that cfg80211 calculated for
	 * the current channel is our maximum and if we are requested to lower
	 * the value we just reduce our tx power accordingly.
	 */
	delta = power_level - max_power;
	return min(delta, 0);
}

static u8 rt2800_compensate_txpower(struct rt2x00_dev *rt2x00dev, int is_rate_b,
				    enum nl80211_band band, int power_level,
				    u8 txpower, int delta)
{
	u16 eeprom;
	u8 criterion;
	u8 eirp_txpower;
	u8 eirp_txpower_criterion;
	u8 reg_limit;

	if (rt2x00_rt(rt2x00dev, RT3593))
		return min_t(u8, txpower, 0xc);

	if (rt2x00_rt(rt2x00dev, RT3883))
		return min_t(u8, txpower, 0xf);

	if (rt2x00_has_cap_power_limit(rt2x00dev)) {
		/*
		 * Check if the EIRP TX power exceeds txpower_limit.
		 * We use OFDM 6M as the criterion and its EIRP TX power
		 * is stored at EEPROM_EIRP_MAX_TX_POWER.
		 * 802.11b data rates need an additional 4 dBm
		 * when calculating the EIRP TX power.
		 */
		eeprom = rt2800_eeprom_read_from_array(rt2x00dev,
						       EEPROM_TXPOWER_BYRATE,
						       1);
		criterion = rt2x00_get_field16(eeprom,
					       EEPROM_TXPOWER_BYRATE_RATE0);

		eeprom = rt2800_eeprom_read(rt2x00dev,
					    EEPROM_EIRP_MAX_TX_POWER);

		if (band == NL80211_BAND_2GHZ)
			eirp_txpower_criterion = rt2x00_get_field16(eeprom,
						 EEPROM_EIRP_MAX_TX_POWER_2GHZ);
		else
			eirp_txpower_criterion = rt2x00_get_field16(eeprom,
						 EEPROM_EIRP_MAX_TX_POWER_5GHZ);

		eirp_txpower = eirp_txpower_criterion + (txpower - criterion) +
			       (is_rate_b ? 4 : 0) + delta;

		reg_limit = (eirp_txpower > power_level) ?
(eirp_txpower - power_level) : 0; } else reg_limit = 0; txpower = max(0, txpower + delta - reg_limit); return min_t(u8, txpower, 0xc); } enum { TX_PWR_CFG_0_IDX, TX_PWR_CFG_1_IDX, TX_PWR_CFG_2_IDX, TX_PWR_CFG_3_IDX, TX_PWR_CFG_4_IDX, TX_PWR_CFG_5_IDX, TX_PWR_CFG_6_IDX, TX_PWR_CFG_7_IDX, TX_PWR_CFG_8_IDX, TX_PWR_CFG_9_IDX, TX_PWR_CFG_0_EXT_IDX, TX_PWR_CFG_1_EXT_IDX, TX_PWR_CFG_2_EXT_IDX, TX_PWR_CFG_3_EXT_IDX, TX_PWR_CFG_4_EXT_IDX, TX_PWR_CFG_IDX_COUNT, }; static void rt2800_config_txpower_rt3593(struct rt2x00_dev *rt2x00dev, struct ieee80211_channel *chan, int power_level) { u8 txpower; u16 eeprom; u32 regs[TX_PWR_CFG_IDX_COUNT]; unsigned int offset; enum nl80211_band band = chan->band; int delta; int i; memset(regs, '\0', sizeof(regs)); /* TODO: adapt TX power reduction from the rt28xx code */ /* calculate temperature compensation delta */ delta = rt2800_get_gain_calibration_delta(rt2x00dev); if (band == NL80211_BAND_5GHZ) offset = 16; else offset = 0; if (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) offset += 8; /* read the next four txpower values */ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, offset); /* CCK 1MBS,2MBS */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX], TX_PWR_CFG_0_CCK1_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX], TX_PWR_CFG_0_CCK1_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX], TX_PWR_CFG_0_EXT_CCK1_CH2, txpower); /* CCK 5.5MBS,11MBS */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); txpower = rt2800_compensate_txpower(rt2x00dev, 1, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX], TX_PWR_CFG_0_CCK5_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX], TX_PWR_CFG_0_CCK5_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX], TX_PWR_CFG_0_EXT_CCK5_CH2, txpower); /* OFDM 6MBS,9MBS */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX], TX_PWR_CFG_0_OFDM6_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX], TX_PWR_CFG_0_OFDM6_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX], TX_PWR_CFG_0_EXT_OFDM6_CH2, txpower); /* OFDM 12MBS,18MBS */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX], TX_PWR_CFG_0_OFDM12_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_0_IDX], TX_PWR_CFG_0_OFDM12_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_0_EXT_IDX], TX_PWR_CFG_0_EXT_OFDM12_CH2, txpower); /* read the next four txpower values */ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, offset + 1); /* OFDM 24MBS,36MBS */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX], TX_PWR_CFG_1_OFDM24_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX], TX_PWR_CFG_1_OFDM24_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX], TX_PWR_CFG_1_EXT_OFDM24_CH2, txpower); /* OFDM 48MBS */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); 
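	/*
	 * Each compensated per-rate value is written once per TX chain: the
	 * _CH0/_CH1 fields live in the regular TX_PWR_CFG_x registers, while
	 * the third chain uses the _CH2 field of the matching
	 * TX_PWR_CFG_x_EXT register (a few rates, e.g. OFDM 54MBS and MCS 7,
	 * carry all three chains in TX_PWR_CFG_7..9 instead).
	 */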
rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX], TX_PWR_CFG_1_OFDM48_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX], TX_PWR_CFG_1_OFDM48_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX], TX_PWR_CFG_1_EXT_OFDM48_CH2, txpower); /* OFDM 54MBS */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX], TX_PWR_CFG_7_OFDM54_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX], TX_PWR_CFG_7_OFDM54_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX], TX_PWR_CFG_7_OFDM54_CH2, txpower); /* read the next four txpower values */ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, offset + 2); /* MCS 0,1 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX], TX_PWR_CFG_1_MCS0_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX], TX_PWR_CFG_1_MCS0_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX], TX_PWR_CFG_1_EXT_MCS0_CH2, txpower); /* MCS 2,3 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX], TX_PWR_CFG_1_MCS2_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_1_IDX], TX_PWR_CFG_1_MCS2_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_1_EXT_IDX], TX_PWR_CFG_1_EXT_MCS2_CH2, txpower); /* MCS 4,5 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX], TX_PWR_CFG_2_MCS4_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX], TX_PWR_CFG_2_MCS4_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX], TX_PWR_CFG_2_EXT_MCS4_CH2, txpower); /* MCS 6 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX], TX_PWR_CFG_2_MCS6_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX], TX_PWR_CFG_2_MCS6_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX], TX_PWR_CFG_2_EXT_MCS6_CH2, txpower); /* read the next four txpower values */ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, offset + 3); /* MCS 7 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX], TX_PWR_CFG_7_MCS7_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX], TX_PWR_CFG_7_MCS7_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_7_IDX], TX_PWR_CFG_7_MCS7_CH2, txpower); /* MCS 8,9 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX], TX_PWR_CFG_2_MCS8_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX], TX_PWR_CFG_2_MCS8_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX], TX_PWR_CFG_2_EXT_MCS8_CH2, txpower); /* MCS 10,11 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX], 
TX_PWR_CFG_2_MCS10_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_2_IDX], TX_PWR_CFG_2_MCS10_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_2_EXT_IDX], TX_PWR_CFG_2_EXT_MCS10_CH2, txpower); /* MCS 12,13 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX], TX_PWR_CFG_3_MCS12_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX], TX_PWR_CFG_3_MCS12_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX], TX_PWR_CFG_3_EXT_MCS12_CH2, txpower); /* read the next four txpower values */ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, offset + 4); /* MCS 14 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX], TX_PWR_CFG_3_MCS14_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX], TX_PWR_CFG_3_MCS14_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX], TX_PWR_CFG_3_EXT_MCS14_CH2, txpower); /* MCS 15 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX], TX_PWR_CFG_8_MCS15_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX], TX_PWR_CFG_8_MCS15_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX], TX_PWR_CFG_8_MCS15_CH2, txpower); /* MCS 16,17 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX], TX_PWR_CFG_5_MCS16_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX], TX_PWR_CFG_5_MCS16_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX], TX_PWR_CFG_5_MCS16_CH2, txpower); /* MCS 18,19 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX], TX_PWR_CFG_5_MCS18_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX], TX_PWR_CFG_5_MCS18_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_5_IDX], TX_PWR_CFG_5_MCS18_CH2, txpower); /* read the next four txpower values */ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, offset + 5); /* MCS 20,21 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX], TX_PWR_CFG_6_MCS20_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX], TX_PWR_CFG_6_MCS20_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX], TX_PWR_CFG_6_MCS20_CH2, txpower); /* MCS 22 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX], TX_PWR_CFG_6_MCS22_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX], TX_PWR_CFG_6_MCS22_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_6_IDX], TX_PWR_CFG_6_MCS22_CH2, txpower); /* MCS 23 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX], TX_PWR_CFG_8_MCS23_CH0, txpower); 
rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX], TX_PWR_CFG_8_MCS23_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_8_IDX], TX_PWR_CFG_8_MCS23_CH2, txpower); /* read the next four txpower values */ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, offset + 6); /* STBC, MCS 0,1 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX], TX_PWR_CFG_3_STBC0_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX], TX_PWR_CFG_3_STBC0_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX], TX_PWR_CFG_3_EXT_STBC0_CH2, txpower); /* STBC, MCS 2,3 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX], TX_PWR_CFG_3_STBC2_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_3_IDX], TX_PWR_CFG_3_STBC2_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_3_EXT_IDX], TX_PWR_CFG_3_EXT_STBC2_CH2, txpower); /* STBC, MCS 4,5 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE0, txpower); /* STBC, MCS 6 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE2, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_4_IDX], TX_PWR_CFG_RATE3, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_4_EXT_IDX], TX_PWR_CFG_RATE2, txpower); /* read the next four txpower values */ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, offset + 7); /* STBC, MCS 7 */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); txpower = rt2800_compensate_txpower(rt2x00dev, 0, band, power_level, txpower, delta); rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX], TX_PWR_CFG_9_STBC7_CH0, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX], TX_PWR_CFG_9_STBC7_CH1, txpower); rt2x00_set_field32(&regs[TX_PWR_CFG_9_IDX], TX_PWR_CFG_9_STBC7_CH2, txpower); rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, regs[TX_PWR_CFG_0_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, regs[TX_PWR_CFG_1_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, regs[TX_PWR_CFG_2_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, regs[TX_PWR_CFG_3_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, regs[TX_PWR_CFG_4_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_5, regs[TX_PWR_CFG_5_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_6, regs[TX_PWR_CFG_6_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, regs[TX_PWR_CFG_7_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_8, regs[TX_PWR_CFG_8_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, regs[TX_PWR_CFG_9_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_0_EXT, regs[TX_PWR_CFG_0_EXT_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_1_EXT, regs[TX_PWR_CFG_1_EXT_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_2_EXT, regs[TX_PWR_CFG_2_EXT_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_3_EXT, regs[TX_PWR_CFG_3_EXT_IDX]); rt2800_register_write(rt2x00dev, TX_PWR_CFG_4_EXT, regs[TX_PWR_CFG_4_EXT_IDX]); for (i 
= 0; i < TX_PWR_CFG_IDX_COUNT; i++)
		rt2x00_dbg(rt2x00dev,
			   "band:%cGHz, BW:%c0MHz, TX_PWR_CFG_%d%s = %08lx\n",
			   (band == NL80211_BAND_5GHZ) ? '5' : '2',
			   (test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) ?
			   '4' : '2',
			   (i > TX_PWR_CFG_9_IDX) ?
			   (i - TX_PWR_CFG_9_IDX - 1) : i,
			   (i > TX_PWR_CFG_9_IDX) ? "_EXT" : "",
			   (unsigned long) regs[i]);
}

static void rt2800_config_txpower_rt6352(struct rt2x00_dev *rt2x00dev,
					 struct ieee80211_channel *chan,
					 int power_level)
{
	u32 reg, pwreg;
	u16 eeprom;
	u32 data, gdata;
	u8 t, i;
	enum nl80211_band band = chan->band;
	int delta;

	/* Warn user if bw_comp is set in EEPROM */
	delta = rt2800_get_txpower_bw_comp(rt2x00dev, band);
	if (delta)
		rt2x00_warn(rt2x00dev, "ignoring EEPROM HT40 power delta: %d\n",
			    delta);

	/*
	 * Populate TX_PWR_CFG_0 up to TX_PWR_CFG_4 from the EEPROM for HT20,
	 * limit the value to 0x3f and replace 0x20 by 0x21 as this is what
	 * the vendor driver does as well, though it looks kinda wrong.
	 * Maybe some misunderstanding of what a signed 8-bit value is? Maybe
	 * the hardware has a problem handling 0x20, and as the code initially
	 * used a fixed offset between HT20 and HT40 rates they had to
	 * workaround that issue and most likely just forgot about it later on.
	 * Maybe we should use rt2800_get_txpower_bw_comp() here as well,
	 * however, the corresponding EEPROM value is not respected by the
	 * vendor driver, so maybe this is rather being taken care of by the
	 * TXALC and the driver doesn't need to handle it...?
	 * Though this is all very awkward, just do as they did, as that's what
	 * board vendors expected when they populated the EEPROM...
	 */
	for (i = 0; i < 5; i++) {
		eeprom = rt2800_eeprom_read_from_array(rt2x00dev,
						       EEPROM_TXPOWER_BYRATE,
						       i * 2);

		data = eeprom;

		t = eeprom & 0x3f;
		if (t == 32)
			t++;

		gdata = t;

		t = (eeprom & 0x3f00) >> 8;
		if (t == 32)
			t++;

		gdata |= (t << 8);

		eeprom = rt2800_eeprom_read_from_array(rt2x00dev,
						       EEPROM_TXPOWER_BYRATE,
						       (i * 2) + 1);

		t = eeprom & 0x3f;
		if (t == 32)
			t++;

		gdata |= (t << 16);

		t = (eeprom & 0x3f00) >> 8;
		if (t == 32)
			t++;

		gdata |= (t << 24);
		data |= (eeprom << 16);

		if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)) {
			/* HT20 */
			if (data != 0xffffffff)
				rt2800_register_write(rt2x00dev,
						      TX_PWR_CFG_0 + (i * 4),
						      data);
		} else {
			/* HT40 */
			if (gdata != 0xffffffff)
				rt2800_register_write(rt2x00dev,
						      TX_PWR_CFG_0 + (i * 4),
						      gdata);
		}
	}

	/*
	 * Apparently Ralink ran out of space in the BYRATE calibration section
	 * of the EEPROM which is copied to the corresponding TX_PWR_CFG_x
	 * registers. As recent 2T chips use 8-bit instead of 4-bit values for
	 * power offsets, more space would be needed. Ralink decided to keep
	 * the EEPROM layout untouched and rather have some shared values
	 * covering multiple bitrates.
	 * Populate the registers not covered by the EEPROM in the same way the
	 * vendor driver does.
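	 * For example, the OFDM 54MBS value below is copied from OFDM 48MBS,
	 * MCS 7 from MCS 6, MCS 15 from MCS 14 and STBC MCS 7 from
	 * STBC MCS 6.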
	 */

	/* For OFDM 54MBS use value from OFDM 48MBS */
	pwreg = 0;
	reg = rt2800_register_read(rt2x00dev, TX_PWR_CFG_1);
	t = rt2x00_get_field32(reg, TX_PWR_CFG_1B_48MBS);
	rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_54MBS, t);

	/* For MCS 7 use value from MCS 6 */
	reg = rt2800_register_read(rt2x00dev, TX_PWR_CFG_2);
	t = rt2x00_get_field32(reg, TX_PWR_CFG_2B_MCS6_MCS7);
	rt2x00_set_field32(&pwreg, TX_PWR_CFG_7B_MCS7, t);
	rt2800_register_write(rt2x00dev, TX_PWR_CFG_7, pwreg);

	/* For MCS 15 use value from MCS 14 */
	pwreg = 0;
	reg = rt2800_register_read(rt2x00dev, TX_PWR_CFG_3);
	t = rt2x00_get_field32(reg, TX_PWR_CFG_3B_MCS14);
	rt2x00_set_field32(&pwreg, TX_PWR_CFG_8B_MCS15, t);
	rt2800_register_write(rt2x00dev, TX_PWR_CFG_8, pwreg);

	/* For STBC MCS 7 use value from STBC MCS 6 */
	pwreg = 0;
	reg = rt2800_register_read(rt2x00dev, TX_PWR_CFG_4);
	t = rt2x00_get_field32(reg, TX_PWR_CFG_4B_STBC_MCS6);
	rt2x00_set_field32(&pwreg, TX_PWR_CFG_9B_STBC_MCS7, t);
	rt2800_register_write(rt2x00dev, TX_PWR_CFG_9, pwreg);

	rt2800_config_alc_rt6352(rt2x00dev, chan, power_level);

	/* TODO: temperature compensation code! */
}

/*
 * We configure transmit power using the MAC TX_PWR_CFG_{0,...,N} registers and
 * the BBP R1 register. The TX_PWR_CFG_X registers allow per-rate TX power
 * values to be configured, 4 bits for each rate (tunable from 0 to 15 dBm).
 * BBP_R1 controls transmit power for all rates, but allows only 4 discrete
 * values: -12, -6, 0 and 6 dBm. Reference per-rate transmit power values are
 * located in the EEPROM at the EEPROM_TXPOWER_BYRATE offset. We adjust them
 * and the BBP R1 settings according to current conditions (i.e. band,
 * bandwidth, temperature, user settings).
 */
static void rt2800_config_txpower_rt28xx(struct rt2x00_dev *rt2x00dev,
					 struct ieee80211_channel *chan,
					 int power_level)
{
	u8 txpower, r1;
	u16 eeprom;
	u32 reg, offset;
	int i, is_rate_b, delta, power_ctrl;
	enum nl80211_band band = chan->band;

	/*
	 * Calculate HT40 compensation. For 40MHz we need to add or subtract
	 * the value read from the EEPROM (different for 2GHz and for 5GHz).
	 */
	delta = rt2800_get_txpower_bw_comp(rt2x00dev, band);

	/*
	 * Calculate temperature compensation. Depending on the measurement of
	 * the current TSSI (Transmitter Signal Strength Indication) we know
	 * whether the TX power (due to temperature or maybe other factors) is
	 * smaller or bigger than expected. We adjust it, based on the TSSI
	 * reference and boundary values provided in the EEPROM.
	 */
	switch (rt2x00dev->chip.rt) {
	case RT2860:
	case RT2872:
	case RT2883:
	case RT3070:
	case RT3071:
	case RT3090:
	case RT3572:
		delta += rt2800_get_gain_calibration_delta(rt2x00dev);
		break;
	default:
		/* TODO: temperature compensation code for other chips. */
		break;
	}

	/*
	 * Decrease power according to user settings on devices with an unknown
	 * maximum tx power. For other devices we take the user power_level
	 * into consideration in rt2800_compensate_txpower().
	 */
	delta += rt2800_get_txpower_reg_delta(rt2x00dev, power_level,
					      chan->max_power);

	/*
	 * BBP_R1 controls TX power for all rates; it allows the gains
	 * -12, -6, 0 and +6 dBm to be selected by setting the values
	 * 2, 1, 0 and 3 respectively.
	 *
	 * TODO: we do not use the +6 dBm option so as not to increase power
	 * beyond the regulatory limit, however this could be utilized for
	 * devices with CAPABILITY_POWER_LIMIT.
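	 *
	 * For example, with a total delta of -8 dB the code below selects
	 * power_ctrl = 1 (-6 dB in BBP R1) and folds the remaining -2 dB into
	 * each per-rate TX_PWR_CFG field via rt2800_compensate_txpower().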
*/ if (delta <= -12) { power_ctrl = 2; delta += 12; } else if (delta <= -6) { power_ctrl = 1; delta += 6; } else { power_ctrl = 0; } r1 = rt2800_bbp_read(rt2x00dev, 1); rt2x00_set_field8(&r1, BBP1_TX_POWER_CTRL, power_ctrl); rt2800_bbp_write(rt2x00dev, 1, r1); offset = TX_PWR_CFG_0; for (i = 0; i < EEPROM_TXPOWER_BYRATE_SIZE; i += 2) { /* just to be safe */ if (offset > TX_PWR_CFG_4) break; reg = rt2800_register_read(rt2x00dev, offset); /* read the next four txpower values */ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, i); is_rate_b = i ? 0 : 1; /* * TX_PWR_CFG_0: 1MBS, TX_PWR_CFG_1: 24MBS, * TX_PWR_CFG_2: MCS4, TX_PWR_CFG_3: MCS12, * TX_PWR_CFG_4: unknown */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band, power_level, txpower, delta); rt2x00_set_field32(&reg, TX_PWR_CFG_RATE0, txpower); /* * TX_PWR_CFG_0: 2MBS, TX_PWR_CFG_1: 36MBS, * TX_PWR_CFG_2: MCS5, TX_PWR_CFG_3: MCS13, * TX_PWR_CFG_4: unknown */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band, power_level, txpower, delta); rt2x00_set_field32(&reg, TX_PWR_CFG_RATE1, txpower); /* * TX_PWR_CFG_0: 5.5MBS, TX_PWR_CFG_1: 48MBS, * TX_PWR_CFG_2: MCS6, TX_PWR_CFG_3: MCS14, * TX_PWR_CFG_4: unknown */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band, power_level, txpower, delta); rt2x00_set_field32(&reg, TX_PWR_CFG_RATE2, txpower); /* * TX_PWR_CFG_0: 11MBS, TX_PWR_CFG_1: 54MBS, * TX_PWR_CFG_2: MCS7, TX_PWR_CFG_3: MCS15, * TX_PWR_CFG_4: unknown */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band, power_level, txpower, delta); rt2x00_set_field32(&reg, TX_PWR_CFG_RATE3, txpower); /* read the next four txpower values */ eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_TXPOWER_BYRATE, i + 1); is_rate_b = 0; /* * TX_PWR_CFG_0: 6MBS, TX_PWR_CFG_1: MCS0, * TX_PWR_CFG_2: MCS8, TX_PWR_CFG_3: unknown, * TX_PWR_CFG_4: unknown */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE0); txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band, power_level, txpower, delta); rt2x00_set_field32(&reg, TX_PWR_CFG_RATE4, txpower); /* * TX_PWR_CFG_0: 9MBS, TX_PWR_CFG_1: MCS1, * TX_PWR_CFG_2: MCS9, TX_PWR_CFG_3: unknown, * TX_PWR_CFG_4: unknown */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE1); txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band, power_level, txpower, delta); rt2x00_set_field32(&reg, TX_PWR_CFG_RATE5, txpower); /* * TX_PWR_CFG_0: 12MBS, TX_PWR_CFG_1: MCS2, * TX_PWR_CFG_2: MCS10, TX_PWR_CFG_3: unknown, * TX_PWR_CFG_4: unknown */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE2); txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band, power_level, txpower, delta); rt2x00_set_field32(&reg, TX_PWR_CFG_RATE6, txpower); /* * TX_PWR_CFG_0: 18MBS, TX_PWR_CFG_1: MCS3, * TX_PWR_CFG_2: MCS11, TX_PWR_CFG_3: unknown, * TX_PWR_CFG_4: unknown */ txpower = rt2x00_get_field16(eeprom, EEPROM_TXPOWER_BYRATE_RATE3); txpower = rt2800_compensate_txpower(rt2x00dev, is_rate_b, band, power_level, txpower, delta); rt2x00_set_field32(&reg, TX_PWR_CFG_RATE7, txpower); rt2800_register_write(rt2x00dev, offset, reg); /* next TX_PWR_CFG register */ offset += 4; } } static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev, struct ieee80211_channel 
*chan, int power_level) { if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) rt2800_config_txpower_rt3593(rt2x00dev, chan, power_level); else if (rt2x00_rt(rt2x00dev, RT6352)) rt2800_config_txpower_rt6352(rt2x00dev, chan, power_level); else rt2800_config_txpower_rt28xx(rt2x00dev, chan, power_level); } void rt2800_gain_calibration(struct rt2x00_dev *rt2x00dev) { rt2800_config_txpower(rt2x00dev, rt2x00dev->hw->conf.chandef.chan, rt2x00dev->tx_power); } EXPORT_SYMBOL_GPL(rt2800_gain_calibration); void rt2800_vco_calibration(struct rt2x00_dev *rt2x00dev) { u32 tx_pin; u8 rfcsr; unsigned long min_sleep = 0; /* * A voltage-controlled oscillator(VCO) is an electronic oscillator * designed to be controlled in oscillation frequency by a voltage * input. Maybe the temperature will affect the frequency of * oscillation to be shifted. The VCO calibration will be called * periodically to adjust the frequency to be precision. */ tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG); tx_pin &= TX_PIN_CFG_PA_PE_DISABLE; rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin); switch (rt2x00dev->chip.rf) { case RF2020: case RF3020: case RF3021: case RF3022: case RF3320: case RF3052: rfcsr = rt2800_rfcsr_read(rt2x00dev, 7); rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1); rt2800_rfcsr_write(rt2x00dev, 7, rfcsr); break; case RF3053: case RF3070: case RF3290: case RF3853: case RF5350: case RF5360: case RF5362: case RF5370: case RF5372: case RF5390: case RF5392: case RF5592: rfcsr = rt2800_rfcsr_read(rt2x00dev, 3); rt2x00_set_field8(&rfcsr, RFCSR3_VCOCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 3, rfcsr); min_sleep = 1000; break; case RF7620: rt2800_rfcsr_write(rt2x00dev, 5, 0x40); rt2800_rfcsr_write(rt2x00dev, 4, 0x0C); rfcsr = rt2800_rfcsr_read(rt2x00dev, 4); rt2x00_set_field8(&rfcsr, RFCSR4_VCOCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 4, rfcsr); min_sleep = 2000; break; default: WARN_ONCE(1, "Not supported RF chipset %x for VCO recalibration", rt2x00dev->chip.rf); return; } if (min_sleep > 0) usleep_range(min_sleep, min_sleep * 2); tx_pin = rt2800_register_read(rt2x00dev, TX_PIN_CFG); if (rt2x00dev->rf_channel <= 14) { switch (rt2x00dev->default_ant.tx_chain_num) { case 3: rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G2_EN, 1); fallthrough; case 2: rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1); fallthrough; case 1: default: rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, 1); break; } } else { switch (rt2x00dev->default_ant.tx_chain_num) { case 3: rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A2_EN, 1); fallthrough; case 2: rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1); fallthrough; case 1: default: rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, 1); break; } } rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin); if (rt2x00_rt(rt2x00dev, RT6352)) { if (rt2x00dev->default_ant.rx_chain_num == 1) { rt2800_bbp_write(rt2x00dev, 91, 0x07); rt2800_bbp_write(rt2x00dev, 95, 0x1A); rt2800_bbp_write(rt2x00dev, 195, 128); rt2800_bbp_write(rt2x00dev, 196, 0xA0); rt2800_bbp_write(rt2x00dev, 195, 170); rt2800_bbp_write(rt2x00dev, 196, 0x12); rt2800_bbp_write(rt2x00dev, 195, 171); rt2800_bbp_write(rt2x00dev, 196, 0x10); } else { rt2800_bbp_write(rt2x00dev, 91, 0x06); rt2800_bbp_write(rt2x00dev, 95, 0x9A); rt2800_bbp_write(rt2x00dev, 195, 128); rt2800_bbp_write(rt2x00dev, 196, 0xE0); rt2800_bbp_write(rt2x00dev, 195, 170); rt2800_bbp_write(rt2x00dev, 196, 0x30); rt2800_bbp_write(rt2x00dev, 195, 171); rt2800_bbp_write(rt2x00dev, 196, 0x30); } if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { 
			rt2800_bbp_write(rt2x00dev, 75, 0x68);
			rt2800_bbp_write(rt2x00dev, 76, 0x4C);
			rt2800_bbp_write(rt2x00dev, 79, 0x1C);
			rt2800_bbp_write(rt2x00dev, 80, 0x0C);
			rt2800_bbp_write(rt2x00dev, 82, 0xB6);
		}

		/* On 11a we should delay and wait for the RF/BBP to become
		 * stable; the appropriate time is about 1000 microseconds.
		 * (2005/06/05) On 11g we also need this delay, otherwise it
		 * is difficult to pass WHQL.
		 */
		usleep_range(1000, 1500);
	}
}
EXPORT_SYMBOL_GPL(rt2800_vco_calibration);

static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
				      struct rt2x00lib_conf *libconf)
{
	u32 reg;

	reg = rt2800_register_read(rt2x00dev, TX_RTY_CFG);
	rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
			   libconf->conf->short_frame_max_tx_count);
	rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
			   libconf->conf->long_frame_max_tx_count);
	rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
}

static void rt2800_config_ps(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00lib_conf *libconf)
{
	enum dev_state state =
	    (libconf->conf->flags & IEEE80211_CONF_PS) ?
	    STATE_SLEEP : STATE_AWAKE;
	u32 reg;

	if (state == STATE_SLEEP) {
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);

		reg = rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
				   libconf->conf->listen_interval - 1);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 1);
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);

		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
	} else {
		reg = rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);

		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
	}
}

void rt2800_config(struct rt2x00_dev *rt2x00dev,
		   struct rt2x00lib_conf *libconf,
		   const unsigned int flags)
{
	/* Always recalculate LNA gain before changing configuration */
	rt2800_config_lna_gain(rt2x00dev, libconf);

	if (flags & IEEE80211_CONF_CHANGE_CHANNEL) {
		/*
		 * To provide correct survey data for survey-based ACS
		 * algorithms we have to save the survey data for the current
		 * channel before switching.
		 */
		rt2800_update_survey(rt2x00dev);

		rt2800_config_channel(rt2x00dev, libconf->conf,
				      &libconf->rf, &libconf->channel);
		rt2800_config_txpower(rt2x00dev, libconf->conf->chandef.chan,
				      libconf->conf->power_level);
	}
	if (flags & IEEE80211_CONF_CHANGE_POWER)
		rt2800_config_txpower(rt2x00dev, libconf->conf->chandef.chan,
				      libconf->conf->power_level);
	if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
		rt2800_config_retry_limit(rt2x00dev, libconf);
	if (flags & IEEE80211_CONF_CHANGE_PS)
		rt2800_config_ps(rt2x00dev, libconf);
}
EXPORT_SYMBOL_GPL(rt2800_config);

/*
 * Link tuning
 */
void rt2800_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual)
{
	u32 reg;

	/*
	 * Update the FCS error count from the register.
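	 * RX_STA_CNT0 is cleared on read (see the counter flush in
	 * rt2800_init_registers()), so each read reports the CRC errors
	 * accumulated since the previous read.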
 */
	reg = rt2800_register_read(rt2x00dev, RX_STA_CNT0);
	qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
}
EXPORT_SYMBOL_GPL(rt2800_link_stats);

static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
{
	u8 vgc;

	if (rt2x00dev->curr_band == NL80211_BAND_2GHZ) {
		if (rt2x00_rt(rt2x00dev, RT3070) ||
		    rt2x00_rt(rt2x00dev, RT3071) ||
		    rt2x00_rt(rt2x00dev, RT3090) ||
		    rt2x00_rt(rt2x00dev, RT3290) ||
		    rt2x00_rt(rt2x00dev, RT3390) ||
		    rt2x00_rt(rt2x00dev, RT3572) ||
		    rt2x00_rt(rt2x00dev, RT3593) ||
		    rt2x00_rt(rt2x00dev, RT5390) ||
		    rt2x00_rt(rt2x00dev, RT5392) ||
		    rt2x00_rt(rt2x00dev, RT5592) ||
		    rt2x00_rt(rt2x00dev, RT6352))
			vgc = 0x1c + (2 * rt2x00dev->lna_gain);
		else
			vgc = 0x2e + rt2x00dev->lna_gain;
	} else { /* 5GHZ band */
		if (rt2x00_rt(rt2x00dev, RT3593) ||
		    rt2x00_rt(rt2x00dev, RT3883))
			vgc = 0x20 + (rt2x00dev->lna_gain * 5) / 3;
		else if (rt2x00_rt(rt2x00dev, RT5592))
			vgc = 0x24 + (2 * rt2x00dev->lna_gain);
		else {
			if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
				vgc = 0x32 + (rt2x00dev->lna_gain * 5) / 3;
			else
				vgc = 0x3a + (rt2x00dev->lna_gain * 5) / 3;
		}
	}

	return vgc;
}

static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
				  struct link_qual *qual, u8 vgc_level)
{
	if (qual->vgc_level != vgc_level) {
		if (rt2x00_rt(rt2x00dev, RT3572) ||
		    rt2x00_rt(rt2x00dev, RT3593) ||
		    rt2x00_rt(rt2x00dev, RT3883) ||
		    rt2x00_rt(rt2x00dev, RT6352)) {
			rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
						       vgc_level);
		} else if (rt2x00_rt(rt2x00dev, RT5592)) {
			rt2800_bbp_write(rt2x00dev, 83,
					 qual->rssi > -65 ? 0x4a : 0x7a);
			rt2800_bbp_write_with_rx_chain(rt2x00dev, 66,
						       vgc_level);
		} else {
			rt2800_bbp_write(rt2x00dev, 66, vgc_level);
		}

		qual->vgc_level = vgc_level;
		qual->vgc_level_reg = vgc_level;
	}
}

void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual)
{
	rt2800_set_vgc(rt2x00dev, qual, rt2800_get_default_vgc(rt2x00dev));
}
EXPORT_SYMBOL_GPL(rt2800_reset_tuner);

void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
		       const u32 count)
{
	u8 vgc;

	if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C))
		return;

	/* When the RSSI is better than a certain threshold, increase the VGC
	 * by a chip-specific value in order to improve the balance between
	 * sensitivity and noise isolation.
	 */
	vgc = rt2800_get_default_vgc(rt2x00dev);

	switch (rt2x00dev->chip.rt) {
	case RT3572:
	case RT3593:
		if (qual->rssi > -65) {
			if (rt2x00dev->curr_band == NL80211_BAND_2GHZ)
				vgc += 0x20;
			else
				vgc += 0x10;
		}
		break;

	case RT3883:
		if (qual->rssi > -65)
			vgc += 0x10;
		break;

	case RT5592:
		if (qual->rssi > -65)
			vgc += 0x20;
		break;

	default:
		if (qual->rssi > -80)
			vgc += 0x10;
		break;
	}

	rt2800_set_vgc(rt2x00dev, qual, vgc);
}
EXPORT_SYMBOL_GPL(rt2800_link_tuner);

/*
 * Initialization functions.
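 * rt2800_init_registers() below programs the MAC block to its defaults;
 * the chipset-specific BBP setup that follows is dispatched per RT chip
 * from rt2800_init_bbp(), and the RX filter calibration helpers come
 * after that.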
*/ static int rt2800_init_registers(struct rt2x00_dev *rt2x00dev) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u32 reg; u16 eeprom; unsigned int i; int ret; rt2800_disable_wpdma(rt2x00dev); ret = rt2800_drv_init_registers(rt2x00dev); if (ret) return ret; rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f); rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000); reg = rt2800_register_read(rt2x00dev, BCN_TIME_CFG); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 1600); rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0); rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0); rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0); rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0); rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg); rt2800_config_filter(rt2x00dev, FIF_ALLMULTI); reg = rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG); rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, 9); rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2); rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg); if (rt2x00_rt(rt2x00dev, RT3290)) { reg = rt2800_register_read(rt2x00dev, WLAN_FUN_CTRL); if (rt2x00_get_field32(reg, WLAN_EN) == 1) { rt2x00_set_field32(&reg, PCIE_APP0_CLK_REQ, 1); rt2800_register_write(rt2x00dev, WLAN_FUN_CTRL, reg); } reg = rt2800_register_read(rt2x00dev, CMB_CTRL); if (!(rt2x00_get_field32(reg, LDO0_EN) == 1)) { rt2x00_set_field32(&reg, LDO0_EN, 1); rt2x00_set_field32(&reg, LDO_BGSEL, 3); rt2800_register_write(rt2x00dev, CMB_CTRL, reg); } reg = rt2800_register_read(rt2x00dev, OSC_CTRL); rt2x00_set_field32(&reg, OSC_ROSC_EN, 1); rt2x00_set_field32(&reg, OSC_CAL_REQ, 1); rt2x00_set_field32(&reg, OSC_REF_CYCLE, 0x27); rt2800_register_write(rt2x00dev, OSC_CTRL, reg); reg = rt2800_register_read(rt2x00dev, COEX_CFG0); rt2x00_set_field32(&reg, COEX_CFG_ANT, 0x5e); rt2800_register_write(rt2x00dev, COEX_CFG0, reg); reg = rt2800_register_read(rt2x00dev, COEX_CFG2); rt2x00_set_field32(&reg, BT_COEX_CFG1, 0x00); rt2x00_set_field32(&reg, BT_COEX_CFG0, 0x17); rt2x00_set_field32(&reg, WL_COEX_CFG1, 0x93); rt2x00_set_field32(&reg, WL_COEX_CFG0, 0x7f); rt2800_register_write(rt2x00dev, COEX_CFG2, reg); reg = rt2800_register_read(rt2x00dev, PLL_CTRL); rt2x00_set_field32(&reg, PLL_CONTROL, 1); rt2800_register_write(rt2x00dev, PLL_CTRL, reg); } if (rt2x00_rt(rt2x00dev, RT3071) || rt2x00_rt(rt2x00dev, RT3090) || rt2x00_rt(rt2x00dev, RT3290) || rt2x00_rt(rt2x00dev, RT3390)) { if (rt2x00_rt(rt2x00dev, RT3290)) rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); else rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST)) rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000002c); else rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000000f); } else { rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); } } else if (rt2x00_rt(rt2x00dev, RT3070)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) { rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000002c); } else { 
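			/* RT3070 rev. F and later (the rt2x00_rt_rev_lt()
			 * check above did not match).
			 */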
rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); } } else if (rt2800_is_305x_soc(rt2x00dev)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000030); } else if (rt2x00_rt(rt2x00dev, RT3352)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); } else if (rt2x00_rt(rt2x00dev, RT3572)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); } else if (rt2x00_rt(rt2x00dev, RT3593)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); if (rt2x00_rt_rev_lt(rt2x00dev, RT3593, REV_RT3593E)) { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST)) rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000001f); else rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x0000000f); } else { rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); } } else if (rt2x00_rt(rt2x00dev, RT3883)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000402); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00040000); rt2800_register_write(rt2x00dev, TX_TXBF_CFG_0, 0x8000fc21); rt2800_register_write(rt2x00dev, TX_TXBF_CFG_3, 0x00009c40); } else if (rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); } else if (rt2x00_rt(rt2x00dev, RT5592)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); } else if (rt2x00_rt(rt2x00dev, RT5350)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000404); } else if (rt2x00_rt(rt2x00dev, RT6352)) { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000401); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x000C0001); rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000); rt2800_register_write(rt2x00dev, TX_ALC_VGA3, 0x00000000); rt2800_register_write(rt2x00dev, TX0_BB_GAIN_ATTEN, 0x0); rt2800_register_write(rt2x00dev, TX1_BB_GAIN_ATTEN, 0x0); rt2800_register_write(rt2x00dev, TX0_RF_GAIN_ATTEN, 0x6C6C666C); rt2800_register_write(rt2x00dev, TX1_RF_GAIN_ATTEN, 0x6C6C666C); rt2800_register_write(rt2x00dev, TX0_RF_GAIN_CORRECT, 0x3630363A); rt2800_register_write(rt2x00dev, TX1_RF_GAIN_CORRECT, 0x3630363A); reg = rt2800_register_read(rt2x00dev, TX_ALC_CFG_1); rt2x00_set_field32(&reg, TX_ALC_CFG_1_ROS_BUSY_EN, 0); rt2800_register_write(rt2x00dev, TX_ALC_CFG_1, reg); } else { rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000); rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606); } reg = rt2800_register_read(rt2x00dev, TX_LINK_CFG); rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32); rt2x00_set_field32(&reg, TX_LINK_CFG_MFB_ENABLE, 0); rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0); rt2x00_set_field32(&reg, TX_LINK_CFG_TX_MRQ_EN, 0); rt2x00_set_field32(&reg, TX_LINK_CFG_TX_RDG_EN, 0); rt2x00_set_field32(&reg, TX_LINK_CFG_TX_CF_ACK_EN, 1); rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB, 0); rt2x00_set_field32(&reg, 
TX_LINK_CFG_REMOTE_MFS, 0); rt2800_register_write(rt2x00dev, TX_LINK_CFG, reg); reg = rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG); rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9); rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 32); rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10); rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg); reg = rt2800_register_read(rt2x00dev, MAX_LEN_CFG); rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE); if (rt2x00_is_usb(rt2x00dev)) { drv_data->max_psdu = 3; } else if (rt2x00_rt_rev_gte(rt2x00dev, RT2872, REV_RT2872E) || rt2x00_rt(rt2x00dev, RT2883) || rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070E)) { drv_data->max_psdu = 2; } else { drv_data->max_psdu = 1; } rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, drv_data->max_psdu); rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 10); rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 10); rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg); reg = rt2800_register_read(rt2x00dev, LED_CFG); rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, 70); rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, 30); rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3); rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3); rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 3); rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3); rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1); rt2800_register_write(rt2x00dev, LED_CFG, reg); rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f); reg = rt2800_register_read(rt2x00dev, TX_RTY_CFG); rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT, 2); rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT, 2); rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000); rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0); rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0); rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1); rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg); reg = rt2800_register_read(rt2x00dev, AUTO_RSP_CFG); rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1); rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY, 1); rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 1); rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0); rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE, 0); rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0); rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0); rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg); reg = rt2800_register_read(rt2x00dev, CCK_PROT_CFG); rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 3); rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0); rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1); rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1); rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 0); rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1); rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 0); rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, 1); rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, OFDM_PROT_CFG); rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 3); rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0); rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1); rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1); rt2x00_set_field32(&reg, 
OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 0); rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1); rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 0); rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, 1); rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, MM20_PROT_CFG); rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004); rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 1); rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 0); rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1); rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0); rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1); rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0); rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, 0); rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, MM40_PROT_CFG); rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084); rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 1); rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 0); rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1); rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1); rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1); rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1); rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, 0); rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, GF20_PROT_CFG); rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004); rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 1); rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 0); rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1); rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0); rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1); rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0); rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, 0); rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, GF40_PROT_CFG); rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084); rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 1); rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV_SHORT, 1); rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 0); rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1); rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1); rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1); rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1); rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1); rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, 0); rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg); if (rt2x00_is_usb(rt2x00dev)) { rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006); reg = rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 3); rt2x00_set_field32(&reg, 
WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 0); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_BIG_ENDIAN, 0); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_HDR_SCATTER, 0); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_HDR_SEG_LEN, 0); rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); } /* * The legacy driver also sets TXOP_CTRL_CFG_RESERVED_TRUN_EN to 1 * although it is reserved. */ reg = rt2800_register_read(rt2x00dev, TXOP_CTRL_CFG); rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TIMEOUT_TRUN_EN, 1); rt2x00_set_field32(&reg, TXOP_CTRL_CFG_AC_TRUN_EN, 1); rt2x00_set_field32(&reg, TXOP_CTRL_CFG_TXRATEGRP_TRUN_EN, 1); rt2x00_set_field32(&reg, TXOP_CTRL_CFG_USER_MODE_TRUN_EN, 1); rt2x00_set_field32(&reg, TXOP_CTRL_CFG_MIMO_PS_TRUN_EN, 1); rt2x00_set_field32(&reg, TXOP_CTRL_CFG_RESERVED_TRUN_EN, 1); rt2x00_set_field32(&reg, TXOP_CTRL_CFG_LSIG_TXOP_EN, 0); rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CCA_EN, 0); rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CCA_DLY, 88); rt2x00_set_field32(&reg, TXOP_CTRL_CFG_EXT_CWMIN, 0); rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, reg); reg = rt2x00_rt(rt2x00dev, RT5592) ? 0x00000082 : 0x00000002; rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, reg); if (rt2x00_rt(rt2x00dev, RT3883)) { rt2800_register_write(rt2x00dev, TX_FBK_CFG_3S_0, 0x12111008); rt2800_register_write(rt2x00dev, TX_FBK_CFG_3S_1, 0x16151413); } reg = rt2800_register_read(rt2x00dev, TX_RTS_CFG); rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 7); rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, IEEE80211_MAX_RTS_THRESHOLD); rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 1); rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg); rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca); /* * Usually the CCK SIFS time should be set to 10 and the OFDM SIFS * time should be set to 16. However, the original Ralink driver uses * 16 for both and indeed using a value of 10 for CCK SIFS results in * connection problems with 11g + CTS protection. Hence, use the same * defaults as the Ralink driver: 16 for both, CCK and OFDM SIFS. */ reg = rt2800_register_read(rt2x00dev, XIFS_TIME_CFG); rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, 16); rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, 16); rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4); rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, 314); rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1); rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg); rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003); /* * ASIC will keep garbage value after boot, clear encryption keys. */ for (i = 0; i < 4; i++) rt2800_register_write(rt2x00dev, SHARED_KEY_MODE_ENTRY(i), 0); for (i = 0; i < 256; i++) { rt2800_config_wcid(rt2x00dev, NULL, i); rt2800_delete_wcid_attr(rt2x00dev, i); } /* * Clear encryption initialization vectors on start, but keep them * for watchdog reset. Otherwise we will have wrong IVs and not be * able to keep connections after reset. 
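 * (The DEVICE_STATE_RESET check below is what distinguishes a watchdog
 * re-init from a cold start.)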
*/ if (!test_bit(DEVICE_STATE_RESET, &rt2x00dev->flags)) for (i = 0; i < 256; i++) rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0); /* * Clear all beacons */ for (i = 0; i < 8; i++) rt2800_clear_beacon_register(rt2x00dev, i); if (rt2x00_is_usb(rt2x00dev)) { reg = rt2800_register_read(rt2x00dev, US_CYC_CNT); rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 30); rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); } else if (rt2x00_is_pcie(rt2x00dev)) { reg = rt2800_register_read(rt2x00dev, US_CYC_CNT); rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, 125); rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); } else if (rt2x00_is_soc(rt2x00dev)) { struct clk *clk = clk_get_sys("bus", NULL); int rate; if (IS_ERR(clk)) { clk = clk_get_sys("cpu", NULL); if (IS_ERR(clk)) { rate = 125; } else { rate = clk_get_rate(clk) / 3000000; clk_put(clk); } } else { rate = clk_get_rate(clk) / 1000000; clk_put(clk); } reg = rt2800_register_read(rt2x00dev, US_CYC_CNT); rt2x00_set_field32(&reg, US_CYC_CNT_CLOCK_CYCLE, rate); rt2800_register_write(rt2x00dev, US_CYC_CNT, reg); } reg = rt2800_register_read(rt2x00dev, HT_FBK_CFG0); rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS0FBK, 0); rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS1FBK, 0); rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS2FBK, 1); rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS3FBK, 2); rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS4FBK, 3); rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS5FBK, 4); rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS6FBK, 5); rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS7FBK, 6); rt2800_register_write(rt2x00dev, HT_FBK_CFG0, reg); reg = rt2800_register_read(rt2x00dev, HT_FBK_CFG1); rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS8FBK, 8); rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS9FBK, 8); rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS10FBK, 9); rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS11FBK, 10); rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS12FBK, 11); rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS13FBK, 12); rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS14FBK, 13); rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS15FBK, 14); rt2800_register_write(rt2x00dev, HT_FBK_CFG1, reg); reg = rt2800_register_read(rt2x00dev, LG_FBK_CFG0); rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS0FBK, 8); rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS1FBK, 8); rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS2FBK, 9); rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS3FBK, 10); rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS4FBK, 11); rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS5FBK, 12); rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS6FBK, 13); rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS7FBK, 14); rt2800_register_write(rt2x00dev, LG_FBK_CFG0, reg); reg = rt2800_register_read(rt2x00dev, LG_FBK_CFG1); rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS0FBK, 0); rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS1FBK, 0); rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS2FBK, 1); rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS3FBK, 2); rt2800_register_write(rt2x00dev, LG_FBK_CFG1, reg); /* * Do not force the BA window size, we use the TXWI to set it */ reg = rt2800_register_read(rt2x00dev, AMPDU_BA_WINSIZE); rt2x00_set_field32(&reg, AMPDU_BA_WINSIZE_FORCE_WINSIZE_ENABLE, 0); rt2x00_set_field32(&reg, AMPDU_BA_WINSIZE_FORCE_WINSIZE, 0); rt2800_register_write(rt2x00dev, AMPDU_BA_WINSIZE, reg); /* * We must clear the error counters. * These registers are cleared on read, * so we may pass a useless variable to store the value. 
*/ reg = rt2800_register_read(rt2x00dev, RX_STA_CNT0); reg = rt2800_register_read(rt2x00dev, RX_STA_CNT1); reg = rt2800_register_read(rt2x00dev, RX_STA_CNT2); reg = rt2800_register_read(rt2x00dev, TX_STA_CNT0); reg = rt2800_register_read(rt2x00dev, TX_STA_CNT1); reg = rt2800_register_read(rt2x00dev, TX_STA_CNT2); /* * Setup leadtime for pre tbtt interrupt to 6ms */ reg = rt2800_register_read(rt2x00dev, INT_TIMER_CFG); rt2x00_set_field32(&reg, INT_TIMER_CFG_PRE_TBTT_TIMER, 6 << 4); rt2800_register_write(rt2x00dev, INT_TIMER_CFG, reg); /* * Set up channel statistics timer */ reg = rt2800_register_read(rt2x00dev, CH_TIME_CFG); rt2x00_set_field32(&reg, CH_TIME_CFG_EIFS_BUSY, 1); rt2x00_set_field32(&reg, CH_TIME_CFG_NAV_BUSY, 1); rt2x00_set_field32(&reg, CH_TIME_CFG_RX_BUSY, 1); rt2x00_set_field32(&reg, CH_TIME_CFG_TX_BUSY, 1); rt2x00_set_field32(&reg, CH_TIME_CFG_TMR_EN, 1); rt2800_register_write(rt2x00dev, CH_TIME_CFG, reg); return 0; } static void rt2800_bbp4_mac_if_ctrl(struct rt2x00_dev *rt2x00dev) { u8 value; value = rt2800_bbp_read(rt2x00dev, 4); rt2x00_set_field8(&value, BBP4_MAC_IF_CTRL, 1); rt2800_bbp_write(rt2x00dev, 4, value); } static void rt2800_init_freq_calibration(struct rt2x00_dev *rt2x00dev) { rt2800_bbp_write(rt2x00dev, 142, 1); rt2800_bbp_write(rt2x00dev, 143, 57); } static void rt2800_init_bbp_5592_glrt(struct rt2x00_dev *rt2x00dev) { static const u8 glrt_table[] = { 0xE0, 0x1F, 0X38, 0x32, 0x08, 0x28, 0x19, 0x0A, 0xFF, 0x00, /* 128 ~ 137 */ 0x16, 0x10, 0x10, 0x0B, 0x36, 0x2C, 0x26, 0x24, 0x42, 0x36, /* 138 ~ 147 */ 0x30, 0x2D, 0x4C, 0x46, 0x3D, 0x40, 0x3E, 0x42, 0x3D, 0x40, /* 148 ~ 157 */ 0X3C, 0x34, 0x2C, 0x2F, 0x3C, 0x35, 0x2E, 0x2A, 0x49, 0x41, /* 158 ~ 167 */ 0x36, 0x31, 0x30, 0x30, 0x0E, 0x0D, 0x28, 0x21, 0x1C, 0x16, /* 168 ~ 177 */ 0x50, 0x4A, 0x43, 0x40, 0x10, 0x10, 0x10, 0x10, 0x00, 0x00, /* 178 ~ 187 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* 188 ~ 197 */ 0x00, 0x00, 0x7D, 0x14, 0x32, 0x2C, 0x36, 0x4C, 0x43, 0x2C, /* 198 ~ 207 */ 0x2E, 0x36, 0x30, 0x6E, /* 208 ~ 211 */ }; int i; for (i = 0; i < ARRAY_SIZE(glrt_table); i++) { rt2800_bbp_write(rt2x00dev, 195, 128 + i); rt2800_bbp_write(rt2x00dev, 196, glrt_table[i]); } }; static void rt2800_init_bbp_early(struct rt2x00_dev *rt2x00dev) { rt2800_bbp_write(rt2x00dev, 65, 0x2C); rt2800_bbp_write(rt2x00dev, 66, 0x38); rt2800_bbp_write(rt2x00dev, 68, 0x0B); rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 70, 0x0a); rt2800_bbp_write(rt2x00dev, 73, 0x10); rt2800_bbp_write(rt2x00dev, 81, 0x37); rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 83, 0x6A); rt2800_bbp_write(rt2x00dev, 84, 0x99); rt2800_bbp_write(rt2x00dev, 86, 0x00); rt2800_bbp_write(rt2x00dev, 91, 0x04); rt2800_bbp_write(rt2x00dev, 92, 0x00); rt2800_bbp_write(rt2x00dev, 103, 0x00); rt2800_bbp_write(rt2x00dev, 105, 0x05); rt2800_bbp_write(rt2x00dev, 106, 0x35); } static void rt2800_disable_unused_dac_adc(struct rt2x00_dev *rt2x00dev) { u16 eeprom; u8 value; value = rt2800_bbp_read(rt2x00dev, 138); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) value |= 0x20; if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) value &= ~0x02; rt2800_bbp_write(rt2x00dev, 138, value); } static void rt2800_init_bbp_305x_soc(struct rt2x00_dev *rt2x00dev) { rt2800_bbp_write(rt2x00dev, 31, 0x08); rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38); rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 
0x10); rt2800_bbp_write(rt2x00dev, 70, 0x0a); rt2800_bbp_write(rt2x00dev, 78, 0x0e); rt2800_bbp_write(rt2x00dev, 80, 0x08); rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 83, 0x6a); rt2800_bbp_write(rt2x00dev, 84, 0x99); rt2800_bbp_write(rt2x00dev, 86, 0x00); rt2800_bbp_write(rt2x00dev, 91, 0x04); rt2800_bbp_write(rt2x00dev, 92, 0x00); rt2800_bbp_write(rt2x00dev, 103, 0xc0); rt2800_bbp_write(rt2x00dev, 105, 0x01); rt2800_bbp_write(rt2x00dev, 106, 0x35); } static void rt2800_init_bbp_28xx(struct rt2x00_dev *rt2x00dev) { rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38); if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860C)) { rt2800_bbp_write(rt2x00dev, 69, 0x16); rt2800_bbp_write(rt2x00dev, 73, 0x12); } else { rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x10); } rt2800_bbp_write(rt2x00dev, 70, 0x0a); rt2800_bbp_write(rt2x00dev, 81, 0x37); rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 83, 0x6a); if (rt2x00_rt_rev(rt2x00dev, RT2860, REV_RT2860D)) rt2800_bbp_write(rt2x00dev, 84, 0x19); else rt2800_bbp_write(rt2x00dev, 84, 0x99); rt2800_bbp_write(rt2x00dev, 86, 0x00); rt2800_bbp_write(rt2x00dev, 91, 0x04); rt2800_bbp_write(rt2x00dev, 92, 0x00); rt2800_bbp_write(rt2x00dev, 103, 0x00); rt2800_bbp_write(rt2x00dev, 105, 0x05); rt2800_bbp_write(rt2x00dev, 106, 0x35); } static void rt2800_init_bbp_30xx(struct rt2x00_dev *rt2x00dev) { rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38); rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x10); rt2800_bbp_write(rt2x00dev, 70, 0x0a); rt2800_bbp_write(rt2x00dev, 79, 0x13); rt2800_bbp_write(rt2x00dev, 80, 0x05); rt2800_bbp_write(rt2x00dev, 81, 0x33); rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 83, 0x6a); rt2800_bbp_write(rt2x00dev, 84, 0x99); rt2800_bbp_write(rt2x00dev, 86, 0x00); rt2800_bbp_write(rt2x00dev, 91, 0x04); rt2800_bbp_write(rt2x00dev, 92, 0x00); if (rt2x00_rt_rev_gte(rt2x00dev, RT3070, REV_RT3070F) || rt2x00_rt_rev_gte(rt2x00dev, RT3071, REV_RT3071E) || rt2x00_rt_rev_gte(rt2x00dev, RT3090, REV_RT3090E)) rt2800_bbp_write(rt2x00dev, 103, 0xc0); else rt2800_bbp_write(rt2x00dev, 103, 0x00); rt2800_bbp_write(rt2x00dev, 105, 0x05); rt2800_bbp_write(rt2x00dev, 106, 0x35); if (rt2x00_rt(rt2x00dev, RT3071) || rt2x00_rt(rt2x00dev, RT3090)) rt2800_disable_unused_dac_adc(rt2x00dev); } static void rt2800_init_bbp_3290(struct rt2x00_dev *rt2x00dev) { u8 value; rt2800_bbp4_mac_if_ctrl(rt2x00dev); rt2800_bbp_write(rt2x00dev, 31, 0x08); rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38); rt2800_bbp_write(rt2x00dev, 68, 0x0b); rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x13); rt2800_bbp_write(rt2x00dev, 75, 0x46); rt2800_bbp_write(rt2x00dev, 76, 0x28); rt2800_bbp_write(rt2x00dev, 77, 0x58); rt2800_bbp_write(rt2x00dev, 70, 0x0a); rt2800_bbp_write(rt2x00dev, 74, 0x0b); rt2800_bbp_write(rt2x00dev, 79, 0x18); rt2800_bbp_write(rt2x00dev, 80, 0x09); rt2800_bbp_write(rt2x00dev, 81, 0x33); rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 83, 0x7a); rt2800_bbp_write(rt2x00dev, 84, 0x9a); rt2800_bbp_write(rt2x00dev, 86, 0x38); rt2800_bbp_write(rt2x00dev, 91, 0x04); rt2800_bbp_write(rt2x00dev, 92, 0x02); rt2800_bbp_write(rt2x00dev, 103, 0xc0); rt2800_bbp_write(rt2x00dev, 104, 0x92); rt2800_bbp_write(rt2x00dev, 105, 0x1c); rt2800_bbp_write(rt2x00dev, 106, 0x03); rt2800_bbp_write(rt2x00dev, 128, 0x12); rt2800_bbp_write(rt2x00dev, 67, 0x24); 
rt2800_bbp_write(rt2x00dev, 143, 0x04); rt2800_bbp_write(rt2x00dev, 142, 0x99); rt2800_bbp_write(rt2x00dev, 150, 0x30); rt2800_bbp_write(rt2x00dev, 151, 0x2e); rt2800_bbp_write(rt2x00dev, 152, 0x20); rt2800_bbp_write(rt2x00dev, 153, 0x34); rt2800_bbp_write(rt2x00dev, 154, 0x40); rt2800_bbp_write(rt2x00dev, 155, 0x3b); rt2800_bbp_write(rt2x00dev, 253, 0x04); value = rt2800_bbp_read(rt2x00dev, 47); rt2x00_set_field8(&value, BBP47_TSSI_ADC6, 1); rt2800_bbp_write(rt2x00dev, 47, value); /* Use 5-bit ADC for Acquisition and 8-bit ADC for data */ value = rt2800_bbp_read(rt2x00dev, 3); rt2x00_set_field8(&value, BBP3_ADC_MODE_SWITCH, 1); rt2x00_set_field8(&value, BBP3_ADC_INIT_MODE, 1); rt2800_bbp_write(rt2x00dev, 3, value); } static void rt2800_init_bbp_3352(struct rt2x00_dev *rt2x00dev) { rt2800_bbp_write(rt2x00dev, 3, 0x00); rt2800_bbp_write(rt2x00dev, 4, 0x50); rt2800_bbp_write(rt2x00dev, 31, 0x08); rt2800_bbp_write(rt2x00dev, 47, 0x48); rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38); rt2800_bbp_write(rt2x00dev, 68, 0x0b); rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x13); rt2800_bbp_write(rt2x00dev, 75, 0x46); rt2800_bbp_write(rt2x00dev, 76, 0x28); rt2800_bbp_write(rt2x00dev, 77, 0x59); rt2800_bbp_write(rt2x00dev, 70, 0x0a); rt2800_bbp_write(rt2x00dev, 78, 0x0e); rt2800_bbp_write(rt2x00dev, 80, 0x08); rt2800_bbp_write(rt2x00dev, 81, 0x37); rt2800_bbp_write(rt2x00dev, 82, 0x62); if (rt2x00_rt(rt2x00dev, RT5350)) { rt2800_bbp_write(rt2x00dev, 83, 0x7a); rt2800_bbp_write(rt2x00dev, 84, 0x9a); } else { rt2800_bbp_write(rt2x00dev, 83, 0x6a); rt2800_bbp_write(rt2x00dev, 84, 0x99); } rt2800_bbp_write(rt2x00dev, 86, 0x38); rt2800_bbp_write(rt2x00dev, 88, 0x90); rt2800_bbp_write(rt2x00dev, 91, 0x04); rt2800_bbp_write(rt2x00dev, 92, 0x02); rt2800_bbp_write(rt2x00dev, 103, 0xc0); rt2800_bbp_write(rt2x00dev, 104, 0x92); if (rt2x00_rt(rt2x00dev, RT5350)) { rt2800_bbp_write(rt2x00dev, 105, 0x3c); rt2800_bbp_write(rt2x00dev, 106, 0x03); } else { rt2800_bbp_write(rt2x00dev, 105, 0x34); rt2800_bbp_write(rt2x00dev, 106, 0x05); } rt2800_bbp_write(rt2x00dev, 120, 0x50); rt2800_bbp_write(rt2x00dev, 137, 0x0f); rt2800_bbp_write(rt2x00dev, 163, 0xbd); /* Set ITxBF timeout to 0x9c40=1000msec */ rt2800_bbp_write(rt2x00dev, 179, 0x02); rt2800_bbp_write(rt2x00dev, 180, 0x00); rt2800_bbp_write(rt2x00dev, 182, 0x40); rt2800_bbp_write(rt2x00dev, 180, 0x01); rt2800_bbp_write(rt2x00dev, 182, 0x9c); rt2800_bbp_write(rt2x00dev, 179, 0x00); /* Reprogram the inband interface to put right values in RXWI */ rt2800_bbp_write(rt2x00dev, 142, 0x04); rt2800_bbp_write(rt2x00dev, 143, 0x3b); rt2800_bbp_write(rt2x00dev, 142, 0x06); rt2800_bbp_write(rt2x00dev, 143, 0xa0); rt2800_bbp_write(rt2x00dev, 142, 0x07); rt2800_bbp_write(rt2x00dev, 143, 0xa1); rt2800_bbp_write(rt2x00dev, 142, 0x08); rt2800_bbp_write(rt2x00dev, 143, 0xa2); rt2800_bbp_write(rt2x00dev, 148, 0xc8); if (rt2x00_rt(rt2x00dev, RT5350)) { /* Antenna Software OFDM */ rt2800_bbp_write(rt2x00dev, 150, 0x40); /* Antenna Software CCK */ rt2800_bbp_write(rt2x00dev, 151, 0x30); rt2800_bbp_write(rt2x00dev, 152, 0xa3); /* Clear previously selected antenna */ rt2800_bbp_write(rt2x00dev, 154, 0); } } static void rt2800_init_bbp_3390(struct rt2x00_dev *rt2x00dev) { rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38); rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x10); rt2800_bbp_write(rt2x00dev, 70, 0x0a); rt2800_bbp_write(rt2x00dev, 79, 0x13); rt2800_bbp_write(rt2x00dev, 80, 
0x05); rt2800_bbp_write(rt2x00dev, 81, 0x33); rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 83, 0x6a); rt2800_bbp_write(rt2x00dev, 84, 0x99); rt2800_bbp_write(rt2x00dev, 86, 0x00); rt2800_bbp_write(rt2x00dev, 91, 0x04); rt2800_bbp_write(rt2x00dev, 92, 0x00); if (rt2x00_rt_rev_gte(rt2x00dev, RT3390, REV_RT3390E)) rt2800_bbp_write(rt2x00dev, 103, 0xc0); else rt2800_bbp_write(rt2x00dev, 103, 0x00); rt2800_bbp_write(rt2x00dev, 105, 0x05); rt2800_bbp_write(rt2x00dev, 106, 0x35); rt2800_disable_unused_dac_adc(rt2x00dev); } static void rt2800_init_bbp_3572(struct rt2x00_dev *rt2x00dev) { rt2800_bbp_write(rt2x00dev, 31, 0x08); rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38); rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x10); rt2800_bbp_write(rt2x00dev, 70, 0x0a); rt2800_bbp_write(rt2x00dev, 79, 0x13); rt2800_bbp_write(rt2x00dev, 80, 0x05); rt2800_bbp_write(rt2x00dev, 81, 0x33); rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 83, 0x6a); rt2800_bbp_write(rt2x00dev, 84, 0x99); rt2800_bbp_write(rt2x00dev, 86, 0x00); rt2800_bbp_write(rt2x00dev, 91, 0x04); rt2800_bbp_write(rt2x00dev, 92, 0x00); rt2800_bbp_write(rt2x00dev, 103, 0xc0); rt2800_bbp_write(rt2x00dev, 105, 0x05); rt2800_bbp_write(rt2x00dev, 106, 0x35); rt2800_disable_unused_dac_adc(rt2x00dev); } static void rt2800_init_bbp_3593(struct rt2x00_dev *rt2x00dev) { rt2800_init_bbp_early(rt2x00dev); rt2800_bbp_write(rt2x00dev, 79, 0x13); rt2800_bbp_write(rt2x00dev, 80, 0x05); rt2800_bbp_write(rt2x00dev, 81, 0x33); rt2800_bbp_write(rt2x00dev, 137, 0x0f); rt2800_bbp_write(rt2x00dev, 84, 0x19); /* Enable DC filter */ if (rt2x00_rt_rev_gte(rt2x00dev, RT3593, REV_RT3593E)) rt2800_bbp_write(rt2x00dev, 103, 0xc0); } static void rt2800_init_bbp_3883(struct rt2x00_dev *rt2x00dev) { rt2800_init_bbp_early(rt2x00dev); rt2800_bbp_write(rt2x00dev, 4, 0x50); rt2800_bbp_write(rt2x00dev, 47, 0x48); rt2800_bbp_write(rt2x00dev, 86, 0x46); rt2800_bbp_write(rt2x00dev, 88, 0x90); rt2800_bbp_write(rt2x00dev, 92, 0x02); rt2800_bbp_write(rt2x00dev, 103, 0xc0); rt2800_bbp_write(rt2x00dev, 104, 0x92); rt2800_bbp_write(rt2x00dev, 105, 0x34); rt2800_bbp_write(rt2x00dev, 106, 0x12); rt2800_bbp_write(rt2x00dev, 120, 0x50); rt2800_bbp_write(rt2x00dev, 137, 0x0f); rt2800_bbp_write(rt2x00dev, 163, 0x9d); /* Set ITxBF timeout to 0x9C40=1000msec */ rt2800_bbp_write(rt2x00dev, 179, 0x02); rt2800_bbp_write(rt2x00dev, 180, 0x00); rt2800_bbp_write(rt2x00dev, 182, 0x40); rt2800_bbp_write(rt2x00dev, 180, 0x01); rt2800_bbp_write(rt2x00dev, 182, 0x9c); rt2800_bbp_write(rt2x00dev, 179, 0x00); /* Reprogram the inband interface to put right values in RXWI */ rt2800_bbp_write(rt2x00dev, 142, 0x04); rt2800_bbp_write(rt2x00dev, 143, 0x3b); rt2800_bbp_write(rt2x00dev, 142, 0x06); rt2800_bbp_write(rt2x00dev, 143, 0xa0); rt2800_bbp_write(rt2x00dev, 142, 0x07); rt2800_bbp_write(rt2x00dev, 143, 0xa1); rt2800_bbp_write(rt2x00dev, 142, 0x08); rt2800_bbp_write(rt2x00dev, 143, 0xa2); rt2800_bbp_write(rt2x00dev, 148, 0xc8); } static void rt2800_init_bbp_53xx(struct rt2x00_dev *rt2x00dev) { int ant, div_mode; u16 eeprom; u8 value; rt2800_bbp4_mac_if_ctrl(rt2x00dev); rt2800_bbp_write(rt2x00dev, 31, 0x08); rt2800_bbp_write(rt2x00dev, 65, 0x2c); rt2800_bbp_write(rt2x00dev, 66, 0x38); rt2800_bbp_write(rt2x00dev, 68, 0x0b); rt2800_bbp_write(rt2x00dev, 69, 0x12); rt2800_bbp_write(rt2x00dev, 73, 0x13); rt2800_bbp_write(rt2x00dev, 75, 0x46); rt2800_bbp_write(rt2x00dev, 76, 0x28); rt2800_bbp_write(rt2x00dev, 77, 0x59); 
rt2800_bbp_write(rt2x00dev, 70, 0x0a); rt2800_bbp_write(rt2x00dev, 79, 0x13); rt2800_bbp_write(rt2x00dev, 80, 0x05); rt2800_bbp_write(rt2x00dev, 81, 0x33); rt2800_bbp_write(rt2x00dev, 82, 0x62); rt2800_bbp_write(rt2x00dev, 83, 0x7a); rt2800_bbp_write(rt2x00dev, 84, 0x9a); rt2800_bbp_write(rt2x00dev, 86, 0x38); if (rt2x00_rt(rt2x00dev, RT5392)) rt2800_bbp_write(rt2x00dev, 88, 0x90); rt2800_bbp_write(rt2x00dev, 91, 0x04); rt2800_bbp_write(rt2x00dev, 92, 0x02); if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_bbp_write(rt2x00dev, 95, 0x9a); rt2800_bbp_write(rt2x00dev, 98, 0x12); } rt2800_bbp_write(rt2x00dev, 103, 0xc0); rt2800_bbp_write(rt2x00dev, 104, 0x92); rt2800_bbp_write(rt2x00dev, 105, 0x3c); if (rt2x00_rt(rt2x00dev, RT5390)) rt2800_bbp_write(rt2x00dev, 106, 0x03); else if (rt2x00_rt(rt2x00dev, RT5392)) rt2800_bbp_write(rt2x00dev, 106, 0x12); else WARN_ON(1); rt2800_bbp_write(rt2x00dev, 128, 0x12); if (rt2x00_rt(rt2x00dev, RT5392)) { rt2800_bbp_write(rt2x00dev, 134, 0xd0); rt2800_bbp_write(rt2x00dev, 135, 0xf6); } rt2800_disable_unused_dac_adc(rt2x00dev); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); ant = (div_mode == 3) ? 1 : 0; /* check if this is a Bluetooth combo card */ if (rt2x00_has_cap_bt_coexist(rt2x00dev)) { u32 reg; reg = rt2800_register_read(rt2x00dev, GPIO_CTRL); rt2x00_set_field32(&reg, GPIO_CTRL_DIR3, 0); rt2x00_set_field32(&reg, GPIO_CTRL_DIR6, 0); rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 0); rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 0); if (ant == 0) rt2x00_set_field32(&reg, GPIO_CTRL_VAL3, 1); else if (ant == 1) rt2x00_set_field32(&reg, GPIO_CTRL_VAL6, 1); rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); } /* These chips have hardware RX antenna diversity */ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R) || rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5370G)) { rt2800_bbp_write(rt2x00dev, 150, 0); /* Disable Antenna Software OFDM */ rt2800_bbp_write(rt2x00dev, 151, 0); /* Disable Antenna Software CCK */ rt2800_bbp_write(rt2x00dev, 154, 0); /* Clear previously selected antenna */ } value = rt2800_bbp_read(rt2x00dev, 152); if (ant == 0) rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); else rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); rt2800_bbp_write(rt2x00dev, 152, value); rt2800_init_freq_calibration(rt2x00dev); } static void rt2800_init_bbp_5592(struct rt2x00_dev *rt2x00dev) { int ant, div_mode; u16 eeprom; u8 value; rt2800_init_bbp_early(rt2x00dev); value = rt2800_bbp_read(rt2x00dev, 105); rt2x00_set_field8(&value, BBP105_MLD, rt2x00dev->default_ant.rx_chain_num == 2); rt2800_bbp_write(rt2x00dev, 105, value); rt2800_bbp4_mac_if_ctrl(rt2x00dev); rt2800_bbp_write(rt2x00dev, 20, 0x06); rt2800_bbp_write(rt2x00dev, 31, 0x08); rt2800_bbp_write(rt2x00dev, 65, 0x2C); rt2800_bbp_write(rt2x00dev, 68, 0xDD); rt2800_bbp_write(rt2x00dev, 69, 0x1A); rt2800_bbp_write(rt2x00dev, 70, 0x05); rt2800_bbp_write(rt2x00dev, 73, 0x13); rt2800_bbp_write(rt2x00dev, 74, 0x0F); rt2800_bbp_write(rt2x00dev, 75, 0x4F); rt2800_bbp_write(rt2x00dev, 76, 0x28); rt2800_bbp_write(rt2x00dev, 77, 0x59); rt2800_bbp_write(rt2x00dev, 84, 0x9A); rt2800_bbp_write(rt2x00dev, 86, 0x38); rt2800_bbp_write(rt2x00dev, 88, 0x90); rt2800_bbp_write(rt2x00dev, 91, 0x04); rt2800_bbp_write(rt2x00dev, 92, 0x02); rt2800_bbp_write(rt2x00dev, 95, 0x9a); rt2800_bbp_write(rt2x00dev, 98, 0x12); rt2800_bbp_write(rt2x00dev, 103, 0xC0); rt2800_bbp_write(rt2x00dev, 104, 0x92); /* FIXME BBP105 owerwrite */ rt2800_bbp_write(rt2x00dev, 105, 
0x3C); rt2800_bbp_write(rt2x00dev, 106, 0x35); rt2800_bbp_write(rt2x00dev, 128, 0x12); rt2800_bbp_write(rt2x00dev, 134, 0xD0); rt2800_bbp_write(rt2x00dev, 135, 0xF6); rt2800_bbp_write(rt2x00dev, 137, 0x0F); /* Initialize GLRT (Generalized Likehood Radio Test) */ rt2800_init_bbp_5592_glrt(rt2x00dev); rt2800_bbp4_mac_if_ctrl(rt2x00dev); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); div_mode = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); ant = (div_mode == 3) ? 1 : 0; value = rt2800_bbp_read(rt2x00dev, 152); if (ant == 0) { /* Main antenna */ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 1); } else { /* Auxiliary antenna */ rt2x00_set_field8(&value, BBP152_RX_DEFAULT_ANT, 0); } rt2800_bbp_write(rt2x00dev, 152, value); if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) { value = rt2800_bbp_read(rt2x00dev, 254); rt2x00_set_field8(&value, BBP254_BIT7, 1); rt2800_bbp_write(rt2x00dev, 254, value); } rt2800_init_freq_calibration(rt2x00dev); rt2800_bbp_write(rt2x00dev, 84, 0x19); if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) rt2800_bbp_write(rt2x00dev, 103, 0xc0); } static void rt2800_init_bbp_6352(struct rt2x00_dev *rt2x00dev) { u8 bbp; /* Apply Maximum Likelihood Detection (MLD) for 2 stream case */ bbp = rt2800_bbp_read(rt2x00dev, 105); rt2x00_set_field8(&bbp, BBP105_MLD, rt2x00dev->default_ant.rx_chain_num == 2); rt2800_bbp_write(rt2x00dev, 105, bbp); /* Avoid data loss and CRC errors */ rt2800_bbp4_mac_if_ctrl(rt2x00dev); /* Fix I/Q swap issue */ bbp = rt2800_bbp_read(rt2x00dev, 1); bbp |= 0x04; rt2800_bbp_write(rt2x00dev, 1, bbp); /* BBP for G band */ rt2800_bbp_write(rt2x00dev, 3, 0x08); rt2800_bbp_write(rt2x00dev, 4, 0x00); /* rt2800_bbp4_mac_if_ctrl? */ rt2800_bbp_write(rt2x00dev, 6, 0x08); rt2800_bbp_write(rt2x00dev, 14, 0x09); rt2800_bbp_write(rt2x00dev, 15, 0xFF); rt2800_bbp_write(rt2x00dev, 16, 0x01); rt2800_bbp_write(rt2x00dev, 20, 0x06); rt2800_bbp_write(rt2x00dev, 21, 0x00); rt2800_bbp_write(rt2x00dev, 22, 0x00); rt2800_bbp_write(rt2x00dev, 27, 0x00); rt2800_bbp_write(rt2x00dev, 28, 0x00); rt2800_bbp_write(rt2x00dev, 30, 0x00); rt2800_bbp_write(rt2x00dev, 31, 0x48); rt2800_bbp_write(rt2x00dev, 47, 0x40); rt2800_bbp_write(rt2x00dev, 62, 0x00); rt2800_bbp_write(rt2x00dev, 63, 0x00); rt2800_bbp_write(rt2x00dev, 64, 0x00); rt2800_bbp_write(rt2x00dev, 65, 0x2C); rt2800_bbp_write(rt2x00dev, 66, 0x1C); rt2800_bbp_write(rt2x00dev, 67, 0x20); rt2800_bbp_write(rt2x00dev, 68, 0xDD); rt2800_bbp_write(rt2x00dev, 69, 0x10); rt2800_bbp_write(rt2x00dev, 70, 0x05); rt2800_bbp_write(rt2x00dev, 73, 0x18); rt2800_bbp_write(rt2x00dev, 74, 0x0F); rt2800_bbp_write(rt2x00dev, 75, 0x60); rt2800_bbp_write(rt2x00dev, 76, 0x44); rt2800_bbp_write(rt2x00dev, 77, 0x59); rt2800_bbp_write(rt2x00dev, 78, 0x1E); rt2800_bbp_write(rt2x00dev, 79, 0x1C); rt2800_bbp_write(rt2x00dev, 80, 0x0C); rt2800_bbp_write(rt2x00dev, 81, 0x3A); rt2800_bbp_write(rt2x00dev, 82, 0xB6); rt2800_bbp_write(rt2x00dev, 83, 0x9A); rt2800_bbp_write(rt2x00dev, 84, 0x9A); rt2800_bbp_write(rt2x00dev, 86, 0x38); rt2800_bbp_write(rt2x00dev, 88, 0x90); rt2800_bbp_write(rt2x00dev, 91, 0x04); rt2800_bbp_write(rt2x00dev, 92, 0x02); rt2800_bbp_write(rt2x00dev, 95, 0x9A); rt2800_bbp_write(rt2x00dev, 96, 0x00); rt2800_bbp_write(rt2x00dev, 103, 0xC0); rt2800_bbp_write(rt2x00dev, 104, 0x92); /* FIXME BBP105 owerwrite */ rt2800_bbp_write(rt2x00dev, 105, 0x3C); rt2800_bbp_write(rt2x00dev, 106, 0x12); rt2800_bbp_write(rt2x00dev, 109, 0x00); rt2800_bbp_write(rt2x00dev, 134, 0x10); rt2800_bbp_write(rt2x00dev, 135, 0xA6); 
rt2800_bbp_write(rt2x00dev, 137, 0x04); rt2800_bbp_write(rt2x00dev, 142, 0x30); rt2800_bbp_write(rt2x00dev, 143, 0xF7); rt2800_bbp_write(rt2x00dev, 160, 0xEC); rt2800_bbp_write(rt2x00dev, 161, 0xC4); rt2800_bbp_write(rt2x00dev, 162, 0x77); rt2800_bbp_write(rt2x00dev, 163, 0xF9); rt2800_bbp_write(rt2x00dev, 164, 0x00); rt2800_bbp_write(rt2x00dev, 165, 0x00); rt2800_bbp_write(rt2x00dev, 186, 0x00); rt2800_bbp_write(rt2x00dev, 187, 0x00); rt2800_bbp_write(rt2x00dev, 188, 0x00); rt2800_bbp_write(rt2x00dev, 186, 0x00); rt2800_bbp_write(rt2x00dev, 187, 0x01); rt2800_bbp_write(rt2x00dev, 188, 0x00); rt2800_bbp_write(rt2x00dev, 189, 0x00); rt2800_bbp_write(rt2x00dev, 91, 0x06); rt2800_bbp_write(rt2x00dev, 92, 0x04); rt2800_bbp_write(rt2x00dev, 93, 0x54); rt2800_bbp_write(rt2x00dev, 99, 0x50); rt2800_bbp_write(rt2x00dev, 148, 0x84); rt2800_bbp_write(rt2x00dev, 167, 0x80); rt2800_bbp_write(rt2x00dev, 178, 0xFF); rt2800_bbp_write(rt2x00dev, 106, 0x13); /* BBP for G band GLRT function (BBP_128 ~ BBP_221) */ rt2800_bbp_glrt_write(rt2x00dev, 0, 0x00); rt2800_bbp_glrt_write(rt2x00dev, 1, 0x14); rt2800_bbp_glrt_write(rt2x00dev, 2, 0x20); rt2800_bbp_glrt_write(rt2x00dev, 3, 0x0A); rt2800_bbp_glrt_write(rt2x00dev, 10, 0x16); rt2800_bbp_glrt_write(rt2x00dev, 11, 0x06); rt2800_bbp_glrt_write(rt2x00dev, 12, 0x02); rt2800_bbp_glrt_write(rt2x00dev, 13, 0x07); rt2800_bbp_glrt_write(rt2x00dev, 14, 0x05); rt2800_bbp_glrt_write(rt2x00dev, 15, 0x09); rt2800_bbp_glrt_write(rt2x00dev, 16, 0x20); rt2800_bbp_glrt_write(rt2x00dev, 17, 0x08); rt2800_bbp_glrt_write(rt2x00dev, 18, 0x4A); rt2800_bbp_glrt_write(rt2x00dev, 19, 0x00); rt2800_bbp_glrt_write(rt2x00dev, 20, 0x00); rt2800_bbp_glrt_write(rt2x00dev, 128, 0xE0); rt2800_bbp_glrt_write(rt2x00dev, 129, 0x1F); rt2800_bbp_glrt_write(rt2x00dev, 130, 0x4F); rt2800_bbp_glrt_write(rt2x00dev, 131, 0x32); rt2800_bbp_glrt_write(rt2x00dev, 132, 0x08); rt2800_bbp_glrt_write(rt2x00dev, 133, 0x28); rt2800_bbp_glrt_write(rt2x00dev, 134, 0x19); rt2800_bbp_glrt_write(rt2x00dev, 135, 0x0A); rt2800_bbp_glrt_write(rt2x00dev, 138, 0x16); rt2800_bbp_glrt_write(rt2x00dev, 139, 0x10); rt2800_bbp_glrt_write(rt2x00dev, 140, 0x10); rt2800_bbp_glrt_write(rt2x00dev, 141, 0x1A); rt2800_bbp_glrt_write(rt2x00dev, 142, 0x36); rt2800_bbp_glrt_write(rt2x00dev, 143, 0x2C); rt2800_bbp_glrt_write(rt2x00dev, 144, 0x26); rt2800_bbp_glrt_write(rt2x00dev, 145, 0x24); rt2800_bbp_glrt_write(rt2x00dev, 146, 0x42); rt2800_bbp_glrt_write(rt2x00dev, 147, 0x40); rt2800_bbp_glrt_write(rt2x00dev, 148, 0x30); rt2800_bbp_glrt_write(rt2x00dev, 149, 0x29); rt2800_bbp_glrt_write(rt2x00dev, 150, 0x4C); rt2800_bbp_glrt_write(rt2x00dev, 151, 0x46); rt2800_bbp_glrt_write(rt2x00dev, 152, 0x3D); rt2800_bbp_glrt_write(rt2x00dev, 153, 0x40); rt2800_bbp_glrt_write(rt2x00dev, 154, 0x3E); rt2800_bbp_glrt_write(rt2x00dev, 155, 0x38); rt2800_bbp_glrt_write(rt2x00dev, 156, 0x3D); rt2800_bbp_glrt_write(rt2x00dev, 157, 0x2F); rt2800_bbp_glrt_write(rt2x00dev, 158, 0x3C); rt2800_bbp_glrt_write(rt2x00dev, 159, 0x34); rt2800_bbp_glrt_write(rt2x00dev, 160, 0x2C); rt2800_bbp_glrt_write(rt2x00dev, 161, 0x2F); rt2800_bbp_glrt_write(rt2x00dev, 162, 0x3C); rt2800_bbp_glrt_write(rt2x00dev, 163, 0x35); rt2800_bbp_glrt_write(rt2x00dev, 164, 0x2E); rt2800_bbp_glrt_write(rt2x00dev, 165, 0x2F); rt2800_bbp_glrt_write(rt2x00dev, 166, 0x49); rt2800_bbp_glrt_write(rt2x00dev, 167, 0x41); rt2800_bbp_glrt_write(rt2x00dev, 168, 0x36); rt2800_bbp_glrt_write(rt2x00dev, 169, 0x39); rt2800_bbp_glrt_write(rt2x00dev, 170, 0x30); rt2800_bbp_glrt_write(rt2x00dev, 171, 
0x30); rt2800_bbp_glrt_write(rt2x00dev, 172, 0x0E); rt2800_bbp_glrt_write(rt2x00dev, 173, 0x0D); rt2800_bbp_glrt_write(rt2x00dev, 174, 0x28); rt2800_bbp_glrt_write(rt2x00dev, 175, 0x21); rt2800_bbp_glrt_write(rt2x00dev, 176, 0x1C); rt2800_bbp_glrt_write(rt2x00dev, 177, 0x16); rt2800_bbp_glrt_write(rt2x00dev, 178, 0x50); rt2800_bbp_glrt_write(rt2x00dev, 179, 0x4A); rt2800_bbp_glrt_write(rt2x00dev, 180, 0x43); rt2800_bbp_glrt_write(rt2x00dev, 181, 0x50); rt2800_bbp_glrt_write(rt2x00dev, 182, 0x10); rt2800_bbp_glrt_write(rt2x00dev, 183, 0x10); rt2800_bbp_glrt_write(rt2x00dev, 184, 0x10); rt2800_bbp_glrt_write(rt2x00dev, 185, 0x10); rt2800_bbp_glrt_write(rt2x00dev, 200, 0x7D); rt2800_bbp_glrt_write(rt2x00dev, 201, 0x14); rt2800_bbp_glrt_write(rt2x00dev, 202, 0x32); rt2800_bbp_glrt_write(rt2x00dev, 203, 0x2C); rt2800_bbp_glrt_write(rt2x00dev, 204, 0x36); rt2800_bbp_glrt_write(rt2x00dev, 205, 0x4C); rt2800_bbp_glrt_write(rt2x00dev, 206, 0x43); rt2800_bbp_glrt_write(rt2x00dev, 207, 0x2C); rt2800_bbp_glrt_write(rt2x00dev, 208, 0x2E); rt2800_bbp_glrt_write(rt2x00dev, 209, 0x36); rt2800_bbp_glrt_write(rt2x00dev, 210, 0x30); rt2800_bbp_glrt_write(rt2x00dev, 211, 0x6E); /* BBP for G band DCOC function */ rt2800_bbp_dcoc_write(rt2x00dev, 140, 0x0C); rt2800_bbp_dcoc_write(rt2x00dev, 141, 0x00); rt2800_bbp_dcoc_write(rt2x00dev, 142, 0x10); rt2800_bbp_dcoc_write(rt2x00dev, 143, 0x10); rt2800_bbp_dcoc_write(rt2x00dev, 144, 0x10); rt2800_bbp_dcoc_write(rt2x00dev, 145, 0x10); rt2800_bbp_dcoc_write(rt2x00dev, 146, 0x08); rt2800_bbp_dcoc_write(rt2x00dev, 147, 0x40); rt2800_bbp_dcoc_write(rt2x00dev, 148, 0x04); rt2800_bbp_dcoc_write(rt2x00dev, 149, 0x04); rt2800_bbp_dcoc_write(rt2x00dev, 150, 0x08); rt2800_bbp_dcoc_write(rt2x00dev, 151, 0x08); rt2800_bbp_dcoc_write(rt2x00dev, 152, 0x03); rt2800_bbp_dcoc_write(rt2x00dev, 153, 0x03); rt2800_bbp_dcoc_write(rt2x00dev, 154, 0x03); rt2800_bbp_dcoc_write(rt2x00dev, 155, 0x02); rt2800_bbp_dcoc_write(rt2x00dev, 156, 0x40); rt2800_bbp_dcoc_write(rt2x00dev, 157, 0x40); rt2800_bbp_dcoc_write(rt2x00dev, 158, 0x64); rt2800_bbp_dcoc_write(rt2x00dev, 159, 0x64); rt2800_bbp4_mac_if_ctrl(rt2x00dev); } static void rt2800_init_bbp(struct rt2x00_dev *rt2x00dev) { unsigned int i; u16 eeprom; u8 reg_id; u8 value; if (rt2800_is_305x_soc(rt2x00dev)) rt2800_init_bbp_305x_soc(rt2x00dev); switch (rt2x00dev->chip.rt) { case RT2860: case RT2872: case RT2883: rt2800_init_bbp_28xx(rt2x00dev); break; case RT3070: case RT3071: case RT3090: rt2800_init_bbp_30xx(rt2x00dev); break; case RT3290: rt2800_init_bbp_3290(rt2x00dev); break; case RT3352: case RT5350: rt2800_init_bbp_3352(rt2x00dev); break; case RT3390: rt2800_init_bbp_3390(rt2x00dev); break; case RT3572: rt2800_init_bbp_3572(rt2x00dev); break; case RT3593: rt2800_init_bbp_3593(rt2x00dev); return; case RT3883: rt2800_init_bbp_3883(rt2x00dev); return; case RT5390: case RT5392: rt2800_init_bbp_53xx(rt2x00dev); break; case RT5592: rt2800_init_bbp_5592(rt2x00dev); return; case RT6352: rt2800_init_bbp_6352(rt2x00dev); break; } for (i = 0; i < EEPROM_BBP_SIZE; i++) { eeprom = rt2800_eeprom_read_from_array(rt2x00dev, EEPROM_BBP_START, i); if (eeprom != 0xffff && eeprom != 0x0000) { reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); rt2800_bbp_write(rt2x00dev, reg_id, value); } } } static void rt2800_led_open_drain_enable(struct rt2x00_dev *rt2x00dev) { u32 reg; reg = rt2800_register_read(rt2x00dev, OPT_14_CSR); rt2x00_set_field32(&reg, OPT_14_CSR_BIT0, 1); rt2800_register_write(rt2x00dev, 
OPT_14_CSR, reg); } static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev, bool bw40, u8 filter_target) { unsigned int i; u8 bbp; u8 rfcsr; u8 passband; u8 stopband; u8 overtuned = 0; u8 rfcsr24 = (bw40) ? 0x27 : 0x07; rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24); bbp = rt2800_bbp_read(rt2x00dev, 4); rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40); rt2800_bbp_write(rt2x00dev, 4, bbp); rfcsr = rt2800_rfcsr_read(rt2x00dev, 31); rt2x00_set_field8(&rfcsr, RFCSR31_RX_H20M, bw40); rt2800_rfcsr_write(rt2x00dev, 31, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 22); rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1); rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); /* * Set power & frequency of passband test tone */ rt2800_bbp_write(rt2x00dev, 24, 0); for (i = 0; i < 100; i++) { rt2800_bbp_write(rt2x00dev, 25, 0x90); msleep(1); passband = rt2800_bbp_read(rt2x00dev, 55); if (passband) break; } /* * Set power & frequency of stopband test tone */ rt2800_bbp_write(rt2x00dev, 24, 0x06); for (i = 0; i < 100; i++) { rt2800_bbp_write(rt2x00dev, 25, 0x90); msleep(1); stopband = rt2800_bbp_read(rt2x00dev, 55); if ((passband - stopband) <= filter_target) { rfcsr24++; overtuned += ((passband - stopband) == filter_target); } else break; rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24); } rfcsr24 -= !!overtuned; rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24); return rfcsr24; } static void rt2800_rf_init_calibration(struct rt2x00_dev *rt2x00dev, const unsigned int rf_reg) { u8 rfcsr; rfcsr = rt2800_rfcsr_read(rt2x00dev, rf_reg); rt2x00_set_field8(&rfcsr, FIELD8(0x80), 1); rt2800_rfcsr_write(rt2x00dev, rf_reg, rfcsr); msleep(1); rt2x00_set_field8(&rfcsr, FIELD8(0x80), 0); rt2800_rfcsr_write(rt2x00dev, rf_reg, rfcsr); } static void rt2800_rx_filter_calibration(struct rt2x00_dev *rt2x00dev) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u8 filter_tgt_bw20; u8 filter_tgt_bw40; u8 rfcsr, bbp; /* * TODO: sync filter_tgt values with vendor driver */ if (rt2x00_rt(rt2x00dev, RT3070)) { filter_tgt_bw20 = 0x16; filter_tgt_bw40 = 0x19; } else { filter_tgt_bw20 = 0x13; filter_tgt_bw40 = 0x15; } drv_data->calibration_bw20 = rt2800_init_rx_filter(rt2x00dev, false, filter_tgt_bw20); drv_data->calibration_bw40 = rt2800_init_rx_filter(rt2x00dev, true, filter_tgt_bw40); /* * Save BBP 25 & 26 values for later use in channel switching (for 3052) */ drv_data->bbp25 = rt2800_bbp_read(rt2x00dev, 25); drv_data->bbp26 = rt2800_bbp_read(rt2x00dev, 26); /* * Set back to initial state */ rt2800_bbp_write(rt2x00dev, 24, 0); rfcsr = rt2800_rfcsr_read(rt2x00dev, 22); rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0); rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); /* * Set BBP back to BW20 */ bbp = rt2800_bbp_read(rt2x00dev, 4); rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0); rt2800_bbp_write(rt2x00dev, 4, bbp); } static void rt2800_normal_mode_setup_3xxx(struct rt2x00_dev *rt2x00dev) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u8 min_gain, rfcsr, bbp; u16 eeprom; rfcsr = rt2800_rfcsr_read(rt2x00dev, 17); rt2x00_set_field8(&rfcsr, RFCSR17_TX_LO1_EN, 0); if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E) || rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) { if (!rt2x00_has_cap_external_lna_bg(rt2x00dev)) rt2x00_set_field8(&rfcsr, RFCSR17_R, 1); } min_gain = rt2x00_rt(rt2x00dev, RT3070) ? 
1 : 2; if (drv_data->txmixer_gain_24g >= min_gain) { rt2x00_set_field8(&rfcsr, RFCSR17_TXMIXER_GAIN, drv_data->txmixer_gain_24g); } rt2800_rfcsr_write(rt2x00dev, 17, rfcsr); if (rt2x00_rt(rt2x00dev, RT3090)) { /* Turn off unused DAC1 and ADC1 to reduce power consumption */ bbp = rt2800_bbp_read(rt2x00dev, 138); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) rt2x00_set_field8(&bbp, BBP138_RX_ADC1, 0); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) rt2x00_set_field8(&bbp, BBP138_TX_DAC1, 1); rt2800_bbp_write(rt2x00dev, 138, bbp); } if (rt2x00_rt(rt2x00dev, RT3070)) { rfcsr = rt2800_rfcsr_read(rt2x00dev, 27); if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) rt2x00_set_field8(&rfcsr, RFCSR27_R1, 3); else rt2x00_set_field8(&rfcsr, RFCSR27_R1, 0); rt2x00_set_field8(&rfcsr, RFCSR27_R2, 0); rt2x00_set_field8(&rfcsr, RFCSR27_R3, 0); rt2x00_set_field8(&rfcsr, RFCSR27_R4, 0); rt2800_rfcsr_write(rt2x00dev, 27, rfcsr); } else if (rt2x00_rt(rt2x00dev, RT3071) || rt2x00_rt(rt2x00dev, RT3090) || rt2x00_rt(rt2x00dev, RT3390)) { rfcsr = rt2800_rfcsr_read(rt2x00dev, 1); rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); rt2x00_set_field8(&rfcsr, RFCSR1_RX0_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_TX0_PD, 0); rt2x00_set_field8(&rfcsr, RFCSR1_RX1_PD, 1); rt2x00_set_field8(&rfcsr, RFCSR1_TX1_PD, 1); rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 15); rt2x00_set_field8(&rfcsr, RFCSR15_TX_LO2_EN, 0); rt2800_rfcsr_write(rt2x00dev, 15, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 20); rt2x00_set_field8(&rfcsr, RFCSR20_RX_LO1_EN, 0); rt2800_rfcsr_write(rt2x00dev, 20, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 21); rt2x00_set_field8(&rfcsr, RFCSR21_RX_LO2_EN, 0); rt2800_rfcsr_write(rt2x00dev, 21, rfcsr); } } static void rt2800_normal_mode_setup_3593(struct rt2x00_dev *rt2x00dev) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u8 rfcsr; u8 tx_gain; rfcsr = rt2800_rfcsr_read(rt2x00dev, 50); rt2x00_set_field8(&rfcsr, RFCSR50_TX_LO2_EN, 0); rt2800_rfcsr_write(rt2x00dev, 50, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 51); tx_gain = rt2x00_get_field8(drv_data->txmixer_gain_24g, RFCSR17_TXMIXER_GAIN); rt2x00_set_field8(&rfcsr, RFCSR51_BITS24, tx_gain); rt2800_rfcsr_write(rt2x00dev, 51, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 38); rt2x00_set_field8(&rfcsr, RFCSR38_RX_LO1_EN, 0); rt2800_rfcsr_write(rt2x00dev, 38, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 39); rt2x00_set_field8(&rfcsr, RFCSR39_RX_LO2_EN, 0); rt2800_rfcsr_write(rt2x00dev, 39, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 1); rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); rt2x00_set_field8(&rfcsr, RFCSR1_PLL_PD, 1); rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 30); rt2x00_set_field8(&rfcsr, RFCSR30_RX_VCM, 2); rt2800_rfcsr_write(rt2x00dev, 30, rfcsr); /* TODO: enable stream mode */ } static void rt2800_normal_mode_setup_5xxx(struct rt2x00_dev *rt2x00dev) { u8 reg; u16 eeprom; /* Turn off unused DAC1 and ADC1 to reduce power consumption */ reg = rt2800_bbp_read(rt2x00dev, 138); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH) == 1) rt2x00_set_field8(&reg, BBP138_RX_ADC1, 0); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH) == 1) rt2x00_set_field8(&reg, BBP138_TX_DAC1, 1); rt2800_bbp_write(rt2x00dev, 138, reg); reg = rt2800_rfcsr_read(rt2x00dev, 38); rt2x00_set_field8(&reg, RFCSR38_RX_LO1_EN, 0); 
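/* Editor's note (added comment): write back RFCSR 38 with RX_LO1_EN cleared; RFCSR 39 RX_LO2_EN and RFCSR 30 RX_VCM are adjusted in the same way below before returning to normal (non-calibration) operation. */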
rt2800_rfcsr_write(rt2x00dev, 38, reg); reg = rt2800_rfcsr_read(rt2x00dev, 39); rt2x00_set_field8(&reg, RFCSR39_RX_LO2_EN, 0); rt2800_rfcsr_write(rt2x00dev, 39, reg); rt2800_bbp4_mac_if_ctrl(rt2x00dev); reg = rt2800_rfcsr_read(rt2x00dev, 30); rt2x00_set_field8(&reg, RFCSR30_RX_VCM, 2); rt2800_rfcsr_write(rt2x00dev, 30, reg); } static void rt2800_init_rfcsr_305x_soc(struct rt2x00_dev *rt2x00dev) { rt2800_rf_init_calibration(rt2x00dev, 30); rt2800_rfcsr_write(rt2x00dev, 0, 0x50); rt2800_rfcsr_write(rt2x00dev, 1, 0x01); rt2800_rfcsr_write(rt2x00dev, 2, 0xf7); rt2800_rfcsr_write(rt2x00dev, 3, 0x75); rt2800_rfcsr_write(rt2x00dev, 4, 0x40); rt2800_rfcsr_write(rt2x00dev, 5, 0x03); rt2800_rfcsr_write(rt2x00dev, 6, 0x02); rt2800_rfcsr_write(rt2x00dev, 7, 0x50); rt2800_rfcsr_write(rt2x00dev, 8, 0x39); rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); rt2800_rfcsr_write(rt2x00dev, 10, 0x60); rt2800_rfcsr_write(rt2x00dev, 11, 0x21); rt2800_rfcsr_write(rt2x00dev, 12, 0x75); rt2800_rfcsr_write(rt2x00dev, 13, 0x75); rt2800_rfcsr_write(rt2x00dev, 14, 0x90); rt2800_rfcsr_write(rt2x00dev, 15, 0x58); rt2800_rfcsr_write(rt2x00dev, 16, 0xb3); rt2800_rfcsr_write(rt2x00dev, 17, 0x92); rt2800_rfcsr_write(rt2x00dev, 18, 0x2c); rt2800_rfcsr_write(rt2x00dev, 19, 0x02); rt2800_rfcsr_write(rt2x00dev, 20, 0xba); rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); rt2800_rfcsr_write(rt2x00dev, 22, 0x00); rt2800_rfcsr_write(rt2x00dev, 23, 0x31); rt2800_rfcsr_write(rt2x00dev, 24, 0x08); rt2800_rfcsr_write(rt2x00dev, 25, 0x01); rt2800_rfcsr_write(rt2x00dev, 26, 0x25); rt2800_rfcsr_write(rt2x00dev, 27, 0x23); rt2800_rfcsr_write(rt2x00dev, 28, 0x13); rt2800_rfcsr_write(rt2x00dev, 29, 0x83); rt2800_rfcsr_write(rt2x00dev, 30, 0x00); rt2800_rfcsr_write(rt2x00dev, 31, 0x00); } static void rt2800_init_rfcsr_30xx(struct rt2x00_dev *rt2x00dev) { u8 rfcsr; u16 eeprom; u32 reg; /* XXX vendor driver do this only for 3070 */ rt2800_rf_init_calibration(rt2x00dev, 30); rt2800_rfcsr_write(rt2x00dev, 4, 0x40); rt2800_rfcsr_write(rt2x00dev, 5, 0x03); rt2800_rfcsr_write(rt2x00dev, 6, 0x02); rt2800_rfcsr_write(rt2x00dev, 7, 0x60); rt2800_rfcsr_write(rt2x00dev, 9, 0x0f); rt2800_rfcsr_write(rt2x00dev, 10, 0x41); rt2800_rfcsr_write(rt2x00dev, 11, 0x21); rt2800_rfcsr_write(rt2x00dev, 12, 0x7b); rt2800_rfcsr_write(rt2x00dev, 14, 0x90); rt2800_rfcsr_write(rt2x00dev, 15, 0x58); rt2800_rfcsr_write(rt2x00dev, 16, 0xb3); rt2800_rfcsr_write(rt2x00dev, 17, 0x92); rt2800_rfcsr_write(rt2x00dev, 18, 0x2c); rt2800_rfcsr_write(rt2x00dev, 19, 0x02); rt2800_rfcsr_write(rt2x00dev, 20, 0xba); rt2800_rfcsr_write(rt2x00dev, 21, 0xdb); rt2800_rfcsr_write(rt2x00dev, 24, 0x16); rt2800_rfcsr_write(rt2x00dev, 25, 0x03); rt2800_rfcsr_write(rt2x00dev, 29, 0x1f); if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F)) { reg = rt2800_register_read(rt2x00dev, LDO_CFG0); rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1); rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3); rt2800_register_write(rt2x00dev, LDO_CFG0, reg); } else if (rt2x00_rt(rt2x00dev, RT3071) || rt2x00_rt(rt2x00dev, RT3090)) { rt2800_rfcsr_write(rt2x00dev, 31, 0x14); rfcsr = rt2800_rfcsr_read(rt2x00dev, 6); rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1); rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); reg = rt2800_register_read(rt2x00dev, LDO_CFG0); rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1); if (rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) { eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_DAC_TEST)) rt2x00_set_field32(&reg, 
LDO_CFG0_LDO_CORE_VLEVEL, 3); else rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0); } rt2800_register_write(rt2x00dev, LDO_CFG0, reg); reg = rt2800_register_read(rt2x00dev, GPIO_SWITCH); rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0); rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg); } rt2800_rx_filter_calibration(rt2x00dev); if (rt2x00_rt_rev_lt(rt2x00dev, RT3070, REV_RT3070F) || rt2x00_rt_rev_lt(rt2x00dev, RT3071, REV_RT3071E) || rt2x00_rt_rev_lt(rt2x00dev, RT3090, REV_RT3090E)) rt2800_rfcsr_write(rt2x00dev, 27, 0x03); rt2800_led_open_drain_enable(rt2x00dev); rt2800_normal_mode_setup_3xxx(rt2x00dev); } static void rt2800_init_rfcsr_3290(struct rt2x00_dev *rt2x00dev) { u8 rfcsr; rt2800_rf_init_calibration(rt2x00dev, 2); rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); rt2800_rfcsr_write(rt2x00dev, 2, 0x80); rt2800_rfcsr_write(rt2x00dev, 3, 0x08); rt2800_rfcsr_write(rt2x00dev, 4, 0x00); rt2800_rfcsr_write(rt2x00dev, 6, 0xa0); rt2800_rfcsr_write(rt2x00dev, 8, 0xf3); rt2800_rfcsr_write(rt2x00dev, 9, 0x02); rt2800_rfcsr_write(rt2x00dev, 10, 0x53); rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); rt2800_rfcsr_write(rt2x00dev, 12, 0x46); rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); rt2800_rfcsr_write(rt2x00dev, 18, 0x02); rt2800_rfcsr_write(rt2x00dev, 22, 0x20); rt2800_rfcsr_write(rt2x00dev, 25, 0x83); rt2800_rfcsr_write(rt2x00dev, 26, 0x82); rt2800_rfcsr_write(rt2x00dev, 27, 0x09); rt2800_rfcsr_write(rt2x00dev, 29, 0x10); rt2800_rfcsr_write(rt2x00dev, 30, 0x10); rt2800_rfcsr_write(rt2x00dev, 31, 0x80); rt2800_rfcsr_write(rt2x00dev, 32, 0x80); rt2800_rfcsr_write(rt2x00dev, 33, 0x00); rt2800_rfcsr_write(rt2x00dev, 34, 0x05); rt2800_rfcsr_write(rt2x00dev, 35, 0x12); rt2800_rfcsr_write(rt2x00dev, 36, 0x00); rt2800_rfcsr_write(rt2x00dev, 38, 0x85); rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); rt2800_rfcsr_write(rt2x00dev, 42, 0xd5); rt2800_rfcsr_write(rt2x00dev, 43, 0x7b); rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); rt2800_rfcsr_write(rt2x00dev, 46, 0x73); rt2800_rfcsr_write(rt2x00dev, 47, 0x00); rt2800_rfcsr_write(rt2x00dev, 48, 0x10); rt2800_rfcsr_write(rt2x00dev, 49, 0x98); rt2800_rfcsr_write(rt2x00dev, 52, 0x38); rt2800_rfcsr_write(rt2x00dev, 53, 0x00); rt2800_rfcsr_write(rt2x00dev, 54, 0x78); rt2800_rfcsr_write(rt2x00dev, 55, 0x43); rt2800_rfcsr_write(rt2x00dev, 56, 0x02); rt2800_rfcsr_write(rt2x00dev, 57, 0x80); rt2800_rfcsr_write(rt2x00dev, 58, 0x7f); rt2800_rfcsr_write(rt2x00dev, 59, 0x09); rt2800_rfcsr_write(rt2x00dev, 60, 0x45); rt2800_rfcsr_write(rt2x00dev, 61, 0xc1); rfcsr = rt2800_rfcsr_read(rt2x00dev, 29); rt2x00_set_field8(&rfcsr, RFCSR29_RSSI_GAIN, 3); rt2800_rfcsr_write(rt2x00dev, 29, rfcsr); rt2800_led_open_drain_enable(rt2x00dev); rt2800_normal_mode_setup_3xxx(rt2x00dev); } static void rt2800_init_rfcsr_3352(struct rt2x00_dev *rt2x00dev) { int tx0_ext_pa = test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags); int tx1_ext_pa = test_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags); u8 rfcsr; rt2800_rf_init_calibration(rt2x00dev, 30); rt2800_rfcsr_write(rt2x00dev, 0, 0xf0); rt2800_rfcsr_write(rt2x00dev, 1, 0x23); rt2800_rfcsr_write(rt2x00dev, 2, 0x50); rt2800_rfcsr_write(rt2x00dev, 3, 0x18); rt2800_rfcsr_write(rt2x00dev, 4, 0x00); rt2800_rfcsr_write(rt2x00dev, 5, 0x00); rt2800_rfcsr_write(rt2x00dev, 6, 0x33); rt2800_rfcsr_write(rt2x00dev, 7, 0x00); rt2800_rfcsr_write(rt2x00dev, 8, 0xf1); rt2800_rfcsr_write(rt2x00dev, 9, 0x02); rt2800_rfcsr_write(rt2x00dev, 10, 0xd2); 
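/* Editor's note (added comment): the remaining RT3352 RFCSR defaults follow; registers 34, 41/42 and 50-58 below are programmed differently depending on whether external PAs are fitted on TX0/TX1 (tx0_ext_pa / tx1_ext_pa). */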
rt2800_rfcsr_write(rt2x00dev, 11, 0x42); rt2800_rfcsr_write(rt2x00dev, 12, 0x1c); rt2800_rfcsr_write(rt2x00dev, 13, 0x00); rt2800_rfcsr_write(rt2x00dev, 14, 0x5a); rt2800_rfcsr_write(rt2x00dev, 15, 0x00); rt2800_rfcsr_write(rt2x00dev, 16, 0x01); rt2800_rfcsr_write(rt2x00dev, 18, 0x45); rt2800_rfcsr_write(rt2x00dev, 19, 0x02); rt2800_rfcsr_write(rt2x00dev, 20, 0x00); rt2800_rfcsr_write(rt2x00dev, 21, 0x00); rt2800_rfcsr_write(rt2x00dev, 22, 0x00); rt2800_rfcsr_write(rt2x00dev, 23, 0x00); rt2800_rfcsr_write(rt2x00dev, 24, 0x00); rt2800_rfcsr_write(rt2x00dev, 25, 0x80); rt2800_rfcsr_write(rt2x00dev, 26, 0x00); rt2800_rfcsr_write(rt2x00dev, 27, 0x03); rt2800_rfcsr_write(rt2x00dev, 28, 0x03); rt2800_rfcsr_write(rt2x00dev, 29, 0x00); rt2800_rfcsr_write(rt2x00dev, 30, 0x10); rt2800_rfcsr_write(rt2x00dev, 31, 0x80); rt2800_rfcsr_write(rt2x00dev, 32, 0x80); rt2800_rfcsr_write(rt2x00dev, 33, 0x00); rfcsr = 0x01; if (tx0_ext_pa) rt2x00_set_field8(&rfcsr, RFCSR34_TX0_EXT_PA, 1); if (tx1_ext_pa) rt2x00_set_field8(&rfcsr, RFCSR34_TX1_EXT_PA, 1); rt2800_rfcsr_write(rt2x00dev, 34, rfcsr); rt2800_rfcsr_write(rt2x00dev, 35, 0x03); rt2800_rfcsr_write(rt2x00dev, 36, 0xbd); rt2800_rfcsr_write(rt2x00dev, 37, 0x3c); rt2800_rfcsr_write(rt2x00dev, 38, 0x5f); rt2800_rfcsr_write(rt2x00dev, 39, 0xc5); rt2800_rfcsr_write(rt2x00dev, 40, 0x33); rfcsr = 0x52; if (!tx0_ext_pa) { rt2x00_set_field8(&rfcsr, RFCSR41_BIT1, 1); rt2x00_set_field8(&rfcsr, RFCSR41_BIT4, 1); } rt2800_rfcsr_write(rt2x00dev, 41, rfcsr); rfcsr = 0x52; if (!tx1_ext_pa) { rt2x00_set_field8(&rfcsr, RFCSR42_BIT1, 1); rt2x00_set_field8(&rfcsr, RFCSR42_BIT4, 1); } rt2800_rfcsr_write(rt2x00dev, 42, rfcsr); rt2800_rfcsr_write(rt2x00dev, 43, 0xdb); rt2800_rfcsr_write(rt2x00dev, 44, 0xdb); rt2800_rfcsr_write(rt2x00dev, 45, 0xdb); rt2800_rfcsr_write(rt2x00dev, 46, 0xdd); rt2800_rfcsr_write(rt2x00dev, 47, 0x0d); rt2800_rfcsr_write(rt2x00dev, 48, 0x14); rt2800_rfcsr_write(rt2x00dev, 49, 0x00); rfcsr = 0x2d; if (tx0_ext_pa) rt2x00_set_field8(&rfcsr, RFCSR50_TX0_EXT_PA, 1); if (tx1_ext_pa) rt2x00_set_field8(&rfcsr, RFCSR50_TX1_EXT_PA, 1); rt2800_rfcsr_write(rt2x00dev, 50, rfcsr); rt2800_rfcsr_write(rt2x00dev, 51, (tx0_ext_pa ? 0x52 : 0x7f)); rt2800_rfcsr_write(rt2x00dev, 52, (tx0_ext_pa ? 0xc0 : 0x00)); rt2800_rfcsr_write(rt2x00dev, 53, (tx0_ext_pa ? 0xd2 : 0x52)); rt2800_rfcsr_write(rt2x00dev, 54, (tx0_ext_pa ? 0xc0 : 0x1b)); rt2800_rfcsr_write(rt2x00dev, 55, (tx1_ext_pa ? 0x52 : 0x7f)); rt2800_rfcsr_write(rt2x00dev, 56, (tx1_ext_pa ? 0xc0 : 0x00)); rt2800_rfcsr_write(rt2x00dev, 57, (tx0_ext_pa ? 0x49 : 0x52)); rt2800_rfcsr_write(rt2x00dev, 58, (tx1_ext_pa ? 
0xc0 : 0x1b)); rt2800_rfcsr_write(rt2x00dev, 59, 0x00); rt2800_rfcsr_write(rt2x00dev, 60, 0x00); rt2800_rfcsr_write(rt2x00dev, 61, 0x00); rt2800_rfcsr_write(rt2x00dev, 62, 0x00); rt2800_rfcsr_write(rt2x00dev, 63, 0x00); rt2800_rx_filter_calibration(rt2x00dev); rt2800_led_open_drain_enable(rt2x00dev); rt2800_normal_mode_setup_3xxx(rt2x00dev); } static void rt2800_init_rfcsr_3390(struct rt2x00_dev *rt2x00dev) { u32 reg; rt2800_rf_init_calibration(rt2x00dev, 30); rt2800_rfcsr_write(rt2x00dev, 0, 0xa0); rt2800_rfcsr_write(rt2x00dev, 1, 0xe1); rt2800_rfcsr_write(rt2x00dev, 2, 0xf1); rt2800_rfcsr_write(rt2x00dev, 3, 0x62); rt2800_rfcsr_write(rt2x00dev, 4, 0x40); rt2800_rfcsr_write(rt2x00dev, 5, 0x8b); rt2800_rfcsr_write(rt2x00dev, 6, 0x42); rt2800_rfcsr_write(rt2x00dev, 7, 0x34); rt2800_rfcsr_write(rt2x00dev, 8, 0x00); rt2800_rfcsr_write(rt2x00dev, 9, 0xc0); rt2800_rfcsr_write(rt2x00dev, 10, 0x61); rt2800_rfcsr_write(rt2x00dev, 11, 0x21); rt2800_rfcsr_write(rt2x00dev, 12, 0x3b); rt2800_rfcsr_write(rt2x00dev, 13, 0xe0); rt2800_rfcsr_write(rt2x00dev, 14, 0x90); rt2800_rfcsr_write(rt2x00dev, 15, 0x53); rt2800_rfcsr_write(rt2x00dev, 16, 0xe0); rt2800_rfcsr_write(rt2x00dev, 17, 0x94); rt2800_rfcsr_write(rt2x00dev, 18, 0x5c); rt2800_rfcsr_write(rt2x00dev, 19, 0x4a); rt2800_rfcsr_write(rt2x00dev, 20, 0xb2); rt2800_rfcsr_write(rt2x00dev, 21, 0xf6); rt2800_rfcsr_write(rt2x00dev, 22, 0x00); rt2800_rfcsr_write(rt2x00dev, 23, 0x14); rt2800_rfcsr_write(rt2x00dev, 24, 0x08); rt2800_rfcsr_write(rt2x00dev, 25, 0x3d); rt2800_rfcsr_write(rt2x00dev, 26, 0x85); rt2800_rfcsr_write(rt2x00dev, 27, 0x00); rt2800_rfcsr_write(rt2x00dev, 28, 0x41); rt2800_rfcsr_write(rt2x00dev, 29, 0x8f); rt2800_rfcsr_write(rt2x00dev, 30, 0x20); rt2800_rfcsr_write(rt2x00dev, 31, 0x0f); reg = rt2800_register_read(rt2x00dev, GPIO_SWITCH); rt2x00_set_field32(&reg, GPIO_SWITCH_5, 0); rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg); rt2800_rx_filter_calibration(rt2x00dev); if (rt2x00_rt_rev_lt(rt2x00dev, RT3390, REV_RT3390E)) rt2800_rfcsr_write(rt2x00dev, 27, 0x03); rt2800_led_open_drain_enable(rt2x00dev); rt2800_normal_mode_setup_3xxx(rt2x00dev); } static void rt2800_init_rfcsr_3572(struct rt2x00_dev *rt2x00dev) { u8 rfcsr; u32 reg; rt2800_rf_init_calibration(rt2x00dev, 30); rt2800_rfcsr_write(rt2x00dev, 0, 0x70); rt2800_rfcsr_write(rt2x00dev, 1, 0x81); rt2800_rfcsr_write(rt2x00dev, 2, 0xf1); rt2800_rfcsr_write(rt2x00dev, 3, 0x02); rt2800_rfcsr_write(rt2x00dev, 4, 0x4c); rt2800_rfcsr_write(rt2x00dev, 5, 0x05); rt2800_rfcsr_write(rt2x00dev, 6, 0x4a); rt2800_rfcsr_write(rt2x00dev, 7, 0xd8); rt2800_rfcsr_write(rt2x00dev, 9, 0xc3); rt2800_rfcsr_write(rt2x00dev, 10, 0xf1); rt2800_rfcsr_write(rt2x00dev, 11, 0xb9); rt2800_rfcsr_write(rt2x00dev, 12, 0x70); rt2800_rfcsr_write(rt2x00dev, 13, 0x65); rt2800_rfcsr_write(rt2x00dev, 14, 0xa0); rt2800_rfcsr_write(rt2x00dev, 15, 0x53); rt2800_rfcsr_write(rt2x00dev, 16, 0x4c); rt2800_rfcsr_write(rt2x00dev, 17, 0x23); rt2800_rfcsr_write(rt2x00dev, 18, 0xac); rt2800_rfcsr_write(rt2x00dev, 19, 0x93); rt2800_rfcsr_write(rt2x00dev, 20, 0xb3); rt2800_rfcsr_write(rt2x00dev, 21, 0xd0); rt2800_rfcsr_write(rt2x00dev, 22, 0x00); rt2800_rfcsr_write(rt2x00dev, 23, 0x3c); rt2800_rfcsr_write(rt2x00dev, 24, 0x16); rt2800_rfcsr_write(rt2x00dev, 25, 0x15); rt2800_rfcsr_write(rt2x00dev, 26, 0x85); rt2800_rfcsr_write(rt2x00dev, 27, 0x00); rt2800_rfcsr_write(rt2x00dev, 28, 0x00); rt2800_rfcsr_write(rt2x00dev, 29, 0x9b); rt2800_rfcsr_write(rt2x00dev, 30, 0x09); rt2800_rfcsr_write(rt2x00dev, 31, 0x10); rfcsr = 
rt2800_rfcsr_read(rt2x00dev, 6); rt2x00_set_field8(&rfcsr, RFCSR6_R2, 1); rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); reg = rt2800_register_read(rt2x00dev, LDO_CFG0); rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3); rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1); rt2800_register_write(rt2x00dev, LDO_CFG0, reg); msleep(1); reg = rt2800_register_read(rt2x00dev, LDO_CFG0); rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0); rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1); rt2800_register_write(rt2x00dev, LDO_CFG0, reg); rt2800_rx_filter_calibration(rt2x00dev); rt2800_led_open_drain_enable(rt2x00dev); rt2800_normal_mode_setup_3xxx(rt2x00dev); } static void rt3593_post_bbp_init(struct rt2x00_dev *rt2x00dev) { u8 bbp; bool txbf_enabled = false; /* FIXME */ bbp = rt2800_bbp_read(rt2x00dev, 105); if (rt2x00dev->default_ant.rx_chain_num == 1) rt2x00_set_field8(&bbp, BBP105_MLD, 0); else rt2x00_set_field8(&bbp, BBP105_MLD, 1); rt2800_bbp_write(rt2x00dev, 105, bbp); rt2800_bbp4_mac_if_ctrl(rt2x00dev); rt2800_bbp_write(rt2x00dev, 92, 0x02); rt2800_bbp_write(rt2x00dev, 82, 0x82); rt2800_bbp_write(rt2x00dev, 106, 0x05); rt2800_bbp_write(rt2x00dev, 104, 0x92); rt2800_bbp_write(rt2x00dev, 88, 0x90); rt2800_bbp_write(rt2x00dev, 148, 0xc8); rt2800_bbp_write(rt2x00dev, 47, 0x48); rt2800_bbp_write(rt2x00dev, 120, 0x50); if (txbf_enabled) rt2800_bbp_write(rt2x00dev, 163, 0xbd); else rt2800_bbp_write(rt2x00dev, 163, 0x9d); /* SNR mapping */ rt2800_bbp_write(rt2x00dev, 142, 6); rt2800_bbp_write(rt2x00dev, 143, 160); rt2800_bbp_write(rt2x00dev, 142, 7); rt2800_bbp_write(rt2x00dev, 143, 161); rt2800_bbp_write(rt2x00dev, 142, 8); rt2800_bbp_write(rt2x00dev, 143, 162); /* ADC/DAC control */ rt2800_bbp_write(rt2x00dev, 31, 0x08); /* RX AGC energy lower bound in log2 */ rt2800_bbp_write(rt2x00dev, 68, 0x0b); /* FIXME: BBP 105 owerwrite? 
*/ rt2800_bbp_write(rt2x00dev, 105, 0x04); } static void rt2800_init_rfcsr_3593(struct rt2x00_dev *rt2x00dev) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u32 reg; u8 rfcsr; /* Disable GPIO #4 and #7 function for LAN PE control */ reg = rt2800_register_read(rt2x00dev, GPIO_SWITCH); rt2x00_set_field32(&reg, GPIO_SWITCH_4, 0); rt2x00_set_field32(&reg, GPIO_SWITCH_7, 0); rt2800_register_write(rt2x00dev, GPIO_SWITCH, reg); /* Initialize default register values */ rt2800_rfcsr_write(rt2x00dev, 1, 0x03); rt2800_rfcsr_write(rt2x00dev, 3, 0x80); rt2800_rfcsr_write(rt2x00dev, 5, 0x00); rt2800_rfcsr_write(rt2x00dev, 6, 0x40); rt2800_rfcsr_write(rt2x00dev, 8, 0xf1); rt2800_rfcsr_write(rt2x00dev, 9, 0x02); rt2800_rfcsr_write(rt2x00dev, 10, 0xd3); rt2800_rfcsr_write(rt2x00dev, 11, 0x40); rt2800_rfcsr_write(rt2x00dev, 12, 0x4e); rt2800_rfcsr_write(rt2x00dev, 13, 0x12); rt2800_rfcsr_write(rt2x00dev, 18, 0x40); rt2800_rfcsr_write(rt2x00dev, 22, 0x20); rt2800_rfcsr_write(rt2x00dev, 30, 0x10); rt2800_rfcsr_write(rt2x00dev, 31, 0x80); rt2800_rfcsr_write(rt2x00dev, 32, 0x78); rt2800_rfcsr_write(rt2x00dev, 33, 0x3b); rt2800_rfcsr_write(rt2x00dev, 34, 0x3c); rt2800_rfcsr_write(rt2x00dev, 35, 0xe0); rt2800_rfcsr_write(rt2x00dev, 38, 0x86); rt2800_rfcsr_write(rt2x00dev, 39, 0x23); rt2800_rfcsr_write(rt2x00dev, 44, 0xd3); rt2800_rfcsr_write(rt2x00dev, 45, 0xbb); rt2800_rfcsr_write(rt2x00dev, 46, 0x60); rt2800_rfcsr_write(rt2x00dev, 49, 0x8e); rt2800_rfcsr_write(rt2x00dev, 50, 0x86); rt2800_rfcsr_write(rt2x00dev, 51, 0x75); rt2800_rfcsr_write(rt2x00dev, 52, 0x45); rt2800_rfcsr_write(rt2x00dev, 53, 0x18); rt2800_rfcsr_write(rt2x00dev, 54, 0x18); rt2800_rfcsr_write(rt2x00dev, 55, 0x18); rt2800_rfcsr_write(rt2x00dev, 56, 0xdb); rt2800_rfcsr_write(rt2x00dev, 57, 0x6e); /* Initiate calibration */ /* TODO: use rt2800_rf_init_calibration ? 
*/ rfcsr = rt2800_rfcsr_read(rt2x00dev, 2); rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); rt2800_freq_cal_mode1(rt2x00dev); rfcsr = rt2800_rfcsr_read(rt2x00dev, 18); rt2x00_set_field8(&rfcsr, RFCSR18_XO_TUNE_BYPASS, 1); rt2800_rfcsr_write(rt2x00dev, 18, rfcsr); reg = rt2800_register_read(rt2x00dev, LDO_CFG0); rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 3); rt2x00_set_field32(&reg, LDO_CFG0_BGSEL, 1); rt2800_register_write(rt2x00dev, LDO_CFG0, reg); usleep_range(1000, 1500); reg = rt2800_register_read(rt2x00dev, LDO_CFG0); rt2x00_set_field32(&reg, LDO_CFG0_LDO_CORE_VLEVEL, 0); rt2800_register_write(rt2x00dev, LDO_CFG0, reg); /* Set initial values for RX filter calibration */ drv_data->calibration_bw20 = 0x1f; drv_data->calibration_bw40 = 0x2f; /* Save BBP 25 & 26 values for later use in channel switching */ drv_data->bbp25 = rt2800_bbp_read(rt2x00dev, 25); drv_data->bbp26 = rt2800_bbp_read(rt2x00dev, 26); rt2800_led_open_drain_enable(rt2x00dev); rt2800_normal_mode_setup_3593(rt2x00dev); rt3593_post_bbp_init(rt2x00dev); /* TODO: enable stream mode support */ } static void rt2800_init_rfcsr_5350(struct rt2x00_dev *rt2x00dev) { rt2800_rfcsr_write(rt2x00dev, 0, 0xf0); rt2800_rfcsr_write(rt2x00dev, 1, 0x23); rt2800_rfcsr_write(rt2x00dev, 2, 0x50); rt2800_rfcsr_write(rt2x00dev, 3, 0x08); rt2800_rfcsr_write(rt2x00dev, 4, 0x49); rt2800_rfcsr_write(rt2x00dev, 5, 0x10); rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); rt2800_rfcsr_write(rt2x00dev, 7, 0x00); rt2800_rfcsr_write(rt2x00dev, 8, 0xf1); rt2800_rfcsr_write(rt2x00dev, 9, 0x02); rt2800_rfcsr_write(rt2x00dev, 10, 0x53); rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); rt2800_rfcsr_write(rt2x00dev, 12, 0x46); if (rt2800_clk_is_20mhz(rt2x00dev)) rt2800_rfcsr_write(rt2x00dev, 13, 0x1f); else rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); rt2800_rfcsr_write(rt2x00dev, 14, 0x00); rt2800_rfcsr_write(rt2x00dev, 15, 0x00); rt2800_rfcsr_write(rt2x00dev, 16, 0xc0); rt2800_rfcsr_write(rt2x00dev, 18, 0x03); rt2800_rfcsr_write(rt2x00dev, 19, 0x00); rt2800_rfcsr_write(rt2x00dev, 20, 0x00); rt2800_rfcsr_write(rt2x00dev, 21, 0x00); rt2800_rfcsr_write(rt2x00dev, 22, 0x20); rt2800_rfcsr_write(rt2x00dev, 23, 0x00); rt2800_rfcsr_write(rt2x00dev, 24, 0x00); rt2800_rfcsr_write(rt2x00dev, 25, 0x80); rt2800_rfcsr_write(rt2x00dev, 26, 0x00); rt2800_rfcsr_write(rt2x00dev, 27, 0x03); rt2800_rfcsr_write(rt2x00dev, 28, 0x00); rt2800_rfcsr_write(rt2x00dev, 29, 0xd0); rt2800_rfcsr_write(rt2x00dev, 30, 0x10); rt2800_rfcsr_write(rt2x00dev, 31, 0x80); rt2800_rfcsr_write(rt2x00dev, 32, 0x80); rt2800_rfcsr_write(rt2x00dev, 33, 0x00); rt2800_rfcsr_write(rt2x00dev, 34, 0x07); rt2800_rfcsr_write(rt2x00dev, 35, 0x12); rt2800_rfcsr_write(rt2x00dev, 36, 0x00); rt2800_rfcsr_write(rt2x00dev, 37, 0x08); rt2800_rfcsr_write(rt2x00dev, 38, 0x85); rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); rt2800_rfcsr_write(rt2x00dev, 42, 0xd5); rt2800_rfcsr_write(rt2x00dev, 43, 0x9b); rt2800_rfcsr_write(rt2x00dev, 44, 0x0c); rt2800_rfcsr_write(rt2x00dev, 45, 0xa6); rt2800_rfcsr_write(rt2x00dev, 46, 0x73); rt2800_rfcsr_write(rt2x00dev, 47, 0x00); rt2800_rfcsr_write(rt2x00dev, 48, 0x10); rt2800_rfcsr_write(rt2x00dev, 49, 0x80); rt2800_rfcsr_write(rt2x00dev, 50, 0x00); rt2800_rfcsr_write(rt2x00dev, 51, 0x00); rt2800_rfcsr_write(rt2x00dev, 52, 0x38); rt2800_rfcsr_write(rt2x00dev, 53, 0x00); rt2800_rfcsr_write(rt2x00dev, 54, 0x38); rt2800_rfcsr_write(rt2x00dev, 55, 0x43); rt2800_rfcsr_write(rt2x00dev, 56, 
0x82); rt2800_rfcsr_write(rt2x00dev, 57, 0x00); rt2800_rfcsr_write(rt2x00dev, 58, 0x39); rt2800_rfcsr_write(rt2x00dev, 59, 0x0b); rt2800_rfcsr_write(rt2x00dev, 60, 0x45); rt2800_rfcsr_write(rt2x00dev, 61, 0xd1); rt2800_rfcsr_write(rt2x00dev, 62, 0x00); rt2800_rfcsr_write(rt2x00dev, 63, 0x00); } static void rt2800_init_rfcsr_3883(struct rt2x00_dev *rt2x00dev) { u8 rfcsr; /* TODO: get the actual ECO value from the SoC */ const unsigned int eco = 5; rt2800_rf_init_calibration(rt2x00dev, 2); rt2800_rfcsr_write(rt2x00dev, 0, 0xe0); rt2800_rfcsr_write(rt2x00dev, 1, 0x03); rt2800_rfcsr_write(rt2x00dev, 2, 0x50); rt2800_rfcsr_write(rt2x00dev, 3, 0x20); rt2800_rfcsr_write(rt2x00dev, 4, 0x00); rt2800_rfcsr_write(rt2x00dev, 5, 0x00); rt2800_rfcsr_write(rt2x00dev, 6, 0x40); rt2800_rfcsr_write(rt2x00dev, 7, 0x00); rt2800_rfcsr_write(rt2x00dev, 8, 0x5b); rt2800_rfcsr_write(rt2x00dev, 9, 0x08); rt2800_rfcsr_write(rt2x00dev, 10, 0xd3); rt2800_rfcsr_write(rt2x00dev, 11, 0x48); rt2800_rfcsr_write(rt2x00dev, 12, 0x1a); rt2800_rfcsr_write(rt2x00dev, 13, 0x12); rt2800_rfcsr_write(rt2x00dev, 14, 0x00); rt2800_rfcsr_write(rt2x00dev, 15, 0x00); rt2800_rfcsr_write(rt2x00dev, 16, 0x00); /* RFCSR 17 will be initialized later based on the * frequency offset stored in the EEPROM */ rt2800_rfcsr_write(rt2x00dev, 18, 0x40); rt2800_rfcsr_write(rt2x00dev, 19, 0x00); rt2800_rfcsr_write(rt2x00dev, 20, 0x00); rt2800_rfcsr_write(rt2x00dev, 21, 0x00); rt2800_rfcsr_write(rt2x00dev, 22, 0x20); rt2800_rfcsr_write(rt2x00dev, 23, 0xc0); rt2800_rfcsr_write(rt2x00dev, 24, 0x00); rt2800_rfcsr_write(rt2x00dev, 25, 0x00); rt2800_rfcsr_write(rt2x00dev, 26, 0x00); rt2800_rfcsr_write(rt2x00dev, 27, 0x00); rt2800_rfcsr_write(rt2x00dev, 28, 0x00); rt2800_rfcsr_write(rt2x00dev, 29, 0x00); rt2800_rfcsr_write(rt2x00dev, 30, 0x10); rt2800_rfcsr_write(rt2x00dev, 31, 0x80); rt2800_rfcsr_write(rt2x00dev, 32, 0x80); rt2800_rfcsr_write(rt2x00dev, 33, 0x00); rt2800_rfcsr_write(rt2x00dev, 34, 0x20); rt2800_rfcsr_write(rt2x00dev, 35, 0x00); rt2800_rfcsr_write(rt2x00dev, 36, 0x00); rt2800_rfcsr_write(rt2x00dev, 37, 0x00); rt2800_rfcsr_write(rt2x00dev, 38, 0x86); rt2800_rfcsr_write(rt2x00dev, 39, 0x23); rt2800_rfcsr_write(rt2x00dev, 40, 0x00); rt2800_rfcsr_write(rt2x00dev, 41, 0x00); rt2800_rfcsr_write(rt2x00dev, 42, 0x00); rt2800_rfcsr_write(rt2x00dev, 43, 0x00); rt2800_rfcsr_write(rt2x00dev, 44, 0x93); rt2800_rfcsr_write(rt2x00dev, 45, 0xbb); rt2800_rfcsr_write(rt2x00dev, 46, 0x60); rt2800_rfcsr_write(rt2x00dev, 47, 0x00); rt2800_rfcsr_write(rt2x00dev, 48, 0x00); rt2800_rfcsr_write(rt2x00dev, 49, 0x8e); rt2800_rfcsr_write(rt2x00dev, 50, 0x86); rt2800_rfcsr_write(rt2x00dev, 51, 0x51); rt2800_rfcsr_write(rt2x00dev, 52, 0x05); rt2800_rfcsr_write(rt2x00dev, 53, 0x76); rt2800_rfcsr_write(rt2x00dev, 54, 0x76); rt2800_rfcsr_write(rt2x00dev, 55, 0x76); rt2800_rfcsr_write(rt2x00dev, 56, 0xdb); rt2800_rfcsr_write(rt2x00dev, 57, 0x3e); rt2800_rfcsr_write(rt2x00dev, 58, 0x00); rt2800_rfcsr_write(rt2x00dev, 59, 0x00); rt2800_rfcsr_write(rt2x00dev, 60, 0x00); rt2800_rfcsr_write(rt2x00dev, 61, 0x00); rt2800_rfcsr_write(rt2x00dev, 62, 0x00); rt2800_rfcsr_write(rt2x00dev, 63, 0x00); /* TODO: rx filter calibration? 
*/ rt2800_bbp_write(rt2x00dev, 137, 0x0f); rt2800_bbp_write(rt2x00dev, 163, 0x9d); rt2800_bbp_write(rt2x00dev, 105, 0x05); rt2800_bbp_write(rt2x00dev, 179, 0x02); rt2800_bbp_write(rt2x00dev, 180, 0x00); rt2800_bbp_write(rt2x00dev, 182, 0x40); rt2800_bbp_write(rt2x00dev, 180, 0x01); rt2800_bbp_write(rt2x00dev, 182, 0x9c); rt2800_bbp_write(rt2x00dev, 179, 0x00); rt2800_bbp_write(rt2x00dev, 142, 0x04); rt2800_bbp_write(rt2x00dev, 143, 0x3b); rt2800_bbp_write(rt2x00dev, 142, 0x06); rt2800_bbp_write(rt2x00dev, 143, 0xa0); rt2800_bbp_write(rt2x00dev, 142, 0x07); rt2800_bbp_write(rt2x00dev, 143, 0xa1); rt2800_bbp_write(rt2x00dev, 142, 0x08); rt2800_bbp_write(rt2x00dev, 143, 0xa2); rt2800_bbp_write(rt2x00dev, 148, 0xc8); if (eco == 5) { rt2800_rfcsr_write(rt2x00dev, 32, 0xd8); rt2800_rfcsr_write(rt2x00dev, 33, 0x32); } rfcsr = rt2800_rfcsr_read(rt2x00dev, 2); rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_BP, 0); rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 1); rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); msleep(1); rt2x00_set_field8(&rfcsr, RFCSR2_RESCAL_EN, 0); rt2800_rfcsr_write(rt2x00dev, 2, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 1); rt2x00_set_field8(&rfcsr, RFCSR1_RF_BLOCK_EN, 1); rt2800_rfcsr_write(rt2x00dev, 1, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 6); rfcsr |= 0xc0; rt2800_rfcsr_write(rt2x00dev, 6, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 22); rfcsr |= 0x20; rt2800_rfcsr_write(rt2x00dev, 22, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 46); rfcsr |= 0x20; rt2800_rfcsr_write(rt2x00dev, 46, rfcsr); rfcsr = rt2800_rfcsr_read(rt2x00dev, 20); rfcsr &= ~0xee; rt2800_rfcsr_write(rt2x00dev, 20, rfcsr); } static void rt2800_init_rfcsr_5390(struct rt2x00_dev *rt2x00dev) { rt2800_rf_init_calibration(rt2x00dev, 2); rt2800_rfcsr_write(rt2x00dev, 1, 0x0f); rt2800_rfcsr_write(rt2x00dev, 2, 0x80); rt2800_rfcsr_write(rt2x00dev, 3, 0x88); rt2800_rfcsr_write(rt2x00dev, 5, 0x10); if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); else rt2800_rfcsr_write(rt2x00dev, 6, 0xa0); rt2800_rfcsr_write(rt2x00dev, 7, 0x00); rt2800_rfcsr_write(rt2x00dev, 10, 0x53); rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); rt2800_rfcsr_write(rt2x00dev, 12, 0x46); rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); rt2800_rfcsr_write(rt2x00dev, 14, 0x00); rt2800_rfcsr_write(rt2x00dev, 15, 0x00); rt2800_rfcsr_write(rt2x00dev, 16, 0x00); rt2800_rfcsr_write(rt2x00dev, 18, 0x03); rt2800_rfcsr_write(rt2x00dev, 19, 0x00); rt2800_rfcsr_write(rt2x00dev, 20, 0x00); rt2800_rfcsr_write(rt2x00dev, 21, 0x00); rt2800_rfcsr_write(rt2x00dev, 22, 0x20); rt2800_rfcsr_write(rt2x00dev, 23, 0x00); rt2800_rfcsr_write(rt2x00dev, 24, 0x00); if (rt2x00_is_usb(rt2x00dev) && rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) rt2800_rfcsr_write(rt2x00dev, 25, 0x80); else rt2800_rfcsr_write(rt2x00dev, 25, 0xc0); rt2800_rfcsr_write(rt2x00dev, 26, 0x00); rt2800_rfcsr_write(rt2x00dev, 27, 0x09); rt2800_rfcsr_write(rt2x00dev, 28, 0x00); rt2800_rfcsr_write(rt2x00dev, 29, 0x10); rt2800_rfcsr_write(rt2x00dev, 30, 0x10); rt2800_rfcsr_write(rt2x00dev, 31, 0x80); rt2800_rfcsr_write(rt2x00dev, 32, 0x80); rt2800_rfcsr_write(rt2x00dev, 33, 0x00); rt2800_rfcsr_write(rt2x00dev, 34, 0x07); rt2800_rfcsr_write(rt2x00dev, 35, 0x12); rt2800_rfcsr_write(rt2x00dev, 36, 0x00); rt2800_rfcsr_write(rt2x00dev, 37, 0x08); rt2800_rfcsr_write(rt2x00dev, 38, 0x85); rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); rt2800_rfcsr_write(rt2x00dev, 40, 0x0b); rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); rt2800_rfcsr_write(rt2x00dev, 42, 0xd2); rt2800_rfcsr_write(rt2x00dev, 43, 
0x9a); rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) rt2800_rfcsr_write(rt2x00dev, 46, 0x73); else rt2800_rfcsr_write(rt2x00dev, 46, 0x7b); rt2800_rfcsr_write(rt2x00dev, 47, 0x00); rt2800_rfcsr_write(rt2x00dev, 48, 0x10); rt2800_rfcsr_write(rt2x00dev, 49, 0x94); rt2800_rfcsr_write(rt2x00dev, 52, 0x38); if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) rt2800_rfcsr_write(rt2x00dev, 53, 0x00); else rt2800_rfcsr_write(rt2x00dev, 53, 0x84); rt2800_rfcsr_write(rt2x00dev, 54, 0x78); rt2800_rfcsr_write(rt2x00dev, 55, 0x44); if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) rt2800_rfcsr_write(rt2x00dev, 56, 0x42); else rt2800_rfcsr_write(rt2x00dev, 56, 0x22); rt2800_rfcsr_write(rt2x00dev, 57, 0x80); rt2800_rfcsr_write(rt2x00dev, 58, 0x7f); rt2800_rfcsr_write(rt2x00dev, 59, 0x8f); rt2800_rfcsr_write(rt2x00dev, 60, 0x45); if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390F)) { if (rt2x00_is_usb(rt2x00dev)) rt2800_rfcsr_write(rt2x00dev, 61, 0xd1); else rt2800_rfcsr_write(rt2x00dev, 61, 0xd5); } else { if (rt2x00_is_usb(rt2x00dev)) rt2800_rfcsr_write(rt2x00dev, 61, 0xdd); else rt2800_rfcsr_write(rt2x00dev, 61, 0xb5); } rt2800_rfcsr_write(rt2x00dev, 62, 0x00); rt2800_rfcsr_write(rt2x00dev, 63, 0x00); rt2800_normal_mode_setup_5xxx(rt2x00dev); rt2800_led_open_drain_enable(rt2x00dev); } static void rt2800_init_rfcsr_5392(struct rt2x00_dev *rt2x00dev) { rt2800_rf_init_calibration(rt2x00dev, 2); rt2800_rfcsr_write(rt2x00dev, 1, 0x17); rt2800_rfcsr_write(rt2x00dev, 3, 0x88); rt2800_rfcsr_write(rt2x00dev, 5, 0x10); rt2800_rfcsr_write(rt2x00dev, 6, 0xe0); rt2800_rfcsr_write(rt2x00dev, 7, 0x00); rt2800_rfcsr_write(rt2x00dev, 10, 0x53); rt2800_rfcsr_write(rt2x00dev, 11, 0x4a); rt2800_rfcsr_write(rt2x00dev, 12, 0x46); rt2800_rfcsr_write(rt2x00dev, 13, 0x9f); rt2800_rfcsr_write(rt2x00dev, 14, 0x00); rt2800_rfcsr_write(rt2x00dev, 15, 0x00); rt2800_rfcsr_write(rt2x00dev, 16, 0x00); rt2800_rfcsr_write(rt2x00dev, 18, 0x03); rt2800_rfcsr_write(rt2x00dev, 19, 0x4d); rt2800_rfcsr_write(rt2x00dev, 20, 0x00); rt2800_rfcsr_write(rt2x00dev, 21, 0x8d); rt2800_rfcsr_write(rt2x00dev, 22, 0x20); rt2800_rfcsr_write(rt2x00dev, 23, 0x0b); rt2800_rfcsr_write(rt2x00dev, 24, 0x44); rt2800_rfcsr_write(rt2x00dev, 25, 0x80); rt2800_rfcsr_write(rt2x00dev, 26, 0x82); rt2800_rfcsr_write(rt2x00dev, 27, 0x09); rt2800_rfcsr_write(rt2x00dev, 28, 0x00); rt2800_rfcsr_write(rt2x00dev, 29, 0x10); rt2800_rfcsr_write(rt2x00dev, 30, 0x10); rt2800_rfcsr_write(rt2x00dev, 31, 0x80); rt2800_rfcsr_write(rt2x00dev, 32, 0x20); rt2800_rfcsr_write(rt2x00dev, 33, 0xC0); rt2800_rfcsr_write(rt2x00dev, 34, 0x07); rt2800_rfcsr_write(rt2x00dev, 35, 0x12); rt2800_rfcsr_write(rt2x00dev, 36, 0x00); rt2800_rfcsr_write(rt2x00dev, 37, 0x08); rt2800_rfcsr_write(rt2x00dev, 38, 0x89); rt2800_rfcsr_write(rt2x00dev, 39, 0x1b); rt2800_rfcsr_write(rt2x00dev, 40, 0x0f); rt2800_rfcsr_write(rt2x00dev, 41, 0xbb); rt2800_rfcsr_write(rt2x00dev, 42, 0xd5); rt2800_rfcsr_write(rt2x00dev, 43, 0x9b); rt2800_rfcsr_write(rt2x00dev, 44, 0x0e); rt2800_rfcsr_write(rt2x00dev, 45, 0xa2); rt2800_rfcsr_write(rt2x00dev, 46, 0x73); rt2800_rfcsr_write(rt2x00dev, 47, 0x0c); rt2800_rfcsr_write(rt2x00dev, 48, 0x10); rt2800_rfcsr_write(rt2x00dev, 49, 0x94); rt2800_rfcsr_write(rt2x00dev, 50, 0x94); rt2800_rfcsr_write(rt2x00dev, 51, 0x3a); rt2800_rfcsr_write(rt2x00dev, 52, 0x48); rt2800_rfcsr_write(rt2x00dev, 53, 0x44); rt2800_rfcsr_write(rt2x00dev, 54, 0x38); rt2800_rfcsr_write(rt2x00dev, 55, 0x43); 
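/* Editor's note (added comment): the RT5392 RFCSR defaults continue through register 63; afterwards the common 5xxx normal-mode setup and LED open-drain enable are applied, as in the RT5390 path above. */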
rt2800_rfcsr_write(rt2x00dev, 56, 0xa1); rt2800_rfcsr_write(rt2x00dev, 57, 0x00); rt2800_rfcsr_write(rt2x00dev, 58, 0x39); rt2800_rfcsr_write(rt2x00dev, 59, 0x07); rt2800_rfcsr_write(rt2x00dev, 60, 0x45); rt2800_rfcsr_write(rt2x00dev, 61, 0x91); rt2800_rfcsr_write(rt2x00dev, 62, 0x39); rt2800_rfcsr_write(rt2x00dev, 63, 0x07); rt2800_normal_mode_setup_5xxx(rt2x00dev); rt2800_led_open_drain_enable(rt2x00dev); } static void rt2800_init_rfcsr_5592(struct rt2x00_dev *rt2x00dev) { rt2800_rf_init_calibration(rt2x00dev, 30); rt2800_rfcsr_write(rt2x00dev, 1, 0x3F); rt2800_rfcsr_write(rt2x00dev, 3, 0x08); rt2800_rfcsr_write(rt2x00dev, 5, 0x10); rt2800_rfcsr_write(rt2x00dev, 6, 0xE4); rt2800_rfcsr_write(rt2x00dev, 7, 0x00); rt2800_rfcsr_write(rt2x00dev, 14, 0x00); rt2800_rfcsr_write(rt2x00dev, 15, 0x00); rt2800_rfcsr_write(rt2x00dev, 16, 0x00); rt2800_rfcsr_write(rt2x00dev, 18, 0x03); rt2800_rfcsr_write(rt2x00dev, 19, 0x4D); rt2800_rfcsr_write(rt2x00dev, 20, 0x10); rt2800_rfcsr_write(rt2x00dev, 21, 0x8D); rt2800_rfcsr_write(rt2x00dev, 26, 0x82); rt2800_rfcsr_write(rt2x00dev, 28, 0x00); rt2800_rfcsr_write(rt2x00dev, 29, 0x10); rt2800_rfcsr_write(rt2x00dev, 33, 0xC0); rt2800_rfcsr_write(rt2x00dev, 34, 0x07); rt2800_rfcsr_write(rt2x00dev, 35, 0x12); rt2800_rfcsr_write(rt2x00dev, 47, 0x0C); rt2800_rfcsr_write(rt2x00dev, 53, 0x22); rt2800_rfcsr_write(rt2x00dev, 63, 0x07); rt2800_rfcsr_write(rt2x00dev, 2, 0x80); msleep(1); rt2800_freq_cal_mode1(rt2x00dev); /* Enable DC filter */ if (rt2x00_rt_rev_gte(rt2x00dev, RT5592, REV_RT5592C)) rt2800_bbp_write(rt2x00dev, 103, 0xc0); rt2800_normal_mode_setup_5xxx(rt2x00dev); if (rt2x00_rt_rev_lt(rt2x00dev, RT5592, REV_RT5592C)) rt2800_rfcsr_write(rt2x00dev, 27, 0x03); rt2800_led_open_drain_enable(rt2x00dev); } static void rt2800_rf_self_txdc_cal(struct rt2x00_dev *rt2x00dev) { u8 rfb5r1_org, rfb7r1_org, rfvalue; u32 mac0518, mac051c, mac0528, mac052c; u8 i; mac0518 = rt2800_register_read(rt2x00dev, RF_CONTROL0); mac051c = rt2800_register_read(rt2x00dev, RF_BYPASS0); mac0528 = rt2800_register_read(rt2x00dev, RF_CONTROL2); mac052c = rt2800_register_read(rt2x00dev, RF_BYPASS2); rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x0); rt2800_register_write(rt2x00dev, RF_BYPASS2, 0x0); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0xC); rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x3306); rt2800_register_write(rt2x00dev, RF_CONTROL2, 0x3330); rt2800_register_write(rt2x00dev, RF_BYPASS2, 0xfffff); rfb5r1_org = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1); rfb7r1_org = rt2800_rfcsr_read_bank(rt2x00dev, 7, 1); rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, 0x4); for (i = 0; i < 100; ++i) { usleep_range(50, 100); rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1); if ((rfvalue & 0x04) != 0x4) break; } rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, rfb5r1_org); rt2800_rfcsr_write_bank(rt2x00dev, 7, 1, 0x4); for (i = 0; i < 100; ++i) { usleep_range(50, 100); rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 1); if ((rfvalue & 0x04) != 0x4) break; } rt2800_rfcsr_write_bank(rt2x00dev, 7, 1, rfb7r1_org); rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x0); rt2800_register_write(rt2x00dev, RF_BYPASS2, 0x0); rt2800_register_write(rt2x00dev, RF_CONTROL0, mac0518); rt2800_register_write(rt2x00dev, RF_BYPASS0, mac051c); rt2800_register_write(rt2x00dev, RF_CONTROL2, mac0528); rt2800_register_write(rt2x00dev, RF_BYPASS2, mac052c); } static int rt2800_calcrcalibrationcode(struct rt2x00_dev *rt2x00dev, int d1, int d2) { int calcode = ((d2 - d1) * 1000) / 43; if ((calcode % 10) >= 5) calcode += 10; calcode = (calcode / 
10); return calcode; } static void rt2800_r_calibration(struct rt2x00_dev *rt2x00dev) { u32 savemacsysctrl; u8 saverfb0r1, saverfb0r34, saverfb0r35; u8 saverfb5r4, saverfb5r17, saverfb5r18; u8 saverfb5r19, saverfb5r20; u8 savebbpr22, savebbpr47, savebbpr49; u8 bytevalue = 0; int rcalcode; u8 r_cal_code = 0; s8 d1 = 0, d2 = 0; u8 rfvalue; u32 MAC_RF_BYPASS0, MAC_RF_CONTROL0, MAC_PWR_PIN_CFG; u32 maccfg; saverfb0r1 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1); saverfb0r34 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 34); saverfb0r35 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35); saverfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4); saverfb5r17 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17); saverfb5r18 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18); saverfb5r19 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19); saverfb5r20 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20); savebbpr22 = rt2800_bbp_read(rt2x00dev, 22); savebbpr47 = rt2800_bbp_read(rt2x00dev, 47); savebbpr49 = rt2800_bbp_read(rt2x00dev, 49); savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); MAC_RF_BYPASS0 = rt2800_register_read(rt2x00dev, RF_BYPASS0); MAC_RF_CONTROL0 = rt2800_register_read(rt2x00dev, RF_CONTROL0); MAC_PWR_PIN_CFG = rt2800_register_read(rt2x00dev, PWR_PIN_CFG); maccfg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); maccfg &= (~0x04); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, maccfg); if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX))) rt2x00_warn(rt2x00dev, "Wait MAC Tx Status to MAX !!!\n"); maccfg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); maccfg &= (~0x08); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, maccfg); if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX))) rt2x00_warn(rt2x00dev, "Wait MAC Rx Status to MAX !!!\n"); rfvalue = (MAC_RF_BYPASS0 | 0x3004); rt2800_register_write(rt2x00dev, RF_BYPASS0, rfvalue); rfvalue = (MAC_RF_CONTROL0 | (~0x3002)); rt2800_register_write(rt2x00dev, RF_CONTROL0, rfvalue); rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, 0x27); rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, 0x80); rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0x83); rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x00); rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x20); rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x00); rt2800_rfcsr_write_bank(rt2x00dev, 0, 34, 0x13); rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00); rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x1); rt2800_bbp_write(rt2x00dev, 47, 0x04); rt2800_bbp_write(rt2x00dev, 22, 0x80); usleep_range(100, 200); bytevalue = rt2800_bbp_read(rt2x00dev, 49); if (bytevalue > 128) d1 = bytevalue - 256; else d1 = (s8)bytevalue; rt2800_bbp_write(rt2x00dev, 22, 0x0); rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x01); rt2800_bbp_write(rt2x00dev, 22, 0x80); usleep_range(100, 200); bytevalue = rt2800_bbp_read(rt2x00dev, 49); if (bytevalue > 128) d2 = bytevalue - 256; else d2 = (s8)bytevalue; rt2800_bbp_write(rt2x00dev, 22, 0x0); rcalcode = rt2800_calcrcalibrationcode(rt2x00dev, d1, d2); if (rcalcode < 0) r_cal_code = 256 + rcalcode; else r_cal_code = (u8)rcalcode; rt2800_rfcsr_write_bank(rt2x00dev, 0, 7, r_cal_code); rt2800_bbp_write(rt2x00dev, 22, 0x0); bytevalue = rt2800_bbp_read(rt2x00dev, 21); bytevalue |= 0x1; rt2800_bbp_write(rt2x00dev, 21, bytevalue); bytevalue = rt2800_bbp_read(rt2x00dev, 21); bytevalue &= (~0x1); rt2800_bbp_write(rt2x00dev, 21, bytevalue); rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, saverfb0r1); rt2800_rfcsr_write_bank(rt2x00dev, 0, 34, saverfb0r34); rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, saverfb0r35); 
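/* Editor's note (added comment): restore the remaining registers saved at the start of rt2800_r_calibration (RF bank 5 registers 4/17-20, BBP 22/47/49, RF_BYPASS0, RF_CONTROL0, MAC_SYS_CTRL and PWR_PIN_CFG) now that the computed R calibration code has been written to RF bank 0 register 7. */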
rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r4); rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, saverfb5r17); rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, saverfb5r18); rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, saverfb5r19); rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, saverfb5r20); rt2800_bbp_write(rt2x00dev, 22, savebbpr22); rt2800_bbp_write(rt2x00dev, 47, savebbpr47); rt2800_bbp_write(rt2x00dev, 49, savebbpr49); rt2800_register_write(rt2x00dev, RF_BYPASS0, MAC_RF_BYPASS0); rt2800_register_write(rt2x00dev, RF_CONTROL0, MAC_RF_CONTROL0); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl); rt2800_register_write(rt2x00dev, PWR_PIN_CFG, MAC_PWR_PIN_CFG); } static void rt2800_rxdcoc_calibration(struct rt2x00_dev *rt2x00dev) { u8 bbpreg = 0; u32 macvalue = 0; u8 saverfb0r2, saverfb5r4, saverfb7r4, rfvalue; int i; saverfb0r2 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2); rfvalue = saverfb0r2; rfvalue |= 0x03; rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfvalue); rt2800_bbp_write(rt2x00dev, 158, 141); bbpreg = rt2800_bbp_read(rt2x00dev, 159); bbpreg |= 0x10; rt2800_bbp_write(rt2x00dev, 159, bbpreg); macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x8); if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX))) rt2x00_warn(rt2x00dev, "RF TX busy in RX RXDCOC calibration\n"); saverfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4); saverfb7r4 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4); saverfb5r4 = saverfb5r4 & (~0x40); saverfb7r4 = saverfb7r4 & (~0x40); rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x64); rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r4); rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, saverfb7r4); rt2800_bbp_write(rt2x00dev, 158, 141); bbpreg = rt2800_bbp_read(rt2x00dev, 159); bbpreg = bbpreg & (~0x40); rt2800_bbp_write(rt2x00dev, 159, bbpreg); bbpreg |= 0x48; rt2800_bbp_write(rt2x00dev, 159, bbpreg); for (i = 0; i < 10000; i++) { bbpreg = rt2800_bbp_read(rt2x00dev, 159); if ((bbpreg & 0x40) == 0) break; usleep_range(50, 100); } bbpreg = rt2800_bbp_read(rt2x00dev, 159); bbpreg = bbpreg & (~0x40); rt2800_bbp_write(rt2x00dev, 159, bbpreg); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue); rt2800_bbp_write(rt2x00dev, 158, 141); bbpreg = rt2800_bbp_read(rt2x00dev, 159); bbpreg &= (~0x10); rt2800_bbp_write(rt2x00dev, 159, bbpreg); rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, saverfb0r2); } static u32 rt2800_do_sqrt_accumulation(u32 si) { u32 root, root_pre, bit; s8 i; bit = 1 << 15; root = 0; for (i = 15; i >= 0; i = i - 1) { root_pre = root + bit; if ((root_pre * root_pre) <= si) root = root_pre; bit = bit >> 1; } return root; } static void rt2800_rxiq_calibration(struct rt2x00_dev *rt2x00dev) { u8 rfb0r1, rfb0r2, rfb0r42; u8 rfb4r0, rfb4r19; u8 rfb5r3, rfb5r4, rfb5r17, rfb5r18, rfb5r19, rfb5r20; u8 rfb6r0, rfb6r19; u8 rfb7r3, rfb7r4, rfb7r17, rfb7r18, rfb7r19, rfb7r20; u8 bbp1, bbp4; u8 bbpr241, bbpr242; u32 i; u8 ch_idx; u8 bbpval; u8 rfval, vga_idx = 0; int mi = 0, mq = 0, si = 0, sq = 0, riq = 0; int sigma_i, sigma_q, r_iq, g_rx; int g_imb; int ph_rx; u32 savemacsysctrl = 0; u32 orig_RF_CONTROL0 = 0; u32 orig_RF_BYPASS0 = 0; u32 orig_RF_CONTROL1 = 0; u32 orig_RF_BYPASS1 = 0; u32 orig_RF_CONTROL3 = 0; u32 orig_RF_BYPASS3 = 0; u32 bbpval1 = 0; static const u8 rf_vga_table[] = {0x20, 0x21, 0x22, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3f}; savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); orig_RF_CONTROL0 = rt2800_register_read(rt2x00dev, RF_CONTROL0); orig_RF_BYPASS0 = 
rt2800_register_read(rt2x00dev, RF_BYPASS0); orig_RF_CONTROL1 = rt2800_register_read(rt2x00dev, RF_CONTROL1); orig_RF_BYPASS1 = rt2800_register_read(rt2x00dev, RF_BYPASS1); orig_RF_CONTROL3 = rt2800_register_read(rt2x00dev, RF_CONTROL3); orig_RF_BYPASS3 = rt2800_register_read(rt2x00dev, RF_BYPASS3); bbp1 = rt2800_bbp_read(rt2x00dev, 1); bbp4 = rt2800_bbp_read(rt2x00dev, 4); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x0); if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY))) rt2x00_warn(rt2x00dev, "Timeout waiting for MAC status in RXIQ calibration\n"); bbpval = bbp4 & (~0x18); bbpval = bbp4 | 0x00; rt2800_bbp_write(rt2x00dev, 4, bbpval); bbpval = rt2800_bbp_read(rt2x00dev, 21); bbpval = bbpval | 1; rt2800_bbp_write(rt2x00dev, 21, bbpval); bbpval = bbpval & 0xfe; rt2800_bbp_write(rt2x00dev, 21, bbpval); rt2800_register_write(rt2x00dev, RF_CONTROL1, 0x00000202); rt2800_register_write(rt2x00dev, RF_BYPASS1, 0x00000303); if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x0101); else rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x0000); rt2800_register_write(rt2x00dev, RF_BYPASS3, 0xf1f1); rfb0r1 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1); rfb0r2 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2); rfb0r42 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42); rfb4r0 = rt2800_rfcsr_read_bank(rt2x00dev, 4, 0); rfb4r19 = rt2800_rfcsr_read_bank(rt2x00dev, 4, 19); rfb5r3 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3); rfb5r4 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4); rfb5r17 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17); rfb5r18 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18); rfb5r19 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19); rfb5r20 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20); rfb6r0 = rt2800_rfcsr_read_bank(rt2x00dev, 6, 0); rfb6r19 = rt2800_rfcsr_read_bank(rt2x00dev, 6, 19); rfb7r3 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 3); rfb7r4 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4); rfb7r17 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 17); rfb7r18 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 18); rfb7r19 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 19); rfb7r20 = rt2800_rfcsr_read_bank(rt2x00dev, 7, 20); rt2800_rfcsr_write_chanreg(rt2x00dev, 0, 0x87); rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0x27); rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x38); rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x38); rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x80); rt2800_rfcsr_write_dccal(rt2x00dev, 18, 0xC1); rt2800_rfcsr_write_dccal(rt2x00dev, 19, 0x60); rt2800_rfcsr_write_dccal(rt2x00dev, 20, 0x00); rt2800_bbp_write(rt2x00dev, 23, 0x0); rt2800_bbp_write(rt2x00dev, 24, 0x0); rt2800_bbp_dcoc_write(rt2x00dev, 5, 0x0); bbpr241 = rt2800_bbp_read(rt2x00dev, 241); bbpr242 = rt2800_bbp_read(rt2x00dev, 242); rt2800_bbp_write(rt2x00dev, 241, 0x10); rt2800_bbp_write(rt2x00dev, 242, 0x84); rt2800_bbp_write(rt2x00dev, 244, 0x31); bbpval = rt2800_bbp_dcoc_read(rt2x00dev, 3); bbpval = bbpval & (~0x7); rt2800_bbp_dcoc_write(rt2x00dev, 3, bbpval); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004); udelay(1); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000006); usleep_range(1, 200); rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003376); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001006); udelay(1); if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) { rt2800_bbp_write(rt2x00dev, 23, 0x06); rt2800_bbp_write(rt2x00dev, 24, 0x06); } else { rt2800_bbp_write(rt2x00dev, 23, 0x02); rt2800_bbp_write(rt2x00dev, 24, 0x02); } for (ch_idx = 0; ch_idx < 2; ch_idx = ch_idx + 1) { if 
(ch_idx == 0) { rfval = rfb0r1 & (~0x3); rfval = rfb0r1 | 0x1; rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval); rfval = rfb0r2 & (~0x33); rfval = rfb0r2 | 0x11; rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval); rfval = rfb0r42 & (~0x50); rfval = rfb0r42 | 0x10; rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001006); udelay(1); bbpval = bbp1 & (~0x18); bbpval = bbpval | 0x00; rt2800_bbp_write(rt2x00dev, 1, bbpval); rt2800_bbp_dcoc_write(rt2x00dev, 1, 0x00); } else { rfval = rfb0r1 & (~0x3); rfval = rfb0r1 | 0x2; rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfval); rfval = rfb0r2 & (~0x33); rfval = rfb0r2 | 0x22; rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfval); rfval = rfb0r42 & (~0x50); rfval = rfb0r42 | 0x40; rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfval); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002006); udelay(1); bbpval = bbp1 & (~0x18); bbpval = bbpval | 0x08; rt2800_bbp_write(rt2x00dev, 1, bbpval); rt2800_bbp_dcoc_write(rt2x00dev, 1, 0x01); } usleep_range(500, 1500); vga_idx = 0; while (vga_idx < 11) { rt2800_rfcsr_write_dccal(rt2x00dev, 3, rf_vga_table[vga_idx]); rt2800_rfcsr_write_dccal(rt2x00dev, 4, rf_vga_table[vga_idx]); rt2800_bbp_dcoc_write(rt2x00dev, 0, 0x93); for (i = 0; i < 10000; i++) { bbpval = rt2800_bbp_read(rt2x00dev, 159); if ((bbpval & 0xff) == 0x93) usleep_range(50, 100); else break; } if ((bbpval & 0xff) == 0x93) { rt2x00_warn(rt2x00dev, "Fatal Error: Calibration doesn't finish"); goto restore_value; } for (i = 0; i < 5; i++) { u32 bbptemp = 0; u8 value = 0; int result = 0; rt2800_bbp_write(rt2x00dev, 158, 0x1e); rt2800_bbp_write(rt2x00dev, 159, i); rt2800_bbp_write(rt2x00dev, 158, 0x22); value = rt2800_bbp_read(rt2x00dev, 159); bbptemp = bbptemp + (value << 24); rt2800_bbp_write(rt2x00dev, 158, 0x21); value = rt2800_bbp_read(rt2x00dev, 159); bbptemp = bbptemp + (value << 16); rt2800_bbp_write(rt2x00dev, 158, 0x20); value = rt2800_bbp_read(rt2x00dev, 159); bbptemp = bbptemp + (value << 8); rt2800_bbp_write(rt2x00dev, 158, 0x1f); value = rt2800_bbp_read(rt2x00dev, 159); bbptemp = bbptemp + value; if (i < 2 && (bbptemp & 0x800000)) result = (bbptemp & 0xffffff) - 0x1000000; else result = bbptemp; if (i == 0) mi = result / 4096; else if (i == 1) mq = result / 4096; else if (i == 2) si = bbptemp / 4096; else if (i == 3) sq = bbptemp / 4096; else riq = result / 4096; } bbpval1 = si - mi * mi; rt2x00_dbg(rt2x00dev, "RXIQ si=%d, sq=%d, riq=%d, bbpval %d, vga_idx %d", si, sq, riq, bbpval1, vga_idx); if (bbpval1 >= (100 * 100)) break; if (bbpval1 <= 100) vga_idx = vga_idx + 9; else if (bbpval1 <= 158) vga_idx = vga_idx + 8; else if (bbpval1 <= 251) vga_idx = vga_idx + 7; else if (bbpval1 <= 398) vga_idx = vga_idx + 6; else if (bbpval1 <= 630) vga_idx = vga_idx + 5; else if (bbpval1 <= 1000) vga_idx = vga_idx + 4; else if (bbpval1 <= 1584) vga_idx = vga_idx + 3; else if (bbpval1 <= 2511) vga_idx = vga_idx + 2; else vga_idx = vga_idx + 1; } sigma_i = rt2800_do_sqrt_accumulation(100 * (si - mi * mi)); sigma_q = rt2800_do_sqrt_accumulation(100 * (sq - mq * mq)); r_iq = 10 * (riq - (mi * mq)); rt2x00_dbg(rt2x00dev, "Sigma_i=%d, Sigma_q=%d, R_iq=%d", sigma_i, sigma_q, r_iq); if (sigma_i <= 1400 && sigma_i >= 1000 && (sigma_i - sigma_q) <= 112 && (sigma_i - sigma_q) >= -112 && mi <= 32 && mi >= -32 && mq <= 32 && mq >= -32) { r_iq = 10 * (riq - (mi * mq)); rt2x00_dbg(rt2x00dev, "RXIQ Sigma_i=%d, Sigma_q=%d, R_iq=%d\n", sigma_i, sigma_q, r_iq); g_rx = (1000 * sigma_q) / sigma_i; g_imb = ((-2) * 128 * (1000 - g_rx)) / (1000 + 
g_rx); ph_rx = (r_iq * 2292) / (sigma_i * sigma_q); if (ph_rx > 20 || ph_rx < -20) { ph_rx = 0; rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL"); } if (g_imb > 12 || g_imb < -12) { g_imb = 0; rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL"); } } else { g_imb = 0; ph_rx = 0; rt2x00_dbg(rt2x00dev, "RXIQ Sigma_i=%d, Sigma_q=%d, R_iq=%d\n", sigma_i, sigma_q, r_iq); rt2x00_warn(rt2x00dev, "RXIQ calibration FAIL"); } if (ch_idx == 0) { rt2800_bbp_write(rt2x00dev, 158, 0x37); rt2800_bbp_write(rt2x00dev, 159, g_imb & 0x3f); rt2800_bbp_write(rt2x00dev, 158, 0x35); rt2800_bbp_write(rt2x00dev, 159, ph_rx & 0x3f); } else { rt2800_bbp_write(rt2x00dev, 158, 0x55); rt2800_bbp_write(rt2x00dev, 159, g_imb & 0x3f); rt2800_bbp_write(rt2x00dev, 158, 0x53); rt2800_bbp_write(rt2x00dev, 159, ph_rx & 0x3f); } } restore_value: rt2800_bbp_write(rt2x00dev, 158, 0x3); bbpval = rt2800_bbp_read(rt2x00dev, 159); rt2800_bbp_write(rt2x00dev, 159, (bbpval | 0x07)); rt2800_bbp_write(rt2x00dev, 158, 0x00); rt2800_bbp_write(rt2x00dev, 159, 0x00); rt2800_bbp_write(rt2x00dev, 1, bbp1); rt2800_bbp_write(rt2x00dev, 4, bbp4); rt2800_bbp_write(rt2x00dev, 241, bbpr241); rt2800_bbp_write(rt2x00dev, 242, bbpr242); rt2800_bbp_write(rt2x00dev, 244, 0x00); bbpval = rt2800_bbp_read(rt2x00dev, 21); bbpval |= 0x1; rt2800_bbp_write(rt2x00dev, 21, bbpval); usleep_range(10, 200); bbpval &= 0xfe; rt2800_bbp_write(rt2x00dev, 21, bbpval); rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, rfb0r1); rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, rfb0r2); rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfb0r42); rt2800_rfcsr_write_bank(rt2x00dev, 4, 0, rfb4r0); rt2800_rfcsr_write_bank(rt2x00dev, 4, 19, rfb4r19); rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rfb5r3); rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rfb5r4); rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, rfb5r17); rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, rfb5r18); rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, rfb5r19); rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, rfb5r20); rt2800_rfcsr_write_bank(rt2x00dev, 6, 0, rfb6r0); rt2800_rfcsr_write_bank(rt2x00dev, 6, 19, rfb6r19); rt2800_rfcsr_write_bank(rt2x00dev, 7, 3, rfb7r3); rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, rfb7r4); rt2800_rfcsr_write_bank(rt2x00dev, 7, 17, rfb7r17); rt2800_rfcsr_write_bank(rt2x00dev, 7, 18, rfb7r18); rt2800_rfcsr_write_bank(rt2x00dev, 7, 19, rfb7r19); rt2800_rfcsr_write_bank(rt2x00dev, 7, 20, rfb7r20); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000006); udelay(1); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004); udelay(1); rt2800_register_write(rt2x00dev, RF_CONTROL0, orig_RF_CONTROL0); udelay(1); rt2800_register_write(rt2x00dev, RF_BYPASS0, orig_RF_BYPASS0); rt2800_register_write(rt2x00dev, RF_CONTROL1, orig_RF_CONTROL1); rt2800_register_write(rt2x00dev, RF_BYPASS1, orig_RF_BYPASS1); rt2800_register_write(rt2x00dev, RF_CONTROL3, orig_RF_CONTROL3); rt2800_register_write(rt2x00dev, RF_BYPASS3, orig_RF_BYPASS3); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl); } static void rt2800_rf_configstore(struct rt2x00_dev *rt2x00dev, struct rf_reg_pair rf_reg_record[][13], u8 chain) { u8 rfvalue = 0; if (chain == CHAIN_0) { rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1); rf_reg_record[CHAIN_0][0].bank = 0; rf_reg_record[CHAIN_0][0].reg = 1; rf_reg_record[CHAIN_0][0].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2); rf_reg_record[CHAIN_0][1].bank = 0; rf_reg_record[CHAIN_0][1].reg = 2; rf_reg_record[CHAIN_0][1].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35); rf_reg_record[CHAIN_0][2].bank = 0; 
rf_reg_record[CHAIN_0][2].reg = 35; rf_reg_record[CHAIN_0][2].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42); rf_reg_record[CHAIN_0][3].bank = 0; rf_reg_record[CHAIN_0][3].reg = 42; rf_reg_record[CHAIN_0][3].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 0); rf_reg_record[CHAIN_0][4].bank = 4; rf_reg_record[CHAIN_0][4].reg = 0; rf_reg_record[CHAIN_0][4].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 2); rf_reg_record[CHAIN_0][5].bank = 4; rf_reg_record[CHAIN_0][5].reg = 2; rf_reg_record[CHAIN_0][5].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 4, 34); rf_reg_record[CHAIN_0][6].bank = 4; rf_reg_record[CHAIN_0][6].reg = 34; rf_reg_record[CHAIN_0][6].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3); rf_reg_record[CHAIN_0][7].bank = 5; rf_reg_record[CHAIN_0][7].reg = 3; rf_reg_record[CHAIN_0][7].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4); rf_reg_record[CHAIN_0][8].bank = 5; rf_reg_record[CHAIN_0][8].reg = 4; rf_reg_record[CHAIN_0][8].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17); rf_reg_record[CHAIN_0][9].bank = 5; rf_reg_record[CHAIN_0][9].reg = 17; rf_reg_record[CHAIN_0][9].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18); rf_reg_record[CHAIN_0][10].bank = 5; rf_reg_record[CHAIN_0][10].reg = 18; rf_reg_record[CHAIN_0][10].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19); rf_reg_record[CHAIN_0][11].bank = 5; rf_reg_record[CHAIN_0][11].reg = 19; rf_reg_record[CHAIN_0][11].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20); rf_reg_record[CHAIN_0][12].bank = 5; rf_reg_record[CHAIN_0][12].reg = 20; rf_reg_record[CHAIN_0][12].value = rfvalue; } else if (chain == CHAIN_1) { rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 1); rf_reg_record[CHAIN_1][0].bank = 0; rf_reg_record[CHAIN_1][0].reg = 1; rf_reg_record[CHAIN_1][0].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 2); rf_reg_record[CHAIN_1][1].bank = 0; rf_reg_record[CHAIN_1][1].reg = 2; rf_reg_record[CHAIN_1][1].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 35); rf_reg_record[CHAIN_1][2].bank = 0; rf_reg_record[CHAIN_1][2].reg = 35; rf_reg_record[CHAIN_1][2].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42); rf_reg_record[CHAIN_1][3].bank = 0; rf_reg_record[CHAIN_1][3].reg = 42; rf_reg_record[CHAIN_1][3].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 0); rf_reg_record[CHAIN_1][4].bank = 6; rf_reg_record[CHAIN_1][4].reg = 0; rf_reg_record[CHAIN_1][4].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 2); rf_reg_record[CHAIN_1][5].bank = 6; rf_reg_record[CHAIN_1][5].reg = 2; rf_reg_record[CHAIN_1][5].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 6, 34); rf_reg_record[CHAIN_1][6].bank = 6; rf_reg_record[CHAIN_1][6].reg = 34; rf_reg_record[CHAIN_1][6].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 3); rf_reg_record[CHAIN_1][7].bank = 7; rf_reg_record[CHAIN_1][7].reg = 3; rf_reg_record[CHAIN_1][7].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 4); rf_reg_record[CHAIN_1][8].bank = 7; rf_reg_record[CHAIN_1][8].reg = 4; rf_reg_record[CHAIN_1][8].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 17); rf_reg_record[CHAIN_1][9].bank = 7; rf_reg_record[CHAIN_1][9].reg = 17; rf_reg_record[CHAIN_1][9].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 18); rf_reg_record[CHAIN_1][10].bank = 7; 
rf_reg_record[CHAIN_1][10].reg = 18; rf_reg_record[CHAIN_1][10].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 19); rf_reg_record[CHAIN_1][11].bank = 7; rf_reg_record[CHAIN_1][11].reg = 19; rf_reg_record[CHAIN_1][11].value = rfvalue; rfvalue = rt2800_rfcsr_read_bank(rt2x00dev, 7, 20); rf_reg_record[CHAIN_1][12].bank = 7; rf_reg_record[CHAIN_1][12].reg = 20; rf_reg_record[CHAIN_1][12].value = rfvalue; } else { rt2x00_warn(rt2x00dev, "Unknown chain = %u\n", chain); } } static void rt2800_rf_configrecover(struct rt2x00_dev *rt2x00dev, struct rf_reg_pair rf_record[][13]) { u8 chain_index = 0, record_index = 0; u8 bank = 0, rf_register = 0, value = 0; for (chain_index = 0; chain_index < 2; chain_index++) { for (record_index = 0; record_index < 13; record_index++) { bank = rf_record[chain_index][record_index].bank; rf_register = rf_record[chain_index][record_index].reg; value = rf_record[chain_index][record_index].value; rt2800_rfcsr_write_bank(rt2x00dev, bank, rf_register, value); rt2x00_dbg(rt2x00dev, "bank: %d, rf_register: %d, value: %x\n", bank, rf_register, value); } } } static void rt2800_setbbptonegenerator(struct rt2x00_dev *rt2x00dev) { rt2800_bbp_write(rt2x00dev, 158, 0xAA); rt2800_bbp_write(rt2x00dev, 159, 0x00); rt2800_bbp_write(rt2x00dev, 158, 0xAB); rt2800_bbp_write(rt2x00dev, 159, 0x0A); rt2800_bbp_write(rt2x00dev, 158, 0xAC); rt2800_bbp_write(rt2x00dev, 159, 0x3F); rt2800_bbp_write(rt2x00dev, 158, 0xAD); rt2800_bbp_write(rt2x00dev, 159, 0x3F); rt2800_bbp_write(rt2x00dev, 244, 0x40); } static u32 rt2800_do_fft_accumulation(struct rt2x00_dev *rt2x00dev, u8 tidx, u8 read_neg) { u32 macvalue = 0; int fftout_i = 0, fftout_q = 0; u32 ptmp = 0, pint = 0; u8 bbp = 0; u8 tidxi; rt2800_bbp_write(rt2x00dev, 158, 0x00); rt2800_bbp_write(rt2x00dev, 159, 0x9b); bbp = 0x9b; while (bbp == 0x9b) { usleep_range(10, 50); bbp = rt2800_bbp_read(rt2x00dev, 159); bbp = bbp & 0xff; } rt2800_bbp_write(rt2x00dev, 158, 0xba); rt2800_bbp_write(rt2x00dev, 159, tidx); rt2800_bbp_write(rt2x00dev, 159, tidx); rt2800_bbp_write(rt2x00dev, 159, tidx); macvalue = rt2800_register_read(rt2x00dev, 0x057C); fftout_i = (macvalue >> 16); fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i; fftout_q = (macvalue & 0xffff); fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q; ptmp = (fftout_i * fftout_i); ptmp = ptmp + (fftout_q * fftout_q); pint = ptmp; rt2x00_dbg(rt2x00dev, "I = %d, Q = %d, power = %x\n", fftout_i, fftout_q, pint); if (read_neg) { pint = pint >> 1; tidxi = 0x40 - tidx; tidxi = tidxi & 0x3f; rt2800_bbp_write(rt2x00dev, 158, 0xba); rt2800_bbp_write(rt2x00dev, 159, tidxi); rt2800_bbp_write(rt2x00dev, 159, tidxi); rt2800_bbp_write(rt2x00dev, 159, tidxi); macvalue = rt2800_register_read(rt2x00dev, 0x057C); fftout_i = (macvalue >> 16); fftout_i = (fftout_i & 0x8000) ? (fftout_i - 0x10000) : fftout_i; fftout_q = (macvalue & 0xffff); fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q; ptmp = (fftout_i * fftout_i); ptmp = ptmp + (fftout_q * fftout_q); ptmp = ptmp >> 1; pint = pint + ptmp; } return pint; } static u32 rt2800_read_fft_accumulation(struct rt2x00_dev *rt2x00dev, u8 tidx) { u32 macvalue = 0; int fftout_i = 0, fftout_q = 0; u32 ptmp = 0, pint = 0; rt2800_bbp_write(rt2x00dev, 158, 0xBA); rt2800_bbp_write(rt2x00dev, 159, tidx); rt2800_bbp_write(rt2x00dev, 159, tidx); rt2800_bbp_write(rt2x00dev, 159, tidx); macvalue = rt2800_register_read(rt2x00dev, 0x057C); fftout_i = (macvalue >> 16); fftout_i = (fftout_i & 0x8000) ? 
(fftout_i - 0x10000) : fftout_i; fftout_q = (macvalue & 0xffff); fftout_q = (fftout_q & 0x8000) ? (fftout_q - 0x10000) : fftout_q; ptmp = (fftout_i * fftout_i); ptmp = ptmp + (fftout_q * fftout_q); pint = ptmp; return pint; } static void rt2800_write_dc(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 alc, u8 iorq, u8 dc) { u8 bbp = 0; rt2800_bbp_write(rt2x00dev, 158, 0xb0); bbp = alc | 0x80; rt2800_bbp_write(rt2x00dev, 159, bbp); if (ch_idx == 0) bbp = (iorq == 0) ? 0xb1 : 0xb2; else bbp = (iorq == 0) ? 0xb8 : 0xb9; rt2800_bbp_write(rt2x00dev, 158, bbp); bbp = dc; rt2800_bbp_write(rt2x00dev, 159, bbp); } static void rt2800_loft_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 alc_idx, u8 dc_result[][RF_ALC_NUM][2]) { u32 p0 = 0, p1 = 0, pf = 0; s8 idx0 = 0, idx1 = 0; u8 idxf[] = {0x00, 0x00}; u8 ibit = 0x20; u8 iorq; s8 bidx; rt2800_bbp_write(rt2x00dev, 158, 0xb0); rt2800_bbp_write(rt2x00dev, 159, 0x80); for (bidx = 5; bidx >= 0; bidx--) { for (iorq = 0; iorq <= 1; iorq++) { if (idxf[iorq] == 0x20) { idx0 = 0x20; p0 = pf; } else { idx0 = idxf[iorq] - ibit; idx0 = idx0 & 0x3F; rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idx0); p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0); } idx1 = idxf[iorq] + (bidx == 5 ? 0 : ibit); idx1 = idx1 & 0x3F; rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idx1); p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0); rt2x00_dbg(rt2x00dev, "alc=%u, IorQ=%u, idx_final=%2x\n", alc_idx, iorq, idxf[iorq]); rt2x00_dbg(rt2x00dev, "p0=%x, p1=%x, pf=%x, idx_0=%x, idx_1=%x, ibit=%x\n", p0, p1, pf, idx0, idx1, ibit); if (bidx != 5 && pf <= p0 && pf < p1) { idxf[iorq] = idxf[iorq]; } else if (p0 < p1) { pf = p0; idxf[iorq] = idx0 & 0x3F; } else { pf = p1; idxf[iorq] = idx1 & 0x3F; } rt2x00_dbg(rt2x00dev, "IorQ=%u, idx_final[%u]:%x, pf:%8x\n", iorq, iorq, idxf[iorq], pf); rt2800_write_dc(rt2x00dev, ch_idx, 0, iorq, idxf[iorq]); } ibit = ibit >> 1; } dc_result[ch_idx][alc_idx][0] = idxf[0]; dc_result[ch_idx][alc_idx][1] = idxf[1]; } static void rt2800_iq_search(struct rt2x00_dev *rt2x00dev, u8 ch_idx, u8 *ges, u8 *pes) { u32 p0 = 0, p1 = 0, pf = 0; s8 perr = 0, gerr = 0, iq_err = 0; s8 pef = 0, gef = 0; s8 psta, pend; s8 gsta, gend; u8 ibit = 0x20; u8 first_search = 0x00, touch_neg_max = 0x00; s8 idx0 = 0, idx1 = 0; u8 gop; u8 bbp = 0; s8 bidx; for (bidx = 5; bidx >= 1; bidx--) { for (gop = 0; gop < 2; gop++) { if (gop == 1 || bidx < 4) { if (gop == 0) iq_err = gerr; else iq_err = perr; first_search = (gop == 0) ? (bidx == 3) : (bidx == 5); touch_neg_max = (gop) ? ((iq_err & 0x0F) == 0x08) : ((iq_err & 0x3F) == 0x20); if (touch_neg_max) { p0 = pf; idx0 = iq_err; } else { idx0 = iq_err - ibit; bbp = (ch_idx == 0) ? ((gop == 0) ? 0x28 : 0x29) : ((gop == 0) ? 0x46 : 0x47); rt2800_bbp_write(rt2x00dev, 158, bbp); rt2800_bbp_write(rt2x00dev, 159, idx0); p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1); } idx1 = iq_err + (first_search ? 0 : ibit); idx1 = (gop == 0) ? (idx1 & 0x0F) : (idx1 & 0x3F); bbp = (ch_idx == 0) ? (gop == 0) ? 0x28 : 0x29 : (gop == 0) ? 0x46 : 0x47; rt2800_bbp_write(rt2x00dev, 158, bbp); rt2800_bbp_write(rt2x00dev, 159, idx1); p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1); rt2x00_dbg(rt2x00dev, "p0=%x, p1=%x, pwer_final=%x, idx0=%x, idx1=%x, iq_err=%x, gop=%d, ibit=%x\n", p0, p1, pf, idx0, idx1, iq_err, gop, ibit); if (!(!first_search && pf <= p0 && pf < p1)) { if (p0 < p1) { pf = p0; iq_err = idx0; } else { pf = p1; iq_err = idx1; } } bbp = (ch_idx == 0) ? (gop == 0) ? 0x28 : 0x29 : (gop == 0) ? 
0x46 : 0x47; rt2800_bbp_write(rt2x00dev, 158, bbp); rt2800_bbp_write(rt2x00dev, 159, iq_err); if (gop == 0) gerr = iq_err; else perr = iq_err; rt2x00_dbg(rt2x00dev, "IQCalibration pf=%8x (%2x, %2x) !\n", pf, gerr & 0x0F, perr & 0x3F); } } if (bidx > 0) ibit = (ibit >> 1); } gerr = (gerr & 0x08) ? (gerr & 0x0F) - 0x10 : (gerr & 0x0F); perr = (perr & 0x20) ? (perr & 0x3F) - 0x40 : (perr & 0x3F); gerr = (gerr < -0x07) ? -0x07 : (gerr > 0x05) ? 0x05 : gerr; gsta = gerr - 1; gend = gerr + 2; perr = (perr < -0x1f) ? -0x1f : (perr > 0x1d) ? 0x1d : perr; psta = perr - 1; pend = perr + 2; for (gef = gsta; gef <= gend; gef = gef + 1) for (pef = psta; pef <= pend; pef = pef + 1) { bbp = (ch_idx == 0) ? 0x28 : 0x46; rt2800_bbp_write(rt2x00dev, 158, bbp); rt2800_bbp_write(rt2x00dev, 159, gef & 0x0F); bbp = (ch_idx == 0) ? 0x29 : 0x47; rt2800_bbp_write(rt2x00dev, 158, bbp); rt2800_bbp_write(rt2x00dev, 159, pef & 0x3F); p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 1); if (gef == gsta && pef == psta) { pf = p1; gerr = gef; perr = pef; } else if (pf > p1) { pf = p1; gerr = gef; perr = pef; } rt2x00_dbg(rt2x00dev, "Fine IQCalibration p1=%8x pf=%8x (%2x, %2x) !\n", p1, pf, gef & 0x0F, pef & 0x3F); } ges[ch_idx] = gerr & 0x0F; pes[ch_idx] = perr & 0x3F; } static void rt2800_rf_aux_tx0_loopback(struct rt2x00_dev *rt2x00dev) { rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x21); rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, 0x10); rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00); rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x1b); rt2800_rfcsr_write_bank(rt2x00dev, 4, 0, 0x81); rt2800_rfcsr_write_bank(rt2x00dev, 4, 2, 0x81); rt2800_rfcsr_write_bank(rt2x00dev, 4, 34, 0xee); rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, 0x2d); rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, 0x2d); rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, 0x80); rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xd7); rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0xa2); rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x20); } static void rt2800_rf_aux_tx1_loopback(struct rt2x00_dev *rt2x00dev) { rt2800_rfcsr_write_bank(rt2x00dev, 0, 1, 0x22); rt2800_rfcsr_write_bank(rt2x00dev, 0, 2, 0x20); rt2800_rfcsr_write_bank(rt2x00dev, 0, 35, 0x00); rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x4b); rt2800_rfcsr_write_bank(rt2x00dev, 6, 0, 0x81); rt2800_rfcsr_write_bank(rt2x00dev, 6, 2, 0x81); rt2800_rfcsr_write_bank(rt2x00dev, 6, 34, 0xee); rt2800_rfcsr_write_bank(rt2x00dev, 7, 3, 0x2d); rt2800_rfcsr_write_bank(rt2x00dev, 7, 4, 0x2d); rt2800_rfcsr_write_bank(rt2x00dev, 7, 17, 0x80); rt2800_rfcsr_write_bank(rt2x00dev, 7, 18, 0xd7); rt2800_rfcsr_write_bank(rt2x00dev, 7, 19, 0xa2); rt2800_rfcsr_write_bank(rt2x00dev, 7, 20, 0x20); } static void rt2800_loft_iq_calibration(struct rt2x00_dev *rt2x00dev) { struct rf_reg_pair rf_store[CHAIN_NUM][13]; u32 macorg1 = 0; u32 macorg2 = 0; u32 macorg3 = 0; u32 macorg4 = 0; u32 macorg5 = 0; u32 orig528 = 0; u32 orig52c = 0; u32 savemacsysctrl = 0; u32 macvalue = 0; u32 mac13b8 = 0; u32 p0 = 0, p1 = 0; u32 p0_idx10 = 0, p1_idx10 = 0; u8 rfvalue; u8 loft_dc_search_result[CHAIN_NUM][RF_ALC_NUM][2]; u8 ger[CHAIN_NUM], per[CHAIN_NUM]; u8 vga_gain[] = {14, 14}; u8 bbp = 0, ch_idx = 0, rf_alc_idx = 0, idx = 0; u8 bbpr30, rfb0r39, rfb0r42; u8 bbpr1; u8 bbpr4; u8 bbpr241, bbpr242; u8 count_step; static const u8 rf_gain[] = {0x00, 0x01, 0x02, 0x04, 0x08, 0x0c}; static const u8 rfvga_gain_table[] = {0x24, 0x25, 0x26, 0x27, 0x28, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x3e, 0x3F}; static const u8 bbp_2324gain[] = {0x16, 0x14, 0x12, 0x10, 0x0c, 
0x08}; savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); macorg1 = rt2800_register_read(rt2x00dev, TX_PIN_CFG); macorg2 = rt2800_register_read(rt2x00dev, RF_CONTROL0); macorg3 = rt2800_register_read(rt2x00dev, RF_BYPASS0); macorg4 = rt2800_register_read(rt2x00dev, RF_CONTROL3); macorg5 = rt2800_register_read(rt2x00dev, RF_BYPASS3); mac13b8 = rt2800_register_read(rt2x00dev, 0x13b8); orig528 = rt2800_register_read(rt2x00dev, RF_CONTROL2); orig52c = rt2800_register_read(rt2x00dev, RF_BYPASS2); macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); macvalue &= (~0x04); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue); if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX))) rt2x00_warn(rt2x00dev, "RF TX busy in LOFT IQ calibration\n"); macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); macvalue &= (~0x08); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue); if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX))) rt2x00_warn(rt2x00dev, "RF RX busy in LOFT IQ calibration\n"); for (ch_idx = 0; ch_idx < 2; ch_idx++) rt2800_rf_configstore(rt2x00dev, rf_store, ch_idx); bbpr30 = rt2800_bbp_read(rt2x00dev, 30); rfb0r39 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 39); rfb0r42 = rt2800_rfcsr_read_bank(rt2x00dev, 0, 42); rt2800_bbp_write(rt2x00dev, 30, 0x1F); rt2800_rfcsr_write_bank(rt2x00dev, 0, 39, 0x80); rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, 0x5B); rt2800_bbp_write(rt2x00dev, 23, 0x00); rt2800_bbp_write(rt2x00dev, 24, 0x00); rt2800_setbbptonegenerator(rt2x00dev); for (ch_idx = 0; ch_idx < 2; ch_idx++) { rt2800_bbp_write(rt2x00dev, 23, 0x00); rt2800_bbp_write(rt2x00dev, 24, 0x00); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00); rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x0000000F); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004); rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003306); rt2800_register_write(rt2x00dev, 0x13b8, 0x10); udelay(1); if (ch_idx == 0) rt2800_rf_aux_tx0_loopback(rt2x00dev); else rt2800_rf_aux_tx1_loopback(rt2x00dev); udelay(1); if (ch_idx == 0) rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001004); else rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002004); rt2800_bbp_write(rt2x00dev, 158, 0x05); rt2800_bbp_write(rt2x00dev, 159, 0x00); rt2800_bbp_write(rt2x00dev, 158, 0x01); if (ch_idx == 0) rt2800_bbp_write(rt2x00dev, 159, 0x00); else rt2800_bbp_write(rt2x00dev, 159, 0x01); vga_gain[ch_idx] = 18; for (rf_alc_idx = 0; rf_alc_idx < 3; rf_alc_idx++) { rt2800_bbp_write(rt2x00dev, 23, bbp_2324gain[rf_alc_idx]); rt2800_bbp_write(rt2x00dev, 24, bbp_2324gain[rf_alc_idx]); macvalue = rt2800_register_read(rt2x00dev, RF_CONTROL3); macvalue &= (~0x0000F1F1); macvalue |= (rf_gain[rf_alc_idx] << 4); macvalue |= (rf_gain[rf_alc_idx] << 12); rt2800_register_write(rt2x00dev, RF_CONTROL3, macvalue); macvalue = (0x0000F1F1); rt2800_register_write(rt2x00dev, RF_BYPASS3, macvalue); if (rf_alc_idx == 0) { rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x21); for (; vga_gain[ch_idx] > 0; vga_gain[ch_idx] = vga_gain[ch_idx] - 2) { rfvalue = rfvga_gain_table[vga_gain[ch_idx]]; rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue); rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue); rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x00); rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x00); p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0); rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x21); p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x0A, 0); rt2x00_dbg(rt2x00dev, "LOFT AGC %d %d\n", p0, p1); if ((p0 < 7000 * 7000) && 
(p1 < (7000 * 7000))) break; } rt2800_write_dc(rt2x00dev, ch_idx, 0, 0, 0x00); rt2800_write_dc(rt2x00dev, ch_idx, 0, 1, 0x00); rt2x00_dbg(rt2x00dev, "Used VGA %d %x\n", vga_gain[ch_idx], rfvga_gain_table[vga_gain[ch_idx]]); if (vga_gain[ch_idx] < 0) vga_gain[ch_idx] = 0; } rfvalue = rfvga_gain_table[vga_gain[ch_idx]]; rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue); rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue); rt2800_loft_search(rt2x00dev, ch_idx, rf_alc_idx, loft_dc_search_result); } } for (rf_alc_idx = 0; rf_alc_idx < 3; rf_alc_idx++) { for (idx = 0; idx < 4; idx++) { rt2800_bbp_write(rt2x00dev, 158, 0xB0); bbp = (idx << 2) + rf_alc_idx; rt2800_bbp_write(rt2x00dev, 159, bbp); rt2x00_dbg(rt2x00dev, " ALC %2x,", bbp); rt2800_bbp_write(rt2x00dev, 158, 0xb1); bbp = loft_dc_search_result[CHAIN_0][rf_alc_idx][0x00]; bbp = bbp & 0x3F; rt2800_bbp_write(rt2x00dev, 159, bbp); rt2x00_dbg(rt2x00dev, " I0 %2x,", bbp); rt2800_bbp_write(rt2x00dev, 158, 0xb2); bbp = loft_dc_search_result[CHAIN_0][rf_alc_idx][0x01]; bbp = bbp & 0x3F; rt2800_bbp_write(rt2x00dev, 159, bbp); rt2x00_dbg(rt2x00dev, " Q0 %2x,", bbp); rt2800_bbp_write(rt2x00dev, 158, 0xb8); bbp = loft_dc_search_result[CHAIN_1][rf_alc_idx][0x00]; bbp = bbp & 0x3F; rt2800_bbp_write(rt2x00dev, 159, bbp); rt2x00_dbg(rt2x00dev, " I1 %2x,", bbp); rt2800_bbp_write(rt2x00dev, 158, 0xb9); bbp = loft_dc_search_result[CHAIN_1][rf_alc_idx][0x01]; bbp = bbp & 0x3F; rt2800_bbp_write(rt2x00dev, 159, bbp); rt2x00_dbg(rt2x00dev, " Q1 %2x\n", bbp); } } rt2800_bbp_write(rt2x00dev, 23, 0x00); rt2800_bbp_write(rt2x00dev, 24, 0x00); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04); rt2800_bbp_write(rt2x00dev, 158, 0x00); rt2800_bbp_write(rt2x00dev, 159, 0x00); bbp = 0x00; rt2800_bbp_write(rt2x00dev, 244, 0x00); rt2800_bbp_write(rt2x00dev, 21, 0x01); udelay(1); rt2800_bbp_write(rt2x00dev, 21, 0x00); rt2800_rf_configrecover(rt2x00dev, rf_store); rt2800_register_write(rt2x00dev, TX_PIN_CFG, macorg1); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00); rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00); rt2800_register_write(rt2x00dev, RF_CONTROL0, macorg2); udelay(1); rt2800_register_write(rt2x00dev, RF_BYPASS0, macorg3); rt2800_register_write(rt2x00dev, RF_CONTROL3, macorg4); rt2800_register_write(rt2x00dev, RF_BYPASS3, macorg5); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl); rt2800_register_write(rt2x00dev, RF_CONTROL2, orig528); rt2800_register_write(rt2x00dev, RF_BYPASS2, orig52c); rt2800_register_write(rt2x00dev, 0x13b8, mac13b8); savemacsysctrl = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); macorg1 = rt2800_register_read(rt2x00dev, TX_PIN_CFG); macorg2 = rt2800_register_read(rt2x00dev, RF_CONTROL0); macorg3 = rt2800_register_read(rt2x00dev, RF_BYPASS0); macorg4 = rt2800_register_read(rt2x00dev, RF_CONTROL3); macorg5 = rt2800_register_read(rt2x00dev, RF_BYPASS3); bbpr1 = rt2800_bbp_read(rt2x00dev, 1); bbpr4 = rt2800_bbp_read(rt2x00dev, 4); bbpr241 = rt2800_bbp_read(rt2x00dev, 241); bbpr242 = rt2800_bbp_read(rt2x00dev, 242); mac13b8 = rt2800_register_read(rt2x00dev, 0x13b8); macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); macvalue &= (~0x04); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue); if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_TX))) rt2x00_warn(rt2x00dev, "RF TX busy in LOFT IQ calibration\n"); macvalue = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); macvalue &= (~0x08); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, macvalue); 
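/* Second half of the routine: MAC TX/RX are disabled again and, after the busy-wait below, each chain is put back into loopback for the TX I/Q gain/phase imbalance search (rt2800_iq_search), analogous to the LOFT DC search above. */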
if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY_RX))) rt2x00_warn(rt2x00dev, "RF RX busy in LOFT IQ calibration\n"); if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) { rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x00000101); rt2800_register_write(rt2x00dev, RF_BYPASS3, 0x0000F1F1); } rt2800_bbp_write(rt2x00dev, 23, 0x00); rt2800_bbp_write(rt2x00dev, 24, 0x00); if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) { rt2800_bbp_write(rt2x00dev, 4, bbpr4 & (~0x18)); rt2800_bbp_write(rt2x00dev, 21, 0x01); udelay(1); rt2800_bbp_write(rt2x00dev, 21, 0x00); rt2800_bbp_write(rt2x00dev, 241, 0x14); rt2800_bbp_write(rt2x00dev, 242, 0x80); rt2800_bbp_write(rt2x00dev, 244, 0x31); } else { rt2800_setbbptonegenerator(rt2x00dev); } rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00000004); rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00003306); udelay(1); rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0x0000000F); if (!test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) { rt2800_register_write(rt2x00dev, RF_CONTROL3, 0x00000000); rt2800_register_write(rt2x00dev, RF_BYPASS3, 0x0000F1F1); } rt2800_register_write(rt2x00dev, 0x13b8, 0x00000010); for (ch_idx = 0; ch_idx < 2; ch_idx++) rt2800_rf_configstore(rt2x00dev, rf_store, ch_idx); rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x3B); rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x3B); rt2800_bbp_write(rt2x00dev, 158, 0x03); rt2800_bbp_write(rt2x00dev, 159, 0x60); rt2800_bbp_write(rt2x00dev, 158, 0xB0); rt2800_bbp_write(rt2x00dev, 159, 0x80); for (ch_idx = 0; ch_idx < 2; ch_idx++) { rt2800_bbp_write(rt2x00dev, 23, 0x00); rt2800_bbp_write(rt2x00dev, 24, 0x00); if (ch_idx == 0) { rt2800_bbp_write(rt2x00dev, 158, 0x01); rt2800_bbp_write(rt2x00dev, 159, 0x00); if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) { bbp = bbpr1 & (~0x18); bbp = bbp | 0x00; rt2800_bbp_write(rt2x00dev, 1, bbp); } rt2800_rf_aux_tx0_loopback(rt2x00dev); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00001004); } else { rt2800_bbp_write(rt2x00dev, 158, 0x01); rt2800_bbp_write(rt2x00dev, 159, 0x01); if (test_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags)) { bbp = bbpr1 & (~0x18); bbp = bbp | 0x08; rt2800_bbp_write(rt2x00dev, 1, bbp); } rt2800_rf_aux_tx1_loopback(rt2x00dev); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00002004); } rt2800_bbp_write(rt2x00dev, 158, 0x05); rt2800_bbp_write(rt2x00dev, 159, 0x04); bbp = (ch_idx == 0) ? 0x28 : 0x46; rt2800_bbp_write(rt2x00dev, 158, bbp); rt2800_bbp_write(rt2x00dev, 159, 0x00); if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) { rt2800_bbp_write(rt2x00dev, 23, 0x06); rt2800_bbp_write(rt2x00dev, 24, 0x06); count_step = 1; } else { rt2800_bbp_write(rt2x00dev, 23, 0x1F); rt2800_bbp_write(rt2x00dev, 24, 0x1F); count_step = 2; } for (; vga_gain[ch_idx] < 19; vga_gain[ch_idx] = (vga_gain[ch_idx] + count_step)) { rfvalue = rfvga_gain_table[vga_gain[ch_idx]]; rt2800_rfcsr_write_dccal(rt2x00dev, 3, rfvalue); rt2800_rfcsr_write_dccal(rt2x00dev, 4, rfvalue); bbp = (ch_idx == 0) ? 0x29 : 0x47; rt2800_bbp_write(rt2x00dev, 158, bbp); rt2800_bbp_write(rt2x00dev, 159, 0x00); p0 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 0); if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) p0_idx10 = rt2800_read_fft_accumulation(rt2x00dev, 0x0A); bbp = (ch_idx == 0) ? 
0x29 : 0x47; rt2800_bbp_write(rt2x00dev, 158, bbp); rt2800_bbp_write(rt2x00dev, 159, 0x21); p1 = rt2800_do_fft_accumulation(rt2x00dev, 0x14, 0); if (test_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags)) p1_idx10 = rt2800_read_fft_accumulation(rt2x00dev, 0x0A); rt2x00_dbg(rt2x00dev, "IQ AGC %d %d\n", p0, p1); if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) { rt2x00_dbg(rt2x00dev, "IQ AGC IDX 10 %d %d\n", p0_idx10, p1_idx10); if ((p0_idx10 > 7000 * 7000) || (p1_idx10 > 7000 * 7000)) { if (vga_gain[ch_idx] != 0) vga_gain[ch_idx] = vga_gain[ch_idx] - 1; break; } } if ((p0 > 2500 * 2500) || (p1 > 2500 * 2500)) break; } if (vga_gain[ch_idx] > 18) vga_gain[ch_idx] = 18; rt2x00_dbg(rt2x00dev, "Used VGA %d %x\n", vga_gain[ch_idx], rfvga_gain_table[vga_gain[ch_idx]]); bbp = (ch_idx == 0) ? 0x29 : 0x47; rt2800_bbp_write(rt2x00dev, 158, bbp); rt2800_bbp_write(rt2x00dev, 159, 0x00); rt2800_iq_search(rt2x00dev, ch_idx, ger, per); } rt2800_bbp_write(rt2x00dev, 23, 0x00); rt2800_bbp_write(rt2x00dev, 24, 0x00); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04); rt2800_bbp_write(rt2x00dev, 158, 0x28); bbp = ger[CHAIN_0] & 0x0F; rt2800_bbp_write(rt2x00dev, 159, bbp); rt2800_bbp_write(rt2x00dev, 158, 0x29); bbp = per[CHAIN_0] & 0x3F; rt2800_bbp_write(rt2x00dev, 159, bbp); rt2800_bbp_write(rt2x00dev, 158, 0x46); bbp = ger[CHAIN_1] & 0x0F; rt2800_bbp_write(rt2x00dev, 159, bbp); rt2800_bbp_write(rt2x00dev, 158, 0x47); bbp = per[CHAIN_1] & 0x3F; rt2800_bbp_write(rt2x00dev, 159, bbp); if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) { rt2800_bbp_write(rt2x00dev, 1, bbpr1); rt2800_bbp_write(rt2x00dev, 241, bbpr241); rt2800_bbp_write(rt2x00dev, 242, bbpr242); } rt2800_bbp_write(rt2x00dev, 244, 0x00); rt2800_bbp_write(rt2x00dev, 158, 0x00); rt2800_bbp_write(rt2x00dev, 159, 0x00); rt2800_bbp_write(rt2x00dev, 158, 0xB0); rt2800_bbp_write(rt2x00dev, 159, 0x00); rt2800_bbp_write(rt2x00dev, 30, bbpr30); rt2800_rfcsr_write_bank(rt2x00dev, 0, 39, rfb0r39); rt2800_rfcsr_write_bank(rt2x00dev, 0, 42, rfb0r42); if (test_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags)) rt2800_bbp_write(rt2x00dev, 4, bbpr4); rt2800_bbp_write(rt2x00dev, 21, 0x01); udelay(1); rt2800_bbp_write(rt2x00dev, 21, 0x00); rt2800_rf_configrecover(rt2x00dev, rf_store); rt2800_register_write(rt2x00dev, TX_PIN_CFG, macorg1); rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x00); rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x00); rt2800_register_write(rt2x00dev, RF_CONTROL0, macorg2); udelay(1); rt2800_register_write(rt2x00dev, RF_BYPASS0, macorg3); rt2800_register_write(rt2x00dev, RF_CONTROL3, macorg4); rt2800_register_write(rt2x00dev, RF_BYPASS3, macorg5); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, savemacsysctrl); rt2800_register_write(rt2x00dev, 0x13b8, mac13b8); } static void rt2800_bbp_core_soft_reset(struct rt2x00_dev *rt2x00dev, bool set_bw, bool is_ht40) { u8 bbp_val; bbp_val = rt2800_bbp_read(rt2x00dev, 21); bbp_val |= 0x1; rt2800_bbp_write(rt2x00dev, 21, bbp_val); usleep_range(100, 200); if (set_bw) { bbp_val = rt2800_bbp_read(rt2x00dev, 4); rt2x00_set_field8(&bbp_val, BBP4_BANDWIDTH, 2 * is_ht40); rt2800_bbp_write(rt2x00dev, 4, bbp_val); usleep_range(100, 200); } bbp_val = rt2800_bbp_read(rt2x00dev, 21); bbp_val &= (~0x1); rt2800_bbp_write(rt2x00dev, 21, bbp_val); usleep_range(100, 200); } static int rt2800_rf_lp_config(struct rt2x00_dev *rt2x00dev, bool btxcal) { u8 rf_val; if (btxcal) rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x04); else rt2800_register_write(rt2x00dev, RF_CONTROL0, 0x02); 
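/* RF_CONTROL0 selects the TX (0x04) or RX (0x02) calibration path; the writes below then set RF_BYPASS0 and program the bank-5 filter registers for the chosen direction of the bandwidth-filter calibration. */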
rt2800_register_write(rt2x00dev, RF_BYPASS0, 0x06); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17); rf_val |= 0x80; rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, rf_val); if (btxcal) { rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xC1); rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x20); rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x02); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3); rf_val &= (~0x3F); rf_val |= 0x3F; rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rf_val); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4); rf_val &= (~0x3F); rf_val |= 0x3F; rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rf_val); rt2800_rfcsr_write_bank(rt2x00dev, 5, 5, 0x31); } else { rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, 0xF1); rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, 0x18); rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, 0x02); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3); rf_val &= (~0x3F); rf_val |= 0x34; rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, rf_val); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4); rf_val &= (~0x3F); rf_val |= 0x34; rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, rf_val); } return 0; } static s8 rt2800_lp_tx_filter_bw_cal(struct rt2x00_dev *rt2x00dev) { unsigned int cnt; u8 bbp_val; s8 cal_val; rt2800_bbp_dcoc_write(rt2x00dev, 0, 0x82); cnt = 0; do { usleep_range(500, 2000); bbp_val = rt2800_bbp_read(rt2x00dev, 159); if (bbp_val == 0x02 || cnt == 20) break; cnt++; } while (cnt < 20); bbp_val = rt2800_bbp_dcoc_read(rt2x00dev, 0x39); cal_val = bbp_val & 0x7F; if (cal_val >= 0x40) cal_val -= 128; return cal_val; } static void rt2800_bw_filter_calibration(struct rt2x00_dev *rt2x00dev, bool btxcal) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u8 tx_agc_fc = 0, rx_agc_fc = 0, cmm_agc_fc; u8 filter_target; u8 tx_filter_target_20m = 0x09, tx_filter_target_40m = 0x02; u8 rx_filter_target_20m = 0x27, rx_filter_target_40m = 0x31; int loop = 0, is_ht40, cnt; u8 bbp_val, rf_val; s8 cal_r32_init, cal_r32_val, cal_diff; u8 saverfb5r00, saverfb5r01, saverfb5r03, saverfb5r04, saverfb5r05; u8 saverfb5r06, saverfb5r07; u8 saverfb5r08, saverfb5r17, saverfb5r18, saverfb5r19, saverfb5r20; u8 saverfb5r37, saverfb5r38, saverfb5r39, saverfb5r40, saverfb5r41; u8 saverfb5r42, saverfb5r43, saverfb5r44, saverfb5r45, saverfb5r46; u8 saverfb5r58, saverfb5r59; u8 savebbp159r0, savebbp159r2, savebbpr23; u32 MAC_RF_CONTROL0, MAC_RF_BYPASS0; /* Save MAC registers */ MAC_RF_CONTROL0 = rt2800_register_read(rt2x00dev, RF_CONTROL0); MAC_RF_BYPASS0 = rt2800_register_read(rt2x00dev, RF_BYPASS0); /* save BBP registers */ savebbpr23 = rt2800_bbp_read(rt2x00dev, 23); savebbp159r0 = rt2800_bbp_dcoc_read(rt2x00dev, 0); savebbp159r2 = rt2800_bbp_dcoc_read(rt2x00dev, 2); /* Save RF registers */ saverfb5r00 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 0); saverfb5r01 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1); saverfb5r03 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 3); saverfb5r04 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 4); saverfb5r05 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 5); saverfb5r06 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 6); saverfb5r07 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 7); saverfb5r08 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 8); saverfb5r17 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 17); saverfb5r18 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 18); saverfb5r19 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 19); saverfb5r20 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 20); saverfb5r37 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 37); saverfb5r38 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 38); saverfb5r39 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 39); saverfb5r40 = 
rt2800_rfcsr_read_bank(rt2x00dev, 5, 40); saverfb5r41 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 41); saverfb5r42 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 42); saverfb5r43 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 43); saverfb5r44 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 44); saverfb5r45 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 45); saverfb5r46 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 46); saverfb5r58 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 58); saverfb5r59 = rt2800_rfcsr_read_bank(rt2x00dev, 5, 59); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 0); rf_val |= 0x3; rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, rf_val); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1); rf_val |= 0x1; rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, rf_val); cnt = 0; do { usleep_range(500, 2000); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 1); if (((rf_val & 0x1) == 0x00) || (cnt == 40)) break; cnt++; } while (cnt < 40); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 0); rf_val &= (~0x3); rf_val |= 0x1; rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, rf_val); /* I-3 */ bbp_val = rt2800_bbp_read(rt2x00dev, 23); bbp_val &= (~0x1F); bbp_val |= 0x10; rt2800_bbp_write(rt2x00dev, 23, bbp_val); do { /* I-4,5,6,7,8,9 */ if (loop == 0) { is_ht40 = false; if (btxcal) filter_target = tx_filter_target_20m; else filter_target = rx_filter_target_20m; } else { is_ht40 = true; if (btxcal) filter_target = tx_filter_target_40m; else filter_target = rx_filter_target_40m; } rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 8); rf_val &= (~0x04); if (loop == 1) rf_val |= 0x4; rt2800_rfcsr_write_bank(rt2x00dev, 5, 8, rf_val); rt2800_bbp_core_soft_reset(rt2x00dev, true, is_ht40); rt2800_rf_lp_config(rt2x00dev, btxcal); if (btxcal) { tx_agc_fc = 0; rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 58); rf_val &= (~0x7F); rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rf_val); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 59); rf_val &= (~0x7F); rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rf_val); } else { rx_agc_fc = 0; rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 6); rf_val &= (~0x7F); rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rf_val); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 7); rf_val &= (~0x7F); rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rf_val); } usleep_range(1000, 2000); bbp_val = rt2800_bbp_dcoc_read(rt2x00dev, 2); bbp_val &= (~0x6); rt2800_bbp_dcoc_write(rt2x00dev, 2, bbp_val); rt2800_bbp_core_soft_reset(rt2x00dev, false, is_ht40); cal_r32_init = rt2800_lp_tx_filter_bw_cal(rt2x00dev); bbp_val = rt2800_bbp_dcoc_read(rt2x00dev, 2); bbp_val |= 0x6; rt2800_bbp_dcoc_write(rt2x00dev, 2, bbp_val); do_cal: if (btxcal) { rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 58); rf_val &= (~0x7F); rf_val |= tx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, rf_val); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 59); rf_val &= (~0x7F); rf_val |= tx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, rf_val); } else { rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 6); rf_val &= (~0x7F); rf_val |= rx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, rf_val); rf_val = rt2800_rfcsr_read_bank(rt2x00dev, 5, 7); rf_val &= (~0x7F); rf_val |= rx_agc_fc; rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, rf_val); } usleep_range(500, 1000); rt2800_bbp_core_soft_reset(rt2x00dev, false, is_ht40); cal_r32_val = rt2800_lp_tx_filter_bw_cal(rt2x00dev); cal_diff = cal_r32_init - cal_r32_val; if (btxcal) cmm_agc_fc = tx_agc_fc; else cmm_agc_fc = rx_agc_fc; if (((cal_diff > filter_target) && (cmm_agc_fc == 0)) || ((cal_diff < filter_target) && (cmm_agc_fc == 0x3f))) { if (btxcal) tx_agc_fc = 0; else rx_agc_fc = 0; } 
else if ((cal_diff <= filter_target) && (cmm_agc_fc < 0x3f)) { if (btxcal) tx_agc_fc++; else rx_agc_fc++; goto do_cal; } if (btxcal) { if (loop == 0) drv_data->tx_calibration_bw20 = tx_agc_fc; else drv_data->tx_calibration_bw40 = tx_agc_fc; } else { if (loop == 0) drv_data->rx_calibration_bw20 = rx_agc_fc; else drv_data->rx_calibration_bw40 = rx_agc_fc; } loop++; } while (loop <= 1); rt2800_rfcsr_write_bank(rt2x00dev, 5, 0, saverfb5r00); rt2800_rfcsr_write_bank(rt2x00dev, 5, 1, saverfb5r01); rt2800_rfcsr_write_bank(rt2x00dev, 5, 3, saverfb5r03); rt2800_rfcsr_write_bank(rt2x00dev, 5, 4, saverfb5r04); rt2800_rfcsr_write_bank(rt2x00dev, 5, 5, saverfb5r05); rt2800_rfcsr_write_bank(rt2x00dev, 5, 6, saverfb5r06); rt2800_rfcsr_write_bank(rt2x00dev, 5, 7, saverfb5r07); rt2800_rfcsr_write_bank(rt2x00dev, 5, 8, saverfb5r08); rt2800_rfcsr_write_bank(rt2x00dev, 5, 17, saverfb5r17); rt2800_rfcsr_write_bank(rt2x00dev, 5, 18, saverfb5r18); rt2800_rfcsr_write_bank(rt2x00dev, 5, 19, saverfb5r19); rt2800_rfcsr_write_bank(rt2x00dev, 5, 20, saverfb5r20); rt2800_rfcsr_write_bank(rt2x00dev, 5, 37, saverfb5r37); rt2800_rfcsr_write_bank(rt2x00dev, 5, 38, saverfb5r38); rt2800_rfcsr_write_bank(rt2x00dev, 5, 39, saverfb5r39); rt2800_rfcsr_write_bank(rt2x00dev, 5, 40, saverfb5r40); rt2800_rfcsr_write_bank(rt2x00dev, 5, 41, saverfb5r41); rt2800_rfcsr_write_bank(rt2x00dev, 5, 42, saverfb5r42); rt2800_rfcsr_write_bank(rt2x00dev, 5, 43, saverfb5r43); rt2800_rfcsr_write_bank(rt2x00dev, 5, 44, saverfb5r44); rt2800_rfcsr_write_bank(rt2x00dev, 5, 45, saverfb5r45); rt2800_rfcsr_write_bank(rt2x00dev, 5, 46, saverfb5r46); rt2800_rfcsr_write_bank(rt2x00dev, 5, 58, saverfb5r58); rt2800_rfcsr_write_bank(rt2x00dev, 5, 59, saverfb5r59); rt2800_bbp_write(rt2x00dev, 23, savebbpr23); rt2800_bbp_dcoc_write(rt2x00dev, 0, savebbp159r0); rt2800_bbp_dcoc_write(rt2x00dev, 2, savebbp159r2); bbp_val = rt2800_bbp_read(rt2x00dev, 4); rt2x00_set_field8(&bbp_val, BBP4_BANDWIDTH, 2 * test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags)); rt2800_bbp_write(rt2x00dev, 4, bbp_val); rt2800_register_write(rt2x00dev, RF_CONTROL0, MAC_RF_CONTROL0); rt2800_register_write(rt2x00dev, RF_BYPASS0, MAC_RF_BYPASS0); } static void rt2800_init_rfcsr_6352(struct rt2x00_dev *rt2x00dev) { /* Initialize RF central register to default value */ rt2800_rfcsr_write(rt2x00dev, 0, 0x02); rt2800_rfcsr_write(rt2x00dev, 1, 0x03); rt2800_rfcsr_write(rt2x00dev, 2, 0x33); rt2800_rfcsr_write(rt2x00dev, 3, 0xFF); rt2800_rfcsr_write(rt2x00dev, 4, 0x0C); rt2800_rfcsr_write(rt2x00dev, 5, 0x40); rt2800_rfcsr_write(rt2x00dev, 6, 0x00); rt2800_rfcsr_write(rt2x00dev, 7, 0x00); rt2800_rfcsr_write(rt2x00dev, 8, 0x00); rt2800_rfcsr_write(rt2x00dev, 9, 0x00); rt2800_rfcsr_write(rt2x00dev, 10, 0x00); rt2800_rfcsr_write(rt2x00dev, 11, 0x00); rt2800_rfcsr_write(rt2x00dev, 12, rt2x00dev->freq_offset); rt2800_rfcsr_write(rt2x00dev, 13, 0x00); rt2800_rfcsr_write(rt2x00dev, 14, 0x40); rt2800_rfcsr_write(rt2x00dev, 15, 0x22); rt2800_rfcsr_write(rt2x00dev, 16, 0x4C); rt2800_rfcsr_write(rt2x00dev, 17, 0x00); rt2800_rfcsr_write(rt2x00dev, 18, 0x00); rt2800_rfcsr_write(rt2x00dev, 19, 0x00); rt2800_rfcsr_write(rt2x00dev, 20, 0xA0); rt2800_rfcsr_write(rt2x00dev, 21, 0x12); rt2800_rfcsr_write(rt2x00dev, 22, 0x07); rt2800_rfcsr_write(rt2x00dev, 23, 0x13); rt2800_rfcsr_write(rt2x00dev, 24, 0xFE); rt2800_rfcsr_write(rt2x00dev, 25, 0x24); rt2800_rfcsr_write(rt2x00dev, 26, 0x7A); rt2800_rfcsr_write(rt2x00dev, 27, 0x00); rt2800_rfcsr_write(rt2x00dev, 28, 0x00); rt2800_rfcsr_write(rt2x00dev, 29, 0x05); 
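/* Registers 30-43 below complete the central-bank defaults; a second pass then overrides several of them, including register 13, whose value depends on whether a 20 MHz reference clock is in use (rt2800_clk_is_20mhz()). */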
rt2800_rfcsr_write(rt2x00dev, 30, 0x00); rt2800_rfcsr_write(rt2x00dev, 31, 0x00); rt2800_rfcsr_write(rt2x00dev, 32, 0x00); rt2800_rfcsr_write(rt2x00dev, 33, 0x00); rt2800_rfcsr_write(rt2x00dev, 34, 0x00); rt2800_rfcsr_write(rt2x00dev, 35, 0x00); rt2800_rfcsr_write(rt2x00dev, 36, 0x00); rt2800_rfcsr_write(rt2x00dev, 37, 0x00); rt2800_rfcsr_write(rt2x00dev, 38, 0x00); rt2800_rfcsr_write(rt2x00dev, 39, 0x00); rt2800_rfcsr_write(rt2x00dev, 40, 0x00); rt2800_rfcsr_write(rt2x00dev, 41, 0xD0); rt2800_rfcsr_write(rt2x00dev, 42, 0x5B); rt2800_rfcsr_write(rt2x00dev, 43, 0x00); rt2800_rfcsr_write(rt2x00dev, 11, 0x21); if (rt2800_clk_is_20mhz(rt2x00dev)) rt2800_rfcsr_write(rt2x00dev, 13, 0x03); else rt2800_rfcsr_write(rt2x00dev, 13, 0x00); rt2800_rfcsr_write(rt2x00dev, 14, 0x7C); rt2800_rfcsr_write(rt2x00dev, 16, 0x80); rt2800_rfcsr_write(rt2x00dev, 17, 0x99); rt2800_rfcsr_write(rt2x00dev, 18, 0x99); rt2800_rfcsr_write(rt2x00dev, 19, 0x09); rt2800_rfcsr_write(rt2x00dev, 20, 0x50); rt2800_rfcsr_write(rt2x00dev, 21, 0xB0); rt2800_rfcsr_write(rt2x00dev, 22, 0x00); rt2800_rfcsr_write(rt2x00dev, 23, 0x06); rt2800_rfcsr_write(rt2x00dev, 24, 0x00); rt2800_rfcsr_write(rt2x00dev, 25, 0x00); rt2800_rfcsr_write(rt2x00dev, 26, 0x5D); rt2800_rfcsr_write(rt2x00dev, 27, 0x00); rt2800_rfcsr_write(rt2x00dev, 28, 0x61); rt2800_rfcsr_write(rt2x00dev, 29, 0xB5); rt2800_rfcsr_write(rt2x00dev, 43, 0x02); rt2800_rfcsr_write(rt2x00dev, 28, 0x62); rt2800_rfcsr_write(rt2x00dev, 29, 0xAD); rt2800_rfcsr_write(rt2x00dev, 39, 0x80); /* Initialize RF channel register to default value */ rt2800_rfcsr_write_chanreg(rt2x00dev, 0, 0x03); rt2800_rfcsr_write_chanreg(rt2x00dev, 1, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 2, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 3, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 4, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 5, 0x08); rt2800_rfcsr_write_chanreg(rt2x00dev, 6, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 7, 0x51); rt2800_rfcsr_write_chanreg(rt2x00dev, 8, 0x53); rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x16); rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x61); rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x53); rt2800_rfcsr_write_chanreg(rt2x00dev, 12, 0x22); rt2800_rfcsr_write_chanreg(rt2x00dev, 13, 0x3D); rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x06); rt2800_rfcsr_write_chanreg(rt2x00dev, 15, 0x13); rt2800_rfcsr_write_chanreg(rt2x00dev, 16, 0x22); rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x27); rt2800_rfcsr_write_chanreg(rt2x00dev, 18, 0x02); rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA7); rt2800_rfcsr_write_chanreg(rt2x00dev, 20, 0x01); rt2800_rfcsr_write_chanreg(rt2x00dev, 21, 0x52); rt2800_rfcsr_write_chanreg(rt2x00dev, 22, 0x80); rt2800_rfcsr_write_chanreg(rt2x00dev, 23, 0xB3); rt2800_rfcsr_write_chanreg(rt2x00dev, 24, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 25, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 26, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 27, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x5C); rt2800_rfcsr_write_chanreg(rt2x00dev, 29, 0x6B); rt2800_rfcsr_write_chanreg(rt2x00dev, 30, 0x6B); rt2800_rfcsr_write_chanreg(rt2x00dev, 31, 0x31); rt2800_rfcsr_write_chanreg(rt2x00dev, 32, 0x5D); rt2800_rfcsr_write_chanreg(rt2x00dev, 33, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 34, 0xE6); rt2800_rfcsr_write_chanreg(rt2x00dev, 35, 0x55); rt2800_rfcsr_write_chanreg(rt2x00dev, 36, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 37, 0xBB); rt2800_rfcsr_write_chanreg(rt2x00dev, 38, 0xB3); rt2800_rfcsr_write_chanreg(rt2x00dev, 39, 0xB3); 
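/* These channel-bank defaults go through rt2800_rfcsr_write_chanreg(), which mirrors each value into the channel-register bank of each RF chain (banks 4 and 6, matching the per-chain bank split used by the calibration code above), so they apply to TX0 and TX1 alike. */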
rt2800_rfcsr_write_chanreg(rt2x00dev, 40, 0x03); rt2800_rfcsr_write_chanreg(rt2x00dev, 41, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 42, 0x00); rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xB3); rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xD3); rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xD5); rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x07); rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x68); rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xEF); rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x1C); rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x07); rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0xA8); rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0x85); rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x10); rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x07); rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6A); rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0x85); rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x10); rt2800_rfcsr_write_chanreg(rt2x00dev, 62, 0x1C); rt2800_rfcsr_write_chanreg(rt2x00dev, 63, 0x00); rt2800_rfcsr_write_bank(rt2x00dev, 6, 45, 0xC5); rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x47); rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x71); rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x33); rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x0E); rt2800_rfcsr_write_chanreg(rt2x00dev, 17, 0x23); rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA4); rt2800_rfcsr_write_chanreg(rt2x00dev, 20, 0x02); rt2800_rfcsr_write_chanreg(rt2x00dev, 21, 0x12); rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x1C); rt2800_rfcsr_write_chanreg(rt2x00dev, 29, 0xEB); rt2800_rfcsr_write_chanreg(rt2x00dev, 32, 0x7D); rt2800_rfcsr_write_chanreg(rt2x00dev, 34, 0xD6); rt2800_rfcsr_write_chanreg(rt2x00dev, 36, 0x08); rt2800_rfcsr_write_chanreg(rt2x00dev, 38, 0xB4); rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xD3); rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xB3); rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xD5); rt2800_rfcsr_write_chanreg(rt2x00dev, 46, 0x27); rt2800_rfcsr_write_bank(rt2x00dev, 4, 47, 0x67); rt2800_rfcsr_write_bank(rt2x00dev, 6, 47, 0x69); rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xFF); rt2800_rfcsr_write_bank(rt2x00dev, 4, 54, 0x27); rt2800_rfcsr_write_bank(rt2x00dev, 6, 54, 0x20); rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x66); rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xFF); rt2800_rfcsr_write_chanreg(rt2x00dev, 57, 0x1C); rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x20); rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6B); rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xF7); rt2800_rfcsr_write_chanreg(rt2x00dev, 61, 0x09); rt2800_rfcsr_write_chanreg(rt2x00dev, 10, 0x51); rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x06); rt2800_rfcsr_write_chanreg(rt2x00dev, 19, 0xA7); rt2800_rfcsr_write_chanreg(rt2x00dev, 28, 0x2C); rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x64); rt2800_rfcsr_write_chanreg(rt2x00dev, 8, 0x51); rt2800_rfcsr_write_chanreg(rt2x00dev, 9, 0x36); rt2800_rfcsr_write_chanreg(rt2x00dev, 11, 0x53); rt2800_rfcsr_write_chanreg(rt2x00dev, 14, 0x16); rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x6C); rt2800_rfcsr_write_chanreg(rt2x00dev, 48, 0xFC); rt2800_rfcsr_write_chanreg(rt2x00dev, 49, 0x1F); rt2800_rfcsr_write_chanreg(rt2x00dev, 54, 0x27); rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x66); rt2800_rfcsr_write_chanreg(rt2x00dev, 59, 0x6B); /* Initialize RF channel register for DRQFN */ rt2800_rfcsr_write_chanreg(rt2x00dev, 43, 0xD3); rt2800_rfcsr_write_chanreg(rt2x00dev, 44, 0xE3); rt2800_rfcsr_write_chanreg(rt2x00dev, 45, 0xE5); rt2800_rfcsr_write_chanreg(rt2x00dev, 47, 0x28); rt2800_rfcsr_write_chanreg(rt2x00dev, 55, 0x68); 
rt2800_rfcsr_write_chanreg(rt2x00dev, 56, 0xF7); rt2800_rfcsr_write_chanreg(rt2x00dev, 58, 0x02); rt2800_rfcsr_write_chanreg(rt2x00dev, 60, 0xC7); /* Initialize RF DC calibration register to default value */ rt2800_rfcsr_write_dccal(rt2x00dev, 0, 0x47); rt2800_rfcsr_write_dccal(rt2x00dev, 1, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 2, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 6, 0x10); rt2800_rfcsr_write_dccal(rt2x00dev, 7, 0x10); rt2800_rfcsr_write_dccal(rt2x00dev, 8, 0x04); rt2800_rfcsr_write_dccal(rt2x00dev, 9, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 10, 0x07); rt2800_rfcsr_write_dccal(rt2x00dev, 11, 0x01); rt2800_rfcsr_write_dccal(rt2x00dev, 12, 0x07); rt2800_rfcsr_write_dccal(rt2x00dev, 13, 0x07); rt2800_rfcsr_write_dccal(rt2x00dev, 14, 0x07); rt2800_rfcsr_write_dccal(rt2x00dev, 15, 0x20); rt2800_rfcsr_write_dccal(rt2x00dev, 16, 0x22); rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 18, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 19, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 20, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 21, 0xF1); rt2800_rfcsr_write_dccal(rt2x00dev, 22, 0x11); rt2800_rfcsr_write_dccal(rt2x00dev, 23, 0x02); rt2800_rfcsr_write_dccal(rt2x00dev, 24, 0x41); rt2800_rfcsr_write_dccal(rt2x00dev, 25, 0x20); rt2800_rfcsr_write_dccal(rt2x00dev, 26, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 27, 0xD7); rt2800_rfcsr_write_dccal(rt2x00dev, 28, 0xA2); rt2800_rfcsr_write_dccal(rt2x00dev, 29, 0x20); rt2800_rfcsr_write_dccal(rt2x00dev, 30, 0x49); rt2800_rfcsr_write_dccal(rt2x00dev, 31, 0x20); rt2800_rfcsr_write_dccal(rt2x00dev, 32, 0x04); rt2800_rfcsr_write_dccal(rt2x00dev, 33, 0xF1); rt2800_rfcsr_write_dccal(rt2x00dev, 34, 0xA1); rt2800_rfcsr_write_dccal(rt2x00dev, 35, 0x01); rt2800_rfcsr_write_dccal(rt2x00dev, 41, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 42, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 43, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 44, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 45, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 46, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 47, 0x3E); rt2800_rfcsr_write_dccal(rt2x00dev, 48, 0x3D); rt2800_rfcsr_write_dccal(rt2x00dev, 49, 0x3E); rt2800_rfcsr_write_dccal(rt2x00dev, 50, 0x3D); rt2800_rfcsr_write_dccal(rt2x00dev, 51, 0x3E); rt2800_rfcsr_write_dccal(rt2x00dev, 52, 0x3D); rt2800_rfcsr_write_dccal(rt2x00dev, 53, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 54, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 55, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 56, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 57, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 58, 0x10); rt2800_rfcsr_write_dccal(rt2x00dev, 59, 0x10); rt2800_rfcsr_write_dccal(rt2x00dev, 60, 0x0A); rt2800_rfcsr_write_dccal(rt2x00dev, 61, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 62, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 63, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 3, 0x08); rt2800_rfcsr_write_dccal(rt2x00dev, 4, 0x04); rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x20); rt2800_rfcsr_write_dccal(rt2x00dev, 5, 0x00); rt2800_rfcsr_write_dccal(rt2x00dev, 17, 0x7C); rt2800_r_calibration(rt2x00dev); rt2800_rf_self_txdc_cal(rt2x00dev); rt2800_rxdcoc_calibration(rt2x00dev); rt2800_bw_filter_calibration(rt2x00dev, true); rt2800_bw_filter_calibration(rt2x00dev, false); rt2800_loft_iq_calibration(rt2x00dev); rt2800_rxiq_calibration(rt2x00dev); } static void rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev) { if 
(rt2800_is_305x_soc(rt2x00dev)) { rt2800_init_rfcsr_305x_soc(rt2x00dev); return; } switch (rt2x00dev->chip.rt) { case RT3070: case RT3071: case RT3090: rt2800_init_rfcsr_30xx(rt2x00dev); break; case RT3290: rt2800_init_rfcsr_3290(rt2x00dev); break; case RT3352: rt2800_init_rfcsr_3352(rt2x00dev); break; case RT3390: rt2800_init_rfcsr_3390(rt2x00dev); break; case RT3883: rt2800_init_rfcsr_3883(rt2x00dev); break; case RT3572: rt2800_init_rfcsr_3572(rt2x00dev); break; case RT3593: rt2800_init_rfcsr_3593(rt2x00dev); break; case RT5350: rt2800_init_rfcsr_5350(rt2x00dev); break; case RT5390: rt2800_init_rfcsr_5390(rt2x00dev); break; case RT5392: rt2800_init_rfcsr_5392(rt2x00dev); break; case RT5592: rt2800_init_rfcsr_5592(rt2x00dev); break; case RT6352: rt2800_init_rfcsr_6352(rt2x00dev); break; } } int rt2800_enable_radio(struct rt2x00_dev *rt2x00dev) { u32 reg; u16 word; /* * Initialize MAC registers. */ if (unlikely(rt2800_wait_wpdma_ready(rt2x00dev) || rt2800_init_registers(rt2x00dev))) return -EIO; /* * Wait BBP/RF to wake up. */ if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev, MAC_STATUS_CFG_BBP_RF_BUSY))) return -EIO; /* * Send signal during boot time to initialize firmware. */ rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0); rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); if (rt2x00_is_usb(rt2x00dev)) rt2800_register_write(rt2x00dev, H2M_INT_SRC, 0); rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0, 0, 0); msleep(1); /* * Make sure BBP is up and running. */ if (unlikely(rt2800_wait_bbp_ready(rt2x00dev))) return -EIO; /* * Initialize BBP/RF registers. */ rt2800_init_bbp(rt2x00dev); rt2800_init_rfcsr(rt2x00dev); if (rt2x00_is_usb(rt2x00dev) && (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3071) || rt2x00_rt(rt2x00dev, RT3572))) { udelay(200); rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0); udelay(10); } /* * Enable RX. 
*/ reg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); udelay(50); reg = rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1); rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); reg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); /* * Initialize LED control */ word = rt2800_eeprom_read(rt2x00dev, EEPROM_LED_AG_CONF); rt2800_mcu_request(rt2x00dev, MCU_LED_AG_CONF, 0xff, word & 0xff, (word >> 8) & 0xff); word = rt2800_eeprom_read(rt2x00dev, EEPROM_LED_ACT_CONF); rt2800_mcu_request(rt2x00dev, MCU_LED_ACT_CONF, 0xff, word & 0xff, (word >> 8) & 0xff); word = rt2800_eeprom_read(rt2x00dev, EEPROM_LED_POLARITY); rt2800_mcu_request(rt2x00dev, MCU_LED_LED_POLARITY, 0xff, word & 0xff, (word >> 8) & 0xff); return 0; } EXPORT_SYMBOL_GPL(rt2800_enable_radio); void rt2800_disable_radio(struct rt2x00_dev *rt2x00dev) { u32 reg; rt2800_disable_wpdma(rt2x00dev); /* Wait for DMA, ignore error */ rt2800_wait_wpdma_ready(rt2x00dev); reg = rt2800_register_read(rt2x00dev, MAC_SYS_CTRL); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 0); rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0); rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg); } EXPORT_SYMBOL_GPL(rt2800_disable_radio); int rt2800_efuse_detect(struct rt2x00_dev *rt2x00dev) { u32 reg; u16 efuse_ctrl_reg; if (rt2x00_rt(rt2x00dev, RT3290)) efuse_ctrl_reg = EFUSE_CTRL_3290; else efuse_ctrl_reg = EFUSE_CTRL; reg = rt2800_register_read(rt2x00dev, efuse_ctrl_reg); return rt2x00_get_field32(reg, EFUSE_CTRL_PRESENT); } EXPORT_SYMBOL_GPL(rt2800_efuse_detect); static void rt2800_efuse_read(struct rt2x00_dev *rt2x00dev, unsigned int i) { u32 reg; u16 efuse_ctrl_reg; u16 efuse_data0_reg; u16 efuse_data1_reg; u16 efuse_data2_reg; u16 efuse_data3_reg; if (rt2x00_rt(rt2x00dev, RT3290)) { efuse_ctrl_reg = EFUSE_CTRL_3290; efuse_data0_reg = EFUSE_DATA0_3290; efuse_data1_reg = EFUSE_DATA1_3290; efuse_data2_reg = EFUSE_DATA2_3290; efuse_data3_reg = EFUSE_DATA3_3290; } else { efuse_ctrl_reg = EFUSE_CTRL; efuse_data0_reg = EFUSE_DATA0; efuse_data1_reg = EFUSE_DATA1; efuse_data2_reg = EFUSE_DATA2; efuse_data3_reg = EFUSE_DATA3; } mutex_lock(&rt2x00dev->csr_mutex); reg = rt2800_register_read_lock(rt2x00dev, efuse_ctrl_reg); rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i); rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0); rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1); rt2800_register_write_lock(rt2x00dev, efuse_ctrl_reg, reg); /* Wait until the EEPROM has been loaded */ rt2800_regbusy_read(rt2x00dev, efuse_ctrl_reg, EFUSE_CTRL_KICK, &reg); /* Apparently the data is read from end to start */ reg = rt2800_register_read_lock(rt2x00dev, efuse_data3_reg); /* The returned value is in CPU order, but eeprom is le */ *(u32 *)&rt2x00dev->eeprom[i] = cpu_to_le32(reg); reg = rt2800_register_read_lock(rt2x00dev, efuse_data2_reg); *(u32 *)&rt2x00dev->eeprom[i + 2] = cpu_to_le32(reg); reg = rt2800_register_read_lock(rt2x00dev, efuse_data1_reg); *(u32 *)&rt2x00dev->eeprom[i + 4] = cpu_to_le32(reg); reg = rt2800_register_read_lock(rt2x00dev, efuse_data0_reg); *(u32 *)&rt2x00dev->eeprom[i + 6] = 
cpu_to_le32(reg); mutex_unlock(&rt2x00dev->csr_mutex); } int rt2800_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) { unsigned int i; for (i = 0; i < EEPROM_SIZE / sizeof(u16); i += 8) rt2800_efuse_read(rt2x00dev, i); return 0; } EXPORT_SYMBOL_GPL(rt2800_read_eeprom_efuse); static u8 rt2800_get_txmixer_gain_24g(struct rt2x00_dev *rt2x00dev) { u16 word; if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) return 0; word = rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_BG); if ((word & 0x00ff) != 0x00ff) return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_BG_VAL); return 0; } static u8 rt2800_get_txmixer_gain_5g(struct rt2x00_dev *rt2x00dev) { u16 word; if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) return 0; word = rt2800_eeprom_read(rt2x00dev, EEPROM_TXMIXER_GAIN_A); if ((word & 0x00ff) != 0x00ff) return rt2x00_get_field16(word, EEPROM_TXMIXER_GAIN_A_VAL); return 0; } static int rt2800_validate_eeprom(struct rt2x00_dev *rt2x00dev) { struct rt2800_drv_data *drv_data = rt2x00dev->drv_data; u16 word; u8 *mac; u8 default_lna_gain; int retval; /* * Read the EEPROM. */ retval = rt2800_read_eeprom(rt2x00dev); if (retval) return retval; /* * Start validation of the data that has been read. */ mac = rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); rt2x00lib_set_mac_address(rt2x00dev, mac); word = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2); rt2x00_set_field16(&word, EEPROM_NIC_CONF0_TXPATH, 1); rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RF_TYPE, RF2820); rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word); rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word); } else if (rt2x00_rt(rt2x00dev, RT2860) || rt2x00_rt(rt2x00dev, RT2872)) { /* * There is a max of 2 RX streams for RT28x0 series */ if (rt2x00_get_field16(word, EEPROM_NIC_CONF0_RXPATH) > 2) rt2x00_set_field16(&word, EEPROM_NIC_CONF0_RXPATH, 2); rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF0, word); } word = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_NIC_CONF1_HW_RADIO, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_LNA_2G, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_EXTERNAL_LNA_5G, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_CARDBUS_ACCEL, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_SB_2G, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_SB_5G, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_WPS_PBC, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_2G, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BW40M_5G, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BROADBAND_EXT_LNA, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_ANT_DIVERSITY, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_INTERNAL_TX_ALC, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_BT_COEXIST, 0); rt2x00_set_field16(&word, EEPROM_NIC_CONF1_DAC_TEST, 0); rt2800_eeprom_write(rt2x00dev, EEPROM_NIC_CONF1, word); rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word); } word = rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ); if ((word & 0x00ff) == 0x00ff) { rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, word); rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word); } if ((word & 0xff00) == 0xff00) { rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE, LED_MODE_TXRX_ACTIVITY); rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0); rt2800_eeprom_write(rt2x00dev, EEPROM_FREQ, 
word); rt2800_eeprom_write(rt2x00dev, EEPROM_LED_AG_CONF, 0x5555); rt2800_eeprom_write(rt2x00dev, EEPROM_LED_ACT_CONF, 0x2221); rt2800_eeprom_write(rt2x00dev, EEPROM_LED_POLARITY, 0xa9f8); rt2x00_eeprom_dbg(rt2x00dev, "Led Mode: 0x%04x\n", word); } /* * During the LNA validation we are going to use * lna0 as correct value. Note that EEPROM_LNA * is never validated. */ word = rt2800_eeprom_read(rt2x00dev, EEPROM_LNA); default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0); word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0); rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word); drv_data->txmixer_gain_24g = rt2800_get_txmixer_gain_24g(rt2x00dev); word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0); if (!rt2x00_rt(rt2x00dev, RT3593) && !rt2x00_rt(rt2x00dev, RT3883)) { if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 || rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff) rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1, default_lna_gain); } rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word); drv_data->txmixer_gain_5g = rt2800_get_txmixer_gain_5g(rt2x00dev); word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0); rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word); word = rt2800_eeprom_read(rt2x00dev, EEPROM_RSSI_A2); if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10) rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0); if (!rt2x00_rt(rt2x00dev, RT3593) && !rt2x00_rt(rt2x00dev, RT3883)) { if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 || rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff) rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2, default_lna_gain); } rt2800_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word); if (rt2x00_rt(rt2x00dev, RT3593) || rt2x00_rt(rt2x00dev, RT3883)) { word = rt2800_eeprom_read(rt2x00dev, EEPROM_EXT_LNA2); if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0x00 || rt2x00_get_field16(word, EEPROM_EXT_LNA2_A1) == 0xff) rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A1, default_lna_gain); if (rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0x00 || rt2x00_get_field16(word, EEPROM_EXT_LNA2_A2) == 0xff) rt2x00_set_field16(&word, EEPROM_EXT_LNA2_A1, default_lna_gain); rt2800_eeprom_write(rt2x00dev, EEPROM_EXT_LNA2, word); } return 0; } static int rt2800_init_eeprom(struct rt2x00_dev *rt2x00dev) { u16 value; u16 eeprom; u16 rf; /* * Read EEPROM word for configuration. 
*/ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF0); /* * Identify RF chipset by EEPROM value * RT28xx/RT30xx: defined in "EEPROM_NIC_CONF0_RF_TYPE" field * RT53xx: defined in "EEPROM_CHIP_ID" field */ if (rt2x00_rt(rt2x00dev, RT3290) || rt2x00_rt(rt2x00dev, RT5390) || rt2x00_rt(rt2x00dev, RT5392) || rt2x00_rt(rt2x00dev, RT6352)) rf = rt2800_eeprom_read(rt2x00dev, EEPROM_CHIP_ID); else if (rt2x00_rt(rt2x00dev, RT3352)) rf = RF3322; else if (rt2x00_rt(rt2x00dev, RT3883)) rf = RF3853; else if (rt2x00_rt(rt2x00dev, RT5350)) rf = RF5350; else if (rt2x00_rt(rt2x00dev, RT5592)) rf = RF5592; else rf = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RF_TYPE); switch (rf) { case RF2820: case RF2850: case RF2720: case RF2750: case RF3020: case RF2020: case RF3021: case RF3022: case RF3052: case RF3053: case RF3070: case RF3290: case RF3320: case RF3322: case RF3853: case RF5350: case RF5360: case RF5362: case RF5370: case RF5372: case RF5390: case RF5392: case RF5592: case RF7620: break; default: rt2x00_err(rt2x00dev, "Invalid RF chipset 0x%04x detected\n", rf); return -ENODEV; } rt2x00_set_rf(rt2x00dev, rf); /* * Identify default antenna configuration. */ rt2x00dev->default_ant.tx_chain_num = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_TXPATH); rt2x00dev->default_ant.rx_chain_num = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF0_RXPATH); eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); if (rt2x00_rt(rt2x00dev, RT3070) || rt2x00_rt(rt2x00dev, RT3090) || rt2x00_rt(rt2x00dev, RT3352) || rt2x00_rt(rt2x00dev, RT3390)) { value = rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_ANT_DIVERSITY); switch (value) { case 0: case 1: case 2: rt2x00dev->default_ant.tx = ANTENNA_A; rt2x00dev->default_ant.rx = ANTENNA_A; break; case 3: rt2x00dev->default_ant.tx = ANTENNA_A; rt2x00dev->default_ant.rx = ANTENNA_B; break; } } else { rt2x00dev->default_ant.tx = ANTENNA_A; rt2x00dev->default_ant.rx = ANTENNA_A; } /* These chips have hardware RX antenna diversity */ if (rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5390R) || rt2x00_rt_rev_gte(rt2x00dev, RT5390, REV_RT5370G)) { rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; /* Unused */ rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* Unused */ } /* * Determine external LNA informations. */ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_5G)) __set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_LNA_2G)) __set_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags); /* * Detect if this device has an hardware controlled radio. */ if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_HW_RADIO)) __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); /* * Detect if this device has Bluetooth co-existence. */ if (!rt2x00_rt(rt2x00dev, RT3352) && rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_BT_COEXIST)) __set_bit(CAPABILITY_BT_COEXIST, &rt2x00dev->cap_flags); /* * Read frequency offset and RF programming sequence. */ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_FREQ); rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); /* * Store led settings, for correct led behaviour. */ #ifdef CONFIG_RT2X00_LIB_LEDS rt2800_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); rt2800_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC); rt2800_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY); rt2x00dev->led_mcu_reg = eeprom; #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Check if support EIRP tx power limit feature. 
*/ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_EIRP_MAX_TX_POWER); if (rt2x00_get_field16(eeprom, EEPROM_EIRP_MAX_TX_POWER_2GHZ) < EIRP_MAX_TX_POWER_LIMIT) __set_bit(CAPABILITY_POWER_LIMIT, &rt2x00dev->cap_flags); /* * Detect if device uses internal or external PA */ eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1); if (rt2x00_rt(rt2x00dev, RT3352) || rt2x00_rt(rt2x00dev, RT6352)) { if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX0_PA_3352)) __set_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags); if (rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX1_PA_3352)) __set_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags); } eeprom = rt2800_eeprom_read(rt2x00dev, EEPROM_NIC_CONF2); if (rt2x00_rt(rt2x00dev, RT6352) && eeprom != 0 && eeprom != 0xffff) { if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF2_EXTERNAL_PA)) { __clear_bit(CAPABILITY_EXTERNAL_PA_TX0, &rt2x00dev->cap_flags); __clear_bit(CAPABILITY_EXTERNAL_PA_TX1, &rt2x00dev->cap_flags); } } return 0; } /* * RF value list for rt28xx * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750) */ static const struct rf_channel rf_vals[] = { { 1, 0x18402ecc, 0x184c0786, 0x1816b455, 0x1800510b }, { 2, 0x18402ecc, 0x184c0786, 0x18168a55, 0x1800519f }, { 3, 0x18402ecc, 0x184c078a, 0x18168a55, 0x1800518b }, { 4, 0x18402ecc, 0x184c078a, 0x18168a55, 0x1800519f }, { 5, 0x18402ecc, 0x184c078e, 0x18168a55, 0x1800518b }, { 6, 0x18402ecc, 0x184c078e, 0x18168a55, 0x1800519f }, { 7, 0x18402ecc, 0x184c0792, 0x18168a55, 0x1800518b }, { 8, 0x18402ecc, 0x184c0792, 0x18168a55, 0x1800519f }, { 9, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800518b }, { 10, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800519f }, { 11, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800518b }, { 12, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800519f }, { 13, 0x18402ecc, 0x184c079e, 0x18168a55, 0x1800518b }, { 14, 0x18402ecc, 0x184c07a2, 0x18168a55, 0x18005193 }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x18402ecc, 0x184c099a, 0x18158a55, 0x180ed1a3 }, { 38, 0x18402ecc, 0x184c099e, 0x18158a55, 0x180ed193 }, { 40, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed183 }, { 44, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed1a3 }, { 46, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed18b }, { 48, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed19b }, { 52, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed193 }, { 54, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed1a3 }, { 56, 0x18402ec8, 0x184c068e, 0x18158a55, 0x180ed18b }, { 60, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed183 }, { 62, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed193 }, { 64, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed1a3 }, /* 802.11 HyperLan 2 */ { 100, 0x18402ec8, 0x184c06b2, 0x18178a55, 0x180ed783 }, { 102, 0x18402ec8, 0x184c06b2, 0x18578a55, 0x180ed793 }, { 104, 0x18402ec8, 0x185c06b2, 0x18578a55, 0x180ed1a3 }, { 108, 0x18402ecc, 0x185c0a32, 0x18578a55, 0x180ed193 }, { 110, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed183 }, { 112, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed19b }, { 116, 0x18402ecc, 0x184c0a3a, 0x18178a55, 0x180ed1a3 }, { 118, 0x18402ecc, 0x184c0a3e, 0x18178a55, 0x180ed193 }, { 120, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed183 }, { 124, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed193 }, { 126, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed15b }, { 128, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed1a3 }, { 132, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed18b }, { 134, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed193 }, { 136, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed19b }, { 140, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed183 }, /* 
802.11 UNII */ { 149, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed1a7 }, { 151, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed187 }, { 153, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed18f }, { 157, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed19f }, { 159, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed1a7 }, { 161, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed187 }, { 165, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed197 }, { 167, 0x18402ec4, 0x184c03d2, 0x18179855, 0x1815531f }, { 169, 0x18402ec4, 0x184c03d2, 0x18179855, 0x18155327 }, { 171, 0x18402ec4, 0x184c03d6, 0x18179855, 0x18155307 }, { 173, 0x18402ec4, 0x184c03d6, 0x18179855, 0x1815530f }, /* 802.11 Japan */ { 184, 0x15002ccc, 0x1500491e, 0x1509be55, 0x150c0a0b }, { 188, 0x15002ccc, 0x15004922, 0x1509be55, 0x150c0a13 }, { 192, 0x15002ccc, 0x15004926, 0x1509be55, 0x150c0a1b }, { 196, 0x15002ccc, 0x1500492a, 0x1509be55, 0x150c0a23 }, { 208, 0x15002ccc, 0x1500493a, 0x1509be55, 0x150c0a13 }, { 212, 0x15002ccc, 0x1500493e, 0x1509be55, 0x150c0a1b }, { 216, 0x15002ccc, 0x15004982, 0x1509be55, 0x150c0a23 }, }; /* * RF value list for rt3xxx * Supports: 2.4 GHz (all) & 5.2 GHz (RF3052 & RF3053) */ static const struct rf_channel rf_vals_3x[] = { {1, 241, 2, 2 }, {2, 241, 2, 7 }, {3, 242, 2, 2 }, {4, 242, 2, 7 }, {5, 243, 2, 2 }, {6, 243, 2, 7 }, {7, 244, 2, 2 }, {8, 244, 2, 7 }, {9, 245, 2, 2 }, {10, 245, 2, 7 }, {11, 246, 2, 2 }, {12, 246, 2, 7 }, {13, 247, 2, 2 }, {14, 248, 2, 4 }, /* 802.11 UNI / HyperLan 2 */ {36, 0x56, 0, 4}, {38, 0x56, 0, 6}, {40, 0x56, 0, 8}, {44, 0x57, 0, 0}, {46, 0x57, 0, 2}, {48, 0x57, 0, 4}, {52, 0x57, 0, 8}, {54, 0x57, 0, 10}, {56, 0x58, 0, 0}, {60, 0x58, 0, 4}, {62, 0x58, 0, 6}, {64, 0x58, 0, 8}, /* 802.11 HyperLan 2 */ {100, 0x5b, 0, 8}, {102, 0x5b, 0, 10}, {104, 0x5c, 0, 0}, {108, 0x5c, 0, 4}, {110, 0x5c, 0, 6}, {112, 0x5c, 0, 8}, {116, 0x5d, 0, 0}, {118, 0x5d, 0, 2}, {120, 0x5d, 0, 4}, {124, 0x5d, 0, 8}, {126, 0x5d, 0, 10}, {128, 0x5e, 0, 0}, {132, 0x5e, 0, 4}, {134, 0x5e, 0, 6}, {136, 0x5e, 0, 8}, {140, 0x5f, 0, 0}, /* 802.11 UNII */ {149, 0x5f, 0, 9}, {151, 0x5f, 0, 11}, {153, 0x60, 0, 1}, {157, 0x60, 0, 5}, {159, 0x60, 0, 7}, {161, 0x60, 0, 9}, {165, 0x61, 0, 1}, {167, 0x61, 0, 3}, {169, 0x61, 0, 5}, {171, 0x61, 0, 7}, {173, 0x61, 0, 9}, }; /* * RF value list for rt3xxx with Xtal20MHz * Supports: 2.4 GHz (all) (RF3322) */ static const struct rf_channel rf_vals_3x_xtal20[] = { {1, 0xE2, 2, 0x14}, {2, 0xE3, 2, 0x14}, {3, 0xE4, 2, 0x14}, {4, 0xE5, 2, 0x14}, {5, 0xE6, 2, 0x14}, {6, 0xE7, 2, 0x14}, {7, 0xE8, 2, 0x14}, {8, 0xE9, 2, 0x14}, {9, 0xEA, 2, 0x14}, {10, 0xEB, 2, 0x14}, {11, 0xEC, 2, 0x14}, {12, 0xED, 2, 0x14}, {13, 0xEE, 2, 0x14}, {14, 0xF0, 2, 0x18}, }; static const struct rf_channel rf_vals_3853[] = { {1, 241, 6, 2}, {2, 241, 6, 7}, {3, 242, 6, 2}, {4, 242, 6, 7}, {5, 243, 6, 2}, {6, 243, 6, 7}, {7, 244, 6, 2}, {8, 244, 6, 7}, {9, 245, 6, 2}, {10, 245, 6, 7}, {11, 246, 6, 2}, {12, 246, 6, 7}, {13, 247, 6, 2}, {14, 248, 6, 4}, {36, 0x56, 8, 4}, {38, 0x56, 8, 6}, {40, 0x56, 8, 8}, {44, 0x57, 8, 0}, {46, 0x57, 8, 2}, {48, 0x57, 8, 4}, {52, 0x57, 8, 8}, {54, 0x57, 8, 10}, {56, 0x58, 8, 0}, {60, 0x58, 8, 4}, {62, 0x58, 8, 6}, {64, 0x58, 8, 8}, {100, 0x5b, 8, 8}, {102, 0x5b, 8, 10}, {104, 0x5c, 8, 0}, {108, 0x5c, 8, 4}, {110, 0x5c, 8, 6}, {112, 0x5c, 8, 8}, {114, 0x5c, 8, 10}, {116, 0x5d, 8, 0}, {118, 0x5d, 8, 2}, {120, 0x5d, 8, 4}, {124, 0x5d, 8, 8}, {126, 0x5d, 8, 10}, {128, 0x5e, 8, 0}, {132, 0x5e, 8, 4}, {134, 0x5e, 8, 6}, {136, 0x5e, 8, 8}, {140, 0x5f, 8, 0}, {149, 0x5f, 8, 9}, {151, 0x5f, 8, 11}, {153, 0x60, 8, 1}, 
{157, 0x60, 8, 5}, {159, 0x60, 8, 7}, {161, 0x60, 8, 9}, {165, 0x61, 8, 1}, {167, 0x61, 8, 3}, {169, 0x61, 8, 5}, {171, 0x61, 8, 7}, {173, 0x61, 8, 9}, }; static const struct rf_channel rf_vals_5592_xtal20[] = { /* Channel, N, K, mod, R */ {1, 482, 4, 10, 3}, {2, 483, 4, 10, 3}, {3, 484, 4, 10, 3}, {4, 485, 4, 10, 3}, {5, 486, 4, 10, 3}, {6, 487, 4, 10, 3}, {7, 488, 4, 10, 3}, {8, 489, 4, 10, 3}, {9, 490, 4, 10, 3}, {10, 491, 4, 10, 3}, {11, 492, 4, 10, 3}, {12, 493, 4, 10, 3}, {13, 494, 4, 10, 3}, {14, 496, 8, 10, 3}, {36, 172, 8, 12, 1}, {38, 173, 0, 12, 1}, {40, 173, 4, 12, 1}, {42, 173, 8, 12, 1}, {44, 174, 0, 12, 1}, {46, 174, 4, 12, 1}, {48, 174, 8, 12, 1}, {50, 175, 0, 12, 1}, {52, 175, 4, 12, 1}, {54, 175, 8, 12, 1}, {56, 176, 0, 12, 1}, {58, 176, 4, 12, 1}, {60, 176, 8, 12, 1}, {62, 177, 0, 12, 1}, {64, 177, 4, 12, 1}, {100, 183, 4, 12, 1}, {102, 183, 8, 12, 1}, {104, 184, 0, 12, 1}, {106, 184, 4, 12, 1}, {108, 184, 8, 12, 1}, {110, 185, 0, 12, 1}, {112, 185, 4, 12, 1}, {114, 185, 8, 12, 1}, {116, 186, 0, 12, 1}, {118, 186, 4, 12, 1}, {120, 186, 8, 12, 1}, {122, 187, 0, 12, 1}, {124, 187, 4, 12, 1}, {126, 187, 8, 12, 1}, {128, 188, 0, 12, 1}, {130, 188, 4, 12, 1}, {132, 188, 8, 12, 1}, {134, 189, 0, 12, 1}, {136, 189, 4, 12, 1}, {138, 189, 8, 12, 1}, {140, 190, 0, 12, 1}, {149, 191, 6, 12, 1}, {151, 191, 10, 12, 1}, {153, 192, 2, 12, 1}, {155, 192, 6, 12, 1}, {157, 192, 10, 12, 1}, {159, 193, 2, 12, 1}, {161, 193, 6, 12, 1}, {165, 194, 2, 12, 1}, {184, 164, 0, 12, 1}, {188, 164, 4, 12, 1}, {192, 165, 8, 12, 1}, {196, 166, 0, 12, 1}, }; static const struct rf_channel rf_vals_5592_xtal40[] = { /* Channel, N, K, mod, R */ {1, 241, 2, 10, 3}, {2, 241, 7, 10, 3}, {3, 242, 2, 10, 3}, {4, 242, 7, 10, 3}, {5, 243, 2, 10, 3}, {6, 243, 7, 10, 3}, {7, 244, 2, 10, 3}, {8, 244, 7, 10, 3}, {9, 245, 2, 10, 3}, {10, 245, 7, 10, 3}, {11, 246, 2, 10, 3}, {12, 246, 7, 10, 3}, {13, 247, 2, 10, 3}, {14, 248, 4, 10, 3}, {36, 86, 4, 12, 1}, {38, 86, 6, 12, 1}, {40, 86, 8, 12, 1}, {42, 86, 10, 12, 1}, {44, 87, 0, 12, 1}, {46, 87, 2, 12, 1}, {48, 87, 4, 12, 1}, {50, 87, 6, 12, 1}, {52, 87, 8, 12, 1}, {54, 87, 10, 12, 1}, {56, 88, 0, 12, 1}, {58, 88, 2, 12, 1}, {60, 88, 4, 12, 1}, {62, 88, 6, 12, 1}, {64, 88, 8, 12, 1}, {100, 91, 8, 12, 1}, {102, 91, 10, 12, 1}, {104, 92, 0, 12, 1}, {106, 92, 2, 12, 1}, {108, 92, 4, 12, 1}, {110, 92, 6, 12, 1}, {112, 92, 8, 12, 1}, {114, 92, 10, 12, 1}, {116, 93, 0, 12, 1}, {118, 93, 2, 12, 1}, {120, 93, 4, 12, 1}, {122, 93, 6, 12, 1}, {124, 93, 8, 12, 1}, {126, 93, 10, 12, 1}, {128, 94, 0, 12, 1}, {130, 94, 2, 12, 1}, {132, 94, 4, 12, 1}, {134, 94, 6, 12, 1}, {136, 94, 8, 12, 1}, {138, 94, 10, 12, 1}, {140, 95, 0, 12, 1}, {149, 95, 9, 12, 1}, {151, 95, 11, 12, 1}, {153, 96, 1, 12, 1}, {155, 96, 3, 12, 1}, {157, 96, 5, 12, 1}, {159, 96, 7, 12, 1}, {161, 96, 9, 12, 1}, {165, 97, 1, 12, 1}, {184, 82, 0, 12, 1}, {188, 82, 4, 12, 1}, {192, 82, 8, 12, 1}, {196, 83, 0, 12, 1}, }; static const struct rf_channel rf_vals_7620[] = { {1, 0x50, 0x99, 0x99, 1}, {2, 0x50, 0x44, 0x44, 2}, {3, 0x50, 0xEE, 0xEE, 2}, {4, 0x50, 0x99, 0x99, 3}, {5, 0x51, 0x44, 0x44, 0}, {6, 0x51, 0xEE, 0xEE, 0}, {7, 0x51, 0x99, 0x99, 1}, {8, 0x51, 0x44, 0x44, 2}, {9, 0x51, 0xEE, 0xEE, 2}, {10, 0x51, 0x99, 0x99, 3}, {11, 0x52, 0x44, 0x44, 0}, {12, 0x52, 0xEE, 0xEE, 0}, {13, 0x52, 0x99, 0x99, 1}, {14, 0x52, 0x33, 0x33, 3}, }; static int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; s8 *default_power1; s8 *default_power2; s8 
*default_power3; unsigned int i, tx_chains, rx_chains; u32 reg; /* * Disable powersaving as default. */ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; /* * Change default retry settings to values corresponding more closely * to rate[0].count setting of minstrel rate control algorithm. */ rt2x00dev->hw->wiphy->retry_short = 2; rt2x00dev->hw->wiphy->retry_long = 2; /* * Initialize all hw fields. */ ieee80211_hw_set(rt2x00dev->hw, REPORTS_TX_ACK_STATUS); ieee80211_hw_set(rt2x00dev->hw, AMPDU_AGGREGATION); ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK); ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM); ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS); /* * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING for USB devices * unless we are capable of sending the buffered frames out after the * DTIM transmission using rt2x00lib_beacondone. This will send out * multicast and broadcast traffic immediately instead of buffering it * infinitly and thus dropping it after some time. */ if (!rt2x00_is_usb(rt2x00dev)) ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING); ieee80211_hw_set(rt2x00dev->hw, MFP_CAPABLE); SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2800_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); /* * As rt2800 has a global fallback table we cannot specify * more then one tx rate per frame but since the hw will * try several rates (based on the fallback table) we should * initialize max_report_rates to the maximum number of rates * we are going to try. Otherwise mac80211 will truncate our * reported tx rates and the rc algortihm will end up with * incorrect data. */ rt2x00dev->hw->max_rates = 1; rt2x00dev->hw->max_report_rates = 7; rt2x00dev->hw->max_rate_tries = 1; /* * Initialize hw_mode information. */ spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; switch (rt2x00dev->chip.rf) { case RF2720: case RF2820: spec->num_channels = 14; spec->channels = rf_vals; break; case RF2750: case RF2850: spec->num_channels = ARRAY_SIZE(rf_vals); spec->channels = rf_vals; break; case RF2020: case RF3020: case RF3021: case RF3022: case RF3070: case RF3290: case RF3320: case RF3322: case RF5350: case RF5360: case RF5362: case RF5370: case RF5372: case RF5390: case RF5392: spec->num_channels = 14; if (rt2800_clk_is_20mhz(rt2x00dev)) spec->channels = rf_vals_3x_xtal20; else spec->channels = rf_vals_3x; break; case RF7620: spec->num_channels = ARRAY_SIZE(rf_vals_7620); spec->channels = rf_vals_7620; break; case RF3052: case RF3053: spec->num_channels = ARRAY_SIZE(rf_vals_3x); spec->channels = rf_vals_3x; break; case RF3853: spec->num_channels = ARRAY_SIZE(rf_vals_3853); spec->channels = rf_vals_3853; break; case RF5592: reg = rt2800_register_read(rt2x00dev, MAC_DEBUG_INDEX); if (rt2x00_get_field32(reg, MAC_DEBUG_INDEX_XTAL)) { spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal40); spec->channels = rf_vals_5592_xtal40; } else { spec->num_channels = ARRAY_SIZE(rf_vals_5592_xtal20); spec->channels = rf_vals_5592_xtal20; } break; } if (WARN_ON_ONCE(!spec->channels)) return -ENODEV; spec->supported_bands = SUPPORT_BAND_2GHZ; if (spec->num_channels > 14) spec->supported_bands |= SUPPORT_BAND_5GHZ; /* * Initialize HT information. 
*/ if (!rt2x00_rf(rt2x00dev, RF2020)) spec->ht.ht_supported = true; else spec->ht.ht_supported = false; spec->ht.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40; tx_chains = rt2x00dev->default_ant.tx_chain_num; rx_chains = rt2x00dev->default_ant.rx_chain_num; if (tx_chains >= 2) spec->ht.cap |= IEEE80211_HT_CAP_TX_STBC; spec->ht.cap |= rx_chains << IEEE80211_HT_CAP_RX_STBC_SHIFT; spec->ht.ampdu_factor = (rx_chains > 1) ? 3 : 2; spec->ht.ampdu_density = 4; spec->ht.mcs.tx_params = IEEE80211_HT_MCS_TX_DEFINED; if (tx_chains != rx_chains) { spec->ht.mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; spec->ht.mcs.tx_params |= (tx_chains - 1) << IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT; } switch (rx_chains) { case 3: spec->ht.mcs.rx_mask[2] = 0xff; fallthrough; case 2: spec->ht.mcs.rx_mask[1] = 0xff; fallthrough; case 1: spec->ht.mcs.rx_mask[0] = 0xff; spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */ break; } /* * Create channel information and survey arrays */ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; rt2x00dev->chan_survey = kcalloc(spec->num_channels, sizeof(struct rt2x00_chan_survey), GFP_KERNEL); if (!rt2x00dev->chan_survey) { kfree(info); return -ENOMEM; } spec->channels_info = info; default_power1 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1); default_power2 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2); if (rt2x00dev->default_ant.tx_chain_num > 2) default_power3 = rt2800_eeprom_addr(rt2x00dev, EEPROM_EXT_TXPOWER_BG3); else default_power3 = NULL; for (i = 0; i < 14; i++) { info[i].default_power1 = default_power1[i]; info[i].default_power2 = default_power2[i]; if (default_power3) info[i].default_power3 = default_power3[i]; } if (spec->num_channels > 14) { default_power1 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1); default_power2 = rt2800_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); if (rt2x00dev->default_ant.tx_chain_num > 2) default_power3 = rt2800_eeprom_addr(rt2x00dev, EEPROM_EXT_TXPOWER_A3); else default_power3 = NULL; for (i = 14; i < spec->num_channels; i++) { info[i].default_power1 = default_power1[i - 14]; info[i].default_power2 = default_power2[i - 14]; if (default_power3) info[i].default_power3 = default_power3[i - 14]; } } switch (rt2x00dev->chip.rf) { case RF2020: case RF3020: case RF3021: case RF3022: case RF3320: case RF3052: case RF3053: case RF3070: case RF3290: case RF3853: case RF5350: case RF5360: case RF5362: case RF5370: case RF5372: case RF5390: case RF5392: case RF5592: case RF7620: __set_bit(CAPABILITY_VCO_RECALIBRATION, &rt2x00dev->cap_flags); break; } return 0; } static int rt2800_probe_rt(struct rt2x00_dev *rt2x00dev) { u32 reg; u32 rt; u32 rev; if (rt2x00_rt(rt2x00dev, RT3290)) reg = rt2800_register_read(rt2x00dev, MAC_CSR0_3290); else reg = rt2800_register_read(rt2x00dev, MAC_CSR0); rt = rt2x00_get_field32(reg, MAC_CSR0_CHIPSET); rev = rt2x00_get_field32(reg, MAC_CSR0_REVISION); switch (rt) { case RT2860: case RT2872: case RT2883: case RT3070: case RT3071: case RT3090: case RT3290: case RT3352: case RT3390: case RT3572: case RT3593: case RT3883: case RT5350: case RT5390: case RT5392: case RT5592: break; default: rt2x00_err(rt2x00dev, "Invalid RT chipset 0x%04x, rev %04x detected\n", rt, rev); return -ENODEV; } if (rt == RT5390 && rt2x00_is_soc(rt2x00dev)) rt = RT6352; rt2x00_set_rt(rt2x00dev, rt, rev); return 0; } int rt2800_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; u32 reg; retval = rt2800_probe_rt(rt2x00dev); if (retval) return 
retval; /* * Allocate eeprom data. */ retval = rt2800_validate_eeprom(rt2x00dev); if (retval) return retval; retval = rt2800_init_eeprom(rt2x00dev); if (retval) return retval; /* * Enable rfkill polling by setting GPIO direction of the * rfkill switch GPIO pin correctly. */ reg = rt2800_register_read(rt2x00dev, GPIO_CTRL); rt2x00_set_field32(&reg, GPIO_CTRL_DIR2, 1); rt2800_register_write(rt2x00dev, GPIO_CTRL, reg); /* * Initialize hw specifications. */ retval = rt2800_probe_hw_mode(rt2x00dev); if (retval) return retval; /* * Set device capabilities. */ __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags); __set_bit(CAPABILITY_CONTROL_FILTER_PSPOLL, &rt2x00dev->cap_flags); if (!rt2x00_is_usb(rt2x00dev)) __set_bit(CAPABILITY_PRE_TBTT_INTERRUPT, &rt2x00dev->cap_flags); /* * Set device requirements. */ if (!rt2x00_is_soc(rt2x00dev)) __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags); __set_bit(REQUIRE_L2PAD, &rt2x00dev->cap_flags); __set_bit(REQUIRE_TXSTATUS_FIFO, &rt2x00dev->cap_flags); if (!rt2800_hwcrypt_disabled(rt2x00dev)) __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags); __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags); __set_bit(REQUIRE_HT_TX_DESC, &rt2x00dev->cap_flags); if (rt2x00_is_usb(rt2x00dev)) __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags); else { __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags); __set_bit(REQUIRE_TASKLET_CONTEXT, &rt2x00dev->cap_flags); } if (modparam_watchdog) { __set_bit(CAPABILITY_RESTART_HW, &rt2x00dev->cap_flags); rt2x00dev->link.watchdog_interval = msecs_to_jiffies(100); } else { rt2x00dev->link.watchdog_disabled = true; } /* * Set the rssi offset. */ rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; return 0; } EXPORT_SYMBOL_GPL(rt2800_probe_hw); /* * IEEE80211 stack callback functions. */ void rt2800_get_key_seq(struct ieee80211_hw *hw, struct ieee80211_key_conf *key, struct ieee80211_key_seq *seq) { struct rt2x00_dev *rt2x00dev = hw->priv; struct mac_iveiv_entry iveiv_entry; u32 offset; if (key->cipher != WLAN_CIPHER_SUITE_TKIP) return; offset = MAC_IVEIV_ENTRY(key->hw_key_idx); rt2800_register_multiread(rt2x00dev, offset, &iveiv_entry, sizeof(iveiv_entry)); memcpy(&seq->tkip.iv16, &iveiv_entry.iv[0], 2); memcpy(&seq->tkip.iv32, &iveiv_entry.iv[4], 4); } EXPORT_SYMBOL_GPL(rt2800_get_key_seq); int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { struct rt2x00_dev *rt2x00dev = hw->priv; u32 reg; bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD); reg = rt2800_register_read(rt2x00dev, TX_RTS_CFG); rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value); rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg); reg = rt2800_register_read(rt2x00dev, CCK_PROT_CFG); rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled); rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, OFDM_PROT_CFG); rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled); rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, MM20_PROT_CFG); rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled); rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, MM40_PROT_CFG); rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled); rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, GF20_PROT_CFG); rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled); rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg); reg = rt2800_register_read(rt2x00dev, GF40_PROT_CFG); rt2x00_set_field32(&reg, 
GF40_PROT_CFG_RTS_TH_EN, enabled); rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg); return 0; } EXPORT_SYMBOL_GPL(rt2800_set_rts_threshold); int rt2800_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue_idx, const struct ieee80211_tx_queue_params *params) { struct rt2x00_dev *rt2x00dev = hw->priv; struct data_queue *queue; struct rt2x00_field32 field; int retval; u32 reg; u32 offset; /* * First pass the configuration through rt2x00lib, that will * update the queue settings and validate the input. After that * we are free to update the registers based on the value * in the queue parameter. */ retval = rt2x00mac_conf_tx(hw, vif, link_id, queue_idx, params); if (retval) return retval; /* * We only need to perform additional register initialization * for WMM queues/ */ if (queue_idx >= 4) return 0; queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); /* Update WMM TXOP register */ offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2))); field.bit_offset = (queue_idx & 1) * 16; field.bit_mask = 0xffff << field.bit_offset; reg = rt2800_register_read(rt2x00dev, offset); rt2x00_set_field32(&reg, field, queue->txop); rt2800_register_write(rt2x00dev, offset, reg); /* Update WMM registers */ field.bit_offset = queue_idx * 4; field.bit_mask = 0xf << field.bit_offset; reg = rt2800_register_read(rt2x00dev, WMM_AIFSN_CFG); rt2x00_set_field32(&reg, field, queue->aifs); rt2800_register_write(rt2x00dev, WMM_AIFSN_CFG, reg); reg = rt2800_register_read(rt2x00dev, WMM_CWMIN_CFG); rt2x00_set_field32(&reg, field, queue->cw_min); rt2800_register_write(rt2x00dev, WMM_CWMIN_CFG, reg); reg = rt2800_register_read(rt2x00dev, WMM_CWMAX_CFG); rt2x00_set_field32(&reg, field, queue->cw_max); rt2800_register_write(rt2x00dev, WMM_CWMAX_CFG, reg); /* Update EDCA registers */ offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx); reg = rt2800_register_read(rt2x00dev, offset); rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop); rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs); rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min); rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max); rt2800_register_write(rt2x00dev, offset, reg); return 0; } EXPORT_SYMBOL_GPL(rt2800_conf_tx); u64 rt2800_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; u64 tsf; u32 reg; reg = rt2800_register_read(rt2x00dev, TSF_TIMER_DW1); tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32; reg = rt2800_register_read(rt2x00dev, TSF_TIMER_DW0); tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD); return tsf; } EXPORT_SYMBOL_GPL(rt2800_get_tsf); int rt2800_ampdu_action(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_ampdu_params *params) { struct ieee80211_sta *sta = params->sta; enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; struct rt2x00_sta *sta_priv = (struct rt2x00_sta *)sta->drv_priv; int ret = 0; /* * Don't allow aggregation for stations the hardware isn't aware * of because tx status reports for frames to an unknown station * always contain wcid=WCID_END+1 and thus we can't distinguish * between multiple stations which leads to unwanted situations * when the hw reorders frames due to aggregation. */ if (sta_priv->wcid > WCID_END) return -ENOSPC; switch (action) { case IEEE80211_AMPDU_RX_START: case IEEE80211_AMPDU_RX_STOP: /* * The hw itself takes care of setting up BlockAck mechanisms. 
	 * So, we only have to allow mac80211 to negotiate a BlockAck
	 * agreement. Once that is done, the hw will BlockAck incoming
	 * AMPDUs without further setup.
	 */
		break;
	case IEEE80211_AMPDU_TX_START:
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		break;
	default:
		rt2x00_warn((struct rt2x00_dev *)hw->priv,
			    "Unknown AMPDU action\n");
	}

	return ret;
}
EXPORT_SYMBOL_GPL(rt2800_ampdu_action);

int rt2800_get_survey(struct ieee80211_hw *hw, int idx,
		      struct survey_info *survey)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct rt2x00_chan_survey *chan_survey = &rt2x00dev->chan_survey[idx];
	enum nl80211_band band = NL80211_BAND_2GHZ;

	if (idx >= rt2x00dev->bands[band].n_channels) {
		idx -= rt2x00dev->bands[band].n_channels;
		band = NL80211_BAND_5GHZ;
	}

	if (idx >= rt2x00dev->bands[band].n_channels)
		return -ENOENT;

	if (idx == 0)
		rt2800_update_survey(rt2x00dev);

	survey->channel = &rt2x00dev->bands[band].channels[idx];

	survey->filled = SURVEY_INFO_TIME |
			 SURVEY_INFO_TIME_BUSY |
			 SURVEY_INFO_TIME_EXT_BUSY;

	survey->time = div_u64(chan_survey->time_idle + chan_survey->time_busy,
			       1000);
	survey->time_busy = div_u64(chan_survey->time_busy, 1000);
	survey->time_ext_busy = div_u64(chan_survey->time_ext_busy, 1000);

	if (!(hw->conf.flags & IEEE80211_CONF_OFFCHANNEL))
		survey->filled |= SURVEY_INFO_IN_USE;

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_get_survey);

MODULE_AUTHOR(DRV_PROJECT ", Bartlomiej Zolnierkiewicz");
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2800 library");
MODULE_LICENSE("GPL");
linux-master
drivers/net/wireless/ralink/rt2x00/rt2800lib.c
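The rt2800_conf_tx() routine above packs per-queue WMM parameters into shared registers: two 16-bit TXOP fields per 32-bit register, and four 4-bit AIFSN/CWMIN/CWMAX fields per register. The following is a minimal standalone sketch (not driver code) of that offset and bit-field arithmetic; the WMM_TXOP0_CFG value and the helper names are assumptions for illustration only.

/* Standalone illustration of the WMM register packing used by
 * rt2800_conf_tx() above. Not driver code; the register value is a
 * placeholder and the helper names are hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

#define WMM_TXOP0_CFG	0x1380u	/* assumed base of the two TXOP registers */

struct bitfield32 {
	unsigned int bit_offset;
	uint32_t bit_mask;
};

/* Two queues share one 32-bit TXOP register, 16 bits each. */
static void wmm_txop_field(unsigned int queue_idx,
			   unsigned int *reg_offset, struct bitfield32 *field)
{
	*reg_offset = WMM_TXOP0_CFG + sizeof(uint32_t) * (!!(queue_idx & 2));
	field->bit_offset = (queue_idx & 1) * 16;
	field->bit_mask = 0xffffu << field->bit_offset;
}

/* Four queues share one AIFSN/CWMIN/CWMAX register, 4 bits each. */
static void wmm_nibble_field(unsigned int queue_idx, struct bitfield32 *field)
{
	field->bit_offset = queue_idx * 4;
	field->bit_mask = 0xfu << field->bit_offset;
}

int main(void)
{
	unsigned int q;

	for (q = 0; q < 4; q++) {
		unsigned int off;
		struct bitfield32 txop, nib;

		wmm_txop_field(q, &off, &txop);
		wmm_nibble_field(q, &nib);
		printf("queue %u: TXOP reg 0x%04x bits %u..%u, AIFSN bits %u..%u\n",
		       q, off, txop.bit_offset, txop.bit_offset + 15,
		       nib.bit_offset, nib.bit_offset + 3);
	}
	return 0;
}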
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00lib Abstract: rt2x00 crypto specific routines. */ #include <linux/kernel.h> #include <linux/module.h> #include "rt2x00.h" #include "rt2x00lib.h" enum cipher rt2x00crypto_key_to_cipher(struct ieee80211_key_conf *key) { switch (key->cipher) { case WLAN_CIPHER_SUITE_WEP40: return CIPHER_WEP64; case WLAN_CIPHER_SUITE_WEP104: return CIPHER_WEP128; case WLAN_CIPHER_SUITE_TKIP: return CIPHER_TKIP; case WLAN_CIPHER_SUITE_CCMP: return CIPHER_AES; default: return CIPHER_NONE; } } void rt2x00crypto_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct txentry_desc *txdesc) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_key_conf *hw_key = tx_info->control.hw_key; if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !hw_key) return; __set_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags); txdesc->cipher = rt2x00crypto_key_to_cipher(hw_key); if (hw_key->flags & IEEE80211_KEY_FLAG_PAIRWISE) __set_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags); txdesc->key_idx = hw_key->hw_key_idx; txdesc->iv_offset = txdesc->header_length; txdesc->iv_len = hw_key->iv_len; if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) __set_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags); if (!(hw_key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) __set_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags); } unsigned int rt2x00crypto_tx_overhead(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_key_conf *key = tx_info->control.hw_key; unsigned int overhead = 0; if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || !key) return overhead; /* * Extend frame length to include IV/EIV/ICV/MMIC, * note that these lengths should only be added when * mac80211 does not generate it. 
*/ overhead += key->icv_len; if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_IV)) overhead += key->iv_len; if (!(key->flags & IEEE80211_KEY_FLAG_GENERATE_MMIC)) { if (key->cipher == WLAN_CIPHER_SUITE_TKIP) overhead += 8; } return overhead; } void rt2x00crypto_tx_copy_iv(struct sk_buff *skb, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); if (unlikely(!txdesc->iv_len)) return; /* Copy IV/EIV data */ memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len); } void rt2x00crypto_tx_remove_iv(struct sk_buff *skb, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); if (unlikely(!txdesc->iv_len)) return; /* Copy IV/EIV data */ memcpy(skbdesc->iv, skb->data + txdesc->iv_offset, txdesc->iv_len); /* Move ieee80211 header */ memmove(skb->data + txdesc->iv_len, skb->data, txdesc->iv_offset); /* Pull buffer to correct size */ skb_pull(skb, txdesc->iv_len); txdesc->length -= txdesc->iv_len; /* IV/EIV data has officially been stripped */ skbdesc->flags |= SKBDESC_IV_STRIPPED; } void rt2x00crypto_tx_insert_iv(struct sk_buff *skb, unsigned int header_length) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); const unsigned int iv_len = ((!!(skbdesc->iv[0])) * 4) + ((!!(skbdesc->iv[1])) * 4); if (!(skbdesc->flags & SKBDESC_IV_STRIPPED)) return; skb_push(skb, iv_len); /* Move ieee80211 header */ memmove(skb->data, skb->data + iv_len, header_length); /* Copy IV/EIV data */ memcpy(skb->data + header_length, skbdesc->iv, iv_len); /* IV/EIV data has returned into the frame */ skbdesc->flags &= ~SKBDESC_IV_STRIPPED; } void rt2x00crypto_rx_insert_iv(struct sk_buff *skb, unsigned int header_length, struct rxdone_entry_desc *rxdesc) { unsigned int payload_len = rxdesc->size - header_length; unsigned int align = ALIGN_SIZE(skb, header_length); unsigned int iv_len; unsigned int icv_len; unsigned int transfer = 0; /* * WEP64/WEP128: Provides IV & ICV * TKIP: Provides IV/EIV & ICV * AES: Provies IV/EIV & ICV */ switch (rxdesc->cipher) { case CIPHER_WEP64: case CIPHER_WEP128: iv_len = 4; icv_len = 4; break; case CIPHER_TKIP: iv_len = 8; icv_len = 4; break; case CIPHER_AES: iv_len = 8; icv_len = 8; break; default: /* Unsupport type */ return; } /* * Make room for new data. There are 2 possibilities * either the alignment is already present between * the 802.11 header and payload. In that case we * have to move the header less than the iv_len * since we can use the already available l2pad bytes * for the iv data. * When the alignment must be added manually we must * move the header more then iv_len since we must * make room for the payload move as well. */ if (rxdesc->dev_flags & RXDONE_L2PAD) { skb_push(skb, iv_len - align); skb_put(skb, icv_len); /* Move ieee80211 header */ memmove(skb->data + transfer, skb->data + transfer + (iv_len - align), header_length); transfer += header_length; } else { skb_push(skb, iv_len + align); if (align < icv_len) skb_put(skb, icv_len - align); else if (align > icv_len) skb_trim(skb, rxdesc->size + iv_len + icv_len); /* Move ieee80211 header */ memmove(skb->data + transfer, skb->data + transfer + iv_len + align, header_length); transfer += header_length; } /* Copy IV/EIV data */ memcpy(skb->data + transfer, rxdesc->iv, iv_len); transfer += iv_len; /* * Move payload for alignment purposes. Note that * this is only needed when no l2 padding is present. 
 */
	if (!(rxdesc->dev_flags & RXDONE_L2PAD)) {
		memmove(skb->data + transfer,
			skb->data + transfer + align,
			payload_len);
	}

	/*
	 * NOTE: Always count the payload as transferred,
	 * even when alignment was set to zero. This is required
	 * for determining the correct offset for the ICV data.
	 */
	transfer += payload_len;

	/*
	 * Copy ICV data
	 * AES appends 8 bytes, we can't fill the upper
	 * 4 bytes, but mac80211 doesn't care about what
	 * we provide here anyway and strips it immediately.
	 */
	memcpy(skb->data + transfer, &rxdesc->icv, 4);
	transfer += icv_len;

	/* IV/EIV/ICV has been inserted into frame */
	rxdesc->size = transfer;
	rxdesc->flags &= ~RX_FLAG_IV_STRIPPED;
}
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00crypto.c
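rt2x00crypto_rx_insert_iv() above selects the IV/EIV and ICV lengths per cipher before making room in the received frame (WEP: 4/4, TKIP: 8/4, AES: 8/8). Below is a minimal standalone sketch (not driver code) of that mapping; the enum and helper names are hypothetical.

/* Standalone illustration of the per-cipher IV/ICV lengths used by
 * rt2x00crypto_rx_insert_iv() above. Not driver code.
 */
#include <stdio.h>

enum demo_cipher { DEMO_WEP64, DEMO_WEP128, DEMO_TKIP, DEMO_AES };

struct iv_icv_len {
	unsigned int iv_len;
	unsigned int icv_len;
};

static int demo_cipher_lengths(enum demo_cipher cipher, struct iv_icv_len *len)
{
	switch (cipher) {
	case DEMO_WEP64:
	case DEMO_WEP128:
		len->iv_len = 4;	/* IV */
		len->icv_len = 4;	/* ICV */
		return 0;
	case DEMO_TKIP:
		len->iv_len = 8;	/* IV + EIV */
		len->icv_len = 4;
		return 0;
	case DEMO_AES:
		len->iv_len = 8;	/* IV + EIV */
		len->icv_len = 8;	/* MIC; the driver only copies 4 bytes */
		return 0;
	}
	return -1;			/* unsupported cipher */
}

int main(void)
{
	struct iv_icv_len len;

	if (demo_cipher_lengths(DEMO_TKIP, &len) == 0)
		printf("TKIP: iv_len=%u icv_len=%u\n", len.iv_len, len.icv_len);
	return 0;
}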
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00pci Abstract: rt2x00 generic pci device routines. */ #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/slab.h> #include "rt2x00.h" #include "rt2x00pci.h" /* * PCI driver handlers. */ static void rt2x00pci_free_reg(struct rt2x00_dev *rt2x00dev) { kfree(rt2x00dev->rf); rt2x00dev->rf = NULL; kfree(rt2x00dev->eeprom); rt2x00dev->eeprom = NULL; if (rt2x00dev->csr.base) { iounmap(rt2x00dev->csr.base); rt2x00dev->csr.base = NULL; } } static int rt2x00pci_alloc_reg(struct rt2x00_dev *rt2x00dev) { struct pci_dev *pci_dev = to_pci_dev(rt2x00dev->dev); rt2x00dev->csr.base = pci_ioremap_bar(pci_dev, 0); if (!rt2x00dev->csr.base) goto exit; rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); if (!rt2x00dev->eeprom) goto exit; rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); if (!rt2x00dev->rf) goto exit; return 0; exit: rt2x00_probe_err("Failed to allocate registers\n"); rt2x00pci_free_reg(rt2x00dev); return -ENOMEM; } int rt2x00pci_probe(struct pci_dev *pci_dev, const struct rt2x00_ops *ops) { struct ieee80211_hw *hw; struct rt2x00_dev *rt2x00dev; int retval; u16 chip; retval = pci_enable_device(pci_dev); if (retval) { rt2x00_probe_err("Enable device failed\n"); return retval; } retval = pci_request_regions(pci_dev, pci_name(pci_dev)); if (retval) { rt2x00_probe_err("PCI request regions failed\n"); goto exit_disable_device; } pci_set_master(pci_dev); if (pci_set_mwi(pci_dev)) rt2x00_probe_err("MWI not available\n"); if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) { rt2x00_probe_err("PCI DMA not supported\n"); retval = -EIO; goto exit_release_regions; } hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); if (!hw) { rt2x00_probe_err("Failed to allocate hardware\n"); retval = -ENOMEM; goto exit_release_regions; } pci_set_drvdata(pci_dev, hw); rt2x00dev = hw->priv; rt2x00dev->dev = &pci_dev->dev; rt2x00dev->ops = ops; rt2x00dev->hw = hw; rt2x00dev->irq = pci_dev->irq; rt2x00dev->name = ops->name; if (pci_is_pcie(pci_dev)) rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCIE); else rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI); retval = rt2x00pci_alloc_reg(rt2x00dev); if (retval) goto exit_free_device; /* * Because rt3290 chip use different efuse offset to read efuse data. * So before read efuse it need to indicate it is the * rt3290 or not. */ pci_read_config_word(pci_dev, PCI_DEVICE_ID, &chip); rt2x00dev->chip.rt = chip; retval = rt2x00lib_probe_dev(rt2x00dev); if (retval) goto exit_free_reg; return 0; exit_free_reg: rt2x00pci_free_reg(rt2x00dev); exit_free_device: ieee80211_free_hw(hw); exit_release_regions: pci_clear_mwi(pci_dev); pci_release_regions(pci_dev); exit_disable_device: pci_disable_device(pci_dev); return retval; } EXPORT_SYMBOL_GPL(rt2x00pci_probe); void rt2x00pci_remove(struct pci_dev *pci_dev) { struct ieee80211_hw *hw = pci_get_drvdata(pci_dev); struct rt2x00_dev *rt2x00dev = hw->priv; /* * Free all allocated data. */ rt2x00lib_remove_dev(rt2x00dev); rt2x00pci_free_reg(rt2x00dev); ieee80211_free_hw(hw); /* * Free the PCI device data. 
 */
	pci_clear_mwi(pci_dev);
	pci_disable_device(pci_dev);
	pci_release_regions(pci_dev);
}
EXPORT_SYMBOL_GPL(rt2x00pci_remove);

static int __maybe_unused rt2x00pci_suspend(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_suspend(rt2x00dev);
}

static int __maybe_unused rt2x00pci_resume(struct device *dev)
{
	struct ieee80211_hw *hw = dev_get_drvdata(dev);
	struct rt2x00_dev *rt2x00dev = hw->priv;

	return rt2x00lib_resume(rt2x00dev);
}

SIMPLE_DEV_PM_OPS(rt2x00pci_pm_ops, rt2x00pci_suspend, rt2x00pci_resume);
EXPORT_SYMBOL_GPL(rt2x00pci_pm_ops);

/*
 * rt2x00pci module information.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("rt2x00 pci library");
MODULE_LICENSE("GPL");
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00pci.c
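rt2x00pci_probe() above relies on the usual reverse-order goto unwinding for error handling: each acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far. A generic standalone sketch of the pattern follows (not driver code; the resource names are hypothetical).

/* Standalone illustration of the goto-based unwinding pattern used by
 * rt2x00pci_probe() above. Not driver code; resources are stand-ins.
 */
#include <stdio.h>

static int acquire_a(void) { return 0; }	/* e.g. enable the device */
static void release_a(void) { }
static int acquire_b(void) { return 0; }	/* e.g. request the regions */
static void release_b(void) { }
static int acquire_c(void) { return -1; }	/* e.g. map registers; fails here */

static int demo_probe(void)
{
	int retval;

	retval = acquire_a();
	if (retval)
		return retval;

	retval = acquire_b();
	if (retval)
		goto exit_release_a;

	retval = acquire_c();
	if (retval)
		goto exit_release_b;

	return 0;

	/* Unwind in reverse acquisition order; labels fall through. */
exit_release_b:
	release_b();
exit_release_a:
	release_a();
	return retval;
}

int main(void)
{
	printf("demo_probe() = %d\n", demo_probe());
	return 0;
}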
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00mac Abstract: rt2x00 generic mac80211 routines. */ #include <linux/kernel.h> #include <linux/module.h> #include "rt2x00.h" #include "rt2x00lib.h" static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev, struct data_queue *queue, struct sk_buff *frag_skb) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb); struct ieee80211_tx_info *rts_info; struct sk_buff *skb; unsigned int data_length; int retval = 0; if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) data_length = sizeof(struct ieee80211_cts); else data_length = sizeof(struct ieee80211_rts); skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom); if (unlikely(!skb)) { rt2x00_warn(rt2x00dev, "Failed to create RTS/CTS frame\n"); return -ENOMEM; } skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom); skb_put(skb, data_length); /* * Copy TX information over from original frame to * RTS/CTS frame. Note that we set the no encryption flag * since we don't want this frame to be encrypted. * RTS frames should be acked, while CTS-to-self frames * should not. The ready for TX flag is cleared to prevent * it being automatically send when the descriptor is * written to the hardware. */ memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb)); rts_info = IEEE80211_SKB_CB(skb); rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS; rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT; if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) rts_info->flags |= IEEE80211_TX_CTL_NO_ACK; else rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK; /* Disable hardware encryption */ rts_info->control.hw_key = NULL; /* * RTS/CTS frame should use the length of the frame plus any * encryption overhead that will be added by the hardware. */ data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb); if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif, frag_skb->data, data_length, tx_info, (struct ieee80211_cts *)(skb->data)); else ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif, frag_skb->data, data_length, tx_info, (struct ieee80211_rts *)(skb->data)); retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true); if (retval) { dev_kfree_skb_any(skb); rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n"); } return retval; } void rt2x00mac_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct rt2x00_dev *rt2x00dev = hw->priv; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); enum data_queue_qid qid = skb_get_queue_mapping(skb); struct data_queue *queue = NULL; /* * Mac80211 might be calling this function while we are trying * to remove the device or perhaps suspending it. * Note that we can only stop the TX queues inside the TX path * due to possible race conditions in mac80211. */ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) goto exit_free_skb; /* * Use the ATIM queue if appropriate and present. */ if (tx_info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM && rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) qid = QID_ATIM; queue = rt2x00queue_get_tx_queue(rt2x00dev, qid); if (unlikely(!queue)) { rt2x00_err(rt2x00dev, "Attempt to send packet over invalid queue %d\n" "Please file bug report to %s\n", qid, DRV_PROJECT); goto exit_free_skb; } /* * If CTS/RTS is required. 
create and queue that frame first. * Make sure we have at least enough entries available to send * this CTS/RTS frame as well as the data frame. * Note that when the driver has set the set_rts_threshold() * callback function it doesn't need software generation of * either RTS or CTS-to-self frame and handles everything * inside the hardware. */ if (!rt2x00dev->ops->hw->set_rts_threshold && (tx_info->control.rates[0].flags & (IEEE80211_TX_RC_USE_RTS_CTS | IEEE80211_TX_RC_USE_CTS_PROTECT))) { if (rt2x00queue_available(queue) <= 1) { /* * Recheck for full queue under lock to avoid race * conditions with rt2x00lib_txdone(). */ spin_lock(&queue->tx_lock); if (rt2x00queue_threshold(queue)) rt2x00queue_pause_queue(queue); spin_unlock(&queue->tx_lock); goto exit_free_skb; } if (rt2x00mac_tx_rts_cts(rt2x00dev, queue, skb)) goto exit_free_skb; } if (unlikely(rt2x00queue_write_tx_frame(queue, skb, control->sta, false))) goto exit_free_skb; return; exit_free_skb: ieee80211_free_txskb(hw, skb); } EXPORT_SYMBOL_GPL(rt2x00mac_tx); int rt2x00mac_start(struct ieee80211_hw *hw) { struct rt2x00_dev *rt2x00dev = hw->priv; if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return 0; if (test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) { /* * This is special case for ieee80211_restart_hw(), otherwise * mac80211 never call start() two times in row without stop(); */ set_bit(DEVICE_STATE_RESET, &rt2x00dev->flags); rt2x00dev->ops->lib->pre_reset_hw(rt2x00dev); rt2x00lib_stop(rt2x00dev); } return rt2x00lib_start(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00mac_start); void rt2x00mac_stop(struct ieee80211_hw *hw) { struct rt2x00_dev *rt2x00dev = hw->priv; if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return; rt2x00lib_stop(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00mac_stop); void rt2x00mac_reconfig_complete(struct ieee80211_hw *hw, enum ieee80211_reconfig_type reconfig_type) { struct rt2x00_dev *rt2x00dev = hw->priv; if (reconfig_type == IEEE80211_RECONFIG_TYPE_RESTART) clear_bit(DEVICE_STATE_RESET, &rt2x00dev->flags); } EXPORT_SYMBOL_GPL(rt2x00mac_reconfig_complete); int rt2x00mac_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2x00_intf *intf = vif_to_intf(vif); struct data_queue *queue = rt2x00dev->bcn; struct queue_entry *entry = NULL; unsigned int i; /* * Don't allow interfaces to be added * the device has disappeared. */ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || !test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) return -ENODEV; /* * Loop through all beacon queues to find a free * entry. Since there are as much beacon entries * as the maximum interfaces, this search shouldn't * fail. */ for (i = 0; i < queue->limit; i++) { entry = &queue->entries[i]; if (!test_and_set_bit(ENTRY_BCN_ASSIGNED, &entry->flags)) break; } if (unlikely(i == queue->limit)) return -ENOBUFS; /* * We are now absolutely sure the interface can be created, * increase interface count and start initialization. */ if (vif->type == NL80211_IFTYPE_AP) rt2x00dev->intf_ap_count++; else rt2x00dev->intf_sta_count++; mutex_init(&intf->beacon_skb_mutex); intf->beacon = entry; /* * The MAC address must be configured after the device * has been initialized. Otherwise the device can reset * the MAC registers. * The BSSID address must only be configured in AP mode, * however we should not send an empty BSSID address for * STA interfaces at this time, since this can cause * invalid behavior in the device. 
*/ rt2x00lib_config_intf(rt2x00dev, intf, vif->type, vif->addr, NULL); /* * Some filters depend on the current working mode. We can force * an update during the next configure_filter() run by mac80211 by * resetting the current packet_filter state. */ rt2x00dev->packet_filter = 0; return 0; } EXPORT_SYMBOL_GPL(rt2x00mac_add_interface); void rt2x00mac_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2x00_intf *intf = vif_to_intf(vif); /* * Don't allow interfaces to be remove while * either the device has disappeared or when * no interface is present. */ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || (vif->type == NL80211_IFTYPE_AP && !rt2x00dev->intf_ap_count) || (vif->type != NL80211_IFTYPE_AP && !rt2x00dev->intf_sta_count)) return; if (vif->type == NL80211_IFTYPE_AP) rt2x00dev->intf_ap_count--; else rt2x00dev->intf_sta_count--; /* * Release beacon entry so it is available for * new interfaces again. */ clear_bit(ENTRY_BCN_ASSIGNED, &intf->beacon->flags); /* * Make sure the bssid and mac address registers * are cleared to prevent false ACKing of frames. */ rt2x00lib_config_intf(rt2x00dev, intf, NL80211_IFTYPE_UNSPECIFIED, NULL, NULL); } EXPORT_SYMBOL_GPL(rt2x00mac_remove_interface); int rt2x00mac_config(struct ieee80211_hw *hw, u32 changed) { struct rt2x00_dev *rt2x00dev = hw->priv; struct ieee80211_conf *conf = &hw->conf; /* * mac80211 might be calling this function while we are trying * to remove the device or perhaps suspending it. */ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return 0; /* * Some configuration parameters (e.g. channel and antenna values) can * only be set when the radio is enabled, but do require the RX to * be off. During this period we should keep link tuning enabled, * if for any reason the link tuner must be reset, this will be * handled by rt2x00lib_config(). */ rt2x00queue_stop_queue(rt2x00dev->rx); /* Do not race with link tuner. */ mutex_lock(&rt2x00dev->conf_mutex); /* * When we've just turned on the radio, we want to reprogram * everything to ensure a consistent state */ rt2x00lib_config(rt2x00dev, conf, changed); /* * After the radio has been enabled we need to configure * the antenna to the default settings. rt2x00lib_config_antenna() * should determine if any action should be taken based on * checking if diversity has been enabled or no antenna changes * have been made since the last configuration change. */ rt2x00lib_config_antenna(rt2x00dev, rt2x00dev->default_ant); mutex_unlock(&rt2x00dev->conf_mutex); /* Turn RX back on */ rt2x00queue_start_queue(rt2x00dev->rx); return 0; } EXPORT_SYMBOL_GPL(rt2x00mac_config); void rt2x00mac_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *total_flags, u64 multicast) { struct rt2x00_dev *rt2x00dev = hw->priv; /* * Mask off any flags we are going to ignore * from the total_flags field. */ *total_flags &= FIF_ALLMULTI | FIF_FCSFAIL | FIF_PLCPFAIL | FIF_CONTROL | FIF_PSPOLL | FIF_OTHER_BSS; /* * Apply some rules to the filters: * - Some filters imply different filters to be set. * - Some things we can't filter out at all. * - Multicast filter seems to kill broadcast traffic so never use it. */ *total_flags |= FIF_ALLMULTI; /* * If the device has a single filter for all control frames, * FIF_CONTROL and FIF_PSPOLL flags imply each other. 
* And if the device has more than one filter for control frames * of different types, but has no a separate filter for PS Poll frames, * FIF_CONTROL flag implies FIF_PSPOLL. */ if (!rt2x00_has_cap_control_filters(rt2x00dev)) { if (*total_flags & FIF_CONTROL || *total_flags & FIF_PSPOLL) *total_flags |= FIF_CONTROL | FIF_PSPOLL; } if (!rt2x00_has_cap_control_filter_pspoll(rt2x00dev)) { if (*total_flags & FIF_CONTROL) *total_flags |= FIF_PSPOLL; } rt2x00dev->packet_filter = *total_flags; rt2x00dev->ops->lib->config_filter(rt2x00dev, *total_flags); } EXPORT_SYMBOL_GPL(rt2x00mac_configure_filter); static void rt2x00mac_set_tim_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rt2x00_intf *intf = vif_to_intf(vif); if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC && vif->type != NL80211_IFTYPE_MESH_POINT) return; set_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags); } int rt2x00mac_set_tim(struct ieee80211_hw *hw, struct ieee80211_sta *sta, bool set) { struct rt2x00_dev *rt2x00dev = hw->priv; if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return 0; ieee80211_iterate_active_interfaces_atomic( rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL, rt2x00mac_set_tim_iter, rt2x00dev); /* queue work to upodate the beacon template */ ieee80211_queue_work(rt2x00dev->hw, &rt2x00dev->intf_work); return 0; } EXPORT_SYMBOL_GPL(rt2x00mac_set_tim); #ifdef CONFIG_RT2X00_LIB_CRYPTO static void memcpy_tkip(struct rt2x00lib_crypto *crypto, u8 *key, u8 key_len) { if (key_len > NL80211_TKIP_DATA_OFFSET_ENCR_KEY) memcpy(crypto->key, &key[NL80211_TKIP_DATA_OFFSET_ENCR_KEY], sizeof(crypto->key)); if (key_len > NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY) memcpy(crypto->tx_mic, &key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], sizeof(crypto->tx_mic)); if (key_len > NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY) memcpy(crypto->rx_mic, &key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], sizeof(crypto->rx_mic)); } int rt2x00mac_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, struct ieee80211_vif *vif, struct ieee80211_sta *sta, struct ieee80211_key_conf *key) { struct rt2x00_dev *rt2x00dev = hw->priv; int (*set_key) (struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key); struct rt2x00lib_crypto crypto; static const u8 bcast_addr[ETH_ALEN] = { 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, }; struct rt2x00_sta *sta_priv = NULL; if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return 0; /* The hardware can't do MFP */ if (!rt2x00_has_cap_hw_crypto(rt2x00dev) || (sta && sta->mfp)) return -EOPNOTSUPP; /* * To support IBSS RSN, don't program group keys in IBSS, the * hardware will then not attempt to decrypt the frames. */ if (vif->type == NL80211_IFTYPE_ADHOC && !(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) return -EOPNOTSUPP; if (key->keylen > 32) return -ENOSPC; memset(&crypto, 0, sizeof(crypto)); crypto.bssidx = rt2x00lib_get_bssidx(rt2x00dev, vif); crypto.cipher = rt2x00crypto_key_to_cipher(key); if (crypto.cipher == CIPHER_NONE) return -EOPNOTSUPP; if (crypto.cipher == CIPHER_TKIP && rt2x00_is_usb(rt2x00dev)) return -EOPNOTSUPP; crypto.cmd = cmd; if (sta) { crypto.address = sta->addr; sta_priv = sta_to_rt2x00_sta(sta); crypto.wcid = sta_priv->wcid; } else crypto.address = bcast_addr; if (crypto.cipher == CIPHER_TKIP) memcpy_tkip(&crypto, &key->key[0], key->keylen); else memcpy(crypto.key, &key->key[0], key->keylen); /* * Each BSS has a maximum of 4 shared keys. * Shared key index values: * 0) BSS0 key0 * 1) BSS0 key1 * ... * 4) BSS1 key0 * ... * 8) BSS2 key0 * ... 
* Both pairwise as shared key indeces are determined by * driver. This is required because the hardware requires * keys to be assigned in correct order (When key 1 is * provided but key 0 is not, then the key is not found * by the hardware during RX). */ if (cmd == SET_KEY) key->hw_key_idx = 0; if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) set_key = rt2x00dev->ops->lib->config_pairwise_key; else set_key = rt2x00dev->ops->lib->config_shared_key; if (!set_key) return -EOPNOTSUPP; return set_key(rt2x00dev, &crypto, key); } EXPORT_SYMBOL_GPL(rt2x00mac_set_key); #endif /* CONFIG_RT2X00_LIB_CRYPTO */ void rt2x00mac_sw_scan_start(struct ieee80211_hw *hw, struct ieee80211_vif *vif, const u8 *mac_addr) { struct rt2x00_dev *rt2x00dev = hw->priv; set_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags); rt2x00link_stop_tuner(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_start); void rt2x00mac_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; clear_bit(DEVICE_STATE_SCANNING, &rt2x00dev->flags); rt2x00link_start_tuner(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00mac_sw_scan_complete); int rt2x00mac_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats) { struct rt2x00_dev *rt2x00dev = hw->priv; /* * The dot11ACKFailureCount, dot11RTSFailureCount and * dot11RTSSuccessCount are updated in interrupt time. * dot11FCSErrorCount is updated in the link tuner. */ memcpy(stats, &rt2x00dev->low_level_stats, sizeof(*stats)); return 0; } EXPORT_SYMBOL_GPL(rt2x00mac_get_stats); void rt2x00mac_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u64 changes) { struct rt2x00_dev *rt2x00dev = hw->priv; struct rt2x00_intf *intf = vif_to_intf(vif); /* * mac80211 might be calling this function while we are trying * to remove the device or perhaps suspending it. */ if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return; /* * Update the BSSID. */ if (changes & BSS_CHANGED_BSSID) rt2x00lib_config_intf(rt2x00dev, intf, vif->type, NULL, bss_conf->bssid); /* * Start/stop beaconing. */ if (changes & BSS_CHANGED_BEACON_ENABLED) { mutex_lock(&intf->beacon_skb_mutex); if (!bss_conf->enable_beacon && intf->enable_beacon) { rt2x00dev->intf_beaconing--; intf->enable_beacon = false; if (rt2x00dev->intf_beaconing == 0) { /* * Last beaconing interface disabled * -> stop beacon queue. */ rt2x00queue_stop_queue(rt2x00dev->bcn); } /* * Clear beacon in the H/W for this vif. This is needed * to disable beaconing on this particular interface * and keep it running on other interfaces. */ rt2x00queue_clear_beacon(rt2x00dev, vif); } else if (bss_conf->enable_beacon && !intf->enable_beacon) { rt2x00dev->intf_beaconing++; intf->enable_beacon = true; /* * Upload beacon to the H/W. This is only required on * USB devices. PCI devices fetch beacons periodically. */ if (rt2x00_is_usb(rt2x00dev)) rt2x00queue_update_beacon(rt2x00dev, vif); if (rt2x00dev->intf_beaconing == 1) { /* * First beaconing interface enabled * -> start beacon queue. */ rt2x00queue_start_queue(rt2x00dev->bcn); } } mutex_unlock(&intf->beacon_skb_mutex); } /* * When the association status has changed we must reset the link * tuner counter. This is because some drivers determine if they * should perform link tuning based on the number of seconds * while associated or not associated. 
*/ if (changes & BSS_CHANGED_ASSOC) { rt2x00dev->link.count = 0; if (vif->cfg.assoc) rt2x00dev->intf_associated++; else rt2x00dev->intf_associated--; rt2x00leds_led_assoc(rt2x00dev, !!rt2x00dev->intf_associated); } /* * When the erp information has changed, we should perform * additional configuration steps. For all other changes we are done. */ if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_ERP_PREAMBLE | BSS_CHANGED_ERP_SLOT | BSS_CHANGED_BASIC_RATES | BSS_CHANGED_BEACON_INT | BSS_CHANGED_HT)) rt2x00lib_config_erp(rt2x00dev, intf, bss_conf, changes); } EXPORT_SYMBOL_GPL(rt2x00mac_bss_info_changed); int rt2x00mac_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue_idx, const struct ieee80211_tx_queue_params *params) { struct rt2x00_dev *rt2x00dev = hw->priv; struct data_queue *queue; queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); if (unlikely(!queue)) return -EINVAL; /* * The passed variables are stored as real value ((2^n)-1). * Ralink registers require to know the bit number 'n'. */ if (params->cw_min > 0) queue->cw_min = fls(params->cw_min); else queue->cw_min = 5; /* cw_min: 2^5 = 32. */ if (params->cw_max > 0) queue->cw_max = fls(params->cw_max); else queue->cw_max = 10; /* cw_min: 2^10 = 1024. */ queue->aifs = params->aifs; queue->txop = params->txop; rt2x00_dbg(rt2x00dev, "Configured TX queue %d - CWmin: %d, CWmax: %d, Aifs: %d, TXop: %d\n", queue_idx, queue->cw_min, queue->cw_max, queue->aifs, queue->txop); return 0; } EXPORT_SYMBOL_GPL(rt2x00mac_conf_tx); void rt2x00mac_rfkill_poll(struct ieee80211_hw *hw) { struct rt2x00_dev *rt2x00dev = hw->priv; bool active = !!rt2x00dev->ops->lib->rfkill_poll(rt2x00dev); wiphy_rfkill_set_hw_state(hw->wiphy, !active); } EXPORT_SYMBOL_GPL(rt2x00mac_rfkill_poll); void rt2x00mac_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 queues, bool drop) { struct rt2x00_dev *rt2x00dev = hw->priv; struct data_queue *queue; if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return; set_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags); tx_queue_for_each(rt2x00dev, queue) rt2x00queue_flush_queue(queue, drop); clear_bit(DEVICE_STATE_FLUSHING, &rt2x00dev->flags); } EXPORT_SYMBOL_GPL(rt2x00mac_flush); int rt2x00mac_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) { struct rt2x00_dev *rt2x00dev = hw->priv; struct link_ant *ant = &rt2x00dev->link.ant; struct antenna_setup *def = &rt2x00dev->default_ant; struct antenna_setup setup; // The antenna value is not supposed to be 0, // or exceed the maximum number of antenna's. if (!tx_ant || (tx_ant & ~3) || !rx_ant || (rx_ant & ~3)) return -EINVAL; // When the client tried to configure the antenna to or from // diversity mode, we must reset the default antenna as well // as that controls the diversity switch. if (ant->flags & ANTENNA_TX_DIVERSITY && tx_ant != 3) ant->flags &= ~ANTENNA_TX_DIVERSITY; if (ant->flags & ANTENNA_RX_DIVERSITY && rx_ant != 3) ant->flags &= ~ANTENNA_RX_DIVERSITY; // If diversity is being enabled, check if we need hardware // or software diversity. In the latter case, reset the value, // and make sure we update the antenna flags to have the // link tuner pick up the diversity tuning. 
if (tx_ant == 3 && def->tx == ANTENNA_SW_DIVERSITY) { tx_ant = ANTENNA_SW_DIVERSITY; ant->flags |= ANTENNA_TX_DIVERSITY; } if (rx_ant == 3 && def->rx == ANTENNA_SW_DIVERSITY) { rx_ant = ANTENNA_SW_DIVERSITY; ant->flags |= ANTENNA_RX_DIVERSITY; } setup.tx = tx_ant; setup.rx = rx_ant; setup.rx_chain_num = 0; setup.tx_chain_num = 0; rt2x00lib_config_antenna(rt2x00dev, setup); return 0; } EXPORT_SYMBOL_GPL(rt2x00mac_set_antenna); int rt2x00mac_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) { struct rt2x00_dev *rt2x00dev = hw->priv; struct link_ant *ant = &rt2x00dev->link.ant; struct antenna_setup *active = &rt2x00dev->link.ant.active; // When software diversity is active, we must report this to the // client and not the current active antenna state. if (ant->flags & ANTENNA_TX_DIVERSITY) *tx_ant = ANTENNA_HW_DIVERSITY; else *tx_ant = active->tx; if (ant->flags & ANTENNA_RX_DIVERSITY) *rx_ant = ANTENNA_HW_DIVERSITY; else *rx_ant = active->rx; return 0; } EXPORT_SYMBOL_GPL(rt2x00mac_get_antenna); void rt2x00mac_get_ringparam(struct ieee80211_hw *hw, u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max) { struct rt2x00_dev *rt2x00dev = hw->priv; struct data_queue *queue; tx_queue_for_each(rt2x00dev, queue) { *tx += queue->length; *tx_max += queue->limit; } *rx = rt2x00dev->rx->length; *rx_max = rt2x00dev->rx->limit; } EXPORT_SYMBOL_GPL(rt2x00mac_get_ringparam); bool rt2x00mac_tx_frames_pending(struct ieee80211_hw *hw) { struct rt2x00_dev *rt2x00dev = hw->priv; struct data_queue *queue; tx_queue_for_each(rt2x00dev, queue) { if (!rt2x00queue_empty(queue)) return true; } return false; } EXPORT_SYMBOL_GPL(rt2x00mac_tx_frames_pending);
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00mac.c
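/*
 * Illustrative sketch, not part of the driver above: how a contention-window
 * value expressed as (2^n)-1 (the form mac80211 hands to rt2x00mac_conf_tx())
 * maps onto the exponent 'n' that the Ralink registers expect, mirroring the
 * fls()-based conversion with the same fallback defaults (5 and 10).
 * Standalone userspace code; cw_to_exponent() and fls_sketch() are
 * hypothetical helper names used only for this example.
 */
#include <stdio.h>

/* Minimal find-last-set, equivalent to the kernel's fls() for x > 0. */
static int fls_sketch(unsigned int x)
{
	int n = 0;

	while (x) {
		n++;
		x >>= 1;
	}
	return n;
}

static int cw_to_exponent(unsigned int cw, int def)
{
	/* cw == (2^n) - 1, so fls(cw) recovers n; cw == 0 selects the default. */
	return cw > 0 ? fls_sketch(cw) : def;
}

int main(void)
{
	/* cw_min = 15 -> 4 (2^4 = 16), cw_max = 1023 -> 10 (2^10 = 1024). */
	printf("cw_min exponent: %d\n", cw_to_exponent(15, 5));
	printf("cw_max exponent: %d\n", cw_to_exponent(1023, 10));
	return 0;
}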
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> Copyright (C) 2004 - 2010 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00usb Abstract: rt2x00 generic usb device routines. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/bug.h> #include "rt2x00.h" #include "rt2x00usb.h" static bool rt2x00usb_check_usb_error(struct rt2x00_dev *rt2x00dev, int status) { if (status == -ENODEV || status == -ENOENT) return true; if (!test_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) return false; if (status == -EPROTO || status == -ETIMEDOUT) rt2x00dev->num_proto_errs++; else rt2x00dev->num_proto_errs = 0; if (rt2x00dev->num_proto_errs > 3) return true; return false; } /* * Interfacing with the HW. */ int rt2x00usb_vendor_request(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, const u16 value, void *buffer, const u16 buffer_length, const int timeout) { struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); int status; unsigned int pipe = (requesttype == USB_VENDOR_REQUEST_IN) ? usb_rcvctrlpipe(usb_dev, 0) : usb_sndctrlpipe(usb_dev, 0); unsigned long expire = jiffies + msecs_to_jiffies(timeout); if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return -ENODEV; do { status = usb_control_msg(usb_dev, pipe, request, requesttype, value, offset, buffer, buffer_length, timeout / 2); if (status >= 0) return 0; if (rt2x00usb_check_usb_error(rt2x00dev, status)) { /* Device has disappeared. */ clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); break; } } while (time_before(jiffies, expire)); rt2x00_err(rt2x00dev, "Vendor Request 0x%02x failed for offset 0x%04x with error %d\n", request, offset, status); return status; } EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request); int rt2x00usb_vendor_req_buff_lock(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, void *buffer, const u16 buffer_length, const int timeout) { int status; BUG_ON(!mutex_is_locked(&rt2x00dev->csr_mutex)); /* * Check for Cache availability. 
*/ if (unlikely(!rt2x00dev->csr.cache || buffer_length > CSR_CACHE_SIZE)) { rt2x00_err(rt2x00dev, "CSR cache not available\n"); return -ENOMEM; } if (requesttype == USB_VENDOR_REQUEST_OUT) memcpy(rt2x00dev->csr.cache, buffer, buffer_length); status = rt2x00usb_vendor_request(rt2x00dev, request, requesttype, offset, 0, rt2x00dev->csr.cache, buffer_length, timeout); if (!status && requesttype == USB_VENDOR_REQUEST_IN) memcpy(buffer, rt2x00dev->csr.cache, buffer_length); return status; } EXPORT_SYMBOL_GPL(rt2x00usb_vendor_req_buff_lock); int rt2x00usb_vendor_request_buff(struct rt2x00_dev *rt2x00dev, const u8 request, const u8 requesttype, const u16 offset, void *buffer, const u16 buffer_length) { int status = 0; u8 *tb; u16 off, len, bsize; mutex_lock(&rt2x00dev->csr_mutex); tb = (u8 *)buffer; off = offset; len = buffer_length; while (len && !status) { bsize = min_t(u16, CSR_CACHE_SIZE, len); status = rt2x00usb_vendor_req_buff_lock(rt2x00dev, request, requesttype, off, tb, bsize, REGISTER_TIMEOUT); tb += bsize; len -= bsize; off += bsize; } mutex_unlock(&rt2x00dev->csr_mutex); return status; } EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_buff); int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, const struct rt2x00_field32 field, u32 *reg) { unsigned int i; if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return -ENODEV; for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { *reg = rt2x00usb_register_read_lock(rt2x00dev, offset); if (!rt2x00_get_field32(*reg, field)) return 1; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n", offset, *reg); *reg = ~0; return 0; } EXPORT_SYMBOL_GPL(rt2x00usb_regbusy_read); struct rt2x00_async_read_data { __le32 reg; struct usb_ctrlrequest cr; struct rt2x00_dev *rt2x00dev; bool (*callback)(struct rt2x00_dev *, int, u32); }; static void rt2x00usb_register_read_async_cb(struct urb *urb) { struct rt2x00_async_read_data *rd = urb->context; if (rd->callback(rd->rt2x00dev, urb->status, le32_to_cpu(rd->reg))) { usb_anchor_urb(urb, rd->rt2x00dev->anchor); if (usb_submit_urb(urb, GFP_ATOMIC) < 0) { usb_unanchor_urb(urb); kfree(rd); } } else kfree(rd); } void rt2x00usb_register_read_async(struct rt2x00_dev *rt2x00dev, const unsigned int offset, bool (*callback)(struct rt2x00_dev*, int, u32)) { struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct urb *urb; struct rt2x00_async_read_data *rd; rd = kmalloc(sizeof(*rd), GFP_ATOMIC); if (!rd) return; urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { kfree(rd); return; } rd->rt2x00dev = rt2x00dev; rd->callback = callback; rd->cr.bRequestType = USB_VENDOR_REQUEST_IN; rd->cr.bRequest = USB_MULTI_READ; rd->cr.wValue = 0; rd->cr.wIndex = cpu_to_le16(offset); rd->cr.wLength = cpu_to_le16(sizeof(u32)); usb_fill_control_urb(urb, usb_dev, usb_rcvctrlpipe(usb_dev, 0), (u8 *)(&rd->cr), &rd->reg, sizeof(rd->reg), rt2x00usb_register_read_async_cb, rd); usb_anchor_urb(urb, rt2x00dev->anchor); if (usb_submit_urb(urb, GFP_ATOMIC) < 0) { usb_unanchor_urb(urb); kfree(rd); } usb_free_urb(urb); } EXPORT_SYMBOL_GPL(rt2x00usb_register_read_async); /* * TX data handlers. */ static void rt2x00usb_work_txdone_entry(struct queue_entry *entry) { /* * If the transfer to hardware succeeded, it does not mean the * frame was send out correctly. It only means the frame * was successfully pushed to the hardware, we have no * way to determine the transmission status right now. 
* (Only indirectly by looking at the failed TX counters * in the register). */ if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) rt2x00lib_txdone_noinfo(entry, TXDONE_FAILURE); else rt2x00lib_txdone_noinfo(entry, TXDONE_UNKNOWN); } static void rt2x00usb_work_txdone(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, txdone_work); struct data_queue *queue; struct queue_entry *entry; tx_queue_for_each(rt2x00dev, queue) { while (!rt2x00queue_empty(queue)) { entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags) || !test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) break; rt2x00usb_work_txdone_entry(entry); } } } static void rt2x00usb_interrupt_txdone(struct urb *urb) { struct queue_entry *entry = (struct queue_entry *)urb->context; struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) return; /* * Check if the frame was correctly uploaded */ if (urb->status) set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); /* * Report the frame as DMA done */ rt2x00lib_dmadone(entry); if (rt2x00dev->ops->lib->tx_dma_done) rt2x00dev->ops->lib->tx_dma_done(entry); /* * Schedule the delayed work for reading the TX status * from the device. */ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO) || !kfifo_is_empty(&rt2x00dev->txstatus_fifo)) queue_work(rt2x00dev->workqueue, &rt2x00dev->txdone_work); } static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct queue_entry_priv_usb *entry_priv = entry->priv_data; u32 length; int status; if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) || test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags)) return false; /* * USB devices require certain padding at the end of each frame * and urb. Those paddings are not included in skbs. Pass entry * to the driver to determine what the overall length should be. */ length = rt2x00dev->ops->lib->get_tx_data_len(entry); status = skb_padto(entry->skb, length); if (unlikely(status)) { /* TODO: report something more appropriate than IO_FAILED. */ rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n"); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); return false; } usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint), entry->skb->data, length, rt2x00usb_interrupt_txdone, entry); status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { if (rt2x00usb_check_usb_error(rt2x00dev, status)) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); } return false; } /* * RX data handlers. */ static void rt2x00usb_work_rxdone(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, rxdone_work); struct queue_entry *entry; struct skb_frame_desc *skbdesc; u8 rxd[32]; while (!rt2x00queue_empty(rt2x00dev->rx)) { entry = rt2x00queue_get_entry(rt2x00dev->rx, Q_INDEX_DONE); if (test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) break; /* * Fill in desc fields of the skb descriptor */ skbdesc = get_skb_frame_desc(entry->skb); skbdesc->desc = rxd; skbdesc->desc_len = entry->queue->desc_size; /* * Send the frame to rt2x00lib for further processing. 
*/ rt2x00lib_rxdone(entry, GFP_KERNEL); } } static void rt2x00usb_interrupt_rxdone(struct urb *urb) { struct queue_entry *entry = (struct queue_entry *)urb->context; struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) return; /* * Check if the received data is simply too small * to be actually valid, or if the urb is signaling * a problem. */ if (urb->actual_length < entry->queue->desc_size || urb->status) set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); /* * Report the frame as DMA done */ rt2x00lib_dmadone(entry); /* * Schedule the delayed work for processing RX data */ queue_work(rt2x00dev->workqueue, &rt2x00dev->rxdone_work); } static bool rt2x00usb_kick_rx_entry(struct queue_entry *entry, void *data) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct queue_entry_priv_usb *entry_priv = entry->priv_data; int status; if (test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) return false; rt2x00lib_dmastart(entry); usb_fill_bulk_urb(entry_priv->urb, usb_dev, usb_rcvbulkpipe(usb_dev, entry->queue->usb_endpoint), entry->skb->data, entry->skb->len, rt2x00usb_interrupt_rxdone, entry); status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC); if (status) { if (rt2x00usb_check_usb_error(rt2x00dev, status)) clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); set_bit(ENTRY_DATA_IO_FAILED, &entry->flags); rt2x00lib_dmadone(entry); } return false; } void rt2x00usb_kick_queue(struct data_queue *queue) { switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: if (!rt2x00queue_empty(queue)) rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL, rt2x00usb_kick_tx_entry); break; case QID_RX: if (!rt2x00queue_full(queue)) rt2x00queue_for_each_entry(queue, Q_INDEX, Q_INDEX_DONE, NULL, rt2x00usb_kick_rx_entry); break; default: break; } } EXPORT_SYMBOL_GPL(rt2x00usb_kick_queue); static bool rt2x00usb_flush_entry(struct queue_entry *entry, void *data) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct queue_entry_priv_usb *entry_priv = entry->priv_data; struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; if (!test_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags)) return false; usb_kill_urb(entry_priv->urb); /* * Kill guardian urb (if required by driver). */ if ((entry->queue->qid == QID_BEACON) && (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD))) usb_kill_urb(bcn_priv->guardian_urb); return false; } void rt2x00usb_flush_queue(struct data_queue *queue, bool drop) { struct work_struct *completion; unsigned int i; if (drop) rt2x00queue_for_each_entry(queue, Q_INDEX_DONE, Q_INDEX, NULL, rt2x00usb_flush_entry); /* * Obtain the queue completion handler */ switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: completion = &queue->rt2x00dev->txdone_work; break; case QID_RX: completion = &queue->rt2x00dev->rxdone_work; break; default: return; } for (i = 0; i < 10; i++) { /* * Check if the driver is already done, otherwise we * have to sleep a little while to give the driver/hw * the oppurtunity to complete interrupt process itself. */ if (rt2x00queue_empty(queue)) break; /* * Schedule the completion handler manually, when this * worker function runs, it should cleanup the queue. */ queue_work(queue->rt2x00dev->workqueue, completion); /* * Wait for a little while to give the driver * the oppurtunity to recover itself. 
*/ msleep(50); } } EXPORT_SYMBOL_GPL(rt2x00usb_flush_queue); static void rt2x00usb_watchdog_tx_dma(struct data_queue *queue) { rt2x00_warn(queue->rt2x00dev, "TX queue %d DMA timed out, invoke forced reset\n", queue->qid); rt2x00queue_stop_queue(queue); rt2x00queue_flush_queue(queue, true); rt2x00queue_start_queue(queue); } static int rt2x00usb_dma_timeout(struct data_queue *queue) { struct queue_entry *entry; entry = rt2x00queue_get_entry(queue, Q_INDEX_DMA_DONE); return rt2x00queue_dma_timeout(entry); } void rt2x00usb_watchdog(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; tx_queue_for_each(rt2x00dev, queue) { if (!rt2x00queue_empty(queue)) { if (rt2x00usb_dma_timeout(queue)) rt2x00usb_watchdog_tx_dma(queue); } } } EXPORT_SYMBOL_GPL(rt2x00usb_watchdog); /* * Radio handlers */ void rt2x00usb_disable_radio(struct rt2x00_dev *rt2x00dev) { rt2x00usb_vendor_request_sw(rt2x00dev, USB_RX_CONTROL, 0, 0, REGISTER_TIMEOUT); } EXPORT_SYMBOL_GPL(rt2x00usb_disable_radio); /* * Device initialization handlers. */ void rt2x00usb_clear_entry(struct queue_entry *entry) { entry->flags = 0; if (entry->queue->qid == QID_RX) rt2x00usb_kick_rx_entry(entry, NULL); } EXPORT_SYMBOL_GPL(rt2x00usb_clear_entry); static void rt2x00usb_assign_endpoint(struct data_queue *queue, struct usb_endpoint_descriptor *ep_desc) { struct usb_device *usb_dev = to_usb_device_intf(queue->rt2x00dev->dev); int pipe; queue->usb_endpoint = usb_endpoint_num(ep_desc); if (queue->qid == QID_RX) { pipe = usb_rcvbulkpipe(usb_dev, queue->usb_endpoint); queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe); } else { pipe = usb_sndbulkpipe(usb_dev, queue->usb_endpoint); queue->usb_maxpacket = usb_maxpacket(usb_dev, pipe); } if (!queue->usb_maxpacket) queue->usb_maxpacket = 1; } static int rt2x00usb_find_endpoints(struct rt2x00_dev *rt2x00dev) { struct usb_interface *intf = to_usb_interface(rt2x00dev->dev); struct usb_host_interface *intf_desc = intf->cur_altsetting; struct usb_endpoint_descriptor *ep_desc; struct data_queue *queue = rt2x00dev->tx; struct usb_endpoint_descriptor *tx_ep_desc = NULL; unsigned int i; /* * Walk through all available endpoints to search for "bulk in" * and "bulk out" endpoints. When we find such endpoints collect * the information we need from the descriptor and assign it * to the queue. */ for (i = 0; i < intf_desc->desc.bNumEndpoints; i++) { ep_desc = &intf_desc->endpoint[i].desc; if (usb_endpoint_is_bulk_in(ep_desc)) { rt2x00usb_assign_endpoint(rt2x00dev->rx, ep_desc); } else if (usb_endpoint_is_bulk_out(ep_desc) && (queue != queue_end(rt2x00dev))) { rt2x00usb_assign_endpoint(queue, ep_desc); queue = queue_next(queue); tx_ep_desc = ep_desc; } } /* * At least 1 endpoint for RX and 1 endpoint for TX must be available. */ if (!rt2x00dev->rx->usb_endpoint || !rt2x00dev->tx->usb_endpoint) { rt2x00_err(rt2x00dev, "Bulk-in/Bulk-out endpoints not found\n"); return -EPIPE; } /* * It might be possible not all queues have a dedicated endpoint. * Loop through all TX queues and copy the endpoint information * which we have gathered from already assigned endpoints. 
*/ txall_queue_for_each(rt2x00dev, queue) { if (!queue->usb_endpoint) rt2x00usb_assign_endpoint(queue, tx_ep_desc); } return 0; } static int rt2x00usb_alloc_entries(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; struct queue_entry_priv_usb *entry_priv; struct queue_entry_priv_usb_bcn *bcn_priv; unsigned int i; for (i = 0; i < queue->limit; i++) { entry_priv = queue->entries[i].priv_data; entry_priv->urb = usb_alloc_urb(0, GFP_KERNEL); if (!entry_priv->urb) return -ENOMEM; } /* * If this is not the beacon queue or * no guardian byte was required for the beacon, * then we are done. */ if (queue->qid != QID_BEACON || !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD)) return 0; for (i = 0; i < queue->limit; i++) { bcn_priv = queue->entries[i].priv_data; bcn_priv->guardian_urb = usb_alloc_urb(0, GFP_KERNEL); if (!bcn_priv->guardian_urb) return -ENOMEM; } return 0; } static void rt2x00usb_free_entries(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; struct queue_entry_priv_usb *entry_priv; struct queue_entry_priv_usb_bcn *bcn_priv; unsigned int i; if (!queue->entries) return; for (i = 0; i < queue->limit; i++) { entry_priv = queue->entries[i].priv_data; usb_kill_urb(entry_priv->urb); usb_free_urb(entry_priv->urb); } /* * If this is not the beacon queue or * no guardian byte was required for the beacon, * then we are done. */ if (queue->qid != QID_BEACON || !rt2x00_has_cap_flag(rt2x00dev, REQUIRE_BEACON_GUARD)) return; for (i = 0; i < queue->limit; i++) { bcn_priv = queue->entries[i].priv_data; usb_kill_urb(bcn_priv->guardian_urb); usb_free_urb(bcn_priv->guardian_urb); } } int rt2x00usb_initialize(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; int status; /* * Find endpoints for each queue */ status = rt2x00usb_find_endpoints(rt2x00dev); if (status) goto exit; /* * Allocate DMA */ queue_for_each(rt2x00dev, queue) { status = rt2x00usb_alloc_entries(queue); if (status) goto exit; } return 0; exit: rt2x00usb_uninitialize(rt2x00dev); return status; } EXPORT_SYMBOL_GPL(rt2x00usb_initialize); void rt2x00usb_uninitialize(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; usb_kill_anchored_urbs(rt2x00dev->anchor); hrtimer_cancel(&rt2x00dev->txstatus_timer); cancel_work_sync(&rt2x00dev->rxdone_work); cancel_work_sync(&rt2x00dev->txdone_work); queue_for_each(rt2x00dev, queue) rt2x00usb_free_entries(queue); } EXPORT_SYMBOL_GPL(rt2x00usb_uninitialize); /* * USB driver handlers. 
*/ static void rt2x00usb_free_reg(struct rt2x00_dev *rt2x00dev) { kfree(rt2x00dev->rf); rt2x00dev->rf = NULL; kfree(rt2x00dev->eeprom); rt2x00dev->eeprom = NULL; kfree(rt2x00dev->csr.cache); rt2x00dev->csr.cache = NULL; } static int rt2x00usb_alloc_reg(struct rt2x00_dev *rt2x00dev) { rt2x00dev->csr.cache = kzalloc(CSR_CACHE_SIZE, GFP_KERNEL); if (!rt2x00dev->csr.cache) goto exit; rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); if (!rt2x00dev->eeprom) goto exit; rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); if (!rt2x00dev->rf) goto exit; return 0; exit: rt2x00_probe_err("Failed to allocate registers\n"); rt2x00usb_free_reg(rt2x00dev); return -ENOMEM; } int rt2x00usb_probe(struct usb_interface *usb_intf, const struct rt2x00_ops *ops) { struct usb_device *usb_dev = interface_to_usbdev(usb_intf); struct ieee80211_hw *hw; struct rt2x00_dev *rt2x00dev; int retval; usb_dev = usb_get_dev(usb_dev); usb_reset_device(usb_dev); hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); if (!hw) { rt2x00_probe_err("Failed to allocate hardware\n"); retval = -ENOMEM; goto exit_put_device; } usb_set_intfdata(usb_intf, hw); rt2x00dev = hw->priv; rt2x00dev->dev = &usb_intf->dev; rt2x00dev->ops = ops; rt2x00dev->hw = hw; rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB); INIT_WORK(&rt2x00dev->rxdone_work, rt2x00usb_work_rxdone); INIT_WORK(&rt2x00dev->txdone_work, rt2x00usb_work_txdone); hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); retval = rt2x00usb_alloc_reg(rt2x00dev); if (retval) goto exit_free_device; rt2x00dev->anchor = devm_kmalloc(&usb_dev->dev, sizeof(struct usb_anchor), GFP_KERNEL); if (!rt2x00dev->anchor) { retval = -ENOMEM; goto exit_free_reg; } init_usb_anchor(rt2x00dev->anchor); retval = rt2x00lib_probe_dev(rt2x00dev); if (retval) goto exit_free_anchor; return 0; exit_free_anchor: usb_kill_anchored_urbs(rt2x00dev->anchor); exit_free_reg: rt2x00usb_free_reg(rt2x00dev); exit_free_device: ieee80211_free_hw(hw); exit_put_device: usb_put_dev(usb_dev); usb_set_intfdata(usb_intf, NULL); return retval; } EXPORT_SYMBOL_GPL(rt2x00usb_probe); void rt2x00usb_disconnect(struct usb_interface *usb_intf) { struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); struct rt2x00_dev *rt2x00dev = hw->priv; /* * Free all allocated data. */ rt2x00lib_remove_dev(rt2x00dev); rt2x00usb_free_reg(rt2x00dev); ieee80211_free_hw(hw); /* * Free the USB device data. */ usb_set_intfdata(usb_intf, NULL); usb_put_dev(interface_to_usbdev(usb_intf)); } EXPORT_SYMBOL_GPL(rt2x00usb_disconnect); #ifdef CONFIG_PM int rt2x00usb_suspend(struct usb_interface *usb_intf, pm_message_t state) { struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); struct rt2x00_dev *rt2x00dev = hw->priv; return rt2x00lib_suspend(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00usb_suspend); int rt2x00usb_resume(struct usb_interface *usb_intf) { struct ieee80211_hw *hw = usb_get_intfdata(usb_intf); struct rt2x00_dev *rt2x00dev = hw->priv; return rt2x00lib_resume(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00usb_resume); #endif /* CONFIG_PM */ /* * rt2x00usb module information. */ MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("rt2x00 usb library"); MODULE_LICENSE("GPL");
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00usb.c
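/*
 * Illustrative sketch, not part of the driver above: the chunking pattern
 * used by rt2x00usb_vendor_request_buff(), which splits an arbitrarily large
 * register buffer into CSR-cache-sized pieces and advances the buffer pointer
 * and register offset together until everything is transferred or an error
 * occurs. transfer_chunk() stands in for the locked vendor request and
 * CHUNK_SIZE for CSR_CACHE_SIZE; both names are assumptions for this example.
 */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

#define CHUNK_SIZE 64

/* Pretend transfer: just report what a real control message would move. */
static int transfer_chunk(uint16_t offset, const uint8_t *buf, uint16_t len)
{
	(void)buf; /* a real implementation would hand this to usb_control_msg() */
	printf("chunk: off=0x%04x len=%u\n", offset, len);
	return 0;
}

static int transfer_buffer(uint16_t offset, const uint8_t *buf, uint16_t len)
{
	int status = 0;

	while (len && !status) {
		uint16_t bsize = len < CHUNK_SIZE ? len : CHUNK_SIZE;

		status = transfer_chunk(offset, buf, bsize);
		buf += bsize;
		len -= bsize;
		offset += bsize;
	}
	return status;
}

int main(void)
{
	uint8_t data[150];

	memset(data, 0xaa, sizeof(data));
	/* 150 bytes -> chunks of 64, 64 and 22 at increasing offsets. */
	return transfer_buffer(0x1000, data, sizeof(data));
}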
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> Copyright (C) 2004 - 2009 Gertjan van Wingerde <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00lib Abstract: rt2x00 firmware loading routines. */ #include <linux/kernel.h> #include <linux/module.h> #include "rt2x00.h" #include "rt2x00lib.h" static int rt2x00lib_request_firmware(struct rt2x00_dev *rt2x00dev) { struct device *device = wiphy_dev(rt2x00dev->hw->wiphy); const struct firmware *fw; char *fw_name; int retval; /* * Read correct firmware from harddisk. */ fw_name = rt2x00dev->ops->lib->get_firmware_name(rt2x00dev); if (!fw_name) { rt2x00_err(rt2x00dev, "Invalid firmware filename\n" "Please file bug report to %s\n", DRV_PROJECT); return -EINVAL; } rt2x00_info(rt2x00dev, "Loading firmware file '%s'\n", fw_name); retval = request_firmware(&fw, fw_name, device); if (retval) { rt2x00_err(rt2x00dev, "Failed to request Firmware\n"); return retval; } if (!fw || !fw->size || !fw->data) { rt2x00_err(rt2x00dev, "Failed to read Firmware\n"); release_firmware(fw); return -ENOENT; } rt2x00_info(rt2x00dev, "Firmware detected - version: %d.%d\n", fw->data[fw->size - 4], fw->data[fw->size - 3]); snprintf(rt2x00dev->hw->wiphy->fw_version, sizeof(rt2x00dev->hw->wiphy->fw_version), "%d.%d", fw->data[fw->size - 4], fw->data[fw->size - 3]); retval = rt2x00dev->ops->lib->check_firmware(rt2x00dev, fw->data, fw->size); switch (retval) { case FW_OK: break; case FW_BAD_CRC: rt2x00_err(rt2x00dev, "Firmware checksum error\n"); goto exit; case FW_BAD_LENGTH: rt2x00_err(rt2x00dev, "Invalid firmware file length (len=%zu)\n", fw->size); goto exit; case FW_BAD_VERSION: rt2x00_err(rt2x00dev, "Current firmware does not support detected chipset\n"); goto exit; } rt2x00dev->fw = fw; return 0; exit: release_firmware(fw); return -ENOENT; } int rt2x00lib_load_firmware(struct rt2x00_dev *rt2x00dev) { int retval; if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_FIRMWARE)) return 0; if (!rt2x00dev->fw) { retval = rt2x00lib_request_firmware(rt2x00dev); if (retval) return retval; } /* * Send firmware to the device. */ retval = rt2x00dev->ops->lib->load_firmware(rt2x00dev, rt2x00dev->fw->data, rt2x00dev->fw->size); /* * When the firmware is uploaded to the hardware the LED * association status might have been triggered, for correct * LED handling it should now be reset. */ rt2x00leds_led_assoc(rt2x00dev, false); return retval; } void rt2x00lib_free_firmware(struct rt2x00_dev *rt2x00dev) { release_firmware(rt2x00dev->fw); rt2x00dev->fw = NULL; }
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00firmware.c
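/*
 * Illustrative sketch, not part of the driver above: how the firmware version
 * string is derived in rt2x00lib_request_firmware(), where the major and
 * minor numbers sit in the fourth- and third-to-last bytes of the firmware
 * image. The byte array here is a made-up stand-in for fw->data.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Fake 8-byte "image"; only the trailing bytes matter for this demo. */
	const uint8_t fw_data[] = { 0x00, 0x00, 0x00, 0x00, 1, 7, 0xaa, 0x55 };
	const size_t fw_size = sizeof(fw_data);
	char version[16];

	snprintf(version, sizeof(version), "%d.%d",
		 fw_data[fw_size - 4], fw_data[fw_size - 3]);
	printf("Firmware detected - version: %s\n", version);
	return 0;
}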
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00lib Abstract: rt2x00 debugfs specific routines. */ #include <linux/debugfs.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/uaccess.h> #include "rt2x00.h" #include "rt2x00lib.h" #include "rt2x00dump.h" #define MAX_LINE_LENGTH 64 struct rt2x00debug_crypto { unsigned long success; unsigned long icv_error; unsigned long mic_error; unsigned long key_error; }; struct rt2x00debug_intf { /* * Pointer to driver structure where * this debugfs entry belongs to. */ struct rt2x00_dev *rt2x00dev; /* * Reference to the rt2x00debug structure * which can be used to communicate with * the registers. */ const struct rt2x00debug *debug; /* * Debugfs entries for: * - driver folder * - driver file * - chipset file * - device state flags file * - device capability flags file * - hardware restart file * - register folder * - csr offset/value files * - eeprom offset/value files * - bbp offset/value files * - rf offset/value files * - rfcsr offset/value files * - queue folder * - frame dump file * - queue stats file * - crypto stats file */ struct dentry *driver_folder; /* * The frame dump file only allows a single reader, * so we need to store the current state here. */ unsigned long frame_dump_flags; #define FRAME_DUMP_FILE_OPEN 1 /* * We queue each frame before dumping it to the user, * per read command we will pass a single skb structure * so we should be prepared to queue multiple sk buffers * before sending it to userspace. */ struct sk_buff_head frame_dump_skbqueue; wait_queue_head_t frame_dump_waitqueue; /* * HW crypto statistics. * All statistics are stored separately per cipher type. */ struct rt2x00debug_crypto crypto_stats[CIPHER_MAX]; /* * Driver and chipset files will use a data buffer * that has been created in advance. This will simplify * the code since we can use the debugfs functions. */ struct debugfs_blob_wrapper driver_blob; struct debugfs_blob_wrapper chipset_blob; /* * Requested offset for each register type. 
*/ unsigned int offset_csr; unsigned int offset_eeprom; unsigned int offset_bbp; unsigned int offset_rf; unsigned int offset_rfcsr; }; void rt2x00debug_update_crypto(struct rt2x00_dev *rt2x00dev, struct rxdone_entry_desc *rxdesc) { struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; enum cipher cipher = rxdesc->cipher; enum rx_crypto status = rxdesc->cipher_status; if (cipher == CIPHER_TKIP_NO_MIC) cipher = CIPHER_TKIP; if (cipher == CIPHER_NONE || cipher >= CIPHER_MAX) return; /* Remove CIPHER_NONE index */ cipher--; intf->crypto_stats[cipher].success += (status == RX_CRYPTO_SUCCESS); intf->crypto_stats[cipher].icv_error += (status == RX_CRYPTO_FAIL_ICV); intf->crypto_stats[cipher].mic_error += (status == RX_CRYPTO_FAIL_MIC); intf->crypto_stats[cipher].key_error += (status == RX_CRYPTO_FAIL_KEY); } void rt2x00debug_dump_frame(struct rt2x00_dev *rt2x00dev, enum rt2x00_dump_type type, struct queue_entry *entry) { struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; struct sk_buff *skb = entry->skb; struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb); struct sk_buff *skbcopy; struct rt2x00dump_hdr *dump_hdr; struct timespec64 timestamp; u32 data_len; if (likely(!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags))) return; ktime_get_ts64(&timestamp); if (skb_queue_len(&intf->frame_dump_skbqueue) > 20) { rt2x00_dbg(rt2x00dev, "txrx dump queue length exceeded\n"); return; } data_len = skb->len; if (skbdesc->flags & SKBDESC_DESC_IN_SKB) data_len -= skbdesc->desc_len; skbcopy = alloc_skb(sizeof(*dump_hdr) + skbdesc->desc_len + data_len, GFP_ATOMIC); if (!skbcopy) { rt2x00_dbg(rt2x00dev, "Failed to copy skb for dump\n"); return; } dump_hdr = skb_put(skbcopy, sizeof(*dump_hdr)); dump_hdr->version = cpu_to_le32(DUMP_HEADER_VERSION); dump_hdr->header_length = cpu_to_le32(sizeof(*dump_hdr)); dump_hdr->desc_length = cpu_to_le32(skbdesc->desc_len); dump_hdr->data_length = cpu_to_le32(data_len); dump_hdr->chip_rt = cpu_to_le16(rt2x00dev->chip.rt); dump_hdr->chip_rf = cpu_to_le16(rt2x00dev->chip.rf); dump_hdr->chip_rev = cpu_to_le16(rt2x00dev->chip.rev); dump_hdr->type = cpu_to_le16(type); dump_hdr->queue_index = entry->queue->qid; dump_hdr->entry_index = entry->entry_idx; dump_hdr->timestamp_sec = cpu_to_le32(timestamp.tv_sec); dump_hdr->timestamp_usec = cpu_to_le32(timestamp.tv_nsec / NSEC_PER_USEC); if (!(skbdesc->flags & SKBDESC_DESC_IN_SKB)) skb_put_data(skbcopy, skbdesc->desc, skbdesc->desc_len); skb_put_data(skbcopy, skb->data, skb->len); skb_queue_tail(&intf->frame_dump_skbqueue, skbcopy); wake_up_interruptible(&intf->frame_dump_waitqueue); /* * Verify that the file has not been closed while we were working. 
*/ if (!test_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)) skb_queue_purge(&intf->frame_dump_skbqueue); } EXPORT_SYMBOL_GPL(rt2x00debug_dump_frame); static int rt2x00debug_file_open(struct inode *inode, struct file *file) { struct rt2x00debug_intf *intf = inode->i_private; file->private_data = inode->i_private; if (!try_module_get(intf->debug->owner)) return -EBUSY; return 0; } static int rt2x00debug_file_release(struct inode *inode, struct file *file) { struct rt2x00debug_intf *intf = file->private_data; module_put(intf->debug->owner); return 0; } static int rt2x00debug_open_queue_dump(struct inode *inode, struct file *file) { struct rt2x00debug_intf *intf = inode->i_private; int retval; retval = rt2x00debug_file_open(inode, file); if (retval) return retval; if (test_and_set_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags)) { rt2x00debug_file_release(inode, file); return -EBUSY; } return 0; } static int rt2x00debug_release_queue_dump(struct inode *inode, struct file *file) { struct rt2x00debug_intf *intf = inode->i_private; skb_queue_purge(&intf->frame_dump_skbqueue); clear_bit(FRAME_DUMP_FILE_OPEN, &intf->frame_dump_flags); return rt2x00debug_file_release(inode, file); } static ssize_t rt2x00debug_read_queue_dump(struct file *file, char __user *buf, size_t length, loff_t *offset) { struct rt2x00debug_intf *intf = file->private_data; struct sk_buff *skb; size_t status; int retval; if (file->f_flags & O_NONBLOCK) return -EAGAIN; retval = wait_event_interruptible(intf->frame_dump_waitqueue, (skb = skb_dequeue(&intf->frame_dump_skbqueue))); if (retval) return retval; status = min_t(size_t, skb->len, length); if (copy_to_user(buf, skb->data, status)) { status = -EFAULT; goto exit; } *offset += status; exit: kfree_skb(skb); return status; } static __poll_t rt2x00debug_poll_queue_dump(struct file *file, poll_table *wait) { struct rt2x00debug_intf *intf = file->private_data; poll_wait(file, &intf->frame_dump_waitqueue, wait); if (!skb_queue_empty(&intf->frame_dump_skbqueue)) return EPOLLOUT | EPOLLWRNORM; return 0; } static const struct file_operations rt2x00debug_fop_queue_dump = { .owner = THIS_MODULE, .read = rt2x00debug_read_queue_dump, .poll = rt2x00debug_poll_queue_dump, .open = rt2x00debug_open_queue_dump, .release = rt2x00debug_release_queue_dump, .llseek = default_llseek, }; static ssize_t rt2x00debug_read_queue_stats(struct file *file, char __user *buf, size_t length, loff_t *offset) { struct rt2x00debug_intf *intf = file->private_data; struct data_queue *queue; unsigned long irqflags; unsigned int lines = 1 + intf->rt2x00dev->data_queues; size_t size; char *data; char *temp; if (*offset) return 0; data = kcalloc(lines, MAX_LINE_LENGTH, GFP_KERNEL); if (!data) return -ENOMEM; temp = data + sprintf(data, "qid\tflags\t\tcount\tlimit\tlength\tindex\tdma done\tdone\n"); queue_for_each(intf->rt2x00dev, queue) { spin_lock_irqsave(&queue->index_lock, irqflags); temp += sprintf(temp, "%d\t0x%.8x\t%d\t%d\t%d\t%d\t%d\t\t%d\n", queue->qid, (unsigned int)queue->flags, queue->count, queue->limit, queue->length, queue->index[Q_INDEX], queue->index[Q_INDEX_DMA_DONE], queue->index[Q_INDEX_DONE]); spin_unlock_irqrestore(&queue->index_lock, irqflags); } size = strlen(data); size = min(size, length); if (copy_to_user(buf, data, size)) { kfree(data); return -EFAULT; } kfree(data); *offset += size; return size; } static const struct file_operations rt2x00debug_fop_queue_stats = { .owner = THIS_MODULE, .read = rt2x00debug_read_queue_stats, .open = rt2x00debug_file_open, .release = 
rt2x00debug_file_release, .llseek = default_llseek, }; #ifdef CONFIG_RT2X00_LIB_CRYPTO static ssize_t rt2x00debug_read_crypto_stats(struct file *file, char __user *buf, size_t length, loff_t *offset) { struct rt2x00debug_intf *intf = file->private_data; static const char * const name[] = { "WEP64", "WEP128", "TKIP", "AES" }; char *data; char *temp; size_t size; unsigned int i; if (*offset) return 0; data = kcalloc(1 + CIPHER_MAX, MAX_LINE_LENGTH, GFP_KERNEL); if (!data) return -ENOMEM; temp = data; temp += sprintf(data, "cipher\tsuccess\ticv err\tmic err\tkey err\n"); for (i = 0; i < CIPHER_MAX; i++) { temp += sprintf(temp, "%s\t%lu\t%lu\t%lu\t%lu\n", name[i], intf->crypto_stats[i].success, intf->crypto_stats[i].icv_error, intf->crypto_stats[i].mic_error, intf->crypto_stats[i].key_error); } size = strlen(data); size = min(size, length); if (copy_to_user(buf, data, size)) { kfree(data); return -EFAULT; } kfree(data); *offset += size; return size; } static const struct file_operations rt2x00debug_fop_crypto_stats = { .owner = THIS_MODULE, .read = rt2x00debug_read_crypto_stats, .open = rt2x00debug_file_open, .release = rt2x00debug_file_release, .llseek = default_llseek, }; #endif #define RT2X00DEBUGFS_OPS_READ(__name, __format, __type) \ static ssize_t rt2x00debug_read_##__name(struct file *file, \ char __user *buf, \ size_t length, \ loff_t *offset) \ { \ struct rt2x00debug_intf *intf = file->private_data; \ const struct rt2x00debug *debug = intf->debug; \ char line[16]; \ size_t size; \ unsigned int index = intf->offset_##__name; \ __type value; \ \ if (*offset) \ return 0; \ \ if (index >= debug->__name.word_count) \ return -EINVAL; \ \ index += (debug->__name.word_base / \ debug->__name.word_size); \ \ if (debug->__name.flags & RT2X00DEBUGFS_OFFSET) \ index *= debug->__name.word_size; \ \ value = debug->__name.read(intf->rt2x00dev, index); \ \ size = sprintf(line, __format, value); \ \ return simple_read_from_buffer(buf, length, offset, line, size); \ } #define RT2X00DEBUGFS_OPS_WRITE(__name, __type) \ static ssize_t rt2x00debug_write_##__name(struct file *file, \ const char __user *buf,\ size_t length, \ loff_t *offset) \ { \ struct rt2x00debug_intf *intf = file->private_data; \ const struct rt2x00debug *debug = intf->debug; \ char line[17]; \ size_t size; \ unsigned int index = intf->offset_##__name; \ __type value; \ \ if (*offset) \ return 0; \ \ if (index >= debug->__name.word_count) \ return -EINVAL; \ \ if (length > sizeof(line)) \ return -EINVAL; \ \ if (copy_from_user(line, buf, length)) \ return -EFAULT; \ line[16] = 0; \ \ size = strlen(line); \ value = simple_strtoul(line, NULL, 0); \ \ index += (debug->__name.word_base / \ debug->__name.word_size); \ \ if (debug->__name.flags & RT2X00DEBUGFS_OFFSET) \ index *= debug->__name.word_size; \ \ debug->__name.write(intf->rt2x00dev, index, value); \ \ *offset += size; \ return size; \ } #define RT2X00DEBUGFS_OPS(__name, __format, __type) \ RT2X00DEBUGFS_OPS_READ(__name, __format, __type); \ RT2X00DEBUGFS_OPS_WRITE(__name, __type); \ \ static const struct file_operations rt2x00debug_fop_##__name = {\ .owner = THIS_MODULE, \ .read = rt2x00debug_read_##__name, \ .write = rt2x00debug_write_##__name, \ .open = rt2x00debug_file_open, \ .release = rt2x00debug_file_release, \ .llseek = generic_file_llseek, \ }; RT2X00DEBUGFS_OPS(csr, "0x%.8x\n", u32); RT2X00DEBUGFS_OPS(eeprom, "0x%.4x\n", u16); RT2X00DEBUGFS_OPS(bbp, "0x%.2x\n", u8); RT2X00DEBUGFS_OPS(rf, "0x%.8x\n", u32); RT2X00DEBUGFS_OPS(rfcsr, "0x%.2x\n", u8); static ssize_t 
rt2x00debug_read_dev_flags(struct file *file, char __user *buf, size_t length, loff_t *offset) { struct rt2x00debug_intf *intf = file->private_data; char line[16]; size_t size; if (*offset) return 0; size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->flags); return simple_read_from_buffer(buf, length, offset, line, size); } static const struct file_operations rt2x00debug_fop_dev_flags = { .owner = THIS_MODULE, .read = rt2x00debug_read_dev_flags, .open = rt2x00debug_file_open, .release = rt2x00debug_file_release, .llseek = default_llseek, }; static ssize_t rt2x00debug_read_cap_flags(struct file *file, char __user *buf, size_t length, loff_t *offset) { struct rt2x00debug_intf *intf = file->private_data; char line[16]; size_t size; if (*offset) return 0; size = sprintf(line, "0x%.8x\n", (unsigned int)intf->rt2x00dev->cap_flags); return simple_read_from_buffer(buf, length, offset, line, size); } static const struct file_operations rt2x00debug_fop_cap_flags = { .owner = THIS_MODULE, .read = rt2x00debug_read_cap_flags, .open = rt2x00debug_file_open, .release = rt2x00debug_file_release, .llseek = default_llseek, }; static ssize_t rt2x00debug_write_restart_hw(struct file *file, const char __user *buf, size_t length, loff_t *offset) { struct rt2x00debug_intf *intf = file->private_data; struct rt2x00_dev *rt2x00dev = intf->rt2x00dev; static unsigned long last_reset = INITIAL_JIFFIES; if (!rt2x00_has_cap_restart_hw(rt2x00dev)) return -EOPNOTSUPP; if (time_before(jiffies, last_reset + msecs_to_jiffies(2000))) return -EBUSY; last_reset = jiffies; ieee80211_restart_hw(rt2x00dev->hw); return length; } static const struct file_operations rt2x00debug_restart_hw = { .owner = THIS_MODULE, .write = rt2x00debug_write_restart_hw, .open = simple_open, .llseek = generic_file_llseek, }; static void rt2x00debug_create_file_driver(const char *name, struct rt2x00debug_intf *intf, struct debugfs_blob_wrapper *blob) { char *data; data = kzalloc(3 * MAX_LINE_LENGTH, GFP_KERNEL); if (!data) return; blob->data = data; data += sprintf(data, "driver:\t%s\n", intf->rt2x00dev->ops->name); data += sprintf(data, "version:\t%s\n", DRV_VERSION); blob->size = strlen(blob->data); debugfs_create_blob(name, 0400, intf->driver_folder, blob); } static void rt2x00debug_create_file_chipset(const char *name, struct rt2x00debug_intf *intf, struct debugfs_blob_wrapper *blob) { const struct rt2x00debug *debug = intf->debug; char *data; data = kzalloc(9 * MAX_LINE_LENGTH, GFP_KERNEL); if (!data) return; blob->data = data; data += sprintf(data, "rt chip:\t%04x\n", intf->rt2x00dev->chip.rt); data += sprintf(data, "rf chip:\t%04x\n", intf->rt2x00dev->chip.rf); data += sprintf(data, "revision:\t%04x\n", intf->rt2x00dev->chip.rev); data += sprintf(data, "\n"); data += sprintf(data, "register\tbase\twords\twordsize\n"); #define RT2X00DEBUGFS_SPRINTF_REGISTER(__name) \ { \ if (debug->__name.read) \ data += sprintf(data, __stringify(__name) \ "\t%d\t%d\t%d\n", \ debug->__name.word_base, \ debug->__name.word_count, \ debug->__name.word_size); \ } RT2X00DEBUGFS_SPRINTF_REGISTER(csr); RT2X00DEBUGFS_SPRINTF_REGISTER(eeprom); RT2X00DEBUGFS_SPRINTF_REGISTER(bbp); RT2X00DEBUGFS_SPRINTF_REGISTER(rf); RT2X00DEBUGFS_SPRINTF_REGISTER(rfcsr); #undef RT2X00DEBUGFS_SPRINTF_REGISTER blob->size = strlen(blob->data); debugfs_create_blob(name, 0400, intf->driver_folder, blob); } void rt2x00debug_register(struct rt2x00_dev *rt2x00dev) { const struct rt2x00debug *debug = rt2x00dev->ops->debugfs; struct rt2x00debug_intf *intf; struct dentry *queue_folder; 
struct dentry *register_folder; intf = kzalloc(sizeof(struct rt2x00debug_intf), GFP_KERNEL); if (!intf) { rt2x00_err(rt2x00dev, "Failed to allocate debug handler\n"); return; } intf->debug = debug; intf->rt2x00dev = rt2x00dev; rt2x00dev->debugfs_intf = intf; intf->driver_folder = debugfs_create_dir(intf->rt2x00dev->ops->name, rt2x00dev->hw->wiphy->debugfsdir); rt2x00debug_create_file_driver("driver", intf, &intf->driver_blob); rt2x00debug_create_file_chipset("chipset", intf, &intf->chipset_blob); debugfs_create_file("dev_flags", 0400, intf->driver_folder, intf, &rt2x00debug_fop_dev_flags); debugfs_create_file("cap_flags", 0400, intf->driver_folder, intf, &rt2x00debug_fop_cap_flags); debugfs_create_file("restart_hw", 0200, intf->driver_folder, intf, &rt2x00debug_restart_hw); register_folder = debugfs_create_dir("register", intf->driver_folder); #define RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(__intf, __name) \ ({ \ if (debug->__name.read) { \ debugfs_create_u32(__stringify(__name) "_offset", 0600, \ register_folder, \ &(__intf)->offset_##__name); \ \ debugfs_create_file(__stringify(__name) "_value", 0600, \ register_folder, (__intf), \ &rt2x00debug_fop_##__name); \ } \ }) RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, csr); RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, eeprom); RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, bbp); RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, rf); RT2X00DEBUGFS_CREATE_REGISTER_ENTRY(intf, rfcsr); #undef RT2X00DEBUGFS_CREATE_REGISTER_ENTRY queue_folder = debugfs_create_dir("queue", intf->driver_folder); debugfs_create_file("dump", 0400, queue_folder, intf, &rt2x00debug_fop_queue_dump); skb_queue_head_init(&intf->frame_dump_skbqueue); init_waitqueue_head(&intf->frame_dump_waitqueue); debugfs_create_file("queue", 0400, queue_folder, intf, &rt2x00debug_fop_queue_stats); #ifdef CONFIG_RT2X00_LIB_CRYPTO if (rt2x00_has_cap_hw_crypto(rt2x00dev)) debugfs_create_file("crypto", 0444, queue_folder, intf, &rt2x00debug_fop_crypto_stats); #endif return; } void rt2x00debug_deregister(struct rt2x00_dev *rt2x00dev) { struct rt2x00debug_intf *intf = rt2x00dev->debugfs_intf; if (unlikely(!intf)) return; skb_queue_purge(&intf->frame_dump_skbqueue); debugfs_remove_recursive(intf->driver_folder); kfree(intf->chipset_blob.data); kfree(intf->driver_blob.data); kfree(intf); rt2x00dev->debugfs_intf = NULL; }
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00debug.c
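/*
 * Illustrative sketch, not part of the driver above: the per-cipher counter
 * update performed by rt2x00debug_update_crypto(). CIPHER_NONE occupies
 * slot 0 of the cipher enum, so the value is shifted down by one before it
 * is used as an index into the statistics array. The enum values and the
 * stats struct below are simplified stand-ins for the kernel definitions.
 */
#include <stdio.h>

enum cipher_sketch { CIPHER_NONE, CIPHER_WEP64, CIPHER_WEP128,
		     CIPHER_TKIP, CIPHER_AES, CIPHER_MAX };
enum rx_crypto_sketch { RX_CRYPTO_SUCCESS, RX_CRYPTO_FAIL_ICV,
			RX_CRYPTO_FAIL_MIC, RX_CRYPTO_FAIL_KEY };

struct crypto_stats_sketch {
	unsigned long success, icv_error, mic_error, key_error;
};

static struct crypto_stats_sketch stats[CIPHER_MAX - 1];

static void update_crypto(enum cipher_sketch cipher,
			  enum rx_crypto_sketch status)
{
	int idx;

	if (cipher == CIPHER_NONE || cipher >= CIPHER_MAX)
		return;

	idx = cipher - 1; /* drop the CIPHER_NONE slot */
	stats[idx].success   += (status == RX_CRYPTO_SUCCESS);
	stats[idx].icv_error += (status == RX_CRYPTO_FAIL_ICV);
	stats[idx].mic_error += (status == RX_CRYPTO_FAIL_MIC);
	stats[idx].key_error += (status == RX_CRYPTO_FAIL_KEY);
}

int main(void)
{
	update_crypto(CIPHER_AES, RX_CRYPTO_SUCCESS);
	update_crypto(CIPHER_TKIP, RX_CRYPTO_FAIL_MIC);
	printf("AES ok: %lu, TKIP mic err: %lu\n",
	       stats[CIPHER_AES - 1].success,
	       stats[CIPHER_TKIP - 1].mic_error);
	return 0;
}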
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt73usb Abstract: rt73usb device specific routines. Supported chipsets: rt2571W & rt2671. */ #include <linux/crc-itu-t.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include "rt2x00.h" #include "rt2x00usb.h" #include "rt73usb.h" /* * Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); /* * Register access. * All access to the CSR registers will go through the methods * rt2x00usb_register_read and rt2x00usb_register_write. * BBP and RF register require indirect register access, * and use the CSR registers BBPCSR and RFCSR to achieve this. * These indirect registers work with busy bits, * and we will try maximal REGISTER_BUSY_COUNT times to access * the register while taking a REGISTER_BUSY_DELAY us delay * between each attampt. When the busy bit is still set at that time, * the access attempt is considered to have failed, * and we will print an error. * The _lock versions must be used if you already hold the csr_mutex */ #define WAIT_FOR_BBP(__dev, __reg) \ rt2x00usb_regbusy_read((__dev), PHY_CSR3, PHY_CSR3_BUSY, (__reg)) #define WAIT_FOR_RF(__dev, __reg) \ rt2x00usb_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg)) static void rt73usb_bbp_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, PHY_CSR3_VALUE, value); rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word); rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0); rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR3, reg); } mutex_unlock(&rt2x00dev->csr_mutex); } static u8 rt73usb_bbp_read(struct rt2x00_dev *rt2x00dev, const unsigned int word) { u32 reg; u8 value; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the read request into the register. * After the data has been written, we wait until hardware * returns the correct value, if at any time the register * doesn't become available in time, reg will be 0xffffffff * which means we return 0xff to the caller. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word); rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1); rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR3, reg); WAIT_FOR_BBP(rt2x00dev, &reg); } value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); mutex_unlock(&rt2x00dev->csr_mutex); return value; } static void rt73usb_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u32 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RF becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_RF(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, PHY_CSR4_VALUE, value); /* * RF5225 and RF2527 contain 21 bits per RF register value, * all others contain 20 bits. 
*/ rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS, 20 + (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527))); rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0); rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1); rt2x00usb_register_write_lock(rt2x00dev, PHY_CSR4, reg); rt2x00_rf_write(rt2x00dev, word, value); } mutex_unlock(&rt2x00dev->csr_mutex); } #ifdef CONFIG_RT2X00_LIB_DEBUGFS static const struct rt2x00debug rt73usb_rt2x00debug = { .owner = THIS_MODULE, .csr = { .read = rt2x00usb_register_read, .write = rt2x00usb_register_write, .flags = RT2X00DEBUGFS_OFFSET, .word_base = CSR_REG_BASE, .word_size = sizeof(u32), .word_count = CSR_REG_SIZE / sizeof(u32), }, .eeprom = { .read = rt2x00_eeprom_read, .write = rt2x00_eeprom_write, .word_base = EEPROM_BASE, .word_size = sizeof(u16), .word_count = EEPROM_SIZE / sizeof(u16), }, .bbp = { .read = rt73usb_bbp_read, .write = rt73usb_bbp_write, .word_base = BBP_BASE, .word_size = sizeof(u8), .word_count = BBP_SIZE / sizeof(u8), }, .rf = { .read = rt2x00_rf_read, .write = rt73usb_rf_write, .word_base = RF_BASE, .word_size = sizeof(u32), .word_count = RF_SIZE / sizeof(u32), }, }; #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ static int rt73usb_rfkill_poll(struct rt2x00_dev *rt2x00dev) { u32 reg; reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR13); return rt2x00_get_field32(reg, MAC_CSR13_VAL7); } #ifdef CONFIG_RT2X00_LIB_LEDS static void rt73usb_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; unsigned int a_mode = (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ); unsigned int bg_mode = (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ); if (led->type == LED_TYPE_RADIO) { rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, MCU_LEDCS_RADIO_STATUS, enabled); rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL, 0, led->rt2x00dev->led_mcu_reg, REGISTER_TIMEOUT); } else if (led->type == LED_TYPE_ASSOC) { rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, MCU_LEDCS_LINK_BG_STATUS, bg_mode); rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, MCU_LEDCS_LINK_A_STATUS, a_mode); rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL, 0, led->rt2x00dev->led_mcu_reg, REGISTER_TIMEOUT); } else if (led->type == LED_TYPE_QUALITY) { /* * The brightness is divided into 6 levels (0 - 5), * this means we need to convert the brightness * argument into the matching level within that range. */ rt2x00usb_vendor_request_sw(led->rt2x00dev, USB_LED_CONTROL, brightness / (LED_FULL / 6), led->rt2x00dev->led_mcu_reg, REGISTER_TIMEOUT); } } static int rt73usb_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); u32 reg; reg = rt2x00usb_register_read(led->rt2x00dev, MAC_CSR14); rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on); rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off); rt2x00usb_register_write(led->rt2x00dev, MAC_CSR14, reg); return 0; } static void rt73usb_init_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, enum led_type type) { led->rt2x00dev = rt2x00dev; led->type = type; led->led_dev.brightness_set = rt73usb_brightness_set; led->led_dev.blink_set = rt73usb_blink_set; led->flags = LED_INITIALIZED; } #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Configuration handlers. 
*/ static int rt73usb_config_shared_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { struct hw_key_entry key_entry; struct rt2x00_field32 field; u32 mask; u32 reg; if (crypto->cmd == SET_KEY) { /* * rt2x00lib can't determine the correct free * key_idx for shared keys. We have 1 register * with key valid bits. The goal is simple, read * the register, if that is full we have no slots * left. * Note that each BSS is allowed to have up to 4 * shared keys, so put a mask over the allowed * entries. */ mask = (0xf << crypto->bssidx); reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR0); reg &= mask; if (reg && reg == mask) return -ENOSPC; key->hw_key_idx += reg ? ffz(reg) : 0; /* * Upload key to hardware */ memcpy(key_entry.key, crypto->key, sizeof(key_entry.key)); memcpy(key_entry.tx_mic, crypto->tx_mic, sizeof(key_entry.tx_mic)); memcpy(key_entry.rx_mic, crypto->rx_mic, sizeof(key_entry.rx_mic)); reg = SHARED_KEY_ENTRY(key->hw_key_idx); rt2x00usb_register_multiwrite(rt2x00dev, reg, &key_entry, sizeof(key_entry)); /* * The cipher types are stored over 2 registers. * bssidx 0 and 1 keys are stored in SEC_CSR1 and * bssidx 1 and 2 keys are stored in SEC_CSR5. * Using the correct defines correctly will cause overhead, * so just calculate the correct offset. */ if (key->hw_key_idx < 8) { field.bit_offset = (3 * key->hw_key_idx); field.bit_mask = 0x7 << field.bit_offset; reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR1); rt2x00_set_field32(&reg, field, crypto->cipher); rt2x00usb_register_write(rt2x00dev, SEC_CSR1, reg); } else { field.bit_offset = (3 * (key->hw_key_idx - 8)); field.bit_mask = 0x7 << field.bit_offset; reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR5); rt2x00_set_field32(&reg, field, crypto->cipher); rt2x00usb_register_write(rt2x00dev, SEC_CSR5, reg); } /* * The driver does not support the IV/EIV generation * in hardware. However it doesn't support the IV/EIV * inside the ieee80211 frame either, but requires it * to be provided separately for the descriptor. * rt2x00lib will cut the IV/EIV data out of all frames * given to us by mac80211, but we must tell mac80211 * to generate the IV/EIV data. */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; } /* * SEC_CSR0 contains only single-bit fields to indicate * a particular key is valid. Because using the FIELD32() * defines directly will cause a lot of overhead we use * a calculation to determine the correct bit directly. */ mask = 1 << key->hw_key_idx; reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR0); if (crypto->cmd == SET_KEY) reg |= mask; else if (crypto->cmd == DISABLE_KEY) reg &= ~mask; rt2x00usb_register_write(rt2x00dev, SEC_CSR0, reg); return 0; } static int rt73usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { struct hw_pairwise_ta_entry addr_entry; struct hw_key_entry key_entry; u32 mask; u32 reg; if (crypto->cmd == SET_KEY) { /* * rt2x00lib can't determine the correct free * key_idx for pairwise keys. We have 2 registers * with key valid bits. The goal is simple, read * the first register, if that is full move to * the next register. * When both registers are full, we drop the key, * otherwise we use the first invalid entry. */ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR2); if (reg && reg == ~0) { key->hw_key_idx = 32; reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR3); if (reg && reg == ~0) return -ENOSPC; } key->hw_key_idx += reg ? 
ffz(reg) : 0; /* * Upload key to hardware */ memcpy(key_entry.key, crypto->key, sizeof(key_entry.key)); memcpy(key_entry.tx_mic, crypto->tx_mic, sizeof(key_entry.tx_mic)); memcpy(key_entry.rx_mic, crypto->rx_mic, sizeof(key_entry.rx_mic)); reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx); rt2x00usb_register_multiwrite(rt2x00dev, reg, &key_entry, sizeof(key_entry)); /* * Send the address and cipher type to the hardware register. */ memset(&addr_entry, 0, sizeof(addr_entry)); memcpy(&addr_entry, crypto->address, ETH_ALEN); addr_entry.cipher = crypto->cipher; reg = PAIRWISE_TA_ENTRY(key->hw_key_idx); rt2x00usb_register_multiwrite(rt2x00dev, reg, &addr_entry, sizeof(addr_entry)); /* * Enable pairwise lookup table for given BSS idx, * without this received frames will not be decrypted * by the hardware. */ reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR4); reg |= (1 << crypto->bssidx); rt2x00usb_register_write(rt2x00dev, SEC_CSR4, reg); /* * The driver does not support the IV/EIV generation * in hardware. However it doesn't support the IV/EIV * inside the ieee80211 frame either, but requires it * to be provided separately for the descriptor. * rt2x00lib will cut the IV/EIV data out of all frames * given to us by mac80211, but we must tell mac80211 * to generate the IV/EIV data. */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; } /* * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate * a particular key is valid. Because using the FIELD32() * defines directly will cause a lot of overhead we use * a calculation to determine the correct bit directly. */ if (key->hw_key_idx < 32) { mask = 1 << key->hw_key_idx; reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR2); if (crypto->cmd == SET_KEY) reg |= mask; else if (crypto->cmd == DISABLE_KEY) reg &= ~mask; rt2x00usb_register_write(rt2x00dev, SEC_CSR2, reg); } else { mask = 1 << (key->hw_key_idx - 32); reg = rt2x00usb_register_read(rt2x00dev, SEC_CSR3); if (crypto->cmd == SET_KEY) reg |= mask; else if (crypto->cmd == DISABLE_KEY) reg &= ~mask; rt2x00usb_register_write(rt2x00dev, SEC_CSR3, reg); } return 0; } static void rt73usb_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags) { u32 reg; /* * Start configuration steps. * Note that the version error will always be dropped * and broadcast frames will always be accepted since * there is no filter for it at this time. */ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL, !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST, !(filter_flags & FIF_ALLMULTI)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS, !(filter_flags & FIF_CONTROL)); rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); } static void rt73usb_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct rt2x00intf_conf *conf, const unsigned int flags) { u32 reg; if (flags & CONFIG_UPDATE_TYPE) { /* * Enable synchronisation. 
*/ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); } if (flags & CONFIG_UPDATE_MAC) { reg = le32_to_cpu(conf->mac[1]); rt2x00_set_field32(&reg, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); conf->mac[1] = cpu_to_le32(reg); rt2x00usb_register_multiwrite(rt2x00dev, MAC_CSR2, conf->mac, sizeof(conf->mac)); } if (flags & CONFIG_UPDATE_BSSID) { reg = le32_to_cpu(conf->bssid[1]); rt2x00_set_field32(&reg, MAC_CSR5_BSS_ID_MASK, 3); conf->bssid[1] = cpu_to_le32(reg); rt2x00usb_register_multiwrite(rt2x00dev, MAC_CSR4, conf->bssid, sizeof(conf->bssid)); } } static void rt73usb_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, u32 changed) { u32 reg; reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32); rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); if (changed & BSS_CHANGED_ERP_PREAMBLE) { reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR4); rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1); rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, !!erp->short_preamble); rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg); } if (changed & BSS_CHANGED_BASIC_RATES) rt2x00usb_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates); if (changed & BSS_CHANGED_BEACON_INT) { reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, erp->beacon_int * 16); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); } if (changed & BSS_CHANGED_ERP_SLOT) { reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR9); rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time); rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR8); rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs); rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs); rt2x00usb_register_write(rt2x00dev, MAC_CSR8, reg); } } static void rt73usb_config_antenna_5x(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r3; u8 r4; u8 r77; u8 temp; r3 = rt73usb_bbp_read(rt2x00dev, 3); r4 = rt73usb_bbp_read(rt2x00dev, 4); r77 = rt73usb_bbp_read(rt2x00dev, 77); rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); /* * Configure the RX antenna. 
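* BBP R4 selects the antenna control mode (1 = fixed antenna, 2 = hardware diversity) and BBP R77 selects which fixed antenna is used; note that the R77 encoding is inverted between the 2.4 GHz and 5 GHz bands.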
*/ switch (ant->rx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); temp = !rt2x00_has_cap_frame_type(rt2x00dev) && (rt2x00dev->curr_band != NL80211_BAND_5GHZ); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, temp); break; case ANTENNA_A: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); else rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); break; case ANTENNA_B: default: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); else rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); break; } rt73usb_bbp_write(rt2x00dev, 77, r77); rt73usb_bbp_write(rt2x00dev, 3, r3); rt73usb_bbp_write(rt2x00dev, 4, r4); } static void rt73usb_config_antenna_2x(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r3; u8 r4; u8 r77; r3 = rt73usb_bbp_read(rt2x00dev, 3); r4 = rt73usb_bbp_read(rt2x00dev, 4); r77 = rt73usb_bbp_read(rt2x00dev, 77); rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, 0); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, !rt2x00_has_cap_frame_type(rt2x00dev)); /* * Configure the RX antenna. */ switch (ant->rx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); break; case ANTENNA_A: rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); break; case ANTENNA_B: default: rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); break; } rt73usb_bbp_write(rt2x00dev, 77, r77); rt73usb_bbp_write(rt2x00dev, 3, r3); rt73usb_bbp_write(rt2x00dev, 4, r4); } struct antenna_sel { u8 word; /* * value[0] -> non-LNA * value[1] -> LNA */ u8 value[2]; }; static const struct antenna_sel antenna_sel_a[] = { { 96, { 0x58, 0x78 } }, { 104, { 0x38, 0x48 } }, { 75, { 0xfe, 0x80 } }, { 86, { 0xfe, 0x80 } }, { 88, { 0xfe, 0x80 } }, { 35, { 0x60, 0x60 } }, { 97, { 0x58, 0x58 } }, { 98, { 0x58, 0x58 } }, }; static const struct antenna_sel antenna_sel_bg[] = { { 96, { 0x48, 0x68 } }, { 104, { 0x2c, 0x3c } }, { 75, { 0xfe, 0x80 } }, { 86, { 0xfe, 0x80 } }, { 88, { 0xfe, 0x80 } }, { 35, { 0x50, 0x50 } }, { 97, { 0x48, 0x48 } }, { 98, { 0x48, 0x48 } }, }; static void rt73usb_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { const struct antenna_sel *sel; unsigned int lna; unsigned int i; u32 reg; /* * We should never come here because rt2x00lib is supposed * to catch this and send us the correct antenna explicitely. 
*/ BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || ant->tx == ANTENNA_SW_DIVERSITY); if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { sel = antenna_sel_a; lna = rt2x00_has_cap_external_lna_a(rt2x00dev); } else { sel = antenna_sel_bg; lna = rt2x00_has_cap_external_lna_bg(rt2x00dev); } for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) rt73usb_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]); reg = rt2x00usb_register_read(rt2x00dev, PHY_CSR0); rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG, (rt2x00dev->curr_band == NL80211_BAND_2GHZ)); rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A, (rt2x00dev->curr_band == NL80211_BAND_5GHZ)); rt2x00usb_register_write(rt2x00dev, PHY_CSR0, reg); if (rt2x00_rf(rt2x00dev, RF5226) || rt2x00_rf(rt2x00dev, RF5225)) rt73usb_config_antenna_5x(rt2x00dev, ant); else if (rt2x00_rf(rt2x00dev, RF2528) || rt2x00_rf(rt2x00dev, RF2527)) rt73usb_config_antenna_2x(rt2x00dev, ant); } static void rt73usb_config_lna_gain(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { u16 eeprom; short lna_gain = 0; if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) { if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) lna_gain += 14; eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG); lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1); } else { eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A); lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1); } rt2x00dev->lna_gain = lna_gain; } static void rt73usb_config_channel(struct rt2x00_dev *rt2x00dev, struct rf_channel *rf, const int txpower) { u8 r3; u8 r94; u8 smart; rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527)); r3 = rt73usb_bbp_read(rt2x00dev, 3); rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); rt73usb_bbp_write(rt2x00dev, 3, r3); r94 = 6; if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94)) r94 += txpower - MAX_TXPOWER; else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94)) r94 += txpower; rt73usb_bbp_write(rt2x00dev, 94, r94); rt73usb_rf_write(rt2x00dev, 1, rf->rf1); rt73usb_rf_write(rt2x00dev, 2, rf->rf2); rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); rt73usb_rf_write(rt2x00dev, 4, rf->rf4); rt73usb_rf_write(rt2x00dev, 1, rf->rf1); rt73usb_rf_write(rt2x00dev, 2, rf->rf2); rt73usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004); rt73usb_rf_write(rt2x00dev, 4, rf->rf4); rt73usb_rf_write(rt2x00dev, 1, rf->rf1); rt73usb_rf_write(rt2x00dev, 2, rf->rf2); rt73usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); rt73usb_rf_write(rt2x00dev, 4, rf->rf4); udelay(10); } static void rt73usb_config_txpower(struct rt2x00_dev *rt2x00dev, const int txpower) { struct rf_channel rf; rf.rf1 = rt2x00_rf_read(rt2x00dev, 1); rf.rf2 = rt2x00_rf_read(rt2x00dev, 2); rf.rf3 = rt2x00_rf_read(rt2x00dev, 3); rf.rf4 = rt2x00_rf_read(rt2x00dev, 4); rt73usb_config_channel(rt2x00dev, &rf, txpower); } static void rt73usb_config_retry_limit(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { u32 reg; reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR4); rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1); rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0); rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0); rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT, libconf->conf->long_frame_max_tx_count); rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT, libconf->conf->short_frame_max_tx_count); 
rt2x00usb_register_write(rt2x00dev, TXRX_CSR4, reg); } static void rt73usb_config_ps(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { enum dev_state state = (libconf->conf->flags & IEEE80211_CONF_PS) ? STATE_SLEEP : STATE_AWAKE; u32 reg; if (state == STATE_SLEEP) { reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR11); rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, rt2x00dev->beacon_int - 10); rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, libconf->conf->listen_interval - 1); rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 5); /* We must first disable autowake before it can be enabled */ rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0); rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg); rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 1); rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg); rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, USB_MODE_SLEEP, REGISTER_TIMEOUT); } else { reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR11); rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0); rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0); rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0); rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0); rt2x00usb_register_write(rt2x00dev, MAC_CSR11, reg); rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, USB_MODE_WAKEUP, REGISTER_TIMEOUT); } } static void rt73usb_config(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf, const unsigned int flags) { /* Always recalculate LNA gain before changing configuration */ rt73usb_config_lna_gain(rt2x00dev, libconf); if (flags & IEEE80211_CONF_CHANGE_CHANNEL) rt73usb_config_channel(rt2x00dev, &libconf->rf, libconf->conf->power_level); if ((flags & IEEE80211_CONF_CHANGE_POWER) && !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) rt73usb_config_txpower(rt2x00dev, libconf->conf->power_level); if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) rt73usb_config_retry_limit(rt2x00dev, libconf); if (flags & IEEE80211_CONF_CHANGE_PS) rt73usb_config_ps(rt2x00dev, libconf); } /* * Link tuning */ static void rt73usb_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u32 reg; /* * Update FCS error count from register. */ reg = rt2x00usb_register_read(rt2x00dev, STA_CSR0); qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR); /* * Update False CCA count from register. */ reg = rt2x00usb_register_read(rt2x00dev, STA_CSR1); qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); } static inline void rt73usb_set_vgc(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, u8 vgc_level) { if (qual->vgc_level != vgc_level) { rt73usb_bbp_write(rt2x00dev, 17, vgc_level); qual->vgc_level = vgc_level; qual->vgc_level_reg = vgc_level; } } static void rt73usb_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { rt73usb_set_vgc(rt2x00dev, qual, 0x20); } static void rt73usb_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, const u32 count) { u8 up_bound; u8 low_bound; /* * Determine r17 bounds. */ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { low_bound = 0x28; up_bound = 0x48; if (rt2x00_has_cap_external_lna_a(rt2x00dev)) { low_bound += 0x10; up_bound += 0x10; } } else { if (qual->rssi > -82) { low_bound = 0x1c; up_bound = 0x40; } else if (qual->rssi > -84) { low_bound = 0x1c; up_bound = 0x20; } else { low_bound = 0x1c; up_bound = 0x1c; } if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { low_bound += 0x14; up_bound += 0x10; } } /* * If we are not associated, we should go straight to the * dynamic CCA tuning. 
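* When associated, R17 (the VGC level) is picked directly from the RSSI; otherwise the false CCA count drives small +/-4 adjustments between low_bound and up_bound.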
*/ if (!rt2x00dev->intf_associated) goto dynamic_cca_tune; /* * Special big-R17 for very short distance */ if (qual->rssi > -35) { rt73usb_set_vgc(rt2x00dev, qual, 0x60); return; } /* * Special big-R17 for short distance */ if (qual->rssi >= -58) { rt73usb_set_vgc(rt2x00dev, qual, up_bound); return; } /* * Special big-R17 for middle-short distance */ if (qual->rssi >= -66) { rt73usb_set_vgc(rt2x00dev, qual, low_bound + 0x10); return; } /* * Special mid-R17 for middle distance */ if (qual->rssi >= -74) { rt73usb_set_vgc(rt2x00dev, qual, low_bound + 0x08); return; } /* * Special case: Change up_bound based on the rssi. * Lower up_bound when rssi is weaker than -74 dBm. */ up_bound -= 2 * (-74 - qual->rssi); if (low_bound > up_bound) up_bound = low_bound; if (qual->vgc_level > up_bound) { rt73usb_set_vgc(rt2x00dev, qual, up_bound); return; } dynamic_cca_tune: /* * r17 does not yet exceed upper limit, continue and base * the r17 tuning on the false CCA count. */ if ((qual->false_cca > 512) && (qual->vgc_level < up_bound)) rt73usb_set_vgc(rt2x00dev, qual, min_t(u8, qual->vgc_level + 4, up_bound)); else if ((qual->false_cca < 100) && (qual->vgc_level > low_bound)) rt73usb_set_vgc(rt2x00dev, qual, max_t(u8, qual->vgc_level - 4, low_bound)); } /* * Queue handlers. */ static void rt73usb_start_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); break; case QID_BEACON: reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1); rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); break; default: break; } } static void rt73usb_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); break; case QID_BEACON: reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); break; default: break; } } /* * Firmware functions */ static char *rt73usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) { return FIRMWARE_RT2571; } static int rt73usb_check_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { u16 fw_crc; u16 crc; /* * Only support 2kb firmware files. */ if (len != 2048) return FW_BAD_LENGTH; /* * The last 2 bytes in the firmware array are the crc checksum itself, * this means that we should never pass those 2 bytes to the crc * algorithm. */ fw_crc = (data[len - 2] << 8 | data[len - 1]); /* * Use the crc itu-t algorithm. */ crc = crc_itu_t(0, data, len - 2); crc = crc_itu_t_byte(crc, 0); crc = crc_itu_t_byte(crc, 0); return (fw_crc == crc) ? FW_OK : FW_BAD_CRC; } static int rt73usb_load_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { unsigned int i; int status; u32 reg; /* * Wait for stable hardware.
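* MAC_CSR0 reads back as zero until the MAC is ready, so poll it up to 100 times with 1 ms sleeps before giving up with -EBUSY.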
*/ for (i = 0; i < 100; i++) { reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR0); if (reg) break; msleep(1); } if (!reg) { rt2x00_err(rt2x00dev, "Unstable hardware\n"); return -EBUSY; } /* * Write firmware to device. */ rt2x00usb_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, data, len); /* * Send firmware request to device to load firmware, * we need to specify a long timeout time. */ status = rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0, USB_MODE_FIRMWARE, REGISTER_TIMEOUT_FIRMWARE); if (status < 0) { rt2x00_err(rt2x00dev, "Failed to write Firmware to device\n"); return status; } return 0; } /* * Initialization functions. */ static int rt73usb_init_registers(struct rt2x00_dev *rt2x00dev) { u32 reg; reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_AUTO_TX_SEQ, 1); rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0); rt2x00_set_field32(&reg, TXRX_CSR0_TX_WITHOUT_WAITING, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR0, reg); reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1, 30); /* Rssi */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2, 42); /* OFDM Rate */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3, 30); /* Rssi */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3_VALID, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR1, reg); /* * CCK TXD BBP registers */ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR2); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0, 13); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1, 12); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2, 11); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3, 10); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3_VALID, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR2, reg); /* * OFDM TXD BBP registers */ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR3); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0, 7); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1, 6); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2, 5); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2_VALID, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR3, reg); reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR7); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_6MBS, 59); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_9MBS, 53); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_12MBS, 49); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_18MBS, 46); rt2x00usb_register_write(rt2x00dev, TXRX_CSR7, reg); reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR8); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_24MBS, 44); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_36MBS, 42); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_48MBS, 42); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42); rt2x00usb_register_write(rt2x00dev, TXRX_CSR8, reg); reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00_set_field32(&reg, 
TXRX_CSR9_TIMESTAMP_COMPENSATE, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); rt2x00usb_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR6); rt2x00_set_field32(&reg, MAC_CSR6_MAX_FRAME_UNIT, 0xfff); rt2x00usb_register_write(rt2x00dev, MAC_CSR6, reg); rt2x00usb_register_write(rt2x00dev, MAC_CSR10, 0x00000718); if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) return -EBUSY; rt2x00usb_register_write(rt2x00dev, MAC_CSR13, 0x00007f00); /* * Invalidate all Shared Keys (SEC_CSR0), * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5) */ rt2x00usb_register_write(rt2x00dev, SEC_CSR0, 0x00000000); rt2x00usb_register_write(rt2x00dev, SEC_CSR1, 0x00000000); rt2x00usb_register_write(rt2x00dev, SEC_CSR5, 0x00000000); reg = 0x000023b0; if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527)) rt2x00_set_field32(&reg, PHY_CSR1_RF_RPI, 1); rt2x00usb_register_write(rt2x00dev, PHY_CSR1, reg); rt2x00usb_register_write(rt2x00dev, PHY_CSR5, 0x00040a06); rt2x00usb_register_write(rt2x00dev, PHY_CSR6, 0x00080606); rt2x00usb_register_write(rt2x00dev, PHY_CSR7, 0x00000408); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR9); rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0); rt2x00usb_register_write(rt2x00dev, MAC_CSR9, reg); /* * Clear all beacons * For the Beacon base registers we only need to clear * the first byte since that byte contains the VALID and OWNER * bits which (when set to 0) will invalidate the entire beacon. */ rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0); rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0); rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0); rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0); /* * We must clear the error counters. * These registers are cleared on read, * so we may pass a useless variable to store the value. */ reg = rt2x00usb_register_read(rt2x00dev, STA_CSR0); reg = rt2x00usb_register_read(rt2x00dev, STA_CSR1); reg = rt2x00usb_register_read(rt2x00dev, STA_CSR2); /* * Reset MAC and BBP registers. 
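* Assert the MAC_CSR1 soft-reset and BBP-reset bits, release them again and finally raise HOST_READY.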
*/ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1); rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1); rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0); rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0); rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1); rt2x00usb_register_write(rt2x00dev, MAC_CSR1, reg); return 0; } static int rt73usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) { unsigned int i; u8 value; for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { value = rt73usb_bbp_read(rt2x00dev, 0); if ((value != 0xff) && (value != 0x00)) return 0; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n"); return -EACCES; } static int rt73usb_init_bbp(struct rt2x00_dev *rt2x00dev) { unsigned int i; u16 eeprom; u8 reg_id; u8 value; if (unlikely(rt73usb_wait_bbp_ready(rt2x00dev))) return -EACCES; rt73usb_bbp_write(rt2x00dev, 3, 0x80); rt73usb_bbp_write(rt2x00dev, 15, 0x30); rt73usb_bbp_write(rt2x00dev, 21, 0xc8); rt73usb_bbp_write(rt2x00dev, 22, 0x38); rt73usb_bbp_write(rt2x00dev, 23, 0x06); rt73usb_bbp_write(rt2x00dev, 24, 0xfe); rt73usb_bbp_write(rt2x00dev, 25, 0x0a); rt73usb_bbp_write(rt2x00dev, 26, 0x0d); rt73usb_bbp_write(rt2x00dev, 32, 0x0b); rt73usb_bbp_write(rt2x00dev, 34, 0x12); rt73usb_bbp_write(rt2x00dev, 37, 0x07); rt73usb_bbp_write(rt2x00dev, 39, 0xf8); rt73usb_bbp_write(rt2x00dev, 41, 0x60); rt73usb_bbp_write(rt2x00dev, 53, 0x10); rt73usb_bbp_write(rt2x00dev, 54, 0x18); rt73usb_bbp_write(rt2x00dev, 60, 0x10); rt73usb_bbp_write(rt2x00dev, 61, 0x04); rt73usb_bbp_write(rt2x00dev, 62, 0x04); rt73usb_bbp_write(rt2x00dev, 75, 0xfe); rt73usb_bbp_write(rt2x00dev, 86, 0xfe); rt73usb_bbp_write(rt2x00dev, 88, 0xfe); rt73usb_bbp_write(rt2x00dev, 90, 0x0f); rt73usb_bbp_write(rt2x00dev, 99, 0x00); rt73usb_bbp_write(rt2x00dev, 102, 0x16); rt73usb_bbp_write(rt2x00dev, 107, 0x04); for (i = 0; i < EEPROM_BBP_SIZE; i++) { eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i); if (eeprom != 0xffff && eeprom != 0x0000) { reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); rt73usb_bbp_write(rt2x00dev, reg_id, value); } } return 0; } /* * Device state switch handlers. */ static int rt73usb_enable_radio(struct rt2x00_dev *rt2x00dev) { /* * Initialize all registers. */ if (unlikely(rt73usb_init_registers(rt2x00dev) || rt73usb_init_bbp(rt2x00dev))) return -EIO; return 0; } static void rt73usb_disable_radio(struct rt2x00_dev *rt2x00dev) { rt2x00usb_register_write(rt2x00dev, MAC_CSR10, 0x00001818); /* * Disable synchronisation. */ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, 0); rt2x00usb_disable_radio(rt2x00dev); } static int rt73usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u32 reg, reg2; unsigned int i; bool put_to_sleep; put_to_sleep = (state != STATE_AWAKE); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR12); rt2x00_set_field32(&reg, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep); rt2x00_set_field32(&reg, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep); rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg); /* * Device is not guaranteed to be in the requested state yet. * We must wait until the register indicates that the * device has entered the correct state. 
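* Poll the MAC_CSR12 BBP_CURRENT_STATE field, re-issuing the request and sleeping 10 ms between reads, for up to REGISTER_BUSY_COUNT attempts before returning -EBUSY.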
*/ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { reg2 = rt2x00usb_register_read(rt2x00dev, MAC_CSR12); state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE); if (state == !put_to_sleep) return 0; rt2x00usb_register_write(rt2x00dev, MAC_CSR12, reg); msleep(10); } return -EBUSY; } static int rt73usb_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: retval = rt73usb_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: rt73usb_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: retval = rt73usb_set_state(rt2x00dev, state); break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n", state, retval); return retval; } /* * TX descriptor initialization */ static void rt73usb_write_tx_desc(struct queue_entry *entry, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *txd = (__le32 *) entry->skb->data; u32 word; /* * Start writing the descriptor words. */ word = rt2x00_desc_read(txd, 0); rt2x00_set_field32(&word, TXD_W0_BURST, test_bit(ENTRY_TXD_BURST, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_VALID, 1); rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_ACK, test_bit(ENTRY_TXD_ACK, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_OFDM, (txdesc->rate_mode == RATE_MODE_OFDM)); rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_KEY_TABLE, test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx); rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); rt2x00_set_field32(&word, TXD_W0_BURST2, test_bit(ENTRY_TXD_BURST, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher); rt2x00_desc_write(txd, 0, word); word = rt2x00_desc_read(txd, 1); rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid); rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs); rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min); rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max); rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); rt2x00_desc_write(txd, 1, word); word = rt2x00_desc_read(txd, 2); rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->u.plcp.length_low); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->u.plcp.length_high); rt2x00_desc_write(txd, 2, word); if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { _rt2x00_desc_write(txd, 3, skbdesc->iv[0]); _rt2x00_desc_write(txd, 4, skbdesc->iv[1]); } word = rt2x00_desc_read(txd, 5); rt2x00_set_field32(&word, TXD_W5_TX_POWER, TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power)); rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); 
rt2x00_desc_write(txd, 5, word); /* * Register descriptor details in skb frame descriptor. */ skbdesc->flags |= SKBDESC_DESC_IN_SKB; skbdesc->desc = txd; skbdesc->desc_len = TXD_DESC_SIZE; } /* * TX data initialization */ static void rt73usb_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; unsigned int beacon_base; unsigned int padding_len; u32 orig_reg, reg; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); orig_reg = reg; rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); /* * Add space for the descriptor in front of the skb. */ skb_push(entry->skb, TXD_DESC_SIZE); memset(entry->skb->data, 0, TXD_DESC_SIZE); /* * Write the TX descriptor for the beacon. */ rt73usb_write_tx_desc(entry, txdesc); /* * Dump beacon to userspace through debugfs. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); /* * Write entire beacon with descriptor and padding to register. */ padding_len = roundup(entry->skb->len, 4) - entry->skb->len; if (padding_len && skb_pad(entry->skb, padding_len)) { rt2x00_err(rt2x00dev, "Failure padding beacon, aborting\n"); /* skb freed by skb_pad() on failure */ entry->skb = NULL; rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg); return; } beacon_base = HW_BEACON_OFFSET(entry->entry_idx); rt2x00usb_register_multiwrite(rt2x00dev, beacon_base, entry->skb->data, entry->skb->len + padding_len); /* * Enable beaconing again. * * For fair beacon generation between participating Wi-Fi stations, * set the TBTT phase adaptive adjustment step to 8us (default 16us) */ rt2x00usb_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); /* * Clean up the beacon skb. */ dev_kfree_skb(entry->skb); entry->skb = NULL; } static void rt73usb_clear_beacon(struct queue_entry *entry) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; unsigned int beacon_base; u32 orig_reg, reg; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ orig_reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR9); reg = orig_reg; rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, reg); /* * Clear beacon. */ beacon_base = HW_BEACON_OFFSET(entry->entry_idx); rt2x00usb_register_write(rt2x00dev, beacon_base, 0); /* * Restore beaconing state. */ rt2x00usb_register_write(rt2x00dev, TXRX_CSR9, orig_reg); } static int rt73usb_get_tx_data_len(struct queue_entry *entry) { int length; /* * The length _must_ be a multiple of 4, * but it must _not_ be a multiple of the USB packet size.
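* For example, with a 512 byte bulk max packet size a 512 byte frame is padded to 516 bytes, so the transfer always ends with a short packet.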
*/ length = roundup(entry->skb->len, 4); length += (4 * !(length % entry->queue->usb_maxpacket)); return length; } /* * RX control handlers */ static int rt73usb_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) { u8 offset = rt2x00dev->lna_gain; u8 lna; lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); switch (lna) { case 3: offset += 90; break; case 2: offset += 74; break; case 1: offset += 64; break; default: return 0; } if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { if (rt2x00_has_cap_external_lna_a(rt2x00dev)) { if (lna == 3 || lna == 2) offset += 10; } else { if (lna == 3) offset += 6; else if (lna == 2) offset += 8; } } return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; } static void rt73usb_fill_rxdone(struct queue_entry *entry, struct rxdone_entry_desc *rxdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *rxd = (__le32 *)entry->skb->data; u32 word0; u32 word1; /* * Copy descriptor to the skbdesc->desc buffer, making it safe from moving of * frame data in rt2x00usb. */ memcpy(skbdesc->desc, rxd, skbdesc->desc_len); rxd = (__le32 *)skbdesc->desc; /* * It is now safe to read the descriptor on all architectures. */ word0 = rt2x00_desc_read(rxd, 0); word1 = rt2x00_desc_read(rxd, 1); if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG); rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR); if (rxdesc->cipher != CIPHER_NONE) { rxdesc->iv[0] = _rt2x00_desc_read(rxd, 2); rxdesc->iv[1] = _rt2x00_desc_read(rxd, 3); rxdesc->dev_flags |= RXDONE_CRYPTO_IV; rxdesc->icv = _rt2x00_desc_read(rxd, 4); rxdesc->dev_flags |= RXDONE_CRYPTO_ICV; /* * Hardware has stripped IV/EIV data from 802.11 frame during * decryption. It has provided the data separately but rt2x00lib * should decide if it should be reinserted. */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; /* * The hardware has already checked the Michael Mic and has * stripped it from the frame. Signal this to mac80211. */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) rxdesc->flags |= RX_FLAG_MMIC_ERROR; } /* * Obtain the status about this packet. * When frame was received with an OFDM bitrate, * the signal is the PLCP value. If it was received with * a CCK bitrate the signal is the rate in 100kbit/s. */ rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); rxdesc->rssi = rt73usb_agc_to_rssi(rt2x00dev, word1); rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); if (rt2x00_get_field32(word0, RXD_W0_OFDM)) rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; else rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE; if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) rxdesc->dev_flags |= RXDONE_MY_BSS; /* * Set skb pointers, and update frame information. */ skb_pull(entry->skb, entry->queue->desc_size); skb_trim(entry->skb, rxdesc->size); } /* * Device probe functions. */ static int rt73usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) { u16 word; u8 *mac; s8 value; rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); /* * Start validation of the data that has been read. 
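* Any EEPROM word that reads back as 0xffff (erased EEPROM) is rewritten with a sane default; RSSI offsets outside the -10..10 range are reset to 0.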
*/ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); rt2x00lib_set_mac_address(rt2x00dev, mac); word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, ANTENNA_B); rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, ANTENNA_B); rt2x00_set_field16(&word, EEPROM_ANTENNA_FRAME_TYPE, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5226); rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_LED); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_G, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_RDY_A, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_ACT, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_0, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_1, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_2, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_3, 0); rt2x00_set_field16(&word, EEPROM_LED_POLARITY_GPIO_4, 0); rt2x00_set_field16(&word, EEPROM_LED_LED_MODE, LED_MODE_DEFAULT); rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word); rt2x00_eeprom_dbg(rt2x00dev, "Led: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); rt2x00_eeprom_dbg(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); } else { value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1); if (value < -10 || value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_2); if (value < -10 || value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); rt2x00_eeprom_dbg(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word); } else { value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); if (value < -10 || value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_2); if (value < -10 || value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); } return 0; } static int rt73usb_init_eeprom(struct rt2x00_dev *rt2x00dev) { u32 reg; u16 value; u16 eeprom; /* * Read EEPROM word for configuration. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); /* * Identify RF chipset. 
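* The RF type comes from the EEPROM antenna word, the RT chipset and revision from MAC_CSR0; anything other than an RT2573 paired with RF5226, RF2528, RF5225 or RF2527 is rejected.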
*/ value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR0); rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); if (!rt2x00_rt(rt2x00dev, RT2573) || (rt2x00_rev(rt2x00dev) == 0)) { rt2x00_err(rt2x00dev, "Invalid RT chipset detected\n"); return -ENODEV; } if (!rt2x00_rf(rt2x00dev, RF5226) && !rt2x00_rf(rt2x00dev, RF2528) && !rt2x00_rf(rt2x00dev, RF5225) && !rt2x00_rf(rt2x00dev, RF2527)) { rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n"); return -ENODEV; } /* * Identify default antenna configuration. */ rt2x00dev->default_ant.tx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); rt2x00dev->default_ant.rx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); /* * Read the Frame type. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE)) __set_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags); /* * Detect if this device has a hardware controlled radio. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); /* * Read frequency offset. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ); rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); /* * Read external LNA information. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC); if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA)) { __set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags); __set_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags); } /* * Store led settings, for correct led behaviour. */ #ifdef CONFIG_RT2X00_LIB_LEDS eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_LED); rt73usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); rt73usb_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC); if (value == LED_MODE_SIGNAL_STRENGTH) rt73usb_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_LED_MODE, value); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_0, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_0)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_1, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_1)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_2, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_2)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_3, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_3)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_4, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_4)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_ACT, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_BG, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_RDY_G)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_RDY_A)); #endif /* CONFIG_RT2X00_LIB_LEDS */ return 0; } /* * RF value list for RF2528 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2528[] = { { 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b }, { 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f }, { 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b }, { 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f }, { 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b }, { 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f }, { 7, 0x00002c0c, 0x00000792,
0x00068255, 0x000fea0b }, { 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f }, { 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b }, { 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f }, { 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b }, { 12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f }, { 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b }, { 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 }, }; /* * RF value list for RF5226 * Supports: 2.4 GHz & 5.2 GHz */ static const struct rf_channel rf_vals_5226[] = { { 1, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea0b }, { 2, 0x00002c0c, 0x00000786, 0x00068255, 0x000fea1f }, { 3, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea0b }, { 4, 0x00002c0c, 0x0000078a, 0x00068255, 0x000fea1f }, { 5, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea0b }, { 6, 0x00002c0c, 0x0000078e, 0x00068255, 0x000fea1f }, { 7, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea0b }, { 8, 0x00002c0c, 0x00000792, 0x00068255, 0x000fea1f }, { 9, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea0b }, { 10, 0x00002c0c, 0x00000796, 0x00068255, 0x000fea1f }, { 11, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea0b }, { 12, 0x00002c0c, 0x0000079a, 0x00068255, 0x000fea1f }, { 13, 0x00002c0c, 0x0000079e, 0x00068255, 0x000fea0b }, { 14, 0x00002c0c, 0x000007a2, 0x00068255, 0x000fea13 }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x00002c0c, 0x0000099a, 0x00098255, 0x000fea23 }, { 40, 0x00002c0c, 0x000009a2, 0x00098255, 0x000fea03 }, { 44, 0x00002c0c, 0x000009a6, 0x00098255, 0x000fea0b }, { 48, 0x00002c0c, 0x000009aa, 0x00098255, 0x000fea13 }, { 52, 0x00002c0c, 0x000009ae, 0x00098255, 0x000fea1b }, { 56, 0x00002c0c, 0x000009b2, 0x00098255, 0x000fea23 }, { 60, 0x00002c0c, 0x000009ba, 0x00098255, 0x000fea03 }, { 64, 0x00002c0c, 0x000009be, 0x00098255, 0x000fea0b }, /* 802.11 HyperLan 2 */ { 100, 0x00002c0c, 0x00000a2a, 0x000b8255, 0x000fea03 }, { 104, 0x00002c0c, 0x00000a2e, 0x000b8255, 0x000fea0b }, { 108, 0x00002c0c, 0x00000a32, 0x000b8255, 0x000fea13 }, { 112, 0x00002c0c, 0x00000a36, 0x000b8255, 0x000fea1b }, { 116, 0x00002c0c, 0x00000a3a, 0x000b8255, 0x000fea23 }, { 120, 0x00002c0c, 0x00000a82, 0x000b8255, 0x000fea03 }, { 124, 0x00002c0c, 0x00000a86, 0x000b8255, 0x000fea0b }, { 128, 0x00002c0c, 0x00000a8a, 0x000b8255, 0x000fea13 }, { 132, 0x00002c0c, 0x00000a8e, 0x000b8255, 0x000fea1b }, { 136, 0x00002c0c, 0x00000a92, 0x000b8255, 0x000fea23 }, /* 802.11 UNII */ { 140, 0x00002c0c, 0x00000a9a, 0x000b8255, 0x000fea03 }, { 149, 0x00002c0c, 0x00000aa2, 0x000b8255, 0x000fea1f }, { 153, 0x00002c0c, 0x00000aa6, 0x000b8255, 0x000fea27 }, { 157, 0x00002c0c, 0x00000aae, 0x000b8255, 0x000fea07 }, { 161, 0x00002c0c, 0x00000ab2, 0x000b8255, 0x000fea0f }, { 165, 0x00002c0c, 0x00000ab6, 0x000b8255, 0x000fea17 }, /* MMAC(Japan)J52 ch 34,38,42,46 */ { 34, 0x00002c0c, 0x0008099a, 0x000da255, 0x000d3a0b }, { 38, 0x00002c0c, 0x0008099e, 0x000da255, 0x000d3a13 }, { 42, 0x00002c0c, 0x000809a2, 0x000da255, 0x000d3a1b }, { 46, 0x00002c0c, 0x000809a6, 0x000da255, 0x000d3a23 }, }; /* * RF value list for RF5225 & RF2527 * Supports: 2.4 GHz & 5.2 GHz */ static const struct rf_channel rf_vals_5225_2527[] = { { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, { 8, 0x00002ccc, 0x00004792, 0x00068455, 
0x000ffa1f }, { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, { 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f }, { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa23 }, { 40, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa03 }, { 44, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa0b }, { 48, 0x00002ccc, 0x000049aa, 0x0009be55, 0x000ffa13 }, { 52, 0x00002ccc, 0x000049ae, 0x0009ae55, 0x000ffa1b }, { 56, 0x00002ccc, 0x000049b2, 0x0009ae55, 0x000ffa23 }, { 60, 0x00002ccc, 0x000049ba, 0x0009ae55, 0x000ffa03 }, { 64, 0x00002ccc, 0x000049be, 0x0009ae55, 0x000ffa0b }, /* 802.11 HyperLan 2 */ { 100, 0x00002ccc, 0x00004a2a, 0x000bae55, 0x000ffa03 }, { 104, 0x00002ccc, 0x00004a2e, 0x000bae55, 0x000ffa0b }, { 108, 0x00002ccc, 0x00004a32, 0x000bae55, 0x000ffa13 }, { 112, 0x00002ccc, 0x00004a36, 0x000bae55, 0x000ffa1b }, { 116, 0x00002ccc, 0x00004a3a, 0x000bbe55, 0x000ffa23 }, { 120, 0x00002ccc, 0x00004a82, 0x000bbe55, 0x000ffa03 }, { 124, 0x00002ccc, 0x00004a86, 0x000bbe55, 0x000ffa0b }, { 128, 0x00002ccc, 0x00004a8a, 0x000bbe55, 0x000ffa13 }, { 132, 0x00002ccc, 0x00004a8e, 0x000bbe55, 0x000ffa1b }, { 136, 0x00002ccc, 0x00004a92, 0x000bbe55, 0x000ffa23 }, /* 802.11 UNII */ { 140, 0x00002ccc, 0x00004a9a, 0x000bbe55, 0x000ffa03 }, { 149, 0x00002ccc, 0x00004aa2, 0x000bbe55, 0x000ffa1f }, { 153, 0x00002ccc, 0x00004aa6, 0x000bbe55, 0x000ffa27 }, { 157, 0x00002ccc, 0x00004aae, 0x000bbe55, 0x000ffa07 }, { 161, 0x00002ccc, 0x00004ab2, 0x000bbe55, 0x000ffa0f }, { 165, 0x00002ccc, 0x00004ab6, 0x000bbe55, 0x000ffa17 }, /* MMAC(Japan)J52 ch 34,38,42,46 */ { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa0b }, { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000ffa13 }, { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa1b }, { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa23 }, }; static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; u8 *tx_power; unsigned int i; /* * Initialize all hw fields. * * Don't set IEEE80211_HOST_BROADCAST_PS_BUFFERING unless we are * capable of sending the buffered frames out after the DTIM * transmission using rt2x00lib_beacondone. This will send out * multicast and broadcast traffic immediately instead of buffering it * infinitly and thus dropping it after some time. */ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK); ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM); ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS); SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); /* * Initialize hw_mode information. 
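* The 2.4 GHz band is always advertised; RF5226 and RF5225 add the 5 GHz band, while RF2527 uses only the first 14 (2.4 GHz) entries of the shared RF5225/RF2527 channel table.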
*/ spec->supported_bands = SUPPORT_BAND_2GHZ; spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; if (rt2x00_rf(rt2x00dev, RF2528)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2528); spec->channels = rf_vals_bg_2528; } else if (rt2x00_rf(rt2x00dev, RF5226)) { spec->supported_bands |= SUPPORT_BAND_5GHZ; spec->num_channels = ARRAY_SIZE(rf_vals_5226); spec->channels = rf_vals_5226; } else if (rt2x00_rf(rt2x00dev, RF2527)) { spec->num_channels = 14; spec->channels = rf_vals_5225_2527; } else if (rt2x00_rf(rt2x00dev, RF5225)) { spec->supported_bands |= SUPPORT_BAND_5GHZ; spec->num_channels = ARRAY_SIZE(rf_vals_5225_2527); spec->channels = rf_vals_5225_2527; } /* * Create channel information array */ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); for (i = 0; i < 14; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); } if (spec->num_channels > 14) { tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); for (i = 14; i < spec->num_channels; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i - 14]); } } return 0; } static int rt73usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; u32 reg; /* * Allocate eeprom data. */ retval = rt73usb_validate_eeprom(rt2x00dev); if (retval) return retval; retval = rt73usb_init_eeprom(rt2x00dev); if (retval) return retval; /* * Enable rfkill polling by setting GPIO direction of the * rfkill switch GPIO pin correctly. */ reg = rt2x00usb_register_read(rt2x00dev, MAC_CSR13); rt2x00_set_field32(&reg, MAC_CSR13_DIR7, 0); rt2x00usb_register_write(rt2x00dev, MAC_CSR13, reg); /* * Initialize hw specifications. */ retval = rt73usb_probe_hw_mode(rt2x00dev); if (retval) return retval; /* * This device has multiple filters for control frames, * but does not have a separate filter for PS Poll frames. */ __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags); /* * This device requires firmware. */ __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags); if (!modparam_nohwcrypt) __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags); __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags); __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags); /* * Set the rssi offset. */ rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; return 0; } /* * IEEE80211 stack callback functions. */ static int rt73usb_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue_idx, const struct ieee80211_tx_queue_params *params) { struct rt2x00_dev *rt2x00dev = hw->priv; struct data_queue *queue; struct rt2x00_field32 field; int retval; u32 reg; u32 offset; /* * First pass the configuration through rt2x00lib, which will * update the queue settings and validate the input. After that * we are free to update the registers based on the value * in the queue parameter.
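* Each AC_TXOP_CSR register packs two 16 bit TXOP values and AIFSN_CSR, CWMIN_CSR and CWMAX_CSR each pack four 4 bit values, so the register offset and bit field are derived from queue_idx below.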
*/ retval = rt2x00mac_conf_tx(hw, vif, link_id, queue_idx, params); if (retval) return retval; /* * We only need to perform additional register initialization * for WMM queues/ */ if (queue_idx >= 4) return 0; queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); /* Update WMM TXOP register */ offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2))); field.bit_offset = (queue_idx & 1) * 16; field.bit_mask = 0xffff << field.bit_offset; reg = rt2x00usb_register_read(rt2x00dev, offset); rt2x00_set_field32(&reg, field, queue->txop); rt2x00usb_register_write(rt2x00dev, offset, reg); /* Update WMM registers */ field.bit_offset = queue_idx * 4; field.bit_mask = 0xf << field.bit_offset; reg = rt2x00usb_register_read(rt2x00dev, AIFSN_CSR); rt2x00_set_field32(&reg, field, queue->aifs); rt2x00usb_register_write(rt2x00dev, AIFSN_CSR, reg); reg = rt2x00usb_register_read(rt2x00dev, CWMIN_CSR); rt2x00_set_field32(&reg, field, queue->cw_min); rt2x00usb_register_write(rt2x00dev, CWMIN_CSR, reg); reg = rt2x00usb_register_read(rt2x00dev, CWMAX_CSR); rt2x00_set_field32(&reg, field, queue->cw_max); rt2x00usb_register_write(rt2x00dev, CWMAX_CSR, reg); return 0; } static u64 rt73usb_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; u64 tsf; u32 reg; reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR13); tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32; reg = rt2x00usb_register_read(rt2x00dev, TXRX_CSR12); tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER); return tsf; } static const struct ieee80211_ops rt73usb_mac80211_ops = { .tx = rt2x00mac_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .set_tim = rt2x00mac_set_tim, .set_key = rt2x00mac_set_key, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt73usb_conf_tx, .get_tsf = rt73usb_get_tsf, .rfkill_poll = rt2x00mac_rfkill_poll, .flush = rt2x00mac_flush, .set_antenna = rt2x00mac_set_antenna, .get_antenna = rt2x00mac_get_antenna, .get_ringparam = rt2x00mac_get_ringparam, .tx_frames_pending = rt2x00mac_tx_frames_pending, }; static const struct rt2x00lib_ops rt73usb_rt2x00_ops = { .probe_hw = rt73usb_probe_hw, .get_firmware_name = rt73usb_get_firmware_name, .check_firmware = rt73usb_check_firmware, .load_firmware = rt73usb_load_firmware, .initialize = rt2x00usb_initialize, .uninitialize = rt2x00usb_uninitialize, .clear_entry = rt2x00usb_clear_entry, .set_device_state = rt73usb_set_device_state, .rfkill_poll = rt73usb_rfkill_poll, .link_stats = rt73usb_link_stats, .reset_tuner = rt73usb_reset_tuner, .link_tuner = rt73usb_link_tuner, .watchdog = rt2x00usb_watchdog, .start_queue = rt73usb_start_queue, .kick_queue = rt2x00usb_kick_queue, .stop_queue = rt73usb_stop_queue, .flush_queue = rt2x00usb_flush_queue, .write_tx_desc = rt73usb_write_tx_desc, .write_beacon = rt73usb_write_beacon, .clear_beacon = rt73usb_clear_beacon, .get_tx_data_len = rt73usb_get_tx_data_len, .fill_rxdone = rt73usb_fill_rxdone, .config_shared_key = rt73usb_config_shared_key, .config_pairwise_key = rt73usb_config_pairwise_key, .config_filter = rt73usb_config_filter, .config_intf = rt73usb_config_intf, .config_erp = rt73usb_config_erp, .config_ant = rt73usb_config_ant, 
.config = rt73usb_config, }; static void rt73usb_queue_init(struct data_queue *queue) { switch (queue->qid) { case QID_RX: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = RXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_BEACON: queue->limit = 4; queue->data_size = MGMT_FRAME_SIZE; queue->desc_size = TXINFO_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_ATIM: default: BUG(); break; } } static const struct rt2x00_ops rt73usb_ops = { .name = KBUILD_MODNAME, .max_ap_intf = 4, .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, .queue_init = rt73usb_queue_init, .lib = &rt73usb_rt2x00_ops, .hw = &rt73usb_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt73usb_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ }; /* * rt73usb module information. */ static const struct usb_device_id rt73usb_device_table[] = { /* AboCom */ { USB_DEVICE(0x07b8, 0xb21b) }, { USB_DEVICE(0x07b8, 0xb21c) }, { USB_DEVICE(0x07b8, 0xb21d) }, { USB_DEVICE(0x07b8, 0xb21e) }, { USB_DEVICE(0x07b8, 0xb21f) }, /* AL */ { USB_DEVICE(0x14b2, 0x3c10) }, /* Amigo */ { USB_DEVICE(0x148f, 0x9021) }, { USB_DEVICE(0x0eb0, 0x9021) }, /* AMIT */ { USB_DEVICE(0x18c5, 0x0002) }, /* Askey */ { USB_DEVICE(0x1690, 0x0722) }, /* ASUS */ { USB_DEVICE(0x0b05, 0x1723) }, { USB_DEVICE(0x0b05, 0x1724) }, /* Belkin */ { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050B ver. 3.x */ { USB_DEVICE(0x050d, 0x705a) }, { USB_DEVICE(0x050d, 0x905b) }, { USB_DEVICE(0x050d, 0x905c) }, /* Billionton */ { USB_DEVICE(0x1631, 0xc019) }, { USB_DEVICE(0x08dd, 0x0120) }, /* Buffalo */ { USB_DEVICE(0x0411, 0x00d8) }, { USB_DEVICE(0x0411, 0x00d9) }, { USB_DEVICE(0x0411, 0x00e6) }, { USB_DEVICE(0x0411, 0x00f4) }, { USB_DEVICE(0x0411, 0x0116) }, { USB_DEVICE(0x0411, 0x0119) }, { USB_DEVICE(0x0411, 0x0137) }, /* CEIVA */ { USB_DEVICE(0x178d, 0x02be) }, /* CNet */ { USB_DEVICE(0x1371, 0x9022) }, { USB_DEVICE(0x1371, 0x9032) }, /* Conceptronic */ { USB_DEVICE(0x14b2, 0x3c22) }, /* Corega */ { USB_DEVICE(0x07aa, 0x002e) }, /* D-Link */ { USB_DEVICE(0x07d1, 0x3c03) }, { USB_DEVICE(0x07d1, 0x3c04) }, { USB_DEVICE(0x07d1, 0x3c06) }, { USB_DEVICE(0x07d1, 0x3c07) }, /* Edimax */ { USB_DEVICE(0x7392, 0x7318) }, { USB_DEVICE(0x7392, 0x7618) }, /* EnGenius */ { USB_DEVICE(0x1740, 0x3701) }, /* Gemtek */ { USB_DEVICE(0x15a9, 0x0004) }, /* Gigabyte */ { USB_DEVICE(0x1044, 0x8008) }, { USB_DEVICE(0x1044, 0x800a) }, /* Huawei-3Com */ { USB_DEVICE(0x1472, 0x0009) }, /* Hercules */ { USB_DEVICE(0x06f8, 0xe002) }, { USB_DEVICE(0x06f8, 0xe010) }, { USB_DEVICE(0x06f8, 0xe020) }, /* Linksys */ { USB_DEVICE(0x13b1, 0x0020) }, { USB_DEVICE(0x13b1, 0x0023) }, { USB_DEVICE(0x13b1, 0x0028) }, /* MSI */ { USB_DEVICE(0x0db0, 0x4600) }, { USB_DEVICE(0x0db0, 0x6877) }, { USB_DEVICE(0x0db0, 0x6874) }, { USB_DEVICE(0x0db0, 0xa861) }, { USB_DEVICE(0x0db0, 0xa874) }, /* Ovislink */ { USB_DEVICE(0x1b75, 0x7318) }, /* Ralink */ { USB_DEVICE(0x04bb, 0x093d) }, { USB_DEVICE(0x148f, 0x2573) }, { USB_DEVICE(0x148f, 0x2671) }, { USB_DEVICE(0x0812, 0x3101) }, /* Qcom */ { USB_DEVICE(0x18e8, 0x6196) }, { USB_DEVICE(0x18e8, 0x6229) }, { USB_DEVICE(0x18e8, 0x6238) }, /* Samsung */ { USB_DEVICE(0x04e8, 0x4471) }, /* Senao */ { USB_DEVICE(0x1740, 0x7100) }, /* Sitecom */ { 
USB_DEVICE(0x0df6, 0x0024) }, { USB_DEVICE(0x0df6, 0x0027) }, { USB_DEVICE(0x0df6, 0x002f) }, { USB_DEVICE(0x0df6, 0x90ac) }, { USB_DEVICE(0x0df6, 0x9712) }, /* Surecom */ { USB_DEVICE(0x0769, 0x31f3) }, /* Tilgin */ { USB_DEVICE(0x6933, 0x5001) }, /* Philips */ { USB_DEVICE(0x0471, 0x200a) }, /* Planex */ { USB_DEVICE(0x2019, 0xab01) }, { USB_DEVICE(0x2019, 0xab50) }, /* WideTell */ { USB_DEVICE(0x7167, 0x3840) }, /* Zcom */ { USB_DEVICE(0x0cde, 0x001c) }, /* ZyXEL */ { USB_DEVICE(0x0586, 0x3415) }, { 0, } }; MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT73 USB Wireless LAN driver."); MODULE_DEVICE_TABLE(usb, rt73usb_device_table); MODULE_FIRMWARE(FIRMWARE_RT2571); MODULE_LICENSE("GPL"); static int rt73usb_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { return rt2x00usb_probe(usb_intf, &rt73usb_ops); } static struct usb_driver rt73usb_driver = { .name = KBUILD_MODNAME, .id_table = rt73usb_device_table, .probe = rt73usb_probe, .disconnect = rt2x00usb_disconnect, .suspend = rt2x00usb_suspend, .resume = rt2x00usb_resume, .reset_resume = rt2x00usb_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(rt73usb_driver);
linux-master
drivers/net/wireless/ralink/rt2x00/rt73usb.c
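/*
 * Illustrative sketch (not driver code): how rt73usb_conf_tx() above packs
 * per-queue WMM parameters into shared registers.  Each AC_TXOP_CSR word
 * carries two 16-bit TXOP fields (queues 0/1 in CSR0, 2/3 in CSR1), while
 * AIFSN/CWMIN/CWMAX each carry four 4-bit fields.  The register contents and
 * the example values below are hypothetical; only the offset/mask arithmetic
 * mirrors the driver.  Builds standalone with any C compiler.
 */
#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for rt2x00_set_field32(): read-modify-write a field. */
static uint32_t set_field32(uint32_t reg, uint32_t mask, unsigned int shift,
			    uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t txop_csr[2] = { 0, 0 };	/* stand-ins for AC_TXOP_CSR0/1 */
	uint32_t aifsn_csr = 0;			/* stand-in for AIFSN_CSR */
	unsigned int queue_idx;

	for (queue_idx = 0; queue_idx < 4; queue_idx++) {
		/* Two TXOP values per register: queue_idx & 2 selects the word. */
		unsigned int word = !!(queue_idx & 2);
		unsigned int txop_shift = (queue_idx & 1) * 16;
		uint32_t txop_mask = 0xffffu << txop_shift;

		/* Four 4-bit AIFS values per register. */
		unsigned int aifs_shift = queue_idx * 4;
		uint32_t aifs_mask = 0xfu << aifs_shift;

		txop_csr[word] = set_field32(txop_csr[word], txop_mask,
					     txop_shift, 0x30 + queue_idx);
		aifsn_csr = set_field32(aifsn_csr, aifs_mask, aifs_shift, 2);

		printf("queue %u: TXOP word %u bits %u-%u, AIFS bits %u-%u\n",
		       queue_idx, word, txop_shift, txop_shift + 15,
		       aifs_shift, aifs_shift + 3);
	}

	printf("TXOP_CSR0=0x%08x TXOP_CSR1=0x%08x AIFSN=0x%08x\n",
	       txop_csr[0], txop_csr[1], aifsn_csr);
	return 0;
}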
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00lib Abstract: rt2x00 generic configuration routines. */ #include <linux/kernel.h> #include <linux/module.h> #include "rt2x00.h" #include "rt2x00lib.h" void rt2x00lib_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, enum nl80211_iftype type, const u8 *mac, const u8 *bssid) { struct rt2x00intf_conf conf; unsigned int flags = 0; conf.type = type; switch (type) { case NL80211_IFTYPE_ADHOC: conf.sync = TSF_SYNC_ADHOC; break; case NL80211_IFTYPE_AP: case NL80211_IFTYPE_MESH_POINT: conf.sync = TSF_SYNC_AP_NONE; break; case NL80211_IFTYPE_STATION: conf.sync = TSF_SYNC_INFRA; break; default: conf.sync = TSF_SYNC_NONE; break; } /* * Note that when NULL is passed as address we will send * 00:00:00:00:00 to the device to clear the address. * This will prevent the device being confused when it wants * to ACK frames or considers itself associated. */ memset(conf.mac, 0, sizeof(conf.mac)); if (mac) memcpy(conf.mac, mac, ETH_ALEN); memset(conf.bssid, 0, sizeof(conf.bssid)); if (bssid) memcpy(conf.bssid, bssid, ETH_ALEN); flags |= CONFIG_UPDATE_TYPE; if (mac || (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)) flags |= CONFIG_UPDATE_MAC; if (bssid || (!rt2x00dev->intf_ap_count && !rt2x00dev->intf_sta_count)) flags |= CONFIG_UPDATE_BSSID; rt2x00dev->ops->lib->config_intf(rt2x00dev, intf, &conf, flags); } void rt2x00lib_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct ieee80211_bss_conf *bss_conf, u32 changed) { struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif, bss_conf); struct rt2x00lib_erp erp; memset(&erp, 0, sizeof(erp)); erp.short_preamble = bss_conf->use_short_preamble; erp.cts_protection = bss_conf->use_cts_prot; erp.slot_time = bss_conf->use_short_slot ? SHORT_SLOT_TIME : SLOT_TIME; erp.sifs = SIFS; erp.pifs = bss_conf->use_short_slot ? SHORT_PIFS : PIFS; erp.difs = bss_conf->use_short_slot ? SHORT_DIFS : DIFS; erp.eifs = bss_conf->use_short_slot ? SHORT_EIFS : EIFS; erp.basic_rates = bss_conf->basic_rates; erp.beacon_int = bss_conf->beacon_int; /* Update the AID, this is needed for dynamic PS support */ rt2x00dev->aid = vif->cfg.assoc ? vif->cfg.aid : 0; rt2x00dev->last_beacon = bss_conf->sync_tsf; /* Update global beacon interval time, this is needed for PS support */ rt2x00dev->beacon_int = bss_conf->beacon_int; if (changed & BSS_CHANGED_HT) erp.ht_opmode = bss_conf->ht_operation_mode; rt2x00dev->ops->lib->config_erp(rt2x00dev, &erp, changed); } void rt2x00lib_config_antenna(struct rt2x00_dev *rt2x00dev, struct antenna_setup config) { struct link_ant *ant = &rt2x00dev->link.ant; struct antenna_setup *def = &rt2x00dev->default_ant; struct antenna_setup *active = &rt2x00dev->link.ant.active; /* * When the caller tries to send the SW diversity, * we must update the ANTENNA_RX_DIVERSITY flag to * enable the antenna diversity in the link tuner. * * Secondly, we must guarentee we never send the * software antenna diversity command to the driver. 
*/ if (!(ant->flags & ANTENNA_RX_DIVERSITY)) { if (config.rx == ANTENNA_SW_DIVERSITY) { ant->flags |= ANTENNA_RX_DIVERSITY; if (def->rx == ANTENNA_SW_DIVERSITY) config.rx = ANTENNA_B; else config.rx = def->rx; } } else if (config.rx == ANTENNA_SW_DIVERSITY) config.rx = active->rx; if (!(ant->flags & ANTENNA_TX_DIVERSITY)) { if (config.tx == ANTENNA_SW_DIVERSITY) { ant->flags |= ANTENNA_TX_DIVERSITY; if (def->tx == ANTENNA_SW_DIVERSITY) config.tx = ANTENNA_B; else config.tx = def->tx; } } else if (config.tx == ANTENNA_SW_DIVERSITY) config.tx = active->tx; /* * Antenna setup changes require the RX to be disabled, * else the changes will be ignored by the device. */ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2x00queue_stop_queue(rt2x00dev->rx); /* * Write new antenna setup to device and reset the link tuner. * The latter is required since we need to recalibrate the * noise-sensitivity ratio for the new setup. */ rt2x00dev->ops->lib->config_ant(rt2x00dev, &config); rt2x00link_reset_tuner(rt2x00dev, true); memcpy(active, &config, sizeof(config)); if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2x00queue_start_queue(rt2x00dev->rx); } static u16 rt2x00ht_center_channel(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf) { struct hw_mode_spec *spec = &rt2x00dev->spec; int center_channel; u16 i; /* * Initialize center channel to current channel. */ center_channel = spec->channels[conf->chandef.chan->hw_value].channel; /* * Adjust center channel to HT40+ and HT40- operation. */ if (conf_is_ht40_plus(conf)) center_channel += 2; else if (conf_is_ht40_minus(conf)) center_channel -= (center_channel == 14) ? 1 : 2; for (i = 0; i < spec->num_channels; i++) if (spec->channels[i].channel == center_channel) return i; WARN_ON(1); return conf->chandef.chan->hw_value; } void rt2x00lib_config(struct rt2x00_dev *rt2x00dev, struct ieee80211_conf *conf, unsigned int ieee80211_flags) { struct rt2x00lib_conf libconf; u16 hw_value; u16 autowake_timeout; u16 beacon_int; u16 beacon_diff; memset(&libconf, 0, sizeof(libconf)); libconf.conf = conf; if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) { if (!conf_is_ht(conf)) set_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags); else clear_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags); if (conf_is_ht40(conf)) { set_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); hw_value = rt2x00ht_center_channel(rt2x00dev, conf); } else { clear_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags); hw_value = conf->chandef.chan->hw_value; } memcpy(&libconf.rf, &rt2x00dev->spec.channels[hw_value], sizeof(libconf.rf)); memcpy(&libconf.channel, &rt2x00dev->spec.channels_info[hw_value], sizeof(libconf.channel)); /* Used for VCO periodic calibration */ rt2x00dev->rf_channel = libconf.rf.channel; } if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_PS_AUTOWAKE) && (ieee80211_flags & IEEE80211_CONF_CHANGE_PS)) cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); /* * Start configuration. 
*/ rt2x00dev->ops->lib->config(rt2x00dev, &libconf, ieee80211_flags); if (conf->flags & IEEE80211_CONF_PS) set_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); else clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); if (conf->flags & IEEE80211_CONF_MONITOR) set_bit(CONFIG_MONITORING, &rt2x00dev->flags); else clear_bit(CONFIG_MONITORING, &rt2x00dev->flags); rt2x00dev->curr_band = conf->chandef.chan->band; rt2x00dev->curr_freq = conf->chandef.chan->center_freq; rt2x00dev->tx_power = conf->power_level; rt2x00dev->short_retry = conf->short_frame_max_tx_count; rt2x00dev->long_retry = conf->long_frame_max_tx_count; /* * Some configuration changes affect the link quality * which means we need to reset the link tuner. */ if (ieee80211_flags & IEEE80211_CONF_CHANGE_CHANNEL) rt2x00link_reset_tuner(rt2x00dev, false); if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && rt2x00_has_cap_flag(rt2x00dev, REQUIRE_PS_AUTOWAKE) && (ieee80211_flags & IEEE80211_CONF_CHANGE_PS) && (conf->flags & IEEE80211_CONF_PS)) { beacon_diff = (long)jiffies - (long)rt2x00dev->last_beacon; beacon_int = msecs_to_jiffies(rt2x00dev->beacon_int); if (beacon_diff > beacon_int) beacon_diff = 0; autowake_timeout = (conf->ps_dtim_period * beacon_int) - beacon_diff; queue_delayed_work(rt2x00dev->workqueue, &rt2x00dev->autowakeup_work, autowake_timeout - 15); } }
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00config.c
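/*
 * Illustrative sketch (not driver code): the HT40 centre-channel lookup done
 * by rt2x00ht_center_channel() above.  HT40+ shifts the centre two channels
 * up, HT40- two channels down (one down when the primary channel is 14), and
 * the result is mapped back to an index in the channel table.  The 2.4 GHz
 * table and the fallback behaviour here are simplified stand-ins.
 */
#include <stdio.h>

enum ht_mode { HT20, HT40_PLUS, HT40_MINUS };

static unsigned int center_channel_index(const int *channels, unsigned int n,
					 unsigned int hw_value,
					 enum ht_mode mode)
{
	int center = channels[hw_value];
	unsigned int i;

	if (mode == HT40_PLUS)
		center += 2;
	else if (mode == HT40_MINUS)
		center -= (center == 14) ? 1 : 2;

	for (i = 0; i < n; i++)
		if (channels[i] == center)
			return i;

	/* No matching entry: fall back to the primary channel index. */
	return hw_value;
}

int main(void)
{
	static const int channels[] = {
		1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14
	};
	unsigned int n = sizeof(channels) / sizeof(channels[0]);

	/* Primary channel 1, HT40+: centre channel 3 -> index 2. */
	printf("ch 1  HT40+ -> index %u\n",
	       center_channel_index(channels, n, 0, HT40_PLUS));
	/* Primary channel 14, HT40-: centre channel 13 -> index 12. */
	printf("ch 14 HT40- -> index %u\n",
	       center_channel_index(channels, n, 13, HT40_MINUS));
	return 0;
}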
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2009 - 2010 Ivo van Doorn <[email protected]> Copyright (C) 2009 Alban Browaeys <[email protected]> Copyright (C) 2009 Felix Fietkau <[email protected]> Copyright (C) 2009 Luis Correia <[email protected]> Copyright (C) 2009 Mattias Nissler <[email protected]> Copyright (C) 2009 Mark Asselstine <[email protected]> Copyright (C) 2009 Xose Vazquez Perez <[email protected]> Copyright (C) 2009 Bart Zolnierkiewicz <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2800pci Abstract: rt2800pci device specific routines. Supported chipsets: RT2800E & RT2800ED. */ #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/eeprom_93cx6.h> #include "rt2x00.h" #include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt2800lib.h" #include "rt2800mmio.h" #include "rt2800.h" #include "rt2800pci.h" /* * Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt = false; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); static bool rt2800pci_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev) { return modparam_nohwcrypt; } static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token) { unsigned int i; u32 reg; /* * SOC devices don't support MCU requests. */ if (rt2x00_is_soc(rt2x00dev)) return; for (i = 0; i < 200; i++) { reg = rt2x00mmio_register_read(rt2x00dev, H2M_MAILBOX_CID); if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) || (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) || (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD2) == token) || (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD3) == token)) break; udelay(REGISTER_BUSY_DELAY); } if (i == 200) rt2x00_err(rt2x00dev, "MCU request failed, no response from hardware\n"); rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); } static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom) { struct rt2x00_dev *rt2x00dev = eeprom->data; u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR); eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN); eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT); eeprom->reg_data_clock = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK); eeprom->reg_chip_select = !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT); } static void rt2800pci_eepromregister_write(struct eeprom_93cx6 *eeprom) { struct rt2x00_dev *rt2x00dev = eeprom->data; u32 reg = 0; rt2x00_set_field32(&reg, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in); rt2x00_set_field32(&reg, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out); rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, !!eeprom->reg_data_clock); rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT, !!eeprom->reg_chip_select); rt2x00mmio_register_write(rt2x00dev, E2PROM_CSR, reg); } static int rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev) { struct eeprom_93cx6 eeprom; u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR); eeprom.data = rt2x00dev; eeprom.register_read = rt2800pci_eepromregister_read; eeprom.register_write = rt2800pci_eepromregister_write; switch (rt2x00_get_field32(reg, E2PROM_CSR_TYPE)) { case 0: eeprom.width = PCI_EEPROM_WIDTH_93C46; break; case 1: eeprom.width = PCI_EEPROM_WIDTH_93C66; break; default: eeprom.width = PCI_EEPROM_WIDTH_93C86; break; } 
eeprom.reg_data_in = 0; eeprom.reg_data_out = 0; eeprom.reg_data_clock = 0; eeprom.reg_chip_select = 0; eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, EEPROM_SIZE / sizeof(u16)); return 0; } static int rt2800pci_efuse_detect(struct rt2x00_dev *rt2x00dev) { return rt2800_efuse_detect(rt2x00dev); } static inline int rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev) { return rt2800_read_eeprom_efuse(rt2x00dev); } /* * Firmware functions */ static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) { /* * Chip rt3290 use specific 4KB firmware named rt3290.bin. */ if (rt2x00_rt(rt2x00dev, RT3290)) return FIRMWARE_RT3290; else return FIRMWARE_RT2860; } static int rt2800pci_write_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { u32 reg; /* * enable Host program ram write selection */ reg = 0; rt2x00_set_field32(&reg, PBF_SYS_CTRL_HOST_RAM_WRITE, 1); rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, reg); /* * Write firmware to device. */ rt2x00mmio_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, data, len); rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000); rt2x00mmio_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001); rt2x00mmio_register_write(rt2x00dev, H2M_BBP_AGENT, 0); rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); return 0; } /* * Device state switch handlers. */ static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev) { int retval; retval = rt2800mmio_enable_radio(rt2x00dev); if (retval) return retval; /* After resume MCU_BOOT_SIGNAL will trash these. */ rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); rt2800_mcu_request(rt2x00dev, MCU_SLEEP, TOKEN_RADIO_OFF, 0xff, 0x02); rt2800pci_mcu_status(rt2x00dev, TOKEN_RADIO_OFF); rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKEUP, 0, 0); rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKEUP); return retval; } static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { if (state == STATE_AWAKE) { rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKEUP, 0, 0x02); rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKEUP); } else if (state == STATE_SLEEP) { rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_STATUS, 0xffffffff); rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CID, 0xffffffff); rt2800_mcu_request(rt2x00dev, MCU_SLEEP, TOKEN_SLEEP, 0xff, 0x01); } return 0; } static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: retval = rt2800pci_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: /* * After the radio has been disabled, the device should * be put to sleep for powersaving. */ rt2800pci_set_state(rt2x00dev, STATE_SLEEP); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: rt2800mmio_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: retval = rt2800pci_set_state(rt2x00dev, state); break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n", state, retval); return retval; } /* * Device probe functions. 
*/ static int rt2800pci_read_eeprom(struct rt2x00_dev *rt2x00dev) { int retval; if (rt2800pci_efuse_detect(rt2x00dev)) retval = rt2800pci_read_eeprom_efuse(rt2x00dev); else retval = rt2800pci_read_eeprom_pci(rt2x00dev); return retval; } static const struct ieee80211_ops rt2800pci_mac80211_ops = { .tx = rt2x00mac_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .set_key = rt2x00mac_set_key, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .get_key_seq = rt2800_get_key_seq, .set_rts_threshold = rt2800_set_rts_threshold, .sta_add = rt2800_sta_add, .sta_remove = rt2800_sta_remove, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2800_conf_tx, .get_tsf = rt2800_get_tsf, .rfkill_poll = rt2x00mac_rfkill_poll, .ampdu_action = rt2800_ampdu_action, .flush = rt2x00mac_flush, .get_survey = rt2800_get_survey, .get_ringparam = rt2x00mac_get_ringparam, .tx_frames_pending = rt2x00mac_tx_frames_pending, .reconfig_complete = rt2x00mac_reconfig_complete, }; static const struct rt2800_ops rt2800pci_rt2800_ops = { .register_read = rt2x00mmio_register_read, .register_read_lock = rt2x00mmio_register_read, /* same for PCI */ .register_write = rt2x00mmio_register_write, .register_write_lock = rt2x00mmio_register_write, /* same for PCI */ .register_multiread = rt2x00mmio_register_multiread, .register_multiwrite = rt2x00mmio_register_multiwrite, .regbusy_read = rt2x00mmio_regbusy_read, .read_eeprom = rt2800pci_read_eeprom, .hwcrypt_disabled = rt2800pci_hwcrypt_disabled, .drv_write_firmware = rt2800pci_write_firmware, .drv_init_registers = rt2800mmio_init_registers, .drv_get_txwi = rt2800mmio_get_txwi, .drv_get_dma_done = rt2800mmio_get_dma_done, }; static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = { .irq_handler = rt2800mmio_interrupt, .txstatus_tasklet = rt2800mmio_txstatus_tasklet, .pretbtt_tasklet = rt2800mmio_pretbtt_tasklet, .tbtt_tasklet = rt2800mmio_tbtt_tasklet, .rxdone_tasklet = rt2800mmio_rxdone_tasklet, .autowake_tasklet = rt2800mmio_autowake_tasklet, .probe_hw = rt2800mmio_probe_hw, .get_firmware_name = rt2800pci_get_firmware_name, .check_firmware = rt2800_check_firmware, .load_firmware = rt2800_load_firmware, .initialize = rt2x00mmio_initialize, .uninitialize = rt2x00mmio_uninitialize, .get_entry_state = rt2800mmio_get_entry_state, .clear_entry = rt2800mmio_clear_entry, .set_device_state = rt2800pci_set_device_state, .rfkill_poll = rt2800_rfkill_poll, .link_stats = rt2800_link_stats, .reset_tuner = rt2800_reset_tuner, .link_tuner = rt2800_link_tuner, .gain_calibration = rt2800_gain_calibration, .vco_calibration = rt2800_vco_calibration, .watchdog = rt2800_watchdog, .start_queue = rt2800mmio_start_queue, .kick_queue = rt2800mmio_kick_queue, .stop_queue = rt2800mmio_stop_queue, .flush_queue = rt2800mmio_flush_queue, .write_tx_desc = rt2800mmio_write_tx_desc, .write_tx_data = rt2800_write_tx_data, .write_beacon = rt2800_write_beacon, .clear_beacon = rt2800_clear_beacon, .fill_rxdone = rt2800mmio_fill_rxdone, .config_shared_key = rt2800_config_shared_key, .config_pairwise_key = rt2800_config_pairwise_key, .config_filter = rt2800_config_filter, .config_intf = rt2800_config_intf, .config_erp = rt2800_config_erp, .config_ant = rt2800_config_ant, .config = rt2800_config, .pre_reset_hw = 
rt2800_pre_reset_hw, }; static const struct rt2x00_ops rt2800pci_ops = { .name = KBUILD_MODNAME, .drv_data_size = sizeof(struct rt2800_drv_data), .max_ap_intf = 8, .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, .queue_init = rt2800mmio_queue_init, .lib = &rt2800pci_rt2x00_ops, .drv = &rt2800pci_rt2800_ops, .hw = &rt2800pci_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt2800_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ }; /* * RT2800pci module information. */ static const struct pci_device_id rt2800pci_device_table[] = { { PCI_DEVICE(0x1814, 0x0601) }, { PCI_DEVICE(0x1814, 0x0681) }, { PCI_DEVICE(0x1814, 0x0701) }, { PCI_DEVICE(0x1814, 0x0781) }, { PCI_DEVICE(0x1814, 0x3090) }, { PCI_DEVICE(0x1814, 0x3091) }, { PCI_DEVICE(0x1814, 0x3092) }, { PCI_DEVICE(0x1432, 0x7708) }, { PCI_DEVICE(0x1432, 0x7727) }, { PCI_DEVICE(0x1432, 0x7728) }, { PCI_DEVICE(0x1432, 0x7738) }, { PCI_DEVICE(0x1432, 0x7748) }, { PCI_DEVICE(0x1432, 0x7758) }, { PCI_DEVICE(0x1432, 0x7768) }, { PCI_DEVICE(0x1462, 0x891a) }, { PCI_DEVICE(0x1a3b, 0x1059) }, #ifdef CONFIG_RT2800PCI_RT3290 { PCI_DEVICE(0x1814, 0x3290) }, #endif #ifdef CONFIG_RT2800PCI_RT33XX { PCI_DEVICE(0x1814, 0x3390) }, #endif #ifdef CONFIG_RT2800PCI_RT35XX { PCI_DEVICE(0x1432, 0x7711) }, { PCI_DEVICE(0x1432, 0x7722) }, { PCI_DEVICE(0x1814, 0x3060) }, { PCI_DEVICE(0x1814, 0x3062) }, { PCI_DEVICE(0x1814, 0x3562) }, { PCI_DEVICE(0x1814, 0x3592) }, { PCI_DEVICE(0x1814, 0x3593) }, { PCI_DEVICE(0x1814, 0x359f) }, #endif #ifdef CONFIG_RT2800PCI_RT53XX { PCI_DEVICE(0x1814, 0x5360) }, { PCI_DEVICE(0x1814, 0x5362) }, { PCI_DEVICE(0x1814, 0x5390) }, { PCI_DEVICE(0x1814, 0x5392) }, { PCI_DEVICE(0x1814, 0x539a) }, { PCI_DEVICE(0x1814, 0x539b) }, { PCI_DEVICE(0x1814, 0x539f) }, #endif { 0, } }; MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver."); MODULE_FIRMWARE(FIRMWARE_RT2860); MODULE_DEVICE_TABLE(pci, rt2800pci_device_table); MODULE_LICENSE("GPL"); static int rt2800pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { return rt2x00pci_probe(pci_dev, &rt2800pci_ops); } static struct pci_driver rt2800pci_driver = { .name = KBUILD_MODNAME, .id_table = rt2800pci_device_table, .probe = rt2800pci_probe, .remove = rt2x00pci_remove, .driver.pm = &rt2x00pci_pm_ops, }; module_pci_driver(rt2800pci_driver);
linux-master
drivers/net/wireless/ralink/rt2x00/rt2800pci.c
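/*
 * Illustrative sketch (not driver code): the bounded polling pattern used by
 * rt2800pci_mcu_status() above -- re-read a mailbox word until one of its
 * four command slots carries the expected token, giving up after a fixed
 * number of attempts.  read_mailbox() and the constants are stand-ins; the
 * real driver delays between reads and clears the mailbox afterwards.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define POLL_ATTEMPTS	200

/* Hypothetical register read: each byte of the word is one command slot. */
static uint32_t read_mailbox(void)
{
	static int calls;

	/* Pretend token 0x12 shows up in slot 0 on the fifth read. */
	return (++calls < 5) ? 0xffffffffu : 0x00340012u;
}

static bool wait_for_token(uint8_t token)
{
	int i, slot;

	for (i = 0; i < POLL_ATTEMPTS; i++) {
		uint32_t reg = read_mailbox();

		for (slot = 0; slot < 4; slot++)
			if (((reg >> (slot * 8)) & 0xff) == token)
				return true;
		/* A real driver would udelay() between attempts here. */
	}
	return false;
}

int main(void)
{
	printf("token 0x12 %s\n",
	       wait_for_token(0x12) ? "acknowledged" : "timed out");
	return 0;
}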
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2400pci Abstract: rt2400pci device specific routines. Supported chipsets: RT2460. */ #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/eeprom_93cx6.h> #include <linux/slab.h> #include "rt2x00.h" #include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt2400pci.h" /* * Register access. * All access to the CSR registers will go through the methods * rt2x00mmio_register_read and rt2x00mmio_register_write. * BBP and RF register require indirect register access, * and use the CSR registers BBPCSR and RFCSR to achieve this. * These indirect registers work with busy bits, * and we will try maximal REGISTER_BUSY_COUNT times to access * the register while taking a REGISTER_BUSY_DELAY us delay * between each attempt. When the busy bit is still set at that time, * the access attempt is considered to have failed, * and we will print an error. */ #define WAIT_FOR_BBP(__dev, __reg) \ rt2x00mmio_regbusy_read((__dev), BBPCSR, BBPCSR_BUSY, (__reg)) #define WAIT_FOR_RF(__dev, __reg) \ rt2x00mmio_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg)) static void rt2400pci_bbp_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, BBPCSR_VALUE, value); rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1); rt2x00mmio_register_write(rt2x00dev, BBPCSR, reg); } mutex_unlock(&rt2x00dev->csr_mutex); } static u8 rt2400pci_bbp_read(struct rt2x00_dev *rt2x00dev, const unsigned int word) { u32 reg; u8 value; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the read request into the register. * After the data has been written, we wait until hardware * returns the correct value, if at any time the register * doesn't become available in time, reg will be 0xffffffff * which means we return 0xff to the caller. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0); rt2x00mmio_register_write(rt2x00dev, BBPCSR, reg); WAIT_FOR_BBP(rt2x00dev, &reg); } value = rt2x00_get_field32(reg, BBPCSR_VALUE); mutex_unlock(&rt2x00dev->csr_mutex); return value; } static void rt2400pci_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u32 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RF becomes available, afterwards we * can safely write the new data into the register. 
*/ if (WAIT_FOR_RF(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, RFCSR_VALUE, value); rt2x00_set_field32(&reg, RFCSR_NUMBER_OF_BITS, 20); rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0); rt2x00_set_field32(&reg, RFCSR_BUSY, 1); rt2x00mmio_register_write(rt2x00dev, RFCSR, reg); rt2x00_rf_write(rt2x00dev, word, value); } mutex_unlock(&rt2x00dev->csr_mutex); } static void rt2400pci_eepromregister_read(struct eeprom_93cx6 *eeprom) { struct rt2x00_dev *rt2x00dev = eeprom->data; u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, CSR21); eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN); eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT); eeprom->reg_data_clock = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_CLOCK); eeprom->reg_chip_select = !!rt2x00_get_field32(reg, CSR21_EEPROM_CHIP_SELECT); } static void rt2400pci_eepromregister_write(struct eeprom_93cx6 *eeprom) { struct rt2x00_dev *rt2x00dev = eeprom->data; u32 reg = 0; rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_IN, !!eeprom->reg_data_in); rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_OUT, !!eeprom->reg_data_out); rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_CLOCK, !!eeprom->reg_data_clock); rt2x00_set_field32(&reg, CSR21_EEPROM_CHIP_SELECT, !!eeprom->reg_chip_select); rt2x00mmio_register_write(rt2x00dev, CSR21, reg); } #ifdef CONFIG_RT2X00_LIB_DEBUGFS static const struct rt2x00debug rt2400pci_rt2x00debug = { .owner = THIS_MODULE, .csr = { .read = rt2x00mmio_register_read, .write = rt2x00mmio_register_write, .flags = RT2X00DEBUGFS_OFFSET, .word_base = CSR_REG_BASE, .word_size = sizeof(u32), .word_count = CSR_REG_SIZE / sizeof(u32), }, .eeprom = { .read = rt2x00_eeprom_read, .write = rt2x00_eeprom_write, .word_base = EEPROM_BASE, .word_size = sizeof(u16), .word_count = EEPROM_SIZE / sizeof(u16), }, .bbp = { .read = rt2400pci_bbp_read, .write = rt2400pci_bbp_write, .word_base = BBP_BASE, .word_size = sizeof(u8), .word_count = BBP_SIZE / sizeof(u8), }, .rf = { .read = rt2x00_rf_read, .write = rt2400pci_rf_write, .word_base = RF_BASE, .word_size = sizeof(u32), .word_count = RF_SIZE / sizeof(u32), }, }; #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ static int rt2400pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) { u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, GPIOCSR); return rt2x00_get_field32(reg, GPIOCSR_VAL0); } #ifdef CONFIG_RT2X00_LIB_LEDS static void rt2400pci_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; u32 reg; reg = rt2x00mmio_register_read(led->rt2x00dev, LEDCSR); if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC) rt2x00_set_field32(&reg, LEDCSR_LINK, enabled); else if (led->type == LED_TYPE_ACTIVITY) rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, enabled); rt2x00mmio_register_write(led->rt2x00dev, LEDCSR, reg); } static int rt2400pci_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); u32 reg; reg = rt2x00mmio_register_read(led->rt2x00dev, LEDCSR); rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, *delay_on); rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, *delay_off); rt2x00mmio_register_write(led->rt2x00dev, LEDCSR, reg); return 0; } static void rt2400pci_init_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, enum led_type type) { led->rt2x00dev = rt2x00dev; led->type = type; led->led_dev.brightness_set = 
rt2400pci_brightness_set; led->led_dev.blink_set = rt2400pci_blink_set; led->flags = LED_INITIALIZED; } #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Configuration handlers. */ static void rt2400pci_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags) { u32 reg; /* * Start configuration steps. * Note that the version error will always be dropped * since there is no filter for it at this time. */ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0); rt2x00_set_field32(&reg, RXCSR0_DROP_CRC, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(&reg, RXCSR0_DROP_TODS, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1); rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg); } static void rt2400pci_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct rt2x00intf_conf *conf, const unsigned int flags) { unsigned int bcn_preload; u32 reg; if (flags & CONFIG_UPDATE_TYPE) { /* * Enable beacon config */ bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20); reg = rt2x00mmio_register_read(rt2x00dev, BCNCSR1); rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload); rt2x00mmio_register_write(rt2x00dev, BCNCSR1, reg); /* * Enable synchronisation. */ reg = rt2x00mmio_register_read(rt2x00dev, CSR14); rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync); rt2x00mmio_register_write(rt2x00dev, CSR14, reg); } if (flags & CONFIG_UPDATE_MAC) rt2x00mmio_register_multiwrite(rt2x00dev, CSR3, conf->mac, sizeof(conf->mac)); if (flags & CONFIG_UPDATE_BSSID) rt2x00mmio_register_multiwrite(rt2x00dev, CSR5, conf->bssid, sizeof(conf->bssid)); } static void rt2400pci_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, u32 changed) { int preamble_mask; u32 reg; /* * When short preamble is enabled, we should set bit 0x08 */ if (changed & BSS_CHANGED_ERP_PREAMBLE) { preamble_mask = erp->short_preamble << 3; reg = rt2x00mmio_register_read(rt2x00dev, TXCSR1); rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x1ff); rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0x13a); rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1); rt2x00mmio_register_write(rt2x00dev, TXCSR1, reg); reg = rt2x00mmio_register_read(rt2x00dev, ARCSR2); rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 10)); rt2x00mmio_register_write(rt2x00dev, ARCSR2, reg); reg = rt2x00mmio_register_read(rt2x00dev, ARCSR3); rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask); rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04); rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 20)); rt2x00mmio_register_write(rt2x00dev, ARCSR3, reg); reg = rt2x00mmio_register_read(rt2x00dev, ARCSR4); rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask); rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04); rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 55)); rt2x00mmio_register_write(rt2x00dev, ARCSR4, reg); reg = rt2x00mmio_register_read(rt2x00dev, ARCSR5); rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask); rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84); rt2x00_set_field32(&reg, ARCSR2_LENGTH, 
GET_DURATION(ACK_SIZE, 110)); rt2x00mmio_register_write(rt2x00dev, ARCSR5, reg); } if (changed & BSS_CHANGED_BASIC_RATES) rt2x00mmio_register_write(rt2x00dev, ARCSR1, erp->basic_rates); if (changed & BSS_CHANGED_ERP_SLOT) { reg = rt2x00mmio_register_read(rt2x00dev, CSR11); rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time); rt2x00mmio_register_write(rt2x00dev, CSR11, reg); reg = rt2x00mmio_register_read(rt2x00dev, CSR18); rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs); rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs); rt2x00mmio_register_write(rt2x00dev, CSR18, reg); reg = rt2x00mmio_register_read(rt2x00dev, CSR19); rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs); rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs); rt2x00mmio_register_write(rt2x00dev, CSR19, reg); } if (changed & BSS_CHANGED_BEACON_INT) { reg = rt2x00mmio_register_read(rt2x00dev, CSR12); rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL, erp->beacon_int * 16); rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION, erp->beacon_int * 16); rt2x00mmio_register_write(rt2x00dev, CSR12, reg); } } static void rt2400pci_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r1; u8 r4; /* * We should never come here because rt2x00lib is supposed * to catch this and send us the correct antenna explicitely. */ BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || ant->tx == ANTENNA_SW_DIVERSITY); r4 = rt2400pci_bbp_read(rt2x00dev, 4); r1 = rt2400pci_bbp_read(rt2x00dev, 1); /* * Configure the TX antenna. */ switch (ant->tx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 1); break; case ANTENNA_A: rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r1, BBP_R1_TX_ANTENNA, 2); break; } /* * Configure the RX antenna. */ switch (ant->rx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 1); break; case ANTENNA_A: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA, 2); break; } rt2400pci_bbp_write(rt2x00dev, 4, r4); rt2400pci_bbp_write(rt2x00dev, 1, r1); } static void rt2400pci_config_channel(struct rt2x00_dev *rt2x00dev, struct rf_channel *rf) { /* * Switch on tuning bits. */ rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1); rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1); rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); rt2400pci_rf_write(rt2x00dev, 2, rf->rf2); rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); /* * RF2420 chipset don't need any additional actions. */ if (rt2x00_rf(rt2x00dev, RF2420)) return; /* * For the RT2421 chipsets we need to write an invalid * reference clock rate to activate auto_tune. * After that we set the value back to the correct channel. */ rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); rt2400pci_rf_write(rt2x00dev, 2, 0x000c2a32); rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); msleep(1); rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); rt2400pci_rf_write(rt2x00dev, 2, rf->rf2); rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); msleep(1); /* * Switch off tuning bits. */ rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0); rt2x00_set_field32(&rf->rf3, RF3_TUNER, 0); rt2400pci_rf_write(rt2x00dev, 1, rf->rf1); rt2400pci_rf_write(rt2x00dev, 3, rf->rf3); /* * Clear false CRC during channel switch. 
*/ rf->rf1 = rt2x00mmio_register_read(rt2x00dev, CNT0); } static void rt2400pci_config_txpower(struct rt2x00_dev *rt2x00dev, int txpower) { rt2400pci_bbp_write(rt2x00dev, 3, TXPOWER_TO_DEV(txpower)); } static void rt2400pci_config_retry_limit(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, CSR11); rt2x00_set_field32(&reg, CSR11_LONG_RETRY, libconf->conf->long_frame_max_tx_count); rt2x00_set_field32(&reg, CSR11_SHORT_RETRY, libconf->conf->short_frame_max_tx_count); rt2x00mmio_register_write(rt2x00dev, CSR11, reg); } static void rt2400pci_config_ps(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { enum dev_state state = (libconf->conf->flags & IEEE80211_CONF_PS) ? STATE_SLEEP : STATE_AWAKE; u32 reg; if (state == STATE_SLEEP) { reg = rt2x00mmio_register_read(rt2x00dev, CSR20); rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN, (rt2x00dev->beacon_int - 20) * 16); rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP, libconf->conf->listen_interval - 1); /* We must first disable autowake before it can be enabled */ rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0); rt2x00mmio_register_write(rt2x00dev, CSR20, reg); rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1); rt2x00mmio_register_write(rt2x00dev, CSR20, reg); } else { reg = rt2x00mmio_register_read(rt2x00dev, CSR20); rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0); rt2x00mmio_register_write(rt2x00dev, CSR20, reg); } rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); } static void rt2400pci_config(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf, const unsigned int flags) { if (flags & IEEE80211_CONF_CHANGE_CHANNEL) rt2400pci_config_channel(rt2x00dev, &libconf->rf); if (flags & IEEE80211_CONF_CHANGE_POWER) rt2400pci_config_txpower(rt2x00dev, libconf->conf->power_level); if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) rt2400pci_config_retry_limit(rt2x00dev, libconf); if (flags & IEEE80211_CONF_CHANGE_PS) rt2400pci_config_ps(rt2x00dev, libconf); } static void rt2400pci_config_cw(struct rt2x00_dev *rt2x00dev, const int cw_min, const int cw_max) { u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, CSR11); rt2x00_set_field32(&reg, CSR11_CWMIN, cw_min); rt2x00_set_field32(&reg, CSR11_CWMAX, cw_max); rt2x00mmio_register_write(rt2x00dev, CSR11, reg); } /* * Link tuning */ static void rt2400pci_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u32 reg; u8 bbp; /* * Update FCS error count from register. */ reg = rt2x00mmio_register_read(rt2x00dev, CNT0); qual->rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR); /* * Update False CCA count from register. */ bbp = rt2400pci_bbp_read(rt2x00dev, 39); qual->false_cca = bbp; } static inline void rt2400pci_set_vgc(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, u8 vgc_level) { if (qual->vgc_level_reg != vgc_level) { rt2400pci_bbp_write(rt2x00dev, 13, vgc_level); qual->vgc_level = vgc_level; qual->vgc_level_reg = vgc_level; } } static void rt2400pci_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { rt2400pci_set_vgc(rt2x00dev, qual, 0x08); } static void rt2400pci_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, const u32 count) { /* * The link tuner should not run longer then 60 seconds, * and should run once every 2 seconds. */ if (count > 60 || !(count & 1)) return; /* * Base r13 link tuning on the false cca count. 
*/ if ((qual->false_cca > 512) && (qual->vgc_level < 0x20)) rt2400pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level); else if ((qual->false_cca < 100) && (qual->vgc_level > 0x08)) rt2400pci_set_vgc(rt2x00dev, qual, --qual->vgc_level); } /* * Queue handlers. */ static void rt2400pci_start_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0); rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0); rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg); break; case QID_BEACON: reg = rt2x00mmio_register_read(rt2x00dev, CSR14); rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); rt2x00_set_field32(&reg, CSR14_TBCN, 1); rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); rt2x00mmio_register_write(rt2x00dev, CSR14, reg); break; default: break; } } static void rt2400pci_kick_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_AC_VO: reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0); rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1); rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg); break; case QID_AC_VI: reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0); rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1); rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg); break; case QID_ATIM: reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0); rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1); rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg); break; default: break; } } static void rt2400pci_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_ATIM: reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0); rt2x00_set_field32(&reg, TXCSR0_ABORT, 1); rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg); break; case QID_RX: reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0); rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1); rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg); break; case QID_BEACON: reg = rt2x00mmio_register_read(rt2x00dev, CSR14); rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0); rt2x00_set_field32(&reg, CSR14_TBCN, 0); rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); rt2x00mmio_register_write(rt2x00dev, CSR14, reg); /* * Wait for possibly running tbtt tasklets. */ tasklet_kill(&rt2x00dev->tbtt_tasklet); break; default: break; } } /* * Initialization functions. 
*/ static bool rt2400pci_get_entry_state(struct queue_entry *entry) { struct queue_entry_priv_mmio *entry_priv = entry->priv_data; u32 word; if (entry->queue->qid == QID_RX) { word = rt2x00_desc_read(entry_priv->desc, 0); return rt2x00_get_field32(word, RXD_W0_OWNER_NIC); } else { word = rt2x00_desc_read(entry_priv->desc, 0); return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || rt2x00_get_field32(word, TXD_W0_VALID)); } } static void rt2400pci_clear_entry(struct queue_entry *entry) { struct queue_entry_priv_mmio *entry_priv = entry->priv_data; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); u32 word; if (entry->queue->qid == QID_RX) { word = rt2x00_desc_read(entry_priv->desc, 2); rt2x00_set_field32(&word, RXD_W2_BUFFER_LENGTH, entry->skb->len); rt2x00_desc_write(entry_priv->desc, 2, word); word = rt2x00_desc_read(entry_priv->desc, 1); rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); rt2x00_desc_write(entry_priv->desc, 1, word); word = rt2x00_desc_read(entry_priv->desc, 0); rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); rt2x00_desc_write(entry_priv->desc, 0, word); } else { word = rt2x00_desc_read(entry_priv->desc, 0); rt2x00_set_field32(&word, TXD_W0_VALID, 0); rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); rt2x00_desc_write(entry_priv->desc, 0, word); } } static int rt2400pci_init_queues(struct rt2x00_dev *rt2x00dev) { struct queue_entry_priv_mmio *entry_priv; u32 reg; /* * Initialize registers. */ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR2); rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit); rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); rt2x00mmio_register_write(rt2x00dev, TXCSR2, reg); entry_priv = rt2x00dev->tx[1].entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, TXCSR3); rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TXCSR3, reg); entry_priv = rt2x00dev->tx[0].entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, TXCSR5); rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TXCSR5, reg); entry_priv = rt2x00dev->atim->entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, TXCSR4); rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TXCSR4, reg); entry_priv = rt2x00dev->bcn->entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, TXCSR6); rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TXCSR6, reg); reg = rt2x00mmio_register_read(rt2x00dev, RXCSR1); rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit); rt2x00mmio_register_write(rt2x00dev, RXCSR1, reg); entry_priv = rt2x00dev->rx->entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, RXCSR2); rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, RXCSR2, reg); return 0; } static int rt2400pci_init_registers(struct rt2x00_dev *rt2x00dev) { u32 reg; rt2x00mmio_register_write(rt2x00dev, PSCSR0, 0x00020002); rt2x00mmio_register_write(rt2x00dev, PSCSR1, 0x00000002); rt2x00mmio_register_write(rt2x00dev, PSCSR2, 0x00023f20); rt2x00mmio_register_write(rt2x00dev, PSCSR3, 0x00000002); reg = 
rt2x00mmio_register_read(rt2x00dev, TIMECSR); rt2x00_set_field32(&reg, TIMECSR_US_COUNT, 33); rt2x00_set_field32(&reg, TIMECSR_US_64_COUNT, 63); rt2x00_set_field32(&reg, TIMECSR_BEACON_EXPECT, 0); rt2x00mmio_register_write(rt2x00dev, TIMECSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, CSR9); rt2x00_set_field32(&reg, CSR9_MAX_FRAME_UNIT, (rt2x00dev->rx->data_size / 128)); rt2x00mmio_register_write(rt2x00dev, CSR9, reg); reg = rt2x00mmio_register_read(rt2x00dev, CSR14); rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0); rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0); rt2x00_set_field32(&reg, CSR14_TBCN, 0); rt2x00_set_field32(&reg, CSR14_TCFP, 0); rt2x00_set_field32(&reg, CSR14_TATIMW, 0); rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0); rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0); rt2x00mmio_register_write(rt2x00dev, CSR14, reg); rt2x00mmio_register_write(rt2x00dev, CNT3, 0x3f080000); reg = rt2x00mmio_register_read(rt2x00dev, ARCSR0); rt2x00_set_field32(&reg, ARCSR0_AR_BBP_DATA0, 133); rt2x00_set_field32(&reg, ARCSR0_AR_BBP_ID0, 134); rt2x00_set_field32(&reg, ARCSR0_AR_BBP_DATA1, 136); rt2x00_set_field32(&reg, ARCSR0_AR_BBP_ID1, 135); rt2x00mmio_register_write(rt2x00dev, ARCSR0, reg); reg = rt2x00mmio_register_read(rt2x00dev, RXCSR3); rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 3); /* Tx power.*/ rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 32); /* Signal */ rt2x00_set_field32(&reg, RXCSR3_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, RXCSR3_BBP_ID2, 36); /* Rssi */ rt2x00_set_field32(&reg, RXCSR3_BBP_ID2_VALID, 1); rt2x00mmio_register_write(rt2x00dev, RXCSR3, reg); rt2x00mmio_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100); if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) return -EBUSY; rt2x00mmio_register_write(rt2x00dev, MACCSR0, 0x00217223); rt2x00mmio_register_write(rt2x00dev, MACCSR1, 0x00235518); reg = rt2x00mmio_register_read(rt2x00dev, MACCSR2); rt2x00_set_field32(&reg, MACCSR2_DELAY, 64); rt2x00mmio_register_write(rt2x00dev, MACCSR2, reg); reg = rt2x00mmio_register_read(rt2x00dev, RALINKCSR); rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17); rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 154); rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA1, 0); rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID1, 154); rt2x00mmio_register_write(rt2x00dev, RALINKCSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, CSR1); rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1); rt2x00_set_field32(&reg, CSR1_BBP_RESET, 0); rt2x00_set_field32(&reg, CSR1_HOST_READY, 0); rt2x00mmio_register_write(rt2x00dev, CSR1, reg); reg = rt2x00mmio_register_read(rt2x00dev, CSR1); rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 0); rt2x00_set_field32(&reg, CSR1_HOST_READY, 1); rt2x00mmio_register_write(rt2x00dev, CSR1, reg); /* * We must clear the FCS and FIFO error count. * These registers are cleared on read, * so we may pass a useless variable to store the value. 
*/ reg = rt2x00mmio_register_read(rt2x00dev, CNT0); reg = rt2x00mmio_register_read(rt2x00dev, CNT4); return 0; } static int rt2400pci_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) { unsigned int i; u8 value; for (i = 0; i < REGISTER_BUSY_COUNT; i++) { value = rt2400pci_bbp_read(rt2x00dev, 0); if ((value != 0xff) && (value != 0x00)) return 0; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n"); return -EACCES; } static int rt2400pci_init_bbp(struct rt2x00_dev *rt2x00dev) { unsigned int i; u16 eeprom; u8 reg_id; u8 value; if (unlikely(rt2400pci_wait_bbp_ready(rt2x00dev))) return -EACCES; rt2400pci_bbp_write(rt2x00dev, 1, 0x00); rt2400pci_bbp_write(rt2x00dev, 3, 0x27); rt2400pci_bbp_write(rt2x00dev, 4, 0x08); rt2400pci_bbp_write(rt2x00dev, 10, 0x0f); rt2400pci_bbp_write(rt2x00dev, 15, 0x72); rt2400pci_bbp_write(rt2x00dev, 16, 0x74); rt2400pci_bbp_write(rt2x00dev, 17, 0x20); rt2400pci_bbp_write(rt2x00dev, 18, 0x72); rt2400pci_bbp_write(rt2x00dev, 19, 0x0b); rt2400pci_bbp_write(rt2x00dev, 20, 0x00); rt2400pci_bbp_write(rt2x00dev, 28, 0x11); rt2400pci_bbp_write(rt2x00dev, 29, 0x04); rt2400pci_bbp_write(rt2x00dev, 30, 0x21); rt2400pci_bbp_write(rt2x00dev, 31, 0x00); for (i = 0; i < EEPROM_BBP_SIZE; i++) { eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i); if (eeprom != 0xffff && eeprom != 0x0000) { reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); rt2400pci_bbp_write(rt2x00dev, reg_id, value); } } return 0; } /* * Device state switch handlers. */ static void rt2400pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int mask = (state == STATE_RADIO_IRQ_OFF); u32 reg; unsigned long flags; /* * When interrupts are being enabled, the interrupt registers * should clear the register to assure a clean state. */ if (state == STATE_RADIO_IRQ_ON) { reg = rt2x00mmio_register_read(rt2x00dev, CSR7); rt2x00mmio_register_write(rt2x00dev, CSR7, reg); } /* * Only toggle the interrupts bits we are going to use. * Non-checked interrupt bits are disabled by default. */ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); reg = rt2x00mmio_register_read(rt2x00dev, CSR8); rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask); rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask); rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, mask); rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask); rt2x00_set_field32(&reg, CSR8_RXDONE, mask); rt2x00mmio_register_write(rt2x00dev, CSR8, reg); spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); if (state == STATE_RADIO_IRQ_OFF) { /* * Ensure that all tasklets are finished before * disabling the interrupts. */ tasklet_kill(&rt2x00dev->txstatus_tasklet); tasklet_kill(&rt2x00dev->rxdone_tasklet); tasklet_kill(&rt2x00dev->tbtt_tasklet); } } static int rt2400pci_enable_radio(struct rt2x00_dev *rt2x00dev) { /* * Initialize all registers. 
*/ if (unlikely(rt2400pci_init_queues(rt2x00dev) || rt2400pci_init_registers(rt2x00dev) || rt2400pci_init_bbp(rt2x00dev))) return -EIO; return 0; } static void rt2400pci_disable_radio(struct rt2x00_dev *rt2x00dev) { /* * Disable power */ rt2x00mmio_register_write(rt2x00dev, PWRCSR0, 0); } static int rt2400pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u32 reg, reg2; unsigned int i; bool put_to_sleep; u8 bbp_state; u8 rf_state; put_to_sleep = (state != STATE_AWAKE); reg = rt2x00mmio_register_read(rt2x00dev, PWRCSR1); rt2x00_set_field32(&reg, PWRCSR1_SET_STATE, 1); rt2x00_set_field32(&reg, PWRCSR1_BBP_DESIRE_STATE, state); rt2x00_set_field32(&reg, PWRCSR1_RF_DESIRE_STATE, state); rt2x00_set_field32(&reg, PWRCSR1_PUT_TO_SLEEP, put_to_sleep); rt2x00mmio_register_write(rt2x00dev, PWRCSR1, reg); /* * Device is not guaranteed to be in the requested state yet. * We must wait until the register indicates that the * device has entered the correct state. */ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { reg2 = rt2x00mmio_register_read(rt2x00dev, PWRCSR1); bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE); rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE); if (bbp_state == state && rf_state == state) return 0; rt2x00mmio_register_write(rt2x00dev, PWRCSR1, reg); msleep(10); } return -EBUSY; } static int rt2400pci_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: retval = rt2400pci_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: rt2400pci_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: rt2400pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: retval = rt2400pci_set_state(rt2x00dev, state); break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n", state, retval); return retval; } /* * TX descriptor initialization */ static void rt2400pci_write_tx_desc(struct queue_entry *entry, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); struct queue_entry_priv_mmio *entry_priv = entry->priv_data; __le32 *txd = entry_priv->desc; u32 word; /* * Start writing the descriptor words. 
 */
	word = rt2x00_desc_read(txd, 1);
	rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma);
	rt2x00_desc_write(txd, 1, word);

	word = rt2x00_desc_read(txd, 2);
	rt2x00_set_field32(&word, TXD_W2_BUFFER_LENGTH, txdesc->length);
	rt2x00_set_field32(&word, TXD_W2_DATABYTE_COUNT, txdesc->length);
	rt2x00_desc_write(txd, 2, word);

	word = rt2x00_desc_read(txd, 3);
	rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal);
	rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_REGNUM, 5);
	rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL_BUSY, 1);
	rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service);
	rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_REGNUM, 6);
	rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE_BUSY, 1);
	rt2x00_desc_write(txd, 3, word);

	word = rt2x00_desc_read(txd, 4);
	rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_LOW,
			   txdesc->u.plcp.length_low);
	rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_REGNUM, 8);
	rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW_BUSY, 1);
	rt2x00_set_field32(&word, TXD_W4_PLCP_LENGTH_HIGH,
			   txdesc->u.plcp.length_high);
	rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_REGNUM, 7);
	rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH_BUSY, 1);
	rt2x00_desc_write(txd, 4, word);

	/*
	 * Writing TXD word 0 must be the last to prevent a race condition
	 * with the device, whereby the device may take hold of the TXD
	 * before we have finished updating it.
	 */
	word = rt2x00_desc_read(txd, 0);
	rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1);
	rt2x00_set_field32(&word, TXD_W0_VALID, 1);
	rt2x00_set_field32(&word, TXD_W0_MORE_FRAG,
			   test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_ACK,
			   test_bit(ENTRY_TXD_ACK, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_TIMESTAMP,
			   test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_RTS,
			   test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs);
	rt2x00_set_field32(&word, TXD_W0_RETRY_MODE,
			   test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags));
	rt2x00_desc_write(txd, 0, word);

	/*
	 * Register descriptor details in skb frame descriptor.
	 */
	skbdesc->desc = txd;
	skbdesc->desc_len = TXD_DESC_SIZE;
}

/*
 * TX data initialization
 */
static void rt2400pci_write_beacon(struct queue_entry *entry,
				   struct txentry_desc *txdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	u32 reg;

	/*
	 * Disable beaconing while we are reloading the beacon data,
	 * otherwise we might be sending out invalid data.
	 */
	reg = rt2x00mmio_register_read(rt2x00dev, CSR14);
	rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0);
	rt2x00mmio_register_write(rt2x00dev, CSR14, reg);

	if (rt2x00queue_map_txskb(entry)) {
		rt2x00_err(rt2x00dev, "Fail to map beacon, aborting\n");
		goto out;
	}

	/*
	 * Enable beaconing again.
	 */
	rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);

	/*
	 * Write the TX descriptor for the beacon.
	 */
	rt2400pci_write_tx_desc(entry, txdesc);

	/*
	 * Dump beacon to userspace through debugfs.
	 */
	rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry);

out:
	/*
	 * Enable beaconing again.
	 */
	rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1);
	rt2x00mmio_register_write(rt2x00dev, CSR14, reg);
}

/*
 * RX control handlers
 */
static void rt2400pci_fill_rxdone(struct queue_entry *entry,
				  struct rxdone_entry_desc *rxdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct queue_entry_priv_mmio *entry_priv = entry->priv_data;
	u32 word0;
	u32 word2;
	u32 word3;
	u32 word4;
	u64 tsf;
	u32 rx_low;
	u32 rx_high;

	word0 = rt2x00_desc_read(entry_priv->desc, 0);
	word2 = rt2x00_desc_read(entry_priv->desc, 2);
	word3 = rt2x00_desc_read(entry_priv->desc, 3);
	word4 = rt2x00_desc_read(entry_priv->desc, 4);

	if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;
	if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC;

	/*
	 * We only get the lower 32bits from the timestamp,
	 * to get the full 64bits we must combine it with
	 * the timestamp from get_tsf().
	 * Note that when a wraparound of the lower 32bits
	 * has occurred between the frame arrival and the get_tsf()
	 * call, we must decrease the higher 32bits by 1 to get
	 * the correct value.
	 */
	tsf = rt2x00dev->ops->hw->get_tsf(rt2x00dev->hw, NULL);
	rx_low = rt2x00_get_field32(word4, RXD_W4_RX_END_TIME);
	rx_high = upper_32_bits(tsf);

	if ((u32)tsf <= rx_low)
		rx_high--;

	/*
	 * Obtain the status of this packet.
	 * The signal is the PLCP value, and needs to be stripped
	 * of the preamble bit (0x08).
	 */
	rxdesc->timestamp = ((u64)rx_high << 32) | rx_low;
	rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL) & ~0x08;
	rxdesc->rssi = rt2x00_get_field32(word3, RXD_W3_RSSI) -
		       entry->queue->rt2x00dev->rssi_offset;
	rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT);
	rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP;

	if (rt2x00_get_field32(word0, RXD_W0_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;
}

/*
 * Interrupt functions.
 */
static void rt2400pci_txdone(struct rt2x00_dev *rt2x00dev,
			     const enum data_queue_qid queue_idx)
{
	struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx);
	struct queue_entry_priv_mmio *entry_priv;
	struct queue_entry *entry;
	struct txdone_entry_desc txdesc;
	u32 word;

	while (!rt2x00queue_empty(queue)) {
		entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
		entry_priv = entry->priv_data;
		word = rt2x00_desc_read(entry_priv->desc, 0);

		if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) ||
		    !rt2x00_get_field32(word, TXD_W0_VALID))
			break;

		/*
		 * Obtain the status of this packet.
		 */
		txdesc.flags = 0;
		switch (rt2x00_get_field32(word, TXD_W0_RESULT)) {
		case 0:		/* Success */
		case 1:		/* Success with retry */
			__set_bit(TXDONE_SUCCESS, &txdesc.flags);
			break;
		case 2:		/* Failure, excessive retries */
			__set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags);
			fallthrough;	/* this is a failed frame! */
		default:	/* Failure */
			__set_bit(TXDONE_FAILURE, &txdesc.flags);
		}
		txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT);

		rt2x00lib_txdone(entry, &txdesc);
	}
}

static inline void rt2400pci_enable_interrupt(struct rt2x00_dev *rt2x00dev,
					      struct rt2x00_field32 irq_field)
{
	u32 reg;

	/*
	 * Enable a single interrupt. The interrupt mask register
	 * access needs locking.
	 */
	spin_lock_irq(&rt2x00dev->irqmask_lock);

	reg = rt2x00mmio_register_read(rt2x00dev, CSR8);
	rt2x00_set_field32(&reg, irq_field, 0);
	rt2x00mmio_register_write(rt2x00dev, CSR8, reg);

	spin_unlock_irq(&rt2x00dev->irqmask_lock);
}

static void rt2400pci_txstatus_tasklet(struct tasklet_struct *t)
{
	struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t,
						    txstatus_tasklet);
	u32 reg;

	/*
	 * Handle all tx queues.
*/ rt2400pci_txdone(rt2x00dev, QID_ATIM); rt2400pci_txdone(rt2x00dev, QID_AC_VO); rt2400pci_txdone(rt2x00dev, QID_AC_VI); /* * Enable all TXDONE interrupts again. */ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) { spin_lock_irq(&rt2x00dev->irqmask_lock); reg = rt2x00mmio_register_read(rt2x00dev, CSR8); rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0); rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0); rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0); rt2x00mmio_register_write(rt2x00dev, CSR8, reg); spin_unlock_irq(&rt2x00dev->irqmask_lock); } } static void rt2400pci_tbtt_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet); rt2x00lib_beacondone(rt2x00dev); if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2400pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE); } static void rt2400pci_rxdone_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, rxdone_tasklet); if (rt2x00mmio_rxdone(rt2x00dev)) tasklet_schedule(&rt2x00dev->rxdone_tasklet); else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2400pci_enable_interrupt(rt2x00dev, CSR8_RXDONE); } static irqreturn_t rt2400pci_interrupt(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; u32 reg, mask; /* * Get the interrupt sources & saved to local variable. * Write register value back to clear pending interrupts. */ reg = rt2x00mmio_register_read(rt2x00dev, CSR7); rt2x00mmio_register_write(rt2x00dev, CSR7, reg); if (!reg) return IRQ_NONE; if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return IRQ_HANDLED; mask = reg; /* * Schedule tasklets for interrupt handling. */ if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet); if (rt2x00_get_field32(reg, CSR7_RXDONE)) tasklet_schedule(&rt2x00dev->rxdone_tasklet); if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) || rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) || rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) { tasklet_schedule(&rt2x00dev->txstatus_tasklet); /* * Mask out all txdone interrupts. */ rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1); rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1); rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1); } /* * Disable all interrupts for which a tasklet was scheduled right now, * the tasklet will reenable the appropriate interrupts. */ spin_lock(&rt2x00dev->irqmask_lock); reg = rt2x00mmio_register_read(rt2x00dev, CSR8); reg |= mask; rt2x00mmio_register_write(rt2x00dev, CSR8, reg); spin_unlock(&rt2x00dev->irqmask_lock); return IRQ_HANDLED; } /* * Device probe functions. */ static int rt2400pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) { struct eeprom_93cx6 eeprom; u32 reg; u16 word; u8 *mac; reg = rt2x00mmio_register_read(rt2x00dev, CSR21); eeprom.data = rt2x00dev; eeprom.register_read = rt2400pci_eepromregister_read; eeprom.register_write = rt2400pci_eepromregister_write; eeprom.width = rt2x00_get_field32(reg, CSR21_TYPE_93C46) ? PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; eeprom.reg_data_in = 0; eeprom.reg_data_out = 0; eeprom.reg_data_clock = 0; eeprom.reg_chip_select = 0; eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, EEPROM_SIZE / sizeof(u16)); /* * Start validation of the data that has been read. 
*/ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); rt2x00lib_set_mac_address(rt2x00dev, mac); word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); if (word == 0xffff) { rt2x00_err(rt2x00dev, "Invalid EEPROM data detected\n"); return -EINVAL; } return 0; } static int rt2400pci_init_eeprom(struct rt2x00_dev *rt2x00dev) { u32 reg; u16 value; u16 eeprom; /* * Read EEPROM word for configuration. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); /* * Identify RF chipset. */ value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); reg = rt2x00mmio_register_read(rt2x00dev, CSR0); rt2x00_set_chip(rt2x00dev, RT2460, value, rt2x00_get_field32(reg, CSR0_REVISION)); if (!rt2x00_rf(rt2x00dev, RF2420) && !rt2x00_rf(rt2x00dev, RF2421)) { rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n"); return -ENODEV; } /* * Identify default antenna configuration. */ rt2x00dev->default_ant.tx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); rt2x00dev->default_ant.rx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); /* * When the eeprom indicates SW_DIVERSITY use HW_DIVERSITY instead. * I am not 100% sure about this, but the legacy drivers do not * indicate antenna swapping in software is required when * diversity is enabled. */ if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY) rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY) rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* * Store led mode, for correct led behaviour. */ #ifdef CONFIG_RT2X00_LIB_LEDS value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); if (value == LED_MODE_TXRX_ACTIVITY || value == LED_MODE_DEFAULT || value == LED_MODE_ASUS) rt2400pci_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_ACTIVITY); #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Detect if this device has an hardware controlled radio. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); /* * Check if the BBP tuning should be enabled. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_AGCVGC_TUNING)) __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags); return 0; } /* * RF value list for RF2420 & RF2421 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_b[] = { { 1, 0x00022058, 0x000c1fda, 0x00000101, 0 }, { 2, 0x00022058, 0x000c1fee, 0x00000101, 0 }, { 3, 0x00022058, 0x000c2002, 0x00000101, 0 }, { 4, 0x00022058, 0x000c2016, 0x00000101, 0 }, { 5, 0x00022058, 0x000c202a, 0x00000101, 0 }, { 6, 0x00022058, 0x000c203e, 0x00000101, 0 }, { 7, 0x00022058, 0x000c2052, 0x00000101, 0 }, { 8, 0x00022058, 0x000c2066, 0x00000101, 0 }, { 9, 0x00022058, 0x000c207a, 0x00000101, 0 }, { 10, 0x00022058, 0x000c208e, 0x00000101, 0 }, { 11, 0x00022058, 0x000c20a2, 0x00000101, 0 }, { 12, 0x00022058, 0x000c20b6, 0x00000101, 0 }, { 13, 0x00022058, 0x000c20ca, 0x00000101, 0 }, { 14, 0x00022058, 0x000c20fa, 0x00000101, 0 }, }; static int rt2400pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; u8 *tx_power; unsigned int i; /* * Initialize all hw fields. 
*/ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK); ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS); ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING); ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM); SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); /* * Initialize hw_mode information. */ spec->supported_bands = SUPPORT_BAND_2GHZ; spec->supported_rates = SUPPORT_RATE_CCK; spec->num_channels = ARRAY_SIZE(rf_vals_b); spec->channels = rf_vals_b; /* * Create channel information array */ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); for (i = 0; i < 14; i++) { info[i].max_power = TXPOWER_FROM_DEV(MAX_TXPOWER); info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); } return 0; } static int rt2400pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; u32 reg; /* * Allocate eeprom data. */ retval = rt2400pci_validate_eeprom(rt2x00dev); if (retval) return retval; retval = rt2400pci_init_eeprom(rt2x00dev); if (retval) return retval; /* * Enable rfkill polling by setting GPIO direction of the * rfkill switch GPIO pin correctly. */ reg = rt2x00mmio_register_read(rt2x00dev, GPIOCSR); rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1); rt2x00mmio_register_write(rt2x00dev, GPIOCSR, reg); /* * Initialize hw specifications. */ retval = rt2400pci_probe_hw_mode(rt2x00dev); if (retval) return retval; /* * This device requires the atim queue and DMA-mapped skbs. */ __set_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags); __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags); __set_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags); /* * Set the rssi offset. */ rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; return 0; } /* * IEEE80211 stack callback functions. */ static int rt2400pci_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue, const struct ieee80211_tx_queue_params *params) { struct rt2x00_dev *rt2x00dev = hw->priv; /* * We don't support variating cw_min and cw_max variables * per queue. So by default we only configure the TX queue, * and ignore all other configurations. */ if (queue != 0) return -EINVAL; if (rt2x00mac_conf_tx(hw, vif, link_id, queue, params)) return -EINVAL; /* * Write configuration to register. 
*/ rt2400pci_config_cw(rt2x00dev, rt2x00dev->tx->cw_min, rt2x00dev->tx->cw_max); return 0; } static u64 rt2400pci_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; u64 tsf; u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, CSR17); tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32; reg = rt2x00mmio_register_read(rt2x00dev, CSR16); tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER); return tsf; } static int rt2400pci_tx_last_beacon(struct ieee80211_hw *hw) { struct rt2x00_dev *rt2x00dev = hw->priv; u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, CSR15); return rt2x00_get_field32(reg, CSR15_BEACON_SENT); } static const struct ieee80211_ops rt2400pci_mac80211_ops = { .tx = rt2x00mac_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2400pci_conf_tx, .get_tsf = rt2400pci_get_tsf, .tx_last_beacon = rt2400pci_tx_last_beacon, .rfkill_poll = rt2x00mac_rfkill_poll, .flush = rt2x00mac_flush, .set_antenna = rt2x00mac_set_antenna, .get_antenna = rt2x00mac_get_antenna, .get_ringparam = rt2x00mac_get_ringparam, .tx_frames_pending = rt2x00mac_tx_frames_pending, }; static const struct rt2x00lib_ops rt2400pci_rt2x00_ops = { .irq_handler = rt2400pci_interrupt, .txstatus_tasklet = rt2400pci_txstatus_tasklet, .tbtt_tasklet = rt2400pci_tbtt_tasklet, .rxdone_tasklet = rt2400pci_rxdone_tasklet, .probe_hw = rt2400pci_probe_hw, .initialize = rt2x00mmio_initialize, .uninitialize = rt2x00mmio_uninitialize, .get_entry_state = rt2400pci_get_entry_state, .clear_entry = rt2400pci_clear_entry, .set_device_state = rt2400pci_set_device_state, .rfkill_poll = rt2400pci_rfkill_poll, .link_stats = rt2400pci_link_stats, .reset_tuner = rt2400pci_reset_tuner, .link_tuner = rt2400pci_link_tuner, .start_queue = rt2400pci_start_queue, .kick_queue = rt2400pci_kick_queue, .stop_queue = rt2400pci_stop_queue, .flush_queue = rt2x00mmio_flush_queue, .write_tx_desc = rt2400pci_write_tx_desc, .write_beacon = rt2400pci_write_beacon, .fill_rxdone = rt2400pci_fill_rxdone, .config_filter = rt2400pci_config_filter, .config_intf = rt2400pci_config_intf, .config_erp = rt2400pci_config_erp, .config_ant = rt2400pci_config_ant, .config = rt2400pci_config, }; static void rt2400pci_queue_init(struct data_queue *queue) { switch (queue->qid) { case QID_RX: queue->limit = 24; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = RXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: queue->limit = 24; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_BEACON: queue->limit = 1; queue->data_size = MGMT_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_ATIM: queue->limit = 8; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; default: BUG(); break; } } static const struct rt2x00_ops rt2400pci_ops = { .name = KBUILD_MODNAME, .max_ap_intf = 1, .eeprom_size = EEPROM_SIZE, 
.rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, .queue_init = rt2400pci_queue_init, .lib = &rt2400pci_rt2x00_ops, .hw = &rt2400pci_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt2400pci_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ }; /* * RT2400pci module information. */ static const struct pci_device_id rt2400pci_device_table[] = { { PCI_DEVICE(0x1814, 0x0101) }, { 0, } }; MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2400 PCI & PCMCIA Wireless LAN driver."); MODULE_DEVICE_TABLE(pci, rt2400pci_device_table); MODULE_LICENSE("GPL"); static int rt2400pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { return rt2x00pci_probe(pci_dev, &rt2400pci_ops); } static struct pci_driver rt2400pci_driver = { .name = KBUILD_MODNAME, .id_table = rt2400pci_device_table, .probe = rt2400pci_probe, .remove = rt2x00pci_remove, .driver.pm = &rt2x00pci_pm_ops, }; module_pci_driver(rt2400pci_driver);
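/*
 * Illustrative standalone sketch (not part of the driver above): how
 * rt2400pci_fill_rxdone() rebuilds a 64-bit RX timestamp from the 32-bit
 * RXD_W4_RX_END_TIME value plus a later get_tsf() reading, decrementing
 * the high word when the low 32 bits wrapped in between.  The userspace
 * program below mirrors that arithmetic in isolation; every name in it is
 * local to this sketch, not a kernel symbol.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t rebuild_rx_timestamp(uint64_t tsf_now, uint32_t rx_end_time)
{
	uint32_t tsf_high = (uint32_t)(tsf_now >> 32);

	/* The low word of the TSF wrapped after the frame arrived. */
	if ((uint32_t)tsf_now <= rx_end_time)
		tsf_high--;

	return ((uint64_t)tsf_high << 32) | rx_end_time;
}

int main(void)
{
	/* TSF low word wrapped shortly after the frame was received. */
	uint64_t tsf_now = ((uint64_t)5 << 32) | 0x00000010;
	uint32_t rx_end = 0xfffffff0;

	/* Prints "timestamp = 0x4fffffff0". */
	printf("timestamp = 0x%llx\n",
	       (unsigned long long)rebuild_rx_timestamp(tsf_now, rx_end));
	return 0;
}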
linux-master
drivers/net/wireless/ralink/rt2x00/rt2400pci.c
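/*
 * Illustrative standalone sketch of the read-modify-write bitfield pattern
 * used throughout these drivers via rt2x00_get_field16()/rt2x00_set_field16()
 * and their 32-bit counterparts: read a CSR word, update one named field
 * through a mask, write the word back.  The mask-only field descriptor and
 * the helper names below are assumptions made for this example, not the
 * kernel's actual rt2x00_field16 layout.
 */
#include <stdint.h>
#include <stdio.h>

struct reg_field16 {
	uint16_t mask;		/* contiguous, non-zero bit mask */
};

/* Position of the lowest set bit of the mask, i.e. the field's shift. */
static unsigned int field16_shift(struct reg_field16 f)
{
	unsigned int shift = 0;

	while (!(f.mask & (1u << shift)))
		shift++;
	return shift;
}

static uint16_t get_field16(uint16_t reg, struct reg_field16 f)
{
	return (uint16_t)((reg & f.mask) >> field16_shift(f));
}

static void set_field16(uint16_t *reg, struct reg_field16 f, uint16_t val)
{
	*reg = (uint16_t)((*reg & ~f.mask) |
			  ((val << field16_shift(f)) & f.mask));
}

int main(void)
{
	/* A 2-bit field occupying bits 4..5 of a 16-bit CSR. */
	const struct reg_field16 demo_field = { .mask = 0x0030 };
	uint16_t reg = 0x1201;

	set_field16(&reg, demo_field, 2);	/* read-modify-write */

	/* Prints "reg = 0x1221 field = 2". */
	printf("reg = 0x%04x field = %u\n", reg,
	       (unsigned int)get_field16(reg, demo_field));
	return 0;
}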
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2500usb Abstract: rt2500usb device specific routines. Supported chipsets: RT2570. */ #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include "rt2x00.h" #include "rt2x00usb.h" #include "rt2500usb.h" /* * Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); /* * Register access. * All access to the CSR registers will go through the methods * rt2500usb_register_read and rt2500usb_register_write. * BBP and RF register require indirect register access, * and use the CSR registers BBPCSR and RFCSR to achieve this. * These indirect registers work with busy bits, * and we will try maximal REGISTER_USB_BUSY_COUNT times to access * the register while taking a REGISTER_BUSY_DELAY us delay * between each attampt. When the busy bit is still set at that time, * the access attempt is considered to have failed, * and we will print an error. * If the csr_mutex is already held then the _lock variants must * be used instead. */ static u16 rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset) { __le16 reg; rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_READ, USB_VENDOR_REQUEST_IN, offset, &reg, sizeof(reg)); return le16_to_cpu(reg); } static u16 rt2500usb_register_read_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset) { __le16 reg; rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_READ, USB_VENDOR_REQUEST_IN, offset, &reg, sizeof(reg), REGISTER_TIMEOUT); return le16_to_cpu(reg); } static void rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 value) { __le16 reg = cpu_to_le16(value); rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, &reg, sizeof(reg)); } static void rt2500usb_register_write_lock(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u16 value) { __le16 reg = cpu_to_le16(value); rt2x00usb_vendor_req_buff_lock(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, &reg, sizeof(reg), REGISTER_TIMEOUT); } static void rt2500usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, const unsigned int offset, void *value, const u16 length) { rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, USB_VENDOR_REQUEST_OUT, offset, value, length); } static int rt2500usb_regbusy_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, struct rt2x00_field16 field, u16 *reg) { unsigned int i; for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { *reg = rt2500usb_register_read_lock(rt2x00dev, offset); if (!rt2x00_get_field16(*reg, field)) return 1; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "Indirect register access failed: offset=0x%.08x, value=0x%.08x\n", offset, *reg); *reg = ~0; return 0; } #define WAIT_FOR_BBP(__dev, __reg) \ rt2500usb_regbusy_read((__dev), PHY_CSR8, PHY_CSR8_BUSY, (__reg)) #define WAIT_FOR_RF(__dev, __reg) \ rt2500usb_regbusy_read((__dev), PHY_CSR10, PHY_CSR10_RF_BUSY, (__reg)) static void rt2500usb_bbp_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) { u16 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the new data into the register. 
*/ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field16(&reg, PHY_CSR7_DATA, value); rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word); rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 0); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg); } mutex_unlock(&rt2x00dev->csr_mutex); } static u8 rt2500usb_bbp_read(struct rt2x00_dev *rt2x00dev, const unsigned int word) { u16 reg; u8 value; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the read request into the register. * After the data has been written, we wait until hardware * returns the correct value, if at any time the register * doesn't become available in time, reg will be 0xffffffff * which means we return 0xff to the caller. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field16(&reg, PHY_CSR7_REG_ID, word); rt2x00_set_field16(&reg, PHY_CSR7_READ_CONTROL, 1); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR7, reg); if (WAIT_FOR_BBP(rt2x00dev, &reg)) reg = rt2500usb_register_read_lock(rt2x00dev, PHY_CSR7); } value = rt2x00_get_field16(reg, PHY_CSR7_DATA); mutex_unlock(&rt2x00dev->csr_mutex); return value; } static void rt2500usb_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u32 value) { u16 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RF becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_RF(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field16(&reg, PHY_CSR9_RF_VALUE, value); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR9, reg); reg = 0; rt2x00_set_field16(&reg, PHY_CSR10_RF_VALUE, value >> 16); rt2x00_set_field16(&reg, PHY_CSR10_RF_NUMBER_OF_BITS, 20); rt2x00_set_field16(&reg, PHY_CSR10_RF_IF_SELECT, 0); rt2x00_set_field16(&reg, PHY_CSR10_RF_BUSY, 1); rt2500usb_register_write_lock(rt2x00dev, PHY_CSR10, reg); rt2x00_rf_write(rt2x00dev, word, value); } mutex_unlock(&rt2x00dev->csr_mutex); } #ifdef CONFIG_RT2X00_LIB_DEBUGFS static u32 _rt2500usb_register_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset) { return rt2500usb_register_read(rt2x00dev, offset); } static void _rt2500usb_register_write(struct rt2x00_dev *rt2x00dev, const unsigned int offset, u32 value) { rt2500usb_register_write(rt2x00dev, offset, value); } static const struct rt2x00debug rt2500usb_rt2x00debug = { .owner = THIS_MODULE, .csr = { .read = _rt2500usb_register_read, .write = _rt2500usb_register_write, .flags = RT2X00DEBUGFS_OFFSET, .word_base = CSR_REG_BASE, .word_size = sizeof(u16), .word_count = CSR_REG_SIZE / sizeof(u16), }, .eeprom = { .read = rt2x00_eeprom_read, .write = rt2x00_eeprom_write, .word_base = EEPROM_BASE, .word_size = sizeof(u16), .word_count = EEPROM_SIZE / sizeof(u16), }, .bbp = { .read = rt2500usb_bbp_read, .write = rt2500usb_bbp_write, .word_base = BBP_BASE, .word_size = sizeof(u8), .word_count = BBP_SIZE / sizeof(u8), }, .rf = { .read = rt2x00_rf_read, .write = rt2500usb_rf_write, .word_base = RF_BASE, .word_size = sizeof(u32), .word_count = RF_SIZE / sizeof(u32), }, }; #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ static int rt2500usb_rfkill_poll(struct rt2x00_dev *rt2x00dev) { u16 reg; reg = rt2500usb_register_read(rt2x00dev, MAC_CSR19); return rt2x00_get_field16(reg, MAC_CSR19_VAL7); } #ifdef CONFIG_RT2X00_LIB_LEDS static void rt2500usb_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; u16 reg; reg = 
rt2500usb_register_read(led->rt2x00dev, MAC_CSR20); if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC) rt2x00_set_field16(&reg, MAC_CSR20_LINK, enabled); else if (led->type == LED_TYPE_ACTIVITY) rt2x00_set_field16(&reg, MAC_CSR20_ACTIVITY, enabled); rt2500usb_register_write(led->rt2x00dev, MAC_CSR20, reg); } static int rt2500usb_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); u16 reg; reg = rt2500usb_register_read(led->rt2x00dev, MAC_CSR21); rt2x00_set_field16(&reg, MAC_CSR21_ON_PERIOD, *delay_on); rt2x00_set_field16(&reg, MAC_CSR21_OFF_PERIOD, *delay_off); rt2500usb_register_write(led->rt2x00dev, MAC_CSR21, reg); return 0; } static void rt2500usb_init_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, enum led_type type) { led->rt2x00dev = rt2x00dev; led->type = type; led->led_dev.brightness_set = rt2500usb_brightness_set; led->led_dev.blink_set = rt2500usb_blink_set; led->flags = LED_INITIALIZED; } #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Configuration handlers. */ /* * rt2500usb does not differentiate between shared and pairwise * keys, so we should use the same function for both key types. */ static int rt2500usb_config_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { u32 mask; u16 reg; enum cipher curr_cipher; if (crypto->cmd == SET_KEY) { /* * Disallow to set WEP key other than with index 0, * it is known that not work at least on some hardware. * SW crypto will be used in that case. */ if ((key->cipher == WLAN_CIPHER_SUITE_WEP40 || key->cipher == WLAN_CIPHER_SUITE_WEP104) && key->keyidx != 0) return -EOPNOTSUPP; /* * Pairwise key will always be entry 0, but this * could collide with a shared key on the same * position... */ mask = TXRX_CSR0_KEY_ID.bit_mask; reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0); curr_cipher = rt2x00_get_field16(reg, TXRX_CSR0_ALGORITHM); reg &= mask; if (reg && reg == mask) return -ENOSPC; reg = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); key->hw_key_idx += reg ? ffz(reg) : 0; /* * Hardware requires that all keys use the same cipher * (e.g. TKIP-only, AES-only, but not TKIP+AES). * If this is not the first key, compare the cipher with the * first one and fall back to SW crypto if not the same. */ if (key->hw_key_idx > 0 && crypto->cipher != curr_cipher) return -EOPNOTSUPP; rt2500usb_register_multiwrite(rt2x00dev, KEY_ENTRY(key->hw_key_idx), crypto->key, sizeof(crypto->key)); /* * The driver does not support the IV/EIV generation * in hardware. However it demands the data to be provided * both separately as well as inside the frame. * We already provided the CONFIG_CRYPTO_COPY_IV to rt2x00lib * to ensure rt2x00lib will not strip the data from the * frame after the copy, now we must tell mac80211 * to generate the IV/EIV data. */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; } /* * TXRX_CSR0_KEY_ID contains only single-bit fields to indicate * a particular key is valid. 
*/ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, crypto->cipher); rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); mask = rt2x00_get_field16(reg, TXRX_CSR0_KEY_ID); if (crypto->cmd == SET_KEY) mask |= 1 << key->hw_key_idx; else if (crypto->cmd == DISABLE_KEY) mask &= ~(1 << key->hw_key_idx); rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, mask); rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); return 0; } static void rt2500usb_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags) { u16 reg; /* * Start configuration steps. * Note that the version error will always be dropped * and broadcast frames will always be accepted since * there is no filter for it at this time. */ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CRC, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_PHYSICAL, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_CONTROL, !(filter_flags & FIF_CONTROL)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_NOT_TO_ME, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_TODS, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_VERSION_ERROR, 1); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_MULTICAST, !(filter_flags & FIF_ALLMULTI)); rt2x00_set_field16(&reg, TXRX_CSR2_DROP_BROADCAST, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); } static void rt2500usb_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct rt2x00intf_conf *conf, const unsigned int flags) { unsigned int bcn_preload; u16 reg; if (flags & CONFIG_UPDATE_TYPE) { /* * Enable beacon config */ bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR20); rt2x00_set_field16(&reg, TXRX_CSR20_OFFSET, bcn_preload >> 6); rt2x00_set_field16(&reg, TXRX_CSR20_BCN_EXPECT_WINDOW, 2 * (conf->type != NL80211_IFTYPE_STATION)); rt2500usb_register_write(rt2x00dev, TXRX_CSR20, reg); /* * Enable synchronisation. 
*/ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR18); rt2x00_set_field16(&reg, TXRX_CSR18_OFFSET, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, conf->sync); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); } if (flags & CONFIG_UPDATE_MAC) rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR2, conf->mac, (3 * sizeof(__le16))); if (flags & CONFIG_UPDATE_BSSID) rt2500usb_register_multiwrite(rt2x00dev, MAC_CSR5, conf->bssid, (3 * sizeof(__le16))); } static void rt2500usb_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, u32 changed) { u16 reg; if (changed & BSS_CHANGED_ERP_PREAMBLE) { reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR10); rt2x00_set_field16(&reg, TXRX_CSR10_AUTORESPOND_PREAMBLE, !!erp->short_preamble); rt2500usb_register_write(rt2x00dev, TXRX_CSR10, reg); } if (changed & BSS_CHANGED_BASIC_RATES) rt2500usb_register_write(rt2x00dev, TXRX_CSR11, erp->basic_rates); if (changed & BSS_CHANGED_BEACON_INT) { reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR18); rt2x00_set_field16(&reg, TXRX_CSR18_INTERVAL, erp->beacon_int * 4); rt2500usb_register_write(rt2x00dev, TXRX_CSR18, reg); } if (changed & BSS_CHANGED_ERP_SLOT) { rt2500usb_register_write(rt2x00dev, MAC_CSR10, erp->slot_time); rt2500usb_register_write(rt2x00dev, MAC_CSR11, erp->sifs); rt2500usb_register_write(rt2x00dev, MAC_CSR12, erp->eifs); } } static void rt2500usb_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r2; u8 r14; u16 csr5; u16 csr6; /* * We should never come here because rt2x00lib is supposed * to catch this and send us the correct antenna explicitely. */ BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || ant->tx == ANTENNA_SW_DIVERSITY); r2 = rt2500usb_bbp_read(rt2x00dev, 2); r14 = rt2500usb_bbp_read(rt2x00dev, 14); csr5 = rt2500usb_register_read(rt2x00dev, PHY_CSR5); csr6 = rt2500usb_register_read(rt2x00dev, PHY_CSR6); /* * Configure the TX antenna. */ switch (ant->tx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 1); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 1); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 1); break; case ANTENNA_A: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 0); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); rt2x00_set_field16(&csr5, PHY_CSR5_CCK, 2); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM, 2); break; } /* * Configure the RX antenna. */ switch (ant->rx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 1); break; case ANTENNA_A: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); break; } /* * RT2525E and RT5222 need to flip TX I/Q */ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) { rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 1); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 1); /* * RT2525E does not need RX I/Q Flip. 
*/ if (rt2x00_rf(rt2x00dev, RF2525E)) rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); } else { rt2x00_set_field16(&csr5, PHY_CSR5_CCK_FLIP, 0); rt2x00_set_field16(&csr6, PHY_CSR6_OFDM_FLIP, 0); } rt2500usb_bbp_write(rt2x00dev, 2, r2); rt2500usb_bbp_write(rt2x00dev, 14, r14); rt2500usb_register_write(rt2x00dev, PHY_CSR5, csr5); rt2500usb_register_write(rt2x00dev, PHY_CSR6, csr6); } static void rt2500usb_config_channel(struct rt2x00_dev *rt2x00dev, struct rf_channel *rf, const int txpower) { /* * Set TXpower. */ rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); /* * For RT2525E we should first set the channel to half band higher. */ if (rt2x00_rf(rt2x00dev, RF2525E)) { static const u32 vals[] = { 0x000008aa, 0x000008ae, 0x000008ae, 0x000008b2, 0x000008b2, 0x000008b6, 0x000008b6, 0x000008ba, 0x000008ba, 0x000008be, 0x000008b7, 0x00000902, 0x00000902, 0x00000906 }; rt2500usb_rf_write(rt2x00dev, 2, vals[rf->channel - 1]); if (rf->rf4) rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); } rt2500usb_rf_write(rt2x00dev, 1, rf->rf1); rt2500usb_rf_write(rt2x00dev, 2, rf->rf2); rt2500usb_rf_write(rt2x00dev, 3, rf->rf3); if (rf->rf4) rt2500usb_rf_write(rt2x00dev, 4, rf->rf4); } static void rt2500usb_config_txpower(struct rt2x00_dev *rt2x00dev, const int txpower) { u32 rf3; rf3 = rt2x00_rf_read(rt2x00dev, 3); rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); rt2500usb_rf_write(rt2x00dev, 3, rf3); } static void rt2500usb_config_ps(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { enum dev_state state = (libconf->conf->flags & IEEE80211_CONF_PS) ? STATE_SLEEP : STATE_AWAKE; u16 reg; if (state == STATE_SLEEP) { reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18); rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON, rt2x00dev->beacon_int - 20); rt2x00_set_field16(&reg, MAC_CSR18_BEACONS_BEFORE_WAKEUP, libconf->conf->listen_interval - 1); /* We must first disable autowake before it can be enabled */ rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 1); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); } else { reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18); rt2x00_set_field16(&reg, MAC_CSR18_AUTO_WAKE, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); } rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); } static void rt2500usb_config(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf, const unsigned int flags) { if (flags & IEEE80211_CONF_CHANGE_CHANNEL) rt2500usb_config_channel(rt2x00dev, &libconf->rf, libconf->conf->power_level); if ((flags & IEEE80211_CONF_CHANGE_POWER) && !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) rt2500usb_config_txpower(rt2x00dev, libconf->conf->power_level); if (flags & IEEE80211_CONF_CHANGE_PS) rt2500usb_config_ps(rt2x00dev, libconf); } /* * Link tuning */ static void rt2500usb_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u16 reg; /* * Update FCS error count from register. */ reg = rt2500usb_register_read(rt2x00dev, STA_CSR0); qual->rx_failed = rt2x00_get_field16(reg, STA_CSR0_FCS_ERROR); /* * Update False CCA count from register. 
*/ reg = rt2500usb_register_read(rt2x00dev, STA_CSR3); qual->false_cca = rt2x00_get_field16(reg, STA_CSR3_FALSE_CCA_ERROR); } static void rt2500usb_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u16 eeprom; u16 value; eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R24_LOW); rt2500usb_bbp_write(rt2x00dev, 24, value); eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R25_LOW); rt2500usb_bbp_write(rt2x00dev, 25, value); eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_R61_LOW); rt2500usb_bbp_write(rt2x00dev, 61, value); eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC); value = rt2x00_get_field16(eeprom, EEPROM_BBPTUNE_VGCUPPER); rt2500usb_bbp_write(rt2x00dev, 17, value); qual->vgc_level = value; } /* * Queue handlers. */ static void rt2500usb_start_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u16 reg; switch (queue->qid) { case QID_RX: reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2); rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); break; case QID_BEACON: reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1); rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1); rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); break; default: break; } } static void rt2500usb_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u16 reg; switch (queue->qid) { case QID_RX: reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2); rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); break; case QID_BEACON: reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0); rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0); rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); break; default: break; } } /* * Initialization functions. 
*/ static int rt2500usb_init_registers(struct rt2x00_dev *rt2x00dev) { u16 reg; rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0x0001, USB_MODE_TEST, REGISTER_TIMEOUT); rt2x00usb_vendor_request_sw(rt2x00dev, USB_SINGLE_WRITE, 0x0308, 0x00f0, REGISTER_TIMEOUT); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR2); rt2x00_set_field16(&reg, TXRX_CSR2_DISABLE_RX, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR2, reg); rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x1111); rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x1e11); reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 1); rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 1); rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR5); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0, 13); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1, 12); rt2x00_set_field16(&reg, TXRX_CSR5_BBP_ID1_VALID, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR5, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR6); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0, 10); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1, 11); rt2x00_set_field16(&reg, TXRX_CSR6_BBP_ID1_VALID, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR6, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR7); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0, 7); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1, 6); rt2x00_set_field16(&reg, TXRX_CSR7_BBP_ID1_VALID, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR7, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR8); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0, 5); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID0_VALID, 1); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1, 0); rt2x00_set_field16(&reg, TXRX_CSR8_BBP_ID1_VALID, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR8, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 0); rt2x00_set_field16(&reg, TXRX_CSR19_TSF_SYNC, 0); rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 0); rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); rt2500usb_register_write(rt2x00dev, TXRX_CSR21, 0xe78f); rt2500usb_register_write(rt2x00dev, MAC_CSR9, 0xff1d); if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) return -EBUSY; reg = rt2500usb_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field16(&reg, MAC_CSR1_SOFT_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_BBP_RESET, 0); rt2x00_set_field16(&reg, MAC_CSR1_HOST_READY, 1); rt2500usb_register_write(rt2x00dev, MAC_CSR1, reg); if (rt2x00_rev(rt2x00dev) >= RT2570_VERSION_C) { reg = rt2500usb_register_read(rt2x00dev, PHY_CSR2); rt2x00_set_field16(&reg, PHY_CSR2_LNA, 0); } else { reg = 0; rt2x00_set_field16(&reg, PHY_CSR2_LNA, 1); rt2x00_set_field16(&reg, PHY_CSR2_LNA_MODE, 3); } rt2500usb_register_write(rt2x00dev, PHY_CSR2, reg); rt2500usb_register_write(rt2x00dev, MAC_CSR11, 0x0002); rt2500usb_register_write(rt2x00dev, MAC_CSR22, 0x0053); rt2500usb_register_write(rt2x00dev, MAC_CSR15, 0x01ee); rt2500usb_register_write(rt2x00dev, 
MAC_CSR16, 0x0000); reg = rt2500usb_register_read(rt2x00dev, MAC_CSR8); rt2x00_set_field16(&reg, MAC_CSR8_MAX_FRAME_UNIT, rt2x00dev->rx->data_size); rt2500usb_register_write(rt2x00dev, MAC_CSR8, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field16(&reg, TXRX_CSR0_ALGORITHM, CIPHER_NONE); rt2x00_set_field16(&reg, TXRX_CSR0_IV_OFFSET, IEEE80211_HEADER); rt2x00_set_field16(&reg, TXRX_CSR0_KEY_ID, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR0, reg); reg = rt2500usb_register_read(rt2x00dev, MAC_CSR18); rt2x00_set_field16(&reg, MAC_CSR18_DELAY_AFTER_BEACON, 90); rt2500usb_register_write(rt2x00dev, MAC_CSR18, reg); reg = rt2500usb_register_read(rt2x00dev, PHY_CSR4); rt2x00_set_field16(&reg, PHY_CSR4_LOW_RF_LE, 1); rt2500usb_register_write(rt2x00dev, PHY_CSR4, reg); reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR1); rt2x00_set_field16(&reg, TXRX_CSR1_AUTO_SEQUENCE, 1); rt2500usb_register_write(rt2x00dev, TXRX_CSR1, reg); return 0; } static int rt2500usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) { unsigned int i; u8 value; for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { value = rt2500usb_bbp_read(rt2x00dev, 0); if ((value != 0xff) && (value != 0x00)) return 0; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n"); return -EACCES; } static int rt2500usb_init_bbp(struct rt2x00_dev *rt2x00dev) { unsigned int i; u16 eeprom; u8 value; u8 reg_id; if (unlikely(rt2500usb_wait_bbp_ready(rt2x00dev))) return -EACCES; rt2500usb_bbp_write(rt2x00dev, 3, 0x02); rt2500usb_bbp_write(rt2x00dev, 4, 0x19); rt2500usb_bbp_write(rt2x00dev, 14, 0x1c); rt2500usb_bbp_write(rt2x00dev, 15, 0x30); rt2500usb_bbp_write(rt2x00dev, 16, 0xac); rt2500usb_bbp_write(rt2x00dev, 18, 0x18); rt2500usb_bbp_write(rt2x00dev, 19, 0xff); rt2500usb_bbp_write(rt2x00dev, 20, 0x1e); rt2500usb_bbp_write(rt2x00dev, 21, 0x08); rt2500usb_bbp_write(rt2x00dev, 22, 0x08); rt2500usb_bbp_write(rt2x00dev, 23, 0x08); rt2500usb_bbp_write(rt2x00dev, 24, 0x80); rt2500usb_bbp_write(rt2x00dev, 25, 0x50); rt2500usb_bbp_write(rt2x00dev, 26, 0x08); rt2500usb_bbp_write(rt2x00dev, 27, 0x23); rt2500usb_bbp_write(rt2x00dev, 30, 0x10); rt2500usb_bbp_write(rt2x00dev, 31, 0x2b); rt2500usb_bbp_write(rt2x00dev, 32, 0xb9); rt2500usb_bbp_write(rt2x00dev, 34, 0x12); rt2500usb_bbp_write(rt2x00dev, 35, 0x50); rt2500usb_bbp_write(rt2x00dev, 39, 0xc4); rt2500usb_bbp_write(rt2x00dev, 40, 0x02); rt2500usb_bbp_write(rt2x00dev, 41, 0x60); rt2500usb_bbp_write(rt2x00dev, 53, 0x10); rt2500usb_bbp_write(rt2x00dev, 54, 0x18); rt2500usb_bbp_write(rt2x00dev, 56, 0x08); rt2500usb_bbp_write(rt2x00dev, 57, 0x10); rt2500usb_bbp_write(rt2x00dev, 58, 0x08); rt2500usb_bbp_write(rt2x00dev, 61, 0x60); rt2500usb_bbp_write(rt2x00dev, 62, 0x10); rt2500usb_bbp_write(rt2x00dev, 75, 0xff); for (i = 0; i < EEPROM_BBP_SIZE; i++) { eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i); if (eeprom != 0xffff && eeprom != 0x0000) { reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); rt2500usb_bbp_write(rt2x00dev, reg_id, value); } } return 0; } /* * Device state switch handlers. */ static int rt2500usb_enable_radio(struct rt2x00_dev *rt2x00dev) { /* * Initialize all registers. 
*/ if (unlikely(rt2500usb_init_registers(rt2x00dev) || rt2500usb_init_bbp(rt2x00dev))) return -EIO; return 0; } static void rt2500usb_disable_radio(struct rt2x00_dev *rt2x00dev) { rt2500usb_register_write(rt2x00dev, MAC_CSR13, 0x2121); rt2500usb_register_write(rt2x00dev, MAC_CSR14, 0x2121); /* * Disable synchronisation. */ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, 0); rt2x00usb_disable_radio(rt2x00dev); } static int rt2500usb_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u16 reg; u16 reg2; unsigned int i; bool put_to_sleep; u8 bbp_state; u8 rf_state; put_to_sleep = (state != STATE_AWAKE); reg = 0; rt2x00_set_field16(&reg, MAC_CSR17_BBP_DESIRE_STATE, state); rt2x00_set_field16(&reg, MAC_CSR17_RF_DESIRE_STATE, state); rt2x00_set_field16(&reg, MAC_CSR17_PUT_TO_SLEEP, put_to_sleep); rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); rt2x00_set_field16(&reg, MAC_CSR17_SET_STATE, 1); rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); /* * Device is not guaranteed to be in the requested state yet. * We must wait until the register indicates that the * device has entered the correct state. */ for (i = 0; i < REGISTER_USB_BUSY_COUNT; i++) { reg2 = rt2500usb_register_read(rt2x00dev, MAC_CSR17); bbp_state = rt2x00_get_field16(reg2, MAC_CSR17_BBP_CURR_STATE); rf_state = rt2x00_get_field16(reg2, MAC_CSR17_RF_CURR_STATE); if (bbp_state == state && rf_state == state) return 0; rt2500usb_register_write(rt2x00dev, MAC_CSR17, reg); msleep(30); } return -EBUSY; } static int rt2500usb_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: retval = rt2500usb_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: rt2500usb_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: /* No support, but no error either */ break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: retval = rt2500usb_set_state(rt2x00dev, state); break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n", state, retval); return retval; } /* * TX descriptor initialization */ static void rt2500usb_write_tx_desc(struct queue_entry *entry, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *txd = (__le32 *) entry->skb->data; u32 word; /* * Start writing the descriptor words. 
*/ word = rt2x00_desc_read(txd, 0); rt2x00_set_field32(&word, TXD_W0_RETRY_LIMIT, txdesc->retry_limit); rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_ACK, test_bit(ENTRY_TXD_ACK, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_OFDM, (txdesc->rate_mode == RATE_MODE_OFDM)); rt2x00_set_field32(&word, TXD_W0_NEW_SEQ, test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); rt2x00_set_field32(&word, TXD_W0_CIPHER, !!txdesc->cipher); rt2x00_set_field32(&word, TXD_W0_KEY_ID, txdesc->key_idx); rt2x00_desc_write(txd, 0, word); word = rt2x00_desc_read(txd, 1); rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); rt2x00_set_field32(&word, TXD_W1_AIFS, entry->queue->aifs); rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min); rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max); rt2x00_desc_write(txd, 1, word); word = rt2x00_desc_read(txd, 2); rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->u.plcp.length_low); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->u.plcp.length_high); rt2x00_desc_write(txd, 2, word); if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { _rt2x00_desc_write(txd, 3, skbdesc->iv[0]); _rt2x00_desc_write(txd, 4, skbdesc->iv[1]); } /* * Register descriptor details in skb frame descriptor. */ skbdesc->flags |= SKBDESC_DESC_IN_SKB; skbdesc->desc = txd; skbdesc->desc_len = TXD_DESC_SIZE; } /* * TX data initialization */ static void rt2500usb_beacondone(struct urb *urb); static void rt2500usb_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev); struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; int pipe = usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint); int length; u16 reg, reg0; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ reg = rt2500usb_register_read(rt2x00dev, TXRX_CSR19); rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); /* * Add space for the descriptor in front of the skb. */ skb_push(entry->skb, TXD_DESC_SIZE); memset(entry->skb->data, 0, TXD_DESC_SIZE); /* * Write the TX descriptor for the beacon. */ rt2500usb_write_tx_desc(entry, txdesc); /* * Dump beacon to userspace through debugfs. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); /* * USB devices cannot blindly pass the skb->len as the * length of the data to usb_fill_bulk_urb. Pass the skb * to the driver to determine what the length should be. */ length = rt2x00dev->ops->lib->get_tx_data_len(entry); usb_fill_bulk_urb(bcn_priv->urb, usb_dev, pipe, entry->skb->data, length, rt2500usb_beacondone, entry); /* * Second we need to create the guardian byte. * We only need a single byte, so lets recycle * the 'flags' field we are not using for beacons. */ bcn_priv->guardian_data = 0; usb_fill_bulk_urb(bcn_priv->guardian_urb, usb_dev, pipe, &bcn_priv->guardian_data, 1, rt2500usb_beacondone, entry); /* * Send out the guardian byte. 
*/ usb_submit_urb(bcn_priv->guardian_urb, GFP_ATOMIC); /* * Enable beaconing again. */ rt2x00_set_field16(&reg, TXRX_CSR19_TSF_COUNT, 1); rt2x00_set_field16(&reg, TXRX_CSR19_TBCN, 1); reg0 = reg; rt2x00_set_field16(&reg, TXRX_CSR19_BEACON_GEN, 1); /* * Beacon generation will fail initially. * To prevent this we need to change the TXRX_CSR19 * register several times (reg0 is the same as reg * except for TXRX_CSR19_BEACON_GEN, which is 0 in reg0 * and 1 in reg). */ rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg0); rt2500usb_register_write(rt2x00dev, TXRX_CSR19, reg); } static int rt2500usb_get_tx_data_len(struct queue_entry *entry) { int length; /* * The length _must_ be a multiple of 2, * but it must _not_ be a multiple of the USB packet size. */ length = roundup(entry->skb->len, 2); length += (2 * !(length % entry->queue->usb_maxpacket)); return length; } /* * RX control handlers */ static void rt2500usb_fill_rxdone(struct queue_entry *entry, struct rxdone_entry_desc *rxdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct queue_entry_priv_usb *entry_priv = entry->priv_data; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); __le32 *rxd = (__le32 *)(entry->skb->data + (entry_priv->urb->actual_length - entry->queue->desc_size)); u32 word0; u32 word1; /* * Copy descriptor to the skbdesc->desc buffer, making it safe from moving of * frame data in rt2x00usb. */ memcpy(skbdesc->desc, rxd, skbdesc->desc_len); rxd = (__le32 *)skbdesc->desc; /* * It is now safe to read the descriptor on all architectures. */ word0 = rt2x00_desc_read(rxd, 0); word1 = rt2x00_desc_read(rxd, 1); if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC; rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER); if (rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR)) rxdesc->cipher_status = RX_CRYPTO_FAIL_KEY; if (rxdesc->cipher != CIPHER_NONE) { rxdesc->iv[0] = _rt2x00_desc_read(rxd, 2); rxdesc->iv[1] = _rt2x00_desc_read(rxd, 3); rxdesc->dev_flags |= RXDONE_CRYPTO_IV; /* ICV is located at the end of frame */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) rxdesc->flags |= RX_FLAG_MMIC_ERROR; } /* * Obtain the status about this packet. * When frame was received with an OFDM bitrate, * the signal is the PLCP value. If it was received with * a CCK bitrate the signal is the rate in 100kbit/s. */ rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); rxdesc->rssi = rt2x00_get_field32(word1, RXD_W1_RSSI) - rt2x00dev->rssi_offset; rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); if (rt2x00_get_field32(word0, RXD_W0_OFDM)) rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; else rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE; if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) rxdesc->dev_flags |= RXDONE_MY_BSS; /* * Adjust the skb memory window to the frame boundaries. */ skb_trim(entry->skb, rxdesc->size); } /* * Interrupt functions. 
*/ static void rt2500usb_beacondone(struct urb *urb) { struct queue_entry *entry = (struct queue_entry *)urb->context; struct queue_entry_priv_usb_bcn *bcn_priv = entry->priv_data; if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &entry->queue->rt2x00dev->flags)) return; /* * Check if this was the guardian beacon, * if that was the case we need to send the real beacon now. * Otherwise we should free the sk_buffer, the device * should be doing the rest of the work now. */ if (bcn_priv->guardian_urb == urb) { usb_submit_urb(bcn_priv->urb, GFP_ATOMIC); } else if (bcn_priv->urb == urb) { dev_kfree_skb(entry->skb); entry->skb = NULL; } } /* * Device probe functions. */ static int rt2500usb_validate_eeprom(struct rt2x00_dev *rt2x00dev) { u16 word; u8 *mac; u8 bbp; rt2x00usb_eeprom_read(rt2x00dev, rt2x00dev->eeprom, EEPROM_SIZE); /* * Start validation of the data that has been read. */ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); rt2x00lib_set_mac_address(rt2x00dev, mac); word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, ANTENNA_SW_DIVERSITY); rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, ANTENNA_SW_DIVERSITY); rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE, LED_MODE_DEFAULT); rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, DEFAULT_RSSI_OFFSET); rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); rt2x00_eeprom_dbg(rt2x00dev, "Calibrate offset: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_THRESHOLD, 45); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune: 0x%04x\n", word); } /* * Switch lower vgc bound to current BBP R17 value, * lower the value a bit for better quality. 
*/ bbp = rt2500usb_bbp_read(rt2x00dev, 17); bbp -= 6; word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_VGC); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCUPPER, 0x40); rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune vgc: 0x%04x\n", word); } else { rt2x00_set_field16(&word, EEPROM_BBPTUNE_VGCLOWER, bbp); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_VGC, word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R17); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_LOW, 0x48); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R17_HIGH, 0x41); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R17, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r17: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R24); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_LOW, 0x40); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R24_HIGH, 0x80); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R24, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r24: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R25); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_LOW, 0x40); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R25_HIGH, 0x50); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R25, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r25: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBPTUNE_R61); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_LOW, 0x60); rt2x00_set_field16(&word, EEPROM_BBPTUNE_R61_HIGH, 0x6d); rt2x00_eeprom_write(rt2x00dev, EEPROM_BBPTUNE_R61, word); rt2x00_eeprom_dbg(rt2x00dev, "BBPtune r61: 0x%04x\n", word); } return 0; } static int rt2500usb_init_eeprom(struct rt2x00_dev *rt2x00dev) { u16 reg; u16 value; u16 eeprom; /* * Read EEPROM word for configuration. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); /* * Identify RF chipset. */ value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); reg = rt2500usb_register_read(rt2x00dev, MAC_CSR0); rt2x00_set_chip(rt2x00dev, RT2570, value, reg); if (((reg & 0xfff0) != 0) || ((reg & 0x0000000f) == 0)) { rt2x00_err(rt2x00dev, "Invalid RT chipset detected\n"); return -ENODEV; } if (!rt2x00_rf(rt2x00dev, RF2522) && !rt2x00_rf(rt2x00dev, RF2523) && !rt2x00_rf(rt2x00dev, RF2524) && !rt2x00_rf(rt2x00dev, RF2525) && !rt2x00_rf(rt2x00dev, RF2525E) && !rt2x00_rf(rt2x00dev, RF5222)) { rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n"); return -ENODEV; } /* * Identify default antenna configuration. */ rt2x00dev->default_ant.tx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); rt2x00dev->default_ant.rx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); /* * When the eeprom indicates SW_DIVERSITY use HW_DIVERSITY instead. * I am not 100% sure about this, but the legacy drivers do not * indicate antenna swapping in software is required when * diversity is enabled. */ if (rt2x00dev->default_ant.tx == ANTENNA_SW_DIVERSITY) rt2x00dev->default_ant.tx = ANTENNA_HW_DIVERSITY; if (rt2x00dev->default_ant.rx == ANTENNA_SW_DIVERSITY) rt2x00dev->default_ant.rx = ANTENNA_HW_DIVERSITY; /* * Store led mode, for correct led behaviour. 
*/ #ifdef CONFIG_RT2X00_LIB_LEDS value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); if (value == LED_MODE_TXRX_ACTIVITY || value == LED_MODE_DEFAULT || value == LED_MODE_ASUS) rt2500usb_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_ACTIVITY); #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Detect if this device has an hardware controlled radio. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); /* * Read the RSSI <-> dBm offset information. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET); rt2x00dev->rssi_offset = rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI); return 0; } /* * RF value list for RF2522 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2522[] = { { 1, 0x00002050, 0x000c1fda, 0x00000101, 0 }, { 2, 0x00002050, 0x000c1fee, 0x00000101, 0 }, { 3, 0x00002050, 0x000c2002, 0x00000101, 0 }, { 4, 0x00002050, 0x000c2016, 0x00000101, 0 }, { 5, 0x00002050, 0x000c202a, 0x00000101, 0 }, { 6, 0x00002050, 0x000c203e, 0x00000101, 0 }, { 7, 0x00002050, 0x000c2052, 0x00000101, 0 }, { 8, 0x00002050, 0x000c2066, 0x00000101, 0 }, { 9, 0x00002050, 0x000c207a, 0x00000101, 0 }, { 10, 0x00002050, 0x000c208e, 0x00000101, 0 }, { 11, 0x00002050, 0x000c20a2, 0x00000101, 0 }, { 12, 0x00002050, 0x000c20b6, 0x00000101, 0 }, { 13, 0x00002050, 0x000c20ca, 0x00000101, 0 }, { 14, 0x00002050, 0x000c20fa, 0x00000101, 0 }, }; /* * RF value list for RF2523 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2523[] = { { 1, 0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b }, { 2, 0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b }, { 3, 0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b }, { 4, 0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b }, { 5, 0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b }, { 6, 0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b }, { 7, 0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b }, { 8, 0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b }, { 9, 0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b }, { 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b }, { 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b }, { 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b }, { 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b }, { 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 }, }; /* * RF value list for RF2524 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2524[] = { { 1, 0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b }, { 2, 0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b }, { 3, 0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b }, { 4, 0x00032020, 0x00000caa, 0x00000101, 0x00000a1b }, { 5, 0x00032020, 0x00000cae, 0x00000101, 0x00000a1b }, { 6, 0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b }, { 7, 0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b }, { 8, 0x00032020, 0x00000cba, 0x00000101, 0x00000a1b }, { 9, 0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b }, { 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b }, { 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b }, { 12, 0x00032020, 0x00000d0a, 0x00000101, 0x00000a1b }, { 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b }, { 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 }, }; /* * RF value list for RF2525 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2525[] = { { 1, 0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b }, { 2, 0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b }, { 3, 0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b }, 
{ 4, 0x00022020, 0x00080caa, 0x00060111, 0x00000a1b }, { 5, 0x00022020, 0x00080cae, 0x00060111, 0x00000a1b }, { 6, 0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b }, { 7, 0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b }, { 8, 0x00022020, 0x00080cba, 0x00060111, 0x00000a1b }, { 9, 0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b }, { 10, 0x00022020, 0x00080d02, 0x00060111, 0x00000a1b }, { 11, 0x00022020, 0x00080d06, 0x00060111, 0x00000a1b }, { 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b }, { 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b }, { 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 }, }; /* * RF value list for RF2525e * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2525e[] = { { 1, 0x00022010, 0x0000089a, 0x00060111, 0x00000e1b }, { 2, 0x00022010, 0x0000089e, 0x00060111, 0x00000e07 }, { 3, 0x00022010, 0x0000089e, 0x00060111, 0x00000e1b }, { 4, 0x00022010, 0x000008a2, 0x00060111, 0x00000e07 }, { 5, 0x00022010, 0x000008a2, 0x00060111, 0x00000e1b }, { 6, 0x00022010, 0x000008a6, 0x00060111, 0x00000e07 }, { 7, 0x00022010, 0x000008a6, 0x00060111, 0x00000e1b }, { 8, 0x00022010, 0x000008aa, 0x00060111, 0x00000e07 }, { 9, 0x00022010, 0x000008aa, 0x00060111, 0x00000e1b }, { 10, 0x00022010, 0x000008ae, 0x00060111, 0x00000e07 }, { 11, 0x00022010, 0x000008ae, 0x00060111, 0x00000e1b }, { 12, 0x00022010, 0x000008b2, 0x00060111, 0x00000e07 }, { 13, 0x00022010, 0x000008b2, 0x00060111, 0x00000e1b }, { 14, 0x00022010, 0x000008b6, 0x00060111, 0x00000e23 }, }; /* * RF value list for RF5222 * Supports: 2.4 GHz & 5.2 GHz */ static const struct rf_channel rf_vals_5222[] = { { 1, 0x00022020, 0x00001136, 0x00000101, 0x00000a0b }, { 2, 0x00022020, 0x0000113a, 0x00000101, 0x00000a0b }, { 3, 0x00022020, 0x0000113e, 0x00000101, 0x00000a0b }, { 4, 0x00022020, 0x00001182, 0x00000101, 0x00000a0b }, { 5, 0x00022020, 0x00001186, 0x00000101, 0x00000a0b }, { 6, 0x00022020, 0x0000118a, 0x00000101, 0x00000a0b }, { 7, 0x00022020, 0x0000118e, 0x00000101, 0x00000a0b }, { 8, 0x00022020, 0x00001192, 0x00000101, 0x00000a0b }, { 9, 0x00022020, 0x00001196, 0x00000101, 0x00000a0b }, { 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b }, { 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b }, { 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b }, { 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b }, { 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f }, { 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f }, { 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f }, { 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f }, { 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f }, { 66, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f }, { 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f }, { 64, 0x00022010, 0x000188b2, 0x00000101, 0x00000a1f }, /* 802.11 HyperLan 2 */ { 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f }, { 104, 0x00022010, 0x00008806, 0x00000101, 0x00000a0f }, { 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f }, { 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f }, { 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f }, { 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f }, { 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f }, { 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f }, { 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f }, { 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f }, /* 802.11 UNII */ { 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f }, { 149, 0x00022020, 
0x000090a6, 0x00000101, 0x00000a07 }, { 153, 0x00022020, 0x000090ae, 0x00000101, 0x00000a07 }, { 157, 0x00022020, 0x000090b6, 0x00000101, 0x00000a07 }, { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, }; static int rt2500usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; u8 *tx_power; unsigned int i; /* * Initialize all hw fields. * * Don't set IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING unless we are * capable of sending the buffered frames out after the DTIM * transmission using rt2x00lib_beacondone. This will send out * multicast and broadcast traffic immediately instead of buffering it * infinitly and thus dropping it after some time. */ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK); ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS); ieee80211_hw_set(rt2x00dev->hw, RX_INCLUDES_FCS); ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM); /* * Disable powersaving as default. */ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); /* * Initialize hw_mode information. */ spec->supported_bands = SUPPORT_BAND_2GHZ; spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; if (rt2x00_rf(rt2x00dev, RF2522)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); spec->channels = rf_vals_bg_2522; } else if (rt2x00_rf(rt2x00dev, RF2523)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); spec->channels = rf_vals_bg_2523; } else if (rt2x00_rf(rt2x00dev, RF2524)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); spec->channels = rf_vals_bg_2524; } else if (rt2x00_rf(rt2x00dev, RF2525)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); spec->channels = rf_vals_bg_2525; } else if (rt2x00_rf(rt2x00dev, RF2525E)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); spec->channels = rf_vals_bg_2525e; } else if (rt2x00_rf(rt2x00dev, RF5222)) { spec->supported_bands |= SUPPORT_BAND_5GHZ; spec->num_channels = ARRAY_SIZE(rf_vals_5222); spec->channels = rf_vals_5222; } /* * Create channel information array */ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); for (i = 0; i < 14; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); } if (spec->num_channels > 14) { for (i = 14; i < spec->num_channels; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = DEFAULT_TXPOWER; } } return 0; } static int rt2500usb_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; u16 reg; /* * Allocate eeprom data. */ retval = rt2500usb_validate_eeprom(rt2x00dev); if (retval) return retval; retval = rt2500usb_init_eeprom(rt2x00dev); if (retval) return retval; /* * Enable rfkill polling by setting GPIO direction of the * rfkill switch GPIO pin correctly. */ reg = rt2500usb_register_read(rt2x00dev, MAC_CSR19); rt2x00_set_field16(&reg, MAC_CSR19_DIR0, 0); rt2500usb_register_write(rt2x00dev, MAC_CSR19, reg); /* * Initialize hw specifications. 
*/ retval = rt2500usb_probe_hw_mode(rt2x00dev); if (retval) return retval; /* * This device requires the atim queue */ __set_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags); __set_bit(REQUIRE_BEACON_GUARD, &rt2x00dev->cap_flags); if (!modparam_nohwcrypt) { __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags); __set_bit(REQUIRE_COPY_IV, &rt2x00dev->cap_flags); } __set_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags); __set_bit(REQUIRE_PS_AUTOWAKE, &rt2x00dev->cap_flags); /* * Set the rssi offset. */ rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; return 0; } static const struct ieee80211_ops rt2500usb_mac80211_ops = { .tx = rt2x00mac_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .set_tim = rt2x00mac_set_tim, .set_key = rt2x00mac_set_key, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2x00mac_conf_tx, .rfkill_poll = rt2x00mac_rfkill_poll, .flush = rt2x00mac_flush, .set_antenna = rt2x00mac_set_antenna, .get_antenna = rt2x00mac_get_antenna, .get_ringparam = rt2x00mac_get_ringparam, .tx_frames_pending = rt2x00mac_tx_frames_pending, }; static const struct rt2x00lib_ops rt2500usb_rt2x00_ops = { .probe_hw = rt2500usb_probe_hw, .initialize = rt2x00usb_initialize, .uninitialize = rt2x00usb_uninitialize, .clear_entry = rt2x00usb_clear_entry, .set_device_state = rt2500usb_set_device_state, .rfkill_poll = rt2500usb_rfkill_poll, .link_stats = rt2500usb_link_stats, .reset_tuner = rt2500usb_reset_tuner, .watchdog = rt2x00usb_watchdog, .start_queue = rt2500usb_start_queue, .kick_queue = rt2x00usb_kick_queue, .stop_queue = rt2500usb_stop_queue, .flush_queue = rt2x00usb_flush_queue, .write_tx_desc = rt2500usb_write_tx_desc, .write_beacon = rt2500usb_write_beacon, .get_tx_data_len = rt2500usb_get_tx_data_len, .fill_rxdone = rt2500usb_fill_rxdone, .config_shared_key = rt2500usb_config_key, .config_pairwise_key = rt2500usb_config_key, .config_filter = rt2500usb_config_filter, .config_intf = rt2500usb_config_intf, .config_erp = rt2500usb_config_erp, .config_ant = rt2500usb_config_ant, .config = rt2500usb_config, }; static void rt2500usb_queue_init(struct data_queue *queue) { switch (queue->qid) { case QID_RX: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = RXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; case QID_BEACON: queue->limit = 1; queue->data_size = MGMT_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb_bcn); break; case QID_ATIM: queue->limit = 8; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_usb); break; default: BUG(); break; } } static const struct rt2x00_ops rt2500usb_ops = { .name = KBUILD_MODNAME, .max_ap_intf = 1, .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, .queue_init = rt2500usb_queue_init, .lib = &rt2500usb_rt2x00_ops, .hw = &rt2500usb_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt2500usb_rt2x00debug, #endif /* 
CONFIG_RT2X00_LIB_DEBUGFS */ }; /* * rt2500usb module information. */ static const struct usb_device_id rt2500usb_device_table[] = { /* ASUS */ { USB_DEVICE(0x0b05, 0x1706) }, { USB_DEVICE(0x0b05, 0x1707) }, /* Belkin */ { USB_DEVICE(0x050d, 0x7050) }, /* FCC ID: K7SF5D7050A ver. 2.x */ { USB_DEVICE(0x050d, 0x7051) }, /* Cisco Systems */ { USB_DEVICE(0x13b1, 0x000d) }, { USB_DEVICE(0x13b1, 0x0011) }, { USB_DEVICE(0x13b1, 0x001a) }, /* Conceptronic */ { USB_DEVICE(0x14b2, 0x3c02) }, /* D-LINK */ { USB_DEVICE(0x2001, 0x3c00) }, /* Gigabyte */ { USB_DEVICE(0x1044, 0x8001) }, { USB_DEVICE(0x1044, 0x8007) }, /* Hercules */ { USB_DEVICE(0x06f8, 0xe000) }, /* Melco */ { USB_DEVICE(0x0411, 0x005e) }, { USB_DEVICE(0x0411, 0x0066) }, { USB_DEVICE(0x0411, 0x0067) }, { USB_DEVICE(0x0411, 0x008b) }, { USB_DEVICE(0x0411, 0x0097) }, /* MSI */ { USB_DEVICE(0x0db0, 0x6861) }, { USB_DEVICE(0x0db0, 0x6865) }, { USB_DEVICE(0x0db0, 0x6869) }, /* Ralink */ { USB_DEVICE(0x148f, 0x1706) }, { USB_DEVICE(0x148f, 0x2570) }, { USB_DEVICE(0x148f, 0x9020) }, /* Sagem */ { USB_DEVICE(0x079b, 0x004b) }, /* Siemens */ { USB_DEVICE(0x0681, 0x3c06) }, /* SMC */ { USB_DEVICE(0x0707, 0xee13) }, /* Spairon */ { USB_DEVICE(0x114b, 0x0110) }, /* SURECOM */ { USB_DEVICE(0x0769, 0x11f3) }, /* Trust */ { USB_DEVICE(0x0eb0, 0x9020) }, /* VTech */ { USB_DEVICE(0x0f88, 0x3012) }, /* Zinwell */ { USB_DEVICE(0x5a57, 0x0260) }, { 0, } }; MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2500 USB Wireless LAN driver."); MODULE_DEVICE_TABLE(usb, rt2500usb_device_table); MODULE_LICENSE("GPL"); static int rt2500usb_probe(struct usb_interface *usb_intf, const struct usb_device_id *id) { return rt2x00usb_probe(usb_intf, &rt2500usb_ops); } static struct usb_driver rt2500usb_driver = { .name = KBUILD_MODNAME, .id_table = rt2500usb_device_table, .probe = rt2500usb_probe, .disconnect = rt2x00usb_disconnect, .suspend = rt2x00usb_suspend, .resume = rt2x00usb_resume, .reset_resume = rt2x00usb_resume, .disable_hub_initiated_lpm = 1, }; module_usb_driver(rt2500usb_driver);
linux-master
drivers/net/wireless/ralink/rt2x00/rt2500usb.c
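Illustrative note on rt2500usb.c above: rt2500usb_validate_eeprom() treats any EEPROM word that reads back as 0xffff as erased and seeds it with sane defaults through rt2x00_set_field16(). The standalone userspace sketch below mimics only that pattern; set_field16(), the EX_* field masks and the values in main() are invented for the example and do not reflect the real EEPROM layout.

/*
 * Sketch of the "0xffff means erased, seed defaults" pattern used by
 * rt2500usb_validate_eeprom(). Not driver code; field masks are made up.
 */
#include <stdint.h>
#include <stdio.h>

#define EX_ANTENNA_NUM 0x0003	/* hypothetical field masks */
#define EX_ANTENNA_LED 0x0030

static void set_field16(uint16_t *reg, uint16_t mask, uint16_t value)
{
	unsigned int shift = __builtin_ctz(mask);	/* GCC/Clang builtin */

	*reg = (*reg & ~mask) | ((value << shift) & mask);
}

int main(void)
{
	uint16_t word = 0xffff;	/* pretend the EEPROM read returned all ones */

	if (word == 0xffff) {	/* erased/blank EEPROM: fall back to defaults */
		set_field16(&word, EX_ANTENNA_NUM, 2);
		set_field16(&word, EX_ANTENNA_LED, 0);
	}
	printf("Antenna word after defaults: 0x%04x\n", (unsigned int)word);
	return 0;
}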
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> Copyright (C) 2004 - 2010 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00lib Abstract: rt2x00 generic device routines. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/log2.h> #include <linux/of.h> #include <linux/of_net.h> #include "rt2x00.h" #include "rt2x00lib.h" /* * Utility functions. */ u32 rt2x00lib_get_bssidx(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif) { /* * When in STA mode, bssidx is always 0 otherwise local_address[5] * contains the bss number, see BSS_ID_MASK comments for details. */ if (rt2x00dev->intf_sta_count) return 0; return vif->addr[5] & (rt2x00dev->ops->max_ap_intf - 1); } EXPORT_SYMBOL_GPL(rt2x00lib_get_bssidx); /* * Radio control handlers. */ int rt2x00lib_enable_radio(struct rt2x00_dev *rt2x00dev) { int status; /* * Don't enable the radio twice. * And check if the hardware button has been disabled. */ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return 0; /* * Initialize all data queues. */ rt2x00queue_init_queues(rt2x00dev); /* * Enable radio. */ status = rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_ON); if (status) return status; rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_ON); rt2x00leds_led_radio(rt2x00dev, true); rt2x00led_led_activity(rt2x00dev, true); set_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags); /* * Enable queues. */ rt2x00queue_start_queues(rt2x00dev); rt2x00link_start_tuner(rt2x00dev); /* * Start watchdog monitoring. */ rt2x00link_start_watchdog(rt2x00dev); return 0; } void rt2x00lib_disable_radio(struct rt2x00_dev *rt2x00dev) { if (!test_and_clear_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; /* * Stop watchdog monitoring. */ rt2x00link_stop_watchdog(rt2x00dev); /* * Stop all queues */ rt2x00link_stop_tuner(rt2x00dev); rt2x00queue_stop_queues(rt2x00dev); rt2x00queue_flush_queues(rt2x00dev, true); /* * Disable radio. */ rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_OFF); rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF); rt2x00led_led_activity(rt2x00dev, false); rt2x00leds_led_radio(rt2x00dev, false); } static void rt2x00lib_intf_scheduled_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = data; struct rt2x00_intf *intf = vif_to_intf(vif); /* * It is possible the radio was disabled while the work had been * scheduled. If that happens we should return here immediately, * note that in the spinlock protected area above the delayed_flags * have been cleared correctly. */ if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; if (test_and_clear_bit(DELAYED_UPDATE_BEACON, &intf->delayed_flags)) { mutex_lock(&intf->beacon_skb_mutex); rt2x00queue_update_beacon(rt2x00dev, vif); mutex_unlock(&intf->beacon_skb_mutex); } } static void rt2x00lib_intf_scheduled(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, intf_work); /* * Iterate over each interface and perform the * requested configurations. 
*/ ieee80211_iterate_active_interfaces(rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL, rt2x00lib_intf_scheduled_iter, rt2x00dev); } static void rt2x00lib_autowakeup(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, autowakeup_work.work); if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return; if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) rt2x00_err(rt2x00dev, "Device failed to wakeup\n"); clear_bit(CONFIG_POWERSAVING, &rt2x00dev->flags); } /* * Interrupt context handlers. */ static void rt2x00lib_bc_buffer_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct ieee80211_tx_control control = {}; struct rt2x00_dev *rt2x00dev = data; struct sk_buff *skb; /* * Only AP mode interfaces do broad- and multicast buffering */ if (vif->type != NL80211_IFTYPE_AP) return; /* * Send out buffered broad- and multicast frames */ skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); while (skb) { rt2x00mac_tx(rt2x00dev->hw, &control, skb); skb = ieee80211_get_buffered_bc(rt2x00dev->hw, vif); } } static void rt2x00lib_beaconupdate_iter(void *data, u8 *mac, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = data; if (vif->type != NL80211_IFTYPE_AP && vif->type != NL80211_IFTYPE_ADHOC && vif->type != NL80211_IFTYPE_MESH_POINT) return; /* * Update the beacon without locking. This is safe on PCI devices * as they only update the beacon periodically here. This should * never be called for USB devices. */ WARN_ON(rt2x00_is_usb(rt2x00dev)); rt2x00queue_update_beacon(rt2x00dev, vif); } void rt2x00lib_beacondone(struct rt2x00_dev *rt2x00dev) { if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; /* send buffered bc/mc frames out for every bssid */ ieee80211_iterate_active_interfaces_atomic( rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL, rt2x00lib_bc_buffer_iter, rt2x00dev); /* * Devices with pre tbtt interrupt don't need to update the beacon * here as they will fetch the next beacon directly prior to * transmission. */ if (rt2x00_has_cap_pre_tbtt_interrupt(rt2x00dev)) return; /* fetch next beacon */ ieee80211_iterate_active_interfaces_atomic( rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL, rt2x00lib_beaconupdate_iter, rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00lib_beacondone); void rt2x00lib_pretbtt(struct rt2x00_dev *rt2x00dev) { if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return; /* fetch next beacon */ ieee80211_iterate_active_interfaces_atomic( rt2x00dev->hw, IEEE80211_IFACE_ITER_RESUME_ALL, rt2x00lib_beaconupdate_iter, rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00lib_pretbtt); void rt2x00lib_dmastart(struct queue_entry *entry) { set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); rt2x00queue_index_inc(entry, Q_INDEX); } EXPORT_SYMBOL_GPL(rt2x00lib_dmastart); void rt2x00lib_dmadone(struct queue_entry *entry) { set_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags); clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); rt2x00queue_index_inc(entry, Q_INDEX_DMA_DONE); } EXPORT_SYMBOL_GPL(rt2x00lib_dmadone); static inline int rt2x00lib_txdone_bar_status(struct queue_entry *entry) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct ieee80211_bar *bar = (void *) entry->skb->data; struct rt2x00_bar_list_entry *bar_entry; int ret; if (likely(!ieee80211_is_back_req(bar->frame_control))) return 0; /* * Unlike all other frames, the status report for BARs does * not directly come from the hardware as it is incapable of * matching a BA to a previously send BAR. 
The hardware will * report all BARs as if they weren't acked at all. * * Instead the RX-path will scan for incoming BAs and set the * block_acked flag if it sees one that was likely caused by * a BAR from us. * * Remove remaining BARs here and return their status for * TX done processing. */ ret = 0; rcu_read_lock(); list_for_each_entry_rcu(bar_entry, &rt2x00dev->bar_list, list) { if (bar_entry->entry != entry) continue; spin_lock_bh(&rt2x00dev->bar_list_lock); /* Return whether this BAR was blockacked or not */ ret = bar_entry->block_acked; /* Remove the BAR from our checklist */ list_del_rcu(&bar_entry->list); spin_unlock_bh(&rt2x00dev->bar_list_lock); kfree_rcu(bar_entry, head); break; } rcu_read_unlock(); return ret; } static void rt2x00lib_fill_tx_status(struct rt2x00_dev *rt2x00dev, struct ieee80211_tx_info *tx_info, struct skb_frame_desc *skbdesc, struct txdone_entry_desc *txdesc, bool success) { u8 rate_idx, rate_flags, retry_rates; int i; rate_idx = skbdesc->tx_rate_idx; rate_flags = skbdesc->tx_rate_flags; retry_rates = test_bit(TXDONE_FALLBACK, &txdesc->flags) ? (txdesc->retry + 1) : 1; /* * Initialize TX status */ memset(&tx_info->status, 0, sizeof(tx_info->status)); tx_info->status.ack_signal = 0; /* * Frame was send with retries, hardware tried * different rates to send out the frame, at each * retry it lowered the rate 1 step except when the * lowest rate was used. */ for (i = 0; i < retry_rates && i < IEEE80211_TX_MAX_RATES; i++) { tx_info->status.rates[i].idx = rate_idx - i; tx_info->status.rates[i].flags = rate_flags; if (rate_idx - i == 0) { /* * The lowest rate (index 0) was used until the * number of max retries was reached. */ tx_info->status.rates[i].count = retry_rates - i; i++; break; } tx_info->status.rates[i].count = 1; } if (i < (IEEE80211_TX_MAX_RATES - 1)) tx_info->status.rates[i].idx = -1; /* terminate */ if (test_bit(TXDONE_NO_ACK_REQ, &txdesc->flags)) tx_info->flags |= IEEE80211_TX_CTL_NO_ACK; if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) { if (success) tx_info->flags |= IEEE80211_TX_STAT_ACK; else rt2x00dev->low_level_stats.dot11ACKFailureCount++; } /* * Every single frame has it's own tx status, hence report * every frame as ampdu of size 1. * * TODO: if we can find out how many frames were aggregated * by the hw we could provide the real ampdu_len to mac80211 * which would allow the rc algorithm to better decide on * which rates are suitable. */ if (test_bit(TXDONE_AMPDU, &txdesc->flags) || tx_info->flags & IEEE80211_TX_CTL_AMPDU) { tx_info->flags |= IEEE80211_TX_STAT_AMPDU | IEEE80211_TX_CTL_AMPDU; tx_info->status.ampdu_len = 1; tx_info->status.ampdu_ack_len = success ? 1 : 0; } if (rate_flags & IEEE80211_TX_RC_USE_RTS_CTS) { if (success) rt2x00dev->low_level_stats.dot11RTSSuccessCount++; else rt2x00dev->low_level_stats.dot11RTSFailureCount++; } } static void rt2x00lib_clear_entry(struct rt2x00_dev *rt2x00dev, struct queue_entry *entry) { /* * Make this entry available for reuse. */ entry->skb = NULL; entry->flags = 0; rt2x00dev->ops->lib->clear_entry(entry); rt2x00queue_index_inc(entry, Q_INDEX_DONE); /* * If the data queue was below the threshold before the txdone * handler we must make sure the packet queue in the mac80211 stack * is reenabled when the txdone handler has finished. This has to be * serialized with rt2x00mac_tx(), otherwise we can wake up queue * before it was stopped. 
*/ spin_lock_bh(&entry->queue->tx_lock); if (!rt2x00queue_threshold(entry->queue)) rt2x00queue_unpause_queue(entry->queue); spin_unlock_bh(&entry->queue->tx_lock); } void rt2x00lib_txdone_nomatch(struct queue_entry *entry, struct txdone_entry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); struct ieee80211_tx_info txinfo = {}; bool success; /* * Unmap the skb. */ rt2x00queue_unmap_skb(entry); /* * Signal that the TX descriptor is no longer in the skb. */ skbdesc->flags &= ~SKBDESC_DESC_IN_SKB; /* * Send frame to debugfs immediately, after this call is completed * we are going to overwrite the skb->cb array. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry); /* * Determine if the frame has been successfully transmitted and * remove BARs from our check list while checking for their * TX status. */ success = rt2x00lib_txdone_bar_status(entry) || test_bit(TXDONE_SUCCESS, &txdesc->flags); if (!test_bit(TXDONE_UNKNOWN, &txdesc->flags)) { /* * Update TX statistics. */ rt2x00dev->link.qual.tx_success += success; rt2x00dev->link.qual.tx_failed += !success; rt2x00lib_fill_tx_status(rt2x00dev, &txinfo, skbdesc, txdesc, success); ieee80211_tx_status_noskb(rt2x00dev->hw, skbdesc->sta, &txinfo); } dev_kfree_skb_any(entry->skb); rt2x00lib_clear_entry(rt2x00dev, entry); } EXPORT_SYMBOL_GPL(rt2x00lib_txdone_nomatch); void rt2x00lib_txdone(struct queue_entry *entry, struct txdone_entry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(entry->skb); struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); u8 skbdesc_flags = skbdesc->flags; unsigned int header_length; bool success; /* * Unmap the skb. */ rt2x00queue_unmap_skb(entry); /* * Remove the extra tx headroom from the skb. */ skb_pull(entry->skb, rt2x00dev->extra_tx_headroom); /* * Signal that the TX descriptor is no longer in the skb. */ skbdesc->flags &= ~SKBDESC_DESC_IN_SKB; /* * Determine the length of 802.11 header. */ header_length = ieee80211_get_hdrlen_from_skb(entry->skb); /* * Remove L2 padding which was added during */ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD)) rt2x00queue_remove_l2pad(entry->skb, header_length); /* * If the IV/EIV data was stripped from the frame before it was * passed to the hardware, we should now reinsert it again because * mac80211 will expect the same data to be present it the * frame as it was passed to us. */ if (rt2x00_has_cap_hw_crypto(rt2x00dev)) rt2x00crypto_tx_insert_iv(entry->skb, header_length); /* * Send frame to debugfs immediately, after this call is completed * we are going to overwrite the skb->cb array. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_TXDONE, entry); /* * Determine if the frame has been successfully transmitted and * remove BARs from our check list while checking for their * TX status. */ success = rt2x00lib_txdone_bar_status(entry) || test_bit(TXDONE_SUCCESS, &txdesc->flags) || test_bit(TXDONE_UNKNOWN, &txdesc->flags); /* * Update TX statistics. */ rt2x00dev->link.qual.tx_success += success; rt2x00dev->link.qual.tx_failed += !success; rt2x00lib_fill_tx_status(rt2x00dev, tx_info, skbdesc, txdesc, success); /* * Only send the status report to mac80211 when it's a frame * that originated in mac80211. If this was a extra frame coming * through a mac80211 library call (RTS/CTS) then we should not * send the status report back. 
*/ if (!(skbdesc_flags & SKBDESC_NOT_MAC80211)) { if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TASKLET_CONTEXT)) ieee80211_tx_status(rt2x00dev->hw, entry->skb); else ieee80211_tx_status_ni(rt2x00dev->hw, entry->skb); } else { dev_kfree_skb_any(entry->skb); } rt2x00lib_clear_entry(rt2x00dev, entry); } EXPORT_SYMBOL_GPL(rt2x00lib_txdone); void rt2x00lib_txdone_noinfo(struct queue_entry *entry, u32 status) { struct txdone_entry_desc txdesc; txdesc.flags = 0; __set_bit(status, &txdesc.flags); txdesc.retry = 0; rt2x00lib_txdone(entry, &txdesc); } EXPORT_SYMBOL_GPL(rt2x00lib_txdone_noinfo); static u8 *rt2x00lib_find_ie(u8 *data, unsigned int len, u8 ie) { struct ieee80211_mgmt *mgmt = (void *)data; u8 *pos, *end; pos = (u8 *)mgmt->u.beacon.variable; end = data + len; while (pos < end) { if (pos + 2 + pos[1] > end) return NULL; if (pos[0] == ie) return pos; pos += 2 + pos[1]; } return NULL; } static void rt2x00lib_sleep(struct work_struct *work) { struct rt2x00_dev *rt2x00dev = container_of(work, struct rt2x00_dev, sleep_work); if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return; /* * Check again is powersaving is enabled, to prevent races from delayed * work execution. */ if (!test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags)) rt2x00lib_config(rt2x00dev, &rt2x00dev->hw->conf, IEEE80211_CONF_CHANGE_PS); } static void rt2x00lib_rxdone_check_ba(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct rxdone_entry_desc *rxdesc) { struct rt2x00_bar_list_entry *entry; struct ieee80211_bar *ba = (void *)skb->data; if (likely(!ieee80211_is_back(ba->frame_control))) return; if (rxdesc->size < sizeof(*ba) + FCS_LEN) return; rcu_read_lock(); list_for_each_entry_rcu(entry, &rt2x00dev->bar_list, list) { if (ba->start_seq_num != entry->start_seq_num) continue; #define TID_CHECK(a, b) ( \ ((a) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK)) == \ ((b) & cpu_to_le16(IEEE80211_BAR_CTRL_TID_INFO_MASK))) \ if (!TID_CHECK(ba->control, entry->control)) continue; #undef TID_CHECK if (!ether_addr_equal_64bits(ba->ra, entry->ta)) continue; if (!ether_addr_equal_64bits(ba->ta, entry->ra)) continue; /* Mark BAR since we received the according BA */ spin_lock_bh(&rt2x00dev->bar_list_lock); entry->block_acked = 1; spin_unlock_bh(&rt2x00dev->bar_list_lock); break; } rcu_read_unlock(); } static void rt2x00lib_rxdone_check_ps(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct rxdone_entry_desc *rxdesc) { struct ieee80211_hdr *hdr = (void *) skb->data; struct ieee80211_tim_ie *tim_ie; u8 *tim; u8 tim_len; bool cam; /* If this is not a beacon, or if mac80211 has no powersaving * configured, or if the device is already in powersaving mode * we can exit now. */ if (likely(!ieee80211_is_beacon(hdr->frame_control) || !(rt2x00dev->hw->conf.flags & IEEE80211_CONF_PS))) return; /* min. beacon length + FCS_LEN */ if (skb->len <= 40 + FCS_LEN) return; /* and only beacons from the associated BSSID, please */ if (!(rxdesc->dev_flags & RXDONE_MY_BSS) || !rt2x00dev->aid) return; rt2x00dev->last_beacon = jiffies; tim = rt2x00lib_find_ie(skb->data, skb->len - FCS_LEN, WLAN_EID_TIM); if (!tim) return; if (tim[1] < sizeof(*tim_ie)) return; tim_len = tim[1]; tim_ie = (struct ieee80211_tim_ie *) &tim[2]; /* Check whenever the PHY can be turned off again. */ /* 1. What about buffered unicast traffic for our AID? */ cam = ieee80211_check_tim(tim_ie, tim_len, rt2x00dev->aid); /* 2. Maybe the AP wants to send multicast/broadcast data? 
*/ cam |= (tim_ie->bitmap_ctrl & 0x01); if (!cam && !test_bit(CONFIG_POWERSAVING, &rt2x00dev->flags)) queue_work(rt2x00dev->workqueue, &rt2x00dev->sleep_work); } static int rt2x00lib_rxdone_read_signal(struct rt2x00_dev *rt2x00dev, struct rxdone_entry_desc *rxdesc) { struct ieee80211_supported_band *sband; const struct rt2x00_rate *rate; unsigned int i; int signal = rxdesc->signal; int type = (rxdesc->dev_flags & RXDONE_SIGNAL_MASK); switch (rxdesc->rate_mode) { case RATE_MODE_CCK: case RATE_MODE_OFDM: /* * For non-HT rates the MCS value needs to contain the * actually used rate modulation (CCK or OFDM). */ if (rxdesc->dev_flags & RXDONE_SIGNAL_MCS) signal = RATE_MCS(rxdesc->rate_mode, signal); sband = &rt2x00dev->bands[rt2x00dev->curr_band]; for (i = 0; i < sband->n_bitrates; i++) { rate = rt2x00_get_rate(sband->bitrates[i].hw_value); if (((type == RXDONE_SIGNAL_PLCP) && (rate->plcp == signal)) || ((type == RXDONE_SIGNAL_BITRATE) && (rate->bitrate == signal)) || ((type == RXDONE_SIGNAL_MCS) && (rate->mcs == signal))) { return i; } } break; case RATE_MODE_HT_MIX: case RATE_MODE_HT_GREENFIELD: if (signal >= 0 && signal <= 76) return signal; break; default: break; } rt2x00_warn(rt2x00dev, "Frame received with unrecognized signal, mode=0x%.4x, signal=0x%.4x, type=%d\n", rxdesc->rate_mode, signal, type); return 0; } void rt2x00lib_rxdone(struct queue_entry *entry, gfp_t gfp) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct rxdone_entry_desc rxdesc; struct sk_buff *skb; struct ieee80211_rx_status *rx_status; unsigned int header_length; int rate_idx; if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) || !test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) goto submit_entry; if (test_bit(ENTRY_DATA_IO_FAILED, &entry->flags)) goto submit_entry; /* * Allocate a new sk_buffer. If no new buffer available, drop the * received frame and reuse the existing buffer. */ skb = rt2x00queue_alloc_rxskb(entry, gfp); if (!skb) goto submit_entry; /* * Unmap the skb. */ rt2x00queue_unmap_skb(entry); /* * Extract the RXD details. */ memset(&rxdesc, 0, sizeof(rxdesc)); rt2x00dev->ops->lib->fill_rxdone(entry, &rxdesc); /* * Check for valid size in case we get corrupted descriptor from * hardware. */ if (unlikely(rxdesc.size == 0 || rxdesc.size > entry->queue->data_size)) { rt2x00_err(rt2x00dev, "Wrong frame size %d max %d\n", rxdesc.size, entry->queue->data_size); dev_kfree_skb(entry->skb); goto renew_skb; } /* * The data behind the ieee80211 header must be * aligned on a 4 byte boundary. */ header_length = ieee80211_get_hdrlen_from_skb(entry->skb); /* * Hardware might have stripped the IV/EIV/ICV data, * in that case it is possible that the data was * provided separately (through hardware descriptor) * in which case we should reinsert the data into the frame. */ if ((rxdesc.dev_flags & RXDONE_CRYPTO_IV) && (rxdesc.flags & RX_FLAG_IV_STRIPPED)) rt2x00crypto_rx_insert_iv(entry->skb, header_length, &rxdesc); else if (header_length && (rxdesc.size > header_length) && (rxdesc.dev_flags & RXDONE_L2PAD)) rt2x00queue_remove_l2pad(entry->skb, header_length); /* Trim buffer to correct size */ skb_trim(entry->skb, rxdesc.size); /* * Translate the signal to the correct bitrate index. */ rate_idx = rt2x00lib_rxdone_read_signal(rt2x00dev, &rxdesc); if (rxdesc.rate_mode == RATE_MODE_HT_MIX || rxdesc.rate_mode == RATE_MODE_HT_GREENFIELD) rxdesc.encoding = RX_ENC_HT; /* * Check if this is a beacon, and more frames have been * buffered while we were in powersaving mode. 
*/ rt2x00lib_rxdone_check_ps(rt2x00dev, entry->skb, &rxdesc); /* * Check for incoming BlockAcks to match to the BlockAckReqs * we've send out. */ rt2x00lib_rxdone_check_ba(rt2x00dev, entry->skb, &rxdesc); /* * Update extra components */ rt2x00link_update_stats(rt2x00dev, entry->skb, &rxdesc); rt2x00debug_update_crypto(rt2x00dev, &rxdesc); rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_RXDONE, entry); /* * Initialize RX status information, and send frame * to mac80211. */ rx_status = IEEE80211_SKB_RXCB(entry->skb); /* Ensure that all fields of rx_status are initialized * properly. The skb->cb array was used for driver * specific informations, so rx_status might contain * garbage. */ memset(rx_status, 0, sizeof(*rx_status)); rx_status->mactime = rxdesc.timestamp; rx_status->band = rt2x00dev->curr_band; rx_status->freq = rt2x00dev->curr_freq; rx_status->rate_idx = rate_idx; rx_status->signal = rxdesc.rssi; rx_status->flag = rxdesc.flags; rx_status->enc_flags = rxdesc.enc_flags; rx_status->encoding = rxdesc.encoding; rx_status->bw = rxdesc.bw; rx_status->antenna = rt2x00dev->link.ant.active.rx; ieee80211_rx_ni(rt2x00dev->hw, entry->skb); renew_skb: /* * Replace the skb with the freshly allocated one. */ entry->skb = skb; submit_entry: entry->flags = 0; rt2x00queue_index_inc(entry, Q_INDEX_DONE); if (test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags) && test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2x00dev->ops->lib->clear_entry(entry); } EXPORT_SYMBOL_GPL(rt2x00lib_rxdone); /* * Driver initialization handlers. */ const struct rt2x00_rate rt2x00_supported_rates[12] = { { .flags = DEV_RATE_CCK, .bitrate = 10, .ratemask = BIT(0), .plcp = 0x00, .mcs = RATE_MCS(RATE_MODE_CCK, 0), }, { .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE, .bitrate = 20, .ratemask = BIT(1), .plcp = 0x01, .mcs = RATE_MCS(RATE_MODE_CCK, 1), }, { .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE, .bitrate = 55, .ratemask = BIT(2), .plcp = 0x02, .mcs = RATE_MCS(RATE_MODE_CCK, 2), }, { .flags = DEV_RATE_CCK | DEV_RATE_SHORT_PREAMBLE, .bitrate = 110, .ratemask = BIT(3), .plcp = 0x03, .mcs = RATE_MCS(RATE_MODE_CCK, 3), }, { .flags = DEV_RATE_OFDM, .bitrate = 60, .ratemask = BIT(4), .plcp = 0x0b, .mcs = RATE_MCS(RATE_MODE_OFDM, 0), }, { .flags = DEV_RATE_OFDM, .bitrate = 90, .ratemask = BIT(5), .plcp = 0x0f, .mcs = RATE_MCS(RATE_MODE_OFDM, 1), }, { .flags = DEV_RATE_OFDM, .bitrate = 120, .ratemask = BIT(6), .plcp = 0x0a, .mcs = RATE_MCS(RATE_MODE_OFDM, 2), }, { .flags = DEV_RATE_OFDM, .bitrate = 180, .ratemask = BIT(7), .plcp = 0x0e, .mcs = RATE_MCS(RATE_MODE_OFDM, 3), }, { .flags = DEV_RATE_OFDM, .bitrate = 240, .ratemask = BIT(8), .plcp = 0x09, .mcs = RATE_MCS(RATE_MODE_OFDM, 4), }, { .flags = DEV_RATE_OFDM, .bitrate = 360, .ratemask = BIT(9), .plcp = 0x0d, .mcs = RATE_MCS(RATE_MODE_OFDM, 5), }, { .flags = DEV_RATE_OFDM, .bitrate = 480, .ratemask = BIT(10), .plcp = 0x08, .mcs = RATE_MCS(RATE_MODE_OFDM, 6), }, { .flags = DEV_RATE_OFDM, .bitrate = 540, .ratemask = BIT(11), .plcp = 0x0c, .mcs = RATE_MCS(RATE_MODE_OFDM, 7), }, }; static void rt2x00lib_channel(struct ieee80211_channel *entry, const int channel, const int tx_power, const int value) { /* XXX: this assumption about the band is wrong for 802.11j */ entry->band = channel <= 14 ? 
NL80211_BAND_2GHZ : NL80211_BAND_5GHZ; entry->center_freq = ieee80211_channel_to_frequency(channel, entry->band); entry->hw_value = value; entry->max_power = tx_power; entry->max_antenna_gain = 0xff; } static void rt2x00lib_rate(struct ieee80211_rate *entry, const u16 index, const struct rt2x00_rate *rate) { entry->flags = 0; entry->bitrate = rate->bitrate; entry->hw_value = index; entry->hw_value_short = index; if (rate->flags & DEV_RATE_SHORT_PREAMBLE) entry->flags |= IEEE80211_RATE_SHORT_PREAMBLE; } void rt2x00lib_set_mac_address(struct rt2x00_dev *rt2x00dev, u8 *eeprom_mac_addr) { of_get_mac_address(rt2x00dev->dev->of_node, eeprom_mac_addr); if (!is_valid_ether_addr(eeprom_mac_addr)) { eth_random_addr(eeprom_mac_addr); rt2x00_eeprom_dbg(rt2x00dev, "MAC: %pM\n", eeprom_mac_addr); } } EXPORT_SYMBOL_GPL(rt2x00lib_set_mac_address); static int rt2x00lib_probe_hw_modes(struct rt2x00_dev *rt2x00dev, struct hw_mode_spec *spec) { struct ieee80211_hw *hw = rt2x00dev->hw; struct ieee80211_channel *channels; struct ieee80211_rate *rates; unsigned int num_rates; unsigned int i; num_rates = 0; if (spec->supported_rates & SUPPORT_RATE_CCK) num_rates += 4; if (spec->supported_rates & SUPPORT_RATE_OFDM) num_rates += 8; channels = kcalloc(spec->num_channels, sizeof(*channels), GFP_KERNEL); if (!channels) return -ENOMEM; rates = kcalloc(num_rates, sizeof(*rates), GFP_KERNEL); if (!rates) goto exit_free_channels; /* * Initialize Rate list. */ for (i = 0; i < num_rates; i++) rt2x00lib_rate(&rates[i], i, rt2x00_get_rate(i)); /* * Initialize Channel list. */ for (i = 0; i < spec->num_channels; i++) { rt2x00lib_channel(&channels[i], spec->channels[i].channel, spec->channels_info[i].max_power, i); } /* * Intitialize 802.11b, 802.11g * Rates: CCK, OFDM. * Channels: 2.4 GHz */ if (spec->supported_bands & SUPPORT_BAND_2GHZ) { rt2x00dev->bands[NL80211_BAND_2GHZ].n_channels = 14; rt2x00dev->bands[NL80211_BAND_2GHZ].n_bitrates = num_rates; rt2x00dev->bands[NL80211_BAND_2GHZ].channels = channels; rt2x00dev->bands[NL80211_BAND_2GHZ].bitrates = rates; hw->wiphy->bands[NL80211_BAND_2GHZ] = &rt2x00dev->bands[NL80211_BAND_2GHZ]; memcpy(&rt2x00dev->bands[NL80211_BAND_2GHZ].ht_cap, &spec->ht, sizeof(spec->ht)); } /* * Intitialize 802.11a * Rates: OFDM. * Channels: OFDM, UNII, HiperLAN2. 
*/ if (spec->supported_bands & SUPPORT_BAND_5GHZ) { rt2x00dev->bands[NL80211_BAND_5GHZ].n_channels = spec->num_channels - 14; rt2x00dev->bands[NL80211_BAND_5GHZ].n_bitrates = num_rates - 4; rt2x00dev->bands[NL80211_BAND_5GHZ].channels = &channels[14]; rt2x00dev->bands[NL80211_BAND_5GHZ].bitrates = &rates[4]; hw->wiphy->bands[NL80211_BAND_5GHZ] = &rt2x00dev->bands[NL80211_BAND_5GHZ]; memcpy(&rt2x00dev->bands[NL80211_BAND_5GHZ].ht_cap, &spec->ht, sizeof(spec->ht)); } return 0; exit_free_channels: kfree(channels); rt2x00_err(rt2x00dev, "Allocation ieee80211 modes failed\n"); return -ENOMEM; } static void rt2x00lib_remove_hw(struct rt2x00_dev *rt2x00dev) { if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags)) ieee80211_unregister_hw(rt2x00dev->hw); if (likely(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ])) { kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->channels); kfree(rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ]->bitrates); rt2x00dev->hw->wiphy->bands[NL80211_BAND_2GHZ] = NULL; rt2x00dev->hw->wiphy->bands[NL80211_BAND_5GHZ] = NULL; } kfree(rt2x00dev->spec.channels_info); kfree(rt2x00dev->chan_survey); } static const struct ieee80211_tpt_blink rt2x00_tpt_blink[] = { { .throughput = 0 * 1024, .blink_time = 334 }, { .throughput = 1 * 1024, .blink_time = 260 }, { .throughput = 2 * 1024, .blink_time = 220 }, { .throughput = 5 * 1024, .blink_time = 190 }, { .throughput = 10 * 1024, .blink_time = 170 }, { .throughput = 25 * 1024, .blink_time = 150 }, { .throughput = 54 * 1024, .blink_time = 130 }, { .throughput = 120 * 1024, .blink_time = 110 }, { .throughput = 265 * 1024, .blink_time = 80 }, { .throughput = 586 * 1024, .blink_time = 50 }, }; static int rt2x00lib_probe_hw(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; int status; if (test_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags)) return 0; /* * Initialize HW modes. */ status = rt2x00lib_probe_hw_modes(rt2x00dev, spec); if (status) return status; /* * Initialize HW fields. */ rt2x00dev->hw->queues = rt2x00dev->ops->tx_queues; /* * Initialize extra TX headroom required. */ rt2x00dev->hw->extra_tx_headroom = max_t(unsigned int, IEEE80211_TX_STATUS_HEADROOM, rt2x00dev->extra_tx_headroom); /* * Take TX headroom required for alignment into account. */ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_L2PAD)) rt2x00dev->hw->extra_tx_headroom += RT2X00_L2PAD_SIZE; else if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) rt2x00dev->hw->extra_tx_headroom += RT2X00_ALIGN_SIZE; /* * Tell mac80211 about the size of our private STA structure. */ rt2x00dev->hw->sta_data_size = sizeof(struct rt2x00_sta); /* * Allocate tx status FIFO for driver use. */ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_TXSTATUS_FIFO)) { /* * Allocate the txstatus fifo. In the worst case the tx * status fifo has to hold the tx status of all entries * in all tx queues. Hence, calculate the kfifo size as * tx_queues * entry_num and round up to the nearest * power of 2. */ int kfifo_size = roundup_pow_of_two(rt2x00dev->ops->tx_queues * rt2x00dev->tx->limit * sizeof(u32)); status = kfifo_alloc(&rt2x00dev->txstatus_fifo, kfifo_size, GFP_KERNEL); if (status) return status; } /* * Initialize tasklets if used by the driver. Tasklets are * disabled until the interrupts are turned on. The driver * has to handle that. 
*/ #define RT2X00_TASKLET_INIT(taskletname) \ if (rt2x00dev->ops->lib->taskletname) { \ tasklet_setup(&rt2x00dev->taskletname, \ rt2x00dev->ops->lib->taskletname); \ } RT2X00_TASKLET_INIT(txstatus_tasklet); RT2X00_TASKLET_INIT(pretbtt_tasklet); RT2X00_TASKLET_INIT(tbtt_tasklet); RT2X00_TASKLET_INIT(rxdone_tasklet); RT2X00_TASKLET_INIT(autowake_tasklet); #undef RT2X00_TASKLET_INIT ieee80211_create_tpt_led_trigger(rt2x00dev->hw, IEEE80211_TPT_LEDTRIG_FL_RADIO, rt2x00_tpt_blink, ARRAY_SIZE(rt2x00_tpt_blink)); /* * Register HW. */ status = ieee80211_register_hw(rt2x00dev->hw); if (status) return status; set_bit(DEVICE_STATE_REGISTERED_HW, &rt2x00dev->flags); return 0; } /* * Initialization/uninitialization handlers. */ static void rt2x00lib_uninitialize(struct rt2x00_dev *rt2x00dev) { if (!test_and_clear_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags)) return; /* * Stop rfkill polling. */ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL)) rt2x00rfkill_unregister(rt2x00dev); /* * Allow the HW to uninitialize. */ rt2x00dev->ops->lib->uninitialize(rt2x00dev); /* * Free allocated queue entries. */ rt2x00queue_uninitialize(rt2x00dev); } static int rt2x00lib_initialize(struct rt2x00_dev *rt2x00dev) { int status; if (test_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags)) return 0; /* * Allocate all queue entries. */ status = rt2x00queue_initialize(rt2x00dev); if (status) return status; /* * Initialize the device. */ status = rt2x00dev->ops->lib->initialize(rt2x00dev); if (status) { rt2x00queue_uninitialize(rt2x00dev); return status; } set_bit(DEVICE_STATE_INITIALIZED, &rt2x00dev->flags); /* * Start rfkill polling. */ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL)) rt2x00rfkill_register(rt2x00dev); return 0; } int rt2x00lib_start(struct rt2x00_dev *rt2x00dev) { int retval = 0; /* * If this is the first interface which is added, * we should load the firmware now. */ retval = rt2x00lib_load_firmware(rt2x00dev); if (retval) goto out; /* * Initialize the device. */ retval = rt2x00lib_initialize(rt2x00dev); if (retval) goto out; rt2x00dev->intf_ap_count = 0; rt2x00dev->intf_sta_count = 0; rt2x00dev->intf_associated = 0; /* Enable the radio */ retval = rt2x00lib_enable_radio(rt2x00dev); if (retval) goto out; set_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags); out: return retval; } void rt2x00lib_stop(struct rt2x00_dev *rt2x00dev) { if (!test_and_clear_bit(DEVICE_STATE_STARTED, &rt2x00dev->flags)) return; /* * Perhaps we can add something smarter here, * but for now just disabling the radio should do. */ rt2x00lib_disable_radio(rt2x00dev); rt2x00dev->intf_ap_count = 0; rt2x00dev->intf_sta_count = 0; rt2x00dev->intf_associated = 0; } static inline void rt2x00lib_set_if_combinations(struct rt2x00_dev *rt2x00dev) { struct ieee80211_iface_limit *if_limit; struct ieee80211_iface_combination *if_combination; if (rt2x00dev->ops->max_ap_intf < 2) return; /* * Build up AP interface limits structure. */ if_limit = &rt2x00dev->if_limits_ap; if_limit->max = rt2x00dev->ops->max_ap_intf; if_limit->types = BIT(NL80211_IFTYPE_AP); #ifdef CONFIG_MAC80211_MESH if_limit->types |= BIT(NL80211_IFTYPE_MESH_POINT); #endif /* * Build up AP interface combinations structure. */ if_combination = &rt2x00dev->if_combinations[IF_COMB_AP]; if_combination->limits = if_limit; if_combination->n_limits = 1; if_combination->max_interfaces = if_limit->max; if_combination->num_different_channels = 1; /* * Finally, specify the possible combinations to mac80211. 
*/ rt2x00dev->hw->wiphy->iface_combinations = rt2x00dev->if_combinations; rt2x00dev->hw->wiphy->n_iface_combinations = 1; } static unsigned int rt2x00dev_extra_tx_headroom(struct rt2x00_dev *rt2x00dev) { if (WARN_ON(!rt2x00dev->tx)) return 0; if (rt2x00_is_usb(rt2x00dev)) return rt2x00dev->tx[0].winfo_size + rt2x00dev->tx[0].desc_size; return rt2x00dev->tx[0].winfo_size; } /* * driver allocation handlers. */ int rt2x00lib_probe_dev(struct rt2x00_dev *rt2x00dev) { int retval = -ENOMEM; /* * Set possible interface combinations. */ rt2x00lib_set_if_combinations(rt2x00dev); /* * Allocate the driver data memory, if necessary. */ if (rt2x00dev->ops->drv_data_size > 0) { rt2x00dev->drv_data = kzalloc(rt2x00dev->ops->drv_data_size, GFP_KERNEL); if (!rt2x00dev->drv_data) { retval = -ENOMEM; goto exit; } } spin_lock_init(&rt2x00dev->irqmask_lock); mutex_init(&rt2x00dev->csr_mutex); mutex_init(&rt2x00dev->conf_mutex); INIT_LIST_HEAD(&rt2x00dev->bar_list); spin_lock_init(&rt2x00dev->bar_list_lock); hrtimer_init(&rt2x00dev->txstatus_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); /* * Make room for rt2x00_intf inside the per-interface * structure ieee80211_vif. */ rt2x00dev->hw->vif_data_size = sizeof(struct rt2x00_intf); /* * rt2x00 devices can only use the last n bits of the MAC address * for virtual interfaces. */ rt2x00dev->hw->wiphy->addr_mask[ETH_ALEN - 1] = (rt2x00dev->ops->max_ap_intf - 1); /* * Initialize work. */ rt2x00dev->workqueue = alloc_ordered_workqueue("%s", 0, wiphy_name(rt2x00dev->hw->wiphy)); if (!rt2x00dev->workqueue) { retval = -ENOMEM; goto exit; } INIT_WORK(&rt2x00dev->intf_work, rt2x00lib_intf_scheduled); INIT_DELAYED_WORK(&rt2x00dev->autowakeup_work, rt2x00lib_autowakeup); INIT_WORK(&rt2x00dev->sleep_work, rt2x00lib_sleep); /* * Let the driver probe the device to detect the capabilities. */ retval = rt2x00dev->ops->lib->probe_hw(rt2x00dev); if (retval) { rt2x00_err(rt2x00dev, "Failed to allocate device\n"); goto exit; } /* * Allocate queue array. */ retval = rt2x00queue_allocate(rt2x00dev); if (retval) goto exit; /* Cache TX headroom value */ rt2x00dev->extra_tx_headroom = rt2x00dev_extra_tx_headroom(rt2x00dev); /* * Determine which operating modes are supported, all modes * which require beaconing, depend on the availability of * beacon entries. */ rt2x00dev->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); if (rt2x00dev->bcn->limit > 0) rt2x00dev->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC) | #ifdef CONFIG_MAC80211_MESH BIT(NL80211_IFTYPE_MESH_POINT) | #endif BIT(NL80211_IFTYPE_AP); rt2x00dev->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; wiphy_ext_feature_set(rt2x00dev->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST); /* * Initialize ieee80211 structure. */ retval = rt2x00lib_probe_hw(rt2x00dev); if (retval) { rt2x00_err(rt2x00dev, "Failed to initialize hw\n"); goto exit; } /* * Register extra components. */ rt2x00link_register(rt2x00dev); rt2x00leds_register(rt2x00dev); rt2x00debug_register(rt2x00dev); /* * Start rfkill polling. */ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL)) rt2x00rfkill_register(rt2x00dev); return 0; exit: rt2x00lib_remove_dev(rt2x00dev); return retval; } EXPORT_SYMBOL_GPL(rt2x00lib_probe_dev); void rt2x00lib_remove_dev(struct rt2x00_dev *rt2x00dev) { clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); /* * Stop rfkill polling. */ if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DELAYED_RFKILL)) rt2x00rfkill_unregister(rt2x00dev); /* * Disable radio. 
*/ rt2x00lib_disable_radio(rt2x00dev); /* * Stop all work. */ cancel_work_sync(&rt2x00dev->intf_work); cancel_delayed_work_sync(&rt2x00dev->autowakeup_work); cancel_work_sync(&rt2x00dev->sleep_work); hrtimer_cancel(&rt2x00dev->txstatus_timer); /* * Kill the tx status tasklet. */ tasklet_kill(&rt2x00dev->txstatus_tasklet); tasklet_kill(&rt2x00dev->pretbtt_tasklet); tasklet_kill(&rt2x00dev->tbtt_tasklet); tasklet_kill(&rt2x00dev->rxdone_tasklet); tasklet_kill(&rt2x00dev->autowake_tasklet); /* * Uninitialize device. */ rt2x00lib_uninitialize(rt2x00dev); if (rt2x00dev->workqueue) destroy_workqueue(rt2x00dev->workqueue); /* * Free the tx status fifo. */ kfifo_free(&rt2x00dev->txstatus_fifo); /* * Free extra components */ rt2x00debug_deregister(rt2x00dev); rt2x00leds_unregister(rt2x00dev); /* * Free ieee80211_hw memory. */ rt2x00lib_remove_hw(rt2x00dev); /* * Free firmware image. */ rt2x00lib_free_firmware(rt2x00dev); /* * Free queue structures. */ rt2x00queue_free(rt2x00dev); /* * Free the driver data. */ kfree(rt2x00dev->drv_data); } EXPORT_SYMBOL_GPL(rt2x00lib_remove_dev); /* * Device state handlers */ int rt2x00lib_suspend(struct rt2x00_dev *rt2x00dev) { rt2x00_dbg(rt2x00dev, "Going to sleep\n"); /* * Prevent mac80211 from accessing driver while suspended. */ if (!test_and_clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return 0; /* * Cleanup as much as possible. */ rt2x00lib_uninitialize(rt2x00dev); /* * Suspend/disable extra components. */ rt2x00leds_suspend(rt2x00dev); rt2x00debug_deregister(rt2x00dev); /* * Set device mode to sleep for power management, * on some hardware this call seems to consistently fail. * From the specifications it is hard to tell why it fails, * and if this is a "bad thing". * Overall it is safe to just ignore the failure and * continue suspending. The only downside is that the * device will not be in optimal power save mode, but with * the radio and the other components already disabled the * device is as good as disabled. */ if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_SLEEP)) rt2x00_warn(rt2x00dev, "Device failed to enter sleep state, continue suspending\n"); return 0; } EXPORT_SYMBOL_GPL(rt2x00lib_suspend); int rt2x00lib_resume(struct rt2x00_dev *rt2x00dev) { rt2x00_dbg(rt2x00dev, "Waking up\n"); /* * Restore/enable extra components. */ rt2x00debug_register(rt2x00dev); rt2x00leds_resume(rt2x00dev); /* * We are ready again to receive requests from mac80211. */ set_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags); return 0; } EXPORT_SYMBOL_GPL(rt2x00lib_resume); /* * rt2x00lib module information. */ MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("rt2x00 library"); MODULE_LICENSE("GPL");
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00dev.c
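Illustrative note on rt2x00dev.c above: in rt2x00lib_fill_tx_status() the hardware is assumed to have dropped one rate index per retry until it reached index 0, which then absorbs the remaining attempts. The standalone sketch below reproduces just that fill loop; struct ex_rate, EX_TX_MAX_RATES and the parameters in main() are simplified stand-ins for the mac80211 equivalents, not the driver's actual types.

/*
 * Sketch of the retry-rate fill loop from rt2x00lib_fill_tx_status().
 * Not driver code; types and constants are simplified stand-ins.
 */
#include <stdio.h>

#define EX_TX_MAX_RATES 4	/* stand-in for IEEE80211_TX_MAX_RATES */

struct ex_rate { int idx; int count; };

static void fill_rates(struct ex_rate *rates, int start_idx, int attempts)
{
	int i;

	for (i = 0; i < attempts && i < EX_TX_MAX_RATES; i++) {
		rates[i].idx = start_idx - i;
		if (start_idx - i == 0) {
			/* lowest rate is reused for all remaining attempts */
			rates[i].count = attempts - i;
			i++;
			break;
		}
		rates[i].count = 1;
	}
	if (i < EX_TX_MAX_RATES - 1)
		rates[i].idx = -1;	/* terminate the list, as mac80211 expects */
}

int main(void)
{
	struct ex_rate rates[EX_TX_MAX_RATES] = { 0 };
	int i;

	fill_rates(rates, 2, 5);	/* started at rate index 2, 5 attempts total */
	for (i = 0; i < EX_TX_MAX_RATES && rates[i].count > 0; i++)
		printf("rate idx %d used %d time(s)\n", rates[i].idx, rates[i].count);
	return 0;
}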
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00lib Abstract: rt2x00 led specific routines. */ #include <linux/kernel.h> #include <linux/module.h> #include "rt2x00.h" #include "rt2x00lib.h" void rt2x00leds_led_quality(struct rt2x00_dev *rt2x00dev, int rssi) { struct rt2x00_led *led = &rt2x00dev->led_qual; unsigned int brightness; if ((led->type != LED_TYPE_QUALITY) || !(led->flags & LED_REGISTERED)) return; /* * Led handling requires a positive value for the rssi, * to do that correctly we need to add the correction. */ rssi += rt2x00dev->rssi_offset; /* * Get the rssi level, this is used to convert the rssi * to a LED value inside the range LED_OFF - LED_FULL. */ if (rssi <= 30) rssi = 0; else if (rssi <= 39) rssi = 1; else if (rssi <= 49) rssi = 2; else if (rssi <= 53) rssi = 3; else if (rssi <= 63) rssi = 4; else rssi = 5; /* * Note that we must _not_ send LED_OFF since the driver * is going to calculate the value and might use it in a * division. */ brightness = ((LED_FULL / 6) * rssi) + 1; if (brightness != led->led_dev.brightness) { led->led_dev.brightness_set(&led->led_dev, brightness); led->led_dev.brightness = brightness; } } static void rt2x00led_led_simple(struct rt2x00_led *led, bool enabled) { unsigned int brightness = enabled ? LED_FULL : LED_OFF; if (!(led->flags & LED_REGISTERED)) return; led->led_dev.brightness_set(&led->led_dev, brightness); led->led_dev.brightness = brightness; } void rt2x00led_led_activity(struct rt2x00_dev *rt2x00dev, bool enabled) { if (rt2x00dev->led_qual.type == LED_TYPE_ACTIVITY) rt2x00led_led_simple(&rt2x00dev->led_qual, enabled); } void rt2x00leds_led_assoc(struct rt2x00_dev *rt2x00dev, bool enabled) { if (rt2x00dev->led_assoc.type == LED_TYPE_ASSOC) rt2x00led_led_simple(&rt2x00dev->led_assoc, enabled); } void rt2x00leds_led_radio(struct rt2x00_dev *rt2x00dev, bool enabled) { if (rt2x00dev->led_radio.type == LED_TYPE_RADIO) rt2x00led_led_simple(&rt2x00dev->led_radio, enabled); } static int rt2x00leds_register_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, const char *name) { struct device *device = wiphy_dev(rt2x00dev->hw->wiphy); int retval; led->led_dev.name = name; led->led_dev.brightness = LED_OFF; retval = led_classdev_register(device, &led->led_dev); if (retval) { rt2x00_err(rt2x00dev, "Failed to register led handler\n"); return retval; } led->flags |= LED_REGISTERED; return 0; } void rt2x00leds_register(struct rt2x00_dev *rt2x00dev) { char name[36]; int retval; unsigned long on_period; unsigned long off_period; const char *phy_name = wiphy_name(rt2x00dev->hw->wiphy); if (rt2x00dev->led_radio.flags & LED_INITIALIZED) { snprintf(name, sizeof(name), "%s-%s::radio", rt2x00dev->ops->name, phy_name); retval = rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_radio, name); if (retval) goto exit_fail; } if (rt2x00dev->led_assoc.flags & LED_INITIALIZED) { snprintf(name, sizeof(name), "%s-%s::assoc", rt2x00dev->ops->name, phy_name); retval = rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_assoc, name); if (retval) goto exit_fail; } if (rt2x00dev->led_qual.flags & LED_INITIALIZED) { snprintf(name, sizeof(name), "%s-%s::quality", rt2x00dev->ops->name, phy_name); retval = rt2x00leds_register_led(rt2x00dev, &rt2x00dev->led_qual, name); if (retval) goto exit_fail; } /* * Initialize blink time to default value: * On period: 70ms * Off period: 30ms */ if (rt2x00dev->led_radio.led_dev.blink_set) { on_period = 70; off_period = 30; 
rt2x00dev->led_radio.led_dev.blink_set( &rt2x00dev->led_radio.led_dev, &on_period, &off_period); } return; exit_fail: rt2x00leds_unregister(rt2x00dev); } static void rt2x00leds_unregister_led(struct rt2x00_led *led) { led_classdev_unregister(&led->led_dev); /* * This might look weird, but when we are unregistering while * suspended the led is already off, and since we haven't * fully resumed yet, access to the device might not be * possible yet. */ if (!(led->led_dev.flags & LED_SUSPENDED)) led->led_dev.brightness_set(&led->led_dev, LED_OFF); led->flags &= ~LED_REGISTERED; } void rt2x00leds_unregister(struct rt2x00_dev *rt2x00dev) { if (rt2x00dev->led_qual.flags & LED_REGISTERED) rt2x00leds_unregister_led(&rt2x00dev->led_qual); if (rt2x00dev->led_assoc.flags & LED_REGISTERED) rt2x00leds_unregister_led(&rt2x00dev->led_assoc); if (rt2x00dev->led_radio.flags & LED_REGISTERED) rt2x00leds_unregister_led(&rt2x00dev->led_radio); } static inline void rt2x00leds_suspend_led(struct rt2x00_led *led) { led_classdev_suspend(&led->led_dev); /* This shouldn't be needed, but just to be safe */ led->led_dev.brightness_set(&led->led_dev, LED_OFF); led->led_dev.brightness = LED_OFF; } void rt2x00leds_suspend(struct rt2x00_dev *rt2x00dev) { if (rt2x00dev->led_qual.flags & LED_REGISTERED) rt2x00leds_suspend_led(&rt2x00dev->led_qual); if (rt2x00dev->led_assoc.flags & LED_REGISTERED) rt2x00leds_suspend_led(&rt2x00dev->led_assoc); if (rt2x00dev->led_radio.flags & LED_REGISTERED) rt2x00leds_suspend_led(&rt2x00dev->led_radio); } static inline void rt2x00leds_resume_led(struct rt2x00_led *led) { led_classdev_resume(&led->led_dev); /* Device might have enabled the LEDS during resume */ led->led_dev.brightness_set(&led->led_dev, LED_OFF); led->led_dev.brightness = LED_OFF; } void rt2x00leds_resume(struct rt2x00_dev *rt2x00dev) { if (rt2x00dev->led_radio.flags & LED_REGISTERED) rt2x00leds_resume_led(&rt2x00dev->led_radio); if (rt2x00dev->led_assoc.flags & LED_REGISTERED) rt2x00leds_resume_led(&rt2x00dev->led_assoc); if (rt2x00dev->led_qual.flags & LED_REGISTERED) rt2x00leds_resume_led(&rt2x00dev->led_qual); }
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00leds.c
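Aside: a minimal stand-alone sketch of the signal-quality mapping performed by rt2x00leds_led_quality() above. The RSSI thresholds and the ((LED_FULL / 6) * level) + 1 brightness formula are taken from that function; the LED_FULL value of 255 and the main() harness are assumptions added here for illustration only.

#include <stdio.h>

#define LED_FULL 255	/* assumed value of the LED class maximum brightness */

/*
 * Map an already offset-corrected RSSI value to one of six levels and then
 * to a brightness that is never 0 (LED_OFF), mirroring rt2x00leds_led_quality().
 */
static unsigned int quality_to_brightness(int rssi)
{
	int level;

	if (rssi <= 30)
		level = 0;
	else if (rssi <= 39)
		level = 1;
	else if (rssi <= 49)
		level = 2;
	else if (rssi <= 53)
		level = 3;
	else if (rssi <= 63)
		level = 4;
	else
		level = 5;

	return ((LED_FULL / 6) * level) + 1;
}

int main(void)
{
	int rssi;

	for (rssi = 20; rssi <= 70; rssi += 10)
		printf("rssi %d -> brightness %u\n", rssi, quality_to_brightness(rssi));
	return 0;
}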
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> Copyright (C) 2004 - 2009 Felix Fietkau <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00soc Abstract: rt2x00 generic soc device routines. */ #include <linux/bug.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include "rt2x00.h" #include "rt2x00soc.h" static void rt2x00soc_free_reg(struct rt2x00_dev *rt2x00dev) { kfree(rt2x00dev->rf); rt2x00dev->rf = NULL; kfree(rt2x00dev->eeprom); rt2x00dev->eeprom = NULL; iounmap(rt2x00dev->csr.base); } static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev) { struct platform_device *pdev = to_platform_device(rt2x00dev->dev); struct resource *res; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; rt2x00dev->csr.base = ioremap(res->start, resource_size(res)); if (!rt2x00dev->csr.base) return -ENOMEM; rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL); if (!rt2x00dev->eeprom) goto exit; rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL); if (!rt2x00dev->rf) goto exit; return 0; exit: rt2x00_probe_err("Failed to allocate registers\n"); rt2x00soc_free_reg(rt2x00dev); return -ENOMEM; } int rt2x00soc_probe(struct platform_device *pdev, const struct rt2x00_ops *ops) { struct ieee80211_hw *hw; struct rt2x00_dev *rt2x00dev; int retval; hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw); if (!hw) { rt2x00_probe_err("Failed to allocate hardware\n"); return -ENOMEM; } platform_set_drvdata(pdev, hw); rt2x00dev = hw->priv; rt2x00dev->dev = &pdev->dev; rt2x00dev->ops = ops; rt2x00dev->hw = hw; rt2x00dev->irq = platform_get_irq(pdev, 0); rt2x00dev->name = pdev->dev.driver->name; rt2x00dev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(rt2x00dev->clk)) rt2x00dev->clk = NULL; rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_SOC); retval = rt2x00soc_alloc_reg(rt2x00dev); if (retval) goto exit_free_device; retval = rt2x00lib_probe_dev(rt2x00dev); if (retval) goto exit_free_reg; return 0; exit_free_reg: rt2x00soc_free_reg(rt2x00dev); exit_free_device: ieee80211_free_hw(hw); return retval; } EXPORT_SYMBOL_GPL(rt2x00soc_probe); int rt2x00soc_remove(struct platform_device *pdev) { struct ieee80211_hw *hw = platform_get_drvdata(pdev); struct rt2x00_dev *rt2x00dev = hw->priv; /* * Free all allocated data. */ rt2x00lib_remove_dev(rt2x00dev); rt2x00soc_free_reg(rt2x00dev); ieee80211_free_hw(hw); return 0; } EXPORT_SYMBOL_GPL(rt2x00soc_remove); #ifdef CONFIG_PM int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state) { struct ieee80211_hw *hw = platform_get_drvdata(pdev); struct rt2x00_dev *rt2x00dev = hw->priv; return rt2x00lib_suspend(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00soc_suspend); int rt2x00soc_resume(struct platform_device *pdev) { struct ieee80211_hw *hw = platform_get_drvdata(pdev); struct rt2x00_dev *rt2x00dev = hw->priv; return rt2x00lib_resume(rt2x00dev); } EXPORT_SYMBOL_GPL(rt2x00soc_resume); #endif /* CONFIG_PM */ /* * rt2x00soc module information. */ MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("rt2x00 soc library"); MODULE_LICENSE("GPL");
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00soc.c
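Aside: rt2x00soc_probe() above follows the usual goto-unwind error handling, where each failure label releases only what was set up before the failing step, in reverse order. Below is a stand-alone sketch of that idiom; alloc_hw() and alloc_regs() are hypothetical stand-ins for the real ieee80211_alloc_hw()/rt2x00soc_alloc_reg() calls.

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-ins for ieee80211_alloc_hw() and rt2x00soc_alloc_reg(). */
static void *alloc_hw(void)   { return malloc(64); }
static void *alloc_regs(void) { return malloc(64); }

static int demo_probe(void)
{
	void *hw, *regs;
	int retval;

	hw = alloc_hw();
	if (!hw)
		return -ENOMEM;

	regs = alloc_regs();
	if (!regs) {
		retval = -ENOMEM;
		goto exit_free_hw;	/* unwind only what was already set up */
	}

	printf("probe succeeded\n");
	free(regs);
	free(hw);
	return 0;

exit_free_hw:
	free(hw);
	return retval;
}

int main(void)
{
	return demo_probe() ? 1 : 0;
}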
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt61pci Abstract: rt61pci device specific routines. Supported chipsets: RT2561, RT2561s, RT2661. */ #include <linux/crc-itu-t.h> #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/eeprom_93cx6.h> #include "rt2x00.h" #include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt61pci.h" /* * Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt = false; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); /* * Register access. * BBP and RF register require indirect register access, * and use the CSR registers PHY_CSR3 and PHY_CSR4 to achieve this. * These indirect registers work with busy bits, * and we will try maximal REGISTER_BUSY_COUNT times to access * the register while taking a REGISTER_BUSY_DELAY us delay * between each attempt. When the busy bit is still set at that time, * the access attempt is considered to have failed, * and we will print an error. */ #define WAIT_FOR_BBP(__dev, __reg) \ rt2x00mmio_regbusy_read((__dev), PHY_CSR3, PHY_CSR3_BUSY, (__reg)) #define WAIT_FOR_RF(__dev, __reg) \ rt2x00mmio_regbusy_read((__dev), PHY_CSR4, PHY_CSR4_BUSY, (__reg)) #define WAIT_FOR_MCU(__dev, __reg) \ rt2x00mmio_regbusy_read((__dev), H2M_MAILBOX_CSR, \ H2M_MAILBOX_CSR_OWNER, (__reg)) static void rt61pci_bbp_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, PHY_CSR3_VALUE, value); rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word); rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 0); rt2x00mmio_register_write(rt2x00dev, PHY_CSR3, reg); } mutex_unlock(&rt2x00dev->csr_mutex); } static u8 rt61pci_bbp_read(struct rt2x00_dev *rt2x00dev, const unsigned int word) { u32 reg; u8 value; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the read request into the register. * After the data has been written, we wait until hardware * returns the correct value, if at any time the register * doesn't become available in time, reg will be 0xffffffff * which means we return 0xff to the caller. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, PHY_CSR3_REGNUM, word); rt2x00_set_field32(&reg, PHY_CSR3_BUSY, 1); rt2x00_set_field32(&reg, PHY_CSR3_READ_CONTROL, 1); rt2x00mmio_register_write(rt2x00dev, PHY_CSR3, reg); WAIT_FOR_BBP(rt2x00dev, &reg); } value = rt2x00_get_field32(reg, PHY_CSR3_VALUE); mutex_unlock(&rt2x00dev->csr_mutex); return value; } static void rt61pci_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u32 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RF becomes available, afterwards we * can safely write the new data into the register. 
*/ if (WAIT_FOR_RF(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, PHY_CSR4_VALUE, value); rt2x00_set_field32(&reg, PHY_CSR4_NUMBER_OF_BITS, 21); rt2x00_set_field32(&reg, PHY_CSR4_IF_SELECT, 0); rt2x00_set_field32(&reg, PHY_CSR4_BUSY, 1); rt2x00mmio_register_write(rt2x00dev, PHY_CSR4, reg); rt2x00_rf_write(rt2x00dev, word, value); } mutex_unlock(&rt2x00dev->csr_mutex); } static void rt61pci_mcu_request(struct rt2x00_dev *rt2x00dev, const u8 command, const u8 token, const u8 arg0, const u8 arg1) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the MCU becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_MCU(rt2x00dev, &reg)) { rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1); rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token); rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0); rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1); rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, HOST_CMD_CSR); rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command); rt2x00_set_field32(&reg, HOST_CMD_CSR_INTERRUPT_MCU, 1); rt2x00mmio_register_write(rt2x00dev, HOST_CMD_CSR, reg); } mutex_unlock(&rt2x00dev->csr_mutex); } static void rt61pci_eepromregister_read(struct eeprom_93cx6 *eeprom) { struct rt2x00_dev *rt2x00dev = eeprom->data; u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR); eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN); eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT); eeprom->reg_data_clock = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK); eeprom->reg_chip_select = !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT); } static void rt61pci_eepromregister_write(struct eeprom_93cx6 *eeprom) { struct rt2x00_dev *rt2x00dev = eeprom->data; u32 reg = 0; rt2x00_set_field32(&reg, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in); rt2x00_set_field32(&reg, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out); rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK, !!eeprom->reg_data_clock); rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT, !!eeprom->reg_chip_select); rt2x00mmio_register_write(rt2x00dev, E2PROM_CSR, reg); } #ifdef CONFIG_RT2X00_LIB_DEBUGFS static const struct rt2x00debug rt61pci_rt2x00debug = { .owner = THIS_MODULE, .csr = { .read = rt2x00mmio_register_read, .write = rt2x00mmio_register_write, .flags = RT2X00DEBUGFS_OFFSET, .word_base = CSR_REG_BASE, .word_size = sizeof(u32), .word_count = CSR_REG_SIZE / sizeof(u32), }, .eeprom = { .read = rt2x00_eeprom_read, .write = rt2x00_eeprom_write, .word_base = EEPROM_BASE, .word_size = sizeof(u16), .word_count = EEPROM_SIZE / sizeof(u16), }, .bbp = { .read = rt61pci_bbp_read, .write = rt61pci_bbp_write, .word_base = BBP_BASE, .word_size = sizeof(u8), .word_count = BBP_SIZE / sizeof(u8), }, .rf = { .read = rt2x00_rf_read, .write = rt61pci_rf_write, .word_base = RF_BASE, .word_size = sizeof(u32), .word_count = RF_SIZE / sizeof(u32), }, }; #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ static int rt61pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) { u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR13); return rt2x00_get_field32(reg, MAC_CSR13_VAL5); } #ifdef CONFIG_RT2X00_LIB_LEDS static void rt61pci_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; unsigned int a_mode = (enabled && led->rt2x00dev->curr_band == NL80211_BAND_5GHZ); unsigned 
int bg_mode = (enabled && led->rt2x00dev->curr_band == NL80211_BAND_2GHZ); if (led->type == LED_TYPE_RADIO) { rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, MCU_LEDCS_RADIO_STATUS, enabled); rt61pci_mcu_request(led->rt2x00dev, MCU_LED, 0xff, (led->rt2x00dev->led_mcu_reg & 0xff), ((led->rt2x00dev->led_mcu_reg >> 8))); } else if (led->type == LED_TYPE_ASSOC) { rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, MCU_LEDCS_LINK_BG_STATUS, bg_mode); rt2x00_set_field16(&led->rt2x00dev->led_mcu_reg, MCU_LEDCS_LINK_A_STATUS, a_mode); rt61pci_mcu_request(led->rt2x00dev, MCU_LED, 0xff, (led->rt2x00dev->led_mcu_reg & 0xff), ((led->rt2x00dev->led_mcu_reg >> 8))); } else if (led->type == LED_TYPE_QUALITY) { /* * The brightness is divided into 6 levels (0 - 5), * this means we need to convert the brightness * argument into the matching level within that range. */ rt61pci_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff, brightness / (LED_FULL / 6), 0); } } static int rt61pci_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); u32 reg; reg = rt2x00mmio_register_read(led->rt2x00dev, MAC_CSR14); rt2x00_set_field32(&reg, MAC_CSR14_ON_PERIOD, *delay_on); rt2x00_set_field32(&reg, MAC_CSR14_OFF_PERIOD, *delay_off); rt2x00mmio_register_write(led->rt2x00dev, MAC_CSR14, reg); return 0; } static void rt61pci_init_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, enum led_type type) { led->rt2x00dev = rt2x00dev; led->type = type; led->led_dev.brightness_set = rt61pci_brightness_set; led->led_dev.blink_set = rt61pci_blink_set; led->flags = LED_INITIALIZED; } #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Configuration handlers. */ static int rt61pci_config_shared_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { /* * Let the software handle the shared keys, * since the hardware decryption does not work reliably, * because the firmware does not know the key's keyidx. */ return -EOPNOTSUPP; } static int rt61pci_config_pairwise_key(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_crypto *crypto, struct ieee80211_key_conf *key) { struct hw_pairwise_ta_entry addr_entry; struct hw_key_entry key_entry; u32 mask; u32 reg; if (crypto->cmd == SET_KEY) { /* * rt2x00lib can't determine the correct free * key_idx for pairwise keys. We have 2 registers * with key valid bits. The goal is simple: read * the first register. If that is full, move to * the next register. * When both registers are full, we drop the key. * Otherwise, we use the first invalid entry. */ reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR2); if (reg && reg == ~0) { key->hw_key_idx = 32; reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR3); if (reg && reg == ~0) return -ENOSPC; } key->hw_key_idx += reg ? ffz(reg) : 0; /* * Upload key to hardware */ memcpy(key_entry.key, crypto->key, sizeof(key_entry.key)); memcpy(key_entry.tx_mic, crypto->tx_mic, sizeof(key_entry.tx_mic)); memcpy(key_entry.rx_mic, crypto->rx_mic, sizeof(key_entry.rx_mic)); memset(&addr_entry, 0, sizeof(addr_entry)); memcpy(&addr_entry, crypto->address, ETH_ALEN); addr_entry.cipher = crypto->cipher; reg = PAIRWISE_KEY_ENTRY(key->hw_key_idx); rt2x00mmio_register_multiwrite(rt2x00dev, reg, &key_entry, sizeof(key_entry)); reg = PAIRWISE_TA_ENTRY(key->hw_key_idx); rt2x00mmio_register_multiwrite(rt2x00dev, reg, &addr_entry, sizeof(addr_entry)); /* * Enable pairwise lookup table for given BSS idx. 
* Without this, received frames will not be decrypted * by the hardware. */ reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR4); reg |= (1 << crypto->bssidx); rt2x00mmio_register_write(rt2x00dev, SEC_CSR4, reg); /* * The driver does not support the IV/EIV generation * in hardware. However it doesn't support the IV/EIV * inside the ieee80211 frame either, but requires it * to be provided separately for the descriptor. * rt2x00lib will cut the IV/EIV data out of all frames * given to us by mac80211, but we must tell mac80211 * to generate the IV/EIV data. */ key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; } /* * SEC_CSR2 and SEC_CSR3 contain only single-bit fields to indicate * a particular key is valid. Because using the FIELD32() * defines directly will cause a lot of overhead, we use * a calculation to determine the correct bit directly. */ if (key->hw_key_idx < 32) { mask = 1 << key->hw_key_idx; reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR2); if (crypto->cmd == SET_KEY) reg |= mask; else if (crypto->cmd == DISABLE_KEY) reg &= ~mask; rt2x00mmio_register_write(rt2x00dev, SEC_CSR2, reg); } else { mask = 1 << (key->hw_key_idx - 32); reg = rt2x00mmio_register_read(rt2x00dev, SEC_CSR3); if (crypto->cmd == SET_KEY) reg |= mask; else if (crypto->cmd == DISABLE_KEY) reg &= ~mask; rt2x00mmio_register_write(rt2x00dev, SEC_CSR3, reg); } return 0; } static void rt61pci_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags) { u32 reg; /* * Start configuration steps. * Note that the version error will always be dropped * and broadcast frames will always be accepted since * there is no filter for it at this time. */ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CRC, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_PHYSICAL, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_CONTROL, !(filter_flags & (FIF_CONTROL | FIF_PSPOLL))); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_NOT_TO_ME, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_TO_DS, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_MULTICAST, !(filter_flags & FIF_ALLMULTI)); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_BROADCAST, 0); rt2x00_set_field32(&reg, TXRX_CSR0_DROP_ACK_CTS, !(filter_flags & FIF_CONTROL)); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg); } static void rt61pci_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct rt2x00intf_conf *conf, const unsigned int flags) { u32 reg; if (flags & CONFIG_UPDATE_TYPE) { /* * Enable synchronisation. 
*/ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_SYNC, conf->sync); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg); } if (flags & CONFIG_UPDATE_MAC) { reg = le32_to_cpu(conf->mac[1]); rt2x00_set_field32(&reg, MAC_CSR3_UNICAST_TO_ME_MASK, 0xff); conf->mac[1] = cpu_to_le32(reg); rt2x00mmio_register_multiwrite(rt2x00dev, MAC_CSR2, conf->mac, sizeof(conf->mac)); } if (flags & CONFIG_UPDATE_BSSID) { reg = le32_to_cpu(conf->bssid[1]); rt2x00_set_field32(&reg, MAC_CSR5_BSS_ID_MASK, 3); conf->bssid[1] = cpu_to_le32(reg); rt2x00mmio_register_multiwrite(rt2x00dev, MAC_CSR4, conf->bssid, sizeof(conf->bssid)); } } static void rt61pci_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, u32 changed) { u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_RX_ACK_TIMEOUT, 0x32); rt2x00_set_field32(&reg, TXRX_CSR0_TSF_OFFSET, IEEE80211_HEADER); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg); if (changed & BSS_CHANGED_ERP_PREAMBLE) { reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR4); rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_ENABLE, 1); rt2x00_set_field32(&reg, TXRX_CSR4_AUTORESPOND_PREAMBLE, !!erp->short_preamble); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR4, reg); } if (changed & BSS_CHANGED_BASIC_RATES) rt2x00mmio_register_write(rt2x00dev, TXRX_CSR5, erp->basic_rates); if (changed & BSS_CHANGED_BEACON_INT) { reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, erp->beacon_int * 16); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg); } if (changed & BSS_CHANGED_ERP_SLOT) { reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR9); rt2x00_set_field32(&reg, MAC_CSR9_SLOT_TIME, erp->slot_time); rt2x00mmio_register_write(rt2x00dev, MAC_CSR9, reg); reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR8); rt2x00_set_field32(&reg, MAC_CSR8_SIFS, erp->sifs); rt2x00_set_field32(&reg, MAC_CSR8_SIFS_AFTER_RX_OFDM, 3); rt2x00_set_field32(&reg, MAC_CSR8_EIFS, erp->eifs); rt2x00mmio_register_write(rt2x00dev, MAC_CSR8, reg); } } static void rt61pci_config_antenna_5x(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r3; u8 r4; u8 r77; r3 = rt61pci_bbp_read(rt2x00dev, 3); r4 = rt61pci_bbp_read(rt2x00dev, 4); r77 = rt61pci_bbp_read(rt2x00dev, 77); rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF5325)); /* * Configure the RX antenna. 
*/ switch (ant->rx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, (rt2x00dev->curr_band != NL80211_BAND_5GHZ)); break; case ANTENNA_A: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); else rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); break; case ANTENNA_B: default: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, 0); if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); else rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); break; } rt61pci_bbp_write(rt2x00dev, 77, r77); rt61pci_bbp_write(rt2x00dev, 3, r3); rt61pci_bbp_write(rt2x00dev, 4, r4); } static void rt61pci_config_antenna_2x(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r3; u8 r4; u8 r77; r3 = rt61pci_bbp_read(rt2x00dev, 3); r4 = rt61pci_bbp_read(rt2x00dev, 4); r77 = rt61pci_bbp_read(rt2x00dev, 77); rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, rt2x00_rf(rt2x00dev, RF2529)); rt2x00_set_field8(&r4, BBP_R4_RX_FRAME_END, !rt2x00_has_cap_frame_type(rt2x00dev)); /* * Configure the RX antenna. */ switch (ant->rx) { case ANTENNA_HW_DIVERSITY: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 2); break; case ANTENNA_A: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); break; case ANTENNA_B: default: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); break; } rt61pci_bbp_write(rt2x00dev, 77, r77); rt61pci_bbp_write(rt2x00dev, 3, r3); rt61pci_bbp_write(rt2x00dev, 4, r4); } static void rt61pci_config_antenna_2529_rx(struct rt2x00_dev *rt2x00dev, const int p1, const int p2) { u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR13); rt2x00_set_field32(&reg, MAC_CSR13_DIR4, 0); rt2x00_set_field32(&reg, MAC_CSR13_VAL4, p1); rt2x00_set_field32(&reg, MAC_CSR13_DIR3, 0); rt2x00_set_field32(&reg, MAC_CSR13_VAL3, !p2); rt2x00mmio_register_write(rt2x00dev, MAC_CSR13, reg); } static void rt61pci_config_antenna_2529(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u8 r3; u8 r4; u8 r77; r3 = rt61pci_bbp_read(rt2x00dev, 3); r4 = rt61pci_bbp_read(rt2x00dev, 4); r77 = rt61pci_bbp_read(rt2x00dev, 77); /* * Configure the RX antenna. */ switch (ant->rx) { case ANTENNA_A: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 0); rt61pci_config_antenna_2529_rx(rt2x00dev, 0, 0); break; case ANTENNA_HW_DIVERSITY: /* * FIXME: Antenna selection for the rf 2529 is very confusing * in the legacy driver. Just default to antenna B until the * legacy code can be properly translated into rt2x00 code. 
*/ case ANTENNA_B: default: rt2x00_set_field8(&r4, BBP_R4_RX_ANTENNA_CONTROL, 1); rt2x00_set_field8(&r77, BBP_R77_RX_ANTENNA, 3); rt61pci_config_antenna_2529_rx(rt2x00dev, 1, 1); break; } rt61pci_bbp_write(rt2x00dev, 77, r77); rt61pci_bbp_write(rt2x00dev, 3, r3); rt61pci_bbp_write(rt2x00dev, 4, r4); } struct antenna_sel { u8 word; /* * value[0] -> non-LNA * value[1] -> LNA */ u8 value[2]; }; static const struct antenna_sel antenna_sel_a[] = { { 96, { 0x58, 0x78 } }, { 104, { 0x38, 0x48 } }, { 75, { 0xfe, 0x80 } }, { 86, { 0xfe, 0x80 } }, { 88, { 0xfe, 0x80 } }, { 35, { 0x60, 0x60 } }, { 97, { 0x58, 0x58 } }, { 98, { 0x58, 0x58 } }, }; static const struct antenna_sel antenna_sel_bg[] = { { 96, { 0x48, 0x68 } }, { 104, { 0x2c, 0x3c } }, { 75, { 0xfe, 0x80 } }, { 86, { 0xfe, 0x80 } }, { 88, { 0xfe, 0x80 } }, { 35, { 0x50, 0x50 } }, { 97, { 0x48, 0x48 } }, { 98, { 0x48, 0x48 } }, }; static void rt61pci_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { const struct antenna_sel *sel; unsigned int lna; unsigned int i; u32 reg; /* * We should never come here because rt2x00lib is supposed * to catch this and send us the correct antenna explicitely. */ BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || ant->tx == ANTENNA_SW_DIVERSITY); if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { sel = antenna_sel_a; lna = rt2x00_has_cap_external_lna_a(rt2x00dev); } else { sel = antenna_sel_bg; lna = rt2x00_has_cap_external_lna_bg(rt2x00dev); } for (i = 0; i < ARRAY_SIZE(antenna_sel_a); i++) rt61pci_bbp_write(rt2x00dev, sel[i].word, sel[i].value[lna]); reg = rt2x00mmio_register_read(rt2x00dev, PHY_CSR0); rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_BG, rt2x00dev->curr_band == NL80211_BAND_2GHZ); rt2x00_set_field32(&reg, PHY_CSR0_PA_PE_A, rt2x00dev->curr_band == NL80211_BAND_5GHZ); rt2x00mmio_register_write(rt2x00dev, PHY_CSR0, reg); if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) rt61pci_config_antenna_5x(rt2x00dev, ant); else if (rt2x00_rf(rt2x00dev, RF2527)) rt61pci_config_antenna_2x(rt2x00dev, ant); else if (rt2x00_rf(rt2x00dev, RF2529)) { if (rt2x00_has_cap_double_antenna(rt2x00dev)) rt61pci_config_antenna_2x(rt2x00dev, ant); else rt61pci_config_antenna_2529(rt2x00dev, ant); } } static void rt61pci_config_lna_gain(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { u16 eeprom; short lna_gain = 0; if (libconf->conf->chandef.chan->band == NL80211_BAND_2GHZ) { if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) lna_gain += 14; eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG); lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_BG_1); } else { if (rt2x00_has_cap_external_lna_a(rt2x00dev)) lna_gain += 14; eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A); lna_gain -= rt2x00_get_field16(eeprom, EEPROM_RSSI_OFFSET_A_1); } rt2x00dev->lna_gain = lna_gain; } static void rt61pci_config_channel(struct rt2x00_dev *rt2x00dev, struct rf_channel *rf, const int txpower) { u8 r3; u8 r94; u8 smart; rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset); smart = !(rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF2527)); r3 = rt61pci_bbp_read(rt2x00dev, 3); rt2x00_set_field8(&r3, BBP_R3_SMART_MODE, smart); rt61pci_bbp_write(rt2x00dev, 3, r3); r94 = 6; if (txpower > MAX_TXPOWER && txpower <= (MAX_TXPOWER + r94)) r94 += txpower - MAX_TXPOWER; else if (txpower < MIN_TXPOWER && txpower >= (MIN_TXPOWER - r94)) r94 += txpower; rt61pci_bbp_write(rt2x00dev, 94, r94); 
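/* Note on the sequence below: RF registers 1-4 are programmed three times in a row, with bit 2 of RF3 cleared, then set, then cleared again, separated by 200us delays and followed by a 1ms settle time. */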
rt61pci_rf_write(rt2x00dev, 1, rf->rf1); rt61pci_rf_write(rt2x00dev, 2, rf->rf2); rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); rt61pci_rf_write(rt2x00dev, 4, rf->rf4); udelay(200); rt61pci_rf_write(rt2x00dev, 1, rf->rf1); rt61pci_rf_write(rt2x00dev, 2, rf->rf2); rt61pci_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004); rt61pci_rf_write(rt2x00dev, 4, rf->rf4); udelay(200); rt61pci_rf_write(rt2x00dev, 1, rf->rf1); rt61pci_rf_write(rt2x00dev, 2, rf->rf2); rt61pci_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004); rt61pci_rf_write(rt2x00dev, 4, rf->rf4); msleep(1); } static void rt61pci_config_txpower(struct rt2x00_dev *rt2x00dev, const int txpower) { struct rf_channel rf; rf.rf1 = rt2x00_rf_read(rt2x00dev, 1); rf.rf2 = rt2x00_rf_read(rt2x00dev, 2); rf.rf3 = rt2x00_rf_read(rt2x00dev, 3); rf.rf4 = rt2x00_rf_read(rt2x00dev, 4); rt61pci_config_channel(rt2x00dev, &rf, txpower); } static void rt61pci_config_retry_limit(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR4); rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_DOWN, 1); rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_RATE_STEP, 0); rt2x00_set_field32(&reg, TXRX_CSR4_OFDM_TX_FALLBACK_CCK, 0); rt2x00_set_field32(&reg, TXRX_CSR4_LONG_RETRY_LIMIT, libconf->conf->long_frame_max_tx_count); rt2x00_set_field32(&reg, TXRX_CSR4_SHORT_RETRY_LIMIT, libconf->conf->short_frame_max_tx_count); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR4, reg); } static void rt61pci_config_ps(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { enum dev_state state = (libconf->conf->flags & IEEE80211_CONF_PS) ? STATE_SLEEP : STATE_AWAKE; u32 reg; if (state == STATE_SLEEP) { reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR11); rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, rt2x00dev->beacon_int - 10); rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, libconf->conf->listen_interval - 1); rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 5); /* We must first disable autowake before it can be enabled */ rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0); rt2x00mmio_register_write(rt2x00dev, MAC_CSR11, reg); rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 1); rt2x00mmio_register_write(rt2x00dev, MAC_CSR11, reg); rt2x00mmio_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000005); rt2x00mmio_register_write(rt2x00dev, IO_CNTL_CSR, 0x0000001c); rt2x00mmio_register_write(rt2x00dev, PCI_USEC_CSR, 0x00000060); rt61pci_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 0); } else { reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR11); rt2x00_set_field32(&reg, MAC_CSR11_DELAY_AFTER_TBCN, 0); rt2x00_set_field32(&reg, MAC_CSR11_TBCN_BEFORE_WAKEUP, 0); rt2x00_set_field32(&reg, MAC_CSR11_AUTOWAKE, 0); rt2x00_set_field32(&reg, MAC_CSR11_WAKEUP_LATENCY, 0); rt2x00mmio_register_write(rt2x00dev, MAC_CSR11, reg); rt2x00mmio_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000007); rt2x00mmio_register_write(rt2x00dev, IO_CNTL_CSR, 0x00000018); rt2x00mmio_register_write(rt2x00dev, PCI_USEC_CSR, 0x00000020); rt61pci_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0); } } static void rt61pci_config(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf, const unsigned int flags) { /* Always recalculate LNA gain before changing configuration */ rt61pci_config_lna_gain(rt2x00dev, libconf); if (flags & IEEE80211_CONF_CHANGE_CHANNEL) rt61pci_config_channel(rt2x00dev, &libconf->rf, libconf->conf->power_level); if ((flags & IEEE80211_CONF_CHANGE_POWER) && !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) 
rt61pci_config_txpower(rt2x00dev, libconf->conf->power_level); if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) rt61pci_config_retry_limit(rt2x00dev, libconf); if (flags & IEEE80211_CONF_CHANGE_PS) rt61pci_config_ps(rt2x00dev, libconf); } /* * Link tuning */ static void rt61pci_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u32 reg; /* * Update FCS error count from register. */ reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR0); qual->rx_failed = rt2x00_get_field32(reg, STA_CSR0_FCS_ERROR); /* * Update False CCA count from register. */ reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR1); qual->false_cca = rt2x00_get_field32(reg, STA_CSR1_FALSE_CCA_ERROR); } static inline void rt61pci_set_vgc(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, u8 vgc_level) { if (qual->vgc_level != vgc_level) { rt61pci_bbp_write(rt2x00dev, 17, vgc_level); qual->vgc_level = vgc_level; qual->vgc_level_reg = vgc_level; } } static void rt61pci_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { rt61pci_set_vgc(rt2x00dev, qual, 0x20); } static void rt61pci_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, const u32 count) { u8 up_bound; u8 low_bound; /* * Determine r17 bounds. */ if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { low_bound = 0x28; up_bound = 0x48; if (rt2x00_has_cap_external_lna_a(rt2x00dev)) { low_bound += 0x10; up_bound += 0x10; } } else { low_bound = 0x20; up_bound = 0x40; if (rt2x00_has_cap_external_lna_bg(rt2x00dev)) { low_bound += 0x10; up_bound += 0x10; } } /* * If we are not associated, we should go straight to the * dynamic CCA tuning. */ if (!rt2x00dev->intf_associated) goto dynamic_cca_tune; /* * Special big-R17 for very short distance */ if (qual->rssi >= -35) { rt61pci_set_vgc(rt2x00dev, qual, 0x60); return; } /* * Special big-R17 for short distance */ if (qual->rssi >= -58) { rt61pci_set_vgc(rt2x00dev, qual, up_bound); return; } /* * Special big-R17 for middle-short distance */ if (qual->rssi >= -66) { rt61pci_set_vgc(rt2x00dev, qual, low_bound + 0x10); return; } /* * Special mid-R17 for middle distance */ if (qual->rssi >= -74) { rt61pci_set_vgc(rt2x00dev, qual, low_bound + 0x08); return; } /* * Special case: Change up_bound based on the rssi. * Lower up_bound when rssi is weaker then -74 dBm. */ up_bound -= 2 * (-74 - qual->rssi); if (low_bound > up_bound) up_bound = low_bound; if (qual->vgc_level > up_bound) { rt61pci_set_vgc(rt2x00dev, qual, up_bound); return; } dynamic_cca_tune: /* * r17 does not yet exceed upper limit, continue and base * the r17 tuning on the false CCA count. */ if ((qual->false_cca > 512) && (qual->vgc_level < up_bound)) rt61pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level); else if ((qual->false_cca < 100) && (qual->vgc_level > low_bound)) rt61pci_set_vgc(rt2x00dev, qual, --qual->vgc_level); } /* * Queue handlers. 
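* These handlers map each rt2x00 data queue onto its hardware controls: the RX queue uses the TXRX_CSR0 disable bit, the beacon queue the TSF/TBTT/beacon-generation bits in TXRX_CSR9, and the four AC queues the per-AC kick and abort bits in TX_CNTL_CSR.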
*/ static void rt61pci_start_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg); break; case QID_BEACON: reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 1); rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 1); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg); break; default: break; } } static void rt61pci_kick_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_AC_VO: reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR); rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC0, 1); rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg); break; case QID_AC_VI: reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR); rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC1, 1); rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg); break; case QID_AC_BE: reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR); rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC2, 1); rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg); break; case QID_AC_BK: reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR); rt2x00_set_field32(&reg, TX_CNTL_CSR_KICK_TX_AC3, 1); rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg); break; default: break; } } static void rt61pci_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_AC_VO: reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR); rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC0, 1); rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg); break; case QID_AC_VI: reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR); rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC1, 1); rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg); break; case QID_AC_BE: reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR); rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC2, 1); rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg); break; case QID_AC_BK: reg = rt2x00mmio_register_read(rt2x00dev, TX_CNTL_CSR); rt2x00_set_field32(&reg, TX_CNTL_CSR_ABORT_TX_AC3, 1); rt2x00mmio_register_write(rt2x00dev, TX_CNTL_CSR, reg); break; case QID_RX: reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 1); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg); break; case QID_BEACON: reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg); /* * Wait for possibly running tbtt tasklets. 
*/ tasklet_kill(&rt2x00dev->tbtt_tasklet); break; default: break; } } /* * Firmware functions */ static char *rt61pci_get_firmware_name(struct rt2x00_dev *rt2x00dev) { u16 chip; char *fw_name; pci_read_config_word(to_pci_dev(rt2x00dev->dev), PCI_DEVICE_ID, &chip); switch (chip) { case RT2561_PCI_ID: fw_name = FIRMWARE_RT2561; break; case RT2561s_PCI_ID: fw_name = FIRMWARE_RT2561s; break; case RT2661_PCI_ID: fw_name = FIRMWARE_RT2661; break; default: fw_name = NULL; break; } return fw_name; } static int rt61pci_check_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { u16 fw_crc; u16 crc; /* * Only support 8kb firmware files. */ if (len != 8192) return FW_BAD_LENGTH; /* * The last 2 bytes in the firmware array are the crc checksum itself. * This means that we should never pass those 2 bytes to the crc * algorithm. */ fw_crc = (data[len - 2] << 8 | data[len - 1]); /* * Use the crc itu-t algorithm. */ crc = crc_itu_t(0, data, len - 2); crc = crc_itu_t_byte(crc, 0); crc = crc_itu_t_byte(crc, 0); return (fw_crc == crc) ? FW_OK : FW_BAD_CRC; } static int rt61pci_load_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { int i; u32 reg; /* * Wait for stable hardware. */ for (i = 0; i < 100; i++) { reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR0); if (reg) break; msleep(1); } if (!reg) { rt2x00_err(rt2x00dev, "Unstable hardware\n"); return -EBUSY; } /* * Prepare MCU and mailbox for firmware loading. */ reg = 0; rt2x00_set_field32(&reg, MCU_CNTL_CSR_RESET, 1); rt2x00mmio_register_write(rt2x00dev, MCU_CNTL_CSR, reg); rt2x00mmio_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); rt2x00mmio_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); rt2x00mmio_register_write(rt2x00dev, HOST_CMD_CSR, 0); /* * Write firmware to device. */ reg = 0; rt2x00_set_field32(&reg, MCU_CNTL_CSR_RESET, 1); rt2x00_set_field32(&reg, MCU_CNTL_CSR_SELECT_BANK, 1); rt2x00mmio_register_write(rt2x00dev, MCU_CNTL_CSR, reg); rt2x00mmio_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE, data, len); rt2x00_set_field32(&reg, MCU_CNTL_CSR_SELECT_BANK, 0); rt2x00mmio_register_write(rt2x00dev, MCU_CNTL_CSR, reg); rt2x00_set_field32(&reg, MCU_CNTL_CSR_RESET, 0); rt2x00mmio_register_write(rt2x00dev, MCU_CNTL_CSR, reg); for (i = 0; i < 100; i++) { reg = rt2x00mmio_register_read(rt2x00dev, MCU_CNTL_CSR); if (rt2x00_get_field32(reg, MCU_CNTL_CSR_READY)) break; msleep(1); } if (i == 100) { rt2x00_err(rt2x00dev, "MCU Control register not ready\n"); return -EBUSY; } /* * Hardware needs another millisecond before it is ready. */ msleep(1); /* * Reset MAC and BBP registers. */ reg = 0; rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1); rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1); rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0); rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0); rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1); rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg); return 0; } /* * Initialization functions. 
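* The helpers below track descriptor ownership through the OWNER_NIC and VALID bits, and rt61pci_init_queues() programs the ring sizes, descriptor sizes and DMA base addresses of the RX ring and the four AC TX rings.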
*/ static bool rt61pci_get_entry_state(struct queue_entry *entry) { struct queue_entry_priv_mmio *entry_priv = entry->priv_data; u32 word; if (entry->queue->qid == QID_RX) { word = rt2x00_desc_read(entry_priv->desc, 0); return rt2x00_get_field32(word, RXD_W0_OWNER_NIC); } else { word = rt2x00_desc_read(entry_priv->desc, 0); return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || rt2x00_get_field32(word, TXD_W0_VALID)); } } static void rt61pci_clear_entry(struct queue_entry *entry) { struct queue_entry_priv_mmio *entry_priv = entry->priv_data; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); u32 word; if (entry->queue->qid == QID_RX) { word = rt2x00_desc_read(entry_priv->desc, 5); rt2x00_set_field32(&word, RXD_W5_BUFFER_PHYSICAL_ADDRESS, skbdesc->skb_dma); rt2x00_desc_write(entry_priv->desc, 5, word); word = rt2x00_desc_read(entry_priv->desc, 0); rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); rt2x00_desc_write(entry_priv->desc, 0, word); } else { word = rt2x00_desc_read(entry_priv->desc, 0); rt2x00_set_field32(&word, TXD_W0_VALID, 0); rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); rt2x00_desc_write(entry_priv->desc, 0, word); } } static int rt61pci_init_queues(struct rt2x00_dev *rt2x00dev) { struct queue_entry_priv_mmio *entry_priv; u32 reg; /* * Initialize registers. */ reg = rt2x00mmio_register_read(rt2x00dev, TX_RING_CSR0); rt2x00_set_field32(&reg, TX_RING_CSR0_AC0_RING_SIZE, rt2x00dev->tx[0].limit); rt2x00_set_field32(&reg, TX_RING_CSR0_AC1_RING_SIZE, rt2x00dev->tx[1].limit); rt2x00_set_field32(&reg, TX_RING_CSR0_AC2_RING_SIZE, rt2x00dev->tx[2].limit); rt2x00_set_field32(&reg, TX_RING_CSR0_AC3_RING_SIZE, rt2x00dev->tx[3].limit); rt2x00mmio_register_write(rt2x00dev, TX_RING_CSR0, reg); reg = rt2x00mmio_register_read(rt2x00dev, TX_RING_CSR1); rt2x00_set_field32(&reg, TX_RING_CSR1_TXD_SIZE, rt2x00dev->tx[0].desc_size / 4); rt2x00mmio_register_write(rt2x00dev, TX_RING_CSR1, reg); entry_priv = rt2x00dev->tx[0].entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, AC0_BASE_CSR); rt2x00_set_field32(&reg, AC0_BASE_CSR_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, AC0_BASE_CSR, reg); entry_priv = rt2x00dev->tx[1].entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, AC1_BASE_CSR); rt2x00_set_field32(&reg, AC1_BASE_CSR_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, AC1_BASE_CSR, reg); entry_priv = rt2x00dev->tx[2].entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, AC2_BASE_CSR); rt2x00_set_field32(&reg, AC2_BASE_CSR_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, AC2_BASE_CSR, reg); entry_priv = rt2x00dev->tx[3].entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, AC3_BASE_CSR); rt2x00_set_field32(&reg, AC3_BASE_CSR_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, AC3_BASE_CSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, RX_RING_CSR); rt2x00_set_field32(&reg, RX_RING_CSR_RING_SIZE, rt2x00dev->rx->limit); rt2x00_set_field32(&reg, RX_RING_CSR_RXD_SIZE, rt2x00dev->rx->desc_size / 4); rt2x00_set_field32(&reg, RX_RING_CSR_RXD_WRITEBACK_SIZE, 4); rt2x00mmio_register_write(rt2x00dev, RX_RING_CSR, reg); entry_priv = rt2x00dev->rx->entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, RX_BASE_CSR); rt2x00_set_field32(&reg, RX_BASE_CSR_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, RX_BASE_CSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, TX_DMA_DST_CSR); rt2x00_set_field32(&reg, 
TX_DMA_DST_CSR_DEST_AC0, 2); rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC1, 2); rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC2, 2); rt2x00_set_field32(&reg, TX_DMA_DST_CSR_DEST_AC3, 2); rt2x00mmio_register_write(rt2x00dev, TX_DMA_DST_CSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, LOAD_TX_RING_CSR); rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC0, 1); rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC1, 1); rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC2, 1); rt2x00_set_field32(&reg, LOAD_TX_RING_CSR_LOAD_TXD_AC3, 1); rt2x00mmio_register_write(rt2x00dev, LOAD_TX_RING_CSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, RX_CNTL_CSR); rt2x00_set_field32(&reg, RX_CNTL_CSR_LOAD_RXD, 1); rt2x00mmio_register_write(rt2x00dev, RX_CNTL_CSR, reg); return 0; } static int rt61pci_init_registers(struct rt2x00_dev *rt2x00dev) { u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR0); rt2x00_set_field32(&reg, TXRX_CSR0_AUTO_TX_SEQ, 1); rt2x00_set_field32(&reg, TXRX_CSR0_DISABLE_RX, 0); rt2x00_set_field32(&reg, TXRX_CSR0_TX_WITHOUT_WAITING, 0); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR0, reg); reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0, 47); /* CCK Signal */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1, 30); /* Rssi */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2, 42); /* OFDM Rate */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID2_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3, 30); /* Rssi */ rt2x00_set_field32(&reg, TXRX_CSR1_BBP_ID3_VALID, 1); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR1, reg); /* * CCK TXD BBP registers */ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR2); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0, 13); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1, 12); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2, 11); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID2_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3, 10); rt2x00_set_field32(&reg, TXRX_CSR2_BBP_ID3_VALID, 1); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR2, reg); /* * OFDM TXD BBP registers */ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR3); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0, 7); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1, 6); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2, 5); rt2x00_set_field32(&reg, TXRX_CSR3_BBP_ID2_VALID, 1); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR3, reg); reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR7); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_6MBS, 59); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_9MBS, 53); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_12MBS, 49); rt2x00_set_field32(&reg, TXRX_CSR7_ACK_CTS_18MBS, 46); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR7, reg); reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR8); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_24MBS, 44); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_36MBS, 42); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_48MBS, 42); rt2x00_set_field32(&reg, TXRX_CSR8_ACK_CTS_54MBS, 42); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR8, reg); reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_INTERVAL, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TSF_TICKING, 0); rt2x00_set_field32(&reg, 
TXRX_CSR9_TSF_SYNC, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TBTT_ENABLE, 0); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00_set_field32(&reg, TXRX_CSR9_TIMESTAMP_COMPENSATE, 0); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR15, 0x0000000f); rt2x00mmio_register_write(rt2x00dev, MAC_CSR6, 0x00000fff); reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR9); rt2x00_set_field32(&reg, MAC_CSR9_CW_SELECT, 0); rt2x00mmio_register_write(rt2x00dev, MAC_CSR9, reg); rt2x00mmio_register_write(rt2x00dev, MAC_CSR10, 0x0000071c); if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) return -EBUSY; rt2x00mmio_register_write(rt2x00dev, MAC_CSR13, 0x0000e000); /* * Invalidate all Shared Keys (SEC_CSR0), * and clear the Shared key Cipher algorithms (SEC_CSR1 & SEC_CSR5) */ rt2x00mmio_register_write(rt2x00dev, SEC_CSR0, 0x00000000); rt2x00mmio_register_write(rt2x00dev, SEC_CSR1, 0x00000000); rt2x00mmio_register_write(rt2x00dev, SEC_CSR5, 0x00000000); rt2x00mmio_register_write(rt2x00dev, PHY_CSR1, 0x000023b0); rt2x00mmio_register_write(rt2x00dev, PHY_CSR5, 0x060a100c); rt2x00mmio_register_write(rt2x00dev, PHY_CSR6, 0x00080606); rt2x00mmio_register_write(rt2x00dev, PHY_CSR7, 0x00000a08); rt2x00mmio_register_write(rt2x00dev, PCI_CFG_CSR, 0x28ca4404); rt2x00mmio_register_write(rt2x00dev, TEST_MODE_CSR, 0x00000200); rt2x00mmio_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); /* * Clear all beacons * For the Beacon base registers we only need to clear * the first byte since that byte contains the VALID and OWNER * bits which (when set to 0) will invalidate the entire beacon. */ rt2x00mmio_register_write(rt2x00dev, HW_BEACON_BASE0, 0); rt2x00mmio_register_write(rt2x00dev, HW_BEACON_BASE1, 0); rt2x00mmio_register_write(rt2x00dev, HW_BEACON_BASE2, 0); rt2x00mmio_register_write(rt2x00dev, HW_BEACON_BASE3, 0); /* * We must clear the error counters. * These registers are cleared on read, * so we may pass a useless variable to store the value. */ reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR0); reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR1); reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR2); /* * Reset MAC and BBP registers. 
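* As after firmware load, SOFT_RESET and BBP_RESET in MAC_CSR1 are asserted and released before HOST_READY is raised.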
*/ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 1); rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 1); rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field32(&reg, MAC_CSR1_SOFT_RESET, 0); rt2x00_set_field32(&reg, MAC_CSR1_BBP_RESET, 0); rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg); reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR1); rt2x00_set_field32(&reg, MAC_CSR1_HOST_READY, 1); rt2x00mmio_register_write(rt2x00dev, MAC_CSR1, reg); return 0; } static int rt61pci_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) { unsigned int i; u8 value; for (i = 0; i < REGISTER_BUSY_COUNT; i++) { value = rt61pci_bbp_read(rt2x00dev, 0); if ((value != 0xff) && (value != 0x00)) return 0; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n"); return -EACCES; } static int rt61pci_init_bbp(struct rt2x00_dev *rt2x00dev) { unsigned int i; u16 eeprom; u8 reg_id; u8 value; if (unlikely(rt61pci_wait_bbp_ready(rt2x00dev))) return -EACCES; rt61pci_bbp_write(rt2x00dev, 3, 0x00); rt61pci_bbp_write(rt2x00dev, 15, 0x30); rt61pci_bbp_write(rt2x00dev, 21, 0xc8); rt61pci_bbp_write(rt2x00dev, 22, 0x38); rt61pci_bbp_write(rt2x00dev, 23, 0x06); rt61pci_bbp_write(rt2x00dev, 24, 0xfe); rt61pci_bbp_write(rt2x00dev, 25, 0x0a); rt61pci_bbp_write(rt2x00dev, 26, 0x0d); rt61pci_bbp_write(rt2x00dev, 34, 0x12); rt61pci_bbp_write(rt2x00dev, 37, 0x07); rt61pci_bbp_write(rt2x00dev, 39, 0xf8); rt61pci_bbp_write(rt2x00dev, 41, 0x60); rt61pci_bbp_write(rt2x00dev, 53, 0x10); rt61pci_bbp_write(rt2x00dev, 54, 0x18); rt61pci_bbp_write(rt2x00dev, 60, 0x10); rt61pci_bbp_write(rt2x00dev, 61, 0x04); rt61pci_bbp_write(rt2x00dev, 62, 0x04); rt61pci_bbp_write(rt2x00dev, 75, 0xfe); rt61pci_bbp_write(rt2x00dev, 86, 0xfe); rt61pci_bbp_write(rt2x00dev, 88, 0xfe); rt61pci_bbp_write(rt2x00dev, 90, 0x0f); rt61pci_bbp_write(rt2x00dev, 99, 0x00); rt61pci_bbp_write(rt2x00dev, 102, 0x16); rt61pci_bbp_write(rt2x00dev, 107, 0x04); for (i = 0; i < EEPROM_BBP_SIZE; i++) { eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i); if (eeprom != 0xffff && eeprom != 0x0000) { reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); rt61pci_bbp_write(rt2x00dev, reg_id, value); } } return 0; } /* * Device state switch handlers. */ static void rt61pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int mask = (state == STATE_RADIO_IRQ_OFF); u32 reg; unsigned long flags; /* * When interrupts are being enabled, the interrupt registers * should clear the register to assure a clean state. */ if (state == STATE_RADIO_IRQ_ON) { reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR); rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, MCU_INT_SOURCE_CSR); rt2x00mmio_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg); } /* * Only toggle the interrupts bits we are going to use. * Non-checked interrupt bits are disabled by default. 
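* The mask update below is done under irqmask_lock so it cannot race with other code paths (for instance the interrupt handler) that update the same mask registers.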
*/ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR); rt2x00_set_field32(&reg, INT_MASK_CSR_TXDONE, mask); rt2x00_set_field32(&reg, INT_MASK_CSR_RXDONE, mask); rt2x00_set_field32(&reg, INT_MASK_CSR_BEACON_DONE, mask); rt2x00_set_field32(&reg, INT_MASK_CSR_ENABLE_MITIGATION, mask); rt2x00_set_field32(&reg, INT_MASK_CSR_MITIGATION_PERIOD, 0xff); rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR); rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_0, mask); rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_1, mask); rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_2, mask); rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_3, mask); rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_4, mask); rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_5, mask); rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_6, mask); rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_7, mask); rt2x00_set_field32(&reg, MCU_INT_MASK_CSR_TWAKEUP, mask); rt2x00mmio_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); if (state == STATE_RADIO_IRQ_OFF) { /* * Ensure that all tasklets are finished. */ tasklet_kill(&rt2x00dev->txstatus_tasklet); tasklet_kill(&rt2x00dev->rxdone_tasklet); tasklet_kill(&rt2x00dev->autowake_tasklet); tasklet_kill(&rt2x00dev->tbtt_tasklet); } } static int rt61pci_enable_radio(struct rt2x00_dev *rt2x00dev) { u32 reg; /* * Initialize all registers. */ if (unlikely(rt61pci_init_queues(rt2x00dev) || rt61pci_init_registers(rt2x00dev) || rt61pci_init_bbp(rt2x00dev))) return -EIO; /* * Enable RX. */ reg = rt2x00mmio_register_read(rt2x00dev, RX_CNTL_CSR); rt2x00_set_field32(&reg, RX_CNTL_CSR_ENABLE_RX_DMA, 1); rt2x00mmio_register_write(rt2x00dev, RX_CNTL_CSR, reg); return 0; } static void rt61pci_disable_radio(struct rt2x00_dev *rt2x00dev) { /* * Disable power */ rt2x00mmio_register_write(rt2x00dev, MAC_CSR10, 0x00001818); } static int rt61pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u32 reg, reg2; unsigned int i; bool put_to_sleep; put_to_sleep = (state != STATE_AWAKE); reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR12); rt2x00_set_field32(&reg, MAC_CSR12_FORCE_WAKEUP, !put_to_sleep); rt2x00_set_field32(&reg, MAC_CSR12_PUT_TO_SLEEP, put_to_sleep); rt2x00mmio_register_write(rt2x00dev, MAC_CSR12, reg); /* * Device is not guaranteed to be in the requested state yet. * We must wait until the register indicates that the * device has entered the correct state. 
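* The loop below polls MAC_CSR12_BBP_CURRENT_STATE up to REGISTER_BUSY_COUNT times, re-issuing the request and sleeping 10ms between attempts, and returns -EBUSY if the state never changes.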
*/ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { reg2 = rt2x00mmio_register_read(rt2x00dev, MAC_CSR12); state = rt2x00_get_field32(reg2, MAC_CSR12_BBP_CURRENT_STATE); if (state == !put_to_sleep) return 0; rt2x00mmio_register_write(rt2x00dev, MAC_CSR12, reg); msleep(10); } return -EBUSY; } static int rt61pci_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: retval = rt61pci_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: rt61pci_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: rt61pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: retval = rt61pci_set_state(rt2x00dev, state); break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n", state, retval); return retval; } /* * TX descriptor initialization */ static void rt61pci_write_tx_desc(struct queue_entry *entry, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); struct queue_entry_priv_mmio *entry_priv = entry->priv_data; __le32 *txd = entry_priv->desc; u32 word; /* * Start writing the descriptor words. */ word = rt2x00_desc_read(txd, 1); rt2x00_set_field32(&word, TXD_W1_HOST_Q_ID, entry->queue->qid); rt2x00_set_field32(&word, TXD_W1_AIFSN, entry->queue->aifs); rt2x00_set_field32(&word, TXD_W1_CWMIN, entry->queue->cw_min); rt2x00_set_field32(&word, TXD_W1_CWMAX, entry->queue->cw_max); rt2x00_set_field32(&word, TXD_W1_IV_OFFSET, txdesc->iv_offset); rt2x00_set_field32(&word, TXD_W1_HW_SEQUENCE, test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W1_BUFFER_COUNT, 1); rt2x00_desc_write(txd, 1, word); word = rt2x00_desc_read(txd, 2); rt2x00_set_field32(&word, TXD_W2_PLCP_SIGNAL, txdesc->u.plcp.signal); rt2x00_set_field32(&word, TXD_W2_PLCP_SERVICE, txdesc->u.plcp.service); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_LOW, txdesc->u.plcp.length_low); rt2x00_set_field32(&word, TXD_W2_PLCP_LENGTH_HIGH, txdesc->u.plcp.length_high); rt2x00_desc_write(txd, 2, word); if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags)) { _rt2x00_desc_write(txd, 3, skbdesc->iv[0]); _rt2x00_desc_write(txd, 4, skbdesc->iv[1]); } word = rt2x00_desc_read(txd, 5); rt2x00_set_field32(&word, TXD_W5_PID_TYPE, entry->queue->qid); rt2x00_set_field32(&word, TXD_W5_PID_SUBTYPE, entry->entry_idx); rt2x00_set_field32(&word, TXD_W5_TX_POWER, TXPOWER_TO_DEV(entry->queue->rt2x00dev->tx_power)); rt2x00_set_field32(&word, TXD_W5_WAITING_DMA_DONE_INT, 1); rt2x00_desc_write(txd, 5, word); if (entry->queue->qid != QID_BEACON) { word = rt2x00_desc_read(txd, 6); rt2x00_set_field32(&word, TXD_W6_BUFFER_PHYSICAL_ADDRESS, skbdesc->skb_dma); rt2x00_desc_write(txd, 6, word); word = rt2x00_desc_read(txd, 11); rt2x00_set_field32(&word, TXD_W11_BUFFER_LENGTH0, txdesc->length); rt2x00_desc_write(txd, 11, word); } /* * Writing TXD word 0 must the last to prevent a race condition with * the device, whereby the device may take hold of the TXD before we * finished updating it. 
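* Word 0 also carries the cipher setup, ACK/fragmentation flags and the data byte count, so it is the only word that may flip OWNER_NIC/VALID and is therefore written last.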
*/ word = rt2x00_desc_read(txd, 0); rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); rt2x00_set_field32(&word, TXD_W0_VALID, 1); rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_ACK, test_bit(ENTRY_TXD_ACK, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_OFDM, (txdesc->rate_mode == RATE_MODE_OFDM)); rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TKIP_MIC, test_bit(ENTRY_TXD_ENCRYPT_MMIC, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_KEY_TABLE, test_bit(ENTRY_TXD_ENCRYPT_PAIRWISE, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_KEY_INDEX, txdesc->key_idx); rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); rt2x00_set_field32(&word, TXD_W0_BURST, test_bit(ENTRY_TXD_BURST, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, txdesc->cipher); rt2x00_desc_write(txd, 0, word); /* * Register descriptor details in skb frame descriptor. */ skbdesc->desc = txd; skbdesc->desc_len = (entry->queue->qid == QID_BEACON) ? TXINFO_SIZE : TXD_DESC_SIZE; } /* * TX data initialization */ static void rt61pci_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct queue_entry_priv_mmio *entry_priv = entry->priv_data; unsigned int beacon_base; unsigned int padding_len; u32 orig_reg, reg; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9); orig_reg = reg; rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg); /* * Write the TX descriptor for the beacon. */ rt61pci_write_tx_desc(entry, txdesc); /* * Dump beacon to userspace through debugfs. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); /* * Write entire beacon with descriptor and padding to register. */ padding_len = roundup(entry->skb->len, 4) - entry->skb->len; if (padding_len && skb_pad(entry->skb, padding_len)) { rt2x00_err(rt2x00dev, "Failure padding beacon, aborting\n"); /* skb freed by skb_pad() on failure */ entry->skb = NULL; rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, orig_reg); return; } beacon_base = HW_BEACON_OFFSET(entry->entry_idx); rt2x00mmio_register_multiwrite(rt2x00dev, beacon_base, entry_priv->desc, TXINFO_SIZE); rt2x00mmio_register_multiwrite(rt2x00dev, beacon_base + TXINFO_SIZE, entry->skb->data, entry->skb->len + padding_len); /* * Enable beaconing again. * * For Wi-Fi faily generated beacons between participating * stations. Set TBTT phase adaptive adjustment step to 8us. */ rt2x00mmio_register_write(rt2x00dev, TXRX_CSR10, 0x00001008); rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 1); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg); /* * Clean up beacon skb. */ dev_kfree_skb_any(entry->skb); entry->skb = NULL; } static void rt61pci_clear_beacon(struct queue_entry *entry) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; u32 orig_reg, reg; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. 
*/ orig_reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR9); reg = orig_reg; rt2x00_set_field32(&reg, TXRX_CSR9_BEACON_GEN, 0); rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, reg); /* * Clear beacon. */ rt2x00mmio_register_write(rt2x00dev, HW_BEACON_OFFSET(entry->entry_idx), 0); /* * Restore global beaconing state. */ rt2x00mmio_register_write(rt2x00dev, TXRX_CSR9, orig_reg); } /* * RX control handlers */ static int rt61pci_agc_to_rssi(struct rt2x00_dev *rt2x00dev, int rxd_w1) { u8 offset = rt2x00dev->lna_gain; u8 lna; lna = rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_LNA); switch (lna) { case 3: offset += 90; break; case 2: offset += 74; break; case 1: offset += 64; break; default: return 0; } if (rt2x00dev->curr_band == NL80211_BAND_5GHZ) { if (lna == 3 || lna == 2) offset += 10; } return rt2x00_get_field32(rxd_w1, RXD_W1_RSSI_AGC) * 2 - offset; } static void rt61pci_fill_rxdone(struct queue_entry *entry, struct rxdone_entry_desc *rxdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct queue_entry_priv_mmio *entry_priv = entry->priv_data; u32 word0; u32 word1; word0 = rt2x00_desc_read(entry_priv->desc, 0); word1 = rt2x00_desc_read(entry_priv->desc, 1); if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; rxdesc->cipher = rt2x00_get_field32(word0, RXD_W0_CIPHER_ALG); rxdesc->cipher_status = rt2x00_get_field32(word0, RXD_W0_CIPHER_ERROR); if (rxdesc->cipher != CIPHER_NONE) { rxdesc->iv[0] = _rt2x00_desc_read(entry_priv->desc, 2); rxdesc->iv[1] = _rt2x00_desc_read(entry_priv->desc, 3); rxdesc->dev_flags |= RXDONE_CRYPTO_IV; rxdesc->icv = _rt2x00_desc_read(entry_priv->desc, 4); rxdesc->dev_flags |= RXDONE_CRYPTO_ICV; /* * Hardware has stripped IV/EIV data from 802.11 frame during * decryption. It has provided the data separately but rt2x00lib * should decide if it should be reinserted. */ rxdesc->flags |= RX_FLAG_IV_STRIPPED; /* * The hardware has already checked the Michael Mic and has * stripped it from the frame. Signal this to mac80211. */ rxdesc->flags |= RX_FLAG_MMIC_STRIPPED; if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS) rxdesc->flags |= RX_FLAG_DECRYPTED; else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC) rxdesc->flags |= RX_FLAG_MMIC_ERROR; } /* * Obtain the status about this packet. * When frame was received with an OFDM bitrate, * the signal is the PLCP value. If it was received with * a CCK bitrate the signal is the rate in 100kbit/s. */ rxdesc->signal = rt2x00_get_field32(word1, RXD_W1_SIGNAL); rxdesc->rssi = rt61pci_agc_to_rssi(rt2x00dev, word1); rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); if (rt2x00_get_field32(word0, RXD_W0_OFDM)) rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; else rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE; if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) rxdesc->dev_flags |= RXDONE_MY_BSS; } /* * Interrupt functions. */ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; struct queue_entry *entry; struct queue_entry *entry_done; struct queue_entry_priv_mmio *entry_priv; struct txdone_entry_desc txdesc; u32 word; u32 reg; int type; int index; int i; /* * TX_STA_FIFO is a stack of X entries, hence read TX_STA_FIFO * at most X times and also stop processing once the TX_STA_FIFO_VALID * flag is not set anymore. * * The legacy drivers use X=TX_RING_SIZE but state in a comment * that the TX_STA_FIFO stack has a size of 16. We stick to our * tx ring size for now. 
*/ for (i = 0; i < rt2x00dev->tx->limit; i++) { reg = rt2x00mmio_register_read(rt2x00dev, STA_CSR4); if (!rt2x00_get_field32(reg, STA_CSR4_VALID)) break; /* * Skip this entry when it contains an invalid * queue identication number. */ type = rt2x00_get_field32(reg, STA_CSR4_PID_TYPE); queue = rt2x00queue_get_tx_queue(rt2x00dev, type); if (unlikely(!queue)) continue; /* * Skip this entry when it contains an invalid * index number. */ index = rt2x00_get_field32(reg, STA_CSR4_PID_SUBTYPE); if (unlikely(index >= queue->limit)) continue; entry = &queue->entries[index]; entry_priv = entry->priv_data; word = rt2x00_desc_read(entry_priv->desc, 0); if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || !rt2x00_get_field32(word, TXD_W0_VALID)) return; entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE); while (entry != entry_done) { /* Catch up. * Just report any entries we missed as failed. */ rt2x00_warn(rt2x00dev, "TX status report missed for entry %d\n", entry_done->entry_idx); rt2x00lib_txdone_noinfo(entry_done, TXDONE_UNKNOWN); entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE); } /* * Obtain the status about this packet. */ txdesc.flags = 0; switch (rt2x00_get_field32(reg, STA_CSR4_TX_RESULT)) { case 0: /* Success, maybe with retry */ __set_bit(TXDONE_SUCCESS, &txdesc.flags); break; case 6: /* Failure, excessive retries */ __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags); fallthrough; /* this is a failed frame! */ default: /* Failure */ __set_bit(TXDONE_FAILURE, &txdesc.flags); } txdesc.retry = rt2x00_get_field32(reg, STA_CSR4_RETRY_COUNT); /* * the frame was retried at least once * -> hw used fallback rates */ if (txdesc.retry) __set_bit(TXDONE_FALLBACK, &txdesc.flags); rt2x00lib_txdone(entry, &txdesc); } } static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev) { struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf }; rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS); } static inline void rt61pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, struct rt2x00_field32 irq_field) { u32 reg; /* * Enable a single interrupt. The interrupt mask register * access needs locking. */ spin_lock_irq(&rt2x00dev->irqmask_lock); reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR); rt2x00_set_field32(&reg, irq_field, 0); rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg); spin_unlock_irq(&rt2x00dev->irqmask_lock); } static void rt61pci_enable_mcu_interrupt(struct rt2x00_dev *rt2x00dev, struct rt2x00_field32 irq_field) { u32 reg; /* * Enable a single MCU interrupt. The interrupt mask register * access needs locking. 
*/ spin_lock_irq(&rt2x00dev->irqmask_lock); reg = rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR); rt2x00_set_field32(&reg, irq_field, 0); rt2x00mmio_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); spin_unlock_irq(&rt2x00dev->irqmask_lock); } static void rt61pci_txstatus_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, txstatus_tasklet); rt61pci_txdone(rt2x00dev); if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_TXDONE); } static void rt61pci_tbtt_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet); rt2x00lib_beacondone(rt2x00dev); if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_BEACON_DONE); } static void rt61pci_rxdone_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, rxdone_tasklet); if (rt2x00mmio_rxdone(rt2x00dev)) tasklet_schedule(&rt2x00dev->rxdone_tasklet); else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt61pci_enable_interrupt(rt2x00dev, INT_MASK_CSR_RXDONE); } static void rt61pci_autowake_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, autowake_tasklet); rt61pci_wakeup(rt2x00dev); rt2x00mmio_register_write(rt2x00dev, M2H_CMD_DONE_CSR, 0xffffffff); if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt61pci_enable_mcu_interrupt(rt2x00dev, MCU_INT_MASK_CSR_TWAKEUP); } static irqreturn_t rt61pci_interrupt(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; u32 reg_mcu, mask_mcu; u32 reg, mask; /* * Get the interrupt sources & saved to local variable. * Write register value back to clear pending interrupts. */ reg_mcu = rt2x00mmio_register_read(rt2x00dev, MCU_INT_SOURCE_CSR); rt2x00mmio_register_write(rt2x00dev, MCU_INT_SOURCE_CSR, reg_mcu); reg = rt2x00mmio_register_read(rt2x00dev, INT_SOURCE_CSR); rt2x00mmio_register_write(rt2x00dev, INT_SOURCE_CSR, reg); if (!reg && !reg_mcu) return IRQ_NONE; if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return IRQ_HANDLED; /* * Schedule tasklets for interrupt handling. */ if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RXDONE)) tasklet_schedule(&rt2x00dev->rxdone_tasklet); if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TXDONE)) tasklet_schedule(&rt2x00dev->txstatus_tasklet); if (rt2x00_get_field32(reg, INT_SOURCE_CSR_BEACON_DONE)) tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet); if (rt2x00_get_field32(reg_mcu, MCU_INT_SOURCE_CSR_TWAKEUP)) tasklet_schedule(&rt2x00dev->autowake_tasklet); /* * Since INT_MASK_CSR and INT_SOURCE_CSR use the same bits * for interrupts and interrupt masks we can just use the value of * INT_SOURCE_CSR to create the interrupt mask. */ mask = reg; mask_mcu = reg_mcu; /* * Disable all interrupts for which a tasklet was scheduled right now, * the tasklet will reenable the appropriate interrupts. */ spin_lock(&rt2x00dev->irqmask_lock); reg = rt2x00mmio_register_read(rt2x00dev, INT_MASK_CSR); reg |= mask; rt2x00mmio_register_write(rt2x00dev, INT_MASK_CSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, MCU_INT_MASK_CSR); reg |= mask_mcu; rt2x00mmio_register_write(rt2x00dev, MCU_INT_MASK_CSR, reg); spin_unlock(&rt2x00dev->irqmask_lock); return IRQ_HANDLED; } /* * Device probe functions. 
*/ static int rt61pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) { struct eeprom_93cx6 eeprom; u32 reg; u16 word; u8 *mac; s8 value; reg = rt2x00mmio_register_read(rt2x00dev, E2PROM_CSR); eeprom.data = rt2x00dev; eeprom.register_read = rt61pci_eepromregister_read; eeprom.register_write = rt61pci_eepromregister_write; eeprom.width = rt2x00_get_field32(reg, E2PROM_CSR_TYPE_93C46) ? PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; eeprom.reg_data_in = 0; eeprom.reg_data_out = 0; eeprom.reg_data_clock = 0; eeprom.reg_chip_select = 0; eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, EEPROM_SIZE / sizeof(u16)); /* * Start validation of the data that has been read. */ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); rt2x00lib_set_mac_address(rt2x00dev, mac); word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, ANTENNA_B); rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, ANTENNA_B); rt2x00_set_field16(&word, EEPROM_ANTENNA_FRAME_TYPE, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF5225); rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_NIC_ENABLE_DIVERSITY, 0); rt2x00_set_field16(&word, EEPROM_NIC_TX_DIVERSITY, 0); rt2x00_set_field16(&word, EEPROM_NIC_RX_FIXED, 0); rt2x00_set_field16(&word, EEPROM_NIC_TX_FIXED, 0); rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0); rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_LED); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_LED_LED_MODE, LED_MODE_DEFAULT); rt2x00_eeprom_write(rt2x00dev, EEPROM_LED, word); rt2x00_eeprom_dbg(rt2x00dev, "Led: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0); rt2x00_set_field16(&word, EEPROM_FREQ_SEQ, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word); rt2x00_eeprom_dbg(rt2x00dev, "Freq: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_BG); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); rt2x00_eeprom_dbg(rt2x00dev, "RSSI OFFSET BG: 0x%04x\n", word); } else { value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_1); if (value < -10 || value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_1, 0); value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_BG_2); if (value < -10 || value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_BG_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_BG, word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_OFFSET_A); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); rt2x00_eeprom_dbg(rt2x00dev, "RSSI OFFSET A: 0x%04x\n", word); } else { value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_1); if (value < -10 || 
value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_1, 0); value = rt2x00_get_field16(word, EEPROM_RSSI_OFFSET_A_2); if (value < -10 || value > 10) rt2x00_set_field16(&word, EEPROM_RSSI_OFFSET_A_2, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_OFFSET_A, word); } return 0; } static int rt61pci_init_eeprom(struct rt2x00_dev *rt2x00dev) { u32 reg; u16 value; u16 eeprom; /* * Read EEPROM word for configuration. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); /* * Identify RF chipset. */ value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR0); rt2x00_set_chip(rt2x00dev, rt2x00_get_field32(reg, MAC_CSR0_CHIPSET), value, rt2x00_get_field32(reg, MAC_CSR0_REVISION)); if (!rt2x00_rf(rt2x00dev, RF5225) && !rt2x00_rf(rt2x00dev, RF5325) && !rt2x00_rf(rt2x00dev, RF2527) && !rt2x00_rf(rt2x00dev, RF2529)) { rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n"); return -ENODEV; } /* * Determine number of antennas. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_NUM) == 2) __set_bit(CAPABILITY_DOUBLE_ANTENNA, &rt2x00dev->cap_flags); /* * Identify default antenna configuration. */ rt2x00dev->default_ant.tx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); rt2x00dev->default_ant.rx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); /* * Read the Frame type. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_FRAME_TYPE)) __set_bit(CAPABILITY_FRAME_TYPE, &rt2x00dev->cap_flags); /* * Detect if this device has a hardware controlled radio. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); /* * Read frequency offset and RF programming sequence. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ); if (rt2x00_get_field16(eeprom, EEPROM_FREQ_SEQ)) __set_bit(CAPABILITY_RF_SEQUENCE, &rt2x00dev->cap_flags); rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET); /* * Read external LNA informations. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC); if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A)) __set_bit(CAPABILITY_EXTERNAL_LNA_A, &rt2x00dev->cap_flags); if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG)) __set_bit(CAPABILITY_EXTERNAL_LNA_BG, &rt2x00dev->cap_flags); /* * When working with a RF2529 chip without double antenna, * the antenna settings should be gathered from the NIC * eeprom word. */ if (rt2x00_rf(rt2x00dev, RF2529) && !rt2x00_has_cap_double_antenna(rt2x00dev)) { rt2x00dev->default_ant.rx = ANTENNA_A + rt2x00_get_field16(eeprom, EEPROM_NIC_RX_FIXED); rt2x00dev->default_ant.tx = ANTENNA_B - rt2x00_get_field16(eeprom, EEPROM_NIC_TX_FIXED); if (rt2x00_get_field16(eeprom, EEPROM_NIC_TX_DIVERSITY)) rt2x00dev->default_ant.tx = ANTENNA_SW_DIVERSITY; if (rt2x00_get_field16(eeprom, EEPROM_NIC_ENABLE_DIVERSITY)) rt2x00dev->default_ant.rx = ANTENNA_SW_DIVERSITY; } /* * Store led settings, for correct led behaviour. * If the eeprom value is invalid, * switch to default led mode. 
*/ #ifdef CONFIG_RT2X00_LIB_LEDS eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_LED); value = rt2x00_get_field16(eeprom, EEPROM_LED_LED_MODE); rt61pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); rt61pci_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC); if (value == LED_MODE_SIGNAL_STRENGTH) rt61pci_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_LED_MODE, value); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_0, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_0)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_1, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_1)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_2, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_2)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_3, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_3)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_GPIO_4, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_GPIO_4)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_ACT, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_ACT)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_BG, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_RDY_G)); rt2x00_set_field16(&rt2x00dev->led_mcu_reg, MCU_LEDCS_POLARITY_READY_A, rt2x00_get_field16(eeprom, EEPROM_LED_POLARITY_RDY_A)); #endif /* CONFIG_RT2X00_LIB_LEDS */ return 0; } /* * RF value list for RF5225 & RF5325 * Supports: 2.4 GHz & 5.2 GHz, rf_sequence disabled */ static const struct rf_channel rf_vals_noseq[] = { { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, { 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f }, { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, { 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f }, { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa23 }, { 40, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa03 }, { 44, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa0b }, { 48, 0x00002ccc, 0x000049aa, 0x0009be55, 0x000ffa13 }, { 52, 0x00002ccc, 0x000049ae, 0x0009ae55, 0x000ffa1b }, { 56, 0x00002ccc, 0x000049b2, 0x0009ae55, 0x000ffa23 }, { 60, 0x00002ccc, 0x000049ba, 0x0009ae55, 0x000ffa03 }, { 64, 0x00002ccc, 0x000049be, 0x0009ae55, 0x000ffa0b }, /* 802.11 HyperLan 2 */ { 100, 0x00002ccc, 0x00004a2a, 0x000bae55, 0x000ffa03 }, { 104, 0x00002ccc, 0x00004a2e, 0x000bae55, 0x000ffa0b }, { 108, 0x00002ccc, 0x00004a32, 0x000bae55, 0x000ffa13 }, { 112, 0x00002ccc, 0x00004a36, 0x000bae55, 0x000ffa1b }, { 116, 0x00002ccc, 0x00004a3a, 0x000bbe55, 0x000ffa23 }, { 120, 0x00002ccc, 0x00004a82, 0x000bbe55, 0x000ffa03 }, { 124, 0x00002ccc, 0x00004a86, 0x000bbe55, 0x000ffa0b }, { 128, 0x00002ccc, 0x00004a8a, 0x000bbe55, 0x000ffa13 }, { 132, 0x00002ccc, 0x00004a8e, 0x000bbe55, 0x000ffa1b }, { 136, 0x00002ccc, 0x00004a92, 0x000bbe55, 0x000ffa23 }, /* 802.11 UNII */ { 140, 0x00002ccc, 
0x00004a9a, 0x000bbe55, 0x000ffa03 }, { 149, 0x00002ccc, 0x00004aa2, 0x000bbe55, 0x000ffa1f }, { 153, 0x00002ccc, 0x00004aa6, 0x000bbe55, 0x000ffa27 }, { 157, 0x00002ccc, 0x00004aae, 0x000bbe55, 0x000ffa07 }, { 161, 0x00002ccc, 0x00004ab2, 0x000bbe55, 0x000ffa0f }, { 165, 0x00002ccc, 0x00004ab6, 0x000bbe55, 0x000ffa17 }, /* MMAC(Japan)J52 ch 34,38,42,46 */ { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000ffa0b }, { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000ffa13 }, { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000ffa1b }, { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000ffa23 }, }; /* * RF value list for RF5225 & RF5325 * Supports: 2.4 GHz & 5.2 GHz, rf_sequence enabled */ static const struct rf_channel rf_vals_seq[] = { { 1, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa0b }, { 2, 0x00002ccc, 0x00004786, 0x00068455, 0x000ffa1f }, { 3, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa0b }, { 4, 0x00002ccc, 0x0000478a, 0x00068455, 0x000ffa1f }, { 5, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa0b }, { 6, 0x00002ccc, 0x0000478e, 0x00068455, 0x000ffa1f }, { 7, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa0b }, { 8, 0x00002ccc, 0x00004792, 0x00068455, 0x000ffa1f }, { 9, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa0b }, { 10, 0x00002ccc, 0x00004796, 0x00068455, 0x000ffa1f }, { 11, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa0b }, { 12, 0x00002ccc, 0x0000479a, 0x00068455, 0x000ffa1f }, { 13, 0x00002ccc, 0x0000479e, 0x00068455, 0x000ffa0b }, { 14, 0x00002ccc, 0x000047a2, 0x00068455, 0x000ffa13 }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x00002cd4, 0x0004481a, 0x00098455, 0x000c0a03 }, { 40, 0x00002cd0, 0x00044682, 0x00098455, 0x000c0a03 }, { 44, 0x00002cd0, 0x00044686, 0x00098455, 0x000c0a1b }, { 48, 0x00002cd0, 0x0004468e, 0x00098655, 0x000c0a0b }, { 52, 0x00002cd0, 0x00044692, 0x00098855, 0x000c0a23 }, { 56, 0x00002cd0, 0x0004469a, 0x00098c55, 0x000c0a13 }, { 60, 0x00002cd0, 0x000446a2, 0x00098e55, 0x000c0a03 }, { 64, 0x00002cd0, 0x000446a6, 0x00099255, 0x000c0a1b }, /* 802.11 HyperLan 2 */ { 100, 0x00002cd4, 0x0004489a, 0x000b9855, 0x000c0a03 }, { 104, 0x00002cd4, 0x000448a2, 0x000b9855, 0x000c0a03 }, { 108, 0x00002cd4, 0x000448aa, 0x000b9855, 0x000c0a03 }, { 112, 0x00002cd4, 0x000448b2, 0x000b9a55, 0x000c0a03 }, { 116, 0x00002cd4, 0x000448ba, 0x000b9a55, 0x000c0a03 }, { 120, 0x00002cd0, 0x00044702, 0x000b9a55, 0x000c0a03 }, { 124, 0x00002cd0, 0x00044706, 0x000b9a55, 0x000c0a1b }, { 128, 0x00002cd0, 0x0004470e, 0x000b9c55, 0x000c0a0b }, { 132, 0x00002cd0, 0x00044712, 0x000b9c55, 0x000c0a23 }, { 136, 0x00002cd0, 0x0004471a, 0x000b9e55, 0x000c0a13 }, /* 802.11 UNII */ { 140, 0x00002cd0, 0x00044722, 0x000b9e55, 0x000c0a03 }, { 149, 0x00002cd0, 0x0004472e, 0x000ba255, 0x000c0a1b }, { 153, 0x00002cd0, 0x00044736, 0x000ba255, 0x000c0a0b }, { 157, 0x00002cd4, 0x0004490a, 0x000ba255, 0x000c0a17 }, { 161, 0x00002cd4, 0x00044912, 0x000ba255, 0x000c0a17 }, { 165, 0x00002cd4, 0x0004491a, 0x000ba255, 0x000c0a17 }, /* MMAC(Japan)J52 ch 34,38,42,46 */ { 34, 0x00002ccc, 0x0000499a, 0x0009be55, 0x000c0a0b }, { 38, 0x00002ccc, 0x0000499e, 0x0009be55, 0x000c0a13 }, { 42, 0x00002ccc, 0x000049a2, 0x0009be55, 0x000c0a1b }, { 46, 0x00002ccc, 0x000049a6, 0x0009be55, 0x000c0a23 }, }; static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; u8 *tx_power; unsigned int i; /* * Disable powersaving as default. */ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; /* * Initialize all hw fields. 
*/ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK); ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS); ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING); ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM); SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); /* * As rt61 has a global fallback table we cannot specify * more then one tx rate per frame but since the hw will * try several rates (based on the fallback table) we should * initialize max_report_rates to the maximum number of rates * we are going to try. Otherwise mac80211 will truncate our * reported tx rates and the rc algortihm will end up with * incorrect data. */ rt2x00dev->hw->max_rates = 1; rt2x00dev->hw->max_report_rates = 7; rt2x00dev->hw->max_rate_tries = 1; /* * Initialize hw_mode information. */ spec->supported_bands = SUPPORT_BAND_2GHZ; spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; if (!rt2x00_has_cap_rf_sequence(rt2x00dev)) { spec->num_channels = 14; spec->channels = rf_vals_noseq; } else { spec->num_channels = 14; spec->channels = rf_vals_seq; } if (rt2x00_rf(rt2x00dev, RF5225) || rt2x00_rf(rt2x00dev, RF5325)) { spec->supported_bands |= SUPPORT_BAND_5GHZ; spec->num_channels = ARRAY_SIZE(rf_vals_seq); } /* * Create channel information array */ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_G_START); for (i = 0; i < 14; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); } if (spec->num_channels > 14) { tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); for (i = 14; i < spec->num_channels; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i - 14]); } } return 0; } static int rt61pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; u32 reg; /* * Disable power saving. */ rt2x00mmio_register_write(rt2x00dev, SOFT_RESET_CSR, 0x00000007); /* * Allocate eeprom data. */ retval = rt61pci_validate_eeprom(rt2x00dev); if (retval) return retval; retval = rt61pci_init_eeprom(rt2x00dev); if (retval) return retval; /* * Enable rfkill polling by setting GPIO direction of the * rfkill switch GPIO pin correctly. */ reg = rt2x00mmio_register_read(rt2x00dev, MAC_CSR13); rt2x00_set_field32(&reg, MAC_CSR13_DIR5, 1); rt2x00mmio_register_write(rt2x00dev, MAC_CSR13, reg); /* * Initialize hw specifications. */ retval = rt61pci_probe_hw_mode(rt2x00dev); if (retval) return retval; /* * This device has multiple filters for control frames, * but has no a separate filter for PS Poll frames. */ __set_bit(CAPABILITY_CONTROL_FILTERS, &rt2x00dev->cap_flags); /* * This device requires firmware and DMA mapped skbs. */ __set_bit(REQUIRE_FIRMWARE, &rt2x00dev->cap_flags); __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags); if (!modparam_nohwcrypt) __set_bit(CAPABILITY_HW_CRYPTO, &rt2x00dev->cap_flags); __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags); /* * Set the rssi offset. */ rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; return 0; } /* * IEEE80211 stack callback functions. 
*/ static int rt61pci_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif, unsigned int link_id, u16 queue_idx, const struct ieee80211_tx_queue_params *params) { struct rt2x00_dev *rt2x00dev = hw->priv; struct data_queue *queue; struct rt2x00_field32 field; int retval; u32 reg; u32 offset; /* * First pass the configuration through rt2x00lib, that will * update the queue settings and validate the input. After that * we are free to update the registers based on the value * in the queue parameter. */ retval = rt2x00mac_conf_tx(hw, vif, link_id, queue_idx, params); if (retval) return retval; /* * We only need to perform additional register initialization * for WMM queues. */ if (queue_idx >= 4) return 0; queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); /* Update WMM TXOP register */ offset = AC_TXOP_CSR0 + (sizeof(u32) * (!!(queue_idx & 2))); field.bit_offset = (queue_idx & 1) * 16; field.bit_mask = 0xffff << field.bit_offset; reg = rt2x00mmio_register_read(rt2x00dev, offset); rt2x00_set_field32(&reg, field, queue->txop); rt2x00mmio_register_write(rt2x00dev, offset, reg); /* Update WMM registers */ field.bit_offset = queue_idx * 4; field.bit_mask = 0xf << field.bit_offset; reg = rt2x00mmio_register_read(rt2x00dev, AIFSN_CSR); rt2x00_set_field32(&reg, field, queue->aifs); rt2x00mmio_register_write(rt2x00dev, AIFSN_CSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, CWMIN_CSR); rt2x00_set_field32(&reg, field, queue->cw_min); rt2x00mmio_register_write(rt2x00dev, CWMIN_CSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, CWMAX_CSR); rt2x00_set_field32(&reg, field, queue->cw_max); rt2x00mmio_register_write(rt2x00dev, CWMAX_CSR, reg); return 0; } static u64 rt61pci_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; u64 tsf; u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR13); tsf = (u64) rt2x00_get_field32(reg, TXRX_CSR13_HIGH_TSFTIMER) << 32; reg = rt2x00mmio_register_read(rt2x00dev, TXRX_CSR12); tsf |= rt2x00_get_field32(reg, TXRX_CSR12_LOW_TSFTIMER); return tsf; } static const struct ieee80211_ops rt61pci_mac80211_ops = { .tx = rt2x00mac_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .set_key = rt2x00mac_set_key, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt61pci_conf_tx, .get_tsf = rt61pci_get_tsf, .rfkill_poll = rt2x00mac_rfkill_poll, .flush = rt2x00mac_flush, .set_antenna = rt2x00mac_set_antenna, .get_antenna = rt2x00mac_get_antenna, .get_ringparam = rt2x00mac_get_ringparam, .tx_frames_pending = rt2x00mac_tx_frames_pending, }; static const struct rt2x00lib_ops rt61pci_rt2x00_ops = { .irq_handler = rt61pci_interrupt, .txstatus_tasklet = rt61pci_txstatus_tasklet, .tbtt_tasklet = rt61pci_tbtt_tasklet, .rxdone_tasklet = rt61pci_rxdone_tasklet, .autowake_tasklet = rt61pci_autowake_tasklet, .probe_hw = rt61pci_probe_hw, .get_firmware_name = rt61pci_get_firmware_name, .check_firmware = rt61pci_check_firmware, .load_firmware = rt61pci_load_firmware, .initialize = rt2x00mmio_initialize, .uninitialize = rt2x00mmio_uninitialize, .get_entry_state = rt61pci_get_entry_state, .clear_entry = rt61pci_clear_entry, .set_device_state = rt61pci_set_device_state, .rfkill_poll = 
rt61pci_rfkill_poll, .link_stats = rt61pci_link_stats, .reset_tuner = rt61pci_reset_tuner, .link_tuner = rt61pci_link_tuner, .start_queue = rt61pci_start_queue, .kick_queue = rt61pci_kick_queue, .stop_queue = rt61pci_stop_queue, .flush_queue = rt2x00mmio_flush_queue, .write_tx_desc = rt61pci_write_tx_desc, .write_beacon = rt61pci_write_beacon, .clear_beacon = rt61pci_clear_beacon, .fill_rxdone = rt61pci_fill_rxdone, .config_shared_key = rt61pci_config_shared_key, .config_pairwise_key = rt61pci_config_pairwise_key, .config_filter = rt61pci_config_filter, .config_intf = rt61pci_config_intf, .config_erp = rt61pci_config_erp, .config_ant = rt61pci_config_ant, .config = rt61pci_config, }; static void rt61pci_queue_init(struct data_queue *queue) { switch (queue->qid) { case QID_RX: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = RXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_BEACON: queue->limit = 4; queue->data_size = 0; /* No DMA required for beacons */ queue->desc_size = TXINFO_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_ATIM: default: BUG(); break; } } static const struct rt2x00_ops rt61pci_ops = { .name = KBUILD_MODNAME, .max_ap_intf = 4, .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, .queue_init = rt61pci_queue_init, .lib = &rt61pci_rt2x00_ops, .hw = &rt61pci_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt61pci_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ }; /* * RT61pci module information. */ static const struct pci_device_id rt61pci_device_table[] = { /* RT2561s */ { PCI_DEVICE(0x1814, 0x0301) }, /* RT2561 v2 */ { PCI_DEVICE(0x1814, 0x0302) }, /* RT2661 */ { PCI_DEVICE(0x1814, 0x0401) }, { 0, } }; MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT61 PCI & PCMCIA Wireless LAN driver."); MODULE_DEVICE_TABLE(pci, rt61pci_device_table); MODULE_FIRMWARE(FIRMWARE_RT2561); MODULE_FIRMWARE(FIRMWARE_RT2561s); MODULE_FIRMWARE(FIRMWARE_RT2661); MODULE_LICENSE("GPL"); static int rt61pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { return rt2x00pci_probe(pci_dev, &rt61pci_ops); } static struct pci_driver rt61pci_driver = { .name = KBUILD_MODNAME, .id_table = rt61pci_device_table, .probe = rt61pci_probe, .remove = rt2x00pci_remove, .driver.pm = &rt2x00pci_pm_ops, }; module_pci_driver(rt61pci_driver);
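The rt61pci_conf_tx() callback above updates the WMM registers by building a register field descriptor at run time: each queue owns a 4-bit slice of AIFSN_CSR, CWMIN_CSR and CWMAX_CSR, located by a mask and bit offset derived from the queue index. Below is a minimal userspace sketch of that field-descriptor idiom, assuming simplified types; struct field32, set_field32() and get_field32() are illustrative stand-ins for the kernel helpers, not the driver's own code.

#include <stdint.h>
#include <stdio.h>

struct field32 {
	uint32_t bit_mask;
	unsigned int bit_offset;
};

/* Replace only the bits selected by the field descriptor. */
static void set_field32(uint32_t *reg, struct field32 field, uint32_t value)
{
	*reg &= ~field.bit_mask;
	*reg |= (value << field.bit_offset) & field.bit_mask;
}

static uint32_t get_field32(uint32_t reg, struct field32 field)
{
	return (reg & field.bit_mask) >> field.bit_offset;
}

int main(void)
{
	uint32_t aifsn_csr = 0;	/* models one 32-bit CSR holding four 4-bit AIFS values */
	unsigned int queue_idx;

	/* Pack a per-queue AIFS value into the queue's own nibble. */
	for (queue_idx = 0; queue_idx < 4; queue_idx++) {
		struct field32 field = {
			.bit_offset = queue_idx * 4,
			.bit_mask = 0xf << (queue_idx * 4),
		};
		set_field32(&aifsn_csr, field, 2 + queue_idx);
	}

	printf("AIFSN_CSR = 0x%08x, queue 2 AIFS = %u\n",
	       (unsigned int)aifsn_csr,
	       (unsigned int)get_field32(aifsn_csr,
			(struct field32){ .bit_offset = 8, .bit_mask = 0xf << 8 }));
	return 0;
}

Computing the descriptor at run time keeps a single code path for all four queues, since the same 4-bit (or 16-bit, for TXOP) layout simply repeats per queue index.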
linux-master
drivers/net/wireless/ralink/rt2x00/rt61pci.c
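For reference, the RX path in rt61pci.c converts the raw AGC reading from the RX descriptor into an RSSI estimate by subtracting an offset that depends on the reported LNA gain step and the current band (rt61pci_agc_to_rssi()). The standalone sketch below models only that arithmetic with the offsets taken from the function above; the agc_to_rssi() signature and the main() driver are simplified assumptions for illustration, not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Convert a raw AGC reading into an RSSI estimate.  'lna' is the LNA gain
 * step reported in the RX descriptor, 'lna_gain' the per-device calibration
 * value, and 'band_5ghz' selects the extra 5 GHz correction. */
static int agc_to_rssi(int agc, int lna, int lna_gain, bool band_5ghz)
{
	int offset = lna_gain;

	switch (lna) {
	case 3:
		offset += 90;
		break;
	case 2:
		offset += 74;
		break;
	case 1:
		offset += 64;
		break;
	default:
		return 0;	/* unknown LNA step: no usable estimate */
	}

	/* The 5 GHz band gets an additional correction for the two
	 * highest gain steps. */
	if (band_5ghz && (lna == 3 || lna == 2))
		offset += 10;

	return agc * 2 - offset;
}

int main(void)
{
	/* Example: AGC of 40, LNA step 2, calibration gain 8, 2.4 GHz band. */
	printf("rssi = %d\n", agc_to_rssi(40, 2, 8, false));
	return 0;
}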
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2010 Willow Garage <http://www.willowgarage.com> Copyright (C) 2004 - 2010 Ivo van Doorn <[email protected]> Copyright (C) 2004 - 2009 Gertjan van Wingerde <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00lib Abstract: rt2x00 queue specific routines. */ #include <linux/slab.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/dma-mapping.h> #include "rt2x00.h" #include "rt2x00lib.h" struct sk_buff *rt2x00queue_alloc_rxskb(struct queue_entry *entry, gfp_t gfp) { struct data_queue *queue = entry->queue; struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; struct sk_buff *skb; struct skb_frame_desc *skbdesc; unsigned int frame_size; unsigned int head_size = 0; unsigned int tail_size = 0; /* * The frame size includes descriptor size, because the * hardware directly receive the frame into the skbuffer. */ frame_size = queue->data_size + queue->desc_size + queue->winfo_size; /* * The payload should be aligned to a 4-byte boundary, * this means we need at least 3 bytes for moving the frame * into the correct offset. */ head_size = 4; /* * For IV/EIV/ICV assembly we must make sure there is * at least 8 bytes bytes available in headroom for IV/EIV * and 8 bytes for ICV data as tailroon. */ if (rt2x00_has_cap_hw_crypto(rt2x00dev)) { head_size += 8; tail_size += 8; } /* * Allocate skbuffer. */ skb = __dev_alloc_skb(frame_size + head_size + tail_size, gfp); if (!skb) return NULL; /* * Make sure we not have a frame with the requested bytes * available in the head and tail. */ skb_reserve(skb, head_size); skb_put(skb, frame_size); /* * Populate skbdesc. */ skbdesc = get_skb_frame_desc(skb); memset(skbdesc, 0, sizeof(*skbdesc)); if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA)) { dma_addr_t skb_dma; skb_dma = dma_map_single(rt2x00dev->dev, skb->data, skb->len, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(rt2x00dev->dev, skb_dma))) { dev_kfree_skb_any(skb); return NULL; } skbdesc->skb_dma = skb_dma; skbdesc->flags |= SKBDESC_DMA_MAPPED_RX; } return skb; } int rt2x00queue_map_txskb(struct queue_entry *entry) { struct device *dev = entry->queue->rt2x00dev->dev; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); skbdesc->skb_dma = dma_map_single(dev, entry->skb->data, entry->skb->len, DMA_TO_DEVICE); if (unlikely(dma_mapping_error(dev, skbdesc->skb_dma))) return -ENOMEM; skbdesc->flags |= SKBDESC_DMA_MAPPED_TX; rt2x00lib_dmadone(entry); return 0; } EXPORT_SYMBOL_GPL(rt2x00queue_map_txskb); void rt2x00queue_unmap_skb(struct queue_entry *entry) { struct device *dev = entry->queue->rt2x00dev->dev; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); if (skbdesc->flags & SKBDESC_DMA_MAPPED_RX) { dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len, DMA_FROM_DEVICE); skbdesc->flags &= ~SKBDESC_DMA_MAPPED_RX; } else if (skbdesc->flags & SKBDESC_DMA_MAPPED_TX) { dma_unmap_single(dev, skbdesc->skb_dma, entry->skb->len, DMA_TO_DEVICE); skbdesc->flags &= ~SKBDESC_DMA_MAPPED_TX; } } EXPORT_SYMBOL_GPL(rt2x00queue_unmap_skb); void rt2x00queue_free_skb(struct queue_entry *entry) { if (!entry->skb) return; rt2x00queue_unmap_skb(entry); dev_kfree_skb_any(entry->skb); entry->skb = NULL; } void rt2x00queue_align_frame(struct sk_buff *skb) { unsigned int frame_length = skb->len; unsigned int align = ALIGN_SIZE(skb, 0); if (!align) return; skb_push(skb, align); memmove(skb->data, skb->data + align, frame_length); skb_trim(skb, frame_length); } /* * H/W needs L2 padding between the header and the 
paylod if header size * is not 4 bytes aligned. */ void rt2x00queue_insert_l2pad(struct sk_buff *skb, unsigned int hdr_len) { unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; if (!l2pad) return; skb_push(skb, l2pad); memmove(skb->data, skb->data + l2pad, hdr_len); } void rt2x00queue_remove_l2pad(struct sk_buff *skb, unsigned int hdr_len) { unsigned int l2pad = (skb->len > hdr_len) ? L2PAD_SIZE(hdr_len) : 0; if (!l2pad) return; memmove(skb->data + l2pad, skb->data, hdr_len); skb_pull(skb, l2pad); } static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct txentry_desc *txdesc) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct rt2x00_intf *intf = vif_to_intf(tx_info->control.vif); u16 seqno; if (!(tx_info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)) return; __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); if (!rt2x00_has_cap_flag(rt2x00dev, REQUIRE_SW_SEQNO)) { /* * rt2800 has a H/W (or F/W) bug, device incorrectly increase * seqno on retransmitted data (non-QOS) and management frames. * To workaround the problem let's generate seqno in software. * Except for beacons which are transmitted periodically by H/W * hence hardware has to assign seqno for them. */ if (ieee80211_is_beacon(hdr->frame_control)) { __set_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); /* H/W will generate sequence number */ return; } __clear_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags); } /* * The hardware is not able to insert a sequence number. Assign a * software generated one here. * * This is wrong because beacons are not getting sequence * numbers assigned properly. * * A secondary problem exists for drivers that cannot toggle * sequence counting per-frame, since those will override the * sequence counter given by mac80211. */ if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) seqno = atomic_add_return(0x10, &intf->seqno); else seqno = atomic_read(&intf->seqno); hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG); hdr->seq_ctrl |= cpu_to_le16(seqno); } static void rt2x00queue_create_tx_descriptor_plcp(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct txentry_desc *txdesc, const struct rt2x00_rate *hwrate) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; unsigned int data_length; unsigned int duration; unsigned int residual; /* * Determine with what IFS priority this frame should be send. * Set ifs to IFS_SIFS when the this is not the first fragment, * or this fragment came after RTS/CTS. */ if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags)) txdesc->u.plcp.ifs = IFS_BACKOFF; else txdesc->u.plcp.ifs = IFS_SIFS; /* Data length + CRC + Crypto overhead (IV/EIV/ICV/MIC) */ data_length = skb->len + 4; data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb); /* * PLCP setup * Length calculation depends on OFDM/CCK rate. */ txdesc->u.plcp.signal = hwrate->plcp; txdesc->u.plcp.service = 0x04; if (hwrate->flags & DEV_RATE_OFDM) { txdesc->u.plcp.length_high = (data_length >> 6) & 0x3f; txdesc->u.plcp.length_low = data_length & 0x3f; } else { /* * Convert length to microseconds. 
*/ residual = GET_DURATION_RES(data_length, hwrate->bitrate); duration = GET_DURATION(data_length, hwrate->bitrate); if (residual != 0) { duration++; /* * Check if we need to set the Length Extension */ if (hwrate->bitrate == 110 && residual <= 30) txdesc->u.plcp.service |= 0x80; } txdesc->u.plcp.length_high = (duration >> 8) & 0xff; txdesc->u.plcp.length_low = duration & 0xff; /* * When preamble is enabled we should set the * preamble bit for the signal. */ if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) txdesc->u.plcp.signal |= 0x08; } } static void rt2x00queue_create_tx_descriptor_ht(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct txentry_desc *txdesc, struct ieee80211_sta *sta, const struct rt2x00_rate *hwrate) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct rt2x00_sta *sta_priv = NULL; u8 density = 0; if (sta) { sta_priv = sta_to_rt2x00_sta(sta); txdesc->u.ht.wcid = sta_priv->wcid; density = sta->deflink.ht_cap.ampdu_density; } /* * If IEEE80211_TX_RC_MCS is set txrate->idx just contains the * mcs rate to be used */ if (txrate->flags & IEEE80211_TX_RC_MCS) { txdesc->u.ht.mcs = txrate->idx; /* * MIMO PS should be set to 1 for STA's using dynamic SM PS * when using more then one tx stream (>MCS7). */ if (sta && txdesc->u.ht.mcs > 7 && sta->deflink.smps_mode == IEEE80211_SMPS_DYNAMIC) __set_bit(ENTRY_TXD_HT_MIMO_PS, &txdesc->flags); } else { txdesc->u.ht.mcs = rt2x00_get_rate_mcs(hwrate->mcs); if (txrate->flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) txdesc->u.ht.mcs |= 0x08; } if (test_bit(CONFIG_HT_DISABLED, &rt2x00dev->flags)) { if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) txdesc->u.ht.txop = TXOP_SIFS; else txdesc->u.ht.txop = TXOP_BACKOFF; /* Left zero on all other settings. */ return; } /* * Only one STBC stream is supported for now. */ if (tx_info->flags & IEEE80211_TX_CTL_STBC) txdesc->u.ht.stbc = 1; /* * This frame is eligible for an AMPDU, however, don't aggregate * frames that are intended to probe a specific tx rate. */ if (tx_info->flags & IEEE80211_TX_CTL_AMPDU && !(tx_info->flags & IEEE80211_TX_CTL_RATE_CTRL_PROBE)) { __set_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags); txdesc->u.ht.mpdu_density = density; txdesc->u.ht.ba_size = 7; /* FIXME: What value is needed? */ } /* * Set 40Mhz mode if necessary (for legacy rates this will * duplicate the frame to both channels). 
*/ if (txrate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH || txrate->flags & IEEE80211_TX_RC_DUP_DATA) __set_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags); if (txrate->flags & IEEE80211_TX_RC_SHORT_GI) __set_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags); /* * Determine IFS values * - Use TXOP_BACKOFF for management frames except beacons * - Use TXOP_SIFS for fragment bursts * - Use TXOP_HTTXOP for everything else * * Note: rt2800 devices won't use CTS protection (if used) * for frames not transmitted with TXOP_HTTXOP */ if (ieee80211_is_mgmt(hdr->frame_control) && !ieee80211_is_beacon(hdr->frame_control)) txdesc->u.ht.txop = TXOP_BACKOFF; else if (!(tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT)) txdesc->u.ht.txop = TXOP_SIFS; else txdesc->u.ht.txop = TXOP_HTTXOP; } static void rt2x00queue_create_tx_descriptor(struct rt2x00_dev *rt2x00dev, struct sk_buff *skb, struct txentry_desc *txdesc, struct ieee80211_sta *sta) { struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; struct ieee80211_tx_rate *txrate = &tx_info->control.rates[0]; struct ieee80211_rate *rate; const struct rt2x00_rate *hwrate = NULL; memset(txdesc, 0, sizeof(*txdesc)); /* * Header and frame information. */ txdesc->length = skb->len; txdesc->header_length = ieee80211_get_hdrlen_from_skb(skb); /* * Check whether this frame is to be acked. */ if (!(tx_info->flags & IEEE80211_TX_CTL_NO_ACK)) __set_bit(ENTRY_TXD_ACK, &txdesc->flags); /* * Check if this is a RTS/CTS frame */ if (ieee80211_is_rts(hdr->frame_control) || ieee80211_is_cts(hdr->frame_control)) { __set_bit(ENTRY_TXD_BURST, &txdesc->flags); if (ieee80211_is_rts(hdr->frame_control)) __set_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags); else __set_bit(ENTRY_TXD_CTS_FRAME, &txdesc->flags); if (tx_info->control.rts_cts_rate_idx >= 0) rate = ieee80211_get_rts_cts_rate(rt2x00dev->hw, tx_info); } /* * Determine retry information. */ txdesc->retry_limit = tx_info->control.rates[0].count - 1; if (txdesc->retry_limit >= rt2x00dev->long_retry) __set_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags); /* * Check if more fragments are pending */ if (ieee80211_has_morefrags(hdr->frame_control)) { __set_bit(ENTRY_TXD_BURST, &txdesc->flags); __set_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags); } /* * Check if more frames (!= fragments) are pending */ if (tx_info->flags & IEEE80211_TX_CTL_MORE_FRAMES) __set_bit(ENTRY_TXD_BURST, &txdesc->flags); /* * Beacons and probe responses require the tsf timestamp * to be inserted into the frame. */ if ((ieee80211_is_beacon(hdr->frame_control) || ieee80211_is_probe_resp(hdr->frame_control)) && !(tx_info->flags & IEEE80211_TX_CTL_INJECTED)) __set_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags); if ((tx_info->flags & IEEE80211_TX_CTL_FIRST_FRAGMENT) && !test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)) __set_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags); /* * Determine rate modulation. 
*/ if (txrate->flags & IEEE80211_TX_RC_GREEN_FIELD) txdesc->rate_mode = RATE_MODE_HT_GREENFIELD; else if (txrate->flags & IEEE80211_TX_RC_MCS) txdesc->rate_mode = RATE_MODE_HT_MIX; else { rate = ieee80211_get_tx_rate(rt2x00dev->hw, tx_info); hwrate = rt2x00_get_rate(rate->hw_value); if (hwrate->flags & DEV_RATE_OFDM) txdesc->rate_mode = RATE_MODE_OFDM; else txdesc->rate_mode = RATE_MODE_CCK; } /* * Apply TX descriptor handling by components */ rt2x00crypto_create_tx_descriptor(rt2x00dev, skb, txdesc); rt2x00queue_create_tx_descriptor_seq(rt2x00dev, skb, txdesc); if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_HT_TX_DESC)) rt2x00queue_create_tx_descriptor_ht(rt2x00dev, skb, txdesc, sta, hwrate); else rt2x00queue_create_tx_descriptor_plcp(rt2x00dev, skb, txdesc, hwrate); } static int rt2x00queue_write_tx_data(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; /* * This should not happen, we already checked the entry * was ours. When the hardware disagrees there has been * a queue corruption! */ if (unlikely(rt2x00dev->ops->lib->get_entry_state && rt2x00dev->ops->lib->get_entry_state(entry))) { rt2x00_err(rt2x00dev, "Corrupt queue %d, accessing entry which is not ours\n" "Please file bug report to %s\n", entry->queue->qid, DRV_PROJECT); return -EINVAL; } /* * Add the requested extra tx headroom in front of the skb. */ skb_push(entry->skb, rt2x00dev->extra_tx_headroom); memset(entry->skb->data, 0, rt2x00dev->extra_tx_headroom); /* * Call the driver's write_tx_data function, if it exists. */ if (rt2x00dev->ops->lib->write_tx_data) rt2x00dev->ops->lib->write_tx_data(entry, txdesc); /* * Map the skb to DMA. */ if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_DMA) && rt2x00queue_map_txskb(entry)) return -ENOMEM; return 0; } static void rt2x00queue_write_tx_descriptor(struct queue_entry *entry, struct txentry_desc *txdesc) { struct data_queue *queue = entry->queue; queue->rt2x00dev->ops->lib->write_tx_desc(entry, txdesc); /* * All processing on the frame has been completed, this means * it is now ready to be dumped to userspace through debugfs. */ rt2x00debug_dump_frame(queue->rt2x00dev, DUMP_FRAME_TX, entry); } static void rt2x00queue_kick_tx_queue(struct data_queue *queue, struct txentry_desc *txdesc) { /* * Check if we need to kick the queue, there are however a few rules * 1) Don't kick unless this is the last in frame in a burst. * When the burst flag is set, this frame is always followed * by another frame which in some way are related to eachother. * This is true for fragments, RTS or CTS-to-self frames. * 2) Rule 1 can be broken when the available entries * in the queue are less then a certain threshold. */ if (rt2x00queue_threshold(queue) || !test_bit(ENTRY_TXD_BURST, &txdesc->flags)) queue->rt2x00dev->ops->lib->kick_queue(queue); } static void rt2x00queue_bar_check(struct queue_entry *entry) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; struct ieee80211_bar *bar = (void *) (entry->skb->data + rt2x00dev->extra_tx_headroom); struct rt2x00_bar_list_entry *bar_entry; if (likely(!ieee80211_is_back_req(bar->frame_control))) return; bar_entry = kmalloc(sizeof(*bar_entry), GFP_ATOMIC); /* * If the alloc fails we still send the BAR out but just don't track * it in our bar list. And as a result we will report it to mac80211 * back as failed. 
*/ if (!bar_entry) return; bar_entry->entry = entry; bar_entry->block_acked = 0; /* * Copy the relevant parts of the 802.11 BAR into out check list * such that we can use RCU for less-overhead in the RX path since * sending BARs and processing the according BlockAck should be * the exception. */ memcpy(bar_entry->ra, bar->ra, sizeof(bar->ra)); memcpy(bar_entry->ta, bar->ta, sizeof(bar->ta)); bar_entry->control = bar->control; bar_entry->start_seq_num = bar->start_seq_num; /* * Insert BAR into our BAR check list. */ spin_lock_bh(&rt2x00dev->bar_list_lock); list_add_tail_rcu(&bar_entry->list, &rt2x00dev->bar_list); spin_unlock_bh(&rt2x00dev->bar_list_lock); } int rt2x00queue_write_tx_frame(struct data_queue *queue, struct sk_buff *skb, struct ieee80211_sta *sta, bool local) { struct ieee80211_tx_info *tx_info; struct queue_entry *entry; struct txentry_desc txdesc; struct skb_frame_desc *skbdesc; u8 rate_idx, rate_flags; int ret = 0; /* * Copy all TX descriptor information into txdesc, * after that we are free to use the skb->cb array * for our information. */ rt2x00queue_create_tx_descriptor(queue->rt2x00dev, skb, &txdesc, sta); /* * All information is retrieved from the skb->cb array, * now we should claim ownership of the driver part of that * array, preserving the bitrate index and flags. */ tx_info = IEEE80211_SKB_CB(skb); rate_idx = tx_info->control.rates[0].idx; rate_flags = tx_info->control.rates[0].flags; skbdesc = get_skb_frame_desc(skb); memset(skbdesc, 0, sizeof(*skbdesc)); skbdesc->tx_rate_idx = rate_idx; skbdesc->tx_rate_flags = rate_flags; if (local) skbdesc->flags |= SKBDESC_NOT_MAC80211; /* * When hardware encryption is supported, and this frame * is to be encrypted, we should strip the IV/EIV data from * the frame so we can provide it to the driver separately. */ if (test_bit(ENTRY_TXD_ENCRYPT, &txdesc.flags) && !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc.flags)) { if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_COPY_IV)) rt2x00crypto_tx_copy_iv(skb, &txdesc); else rt2x00crypto_tx_remove_iv(skb, &txdesc); } /* * When DMA allocation is required we should guarantee to the * driver that the DMA is aligned to a 4-byte boundary. * However some drivers require L2 padding to pad the payload * rather then the header. This could be a requirement for * PCI and USB devices, while header alignment only is valid * for PCI devices. */ if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_L2PAD)) rt2x00queue_insert_l2pad(skb, txdesc.header_length); else if (rt2x00_has_cap_flag(queue->rt2x00dev, REQUIRE_DMA)) rt2x00queue_align_frame(skb); /* * That function must be called with bh disabled. */ spin_lock(&queue->tx_lock); if (unlikely(rt2x00queue_full(queue))) { rt2x00_dbg(queue->rt2x00dev, "Dropping frame due to full tx queue %d\n", queue->qid); ret = -ENOBUFS; goto out; } entry = rt2x00queue_get_entry(queue, Q_INDEX); if (unlikely(test_and_set_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags))) { rt2x00_err(queue->rt2x00dev, "Arrived at non-free entry in the non-full queue %d\n" "Please file bug report to %s\n", queue->qid, DRV_PROJECT); ret = -EINVAL; goto out; } entry->skb = skb; /* * It could be possible that the queue was corrupted and this * call failed. Since we always return NETDEV_TX_OK to mac80211, * this frame will simply be dropped. */ if (unlikely(rt2x00queue_write_tx_data(entry, &txdesc))) { clear_bit(ENTRY_OWNER_DEVICE_DATA, &entry->flags); entry->skb = NULL; ret = -EIO; goto out; } /* * Put BlockAckReqs into our check list for driver BA processing. 
*/ rt2x00queue_bar_check(entry); set_bit(ENTRY_DATA_PENDING, &entry->flags); rt2x00queue_index_inc(entry, Q_INDEX); rt2x00queue_write_tx_descriptor(entry, &txdesc); rt2x00queue_kick_tx_queue(queue, &txdesc); out: /* * Pausing queue has to be serialized with rt2x00lib_txdone(), so we * do this under queue->tx_lock. Bottom halve was already disabled * before ieee80211_xmit() call. */ if (rt2x00queue_threshold(queue)) rt2x00queue_pause_queue(queue); spin_unlock(&queue->tx_lock); return ret; } int rt2x00queue_clear_beacon(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif) { struct rt2x00_intf *intf = vif_to_intf(vif); if (unlikely(!intf->beacon)) return -ENOBUFS; /* * Clean up the beacon skb. */ rt2x00queue_free_skb(intf->beacon); /* * Clear beacon (single bssid devices don't need to clear the beacon * since the beacon queue will get stopped anyway). */ if (rt2x00dev->ops->lib->clear_beacon) rt2x00dev->ops->lib->clear_beacon(intf->beacon); return 0; } int rt2x00queue_update_beacon(struct rt2x00_dev *rt2x00dev, struct ieee80211_vif *vif) { struct rt2x00_intf *intf = vif_to_intf(vif); struct skb_frame_desc *skbdesc; struct txentry_desc txdesc; if (unlikely(!intf->beacon)) return -ENOBUFS; /* * Clean up the beacon skb. */ rt2x00queue_free_skb(intf->beacon); intf->beacon->skb = ieee80211_beacon_get(rt2x00dev->hw, vif, 0); if (!intf->beacon->skb) return -ENOMEM; /* * Copy all TX descriptor information into txdesc, * after that we are free to use the skb->cb array * for our information. */ rt2x00queue_create_tx_descriptor(rt2x00dev, intf->beacon->skb, &txdesc, NULL); /* * Fill in skb descriptor */ skbdesc = get_skb_frame_desc(intf->beacon->skb); memset(skbdesc, 0, sizeof(*skbdesc)); /* * Send beacon to hardware. */ rt2x00dev->ops->lib->write_beacon(intf->beacon, &txdesc); return 0; } bool rt2x00queue_for_each_entry(struct data_queue *queue, enum queue_index start, enum queue_index end, void *data, bool (*fn)(struct queue_entry *entry, void *data)) { unsigned long irqflags; unsigned int index_start; unsigned int index_end; unsigned int i; if (unlikely(start >= Q_INDEX_MAX || end >= Q_INDEX_MAX)) { rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index range (%d - %d)\n", start, end); return true; } /* * Only protect the range we are going to loop over, * if during our loop a extra entry is set to pending * it should not be kicked during this run, since it * is part of another TX operation. */ spin_lock_irqsave(&queue->index_lock, irqflags); index_start = queue->index[start]; index_end = queue->index[end]; spin_unlock_irqrestore(&queue->index_lock, irqflags); /* * Start from the TX done pointer, this guarantees that we will * send out all frames in the correct order. 
*/ if (index_start < index_end) { for (i = index_start; i < index_end; i++) { if (fn(&queue->entries[i], data)) return true; } } else { for (i = index_start; i < queue->limit; i++) { if (fn(&queue->entries[i], data)) return true; } for (i = 0; i < index_end; i++) { if (fn(&queue->entries[i], data)) return true; } } return false; } EXPORT_SYMBOL_GPL(rt2x00queue_for_each_entry); struct queue_entry *rt2x00queue_get_entry(struct data_queue *queue, enum queue_index index) { struct queue_entry *entry; unsigned long irqflags; if (unlikely(index >= Q_INDEX_MAX)) { rt2x00_err(queue->rt2x00dev, "Entry requested from invalid index type (%d)\n", index); return NULL; } spin_lock_irqsave(&queue->index_lock, irqflags); entry = &queue->entries[queue->index[index]]; spin_unlock_irqrestore(&queue->index_lock, irqflags); return entry; } EXPORT_SYMBOL_GPL(rt2x00queue_get_entry); void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index) { struct data_queue *queue = entry->queue; unsigned long irqflags; if (unlikely(index >= Q_INDEX_MAX)) { rt2x00_err(queue->rt2x00dev, "Index change on invalid index type (%d)\n", index); return; } spin_lock_irqsave(&queue->index_lock, irqflags); queue->index[index]++; if (queue->index[index] >= queue->limit) queue->index[index] = 0; entry->last_action = jiffies; if (index == Q_INDEX) { queue->length++; } else if (index == Q_INDEX_DONE) { queue->length--; queue->count++; } spin_unlock_irqrestore(&queue->index_lock, irqflags); } static void rt2x00queue_pause_queue_nocheck(struct data_queue *queue) { switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: /* * For TX queues, we have to disable the queue * inside mac80211. */ ieee80211_stop_queue(queue->rt2x00dev->hw, queue->qid); break; default: break; } } void rt2x00queue_pause_queue(struct data_queue *queue) { if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || !test_bit(QUEUE_STARTED, &queue->flags) || test_and_set_bit(QUEUE_PAUSED, &queue->flags)) return; rt2x00queue_pause_queue_nocheck(queue); } EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue); void rt2x00queue_unpause_queue(struct data_queue *queue) { if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || !test_bit(QUEUE_STARTED, &queue->flags) || !test_and_clear_bit(QUEUE_PAUSED, &queue->flags)) return; switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: /* * For TX queues, we have to enable the queue * inside mac80211. */ ieee80211_wake_queue(queue->rt2x00dev->hw, queue->qid); break; case QID_RX: /* * For RX we need to kick the queue now in order to * receive frames. 
	 */
		queue->rt2x00dev->ops->lib->kick_queue(queue);
		break;
	default:
		break;
	}
}
EXPORT_SYMBOL_GPL(rt2x00queue_unpause_queue);

void rt2x00queue_start_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) ||
	    test_and_set_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	set_bit(QUEUE_PAUSED, &queue->flags);

	queue->rt2x00dev->ops->lib->start_queue(queue);

	rt2x00queue_unpause_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queue);

void rt2x00queue_stop_queue(struct data_queue *queue)
{
	mutex_lock(&queue->status_lock);

	if (!test_and_clear_bit(QUEUE_STARTED, &queue->flags)) {
		mutex_unlock(&queue->status_lock);
		return;
	}

	rt2x00queue_pause_queue_nocheck(queue);

	queue->rt2x00dev->ops->lib->stop_queue(queue);

	mutex_unlock(&queue->status_lock);
}
EXPORT_SYMBOL_GPL(rt2x00queue_stop_queue);

void rt2x00queue_flush_queue(struct data_queue *queue, bool drop)
{
	bool tx_queue =
		(queue->qid == QID_AC_VO) ||
		(queue->qid == QID_AC_VI) ||
		(queue->qid == QID_AC_BE) ||
		(queue->qid == QID_AC_BK);

	if (rt2x00queue_empty(queue))
		return;

	/*
	 * If we are not supposed to drop any pending
	 * frames, this means we must force a start (=kick)
	 * to the queue to make sure the hardware will
	 * start transmitting.
	 */
	if (!drop && tx_queue)
		queue->rt2x00dev->ops->lib->kick_queue(queue);

	/*
	 * Check if driver supports flushing, if that is the case we can
	 * defer the flushing to the driver. Otherwise we must use the
	 * alternative which just waits for the queue to become empty.
	 */
	if (likely(queue->rt2x00dev->ops->lib->flush_queue))
		queue->rt2x00dev->ops->lib->flush_queue(queue, drop);

	/*
	 * The queue flush has failed...
	 */
	if (unlikely(!rt2x00queue_empty(queue)))
		rt2x00_warn(queue->rt2x00dev, "Queue %d failed to flush\n",
			    queue->qid);
}
EXPORT_SYMBOL_GPL(rt2x00queue_flush_queue);

void rt2x00queue_start_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_start_queue will call ieee80211_wake_queue
	 * for each queue after it has been properly initialized.
	 */
	tx_queue_for_each(rt2x00dev, queue)
		rt2x00queue_start_queue(queue);

	rt2x00queue_start_queue(rt2x00dev->rx);
}
EXPORT_SYMBOL_GPL(rt2x00queue_start_queues);

void rt2x00queue_stop_queues(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;

	/*
	 * rt2x00queue_stop_queue will call ieee80211_stop_queue
	 * as well, but we are completely shutting down everything
	 * now, so it is much safer to stop all TX queues at once,
	 * and use rt2x00queue_stop_queue for cleaning up.
*/ ieee80211_stop_queues(rt2x00dev->hw); tx_queue_for_each(rt2x00dev, queue) rt2x00queue_stop_queue(queue); rt2x00queue_stop_queue(rt2x00dev->rx); } EXPORT_SYMBOL_GPL(rt2x00queue_stop_queues); void rt2x00queue_flush_queues(struct rt2x00_dev *rt2x00dev, bool drop) { struct data_queue *queue; tx_queue_for_each(rt2x00dev, queue) rt2x00queue_flush_queue(queue, drop); rt2x00queue_flush_queue(rt2x00dev->rx, drop); } EXPORT_SYMBOL_GPL(rt2x00queue_flush_queues); static void rt2x00queue_reset(struct data_queue *queue) { unsigned long irqflags; unsigned int i; spin_lock_irqsave(&queue->index_lock, irqflags); queue->count = 0; queue->length = 0; for (i = 0; i < Q_INDEX_MAX; i++) queue->index[i] = 0; spin_unlock_irqrestore(&queue->index_lock, irqflags); } void rt2x00queue_init_queues(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; unsigned int i; queue_for_each(rt2x00dev, queue) { rt2x00queue_reset(queue); for (i = 0; i < queue->limit; i++) rt2x00dev->ops->lib->clear_entry(&queue->entries[i]); } } static int rt2x00queue_alloc_entries(struct data_queue *queue) { struct queue_entry *entries; unsigned int entry_size; unsigned int i; rt2x00queue_reset(queue); /* * Allocate all queue entries. */ entry_size = sizeof(*entries) + queue->priv_size; entries = kcalloc(queue->limit, entry_size, GFP_KERNEL); if (!entries) return -ENOMEM; #define QUEUE_ENTRY_PRIV_OFFSET(__base, __index, __limit, __esize, __psize) \ (((char *)(__base)) + ((__limit) * (__esize)) + \ ((__index) * (__psize))) for (i = 0; i < queue->limit; i++) { entries[i].flags = 0; entries[i].queue = queue; entries[i].skb = NULL; entries[i].entry_idx = i; entries[i].priv_data = QUEUE_ENTRY_PRIV_OFFSET(entries, i, queue->limit, sizeof(*entries), queue->priv_size); } #undef QUEUE_ENTRY_PRIV_OFFSET queue->entries = entries; return 0; } static void rt2x00queue_free_skbs(struct data_queue *queue) { unsigned int i; if (!queue->entries) return; for (i = 0; i < queue->limit; i++) { rt2x00queue_free_skb(&queue->entries[i]); } } static int rt2x00queue_alloc_rxskbs(struct data_queue *queue) { unsigned int i; struct sk_buff *skb; for (i = 0; i < queue->limit; i++) { skb = rt2x00queue_alloc_rxskb(&queue->entries[i], GFP_KERNEL); if (!skb) return -ENOMEM; queue->entries[i].skb = skb; } return 0; } int rt2x00queue_initialize(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; int status; status = rt2x00queue_alloc_entries(rt2x00dev->rx); if (status) goto exit; tx_queue_for_each(rt2x00dev, queue) { status = rt2x00queue_alloc_entries(queue); if (status) goto exit; } status = rt2x00queue_alloc_entries(rt2x00dev->bcn); if (status) goto exit; if (rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE)) { status = rt2x00queue_alloc_entries(rt2x00dev->atim); if (status) goto exit; } status = rt2x00queue_alloc_rxskbs(rt2x00dev->rx); if (status) goto exit; return 0; exit: rt2x00_err(rt2x00dev, "Queue entries allocation failed\n"); rt2x00queue_uninitialize(rt2x00dev); return status; } void rt2x00queue_uninitialize(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; rt2x00queue_free_skbs(rt2x00dev->rx); queue_for_each(rt2x00dev, queue) { kfree(queue->entries); queue->entries = NULL; } } static void rt2x00queue_init(struct rt2x00_dev *rt2x00dev, struct data_queue *queue, enum data_queue_qid qid) { mutex_init(&queue->status_lock); spin_lock_init(&queue->tx_lock); spin_lock_init(&queue->index_lock); queue->rt2x00dev = rt2x00dev; queue->qid = qid; queue->txop = 0; queue->aifs = 2; queue->cw_min = 5; queue->cw_max = 10; rt2x00dev->ops->queue_init(queue); 
queue->threshold = DIV_ROUND_UP(queue->limit, 10); } int rt2x00queue_allocate(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; enum data_queue_qid qid; unsigned int req_atim = rt2x00_has_cap_flag(rt2x00dev, REQUIRE_ATIM_QUEUE); /* * We need the following queues: * RX: 1 * TX: ops->tx_queues * Beacon: 1 * Atim: 1 (if required) */ rt2x00dev->data_queues = 2 + rt2x00dev->ops->tx_queues + req_atim; queue = kcalloc(rt2x00dev->data_queues, sizeof(*queue), GFP_KERNEL); if (!queue) return -ENOMEM; /* * Initialize pointers */ rt2x00dev->rx = queue; rt2x00dev->tx = &queue[1]; rt2x00dev->bcn = &queue[1 + rt2x00dev->ops->tx_queues]; rt2x00dev->atim = req_atim ? &queue[2 + rt2x00dev->ops->tx_queues] : NULL; /* * Initialize queue parameters. * RX: qid = QID_RX * TX: qid = QID_AC_VO + index * TX: cw_min: 2^5 = 32. * TX: cw_max: 2^10 = 1024. * BCN: qid = QID_BEACON * ATIM: qid = QID_ATIM */ rt2x00queue_init(rt2x00dev, rt2x00dev->rx, QID_RX); qid = QID_AC_VO; tx_queue_for_each(rt2x00dev, queue) rt2x00queue_init(rt2x00dev, queue, qid++); rt2x00queue_init(rt2x00dev, rt2x00dev->bcn, QID_BEACON); if (req_atim) rt2x00queue_init(rt2x00dev, rt2x00dev->atim, QID_ATIM); return 0; } void rt2x00queue_free(struct rt2x00_dev *rt2x00dev) { kfree(rt2x00dev->rx); rt2x00dev->rx = NULL; rt2x00dev->tx = NULL; rt2x00dev->bcn = NULL; }
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00queue.c
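rt2x00queue.c above manages each data queue as a fixed-size ring: rt2x00queue_index_inc() advances Q_INDEX when a frame is handed to the hardware and Q_INDEX_DONE when it completes, wrapping both against queue->limit and keeping queue->length in step. The following is only a minimal userspace sketch of that bookkeeping; every name in it (struct ring, ring_index_inc, and friends) is invented here and is not part of the driver.

#include <stdio.h>

/* All names below are placeholders for this sketch, not rt2x00 structures. */
enum ring_index {
	RING_INDEX,        /* producer position, like Q_INDEX */
	RING_INDEX_DONE,   /* consumer position, like Q_INDEX_DONE */
	RING_INDEX_MAX
};

struct ring {
	unsigned int limit;                 /* number of entries in the ring */
	unsigned int index[RING_INDEX_MAX]; /* current positions */
	unsigned int length;                /* entries handed to "hardware" */
};

/* Same wrap-around bookkeeping as rt2x00queue_index_inc(). */
static void ring_index_inc(struct ring *r, enum ring_index i)
{
	r->index[i]++;
	if (r->index[i] >= r->limit)
		r->index[i] = 0;

	if (i == RING_INDEX)
		r->length++;
	else if (i == RING_INDEX_DONE)
		r->length--;
}

static int ring_full(const struct ring *r)
{
	return r->length == r->limit;
}

int main(void)
{
	struct ring r = { .limit = 4 };

	for (int frame = 0; frame < 6; frame++) {
		if (ring_full(&r)) {
			/* corresponds to the -ENOBUFS path in the driver */
			printf("frame %d dropped: ring full\n", frame);
			continue;
		}
		printf("frame %d queued at slot %u\n",
		       frame, r.index[RING_INDEX]);
		ring_index_inc(&r, RING_INDEX);
	}

	/* drain the ring the way a txdone handler would */
	while (r.length) {
		printf("completing slot %u\n", r.index[RING_INDEX_DONE]);
		ring_index_inc(&r, RING_INDEX_DONE);
	}
	return 0;
}

With a limit of 4 and 6 submitted frames, the last two are rejected up front, which is the same back-pressure decision rt2x00queue_write_tx_frame() makes before it ever touches an entry.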
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2009 - 2010 Ivo van Doorn <[email protected]> * Copyright (C) 2009 Alban Browaeys <[email protected]> * Copyright (C) 2009 Felix Fietkau <[email protected]> * Copyright (C) 2009 Luis Correia <[email protected]> * Copyright (C) 2009 Mattias Nissler <[email protected]> * Copyright (C) 2009 Mark Asselstine <[email protected]> * Copyright (C) 2009 Xose Vazquez Perez <[email protected]> * Copyright (C) 2009 Bart Zolnierkiewicz <[email protected]> * <http://rt2x00.serialmonkey.com> */ /* Module: rt2800soc * Abstract: rt2800 WiSoC specific routines. */ #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include "rt2x00.h" #include "rt2x00mmio.h" #include "rt2x00soc.h" #include "rt2800.h" #include "rt2800lib.h" #include "rt2800mmio.h" /* Allow hardware encryption to be disabled. */ static bool modparam_nohwcrypt; module_param_named(nohwcrypt, modparam_nohwcrypt, bool, 0444); MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); static bool rt2800soc_hwcrypt_disabled(struct rt2x00_dev *rt2x00dev) { return modparam_nohwcrypt; } static void rt2800soc_disable_radio(struct rt2x00_dev *rt2x00dev) { u32 reg; rt2800_disable_radio(rt2x00dev); rt2x00mmio_register_write(rt2x00dev, PWR_PIN_CFG, 0); reg = 0; if (rt2x00_rt(rt2x00dev, RT3883)) rt2x00_set_field32(&reg, TX_PIN_CFG_RFTR_EN, 1); rt2x00mmio_register_write(rt2x00dev, TX_PIN_CFG, reg); } static int rt2800soc_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: retval = rt2800mmio_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: rt2800soc_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: rt2800mmio_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: /* These states are not supported, but don't report an error */ retval = 0; break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n", state, retval); return retval; } static int rt2800soc_read_eeprom(struct rt2x00_dev *rt2x00dev) { void __iomem *base_addr = ioremap(0x1F040000, EEPROM_SIZE); if (!base_addr) return -ENOMEM; memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE); iounmap(base_addr); return 0; } /* Firmware functions */ static char *rt2800soc_get_firmware_name(struct rt2x00_dev *rt2x00dev) { WARN_ON_ONCE(1); return NULL; } static int rt2800soc_load_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { WARN_ON_ONCE(1); return 0; } static int rt2800soc_check_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { WARN_ON_ONCE(1); return 0; } static int rt2800soc_write_firmware(struct rt2x00_dev *rt2x00dev, const u8 *data, const size_t len) { WARN_ON_ONCE(1); return 0; } static const struct ieee80211_ops rt2800soc_mac80211_ops = { .tx = rt2x00mac_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .set_key = rt2x00mac_set_key, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .get_key_seq = rt2800_get_key_seq, .set_rts_threshold = rt2800_set_rts_threshold, .sta_add = 
rt2800_sta_add, .sta_remove = rt2800_sta_remove, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2800_conf_tx, .get_tsf = rt2800_get_tsf, .rfkill_poll = rt2x00mac_rfkill_poll, .ampdu_action = rt2800_ampdu_action, .flush = rt2x00mac_flush, .get_survey = rt2800_get_survey, .get_ringparam = rt2x00mac_get_ringparam, .tx_frames_pending = rt2x00mac_tx_frames_pending, .reconfig_complete = rt2x00mac_reconfig_complete, }; static const struct rt2800_ops rt2800soc_rt2800_ops = { .register_read = rt2x00mmio_register_read, .register_read_lock = rt2x00mmio_register_read, /* same for SoCs */ .register_write = rt2x00mmio_register_write, .register_write_lock = rt2x00mmio_register_write, /* same for SoCs */ .register_multiread = rt2x00mmio_register_multiread, .register_multiwrite = rt2x00mmio_register_multiwrite, .regbusy_read = rt2x00mmio_regbusy_read, .read_eeprom = rt2800soc_read_eeprom, .hwcrypt_disabled = rt2800soc_hwcrypt_disabled, .drv_write_firmware = rt2800soc_write_firmware, .drv_init_registers = rt2800mmio_init_registers, .drv_get_txwi = rt2800mmio_get_txwi, .drv_get_dma_done = rt2800mmio_get_dma_done, }; static const struct rt2x00lib_ops rt2800soc_rt2x00_ops = { .irq_handler = rt2800mmio_interrupt, .txstatus_tasklet = rt2800mmio_txstatus_tasklet, .pretbtt_tasklet = rt2800mmio_pretbtt_tasklet, .tbtt_tasklet = rt2800mmio_tbtt_tasklet, .rxdone_tasklet = rt2800mmio_rxdone_tasklet, .autowake_tasklet = rt2800mmio_autowake_tasklet, .probe_hw = rt2800mmio_probe_hw, .get_firmware_name = rt2800soc_get_firmware_name, .check_firmware = rt2800soc_check_firmware, .load_firmware = rt2800soc_load_firmware, .initialize = rt2x00mmio_initialize, .uninitialize = rt2x00mmio_uninitialize, .get_entry_state = rt2800mmio_get_entry_state, .clear_entry = rt2800mmio_clear_entry, .set_device_state = rt2800soc_set_device_state, .rfkill_poll = rt2800_rfkill_poll, .link_stats = rt2800_link_stats, .reset_tuner = rt2800_reset_tuner, .link_tuner = rt2800_link_tuner, .gain_calibration = rt2800_gain_calibration, .vco_calibration = rt2800_vco_calibration, .watchdog = rt2800_watchdog, .start_queue = rt2800mmio_start_queue, .kick_queue = rt2800mmio_kick_queue, .stop_queue = rt2800mmio_stop_queue, .flush_queue = rt2800mmio_flush_queue, .write_tx_desc = rt2800mmio_write_tx_desc, .write_tx_data = rt2800_write_tx_data, .write_beacon = rt2800_write_beacon, .clear_beacon = rt2800_clear_beacon, .fill_rxdone = rt2800mmio_fill_rxdone, .config_shared_key = rt2800_config_shared_key, .config_pairwise_key = rt2800_config_pairwise_key, .config_filter = rt2800_config_filter, .config_intf = rt2800_config_intf, .config_erp = rt2800_config_erp, .config_ant = rt2800_config_ant, .config = rt2800_config, .pre_reset_hw = rt2800_pre_reset_hw, }; static const struct rt2x00_ops rt2800soc_ops = { .name = KBUILD_MODNAME, .drv_data_size = sizeof(struct rt2800_drv_data), .max_ap_intf = 8, .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, .queue_init = rt2800mmio_queue_init, .lib = &rt2800soc_rt2x00_ops, .drv = &rt2800soc_rt2800_ops, .hw = &rt2800soc_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt2800_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ }; static int rt2800soc_probe(struct platform_device *pdev) { return rt2x00soc_probe(pdev, &rt2800soc_ops); } static struct platform_driver rt2800soc_driver = { .driver = { .name = "rt2800_wmac", .mod_name = KBUILD_MODNAME, }, .probe = rt2800soc_probe, .remove = rt2x00soc_remove, .suspend = rt2x00soc_suspend, .resume = rt2x00soc_resume, }; 
module_platform_driver(rt2800soc_driver);

MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink WiSoC Wireless LAN driver.");
MODULE_LICENSE("GPL");
linux-master
drivers/net/wireless/ralink/rt2x00/rt2800soc.c
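Most of rt2800soc.c is glue: it fills in ops tables so the bus-agnostic rt2800/rt2x00 core reaches the WiSoC registers through rt2x00mmio_register_read/write function pointers. As a rough, self-contained illustration of that function-pointer indirection (the demo_* names below are placeholders, not the real rt2x00 structures), a generic helper can be written purely against such a table:

#include <stdint.h>
#include <stdio.h>

struct demo_dev;

/* Bus-specific accessors the generic code calls through, in the same spirit
 * as the register_read/register_write members of rt2800_ops. */
struct demo_bus_ops {
	uint32_t (*register_read)(struct demo_dev *dev, unsigned int offset);
	void (*register_write)(struct demo_dev *dev, unsigned int offset,
			       uint32_t value);
};

struct demo_dev {
	const struct demo_bus_ops *ops;
	uint32_t regs[16];              /* fake register file */
};

/* "SoC" backend: plain array access standing in for MMIO. */
static uint32_t soc_register_read(struct demo_dev *dev, unsigned int offset)
{
	return dev->regs[offset];
}

static void soc_register_write(struct demo_dev *dev, unsigned int offset,
			       uint32_t value)
{
	dev->regs[offset] = value;
}

static const struct demo_bus_ops soc_ops = {
	.register_read  = soc_register_read,
	.register_write = soc_register_write,
};

/* Generic helper: knows nothing about the bus, only about the ops table. */
static void demo_toggle_bit(struct demo_dev *dev, unsigned int offset,
			    uint32_t bit)
{
	uint32_t reg = dev->ops->register_read(dev, offset);

	dev->ops->register_write(dev, offset, reg ^ bit);
}

int main(void)
{
	struct demo_dev dev = { .ops = &soc_ops };

	demo_toggle_bit(&dev, 3, 0x10);
	printf("reg3 = 0x%08x\n", (unsigned int)dev.regs[3]);
	return 0;
}

A PCI or USB backend would only need to supply a different demo_bus_ops instance; demo_toggle_bit stays untouched, which is exactly the split the rt2x00 core relies on.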
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2x00mmio Abstract: rt2x00 generic mmio device routines. */ #include <linux/dma-mapping.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include "rt2x00.h" #include "rt2x00mmio.h" /* * Register access. */ int rt2x00mmio_regbusy_read(struct rt2x00_dev *rt2x00dev, const unsigned int offset, const struct rt2x00_field32 field, u32 *reg) { unsigned int i; if (!test_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags)) return 0; for (i = 0; i < REGISTER_BUSY_COUNT; i++) { *reg = rt2x00mmio_register_read(rt2x00dev, offset); if (!rt2x00_get_field32(*reg, field)) return 1; udelay(REGISTER_BUSY_DELAY); } printk_once(KERN_ERR "%s() Indirect register access failed: " "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg); *reg = ~0; return 0; } EXPORT_SYMBOL_GPL(rt2x00mmio_regbusy_read); bool rt2x00mmio_rxdone(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue = rt2x00dev->rx; struct queue_entry *entry; struct queue_entry_priv_mmio *entry_priv; struct skb_frame_desc *skbdesc; int max_rx = 16; while (--max_rx) { entry = rt2x00queue_get_entry(queue, Q_INDEX); entry_priv = entry->priv_data; if (rt2x00dev->ops->lib->get_entry_state(entry)) break; /* * Fill in desc fields of the skb descriptor */ skbdesc = get_skb_frame_desc(entry->skb); skbdesc->desc = entry_priv->desc; skbdesc->desc_len = entry->queue->desc_size; /* * DMA is already done, notify rt2x00lib that * it finished successfully. */ rt2x00lib_dmastart(entry); rt2x00lib_dmadone(entry); /* * Send the frame to rt2x00lib for further processing. */ rt2x00lib_rxdone(entry, GFP_ATOMIC); } return !max_rx; } EXPORT_SYMBOL_GPL(rt2x00mmio_rxdone); void rt2x00mmio_flush_queue(struct data_queue *queue, bool drop) { unsigned int i; for (i = 0; !rt2x00queue_empty(queue) && i < 10; i++) msleep(50); } EXPORT_SYMBOL_GPL(rt2x00mmio_flush_queue); /* * Device initialization handlers. */ static int rt2x00mmio_alloc_queue_dma(struct rt2x00_dev *rt2x00dev, struct data_queue *queue) { struct queue_entry_priv_mmio *entry_priv; void *addr; dma_addr_t dma; unsigned int i; /* * Allocate DMA memory for descriptor and buffer. */ addr = dma_alloc_coherent(rt2x00dev->dev, queue->limit * queue->desc_size, &dma, GFP_KERNEL); if (!addr) return -ENOMEM; /* * Initialize all queue entries to contain valid addresses. */ for (i = 0; i < queue->limit; i++) { entry_priv = queue->entries[i].priv_data; entry_priv->desc = addr + i * queue->desc_size; entry_priv->desc_dma = dma + i * queue->desc_size; } return 0; } static void rt2x00mmio_free_queue_dma(struct rt2x00_dev *rt2x00dev, struct data_queue *queue) { struct queue_entry_priv_mmio *entry_priv = queue->entries[0].priv_data; if (entry_priv->desc) dma_free_coherent(rt2x00dev->dev, queue->limit * queue->desc_size, entry_priv->desc, entry_priv->desc_dma); entry_priv->desc = NULL; } int rt2x00mmio_initialize(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; int status; /* * Allocate DMA */ queue_for_each(rt2x00dev, queue) { status = rt2x00mmio_alloc_queue_dma(rt2x00dev, queue); if (status) goto exit; } /* * Register interrupt handler. 
*/ status = request_irq(rt2x00dev->irq, rt2x00dev->ops->lib->irq_handler, IRQF_SHARED, rt2x00dev->name, rt2x00dev); if (status) { rt2x00_err(rt2x00dev, "IRQ %d allocation failed (error %d)\n", rt2x00dev->irq, status); goto exit; } return 0; exit: queue_for_each(rt2x00dev, queue) rt2x00mmio_free_queue_dma(rt2x00dev, queue); return status; } EXPORT_SYMBOL_GPL(rt2x00mmio_initialize); void rt2x00mmio_uninitialize(struct rt2x00_dev *rt2x00dev) { struct data_queue *queue; /* * Free irq line. */ free_irq(rt2x00dev->irq, rt2x00dev); /* * Free DMA */ queue_for_each(rt2x00dev, queue) rt2x00mmio_free_queue_dma(rt2x00dev, queue); } EXPORT_SYMBOL_GPL(rt2x00mmio_uninitialize); /* * rt2x00mmio module information. */ MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("rt2x00 mmio library"); MODULE_LICENSE("GPL");
linux-master
drivers/net/wireless/ralink/rt2x00/rt2x00mmio.c
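The reusable idea in rt2x00mmio.c is the bounded busy poll of rt2x00mmio_regbusy_read(): read the register, test the busy field, delay, and give up with ~0 after REGISTER_BUSY_COUNT attempts. Below is a hedged userspace sketch of that loop under assumed names (demo_*); the fake register simply deasserts its busy bit after a few reads:

#include <stdint.h>
#include <stdio.h>

#define DEMO_BUSY_COUNT 100          /* plays the role of REGISTER_BUSY_COUNT */
#define DEMO_BUSY_BIT   0x80000000u

/* Fake indirect register: reports busy for the first few reads, then settles. */
static uint32_t demo_register_read(void)
{
	static int reads;

	return (++reads < 5) ? (DEMO_BUSY_BIT | 0x1234) : 0x1234;
}

/* Bounded poll with the same success/failure convention as
 * rt2x00mmio_regbusy_read(): returns 1 and a valid value, or 0 and ~0. */
static int demo_regbusy_read(uint32_t *reg)
{
	for (unsigned int i = 0; i < DEMO_BUSY_COUNT; i++) {
		*reg = demo_register_read();
		if (!(*reg & DEMO_BUSY_BIT))
			return 1;
		/* the driver would udelay(REGISTER_BUSY_DELAY) here */
	}

	*reg = ~0u;
	return 0;
}

int main(void)
{
	uint32_t reg;

	if (demo_regbusy_read(&reg))
		printf("register settled, value 0x%08x\n", (unsigned int)reg);
	else
		printf("indirect register access failed\n");
	return 0;
}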
// SPDX-License-Identifier: GPL-2.0-or-later /* Copyright (C) 2004 - 2009 Ivo van Doorn <[email protected]> <http://rt2x00.serialmonkey.com> */ /* Module: rt2500pci Abstract: rt2500pci device specific routines. Supported chipsets: RT2560. */ #include <linux/delay.h> #include <linux/etherdevice.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/eeprom_93cx6.h> #include <linux/slab.h> #include "rt2x00.h" #include "rt2x00mmio.h" #include "rt2x00pci.h" #include "rt2500pci.h" /* * Register access. * All access to the CSR registers will go through the methods * rt2x00mmio_register_read and rt2x00mmio_register_write. * BBP and RF register require indirect register access, * and use the CSR registers BBPCSR and RFCSR to achieve this. * These indirect registers work with busy bits, * and we will try maximal REGISTER_BUSY_COUNT times to access * the register while taking a REGISTER_BUSY_DELAY us delay * between each attampt. When the busy bit is still set at that time, * the access attempt is considered to have failed, * and we will print an error. */ #define WAIT_FOR_BBP(__dev, __reg) \ rt2x00mmio_regbusy_read((__dev), BBPCSR, BBPCSR_BUSY, (__reg)) #define WAIT_FOR_RF(__dev, __reg) \ rt2x00mmio_regbusy_read((__dev), RFCSR, RFCSR_BUSY, (__reg)) static void rt2500pci_bbp_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u8 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the new data into the register. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, BBPCSR_VALUE, value); rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1); rt2x00mmio_register_write(rt2x00dev, BBPCSR, reg); } mutex_unlock(&rt2x00dev->csr_mutex); } static u8 rt2500pci_bbp_read(struct rt2x00_dev *rt2x00dev, const unsigned int word) { u32 reg; u8 value; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the BBP becomes available, afterwards we * can safely write the read request into the register. * After the data has been written, we wait until hardware * returns the correct value, if at any time the register * doesn't become available in time, reg will be 0xffffffff * which means we return 0xff to the caller. */ if (WAIT_FOR_BBP(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, BBPCSR_REGNUM, word); rt2x00_set_field32(&reg, BBPCSR_BUSY, 1); rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0); rt2x00mmio_register_write(rt2x00dev, BBPCSR, reg); WAIT_FOR_BBP(rt2x00dev, &reg); } value = rt2x00_get_field32(reg, BBPCSR_VALUE); mutex_unlock(&rt2x00dev->csr_mutex); return value; } static void rt2500pci_rf_write(struct rt2x00_dev *rt2x00dev, const unsigned int word, const u32 value) { u32 reg; mutex_lock(&rt2x00dev->csr_mutex); /* * Wait until the RF becomes available, afterwards we * can safely write the new data into the register. 
*/ if (WAIT_FOR_RF(rt2x00dev, &reg)) { reg = 0; rt2x00_set_field32(&reg, RFCSR_VALUE, value); rt2x00_set_field32(&reg, RFCSR_NUMBER_OF_BITS, 20); rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0); rt2x00_set_field32(&reg, RFCSR_BUSY, 1); rt2x00mmio_register_write(rt2x00dev, RFCSR, reg); rt2x00_rf_write(rt2x00dev, word, value); } mutex_unlock(&rt2x00dev->csr_mutex); } static void rt2500pci_eepromregister_read(struct eeprom_93cx6 *eeprom) { struct rt2x00_dev *rt2x00dev = eeprom->data; u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, CSR21); eeprom->reg_data_in = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_IN); eeprom->reg_data_out = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_OUT); eeprom->reg_data_clock = !!rt2x00_get_field32(reg, CSR21_EEPROM_DATA_CLOCK); eeprom->reg_chip_select = !!rt2x00_get_field32(reg, CSR21_EEPROM_CHIP_SELECT); } static void rt2500pci_eepromregister_write(struct eeprom_93cx6 *eeprom) { struct rt2x00_dev *rt2x00dev = eeprom->data; u32 reg = 0; rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_IN, !!eeprom->reg_data_in); rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_OUT, !!eeprom->reg_data_out); rt2x00_set_field32(&reg, CSR21_EEPROM_DATA_CLOCK, !!eeprom->reg_data_clock); rt2x00_set_field32(&reg, CSR21_EEPROM_CHIP_SELECT, !!eeprom->reg_chip_select); rt2x00mmio_register_write(rt2x00dev, CSR21, reg); } #ifdef CONFIG_RT2X00_LIB_DEBUGFS static const struct rt2x00debug rt2500pci_rt2x00debug = { .owner = THIS_MODULE, .csr = { .read = rt2x00mmio_register_read, .write = rt2x00mmio_register_write, .flags = RT2X00DEBUGFS_OFFSET, .word_base = CSR_REG_BASE, .word_size = sizeof(u32), .word_count = CSR_REG_SIZE / sizeof(u32), }, .eeprom = { .read = rt2x00_eeprom_read, .write = rt2x00_eeprom_write, .word_base = EEPROM_BASE, .word_size = sizeof(u16), .word_count = EEPROM_SIZE / sizeof(u16), }, .bbp = { .read = rt2500pci_bbp_read, .write = rt2500pci_bbp_write, .word_base = BBP_BASE, .word_size = sizeof(u8), .word_count = BBP_SIZE / sizeof(u8), }, .rf = { .read = rt2x00_rf_read, .write = rt2500pci_rf_write, .word_base = RF_BASE, .word_size = sizeof(u32), .word_count = RF_SIZE / sizeof(u32), }, }; #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ static int rt2500pci_rfkill_poll(struct rt2x00_dev *rt2x00dev) { u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, GPIOCSR); return rt2x00_get_field32(reg, GPIOCSR_VAL0); } #ifdef CONFIG_RT2X00_LIB_LEDS static void rt2500pci_brightness_set(struct led_classdev *led_cdev, enum led_brightness brightness) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); unsigned int enabled = brightness != LED_OFF; u32 reg; reg = rt2x00mmio_register_read(led->rt2x00dev, LEDCSR); if (led->type == LED_TYPE_RADIO || led->type == LED_TYPE_ASSOC) rt2x00_set_field32(&reg, LEDCSR_LINK, enabled); else if (led->type == LED_TYPE_ACTIVITY) rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, enabled); rt2x00mmio_register_write(led->rt2x00dev, LEDCSR, reg); } static int rt2500pci_blink_set(struct led_classdev *led_cdev, unsigned long *delay_on, unsigned long *delay_off) { struct rt2x00_led *led = container_of(led_cdev, struct rt2x00_led, led_dev); u32 reg; reg = rt2x00mmio_register_read(led->rt2x00dev, LEDCSR); rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, *delay_on); rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, *delay_off); rt2x00mmio_register_write(led->rt2x00dev, LEDCSR, reg); return 0; } static void rt2500pci_init_led(struct rt2x00_dev *rt2x00dev, struct rt2x00_led *led, enum led_type type) { led->rt2x00dev = rt2x00dev; led->type = type; led->led_dev.brightness_set = 
rt2500pci_brightness_set; led->led_dev.blink_set = rt2500pci_blink_set; led->flags = LED_INITIALIZED; } #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Configuration handlers. */ static void rt2500pci_config_filter(struct rt2x00_dev *rt2x00dev, const unsigned int filter_flags) { u32 reg; /* * Start configuration steps. * Note that the version error will always be dropped * and broadcast frames will always be accepted since * there is no filter for it at this time. */ reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0); rt2x00_set_field32(&reg, RXCSR0_DROP_CRC, !(filter_flags & FIF_FCSFAIL)); rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL, !(filter_flags & FIF_PLCPFAIL)); rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL, !(filter_flags & FIF_CONTROL)); rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags)); rt2x00_set_field32(&reg, RXCSR0_DROP_TODS, !test_bit(CONFIG_MONITORING, &rt2x00dev->flags) && !rt2x00dev->intf_ap_count); rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1); rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST, !(filter_flags & FIF_ALLMULTI)); rt2x00_set_field32(&reg, RXCSR0_DROP_BCAST, 0); rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg); } static void rt2500pci_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf, struct rt2x00intf_conf *conf, const unsigned int flags) { struct data_queue *queue = rt2x00dev->bcn; unsigned int bcn_preload; u32 reg; if (flags & CONFIG_UPDATE_TYPE) { /* * Enable beacon config */ bcn_preload = PREAMBLE + GET_DURATION(IEEE80211_HEADER, 20); reg = rt2x00mmio_register_read(rt2x00dev, BCNCSR1); rt2x00_set_field32(&reg, BCNCSR1_PRELOAD, bcn_preload); rt2x00_set_field32(&reg, BCNCSR1_BEACON_CWMIN, queue->cw_min); rt2x00mmio_register_write(rt2x00dev, BCNCSR1, reg); /* * Enable synchronisation. 
*/ reg = rt2x00mmio_register_read(rt2x00dev, CSR14); rt2x00_set_field32(&reg, CSR14_TSF_SYNC, conf->sync); rt2x00mmio_register_write(rt2x00dev, CSR14, reg); } if (flags & CONFIG_UPDATE_MAC) rt2x00mmio_register_multiwrite(rt2x00dev, CSR3, conf->mac, sizeof(conf->mac)); if (flags & CONFIG_UPDATE_BSSID) rt2x00mmio_register_multiwrite(rt2x00dev, CSR5, conf->bssid, sizeof(conf->bssid)); } static void rt2500pci_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp, u32 changed) { int preamble_mask; u32 reg; /* * When short preamble is enabled, we should set bit 0x08 */ if (changed & BSS_CHANGED_ERP_PREAMBLE) { preamble_mask = erp->short_preamble << 3; reg = rt2x00mmio_register_read(rt2x00dev, TXCSR1); rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, 0x162); rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, 0xa2); rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, IEEE80211_HEADER); rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1); rt2x00mmio_register_write(rt2x00dev, TXCSR1, reg); reg = rt2x00mmio_register_read(rt2x00dev, ARCSR2); rt2x00_set_field32(&reg, ARCSR2_SIGNAL, 0x00); rt2x00_set_field32(&reg, ARCSR2_SERVICE, 0x04); rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 10)); rt2x00mmio_register_write(rt2x00dev, ARCSR2, reg); reg = rt2x00mmio_register_read(rt2x00dev, ARCSR3); rt2x00_set_field32(&reg, ARCSR3_SIGNAL, 0x01 | preamble_mask); rt2x00_set_field32(&reg, ARCSR3_SERVICE, 0x04); rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 20)); rt2x00mmio_register_write(rt2x00dev, ARCSR3, reg); reg = rt2x00mmio_register_read(rt2x00dev, ARCSR4); rt2x00_set_field32(&reg, ARCSR4_SIGNAL, 0x02 | preamble_mask); rt2x00_set_field32(&reg, ARCSR4_SERVICE, 0x04); rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 55)); rt2x00mmio_register_write(rt2x00dev, ARCSR4, reg); reg = rt2x00mmio_register_read(rt2x00dev, ARCSR5); rt2x00_set_field32(&reg, ARCSR5_SIGNAL, 0x03 | preamble_mask); rt2x00_set_field32(&reg, ARCSR5_SERVICE, 0x84); rt2x00_set_field32(&reg, ARCSR2_LENGTH, GET_DURATION(ACK_SIZE, 110)); rt2x00mmio_register_write(rt2x00dev, ARCSR5, reg); } if (changed & BSS_CHANGED_BASIC_RATES) rt2x00mmio_register_write(rt2x00dev, ARCSR1, erp->basic_rates); if (changed & BSS_CHANGED_ERP_SLOT) { reg = rt2x00mmio_register_read(rt2x00dev, CSR11); rt2x00_set_field32(&reg, CSR11_SLOT_TIME, erp->slot_time); rt2x00mmio_register_write(rt2x00dev, CSR11, reg); reg = rt2x00mmio_register_read(rt2x00dev, CSR18); rt2x00_set_field32(&reg, CSR18_SIFS, erp->sifs); rt2x00_set_field32(&reg, CSR18_PIFS, erp->pifs); rt2x00mmio_register_write(rt2x00dev, CSR18, reg); reg = rt2x00mmio_register_read(rt2x00dev, CSR19); rt2x00_set_field32(&reg, CSR19_DIFS, erp->difs); rt2x00_set_field32(&reg, CSR19_EIFS, erp->eifs); rt2x00mmio_register_write(rt2x00dev, CSR19, reg); } if (changed & BSS_CHANGED_BEACON_INT) { reg = rt2x00mmio_register_read(rt2x00dev, CSR12); rt2x00_set_field32(&reg, CSR12_BEACON_INTERVAL, erp->beacon_int * 16); rt2x00_set_field32(&reg, CSR12_CFP_MAX_DURATION, erp->beacon_int * 16); rt2x00mmio_register_write(rt2x00dev, CSR12, reg); } } static void rt2500pci_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant) { u32 reg; u8 r14; u8 r2; /* * We should never come here because rt2x00lib is supposed * to catch this and send us the correct antenna explicitely. 
*/ BUG_ON(ant->rx == ANTENNA_SW_DIVERSITY || ant->tx == ANTENNA_SW_DIVERSITY); reg = rt2x00mmio_register_read(rt2x00dev, BBPCSR1); r14 = rt2500pci_bbp_read(rt2x00dev, 14); r2 = rt2500pci_bbp_read(rt2x00dev, 2); /* * Configure the TX antenna. */ switch (ant->tx) { case ANTENNA_A: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 0); rt2x00_set_field32(&reg, BBPCSR1_CCK, 0); rt2x00_set_field32(&reg, BBPCSR1_OFDM, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r2, BBP_R2_TX_ANTENNA, 2); rt2x00_set_field32(&reg, BBPCSR1_CCK, 2); rt2x00_set_field32(&reg, BBPCSR1_OFDM, 2); break; } /* * Configure the RX antenna. */ switch (ant->rx) { case ANTENNA_A: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 0); break; case ANTENNA_B: default: rt2x00_set_field8(&r14, BBP_R14_RX_ANTENNA, 2); break; } /* * RT2525E and RT5222 need to flip TX I/Q */ if (rt2x00_rf(rt2x00dev, RF2525E) || rt2x00_rf(rt2x00dev, RF5222)) { rt2x00_set_field8(&r2, BBP_R2_TX_IQ_FLIP, 1); rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 1); rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 1); /* * RT2525E does not need RX I/Q Flip. */ if (rt2x00_rf(rt2x00dev, RF2525E)) rt2x00_set_field8(&r14, BBP_R14_RX_IQ_FLIP, 0); } else { rt2x00_set_field32(&reg, BBPCSR1_CCK_FLIP, 0); rt2x00_set_field32(&reg, BBPCSR1_OFDM_FLIP, 0); } rt2x00mmio_register_write(rt2x00dev, BBPCSR1, reg); rt2500pci_bbp_write(rt2x00dev, 14, r14); rt2500pci_bbp_write(rt2x00dev, 2, r2); } static void rt2500pci_config_channel(struct rt2x00_dev *rt2x00dev, struct rf_channel *rf, const int txpower) { u8 r70; /* * Set TXpower. */ rt2x00_set_field32(&rf->rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); /* * Switch on tuning bits. * For RT2523 devices we do not need to update the R1 register. */ if (!rt2x00_rf(rt2x00dev, RF2523)) rt2x00_set_field32(&rf->rf1, RF1_TUNER, 1); rt2x00_set_field32(&rf->rf3, RF3_TUNER, 1); /* * For RT2525 we should first set the channel to half band higher. */ if (rt2x00_rf(rt2x00dev, RF2525)) { static const u32 vals[] = { 0x00080cbe, 0x00080d02, 0x00080d06, 0x00080d0a, 0x00080d0e, 0x00080d12, 0x00080d16, 0x00080d1a, 0x00080d1e, 0x00080d22, 0x00080d26, 0x00080d2a, 0x00080d2e, 0x00080d3a }; rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); rt2500pci_rf_write(rt2x00dev, 2, vals[rf->channel - 1]); rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); if (rf->rf4) rt2500pci_rf_write(rt2x00dev, 4, rf->rf4); } rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); rt2500pci_rf_write(rt2x00dev, 2, rf->rf2); rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); if (rf->rf4) rt2500pci_rf_write(rt2x00dev, 4, rf->rf4); /* * Channel 14 requires the Japan filter bit to be set. */ r70 = 0x46; rt2x00_set_field8(&r70, BBP_R70_JAPAN_FILTER, rf->channel == 14); rt2500pci_bbp_write(rt2x00dev, 70, r70); msleep(1); /* * Switch off tuning bits. * For RT2523 devices we do not need to update the R1 register. */ if (!rt2x00_rf(rt2x00dev, RF2523)) { rt2x00_set_field32(&rf->rf1, RF1_TUNER, 0); rt2500pci_rf_write(rt2x00dev, 1, rf->rf1); } rt2x00_set_field32(&rf->rf3, RF3_TUNER, 0); rt2500pci_rf_write(rt2x00dev, 3, rf->rf3); /* * Clear false CRC during channel switch. 
*/ rf->rf1 = rt2x00mmio_register_read(rt2x00dev, CNT0); } static void rt2500pci_config_txpower(struct rt2x00_dev *rt2x00dev, const int txpower) { u32 rf3; rf3 = rt2x00_rf_read(rt2x00dev, 3); rt2x00_set_field32(&rf3, RF3_TXPOWER, TXPOWER_TO_DEV(txpower)); rt2500pci_rf_write(rt2x00dev, 3, rf3); } static void rt2500pci_config_retry_limit(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, CSR11); rt2x00_set_field32(&reg, CSR11_LONG_RETRY, libconf->conf->long_frame_max_tx_count); rt2x00_set_field32(&reg, CSR11_SHORT_RETRY, libconf->conf->short_frame_max_tx_count); rt2x00mmio_register_write(rt2x00dev, CSR11, reg); } static void rt2500pci_config_ps(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf) { enum dev_state state = (libconf->conf->flags & IEEE80211_CONF_PS) ? STATE_SLEEP : STATE_AWAKE; u32 reg; if (state == STATE_SLEEP) { reg = rt2x00mmio_register_read(rt2x00dev, CSR20); rt2x00_set_field32(&reg, CSR20_DELAY_AFTER_TBCN, (rt2x00dev->beacon_int - 20) * 16); rt2x00_set_field32(&reg, CSR20_TBCN_BEFORE_WAKEUP, libconf->conf->listen_interval - 1); /* We must first disable autowake before it can be enabled */ rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0); rt2x00mmio_register_write(rt2x00dev, CSR20, reg); rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 1); rt2x00mmio_register_write(rt2x00dev, CSR20, reg); } else { reg = rt2x00mmio_register_read(rt2x00dev, CSR20); rt2x00_set_field32(&reg, CSR20_AUTOWAKE, 0); rt2x00mmio_register_write(rt2x00dev, CSR20, reg); } rt2x00dev->ops->lib->set_device_state(rt2x00dev, state); } static void rt2500pci_config(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_conf *libconf, const unsigned int flags) { if (flags & IEEE80211_CONF_CHANGE_CHANNEL) rt2500pci_config_channel(rt2x00dev, &libconf->rf, libconf->conf->power_level); if ((flags & IEEE80211_CONF_CHANGE_POWER) && !(flags & IEEE80211_CONF_CHANGE_CHANNEL)) rt2500pci_config_txpower(rt2x00dev, libconf->conf->power_level); if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS) rt2500pci_config_retry_limit(rt2x00dev, libconf); if (flags & IEEE80211_CONF_CHANGE_PS) rt2500pci_config_ps(rt2x00dev, libconf); } /* * Link tuning */ static void rt2500pci_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { u32 reg; /* * Update FCS error count from register. */ reg = rt2x00mmio_register_read(rt2x00dev, CNT0); qual->rx_failed = rt2x00_get_field32(reg, CNT0_FCS_ERROR); /* * Update False CCA count from register. */ reg = rt2x00mmio_register_read(rt2x00dev, CNT3); qual->false_cca = rt2x00_get_field32(reg, CNT3_FALSE_CCA); } static inline void rt2500pci_set_vgc(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, u8 vgc_level) { if (qual->vgc_level_reg != vgc_level) { rt2500pci_bbp_write(rt2x00dev, 17, vgc_level); qual->vgc_level = vgc_level; qual->vgc_level_reg = vgc_level; } } static void rt2500pci_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual) { rt2500pci_set_vgc(rt2x00dev, qual, 0x48); } static void rt2500pci_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual, const u32 count) { /* * To prevent collisions with MAC ASIC on chipsets * up to version C the link tuning should halt after 20 * seconds while being associated. */ if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D && rt2x00dev->intf_associated && count > 20) return; /* * Chipset versions C and lower should directly continue * to the dynamic CCA tuning. Chipset version D and higher * should go straight to dynamic CCA tuning when they * are not associated. 
*/ if (rt2x00_rev(rt2x00dev) < RT2560_VERSION_D || !rt2x00dev->intf_associated) goto dynamic_cca_tune; /* * A too low RSSI will cause too much false CCA which will * then corrupt the R17 tuning. To remidy this the tuning should * be stopped (While making sure the R17 value will not exceed limits) */ if (qual->rssi < -80 && count > 20) { if (qual->vgc_level_reg >= 0x41) rt2500pci_set_vgc(rt2x00dev, qual, qual->vgc_level); return; } /* * Special big-R17 for short distance */ if (qual->rssi >= -58) { rt2500pci_set_vgc(rt2x00dev, qual, 0x50); return; } /* * Special mid-R17 for middle distance */ if (qual->rssi >= -74) { rt2500pci_set_vgc(rt2x00dev, qual, 0x41); return; } /* * Leave short or middle distance condition, restore r17 * to the dynamic tuning range. */ if (qual->vgc_level_reg >= 0x41) { rt2500pci_set_vgc(rt2x00dev, qual, qual->vgc_level); return; } dynamic_cca_tune: /* * R17 is inside the dynamic tuning range, * start tuning the link based on the false cca counter. */ if (qual->false_cca > 512 && qual->vgc_level_reg < 0x40) rt2500pci_set_vgc(rt2x00dev, qual, ++qual->vgc_level_reg); else if (qual->false_cca < 100 && qual->vgc_level_reg > 0x32) rt2500pci_set_vgc(rt2x00dev, qual, --qual->vgc_level_reg); } /* * Queue handlers. */ static void rt2500pci_start_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_RX: reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0); rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0); rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg); break; case QID_BEACON: reg = rt2x00mmio_register_read(rt2x00dev, CSR14); rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 1); rt2x00_set_field32(&reg, CSR14_TBCN, 1); rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); rt2x00mmio_register_write(rt2x00dev, CSR14, reg); break; default: break; } } static void rt2500pci_kick_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_AC_VO: reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0); rt2x00_set_field32(&reg, TXCSR0_KICK_PRIO, 1); rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg); break; case QID_AC_VI: reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0); rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1); rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg); break; case QID_ATIM: reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0); rt2x00_set_field32(&reg, TXCSR0_KICK_ATIM, 1); rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg); break; default: break; } } static void rt2500pci_stop_queue(struct data_queue *queue) { struct rt2x00_dev *rt2x00dev = queue->rt2x00dev; u32 reg; switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: case QID_ATIM: reg = rt2x00mmio_register_read(rt2x00dev, TXCSR0); rt2x00_set_field32(&reg, TXCSR0_ABORT, 1); rt2x00mmio_register_write(rt2x00dev, TXCSR0, reg); break; case QID_RX: reg = rt2x00mmio_register_read(rt2x00dev, RXCSR0); rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1); rt2x00mmio_register_write(rt2x00dev, RXCSR0, reg); break; case QID_BEACON: reg = rt2x00mmio_register_read(rt2x00dev, CSR14); rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0); rt2x00_set_field32(&reg, CSR14_TBCN, 0); rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); rt2x00mmio_register_write(rt2x00dev, CSR14, reg); /* * Wait for possibly running tbtt tasklets. */ tasklet_kill(&rt2x00dev->tbtt_tasklet); break; default: break; } } /* * Initialization functions. 
*/ static bool rt2500pci_get_entry_state(struct queue_entry *entry) { struct queue_entry_priv_mmio *entry_priv = entry->priv_data; u32 word; if (entry->queue->qid == QID_RX) { word = rt2x00_desc_read(entry_priv->desc, 0); return rt2x00_get_field32(word, RXD_W0_OWNER_NIC); } else { word = rt2x00_desc_read(entry_priv->desc, 0); return (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || rt2x00_get_field32(word, TXD_W0_VALID)); } } static void rt2500pci_clear_entry(struct queue_entry *entry) { struct queue_entry_priv_mmio *entry_priv = entry->priv_data; struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); u32 word; if (entry->queue->qid == QID_RX) { word = rt2x00_desc_read(entry_priv->desc, 1); rt2x00_set_field32(&word, RXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); rt2x00_desc_write(entry_priv->desc, 1, word); word = rt2x00_desc_read(entry_priv->desc, 0); rt2x00_set_field32(&word, RXD_W0_OWNER_NIC, 1); rt2x00_desc_write(entry_priv->desc, 0, word); } else { word = rt2x00_desc_read(entry_priv->desc, 0); rt2x00_set_field32(&word, TXD_W0_VALID, 0); rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 0); rt2x00_desc_write(entry_priv->desc, 0, word); } } static int rt2500pci_init_queues(struct rt2x00_dev *rt2x00dev) { struct queue_entry_priv_mmio *entry_priv; u32 reg; /* * Initialize registers. */ reg = rt2x00mmio_register_read(rt2x00dev, TXCSR2); rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00dev->tx[0].desc_size); rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00dev->tx[1].limit); rt2x00_set_field32(&reg, TXCSR2_NUM_ATIM, rt2x00dev->atim->limit); rt2x00_set_field32(&reg, TXCSR2_NUM_PRIO, rt2x00dev->tx[0].limit); rt2x00mmio_register_write(rt2x00dev, TXCSR2, reg); entry_priv = rt2x00dev->tx[1].entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, TXCSR3); rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TXCSR3, reg); entry_priv = rt2x00dev->tx[0].entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, TXCSR5); rt2x00_set_field32(&reg, TXCSR5_PRIO_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TXCSR5, reg); entry_priv = rt2x00dev->atim->entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, TXCSR4); rt2x00_set_field32(&reg, TXCSR4_ATIM_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TXCSR4, reg); entry_priv = rt2x00dev->bcn->entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, TXCSR6); rt2x00_set_field32(&reg, TXCSR6_BEACON_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, TXCSR6, reg); reg = rt2x00mmio_register_read(rt2x00dev, RXCSR1); rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00dev->rx->desc_size); rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00dev->rx->limit); rt2x00mmio_register_write(rt2x00dev, RXCSR1, reg); entry_priv = rt2x00dev->rx->entries[0].priv_data; reg = rt2x00mmio_register_read(rt2x00dev, RXCSR2); rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER, entry_priv->desc_dma); rt2x00mmio_register_write(rt2x00dev, RXCSR2, reg); return 0; } static int rt2500pci_init_registers(struct rt2x00_dev *rt2x00dev) { u32 reg; rt2x00mmio_register_write(rt2x00dev, PSCSR0, 0x00020002); rt2x00mmio_register_write(rt2x00dev, PSCSR1, 0x00000002); rt2x00mmio_register_write(rt2x00dev, PSCSR2, 0x00020002); rt2x00mmio_register_write(rt2x00dev, PSCSR3, 0x00000002); reg = rt2x00mmio_register_read(rt2x00dev, TIMECSR); rt2x00_set_field32(&reg, TIMECSR_US_COUNT, 33); rt2x00_set_field32(&reg, TIMECSR_US_64_COUNT, 63); rt2x00_set_field32(&reg, 
TIMECSR_BEACON_EXPECT, 0); rt2x00mmio_register_write(rt2x00dev, TIMECSR, reg); reg = rt2x00mmio_register_read(rt2x00dev, CSR9); rt2x00_set_field32(&reg, CSR9_MAX_FRAME_UNIT, rt2x00dev->rx->data_size / 128); rt2x00mmio_register_write(rt2x00dev, CSR9, reg); /* * Always use CWmin and CWmax set in descriptor. */ reg = rt2x00mmio_register_read(rt2x00dev, CSR11); rt2x00_set_field32(&reg, CSR11_CW_SELECT, 0); rt2x00mmio_register_write(rt2x00dev, CSR11, reg); reg = rt2x00mmio_register_read(rt2x00dev, CSR14); rt2x00_set_field32(&reg, CSR14_TSF_COUNT, 0); rt2x00_set_field32(&reg, CSR14_TSF_SYNC, 0); rt2x00_set_field32(&reg, CSR14_TBCN, 0); rt2x00_set_field32(&reg, CSR14_TCFP, 0); rt2x00_set_field32(&reg, CSR14_TATIMW, 0); rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); rt2x00_set_field32(&reg, CSR14_CFP_COUNT_PRELOAD, 0); rt2x00_set_field32(&reg, CSR14_TBCM_PRELOAD, 0); rt2x00mmio_register_write(rt2x00dev, CSR14, reg); rt2x00mmio_register_write(rt2x00dev, CNT3, 0); reg = rt2x00mmio_register_read(rt2x00dev, TXCSR8); rt2x00_set_field32(&reg, TXCSR8_BBP_ID0, 10); rt2x00_set_field32(&reg, TXCSR8_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, TXCSR8_BBP_ID1, 11); rt2x00_set_field32(&reg, TXCSR8_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, TXCSR8_BBP_ID2, 13); rt2x00_set_field32(&reg, TXCSR8_BBP_ID2_VALID, 1); rt2x00_set_field32(&reg, TXCSR8_BBP_ID3, 12); rt2x00_set_field32(&reg, TXCSR8_BBP_ID3_VALID, 1); rt2x00mmio_register_write(rt2x00dev, TXCSR8, reg); reg = rt2x00mmio_register_read(rt2x00dev, ARTCSR0); rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_1MBS, 112); rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_2MBS, 56); rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_5_5MBS, 20); rt2x00_set_field32(&reg, ARTCSR0_ACK_CTS_11MBS, 10); rt2x00mmio_register_write(rt2x00dev, ARTCSR0, reg); reg = rt2x00mmio_register_read(rt2x00dev, ARTCSR1); rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_6MBS, 45); rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_9MBS, 37); rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_12MBS, 33); rt2x00_set_field32(&reg, ARTCSR1_ACK_CTS_18MBS, 29); rt2x00mmio_register_write(rt2x00dev, ARTCSR1, reg); reg = rt2x00mmio_register_read(rt2x00dev, ARTCSR2); rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_24MBS, 29); rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_36MBS, 25); rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_48MBS, 25); rt2x00_set_field32(&reg, ARTCSR2_ACK_CTS_54MBS, 25); rt2x00mmio_register_write(rt2x00dev, ARTCSR2, reg); reg = rt2x00mmio_register_read(rt2x00dev, RXCSR3); rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 47); /* CCK Signal */ rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1); rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 51); /* Rssi */ rt2x00_set_field32(&reg, RXCSR3_BBP_ID1_VALID, 1); rt2x00_set_field32(&reg, RXCSR3_BBP_ID2, 42); /* OFDM Rate */ rt2x00_set_field32(&reg, RXCSR3_BBP_ID2_VALID, 1); rt2x00_set_field32(&reg, RXCSR3_BBP_ID3, 51); /* RSSI */ rt2x00_set_field32(&reg, RXCSR3_BBP_ID3_VALID, 1); rt2x00mmio_register_write(rt2x00dev, RXCSR3, reg); reg = rt2x00mmio_register_read(rt2x00dev, PCICSR); rt2x00_set_field32(&reg, PCICSR_BIG_ENDIAN, 0); rt2x00_set_field32(&reg, PCICSR_RX_TRESHOLD, 0); rt2x00_set_field32(&reg, PCICSR_TX_TRESHOLD, 3); rt2x00_set_field32(&reg, PCICSR_BURST_LENTH, 1); rt2x00_set_field32(&reg, PCICSR_ENABLE_CLK, 1); rt2x00_set_field32(&reg, PCICSR_READ_MULTIPLE, 1); rt2x00_set_field32(&reg, PCICSR_WRITE_INVALID, 1); rt2x00mmio_register_write(rt2x00dev, PCICSR, reg); rt2x00mmio_register_write(rt2x00dev, PWRCSR0, 0x3f3b3100); rt2x00mmio_register_write(rt2x00dev, GPIOCSR, 0x0000ff00); rt2x00mmio_register_write(rt2x00dev, 
TESTCSR, 0x000000f0); if (rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_AWAKE)) return -EBUSY; rt2x00mmio_register_write(rt2x00dev, MACCSR0, 0x00213223); rt2x00mmio_register_write(rt2x00dev, MACCSR1, 0x00235518); reg = rt2x00mmio_register_read(rt2x00dev, MACCSR2); rt2x00_set_field32(&reg, MACCSR2_DELAY, 64); rt2x00mmio_register_write(rt2x00dev, MACCSR2, reg); reg = rt2x00mmio_register_read(rt2x00dev, RALINKCSR); rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17); rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 26); rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID0, 1); rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA1, 0); rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID1, 26); rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID1, 1); rt2x00mmio_register_write(rt2x00dev, RALINKCSR, reg); rt2x00mmio_register_write(rt2x00dev, BBPCSR1, 0x82188200); rt2x00mmio_register_write(rt2x00dev, TXACKCSR0, 0x00000020); reg = rt2x00mmio_register_read(rt2x00dev, CSR1); rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1); rt2x00_set_field32(&reg, CSR1_BBP_RESET, 0); rt2x00_set_field32(&reg, CSR1_HOST_READY, 0); rt2x00mmio_register_write(rt2x00dev, CSR1, reg); reg = rt2x00mmio_register_read(rt2x00dev, CSR1); rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 0); rt2x00_set_field32(&reg, CSR1_HOST_READY, 1); rt2x00mmio_register_write(rt2x00dev, CSR1, reg); /* * We must clear the FCS and FIFO error count. * These registers are cleared on read, * so we may pass a useless variable to store the value. */ reg = rt2x00mmio_register_read(rt2x00dev, CNT0); reg = rt2x00mmio_register_read(rt2x00dev, CNT4); return 0; } static int rt2500pci_wait_bbp_ready(struct rt2x00_dev *rt2x00dev) { unsigned int i; u8 value; for (i = 0; i < REGISTER_BUSY_COUNT; i++) { value = rt2500pci_bbp_read(rt2x00dev, 0); if ((value != 0xff) && (value != 0x00)) return 0; udelay(REGISTER_BUSY_DELAY); } rt2x00_err(rt2x00dev, "BBP register access failed, aborting\n"); return -EACCES; } static int rt2500pci_init_bbp(struct rt2x00_dev *rt2x00dev) { unsigned int i; u16 eeprom; u8 reg_id; u8 value; if (unlikely(rt2500pci_wait_bbp_ready(rt2x00dev))) return -EACCES; rt2500pci_bbp_write(rt2x00dev, 3, 0x02); rt2500pci_bbp_write(rt2x00dev, 4, 0x19); rt2500pci_bbp_write(rt2x00dev, 14, 0x1c); rt2500pci_bbp_write(rt2x00dev, 15, 0x30); rt2500pci_bbp_write(rt2x00dev, 16, 0xac); rt2500pci_bbp_write(rt2x00dev, 18, 0x18); rt2500pci_bbp_write(rt2x00dev, 19, 0xff); rt2500pci_bbp_write(rt2x00dev, 20, 0x1e); rt2500pci_bbp_write(rt2x00dev, 21, 0x08); rt2500pci_bbp_write(rt2x00dev, 22, 0x08); rt2500pci_bbp_write(rt2x00dev, 23, 0x08); rt2500pci_bbp_write(rt2x00dev, 24, 0x70); rt2500pci_bbp_write(rt2x00dev, 25, 0x40); rt2500pci_bbp_write(rt2x00dev, 26, 0x08); rt2500pci_bbp_write(rt2x00dev, 27, 0x23); rt2500pci_bbp_write(rt2x00dev, 30, 0x10); rt2500pci_bbp_write(rt2x00dev, 31, 0x2b); rt2500pci_bbp_write(rt2x00dev, 32, 0xb9); rt2500pci_bbp_write(rt2x00dev, 34, 0x12); rt2500pci_bbp_write(rt2x00dev, 35, 0x50); rt2500pci_bbp_write(rt2x00dev, 39, 0xc4); rt2500pci_bbp_write(rt2x00dev, 40, 0x02); rt2500pci_bbp_write(rt2x00dev, 41, 0x60); rt2500pci_bbp_write(rt2x00dev, 53, 0x10); rt2500pci_bbp_write(rt2x00dev, 54, 0x18); rt2500pci_bbp_write(rt2x00dev, 56, 0x08); rt2500pci_bbp_write(rt2x00dev, 57, 0x10); rt2500pci_bbp_write(rt2x00dev, 58, 0x08); rt2500pci_bbp_write(rt2x00dev, 61, 0x6d); rt2500pci_bbp_write(rt2x00dev, 62, 0x10); for (i = 0; i < EEPROM_BBP_SIZE; i++) { eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i); if (eeprom != 0xffff && eeprom != 0x0000) { reg_id = 
rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID); value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE); rt2500pci_bbp_write(rt2x00dev, reg_id, value); } } return 0; } /* * Device state switch handlers. */ static void rt2500pci_toggle_irq(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int mask = (state == STATE_RADIO_IRQ_OFF); u32 reg; unsigned long flags; /* * When interrupts are being enabled, the interrupt registers * should clear the register to assure a clean state. */ if (state == STATE_RADIO_IRQ_ON) { reg = rt2x00mmio_register_read(rt2x00dev, CSR7); rt2x00mmio_register_write(rt2x00dev, CSR7, reg); } /* * Only toggle the interrupts bits we are going to use. * Non-checked interrupt bits are disabled by default. */ spin_lock_irqsave(&rt2x00dev->irqmask_lock, flags); reg = rt2x00mmio_register_read(rt2x00dev, CSR8); rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, mask); rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, mask); rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, mask); rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, mask); rt2x00_set_field32(&reg, CSR8_RXDONE, mask); rt2x00mmio_register_write(rt2x00dev, CSR8, reg); spin_unlock_irqrestore(&rt2x00dev->irqmask_lock, flags); if (state == STATE_RADIO_IRQ_OFF) { /* * Ensure that all tasklets are finished. */ tasklet_kill(&rt2x00dev->txstatus_tasklet); tasklet_kill(&rt2x00dev->rxdone_tasklet); tasklet_kill(&rt2x00dev->tbtt_tasklet); } } static int rt2500pci_enable_radio(struct rt2x00_dev *rt2x00dev) { /* * Initialize all registers. */ if (unlikely(rt2500pci_init_queues(rt2x00dev) || rt2500pci_init_registers(rt2x00dev) || rt2500pci_init_bbp(rt2x00dev))) return -EIO; return 0; } static void rt2500pci_disable_radio(struct rt2x00_dev *rt2x00dev) { /* * Disable power */ rt2x00mmio_register_write(rt2x00dev, PWRCSR0, 0); } static int rt2500pci_set_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { u32 reg, reg2; unsigned int i; bool put_to_sleep; u8 bbp_state; u8 rf_state; put_to_sleep = (state != STATE_AWAKE); reg = rt2x00mmio_register_read(rt2x00dev, PWRCSR1); rt2x00_set_field32(&reg, PWRCSR1_SET_STATE, 1); rt2x00_set_field32(&reg, PWRCSR1_BBP_DESIRE_STATE, state); rt2x00_set_field32(&reg, PWRCSR1_RF_DESIRE_STATE, state); rt2x00_set_field32(&reg, PWRCSR1_PUT_TO_SLEEP, put_to_sleep); rt2x00mmio_register_write(rt2x00dev, PWRCSR1, reg); /* * Device is not guaranteed to be in the requested state yet. * We must wait until the register indicates that the * device has entered the correct state. 
*/ for (i = 0; i < REGISTER_BUSY_COUNT; i++) { reg2 = rt2x00mmio_register_read(rt2x00dev, PWRCSR1); bbp_state = rt2x00_get_field32(reg2, PWRCSR1_BBP_CURR_STATE); rf_state = rt2x00_get_field32(reg2, PWRCSR1_RF_CURR_STATE); if (bbp_state == state && rf_state == state) return 0; rt2x00mmio_register_write(rt2x00dev, PWRCSR1, reg); msleep(10); } return -EBUSY; } static int rt2500pci_set_device_state(struct rt2x00_dev *rt2x00dev, enum dev_state state) { int retval = 0; switch (state) { case STATE_RADIO_ON: retval = rt2500pci_enable_radio(rt2x00dev); break; case STATE_RADIO_OFF: rt2500pci_disable_radio(rt2x00dev); break; case STATE_RADIO_IRQ_ON: case STATE_RADIO_IRQ_OFF: rt2500pci_toggle_irq(rt2x00dev, state); break; case STATE_DEEP_SLEEP: case STATE_SLEEP: case STATE_STANDBY: case STATE_AWAKE: retval = rt2500pci_set_state(rt2x00dev, state); break; default: retval = -ENOTSUPP; break; } if (unlikely(retval)) rt2x00_err(rt2x00dev, "Device failed to enter state %d (%d)\n", state, retval); return retval; } /* * TX descriptor initialization */ static void rt2500pci_write_tx_desc(struct queue_entry *entry, struct txentry_desc *txdesc) { struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb); struct queue_entry_priv_mmio *entry_priv = entry->priv_data; __le32 *txd = entry_priv->desc; u32 word; /* * Start writing the descriptor words. */ word = rt2x00_desc_read(txd, 1); rt2x00_set_field32(&word, TXD_W1_BUFFER_ADDRESS, skbdesc->skb_dma); rt2x00_desc_write(txd, 1, word); word = rt2x00_desc_read(txd, 2); rt2x00_set_field32(&word, TXD_W2_IV_OFFSET, IEEE80211_HEADER); rt2x00_set_field32(&word, TXD_W2_AIFS, entry->queue->aifs); rt2x00_set_field32(&word, TXD_W2_CWMIN, entry->queue->cw_min); rt2x00_set_field32(&word, TXD_W2_CWMAX, entry->queue->cw_max); rt2x00_desc_write(txd, 2, word); word = rt2x00_desc_read(txd, 3); rt2x00_set_field32(&word, TXD_W3_PLCP_SIGNAL, txdesc->u.plcp.signal); rt2x00_set_field32(&word, TXD_W3_PLCP_SERVICE, txdesc->u.plcp.service); rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_LOW, txdesc->u.plcp.length_low); rt2x00_set_field32(&word, TXD_W3_PLCP_LENGTH_HIGH, txdesc->u.plcp.length_high); rt2x00_desc_write(txd, 3, word); word = rt2x00_desc_read(txd, 10); rt2x00_set_field32(&word, TXD_W10_RTS, test_bit(ENTRY_TXD_RTS_FRAME, &txdesc->flags)); rt2x00_desc_write(txd, 10, word); /* * Writing TXD word 0 must the last to prevent a race condition with * the device, whereby the device may take hold of the TXD before we * finished updating it. */ word = rt2x00_desc_read(txd, 0); rt2x00_set_field32(&word, TXD_W0_OWNER_NIC, 1); rt2x00_set_field32(&word, TXD_W0_VALID, 1); rt2x00_set_field32(&word, TXD_W0_MORE_FRAG, test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_ACK, test_bit(ENTRY_TXD_ACK, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_TIMESTAMP, test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_OFDM, (txdesc->rate_mode == RATE_MODE_OFDM)); rt2x00_set_field32(&word, TXD_W0_CIPHER_OWNER, 1); rt2x00_set_field32(&word, TXD_W0_IFS, txdesc->u.plcp.ifs); rt2x00_set_field32(&word, TXD_W0_RETRY_MODE, test_bit(ENTRY_TXD_RETRY_MODE, &txdesc->flags)); rt2x00_set_field32(&word, TXD_W0_DATABYTE_COUNT, txdesc->length); rt2x00_set_field32(&word, TXD_W0_CIPHER_ALG, CIPHER_NONE); rt2x00_desc_write(txd, 0, word); /* * Register descriptor details in skb frame descriptor. 
*/ skbdesc->desc = txd; skbdesc->desc_len = TXD_DESC_SIZE; } /* * TX data initialization */ static void rt2500pci_write_beacon(struct queue_entry *entry, struct txentry_desc *txdesc) { struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev; u32 reg; /* * Disable beaconing while we are reloading the beacon data, * otherwise we might be sending out invalid data. */ reg = rt2x00mmio_register_read(rt2x00dev, CSR14); rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 0); rt2x00mmio_register_write(rt2x00dev, CSR14, reg); if (rt2x00queue_map_txskb(entry)) { rt2x00_err(rt2x00dev, "Fail to map beacon, aborting\n"); goto out; } /* * Write the TX descriptor for the beacon. */ rt2500pci_write_tx_desc(entry, txdesc); /* * Dump beacon to userspace through debugfs. */ rt2x00debug_dump_frame(rt2x00dev, DUMP_FRAME_BEACON, entry); out: /* * Enable beaconing again. */ rt2x00_set_field32(&reg, CSR14_BEACON_GEN, 1); rt2x00mmio_register_write(rt2x00dev, CSR14, reg); } /* * RX control handlers */ static void rt2500pci_fill_rxdone(struct queue_entry *entry, struct rxdone_entry_desc *rxdesc) { struct queue_entry_priv_mmio *entry_priv = entry->priv_data; u32 word0; u32 word2; word0 = rt2x00_desc_read(entry_priv->desc, 0); word2 = rt2x00_desc_read(entry_priv->desc, 2); if (rt2x00_get_field32(word0, RXD_W0_CRC_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC; if (rt2x00_get_field32(word0, RXD_W0_PHYSICAL_ERROR)) rxdesc->flags |= RX_FLAG_FAILED_PLCP_CRC; /* * Obtain the status about this packet. * When frame was received with an OFDM bitrate, * the signal is the PLCP value. If it was received with * a CCK bitrate the signal is the rate in 100kbit/s. */ rxdesc->signal = rt2x00_get_field32(word2, RXD_W2_SIGNAL); rxdesc->rssi = rt2x00_get_field32(word2, RXD_W2_RSSI) - entry->queue->rt2x00dev->rssi_offset; rxdesc->size = rt2x00_get_field32(word0, RXD_W0_DATABYTE_COUNT); if (rt2x00_get_field32(word0, RXD_W0_OFDM)) rxdesc->dev_flags |= RXDONE_SIGNAL_PLCP; else rxdesc->dev_flags |= RXDONE_SIGNAL_BITRATE; if (rt2x00_get_field32(word0, RXD_W0_MY_BSS)) rxdesc->dev_flags |= RXDONE_MY_BSS; } /* * Interrupt functions. */ static void rt2500pci_txdone(struct rt2x00_dev *rt2x00dev, const enum data_queue_qid queue_idx) { struct data_queue *queue = rt2x00queue_get_tx_queue(rt2x00dev, queue_idx); struct queue_entry_priv_mmio *entry_priv; struct queue_entry *entry; struct txdone_entry_desc txdesc; u32 word; while (!rt2x00queue_empty(queue)) { entry = rt2x00queue_get_entry(queue, Q_INDEX_DONE); entry_priv = entry->priv_data; word = rt2x00_desc_read(entry_priv->desc, 0); if (rt2x00_get_field32(word, TXD_W0_OWNER_NIC) || !rt2x00_get_field32(word, TXD_W0_VALID)) break; /* * Obtain the status about this packet. */ txdesc.flags = 0; switch (rt2x00_get_field32(word, TXD_W0_RESULT)) { case 0: /* Success */ case 1: /* Success with retry */ __set_bit(TXDONE_SUCCESS, &txdesc.flags); break; case 2: /* Failure, excessive retries */ __set_bit(TXDONE_EXCESSIVE_RETRY, &txdesc.flags); fallthrough; /* this is a failed frame! */ default: /* Failure */ __set_bit(TXDONE_FAILURE, &txdesc.flags); } txdesc.retry = rt2x00_get_field32(word, TXD_W0_RETRY_COUNT); rt2x00lib_txdone(entry, &txdesc); } } static inline void rt2500pci_enable_interrupt(struct rt2x00_dev *rt2x00dev, struct rt2x00_field32 irq_field) { u32 reg; /* * Enable a single interrupt. The interrupt mask register * access needs locking. 
*/ spin_lock_irq(&rt2x00dev->irqmask_lock); reg = rt2x00mmio_register_read(rt2x00dev, CSR8); rt2x00_set_field32(&reg, irq_field, 0); rt2x00mmio_register_write(rt2x00dev, CSR8, reg); spin_unlock_irq(&rt2x00dev->irqmask_lock); } static void rt2500pci_txstatus_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, txstatus_tasklet); u32 reg; /* * Handle all tx queues. */ rt2500pci_txdone(rt2x00dev, QID_ATIM); rt2500pci_txdone(rt2x00dev, QID_AC_VO); rt2500pci_txdone(rt2x00dev, QID_AC_VI); /* * Enable all TXDONE interrupts again. */ if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) { spin_lock_irq(&rt2x00dev->irqmask_lock); reg = rt2x00mmio_register_read(rt2x00dev, CSR8); rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0); rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0); rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0); rt2x00mmio_register_write(rt2x00dev, CSR8, reg); spin_unlock_irq(&rt2x00dev->irqmask_lock); } } static void rt2500pci_tbtt_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, tbtt_tasklet); rt2x00lib_beacondone(rt2x00dev); if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2500pci_enable_interrupt(rt2x00dev, CSR8_TBCN_EXPIRE); } static void rt2500pci_rxdone_tasklet(struct tasklet_struct *t) { struct rt2x00_dev *rt2x00dev = from_tasklet(rt2x00dev, t, rxdone_tasklet); if (rt2x00mmio_rxdone(rt2x00dev)) tasklet_schedule(&rt2x00dev->rxdone_tasklet); else if (test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) rt2500pci_enable_interrupt(rt2x00dev, CSR8_RXDONE); } static irqreturn_t rt2500pci_interrupt(int irq, void *dev_instance) { struct rt2x00_dev *rt2x00dev = dev_instance; u32 reg, mask; /* * Get the interrupt sources & saved to local variable. * Write register value back to clear pending interrupts. */ reg = rt2x00mmio_register_read(rt2x00dev, CSR7); rt2x00mmio_register_write(rt2x00dev, CSR7, reg); if (!reg) return IRQ_NONE; if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags)) return IRQ_HANDLED; mask = reg; /* * Schedule tasklets for interrupt handling. */ if (rt2x00_get_field32(reg, CSR7_TBCN_EXPIRE)) tasklet_hi_schedule(&rt2x00dev->tbtt_tasklet); if (rt2x00_get_field32(reg, CSR7_RXDONE)) tasklet_schedule(&rt2x00dev->rxdone_tasklet); if (rt2x00_get_field32(reg, CSR7_TXDONE_ATIMRING) || rt2x00_get_field32(reg, CSR7_TXDONE_PRIORING) || rt2x00_get_field32(reg, CSR7_TXDONE_TXRING)) { tasklet_schedule(&rt2x00dev->txstatus_tasklet); /* * Mask out all txdone interrupts. */ rt2x00_set_field32(&mask, CSR8_TXDONE_TXRING, 1); rt2x00_set_field32(&mask, CSR8_TXDONE_ATIMRING, 1); rt2x00_set_field32(&mask, CSR8_TXDONE_PRIORING, 1); } /* * Disable all interrupts for which a tasklet was scheduled right now, * the tasklet will reenable the appropriate interrupts. */ spin_lock(&rt2x00dev->irqmask_lock); reg = rt2x00mmio_register_read(rt2x00dev, CSR8); reg |= mask; rt2x00mmio_register_write(rt2x00dev, CSR8, reg); spin_unlock(&rt2x00dev->irqmask_lock); return IRQ_HANDLED; } /* * Device probe functions. */ static int rt2500pci_validate_eeprom(struct rt2x00_dev *rt2x00dev) { struct eeprom_93cx6 eeprom; u32 reg; u16 word; u8 *mac; reg = rt2x00mmio_register_read(rt2x00dev, CSR21); eeprom.data = rt2x00dev; eeprom.register_read = rt2500pci_eepromregister_read; eeprom.register_write = rt2500pci_eepromregister_write; eeprom.width = rt2x00_get_field32(reg, CSR21_TYPE_93C46) ? 
PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66; eeprom.reg_data_in = 0; eeprom.reg_data_out = 0; eeprom.reg_data_clock = 0; eeprom.reg_chip_select = 0; eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom, EEPROM_SIZE / sizeof(u16)); /* * Start validation of the data that has been read. */ mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0); rt2x00lib_set_mac_address(rt2x00dev, mac); word = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_ANTENNA_NUM, 2); rt2x00_set_field16(&word, EEPROM_ANTENNA_TX_DEFAULT, ANTENNA_SW_DIVERSITY); rt2x00_set_field16(&word, EEPROM_ANTENNA_RX_DEFAULT, ANTENNA_SW_DIVERSITY); rt2x00_set_field16(&word, EEPROM_ANTENNA_LED_MODE, LED_MODE_DEFAULT); rt2x00_set_field16(&word, EEPROM_ANTENNA_DYN_TXAGC, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_HARDWARE_RADIO, 0); rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2522); rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word); rt2x00_eeprom_dbg(rt2x00dev, "Antenna: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0); rt2x00_set_field16(&word, EEPROM_NIC_DYN_BBP_TUNE, 0); rt2x00_set_field16(&word, EEPROM_NIC_CCK_TX_POWER, 0); rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word); rt2x00_eeprom_dbg(rt2x00dev, "NIC: 0x%04x\n", word); } word = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET); if (word == 0xffff) { rt2x00_set_field16(&word, EEPROM_CALIBRATE_OFFSET_RSSI, DEFAULT_RSSI_OFFSET); rt2x00_eeprom_write(rt2x00dev, EEPROM_CALIBRATE_OFFSET, word); rt2x00_eeprom_dbg(rt2x00dev, "Calibrate offset: 0x%04x\n", word); } return 0; } static int rt2500pci_init_eeprom(struct rt2x00_dev *rt2x00dev) { u32 reg; u16 value; u16 eeprom; /* * Read EEPROM word for configuration. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA); /* * Identify RF chipset. */ value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); reg = rt2x00mmio_register_read(rt2x00dev, CSR0); rt2x00_set_chip(rt2x00dev, RT2560, value, rt2x00_get_field32(reg, CSR0_REVISION)); if (!rt2x00_rf(rt2x00dev, RF2522) && !rt2x00_rf(rt2x00dev, RF2523) && !rt2x00_rf(rt2x00dev, RF2524) && !rt2x00_rf(rt2x00dev, RF2525) && !rt2x00_rf(rt2x00dev, RF2525E) && !rt2x00_rf(rt2x00dev, RF5222)) { rt2x00_err(rt2x00dev, "Invalid RF chipset detected\n"); return -ENODEV; } /* * Identify default antenna configuration. */ rt2x00dev->default_ant.tx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT); rt2x00dev->default_ant.rx = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT); /* * Store led mode, for correct led behaviour. */ #ifdef CONFIG_RT2X00_LIB_LEDS value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_LED_MODE); rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); if (value == LED_MODE_TXRX_ACTIVITY || value == LED_MODE_DEFAULT || value == LED_MODE_ASUS) rt2500pci_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_ACTIVITY); #endif /* CONFIG_RT2X00_LIB_LEDS */ /* * Detect if this device has an hardware controlled radio. */ if (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_HARDWARE_RADIO)) { __set_bit(CAPABILITY_HW_BUTTON, &rt2x00dev->cap_flags); /* * On this device RFKILL initialized during probe does not work. */ __set_bit(REQUIRE_DELAYED_RFKILL, &rt2x00dev->cap_flags); } /* * Check if the BBP tuning should be enabled. 
*/ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC); if (!rt2x00_get_field16(eeprom, EEPROM_NIC_DYN_BBP_TUNE)) __set_bit(CAPABILITY_LINK_TUNING, &rt2x00dev->cap_flags); /* * Read the RSSI <-> dBm offset information. */ eeprom = rt2x00_eeprom_read(rt2x00dev, EEPROM_CALIBRATE_OFFSET); rt2x00dev->rssi_offset = rt2x00_get_field16(eeprom, EEPROM_CALIBRATE_OFFSET_RSSI); return 0; } /* * RF value list for RF2522 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2522[] = { { 1, 0x00002050, 0x000c1fda, 0x00000101, 0 }, { 2, 0x00002050, 0x000c1fee, 0x00000101, 0 }, { 3, 0x00002050, 0x000c2002, 0x00000101, 0 }, { 4, 0x00002050, 0x000c2016, 0x00000101, 0 }, { 5, 0x00002050, 0x000c202a, 0x00000101, 0 }, { 6, 0x00002050, 0x000c203e, 0x00000101, 0 }, { 7, 0x00002050, 0x000c2052, 0x00000101, 0 }, { 8, 0x00002050, 0x000c2066, 0x00000101, 0 }, { 9, 0x00002050, 0x000c207a, 0x00000101, 0 }, { 10, 0x00002050, 0x000c208e, 0x00000101, 0 }, { 11, 0x00002050, 0x000c20a2, 0x00000101, 0 }, { 12, 0x00002050, 0x000c20b6, 0x00000101, 0 }, { 13, 0x00002050, 0x000c20ca, 0x00000101, 0 }, { 14, 0x00002050, 0x000c20fa, 0x00000101, 0 }, }; /* * RF value list for RF2523 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2523[] = { { 1, 0x00022010, 0x00000c9e, 0x000e0111, 0x00000a1b }, { 2, 0x00022010, 0x00000ca2, 0x000e0111, 0x00000a1b }, { 3, 0x00022010, 0x00000ca6, 0x000e0111, 0x00000a1b }, { 4, 0x00022010, 0x00000caa, 0x000e0111, 0x00000a1b }, { 5, 0x00022010, 0x00000cae, 0x000e0111, 0x00000a1b }, { 6, 0x00022010, 0x00000cb2, 0x000e0111, 0x00000a1b }, { 7, 0x00022010, 0x00000cb6, 0x000e0111, 0x00000a1b }, { 8, 0x00022010, 0x00000cba, 0x000e0111, 0x00000a1b }, { 9, 0x00022010, 0x00000cbe, 0x000e0111, 0x00000a1b }, { 10, 0x00022010, 0x00000d02, 0x000e0111, 0x00000a1b }, { 11, 0x00022010, 0x00000d06, 0x000e0111, 0x00000a1b }, { 12, 0x00022010, 0x00000d0a, 0x000e0111, 0x00000a1b }, { 13, 0x00022010, 0x00000d0e, 0x000e0111, 0x00000a1b }, { 14, 0x00022010, 0x00000d1a, 0x000e0111, 0x00000a03 }, }; /* * RF value list for RF2524 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2524[] = { { 1, 0x00032020, 0x00000c9e, 0x00000101, 0x00000a1b }, { 2, 0x00032020, 0x00000ca2, 0x00000101, 0x00000a1b }, { 3, 0x00032020, 0x00000ca6, 0x00000101, 0x00000a1b }, { 4, 0x00032020, 0x00000caa, 0x00000101, 0x00000a1b }, { 5, 0x00032020, 0x00000cae, 0x00000101, 0x00000a1b }, { 6, 0x00032020, 0x00000cb2, 0x00000101, 0x00000a1b }, { 7, 0x00032020, 0x00000cb6, 0x00000101, 0x00000a1b }, { 8, 0x00032020, 0x00000cba, 0x00000101, 0x00000a1b }, { 9, 0x00032020, 0x00000cbe, 0x00000101, 0x00000a1b }, { 10, 0x00032020, 0x00000d02, 0x00000101, 0x00000a1b }, { 11, 0x00032020, 0x00000d06, 0x00000101, 0x00000a1b }, { 12, 0x00032020, 0x00000d0a, 0x00000101, 0x00000a1b }, { 13, 0x00032020, 0x00000d0e, 0x00000101, 0x00000a1b }, { 14, 0x00032020, 0x00000d1a, 0x00000101, 0x00000a03 }, }; /* * RF value list for RF2525 * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2525[] = { { 1, 0x00022020, 0x00080c9e, 0x00060111, 0x00000a1b }, { 2, 0x00022020, 0x00080ca2, 0x00060111, 0x00000a1b }, { 3, 0x00022020, 0x00080ca6, 0x00060111, 0x00000a1b }, { 4, 0x00022020, 0x00080caa, 0x00060111, 0x00000a1b }, { 5, 0x00022020, 0x00080cae, 0x00060111, 0x00000a1b }, { 6, 0x00022020, 0x00080cb2, 0x00060111, 0x00000a1b }, { 7, 0x00022020, 0x00080cb6, 0x00060111, 0x00000a1b }, { 8, 0x00022020, 0x00080cba, 0x00060111, 0x00000a1b }, { 9, 0x00022020, 0x00080cbe, 0x00060111, 0x00000a1b }, { 10, 0x00022020, 0x00080d02, 0x00060111, 
0x00000a1b }, { 11, 0x00022020, 0x00080d06, 0x00060111, 0x00000a1b }, { 12, 0x00022020, 0x00080d0a, 0x00060111, 0x00000a1b }, { 13, 0x00022020, 0x00080d0e, 0x00060111, 0x00000a1b }, { 14, 0x00022020, 0x00080d1a, 0x00060111, 0x00000a03 }, }; /* * RF value list for RF2525e * Supports: 2.4 GHz */ static const struct rf_channel rf_vals_bg_2525e[] = { { 1, 0x00022020, 0x00081136, 0x00060111, 0x00000a0b }, { 2, 0x00022020, 0x0008113a, 0x00060111, 0x00000a0b }, { 3, 0x00022020, 0x0008113e, 0x00060111, 0x00000a0b }, { 4, 0x00022020, 0x00081182, 0x00060111, 0x00000a0b }, { 5, 0x00022020, 0x00081186, 0x00060111, 0x00000a0b }, { 6, 0x00022020, 0x0008118a, 0x00060111, 0x00000a0b }, { 7, 0x00022020, 0x0008118e, 0x00060111, 0x00000a0b }, { 8, 0x00022020, 0x00081192, 0x00060111, 0x00000a0b }, { 9, 0x00022020, 0x00081196, 0x00060111, 0x00000a0b }, { 10, 0x00022020, 0x0008119a, 0x00060111, 0x00000a0b }, { 11, 0x00022020, 0x0008119e, 0x00060111, 0x00000a0b }, { 12, 0x00022020, 0x000811a2, 0x00060111, 0x00000a0b }, { 13, 0x00022020, 0x000811a6, 0x00060111, 0x00000a0b }, { 14, 0x00022020, 0x000811ae, 0x00060111, 0x00000a1b }, }; /* * RF value list for RF5222 * Supports: 2.4 GHz & 5.2 GHz */ static const struct rf_channel rf_vals_5222[] = { { 1, 0x00022020, 0x00001136, 0x00000101, 0x00000a0b }, { 2, 0x00022020, 0x0000113a, 0x00000101, 0x00000a0b }, { 3, 0x00022020, 0x0000113e, 0x00000101, 0x00000a0b }, { 4, 0x00022020, 0x00001182, 0x00000101, 0x00000a0b }, { 5, 0x00022020, 0x00001186, 0x00000101, 0x00000a0b }, { 6, 0x00022020, 0x0000118a, 0x00000101, 0x00000a0b }, { 7, 0x00022020, 0x0000118e, 0x00000101, 0x00000a0b }, { 8, 0x00022020, 0x00001192, 0x00000101, 0x00000a0b }, { 9, 0x00022020, 0x00001196, 0x00000101, 0x00000a0b }, { 10, 0x00022020, 0x0000119a, 0x00000101, 0x00000a0b }, { 11, 0x00022020, 0x0000119e, 0x00000101, 0x00000a0b }, { 12, 0x00022020, 0x000011a2, 0x00000101, 0x00000a0b }, { 13, 0x00022020, 0x000011a6, 0x00000101, 0x00000a0b }, { 14, 0x00022020, 0x000011ae, 0x00000101, 0x00000a1b }, /* 802.11 UNI / HyperLan 2 */ { 36, 0x00022010, 0x00018896, 0x00000101, 0x00000a1f }, { 40, 0x00022010, 0x0001889a, 0x00000101, 0x00000a1f }, { 44, 0x00022010, 0x0001889e, 0x00000101, 0x00000a1f }, { 48, 0x00022010, 0x000188a2, 0x00000101, 0x00000a1f }, { 52, 0x00022010, 0x000188a6, 0x00000101, 0x00000a1f }, { 66, 0x00022010, 0x000188aa, 0x00000101, 0x00000a1f }, { 60, 0x00022010, 0x000188ae, 0x00000101, 0x00000a1f }, { 64, 0x00022010, 0x000188b2, 0x00000101, 0x00000a1f }, /* 802.11 HyperLan 2 */ { 100, 0x00022010, 0x00008802, 0x00000101, 0x00000a0f }, { 104, 0x00022010, 0x00008806, 0x00000101, 0x00000a0f }, { 108, 0x00022010, 0x0000880a, 0x00000101, 0x00000a0f }, { 112, 0x00022010, 0x0000880e, 0x00000101, 0x00000a0f }, { 116, 0x00022010, 0x00008812, 0x00000101, 0x00000a0f }, { 120, 0x00022010, 0x00008816, 0x00000101, 0x00000a0f }, { 124, 0x00022010, 0x0000881a, 0x00000101, 0x00000a0f }, { 128, 0x00022010, 0x0000881e, 0x00000101, 0x00000a0f }, { 132, 0x00022010, 0x00008822, 0x00000101, 0x00000a0f }, { 136, 0x00022010, 0x00008826, 0x00000101, 0x00000a0f }, /* 802.11 UNII */ { 140, 0x00022010, 0x0000882a, 0x00000101, 0x00000a0f }, { 149, 0x00022020, 0x000090a6, 0x00000101, 0x00000a07 }, { 153, 0x00022020, 0x000090ae, 0x00000101, 0x00000a07 }, { 157, 0x00022020, 0x000090b6, 0x00000101, 0x00000a07 }, { 161, 0x00022020, 0x000090be, 0x00000101, 0x00000a07 }, }; static int rt2500pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) { struct hw_mode_spec *spec = &rt2x00dev->spec; struct channel_info *info; u8 *tx_power; 
unsigned int i; /* * Initialize all hw fields. */ ieee80211_hw_set(rt2x00dev->hw, PS_NULLFUNC_STACK); ieee80211_hw_set(rt2x00dev->hw, SUPPORTS_PS); ieee80211_hw_set(rt2x00dev->hw, HOST_BROADCAST_PS_BUFFERING); ieee80211_hw_set(rt2x00dev->hw, SIGNAL_DBM); SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev); SET_IEEE80211_PERM_ADDR(rt2x00dev->hw, rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0)); /* * Disable powersaving as default. */ rt2x00dev->hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; /* * Initialize hw_mode information. */ spec->supported_bands = SUPPORT_BAND_2GHZ; spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM; if (rt2x00_rf(rt2x00dev, RF2522)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2522); spec->channels = rf_vals_bg_2522; } else if (rt2x00_rf(rt2x00dev, RF2523)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2523); spec->channels = rf_vals_bg_2523; } else if (rt2x00_rf(rt2x00dev, RF2524)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2524); spec->channels = rf_vals_bg_2524; } else if (rt2x00_rf(rt2x00dev, RF2525)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525); spec->channels = rf_vals_bg_2525; } else if (rt2x00_rf(rt2x00dev, RF2525E)) { spec->num_channels = ARRAY_SIZE(rf_vals_bg_2525e); spec->channels = rf_vals_bg_2525e; } else if (rt2x00_rf(rt2x00dev, RF5222)) { spec->supported_bands |= SUPPORT_BAND_5GHZ; spec->num_channels = ARRAY_SIZE(rf_vals_5222); spec->channels = rf_vals_5222; } /* * Create channel information array */ info = kcalloc(spec->num_channels, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; spec->channels_info = info; tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_START); for (i = 0; i < 14; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); } if (spec->num_channels > 14) { for (i = 14; i < spec->num_channels; i++) { info[i].max_power = MAX_TXPOWER; info[i].default_power1 = DEFAULT_TXPOWER; } } return 0; } static int rt2500pci_probe_hw(struct rt2x00_dev *rt2x00dev) { int retval; u32 reg; /* * Allocate eeprom data. */ retval = rt2500pci_validate_eeprom(rt2x00dev); if (retval) return retval; retval = rt2500pci_init_eeprom(rt2x00dev); if (retval) return retval; /* * Enable rfkill polling by setting GPIO direction of the * rfkill switch GPIO pin correctly. */ reg = rt2x00mmio_register_read(rt2x00dev, GPIOCSR); rt2x00_set_field32(&reg, GPIOCSR_DIR0, 1); rt2x00mmio_register_write(rt2x00dev, GPIOCSR, reg); /* * Initialize hw specifications. */ retval = rt2500pci_probe_hw_mode(rt2x00dev); if (retval) return retval; /* * This device requires the atim queue and DMA-mapped skbs. */ __set_bit(REQUIRE_ATIM_QUEUE, &rt2x00dev->cap_flags); __set_bit(REQUIRE_DMA, &rt2x00dev->cap_flags); __set_bit(REQUIRE_SW_SEQNO, &rt2x00dev->cap_flags); /* * Set the rssi offset. */ rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET; return 0; } /* * IEEE80211 stack callback functions. 
*/ static u64 rt2500pci_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct rt2x00_dev *rt2x00dev = hw->priv; u64 tsf; u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, CSR17); tsf = (u64) rt2x00_get_field32(reg, CSR17_HIGH_TSFTIMER) << 32; reg = rt2x00mmio_register_read(rt2x00dev, CSR16); tsf |= rt2x00_get_field32(reg, CSR16_LOW_TSFTIMER); return tsf; } static int rt2500pci_tx_last_beacon(struct ieee80211_hw *hw) { struct rt2x00_dev *rt2x00dev = hw->priv; u32 reg; reg = rt2x00mmio_register_read(rt2x00dev, CSR15); return rt2x00_get_field32(reg, CSR15_BEACON_SENT); } static const struct ieee80211_ops rt2500pci_mac80211_ops = { .tx = rt2x00mac_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = rt2x00mac_start, .stop = rt2x00mac_stop, .add_interface = rt2x00mac_add_interface, .remove_interface = rt2x00mac_remove_interface, .config = rt2x00mac_config, .configure_filter = rt2x00mac_configure_filter, .sw_scan_start = rt2x00mac_sw_scan_start, .sw_scan_complete = rt2x00mac_sw_scan_complete, .get_stats = rt2x00mac_get_stats, .bss_info_changed = rt2x00mac_bss_info_changed, .conf_tx = rt2x00mac_conf_tx, .get_tsf = rt2500pci_get_tsf, .tx_last_beacon = rt2500pci_tx_last_beacon, .rfkill_poll = rt2x00mac_rfkill_poll, .flush = rt2x00mac_flush, .set_antenna = rt2x00mac_set_antenna, .get_antenna = rt2x00mac_get_antenna, .get_ringparam = rt2x00mac_get_ringparam, .tx_frames_pending = rt2x00mac_tx_frames_pending, }; static const struct rt2x00lib_ops rt2500pci_rt2x00_ops = { .irq_handler = rt2500pci_interrupt, .txstatus_tasklet = rt2500pci_txstatus_tasklet, .tbtt_tasklet = rt2500pci_tbtt_tasklet, .rxdone_tasklet = rt2500pci_rxdone_tasklet, .probe_hw = rt2500pci_probe_hw, .initialize = rt2x00mmio_initialize, .uninitialize = rt2x00mmio_uninitialize, .get_entry_state = rt2500pci_get_entry_state, .clear_entry = rt2500pci_clear_entry, .set_device_state = rt2500pci_set_device_state, .rfkill_poll = rt2500pci_rfkill_poll, .link_stats = rt2500pci_link_stats, .reset_tuner = rt2500pci_reset_tuner, .link_tuner = rt2500pci_link_tuner, .start_queue = rt2500pci_start_queue, .kick_queue = rt2500pci_kick_queue, .stop_queue = rt2500pci_stop_queue, .flush_queue = rt2x00mmio_flush_queue, .write_tx_desc = rt2500pci_write_tx_desc, .write_beacon = rt2500pci_write_beacon, .fill_rxdone = rt2500pci_fill_rxdone, .config_filter = rt2500pci_config_filter, .config_intf = rt2500pci_config_intf, .config_erp = rt2500pci_config_erp, .config_ant = rt2500pci_config_ant, .config = rt2500pci_config, }; static void rt2500pci_queue_init(struct data_queue *queue) { switch (queue->qid) { case QID_RX: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = RXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_AC_VO: case QID_AC_VI: case QID_AC_BE: case QID_AC_BK: queue->limit = 32; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_BEACON: queue->limit = 1; queue->data_size = MGMT_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; case QID_ATIM: queue->limit = 8; queue->data_size = DATA_FRAME_SIZE; queue->desc_size = TXD_DESC_SIZE; queue->priv_size = sizeof(struct queue_entry_priv_mmio); break; default: BUG(); break; } } static const struct rt2x00_ops rt2500pci_ops = { .name = KBUILD_MODNAME, .max_ap_intf = 1, .eeprom_size = EEPROM_SIZE, .rf_size = RF_SIZE, .tx_queues = NUM_TX_QUEUES, .queue_init = rt2500pci_queue_init, .lib 
= &rt2500pci_rt2x00_ops, .hw = &rt2500pci_mac80211_ops, #ifdef CONFIG_RT2X00_LIB_DEBUGFS .debugfs = &rt2500pci_rt2x00debug, #endif /* CONFIG_RT2X00_LIB_DEBUGFS */ }; /* * RT2500pci module information. */ static const struct pci_device_id rt2500pci_device_table[] = { { PCI_DEVICE(0x1814, 0x0201) }, { 0, } }; MODULE_AUTHOR(DRV_PROJECT); MODULE_VERSION(DRV_VERSION); MODULE_DESCRIPTION("Ralink RT2500 PCI & PCMCIA Wireless LAN driver."); MODULE_DEVICE_TABLE(pci, rt2500pci_device_table); MODULE_LICENSE("GPL"); static int rt2500pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id) { return rt2x00pci_probe(pci_dev, &rt2500pci_ops); } static struct pci_driver rt2500pci_driver = { .name = KBUILD_MODNAME, .id_table = rt2500pci_device_table, .probe = rt2500pci_probe, .remove = rt2x00pci_remove, .driver.pm = &rt2x00pci_pm_ops, }; module_pci_driver(rt2500pci_driver);
linux-master
drivers/net/wireless/ralink/rt2x00/rt2500pci.c
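The interrupt path in rt2500pci.c above (rt2500pci_interrupt plus the txstatus/tbtt/rxdone tasklets) follows a mask-then-re-enable protocol: the hard IRQ handler acks CSR7, masks the sources it hands off by setting their CSR8 bits under irqmask_lock, and each tasklet clears those bits again once it has run. The standalone sketch below illustrates only that protocol; the register indices, the INT_TXDONE_BITS mask and the helpers (reg_read/reg_write, irq_mask_lock/irq_mask_unlock, schedule_txdone_work, example_*) are hypothetical placeholders, not rt2x00 APIs.

#include <stdint.h>
#include <stdbool.h>

enum { INT_SOURCE_CSR = 0, INT_MASK_CSR = 1 };   /* placeholder register indices */
#define INT_TXDONE_BITS 0x0007u                  /* placeholder TXDONE source/mask bits */

static uint32_t fake_regs[2];                    /* stand-in for MMIO space */

static uint32_t reg_read(int reg)            { return fake_regs[reg]; }
static void reg_write(int reg, uint32_t val) { fake_regs[reg] = val; }
static void irq_mask_lock(void)    { /* stands in for spin_lock_irqsave(&irqmask_lock) */ }
static void irq_mask_unlock(void)  { /* stands in for spin_unlock_irqrestore() */ }
static void schedule_txdone_work(void) { /* stands in for tasklet_schedule() */ }

/* Hard IRQ handler: ack the source register, then mask the sources that the
 * deferred work will handle so they cannot refire until it has run. */
static bool example_irq_handler(void)
{
	uint32_t cause = reg_read(INT_SOURCE_CSR);

	reg_write(INT_SOURCE_CSR, cause);        /* write back to clear pending bits */
	if (!cause)
		return false;                    /* interrupt was not ours */

	if (cause & INT_TXDONE_BITS) {
		schedule_txdone_work();
		irq_mask_lock();
		reg_write(INT_MASK_CSR,
			  reg_read(INT_MASK_CSR) | INT_TXDONE_BITS); /* 1 = masked */
		irq_mask_unlock();
	}
	return true;
}

/* Deferred work: reap TX statuses, then unmask under the same lock. */
static void example_txdone_work(void)
{
	/* ... process completed TX descriptors here ... */
	irq_mask_lock();
	reg_write(INT_MASK_CSR, reg_read(INT_MASK_CSR) & ~INT_TXDONE_BITS);
	irq_mask_unlock();
}

int main(void)
{
	fake_regs[INT_SOURCE_CSR] = INT_TXDONE_BITS;   /* pretend TXDONE fired */
	if (example_irq_handler())
		example_txdone_work();
	return 0;
}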
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2021 pureLiFi */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/skbuff.h> #include <linux/usb.h> #include <linux/workqueue.h> #include <linux/proc_fs.h> #include <linux/fs.h> #include <linux/string.h> #include <linux/module.h> #include <net/mac80211.h> #include <asm/unaligned.h> #include <linux/sysfs.h> #include "mac.h" #include "usb.h" #include "chip.h" static const struct usb_device_id usb_ids[] = { { USB_DEVICE(PURELIFI_X_VENDOR_ID_0, PURELIFI_X_PRODUCT_ID_0), .driver_info = DEVICE_LIFI_X }, { USB_DEVICE(PURELIFI_XC_VENDOR_ID_0, PURELIFI_XC_PRODUCT_ID_0), .driver_info = DEVICE_LIFI_XC }, { USB_DEVICE(PURELIFI_XL_VENDOR_ID_0, PURELIFI_XL_PRODUCT_ID_0), .driver_info = DEVICE_LIFI_XL }, {} }; void plfxlc_send_packet_from_data_queue(struct plfxlc_usb *usb) { struct plfxlc_usb_tx *tx = &usb->tx; struct sk_buff *skb = NULL; unsigned long flags; u8 last_served_sidx; spin_lock_irqsave(&tx->lock, flags); last_served_sidx = usb->sidx; do { usb->sidx = (usb->sidx + 1) % MAX_STA_NUM; if (!(tx->station[usb->sidx].flag & STATION_CONNECTED_FLAG)) continue; if (!(tx->station[usb->sidx].flag & STATION_FIFO_FULL_FLAG)) skb = skb_peek(&tx->station[usb->sidx].data_list); } while ((usb->sidx != last_served_sidx) && (!skb)); if (skb) { skb = skb_dequeue(&tx->station[usb->sidx].data_list); plfxlc_usb_wreq_async(usb, skb->data, skb->len, USB_REQ_DATA_TX, plfxlc_tx_urb_complete, skb); if (skb_queue_len(&tx->station[usb->sidx].data_list) <= 60) ieee80211_wake_queues(plfxlc_usb_to_hw(usb)); } spin_unlock_irqrestore(&tx->lock, flags); } static void handle_rx_packet(struct plfxlc_usb *usb, const u8 *buffer, unsigned int length) { plfxlc_mac_rx(plfxlc_usb_to_hw(usb), buffer, length); } static void rx_urb_complete(struct urb *urb) { struct plfxlc_usb_tx *tx; struct plfxlc_usb *usb; unsigned int length; const u8 *buffer; u16 status; u8 sidx; int r; if (!urb) { pr_err("urb is NULL\n"); return; } if (!urb->context) { pr_err("urb ctx is NULL\n"); return; } usb = urb->context; if (usb->initialized != 1) { pr_err("usb is not initialized\n"); return; } tx = &usb->tx; switch (urb->status) { case 0: break; case -ESHUTDOWN: case -EINVAL: case -ENODEV: case -ENOENT: case -ECONNRESET: case -EPIPE: dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); return; default: dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); if (tx->submitted_urbs++ < PURELIFI_URB_RETRY_MAX) { dev_dbg(plfxlc_urb_dev(urb), "urb %p resubmit %d", urb, tx->submitted_urbs++); goto resubmit; } else { dev_dbg(plfxlc_urb_dev(urb), "urb %p max resubmits reached", urb); tx->submitted_urbs = 0; return; } } buffer = urb->transfer_buffer; length = le32_to_cpu(*(__le32 *)(buffer + sizeof(struct rx_status))) + sizeof(u32); if (urb->actual_length != (PLF_MSG_STATUS_OFFSET + 1)) { if (usb->initialized && usb->link_up) handle_rx_packet(usb, buffer, length); goto resubmit; } status = buffer[PLF_MSG_STATUS_OFFSET]; switch (status) { case STATION_FIFO_ALMOST_FULL_NOT_MESSAGE: dev_dbg(&usb->intf->dev, "FIFO full not packet receipt\n"); tx->mac_fifo_full = 1; for (sidx = 0; sidx < MAX_STA_NUM; sidx++) tx->station[sidx].flag |= STATION_FIFO_FULL_FLAG; break; case STATION_FIFO_ALMOST_FULL_MESSAGE: dev_dbg(&usb->intf->dev, "FIFO full packet receipt\n"); for (sidx = 0; sidx < MAX_STA_NUM; sidx++) tx->station[sidx].flag &= STATION_ACTIVE_FLAG; plfxlc_send_packet_from_data_queue(usb); break; case 
STATION_CONNECT_MESSAGE: usb->link_up = 1; dev_dbg(&usb->intf->dev, "ST_CONNECT_MSG packet receipt\n"); break; case STATION_DISCONNECT_MESSAGE: usb->link_up = 0; dev_dbg(&usb->intf->dev, "ST_DISCONN_MSG packet receipt\n"); break; default: dev_dbg(&usb->intf->dev, "Unknown packet receipt\n"); break; } resubmit: r = usb_submit_urb(urb, GFP_ATOMIC); if (r) dev_dbg(plfxlc_urb_dev(urb), "urb %p resubmit fail (%d)\n", urb, r); } static struct urb *alloc_rx_urb(struct plfxlc_usb *usb) { struct usb_device *udev = plfxlc_usb_to_usbdev(usb); struct urb *urb; void *buffer; urb = usb_alloc_urb(0, GFP_KERNEL); if (!urb) return NULL; buffer = usb_alloc_coherent(udev, USB_MAX_RX_SIZE, GFP_KERNEL, &urb->transfer_dma); if (!buffer) { usb_free_urb(urb); return NULL; } usb_fill_bulk_urb(urb, udev, usb_rcvbulkpipe(udev, EP_DATA_IN), buffer, USB_MAX_RX_SIZE, rx_urb_complete, usb); urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; return urb; } static void free_rx_urb(struct urb *urb) { if (!urb) return; usb_free_coherent(urb->dev, urb->transfer_buffer_length, urb->transfer_buffer, urb->transfer_dma); usb_free_urb(urb); } static int __lf_x_usb_enable_rx(struct plfxlc_usb *usb) { struct plfxlc_usb_rx *rx = &usb->rx; struct urb **urbs; int i, r; r = -ENOMEM; urbs = kcalloc(RX_URBS_COUNT, sizeof(struct urb *), GFP_KERNEL); if (!urbs) goto error; for (i = 0; i < RX_URBS_COUNT; i++) { urbs[i] = alloc_rx_urb(usb); if (!urbs[i]) goto error; } spin_lock_irq(&rx->lock); dev_dbg(plfxlc_usb_dev(usb), "irq_disabled %d\n", irqs_disabled()); if (rx->urbs) { spin_unlock_irq(&rx->lock); r = 0; goto error; } rx->urbs = urbs; rx->urbs_count = RX_URBS_COUNT; spin_unlock_irq(&rx->lock); for (i = 0; i < RX_URBS_COUNT; i++) { r = usb_submit_urb(urbs[i], GFP_KERNEL); if (r) goto error_submit; } return 0; error_submit: for (i = 0; i < RX_URBS_COUNT; i++) usb_kill_urb(urbs[i]); spin_lock_irq(&rx->lock); rx->urbs = NULL; rx->urbs_count = 0; spin_unlock_irq(&rx->lock); error: if (urbs) { for (i = 0; i < RX_URBS_COUNT; i++) free_rx_urb(urbs[i]); } kfree(urbs); return r; } int plfxlc_usb_enable_rx(struct plfxlc_usb *usb) { struct plfxlc_usb_rx *rx = &usb->rx; int r; mutex_lock(&rx->setup_mutex); r = __lf_x_usb_enable_rx(usb); if (!r) usb->rx_usb_enabled = 1; mutex_unlock(&rx->setup_mutex); return r; } static void __lf_x_usb_disable_rx(struct plfxlc_usb *usb) { struct plfxlc_usb_rx *rx = &usb->rx; unsigned long flags; unsigned int count; struct urb **urbs; int i; spin_lock_irqsave(&rx->lock, flags); urbs = rx->urbs; count = rx->urbs_count; spin_unlock_irqrestore(&rx->lock, flags); if (!urbs) return; for (i = 0; i < count; i++) { usb_kill_urb(urbs[i]); free_rx_urb(urbs[i]); } kfree(urbs); rx->urbs = NULL; rx->urbs_count = 0; } void plfxlc_usb_disable_rx(struct plfxlc_usb *usb) { struct plfxlc_usb_rx *rx = &usb->rx; mutex_lock(&rx->setup_mutex); __lf_x_usb_disable_rx(usb); usb->rx_usb_enabled = 0; mutex_unlock(&rx->setup_mutex); } void plfxlc_usb_disable_tx(struct plfxlc_usb *usb) { struct plfxlc_usb_tx *tx = &usb->tx; unsigned long flags; clear_bit(PLF_BIT_ENABLED, &tx->enabled); /* kill all submitted tx-urbs */ usb_kill_anchored_urbs(&tx->submitted); spin_lock_irqsave(&tx->lock, flags); WARN_ON(!skb_queue_empty(&tx->submitted_skbs)); WARN_ON(tx->submitted_urbs != 0); tx->submitted_urbs = 0; spin_unlock_irqrestore(&tx->lock, flags); /* The stopped state is ignored, relying on ieee80211_wake_queues() * in a potentionally following plfxlc_usb_enable_tx(). 
*/ } void plfxlc_usb_enable_tx(struct plfxlc_usb *usb) { struct plfxlc_usb_tx *tx = &usb->tx; unsigned long flags; spin_lock_irqsave(&tx->lock, flags); set_bit(PLF_BIT_ENABLED, &tx->enabled); tx->submitted_urbs = 0; ieee80211_wake_queues(plfxlc_usb_to_hw(usb)); tx->stopped = 0; spin_unlock_irqrestore(&tx->lock, flags); } void plfxlc_tx_urb_complete(struct urb *urb) { struct ieee80211_tx_info *info; struct plfxlc_usb *usb; struct sk_buff *skb; skb = urb->context; info = IEEE80211_SKB_CB(skb); /* grab 'usb' pointer before handing off the skb (since * it might be freed by plfxlc_mac_tx_to_dev or mac80211) */ usb = &plfxlc_hw_mac(info->rate_driver_data[0])->chip.usb; switch (urb->status) { case 0: break; case -ESHUTDOWN: case -EINVAL: case -ENODEV: case -ENOENT: case -ECONNRESET: case -EPIPE: dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); break; default: dev_dbg(plfxlc_urb_dev(urb), "urb %p error %d\n", urb, urb->status); return; } plfxlc_mac_tx_to_dev(skb, urb->status); plfxlc_send_packet_from_data_queue(usb); usb_free_urb(urb); } static inline void init_usb_rx(struct plfxlc_usb *usb) { struct plfxlc_usb_rx *rx = &usb->rx; spin_lock_init(&rx->lock); mutex_init(&rx->setup_mutex); if (interface_to_usbdev(usb->intf)->speed == USB_SPEED_HIGH) rx->usb_packet_size = 512; else rx->usb_packet_size = 64; if (rx->fragment_length != 0) dev_dbg(plfxlc_usb_dev(usb), "fragment_length error\n"); } static inline void init_usb_tx(struct plfxlc_usb *usb) { struct plfxlc_usb_tx *tx = &usb->tx; spin_lock_init(&tx->lock); clear_bit(PLF_BIT_ENABLED, &tx->enabled); tx->stopped = 0; skb_queue_head_init(&tx->submitted_skbs); init_usb_anchor(&tx->submitted); } void plfxlc_usb_init(struct plfxlc_usb *usb, struct ieee80211_hw *hw, struct usb_interface *intf) { memset(usb, 0, sizeof(*usb)); usb->intf = usb_get_intf(intf); usb_set_intfdata(usb->intf, hw); init_usb_tx(usb); init_usb_rx(usb); } void plfxlc_usb_release(struct plfxlc_usb *usb) { plfxlc_op_stop(plfxlc_usb_to_hw(usb)); plfxlc_usb_disable_tx(usb); plfxlc_usb_disable_rx(usb); usb_set_intfdata(usb->intf, NULL); usb_put_intf(usb->intf); } const char *plfxlc_speed(enum usb_device_speed speed) { switch (speed) { case USB_SPEED_LOW: return "low"; case USB_SPEED_FULL: return "full"; case USB_SPEED_HIGH: return "high"; default: return "unknown"; } } int plfxlc_usb_init_hw(struct plfxlc_usb *usb) { int r; r = usb_reset_configuration(plfxlc_usb_to_usbdev(usb)); if (r) { dev_err(plfxlc_usb_dev(usb), "cfg reset failed (%d)\n", r); return r; } return 0; } static void get_usb_req(struct usb_device *udev, void *buffer, u32 buffer_len, enum plf_usb_req_enum usb_req_id, struct plf_usb_req *usb_req) { __be32 payload_len_nw = cpu_to_be32(buffer_len + FCS_LEN); const u8 *buffer_src_p = buffer; u8 *buffer_dst = usb_req->buf; u32 temp_usb_len = 0; usb_req->id = cpu_to_be32(usb_req_id); usb_req->len = cpu_to_be32(0); /* Copy buffer length into the transmitted buffer, as it is important * for the Rx MAC to know its exact length. */ if (usb_req->id == cpu_to_be32(USB_REQ_BEACON_WR)) { memcpy(buffer_dst, &payload_len_nw, sizeof(payload_len_nw)); buffer_dst += sizeof(payload_len_nw); temp_usb_len += sizeof(payload_len_nw); } memcpy(buffer_dst, buffer_src_p, buffer_len); buffer_dst += buffer_len; buffer_src_p += buffer_len; temp_usb_len += buffer_len; /* Set the FCS_LEN (4) bytes as 0 for CRC checking. */ memset(buffer_dst, 0, FCS_LEN); buffer_dst += FCS_LEN; temp_usb_len += FCS_LEN; /* Round the packet to be transmitted to 4 bytes. 
*/ if (temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT) { memset(buffer_dst, 0, PURELIFI_BYTE_NUM_ALIGNMENT - (temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT)); buffer_dst += PURELIFI_BYTE_NUM_ALIGNMENT - (temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT); temp_usb_len += PURELIFI_BYTE_NUM_ALIGNMENT - (temp_usb_len % PURELIFI_BYTE_NUM_ALIGNMENT); } usb_req->len = cpu_to_be32(temp_usb_len); } int plfxlc_usb_wreq_async(struct plfxlc_usb *usb, const u8 *buffer, int buffer_len, enum plf_usb_req_enum usb_req_id, usb_complete_t complete_fn, void *context) { struct usb_device *udev = interface_to_usbdev(usb->ez_usb); struct urb *urb = usb_alloc_urb(0, GFP_ATOMIC); int r; usb_fill_bulk_urb(urb, udev, usb_sndbulkpipe(udev, EP_DATA_OUT), (void *)buffer, buffer_len, complete_fn, context); r = usb_submit_urb(urb, GFP_ATOMIC); if (r) dev_err(&udev->dev, "Async write submit failed (%d)\n", r); return r; } int plfxlc_usb_wreq(struct usb_interface *ez_usb, void *buffer, int buffer_len, enum plf_usb_req_enum usb_req_id) { struct usb_device *udev = interface_to_usbdev(ez_usb); unsigned char *dma_buffer = NULL; struct plf_usb_req usb_req; int usb_bulk_msg_len; int actual_length; int r; get_usb_req(udev, buffer, buffer_len, usb_req_id, &usb_req); usb_bulk_msg_len = sizeof(__le32) + sizeof(__le32) + be32_to_cpu(usb_req.len); dma_buffer = kmemdup(&usb_req, usb_bulk_msg_len, GFP_KERNEL); if (!dma_buffer) { r = -ENOMEM; goto error; } r = usb_bulk_msg(udev, usb_sndbulkpipe(udev, EP_DATA_OUT), dma_buffer, usb_bulk_msg_len, &actual_length, USB_BULK_MSG_TIMEOUT_MS); kfree(dma_buffer); error: if (r) { r = -ENOMEM; dev_err(&udev->dev, "usb_bulk_msg failed (%d)\n", r); } return r; } static void slif_data_plane_sap_timer_callb(struct timer_list *t) { struct plfxlc_usb *usb = from_timer(usb, t, tx.tx_retry_timer); plfxlc_send_packet_from_data_queue(usb); timer_setup(&usb->tx.tx_retry_timer, slif_data_plane_sap_timer_callb, 0); mod_timer(&usb->tx.tx_retry_timer, jiffies + TX_RETRY_BACKOFF_JIFF); } static void sta_queue_cleanup_timer_callb(struct timer_list *t) { struct plfxlc_usb *usb = from_timer(usb, t, sta_queue_cleanup); struct plfxlc_usb_tx *tx = &usb->tx; int sidx; for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) { if (!(tx->station[sidx].flag & STATION_CONNECTED_FLAG)) continue; if (tx->station[sidx].flag & STATION_HEARTBEAT_FLAG) { tx->station[sidx].flag ^= STATION_HEARTBEAT_FLAG; } else { eth_zero_addr(tx->station[sidx].mac); tx->station[sidx].flag = 0; } } timer_setup(&usb->sta_queue_cleanup, sta_queue_cleanup_timer_callb, 0); mod_timer(&usb->sta_queue_cleanup, jiffies + STA_QUEUE_CLEANUP_JIFF); } static int probe(struct usb_interface *intf, const struct usb_device_id *id) { u8 serial_number[PURELIFI_SERIAL_LEN]; struct ieee80211_hw *hw = NULL; struct plfxlc_usb_tx *tx; struct plfxlc_chip *chip; struct plfxlc_usb *usb; u8 hw_address[ETH_ALEN]; unsigned int i; int r = 0; hw = plfxlc_mac_alloc_hw(intf); if (!hw) { r = -ENOMEM; goto error; } chip = &plfxlc_hw_mac(hw)->chip; usb = &chip->usb; usb->ez_usb = intf; tx = &usb->tx; r = plfxlc_upload_mac_and_serial(intf, hw_address, serial_number); if (r) { dev_err(&intf->dev, "MAC and Serial upload failed (%d)\n", r); goto error; } chip->unit_type = STA; dev_err(&intf->dev, "Unit type is station"); r = plfxlc_mac_preinit_hw(hw, hw_address); if (r) { dev_err(&intf->dev, "Init mac failed (%d)\n", r); goto error; } r = ieee80211_register_hw(hw); if (r) { dev_err(&intf->dev, "Register device failed (%d)\n", r); goto error; } if ((le16_to_cpu(interface_to_usbdev(intf)->descriptor.idVendor) == 
PURELIFI_XL_VENDOR_ID_0) && (le16_to_cpu(interface_to_usbdev(intf)->descriptor.idProduct) == PURELIFI_XL_PRODUCT_ID_0)) { r = plfxlc_download_xl_firmware(intf); } else { r = plfxlc_download_fpga(intf); } if (r != 0) { dev_err(&intf->dev, "FPGA download failed (%d)\n", r); goto error; } tx->mac_fifo_full = 0; spin_lock_init(&tx->lock); msleep(PLF_MSLEEP_TIME); r = plfxlc_usb_init_hw(usb); if (r < 0) { dev_err(&intf->dev, "usb_init_hw failed (%d)\n", r); goto error; } msleep(PLF_MSLEEP_TIME); r = plfxlc_chip_switch_radio(chip, PLFXLC_RADIO_ON); if (r < 0) { dev_dbg(&intf->dev, "chip_switch_radio_on failed (%d)\n", r); goto error; } msleep(PLF_MSLEEP_TIME); r = plfxlc_chip_set_rate(chip, 8); if (r < 0) { dev_dbg(&intf->dev, "chip_set_rate failed (%d)\n", r); goto error; } msleep(PLF_MSLEEP_TIME); r = plfxlc_usb_wreq(usb->ez_usb, hw_address, ETH_ALEN, USB_REQ_MAC_WR); if (r < 0) { dev_dbg(&intf->dev, "MAC_WR failure (%d)\n", r); goto error; } plfxlc_chip_enable_rxtx(chip); /* Initialise the data plane Tx queue */ for (i = 0; i < MAX_STA_NUM; i++) { skb_queue_head_init(&tx->station[i].data_list); tx->station[i].flag = 0; } tx->station[STA_BROADCAST_INDEX].flag |= STATION_CONNECTED_FLAG; for (i = 0; i < ETH_ALEN; i++) tx->station[STA_BROADCAST_INDEX].mac[i] = 0xFF; timer_setup(&tx->tx_retry_timer, slif_data_plane_sap_timer_callb, 0); tx->tx_retry_timer.expires = jiffies + TX_RETRY_BACKOFF_JIFF; add_timer(&tx->tx_retry_timer); timer_setup(&usb->sta_queue_cleanup, sta_queue_cleanup_timer_callb, 0); usb->sta_queue_cleanup.expires = jiffies + STA_QUEUE_CLEANUP_JIFF; add_timer(&usb->sta_queue_cleanup); plfxlc_mac_init_hw(hw); usb->initialized = true; return 0; error: if (hw) { plfxlc_mac_release(plfxlc_hw_mac(hw)); ieee80211_unregister_hw(hw); ieee80211_free_hw(hw); } dev_err(&intf->dev, "pureLifi:Device error"); return r; } static void disconnect(struct usb_interface *intf) { struct ieee80211_hw *hw = plfxlc_intf_to_hw(intf); struct plfxlc_mac *mac; struct plfxlc_usb *usb; /* Either something really bad happened, or * we're just dealing with a DEVICE_INSTALLER. */ if (!hw) return; mac = plfxlc_hw_mac(hw); usb = &mac->chip.usb; del_timer_sync(&usb->tx.tx_retry_timer); del_timer_sync(&usb->sta_queue_cleanup); ieee80211_unregister_hw(hw); plfxlc_chip_disable_rxtx(&mac->chip); /* If the disconnect has been caused by a removal of the * driver module, the reset allows reloading of the driver. If the * reset will not be executed here, the upload of the firmware in the * probe function caused by the reloading of the driver will fail. 
*/ usb_reset_device(interface_to_usbdev(intf)); plfxlc_mac_release(mac); ieee80211_free_hw(hw); } static void plfxlc_usb_resume(struct plfxlc_usb *usb) { struct plfxlc_mac *mac = plfxlc_usb_to_mac(usb); int r; r = plfxlc_op_start(plfxlc_usb_to_hw(usb)); if (r < 0) { dev_warn(plfxlc_usb_dev(usb), "Device resume failed (%d)\n", r); if (usb->was_running) set_bit(PURELIFI_DEVICE_RUNNING, &mac->flags); usb_queue_reset_device(usb->intf); return; } if (mac->type != NL80211_IFTYPE_UNSPECIFIED) { r = plfxlc_restore_settings(mac); if (r < 0) { dev_dbg(plfxlc_usb_dev(usb), "Restore failed (%d)\n", r); return; } } } static void plfxlc_usb_stop(struct plfxlc_usb *usb) { plfxlc_op_stop(plfxlc_usb_to_hw(usb)); plfxlc_usb_disable_tx(usb); plfxlc_usb_disable_rx(usb); usb->initialized = false; } static int pre_reset(struct usb_interface *intf) { struct ieee80211_hw *hw = usb_get_intfdata(intf); struct plfxlc_mac *mac; struct plfxlc_usb *usb; if (!hw || intf->condition != USB_INTERFACE_BOUND) return 0; mac = plfxlc_hw_mac(hw); usb = &mac->chip.usb; usb->was_running = test_bit(PURELIFI_DEVICE_RUNNING, &mac->flags); plfxlc_usb_stop(usb); return 0; } static int post_reset(struct usb_interface *intf) { struct ieee80211_hw *hw = usb_get_intfdata(intf); struct plfxlc_mac *mac; struct plfxlc_usb *usb; if (!hw || intf->condition != USB_INTERFACE_BOUND) return 0; mac = plfxlc_hw_mac(hw); usb = &mac->chip.usb; if (usb->was_running) plfxlc_usb_resume(usb); return 0; } #ifdef CONFIG_PM static struct plfxlc_usb *get_plfxlc_usb(struct usb_interface *intf) { struct ieee80211_hw *hw = plfxlc_intf_to_hw(intf); struct plfxlc_mac *mac; /* Either something really bad happened, or * we're just dealing with a DEVICE_INSTALLER. */ if (!hw) return NULL; mac = plfxlc_hw_mac(hw); return &mac->chip.usb; } static int suspend(struct usb_interface *interface, pm_message_t message) { struct plfxlc_usb *pl = get_plfxlc_usb(interface); struct plfxlc_mac *mac = plfxlc_usb_to_mac(pl); if (!pl) return -ENODEV; if (pl->initialized == 0) return 0; pl->was_running = test_bit(PURELIFI_DEVICE_RUNNING, &mac->flags); plfxlc_usb_stop(pl); return 0; } static int resume(struct usb_interface *interface) { struct plfxlc_usb *pl = get_plfxlc_usb(interface); if (!pl) return -ENODEV; if (pl->was_running) plfxlc_usb_resume(pl); return 0; } #endif static struct usb_driver driver = { .name = KBUILD_MODNAME, .id_table = usb_ids, .probe = probe, .disconnect = disconnect, .pre_reset = pre_reset, .post_reset = post_reset, #ifdef CONFIG_PM .suspend = suspend, .resume = resume, #endif .disable_hub_initiated_lpm = 1, }; static int __init usb_init(void) { int r; r = usb_register(&driver); if (r) { pr_err("%s usb_register() failed %d\n", driver.name, r); return r; } pr_debug("Driver initialized :%s\n", driver.name); return 0; } static void __exit usb_exit(void) { usb_deregister(&driver); pr_debug("%s %s\n", driver.name, __func__); } MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("USB driver for pureLiFi devices"); MODULE_AUTHOR("pureLiFi"); MODULE_VERSION("1.0"); MODULE_FIRMWARE("plfxlc/lifi-x.bin"); MODULE_DEVICE_TABLE(usb, usb_ids); module_init(usb_init); module_exit(usb_exit);
linux-master
drivers/net/wireless/purelifi/plfxlc/usb.c
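get_usb_req() in usb.c above builds each bulk-out request by optionally prepending the payload length (beacon writes only), zeroing FCS_LEN trailing bytes for the CRC check, and rounding the total up to PURELIFI_BYTE_NUM_ALIGNMENT. Below is a minimal sketch of that rounding, assuming the 4-byte alignment and 4-byte FCS the driver uses; pad_to() is a hypothetical helper, not a plfxlc function.

#include <stddef.h>
#include <assert.h>

/* Round len up to the next multiple of align (align must be non-zero). */
static size_t pad_to(size_t len, size_t align)
{
	size_t rem = len % align;

	return rem ? len + (align - rem) : len;
}

int main(void)
{
	/* e.g. a 6-byte MAC write: 6 payload bytes + 4 zeroed FCS bytes = 10,
	 * padded to 12; the 8-byte id/len header is sent on top of that. */
	assert(pad_to(6 + 4, 4) == 12);
	assert(pad_to(8 + 4, 4) == 12);   /* already aligned: unchanged */
	return 0;
}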
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2021 pureLiFi */ #include <linux/firmware.h> #include <linux/bitrev.h> #include "mac.h" #include "usb.h" static int send_vendor_request(struct usb_device *udev, int request, unsigned char *buffer, int buffer_size) { return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), request, 0xC0, 0, 0, buffer, buffer_size, PLF_USB_TIMEOUT); } static int send_vendor_command(struct usb_device *udev, int request, unsigned char *buffer, int buffer_size) { return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), request, USB_TYPE_VENDOR /*0x40*/, 0, 0, buffer, buffer_size, PLF_USB_TIMEOUT); } int plfxlc_download_fpga(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); unsigned char *fpga_dmabuff = NULL; const struct firmware *fw = NULL; int blk_tran_len = PLF_BULK_TLEN; unsigned char *fw_data; const char *fw_name; int r, actual_length; int fw_data_i = 0; if ((le16_to_cpu(udev->descriptor.idVendor) == PURELIFI_X_VENDOR_ID_0) && (le16_to_cpu(udev->descriptor.idProduct) == PURELIFI_X_PRODUCT_ID_0)) { fw_name = "plfxlc/lifi-x.bin"; dev_dbg(&intf->dev, "bin file for X selected\n"); } else if ((le16_to_cpu(udev->descriptor.idVendor)) == PURELIFI_XC_VENDOR_ID_0 && (le16_to_cpu(udev->descriptor.idProduct) == PURELIFI_XC_PRODUCT_ID_0)) { fw_name = "plfxlc/lifi-xc.bin"; dev_dbg(&intf->dev, "bin file for XC selected\n"); } else { r = -EINVAL; goto error; } r = request_firmware(&fw, fw_name, &intf->dev); if (r) { dev_err(&intf->dev, "request_firmware failed (%d)\n", r); goto error; } fpga_dmabuff = kmalloc(PLF_FPGA_STATUS_LEN, GFP_KERNEL); if (!fpga_dmabuff) { r = -ENOMEM; goto error_free_fw; } send_vendor_request(udev, PLF_VNDR_FPGA_SET_REQ, fpga_dmabuff, PLF_FPGA_STATUS_LEN); send_vendor_command(udev, PLF_VNDR_FPGA_SET_CMD, NULL, 0); if (fpga_dmabuff[0] != PLF_FPGA_MG) { dev_err(&intf->dev, "fpga_dmabuff[0] is wrong\n"); r = -EINVAL; goto error_free_fw; } for (fw_data_i = 0; fw_data_i < fw->size;) { int tbuf_idx; if ((fw->size - fw_data_i) < blk_tran_len) blk_tran_len = fw->size - fw_data_i; fw_data = kmemdup(&fw->data[fw_data_i], blk_tran_len, GFP_KERNEL); if (!fw_data) { r = -ENOMEM; goto error_free_fw; } for (tbuf_idx = 0; tbuf_idx < blk_tran_len; tbuf_idx++) { /* u8 bit reverse */ fw_data[tbuf_idx] = bitrev8(fw_data[tbuf_idx]); } r = usb_bulk_msg(udev, usb_sndbulkpipe(interface_to_usbdev(intf), fpga_dmabuff[0] & 0xff), fw_data, blk_tran_len, &actual_length, 2 * PLF_USB_TIMEOUT); if (r) dev_err(&intf->dev, "Bulk msg failed (%d)\n", r); kfree(fw_data); fw_data_i += blk_tran_len; } kfree(fpga_dmabuff); fpga_dmabuff = kmalloc(PLF_FPGA_STATE_LEN, GFP_KERNEL); if (!fpga_dmabuff) { r = -ENOMEM; goto error_free_fw; } memset(fpga_dmabuff, 0xff, PLF_FPGA_STATE_LEN); send_vendor_request(udev, PLF_VNDR_FPGA_STATE_REQ, fpga_dmabuff, PLF_FPGA_STATE_LEN); dev_dbg(&intf->dev, "%*ph\n", 8, fpga_dmabuff); if (fpga_dmabuff[0] != 0) { r = -EINVAL; goto error_free_fw; } send_vendor_command(udev, PLF_VNDR_FPGA_STATE_CMD, NULL, 0); msleep(PLF_MSLEEP_TIME); error_free_fw: kfree(fpga_dmabuff); release_firmware(fw); error: return r; } int plfxlc_download_xl_firmware(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); const struct firmware *fwp = NULL; struct plfxlc_firmware_file file = {0}; const char *fw_pack; int s, r; u8 *buf; u32 i; r = send_vendor_command(udev, PLF_VNDR_XL_FW_CMD, NULL, 0); msleep(PLF_MSLEEP_TIME); if (r) { dev_err(&intf->dev, "vendor command failed (%d)\n", r); return -EINVAL; } /* Code for single 
pack file download */ fw_pack = "plfxlc/lifi-xl.bin"; r = request_firmware(&fwp, fw_pack, &intf->dev); if (r) { dev_err(&intf->dev, "Request_firmware failed (%d)\n", r); return -EINVAL; } file.total_files = get_unaligned_le32(&fwp->data[0]); file.total_size = get_unaligned_le32(&fwp->size); dev_dbg(&intf->dev, "XL Firmware (%d, %d)\n", file.total_files, file.total_size); buf = kzalloc(PLF_XL_BUF_LEN, GFP_KERNEL); if (!buf) { release_firmware(fwp); return -ENOMEM; } if (file.total_files > 10) { dev_err(&intf->dev, "Too many files (%d)\n", file.total_files); release_firmware(fwp); kfree(buf); return -EINVAL; } /* Download firmware files in multiple steps */ for (s = 0; s < file.total_files; s++) { buf[0] = s; r = send_vendor_command(udev, PLF_VNDR_XL_FILE_CMD, buf, PLF_XL_BUF_LEN); if (s < file.total_files - 1) file.size = get_unaligned_le32(&fwp->data[4 + ((s + 1) * 4)]) - get_unaligned_le32(&fwp->data[4 + (s) * 4]); else file.size = file.total_size - get_unaligned_le32(&fwp->data[4 + (s) * 4]); if (file.size > file.total_size || file.size > 60000) { dev_err(&intf->dev, "File size is too large (%d)\n", file.size); break; } file.start_addr = get_unaligned_le32(&fwp->data[4 + (s * 4)]); if (file.size % PLF_XL_BUF_LEN && s < 2) file.size += PLF_XL_BUF_LEN - file.size % PLF_XL_BUF_LEN; file.control_packets = file.size / PLF_XL_BUF_LEN; for (i = 0; i < file.control_packets; i++) { memcpy(buf, &fwp->data[file.start_addr + (i * PLF_XL_BUF_LEN)], PLF_XL_BUF_LEN); r = send_vendor_command(udev, PLF_VNDR_XL_DATA_CMD, buf, PLF_XL_BUF_LEN); } dev_dbg(&intf->dev, "fw-dw step=%d,r=%d size=%d\n", s, r, file.size); } release_firmware(fwp); kfree(buf); /* Code for single pack file download ends fw download finish */ r = send_vendor_command(udev, PLF_VNDR_XL_EX_CMD, NULL, 0); dev_dbg(&intf->dev, "Download fpga (4) (%d)\n", r); return 0; } int plfxlc_upload_mac_and_serial(struct usb_interface *intf, unsigned char *hw_address, unsigned char *serial_number) { struct usb_device *udev = interface_to_usbdev(intf); unsigned long long firmware_version; unsigned char *dma_buffer = NULL; dma_buffer = kmalloc(PLF_SERIAL_LEN, GFP_KERNEL); if (!dma_buffer) return -ENOMEM; BUILD_BUG_ON(ETH_ALEN > PLF_SERIAL_LEN); BUILD_BUG_ON(PLF_FW_VER_LEN > PLF_SERIAL_LEN); send_vendor_request(udev, PLF_MAC_VENDOR_REQUEST, dma_buffer, ETH_ALEN); memcpy(hw_address, dma_buffer, ETH_ALEN); send_vendor_request(udev, PLF_SERIAL_NUMBER_VENDOR_REQUEST, dma_buffer, PLF_SERIAL_LEN); send_vendor_request(udev, PLF_SERIAL_NUMBER_VENDOR_REQUEST, dma_buffer, PLF_SERIAL_LEN); memcpy(serial_number, dma_buffer, PLF_SERIAL_LEN); memset(dma_buffer, 0x00, PLF_SERIAL_LEN); send_vendor_request(udev, PLF_FIRMWARE_VERSION_VENDOR_REQUEST, (unsigned char *)dma_buffer, PLF_FW_VER_LEN); memcpy(&firmware_version, dma_buffer, PLF_FW_VER_LEN); dev_info(&intf->dev, "Firmware Version: %llu\n", firmware_version); kfree(dma_buffer); dev_dbg(&intf->dev, "Mac: %pM\n", hw_address); return 0; }
linux-master
drivers/net/wireless/purelifi/plfxlc/firmware.c
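plfxlc_download_xl_firmware() above walks lifi-xl.bin as a simple pack: a 32-bit little-endian file count at offset 0, one 32-bit start offset per file at offset 4 + 4*s, and each file's size taken as the distance to the next start offset (or to the end of the pack for the last file). The sketch below reproduces only that header walk under those assumptions; struct fw_slice, le32_at() and fw_slice_get() are hypothetical names, and the driver's additional rounding of early files to its control-buffer size is omitted.

#include <stdint.h>
#include <stddef.h>
#include <stdio.h>

struct fw_slice {
	uint32_t start;
	uint32_t size;
};

/* Minimal stand-in for get_unaligned_le32(). */
static uint32_t le32_at(const uint8_t *p)
{
	return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
	       (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

/* Slice s of the pack: start offset from the header, size from the distance
 * to the next start offset (or to the end of the pack for the last file). */
static void fw_slice_get(const uint8_t *pack, size_t pack_size,
			 uint32_t total_files, uint32_t s, struct fw_slice *out)
{
	out->start = le32_at(&pack[4 + 4 * s]);
	if (s + 1 < total_files)
		out->size = le32_at(&pack[4 + 4 * (s + 1)]) - out->start;
	else
		out->size = (uint32_t)pack_size - out->start;
}

int main(void)
{
	/* Fake two-file pack: count = 2, starts at 12 and 20, pack size 32. */
	uint8_t pack[32] = { 2, 0, 0, 0, 12, 0, 0, 0, 20, 0, 0, 0 };
	struct fw_slice slice;
	uint32_t s;

	for (s = 0; s < le32_at(&pack[0]); s++) {
		fw_slice_get(pack, sizeof(pack), le32_at(&pack[0]), s, &slice);
		printf("file %u: start %u size %u\n", s, slice.start, slice.size);
	}
	return 0;
}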
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2021 pureLiFi
 */

#include <linux/kernel.h>
#include <linux/errno.h>

#include "chip.h"
#include "mac.h"
#include "usb.h"

void plfxlc_chip_init(struct plfxlc_chip *chip,
		      struct ieee80211_hw *hw,
		      struct usb_interface *intf)
{
	memset(chip, 0, sizeof(*chip));

	mutex_init(&chip->mutex);
	plfxlc_usb_init(&chip->usb, hw, intf);
}

void plfxlc_chip_release(struct plfxlc_chip *chip)
{
	plfxlc_usb_release(&chip->usb);
	mutex_destroy(&chip->mutex);
}

int plfxlc_set_beacon_interval(struct plfxlc_chip *chip, u16 interval,
			       u8 dtim_period, int type)
{
	if (!interval ||
	    (chip->beacon_set && chip->beacon_interval == interval))
		return 0;

	chip->beacon_interval = interval;
	chip->beacon_set = true;

	return plfxlc_usb_wreq(chip->usb.ez_usb,
			       &chip->beacon_interval,
			       sizeof(chip->beacon_interval),
			       USB_REQ_BEACON_INTERVAL_WR);
}

int plfxlc_chip_init_hw(struct plfxlc_chip *chip)
{
	unsigned char *addr = plfxlc_mac_get_perm_addr(plfxlc_chip_to_mac(chip));
	struct usb_device *udev = interface_to_usbdev(chip->usb.intf);

	pr_info("plfxlc chip %04x:%04x v%02x %pM %s\n",
		le16_to_cpu(udev->descriptor.idVendor),
		le16_to_cpu(udev->descriptor.idProduct),
		le16_to_cpu(udev->descriptor.bcdDevice),
		addr,
		plfxlc_speed(udev->speed));

	return plfxlc_set_beacon_interval(chip, 100, 0, 0);
}

int plfxlc_chip_switch_radio(struct plfxlc_chip *chip, u16 value)
{
	int r;
	__le16 radio_on = cpu_to_le16(value);

	r = plfxlc_usb_wreq(chip->usb.ez_usb, &radio_on,
			    sizeof(value), USB_REQ_POWER_WR);
	if (r)
		dev_err(plfxlc_chip_dev(chip), "POWER_WR failed (%d)\n", r);

	return r;
}

int plfxlc_chip_enable_rxtx(struct plfxlc_chip *chip)
{
	plfxlc_usb_enable_tx(&chip->usb);
	return plfxlc_usb_enable_rx(&chip->usb);
}

void plfxlc_chip_disable_rxtx(struct plfxlc_chip *chip)
{
	u8 value = 0;

	plfxlc_usb_wreq(chip->usb.ez_usb,
			&value, sizeof(value), USB_REQ_RXTX_WR);
	plfxlc_usb_disable_rx(&chip->usb);
	plfxlc_usb_disable_tx(&chip->usb);
}

int plfxlc_chip_set_rate(struct plfxlc_chip *chip, u8 rate)
{
	int r;

	if (!chip)
		return -EINVAL;

	r = plfxlc_usb_wreq(chip->usb.ez_usb,
			    &rate, sizeof(rate), USB_REQ_RATE_WR);
	if (r)
		dev_err(plfxlc_chip_dev(chip), "RATE_WR failed (%d)\n", r);

	return r;
}
linux-master
drivers/net/wireless/purelifi/plfxlc/chip.c
// SPDX-License-Identifier: GPL-2.0-only /* * Copyright (c) 2021 pureLiFi */ #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/gpio.h> #include <linux/jiffies.h> #include <net/ieee80211_radiotap.h> #include "chip.h" #include "mac.h" #include "usb.h" static const struct ieee80211_rate plfxlc_rates[] = { { .bitrate = 10, .hw_value = PURELIFI_CCK_RATE_1M, .flags = 0 }, { .bitrate = 20, .hw_value = PURELIFI_CCK_RATE_2M, .hw_value_short = PURELIFI_CCK_RATE_2M | PURELIFI_CCK_PREA_SHORT, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .hw_value = PURELIFI_CCK_RATE_5_5M, .hw_value_short = PURELIFI_CCK_RATE_5_5M | PURELIFI_CCK_PREA_SHORT, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .hw_value = PURELIFI_CCK_RATE_11M, .hw_value_short = PURELIFI_CCK_RATE_11M | PURELIFI_CCK_PREA_SHORT, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 60, .hw_value = PURELIFI_OFDM_RATE_6M, .flags = 0 }, { .bitrate = 90, .hw_value = PURELIFI_OFDM_RATE_9M, .flags = 0 }, { .bitrate = 120, .hw_value = PURELIFI_OFDM_RATE_12M, .flags = 0 }, { .bitrate = 180, .hw_value = PURELIFI_OFDM_RATE_18M, .flags = 0 }, { .bitrate = 240, .hw_value = PURELIFI_OFDM_RATE_24M, .flags = 0 }, { .bitrate = 360, .hw_value = PURELIFI_OFDM_RATE_36M, .flags = 0 }, { .bitrate = 480, .hw_value = PURELIFI_OFDM_RATE_48M, .flags = 0 }, { .bitrate = 540, .hw_value = PURELIFI_OFDM_RATE_54M, .flags = 0 } }; static const struct ieee80211_channel plfxlc_channels[] = { { .center_freq = 2412, .hw_value = 1 }, { .center_freq = 2417, .hw_value = 2 }, { .center_freq = 2422, .hw_value = 3 }, { .center_freq = 2427, .hw_value = 4 }, { .center_freq = 2432, .hw_value = 5 }, { .center_freq = 2437, .hw_value = 6 }, { .center_freq = 2442, .hw_value = 7 }, { .center_freq = 2447, .hw_value = 8 }, { .center_freq = 2452, .hw_value = 9 }, { .center_freq = 2457, .hw_value = 10 }, { .center_freq = 2462, .hw_value = 11 }, { .center_freq = 2467, .hw_value = 12 }, { .center_freq = 2472, .hw_value = 13 }, { .center_freq = 2484, .hw_value = 14 }, }; int plfxlc_mac_preinit_hw(struct ieee80211_hw *hw, const u8 *hw_address) { SET_IEEE80211_PERM_ADDR(hw, hw_address); return 0; } int plfxlc_mac_init_hw(struct ieee80211_hw *hw) { struct plfxlc_mac *mac = plfxlc_hw_mac(hw); struct plfxlc_chip *chip = &mac->chip; int r; r = plfxlc_chip_init_hw(chip); if (r) { dev_warn(plfxlc_mac_dev(mac), "init hw failed (%d)\n", r); return r; } dev_dbg(plfxlc_mac_dev(mac), "irq_disabled (%d)\n", irqs_disabled()); regulatory_hint(hw->wiphy, "00"); return r; } void plfxlc_mac_release(struct plfxlc_mac *mac) { plfxlc_chip_release(&mac->chip); lockdep_assert_held(&mac->lock); } int plfxlc_op_start(struct ieee80211_hw *hw) { plfxlc_hw_mac(hw)->chip.usb.initialized = 1; return 0; } void plfxlc_op_stop(struct ieee80211_hw *hw) { struct plfxlc_mac *mac = plfxlc_hw_mac(hw); clear_bit(PURELIFI_DEVICE_RUNNING, &mac->flags); } int plfxlc_restore_settings(struct plfxlc_mac *mac) { int beacon_interval, beacon_period; struct sk_buff *beacon; spin_lock_irq(&mac->lock); beacon_interval = mac->beacon.interval; beacon_period = mac->beacon.period; spin_unlock_irq(&mac->lock); if (mac->type != NL80211_IFTYPE_ADHOC) return 0; if (mac->vif) { beacon = ieee80211_beacon_get(mac->hw, mac->vif, 0); if (beacon) { /*beacon is hardcoded in firmware */ kfree_skb(beacon); /* Returned skb is used only once and lowlevel * driver is responsible for freeing it. 
*/ } } plfxlc_set_beacon_interval(&mac->chip, beacon_interval, beacon_period, mac->type); spin_lock_irq(&mac->lock); mac->beacon.last_update = jiffies; spin_unlock_irq(&mac->lock); return 0; } static void plfxlc_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, int ackssi, struct tx_status *tx_status) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); int success = 1; ieee80211_tx_info_clear_status(info); if (tx_status) success = !tx_status->failure; if (success) info->flags |= IEEE80211_TX_STAT_ACK; else info->flags &= ~IEEE80211_TX_STAT_ACK; info->status.ack_signal = 50; ieee80211_tx_status_irqsafe(hw, skb); } void plfxlc_mac_tx_to_dev(struct sk_buff *skb, int error) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct ieee80211_hw *hw = info->rate_driver_data[0]; struct plfxlc_mac *mac = plfxlc_hw_mac(hw); struct sk_buff_head *q = NULL; ieee80211_tx_info_clear_status(info); skb_pull(skb, sizeof(struct plfxlc_ctrlset)); if (unlikely(error || (info->flags & IEEE80211_TX_CTL_NO_ACK))) { ieee80211_tx_status_irqsafe(hw, skb); return; } q = &mac->ack_wait_queue; skb_queue_tail(q, skb); while (skb_queue_len(q)/* > PURELIFI_MAC_MAX_ACK_WAITERS*/) { plfxlc_mac_tx_status(hw, skb_dequeue(q), mac->ack_pending ? mac->ack_signal : 0, NULL); mac->ack_pending = 0; } } static int plfxlc_fill_ctrlset(struct plfxlc_mac *mac, struct sk_buff *skb) { unsigned int frag_len = skb->len; struct plfxlc_ctrlset *cs; u32 temp_payload_len = 0; unsigned int tmp; u32 temp_len = 0; if (skb_headroom(skb) < sizeof(struct plfxlc_ctrlset)) { dev_dbg(plfxlc_mac_dev(mac), "Not enough hroom(1)\n"); return 1; } cs = (void *)skb_push(skb, sizeof(struct plfxlc_ctrlset)); temp_payload_len = frag_len; temp_len = temp_payload_len + sizeof(struct plfxlc_ctrlset) - sizeof(cs->id) - sizeof(cs->len); /* Data packet lengths must be multiple of four bytes and must * not be a multiple of 512 bytes. First, it is attempted to * append the data packet in the tailroom of the skb. In rare * occasions, the tailroom is too small. In this case, the * content of the packet is shifted into the headroom of the skb * by memcpy. Headroom is allocated at startup (below in this * file). Therefore, there will be always enough headroom. The * call skb_headroom is an additional safety which might be * dropped. 
*/ /* check if 32 bit aligned and align data */ tmp = skb->len & 3; if (tmp) { if (skb_tailroom(skb) < (3 - tmp)) { if (skb_headroom(skb) >= 4 - tmp) { u8 len; u8 *src_pt; u8 *dest_pt; len = skb->len; src_pt = skb->data; dest_pt = skb_push(skb, 4 - tmp); memmove(dest_pt, src_pt, len); } else { return -ENOBUFS; } } else { skb_put(skb, 4 - tmp); } temp_len += 4 - tmp; } /* check if not multiple of 512 and align data */ tmp = skb->len & 0x1ff; if (!tmp) { if (skb_tailroom(skb) < 4) { if (skb_headroom(skb) >= 4) { u8 len = skb->len; u8 *src_pt = skb->data; u8 *dest_pt = skb_push(skb, 4); memmove(dest_pt, src_pt, len); } else { /* should never happen because * sufficient headroom was reserved */ return -ENOBUFS; } } else { skb_put(skb, 4); } temp_len += 4; } cs->id = cpu_to_be32(USB_REQ_DATA_TX); cs->len = cpu_to_be32(temp_len); cs->payload_len_nw = cpu_to_be32(temp_payload_len); return 0; } static void plfxlc_op_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control, struct sk_buff *skb) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct plfxlc_header *plhdr = (void *)skb->data; struct plfxlc_mac *mac = plfxlc_hw_mac(hw); struct plfxlc_usb *usb = &mac->chip.usb; unsigned long flags; int r; r = plfxlc_fill_ctrlset(mac, skb); if (r) goto fail; info->rate_driver_data[0] = hw; if (plhdr->frametype == IEEE80211_FTYPE_DATA) { u8 *dst_mac = plhdr->dmac; u8 sidx; bool found = false; struct plfxlc_usb_tx *tx = &usb->tx; for (sidx = 0; sidx < MAX_STA_NUM; sidx++) { if (!(tx->station[sidx].flag & STATION_CONNECTED_FLAG)) continue; if (memcmp(tx->station[sidx].mac, dst_mac, ETH_ALEN)) continue; found = true; break; } /* Default to broadcast address for unknown MACs */ if (!found) sidx = STA_BROADCAST_INDEX; /* Stop OS from sending packets, if the queue is half full */ if (skb_queue_len(&tx->station[sidx].data_list) > 60) ieee80211_stop_queues(plfxlc_usb_to_hw(usb)); /* Schedule packet for transmission if queue is not full */ if (skb_queue_len(&tx->station[sidx].data_list) > 256) goto fail; skb_queue_tail(&tx->station[sidx].data_list, skb); plfxlc_send_packet_from_data_queue(usb); } else { spin_lock_irqsave(&usb->tx.lock, flags); r = plfxlc_usb_wreq_async(&mac->chip.usb, skb->data, skb->len, USB_REQ_DATA_TX, plfxlc_tx_urb_complete, skb); spin_unlock_irqrestore(&usb->tx.lock, flags); if (r) goto fail; } return; fail: dev_kfree_skb(skb); } static int plfxlc_filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr, struct ieee80211_rx_status *stats) { struct plfxlc_mac *mac = plfxlc_hw_mac(hw); struct sk_buff_head *q; int i, position = 0; unsigned long flags; struct sk_buff *skb; bool found = false; if (!ieee80211_is_ack(rx_hdr->frame_control)) return 0; dev_dbg(plfxlc_mac_dev(mac), "ACK Received\n"); /* code based on zy driver, this logic may need fix */ q = &mac->ack_wait_queue; spin_lock_irqsave(&q->lock, flags); skb_queue_walk(q, skb) { struct ieee80211_hdr *tx_hdr; position++; if (mac->ack_pending && skb_queue_is_first(q, skb)) continue; if (mac->ack_pending == 0) break; tx_hdr = (struct ieee80211_hdr *)skb->data; if (likely(ether_addr_equal(tx_hdr->addr2, rx_hdr->addr1))) { found = 1; break; } } if (found) { for (i = 1; i < position; i++) skb = __skb_dequeue(q); if (i == position) { plfxlc_mac_tx_status(hw, skb, mac->ack_pending ? mac->ack_signal : 0, NULL); mac->ack_pending = 0; } mac->ack_pending = skb_queue_len(q) ? 
1 : 0; mac->ack_signal = stats->signal; } spin_unlock_irqrestore(&q->lock, flags); return 1; } int plfxlc_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length) { struct plfxlc_mac *mac = plfxlc_hw_mac(hw); struct ieee80211_rx_status stats; const struct rx_status *status; unsigned int payload_length; struct plfxlc_usb_tx *tx; struct sk_buff *skb; int need_padding; __le16 fc; int sidx; /* Packet blockade during disabled interface. */ if (!mac->vif) return 0; status = (struct rx_status *)buffer; memset(&stats, 0, sizeof(stats)); stats.flag = 0; stats.freq = 2412; stats.band = NL80211_BAND_LC; mac->rssi = -15 * be16_to_cpu(status->rssi) / 10; stats.signal = mac->rssi; if (status->rate_idx > 7) stats.rate_idx = 0; else stats.rate_idx = status->rate_idx; mac->crc_errors = be64_to_cpu(status->crc_error_count); /* TODO bad frame check for CRC error*/ if (plfxlc_filter_ack(hw, (struct ieee80211_hdr *)buffer, &stats) && !mac->pass_ctrl) return 0; buffer += sizeof(struct rx_status); payload_length = get_unaligned_be32(buffer); if (payload_length > 1560) { dev_err(plfxlc_mac_dev(mac), " > MTU %u\n", payload_length); return 0; } buffer += sizeof(u32); fc = get_unaligned((__le16 *)buffer); need_padding = ieee80211_is_data_qos(fc) ^ ieee80211_has_a4(fc); tx = &mac->chip.usb.tx; for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) { if (memcmp(&buffer[10], tx->station[sidx].mac, ETH_ALEN)) continue; if (tx->station[sidx].flag & STATION_CONNECTED_FLAG) { tx->station[sidx].flag |= STATION_HEARTBEAT_FLAG; break; } } if (sidx == MAX_STA_NUM - 1) { for (sidx = 0; sidx < MAX_STA_NUM - 1; sidx++) { if (tx->station[sidx].flag & STATION_CONNECTED_FLAG) continue; memcpy(tx->station[sidx].mac, &buffer[10], ETH_ALEN); tx->station[sidx].flag |= STATION_CONNECTED_FLAG; tx->station[sidx].flag |= STATION_HEARTBEAT_FLAG; break; } } switch (buffer[0]) { case IEEE80211_STYPE_PROBE_REQ: dev_dbg(plfxlc_mac_dev(mac), "Probe request\n"); break; case IEEE80211_STYPE_ASSOC_REQ: dev_dbg(plfxlc_mac_dev(mac), "Association request\n"); break; case IEEE80211_STYPE_AUTH: dev_dbg(plfxlc_mac_dev(mac), "Authentication req\n"); break; case IEEE80211_FTYPE_DATA: dev_dbg(plfxlc_mac_dev(mac), "802.11 data frame\n"); break; } skb = dev_alloc_skb(payload_length + (need_padding ? 2 : 0)); if (!skb) return -ENOMEM; if (need_padding) /* Make sure that the payload data is 4 byte aligned. 
*/ skb_reserve(skb, 2); skb_put_data(skb, buffer, payload_length); memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats)); ieee80211_rx_irqsafe(hw, skb); return 0; } static int plfxlc_op_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct plfxlc_mac *mac = plfxlc_hw_mac(hw); static const char * const iftype80211[] = { [NL80211_IFTYPE_STATION] = "Station", [NL80211_IFTYPE_ADHOC] = "Adhoc" }; if (mac->type != NL80211_IFTYPE_UNSPECIFIED) return -EOPNOTSUPP; if (vif->type == NL80211_IFTYPE_ADHOC || vif->type == NL80211_IFTYPE_STATION) { dev_dbg(plfxlc_mac_dev(mac), "%s %s\n", __func__, iftype80211[vif->type]); mac->type = vif->type; mac->vif = vif; return 0; } dev_dbg(plfxlc_mac_dev(mac), "unsupported iftype\n"); return -EOPNOTSUPP; } static void plfxlc_op_remove_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif) { struct plfxlc_mac *mac = plfxlc_hw_mac(hw); mac->type = NL80211_IFTYPE_UNSPECIFIED; mac->vif = NULL; } static int plfxlc_op_config(struct ieee80211_hw *hw, u32 changed) { return 0; } #define SUPPORTED_FIF_FLAGS \ (FIF_ALLMULTI | FIF_FCSFAIL | FIF_CONTROL | \ FIF_OTHER_BSS | FIF_BCN_PRBRESP_PROMISC) static void plfxlc_op_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags, unsigned int *new_flags, u64 multicast) { struct plfxlc_mc_hash hash = { .low = multicast, .high = multicast >> 32, }; struct plfxlc_mac *mac = plfxlc_hw_mac(hw); unsigned long flags; /* Only deal with supported flags */ *new_flags &= SUPPORTED_FIF_FLAGS; /* If multicast parameter * (as returned by plfxlc_op_prepare_multicast) * has changed, no bit in changed_flags is set. To handle this * situation, we do not return if changed_flags is 0. If we do so, * we will have some issue with IPv6 which uses multicast for link * layer address resolution. */ if (*new_flags & (FIF_ALLMULTI)) plfxlc_mc_add_all(&hash); spin_lock_irqsave(&mac->lock, flags); mac->pass_failed_fcs = !!(*new_flags & FIF_FCSFAIL); mac->pass_ctrl = !!(*new_flags & FIF_CONTROL); mac->multicast_hash = hash; spin_unlock_irqrestore(&mac->lock, flags); /* no handling required for FIF_OTHER_BSS as we don't currently * do BSSID filtering */ /* FIXME: in future it would be nice to enable the probe response * filter (so that the driver doesn't see them) until * FIF_BCN_PRBRESP_PROMISC is set. however due to atomicity here, we'd * have to schedule work to enable prbresp reception, which might * happen too late. For now we'll just listen and forward them all the * time. */ } static void plfxlc_op_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, u64 changes) { struct plfxlc_mac *mac = plfxlc_hw_mac(hw); int associated; dev_dbg(plfxlc_mac_dev(mac), "changes: %llx\n", changes); if (mac->type != NL80211_IFTYPE_ADHOC) { /* for STATION */ associated = is_valid_ether_addr(bss_conf->bssid); goto exit_all; } /* for ADHOC */ associated = true; if (changes & BSS_CHANGED_BEACON) { struct sk_buff *beacon = ieee80211_beacon_get(hw, vif, 0); if (beacon) { /*beacon is hardcoded in firmware */ kfree_skb(beacon); /*Returned skb is used only once and * low-level driver is * responsible for freeing it. 
*/ } } if (changes & BSS_CHANGED_BEACON_ENABLED) { u16 interval = 0; u8 period = 0; if (bss_conf->enable_beacon) { period = bss_conf->dtim_period; interval = bss_conf->beacon_int; } spin_lock_irq(&mac->lock); mac->beacon.period = period; mac->beacon.interval = interval; mac->beacon.last_update = jiffies; spin_unlock_irq(&mac->lock); plfxlc_set_beacon_interval(&mac->chip, interval, period, mac->type); } exit_all: spin_lock_irq(&mac->lock); mac->associated = associated; spin_unlock_irq(&mac->lock); } static int plfxlc_get_stats(struct ieee80211_hw *hw, struct ieee80211_low_level_stats *stats) { stats->dot11ACKFailureCount = 0; stats->dot11RTSFailureCount = 0; stats->dot11FCSErrorCount = 0; stats->dot11RTSSuccessCount = 0; return 0; } static const char et_strings[][ETH_GSTRING_LEN] = { "phy_rssi", "phy_rx_crc_err" }; static int plfxlc_get_et_sset_count(struct ieee80211_hw *hw, struct ieee80211_vif *vif, int sset) { if (sset == ETH_SS_STATS) return ARRAY_SIZE(et_strings); return 0; } static void plfxlc_get_et_strings(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u32 sset, u8 *data) { if (sset == ETH_SS_STATS) memcpy(data, *et_strings, sizeof(et_strings)); } static void plfxlc_get_et_stats(struct ieee80211_hw *hw, struct ieee80211_vif *vif, struct ethtool_stats *stats, u64 *data) { struct plfxlc_mac *mac = plfxlc_hw_mac(hw); data[0] = mac->rssi; data[1] = mac->crc_errors; } static int plfxlc_set_rts_threshold(struct ieee80211_hw *hw, u32 value) { return 0; } static const struct ieee80211_ops plfxlc_ops = { .tx = plfxlc_op_tx, .wake_tx_queue = ieee80211_handle_wake_tx_queue, .start = plfxlc_op_start, .stop = plfxlc_op_stop, .add_interface = plfxlc_op_add_interface, .remove_interface = plfxlc_op_remove_interface, .set_rts_threshold = plfxlc_set_rts_threshold, .config = plfxlc_op_config, .configure_filter = plfxlc_op_configure_filter, .bss_info_changed = plfxlc_op_bss_info_changed, .get_stats = plfxlc_get_stats, .get_et_sset_count = plfxlc_get_et_sset_count, .get_et_stats = plfxlc_get_et_stats, .get_et_strings = plfxlc_get_et_strings, }; struct ieee80211_hw *plfxlc_mac_alloc_hw(struct usb_interface *intf) { struct ieee80211_hw *hw; struct plfxlc_mac *mac; hw = ieee80211_alloc_hw(sizeof(struct plfxlc_mac), &plfxlc_ops); if (!hw) { dev_dbg(&intf->dev, "out of memory\n"); return NULL; } set_wiphy_dev(hw->wiphy, &intf->dev); mac = plfxlc_hw_mac(hw); memset(mac, 0, sizeof(*mac)); spin_lock_init(&mac->lock); mac->hw = hw; mac->type = NL80211_IFTYPE_UNSPECIFIED; memcpy(mac->channels, plfxlc_channels, sizeof(plfxlc_channels)); memcpy(mac->rates, plfxlc_rates, sizeof(plfxlc_rates)); mac->band.n_bitrates = ARRAY_SIZE(plfxlc_rates); mac->band.bitrates = mac->rates; mac->band.n_channels = ARRAY_SIZE(plfxlc_channels); mac->band.channels = mac->channels; hw->wiphy->bands[NL80211_BAND_LC] = &mac->band; hw->conf.chandef.width = NL80211_CHAN_WIDTH_20; ieee80211_hw_set(hw, RX_INCLUDES_FCS); ieee80211_hw_set(hw, SIGNAL_DBM); ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING); ieee80211_hw_set(hw, MFP_CAPABLE); hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_ADHOC); hw->max_signal = 100; hw->queues = 1; /* 4 for 32 bit alignment if no tailroom */ hw->extra_tx_headroom = sizeof(struct plfxlc_ctrlset) + 4; /* Tell mac80211 that we support multi rate retries */ hw->max_rates = IEEE80211_TX_MAX_RATES; hw->max_rate_tries = 18; /* 9 rates * 2 retries/rate */ skb_queue_head_init(&mac->ack_wait_queue); mac->ack_pending = 0; plfxlc_chip_init(&mac->chip, hw, intf); SET_IEEE80211_DEV(hw, 
&intf->dev); return hw; }
linux-master
drivers/net/wireless/purelifi/plfxlc/mac.c
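The comment block inside plfxlc_fill_ctrlset() above states the two length rules for outgoing frames: the buffer handed to the USB layer must be a multiple of 4 bytes and must not be an exact multiple of 512 bytes (presumably so a bulk transfer never ends exactly on a full endpoint packet). As a reading aid only, here is a minimal standalone sketch of that padding computation; plfxlc_tx_pad_bytes() is a hypothetical helper name, not part of the driver.

/* Illustrative sketch, not driver code: number of pad bytes needed for a
 * buffer of 'len' bytes to satisfy the rules described in
 * plfxlc_fill_ctrlset().
 */
static unsigned int plfxlc_tx_pad_bytes(unsigned int len)
{
	unsigned int pad = 0;

	/* Pad up to the next 32-bit boundary first. */
	if (len & 3)
		pad = 4 - (len & 3);

	/* If the aligned length lands exactly on a 512-byte boundary,
	 * add one more 32-bit word to keep it off that boundary, as the
	 * rule above requires.
	 */
	if (!((len + pad) & 0x1ff))
		pad += 4;

	return pad;
}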
// SPDX-License-Identifier: GPL-2.0-only /* * SPI interface. * * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2011, Sagrad Inc. * Copyright (c) 2010, ST-Ericsson */ #include <linux/module.h> #include <linux/delay.h> #include <linux/gpio/consumer.h> #include <linux/spi/spi.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/of.h> #include "bus.h" #include "wfx.h" #include "hwio.h" #include "main.h" #include "bh.h" #define SET_WRITE 0x7FFF /* usage: and operation */ #define SET_READ 0x8000 /* usage: or operation */ static const struct wfx_platform_data pdata_wf200 = { .file_fw = "wfx/wfm_wf200", .file_pds = "wfx/wf200.pds", .use_rising_clk = true, }; static const struct wfx_platform_data pdata_brd4001a = { .file_fw = "wfx/wfm_wf200", .file_pds = "wfx/brd4001a.pds", .use_rising_clk = true, }; static const struct wfx_platform_data pdata_brd8022a = { .file_fw = "wfx/wfm_wf200", .file_pds = "wfx/brd8022a.pds", .use_rising_clk = true, }; static const struct wfx_platform_data pdata_brd8023a = { .file_fw = "wfx/wfm_wf200", .file_pds = "wfx/brd8023a.pds", .use_rising_clk = true, }; struct wfx_spi_priv { struct spi_device *func; struct wfx_dev *core; struct gpio_desc *gpio_reset; bool need_swab; }; /* The chip reads 16bits of data at time and place them directly into (little endian) CPU register. * So, the chip expects bytes order to be "B1 B0 B3 B2" (while LE is "B0 B1 B2 B3" and BE is * "B3 B2 B1 B0") * * A little endian host with bits_per_word == 16 should do the right job natively. The code below to * support big endian host and commonly used SPI 8bits. */ static int wfx_spi_copy_from_io(void *priv, unsigned int addr, void *dst, size_t count) { struct wfx_spi_priv *bus = priv; u16 regaddr = (addr << 12) | (count / 2) | SET_READ; struct spi_message m; struct spi_transfer t_addr = { .tx_buf = &regaddr, .len = sizeof(regaddr), }; struct spi_transfer t_msg = { .rx_buf = dst, .len = count, }; u16 *dst16 = dst; int ret, i; WARN(count % 2, "buffer size must be a multiple of 2"); cpu_to_le16s(&regaddr); if (bus->need_swab) swab16s(&regaddr); spi_message_init(&m); spi_message_add_tail(&t_addr, &m); spi_message_add_tail(&t_msg, &m); ret = spi_sync(bus->func, &m); if (bus->need_swab && addr == WFX_REG_CONFIG) for (i = 0; i < count / 2; i++) swab16s(&dst16[i]); return ret; } static int wfx_spi_copy_to_io(void *priv, unsigned int addr, const void *src, size_t count) { struct wfx_spi_priv *bus = priv; u16 regaddr = (addr << 12) | (count / 2); /* FIXME: use a bounce buffer */ u16 *src16 = (void *)src; int ret, i; struct spi_message m; struct spi_transfer t_addr = { .tx_buf = &regaddr, .len = sizeof(regaddr), }; struct spi_transfer t_msg = { .tx_buf = src, .len = count, }; WARN(count % 2, "buffer size must be a multiple of 2"); WARN(regaddr & SET_READ, "bad addr or size overflow"); cpu_to_le16s(&regaddr); /* Register address and CONFIG content always use 16bit big endian * ("BADC" order) */ if (bus->need_swab) swab16s(&regaddr); if (bus->need_swab && addr == WFX_REG_CONFIG) for (i = 0; i < count / 2; i++) swab16s(&src16[i]); spi_message_init(&m); spi_message_add_tail(&t_addr, &m); spi_message_add_tail(&t_msg, &m); ret = spi_sync(bus->func, &m); if (bus->need_swab && addr == WFX_REG_CONFIG) for (i = 0; i < count / 2; i++) swab16s(&src16[i]); return ret; } static void wfx_spi_lock(void *priv) { } static void wfx_spi_unlock(void *priv) { } static irqreturn_t wfx_spi_irq_handler(int irq, void *priv) { struct wfx_spi_priv *bus = priv; wfx_bh_request_rx(bus->core); return 
IRQ_HANDLED; } static int wfx_spi_irq_subscribe(void *priv) { struct wfx_spi_priv *bus = priv; u32 flags; flags = irq_get_trigger_type(bus->func->irq); if (!flags) flags = IRQF_TRIGGER_HIGH; flags |= IRQF_ONESHOT; return devm_request_threaded_irq(&bus->func->dev, bus->func->irq, NULL, wfx_spi_irq_handler, flags, "wfx", bus); } static int wfx_spi_irq_unsubscribe(void *priv) { struct wfx_spi_priv *bus = priv; devm_free_irq(&bus->func->dev, bus->func->irq, bus); return 0; } static size_t wfx_spi_align_size(void *priv, size_t size) { /* Most of SPI controllers avoid DMA if buffer size is not 32bit aligned */ return ALIGN(size, 4); } static const struct wfx_hwbus_ops wfx_spi_hwbus_ops = { .copy_from_io = wfx_spi_copy_from_io, .copy_to_io = wfx_spi_copy_to_io, .irq_subscribe = wfx_spi_irq_subscribe, .irq_unsubscribe = wfx_spi_irq_unsubscribe, .lock = wfx_spi_lock, .unlock = wfx_spi_unlock, .align_size = wfx_spi_align_size, }; static int wfx_spi_probe(struct spi_device *func) { struct wfx_platform_data *pdata; struct wfx_spi_priv *bus; int ret; if (!func->bits_per_word) func->bits_per_word = 16; ret = spi_setup(func); if (ret) return ret; pdata = (struct wfx_platform_data *)spi_get_device_id(func)->driver_data; if (!pdata) { dev_err(&func->dev, "unable to retrieve driver data (please report)\n"); return -ENODEV; } /* Trace below is also displayed by spi_setup() if compiled with DEBUG */ dev_dbg(&func->dev, "SPI params: CS=%d, mode=%d bits/word=%d speed=%d\n", spi_get_chipselect(func, 0), func->mode, func->bits_per_word, func->max_speed_hz); if (func->bits_per_word != 16 && func->bits_per_word != 8) dev_warn(&func->dev, "unusual bits/word value: %d\n", func->bits_per_word); if (func->max_speed_hz > 50000000) dev_warn(&func->dev, "%dHz is a very high speed\n", func->max_speed_hz); bus = devm_kzalloc(&func->dev, sizeof(*bus), GFP_KERNEL); if (!bus) return -ENOMEM; bus->func = func; if (func->bits_per_word == 8 || IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) bus->need_swab = true; spi_set_drvdata(func, bus); bus->gpio_reset = devm_gpiod_get_optional(&func->dev, "reset", GPIOD_OUT_LOW); if (IS_ERR(bus->gpio_reset)) return PTR_ERR(bus->gpio_reset); if (!bus->gpio_reset) { dev_warn(&func->dev, "gpio reset is not defined, trying to load firmware anyway\n"); } else { gpiod_set_consumer_name(bus->gpio_reset, "wfx reset"); gpiod_set_value_cansleep(bus->gpio_reset, 1); usleep_range(100, 150); gpiod_set_value_cansleep(bus->gpio_reset, 0); usleep_range(2000, 2500); } bus->core = wfx_init_common(&func->dev, pdata, &wfx_spi_hwbus_ops, bus); if (!bus->core) return -EIO; return wfx_probe(bus->core); } static void wfx_spi_remove(struct spi_device *func) { struct wfx_spi_priv *bus = spi_get_drvdata(func); wfx_release(bus->core); } /* For dynamic driver binding, kernel does not use OF to match driver. It only * use modalias and modalias is a copy of 'compatible' DT node with vendor * stripped. 
*/ static const struct spi_device_id wfx_spi_id[] = { { "wf200", (kernel_ulong_t)&pdata_wf200 }, { "brd4001a", (kernel_ulong_t)&pdata_brd4001a }, { "brd8022a", (kernel_ulong_t)&pdata_brd8022a }, { "brd8023a", (kernel_ulong_t)&pdata_brd8023a }, { }, }; MODULE_DEVICE_TABLE(spi, wfx_spi_id); #ifdef CONFIG_OF static const struct of_device_id wfx_spi_of_match[] = { { .compatible = "silabs,wf200" }, { .compatible = "silabs,brd4001a" }, { .compatible = "silabs,brd8022a" }, { .compatible = "silabs,brd8023a" }, { }, }; MODULE_DEVICE_TABLE(of, wfx_spi_of_match); #endif struct spi_driver wfx_spi_driver = { .driver = { .name = "wfx-spi", .of_match_table = of_match_ptr(wfx_spi_of_match), }, .id_table = wfx_spi_id, .probe = wfx_spi_probe, .remove = wfx_spi_remove, };
linux-master
drivers/net/wireless/silabs/wfx/bus_spi.c
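The byte-order comment near wfx_spi_copy_from_io() above is the central subtlety of this bus driver: the chip consumes 16-bit words and expects them as "B1 B0 B3 B2" on the wire, so a big-endian host or an 8-bit-word SPI controller must swap every 16-bit word (the need_swab path, applied to the register address and to WFX_REG_CONFIG payloads). A minimal sketch of that swap over a plain buffer; wfx_swab16_buf() is a hypothetical name used only for this example.

#include <linux/swab.h>
#include <linux/types.h>

/* Illustrative sketch, not driver code: swap every 16-bit word of a
 * 'count'-byte buffer so that a byte-oriented SPI controller shifts the
 * bytes out in the "B1 B0 B3 B2" order the chip expects.
 */
static void wfx_swab16_buf(u16 *buf, size_t count)
{
	size_t i;

	for (i = 0; i < count / 2; i++)
		swab16s(&buf[i]);
}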
// SPDX-License-Identifier: GPL-2.0-only /* * Interrupt bottom half (BH). * * Copyright (c) 2017-2020, Silicon Laboratories, Inc. * Copyright (c) 2010, ST-Ericsson */ #include <linux/gpio/consumer.h> #include <net/mac80211.h> #include "bh.h" #include "wfx.h" #include "hwio.h" #include "traces.h" #include "hif_rx.h" #include "hif_api_cmd.h" static void device_wakeup(struct wfx_dev *wdev) { int max_retry = 3; if (!wdev->pdata.gpio_wakeup) return; if (gpiod_get_value_cansleep(wdev->pdata.gpio_wakeup) > 0) return; if (wfx_api_older_than(wdev, 1, 4)) { gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1); if (!completion_done(&wdev->hif.ctrl_ready)) usleep_range(2000, 2500); return; } for (;;) { gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 1); /* completion.h does not provide any function to wait completion without consume it * (a kind of wait_for_completion_done_timeout()). So we have to emulate it. */ if (wait_for_completion_timeout(&wdev->hif.ctrl_ready, msecs_to_jiffies(2))) { complete(&wdev->hif.ctrl_ready); return; } else if (max_retry-- > 0) { /* Older firmwares have a race in sleep/wake-up process. Redo the process * is sufficient to unfreeze the chip. */ dev_err(wdev->dev, "timeout while wake up chip\n"); gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0); usleep_range(2000, 2500); } else { dev_err(wdev->dev, "max wake-up retries reached\n"); return; } } } static void device_release(struct wfx_dev *wdev) { if (!wdev->pdata.gpio_wakeup) return; gpiod_set_value_cansleep(wdev->pdata.gpio_wakeup, 0); } static int rx_helper(struct wfx_dev *wdev, size_t read_len, int *is_cnf) { struct sk_buff *skb; struct wfx_hif_msg *hif; size_t alloc_len; size_t computed_len; int release_count; int piggyback = 0; WARN(read_len > round_down(0xFFF, 2) * sizeof(u16), "request exceed the chip capability"); /* Add 2 to take into account piggyback size */ alloc_len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, read_len + 2); skb = dev_alloc_skb(alloc_len); if (!skb) return -ENOMEM; if (wfx_data_read(wdev, skb->data, alloc_len)) goto err; piggyback = le16_to_cpup((__le16 *)(skb->data + alloc_len - 2)); _trace_piggyback(piggyback, false); hif = (struct wfx_hif_msg *)skb->data; WARN(hif->encrypted & 0x3, "encryption is unsupported"); if (WARN(read_len < sizeof(struct wfx_hif_msg), "corrupted read")) goto err; computed_len = le16_to_cpu(hif->len); computed_len = round_up(computed_len, 2); if (computed_len != read_len) { dev_err(wdev->dev, "inconsistent message length: %zu != %zu\n", computed_len, read_len); print_hex_dump(KERN_INFO, "hif: ", DUMP_PREFIX_OFFSET, 16, 1, hif, read_len, true); goto err; } if (!(hif->id & HIF_ID_IS_INDICATION)) { (*is_cnf)++; if (hif->id == HIF_CNF_ID_MULTI_TRANSMIT) release_count = ((struct wfx_hif_cnf_multi_transmit *)hif->body)->num_tx_confs; else release_count = 1; WARN(wdev->hif.tx_buffers_used < release_count, "corrupted buffer counter"); wdev->hif.tx_buffers_used -= release_count; } _trace_hif_recv(hif, wdev->hif.tx_buffers_used); if (hif->id != HIF_IND_ID_EXCEPTION && hif->id != HIF_IND_ID_ERROR) { if (hif->seqnum != wdev->hif.rx_seqnum) dev_warn(wdev->dev, "wrong message sequence: %d != %d\n", hif->seqnum, wdev->hif.rx_seqnum); wdev->hif.rx_seqnum = (hif->seqnum + 1) % (HIF_COUNTER_MAX + 1); } skb_put(skb, le16_to_cpu(hif->len)); /* wfx_handle_rx takes care on SKB livetime */ wfx_handle_rx(wdev, skb); if (!wdev->hif.tx_buffers_used) wake_up(&wdev->hif.tx_buffers_empty); return piggyback; err: if (skb) dev_kfree_skb(skb); return -EIO; } static int bh_work_rx(struct 
wfx_dev *wdev, int max_msg, int *num_cnf) { size_t len; int i; int ctrl_reg, piggyback; piggyback = 0; for (i = 0; i < max_msg; i++) { if (piggyback & CTRL_NEXT_LEN_MASK) ctrl_reg = piggyback; else if (try_wait_for_completion(&wdev->hif.ctrl_ready)) ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, 0); else ctrl_reg = 0; if (!(ctrl_reg & CTRL_NEXT_LEN_MASK)) return i; /* ctrl_reg units are 16bits words */ len = (ctrl_reg & CTRL_NEXT_LEN_MASK) * 2; piggyback = rx_helper(wdev, len, num_cnf); if (piggyback < 0) return i; if (!(piggyback & CTRL_WLAN_READY)) dev_err(wdev->dev, "unexpected piggyback value: ready bit not set: %04x\n", piggyback); } if (piggyback & CTRL_NEXT_LEN_MASK) { ctrl_reg = atomic_xchg(&wdev->hif.ctrl_reg, piggyback); complete(&wdev->hif.ctrl_ready); if (ctrl_reg) dev_err(wdev->dev, "unexpected IRQ happened: %04x/%04x\n", ctrl_reg, piggyback); } return i; } static void tx_helper(struct wfx_dev *wdev, struct wfx_hif_msg *hif) { int ret; void *data; bool is_encrypted = false; size_t len = le16_to_cpu(hif->len); WARN(len < sizeof(*hif), "try to send corrupted data"); hif->seqnum = wdev->hif.tx_seqnum; wdev->hif.tx_seqnum = (wdev->hif.tx_seqnum + 1) % (HIF_COUNTER_MAX + 1); data = hif; WARN(len > le16_to_cpu(wdev->hw_caps.size_inp_ch_buf), "request exceed the chip capability: %zu > %d\n", len, le16_to_cpu(wdev->hw_caps.size_inp_ch_buf)); len = wdev->hwbus_ops->align_size(wdev->hwbus_priv, len); ret = wfx_data_write(wdev, data, len); if (ret) goto end; wdev->hif.tx_buffers_used++; _trace_hif_send(hif, wdev->hif.tx_buffers_used); end: if (is_encrypted) kfree(data); } static int bh_work_tx(struct wfx_dev *wdev, int max_msg) { struct wfx_hif_msg *hif; int i; for (i = 0; i < max_msg; i++) { hif = NULL; if (wdev->hif.tx_buffers_used < le16_to_cpu(wdev->hw_caps.num_inp_ch_bufs)) { if (try_wait_for_completion(&wdev->hif_cmd.ready)) { WARN(!mutex_is_locked(&wdev->hif_cmd.lock), "data locking error"); hif = wdev->hif_cmd.buf_send; } else { hif = wfx_tx_queues_get(wdev); } } if (!hif) return i; tx_helper(wdev, hif); } return i; } /* In SDIO mode, it is necessary to make an access to a register to acknowledge last received * message. It could be possible to restrict this acknowledge to SDIO mode and only if last * operation was rx. 
*/ static void ack_sdio_data(struct wfx_dev *wdev) { u32 cfg_reg; wfx_config_reg_read(wdev, &cfg_reg); if (cfg_reg & 0xFF) { dev_warn(wdev->dev, "chip reports errors: %02x\n", cfg_reg & 0xFF); wfx_config_reg_write_bits(wdev, 0xFF, 0x00); } } static void bh_work(struct work_struct *work) { struct wfx_dev *wdev = container_of(work, struct wfx_dev, hif.bh); int stats_req = 0, stats_cnf = 0, stats_ind = 0; bool release_chip = false, last_op_is_rx = false; int num_tx, num_rx; device_wakeup(wdev); do { num_tx = bh_work_tx(wdev, 32); stats_req += num_tx; if (num_tx) last_op_is_rx = false; num_rx = bh_work_rx(wdev, 32, &stats_cnf); stats_ind += num_rx; if (num_rx) last_op_is_rx = true; } while (num_rx || num_tx); stats_ind -= stats_cnf; if (last_op_is_rx) ack_sdio_data(wdev); if (!wdev->hif.tx_buffers_used && !work_pending(work)) { device_release(wdev); release_chip = true; } _trace_bh_stats(stats_ind, stats_req, stats_cnf, wdev->hif.tx_buffers_used, release_chip); } /* An IRQ from chip did occur */ void wfx_bh_request_rx(struct wfx_dev *wdev) { u32 cur, prev; wfx_control_reg_read(wdev, &cur); prev = atomic_xchg(&wdev->hif.ctrl_reg, cur); complete(&wdev->hif.ctrl_ready); queue_work(wdev->bh_wq, &wdev->hif.bh); if (!(cur & CTRL_NEXT_LEN_MASK)) dev_err(wdev->dev, "unexpected control register value: length field is 0: %04x\n", cur); if (prev != 0) dev_err(wdev->dev, "received IRQ but previous data was not (yet) read: %04x/%04x\n", prev, cur); } /* Driver want to send data */ void wfx_bh_request_tx(struct wfx_dev *wdev) { queue_work(wdev->bh_wq, &wdev->hif.bh); } /* If IRQ is not available, this function allow to manually poll the control register and simulate * an IRQ ahen an event happened. * * Note that the device has a bug: If an IRQ raise while host read control register, the IRQ is * lost. So, use this function carefully (only duing device initialisation). */ void wfx_bh_poll_irq(struct wfx_dev *wdev) { ktime_t now, start; u32 reg; WARN(!wdev->poll_irq, "unexpected IRQ polling can mask IRQ"); flush_workqueue(wdev->bh_wq); start = ktime_get(); for (;;) { wfx_control_reg_read(wdev, &reg); now = ktime_get(); if (reg & 0xFFF) break; if (ktime_after(now, ktime_add_ms(start, 1000))) { dev_err(wdev->dev, "time out while polling control register\n"); return; } udelay(200); } wfx_bh_request_rx(wdev); } void wfx_bh_register(struct wfx_dev *wdev) { INIT_WORK(&wdev->hif.bh, bh_work); init_completion(&wdev->hif.ctrl_ready); init_waitqueue_head(&wdev->hif.tx_buffers_empty); } void wfx_bh_unregister(struct wfx_dev *wdev) { flush_work(&wdev->hif.bh); }
linux-master
drivers/net/wireless/silabs/wfx/bh.c
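device_wakeup() in bh.c above notes that completion.h offers no way to wait on a completion without consuming it, so the driver waits and then immediately re-posts the token when the wait succeeds. A minimal sketch of that emulation as a self-contained helper, assuming a single waiter as in the driver; wait_for_completion_peek_timeout() is a hypothetical name, not an existing kernel API.

#include <linux/completion.h>
#include <linux/types.h>

/* Illustrative sketch, not driver code: wait for a completion without
 * leaving it consumed. If the wait succeeds, hand the token straight
 * back with complete(), mirroring the trick in device_wakeup().
 */
static bool wait_for_completion_peek_timeout(struct completion *x,
					     unsigned long timeout)
{
	if (!wait_for_completion_timeout(x, timeout))
		return false;

	complete(x);
	return true;
}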