// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include "core.h"
#include "htc.h"
#include "htt.h"
#include "txrx.h"
#include "debug.h"
#include "trace.h"
#include "mac.h"
#include <linux/log2.h>
#include <linux/bitfield.h>
/* when under memory pressure rx ring refill may fail and needs a retry */
#define HTT_RX_RING_REFILL_RETRY_MS 50
#define HTT_RX_RING_REFILL_RESCHED_MS 5
/* shortcut to interpret a raw memory buffer as a rx descriptor */
#define HTT_RX_BUF_TO_RX_DESC(hw, buf) ath10k_htt_rx_desc_from_raw_buffer(hw, buf)
static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb);
static struct sk_buff *
ath10k_htt_rx_find_skb_paddr(struct ath10k *ar, u64 paddr)
{
struct ath10k_skb_rxcb *rxcb;
hash_for_each_possible(ar->htt.rx_ring.skb_table, rxcb, hlist, paddr)
if (rxcb->paddr == paddr)
return ATH10K_RXCB_SKB(rxcb);
WARN_ON_ONCE(1);
return NULL;
}
static void ath10k_htt_rx_ring_free(struct ath10k_htt *htt)
{
struct sk_buff *skb;
struct ath10k_skb_rxcb *rxcb;
struct hlist_node *n;
int i;
if (htt->rx_ring.in_ord_rx) {
hash_for_each_safe(htt->rx_ring.skb_table, i, n, rxcb, hlist) {
skb = ATH10K_RXCB_SKB(rxcb);
dma_unmap_single(htt->ar->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
hash_del(&rxcb->hlist);
dev_kfree_skb_any(skb);
}
} else {
for (i = 0; i < htt->rx_ring.size; i++) {
skb = htt->rx_ring.netbufs_ring[i];
if (!skb)
continue;
rxcb = ATH10K_SKB_RXCB(skb);
dma_unmap_single(htt->ar->dev, rxcb->paddr,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
dev_kfree_skb_any(skb);
}
}
htt->rx_ring.fill_cnt = 0;
hash_init(htt->rx_ring.skb_table);
memset(htt->rx_ring.netbufs_ring, 0,
htt->rx_ring.size * sizeof(htt->rx_ring.netbufs_ring[0]));
}
static size_t ath10k_htt_get_rx_ring_size_32(struct ath10k_htt *htt)
{
return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_32);
}
static size_t ath10k_htt_get_rx_ring_size_64(struct ath10k_htt *htt)
{
return htt->rx_ring.size * sizeof(htt->rx_ring.paddrs_ring_64);
}
static void ath10k_htt_config_paddrs_ring_32(struct ath10k_htt *htt,
void *vaddr)
{
htt->rx_ring.paddrs_ring_32 = vaddr;
}
static void ath10k_htt_config_paddrs_ring_64(struct ath10k_htt *htt,
void *vaddr)
{
htt->rx_ring.paddrs_ring_64 = vaddr;
}
static void ath10k_htt_set_paddrs_ring_32(struct ath10k_htt *htt,
dma_addr_t paddr, int idx)
{
htt->rx_ring.paddrs_ring_32[idx] = __cpu_to_le32(paddr);
}
static void ath10k_htt_set_paddrs_ring_64(struct ath10k_htt *htt,
dma_addr_t paddr, int idx)
{
htt->rx_ring.paddrs_ring_64[idx] = __cpu_to_le64(paddr);
}
static void ath10k_htt_reset_paddrs_ring_32(struct ath10k_htt *htt, int idx)
{
htt->rx_ring.paddrs_ring_32[idx] = 0;
}
static void ath10k_htt_reset_paddrs_ring_64(struct ath10k_htt *htt, int idx)
{
htt->rx_ring.paddrs_ring_64[idx] = 0;
}
static void *ath10k_htt_get_vaddr_ring_32(struct ath10k_htt *htt)
{
return (void *)htt->rx_ring.paddrs_ring_32;
}
static void *ath10k_htt_get_vaddr_ring_64(struct ath10k_htt *htt)
{
return (void *)htt->rx_ring.paddrs_ring_64;
}
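/* Post up to @num fresh rx buffers to the ring, starting at the shared
 * alloc index. The index lives in coherent DMA memory visible to the
 * firmware, which is why the write barrier below is needed before the
 * updated index is published.
 */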
static int __ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
struct ath10k_hw_params *hw = &htt->ar->hw_params;
struct htt_rx_desc *rx_desc;
struct ath10k_skb_rxcb *rxcb;
struct sk_buff *skb;
dma_addr_t paddr;
int ret = 0, idx;
/* The Full Rx Reorder firmware has no way of telling the host
 * implicitly when it has copied HTT Rx Ring buffers to the MAC Rx Ring.
 * To keep things simple, make sure the ring is always half empty. This
 * guarantees there'll be no replenishment overruns possible.
 */
BUILD_BUG_ON(HTT_RX_RING_FILL_LEVEL >= HTT_RX_RING_SIZE / 2);
idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);
if (idx < 0 || idx >= htt->rx_ring.size) {
ath10k_err(htt->ar, "rx ring index is not valid, firmware malfunctioning?\n");
idx &= htt->rx_ring.size_mask;
ret = -ENOMEM;
goto fail;
}
while (num > 0) {
skb = dev_alloc_skb(HTT_RX_BUF_SIZE + HTT_RX_DESC_ALIGN);
if (!skb) {
ret = -ENOMEM;
goto fail;
}
if (!IS_ALIGNED((unsigned long)skb->data, HTT_RX_DESC_ALIGN))
skb_pull(skb,
PTR_ALIGN(skb->data, HTT_RX_DESC_ALIGN) -
skb->data);
/* Clear rx_desc attention word before posting to Rx ring */
rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, skb->data);
ath10k_htt_rx_desc_get_attention(hw, rx_desc)->flags = __cpu_to_le32(0);
paddr = dma_map_single(htt->ar->dev, skb->data,
skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(htt->ar->dev, paddr))) {
dev_kfree_skb_any(skb);
ret = -ENOMEM;
goto fail;
}
rxcb = ATH10K_SKB_RXCB(skb);
rxcb->paddr = paddr;
htt->rx_ring.netbufs_ring[idx] = skb;
ath10k_htt_set_paddrs_ring(htt, paddr, idx);
htt->rx_ring.fill_cnt++;
if (htt->rx_ring.in_ord_rx) {
hash_add(htt->rx_ring.skb_table,
&ATH10K_SKB_RXCB(skb)->hlist,
paddr);
}
num--;
idx++;
idx &= htt->rx_ring.size_mask;
}
fail:
/*
* Make sure the rx buffer is updated before available buffer
* index to avoid any potential rx ring corruption.
*/
mb();
*htt->rx_ring.alloc_idx.vaddr = __cpu_to_le32(idx);
return ret;
}
static int ath10k_htt_rx_ring_fill_n(struct ath10k_htt *htt, int num)
{
lockdep_assert_held(&htt->rx_ring.lock);
return __ath10k_htt_rx_ring_fill_n(htt, num);
}
static void ath10k_htt_rx_msdu_buff_replenish(struct ath10k_htt *htt)
{
int ret, num_deficit, num_to_fill;
/* Refilling the whole RX ring buffer proves to be a bad idea. The
 * reason is RX may take up a significant amount of CPU cycles and starve
 * other tasks, e.g. TX on an ethernet device while acting as a bridge
 * with an ath10k wlan interface. This ended up with very poor performance
 * once the host system's CPU was overwhelmed with RX on ath10k.
 *
 * By limiting the number of refills the replenishing occurs
 * progressively. This in turn makes use of the fact that tasklets are
 * processed in FIFO order. This means actual RX processing can starve
 * out refilling. If there aren't enough buffers on the RX ring the FW
 * will not report RX until the ring is refilled with enough buffers.
 * This automatically balances load with respect to CPU power.
 *
 * This probably comes at a cost of lower maximum throughput but
 * improves the average and stability.
 */
spin_lock_bh(&htt->rx_ring.lock);
num_deficit = htt->rx_ring.fill_level - htt->rx_ring.fill_cnt;
num_to_fill = min(ATH10K_HTT_MAX_NUM_REFILL, num_deficit);
num_deficit -= num_to_fill;
ret = ath10k_htt_rx_ring_fill_n(htt, num_to_fill);
if (ret == -ENOMEM) {
/*
* Failed to fill it to the desired level -
* we'll start a timer and try again next time.
* As long as enough buffers are left in the ring for
* another A-MPDU rx, no special recovery is needed.
*/
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RETRY_MS));
} else if (num_deficit > 0) {
mod_timer(&htt->rx_ring.refill_retry_timer, jiffies +
msecs_to_jiffies(HTT_RX_RING_REFILL_RESCHED_MS));
}
spin_unlock_bh(&htt->rx_ring.lock);
}
static void ath10k_htt_rx_ring_refill_retry(struct timer_list *t)
{
struct ath10k_htt *htt = from_timer(htt, t, rx_ring.refill_retry_timer);
ath10k_htt_rx_msdu_buff_replenish(htt);
}
int ath10k_htt_rx_ring_refill(struct ath10k *ar)
{
struct ath10k_htt *htt = &ar->htt;
int ret;
if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
return 0;
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
if (ret)
ath10k_htt_rx_ring_free(htt);
spin_unlock_bh(&htt->rx_ring.lock);
return ret;
}
void ath10k_htt_rx_free(struct ath10k_htt *htt)
{
if (htt->ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
return;
del_timer_sync(&htt->rx_ring.refill_retry_timer);
skb_queue_purge(&htt->rx_msdus_q);
skb_queue_purge(&htt->rx_in_ord_compl_q);
skb_queue_purge(&htt->tx_fetch_ind_q);
spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_ring_free(htt);
spin_unlock_bh(&htt->rx_ring.lock);
dma_free_coherent(htt->ar->dev,
ath10k_htt_get_rx_ring_size(htt),
ath10k_htt_get_vaddr_ring(htt),
htt->rx_ring.base_paddr);
ath10k_htt_config_paddrs_ring(htt, NULL);
dma_free_coherent(htt->ar->dev,
sizeof(*htt->rx_ring.alloc_idx.vaddr),
htt->rx_ring.alloc_idx.vaddr,
htt->rx_ring.alloc_idx.paddr);
htt->rx_ring.alloc_idx.vaddr = NULL;
kfree(htt->rx_ring.netbufs_ring);
htt->rx_ring.netbufs_ring = NULL;
}
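/* Pop one filled buffer from the ring in software read order. Used by
 * the non in-order rx path; the caller must hold rx_ring.lock.
 */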
static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
int idx;
struct sk_buff *msdu;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_ring.fill_cnt == 0) {
ath10k_warn(ar, "tried to pop sk_buff from an empty rx ring\n");
return NULL;
}
idx = htt->rx_ring.sw_rd_idx.msdu_payld;
msdu = htt->rx_ring.netbufs_ring[idx];
htt->rx_ring.netbufs_ring[idx] = NULL;
ath10k_htt_reset_paddrs_ring(htt, idx);
idx++;
idx &= htt->rx_ring.size_mask;
htt->rx_ring.sw_rd_idx.msdu_payld = idx;
htt->rx_ring.fill_cnt--;
dma_unmap_single(htt->ar->dev,
ATH10K_SKB_RXCB(msdu)->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
msdu->data, msdu->len + skb_tailroom(msdu));
return msdu;
}
/* return: < 0 on fatal error, 0 for a non-chained msdu, 1 for a chained msdu */
static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
struct sk_buff_head *amsdu)
{
struct ath10k *ar = htt->ar;
struct ath10k_hw_params *hw = &ar->hw_params;
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu;
struct htt_rx_desc *rx_desc;
struct rx_attention *rx_desc_attention;
struct rx_frag_info_common *rx_desc_frag_info_common;
struct rx_msdu_start_common *rx_desc_msdu_start_common;
struct rx_msdu_end_common *rx_desc_msdu_end_common;
lockdep_assert_held(&htt->rx_ring.lock);
for (;;) {
int last_msdu, msdu_len_invalid, msdu_chained;
msdu = ath10k_htt_rx_netbuf_pop(htt);
if (!msdu) {
__skb_queue_purge(amsdu);
return -ENOENT;
}
__skb_queue_tail(amsdu, msdu);
rx_desc = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
rx_desc_attention = ath10k_htt_rx_desc_get_attention(hw, rx_desc);
rx_desc_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw,
rx_desc);
rx_desc_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rx_desc);
rx_desc_frag_info_common = ath10k_htt_rx_desc_get_frag_info(hw, rx_desc);
/* FIXME: we must report msdu payload since this is what caller
* expects now
*/
skb_put(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
skb_pull(msdu, hw->rx_desc_ops->rx_desc_msdu_payload_offset);
/*
* Sanity check - confirm the HW is finished filling in the
* rx data.
* If the HW and SW are working correctly, then it's guaranteed
* that the HW's MAC DMA is done before this point in the SW.
* To prevent the case that we handle a stale Rx descriptor,
* just assert for now until we have a way to recover.
*/
if (!(__le32_to_cpu(rx_desc_attention->flags)
& RX_ATTENTION_FLAGS_MSDU_DONE)) {
__skb_queue_purge(amsdu);
return -EIO;
}
msdu_len_invalid = !!(__le32_to_cpu(rx_desc_attention->flags)
& (RX_ATTENTION_FLAGS_MPDU_LENGTH_ERR |
RX_ATTENTION_FLAGS_MSDU_LENGTH_ERR));
msdu_len = MS(__le32_to_cpu(rx_desc_msdu_start_common->info0),
RX_MSDU_START_INFO0_MSDU_LENGTH);
msdu_chained = rx_desc_frag_info_common->ring2_more_count;
if (msdu_len_invalid)
msdu_len = 0;
skb_trim(msdu, 0);
skb_put(msdu, min(msdu_len, ath10k_htt_rx_msdu_size(hw)));
msdu_len -= msdu->len;
/* Note: Chained buffers do not contain an rx descriptor */
while (msdu_chained--) {
msdu = ath10k_htt_rx_netbuf_pop(htt);
if (!msdu) {
__skb_queue_purge(amsdu);
return -ENOENT;
}
__skb_queue_tail(amsdu, msdu);
skb_trim(msdu, 0);
skb_put(msdu, min(msdu_len, HTT_RX_BUF_SIZE));
msdu_len -= msdu->len;
msdu_chaining = 1;
}
last_msdu = __le32_to_cpu(rx_desc_msdu_end_common->info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
/* FIXME: why are we skipping the first part of the rx_desc? */
trace_ath10k_htt_rx_desc(ar, (void *)rx_desc + sizeof(u32),
hw->rx_desc_ops->rx_desc_size - sizeof(u32));
if (last_msdu)
break;
}
if (skb_queue_empty(amsdu))
msdu_chaining = -1;
/*
* Don't refill the ring yet.
*
* First, the elements popped here are still in use - it is not
* safe to overwrite them until the matching call to
* mpdu_desc_list_next. Second, for efficiency it is preferable to
* refill the rx ring with 1 PPDU's worth of rx buffers (something
* like 32 x 3 buffers), rather than one MPDU's worth of rx buffers
* (something like 3 buffers). Consequently, we'll rely on the txrx
* SW to tell us when it is done pulling all the PPDU's rx buffers
* out of the rx ring, and then refill it just once.
*/
return msdu_chaining;
}
static struct sk_buff *ath10k_htt_rx_pop_paddr(struct ath10k_htt *htt,
u64 paddr)
{
struct ath10k *ar = htt->ar;
struct ath10k_skb_rxcb *rxcb;
struct sk_buff *msdu;
lockdep_assert_held(&htt->rx_ring.lock);
msdu = ath10k_htt_rx_find_skb_paddr(ar, paddr);
if (!msdu)
return NULL;
rxcb = ATH10K_SKB_RXCB(msdu);
hash_del(&rxcb->hlist);
htt->rx_ring.fill_cnt--;
dma_unmap_single(htt->ar->dev, rxcb->paddr,
msdu->len + skb_tailroom(msdu),
DMA_FROM_DEVICE);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx netbuf pop: ",
msdu->data, msdu->len + skb_tailroom(msdu));
return msdu;
}
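/* Link a chained rx buffer onto the head skb via its frag_list and
 * account the chained bytes in the head skb's data_len and len.
 */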
static inline void ath10k_htt_append_frag_list(struct sk_buff *skb_head,
struct sk_buff *frag_list,
unsigned int frag_len)
{
skb_shinfo(skb_head)->frag_list = frag_list;
skb_head->data_len = frag_len;
skb_head->len += skb_head->data_len;
}
static int ath10k_htt_rx_handle_amsdu_mon_32(struct ath10k_htt *htt,
struct sk_buff *msdu,
struct htt_rx_in_ord_msdu_desc **msdu_desc)
{
struct ath10k *ar = htt->ar;
struct ath10k_hw_params *hw = &ar->hw_params;
u32 paddr;
struct sk_buff *frag_buf;
struct sk_buff *prev_frag_buf;
u8 last_frag;
struct htt_rx_in_ord_msdu_desc *ind_desc = *msdu_desc;
struct htt_rx_desc *rxd;
int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
amsdu_len -= msdu->len;
last_frag = ind_desc->reserved;
if (last_frag) {
if (amsdu_len) {
ath10k_warn(ar, "invalid amsdu len %u, left %d",
__le16_to_cpu(ind_desc->msdu_len),
amsdu_len);
}
return 0;
}
ind_desc++;
paddr = __le32_to_cpu(ind_desc->msdu_paddr);
frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
if (!frag_buf) {
ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%x", paddr);
return -ENOENT;
}
skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
amsdu_len -= frag_buf->len;
prev_frag_buf = frag_buf;
last_frag = ind_desc->reserved;
while (!last_frag) {
ind_desc++;
paddr = __le32_to_cpu(ind_desc->msdu_paddr);
frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
if (!frag_buf) {
ath10k_warn(ar, "failed to pop frag-n paddr: 0x%x",
paddr);
prev_frag_buf->next = NULL;
return -ENOENT;
}
skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
last_frag = ind_desc->reserved;
amsdu_len -= frag_buf->len;
prev_frag_buf->next = frag_buf;
prev_frag_buf = frag_buf;
}
if (amsdu_len) {
ath10k_warn(ar, "invalid amsdu len %u, left %d",
__le16_to_cpu(ind_desc->msdu_len), amsdu_len);
}
*msdu_desc = ind_desc;
prev_frag_buf->next = NULL;
return 0;
}
static int
ath10k_htt_rx_handle_amsdu_mon_64(struct ath10k_htt *htt,
struct sk_buff *msdu,
struct htt_rx_in_ord_msdu_desc_ext **msdu_desc)
{
struct ath10k *ar = htt->ar;
struct ath10k_hw_params *hw = &ar->hw_params;
u64 paddr;
struct sk_buff *frag_buf;
struct sk_buff *prev_frag_buf;
u8 last_frag;
struct htt_rx_in_ord_msdu_desc_ext *ind_desc = *msdu_desc;
struct htt_rx_desc *rxd;
int amsdu_len = __le16_to_cpu(ind_desc->msdu_len);
rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, min(amsdu_len, ath10k_htt_rx_msdu_size(hw)));
amsdu_len -= msdu->len;
last_frag = ind_desc->reserved;
if (last_frag) {
if (amsdu_len) {
ath10k_warn(ar, "invalid amsdu len %u, left %d",
__le16_to_cpu(ind_desc->msdu_len),
amsdu_len);
}
return 0;
}
ind_desc++;
paddr = __le64_to_cpu(ind_desc->msdu_paddr);
frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
if (!frag_buf) {
ath10k_warn(ar, "failed to pop frag-1 paddr: 0x%llx", paddr);
return -ENOENT;
}
skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
ath10k_htt_append_frag_list(msdu, frag_buf, amsdu_len);
amsdu_len -= frag_buf->len;
prev_frag_buf = frag_buf;
last_frag = ind_desc->reserved;
while (!last_frag) {
ind_desc++;
paddr = __le64_to_cpu(ind_desc->msdu_paddr);
frag_buf = ath10k_htt_rx_pop_paddr(htt, paddr);
if (!frag_buf) {
ath10k_warn(ar, "failed to pop frag-n paddr: 0x%llx",
paddr);
prev_frag_buf->next = NULL;
return -ENOENT;
}
skb_put(frag_buf, min(amsdu_len, HTT_RX_BUF_SIZE));
last_frag = ind_desc->reserved;
amsdu_len -= frag_buf->len;
prev_frag_buf->next = frag_buf;
prev_frag_buf = frag_buf;
}
if (amsdu_len) {
ath10k_warn(ar, "invalid amsdu len %u, left %d",
__le16_to_cpu(ind_desc->msdu_len), amsdu_len);
}
*msdu_desc = ind_desc;
prev_frag_buf->next = NULL;
return 0;
}
static int ath10k_htt_rx_pop_paddr32_list(struct ath10k_htt *htt,
struct htt_rx_in_ord_ind *ev,
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_in_ord_msdu_desc *msdu_desc = ev->msdu_descs32;
struct htt_rx_desc *rxd;
struct rx_attention *rxd_attention;
struct sk_buff *msdu;
int msdu_count, ret;
bool is_offload;
u32 paddr;
lockdep_assert_held(&htt->rx_ring.lock);
msdu_count = __le16_to_cpu(ev->msdu_count);
is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
while (msdu_count--) {
paddr = __le32_to_cpu(msdu_desc->msdu_paddr);
msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
if (!msdu) {
__skb_queue_purge(list);
return -ENOENT;
}
if (!is_offload && ar->monitor_arvif) {
ret = ath10k_htt_rx_handle_amsdu_mon_32(htt, msdu,
&msdu_desc);
if (ret) {
__skb_queue_purge(list);
return ret;
}
__skb_queue_tail(list, msdu);
msdu_desc++;
continue;
}
__skb_queue_tail(list, msdu);
if (!is_offload) {
rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
}
}
msdu_desc++;
}
return 0;
}
static int ath10k_htt_rx_pop_paddr64_list(struct ath10k_htt *htt,
struct htt_rx_in_ord_ind *ev,
struct sk_buff_head *list)
{
struct ath10k *ar = htt->ar;
struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_in_ord_msdu_desc_ext *msdu_desc = ev->msdu_descs64;
struct htt_rx_desc *rxd;
struct rx_attention *rxd_attention;
struct sk_buff *msdu;
int msdu_count, ret;
bool is_offload;
u64 paddr;
lockdep_assert_held(&htt->rx_ring.lock);
msdu_count = __le16_to_cpu(ev->msdu_count);
is_offload = !!(ev->info & HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
while (msdu_count--) {
paddr = __le64_to_cpu(msdu_desc->msdu_paddr);
msdu = ath10k_htt_rx_pop_paddr(htt, paddr);
if (!msdu) {
__skb_queue_purge(list);
return -ENOENT;
}
if (!is_offload && ar->monitor_arvif) {
ret = ath10k_htt_rx_handle_amsdu_mon_64(htt, msdu,
&msdu_desc);
if (ret) {
__skb_queue_purge(list);
return ret;
}
__skb_queue_tail(list, msdu);
msdu_desc++;
continue;
}
__skb_queue_tail(list, msdu);
if (!is_offload) {
rxd = HTT_RX_BUF_TO_RX_DESC(hw, msdu->data);
rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
trace_ath10k_htt_rx_desc(ar, rxd, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, hw->rx_desc_ops->rx_desc_size);
skb_pull(msdu, hw->rx_desc_ops->rx_desc_size);
skb_put(msdu, __le16_to_cpu(msdu_desc->msdu_len));
if (!(__le32_to_cpu(rxd_attention->flags) &
RX_ATTENTION_FLAGS_MSDU_DONE)) {
ath10k_warn(htt->ar, "tried to pop an incomplete frame, oops!\n");
return -EIO;
}
}
msdu_desc++;
}
return 0;
}
int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
dma_addr_t paddr;
void *vaddr, *vaddr_ring;
size_t size;
struct timer_list *timer = &htt->rx_ring.refill_retry_timer;
if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
return 0;
htt->rx_confused = false;
/* XXX: The fill level could be changed during runtime in response to
* the host processing latency. Is this really worth it?
*/
htt->rx_ring.size = HTT_RX_RING_SIZE;
htt->rx_ring.size_mask = htt->rx_ring.size - 1;
htt->rx_ring.fill_level = ar->hw_params.rx_ring_fill_level;
if (!is_power_of_2(htt->rx_ring.size)) {
ath10k_warn(ar, "htt rx ring size is not power of 2\n");
return -EINVAL;
}
htt->rx_ring.netbufs_ring =
kcalloc(htt->rx_ring.size, sizeof(struct sk_buff *),
GFP_KERNEL);
if (!htt->rx_ring.netbufs_ring)
goto err_netbuf;
size = ath10k_htt_get_rx_ring_size(htt);
vaddr_ring = dma_alloc_coherent(htt->ar->dev, size, &paddr, GFP_KERNEL);
if (!vaddr_ring)
goto err_dma_ring;
ath10k_htt_config_paddrs_ring(htt, vaddr_ring);
htt->rx_ring.base_paddr = paddr;
vaddr = dma_alloc_coherent(htt->ar->dev,
sizeof(*htt->rx_ring.alloc_idx.vaddr),
&paddr, GFP_KERNEL);
if (!vaddr)
goto err_dma_idx;
htt->rx_ring.alloc_idx.vaddr = vaddr;
htt->rx_ring.alloc_idx.paddr = paddr;
htt->rx_ring.sw_rd_idx.msdu_payld = htt->rx_ring.size_mask;
*htt->rx_ring.alloc_idx.vaddr = 0;
/* Initialize the Rx refill retry timer */
timer_setup(timer, ath10k_htt_rx_ring_refill_retry, 0);
spin_lock_init(&htt->rx_ring.lock);
htt->rx_ring.fill_cnt = 0;
htt->rx_ring.sw_rd_idx.msdu_payld = 0;
hash_init(htt->rx_ring.skb_table);
skb_queue_head_init(&htt->rx_msdus_q);
skb_queue_head_init(&htt->rx_in_ord_compl_q);
skb_queue_head_init(&htt->tx_fetch_ind_q);
atomic_set(&htt->num_mpdus_ready, 0);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt rx ring size %d fill_level %d\n",
htt->rx_ring.size, htt->rx_ring.fill_level);
return 0;
err_dma_idx:
dma_free_coherent(htt->ar->dev,
ath10k_htt_get_rx_ring_size(htt),
vaddr_ring,
htt->rx_ring.base_paddr);
ath10k_htt_config_paddrs_ring(htt, NULL);
err_dma_ring:
kfree(htt->rx_ring.netbufs_ring);
htt->rx_ring.netbufs_ring = NULL;
err_netbuf:
return -ENOMEM;
}
static int ath10k_htt_rx_crypto_param_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
return IEEE80211_WEP_IV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
return IEEE80211_TKIP_IV_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_HDR_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
return IEEE80211_CCMP_256_HDR_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
return IEEE80211_GCMP_HDR_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
}
ath10k_warn(ar, "unsupported encryption type %d\n", type);
return 0;
}
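/* Michael MIC length (TKIP), in bytes */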
#define MICHAEL_MIC_LEN 8
static int ath10k_htt_rx_crypto_mic_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
return 0;
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
return IEEE80211_CCMP_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
return IEEE80211_CCMP_256_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
return IEEE80211_GCMP_MIC_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
}
ath10k_warn(ar, "unsupported encryption type %d\n", type);
return 0;
}
static int ath10k_htt_rx_crypto_icv_len(struct ath10k *ar,
enum htt_rx_mpdu_encrypt_type type)
{
switch (type) {
case HTT_RX_MPDU_ENCRYPT_NONE:
case HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2:
case HTT_RX_MPDU_ENCRYPT_AES_CCM256_WPA2:
case HTT_RX_MPDU_ENCRYPT_AES_GCMP_WPA2:
case HTT_RX_MPDU_ENCRYPT_AES_GCMP256_WPA2:
return 0;
case HTT_RX_MPDU_ENCRYPT_WEP40:
case HTT_RX_MPDU_ENCRYPT_WEP104:
return IEEE80211_WEP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_TKIP_WITHOUT_MIC:
case HTT_RX_MPDU_ENCRYPT_TKIP_WPA:
return IEEE80211_TKIP_ICV_LEN;
case HTT_RX_MPDU_ENCRYPT_WEP128:
case HTT_RX_MPDU_ENCRYPT_WAPI:
break;
}
ath10k_warn(ar, "unsupported encryption type %d\n", type);
return 0;
}
struct amsdu_subframe_hdr {
u8 dst[ETH_ALEN];
u8 src[ETH_ALEN];
__be16 len;
} __packed;
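/* In VHT-SIG-A, group IDs 0 and 63 denote SU (non-MU) transmissions */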
#define GROUP_ID_IS_SU_MIMO(x) ((x) == 0 || (x) == 63)
static inline u8 ath10k_bw_to_mac80211_bw(u8 bw)
{
u8 ret = 0;
switch (bw) {
case 0:
ret = RATE_INFO_BW_20;
break;
case 1:
ret = RATE_INFO_BW_40;
break;
case 2:
ret = RATE_INFO_BW_80;
break;
case 3:
ret = RATE_INFO_BW_160;
break;
}
return ret;
}
static void ath10k_htt_rx_h_rates(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
struct ath10k_hw_params *hw = &ar->hw_params;
struct rx_attention *rxd_attention;
struct rx_mpdu_start *rxd_mpdu_start;
struct rx_mpdu_end *rxd_mpdu_end;
struct rx_msdu_start_common *rxd_msdu_start_common;
struct rx_msdu_end_common *rxd_msdu_end_common;
struct rx_ppdu_start *rxd_ppdu_start;
struct ieee80211_supported_band *sband;
u8 cck, rate, bw, sgi, mcs, nss;
u8 *rxd_msdu_payload;
u8 preamble = 0;
u8 group_id;
u32 info1, info2, info3;
u32 stbc, nsts_su;
rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
rxd_mpdu_end = ath10k_htt_rx_desc_get_mpdu_end(hw, rxd);
rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
rxd_msdu_payload = ath10k_htt_rx_desc_get_msdu_payload(hw, rxd);
info1 = __le32_to_cpu(rxd_ppdu_start->info1);
info2 = __le32_to_cpu(rxd_ppdu_start->info2);
info3 = __le32_to_cpu(rxd_ppdu_start->info3);
preamble = MS(info1, RX_PPDU_START_INFO1_PREAMBLE_TYPE);
switch (preamble) {
case HTT_RX_LEGACY:
/* To get the legacy rate index the band is required. Since the
 * band can't be undefined, check if freq is non-zero.
 */
if (!status->freq)
return;
cck = info1 & RX_PPDU_START_INFO1_L_SIG_RATE_SELECT;
rate = MS(info1, RX_PPDU_START_INFO1_L_SIG_RATE);
rate &= ~RX_PPDU_START_RATE_FLAG;
sband = &ar->mac.sbands[status->band];
status->rate_idx = ath10k_mac_hw_rate_to_idx(sband, rate, cck);
break;
case HTT_RX_HT:
case HTT_RX_HT_WITH_TXBF:
/* HT-SIG - Table 20-11 in info2 and info3 */
mcs = info2 & 0x1F;
nss = mcs >> 3;
bw = (info2 >> 7) & 1;
sgi = (info3 >> 7) & 1;
status->rate_idx = mcs;
status->encoding = RX_ENC_HT;
if (sgi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
if (bw)
status->bw = RATE_INFO_BW_40;
break;
case HTT_RX_VHT:
case HTT_RX_VHT_WITH_TXBF:
/* VHT-SIG-A1 in info2, VHT-SIG-A2 in info3
* TODO check this
*/
bw = info2 & 3;
sgi = info3 & 1;
stbc = (info2 >> 3) & 1;
group_id = (info2 >> 4) & 0x3F;
if (GROUP_ID_IS_SU_MIMO(group_id)) {
mcs = (info3 >> 4) & 0x0F;
nsts_su = ((info2 >> 10) & 0x07);
if (stbc)
nss = (nsts_su >> 2) + 1;
else
nss = (nsts_su + 1);
} else {
/* Hardware doesn't decode VHT-SIG-B into Rx descriptor
* so it's impossible to decode MCS. Also since
* firmware consumes Group Id Management frames host
* has no knowledge regarding group/user position
* mapping so it's impossible to pick the correct Nsts
* from VHT-SIG-A1.
*
* Bandwidth and SGI are valid so report the rateinfo
* on best-effort basis.
*/
mcs = 0;
nss = 1;
}
if (mcs > 0x09) {
ath10k_warn(ar, "invalid MCS received %u\n", mcs);
ath10k_warn(ar, "rxd %08x mpdu start %08x %08x msdu start %08x %08x ppdu start %08x %08x %08x %08x %08x\n",
__le32_to_cpu(rxd_attention->flags),
__le32_to_cpu(rxd_mpdu_start->info0),
__le32_to_cpu(rxd_mpdu_start->info1),
__le32_to_cpu(rxd_msdu_start_common->info0),
__le32_to_cpu(rxd_msdu_start_common->info1),
rxd_ppdu_start->info0,
__le32_to_cpu(rxd_ppdu_start->info1),
__le32_to_cpu(rxd_ppdu_start->info2),
__le32_to_cpu(rxd_ppdu_start->info3),
__le32_to_cpu(rxd_ppdu_start->info4));
ath10k_warn(ar, "msdu end %08x mpdu end %08x\n",
__le32_to_cpu(rxd_msdu_end_common->info0),
__le32_to_cpu(rxd_mpdu_end->info0));
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL,
"rx desc msdu payload: ",
rxd_msdu_payload, 50);
}
status->rate_idx = mcs;
status->nss = nss;
if (sgi)
status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
status->bw = ath10k_bw_to_mac80211_bw(bw);
status->encoding = RX_ENC_VHT;
break;
default:
break;
}
}
static struct ieee80211_channel *
ath10k_htt_rx_h_peer_channel(struct ath10k *ar, struct htt_rx_desc *rxd)
{
struct ath10k_hw_params *hw = &ar->hw_params;
struct rx_attention *rxd_attention;
struct rx_msdu_end_common *rxd_msdu_end_common;
struct rx_mpdu_start *rxd_mpdu_start;
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
struct cfg80211_chan_def def;
u16 peer_id;
lockdep_assert_held(&ar->data_lock);
if (!rxd)
return NULL;
rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
if (rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_PEER_IDX_INVALID))
return NULL;
if (!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU)))
return NULL;
peer_id = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_PEER_IDX);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer)
return NULL;
arvif = ath10k_get_arvif(ar, peer->vdev_id);
if (WARN_ON_ONCE(!arvif))
return NULL;
if (ath10k_mac_vif_chan(arvif->vif, &def))
return NULL;
return def.chan;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_vdev_channel(struct ath10k *ar, u32 vdev_id)
{
struct ath10k_vif *arvif;
struct cfg80211_chan_def def;
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (arvif->vdev_id == vdev_id &&
ath10k_mac_vif_chan(arvif->vif, &def) == 0)
return def.chan;
}
return NULL;
}
static void
ath10k_htt_rx_h_any_chan_iter(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *conf,
void *data)
{
struct cfg80211_chan_def *def = data;
*def = conf->def;
}
static struct ieee80211_channel *
ath10k_htt_rx_h_any_channel(struct ath10k *ar)
{
struct cfg80211_chan_def def = {};
ieee80211_iter_chan_contexts_atomic(ar->hw,
ath10k_htt_rx_h_any_chan_iter,
&def);
return def.chan;
}
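/* Resolve the channel to report for a frame, in order of preference:
 * scan channel, current rx channel, the sending peer's channel, the
 * vdev's channel, any active channel context, target operating channel.
 */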
static bool ath10k_htt_rx_h_channel(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd,
u32 vdev_id)
{
struct ieee80211_channel *ch;
spin_lock_bh(&ar->data_lock);
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
if (!ch)
ch = ath10k_htt_rx_h_peer_channel(ar, rxd);
if (!ch)
ch = ath10k_htt_rx_h_vdev_channel(ar, vdev_id);
if (!ch)
ch = ath10k_htt_rx_h_any_channel(ar);
if (!ch)
ch = ar->tgt_oper_chan;
spin_unlock_bh(&ar->data_lock);
if (!ch)
return false;
status->band = ch->band;
status->freq = ch->center_freq;
return true;
}
static void ath10k_htt_rx_h_signal(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
struct ath10k_hw_params *hw = &ar->hw_params;
struct rx_ppdu_start *rxd_ppdu_start = ath10k_htt_rx_desc_get_ppdu_start(hw, rxd);
int i;
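/* A raw per-chain RSSI of 0x80 marks the sample as invalid */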
for (i = 0; i < IEEE80211_MAX_CHAINS ; i++) {
status->chains &= ~BIT(i);
if (rxd_ppdu_start->rssi_chains[i].pri20_mhz != 0x80) {
status->chain_signal[i] = ATH10K_DEFAULT_NOISE_FLOOR +
rxd_ppdu_start->rssi_chains[i].pri20_mhz;
status->chains |= BIT(i);
}
}
/* FIXME: Get real NF */
status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
rxd_ppdu_start->rssi_comb;
status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
static void ath10k_htt_rx_h_mactime(struct ath10k *ar,
struct ieee80211_rx_status *status,
struct htt_rx_desc *rxd)
{
struct ath10k_hw_params *hw = &ar->hw_params;
struct rx_ppdu_end_common *rxd_ppdu_end_common;
rxd_ppdu_end_common = ath10k_htt_rx_desc_get_ppdu_end(hw, rxd);
/* FIXME: TSF is known only at the end of PPDU, in the last MPDU. This
* means all prior MSDUs in a PPDU are reported to mac80211 without the
* TSF. Is it worth holding frames until end of PPDU is known?
*
* FIXME: Can we get/compute 64bit TSF?
*/
status->mactime = __le32_to_cpu(rxd_ppdu_end_common->tsf_timestamp);
status->flag |= RX_FLAG_MACTIME_END;
}
static void ath10k_htt_rx_h_ppdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status,
u32 vdev_id)
{
struct sk_buff *first;
struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
struct rx_attention *rxd_attention;
bool is_first_ppdu;
bool is_last_ppdu;
if (skb_queue_empty(amsdu))
return;
first = skb_peek(amsdu);
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)first->data - hw->rx_desc_ops->rx_desc_size);
rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
is_first_ppdu = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_FIRST_MPDU));
is_last_ppdu = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_LAST_MPDU));
if (is_first_ppdu) {
/* New PPDU starts so clear out the old per-PPDU status. */
status->freq = 0;
status->rate_idx = 0;
status->nss = 0;
status->encoding = RX_ENC_LEGACY;
status->bw = RATE_INFO_BW_20;
status->flag &= ~RX_FLAG_MACTIME_END;
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
status->flag &= ~(RX_FLAG_AMPDU_IS_LAST);
status->flag |= RX_FLAG_AMPDU_DETAILS | RX_FLAG_AMPDU_LAST_KNOWN;
status->ampdu_reference = ar->ampdu_reference;
ath10k_htt_rx_h_signal(ar, status, rxd);
ath10k_htt_rx_h_channel(ar, status, rxd, vdev_id);
ath10k_htt_rx_h_rates(ar, status, rxd);
}
if (is_last_ppdu) {
ath10k_htt_rx_h_mactime(ar, status, rxd);
/* set ampdu last segment flag */
status->flag |= RX_FLAG_AMPDU_IS_LAST;
ar->ampdu_reference++;
}
}
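/* TID to WMM access category name, for debug logging only */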
static const char * const tid_to_ac[] = {
"BE",
"BK",
"BK",
"BE",
"VI",
"VI",
"VO",
"VO",
};
static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
{
u8 *qc;
int tid;
if (!ieee80211_is_data_qos(hdr->frame_control))
return "";
qc = ieee80211_get_qos_ctl(hdr);
tid = *qc & IEEE80211_QOS_CTL_TID_MASK;
if (tid < 8)
snprintf(out, size, "tid %d (%s)", tid, tid_to_ac[tid]);
else
snprintf(out, size, "tid %d", tid);
return out;
}
static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
struct ieee80211_rx_status *rx_status,
struct sk_buff *skb)
{
struct ieee80211_rx_status *status;
status = IEEE80211_SKB_RXCB(skb);
*status = *rx_status;
skb_queue_tail(&ar->htt.rx_msdus_q, skb);
}
static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_rx_status *status;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
char tid[32];
status = IEEE80211_SKB_RXCB(skb);
if (!(ar->filter_flags & FIF_FCSFAIL) &&
status->flag & RX_FLAG_FAILED_FCS_CRC) {
ar->stats.rx_crc_err_drop++;
dev_kfree_skb_any(skb);
return;
}
ath10k_dbg(ar, ATH10K_DBG_DATA,
"rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
skb,
skb->len,
ieee80211_get_SA(hdr),
ath10k_get_tid(hdr, tid, sizeof(tid)),
is_multicast_ether_addr(ieee80211_get_DA(hdr)) ?
"mcast" : "ucast",
IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
(status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
(status->encoding == RX_ENC_HT) ? "ht" : "",
(status->encoding == RX_ENC_VHT) ? "vht" : "",
(status->bw == RATE_INFO_BW_40) ? "40" : "",
(status->bw == RATE_INFO_BW_80) ? "80" : "",
(status->bw == RATE_INFO_BW_160) ? "160" : "",
status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
status->rate_idx,
status->nss,
status->freq,
status->band, status->flag,
!!(status->flag & RX_FLAG_FAILED_FCS_CRC),
!!(status->flag & RX_FLAG_MMIC_ERROR),
!!(status->flag & RX_FLAG_AMSDU_MORE));
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "rx skb: ",
skb->data, skb->len);
trace_ath10k_rx_hdr(ar, skb->data, skb->len);
trace_ath10k_rx_payload(ar, skb->data, skb->len);
ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
}
static int ath10k_htt_rx_nwifi_hdrlen(struct ath10k *ar,
struct ieee80211_hdr *hdr)
{
int len = ieee80211_hdrlen(hdr->frame_control);
if (!test_bit(ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING,
ar->running_fw->fw_file.fw_features))
len = round_up(len, 4);
return len;
}
static void ath10k_htt_rx_h_undecap_raw(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
enum htt_rx_mpdu_encrypt_type enctype,
bool is_decrypted,
const u8 first_hdr[64])
{
struct ieee80211_hdr *hdr;
struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
struct rx_msdu_end_common *rxd_msdu_end_common;
size_t hdr_len;
size_t crypto_len;
bool is_first;
bool is_last;
bool msdu_limit_err;
int bytes_aligned = ar->hw_params.decap_align_bytes;
u8 *qos;
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Delivered decapped frame:
* [802.11 header]
* [crypto param] <-- can be trimmed if !fcs_err &&
* !decrypt_err && !peer_idx_invalid
* [amsdu header] <-- only if A-MSDU
* [rfc1042/llc]
* [payload]
* [FCS] <-- at end, needs to be trimmed
*/
/* Some hardware (QCA99x0 variants) limits the number of msdus in an
 * a-msdu when deaggregating, so that unwanted MSDU-deaggregation is
 * avoided for error packets. If the limit is exceeded, hw sends all
 * remaining MSDUs as a single last MSDU with this msdu limit error set.
 */
msdu_limit_err = ath10k_htt_rx_desc_msdu_limit_error(hw, rxd);
/* If an MSDU limit error occurs, don't warn; a partial raw MSDU
 * without the first MSDU is expected in that case and is handled
 * further below.
 */
/* This probably shouldn't happen but warn just in case */
if (WARN_ON_ONCE(!is_first && !msdu_limit_err))
return;
/* This probably shouldn't happen but warn just in case */
if (WARN_ON_ONCE(!(is_first && is_last) && !msdu_limit_err))
return;
skb_trim(msdu, msdu->len - FCS_LEN);
/* Push original 80211 header */
if (unlikely(msdu_limit_err)) {
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
if (ieee80211_is_data_qos(hdr->frame_control)) {
qos = ieee80211_get_qos_ctl(hdr);
qos[0] |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
if (crypto_len)
memcpy(skb_push(msdu, crypto_len),
(void *)hdr + round_up(hdr_len, bytes_aligned),
crypto_len);
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
/* In most cases this will be true for sniffed frames. It makes sense
* to deliver them as-is without stripping the crypto param. This is
* necessary for software based decryption.
*
* If there's no error then the frame is decrypted. At least that is
* the case for frames that come in via fragmented rx indication.
*/
if (!is_decrypted)
return;
/* The payload is decrypted so strip crypto params. Start from tail
* since hdr is used to compute some stuff.
*/
hdr = (void *)msdu->data;
/* Tail */
if (status->flag & RX_FLAG_IV_STRIPPED) {
skb_trim(msdu, msdu->len -
ath10k_htt_rx_crypto_mic_len(ar, enctype));
skb_trim(msdu, msdu->len -
ath10k_htt_rx_crypto_icv_len(ar, enctype));
} else {
/* MIC */
if (status->flag & RX_FLAG_MIC_STRIPPED)
skb_trim(msdu, msdu->len -
ath10k_htt_rx_crypto_mic_len(ar, enctype));
/* ICV */
if (status->flag & RX_FLAG_ICV_STRIPPED)
skb_trim(msdu, msdu->len -
ath10k_htt_rx_crypto_icv_len(ar, enctype));
}
/* MMIC */
if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
!ieee80211_has_morefrags(hdr->frame_control) &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
skb_trim(msdu, msdu->len - MICHAEL_MIC_LEN);
/* Head */
if (status->flag & RX_FLAG_IV_STRIPPED) {
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
memmove((void *)msdu->data + crypto_len,
(void *)msdu->data, hdr_len);
skb_pull(msdu, crypto_len);
}
}
static void ath10k_htt_rx_h_undecap_nwifi(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
struct htt_rx_desc *rxd;
size_t hdr_len;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
int l3_pad_bytes;
int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [nwifi 802.11 header] <-- replaced with 802.11 hdr
* [rfc1042/llc]
*
* Note: The nwifi header doesn't have QoS Control and is
* (always?) a 3addr frame.
*
* Note2: There's no A-MSDU subframe header. Even if it's part
* of an A-MSDU.
*/
/* pull decapped header and copy SA & DA */
rxd = HTT_RX_BUF_TO_RX_DESC(hw, (void *)msdu->data -
hw->rx_desc_ops->rx_desc_size);
l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
hdr = (struct ieee80211_hdr *)(msdu->data + l3_pad_bytes);
hdr_len = ath10k_htt_rx_nwifi_hdrlen(ar, hdr);
ether_addr_copy(da, ieee80211_get_DA(hdr));
ether_addr_copy(sa, ieee80211_get_SA(hdr));
skb_pull(msdu, hdr_len);
/* push original 802.11 header */
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
memcpy(skb_push(msdu,
ath10k_htt_rx_crypto_param_len(ar, enctype)),
(void *)hdr + round_up(hdr_len, bytes_aligned),
ath10k_htt_rx_crypto_param_len(ar, enctype));
}
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
* case of 4addr it may also have different SA
*/
hdr = (struct ieee80211_hdr *)msdu->data;
ether_addr_copy(ieee80211_get_DA(hdr), da);
ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void *ath10k_htt_rx_h_find_rfc1042(struct ath10k *ar,
struct sk_buff *msdu,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
struct rx_msdu_end_common *rxd_msdu_end_common;
u8 *rxd_rx_hdr_status;
size_t hdr_len, crypto_len;
void *rfc1042;
bool is_first, is_last, is_amsdu;
int bytes_aligned = ar->hw_params.decap_align_bytes;
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
rxd_rx_hdr_status = ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
hdr = (void *)rxd_rx_hdr_status;
is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
is_amsdu = !(is_first && is_last);
rfc1042 = hdr;
if (is_first) {
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
rfc1042 += round_up(hdr_len, bytes_aligned) +
round_up(crypto_len, bytes_aligned);
}
if (is_amsdu)
rfc1042 += sizeof(struct amsdu_subframe_hdr);
return rfc1042;
}
static void ath10k_htt_rx_h_undecap_eth(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
struct ethhdr *eth;
size_t hdr_len;
void *rfc1042;
u8 da[ETH_ALEN];
u8 sa[ETH_ALEN];
int l3_pad_bytes;
struct htt_rx_desc *rxd;
int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [eth header] <-- replaced with 802.11 hdr & rfc1042/llc
* [payload]
*/
rfc1042 = ath10k_htt_rx_h_find_rfc1042(ar, msdu, enctype);
if (WARN_ON_ONCE(!rfc1042))
return;
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, l3_pad_bytes);
/* pull decapped header and copy SA & DA */
eth = (struct ethhdr *)msdu->data;
ether_addr_copy(da, eth->h_dest);
ether_addr_copy(sa, eth->h_source);
skb_pull(msdu, sizeof(struct ethhdr));
/* push rfc1042/llc/snap */
memcpy(skb_push(msdu, sizeof(struct rfc1042_hdr)), rfc1042,
sizeof(struct rfc1042_hdr));
/* push original 802.11 header */
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
memcpy(skb_push(msdu,
ath10k_htt_rx_crypto_param_len(ar, enctype)),
(void *)hdr + round_up(hdr_len, bytes_aligned),
ath10k_htt_rx_crypto_param_len(ar, enctype));
}
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
/* original 802.11 header has a different DA and in
* case of 4addr it may also have different SA
*/
hdr = (struct ieee80211_hdr *)msdu->data;
ether_addr_copy(ieee80211_get_DA(hdr), da);
ether_addr_copy(ieee80211_get_SA(hdr), sa);
}
static void ath10k_htt_rx_h_undecap_snap(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
const u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ath10k_hw_params *hw = &ar->hw_params;
struct ieee80211_hdr *hdr;
size_t hdr_len;
int l3_pad_bytes;
struct htt_rx_desc *rxd;
int bytes_aligned = ar->hw_params.decap_align_bytes;
/* Delivered decapped frame:
* [amsdu header] <-- replaced with 802.11 hdr
* [rfc1042/llc]
* [payload]
*/
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
l3_pad_bytes = ath10k_htt_rx_desc_get_l3_pad_bytes(&ar->hw_params, rxd);
skb_put(msdu, l3_pad_bytes);
skb_pull(msdu, sizeof(struct amsdu_subframe_hdr) + l3_pad_bytes);
hdr = (struct ieee80211_hdr *)first_hdr;
hdr_len = ieee80211_hdrlen(hdr->frame_control);
if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
memcpy(skb_push(msdu,
ath10k_htt_rx_crypto_param_len(ar, enctype)),
(void *)hdr + round_up(hdr_len, bytes_aligned),
ath10k_htt_rx_crypto_param_len(ar, enctype));
}
memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
}
static void ath10k_htt_rx_h_undecap(struct ath10k *ar,
struct sk_buff *msdu,
struct ieee80211_rx_status *status,
u8 first_hdr[64],
enum htt_rx_mpdu_encrypt_type enctype,
bool is_decrypted)
{
struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
struct rx_msdu_start_common *rxd_msdu_start_common;
enum rx_msdu_decap_format decap;
/* First msdu's decapped header:
* [802.11 header] <-- padded to 4 bytes long
* [crypto param] <-- padded to 4 bytes long
* [amsdu header] <-- only if A-MSDU
* [rfc1042/llc]
*
* Other (2nd, 3rd, ..) msdu's decapped header:
* [amsdu header] <-- only if A-MSDU
* [rfc1042/llc]
*/
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
switch (decap) {
case RX_MSDU_DECAP_RAW:
ath10k_htt_rx_h_undecap_raw(ar, msdu, status, enctype,
is_decrypted, first_hdr);
break;
case RX_MSDU_DECAP_NATIVE_WIFI:
ath10k_htt_rx_h_undecap_nwifi(ar, msdu, status, first_hdr,
enctype);
break;
case RX_MSDU_DECAP_ETHERNET2_DIX:
ath10k_htt_rx_h_undecap_eth(ar, msdu, status, first_hdr, enctype);
break;
case RX_MSDU_DECAP_8023_SNAP_LLC:
ath10k_htt_rx_h_undecap_snap(ar, msdu, status, first_hdr,
enctype);
break;
}
}
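/* Derive skb->ip_summed from the rx descriptor: report
 * CHECKSUM_UNNECESSARY only for IPv4/IPv6 TCP or UDP frames whose IP
 * and TCP/UDP checksums both passed hardware verification.
 */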
static int ath10k_htt_rx_get_csum_state(struct ath10k_hw_params *hw, struct sk_buff *skb)
{
struct htt_rx_desc *rxd;
struct rx_attention *rxd_attention;
struct rx_msdu_start_common *rxd_msdu_start_common;
u32 flags, info;
bool is_ip4, is_ip6;
bool is_tcp, is_udp;
bool ip_csum_ok, tcpudp_csum_ok;
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)skb->data - hw->rx_desc_ops->rx_desc_size);
rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
flags = __le32_to_cpu(rxd_attention->flags);
info = __le32_to_cpu(rxd_msdu_start_common->info1);
is_ip4 = !!(info & RX_MSDU_START_INFO1_IPV4_PROTO);
is_ip6 = !!(info & RX_MSDU_START_INFO1_IPV6_PROTO);
is_tcp = !!(info & RX_MSDU_START_INFO1_TCP_PROTO);
is_udp = !!(info & RX_MSDU_START_INFO1_UDP_PROTO);
ip_csum_ok = !(flags & RX_ATTENTION_FLAGS_IP_CHKSUM_FAIL);
tcpudp_csum_ok = !(flags & RX_ATTENTION_FLAGS_TCP_UDP_CHKSUM_FAIL);
if (!is_ip4 && !is_ip6)
return CHECKSUM_NONE;
if (!is_tcp && !is_udp)
return CHECKSUM_NONE;
if (!ip_csum_ok)
return CHECKSUM_NONE;
if (!tcpudp_csum_ok)
return CHECKSUM_NONE;
return CHECKSUM_UNNECESSARY;
}
static void ath10k_htt_rx_h_csum_offload(struct ath10k_hw_params *hw,
struct sk_buff *msdu)
{
msdu->ip_summed = ath10k_htt_rx_get_csum_state(hw, msdu);
}
static u64 ath10k_htt_rx_h_get_pn(struct ath10k *ar, struct sk_buff *skb,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ieee80211_hdr *hdr;
u64 pn = 0;
u8 *ehdr;
hdr = (struct ieee80211_hdr *)skb->data;
ehdr = skb->data + ieee80211_hdrlen(hdr->frame_control);
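/* CCMP header layout: PN0 PN1 <rsvd> <key id> PN2 PN3 PN4 PN5 -
 * assemble the 48-bit packet number from those six bytes.
 */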
if (enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2) {
pn = ehdr[0];
pn |= (u64)ehdr[1] << 8;
pn |= (u64)ehdr[4] << 16;
pn |= (u64)ehdr[5] << 24;
pn |= (u64)ehdr[6] << 32;
pn |= (u64)ehdr[7] << 40;
}
return pn;
}
static bool ath10k_htt_rx_h_frag_multicast_check(struct ath10k *ar,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *)skb->data;
return !is_multicast_ether_addr(hdr->addr1);
}
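/* Enforce PN continuity across fragments of an MPDU: fragment 0 records
 * the PN and sequence number; each later fragment must carry the same
 * sequence number and a PN exactly one greater than the previous one.
 */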
static bool ath10k_htt_rx_h_frag_pn_check(struct ath10k *ar,
struct sk_buff *skb,
u16 peer_id,
enum htt_rx_mpdu_encrypt_type enctype)
{
struct ath10k_peer *peer;
union htt_rx_pn_t *last_pn, new_pn = {0};
struct ieee80211_hdr *hdr;
u8 tid, frag_number;
u32 seq;
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer for frag pn check\n");
return false;
}
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_data_qos(hdr->frame_control))
tid = ieee80211_get_tid(hdr);
else
tid = ATH10K_TXRX_NON_QOS_TID;
last_pn = &peer->frag_tids_last_pn[tid];
new_pn.pn48 = ath10k_htt_rx_h_get_pn(ar, skb, enctype);
frag_number = le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
seq = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
if (frag_number == 0) {
last_pn->pn48 = new_pn.pn48;
peer->frag_tids_seq[tid] = seq;
} else {
if (seq != peer->frag_tids_seq[tid])
return false;
if (new_pn.pn48 != last_pn->pn48 + 1)
return false;
last_pn->pn48 = new_pn.pn48;
}
return true;
}
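/* Derive per-MPDU rx status: decide whether the MPDU was decrypted by
 * hardware, propagate FCS/TKIP/crypto error flags, drop invalid
 * fragments and undecap every MSDU in the A-MSDU.
 */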
static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status,
bool fill_crypt_header,
u8 *rx_hdr,
enum ath10k_pkt_rx_err *err,
u16 peer_id,
bool frag)
{
struct sk_buff *first;
struct sk_buff *last;
struct sk_buff *msdu, *temp;
struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
struct rx_attention *rxd_attention;
struct rx_mpdu_start *rxd_mpdu_start;
struct ieee80211_hdr *hdr;
enum htt_rx_mpdu_encrypt_type enctype;
u8 first_hdr[64];
u8 *qos;
bool has_fcs_err;
bool has_crypto_err;
bool has_tkip_err;
bool has_peer_idx_invalid;
bool is_decrypted;
bool is_mgmt;
u32 attention;
bool frag_pn_check = true, multicast_check = true;
if (skb_queue_empty(amsdu))
return;
first = skb_peek(amsdu);
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)first->data - hw->rx_desc_ops->rx_desc_size);
rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
is_mgmt = !!(rxd_attention->flags &
__cpu_to_le32(RX_ATTENTION_FLAGS_MGMT_TYPE));
enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
/* The first MSDU's Rx descriptor in an A-MSDU contains the full
 * decapped 802.11 header. It'll be used for undecapping each MSDU.
 */
hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
memcpy(first_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
if (rx_hdr)
memcpy(rx_hdr, hdr, RX_HTT_HDR_STATUS_LEN);
/* Each A-MSDU subframe will use the original header as the base and be
* reported as a separate MSDU so strip the A-MSDU bit from QoS Ctl.
*/
hdr = (void *)first_hdr;
if (ieee80211_is_data_qos(hdr->frame_control)) {
qos = ieee80211_get_qos_ctl(hdr);
qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
}
/* Some attention flags are valid only in the last MSDU. */
last = skb_peek_tail(amsdu);
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)last->data - hw->rx_desc_ops->rx_desc_size);
rxd_attention = ath10k_htt_rx_desc_get_attention(hw, rxd);
attention = __le32_to_cpu(rxd_attention->flags);
has_fcs_err = !!(attention & RX_ATTENTION_FLAGS_FCS_ERR);
has_crypto_err = !!(attention & RX_ATTENTION_FLAGS_DECRYPT_ERR);
has_tkip_err = !!(attention & RX_ATTENTION_FLAGS_TKIP_MIC_ERR);
has_peer_idx_invalid = !!(attention & RX_ATTENTION_FLAGS_PEER_IDX_INVALID);
/* Note: If hardware captures an encrypted frame that it can't decrypt,
 * e.g. due to fcs error, missing peer or invalid key data, it will
 * report the frame as raw.
 */
is_decrypted = (enctype != HTT_RX_MPDU_ENCRYPT_NONE &&
!has_fcs_err &&
!has_crypto_err &&
!has_peer_idx_invalid);
/* Clear per-MPDU flags while leaving per-PPDU flags intact. */
status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
RX_FLAG_MMIC_ERROR |
RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_ONLY_MONITOR |
RX_FLAG_MMIC_STRIPPED);
if (has_fcs_err)
status->flag |= RX_FLAG_FAILED_FCS_CRC;
if (has_tkip_err)
status->flag |= RX_FLAG_MMIC_ERROR;
if (err) {
if (has_fcs_err)
*err = ATH10K_PKT_RX_ERR_FCS;
else if (has_tkip_err)
*err = ATH10K_PKT_RX_ERR_TKIP;
else if (has_crypto_err)
*err = ATH10K_PKT_RX_ERR_CRYPT;
else if (has_peer_idx_invalid)
*err = ATH10K_PKT_RX_ERR_PEER_IDX_INVAL;
}
/* Firmware reports all necessary management frames via WMI already.
* They are not reported to monitor interfaces at all so pass the ones
* coming via HTT to monitor interfaces instead. This simplifies
* matters a lot.
*/
if (is_mgmt)
status->flag |= RX_FLAG_ONLY_MONITOR;
if (is_decrypted) {
status->flag |= RX_FLAG_DECRYPTED;
if (likely(!is_mgmt))
status->flag |= RX_FLAG_MMIC_STRIPPED;
if (fill_crypt_header)
status->flag |= RX_FLAG_MIC_STRIPPED |
RX_FLAG_ICV_STRIPPED;
else
status->flag |= RX_FLAG_IV_STRIPPED;
}
skb_queue_walk(amsdu, msdu) {
if (frag && !fill_crypt_header && is_decrypted &&
enctype == HTT_RX_MPDU_ENCRYPT_AES_CCM_WPA2)
frag_pn_check = ath10k_htt_rx_h_frag_pn_check(ar,
msdu,
peer_id,
enctype);
if (frag)
multicast_check = ath10k_htt_rx_h_frag_multicast_check(ar,
msdu);
if (!frag_pn_check || !multicast_check) {
/* Discard the fragment with invalid PN or multicast DA
*/
temp = msdu->prev;
__skb_unlink(msdu, amsdu);
dev_kfree_skb_any(msdu);
msdu = temp;
frag_pn_check = true;
multicast_check = true;
continue;
}
ath10k_htt_rx_h_csum_offload(&ar->hw_params, msdu);
if (frag && !fill_crypt_header &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
status->flag &= ~RX_FLAG_MMIC_STRIPPED;
ath10k_htt_rx_h_undecap(ar, msdu, status, first_hdr, enctype,
is_decrypted);
/* Undecapping involves copying the original 802.11 header back
* to sk_buff. If frame is protected and hardware has decrypted
* it then remove the protected bit.
*/
if (!is_decrypted)
continue;
if (is_mgmt)
continue;
if (fill_crypt_header)
continue;
hdr = (void *)msdu->data;
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
if (frag && !fill_crypt_header &&
enctype == HTT_RX_MPDU_ENCRYPT_TKIP_WPA)
status->flag &= ~RX_FLAG_IV_STRIPPED &
~RX_FLAG_MMIC_STRIPPED;
}
}
static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *status)
{
struct sk_buff *msdu;
struct sk_buff *first_subframe;
first_subframe = skb_peek(amsdu);
while ((msdu = __skb_dequeue(amsdu))) {
/* Setup per-MSDU flags */
if (skb_queue_empty(amsdu))
status->flag &= ~RX_FLAG_AMSDU_MORE;
else
status->flag |= RX_FLAG_AMSDU_MORE;
if (msdu == first_subframe) {
first_subframe = NULL;
status->flag &= ~RX_FLAG_ALLOW_SAME_PN;
} else {
status->flag |= RX_FLAG_ALLOW_SAME_PN;
}
ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
}
}
static int ath10k_unchain_msdu(struct sk_buff_head *amsdu,
unsigned long *unchain_cnt)
{
struct sk_buff *skb, *first;
int space;
int total_len = 0;
int amsdu_len = skb_queue_len(amsdu);
/* TODO: We might be able to optimize this by using
 * skb_try_coalesce or a similar method to
 * decrease copying, or maybe get mac80211 to
 * provide a way to just receive a list of
 * skbs?
 */
first = __skb_dequeue(amsdu);
/* Allocate total length all at once. */
skb_queue_walk(amsdu, skb)
total_len += skb->len;
space = total_len - skb_tailroom(first);
if ((space > 0) &&
(pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0)) {
/* TODO: bump some rx-oom error stat */
/* put it back together so we can free the
* whole list at once.
*/
__skb_queue_head(amsdu, first);
return -1;
}
/* Walk list again, copying contents into
* msdu_head
*/
while ((skb = __skb_dequeue(amsdu))) {
skb_copy_from_linear_data(skb, skb_put(first, skb->len),
skb->len);
dev_kfree_skb_any(skb);
}
__skb_queue_head(amsdu, first);
*unchain_cnt += amsdu_len - 1;
return 0;
}
static void ath10k_htt_rx_h_unchain(struct ath10k *ar,
struct sk_buff_head *amsdu,
unsigned long *drop_cnt,
unsigned long *unchain_cnt)
{
struct sk_buff *first;
struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
struct rx_msdu_start_common *rxd_msdu_start_common;
struct rx_frag_info_common *rxd_frag_info;
enum rx_msdu_decap_format decap;
first = skb_peek(amsdu);
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)first->data - hw->rx_desc_ops->rx_desc_size);
rxd_msdu_start_common = ath10k_htt_rx_desc_get_msdu_start(hw, rxd);
rxd_frag_info = ath10k_htt_rx_desc_get_frag_info(hw, rxd);
decap = MS(__le32_to_cpu(rxd_msdu_start_common->info1),
RX_MSDU_START_INFO1_DECAP_FORMAT);
/* FIXME: The current unchaining logic can only handle the simple case
 * of raw msdu chaining. If the decap format is anything other than raw,
 * the chaining may be more complex and isn't handled by this code.
 * Don't even try re-constructing such frames - the result would be
 * pretty much garbage.
 */
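/* Per the check below, a sane raw chain is expected to span exactly
 * 1 + ring2_more_count buffers, i.e. the head buffer plus the
 * continuation buffers reported by the hardware frag info.
 */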
if (decap != RX_MSDU_DECAP_RAW ||
skb_queue_len(amsdu) != 1 + rxd_frag_info->ring2_more_count) {
*drop_cnt += skb_queue_len(amsdu);
__skb_queue_purge(amsdu);
return;
}
ath10k_unchain_msdu(amsdu, unchain_cnt);
}
static bool ath10k_htt_rx_validate_amsdu(struct ath10k *ar,
struct sk_buff_head *amsdu)
{
u8 *subframe_hdr;
struct sk_buff *first;
bool is_first, is_last;
struct ath10k_hw_params *hw = &ar->hw_params;
struct htt_rx_desc *rxd;
struct rx_msdu_end_common *rxd_msdu_end_common;
struct rx_mpdu_start *rxd_mpdu_start;
struct ieee80211_hdr *hdr;
size_t hdr_len, crypto_len;
enum htt_rx_mpdu_encrypt_type enctype;
int bytes_aligned = ar->hw_params.decap_align_bytes;
first = skb_peek(amsdu);
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)first->data - hw->rx_desc_ops->rx_desc_size);
rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
rxd_mpdu_start = ath10k_htt_rx_desc_get_mpdu_start(hw, rxd);
hdr = (void *)ath10k_htt_rx_desc_get_rx_hdr_status(hw, rxd);
is_first = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_FIRST_MSDU));
is_last = !!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU));
/* Return in case of non-aggregated msdu */
if (is_first && is_last)
return true;
/* First msdu flag is not set for the first msdu of the list */
if (!is_first)
return false;
enctype = MS(__le32_to_cpu(rxd_mpdu_start->info0),
RX_MPDU_START_INFO0_ENCRYPT_TYPE);
hdr_len = ieee80211_hdrlen(hdr->frame_control);
crypto_len = ath10k_htt_rx_crypto_param_len(ar, enctype);
subframe_hdr = (u8 *)hdr + round_up(hdr_len, bytes_aligned) +
crypto_len;
/* Validate that the amsdu has a proper first subframe.
 * A single msdu may be received as an amsdu when the
 * unauthenticated amsdu flag of a QoS header gets flipped
 * in non-SPP AMSDUs; in such cases the first subframe
 * contains an llc/snap header in place of a valid da.
 * Return false if the da matches the rfc1042 pattern.
 */
if (ether_addr_equal(subframe_hdr, rfc1042_header))
return false;
return true;
}
static bool ath10k_htt_rx_amsdu_allowed(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status)
{
if (!rx_status->freq) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "no channel configured; ignoring frame(s)!\n");
return false;
}
if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx cac running\n");
return false;
}
if (!ath10k_htt_rx_validate_amsdu(ar, amsdu)) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid amsdu received\n");
return false;
}
return true;
}
static void ath10k_htt_rx_h_filter(struct ath10k *ar,
struct sk_buff_head *amsdu,
struct ieee80211_rx_status *rx_status,
unsigned long *drop_cnt)
{
if (skb_queue_empty(amsdu))
return;
if (ath10k_htt_rx_amsdu_allowed(ar, amsdu, rx_status))
return;
if (drop_cnt)
*drop_cnt += skb_queue_len(amsdu);
__skb_queue_purge(amsdu);
}
static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
struct ieee80211_rx_status *rx_status = &htt->rx_status;
struct sk_buff_head amsdu;
int ret;
unsigned long drop_cnt = 0;
unsigned long unchain_cnt = 0;
unsigned long drop_cnt_filter = 0;
unsigned long msdus_to_queue, num_msdus;
enum ath10k_pkt_rx_err err = ATH10K_PKT_RX_ERR_MAX;
u8 first_hdr[RX_HTT_HDR_STATUS_LEN];
__skb_queue_head_init(&amsdu);
spin_lock_bh(&htt->rx_ring.lock);
if (htt->rx_confused) {
spin_unlock_bh(&htt->rx_ring.lock);
return -EIO;
}
ret = ath10k_htt_rx_amsdu_pop(htt, &amsdu);
spin_unlock_bh(&htt->rx_ring.lock);
if (ret < 0) {
ath10k_warn(ar, "rx ring became corrupted: %d\n", ret);
__skb_queue_purge(&amsdu);
/* FIXME: It's probably a good idea to reboot the
* device instead of leaving it inoperable.
*/
htt->rx_confused = true;
return ret;
}
num_msdus = skb_queue_len(&amsdu);
ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);
/* a positive ret indicates chained msdus */
if (ret > 0)
ath10k_htt_rx_h_unchain(ar, &amsdu, &drop_cnt, &unchain_cnt);
ath10k_htt_rx_h_filter(ar, &amsdu, rx_status, &drop_cnt_filter);
ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true, first_hdr, &err, 0,
false);
msdus_to_queue = skb_queue_len(&amsdu);
ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);
ath10k_sta_update_rx_tid_stats(ar, first_hdr, num_msdus, err,
unchain_cnt, drop_cnt, drop_cnt_filter,
msdus_to_queue);
return 0;
}
static void ath10k_htt_rx_mpdu_desc_pn_hl(struct htt_hl_rx_desc *rx_desc,
union htt_rx_pn_t *pn,
int pn_len_bits)
{
switch (pn_len_bits) {
case 48:
pn->pn48 = __le32_to_cpu(rx_desc->pn_31_0) +
((u64)(__le32_to_cpu(rx_desc->u0.pn_63_32) & 0xFFFF) << 32);
break;
case 24:
pn->pn24 = __le32_to_cpu(rx_desc->pn_31_0);
break;
}
}
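/* Returns true when new_pn has not advanced past old_pn (48-bit
 * counters), i.e. the frame is a replay.
 */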
static bool ath10k_htt_rx_pn_cmp48(union htt_rx_pn_t *new_pn,
union htt_rx_pn_t *old_pn)
{
return ((new_pn->pn48 & 0xffffffffffffULL) <=
(old_pn->pn48 & 0xffffffffffffULL));
}
static bool ath10k_htt_rx_pn_check_replay_hl(struct ath10k *ar,
struct ath10k_peer *peer,
struct htt_rx_indication_hl *rx)
{
bool last_pn_valid, pn_invalid = false;
enum htt_txrx_sec_cast_type sec_index;
enum htt_security_types sec_type;
union htt_rx_pn_t new_pn = {0};
struct htt_hl_rx_desc *rx_desc;
union htt_rx_pn_t *last_pn;
u32 rx_desc_info, tid;
int num_mpdu_ranges;
lockdep_assert_held(&ar->data_lock);
if (!peer)
return false;
if (!(rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU))
return false;
num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
rx_desc_info = __le32_to_cpu(rx_desc->info);
if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED))
return false;
tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
last_pn_valid = peer->tids_last_pn_valid[tid];
last_pn = &peer->tids_last_pn[tid];
if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
sec_index = HTT_TXRX_SEC_MCAST;
else
sec_index = HTT_TXRX_SEC_UCAST;
sec_type = peer->rx_pn[sec_index].sec_type;
ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
if (sec_type != HTT_SECURITY_AES_CCMP &&
sec_type != HTT_SECURITY_TKIP &&
sec_type != HTT_SECURITY_TKIP_NOMIC)
return false;
if (last_pn_valid)
pn_invalid = ath10k_htt_rx_pn_cmp48(&new_pn, last_pn);
else
peer->tids_last_pn_valid[tid] = true;
if (!pn_invalid)
last_pn->pn48 = new_pn.pn48;
return pn_invalid;
}
static bool ath10k_htt_rx_proc_rx_ind_hl(struct ath10k_htt *htt,
struct htt_rx_indication_hl *rx,
struct sk_buff *skb,
enum htt_rx_pn_check_type check_pn_type,
enum htt_rx_tkip_demic_type tkip_mic_type)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
struct fw_rx_desc_hl *fw_desc;
enum htt_txrx_sec_cast_type sec_index;
enum htt_security_types sec_type;
union htt_rx_pn_t new_pn = {0};
struct htt_hl_rx_desc *rx_desc;
struct ieee80211_hdr *hdr;
struct ieee80211_rx_status *rx_status;
u16 peer_id;
u8 rx_desc_len;
int num_mpdu_ranges;
size_t tot_hdr_len;
struct ieee80211_channel *ch;
bool pn_invalid, qos, first_msdu;
u32 tid, rx_desc_info;
peer_id = __le16_to_cpu(rx->hdr.peer_id);
tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
spin_unlock_bh(&ar->data_lock);
if (!peer && peer_id != HTT_INVALID_PEERID)
ath10k_warn(ar, "Got RX ind from invalid peer: %u\n", peer_id);
if (!peer)
return true;
num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
mpdu_ranges = htt_rx_ind_get_mpdu_ranges_hl(rx);
fw_desc = &rx->fw_desc;
rx_desc_len = fw_desc->len;
if (fw_desc->u.bits.discard) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt discard mpdu\n");
goto err;
}
/* I have not yet seen any case where num_mpdu_ranges > 1.
 * qcacld does not seem to handle that case either, so we introduce the
 * same limitation here as well.
 */
if (num_mpdu_ranges > 1)
ath10k_warn(ar,
"Unsupported number of MPDU ranges: %d, ignoring all but the first\n",
num_mpdu_ranges);
if (mpdu_ranges->mpdu_range_status !=
HTT_RX_IND_MPDU_STATUS_OK &&
mpdu_ranges->mpdu_range_status !=
HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt mpdu_range_status %d\n",
mpdu_ranges->mpdu_range_status);
goto err;
}
rx_desc = (struct htt_hl_rx_desc *)&rx->mpdu_ranges[num_mpdu_ranges];
rx_desc_info = __le32_to_cpu(rx_desc->info);
if (MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST))
sec_index = HTT_TXRX_SEC_MCAST;
else
sec_index = HTT_TXRX_SEC_UCAST;
sec_type = peer->rx_pn[sec_index].sec_type;
first_msdu = rx->fw_desc.flags & FW_RX_DESC_FLAGS_FIRST_MSDU;
ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
if (check_pn_type == HTT_RX_PN_CHECK && tid >= IEEE80211_NUM_TIDS) {
spin_lock_bh(&ar->data_lock);
pn_invalid = ath10k_htt_rx_pn_check_replay_hl(ar, peer, rx);
spin_unlock_bh(&ar->data_lock);
if (pn_invalid)
goto err;
}
/* Strip off all headers preceding the 802.11 MAC header before
 * delivery to mac80211.
 */
tot_hdr_len = sizeof(struct htt_resp_hdr) + sizeof(rx->hdr) +
sizeof(rx->ppdu) + sizeof(rx->prefix) +
sizeof(rx->fw_desc) +
sizeof(*mpdu_ranges) * num_mpdu_ranges + rx_desc_len;
skb_pull(skb, tot_hdr_len);
hdr = (struct ieee80211_hdr *)skb->data;
qos = ieee80211_is_data_qos(hdr->frame_control);
rx_status = IEEE80211_SKB_RXCB(skb);
memset(rx_status, 0, sizeof(*rx_status));
if (rx->ppdu.combined_rssi == 0) {
/* SDIO firmware does not provide signal */
rx_status->signal = 0;
rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
} else {
rx_status->signal = ATH10K_DEFAULT_NOISE_FLOOR +
rx->ppdu.combined_rssi;
rx_status->flag &= ~RX_FLAG_NO_SIGNAL_VAL;
}
spin_lock_bh(&ar->data_lock);
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
if (!ch)
ch = ath10k_htt_rx_h_any_channel(ar);
if (!ch)
ch = ar->tgt_oper_chan;
spin_unlock_bh(&ar->data_lock);
if (ch) {
rx_status->band = ch->band;
rx_status->freq = ch->center_freq;
}
if (rx->fw_desc.flags & FW_RX_DESC_FLAGS_LAST_MSDU)
rx_status->flag &= ~RX_FLAG_AMSDU_MORE;
else
rx_status->flag |= RX_FLAG_AMSDU_MORE;
/* Not entirely sure about this, but all frames from the chipset have
 * the protected flag set even though they have already been decrypted.
 * Unmasking this flag is necessary in order for mac80211 not to drop
 * the frame.
 * TODO: Verify this is always the case or find out a way to check
 * if there has been hw decryption.
 */
if (ieee80211_has_protected(hdr->frame_control)) {
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
rx_status->flag |= RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
if (tid < IEEE80211_NUM_TIDS &&
first_msdu &&
check_pn_type == HTT_RX_PN_CHECK &&
(sec_type == HTT_SECURITY_AES_CCMP ||
sec_type == HTT_SECURITY_TKIP ||
sec_type == HTT_SECURITY_TKIP_NOMIC)) {
u8 offset, *ivp, i;
s8 keyidx = 0;
__le64 pn48 = cpu_to_le64(new_pn.pn48);
hdr = (struct ieee80211_hdr *)skb->data;
offset = ieee80211_hdrlen(hdr->frame_control);
hdr->frame_control |= __cpu_to_le16(IEEE80211_FCTL_PROTECTED);
rx_status->flag &= ~RX_FLAG_IV_STRIPPED;
memmove(skb->data - IEEE80211_CCMP_HDR_LEN,
skb->data, offset);
skb_push(skb, IEEE80211_CCMP_HDR_LEN);
ivp = skb->data + offset;
memset(skb->data + offset, 0, IEEE80211_CCMP_HDR_LEN);
/* Ext IV */
ivp[IEEE80211_WEP_IV_LEN - 1] |= ATH10K_IEEE80211_EXTIV;
for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
if (peer->keys[i] &&
peer->keys[i]->flags & IEEE80211_KEY_FLAG_PAIRWISE)
keyidx = peer->keys[i]->keyidx;
}
/* Key ID */
ivp[IEEE80211_WEP_IV_LEN - 1] |= keyidx << 6;
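/* Rebuild the PN bytes of the security header in place. The CCMP
 * header layout is PN0 PN1 rsvd flags PN2 PN3 PN4 PN5; the TKIP IV
 * layout is TSC1 WEPSeed TSC0 flags TSC2 TSC3 TSC4 TSC5
 * (per IEEE 802.11).
 */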
if (sec_type == HTT_SECURITY_AES_CCMP) {
rx_status->flag |= RX_FLAG_MIC_STRIPPED;
/* pn 0, pn 1 */
memcpy(skb->data + offset, &pn48, 2);
/* pn 2, pn 3, pn 4, pn 5 */
memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
} else {
rx_status->flag |= RX_FLAG_ICV_STRIPPED;
/* TSC 0 */
memcpy(skb->data + offset + 2, &pn48, 1);
/* TSC 1 */
memcpy(skb->data + offset, ((u8 *)&pn48) + 1, 1);
/* TSC 2, TSC 3, TSC 4, TSC 5 */
memcpy(skb->data + offset + 4, ((u8 *)&pn48) + 2, 4);
}
}
}
if (tkip_mic_type == HTT_RX_TKIP_MIC)
rx_status->flag &= ~RX_FLAG_IV_STRIPPED &
~RX_FLAG_MMIC_STRIPPED;
if (mpdu_ranges->mpdu_range_status == HTT_RX_IND_MPDU_STATUS_TKIP_MIC_ERR)
rx_status->flag |= RX_FLAG_MMIC_ERROR;
if (!qos && tid < IEEE80211_NUM_TIDS) {
u8 offset;
__le16 qos_ctrl = 0;
hdr = (struct ieee80211_hdr *)skb->data;
offset = ieee80211_hdrlen(hdr->frame_control);
hdr->frame_control |= cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
memmove(skb->data - IEEE80211_QOS_CTL_LEN, skb->data, offset);
skb_push(skb, IEEE80211_QOS_CTL_LEN);
qos_ctrl = cpu_to_le16(tid);
memcpy(skb->data + offset, &qos_ctrl, IEEE80211_QOS_CTL_LEN);
}
if (ar->napi.dev)
ieee80211_rx_napi(ar->hw, NULL, skb, &ar->napi);
else
ieee80211_rx_ni(ar->hw, skb);
/* We have delivered the skb to the upper layers (mac80211) so we
* must not free it.
*/
return false;
err:
/* Tell the caller that it must free the skb since we have not
* consumed it
*/
return true;
}
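/* Fragment decap helpers: each slides the 802.11 header forward over
 * the per-fragment security header (IV) and trims the trailing
 * MIC/ICV so mac80211 sees a plain, already-decrypted fragment.
 */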
static int ath10k_htt_rx_frag_tkip_decap_nomic(struct sk_buff *skb,
u16 head_len,
u16 hdr_len)
{
u8 *ivp, *orig_hdr;
orig_hdr = skb->data;
ivp = orig_hdr + hdr_len + head_len;
/* the ExtIV bit is always set to 1 for TKIP */
if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
return -EINVAL;
memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
skb_pull(skb, IEEE80211_TKIP_IV_LEN);
skb_trim(skb, skb->len - ATH10K_IEEE80211_TKIP_MICLEN);
return 0;
}
static int ath10k_htt_rx_frag_tkip_decap_withmic(struct sk_buff *skb,
u16 head_len,
u16 hdr_len)
{
u8 *ivp, *orig_hdr;
orig_hdr = skb->data;
ivp = orig_hdr + hdr_len + head_len;
/* the ExtIV bit is always set to 1 for TKIP */
if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
return -EINVAL;
memmove(orig_hdr + IEEE80211_TKIP_IV_LEN, orig_hdr, head_len + hdr_len);
skb_pull(skb, IEEE80211_TKIP_IV_LEN);
skb_trim(skb, skb->len - IEEE80211_TKIP_ICV_LEN);
return 0;
}
static int ath10k_htt_rx_frag_ccmp_decap(struct sk_buff *skb,
u16 head_len,
u16 hdr_len)
{
u8 *ivp, *orig_hdr;
orig_hdr = skb->data;
ivp = orig_hdr + hdr_len + head_len;
/* the ExtIV bit is always set to 1 for CCMP */
if (!(ivp[IEEE80211_WEP_IV_LEN - 1] & ATH10K_IEEE80211_EXTIV))
return -EINVAL;
skb_trim(skb, skb->len - IEEE80211_CCMP_MIC_LEN);
memmove(orig_hdr + IEEE80211_CCMP_HDR_LEN, orig_hdr, head_len + hdr_len);
skb_pull(skb, IEEE80211_CCMP_HDR_LEN);
return 0;
}
static int ath10k_htt_rx_frag_wep_decap(struct sk_buff *skb,
u16 head_len,
u16 hdr_len)
{
u8 *orig_hdr;
orig_hdr = skb->data;
memmove(orig_hdr + IEEE80211_WEP_IV_LEN,
orig_hdr, head_len + hdr_len);
skb_pull(skb, IEEE80211_WEP_IV_LEN);
skb_trim(skb, skb->len - IEEE80211_WEP_ICV_LEN);
return 0;
}
static bool ath10k_htt_rx_proc_rx_frag_ind_hl(struct ath10k_htt *htt,
struct htt_rx_fragment_indication *rx,
struct sk_buff *skb)
{
struct ath10k *ar = htt->ar;
enum htt_rx_tkip_demic_type tkip_mic = HTT_RX_NON_TKIP_MIC;
enum htt_txrx_sec_cast_type sec_index;
struct htt_rx_indication_hl *rx_hl;
enum htt_security_types sec_type;
u32 tid, frag, seq, rx_desc_info;
union htt_rx_pn_t new_pn = {0};
struct htt_hl_rx_desc *rx_desc;
u16 peer_id, sc, hdr_space;
union htt_rx_pn_t *last_pn;
struct ieee80211_hdr *hdr;
int ret, num_mpdu_ranges;
struct ath10k_peer *peer;
struct htt_resp *resp;
size_t tot_hdr_len;
resp = (struct htt_resp *)(skb->data + HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
skb_pull(skb, HTT_RX_FRAG_IND_INFO0_HEADER_LEN);
skb_trim(skb, skb->len - FCS_LEN);
peer_id = __le16_to_cpu(rx->peer_id);
rx_hl = (struct htt_rx_indication_hl *)(&resp->rx_ind_hl);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "invalid peer: %u\n", peer_id);
goto err;
}
num_mpdu_ranges = MS(__le32_to_cpu(rx_hl->hdr.info1),
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
tot_hdr_len = sizeof(struct htt_resp_hdr) +
sizeof(rx_hl->hdr) +
sizeof(rx_hl->ppdu) +
sizeof(rx_hl->prefix) +
sizeof(rx_hl->fw_desc) +
sizeof(struct htt_rx_indication_mpdu_range) * num_mpdu_ranges;
tid = MS(rx_hl->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
rx_desc = (struct htt_hl_rx_desc *)(skb->data + tot_hdr_len);
rx_desc_info = __le32_to_cpu(rx_desc->info);
hdr = (struct ieee80211_hdr *)((u8 *)rx_desc + rx_hl->fw_desc.len);
if (is_multicast_ether_addr(hdr->addr1)) {
/* Discard the fragment with multicast DA */
goto err;
}
if (!MS(rx_desc_info, HTT_RX_DESC_HL_INFO_ENCRYPTED)) {
spin_unlock_bh(&ar->data_lock);
return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
HTT_RX_NON_PN_CHECK,
HTT_RX_NON_TKIP_MIC);
}
if (ieee80211_has_retry(hdr->frame_control))
goto err;
hdr_space = ieee80211_hdrlen(hdr->frame_control);
sc = __le16_to_cpu(hdr->seq_ctrl);
seq = IEEE80211_SEQ_TO_SN(sc);
frag = sc & IEEE80211_SCTL_FRAG;
sec_index = MS(rx_desc_info, HTT_RX_DESC_HL_INFO_MCAST_BCAST) ?
HTT_TXRX_SEC_MCAST : HTT_TXRX_SEC_UCAST;
sec_type = peer->rx_pn[sec_index].sec_type;
ath10k_htt_rx_mpdu_desc_pn_hl(rx_desc, &new_pn, peer->rx_pn[sec_index].pn_len);
switch (sec_type) {
case HTT_SECURITY_TKIP:
tkip_mic = HTT_RX_TKIP_MIC;
ret = ath10k_htt_rx_frag_tkip_decap_withmic(skb,
tot_hdr_len +
rx_hl->fw_desc.len,
hdr_space);
if (ret)
goto err;
break;
case HTT_SECURITY_TKIP_NOMIC:
ret = ath10k_htt_rx_frag_tkip_decap_nomic(skb,
tot_hdr_len +
rx_hl->fw_desc.len,
hdr_space);
if (ret)
goto err;
break;
case HTT_SECURITY_AES_CCMP:
ret = ath10k_htt_rx_frag_ccmp_decap(skb,
tot_hdr_len + rx_hl->fw_desc.len,
hdr_space);
if (ret)
goto err;
break;
case HTT_SECURITY_WEP128:
case HTT_SECURITY_WEP104:
case HTT_SECURITY_WEP40:
ret = ath10k_htt_rx_frag_wep_decap(skb,
tot_hdr_len + rx_hl->fw_desc.len,
hdr_space);
if (ret)
goto err;
break;
default:
break;
}
resp = (struct htt_resp *)(skb->data);
if (sec_type != HTT_SECURITY_AES_CCMP &&
sec_type != HTT_SECURITY_TKIP &&
sec_type != HTT_SECURITY_TKIP_NOMIC) {
spin_unlock_bh(&ar->data_lock);
return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
HTT_RX_NON_PN_CHECK,
HTT_RX_NON_TKIP_MIC);
}
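/* Fragment PN rules: fragment 0 snapshots the PN and sequence
 * number; for CCMP every later fragment of the same MPDU must keep
 * that sequence number and advance the PN by exactly one.
 */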
last_pn = &peer->frag_tids_last_pn[tid];
if (frag == 0) {
if (ath10k_htt_rx_pn_check_replay_hl(ar, peer, &resp->rx_ind_hl))
goto err;
last_pn->pn48 = new_pn.pn48;
peer->frag_tids_seq[tid] = seq;
} else if (sec_type == HTT_SECURITY_AES_CCMP) {
if (seq != peer->frag_tids_seq[tid])
goto err;
if (new_pn.pn48 != last_pn->pn48 + 1)
goto err;
last_pn->pn48 = new_pn.pn48;
last_pn = &peer->tids_last_pn[tid];
last_pn->pn48 = new_pn.pn48;
}
spin_unlock_bh(&ar->data_lock);
return ath10k_htt_rx_proc_rx_ind_hl(htt, &resp->rx_ind_hl, skb,
HTT_RX_NON_PN_CHECK, tkip_mic);
err:
spin_unlock_bh(&ar->data_lock);
/* Tell the caller that it must free the skb since we have not
* consumed it
*/
return true;
}
static void ath10k_htt_rx_proc_rx_ind_ll(struct ath10k_htt *htt,
struct htt_rx_indication *rx)
{
struct ath10k *ar = htt->ar;
struct htt_rx_indication_mpdu_range *mpdu_ranges;
int num_mpdu_ranges;
int i, mpdu_count = 0;
u16 peer_id;
u8 tid;
num_mpdu_ranges = MS(__le32_to_cpu(rx->hdr.info1),
HTT_RX_INDICATION_INFO1_NUM_MPDU_RANGES);
peer_id = __le16_to_cpu(rx->hdr.peer_id);
tid = MS(rx->hdr.info0, HTT_RX_INDICATION_INFO0_EXT_TID);
mpdu_ranges = htt_rx_ind_get_mpdu_ranges(rx);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt rx ind: ",
rx, struct_size(rx, mpdu_ranges, num_mpdu_ranges));
for (i = 0; i < num_mpdu_ranges; i++)
mpdu_count += mpdu_ranges[i].mpdu_count;
atomic_add(mpdu_count, &htt->num_mpdus_ready);
ath10k_sta_update_rx_tid_stats_ampdu(ar, peer_id, tid, mpdu_ranges,
num_mpdu_ranges);
}
static void ath10k_htt_rx_tx_compl_ind(struct ath10k *ar,
struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
struct htt_tx_done tx_done = {};
int status = MS(resp->data_tx_completion.flags, HTT_DATA_TX_STATUS);
__le16 msdu_id, *msdus;
bool rssi_enabled = false;
u8 msdu_count = 0, num_airtime_records, tid;
int i, htt_pad = 0;
struct htt_data_tx_compl_ppdu_dur *ppdu_info;
struct ath10k_peer *peer;
u16 ppdu_info_offset = 0, peer_id;
u32 tx_duration;
switch (status) {
case HTT_DATA_TX_STATUS_NO_ACK:
tx_done.status = HTT_TX_COMPL_STATE_NOACK;
break;
case HTT_DATA_TX_STATUS_OK:
tx_done.status = HTT_TX_COMPL_STATE_ACK;
break;
case HTT_DATA_TX_STATUS_DISCARD:
case HTT_DATA_TX_STATUS_POSTPONE:
case HTT_DATA_TX_STATUS_DOWNLOAD_FAIL:
tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
default:
ath10k_warn(ar, "unhandled tx completion status %d\n", status);
tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
}
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx completion num_msdus %d\n",
resp->data_tx_completion.num_msdus);
msdu_count = resp->data_tx_completion.num_msdus;
msdus = resp->data_tx_completion.msdus;
rssi_enabled = ath10k_is_rssi_enable(&ar->hw_params, resp);
if (rssi_enabled)
htt_pad = ath10k_tx_data_rssi_get_pad_bytes(&ar->hw_params,
resp);
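/* With RSSI enabled, firmware appends per-MSDU ack RSSI words after
 * the (even-padded) msdu id list plus htt_pad words of padding,
 * hence the msdus[] indexing below.
 */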
for (i = 0; i < msdu_count; i++) {
msdu_id = msdus[i];
tx_done.msdu_id = __le16_to_cpu(msdu_id);
if (rssi_enabled) {
/* The total number of MSDUs should be even;
 * if an odd number is sent, firmware fills the
 * last msdu id with 0xffff.
 */
if (msdu_count & 0x01) {
msdu_id = msdus[msdu_count + i + 1 + htt_pad];
tx_done.ack_rssi = __le16_to_cpu(msdu_id);
} else {
msdu_id = msdus[msdu_count + i + htt_pad];
tx_done.ack_rssi = __le16_to_cpu(msdu_id);
}
}
/* kfifo_put: In practice firmware shouldn't fire off per-CE
* interrupt and main interrupt (MSI/-X range case) for the same
* HTC service so it should be safe to use kfifo_put w/o lock.
*
* From kfifo_put() documentation:
* Note that with only one concurrent reader and one concurrent
* writer, you don't need extra locking to use these macro.
*/
if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
ath10k_txrx_tx_unref(htt, &tx_done);
} else if (!kfifo_put(&htt->txdone_fifo, tx_done)) {
ath10k_warn(ar, "txdone fifo overrun, msdu_id %d status %d\n",
tx_done.msdu_id, tx_done.status);
ath10k_txrx_tx_unref(htt, &tx_done);
}
}
if (!(resp->data_tx_completion.flags2 & HTT_TX_CMPL_FLAG_PPDU_DURATION_PRESENT))
return;
ppdu_info_offset = (msdu_count & 0x01) ? msdu_count + 1 : msdu_count;
if (rssi_enabled)
ppdu_info_offset += ppdu_info_offset;
if (resp->data_tx_completion.flags2 &
(HTT_TX_CMPL_FLAG_PPID_PRESENT | HTT_TX_CMPL_FLAG_PA_PRESENT))
ppdu_info_offset += 2;
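/* ppdu_info_offset is in units of __le16: the msdu id list rounded
 * up to an even count, doubled again when ack RSSI words are
 * present, plus two more words when PPID/PA fields are included.
 */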
ppdu_info = (struct htt_data_tx_compl_ppdu_dur *)&msdus[ppdu_info_offset];
num_airtime_records = FIELD_GET(HTT_TX_COMPL_PPDU_DUR_INFO0_NUM_ENTRIES_MASK,
__le32_to_cpu(ppdu_info->info0));
for (i = 0; i < num_airtime_records; i++) {
struct htt_data_tx_ppdu_dur *ppdu_dur;
u32 info0;
ppdu_dur = &ppdu_info->ppdu_dur[i];
info0 = __le32_to_cpu(ppdu_dur->info0);
peer_id = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_PEER_ID_MASK,
info0);
rcu_read_lock();
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer || !peer->sta) {
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
continue;
}
tid = FIELD_GET(HTT_TX_PPDU_DUR_INFO0_TID_MASK, info0) &
IEEE80211_QOS_CTL_TID_MASK;
tx_duration = __le32_to_cpu(ppdu_dur->tx_duration);
ieee80211_sta_register_airtime(peer->sta, tid, tx_duration, 0);
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
}
}
static void ath10k_htt_rx_addba(struct ath10k *ar, struct htt_resp *resp)
{
struct htt_rx_addba *ev = &resp->rx_addba;
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
u16 info0, tid, peer_id;
info0 = __le16_to_cpu(ev->info0);
tid = MS(info0, HTT_RX_BA_INFO0_TID);
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx addba tid %u peer_id %u size %u\n",
tid, peer_id, ev->window_size);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
}
arvif = ath10k_get_arvif(ar, peer->vdev_id);
if (!arvif) {
ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
peer->vdev_id);
spin_unlock_bh(&ar->data_lock);
return;
}
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx start rx ba session sta %pM tid %u size %u\n",
peer->addr, tid, ev->window_size);
ieee80211_start_rx_ba_session_offl(arvif->vif, peer->addr, tid);
spin_unlock_bh(&ar->data_lock);
}
static void ath10k_htt_rx_delba(struct ath10k *ar, struct htt_resp *resp)
{
struct htt_rx_delba *ev = &resp->rx_delba;
struct ath10k_peer *peer;
struct ath10k_vif *arvif;
u16 info0, tid, peer_id;
info0 = __le16_to_cpu(ev->info0);
tid = MS(info0, HTT_RX_BA_INFO0_TID);
peer_id = MS(info0, HTT_RX_BA_INFO0_PEER_ID);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx delba tid %u peer_id %u\n",
tid, peer_id);
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer) {
ath10k_warn(ar, "received addba event for invalid peer_id: %u\n",
peer_id);
spin_unlock_bh(&ar->data_lock);
return;
}
arvif = ath10k_get_arvif(ar, peer->vdev_id);
if (!arvif) {
ath10k_warn(ar, "received addba event for invalid vdev_id: %u\n",
peer->vdev_id);
spin_unlock_bh(&ar->data_lock);
return;
}
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx stop rx ba session sta %pM tid %u\n",
peer->addr, tid);
ieee80211_stop_rx_ba_session_offl(arvif->vif, peer->addr, tid);
spin_unlock_bh(&ar->data_lock);
}
static int ath10k_htt_rx_extract_amsdu(struct ath10k_hw_params *hw,
struct sk_buff_head *list,
struct sk_buff_head *amsdu)
{
struct sk_buff *msdu;
struct htt_rx_desc *rxd;
struct rx_msdu_end_common *rxd_msdu_end_common;
if (skb_queue_empty(list))
return -ENOBUFS;
if (WARN_ON(!skb_queue_empty(amsdu)))
return -EINVAL;
while ((msdu = __skb_dequeue(list))) {
__skb_queue_tail(amsdu, msdu);
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)msdu->data -
hw->rx_desc_ops->rx_desc_size);
rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
if (rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))
break;
}
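/* The loop above stops once LAST_MSDU is seen; if the list ran out
 * first, the tail lacks the flag and the A-MSDU is incomplete, so
 * splice everything back and report -EAGAIN.
 */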
msdu = skb_peek_tail(amsdu);
rxd = HTT_RX_BUF_TO_RX_DESC(hw,
(void *)msdu->data - hw->rx_desc_ops->rx_desc_size);
rxd_msdu_end_common = ath10k_htt_rx_desc_get_msdu_end(hw, rxd);
if (!(rxd_msdu_end_common->info0 &
__cpu_to_le32(RX_MSDU_END_INFO0_LAST_MSDU))) {
skb_queue_splice_init(amsdu, list);
return -EAGAIN;
}
return 0;
}
static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
if (!ieee80211_has_protected(hdr->frame_control))
return;
/* Offloaded frames are already decrypted but firmware insists they are
* protected in the 802.11 header. Strip the flag. Otherwise mac80211
* will drop the frame.
*/
hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
status->flag |= RX_FLAG_DECRYPTED |
RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED;
}
static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
struct sk_buff_head *list)
{
struct ath10k_htt *htt = &ar->htt;
struct ieee80211_rx_status *status = &htt->rx_status;
struct htt_rx_offload_msdu *rx;
struct sk_buff *msdu;
size_t offset;
while ((msdu = __skb_dequeue(list))) {
/* Offloaded frames don't have an Rx descriptor. Instead they have
 * a short meta information header.
 */
rx = (void *)msdu->data;
skb_put(msdu, sizeof(*rx));
skb_pull(msdu, sizeof(*rx));
if (skb_tailroom(msdu) < __le16_to_cpu(rx->msdu_len)) {
ath10k_warn(ar, "dropping frame: offloaded rx msdu is too long!\n");
dev_kfree_skb_any(msdu);
continue;
}
skb_put(msdu, __le16_to_cpu(rx->msdu_len));
/* The offloaded rx header length isn't a multiple of 2 or 4 so the
 * actual payload is unaligned. Align the frame. Otherwise
 * mac80211 complains. This shouldn't reduce performance much
 * because these offloaded frames are rare.
 */
offset = 4 - ((unsigned long)msdu->data & 3);
skb_put(msdu, offset);
memmove(msdu->data + offset, msdu->data, msdu->len);
skb_pull(msdu, offset);
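/* Note: when the payload is already 4-byte aligned the expression
 * above yields offset == 4, so the frame is shifted by a full word;
 * harmless, but not a no-op.
 */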
/* FIXME: The frame is NWifi. Re-construct QoS Control
* if possible later.
*/
memset(status, 0, sizeof(*status));
status->flag |= RX_FLAG_NO_SIGNAL_VAL;
ath10k_htt_rx_h_rx_offload_prot(status, msdu);
ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
}
}
static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (void *)skb->data;
struct ieee80211_rx_status *status = &htt->rx_status;
struct sk_buff_head list;
struct sk_buff_head amsdu;
u16 peer_id;
u16 msdu_count;
u8 vdev_id;
u8 tid;
bool offload;
bool frag;
int ret;
lockdep_assert_held(&htt->rx_ring.lock);
if (htt->rx_confused)
return -EIO;
skb_pull(skb, sizeof(resp->hdr));
skb_pull(skb, sizeof(resp->rx_in_ord_ind));
peer_id = __le16_to_cpu(resp->rx_in_ord_ind.peer_id);
msdu_count = __le16_to_cpu(resp->rx_in_ord_ind.msdu_count);
vdev_id = resp->rx_in_ord_ind.vdev_id;
tid = SM(resp->rx_in_ord_ind.info, HTT_RX_IN_ORD_IND_INFO_TID);
offload = !!(resp->rx_in_ord_ind.info &
HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK);
frag = !!(resp->rx_in_ord_ind.info & HTT_RX_IN_ORD_IND_INFO_FRAG_MASK);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx in ord vdev %i peer %i tid %i offload %i frag %i msdu count %i\n",
vdev_id, peer_id, tid, offload, frag, msdu_count);
if (skb->len < msdu_count * sizeof(*resp->rx_in_ord_ind.msdu_descs32)) {
ath10k_warn(ar, "dropping invalid in order rx indication\n");
return -EINVAL;
}
/* The event can deliver more than 1 A-MSDU. Each A-MSDU is later
* extracted and processed.
*/
__skb_queue_head_init(&list);
if (ar->hw_params.target_64bit)
ret = ath10k_htt_rx_pop_paddr64_list(htt, &resp->rx_in_ord_ind,
&list);
else
ret = ath10k_htt_rx_pop_paddr32_list(htt, &resp->rx_in_ord_ind,
&list);
if (ret < 0) {
ath10k_warn(ar, "failed to pop paddr list: %d\n", ret);
htt->rx_confused = true;
return -EIO;
}
/* Offloaded frames are very different and need to be handled
* separately.
*/
if (offload)
ath10k_htt_rx_h_rx_offload(ar, &list);
while (!skb_queue_empty(&list)) {
__skb_queue_head_init(&amsdu);
ret = ath10k_htt_rx_extract_amsdu(&ar->hw_params, &list, &amsdu);
switch (ret) {
case 0:
/* Note: The in-order indication may report interleaved
* frames from different PPDUs meaning reported rx rate
* to mac80211 isn't accurate/reliable. It's still
* better to report something than nothing though. This
* should still give an idea about rx rate to the user.
*/
ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
ath10k_htt_rx_h_filter(ar, &amsdu, status, NULL);
ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false, NULL,
NULL, peer_id, frag);
ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
break;
case -EAGAIN:
fallthrough;
default:
/* Should not happen. */
ath10k_warn(ar, "failed to extract amsdu: %d\n", ret);
htt->rx_confused = true;
__skb_queue_purge(&list);
return -EIO;
}
}
return ret;
}
static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
const __le32 *resp_ids,
int num_resp_ids)
{
int i;
u32 resp_id;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm num_resp_ids %d\n",
num_resp_ids);
for (i = 0; i < num_resp_ids; i++) {
resp_id = le32_to_cpu(resp_ids[i]);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm resp_id %u\n",
resp_id);
/* TODO: free resp_id */
}
}
static void ath10k_htt_rx_tx_fetch_ind(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_hw *hw = ar->hw;
struct ieee80211_txq *txq;
struct htt_resp *resp = (struct htt_resp *)skb->data;
struct htt_tx_fetch_record *record;
size_t len;
size_t max_num_bytes;
size_t max_num_msdus;
size_t num_bytes;
size_t num_msdus;
const __le32 *resp_ids;
u16 num_records;
u16 num_resp_ids;
u16 peer_id;
u8 tid;
int ret;
int i;
bool may_tx;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind\n");
len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_ind);
if (unlikely(skb->len < len)) {
ath10k_warn(ar, "received corrupted tx_fetch_ind event: buffer too short\n");
return;
}
num_records = le16_to_cpu(resp->tx_fetch_ind.num_records);
num_resp_ids = le16_to_cpu(resp->tx_fetch_ind.num_resp_ids);
len += sizeof(resp->tx_fetch_ind.records[0]) * num_records;
len += sizeof(resp->tx_fetch_ind.resp_ids[0]) * num_resp_ids;
if (unlikely(skb->len < len)) {
ath10k_warn(ar, "received corrupted tx_fetch_ind event: too many records/resp_ids\n");
return;
}
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch ind num records %u num resps %u seq %u\n",
num_records, num_resp_ids,
le16_to_cpu(resp->tx_fetch_ind.fetch_seq_num));
if (!ar->htt.tx_q_state.enabled) {
ath10k_warn(ar, "received unexpected tx_fetch_ind event: not enabled\n");
return;
}
if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) {
ath10k_warn(ar, "received unexpected tx_fetch_ind event: in push mode\n");
return;
}
rcu_read_lock();
for (i = 0; i < num_records; i++) {
record = &resp->tx_fetch_ind.records[i];
peer_id = MS(le16_to_cpu(record->info),
HTT_TX_FETCH_RECORD_INFO_PEER_ID);
tid = MS(le16_to_cpu(record->info),
HTT_TX_FETCH_RECORD_INFO_TID);
max_num_msdus = le16_to_cpu(record->num_msdus);
max_num_bytes = le32_to_cpu(record->num_bytes);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch record %i peer_id %u tid %u msdus %zu bytes %zu\n",
i, peer_id, tid, max_num_msdus, max_num_bytes);
if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
peer_id, tid);
continue;
}
spin_lock_bh(&ar->data_lock);
txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
spin_unlock_bh(&ar->data_lock);
/* It is okay to release the lock and use txq because RCU read
* lock is held.
*/
if (unlikely(!txq)) {
ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
peer_id, tid);
continue;
}
num_msdus = 0;
num_bytes = 0;
ieee80211_txq_schedule_start(hw, txq->ac);
may_tx = ieee80211_txq_may_transmit(hw, txq);
while (num_msdus < max_num_msdus &&
num_bytes < max_num_bytes) {
if (!may_tx)
break;
ret = ath10k_mac_tx_push_txq(hw, txq);
if (ret < 0)
break;
num_msdus++;
num_bytes += ret;
}
ieee80211_return_txq(hw, txq, false);
ieee80211_txq_schedule_end(hw, txq->ac);
record->num_msdus = cpu_to_le16(num_msdus);
record->num_bytes = cpu_to_le32(num_bytes);
ath10k_htt_tx_txq_recalc(hw, txq);
}
rcu_read_unlock();
resp_ids = ath10k_htt_get_tx_fetch_ind_resp_ids(&resp->tx_fetch_ind);
ath10k_htt_rx_tx_fetch_resp_id_confirm(ar, resp_ids, num_resp_ids);
ret = ath10k_htt_tx_fetch_resp(ar,
resp->tx_fetch_ind.token,
resp->tx_fetch_ind.fetch_seq_num,
resp->tx_fetch_ind.records,
num_records);
if (unlikely(ret)) {
ath10k_warn(ar, "failed to submit tx fetch resp for token 0x%08x: %d\n",
le32_to_cpu(resp->tx_fetch_ind.token), ret);
/* FIXME: request fw restart */
}
ath10k_htt_tx_txq_sync(ar);
}
static void ath10k_htt_rx_tx_fetch_confirm(struct ath10k *ar,
struct sk_buff *skb)
{
const struct htt_resp *resp = (void *)skb->data;
size_t len;
int num_resp_ids;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx fetch confirm\n");
len = sizeof(resp->hdr) + sizeof(resp->tx_fetch_confirm);
if (unlikely(skb->len < len)) {
ath10k_warn(ar, "received corrupted tx_fetch_confirm event: buffer too short\n");
return;
}
num_resp_ids = le16_to_cpu(resp->tx_fetch_confirm.num_resp_ids);
len += sizeof(resp->tx_fetch_confirm.resp_ids[0]) * num_resp_ids;
if (unlikely(skb->len < len)) {
ath10k_warn(ar, "received corrupted tx_fetch_confirm event: resp_ids buffer overflow\n");
return;
}
ath10k_htt_rx_tx_fetch_resp_id_confirm(ar,
resp->tx_fetch_confirm.resp_ids,
num_resp_ids);
}
static void ath10k_htt_rx_tx_mode_switch_ind(struct ath10k *ar,
struct sk_buff *skb)
{
const struct htt_resp *resp = (void *)skb->data;
const struct htt_tx_mode_switch_record *record;
struct ieee80211_txq *txq;
struct ath10k_txq *artxq;
size_t len;
size_t num_records;
enum htt_tx_mode_switch_mode mode;
bool enable;
u16 info0;
u16 info1;
u16 threshold;
u16 peer_id;
u8 tid;
int i;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx tx mode switch ind\n");
len = sizeof(resp->hdr) + sizeof(resp->tx_mode_switch_ind);
if (unlikely(skb->len < len)) {
ath10k_warn(ar, "received corrupted tx_mode_switch_ind event: buffer too short\n");
return;
}
info0 = le16_to_cpu(resp->tx_mode_switch_ind.info0);
info1 = le16_to_cpu(resp->tx_mode_switch_ind.info1);
enable = !!(info0 & HTT_TX_MODE_SWITCH_IND_INFO0_ENABLE);
num_records = MS(info0, HTT_TX_MODE_SWITCH_IND_INFO0_NUM_RECORDS);
mode = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_MODE);
threshold = MS(info1, HTT_TX_MODE_SWITCH_IND_INFO1_THRESHOLD);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt rx tx mode switch ind info0 0x%04x info1 0x%04x enable %d num records %zd mode %d threshold %u\n",
info0, info1, enable, num_records, mode, threshold);
len += sizeof(resp->tx_mode_switch_ind.records[0]) * num_records;
if (unlikely(skb->len < len)) {
ath10k_warn(ar, "received corrupted tx_mode_switch_mode_ind event: too many records\n");
return;
}
switch (mode) {
case HTT_TX_MODE_SWITCH_PUSH:
case HTT_TX_MODE_SWITCH_PUSH_PULL:
break;
default:
ath10k_warn(ar, "received invalid tx_mode_switch_mode_ind mode %d, ignoring\n",
mode);
return;
}
if (!enable)
return;
ar->htt.tx_q_state.enabled = enable;
ar->htt.tx_q_state.mode = mode;
ar->htt.tx_q_state.num_push_allowed = threshold;
rcu_read_lock();
for (i = 0; i < num_records; i++) {
record = &resp->tx_mode_switch_ind.records[i];
info0 = le16_to_cpu(record->info0);
peer_id = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_PEER_ID);
tid = MS(info0, HTT_TX_MODE_SWITCH_RECORD_INFO0_TID);
if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
ath10k_warn(ar, "received out of range peer_id %u tid %u\n",
peer_id, tid);
continue;
}
spin_lock_bh(&ar->data_lock);
txq = ath10k_mac_txq_lookup(ar, peer_id, tid);
spin_unlock_bh(&ar->data_lock);
/* It is okay to release the lock and use txq because RCU read
* lock is held.
*/
if (unlikely(!txq)) {
ath10k_warn(ar, "failed to lookup txq for peer_id %u tid %u\n",
peer_id, tid);
continue;
}
spin_lock_bh(&ar->htt.tx_lock);
artxq = (void *)txq->drv_priv;
artxq->num_push_allowed = le16_to_cpu(record->num_max_msdus);
spin_unlock_bh(&ar->htt.tx_lock);
}
rcu_read_unlock();
ath10k_mac_tx_push_pending(ar);
}
void ath10k_htt_htc_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
bool release;
release = ath10k_htt_t2h_msg_handler(ar, skb);
/* Free the indication buffer */
if (release)
dev_kfree_skb_any(skb);
}
static inline s8 ath10k_get_legacy_rate_idx(struct ath10k *ar, u8 rate)
{
static const u8 legacy_rates[] = {1, 2, 5, 11, 6, 9, 12,
18, 24, 36, 48, 54};
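/* Rates in Mbps: CCK first (5.5 Mbps stored as 5, see the CCK fixup
 * in ath10k_update_per_peer_tx_stats()), then OFDM. The array index
 * serves as the legacy rate index used elsewhere in this file.
 */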
int i;
for (i = 0; i < ARRAY_SIZE(legacy_rates); i++) {
if (rate == legacy_rates[i])
return i;
}
ath10k_warn(ar, "Invalid legacy rate %d peer stats", rate);
return -EINVAL;
}
static void
ath10k_accumulate_per_peer_tx_stats(struct ath10k *ar,
struct ath10k_sta *arsta,
struct ath10k_per_peer_tx_stats *pstats,
s8 legacy_rate_idx)
{
struct rate_info *txrate = &arsta->txrate;
struct ath10k_htt_tx_stats *tx_stats;
int idx, ht_idx, gi, mcs, bw, nss;
unsigned long flags;
if (!arsta->tx_stats)
return;
tx_stats = arsta->tx_stats;
flags = txrate->flags;
gi = test_bit(ATH10K_RATE_INFO_FLAGS_SGI_BIT, &flags);
mcs = ATH10K_HW_MCS_RATE(pstats->ratecode);
bw = txrate->bw;
nss = txrate->nss;
ht_idx = mcs + (nss - 1) * 8;
idx = mcs * 8 + 8 * 10 * (nss - 1);
idx += bw * 2 + gi;
#define STATS_OP_FMT(name) tx_stats->stats[ATH10K_STATS_TYPE_##name]
if (txrate->flags & RATE_INFO_FLAGS_VHT_MCS) {
STATS_OP_FMT(SUCC).vht[0][mcs] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).vht[1][mcs] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).vht[0][mcs] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).vht[1][mcs] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).vht[0][mcs] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).vht[1][mcs] += pstats->retry_pkts;
} else if (txrate->flags & RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(SUCC).ht[0][ht_idx] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).ht[1][ht_idx] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).ht[0][ht_idx] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).ht[1][ht_idx] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).ht[0][ht_idx] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).ht[1][ht_idx] += pstats->retry_pkts;
} else {
mcs = legacy_rate_idx;
STATS_OP_FMT(SUCC).legacy[0][mcs] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).legacy[1][mcs] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).legacy[0][mcs] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).legacy[1][mcs] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).legacy[0][mcs] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).legacy[1][mcs] += pstats->retry_pkts;
}
if (ATH10K_HW_AMPDU(pstats->flags)) {
tx_stats->ba_fails += ATH10K_HW_BA_FAIL(pstats->flags);
if (txrate->flags & RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(AMPDU).ht[0][ht_idx] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).ht[1][ht_idx] +=
pstats->succ_pkts + pstats->retry_pkts;
} else {
STATS_OP_FMT(AMPDU).vht[0][mcs] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).vht[1][mcs] +=
pstats->succ_pkts + pstats->retry_pkts;
}
STATS_OP_FMT(AMPDU).bw[0][bw] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).nss[0][nss - 1] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).gi[0][gi] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).rate_table[0][idx] +=
pstats->succ_bytes + pstats->retry_bytes;
STATS_OP_FMT(AMPDU).bw[1][bw] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).nss[1][nss - 1] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).gi[1][gi] +=
pstats->succ_pkts + pstats->retry_pkts;
STATS_OP_FMT(AMPDU).rate_table[1][idx] +=
pstats->succ_pkts + pstats->retry_pkts;
} else {
tx_stats->ack_fails +=
ATH10K_HW_BA_FAIL(pstats->flags);
}
STATS_OP_FMT(SUCC).bw[0][bw] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).nss[0][nss - 1] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).gi[0][gi] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).bw[1][bw] += pstats->succ_pkts;
STATS_OP_FMT(SUCC).nss[1][nss - 1] += pstats->succ_pkts;
STATS_OP_FMT(SUCC).gi[1][gi] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).bw[0][bw] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).nss[0][nss - 1] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).gi[0][gi] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).bw[1][bw] += pstats->failed_pkts;
STATS_OP_FMT(FAIL).nss[1][nss - 1] += pstats->failed_pkts;
STATS_OP_FMT(FAIL).gi[1][gi] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).bw[0][bw] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).nss[0][nss - 1] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).gi[0][gi] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).bw[1][bw] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).nss[1][nss - 1] += pstats->retry_pkts;
STATS_OP_FMT(RETRY).gi[1][gi] += pstats->retry_pkts;
if (txrate->flags >= RATE_INFO_FLAGS_MCS) {
STATS_OP_FMT(SUCC).rate_table[0][idx] += pstats->succ_bytes;
STATS_OP_FMT(SUCC).rate_table[1][idx] += pstats->succ_pkts;
STATS_OP_FMT(FAIL).rate_table[0][idx] += pstats->failed_bytes;
STATS_OP_FMT(FAIL).rate_table[1][idx] += pstats->failed_pkts;
STATS_OP_FMT(RETRY).rate_table[0][idx] += pstats->retry_bytes;
STATS_OP_FMT(RETRY).rate_table[1][idx] += pstats->retry_pkts;
}
tx_stats->tx_duration += pstats->duration;
}
static void
ath10k_update_per_peer_tx_stats(struct ath10k *ar,
struct ieee80211_sta *sta,
struct ath10k_per_peer_tx_stats *peer_stats)
{
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ieee80211_chanctx_conf *conf = NULL;
u8 rate = 0, sgi;
s8 rate_idx = 0;
bool skip_auto_rate;
struct rate_info txrate;
lockdep_assert_held(&ar->data_lock);
txrate.flags = ATH10K_HW_PREAMBLE(peer_stats->ratecode);
txrate.bw = ATH10K_HW_BW(peer_stats->flags);
txrate.nss = ATH10K_HW_NSS(peer_stats->ratecode);
txrate.mcs = ATH10K_HW_MCS_RATE(peer_stats->ratecode);
sgi = ATH10K_HW_GI(peer_stats->flags);
skip_auto_rate = ATH10K_FW_SKIPPED_RATE_CTRL(peer_stats->flags);
/* Firmware's rate control skips broadcast/management frames,
 * frames sent using host-configured fixed rates and some other
 * special cases.
 */
if (skip_auto_rate)
return;
if (txrate.flags == WMI_RATE_PREAMBLE_VHT && txrate.mcs > 9) {
ath10k_warn(ar, "Invalid VHT mcs %d peer stats", txrate.mcs);
return;
}
if (txrate.flags == WMI_RATE_PREAMBLE_HT &&
(txrate.mcs > 7 || txrate.nss < 1)) {
ath10k_warn(ar, "Invalid HT mcs %d nss %d peer stats",
txrate.mcs, txrate.nss);
return;
}
memset(&arsta->txrate, 0, sizeof(arsta->txrate));
memset(&arsta->tx_info.status, 0, sizeof(arsta->tx_info.status));
if (txrate.flags == WMI_RATE_PREAMBLE_CCK ||
txrate.flags == WMI_RATE_PREAMBLE_OFDM) {
rate = ATH10K_HW_LEGACY_RATE(peer_stats->ratecode);
/* This is hacky, FW sends CCK rate 5.5Mbps as 6 */
if (rate == 6 && txrate.flags == WMI_RATE_PREAMBLE_CCK)
rate = 5;
rate_idx = ath10k_get_legacy_rate_idx(ar, rate);
if (rate_idx < 0)
return;
arsta->txrate.legacy = rate;
} else if (txrate.flags == WMI_RATE_PREAMBLE_HT) {
arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
arsta->txrate.mcs = txrate.mcs + 8 * (txrate.nss - 1);
} else {
arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
arsta->txrate.mcs = txrate.mcs;
}
switch (txrate.flags) {
case WMI_RATE_PREAMBLE_OFDM:
if (arsta->arvif && arsta->arvif->vif)
conf = rcu_dereference(arsta->arvif->vif->bss_conf.chanctx_conf);
if (conf && conf->def.chan->band == NL80211_BAND_5GHZ)
arsta->tx_info.status.rates[0].idx = rate_idx - 4;
break;
case WMI_RATE_PREAMBLE_CCK:
arsta->tx_info.status.rates[0].idx = rate_idx;
if (sgi)
arsta->tx_info.status.rates[0].flags |=
(IEEE80211_TX_RC_USE_SHORT_PREAMBLE |
IEEE80211_TX_RC_SHORT_GI);
break;
case WMI_RATE_PREAMBLE_HT:
arsta->tx_info.status.rates[0].idx =
txrate.mcs + ((txrate.nss - 1) * 8);
if (sgi)
arsta->tx_info.status.rates[0].flags |=
IEEE80211_TX_RC_SHORT_GI;
arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_MCS;
break;
case WMI_RATE_PREAMBLE_VHT:
ieee80211_rate_set_vht(&arsta->tx_info.status.rates[0],
txrate.mcs, txrate.nss);
if (sgi)
arsta->tx_info.status.rates[0].flags |=
IEEE80211_TX_RC_SHORT_GI;
arsta->tx_info.status.rates[0].flags |= IEEE80211_TX_RC_VHT_MCS;
break;
}
arsta->txrate.nss = txrate.nss;
arsta->txrate.bw = ath10k_bw_to_mac80211_bw(txrate.bw);
arsta->last_tx_bitrate = cfg80211_calculate_bitrate(&arsta->txrate);
if (sgi)
arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
switch (arsta->txrate.bw) {
case RATE_INFO_BW_40:
arsta->tx_info.status.rates[0].flags |=
IEEE80211_TX_RC_40_MHZ_WIDTH;
break;
case RATE_INFO_BW_80:
arsta->tx_info.status.rates[0].flags |=
IEEE80211_TX_RC_80_MHZ_WIDTH;
break;
case RATE_INFO_BW_160:
arsta->tx_info.status.rates[0].flags |=
IEEE80211_TX_RC_160_MHZ_WIDTH;
break;
}
if (peer_stats->succ_pkts) {
arsta->tx_info.flags = IEEE80211_TX_STAT_ACK;
arsta->tx_info.status.rates[0].count = 1;
ieee80211_tx_rate_update(ar->hw, sta, &arsta->tx_info);
}
if (ar->htt.disable_tx_comp) {
arsta->tx_failed += peer_stats->failed_pkts;
ath10k_dbg(ar, ATH10K_DBG_HTT, "tx failed %d\n",
arsta->tx_failed);
}
arsta->tx_retries += peer_stats->retry_pkts;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx retries %d", arsta->tx_retries);
if (ath10k_debug_is_extd_tx_stats_enabled(ar))
ath10k_accumulate_per_peer_tx_stats(ar, arsta, peer_stats,
rate_idx);
}
static void ath10k_htt_fetch_peer_stats(struct ath10k *ar,
struct sk_buff *skb)
{
struct htt_resp *resp = (struct htt_resp *)skb->data;
struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
struct htt_per_peer_tx_stats_ind *tx_stats;
struct ieee80211_sta *sta;
struct ath10k_peer *peer;
int peer_id, i;
u8 ppdu_len, num_ppdu;
num_ppdu = resp->peer_tx_stats.num_ppdu;
ppdu_len = resp->peer_tx_stats.ppdu_len * sizeof(__le32);
if (skb->len < sizeof(struct htt_resp_hdr) + num_ppdu * ppdu_len) {
ath10k_warn(ar, "Invalid peer stats buf length %d\n", skb->len);
return;
}
tx_stats = (struct htt_per_peer_tx_stats_ind *)
(resp->peer_tx_stats.payload);
peer_id = __le16_to_cpu(tx_stats->peer_id);
rcu_read_lock();
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer || !peer->sta) {
ath10k_warn(ar, "Invalid peer id %d peer stats buffer\n",
peer_id);
goto out;
}
sta = peer->sta;
for (i = 0; i < num_ppdu; i++) {
tx_stats = (struct htt_per_peer_tx_stats_ind *)
(resp->peer_tx_stats.payload + i * ppdu_len);
p_tx_stats->succ_bytes = __le32_to_cpu(tx_stats->succ_bytes);
p_tx_stats->retry_bytes = __le32_to_cpu(tx_stats->retry_bytes);
p_tx_stats->failed_bytes =
__le32_to_cpu(tx_stats->failed_bytes);
p_tx_stats->ratecode = tx_stats->ratecode;
p_tx_stats->flags = tx_stats->flags;
p_tx_stats->succ_pkts = __le16_to_cpu(tx_stats->succ_pkts);
p_tx_stats->retry_pkts = __le16_to_cpu(tx_stats->retry_pkts);
p_tx_stats->failed_pkts = __le16_to_cpu(tx_stats->failed_pkts);
p_tx_stats->duration = __le16_to_cpu(tx_stats->tx_duration);
ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
}
out:
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
}
static void ath10k_fetch_10_2_tx_stats(struct ath10k *ar, u8 *data)
{
struct ath10k_pktlog_hdr *hdr = (struct ath10k_pktlog_hdr *)data;
struct ath10k_per_peer_tx_stats *p_tx_stats = &ar->peer_tx_stats;
struct ath10k_10_2_peer_tx_stats *tx_stats;
struct ieee80211_sta *sta;
struct ath10k_peer *peer;
u16 log_type = __le16_to_cpu(hdr->log_type);
u32 peer_id = 0, i;
if (log_type != ATH_PKTLOG_TYPE_TX_STAT)
return;
tx_stats = (struct ath10k_10_2_peer_tx_stats *)((hdr->payload) +
ATH10K_10_2_TX_STATS_OFFSET);
if (!tx_stats->tx_ppdu_cnt)
return;
peer_id = tx_stats->peer_id;
rcu_read_lock();
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer || !peer->sta) {
ath10k_warn(ar, "Invalid peer id %d in peer stats buffer\n",
peer_id);
goto out;
}
sta = peer->sta;
for (i = 0; i < tx_stats->tx_ppdu_cnt; i++) {
p_tx_stats->succ_bytes =
__le16_to_cpu(tx_stats->success_bytes[i]);
p_tx_stats->retry_bytes =
__le16_to_cpu(tx_stats->retry_bytes[i]);
p_tx_stats->failed_bytes =
__le16_to_cpu(tx_stats->failed_bytes[i]);
p_tx_stats->ratecode = tx_stats->ratecode[i];
p_tx_stats->flags = tx_stats->flags[i];
p_tx_stats->succ_pkts = tx_stats->success_pkts[i];
p_tx_stats->retry_pkts = tx_stats->retry_pkts[i];
p_tx_stats->failed_pkts = tx_stats->failed_pkts[i];
ath10k_update_per_peer_tx_stats(ar, sta, p_tx_stats);
}
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
return;
out:
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
}
static int ath10k_htt_rx_pn_len(enum htt_security_types sec_type)
{
switch (sec_type) {
case HTT_SECURITY_TKIP:
case HTT_SECURITY_TKIP_NOMIC:
case HTT_SECURITY_AES_CCMP:
return 48;
default:
return 0;
}
}
static void ath10k_htt_rx_sec_ind_handler(struct ath10k *ar,
struct htt_security_indication *ev)
{
enum htt_txrx_sec_cast_type sec_index;
enum htt_security_types sec_type;
struct ath10k_peer *peer;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, __le16_to_cpu(ev->peer_id));
if (!peer) {
ath10k_warn(ar, "failed to find peer id %d for security indication",
__le16_to_cpu(ev->peer_id));
goto out;
}
sec_type = MS(ev->flags, HTT_SECURITY_TYPE);
if (ev->flags & HTT_SECURITY_IS_UNICAST)
sec_index = HTT_TXRX_SEC_UCAST;
else
sec_index = HTT_TXRX_SEC_MCAST;
peer->rx_pn[sec_index].sec_type = sec_type;
peer->rx_pn[sec_index].pn_len = ath10k_htt_rx_pn_len(sec_type);
memset(peer->tids_last_pn_valid, 0, sizeof(peer->tids_last_pn_valid));
memset(peer->tids_last_pn, 0, sizeof(peer->tids_last_pn));
out:
spin_unlock_bh(&ar->data_lock);
}
bool ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_resp *resp = (struct htt_resp *)skb->data;
enum htt_t2h_msg_type type;
/* confirm alignment */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath10k_warn(ar, "unaligned htt message, expect trouble\n");
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, msg_type: 0x%0X\n",
resp->hdr.msg_type);
if (resp->hdr.msg_type >= ar->htt.t2h_msg_types_max) {
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt rx, unsupported msg_type: 0x%0X\n max: 0x%0X",
resp->hdr.msg_type, ar->htt.t2h_msg_types_max);
return true;
}
type = ar->htt.t2h_msg_types[resp->hdr.msg_type];
switch (type) {
case HTT_T2H_MSG_TYPE_VERSION_CONF: {
htt->target_version_major = resp->ver_resp.major;
htt->target_version_minor = resp->ver_resp.minor;
complete(&htt->target_version_received);
break;
}
case HTT_T2H_MSG_TYPE_RX_IND:
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
ath10k_htt_rx_proc_rx_ind_ll(htt, &resp->rx_ind);
} else {
skb_queue_tail(&htt->rx_indication_head, skb);
return false;
}
break;
case HTT_T2H_MSG_TYPE_PEER_MAP: {
struct htt_peer_map_event ev = {
.vdev_id = resp->peer_map.vdev_id,
.peer_id = __le16_to_cpu(resp->peer_map.peer_id),
};
memcpy(ev.addr, resp->peer_map.addr, sizeof(ev.addr));
ath10k_peer_map_event(htt, &ev);
break;
}
case HTT_T2H_MSG_TYPE_PEER_UNMAP: {
struct htt_peer_unmap_event ev = {
.peer_id = __le16_to_cpu(resp->peer_unmap.peer_id),
};
ath10k_peer_unmap_event(htt, &ev);
break;
}
case HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION: {
struct htt_tx_done tx_done = {};
struct ath10k_htt *htt = &ar->htt;
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
int status = __le32_to_cpu(resp->mgmt_tx_completion.status);
int info = __le32_to_cpu(resp->mgmt_tx_completion.info);
tx_done.msdu_id = __le32_to_cpu(resp->mgmt_tx_completion.desc_id);
switch (status) {
case HTT_MGMT_TX_STATUS_OK:
tx_done.status = HTT_TX_COMPL_STATE_ACK;
if (test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS,
ar->wmi.svc_map) &&
(resp->mgmt_tx_completion.flags &
HTT_MGMT_TX_CMPL_FLAG_ACK_RSSI)) {
tx_done.ack_rssi =
FIELD_GET(HTT_MGMT_TX_CMPL_INFO_ACK_RSSI_MASK,
info);
}
break;
case HTT_MGMT_TX_STATUS_RETRY:
tx_done.status = HTT_TX_COMPL_STATE_NOACK;
break;
case HTT_MGMT_TX_STATUS_DROP:
tx_done.status = HTT_TX_COMPL_STATE_DISCARD;
break;
}
if (htt->disable_tx_comp) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits++;
spin_unlock_bh(&htc->tx_lock);
}
status = ath10k_txrx_tx_unref(htt, &tx_done);
if (!status) {
spin_lock_bh(&htt->tx_lock);
ath10k_htt_tx_mgmt_dec_pending(htt);
spin_unlock_bh(&htt->tx_lock);
}
break;
}
case HTT_T2H_MSG_TYPE_TX_COMPL_IND:
ath10k_htt_rx_tx_compl_ind(htt->ar, skb);
break;
case HTT_T2H_MSG_TYPE_SEC_IND: {
struct ath10k *ar = htt->ar;
struct htt_security_indication *ev = &resp->security_indication;
ath10k_htt_rx_sec_ind_handler(ar, ev);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"sec ind peer_id %d unicast %d type %d\n",
__le16_to_cpu(ev->peer_id),
!!(ev->flags & HTT_SECURITY_IS_UNICAST),
MS(ev->flags, HTT_SECURITY_TYPE));
complete(&ar->install_key_done);
break;
}
case HTT_T2H_MSG_TYPE_RX_FRAG_IND: {
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
atomic_inc(&htt->num_mpdus_ready);
return ath10k_htt_rx_proc_rx_frag_ind(htt,
&resp->rx_frag_ind,
skb);
}
case HTT_T2H_MSG_TYPE_TEST:
break;
case HTT_T2H_MSG_TYPE_STATS_CONF:
trace_ath10k_htt_stats(ar, skb->data, skb->len);
break;
case HTT_T2H_MSG_TYPE_TX_INSPECT_IND:
/* Firmware can return tx frames if it's unable to fully
* process them and suspects host may be able to fix it. ath10k
* sends all tx frames as already inspected so this shouldn't
* happen unless fw has a bug.
*/
ath10k_warn(ar, "received an unexpected htt tx inspect event\n");
break;
case HTT_T2H_MSG_TYPE_RX_ADDBA:
ath10k_htt_rx_addba(ar, resp);
break;
case HTT_T2H_MSG_TYPE_RX_DELBA:
ath10k_htt_rx_delba(ar, resp);
break;
case HTT_T2H_MSG_TYPE_PKTLOG: {
trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
skb->len -
offsetof(struct htt_resp,
pktlog_msg.payload));
if (ath10k_peer_stats_enabled(ar))
ath10k_fetch_10_2_tx_stats(ar,
resp->pktlog_msg.payload);
break;
}
case HTT_T2H_MSG_TYPE_RX_FLUSH: {
/* Ignore this event because mac80211 takes care of Rx
* aggregation reordering.
*/
break;
}
case HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND: {
skb_queue_tail(&htt->rx_in_ord_compl_q, skb);
return false;
}
case HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND: {
struct ath10k_htt *htt = &ar->htt;
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_ep *ep = &ar->htc.endpoint[htt->eid];
u32 msg_word = __le32_to_cpu(*(__le32 *)resp);
int htt_credit_delta;
htt_credit_delta = HTT_TX_CREDIT_DELTA_ABS_GET(msg_word);
if (HTT_TX_CREDIT_SIGN_BIT_GET(msg_word))
htt_credit_delta = -htt_credit_delta;
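/* The credit delta is sign-magnitude encoded: an absolute value
 * plus a separate sign bit rather than two's complement.
 */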
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt credit update delta %d\n",
htt_credit_delta);
if (htt->disable_tx_comp) {
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += htt_credit_delta;
spin_unlock_bh(&htc->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt credit total %d\n",
ep->tx_credits);
ep->ep_ops.ep_tx_credits(htc->ar);
}
break;
}
case HTT_T2H_MSG_TYPE_CHAN_CHANGE: {
u32 phymode = __le32_to_cpu(resp->chan_change.phymode);
u32 freq = __le32_to_cpu(resp->chan_change.freq);
ar->tgt_oper_chan = ieee80211_get_channel(ar->hw->wiphy, freq);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt chan change freq %u phymode %s\n",
freq, ath10k_wmi_phymode_str(phymode));
break;
}
case HTT_T2H_MSG_TYPE_AGGR_CONF:
break;
case HTT_T2H_MSG_TYPE_TX_FETCH_IND: {
struct sk_buff *tx_fetch_ind = skb_copy(skb, GFP_ATOMIC);
if (!tx_fetch_ind) {
ath10k_warn(ar, "failed to copy htt tx fetch ind\n");
break;
}
skb_queue_tail(&htt->tx_fetch_ind_q, tx_fetch_ind);
break;
}
case HTT_T2H_MSG_TYPE_TX_FETCH_CONFIRM:
ath10k_htt_rx_tx_fetch_confirm(ar, skb);
break;
case HTT_T2H_MSG_TYPE_TX_MODE_SWITCH_IND:
ath10k_htt_rx_tx_mode_switch_ind(ar, skb);
break;
case HTT_T2H_MSG_TYPE_PEER_STATS:
ath10k_htt_fetch_peer_stats(ar, skb);
break;
case HTT_T2H_MSG_TYPE_EN_STATS:
default:
ath10k_warn(ar, "htt event (%d) not handled\n",
resp->hdr.msg_type);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
break;
}
return true;
}
EXPORT_SYMBOL(ath10k_htt_t2h_msg_handler);
void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
struct sk_buff *skb)
{
trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);
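/* Deliver MSDUs queued on rx_msdus_q to mac80211, at most (budget - quota)
 * frames per call. Returns the updated quota so the NAPI poll routine can
 * account the delivered frames against its budget.
 */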
static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
{
struct sk_buff *skb;
while (quota < budget) {
if (skb_queue_empty(&ar->htt.rx_msdus_q))
break;
skb = skb_dequeue(&ar->htt.rx_msdus_q);
if (!skb)
break;
ath10k_process_rx(ar, skb);
quota++;
}
return quota;
}
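/* Rx for high-latency (e.g. SDIO/USB) targets: drain up to @budget
 * indications from rx_indication_head and run the HL rx indication handler
 * on each. The handler returns true when the skb can be freed here.
 */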
int ath10k_htt_rx_hl_indication(struct ath10k *ar, int budget)
{
struct htt_resp *resp;
struct ath10k_htt *htt = &ar->htt;
struct sk_buff *skb;
bool release;
int quota;
for (quota = 0; quota < budget; quota++) {
skb = skb_dequeue(&htt->rx_indication_head);
if (!skb)
break;
resp = (struct htt_resp *)skb->data;
release = ath10k_htt_rx_proc_rx_ind_hl(htt,
&resp->rx_ind_hl,
skb,
HTT_RX_PN_CHECK,
HTT_RX_NON_TKIP_MIC);
if (release)
dev_kfree_skb_any(skb);
ath10k_dbg(ar, ATH10K_DBG_HTT, "rx indication poll pending count:%d\n",
skb_queue_len(&htt->rx_indication_head));
}
return quota;
}
EXPORT_SYMBOL(ath10k_htt_rx_hl_indication);
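/* NAPI poll for HTT tx/rx completions. Ordering matters: first flush MSDUs
 * already queued for delivery, then process in-order rx indications and
 * pending MPDUs from hardware, deliver the resulting MSDUs, and finally
 * reap tx completions from txdone_fifo and pending tx fetch indications.
 * Returns the full @budget when NAPI should be rescheduled.
 */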
int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
{
struct ath10k_htt *htt = &ar->htt;
struct htt_tx_done tx_done = {};
struct sk_buff_head tx_ind_q;
struct sk_buff *skb;
unsigned long flags;
int quota = 0, done, ret;
bool resched_napi = false;
__skb_queue_head_init(&tx_ind_q);
/* Process pending frames before dequeuing more data
* from hardware.
*/
quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
if (quota == budget) {
resched_napi = true;
goto exit;
}
while ((skb = skb_dequeue(&htt->rx_in_ord_compl_q))) {
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_in_ord_ind(ar, skb);
spin_unlock_bh(&htt->rx_ring.lock);
dev_kfree_skb_any(skb);
if (ret == -EIO) {
resched_napi = true;
goto exit;
}
}
while (atomic_read(&htt->num_mpdus_ready)) {
ret = ath10k_htt_rx_handle_amsdu(htt);
if (ret == -EIO) {
resched_napi = true;
goto exit;
}
atomic_dec(&htt->num_mpdus_ready);
}
/* Deliver received data after processing data from hardware */
quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
/* From NAPI documentation:
* The napi poll() function may also process TX completions, in which
* case if it processes the entire TX ring then it should count that
* work as the rest of the budget.
*/
if ((quota < budget) && !kfifo_is_empty(&htt->txdone_fifo))
quota = budget;
/* kfifo_get: called only within txrx_tasklet so it's neatly serialized.
 * From kfifo_get() documentation:
 * Note that with only one concurrent reader and one concurrent writer,
 * you don't need extra locking to use these macros.
 */
while (kfifo_get(&htt->txdone_fifo, &tx_done))
ath10k_txrx_tx_unref(htt, &tx_done);
ath10k_mac_tx_push_pending(ar);
spin_lock_irqsave(&htt->tx_fetch_ind_q.lock, flags);
skb_queue_splice_init(&htt->tx_fetch_ind_q, &tx_ind_q);
spin_unlock_irqrestore(&htt->tx_fetch_ind_q.lock, flags);
while ((skb = __skb_dequeue(&tx_ind_q))) {
ath10k_htt_rx_tx_fetch_ind(ar, skb);
dev_kfree_skb_any(skb);
}
exit:
ath10k_htt_rx_msdu_buff_replenish(htt);
/* In case of rx failure or more data to read, report budget
* to reschedule NAPI poll
*/
done = resched_napi ? budget : quota;
return done;
}
EXPORT_SYMBOL(ath10k_htt_txrx_compl_task);
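/* Per-target rx ops: 32-bit vs 64-bit rx ring paddr handling for
 * low-latency targets, and a separate table for high-latency targets,
 * which bypass the host rx ring and only need the fragment indication
 * handler.
 */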
static const struct ath10k_htt_rx_ops htt_rx_ops_32 = {
.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_32,
.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_32,
.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_32,
.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_32,
.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_32,
};
static const struct ath10k_htt_rx_ops htt_rx_ops_64 = {
.htt_get_rx_ring_size = ath10k_htt_get_rx_ring_size_64,
.htt_config_paddrs_ring = ath10k_htt_config_paddrs_ring_64,
.htt_set_paddrs_ring = ath10k_htt_set_paddrs_ring_64,
.htt_get_vaddr_ring = ath10k_htt_get_vaddr_ring_64,
.htt_reset_paddrs_ring = ath10k_htt_reset_paddrs_ring_64,
};
static const struct ath10k_htt_rx_ops htt_rx_ops_hl = {
.htt_rx_proc_rx_frag_ind = ath10k_htt_rx_proc_rx_frag_ind_hl,
};
void ath10k_htt_set_rx_ops(struct ath10k_htt *htt)
{
struct ath10k *ar = htt->ar;
if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
htt->rx_ops = &htt_rx_ops_hl;
else if (ar->hw_params.target_64bit)
htt->rx_ops = &htt_rx_ops_64;
else
htt->rx_ops = &htt_rx_ops_32;
}
| linux-master | drivers/net/wireless/ath/ath10k/htt_rx.c |
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include "core.h"
#include "wmi-ops.h"
#include "txrx.h"
#include "debug.h"
static void ath10k_rx_stats_update_amsdu_subfrm(struct ath10k *ar,
struct ath10k_sta_tid_stats *stats,
u32 msdu_count)
{
if (msdu_count == 1)
stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_1]++;
else if (msdu_count == 2)
stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_2]++;
else if (msdu_count == 3)
stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_3]++;
else if (msdu_count == 4)
stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_4]++;
else if (msdu_count > 4)
stats->rx_pkt_amsdu[ATH10K_AMSDU_SUBFRM_NUM_MORE]++;
}
static void ath10k_rx_stats_update_ampdu_subfrm(struct ath10k *ar,
struct ath10k_sta_tid_stats *stats,
u32 mpdu_count)
{
if (mpdu_count <= 10)
stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_10]++;
else if (mpdu_count <= 20)
stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_20]++;
else if (mpdu_count <= 30)
stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_30]++;
else if (mpdu_count <= 40)
stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_40]++;
else if (mpdu_count <= 50)
stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_50]++;
else if (mpdu_count <= 60)
stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_60]++;
else if (mpdu_count > 60)
stats->rx_pkt_ampdu[ATH10K_AMPDU_SUBFRM_NUM_MORE]++;
}
void ath10k_sta_update_rx_tid_stats_ampdu(struct ath10k *ar, u16 peer_id, u8 tid,
struct htt_rx_indication_mpdu_range *ranges,
int num_ranges)
{
struct ath10k_sta *arsta;
struct ath10k_peer *peer;
int i;
if (tid > IEEE80211_NUM_TIDS || !(ar->sta_tid_stats_mask & BIT(tid)))
return;
rcu_read_lock();
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, peer_id);
if (!peer || !peer->sta)
goto out;
arsta = (struct ath10k_sta *)peer->sta->drv_priv;
for (i = 0; i < num_ranges; i++)
ath10k_rx_stats_update_ampdu_subfrm(ar,
&arsta->tid_stats[tid],
ranges[i].mpdu_count);
out:
spin_unlock_bh(&ar->data_lock);
rcu_read_unlock();
}
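/* Fold one rx batch into the per-station, per-TID counters: frames from
 * firmware, unchained/dropped/filtered counts, per-error-class counts and
 * the A-MSDU subframe histogram. TIDs not enabled in sta_tid_stats_mask
 * and non-data frames are ignored.
 */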
void ath10k_sta_update_rx_tid_stats(struct ath10k *ar, u8 *first_hdr,
unsigned long num_msdus,
enum ath10k_pkt_rx_err err,
unsigned long unchain_cnt,
unsigned long drop_cnt,
unsigned long drop_cnt_filter,
unsigned long queued_msdus)
{
struct ieee80211_sta *sta;
struct ath10k_sta *arsta;
struct ieee80211_hdr *hdr;
struct ath10k_sta_tid_stats *stats;
u8 tid = IEEE80211_NUM_TIDS;
bool non_data_frm = false;
hdr = (struct ieee80211_hdr *)first_hdr;
if (!ieee80211_is_data(hdr->frame_control))
non_data_frm = true;
if (ieee80211_is_data_qos(hdr->frame_control))
tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK;
if (!(ar->sta_tid_stats_mask & BIT(tid)) || non_data_frm)
return;
rcu_read_lock();
sta = ieee80211_find_sta_by_ifaddr(ar->hw, hdr->addr2, NULL);
if (!sta)
goto exit;
arsta = (struct ath10k_sta *)sta->drv_priv;
spin_lock_bh(&ar->data_lock);
stats = &arsta->tid_stats[tid];
stats->rx_pkt_from_fw += num_msdus;
stats->rx_pkt_unchained += unchain_cnt;
stats->rx_pkt_drop_chained += drop_cnt;
stats->rx_pkt_drop_filter += drop_cnt_filter;
if (err != ATH10K_PKT_RX_ERR_MAX)
stats->rx_pkt_err[err] += queued_msdus;
stats->rx_pkt_queued_for_mac += queued_msdus;
ath10k_rx_stats_update_amsdu_subfrm(ar, &arsta->tid_stats[tid],
num_msdus);
spin_unlock_bh(&ar->data_lock);
exit:
rcu_read_unlock();
}
static void ath10k_sta_update_extd_stats_rx_duration(struct ath10k *ar,
struct ath10k_fw_stats *stats)
{
struct ath10k_fw_extd_stats_peer *peer;
struct ieee80211_sta *sta;
struct ath10k_sta *arsta;
rcu_read_lock();
list_for_each_entry(peer, &stats->peers_extd, list) {
sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
NULL);
if (!sta)
continue;
arsta = (struct ath10k_sta *)sta->drv_priv;
arsta->rx_duration += (u64)peer->rx_duration;
}
rcu_read_unlock();
}
static void ath10k_sta_update_stats_rx_duration(struct ath10k *ar,
struct ath10k_fw_stats *stats)
{
struct ath10k_fw_stats_peer *peer;
struct ieee80211_sta *sta;
struct ath10k_sta *arsta;
rcu_read_lock();
list_for_each_entry(peer, &stats->peers, list) {
sta = ieee80211_find_sta_by_ifaddr(ar->hw, peer->peer_macaddr,
NULL);
if (!sta)
continue;
arsta = (struct ath10k_sta *)sta->drv_priv;
arsta->rx_duration += (u64)peer->rx_duration;
}
rcu_read_unlock();
}
void ath10k_sta_update_rx_duration(struct ath10k *ar,
struct ath10k_fw_stats *stats)
{
if (stats->extended)
ath10k_sta_update_extd_stats_rx_duration(ar, stats);
else
ath10k_sta_update_stats_rx_duration(ar, stats);
}
static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
char buf[32];
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
(arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
"auto" : "manual");
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 aggr_mode;
int ret;
if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
return -EINVAL;
if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(aggr_mode == arsta->aggr_mode)) {
ret = count;
goto out;
}
ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
if (ret) {
ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
goto out;
}
arsta->aggr_mode = aggr_mode;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_aggr_mode = {
.read = ath10k_dbg_sta_read_aggr_mode,
.write = ath10k_dbg_sta_write_aggr_mode,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 tid, buf_size;
int ret;
char buf[64] = {0};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
if (ret <= 0)
return ret;
ret = sscanf(buf, "%u %u", &tid, &buf_size);
if (ret != 2)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
ret = count;
goto out;
}
ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
tid, buf_size);
if (ret) {
ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
arsta->arvif->vdev_id, sta->addr, tid, buf_size);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_addba = {
.write = ath10k_dbg_sta_write_addba,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 tid, status;
int ret;
char buf[64] = {0};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
if (ret <= 0)
return ret;
ret = sscanf(buf, "%u %u", &tid, &status);
if (ret != 2)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
ret = count;
goto out;
}
ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
tid, status);
if (ret) {
ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
arsta->arvif->vdev_id, sta->addr, tid, status);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_addba_resp = {
.write = ath10k_dbg_sta_write_addba_resp,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 tid, initiator, reason;
int ret;
char buf[64] = {0};
ret = simple_write_to_buffer(buf, sizeof(buf) - 1, ppos,
user_buf, count);
if (ret <= 0)
return ret;
ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
if (ret != 3)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
ret = count;
goto out;
}
ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
tid, initiator, reason);
if (ret) {
ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
arsta->arvif->vdev_id, sta->addr, tid, initiator,
reason);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_delba = {
.write = ath10k_dbg_sta_write_delba,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_read_peer_debug_trigger(struct file *file,
char __user *user_buf,
size_t count,
loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
char buf[64];
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len,
"Write 1 once to trigger the debug logs\n");
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t
ath10k_dbg_sta_write_peer_debug_trigger(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u8 peer_debug_trigger;
int ret;
if (kstrtou8_from_user(user_buf, count, 0, &peer_debug_trigger))
return -EINVAL;
if (peer_debug_trigger != 1)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON) {
ret = -ENETDOWN;
goto out;
}
ret = ath10k_wmi_peer_set_param(ar, arsta->arvif->vdev_id, sta->addr,
ar->wmi.peer_param->debug, peer_debug_trigger);
if (ret) {
ath10k_warn(ar, "failed to set param to trigger peer tid logs for station ret: %d\n",
ret);
goto out;
}
out:
mutex_unlock(&ar->conf_mutex);
return count;
}
static const struct file_operations fops_peer_debug_trigger = {
.open = simple_open,
.read = ath10k_dbg_sta_read_peer_debug_trigger,
.write = ath10k_dbg_sta_write_peer_debug_trigger,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_read_peer_ps_state(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
char buf[20];
int len = 0;
spin_lock_bh(&ar->data_lock);
len = scnprintf(buf, sizeof(buf) - len, "%d\n",
arsta->peer_ps_state);
spin_unlock_bh(&ar->data_lock);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_peer_ps_state = {
.open = simple_open,
.read = ath10k_dbg_sta_read_peer_ps_state,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static char *get_err_str(enum ath10k_pkt_rx_err i)
{
switch (i) {
case ATH10K_PKT_RX_ERR_FCS:
return "fcs_err";
case ATH10K_PKT_RX_ERR_TKIP:
return "tkip_err";
case ATH10K_PKT_RX_ERR_CRYPT:
return "crypt_err";
case ATH10K_PKT_RX_ERR_PEER_IDX_INVAL:
return "peer_idx_inval";
case ATH10K_PKT_RX_ERR_MAX:
return "unknown";
}
return "unknown";
}
static char *get_num_ampdu_subfrm_str(enum ath10k_ampdu_subfrm_num i)
{
switch (i) {
case ATH10K_AMPDU_SUBFRM_NUM_10:
return "up to 10";
case ATH10K_AMPDU_SUBFRM_NUM_20:
return "11-20";
case ATH10K_AMPDU_SUBFRM_NUM_30:
return "21-30";
case ATH10K_AMPDU_SUBFRM_NUM_40:
return "31-40";
case ATH10K_AMPDU_SUBFRM_NUM_50:
return "41-50";
case ATH10K_AMPDU_SUBFRM_NUM_60:
return "51-60";
case ATH10K_AMPDU_SUBFRM_NUM_MORE:
return ">60";
case ATH10K_AMPDU_SUBFRM_NUM_MAX:
return "0";
}
return "0";
}
static char *get_num_amsdu_subfrm_str(enum ath10k_amsdu_subfrm_num i)
{
switch (i) {
case ATH10K_AMSDU_SUBFRM_NUM_1:
return "1";
case ATH10K_AMSDU_SUBFRM_NUM_2:
return "2";
case ATH10K_AMSDU_SUBFRM_NUM_3:
return "3";
case ATH10K_AMSDU_SUBFRM_NUM_4:
return "4";
case ATH10K_AMSDU_SUBFRM_NUM_MORE:
return ">4";
case ATH10K_AMSDU_SUBFRM_NUM_MAX:
return "0";
}
return "0";
}
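/* Print one row of a per-TID counter for every TID enabled in
 * sta_tid_stats_mask, eight "[tid] count" pairs per line. Relies on the
 * caller's buf, buf_len, len, j and stats locals.
 */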
#define PRINT_TID_STATS(_field, _tabs) \
do { \
int k = 0; \
for (j = 0; j <= IEEE80211_NUM_TIDS; j++) { \
if (ar->sta_tid_stats_mask & BIT(j)) { \
len += scnprintf(buf + len, buf_len - len, \
"[%02d] %-10lu ", \
j, stats[j]._field); \
k++; \
if (k % 8 == 0) { \
len += scnprintf(buf + len, \
buf_len - len, "\n"); \
len += scnprintf(buf + len, \
buf_len - len, \
_tabs); \
} \
} \
} \
len += scnprintf(buf + len, buf_len - len, "\n"); \
} while (0)
static ssize_t ath10k_dbg_sta_read_tid_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
struct ath10k_sta_tid_stats *stats = arsta->tid_stats;
size_t len = 0, buf_len = 1048 * IEEE80211_NUM_TIDS;
char *buf;
int i, j;
ssize_t ret;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
spin_lock_bh(&ar->data_lock);
len += scnprintf(buf + len, buf_len - len,
"\n\t\tDriver Rx pkt stats per tid, ([tid] count)\n");
len += scnprintf(buf + len, buf_len - len,
"\t\t------------------------------------------\n");
len += scnprintf(buf + len, buf_len - len, "MSDUs from FW\t\t\t");
PRINT_TID_STATS(rx_pkt_from_fw, "\t\t\t\t");
len += scnprintf(buf + len, buf_len - len, "MSDUs unchained\t\t\t");
PRINT_TID_STATS(rx_pkt_unchained, "\t\t\t\t");
len += scnprintf(buf + len, buf_len - len,
"MSDUs locally dropped:chained\t");
PRINT_TID_STATS(rx_pkt_drop_chained, "\t\t\t\t");
len += scnprintf(buf + len, buf_len - len,
"MSDUs locally dropped:filtered\t");
PRINT_TID_STATS(rx_pkt_drop_filter, "\t\t\t\t");
len += scnprintf(buf + len, buf_len - len,
"MSDUs queued for mac80211\t");
PRINT_TID_STATS(rx_pkt_queued_for_mac, "\t\t\t\t");
for (i = 0; i < ATH10K_PKT_RX_ERR_MAX; i++) {
len += scnprintf(buf + len, buf_len - len,
"MSDUs with error:%s\t", get_err_str(i));
PRINT_TID_STATS(rx_pkt_err[i], "\t\t\t\t");
}
len += scnprintf(buf + len, buf_len - len, "\n");
for (i = 0; i < ATH10K_AMPDU_SUBFRM_NUM_MAX; i++) {
len += scnprintf(buf + len, buf_len - len,
"A-MPDU num subframes %s\t",
get_num_ampdu_subfrm_str(i));
PRINT_TID_STATS(rx_pkt_ampdu[i], "\t\t\t\t");
}
len += scnprintf(buf + len, buf_len - len, "\n");
for (i = 0; i < ATH10K_AMSDU_SUBFRM_NUM_MAX; i++) {
len += scnprintf(buf + len, buf_len - len,
"A-MSDU num subframes %s\t\t",
get_num_amsdu_subfrm_str(i));
PRINT_TID_STATS(rx_pkt_amsdu[i], "\t\t\t\t");
}
spin_unlock_bh(&ar->data_lock);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_tid_stats_dump = {
.open = simple_open,
.read = ath10k_dbg_sta_read_tid_stats,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_dump_tx_stats(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
struct ath10k_htt_data_stats *stats;
const char *str_name[ATH10K_STATS_TYPE_MAX] = {"succ", "fail",
"retry", "ampdu"};
const char *str[ATH10K_COUNTER_TYPE_MAX] = {"bytes", "packets"};
int len = 0, i, j, k, retval = 0;
const int size = 16 * 4096;
char *buf;
buf = kzalloc(size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&ar->conf_mutex);
if (!arsta->tx_stats) {
ath10k_warn(ar, "failed to get tx stats");
mutex_unlock(&ar->conf_mutex);
kfree(buf);
return 0;
}
spin_lock_bh(&ar->data_lock);
for (k = 0; k < ATH10K_STATS_TYPE_MAX; k++) {
for (j = 0; j < ATH10K_COUNTER_TYPE_MAX; j++) {
stats = &arsta->tx_stats->stats[k];
len += scnprintf(buf + len, size - len, "%s_%s\n",
str_name[k],
str[j]);
len += scnprintf(buf + len, size - len,
" VHT MCS %s\n",
str[j]);
for (i = 0; i < ATH10K_VHT_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ",
stats->vht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len, " HT MCS %s\n",
str[j]);
for (i = 0; i < ATH10K_HT_MCS_NUM; i++)
len += scnprintf(buf + len, size - len,
" %llu ", stats->ht[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
" BW %s (20,5,10,40,80,160 MHz)\n", str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu %llu %llu\n",
stats->bw[j][0], stats->bw[j][1],
stats->bw[j][2], stats->bw[j][3],
stats->bw[j][4], stats->bw[j][5]);
len += scnprintf(buf + len, size - len,
" NSS %s (1x1,2x2,3x3,4x4)\n", str[j]);
len += scnprintf(buf + len, size - len,
" %llu %llu %llu %llu\n",
stats->nss[j][0], stats->nss[j][1],
stats->nss[j][2], stats->nss[j][3]);
len += scnprintf(buf + len, size - len,
" GI %s (LGI,SGI)\n",
str[j]);
len += scnprintf(buf + len, size - len, " %llu %llu\n",
stats->gi[j][0], stats->gi[j][1]);
len += scnprintf(buf + len, size - len,
" legacy rate %s (1,2 ... Mbps)\n ",
str[j]);
for (i = 0; i < ATH10K_LEGACY_NUM; i++)
len += scnprintf(buf + len, size - len, "%llu ",
stats->legacy[j][i]);
len += scnprintf(buf + len, size - len, "\n");
len += scnprintf(buf + len, size - len,
" Rate table %s (1,2 ... Mbps)\n ",
str[j]);
for (i = 0; i < ATH10K_RATE_TABLE_NUM; i++) {
len += scnprintf(buf + len, size - len, "%llu ",
stats->rate_table[j][i]);
if (!((i + 1) % 8))
len +=
scnprintf(buf + len, size - len, "\n ");
}
}
}
len += scnprintf(buf + len, size - len,
"\nTX duration\n %llu usecs\n",
arsta->tx_stats->tx_duration);
len += scnprintf(buf + len, size - len,
"BA fails\n %llu\n", arsta->tx_stats->ba_fails);
len += scnprintf(buf + len, size - len,
"ack fails\n %llu\n", arsta->tx_stats->ack_fails);
spin_unlock_bh(&ar->data_lock);
if (len > size)
len = size;
retval = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
mutex_unlock(&ar->conf_mutex);
return retval;
}
static const struct file_operations fops_tx_stats = {
.read = ath10k_dbg_sta_dump_tx_stats,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
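/* Per-station debugfs entries under the mac80211 station directory:
 * aggregation controls (aggr_mode, addba, addba_resp, delba), debug
 * triggers and dumps (peer_debug_trigger, dump_tid_stats, peer_ps_state)
 * and, when extended tx stats are enabled, tx_stats.
 */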
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
struct ath10k *ar = hw->priv;
debugfs_create_file("aggr_mode", 0644, dir, sta, &fops_aggr_mode);
debugfs_create_file("addba", 0200, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", 0200, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", 0200, dir, sta, &fops_delba);
debugfs_create_file("peer_debug_trigger", 0600, dir, sta,
&fops_peer_debug_trigger);
debugfs_create_file("dump_tid_stats", 0400, dir, sta,
&fops_tid_stats_dump);
if (ath10k_peer_stats_enabled(ar) &&
ath10k_debug_is_extd_tx_stats_enabled(ar))
debugfs_create_file("tx_stats", 0400, dir, sta,
&fops_tx_stats);
debugfs_create_file("peer_ps_state", 0400, dir, sta,
&fops_peer_ps_state);
}
| linux-master | drivers/net/wireless/ath/ath10k/debugfs_sta.c |
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2016 Qualcomm Atheros, Inc.
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*/
#include "core.h"
#include "txrx.h"
#include "htt.h"
#include "mac.h"
#include "debug.h"
static void ath10k_report_offchan_tx(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
if (likely(!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)))
return;
if (ath10k_mac_tx_frm_has_freq(ar))
return;
/* If the original wait_for_completion() timed out before
* {data,mgmt}_tx_completed() was called then we could complete
* offchan_tx_completed for a different skb. Prevent this by using
* offchan_tx_skb.
*/
spin_lock_bh(&ar->data_lock);
if (ar->offchan_tx_skb != skb) {
ath10k_warn(ar, "completed old offchannel frame\n");
goto out;
}
complete(&ar->offchan_tx_completed);
ar->offchan_tx_skb = NULL; /* just for sanity */
ath10k_dbg(ar, ATH10K_DBG_HTT, "completed offchannel skb %pK\n", skb);
out:
spin_unlock_bh(&ar->data_lock);
}
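/* Complete one MSDU transmission: look the frame up by msdu_id in the
 * pending_tx idr, release its msdu_id and DMA mapping, translate the HTT
 * completion state (ACK/NOACK/DISCARD) into mac80211 tx status flags and
 * hand the frame back via ieee80211_tx_status_ext().
 */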
int ath10k_txrx_tx_unref(struct ath10k_htt *htt,
const struct htt_tx_done *tx_done)
{
struct ieee80211_tx_status status;
struct ath10k *ar = htt->ar;
struct device *dev = ar->dev;
struct ieee80211_tx_info *info;
struct ieee80211_txq *txq;
struct ath10k_skb_cb *skb_cb;
struct ath10k_txq *artxq;
struct sk_buff *msdu;
u8 flags;
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx completion msdu_id %u status %d\n",
tx_done->msdu_id, tx_done->status);
if (tx_done->msdu_id >= htt->max_num_pending_tx) {
ath10k_warn(ar, "warning: msdu_id %d too big, ignoring\n",
tx_done->msdu_id);
return -EINVAL;
}
spin_lock_bh(&htt->tx_lock);
msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
if (!msdu) {
ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
tx_done->msdu_id);
spin_unlock_bh(&htt->tx_lock);
return -ENOENT;
}
skb_cb = ATH10K_SKB_CB(msdu);
txq = skb_cb->txq;
if (txq) {
artxq = (void *)txq->drv_priv;
artxq->num_fw_queued--;
}
flags = skb_cb->flags;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
ath10k_htt_tx_dec_pending(htt);
spin_unlock_bh(&htt->tx_lock);
rcu_read_lock();
if (txq && txq->sta && skb_cb->airtime_est)
ieee80211_sta_register_airtime(txq->sta, txq->tid,
skb_cb->airtime_est, 0);
rcu_read_unlock();
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
ath10k_report_offchan_tx(htt->ar, msdu);
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
info->status.rates[0].idx = -1;
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
!(flags & ATH10K_SKB_F_NOACK_TID))
info->flags |= IEEE80211_TX_STAT_ACK;
if (tx_done->status == HTT_TX_COMPL_STATE_NOACK)
info->flags &= ~IEEE80211_TX_STAT_ACK;
if ((tx_done->status == HTT_TX_COMPL_STATE_ACK) &&
((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
(flags & ATH10K_SKB_F_NOACK_TID)))
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
if (tx_done->status == HTT_TX_COMPL_STATE_DISCARD) {
if ((info->flags & IEEE80211_TX_CTL_NO_ACK) ||
(flags & ATH10K_SKB_F_NOACK_TID))
info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
else
info->flags &= ~IEEE80211_TX_STAT_ACK;
}
if (tx_done->status == HTT_TX_COMPL_STATE_ACK &&
tx_done->ack_rssi != ATH10K_INVALID_RSSI) {
info->status.ack_signal = ATH10K_DEFAULT_NOISE_FLOOR +
tx_done->ack_rssi;
info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
}
memset(&status, 0, sizeof(status));
status.skb = msdu;
status.info = info;
rcu_read_lock();
if (txq)
status.sta = txq->sta;
ieee80211_tx_status_ext(htt->ar->hw, &status);
rcu_read_unlock();
/* we do not own the msdu anymore */
return 0;
}
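/* Peer lookup helpers; both require ar->data_lock to be held. Lookup is
 * either by (vdev_id, MAC address) or by firmware-assigned peer ID via
 * the per-peer peer_ids bitmap.
 */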
struct ath10k_peer *ath10k_peer_find(struct ath10k *ar, int vdev_id,
const u8 *addr)
{
struct ath10k_peer *peer;
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(peer, &ar->peers, list) {
if (peer->vdev_id != vdev_id)
continue;
if (!ether_addr_equal(peer->addr, addr))
continue;
return peer;
}
return NULL;
}
struct ath10k_peer *ath10k_peer_find_by_id(struct ath10k *ar, int peer_id)
{
struct ath10k_peer *peer;
if (peer_id >= BITS_PER_TYPE(peer->peer_ids))
return NULL;
lockdep_assert_held(&ar->data_lock);
list_for_each_entry(peer, &ar->peers, list)
if (test_bit(peer_id, peer->peer_ids))
return peer;
return NULL;
}
static int ath10k_wait_for_peer_common(struct ath10k *ar, int vdev_id,
const u8 *addr, bool expect_mapped)
{
long time_left;
time_left = wait_event_timeout(ar->peer_mapping_wq, ({
bool mapped;
spin_lock_bh(&ar->data_lock);
mapped = !!ath10k_peer_find(ar, vdev_id, addr);
spin_unlock_bh(&ar->data_lock);
(mapped == expect_mapped ||
test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags));
}), 3 * HZ);
if (time_left == 0)
return -ETIMEDOUT;
return 0;
}
int ath10k_wait_for_peer_created(struct ath10k *ar, int vdev_id, const u8 *addr)
{
return ath10k_wait_for_peer_common(ar, vdev_id, addr, true);
}
int ath10k_wait_for_peer_deleted(struct ath10k *ar, int vdev_id, const u8 *addr)
{
return ath10k_wait_for_peer_common(ar, vdev_id, addr, false);
}
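/* Firmware reports peer ID assignments with map/unmap events. Several
 * peer IDs may map to the same peer (hence the peer_ids bitmap); the peer
 * object is freed only once its last peer ID has been unmapped.
 */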
void ath10k_peer_map_event(struct ath10k_htt *htt,
struct htt_peer_map_event *ev)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
ath10k_warn(ar,
"received htt peer map event with idx out of bounds: %u\n",
ev->peer_id);
return;
}
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, ev->vdev_id, ev->addr);
if (!peer) {
peer = kzalloc(sizeof(*peer), GFP_ATOMIC);
if (!peer)
goto exit;
peer->vdev_id = ev->vdev_id;
ether_addr_copy(peer->addr, ev->addr);
list_add(&peer->list, &ar->peers);
wake_up(&ar->peer_mapping_wq);
}
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer map vdev %d peer %pM id %d\n",
ev->vdev_id, ev->addr, ev->peer_id);
WARN_ON(ar->peer_map[ev->peer_id] && (ar->peer_map[ev->peer_id] != peer));
ar->peer_map[ev->peer_id] = peer;
set_bit(ev->peer_id, peer->peer_ids);
exit:
spin_unlock_bh(&ar->data_lock);
}
void ath10k_peer_unmap_event(struct ath10k_htt *htt,
struct htt_peer_unmap_event *ev)
{
struct ath10k *ar = htt->ar;
struct ath10k_peer *peer;
if (ev->peer_id >= ATH10K_MAX_NUM_PEER_IDS) {
ath10k_warn(ar,
"received htt peer unmap event with idx out of bounds: %u\n",
ev->peer_id);
return;
}
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find_by_id(ar, ev->peer_id);
if (!peer) {
ath10k_warn(ar, "peer-unmap-event: unknown peer id %d\n",
ev->peer_id);
goto exit;
}
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt peer unmap vdev %d peer %pM id %d\n",
peer->vdev_id, peer->addr, ev->peer_id);
ar->peer_map[ev->peer_id] = NULL;
clear_bit(ev->peer_id, peer->peer_ids);
if (bitmap_empty(peer->peer_ids, ATH10K_MAX_NUM_PEER_IDS)) {
list_del(&peer->list);
kfree(peer);
wake_up(&ar->peer_mapping_wq);
}
exit:
spin_unlock_bh(&ar->data_lock);
}
| linux-master | drivers/net/wireless/ath/ath10k/txrx.c |
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
*/
#include "core.h"
#include "hif.h"
#include "debug.h"
/********/
/* Send */
/********/
static void ath10k_htc_control_tx_complete(struct ath10k *ar,
struct sk_buff *skb)
{
kfree_skb(skb);
}
static struct sk_buff *ath10k_htc_build_tx_ctrl_skb(void *ar)
{
struct sk_buff *skb;
struct ath10k_skb_cb *skb_cb;
skb = dev_alloc_skb(ATH10K_HTC_CONTROL_BUFFER_SIZE);
if (!skb)
return NULL;
skb_reserve(skb, 20); /* FIXME: why 20 bytes? */
WARN_ONCE((unsigned long)skb->data & 3, "unaligned skb");
skb_cb = ATH10K_SKB_CB(skb);
memset(skb_cb, 0, sizeof(*skb_cb));
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: skb %pK\n", __func__, skb);
return skb;
}
static inline void ath10k_htc_restore_tx_skb(struct ath10k_htc *htc,
struct sk_buff *skb)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
if (htc->ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(htc->ar->dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
}
void ath10k_htc_notify_tx_completion(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath10k *ar = ep->htc->ar;
struct ath10k_htc_hdr *hdr;
ath10k_dbg(ar, ATH10K_DBG_HTC, "%s: ep %d skb %pK\n", __func__,
ep->eid, skb);
/* Corner case: the copy completion can reach the host while the copy
 * engine is still processing the buffer. Unmapping the memory at that
 * point causes an SMMU fault, so as a workaround delay the unmap to
 * avoid the fault.
 */
if (ar->hw_params.delay_unmap_buffer &&
ep->ul_pipe_id == 3)
mdelay(2);
hdr = (struct ath10k_htc_hdr *)skb->data;
ath10k_htc_restore_tx_skb(ep->htc, skb);
if (!ep->ep_ops.ep_tx_complete) {
ath10k_warn(ar, "no tx handler for eid %d\n", ep->eid);
dev_kfree_skb_any(skb);
return;
}
if (hdr->flags & ATH10K_HTC_FLAG_SEND_BUNDLE) {
dev_kfree_skb_any(skb);
return;
}
ep->ep_ops.ep_tx_complete(ep->htc->ar, skb);
}
EXPORT_SYMBOL(ath10k_htc_notify_tx_completion);
static void ath10k_htc_prepare_tx_skb(struct ath10k_htc_ep *ep,
struct sk_buff *skb)
{
struct ath10k_htc_hdr *hdr;
hdr = (struct ath10k_htc_hdr *)skb->data;
memset(hdr, 0, sizeof(struct ath10k_htc_hdr));
hdr->eid = ep->eid;
hdr->len = __cpu_to_le16(skb->len - sizeof(*hdr));
hdr->flags = 0;
if (ep->tx_credit_flow_enabled && !ep->bundle_tx)
hdr->flags |= ATH10K_HTC_FLAG_NEED_CREDIT_UPDATE;
spin_lock_bh(&ep->htc->tx_lock);
hdr->seq_no = ep->seq_no++;
spin_unlock_bh(&ep->htc->tx_lock);
}
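/* Credit accounting: a message of @len bytes costs
 * DIV_ROUND_UP(len, ep->tx_credit_size) credits, e.g. a 2600 byte message
 * on an endpoint with a 1792 byte credit size consumes 2 credits. With
 * @consume false this only checks availability (used when sizing bundles).
 */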
static int ath10k_htc_consume_credit(struct ath10k_htc_ep *ep,
unsigned int len,
bool consume)
{
struct ath10k_htc *htc = ep->htc;
struct ath10k *ar = htc->ar;
enum ath10k_htc_ep_id eid = ep->eid;
int credits, ret = 0;
if (!ep->tx_credit_flow_enabled)
return 0;
credits = DIV_ROUND_UP(len, ep->tx_credit_size);
spin_lock_bh(&htc->tx_lock);
if (ep->tx_credits < credits) {
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc insufficient credits ep %d required %d available %d consume %d\n",
eid, credits, ep->tx_credits, consume);
ret = -EAGAIN;
goto unlock;
}
if (consume) {
ep->tx_credits -= credits;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d consumed %d credits total %d\n",
eid, credits, ep->tx_credits);
}
unlock:
spin_unlock_bh(&htc->tx_lock);
return ret;
}
static void ath10k_htc_release_credit(struct ath10k_htc_ep *ep, unsigned int len)
{
struct ath10k_htc *htc = ep->htc;
struct ath10k *ar = htc->ar;
enum ath10k_htc_ep_id eid = ep->eid;
int credits;
if (!ep->tx_credit_flow_enabled)
return;
credits = DIV_ROUND_UP(len, ep->tx_credit_size);
spin_lock_bh(&htc->tx_lock);
ep->tx_credits += credits;
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc ep %d reverted %d credits back total %d\n",
eid, credits, ep->tx_credits);
spin_unlock_bh(&htc->tx_lock);
if (ep->ep_ops.ep_tx_credits)
ep->ep_ops.ep_tx_credits(htc->ar);
}
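/* Send one HTC message: reserve credits, prepend and fill the HTC header,
 * DMA-map the frame (low-latency targets only) and hand it to HIF as a
 * single scatter-gather item. Each step is unwound on failure.
 */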
int ath10k_htc_send(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct ath10k_hif_sg_item sg_item;
struct device *dev = htc->ar->dev;
int ret;
unsigned int skb_len;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
if (eid >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "Invalid endpoint id: %d\n", eid);
return -ENOENT;
}
skb_push(skb, sizeof(struct ath10k_htc_hdr));
skb_len = skb->len;
ret = ath10k_htc_consume_credit(ep, skb_len, true);
if (ret)
goto err_pull;
ath10k_htc_prepare_tx_skb(ep, skb);
skb_cb->eid = eid;
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL) {
skb_cb->paddr = dma_map_single(dev, skb->data, skb->len,
DMA_TO_DEVICE);
ret = dma_mapping_error(dev, skb_cb->paddr);
if (ret) {
ret = -EIO;
goto err_credits;
}
}
sg_item.transfer_id = ep->eid;
sg_item.transfer_context = skb;
sg_item.vaddr = skb->data;
sg_item.paddr = skb_cb->paddr;
sg_item.len = skb->len;
ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
if (ret)
goto err_unmap;
return 0;
err_unmap:
if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
dma_unmap_single(dev, skb_cb->paddr, skb->len, DMA_TO_DEVICE);
err_credits:
ath10k_htc_release_credit(ep, skb_len);
err_pull:
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
return ret;
}
void ath10k_htc_tx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_htc *htc = &ar->htc;
struct ath10k_skb_cb *skb_cb;
struct ath10k_htc_ep *ep;
if (WARN_ON_ONCE(!skb))
return;
skb_cb = ATH10K_SKB_CB(skb);
ep = &htc->endpoint[skb_cb->eid];
ath10k_htc_notify_tx_completion(ep, skb);
/* the skb now belongs to the completion handler */
}
EXPORT_SYMBOL(ath10k_htc_tx_completion_handler);
/***********/
/* Receive */
/***********/
static void
ath10k_htc_process_credit_report(struct ath10k_htc *htc,
const struct ath10k_htc_credit_report *report,
int len,
enum ath10k_htc_ep_id eid)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep;
int i, n_reports;
if (len % sizeof(*report))
ath10k_warn(ar, "Uneven credit report len %d", len);
n_reports = len / sizeof(*report);
spin_lock_bh(&htc->tx_lock);
for (i = 0; i < n_reports; i++, report++) {
if (report->eid >= ATH10K_HTC_EP_COUNT)
break;
ep = &htc->endpoint[report->eid];
ep->tx_credits += report->credits;
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc ep %d got %d credits (total %d)\n",
report->eid, report->credits, ep->tx_credits);
if (ep->ep_ops.ep_tx_credits) {
spin_unlock_bh(&htc->tx_lock);
ep->ep_ops.ep_tx_credits(htc->ar);
spin_lock_bh(&htc->tx_lock);
}
}
spin_unlock_bh(&htc->tx_lock);
}
static int
ath10k_htc_process_lookahead(struct ath10k_htc *htc,
const struct ath10k_htc_lookahead_report *report,
int len,
enum ath10k_htc_ep_id eid,
void *next_lookaheads,
int *next_lookaheads_len)
{
struct ath10k *ar = htc->ar;
/* Invalid lookahead flags are actually transmitted by
* the target in the HTC control message.
* Since this will happen at every boot we silently ignore
* the lookahead in this case
*/
if (report->pre_valid != ((~report->post_valid) & 0xFF))
return 0;
if (next_lookaheads && next_lookaheads_len) {
ath10k_dbg(ar, ATH10K_DBG_HTC,
"htc rx lookahead found pre_valid 0x%x post_valid 0x%x\n",
report->pre_valid, report->post_valid);
/* look ahead bytes are valid, copy them over */
memcpy((u8 *)next_lookaheads, report->lookahead, 4);
*next_lookaheads_len = 1;
}
return 0;
}
static int
ath10k_htc_process_lookahead_bundle(struct ath10k_htc *htc,
const struct ath10k_htc_lookahead_bundle *report,
int len,
enum ath10k_htc_ep_id eid,
void *next_lookaheads,
int *next_lookaheads_len)
{
struct ath10k *ar = htc->ar;
int bundle_cnt = len / sizeof(*report);
if (!bundle_cnt || (bundle_cnt > htc->max_msgs_per_htc_bundle)) {
ath10k_warn(ar, "Invalid lookahead bundle count: %d\n",
bundle_cnt);
return -EINVAL;
}
if (next_lookaheads && next_lookaheads_len) {
int i;
for (i = 0; i < bundle_cnt; i++) {
memcpy(((u8 *)next_lookaheads) + 4 * i,
report->lookahead, 4);
report++;
}
*next_lookaheads_len = bundle_cnt;
}
return 0;
}
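/* Walk the trailer appended to an rx message and dispatch each record:
 * credit reports, lookahead reports and lookahead bundles. A trailer may
 * carry several records back to back; stop on the first malformed one.
 */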
int ath10k_htc_process_trailer(struct ath10k_htc *htc,
u8 *buffer,
int length,
enum ath10k_htc_ep_id src_eid,
void *next_lookaheads,
int *next_lookaheads_len)
{
struct ath10k_htc_lookahead_bundle *bundle;
struct ath10k *ar = htc->ar;
int status = 0;
struct ath10k_htc_record *record;
u8 *orig_buffer;
int orig_length;
size_t len;
orig_buffer = buffer;
orig_length = length;
while (length > 0) {
record = (struct ath10k_htc_record *)buffer;
if (length < sizeof(record->hdr)) {
status = -EINVAL;
break;
}
if (record->hdr.len > length) {
/* no room left in buffer for record */
ath10k_warn(ar, "Invalid record length: %d\n",
record->hdr.len);
status = -EINVAL;
break;
}
switch (record->hdr.id) {
case ATH10K_HTC_RECORD_CREDITS:
len = sizeof(struct ath10k_htc_credit_report);
if (record->hdr.len < len) {
ath10k_warn(ar, "Credit report too long\n");
status = -EINVAL;
break;
}
ath10k_htc_process_credit_report(htc,
record->credit_report,
record->hdr.len,
src_eid);
break;
case ATH10K_HTC_RECORD_LOOKAHEAD:
len = sizeof(struct ath10k_htc_lookahead_report);
if (record->hdr.len < len) {
ath10k_warn(ar, "Lookahead report too long\n");
status = -EINVAL;
break;
}
status = ath10k_htc_process_lookahead(htc,
record->lookahead_report,
record->hdr.len,
src_eid,
next_lookaheads,
next_lookaheads_len);
break;
case ATH10K_HTC_RECORD_LOOKAHEAD_BUNDLE:
bundle = record->lookahead_bundle;
status = ath10k_htc_process_lookahead_bundle(htc,
bundle,
record->hdr.len,
src_eid,
next_lookaheads,
next_lookaheads_len);
break;
default:
ath10k_warn(ar, "Unhandled record: id:%d length:%d\n",
record->hdr.id, record->hdr.len);
break;
}
if (status)
break;
/* multiple records may be present in a trailer */
buffer += sizeof(record->hdr) + record->hdr.len;
length -= sizeof(record->hdr) + record->hdr.len;
}
if (status)
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc rx bad trailer", "",
orig_buffer, orig_length);
return status;
}
EXPORT_SYMBOL(ath10k_htc_process_trailer);
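/* HTC rx path: validate the HTC header, strip any trailer records from
 * the tail of the payload and pass what remains to the endpoint's rx
 * completion callback, which takes ownership of the skb.
 */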
void ath10k_htc_rx_completion_handler(struct ath10k *ar, struct sk_buff *skb)
{
int status = 0;
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_hdr *hdr;
struct ath10k_htc_ep *ep;
u16 payload_len;
u32 trailer_len = 0;
size_t min_len;
u8 eid;
bool trailer_present;
hdr = (struct ath10k_htc_hdr *)skb->data;
skb_pull(skb, sizeof(*hdr));
eid = hdr->eid;
if (eid >= ATH10K_HTC_EP_COUNT) {
ath10k_warn(ar, "HTC Rx: invalid eid %d\n", eid);
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad header", "",
hdr, sizeof(*hdr));
goto out;
}
ep = &htc->endpoint[eid];
if (ep->service_id == ATH10K_HTC_SVC_ID_UNUSED) {
ath10k_warn(ar, "htc rx endpoint %d is not connected\n", eid);
goto out;
}
payload_len = __le16_to_cpu(hdr->len);
if (payload_len + sizeof(*hdr) > ATH10K_HTC_MAX_LEN) {
ath10k_warn(ar, "HTC rx frame too long, len: %zu\n",
payload_len + sizeof(*hdr));
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len", "",
hdr, sizeof(*hdr));
goto out;
}
if (skb->len < payload_len) {
ath10k_dbg(ar, ATH10K_DBG_HTC,
"HTC Rx: insufficient length, got %d, expected %d\n",
skb->len, payload_len);
ath10k_dbg_dump(ar, ATH10K_DBG_HTC, "htc bad rx pkt len",
"", hdr, sizeof(*hdr));
goto out;
}
/* get flags to check for trailer */
trailer_present = hdr->flags & ATH10K_HTC_FLAG_TRAILER_PRESENT;
if (trailer_present) {
u8 *trailer;
trailer_len = hdr->trailer_len;
min_len = sizeof(struct ath10k_ath10k_htc_record_hdr);
if ((trailer_len < min_len) ||
(trailer_len > payload_len)) {
ath10k_warn(ar, "Invalid trailer length: %d\n",
trailer_len);
goto out;
}
trailer = (u8 *)hdr;
trailer += sizeof(*hdr);
trailer += payload_len;
trailer -= trailer_len;
status = ath10k_htc_process_trailer(htc, trailer,
trailer_len, hdr->eid,
NULL, NULL);
if (status)
goto out;
skb_trim(skb, skb->len - trailer_len);
}
if (((int)payload_len - (int)trailer_len) <= 0)
/* zero length packet with trailer data, just drop these */
goto out;
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc rx completion ep %d skb %pK\n",
eid, skb);
ep->ep_ops.ep_rx_complete(ar, skb);
/* skb is now owned by the rx completion handler */
skb = NULL;
out:
kfree_skb(skb);
}
EXPORT_SYMBOL(ath10k_htc_rx_completion_handler);
static void ath10k_htc_control_rx_complete(struct ath10k *ar,
struct sk_buff *skb)
{
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_msg *msg = (struct ath10k_htc_msg *)skb->data;
switch (__le16_to_cpu(msg->hdr.message_id)) {
case ATH10K_HTC_MSG_READY_ID:
case ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID:
/* handle HTC control message */
if (completion_done(&htc->ctl_resp)) {
/* this is a fatal error, target should not be
* sending unsolicited messages on the ep 0
*/
ath10k_warn(ar, "HTC rx ctrl still processing\n");
complete(&htc->ctl_resp);
goto out;
}
htc->control_resp_len =
min_t(int, skb->len,
ATH10K_HTC_MAX_CTRL_MSG_LEN);
memcpy(htc->control_resp_buffer, skb->data,
htc->control_resp_len);
complete(&htc->ctl_resp);
break;
case ATH10K_HTC_MSG_SEND_SUSPEND_COMPLETE:
htc->htc_ops.target_send_suspend_complete(ar);
break;
default:
ath10k_warn(ar, "ignoring unsolicited htc ep0 event\n");
break;
}
out:
kfree_skb(skb);
}
/***************/
/* Init/Deinit */
/***************/
static const char *htc_service_name(enum ath10k_htc_svc_id id)
{
switch (id) {
case ATH10K_HTC_SVC_ID_RESERVED:
return "Reserved";
case ATH10K_HTC_SVC_ID_RSVD_CTRL:
return "Control";
case ATH10K_HTC_SVC_ID_WMI_CONTROL:
return "WMI";
case ATH10K_HTC_SVC_ID_WMI_DATA_BE:
return "DATA BE";
case ATH10K_HTC_SVC_ID_WMI_DATA_BK:
return "DATA BK";
case ATH10K_HTC_SVC_ID_WMI_DATA_VI:
return "DATA VI";
case ATH10K_HTC_SVC_ID_WMI_DATA_VO:
return "DATA VO";
case ATH10K_HTC_SVC_ID_NMI_CONTROL:
return "NMI Control";
case ATH10K_HTC_SVC_ID_NMI_DATA:
return "NMI Data";
case ATH10K_HTC_SVC_ID_HTT_DATA_MSG:
return "HTT Data";
case ATH10K_HTC_SVC_ID_HTT_DATA2_MSG:
return "HTT Data";
case ATH10K_HTC_SVC_ID_HTT_DATA3_MSG:
return "HTT Data";
case ATH10K_HTC_SVC_ID_TEST_RAW_STREAMS:
return "RAW";
case ATH10K_HTC_SVC_ID_HTT_LOG_MSG:
return "PKTLOG";
}
return "Unknown";
}
static void ath10k_htc_reset_endpoint_states(struct ath10k_htc *htc)
{
struct ath10k_htc_ep *ep;
int i;
for (i = ATH10K_HTC_EP_0; i < ATH10K_HTC_EP_COUNT; i++) {
ep = &htc->endpoint[i];
ep->service_id = ATH10K_HTC_SVC_ID_UNUSED;
ep->max_ep_message_len = 0;
ep->max_tx_queue_depth = 0;
ep->eid = i;
ep->htc = htc;
ep->tx_credit_flow_enabled = true;
}
}
static u8 ath10k_htc_get_credit_allocation(struct ath10k_htc *htc,
u16 service_id)
{
u8 allocation = 0;
/* The WMI control service is the only service with flow control.
* Let it have all transmit credits.
*/
if (service_id == ATH10K_HTC_SVC_ID_WMI_CONTROL)
allocation = htc->total_transmit_credits;
return allocation;
}
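/* TX bundling (high-latency targets): queued frames are copied back to
 * back into one bundle skb, each padded to a multiple of the endpoint's
 * credit size, and sent as a single HIF transfer. On failure the frames
 * are requeued to tx_req_head; on success they move to tx_complete_head
 * for the completion worker.
 */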
static int ath10k_htc_send_bundle(struct ath10k_htc_ep *ep,
struct sk_buff *bundle_skb,
struct sk_buff_head *tx_save_head)
{
struct ath10k_hif_sg_item sg_item;
struct ath10k_htc *htc = ep->htc;
struct ath10k *ar = htc->ar;
struct sk_buff *skb;
int ret, cn = 0;
unsigned int skb_len;
ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle skb len %d\n", bundle_skb->len);
skb_len = bundle_skb->len;
ret = ath10k_htc_consume_credit(ep, skb_len, true);
if (!ret) {
sg_item.transfer_id = ep->eid;
sg_item.transfer_context = bundle_skb;
sg_item.vaddr = bundle_skb->data;
sg_item.len = bundle_skb->len;
ret = ath10k_hif_tx_sg(htc->ar, ep->ul_pipe_id, &sg_item, 1);
if (ret)
ath10k_htc_release_credit(ep, skb_len);
}
if (ret)
dev_kfree_skb_any(bundle_skb);
for (cn = 0; (skb = skb_dequeue_tail(tx_save_head)); cn++) {
if (ret) {
skb_pull(skb, sizeof(struct ath10k_htc_hdr));
skb_queue_head(&ep->tx_req_head, skb);
} else {
skb_queue_tail(&ep->tx_complete_head, skb);
}
}
if (!ret)
queue_work(ar->workqueue_tx_complete, &ar->tx_complete_work);
ath10k_dbg(ar, ATH10K_DBG_HTC,
"bundle tx status %d eid %d req count %d count %d len %d\n",
ret, ep->eid, skb_queue_len(&ep->tx_req_head), cn, skb_len);
return ret;
}
static void ath10k_htc_send_one_skb(struct ath10k_htc_ep *ep, struct sk_buff *skb)
{
struct ath10k_htc *htc = ep->htc;
struct ath10k *ar = htc->ar;
int ret;
ret = ath10k_htc_send(htc, ep->eid, skb);
if (ret)
skb_queue_head(&ep->tx_req_head, skb);
ath10k_dbg(ar, ATH10K_DBG_HTC, "tx one status %d eid %d len %d pending count %d\n",
ret, ep->eid, skb->len, skb_queue_len(&ep->tx_req_head));
}
static int ath10k_htc_send_bundle_skbs(struct ath10k_htc_ep *ep)
{
struct ath10k_htc *htc = ep->htc;
struct sk_buff *bundle_skb, *skb;
struct sk_buff_head tx_save_head;
struct ath10k_htc_hdr *hdr;
u8 *bundle_buf;
int ret = 0, credit_pad, credit_remainder, trans_len, bundles_left = 0;
if (htc->ar->state == ATH10K_STATE_WEDGED)
return -ECOMM;
if (ep->tx_credit_flow_enabled &&
ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE)
return 0;
bundles_left = ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
bundle_skb = dev_alloc_skb(bundles_left);
if (!bundle_skb)
return -ENOMEM;
bundle_buf = bundle_skb->data;
skb_queue_head_init(&tx_save_head);
while (true) {
skb = skb_dequeue(&ep->tx_req_head);
if (!skb)
break;
credit_pad = 0;
trans_len = skb->len + sizeof(*hdr);
credit_remainder = trans_len % ep->tx_credit_size;
if (credit_remainder != 0) {
credit_pad = ep->tx_credit_size - credit_remainder;
trans_len += credit_pad;
}
ret = ath10k_htc_consume_credit(ep,
bundle_buf + trans_len - bundle_skb->data,
false);
if (ret) {
skb_queue_head(&ep->tx_req_head, skb);
break;
}
if (bundles_left < trans_len) {
bundle_skb->len = bundle_buf - bundle_skb->data;
ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
if (ret) {
skb_queue_head(&ep->tx_req_head, skb);
return ret;
}
if (skb_queue_len(&ep->tx_req_head) == 0) {
ath10k_htc_send_one_skb(ep, skb);
return ret;
}
if (ep->tx_credit_flow_enabled &&
ep->tx_credits < ATH10K_MIN_CREDIT_PER_HTC_TX_BUNDLE) {
skb_queue_head(&ep->tx_req_head, skb);
return 0;
}
bundles_left =
ATH10K_MAX_MSG_PER_HTC_TX_BUNDLE * ep->tx_credit_size;
bundle_skb = dev_alloc_skb(bundles_left);
if (!bundle_skb) {
skb_queue_head(&ep->tx_req_head, skb);
return -ENOMEM;
}
bundle_buf = bundle_skb->data;
skb_queue_head_init(&tx_save_head);
}
skb_push(skb, sizeof(struct ath10k_htc_hdr));
ath10k_htc_prepare_tx_skb(ep, skb);
memcpy(bundle_buf, skb->data, skb->len);
hdr = (struct ath10k_htc_hdr *)bundle_buf;
hdr->flags |= ATH10K_HTC_FLAG_SEND_BUNDLE;
hdr->pad_len = __cpu_to_le16(credit_pad);
bundle_buf += trans_len;
bundles_left -= trans_len;
skb_queue_tail(&tx_save_head, skb);
}
if (bundle_buf != bundle_skb->data) {
bundle_skb->len = bundle_buf - bundle_skb->data;
ret = ath10k_htc_send_bundle(ep, bundle_skb, &tx_save_head);
} else {
dev_kfree_skb_any(bundle_skb);
}
return ret;
}
static void ath10k_htc_bundle_tx_work(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, bundle_tx_work);
struct ath10k_htc_ep *ep;
struct sk_buff *skb;
int i;
for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
ep = &ar->htc.endpoint[i];
if (!ep->bundle_tx)
continue;
ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx work eid %d count %d\n",
ep->eid, skb_queue_len(&ep->tx_req_head));
if (skb_queue_len(&ep->tx_req_head) >=
ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE) {
ath10k_htc_send_bundle_skbs(ep);
} else {
skb = skb_dequeue(&ep->tx_req_head);
if (!skb)
continue;
ath10k_htc_send_one_skb(ep, skb);
}
}
}
static void ath10k_htc_tx_complete_work(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, tx_complete_work);
struct ath10k_htc_ep *ep;
enum ath10k_htc_ep_id eid;
struct sk_buff *skb;
int i;
for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
ep = &ar->htc.endpoint[i];
eid = ep->eid;
if (ep->bundle_tx && eid == ar->htt.eid) {
ath10k_dbg(ar, ATH10K_DBG_HTC, "bundle tx complete eid %d pending complete count%d\n",
ep->eid, skb_queue_len(&ep->tx_complete_head));
while (true) {
skb = skb_dequeue(&ep->tx_complete_head);
if (!skb)
break;
ath10k_htc_notify_tx_completion(ep, skb);
}
}
}
}
int ath10k_htc_send_hl(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
struct sk_buff *skb)
{
struct ath10k_htc_ep *ep = &htc->endpoint[eid];
struct ath10k *ar = htc->ar;
if (sizeof(struct ath10k_htc_hdr) + skb->len > ep->tx_credit_size) {
ath10k_dbg(ar, ATH10K_DBG_HTC, "tx exceed max len %d\n", skb->len);
return -ENOMEM;
}
ath10k_dbg(ar, ATH10K_DBG_HTC, "htc send hl eid %d bundle %d tx count %d len %d\n",
eid, ep->bundle_tx, skb_queue_len(&ep->tx_req_head), skb->len);
if (ep->bundle_tx) {
skb_queue_tail(&ep->tx_req_head, skb);
queue_work(ar->workqueue, &ar->bundle_tx_work);
return 0;
} else {
return ath10k_htc_send(htc, eid, skb);
}
}
void ath10k_htc_setup_tx_req(struct ath10k_htc_ep *ep)
{
if (ep->htc->max_msgs_per_htc_bundle >= ATH10K_MIN_MSG_PER_HTC_TX_BUNDLE &&
!ep->bundle_tx) {
ep->bundle_tx = true;
skb_queue_head_init(&ep->tx_req_head);
skb_queue_head_init(&ep->tx_complete_head);
}
}
void ath10k_htc_stop_hl(struct ath10k *ar)
{
struct ath10k_htc_ep *ep;
int i;
cancel_work_sync(&ar->bundle_tx_work);
cancel_work_sync(&ar->tx_complete_work);
for (i = 0; i < ARRAY_SIZE(ar->htc.endpoint); i++) {
ep = &ar->htc.endpoint[i];
if (!ep->bundle_tx)
continue;
ath10k_dbg(ar, ATH10K_DBG_HTC, "stop tx work eid %d count %d\n",
ep->eid, skb_queue_len(&ep->tx_req_head));
skb_queue_purge(&ep->tx_req_head);
}
}
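/* Wait for the target's HTC ready message, which advertises the credit
 * count and credit size. An extended ready message (detected purely by
 * its length) additionally carries the alternate data credit size and
 * the maximum number of messages per rx bundle.
 */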
int ath10k_htc_wait_target(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
int i, status = 0;
unsigned long time_left;
struct ath10k_htc_msg *msg;
u16 message_id;
time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (!time_left) {
/* Workaround: In some cases the PCI HIF doesn't
* receive interrupt for the control response message
* even if the buffer was completed. It is suspected
* iomap writes unmasking PCI CE irqs aren't propagated
* properly in KVM PCI-passthrough sometimes.
*/
ath10k_warn(ar, "failed to receive control response completion, polling..\n");
for (i = 0; i < CE_COUNT; i++)
ath10k_hif_send_complete_check(htc->ar, i, 1);
time_left =
wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_WAIT_TIMEOUT_HZ);
if (!time_left)
status = -ETIMEDOUT;
}
if (status < 0) {
ath10k_err(ar, "ctl_resp never came in (%d)\n", status);
return status;
}
if (htc->control_resp_len < sizeof(msg->hdr) + sizeof(msg->ready)) {
ath10k_err(ar, "Invalid HTC ready msg len:%d\n",
htc->control_resp_len);
return -ECOMM;
}
msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
message_id = __le16_to_cpu(msg->hdr.message_id);
if (message_id != ATH10K_HTC_MSG_READY_ID) {
ath10k_err(ar, "Invalid HTC ready msg: 0x%x\n", message_id);
return -ECOMM;
}
if (ar->hw_params.use_fw_tx_credits)
htc->total_transmit_credits = __le16_to_cpu(msg->ready.credit_count);
else
htc->total_transmit_credits = 1;
htc->target_credit_size = __le16_to_cpu(msg->ready.credit_size);
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Target ready! transmit resources: %d size:%d actual credits:%d\n",
htc->total_transmit_credits,
htc->target_credit_size,
msg->ready.credit_count);
if ((htc->total_transmit_credits == 0) ||
(htc->target_credit_size == 0)) {
ath10k_err(ar, "Invalid credit size received\n");
return -ECOMM;
}
/* The only way to determine if the ready message is an extended
* message is from the size.
*/
if (htc->control_resp_len >=
sizeof(msg->hdr) + sizeof(msg->ready_ext)) {
htc->alt_data_credit_size =
__le16_to_cpu(msg->ready_ext.reserved) &
ATH10K_HTC_MSG_READY_EXT_ALT_DATA_MASK;
htc->max_msgs_per_htc_bundle =
min_t(u8, msg->ready_ext.max_msgs_per_htc_bundle,
HTC_HOST_MAX_MSG_PER_RX_BUNDLE);
ath10k_dbg(ar, ATH10K_DBG_HTC,
"Extended ready message RX bundle size %d alt size %d\n",
htc->max_msgs_per_htc_bundle,
htc->alt_data_credit_size);
}
INIT_WORK(&ar->bundle_tx_work, ath10k_htc_bundle_tx_work);
INIT_WORK(&ar->tx_complete_work, ath10k_htc_tx_complete_work);
return 0;
}
void ath10k_htc_change_tx_credit_flow(struct ath10k_htc *htc,
enum ath10k_htc_ep_id eid,
bool enable)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_ep *ep = &ar->htc.endpoint[eid];
ep->tx_credit_flow_enabled = enable;
}
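/* Connect one HTC service: send a connect-service request on endpoint 0,
 * wait for the response, then initialize the assigned endpoint (credits,
 * message limits, callbacks, HIF pipes). The pseudo control service is
 * special-cased and never goes over the wire.
 */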
int ath10k_htc_connect_service(struct ath10k_htc *htc,
struct ath10k_htc_svc_conn_req *conn_req,
struct ath10k_htc_svc_conn_resp *conn_resp)
{
struct ath10k *ar = htc->ar;
struct ath10k_htc_msg *msg;
struct ath10k_htc_conn_svc *req_msg;
struct ath10k_htc_conn_svc_response resp_msg_dummy;
struct ath10k_htc_conn_svc_response *resp_msg = &resp_msg_dummy;
enum ath10k_htc_ep_id assigned_eid = ATH10K_HTC_EP_COUNT;
struct ath10k_htc_ep *ep;
struct sk_buff *skb;
unsigned int max_msg_size = 0;
int length, status;
unsigned long time_left;
bool disable_credit_flow_ctrl = false;
u16 message_id, service_id, flags = 0;
u8 tx_alloc = 0;
/* special case for HTC pseudo control service */
if (conn_req->service_id == ATH10K_HTC_SVC_ID_RSVD_CTRL) {
disable_credit_flow_ctrl = true;
assigned_eid = ATH10K_HTC_EP_0;
max_msg_size = ATH10K_HTC_MAX_CTRL_MSG_LEN;
memset(&resp_msg_dummy, 0, sizeof(resp_msg_dummy));
goto setup;
}
tx_alloc = ath10k_htc_get_credit_allocation(htc,
conn_req->service_id);
if (!tx_alloc)
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service %s does not allocate target credits\n",
htc_service_name(conn_req->service_id));
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
if (!skb) {
ath10k_err(ar, "Failed to allocate HTC packet\n");
return -ENOMEM;
}
length = sizeof(msg->hdr) + sizeof(msg->connect_service);
skb_put(skb, length);
memset(skb->data, 0, length);
msg = (struct ath10k_htc_msg *)skb->data;
msg->hdr.message_id =
__cpu_to_le16(ATH10K_HTC_MSG_CONNECT_SERVICE_ID);
flags |= SM(tx_alloc, ATH10K_HTC_CONN_FLAGS_RECV_ALLOC);
/* Only enable credit flow control for WMI ctrl service */
if (conn_req->service_id != ATH10K_HTC_SVC_ID_WMI_CONTROL) {
flags |= ATH10K_HTC_CONN_FLAGS_DISABLE_CREDIT_FLOW_CTRL;
disable_credit_flow_ctrl = true;
}
req_msg = &msg->connect_service;
req_msg->flags = __cpu_to_le16(flags);
req_msg->service_id = __cpu_to_le16(conn_req->service_id);
reinit_completion(&htc->ctl_resp);
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
/* wait for response */
time_left = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
if (!time_left) {
ath10k_err(ar, "Service connect timeout\n");
return -ETIMEDOUT;
}
/* we controlled the buffer creation, it's aligned */
msg = (struct ath10k_htc_msg *)htc->control_resp_buffer;
resp_msg = &msg->connect_service_response;
message_id = __le16_to_cpu(msg->hdr.message_id);
service_id = __le16_to_cpu(resp_msg->service_id);
if ((message_id != ATH10K_HTC_MSG_CONNECT_SERVICE_RESP_ID) ||
(htc->control_resp_len < sizeof(msg->hdr) +
sizeof(msg->connect_service_response))) {
ath10k_err(ar, "Invalid resp message ID 0x%x", message_id);
return -EPROTO;
}
ath10k_dbg(ar, ATH10K_DBG_HTC,
"HTC Service %s connect response: status: 0x%x, assigned ep: 0x%x\n",
htc_service_name(service_id),
resp_msg->status, resp_msg->eid);
conn_resp->connect_resp_code = resp_msg->status;
/* check response status */
if (resp_msg->status != ATH10K_HTC_CONN_SVC_STATUS_SUCCESS) {
ath10k_err(ar, "HTC Service %s connect request failed: 0x%x)\n",
htc_service_name(service_id),
resp_msg->status);
return -EPROTO;
}
assigned_eid = (enum ath10k_htc_ep_id)resp_msg->eid;
max_msg_size = __le16_to_cpu(resp_msg->max_msg_size);
setup:
if (assigned_eid >= ATH10K_HTC_EP_COUNT)
return -EPROTO;
if (max_msg_size == 0)
return -EPROTO;
ep = &htc->endpoint[assigned_eid];
ep->eid = assigned_eid;
if (ep->service_id != ATH10K_HTC_SVC_ID_UNUSED)
return -EPROTO;
/* return assigned endpoint to caller */
conn_resp->eid = assigned_eid;
conn_resp->max_msg_len = __le16_to_cpu(resp_msg->max_msg_size);
/* setup the endpoint */
ep->service_id = conn_req->service_id;
ep->max_tx_queue_depth = conn_req->max_send_queue_depth;
ep->max_ep_message_len = __le16_to_cpu(resp_msg->max_msg_size);
ep->tx_credits = tx_alloc;
ep->tx_credit_size = htc->target_credit_size;
if (conn_req->service_id == ATH10K_HTC_SVC_ID_HTT_DATA_MSG &&
htc->alt_data_credit_size != 0)
ep->tx_credit_size = htc->alt_data_credit_size;
/* copy all the callbacks */
ep->ep_ops = conn_req->ep_ops;
status = ath10k_hif_map_service_to_pipe(htc->ar,
ep->service_id,
&ep->ul_pipe_id,
&ep->dl_pipe_id);
if (status) {
ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC service id: %d\n",
ep->service_id);
return status;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' ul pipe %d dl pipe %d eid %d ready\n",
htc_service_name(ep->service_id), ep->ul_pipe_id,
ep->dl_pipe_id, ep->eid);
if (disable_credit_flow_ctrl && ep->tx_credit_flow_enabled) {
ep->tx_credit_flow_enabled = false;
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot htc service '%s' eid %d TX flow control disabled\n",
htc_service_name(ep->service_id), assigned_eid);
}
return status;
}
struct sk_buff *ath10k_htc_alloc_skb(struct ath10k *ar, int size)
{
struct sk_buff *skb;
skb = dev_alloc_skb(size + sizeof(struct ath10k_htc_hdr));
if (!skb)
return NULL;
skb_reserve(skb, sizeof(struct ath10k_htc_hdr));
/* FW/HTC requires 4-byte aligned streams */
if (!IS_ALIGNED((unsigned long)skb->data, 4))
ath10k_warn(ar, "Unaligned HTC tx skb\n");
return skb;
}
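/* Usage sketch (illustrative): the headroom reserved above is pushed
* back by the send path when it prepends the HTC header, so callers
* only ever deal with payload bytes:
*
*	skb = ath10k_htc_alloc_skb(ar, payload_len);
*	if (!skb)
*		return -ENOMEM;
*	skb_put(skb, payload_len);	// fill the payload area
*/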
static void ath10k_htc_pktlog_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
trace_ath10k_htt_pktlog(ar, skb->data, skb->len);
dev_kfree_skb_any(skb);
}
static int ath10k_htc_pktlog_connect(struct ath10k *ar)
{
struct ath10k_htc_svc_conn_resp conn_resp;
struct ath10k_htc_svc_conn_req conn_req;
int status;
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = NULL;
conn_req.ep_ops.ep_rx_complete = ath10k_htc_pktlog_process_rx;
conn_req.ep_ops.ep_tx_credits = NULL;
/* connect to control service */
conn_req.service_id = ATH10K_HTC_SVC_ID_HTT_LOG_MSG;
status = ath10k_htc_connect_service(&ar->htc, &conn_req, &conn_resp);
if (status) {
ath10k_warn(ar, "failed to connect to PKTLOG service: %d\n",
status);
return status;
}
return 0;
}
static bool ath10k_htc_pktlog_svc_supported(struct ath10k *ar)
{
u8 ul_pipe_id;
u8 dl_pipe_id;
int status;
status = ath10k_hif_map_service_to_pipe(ar, ATH10K_HTC_SVC_ID_HTT_LOG_MSG,
&ul_pipe_id,
&dl_pipe_id);
if (status) {
ath10k_dbg(ar, ATH10K_DBG_BOOT, "unsupported HTC pktlog service id: %d\n",
ATH10K_HTC_SVC_ID_HTT_LOG_MSG);
return false;
}
return true;
}
int ath10k_htc_start(struct ath10k_htc *htc)
{
struct ath10k *ar = htc->ar;
struct sk_buff *skb;
int status = 0;
struct ath10k_htc_msg *msg;
skb = ath10k_htc_build_tx_ctrl_skb(htc->ar);
if (!skb)
return -ENOMEM;
skb_put(skb, sizeof(msg->hdr) + sizeof(msg->setup_complete_ext));
memset(skb->data, 0, skb->len);
msg = (struct ath10k_htc_msg *)skb->data;
msg->hdr.message_id =
__cpu_to_le16(ATH10K_HTC_MSG_SETUP_COMPLETE_EX_ID);
if (ar->hif.bus == ATH10K_BUS_SDIO) {
/* Extra setup params used by SDIO */
msg->setup_complete_ext.flags =
__cpu_to_le32(ATH10K_HTC_SETUP_COMPLETE_FLAGS_RX_BNDL_EN);
msg->setup_complete_ext.max_msgs_per_bundled_recv =
htc->max_msgs_per_htc_bundle;
}
ath10k_dbg(ar, ATH10K_DBG_HTC, "HTC is using TX credit flow control\n");
status = ath10k_htc_send(htc, ATH10K_HTC_EP_0, skb);
if (status) {
kfree_skb(skb);
return status;
}
if (ath10k_htc_pktlog_svc_supported(ar)) {
status = ath10k_htc_pktlog_connect(ar);
if (status) {
ath10k_err(ar, "failed to connect to pktlog: %d\n", status);
return status;
}
}
return 0;
}
/* registered target arrival callback from the HIF layer */
int ath10k_htc_init(struct ath10k *ar)
{
int status;
struct ath10k_htc *htc = &ar->htc;
struct ath10k_htc_svc_conn_req conn_req;
struct ath10k_htc_svc_conn_resp conn_resp;
spin_lock_init(&htc->tx_lock);
ath10k_htc_reset_endpoint_states(htc);
htc->ar = ar;
/* setup our pseudo HTC control endpoint connection */
memset(&conn_req, 0, sizeof(conn_req));
memset(&conn_resp, 0, sizeof(conn_resp));
conn_req.ep_ops.ep_tx_complete = ath10k_htc_control_tx_complete;
conn_req.ep_ops.ep_rx_complete = ath10k_htc_control_rx_complete;
conn_req.max_send_queue_depth = ATH10K_NUM_CONTROL_TX_BUFFERS;
conn_req.service_id = ATH10K_HTC_SVC_ID_RSVD_CTRL;
/* connect fake service */
status = ath10k_htc_connect_service(htc, &conn_req, &conn_resp);
if (status) {
ath10k_err(ar, "could not connect to htc service (%d)\n",
status);
return status;
}
init_completion(&htc->ctl_resp);
return 0;
}
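/* Assumed boot-time sequencing of the HTC entry points in this file
* (condensed, illustrative sketch):
*
*	ath10k_htc_init(ar);		  // connect pseudo control service
*	...HIF brought up...
*	ath10k_htc_wait_target(&ar->htc); // consume the target ready msg
*	ath10k_htc_connect_service(...);  // once per WMI/HTT service
*	ath10k_htc_start(&ar->htc);	  // send setup-complete to target
*/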
|
linux-master
|
drivers/net/wireless/ath/ath10k/htc.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2015-2016 Qualcomm Atheros, Inc.
*/
/* This file implements the code swap logic. With the code swap feature,
* the target can run the firmware binary with a smaller IRAM footprint
* by using host memory to store some of the code segments.
*/
#include "core.h"
#include "bmi.h"
#include "debug.h"
static int ath10k_swap_code_seg_fill(struct ath10k *ar,
struct ath10k_swap_code_seg_info *seg_info,
const void *data, size_t data_len)
{
u8 *virt_addr = seg_info->virt_address[0];
u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {};
const u8 *fw_data = data;
union ath10k_swap_code_seg_item *swap_item;
u32 length = 0;
u32 payload_len;
u32 total_payload_len = 0;
u32 size_left = data_len;
/* Parse the swap binary and copy its contents into host-allocated
* memory. The format is a sequence of (address, length, value)
* records; the last 4 bytes of the file hold the target write
* address. The address field is currently unused.
*/
seg_info->target_addr = -1;
while (size_left >= sizeof(*swap_item)) {
swap_item = (union ath10k_swap_code_seg_item *)fw_data;
payload_len = __le32_to_cpu(swap_item->tlv.length);
if ((payload_len > size_left) ||
(payload_len == 0 &&
size_left != sizeof(struct ath10k_swap_code_seg_tail))) {
ath10k_err(ar, "refusing to parse invalid tlv length %d\n",
payload_len);
return -EINVAL;
}
if (payload_len == 0) {
if (memcmp(swap_item->tail.magic_signature, swap_magic,
ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) {
ath10k_err(ar, "refusing an invalid swap file\n");
return -EINVAL;
}
seg_info->target_addr =
__le32_to_cpu(swap_item->tail.bmi_write_addr);
break;
}
memcpy(virt_addr, swap_item->tlv.data, payload_len);
virt_addr += payload_len;
length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv);
size_left -= length;
fw_data += length;
total_payload_len += payload_len;
}
if (seg_info->target_addr == -1) {
ath10k_err(ar, "failed to parse invalid swap file\n");
return -EINVAL;
}
seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len);
return 0;
}
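/* Worked example of the swap binary layout walked above (illustrative
* sizes):
*
*	[ address | length = N | N bytes of code ]   repeated TLVs
*	[ tail: magic_signature (all zeroes) | bmi_write_addr ]
*
* A tail is detected when tlv.length reads zero while exactly
* sizeof(struct ath10k_swap_code_seg_tail) bytes remain; the all-zero
* magic is then verified and bmi_write_addr becomes the
* seg_info->target_addr used later for the BMI write.
*/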
static void
ath10k_swap_code_seg_free(struct ath10k *ar,
struct ath10k_swap_code_seg_info *seg_info)
{
u32 seg_size;
if (!seg_info)
return;
if (!seg_info->virt_address[0])
return;
seg_size = __le32_to_cpu(seg_info->seg_hw_info.size);
dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0],
seg_info->paddr[0]);
}
static struct ath10k_swap_code_seg_info *
ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len)
{
struct ath10k_swap_code_seg_info *seg_info;
void *virt_addr;
dma_addr_t paddr;
swap_bin_len = roundup(swap_bin_len, 2);
if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) {
ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n",
swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX);
return NULL;
}
seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL);
if (!seg_info)
return NULL;
virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr,
GFP_KERNEL);
if (!virt_addr)
return NULL;
seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr);
seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len);
seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len);
seg_info->seg_hw_info.num_segs =
__cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED);
seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len));
seg_info->virt_address[0] = virt_addr;
seg_info->paddr[0] = paddr;
return seg_info;
}
int ath10k_swap_code_seg_configure(struct ath10k *ar,
const struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info = NULL;
if (!fw_file->firmware_swap_code_seg_info)
return 0;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n");
seg_info = fw_file->firmware_swap_code_seg_info;
ret = ath10k_bmi_write_memory(ar, seg_info->target_addr,
&seg_info->seg_hw_info,
sizeof(seg_info->seg_hw_info));
if (ret) {
ath10k_err(ar, "failed to write Code swap segment information (%d)\n",
ret);
return ret;
}
return 0;
}
void ath10k_swap_code_seg_release(struct ath10k *ar,
struct ath10k_fw_file *fw_file)
{
ath10k_swap_code_seg_free(ar, fw_file->firmware_swap_code_seg_info);
/* FIXME: these two assignments look to be in the wrong place! Shouldn't
* they be in ath10k_core_free_firmware_files() like the rest?
*/
fw_file->codeswap_data = NULL;
fw_file->codeswap_len = 0;
fw_file->firmware_swap_code_seg_info = NULL;
}
int ath10k_swap_code_seg_init(struct ath10k *ar, struct ath10k_fw_file *fw_file)
{
int ret;
struct ath10k_swap_code_seg_info *seg_info;
const void *codeswap_data;
size_t codeswap_len;
codeswap_data = fw_file->codeswap_data;
codeswap_len = fw_file->codeswap_len;
if (!codeswap_len || !codeswap_data)
return 0;
seg_info = ath10k_swap_code_seg_alloc(ar, codeswap_len);
if (!seg_info) {
ath10k_err(ar, "failed to allocate fw code swap segment\n");
return -ENOMEM;
}
ret = ath10k_swap_code_seg_fill(ar, seg_info,
codeswap_data, codeswap_len);
if (ret) {
ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n",
ret);
ath10k_swap_code_seg_free(ar, seg_info);
return ret;
}
fw_file->firmware_swap_code_seg_info = seg_info;
return 0;
}
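/* Sketch of the assumed call order for the helpers in this file
* (illustrative; the core driver is expected to sequence them so):
*
*	ath10k_swap_code_seg_init(ar, fw_file);	     // parse bin, DMA alloc
*	...firmware download over BMI...
*	ath10k_swap_code_seg_configure(ar, fw_file); // BMI-write seg_hw_info
*	...
*	ath10k_swap_code_seg_release(ar, fw_file);   // free the DMA buffer
*/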
|
linux-master
|
drivers/net/wireless/ath/ath10k/swap.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2016-2017 Qualcomm Atheros, Inc. All rights reserved.
* Copyright (c) 2015 The Linux Foundation. All rights reserved.
*/
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/reset.h>
#include "core.h"
#include "debug.h"
#include "pci.h"
#include "ahb.h"
static const struct of_device_id ath10k_ahb_of_match[] = {
{ .compatible = "qcom,ipq4019-wifi",
.data = (void *)ATH10K_HW_QCA4019
},
{ }
};
MODULE_DEVICE_TABLE(of, ath10k_ahb_of_match);
#define QCA4019_SRAM_ADDR 0x000C0000
#define QCA4019_SRAM_LEN 0x00040000 /* 256 KiB */
static inline struct ath10k_ahb *ath10k_ahb_priv(struct ath10k *ar)
{
return &ath10k_pci_priv(ar)->ahb[0];
}
static void ath10k_ahb_write32(struct ath10k *ar, u32 offset, u32 value)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
iowrite32(value, ar_ahb->mem + offset);
}
static u32 ath10k_ahb_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
return ioread32(ar_ahb->mem + offset);
}
static u32 ath10k_ahb_gcc_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
return ioread32(ar_ahb->gcc_mem + offset);
}
static void ath10k_ahb_tcsr_write32(struct ath10k *ar, u32 offset, u32 value)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
iowrite32(value, ar_ahb->tcsr_mem + offset);
}
static u32 ath10k_ahb_tcsr_read32(struct ath10k *ar, u32 offset)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
return ioread32(ar_ahb->tcsr_mem + offset);
}
static u32 ath10k_ahb_soc_read32(struct ath10k *ar, u32 addr)
{
return ath10k_ahb_read32(ar, RTC_SOC_BASE_ADDRESS + addr);
}
static int ath10k_ahb_get_num_banks(struct ath10k *ar)
{
if (ar->hw_rev == ATH10K_HW_QCA4019)
return 1;
ath10k_warn(ar, "unknown number of banks, assuming 1\n");
return 1;
}
static int ath10k_ahb_clock_init(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct device *dev;
dev = &ar_ahb->pdev->dev;
ar_ahb->cmd_clk = devm_clk_get(dev, "wifi_wcss_cmd");
if (IS_ERR_OR_NULL(ar_ahb->cmd_clk)) {
ath10k_err(ar, "failed to get cmd clk: %ld\n",
PTR_ERR(ar_ahb->cmd_clk));
return ar_ahb->cmd_clk ? PTR_ERR(ar_ahb->cmd_clk) : -ENODEV;
}
ar_ahb->ref_clk = devm_clk_get(dev, "wifi_wcss_ref");
if (IS_ERR_OR_NULL(ar_ahb->ref_clk)) {
ath10k_err(ar, "failed to get ref clk: %ld\n",
PTR_ERR(ar_ahb->ref_clk));
return ar_ahb->ref_clk ? PTR_ERR(ar_ahb->ref_clk) : -ENODEV;
}
ar_ahb->rtc_clk = devm_clk_get(dev, "wifi_wcss_rtc");
if (IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
ath10k_err(ar, "failed to get rtc clk: %ld\n",
PTR_ERR(ar_ahb->rtc_clk));
return ar_ahb->rtc_clk ? PTR_ERR(ar_ahb->rtc_clk) : -ENODEV;
}
return 0;
}
static void ath10k_ahb_clock_deinit(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
ar_ahb->cmd_clk = NULL;
ar_ahb->ref_clk = NULL;
ar_ahb->rtc_clk = NULL;
}
static int ath10k_ahb_clock_enable(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
int ret;
if (IS_ERR_OR_NULL(ar_ahb->cmd_clk) ||
IS_ERR_OR_NULL(ar_ahb->ref_clk) ||
IS_ERR_OR_NULL(ar_ahb->rtc_clk)) {
ath10k_err(ar, "clock(s) is/are not initialized\n");
ret = -EIO;
goto out;
}
ret = clk_prepare_enable(ar_ahb->cmd_clk);
if (ret) {
ath10k_err(ar, "failed to enable cmd clk: %d\n", ret);
goto out;
}
ret = clk_prepare_enable(ar_ahb->ref_clk);
if (ret) {
ath10k_err(ar, "failed to enable ref clk: %d\n", ret);
goto err_cmd_clk_disable;
}
ret = clk_prepare_enable(ar_ahb->rtc_clk);
if (ret) {
ath10k_err(ar, "failed to enable rtc clk: %d\n", ret);
goto err_ref_clk_disable;
}
return 0;
err_ref_clk_disable:
clk_disable_unprepare(ar_ahb->ref_clk);
err_cmd_clk_disable:
clk_disable_unprepare(ar_ahb->cmd_clk);
out:
return ret;
}
static void ath10k_ahb_clock_disable(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
clk_disable_unprepare(ar_ahb->cmd_clk);
clk_disable_unprepare(ar_ahb->ref_clk);
clk_disable_unprepare(ar_ahb->rtc_clk);
}
static int ath10k_ahb_rst_ctrl_init(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct device *dev;
dev = &ar_ahb->pdev->dev;
ar_ahb->core_cold_rst = devm_reset_control_get_exclusive(dev,
"wifi_core_cold");
if (IS_ERR(ar_ahb->core_cold_rst)) {
ath10k_err(ar, "failed to get core cold rst ctrl: %ld\n",
PTR_ERR(ar_ahb->core_cold_rst));
return PTR_ERR(ar_ahb->core_cold_rst);
}
ar_ahb->radio_cold_rst = devm_reset_control_get_exclusive(dev,
"wifi_radio_cold");
if (IS_ERR(ar_ahb->radio_cold_rst)) {
ath10k_err(ar, "failed to get radio cold rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_cold_rst));
return PTR_ERR(ar_ahb->radio_cold_rst);
}
ar_ahb->radio_warm_rst = devm_reset_control_get_exclusive(dev,
"wifi_radio_warm");
if (IS_ERR(ar_ahb->radio_warm_rst)) {
ath10k_err(ar, "failed to get radio warm rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_warm_rst));
return PTR_ERR(ar_ahb->radio_warm_rst);
}
ar_ahb->radio_srif_rst = devm_reset_control_get_exclusive(dev,
"wifi_radio_srif");
if (IS_ERR(ar_ahb->radio_srif_rst)) {
ath10k_err(ar, "failed to get radio srif rst ctrl: %ld\n",
PTR_ERR(ar_ahb->radio_srif_rst));
return PTR_ERR(ar_ahb->radio_srif_rst);
}
ar_ahb->cpu_init_rst = devm_reset_control_get_exclusive(dev,
"wifi_cpu_init");
if (IS_ERR(ar_ahb->cpu_init_rst)) {
ath10k_err(ar, "failed to get cpu init rst ctrl: %ld\n",
PTR_ERR(ar_ahb->cpu_init_rst));
return PTR_ERR(ar_ahb->cpu_init_rst);
}
return 0;
}
static void ath10k_ahb_rst_ctrl_deinit(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
ar_ahb->core_cold_rst = NULL;
ar_ahb->radio_cold_rst = NULL;
ar_ahb->radio_warm_rst = NULL;
ar_ahb->radio_srif_rst = NULL;
ar_ahb->cpu_init_rst = NULL;
}
static int ath10k_ahb_release_reset(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
int ret;
if (IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) ||
IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) ||
IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) ||
IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
ath10k_err(ar, "rst ctrl(s) is/are not initialized\n");
return -EINVAL;
}
ret = reset_control_deassert(ar_ahb->radio_cold_rst);
if (ret) {
ath10k_err(ar, "failed to deassert radio cold rst: %d\n", ret);
return ret;
}
ret = reset_control_deassert(ar_ahb->radio_warm_rst);
if (ret) {
ath10k_err(ar, "failed to deassert radio warm rst: %d\n", ret);
return ret;
}
ret = reset_control_deassert(ar_ahb->radio_srif_rst);
if (ret) {
ath10k_err(ar, "failed to deassert radio srif rst: %d\n", ret);
return ret;
}
ret = reset_control_deassert(ar_ahb->cpu_init_rst);
if (ret) {
ath10k_err(ar, "failed to deassert cpu init rst: %d\n", ret);
return ret;
}
return 0;
}
static void ath10k_ahb_halt_axi_bus(struct ath10k *ar, u32 haltreq_reg,
u32 haltack_reg)
{
unsigned long timeout;
u32 val;
/* Issue halt axi bus request */
val = ath10k_ahb_tcsr_read32(ar, haltreq_reg);
val |= AHB_AXI_BUS_HALT_REQ;
ath10k_ahb_tcsr_write32(ar, haltreq_reg, val);
/* Wait for axi bus halted ack */
timeout = jiffies + msecs_to_jiffies(ATH10K_AHB_AXI_BUS_HALT_TIMEOUT);
do {
val = ath10k_ahb_tcsr_read32(ar, haltack_reg);
if (val & AHB_AXI_BUS_HALT_ACK)
break;
mdelay(1);
} while (time_before(jiffies, timeout));
if (!(val & AHB_AXI_BUS_HALT_ACK)) {
ath10k_err(ar, "failed to halt axi bus: %d\n", val);
return;
}
ath10k_dbg(ar, ATH10K_DBG_AHB, "axi bus halted\n");
}
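/* The halt above is a plain request/poll-ack handshake: set the
* request bit, then poll the ack register until the ack bit appears or
* the timeout elapses. A minimal generic sketch of the same pattern
* (illustrative helper, not used by the driver):
*/
static __maybe_unused bool ath10k_ahb_example_poll_ack(struct ath10k *ar,
u32 reg, u32 ack_bit,
unsigned int timeout_ms)
{
unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
/* test the ack bit on every iteration until it is set */
if (ath10k_ahb_tcsr_read32(ar, reg) & ack_bit)
return true;
mdelay(1);
} while (time_before(jiffies, timeout));
return false;
}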
static void ath10k_ahb_halt_chip(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
u32 core_id, glb_cfg_reg, haltreq_reg, haltack_reg;
u32 val;
int ret;
if (IS_ERR_OR_NULL(ar_ahb->core_cold_rst) ||
IS_ERR_OR_NULL(ar_ahb->radio_cold_rst) ||
IS_ERR_OR_NULL(ar_ahb->radio_warm_rst) ||
IS_ERR_OR_NULL(ar_ahb->radio_srif_rst) ||
IS_ERR_OR_NULL(ar_ahb->cpu_init_rst)) {
ath10k_err(ar, "rst ctrl(s) is/are not initialized\n");
return;
}
core_id = ath10k_ahb_read32(ar, ATH10K_AHB_WLAN_CORE_ID_REG);
switch (core_id) {
case 0:
glb_cfg_reg = ATH10K_AHB_TCSR_WIFI0_GLB_CFG;
haltreq_reg = ATH10K_AHB_TCSR_WCSS0_HALTREQ;
haltack_reg = ATH10K_AHB_TCSR_WCSS0_HALTACK;
break;
case 1:
glb_cfg_reg = ATH10K_AHB_TCSR_WIFI1_GLB_CFG;
haltreq_reg = ATH10K_AHB_TCSR_WCSS1_HALTREQ;
haltack_reg = ATH10K_AHB_TCSR_WCSS1_HALTACK;
break;
default:
ath10k_err(ar, "invalid core id %d found, skipping reset sequence\n",
core_id);
return;
}
ath10k_ahb_halt_axi_bus(ar, haltreq_reg, haltack_reg);
val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg);
val |= TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK;
ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val);
ret = reset_control_assert(ar_ahb->core_cold_rst);
if (ret)
ath10k_err(ar, "failed to assert core cold rst: %d\n", ret);
msleep(1);
ret = reset_control_assert(ar_ahb->radio_cold_rst);
if (ret)
ath10k_err(ar, "failed to assert radio cold rst: %d\n", ret);
msleep(1);
ret = reset_control_assert(ar_ahb->radio_warm_rst);
if (ret)
ath10k_err(ar, "failed to assert radio warm rst: %d\n", ret);
msleep(1);
ret = reset_control_assert(ar_ahb->radio_srif_rst);
if (ret)
ath10k_err(ar, "failed to assert radio srif rst: %d\n", ret);
msleep(1);
ret = reset_control_assert(ar_ahb->cpu_init_rst);
if (ret)
ath10k_err(ar, "failed to assert cpu init rst: %d\n", ret);
msleep(10);
/* Clear halt req and core clock disable req before
* deasserting wifi core reset.
*/
val = ath10k_ahb_tcsr_read32(ar, haltreq_reg);
val &= ~AHB_AXI_BUS_HALT_REQ;
ath10k_ahb_tcsr_write32(ar, haltreq_reg, val);
val = ath10k_ahb_tcsr_read32(ar, glb_cfg_reg);
val &= ~TCSR_WIFIX_GLB_CFG_DISABLE_CORE_CLK;
ath10k_ahb_tcsr_write32(ar, glb_cfg_reg, val);
ret = reset_control_deassert(ar_ahb->core_cold_rst);
if (ret)
ath10k_err(ar, "failed to deassert core cold rst: %d\n", ret);
ath10k_dbg(ar, ATH10K_DBG_AHB, "core %d reset done\n", core_id);
}
static irqreturn_t ath10k_ahb_interrupt_handler(int irq, void *arg)
{
struct ath10k *ar = arg;
if (!ath10k_pci_irq_pending(ar))
return IRQ_NONE;
ath10k_pci_disable_and_clear_legacy_irq(ar);
ath10k_pci_irq_msi_fw_mask(ar);
napi_schedule(&ar->napi);
return IRQ_HANDLED;
}
static int ath10k_ahb_request_irq_legacy(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
int ret;
ret = request_irq(ar_ahb->irq,
ath10k_ahb_interrupt_handler,
IRQF_SHARED, "ath10k_ahb", ar);
if (ret) {
ath10k_warn(ar, "failed to request legacy irq %d: %d\n",
ar_ahb->irq, ret);
return ret;
}
ar_pci->oper_irq_mode = ATH10K_PCI_IRQ_LEGACY;
return 0;
}
static void ath10k_ahb_release_irq_legacy(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
free_irq(ar_ahb->irq, ar);
}
static void ath10k_ahb_irq_disable(struct ath10k *ar)
{
ath10k_ce_disable_interrupts(ar);
ath10k_pci_disable_and_clear_legacy_irq(ar);
}
static int ath10k_ahb_resource_init(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct platform_device *pdev;
struct resource *res;
int ret;
pdev = ar_ahb->pdev;
ar_ahb->mem = devm_platform_get_and_ioremap_resource(pdev, 0, &res);
if (IS_ERR(ar_ahb->mem)) {
ath10k_err(ar, "mem ioremap error\n");
ret = PTR_ERR(ar_ahb->mem);
goto out;
}
ar_ahb->mem_len = resource_size(res);
ar_ahb->gcc_mem = ioremap(ATH10K_GCC_REG_BASE,
ATH10K_GCC_REG_SIZE);
if (!ar_ahb->gcc_mem) {
ath10k_err(ar, "gcc mem ioremap error\n");
ret = -ENOMEM;
goto err_mem_unmap;
}
ar_ahb->tcsr_mem = ioremap(ATH10K_TCSR_REG_BASE,
ATH10K_TCSR_REG_SIZE);
if (!ar_ahb->tcsr_mem) {
ath10k_err(ar, "tcsr mem ioremap error\n");
ret = -ENOMEM;
goto err_gcc_mem_unmap;
}
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
ath10k_err(ar, "failed to set 32-bit dma mask: %d\n", ret);
goto err_tcsr_mem_unmap;
}
ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
ath10k_err(ar, "failed to set 32-bit consistent dma: %d\n",
ret);
goto err_tcsr_mem_unmap;
}
ret = ath10k_ahb_clock_init(ar);
if (ret)
goto err_tcsr_mem_unmap;
ret = ath10k_ahb_rst_ctrl_init(ar);
if (ret)
goto err_clock_deinit;
ar_ahb->irq = platform_get_irq_byname(pdev, "legacy");
if (ar_ahb->irq < 0) {
ath10k_err(ar, "failed to get irq number: %d\n", ar_ahb->irq);
ret = ar_ahb->irq;
goto err_clock_deinit;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "irq: %d\n", ar_ahb->irq);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "mem: 0x%pK mem_len: %lu gcc mem: 0x%pK tcsr_mem: 0x%pK\n",
ar_ahb->mem, ar_ahb->mem_len,
ar_ahb->gcc_mem, ar_ahb->tcsr_mem);
return 0;
err_clock_deinit:
ath10k_ahb_clock_deinit(ar);
err_tcsr_mem_unmap:
iounmap(ar_ahb->tcsr_mem);
err_gcc_mem_unmap:
ar_ahb->tcsr_mem = NULL;
iounmap(ar_ahb->gcc_mem);
err_mem_unmap:
ar_ahb->gcc_mem = NULL;
devm_iounmap(&pdev->dev, ar_ahb->mem);
out:
ar_ahb->mem = NULL;
return ret;
}
static void ath10k_ahb_resource_deinit(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
struct device *dev;
dev = &ar_ahb->pdev->dev;
if (ar_ahb->mem)
devm_iounmap(dev, ar_ahb->mem);
if (ar_ahb->gcc_mem)
iounmap(ar_ahb->gcc_mem);
if (ar_ahb->tcsr_mem)
iounmap(ar_ahb->tcsr_mem);
ar_ahb->mem = NULL;
ar_ahb->gcc_mem = NULL;
ar_ahb->tcsr_mem = NULL;
ath10k_ahb_clock_deinit(ar);
ath10k_ahb_rst_ctrl_deinit(ar);
}
static int ath10k_ahb_prepare_device(struct ath10k *ar)
{
u32 val;
int ret;
ret = ath10k_ahb_clock_enable(ar);
if (ret) {
ath10k_err(ar, "failed to enable clocks\n");
return ret;
}
/* The target's clock is supplied externally (i.e. by a clock module
* controlled by the host). The target needs to know what frequency
* its CPU is configured for, as this is needed for target-internal
* use. Read the target CPU frequency info from the GCC register and
* write it into the target's scratch register, where the target
* expects this information.
*/
val = ath10k_ahb_gcc_read32(ar, ATH10K_AHB_GCC_FEPLL_PLL_DIV);
ath10k_ahb_write32(ar, ATH10K_AHB_WIFI_SCRATCH_5_REG, val);
ret = ath10k_ahb_release_reset(ar);
if (ret)
goto err_clk_disable;
ath10k_ahb_irq_disable(ar);
ath10k_ahb_write32(ar, FW_INDICATOR_ADDRESS, FW_IND_HOST_READY);
ret = ath10k_pci_wait_for_target_init(ar);
if (ret)
goto err_halt_chip;
return 0;
err_halt_chip:
ath10k_ahb_halt_chip(ar);
err_clk_disable:
ath10k_ahb_clock_disable(ar);
return ret;
}
static int ath10k_ahb_chip_reset(struct ath10k *ar)
{
int ret;
ath10k_ahb_halt_chip(ar);
ath10k_ahb_clock_disable(ar);
ret = ath10k_ahb_prepare_device(ar);
if (ret)
return ret;
return 0;
}
static int ath10k_ahb_wake_target_cpu(struct ath10k *ar)
{
u32 addr, val;
addr = SOC_CORE_BASE_ADDRESS | CORE_CTRL_ADDRESS;
val = ath10k_ahb_read32(ar, addr);
val |= ATH10K_AHB_CORE_CTRL_CPU_INTR_MASK;
ath10k_ahb_write32(ar, addr, val);
return 0;
}
static int ath10k_ahb_hif_start(struct ath10k *ar)
{
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif start\n");
ath10k_core_napi_enable(ar);
ath10k_ce_enable_interrupts(ar);
ath10k_pci_enable_legacy_irq(ar);
ath10k_pci_rx_post(ar);
return 0;
}
static void ath10k_ahb_hif_stop(struct ath10k *ar)
{
struct ath10k_ahb *ar_ahb = ath10k_ahb_priv(ar);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif stop\n");
ath10k_ahb_irq_disable(ar);
synchronize_irq(ar_ahb->irq);
ath10k_core_napi_sync_disable(ar);
ath10k_pci_flush(ar);
}
static int ath10k_ahb_hif_power_up(struct ath10k *ar,
enum ath10k_firmware_mode fw_mode)
{
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot ahb hif power up\n");
ret = ath10k_ahb_chip_reset(ar);
if (ret) {
ath10k_err(ar, "failed to reset chip: %d\n", ret);
goto out;
}
ret = ath10k_pci_init_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to initialize CE: %d\n", ret);
goto out;
}
ret = ath10k_pci_init_config(ar);
if (ret) {
ath10k_err(ar, "failed to setup init config: %d\n", ret);
goto err_ce_deinit;
}
ret = ath10k_ahb_wake_target_cpu(ar);
if (ret) {
ath10k_err(ar, "could not wake up target CPU: %d\n", ret);
goto err_ce_deinit;
}
return 0;
err_ce_deinit:
ath10k_pci_ce_deinit(ar);
out:
return ret;
}
static u32 ath10k_ahb_qca4019_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
{
u32 val = 0, region = addr & 0xfffff;
val = ath10k_pci_read32(ar, PCIE_BAR_REG_ADDRESS);
if (region >= QCA4019_SRAM_ADDR && region <=
(QCA4019_SRAM_ADDR + QCA4019_SRAM_LEN)) {
/* SRAM contents for QCA4019 can be accessed directly;
* no conversion is required.
*/
val |= region;
} else {
val |= 0x100000 | region;
}
return val;
}
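/* Worked example for the translation above (illustrative addresses).
* The low 20 bits select the region within the target address space:
*
*	addr = 0x000c1000 -> region 0xc1000 is inside the SRAM window,
*			     so val = BAR | 0x000c1000 (no conversion)
*	addr = 0x00051000 -> region 0x51000 is outside the window,
*			     so val = BAR | 0x00151000 (0x100000 | region)
*/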
static const struct ath10k_hif_ops ath10k_ahb_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
.diag_read = ath10k_pci_hif_diag_read,
.diag_write = ath10k_pci_diag_write_mem,
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
.start = ath10k_ahb_hif_start,
.stop = ath10k_ahb_hif_stop,
.map_service_to_pipe = ath10k_pci_hif_map_service_to_pipe,
.get_default_pipe = ath10k_pci_hif_get_default_pipe,
.send_complete_check = ath10k_pci_hif_send_complete_check,
.get_free_queue_number = ath10k_pci_hif_get_free_queue_number,
.power_up = ath10k_ahb_hif_power_up,
.power_down = ath10k_pci_hif_power_down,
.read32 = ath10k_ahb_read32,
.write32 = ath10k_ahb_write32,
};
static const struct ath10k_bus_ops ath10k_ahb_bus_ops = {
.read32 = ath10k_ahb_read32,
.write32 = ath10k_ahb_write32,
.get_num_banks = ath10k_ahb_get_num_banks,
};
static int ath10k_ahb_probe(struct platform_device *pdev)
{
struct ath10k *ar;
struct ath10k_ahb *ar_ahb;
struct ath10k_pci *ar_pci;
enum ath10k_hw_rev hw_rev;
size_t size;
int ret;
struct ath10k_bus_params bus_params = {};
hw_rev = (uintptr_t)of_device_get_match_data(&pdev->dev);
if (!hw_rev) {
dev_err(&pdev->dev, "OF data missing\n");
return -EINVAL;
}
size = sizeof(*ar_pci) + sizeof(*ar_ahb);
ar = ath10k_core_create(size, &pdev->dev, ATH10K_BUS_AHB,
hw_rev, &ath10k_ahb_hif_ops);
if (!ar) {
dev_err(&pdev->dev, "failed to allocate core\n");
return -ENOMEM;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "ahb probe\n");
ar_pci = ath10k_pci_priv(ar);
ar_ahb = ath10k_ahb_priv(ar);
ar_ahb->pdev = pdev;
platform_set_drvdata(pdev, ar);
ret = ath10k_ahb_resource_init(ar);
if (ret)
goto err_core_destroy;
ar->dev_id = 0;
ar_pci->mem = ar_ahb->mem;
ar_pci->mem_len = ar_ahb->mem_len;
ar_pci->ar = ar;
ar_pci->ce.bus_ops = &ath10k_ahb_bus_ops;
ar_pci->targ_cpu_to_ce_addr = ath10k_ahb_qca4019_targ_cpu_to_ce_addr;
ar->ce_priv = &ar_pci->ce;
ret = ath10k_pci_setup_resource(ar);
if (ret) {
ath10k_err(ar, "failed to setup resource: %d\n", ret);
goto err_resource_deinit;
}
ath10k_pci_init_napi(ar);
ret = ath10k_ahb_request_irq_legacy(ar);
if (ret)
goto err_free_pipes;
ret = ath10k_ahb_prepare_device(ar);
if (ret)
goto err_free_irq;
ath10k_pci_ce_deinit(ar);
bus_params.dev_type = ATH10K_DEV_TYPE_LL;
bus_params.chip_id = ath10k_ahb_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (bus_params.chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
ret = -ENODEV;
goto err_halt_device;
}
ret = ath10k_core_register(ar, &bus_params);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
goto err_halt_device;
}
return 0;
err_halt_device:
ath10k_ahb_halt_chip(ar);
ath10k_ahb_clock_disable(ar);
err_free_irq:
ath10k_ahb_release_irq_legacy(ar);
err_free_pipes:
ath10k_pci_release_resource(ar);
err_resource_deinit:
ath10k_ahb_resource_deinit(ar);
err_core_destroy:
ath10k_core_destroy(ar);
return ret;
}
static void ath10k_ahb_remove(struct platform_device *pdev)
{
struct ath10k *ar = platform_get_drvdata(pdev);
ath10k_dbg(ar, ATH10K_DBG_AHB, "ahb remove\n");
ath10k_core_unregister(ar);
ath10k_ahb_irq_disable(ar);
ath10k_ahb_release_irq_legacy(ar);
ath10k_pci_release_resource(ar);
ath10k_ahb_halt_chip(ar);
ath10k_ahb_clock_disable(ar);
ath10k_ahb_resource_deinit(ar);
ath10k_core_destroy(ar);
}
static struct platform_driver ath10k_ahb_driver = {
.driver = {
.name = "ath10k_ahb",
.of_match_table = ath10k_ahb_of_match,
},
.probe = ath10k_ahb_probe,
.remove_new = ath10k_ahb_remove,
};
int ath10k_ahb_init(void)
{
int ret;
ret = platform_driver_register(&ath10k_ahb_driver);
if (ret)
printk(KERN_ERR "failed to register ath10k ahb driver: %d\n",
ret);
return ret;
}
void ath10k_ahb_exit(void)
{
platform_driver_unregister(&ath10k_ahb_driver);
}
|
linux-master
|
drivers/net/wireless/ath/ath10k/ahb.c
|
// SPDX-License-Identifier: ISC
/*
* Copyright (c) 2018 The Linux Foundation. All rights reserved.
*/
#include <linux/soc/qcom/qmi.h>
#include <linux/types.h>
#include "qmi_wlfw_v01.h"
static const struct qmi_elem_info wlfw_ce_tgt_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
pipe_num),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum wlfw_pipedir_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
pipe_dir),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
nentries),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
nbytes_max),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_ce_tgt_pipe_cfg_s_v01,
flags),
},
{}
};
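/* Each qmi_elem_info entry maps one C struct field to a wire element:
* data_type selects the encoder, elem_size/elem_len describe the
* storage, offset locates the field, and an empty entry terminates the
* array. tlv_type 0, as used above, marks members of a nested
* QMI_STRUCT: they carry no TLV header of their own and are serialized
* back to back inside the parent element by the kernel's qmi_encdec
* helpers.
*/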
static const struct qmi_elem_info wlfw_ce_svc_pipe_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
service_id),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum wlfw_pipedir_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
pipe_dir),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_ce_svc_pipe_cfg_s_v01,
pipe_num),
},
{}
};
static const struct qmi_elem_info wlfw_shadow_reg_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
id),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_shadow_reg_cfg_s_v01,
offset),
},
{}
};
static const struct qmi_elem_info wlfw_shadow_reg_v2_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_shadow_reg_v2_cfg_s_v01,
addr),
},
{}
};
static const struct qmi_elem_info wlfw_memory_region_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_memory_region_info_s_v01,
region_addr),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_memory_region_info_s_v01,
size),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_memory_region_info_s_v01,
secure_flag),
},
{}
};
static const struct qmi_elem_info wlfw_mem_cfg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_mem_cfg_s_v01,
offset),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_mem_cfg_s_v01,
size),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_mem_cfg_s_v01,
secure_flag),
},
{}
};
static const struct qmi_elem_info wlfw_mem_seg_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_mem_seg_s_v01,
size),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum wlfw_mem_type_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_mem_seg_s_v01,
type),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_mem_seg_s_v01,
mem_cfg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLFW_MAX_NUM_MEM_CFG_V01,
.elem_size = sizeof(struct wlfw_mem_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_mem_seg_s_v01,
mem_cfg),
.ei_array = wlfw_mem_cfg_s_v01_ei,
},
{}
};
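/* Variable-length arrays are described by two entries sharing a
* tlv_type, as in wlfw_mem_seg_s_v01_ei above: a QMI_DATA_LEN entry
* for the '<name>_len' counter and a VAR_LEN_ARRAY entry whose
* elem_len caps the element count; only '<name>_len' elements go on
* the wire. Filling one (illustrative values):
*
*	struct wlfw_mem_seg_s_v01 seg = {};
*
*	seg.mem_cfg_len = 1;		// encode a single mem_cfg element
*	seg.mem_cfg[0].offset = 0;
*	seg.mem_cfg[0].size = 0x100000;	// assumed example size
*/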
static const struct qmi_elem_info wlfw_mem_seg_resp_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
addr),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
size),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum wlfw_mem_type_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_mem_seg_resp_s_v01,
type),
},
{}
};
static const struct qmi_elem_info wlfw_rf_chip_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_rf_chip_info_s_v01,
chip_id),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_rf_chip_info_s_v01,
chip_family),
},
{}
};
static const struct qmi_elem_info wlfw_rf_board_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_rf_board_info_s_v01,
board_id),
},
{}
};
static const struct qmi_elem_info wlfw_soc_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_soc_info_s_v01,
soc_id),
},
{}
};
static const struct qmi_elem_info wlfw_fw_version_info_s_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_fw_version_info_s_v01,
fw_version),
},
{
.data_type = QMI_STRING,
.elem_len = QMI_WLFW_MAX_TIMESTAMP_LEN_V01 + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0,
.offset = offsetof(struct wlfw_fw_version_info_s_v01,
fw_build_timestamp),
},
{}
};
const struct qmi_elem_info wlfw_ind_register_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
fw_ready_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
fw_ready_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
initiate_cal_download_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
initiate_cal_download_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
initiate_cal_update_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
initiate_cal_update_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
msa_ready_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
msa_ready_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
pin_connect_result_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
pin_connect_result_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
client_id_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
client_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
request_mem_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
request_mem_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
mem_ready_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
mem_ready_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
fw_init_done_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
fw_init_done_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
rejuvenate_enable_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
rejuvenate_enable),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
xo_cal_enable_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct wlfw_ind_register_req_msg_v01,
xo_cal_enable),
},
{}
};
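/* Optional message elements come in pairs sharing a tlv_type, as
* throughout the table above: a QMI_OPT_FLAG entry for the
* '<name>_valid' u8 followed by the entry for the value itself. The
* encoder emits the value TLV only when the flag is set; the decoder
* sets the flag when the TLV arrives. Filling one such pair
* (illustrative sketch):
*
*	struct wlfw_ind_register_req_msg_v01 req = {};
*
*	req.fw_ready_enable_valid = 1;
*	req.fw_ready_enable = 1;
*/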
const struct qmi_elem_info wlfw_ind_register_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
fw_status_valid),
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_ind_register_resp_msg_v01,
fw_status),
},
{}
};
const struct qmi_elem_info wlfw_fw_ready_ind_msg_v01_ei[] = {
{}
};
const struct qmi_elem_info wlfw_msa_ready_ind_msg_v01_ei[] = {
{}
};
const struct qmi_elem_info wlfw_pin_connect_result_ind_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
pwr_pin_result_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
pwr_pin_result),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
phy_io_pin_result_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
phy_io_pin_result),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
rf_pin_result_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_pin_connect_result_ind_msg_v01,
rf_pin_result),
},
{}
};
const struct qmi_elem_info wlfw_wlan_mode_req_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum wlfw_driver_mode_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
mode),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
hw_debug_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_wlan_mode_req_msg_v01,
hw_debug),
},
{}
};
const struct qmi_elem_info wlfw_wlan_mode_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_wlan_mode_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_wlan_cfg_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
host_version_valid),
},
{
.data_type = QMI_STRING,
.elem_len = QMI_WLFW_MAX_STR_LEN_V01 + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
host_version),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
tgt_cfg_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
tgt_cfg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLFW_MAX_NUM_CE_V01,
.elem_size = sizeof(struct wlfw_ce_tgt_pipe_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
tgt_cfg),
.ei_array = wlfw_ce_tgt_pipe_cfg_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
svc_cfg_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
svc_cfg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLFW_MAX_NUM_SVC_V01,
.elem_size = sizeof(struct wlfw_ce_svc_pipe_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
svc_cfg),
.ei_array = wlfw_ce_svc_pipe_cfg_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
shadow_reg_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
shadow_reg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLFW_MAX_NUM_SHADOW_REG_V01,
.elem_size = sizeof(struct wlfw_shadow_reg_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
shadow_reg),
.ei_array = wlfw_shadow_reg_cfg_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
shadow_reg_v2_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
shadow_reg_v2_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLFW_MAX_SHADOW_REG_V2,
.elem_size = sizeof(struct wlfw_shadow_reg_v2_cfg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_wlan_cfg_req_msg_v01,
shadow_reg_v2),
.ei_array = wlfw_shadow_reg_v2_cfg_s_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_wlan_cfg_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_wlan_cfg_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_cap_req_msg_v01_ei[] = {
{}
};
const struct qmi_elem_info wlfw_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
chip_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct wlfw_rf_chip_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
chip_info),
.ei_array = wlfw_rf_chip_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
board_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct wlfw_rf_board_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
board_info),
.ei_array = wlfw_rf_board_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
soc_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct wlfw_soc_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
soc_info),
.ei_array = wlfw_soc_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
fw_version_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct wlfw_fw_version_info_s_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
fw_version_info),
.ei_array = wlfw_fw_version_info_s_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
fw_build_id_valid),
},
{
.data_type = QMI_STRING,
.elem_len = QMI_WLFW_MAX_BUILD_ID_LEN_V01 + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
fw_build_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
num_macs_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct wlfw_cap_resp_msg_v01,
num_macs),
},
{}
};
const struct qmi_elem_info wlfw_bdf_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
valid),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
file_id_valid),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
file_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
total_size_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
total_size),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
seg_id_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
seg_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
data_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
data_len),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
.elem_size = sizeof(u8),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
data),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
end_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
end),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
bdf_type_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct wlfw_bdf_download_req_msg_v01,
bdf_type),
},
{}
};
const struct qmi_elem_info wlfw_bdf_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_bdf_download_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_cal_report_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_cal_report_req_msg_v01,
meta_data_len),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = QMI_WLFW_MAX_NUM_CAL_V01,
.elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_cal_report_req_msg_v01,
meta_data),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_cal_report_req_msg_v01,
xo_cal_data_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_cal_report_req_msg_v01,
xo_cal_data),
},
{}
};
const struct qmi_elem_info wlfw_cal_report_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_cal_report_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_initiate_cal_download_ind_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_initiate_cal_download_ind_msg_v01,
cal_id),
},
{}
};
const struct qmi_elem_info wlfw_cal_download_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
valid),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
file_id_valid),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
file_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
total_size_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
total_size),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
seg_id_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
seg_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
data_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
data_len),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
.elem_size = sizeof(u8),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
data),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
end_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_cal_download_req_msg_v01,
end),
},
{}
};
const struct qmi_elem_info wlfw_cal_download_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_cal_download_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_initiate_cal_update_ind_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
cal_id),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_initiate_cal_update_ind_msg_v01,
total_size),
},
{}
};
const struct qmi_elem_info wlfw_cal_update_req_msg_v01_ei[] = {
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_cal_update_req_msg_v01,
cal_id),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_cal_update_req_msg_v01,
seg_id),
},
{}
};
const struct qmi_elem_info wlfw_cal_update_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
file_id_valid),
},
{
.data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
.elem_size = sizeof(enum wlfw_cal_temp_id_enum_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
file_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
total_size_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
total_size),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
seg_id_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
seg_id),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
data_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
data_len),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = QMI_WLFW_MAX_DATA_SIZE_V01,
.elem_size = sizeof(u8),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
data),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
end_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_cal_update_resp_msg_v01,
end),
},
{}
};
const struct qmi_elem_info wlfw_msa_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_msa_info_req_msg_v01,
msa_addr),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_msa_info_req_msg_v01,
size),
},
{}
};
const struct qmi_elem_info wlfw_msa_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x03,
.offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
mem_region_info_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLFW_MAX_MEM_REG_V01,
.elem_size = sizeof(struct wlfw_memory_region_info_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x03,
.offset = offsetof(struct wlfw_msa_info_resp_msg_v01,
mem_region_info),
.ei_array = wlfw_memory_region_info_s_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_msa_ready_req_msg_v01_ei[] = {
{}
};
const struct qmi_elem_info wlfw_msa_ready_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_msa_ready_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_ini_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_ini_req_msg_v01,
enablefwlog_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_ini_req_msg_v01,
enablefwlog),
},
{}
};
const struct qmi_elem_info wlfw_ini_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_ini_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_athdiag_read_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
offset),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
mem_type),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x03,
.offset = offsetof(struct wlfw_athdiag_read_req_msg_v01,
data_len),
},
{}
};
const struct qmi_elem_info wlfw_athdiag_read_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
data_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
data_len),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
.elem_size = sizeof(u8),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_athdiag_read_resp_msg_v01,
data),
},
{}
};
const struct qmi_elem_info wlfw_athdiag_write_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
offset),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
mem_type),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x03,
.offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
data_len),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = QMI_WLFW_MAX_ATHDIAG_DATA_SIZE_V01,
.elem_size = sizeof(u8),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x03,
.offset = offsetof(struct wlfw_athdiag_write_req_msg_v01,
data),
},
{}
};
const struct qmi_elem_info wlfw_athdiag_write_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_athdiag_write_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_vbatt_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_vbatt_req_msg_v01,
voltage_uv),
},
{}
};
const struct qmi_elem_info wlfw_vbatt_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_vbatt_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_mac_addr_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
mac_addr_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = QMI_WLFW_MAC_ADDR_SIZE_V01,
.elem_size = sizeof(u8),
.array_type = STATIC_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_mac_addr_req_msg_v01,
mac_addr),
},
{}
};
const struct qmi_elem_info wlfw_mac_addr_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_mac_addr_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
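/* Optional TLVs (tlv_type >= 0x10) are described in pairs below: a
 * QMI_OPT_FLAG entry for the <field>_valid flag followed by the value
 * entry with the same tlv_type. Variable-length arrays likewise pair a
 * QMI_DATA_LEN entry with a VAR_LEN_ARRAY entry.
 */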
const struct qmi_elem_info wlfw_host_cap_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
daemon_support_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
daemon_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
wake_msi_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
wake_msi),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
gpios_valid),
},
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
gpios_len),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = QMI_WLFW_MAX_NUM_GPIO_V01,
.elem_size = sizeof(u32),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
gpios),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
nm_modem_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
nm_modem),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
bdf_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x14,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
bdf_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
bdf_cache_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x15,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
bdf_cache_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
m3_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x16,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
m3_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
m3_cache_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x17,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
m3_cache_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
cal_filesys_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x18,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
cal_filesys_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
cal_cache_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x19,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
cal_cache_support),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
cal_done_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1A,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
cal_done),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
mem_bucket_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x1B,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
mem_bucket),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1C,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
mem_cfg_mode_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x1C,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
mem_cfg_mode),
},
{}
};
const struct qmi_elem_info wlfw_host_cap_8bit_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
daemon_support_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_host_cap_req_msg_v01,
daemon_support),
},
{}
};
const struct qmi_elem_info wlfw_host_cap_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_host_cap_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_request_mem_ind_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
mem_seg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
.elem_size = sizeof(struct wlfw_mem_seg_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_request_mem_ind_msg_v01,
mem_seg),
.ei_array = wlfw_mem_seg_s_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_respond_mem_req_msg_v01_ei[] = {
{
.data_type = QMI_DATA_LEN,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
mem_seg_len),
},
{
.data_type = QMI_STRUCT,
.elem_len = QMI_WLFW_MAX_NUM_MEM_SEG_V01,
.elem_size = sizeof(struct wlfw_mem_seg_resp_s_v01),
.array_type = VAR_LEN_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_respond_mem_req_msg_v01,
mem_seg),
.ei_array = wlfw_mem_seg_resp_s_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_respond_mem_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_respond_mem_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_mem_ready_ind_msg_v01_ei[] = {
{}
};
const struct qmi_elem_info wlfw_fw_init_done_ind_msg_v01_ei[] = {
{}
};
const struct qmi_elem_info wlfw_rejuvenate_ind_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
cause_for_rejuvenation_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
cause_for_rejuvenation),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
requesting_sub_system_valid),
},
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
requesting_sub_system),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
line_number_valid),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(u16),
.array_type = NO_ARRAY,
.tlv_type = 0x12,
.offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
line_number),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
function_name_valid),
},
{
.data_type = QMI_STRING,
.elem_len = QMI_WLFW_FUNCTION_NAME_LEN_V01 + 1,
.elem_size = sizeof(char),
.array_type = NO_ARRAY,
.tlv_type = 0x13,
.offset = offsetof(struct wlfw_rejuvenate_ind_msg_v01,
function_name),
},
{}
};
const struct qmi_elem_info wlfw_rejuvenate_ack_req_msg_v01_ei[] = {
{}
};
const struct qmi_elem_info wlfw_rejuvenate_ack_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_rejuvenate_ack_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_dynamic_feature_mask_req_msg_v01_ei[] = {
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01,
mask_valid),
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_dynamic_feature_mask_req_msg_v01,
mask),
},
{}
};
const struct qmi_elem_info wlfw_dynamic_feature_mask_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
prev_mask_valid),
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x10,
.offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
prev_mask),
},
{
.data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
curr_mask_valid),
},
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x11,
.offset = offsetof(struct wlfw_dynamic_feature_mask_resp_msg_v01,
curr_mask),
},
{}
};
const struct qmi_elem_info wlfw_m3_info_req_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_8_BYTE,
.elem_len = 1,
.elem_size = sizeof(u64),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_m3_info_req_msg_v01,
addr),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(u32),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_m3_info_req_msg_v01,
size),
},
{}
};
const struct qmi_elem_info wlfw_m3_info_resp_msg_v01_ei[] = {
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct qmi_response_type_v01),
.array_type = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct wlfw_m3_info_resp_msg_v01,
resp),
.ei_array = qmi_response_type_v01_ei,
},
{}
};
const struct qmi_elem_info wlfw_xo_cal_ind_msg_v01_ei[] = {
{
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(u8),
.array_type = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct wlfw_xo_cal_ind_msg_v01,
xo_cal_data),
},
{}
};
|
linux-master
|
drivers/net/wireless/ath/ath10k/qmi_wlfw_v01.c
|
/*
* Copyright (c) 2013 Eugene Krasnikov <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "wcn36xx.h"
#define WCN36XX_BMPS_FAIL_THREHOLD 3
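/* Ask the firmware to enter BMPS (Beacon Mode Power Save). On success,
 * enable beacon filtering; after repeated failures, report connection
 * loss so mac80211 can recover.
 */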
int wcn36xx_pmc_enter_bmps_state(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
int ret = 0;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
/* TODO: Make sure the TX chain is clean */
ret = wcn36xx_smd_enter_bmps(wcn, vif);
if (!ret) {
wcn36xx_dbg(WCN36XX_DBG_PMC, "Entered BMPS\n");
vif_priv->pw_state = WCN36XX_BMPS;
vif_priv->bmps_fail_ct = 0;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER;
} else {
/*
 * One reason the HW may refuse to enter BMPS is that the
 * driver tried to enter BMPS before the first beacon was
 * received, right after authentication completed.
 */
wcn36xx_err("Cannot enter BMPS!\n");
if (vif_priv->bmps_fail_ct++ == WCN36XX_BMPS_FAIL_THREHOLD) {
ieee80211_connection_loss(vif);
vif_priv->bmps_fail_ct = 0;
}
}
return ret;
}
int wcn36xx_pmc_exit_bmps_state(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (vif_priv->pw_state != WCN36XX_BMPS) {
/* Unbalanced call or last BMPS enter failed */
wcn36xx_dbg(WCN36XX_DBG_PMC,
"Not in BMPS mode, no need to exit\n");
return -EALREADY;
}
wcn36xx_smd_exit_bmps(wcn, vif);
vif_priv->pw_state = WCN36XX_FULL_POWER;
vif->driver_flags &= ~IEEE80211_VIF_BEACON_FILTER;
return 0;
}
int wcn36xx_enable_keep_alive_null_packet(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
wcn36xx_dbg(WCN36XX_DBG_PMC, "%s\n", __func__);
return wcn36xx_smd_keep_alive_req(wcn, vif,
WCN36XX_HAL_KEEP_ALIVE_NULL_PKT);
}
|
linux-master
|
drivers/net/wireless/ath/wcn36xx/pmc.c
|
/*
* Copyright (c) 2013 Eugene Krasnikov <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
/* DXE - DMA transfer engine
 * There are two channels (high prio and low prio) for TX and two channels
 * for RX. Data packets are transferred through the low-priority channels;
 * management packets are transferred through the high-priority channels.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/interrupt.h>
#include <linux/soc/qcom/smem_state.h>
#include "wcn36xx.h"
#include "txrx.h"
static void wcn36xx_ccu_write_register(struct wcn36xx *wcn, int addr, int data)
{
wcn36xx_dbg(WCN36XX_DBG_DXE,
"wcn36xx_ccu_write_register: addr=%x, data=%x\n",
addr, data);
writel(data, wcn->ccu_base + addr);
}
static void wcn36xx_dxe_write_register(struct wcn36xx *wcn, int addr, int data)
{
wcn36xx_dbg(WCN36XX_DBG_DXE,
"wcn36xx_dxe_write_register: addr=%x, data=%x\n",
addr, data);
writel(data, wcn->dxe_base + addr);
}
static void wcn36xx_dxe_read_register(struct wcn36xx *wcn, int addr, int *data)
{
*data = readl(wcn->dxe_base + addr);
wcn36xx_dbg(WCN36XX_DBG_DXE,
"wcn36xx_dxe_read_register: addr=%x, data=%x\n",
addr, *data);
}
static void wcn36xx_dxe_free_ctl_block(struct wcn36xx_dxe_ch *ch)
{
struct wcn36xx_dxe_ctl *ctl = ch->head_blk_ctl, *next;
int i;
for (i = 0; i < ch->desc_num && ctl; i++) {
next = ctl->next;
kfree(ctl);
ctl = next;
}
}
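/* Allocate desc_num control blocks and link them into a circular list
 * headed by ch->head_blk_ctl.
 */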
static int wcn36xx_dxe_allocate_ctl_block(struct wcn36xx_dxe_ch *ch)
{
struct wcn36xx_dxe_ctl *prev_ctl = NULL;
struct wcn36xx_dxe_ctl *cur_ctl = NULL;
int i;
spin_lock_init(&ch->lock);
for (i = 0; i < ch->desc_num; i++) {
cur_ctl = kzalloc(sizeof(*cur_ctl), GFP_KERNEL);
if (!cur_ctl)
goto out_fail;
cur_ctl->ctl_blk_order = i;
if (i == 0) {
ch->head_blk_ctl = cur_ctl;
ch->tail_blk_ctl = cur_ctl;
} else if (i == ch->desc_num - 1) {
prev_ctl->next = cur_ctl;
cur_ctl->next = ch->head_blk_ctl;
} else {
prev_ctl->next = cur_ctl;
}
prev_ctl = cur_ctl;
}
return 0;
out_fail:
wcn36xx_dxe_free_ctl_block(ch);
return -ENOMEM;
}
int wcn36xx_dxe_alloc_ctl_blks(struct wcn36xx *wcn)
{
int ret;
wcn->dxe_tx_l_ch.ch_type = WCN36XX_DXE_CH_TX_L;
wcn->dxe_tx_h_ch.ch_type = WCN36XX_DXE_CH_TX_H;
wcn->dxe_rx_l_ch.ch_type = WCN36XX_DXE_CH_RX_L;
wcn->dxe_rx_h_ch.ch_type = WCN36XX_DXE_CH_RX_H;
wcn->dxe_tx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_L;
wcn->dxe_tx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_TX_H;
wcn->dxe_rx_l_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_L;
wcn->dxe_rx_h_ch.desc_num = WCN36XX_DXE_CH_DESC_NUMB_RX_H;
wcn->dxe_tx_l_ch.dxe_wq = WCN36XX_DXE_WQ_TX_L(wcn);
wcn->dxe_tx_h_ch.dxe_wq = WCN36XX_DXE_WQ_TX_H(wcn);
wcn->dxe_tx_l_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_L_BD;
wcn->dxe_tx_h_ch.ctrl_bd = WCN36XX_DXE_CTRL_TX_H_BD;
wcn->dxe_tx_l_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_L_SKB;
wcn->dxe_tx_h_ch.ctrl_skb = WCN36XX_DXE_CTRL_TX_H_SKB;
wcn->dxe_tx_l_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_L;
wcn->dxe_tx_h_ch.reg_ctrl = WCN36XX_DXE_REG_CTL_TX_H;
wcn->dxe_tx_l_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_L;
wcn->dxe_tx_h_ch.def_ctrl = WCN36XX_DXE_CH_DEFAULT_CTL_TX_H;
/* DXE control block allocation */
ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_l_ch);
if (ret)
goto out_err;
ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_tx_h_ch);
if (ret)
goto out_err;
ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_l_ch);
if (ret)
goto out_err;
ret = wcn36xx_dxe_allocate_ctl_block(&wcn->dxe_rx_h_ch);
if (ret)
goto out_err;
/* Initialize SMSM state Clear TX Enable RING EMPTY STATE */
ret = qcom_smem_state_update_bits(wcn->tx_enable_state,
WCN36XX_SMSM_WLAN_TX_ENABLE |
WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY,
WCN36XX_SMSM_WLAN_TX_RINGS_EMPTY);
if (ret)
goto out_err;
return 0;
out_err:
wcn36xx_err("Failed to allocate DXE control blocks\n");
wcn36xx_dxe_free_ctl_blks(wcn);
return -ENOMEM;
}
void wcn36xx_dxe_free_ctl_blks(struct wcn36xx *wcn)
{
wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_l_ch);
wcn36xx_dxe_free_ctl_block(&wcn->dxe_tx_h_ch);
wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_l_ch);
wcn36xx_dxe_free_ctl_block(&wcn->dxe_rx_h_ch);
}
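/* Allocate one coherent DMA array of hardware descriptors for the channel
 * and bind each control block to its descriptor, chaining phy_next_l so
 * the hardware sees a circular descriptor ring.
 */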
static int wcn36xx_dxe_init_descs(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *wcn_ch)
{
struct device *dev = wcn->dev;
struct wcn36xx_dxe_desc *cur_dxe = NULL;
struct wcn36xx_dxe_desc *prev_dxe = NULL;
struct wcn36xx_dxe_ctl *cur_ctl = NULL;
size_t size;
int i;
size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
wcn_ch->cpu_addr = dma_alloc_coherent(dev, size, &wcn_ch->dma_addr,
GFP_KERNEL);
if (!wcn_ch->cpu_addr)
return -ENOMEM;
cur_dxe = (struct wcn36xx_dxe_desc *)wcn_ch->cpu_addr;
cur_ctl = wcn_ch->head_blk_ctl;
for (i = 0; i < wcn_ch->desc_num; i++) {
cur_ctl->desc = cur_dxe;
cur_ctl->desc_phy_addr = wcn_ch->dma_addr +
i * sizeof(struct wcn36xx_dxe_desc);
switch (wcn_ch->ch_type) {
case WCN36XX_DXE_CH_TX_L:
cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_L;
cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_L(wcn);
break;
case WCN36XX_DXE_CH_TX_H:
cur_dxe->ctrl = WCN36XX_DXE_CTRL_TX_H;
cur_dxe->dst_addr_l = WCN36XX_DXE_WQ_TX_H(wcn);
break;
case WCN36XX_DXE_CH_RX_L:
cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_L;
cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_L;
break;
case WCN36XX_DXE_CH_RX_H:
cur_dxe->ctrl = WCN36XX_DXE_CTRL_RX_H;
cur_dxe->src_addr_l = WCN36XX_DXE_WQ_RX_H;
break;
}
if (i == 0) {
cur_dxe->phy_next_l = 0;
} else if (i > 0 && i < wcn_ch->desc_num - 1) {
prev_dxe->phy_next_l =
cur_ctl->desc_phy_addr;
} else if (i == (wcn_ch->desc_num - 1)) {
prev_dxe->phy_next_l =
cur_ctl->desc_phy_addr;
cur_dxe->phy_next_l =
wcn_ch->head_blk_ctl->desc_phy_addr;
}
cur_ctl = cur_ctl->next;
prev_dxe = cur_dxe;
cur_dxe++;
}
return 0;
}
static void wcn36xx_dxe_deinit_descs(struct device *dev, struct wcn36xx_dxe_ch *wcn_ch)
{
size_t size;
size = wcn_ch->desc_num * sizeof(struct wcn36xx_dxe_desc);
dma_free_coherent(dev, size, wcn_ch->cpu_addr, wcn_ch->dma_addr);
}
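/* Carve BD (buffer descriptor) chunks out of the pool and attach one to
 * every second control block; TX uses descriptor pairs (BD + skb).
 */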
static void wcn36xx_dxe_init_tx_bd(struct wcn36xx_dxe_ch *ch,
struct wcn36xx_dxe_mem_pool *pool)
{
int i, chunk_size = pool->chunk_size;
dma_addr_t bd_phy_addr = pool->phy_addr;
void *bd_cpu_addr = pool->virt_addr;
struct wcn36xx_dxe_ctl *cur = ch->head_blk_ctl;
for (i = 0; i < ch->desc_num; i++) {
/* Only every second dxe needs a bd pointer; the
 * others point to the skb data.
 */
if (!(i & 1)) {
cur->bd_phy_addr = bd_phy_addr;
cur->bd_cpu_addr = bd_cpu_addr;
bd_phy_addr += chunk_size;
bd_cpu_addr += chunk_size;
} else {
cur->bd_phy_addr = 0;
cur->bd_cpu_addr = NULL;
}
cur = cur->next;
}
}
static int wcn36xx_dxe_enable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
int reg_data = 0;
wcn36xx_dxe_read_register(wcn,
WCN36XX_DXE_INT_MASK_REG,
&reg_data);
reg_data |= wcn_ch;
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_INT_MASK_REG,
(int)reg_data);
return 0;
}
static void wcn36xx_dxe_disable_ch_int(struct wcn36xx *wcn, u16 wcn_ch)
{
int reg_data = 0;
wcn36xx_dxe_read_register(wcn,
WCN36XX_DXE_INT_MASK_REG,
&reg_data);
reg_data &= ~wcn_ch;
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_INT_MASK_REG,
(int)reg_data);
}
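/* Allocate an RX skb and DMA-map its data area so the hardware can write
 * a received frame into it through this descriptor.
 */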
static int wcn36xx_dxe_fill_skb(struct device *dev,
struct wcn36xx_dxe_ctl *ctl,
gfp_t gfp)
{
struct wcn36xx_dxe_desc *dxe = ctl->desc;
struct sk_buff *skb;
skb = alloc_skb(WCN36XX_PKT_SIZE, gfp);
if (!skb)
return -ENOMEM;
dxe->dst_addr_l = dma_map_single(dev,
skb_tail_pointer(skb),
WCN36XX_PKT_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(dev, dxe->dst_addr_l)) {
dev_err(dev, "unable to map skb\n");
kfree_skb(skb);
return -ENOMEM;
}
ctl->skb = skb;
return 0;
}
static int wcn36xx_dxe_ch_alloc_skb(struct wcn36xx *wcn,
struct wcn36xx_dxe_ch *wcn_ch)
{
int i;
struct wcn36xx_dxe_ctl *cur_ctl = NULL;
cur_ctl = wcn_ch->head_blk_ctl;
for (i = 0; i < wcn_ch->desc_num; i++) {
wcn36xx_dxe_fill_skb(wcn->dev, cur_ctl, GFP_KERNEL);
cur_ctl = cur_ctl->next;
}
return 0;
}
static void wcn36xx_dxe_ch_free_skbs(struct wcn36xx *wcn,
struct wcn36xx_dxe_ch *wcn_ch)
{
struct wcn36xx_dxe_ctl *cur = wcn_ch->head_blk_ctl;
int i;
for (i = 0; i < wcn_ch->desc_num; i++) {
kfree_skb(cur->skb);
cur = cur->next;
}
}
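/* Complete the frame tracked in tx_ack_skb with the ACK status reported
 * by the firmware and wake the mac80211 queues.
 */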
void wcn36xx_dxe_tx_ack_ind(struct wcn36xx *wcn, u32 status)
{
struct ieee80211_tx_info *info;
struct sk_buff *skb;
unsigned long flags;
spin_lock_irqsave(&wcn->dxe_lock, flags);
skb = wcn->tx_ack_skb;
wcn->tx_ack_skb = NULL;
del_timer(&wcn->tx_ack_timer);
spin_unlock_irqrestore(&wcn->dxe_lock, flags);
if (!skb) {
wcn36xx_warn("Spurious TX complete indication\n");
return;
}
info = IEEE80211_SKB_CB(skb);
if (status == 1)
info->flags |= IEEE80211_TX_STAT_ACK;
else
info->flags &= ~IEEE80211_TX_STAT_ACK;
wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ack status: %d\n", status);
ieee80211_tx_status_irqsafe(wcn->hw, skb);
ieee80211_wake_queues(wcn->hw);
}
static void wcn36xx_dxe_tx_timer(struct timer_list *t)
{
struct wcn36xx *wcn = from_timer(wcn, t, tx_ack_timer);
struct ieee80211_tx_info *info;
unsigned long flags;
struct sk_buff *skb;
/* TX Timeout */
wcn36xx_dbg(WCN36XX_DBG_DXE, "TX timeout\n");
spin_lock_irqsave(&wcn->dxe_lock, flags);
skb = wcn->tx_ack_skb;
wcn->tx_ack_skb = NULL;
spin_unlock_irqrestore(&wcn->dxe_lock, flags);
if (!skb)
return;
info = IEEE80211_SKB_CB(skb);
info->flags &= ~IEEE80211_TX_STAT_ACK;
info->flags &= ~IEEE80211_TX_STAT_NOACK_TRANSMITTED;
ieee80211_tx_status_irqsafe(wcn->hw, skb);
ieee80211_wake_queues(wcn->hw);
}
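/* Walk the ring from the tail, reclaiming descriptors the hardware has
 * released: unmap the payload, report TX status (or arm the ack timer
 * when an explicit TX-status indication was requested) and free or hand
 * off the skb.
 */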
static void reap_tx_dxes(struct wcn36xx *wcn, struct wcn36xx_dxe_ch *ch)
{
struct wcn36xx_dxe_ctl *ctl;
struct ieee80211_tx_info *info;
unsigned long flags;
/*
 * Loop at least once: when the ring is completely full, head and
 * tail point to the same element, so a plain while loop would not
 * iterate at all.
 */
spin_lock_irqsave(&ch->lock, flags);
ctl = ch->tail_blk_ctl;
do {
if (READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_VLD)
break;
if (ctl->skb &&
READ_ONCE(ctl->desc->ctrl) & WCN36xx_DXE_CTRL_EOP) {
dma_unmap_single(wcn->dev, ctl->desc->src_addr_l,
ctl->skb->len, DMA_TO_DEVICE);
info = IEEE80211_SKB_CB(ctl->skb);
if (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) {
if (info->flags & IEEE80211_TX_CTL_NO_ACK) {
info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
ieee80211_tx_status_irqsafe(wcn->hw, ctl->skb);
} else {
/* Wait for the TX ack indication or timeout... */
spin_lock(&wcn->dxe_lock);
if (WARN_ON(wcn->tx_ack_skb))
ieee80211_free_txskb(wcn->hw, wcn->tx_ack_skb);
wcn->tx_ack_skb = ctl->skb; /* Tracking ref */
mod_timer(&wcn->tx_ack_timer, jiffies + HZ / 10);
spin_unlock(&wcn->dxe_lock);
}
/* do not free, ownership transferred to mac80211 status cb */
} else {
ieee80211_free_txskb(wcn->hw, ctl->skb);
}
if (wcn->queues_stopped) {
wcn->queues_stopped = false;
ieee80211_wake_queues(wcn->hw);
}
ctl->skb = NULL;
}
ctl = ctl->next;
} while (ctl != ch->head_blk_ctl);
ch->tail_blk_ctl = ctl;
spin_unlock_irqrestore(&ch->lock, flags);
}
static irqreturn_t wcn36xx_irq_tx_complete(int irq, void *dev)
{
struct wcn36xx *wcn = (struct wcn36xx *)dev;
int int_src, int_reason;
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
if (int_src & WCN36XX_INT_MASK_CHAN_TX_H) {
wcn36xx_dxe_read_register(wcn,
WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_H,
&int_reason);
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_CLR,
WCN36XX_INT_MASK_CHAN_TX_H);
if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_ERR_CLR,
WCN36XX_INT_MASK_CHAN_TX_H);
wcn36xx_err("DXE IRQ reported error: 0x%x in high TX channel\n",
int_src);
}
if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_DONE_CLR,
WCN36XX_INT_MASK_CHAN_TX_H);
}
if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_ED_CLR,
WCN36XX_INT_MASK_CHAN_TX_H);
}
wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready high, reason %08x\n",
int_reason);
if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
WCN36XX_CH_STAT_INT_ED_MASK)) {
reap_tx_dxes(wcn, &wcn->dxe_tx_h_ch);
}
}
if (int_src & WCN36XX_INT_MASK_CHAN_TX_L) {
wcn36xx_dxe_read_register(wcn,
WCN36XX_DXE_CH_STATUS_REG_ADDR_TX_L,
&int_reason);
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_CLR,
WCN36XX_INT_MASK_CHAN_TX_L);
if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_ERR_CLR,
WCN36XX_INT_MASK_CHAN_TX_L);
wcn36xx_err("DXE IRQ reported error: 0x%x in low TX channel\n",
int_src);
}
if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK) {
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_DONE_CLR,
WCN36XX_INT_MASK_CHAN_TX_L);
}
if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK) {
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_ED_CLR,
WCN36XX_INT_MASK_CHAN_TX_L);
}
wcn36xx_dbg(WCN36XX_DBG_DXE, "dxe tx ready low, reason %08x\n",
int_reason);
if (int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
WCN36XX_CH_STAT_INT_ED_MASK)) {
reap_tx_dxes(wcn, &wcn->dxe_tx_l_ch);
}
}
return IRQ_HANDLED;
}
static irqreturn_t wcn36xx_irq_rx_ready(int irq, void *dev)
{
struct wcn36xx *wcn = (struct wcn36xx *)dev;
wcn36xx_dxe_rx_frame(wcn);
return IRQ_HANDLED;
}
static int wcn36xx_dxe_request_irqs(struct wcn36xx *wcn)
{
int ret;
ret = request_irq(wcn->tx_irq, wcn36xx_irq_tx_complete,
IRQF_TRIGGER_HIGH, "wcn36xx_tx", wcn);
if (ret) {
wcn36xx_err("failed to alloc tx irq\n");
goto out_err;
}
ret = request_irq(wcn->rx_irq, wcn36xx_irq_rx_ready, IRQF_TRIGGER_HIGH,
"wcn36xx_rx", wcn);
if (ret) {
wcn36xx_err("failed to alloc rx irq\n");
goto out_txirq;
}
enable_irq_wake(wcn->rx_irq);
return 0;
out_txirq:
free_irq(wcn->tx_irq, wcn);
out_err:
return ret;
}
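/* Acknowledge the RX channel interrupt, then drain every descriptor whose
 * VLD bit the hardware has cleared: replace its skb, pass the filled one
 * up the stack, and re-arm the descriptor.
 */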
static int wcn36xx_rx_handle_packets(struct wcn36xx *wcn,
struct wcn36xx_dxe_ch *ch,
u32 ctrl,
u32 en_mask,
u32 int_mask,
u32 status_reg)
{
struct wcn36xx_dxe_desc *dxe;
struct wcn36xx_dxe_ctl *ctl;
dma_addr_t dma_addr;
struct sk_buff *skb;
u32 int_reason;
int ret;
wcn36xx_dxe_read_register(wcn, status_reg, &int_reason);
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_0_INT_CLR, int_mask);
if (int_reason & WCN36XX_CH_STAT_INT_ERR_MASK) {
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_ERR_CLR,
int_mask);
wcn36xx_err("DXE IRQ reported error on RX channel\n");
}
if (int_reason & WCN36XX_CH_STAT_INT_DONE_MASK)
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_DONE_CLR,
int_mask);
if (int_reason & WCN36XX_CH_STAT_INT_ED_MASK)
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_0_INT_ED_CLR,
int_mask);
if (!(int_reason & (WCN36XX_CH_STAT_INT_DONE_MASK |
WCN36XX_CH_STAT_INT_ED_MASK)))
return 0;
spin_lock(&ch->lock);
ctl = ch->head_blk_ctl;
dxe = ctl->desc;
while (!(READ_ONCE(dxe->ctrl) & WCN36xx_DXE_CTRL_VLD)) {
/* do not read until we own DMA descriptor */
dma_rmb();
/* read/modify DMA descriptor */
skb = ctl->skb;
dma_addr = dxe->dst_addr_l;
ret = wcn36xx_dxe_fill_skb(wcn->dev, ctl, GFP_ATOMIC);
if (ret == 0) {
/* New skb allocation succeeded. Use the new one and
 * pass the old one up to the network stack.
 */
dma_unmap_single(wcn->dev, dma_addr, WCN36XX_PKT_SIZE,
DMA_FROM_DEVICE);
wcn36xx_rx_skb(wcn, skb);
}
/* Otherwise keep the old skb, not submitted, and reuse it for
 * RX DMA (dropping the packet it contained).
 */
/* flush descriptor changes before re-marking as valid */
dma_wmb();
dxe->ctrl = ctrl;
ctl = ctl->next;
dxe = ctl->desc;
}
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_ENCH_ADDR, en_mask);
ch->head_blk_ctl = ctl;
spin_unlock(&ch->lock);
return 0;
}
void wcn36xx_dxe_rx_frame(struct wcn36xx *wcn)
{
int int_src;
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_INT_SRC_RAW_REG, &int_src);
/* RX_LOW_PRI */
if (int_src & WCN36XX_DXE_INT_CH1_MASK)
wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_l_ch,
WCN36XX_DXE_CTRL_RX_L,
WCN36XX_DXE_INT_CH1_MASK,
WCN36XX_INT_MASK_CHAN_RX_L,
WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_L);
/* RX_HIGH_PRI */
if (int_src & WCN36XX_DXE_INT_CH3_MASK)
wcn36xx_rx_handle_packets(wcn, &wcn->dxe_rx_h_ch,
WCN36XX_DXE_CTRL_RX_H,
WCN36XX_DXE_INT_CH3_MASK,
WCN36XX_INT_MASK_CHAN_RX_H,
WCN36XX_DXE_CH_STATUS_REG_ADDR_RX_H);
if (!int_src)
wcn36xx_warn("No DXE interrupt pending\n");
}
int wcn36xx_dxe_allocate_mem_pools(struct wcn36xx *wcn)
{
size_t s;
void *cpu_addr;
/* Allocate BD headers for MGMT frames */
/* Where this comes from, ask QC */
wcn->mgmt_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->mgmt_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_H;
cpu_addr = dma_alloc_coherent(wcn->dev, s,
&wcn->mgmt_mem_pool.phy_addr,
GFP_KERNEL);
if (!cpu_addr)
goto out_err;
wcn->mgmt_mem_pool.virt_addr = cpu_addr;
/* Allocate BD headers for DATA frames */
/* Where this comes from, ask QC */
wcn->data_mem_pool.chunk_size = WCN36XX_BD_CHUNK_SIZE +
16 - (WCN36XX_BD_CHUNK_SIZE % 8);
s = wcn->data_mem_pool.chunk_size * WCN36XX_DXE_CH_DESC_NUMB_TX_L;
cpu_addr = dma_alloc_coherent(wcn->dev, s,
&wcn->data_mem_pool.phy_addr,
GFP_KERNEL);
if (!cpu_addr)
goto out_err;
wcn->data_mem_pool.virt_addr = cpu_addr;
return 0;
out_err:
wcn36xx_dxe_free_mem_pools(wcn);
wcn36xx_err("Failed to allocate BD mempool\n");
return -ENOMEM;
}
void wcn36xx_dxe_free_mem_pools(struct wcn36xx *wcn)
{
if (wcn->mgmt_mem_pool.virt_addr)
dma_free_coherent(wcn->dev, wcn->mgmt_mem_pool.chunk_size *
WCN36XX_DXE_CH_DESC_NUMB_TX_H,
wcn->mgmt_mem_pool.virt_addr,
wcn->mgmt_mem_pool.phy_addr);
if (wcn->data_mem_pool.virt_addr) {
dma_free_coherent(wcn->dev, wcn->data_mem_pool.chunk_size *
WCN36XX_DXE_CH_DESC_NUMB_TX_L,
wcn->data_mem_pool.virt_addr,
wcn->data_mem_pool.phy_addr);
}
}
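/* Queue one frame on a TX channel as a BD + skb descriptor pair. The
 * first descriptor carries the buffer descriptor header, the second
 * carries the skb payload; both are marked valid only after every other
 * field has been written.
 */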
int wcn36xx_dxe_tx_frame(struct wcn36xx *wcn,
struct wcn36xx_vif *vif_priv,
struct wcn36xx_tx_bd *bd,
struct sk_buff *skb,
bool is_low)
{
struct wcn36xx_dxe_desc *desc_bd, *desc_skb;
struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
struct wcn36xx_dxe_ch *ch = NULL;
unsigned long flags;
int ret;
ch = is_low ? &wcn->dxe_tx_l_ch : &wcn->dxe_tx_h_ch;
spin_lock_irqsave(&ch->lock, flags);
ctl_bd = ch->head_blk_ctl;
ctl_skb = ctl_bd->next;
/*
 * If the skb is not NULL, we have reached the tail of the ring, so
 * the ring is full. Stop the queues so mac80211 backs off until the
 * ring has an empty slot again.
 */
if (ctl_skb->skb) {
ieee80211_stop_queues(wcn->hw);
wcn->queues_stopped = true;
spin_unlock_irqrestore(&ch->lock, flags);
return -EBUSY;
}
if (unlikely(ctl_skb->bd_cpu_addr)) {
wcn36xx_err("bd_cpu_addr cannot be NULL for skb DXE\n");
ret = -EINVAL;
goto unlock;
}
desc_bd = ctl_bd->desc;
desc_skb = ctl_skb->desc;
ctl_bd->skb = NULL;
/* write buffer descriptor */
memcpy(ctl_bd->bd_cpu_addr, bd, sizeof(*bd));
/* Set source address of the BD we send */
desc_bd->src_addr_l = ctl_bd->bd_phy_addr;
desc_bd->dst_addr_l = ch->dxe_wq;
desc_bd->fr_len = sizeof(struct wcn36xx_tx_bd);
wcn36xx_dbg(WCN36XX_DBG_DXE, "DXE TX\n");
wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC1 >>> ",
(char *)desc_bd, sizeof(*desc_bd));
wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP,
"BD >>> ", (char *)ctl_bd->bd_cpu_addr,
sizeof(struct wcn36xx_tx_bd));
desc_skb->src_addr_l = dma_map_single(wcn->dev,
skb->data,
skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(wcn->dev, desc_skb->src_addr_l)) {
dev_err(wcn->dev, "unable to DMA map src_addr_l\n");
ret = -ENOMEM;
goto unlock;
}
ctl_skb->skb = skb;
desc_skb->dst_addr_l = ch->dxe_wq;
desc_skb->fr_len = ctl_skb->skb->len;
wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "DESC2 >>> ",
(char *)desc_skb, sizeof(*desc_skb));
wcn36xx_dbg_dump(WCN36XX_DBG_DXE_DUMP, "SKB >>> ",
(char *)ctl_skb->skb->data, ctl_skb->skb->len);
/* Move the head of the ring to the next empty descriptor */
ch->head_blk_ctl = ctl_skb->next;
/* Commit all previous writes and set descriptors to VALID */
wmb();
desc_skb->ctrl = ch->ctrl_skb;
wmb();
desc_bd->ctrl = ch->ctrl_bd;
/*
 * When connected, the chip can be in sleep mode while a data frame
 * is being sent, and writing to the register will not wake it up.
 * Instead, notify the chip about the new frame through the SMSM bus.
 */
if (is_low && vif_priv->pw_state == WCN36XX_BMPS) {
qcom_smem_state_update_bits(wcn->tx_rings_empty_state,
WCN36XX_SMSM_WLAN_TX_ENABLE,
WCN36XX_SMSM_WLAN_TX_ENABLE);
} else {
/* indicate End Of Packet and generate interrupt on descriptor
* done.
*/
wcn36xx_dxe_write_register(wcn,
ch->reg_ctrl, ch->def_ctrl);
}
ret = 0;
unlock:
spin_unlock_irqrestore(&ch->lock, flags);
return ret;
}
static bool _wcn36xx_dxe_tx_channel_is_empty(struct wcn36xx_dxe_ch *ch)
{
unsigned long flags;
struct wcn36xx_dxe_ctl *ctl_bd_start, *ctl_skb_start;
struct wcn36xx_dxe_ctl *ctl_bd, *ctl_skb;
bool ret = true;
spin_lock_irqsave(&ch->lock, flags);
/* Loop through ring buffer looking for nonempty entries. */
ctl_bd_start = ch->head_blk_ctl;
ctl_bd = ctl_bd_start;
ctl_skb_start = ctl_bd_start->next;
ctl_skb = ctl_skb_start;
do {
if (ctl_skb->skb) {
ret = false;
goto unlock;
}
ctl_bd = ctl_skb->next;
ctl_skb = ctl_bd->next;
} while (ctl_skb != ctl_skb_start);
unlock:
spin_unlock_irqrestore(&ch->lock, flags);
return ret;
}
int wcn36xx_dxe_tx_flush(struct wcn36xx *wcn)
{
int i = 0;
/* Called with mac80211 queues stopped. Wait for empty HW queues. */
do {
if (_wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_l_ch) &&
_wcn36xx_dxe_tx_channel_is_empty(&wcn->dxe_tx_h_ch)) {
return 0;
}
/* This ieee80211_ops callback is specifically allowed to
* sleep.
*/
usleep_range(1000, 1100);
} while (++i < 100);
return -EBUSY;
}
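/* Bring the DXE block out of reset, set up the descriptor rings for all
 * four channels, preallocate RX buffers, request the TX/RX IRQs and
 * enable the per-channel interrupts.
 */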
int wcn36xx_dxe_init(struct wcn36xx *wcn)
{
int reg_data = 0, ret;
reg_data = WCN36XX_DXE_REG_RESET;
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
/* Select channels for rx avail and xfer done interrupts... */
reg_data = (WCN36XX_DXE_INT_CH3_MASK | WCN36XX_DXE_INT_CH1_MASK) << 16 |
WCN36XX_DXE_INT_CH0_MASK | WCN36XX_DXE_INT_CH4_MASK;
if (wcn->is_pronto)
wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_PRONTO, reg_data);
else
wcn36xx_ccu_write_register(wcn, WCN36XX_CCU_DXE_INT_SELECT_RIVA, reg_data);
/***************************************/
/* Init descriptors for TX LOW channel */
/***************************************/
ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_tx_l_ch);
if (ret) {
dev_err(wcn->dev, "Error allocating descriptor\n");
return ret;
}
wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_l_ch, &wcn->data_mem_pool);
/* Write channel head to a NEXT register */
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_L,
wcn->dxe_tx_l_ch.head_blk_ctl->desc_phy_addr);
/* Program DMA destination addr for TX LOW */
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_CH_DEST_ADDR_TX_L,
WCN36XX_DXE_WQ_TX_L(wcn));
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
/***************************************/
/* Init descriptors for TX HIGH channel */
/***************************************/
ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_tx_h_ch);
if (ret) {
dev_err(wcn->dev, "Error allocating descriptor\n");
goto out_err_txh_ch;
}
wcn36xx_dxe_init_tx_bd(&wcn->dxe_tx_h_ch, &wcn->mgmt_mem_pool);
/* Write channel head to a NEXT register */
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_TX_H,
wcn->dxe_tx_h_ch.head_blk_ctl->desc_phy_addr);
/* Program DMA destination addr for TX HIGH */
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_CH_DEST_ADDR_TX_H,
WCN36XX_DXE_WQ_TX_H(wcn));
wcn36xx_dxe_read_register(wcn, WCN36XX_DXE_REG_CH_EN, &reg_data);
/***************************************/
/* Init descriptors for RX LOW channel */
/***************************************/
ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_rx_l_ch);
if (ret) {
dev_err(wcn->dev, "Error allocating descriptor\n");
goto out_err_rxl_ch;
}
/* For RX we need to preallocate buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_l_ch);
/* Write channel head to a NEXT register */
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_L,
wcn->dxe_rx_l_ch.head_blk_ctl->desc_phy_addr);
/* Write DMA source address */
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_CH_SRC_ADDR_RX_L,
WCN36XX_DXE_WQ_RX_L);
/* Program preallocated destination address */
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_CH_DEST_ADDR_RX_L,
wcn->dxe_rx_l_ch.head_blk_ctl->desc->phy_next_l);
/* Enable default control registers */
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_REG_CTL_RX_L,
WCN36XX_DXE_CH_DEFAULT_CTL_RX_L);
/***************************************/
/* Init descriptors for RX HIGH channel */
/***************************************/
ret = wcn36xx_dxe_init_descs(wcn, &wcn->dxe_rx_h_ch);
if (ret) {
dev_err(wcn->dev, "Error allocating descriptor\n");
goto out_err_rxh_ch;
}
/* For RX we need to preallocate buffers */
wcn36xx_dxe_ch_alloc_skb(wcn, &wcn->dxe_rx_h_ch);
/* Write channel head to a NEXT register */
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_CH_NEXT_DESC_ADDR_RX_H,
wcn->dxe_rx_h_ch.head_blk_ctl->desc_phy_addr);
/* Write DMA source address */
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_CH_SRC_ADDR_RX_H,
WCN36XX_DXE_WQ_RX_H);
/* Program preallocated destination address */
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_CH_DEST_ADDR_RX_H,
wcn->dxe_rx_h_ch.head_blk_ctl->desc->phy_next_l);
/* Enable default control registers */
wcn36xx_dxe_write_register(wcn,
WCN36XX_DXE_REG_CTL_RX_H,
WCN36XX_DXE_CH_DEFAULT_CTL_RX_H);
ret = wcn36xx_dxe_request_irqs(wcn);
if (ret < 0)
goto out_err_irq;
timer_setup(&wcn->tx_ack_timer, wcn36xx_dxe_tx_timer, 0);
/* Enable channel interrupts */
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
wcn36xx_dxe_enable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
return 0;
out_err_irq:
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
out_err_rxh_ch:
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
out_err_rxl_ch:
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
out_err_txh_ch:
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
return ret;
}
void wcn36xx_dxe_deinit(struct wcn36xx *wcn)
{
int reg_data = 0;
/* Disable channel interrupts */
wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_H);
wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_RX_L);
wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_H);
wcn36xx_dxe_disable_ch_int(wcn, WCN36XX_INT_MASK_CHAN_TX_L);
free_irq(wcn->tx_irq, wcn);
free_irq(wcn->rx_irq, wcn);
del_timer(&wcn->tx_ack_timer);
if (wcn->tx_ack_skb) {
ieee80211_tx_status_irqsafe(wcn->hw, wcn->tx_ack_skb);
wcn->tx_ack_skb = NULL;
}
/* Put the DXE block into reset before freeing memory */
reg_data = WCN36XX_DXE_REG_RESET;
wcn36xx_dxe_write_register(wcn, WCN36XX_DXE_REG_CSR_RESET, reg_data);
wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_l_ch);
wcn36xx_dxe_ch_free_skbs(wcn, &wcn->dxe_rx_h_ch);
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_l_ch);
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_tx_h_ch);
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_l_ch);
wcn36xx_dxe_deinit_descs(wcn->dev, &wcn->dxe_rx_h_ch);
}
|
linux-master
|
drivers/net/wireless/ath/wcn36xx/dxe.c
|
// SPDX-License-Identifier: GPL-2.0-only
#include "wcn36xx.h"
#include "firmware.h"
#define DEFINE(s) [s] = #s
static const char * const wcn36xx_firmware_caps_names[] = {
DEFINE(MCC),
DEFINE(P2P),
DEFINE(DOT11AC),
DEFINE(SLM_SESSIONIZATION),
DEFINE(DOT11AC_OPMODE),
DEFINE(SAP32STA),
DEFINE(TDLS),
DEFINE(P2P_GO_NOA_DECOUPLE_INIT_SCAN),
DEFINE(WLANACTIVE_OFFLOAD),
DEFINE(BEACON_OFFLOAD),
DEFINE(SCAN_OFFLOAD),
DEFINE(ROAM_OFFLOAD),
DEFINE(BCN_MISS_OFFLOAD),
DEFINE(STA_POWERSAVE),
DEFINE(STA_ADVANCED_PWRSAVE),
DEFINE(AP_UAPSD),
DEFINE(AP_DFS),
DEFINE(BLOCKACK),
DEFINE(PHY_ERR),
DEFINE(BCN_FILTER),
DEFINE(RTT),
DEFINE(RATECTRL),
DEFINE(WOW),
DEFINE(WLAN_ROAM_SCAN_OFFLOAD),
DEFINE(SPECULATIVE_PS_POLL),
DEFINE(SCAN_SCH),
DEFINE(IBSS_HEARTBEAT_OFFLOAD),
DEFINE(WLAN_SCAN_OFFLOAD),
DEFINE(WLAN_PERIODIC_TX_PTRN),
DEFINE(ADVANCE_TDLS),
DEFINE(BATCH_SCAN),
DEFINE(FW_IN_TX_PATH),
DEFINE(EXTENDED_NSOFFLOAD_SLOT),
DEFINE(CH_SWITCH_V1),
DEFINE(HT40_OBSS_SCAN),
DEFINE(UPDATE_CHANNEL_LIST),
DEFINE(WLAN_MCADDR_FLT),
DEFINE(WLAN_CH144),
DEFINE(NAN),
DEFINE(TDLS_SCAN_COEXISTENCE),
DEFINE(LINK_LAYER_STATS_MEAS),
DEFINE(MU_MIMO),
DEFINE(EXTENDED_SCAN),
DEFINE(DYNAMIC_WMM_PS),
DEFINE(MAC_SPOOFED_SCAN),
DEFINE(BMU_ERROR_GENERIC_RECOVERY),
DEFINE(DISA),
DEFINE(FW_STATS),
DEFINE(WPS_PRBRSP_TMPL),
DEFINE(BCN_IE_FLT_DELTA),
DEFINE(TDLS_OFF_CHANNEL),
DEFINE(RTT3),
DEFINE(MGMT_FRAME_LOGGING),
DEFINE(ENHANCED_TXBD_COMPLETION),
DEFINE(LOGGING_ENHANCEMENT),
DEFINE(EXT_SCAN_ENHANCED),
DEFINE(MEMORY_DUMP_SUPPORTED),
DEFINE(PER_PKT_STATS_SUPPORTED),
DEFINE(EXT_LL_STAT),
DEFINE(WIFI_CONFIG),
DEFINE(ANTENNA_DIVERSITY_SELECTION),
};
#undef DEFINE
const char *wcn36xx_firmware_get_cap_name(enum wcn36xx_firmware_feat_caps x)
{
if (x >= ARRAY_SIZE(wcn36xx_firmware_caps_names))
return "UNKNOWN";
return wcn36xx_firmware_caps_names[x];
}
void wcn36xx_firmware_set_feat_caps(u32 *bitmap,
enum wcn36xx_firmware_feat_caps cap)
{
int arr_idx, bit_idx;
if (cap < 0 || cap > 127) {
wcn36xx_warn("error cap idx %d\n", cap);
return;
}
arr_idx = cap / 32;
bit_idx = cap % 32;
bitmap[arr_idx] |= (1 << bit_idx);
}
int wcn36xx_firmware_get_feat_caps(u32 *bitmap,
enum wcn36xx_firmware_feat_caps cap)
{
int arr_idx, bit_idx;
if (cap < 0 || cap > 127) {
wcn36xx_warn("error cap idx %d\n", cap);
return -EINVAL;
}
arr_idx = cap / 32;
bit_idx = cap % 32;
return (bitmap[arr_idx] & (1 << bit_idx)) ? 1 : 0;
}
void wcn36xx_firmware_clear_feat_caps(u32 *bitmap,
enum wcn36xx_firmware_feat_caps cap)
{
int arr_idx, bit_idx;
if (cap < 0 || cap > 127) {
wcn36xx_warn("error cap idx %d\n", cap);
return;
}
arr_idx = cap / 32;
bit_idx = cap % 32;
bitmap[arr_idx] &= ~(1 << bit_idx);
}
|
linux-master
|
drivers/net/wireless/ath/wcn36xx/firmware.c
|
/*
* Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <net/netlink.h>
#include <linux/firmware.h>
#include <net/cfg80211.h>
#include "wcn36xx.h"
#include "testmode.h"
#include "testmode_i.h"
#include "hal.h"
#include "smd.h"
static const struct nla_policy wcn36xx_tm_policy[WCN36XX_TM_ATTR_MAX + 1] = {
[WCN36XX_TM_ATTR_CMD] = { .type = NLA_U16 },
[WCN36XX_TM_ATTR_DATA] = { .type = NLA_BINARY,
.len = WCN36XX_TM_DATA_MAX_LEN },
};
struct build_release_number {
u16 drv_major;
u16 drv_minor;
u16 drv_patch;
u16 drv_build;
u16 ptt_max;
u16 ptt_min;
u16 fw_ver;
} __packed;
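/* Handle WCN36XX_TM_CMD_PTT: answer MSG_GET_BUILD_RELEASE_NUMBER from
 * the cached firmware version data, or forward any other FTM message to
 * the firmware and relay its response back through nl80211.
 */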
static int wcn36xx_tm_cmd_ptt(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct nlattr *tb[])
{
int ret = 0, buf_len;
void *buf;
struct ftm_rsp_msg *msg, *rsp = NULL;
struct sk_buff *skb;
if (!tb[WCN36XX_TM_ATTR_DATA])
return -EINVAL;
buf = nla_data(tb[WCN36XX_TM_ATTR_DATA]);
buf_len = nla_len(tb[WCN36XX_TM_ATTR_DATA]);
msg = (struct ftm_rsp_msg *)buf;
wcn36xx_dbg(WCN36XX_DBG_TESTMODE,
"testmode cmd wmi msg_id 0x%04X msg_len %d buf %pK buf_len %d\n",
msg->msg_id, msg->msg_body_length,
buf, buf_len);
wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "REQ ", buf, buf_len);
if (msg->msg_id == MSG_GET_BUILD_RELEASE_NUMBER) {
struct build_release_number *body =
(struct build_release_number *)
msg->msg_response;
body->drv_major = wcn->fw_major;
body->drv_minor = wcn->fw_minor;
body->drv_patch = wcn->fw_version;
body->drv_build = wcn->fw_revision;
body->ptt_max = 10;
body->ptt_min = 0;
rsp = msg;
rsp->resp_status = 0;
} else {
wcn36xx_dbg(WCN36XX_DBG_TESTMODE,
"PPT Request >> HAL size %d\n",
msg->msg_body_length);
msg->resp_status = wcn36xx_smd_process_ptt_msg(wcn, vif, msg,
msg->msg_body_length, (void *)(&rsp));
wcn36xx_dbg(WCN36XX_DBG_TESTMODE,
"Response status = %d\n",
msg->resp_status);
if (rsp)
wcn36xx_dbg(WCN36XX_DBG_TESTMODE,
"PPT Response << HAL size %d\n",
rsp->msg_body_length);
}
if (!rsp) {
rsp = msg;
wcn36xx_warn("No response! Echoing request with response status %d\n",
rsp->resp_status);
}
wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "RSP ",
rsp, rsp->msg_body_length);
skb = cfg80211_testmode_alloc_reply_skb(wcn->hw->wiphy,
nla_total_size(msg->msg_body_length));
if (!skb) {
ret = -ENOMEM;
goto out;
}
ret = nla_put(skb, WCN36XX_TM_ATTR_DATA, rsp->msg_body_length, rsp);
if (ret) {
kfree_skb(skb);
goto out;
}
ret = cfg80211_testmode_reply(skb);
out:
if (rsp != msg)
kfree(rsp);
return ret;
}
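/* nl80211 test-mode entry point: parse the test-mode attributes and
 * dispatch; only WCN36XX_TM_CMD_PTT is currently supported.
 */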
int wcn36xx_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct wcn36xx *wcn = hw->priv;
struct nlattr *tb[WCN36XX_TM_ATTR_MAX + 1];
int ret = 0;
unsigned short attr;
wcn36xx_dbg_dump(WCN36XX_DBG_TESTMODE_DUMP, "Data:", data, len);
ret = nla_parse_deprecated(tb, WCN36XX_TM_ATTR_MAX, data, len,
wcn36xx_tm_policy, NULL);
if (ret)
return ret;
if (!tb[WCN36XX_TM_ATTR_CMD])
return -EINVAL;
attr = nla_get_u16(tb[WCN36XX_TM_ATTR_CMD]);
if (attr != WCN36XX_TM_CMD_PTT)
return -EOPNOTSUPP;
return wcn36xx_tm_cmd_ptt(wcn, vif, tb);
}
|
linux-master
|
drivers/net/wireless/ath/wcn36xx/testmode.c
|
/*
* Copyright (c) 2013 Eugene Krasnikov <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/bitfield.h>
#include <linux/etherdevice.h>
#include <linux/firmware.h>
#include <linux/bitops.h>
#include <linux/rpmsg.h>
#include "smd.h"
#include "firmware.h"
struct wcn36xx_cfg_val {
u32 cfg_id;
u32 value;
};
#define WCN36XX_CFG_VAL(id, val) \
{ \
.cfg_id = WCN36XX_HAL_CFG_ ## id, \
.value = val \
}
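/* Default firmware configuration values sent as TLVs with HAL_START_REQ.
 * The WCN3680 table below carries additional entries (TDLS, antenna
 * diversity, BTC tuning) on top of the common set.
 */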
static struct wcn36xx_cfg_val wcn36xx_cfg_vals[] = {
WCN36XX_CFG_VAL(CURRENT_TX_ANTENNA, 1),
WCN36XX_CFG_VAL(CURRENT_RX_ANTENNA, 1),
WCN36XX_CFG_VAL(LOW_GAIN_OVERRIDE, 0),
WCN36XX_CFG_VAL(POWER_STATE_PER_CHAIN, 785),
WCN36XX_CFG_VAL(CAL_PERIOD, 5),
WCN36XX_CFG_VAL(CAL_CONTROL, 1),
WCN36XX_CFG_VAL(PROXIMITY, 0),
WCN36XX_CFG_VAL(NETWORK_DENSITY, 3),
WCN36XX_CFG_VAL(MAX_MEDIUM_TIME, 6000),
WCN36XX_CFG_VAL(MAX_MPDUS_IN_AMPDU, 64),
WCN36XX_CFG_VAL(RTS_THRESHOLD, 2347),
WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 15),
WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 15),
WCN36XX_CFG_VAL(FRAGMENTATION_THRESHOLD, 8000),
WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ZERO, 5),
WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ONE, 10),
WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_TWO, 15),
WCN36XX_CFG_VAL(FIXED_RATE, 0),
WCN36XX_CFG_VAL(RETRYRATE_POLICY, 4),
WCN36XX_CFG_VAL(RETRYRATE_SECONDARY, 0),
WCN36XX_CFG_VAL(RETRYRATE_TERTIARY, 0),
WCN36XX_CFG_VAL(FORCE_POLICY_PROTECTION, 5),
WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_24GHZ, 1),
WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_5GHZ, 5),
WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_5GHZ, 5),
WCN36XX_CFG_VAL(MAX_BA_SESSIONS, 40),
WCN36XX_CFG_VAL(PS_DATA_INACTIVITY_TIMEOUT, 200),
WCN36XX_CFG_VAL(PS_ENABLE_BCN_FILTER, 1),
WCN36XX_CFG_VAL(PS_ENABLE_RSSI_MONITOR, 1),
WCN36XX_CFG_VAL(NUM_BEACON_PER_RSSI_AVERAGE, 20),
WCN36XX_CFG_VAL(STATS_PERIOD, 10),
WCN36XX_CFG_VAL(CFP_MAX_DURATION, 30000),
WCN36XX_CFG_VAL(FRAME_TRANS_ENABLED, 0),
WCN36XX_CFG_VAL(BA_THRESHOLD_HIGH, 128),
WCN36XX_CFG_VAL(MAX_BA_BUFFERS, 2560),
WCN36XX_CFG_VAL(DYNAMIC_PS_POLL_VALUE, 0),
WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_BT, 120000),
WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000),
WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
WCN36XX_CFG_VAL(ENABLE_DYNAMIC_RA_START_RATE, 133), /* MCS 5 */
WCN36XX_CFG_VAL(LINK_FAIL_TX_CNT, 1000),
};
static struct wcn36xx_cfg_val wcn3680_cfg_vals[] = {
WCN36XX_CFG_VAL(CURRENT_TX_ANTENNA, 1),
WCN36XX_CFG_VAL(CURRENT_RX_ANTENNA, 1),
WCN36XX_CFG_VAL(LOW_GAIN_OVERRIDE, 0),
WCN36XX_CFG_VAL(POWER_STATE_PER_CHAIN, 785),
WCN36XX_CFG_VAL(CAL_PERIOD, 5),
WCN36XX_CFG_VAL(CAL_CONTROL, 1),
WCN36XX_CFG_VAL(PROXIMITY, 0),
WCN36XX_CFG_VAL(NETWORK_DENSITY, 3),
WCN36XX_CFG_VAL(MAX_MEDIUM_TIME, 4096),
WCN36XX_CFG_VAL(MAX_MPDUS_IN_AMPDU, 64),
WCN36XX_CFG_VAL(RTS_THRESHOLD, 2347),
WCN36XX_CFG_VAL(SHORT_RETRY_LIMIT, 15),
WCN36XX_CFG_VAL(LONG_RETRY_LIMIT, 15),
WCN36XX_CFG_VAL(FRAGMENTATION_THRESHOLD, 8000),
WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ZERO, 5),
WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_ONE, 10),
WCN36XX_CFG_VAL(DYNAMIC_THRESHOLD_TWO, 15),
WCN36XX_CFG_VAL(FIXED_RATE, 0),
WCN36XX_CFG_VAL(RETRYRATE_POLICY, 4),
WCN36XX_CFG_VAL(RETRYRATE_SECONDARY, 0),
WCN36XX_CFG_VAL(RETRYRATE_TERTIARY, 0),
WCN36XX_CFG_VAL(FORCE_POLICY_PROTECTION, 5),
WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_24GHZ, 1),
WCN36XX_CFG_VAL(FIXED_RATE_MULTICAST_5GHZ, 5),
WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_24GHZ, 1),
WCN36XX_CFG_VAL(DEFAULT_RATE_INDEX_5GHZ, 5),
WCN36XX_CFG_VAL(MAX_BA_SESSIONS, 40),
WCN36XX_CFG_VAL(PS_DATA_INACTIVITY_TIMEOUT, 200),
WCN36XX_CFG_VAL(PS_ENABLE_BCN_FILTER, 1),
WCN36XX_CFG_VAL(PS_ENABLE_RSSI_MONITOR, 1),
WCN36XX_CFG_VAL(NUM_BEACON_PER_RSSI_AVERAGE, 20),
WCN36XX_CFG_VAL(STATS_PERIOD, 10),
WCN36XX_CFG_VAL(CFP_MAX_DURATION, 30000),
WCN36XX_CFG_VAL(FRAME_TRANS_ENABLED, 0),
WCN36XX_CFG_VAL(BA_THRESHOLD_HIGH, 128),
WCN36XX_CFG_VAL(MAX_BA_BUFFERS, 2560),
WCN36XX_CFG_VAL(DYNAMIC_PS_POLL_VALUE, 0),
WCN36XX_CFG_VAL(TX_PWR_CTRL_ENABLE, 1),
WCN36XX_CFG_VAL(ENABLE_CLOSE_LOOP, 1),
WCN36XX_CFG_VAL(ENABLE_LPWR_IMG_TRANSITION, 0),
WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_BT, 120000),
WCN36XX_CFG_VAL(BTC_STATIC_LEN_LE_WLAN, 30000),
WCN36XX_CFG_VAL(MAX_ASSOC_LIMIT, 10),
WCN36XX_CFG_VAL(ENABLE_MCC_ADAPTIVE_SCHEDULER, 0),
WCN36XX_CFG_VAL(TDLS_PUAPSD_MASK, 0),
WCN36XX_CFG_VAL(TDLS_PUAPSD_BUFFER_STA_CAPABLE, 1),
WCN36XX_CFG_VAL(TDLS_PUAPSD_INACTIVITY_TIME, 0),
WCN36XX_CFG_VAL(TDLS_PUAPSD_RX_FRAME_THRESHOLD, 10),
WCN36XX_CFG_VAL(TDLS_OFF_CHANNEL_CAPABLE, 1),
WCN36XX_CFG_VAL(ENABLE_ADAPTIVE_RX_DRAIN, 1),
WCN36XX_CFG_VAL(FLEXCONNECT_POWER_FACTOR, 0),
WCN36XX_CFG_VAL(ANTENNA_DIVERSITY, 3),
WCN36XX_CFG_VAL(ATH_DISABLE, 0),
WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_ACTIVE_WLAN_LEN, 60000),
WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_ACTIVE_BT_LEN, 90000),
WCN36XX_CFG_VAL(BTC_SAP_STATIC_OPP_ACTIVE_WLAN_LEN, 30000),
WCN36XX_CFG_VAL(BTC_SAP_STATIC_OPP_ACTIVE_BT_LEN, 30000),
WCN36XX_CFG_VAL(ASD_PROBE_INTERVAL, 50),
WCN36XX_CFG_VAL(ASD_TRIGGER_THRESHOLD, -60),
WCN36XX_CFG_VAL(ASD_RTT_RSSI_HYST_THRESHOLD, 3),
WCN36XX_CFG_VAL(BTC_CTS2S_ON_STA_DURING_SCO, 0),
WCN36XX_CFG_VAL(RA_FILTER_ENABLE, 0),
WCN36XX_CFG_VAL(RA_RATE_LIMIT_INTERVAL, 60),
WCN36XX_CFG_VAL(BTC_FATAL_HID_NSNIFF_BLK, 2),
WCN36XX_CFG_VAL(BTC_CRITICAL_HID_NSNIFF_BLK, 1),
WCN36XX_CFG_VAL(BTC_DYN_A2DP_TX_QUEUE_THOLD, 0),
WCN36XX_CFG_VAL(BTC_DYN_OPP_TX_QUEUE_THOLD, 1),
WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_SP, 10),
WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_RX_CNT, 50),
WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_TX_CNT, 50),
WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_TX_CNT_MEAS_WINDOW, 500),
WCN36XX_CFG_VAL(MAX_UAPSD_CONSEC_RX_CNT_MEAS_WINDOW, 500),
WCN36XX_CFG_VAL(MAX_PSPOLL_IN_WMM_UAPSD_PS_MODE, 0),
WCN36XX_CFG_VAL(MAX_UAPSD_INACTIVITY_INTERVALS, 10),
WCN36XX_CFG_VAL(ENABLE_DYNAMIC_WMMPS, 1),
WCN36XX_CFG_VAL(BURST_MODE_BE_TXOP_VALUE, 0),
WCN36XX_CFG_VAL(ENABLE_DYNAMIC_RA_START_RATE, 136),
WCN36XX_CFG_VAL(BTC_FAST_WLAN_CONN_PREF, 1),
WCN36XX_CFG_VAL(ENABLE_RTSCTS_HTVHT, 0),
WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_WLAN_LEN, 30000),
WCN36XX_CFG_VAL(BTC_STATIC_OPP_WLAN_IDLE_BT_LEN, 120000),
WCN36XX_CFG_VAL(LINK_FAIL_TX_CNT, 1000),
WCN36XX_CFG_VAL(TOGGLE_ARP_BDRATES, 0),
WCN36XX_CFG_VAL(OPTIMIZE_CA_EVENT, 0),
WCN36XX_CFG_VAL(EXT_SCAN_CONC_MODE, 0),
WCN36XX_CFG_VAL(BAR_WAKEUP_HOST_DISABLE, 0),
WCN36XX_CFG_VAL(SAR_BOFFSET_CORRECTION_ENABLE, 0),
WCN36XX_CFG_VAL(BTC_DISABLE_WLAN_LINK_CRITICAL, 5),
WCN36XX_CFG_VAL(DISABLE_SCAN_DURING_SCO, 2),
WCN36XX_CFG_VAL(CONS_BCNMISS_COUNT, 0),
WCN36XX_CFG_VAL(UNITS_OF_BCN_WAIT_TIME, 0),
WCN36XX_CFG_VAL(TRIGGER_NULLFRAME_BEFORE_HB, 0),
WCN36XX_CFG_VAL(ENABLE_POWERSAVE_OFFLOAD, 0),
};
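/* Append one u32 configuration value as a TLV entry at offset *len in
 * the HAL buffer, bounds-checking against WCN36XX_HAL_BUF_SIZE.
 */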
static int put_cfg_tlv_u32(struct wcn36xx *wcn, size_t *len, u32 id, u32 value)
{
struct wcn36xx_hal_cfg *entry;
u32 *val;
if (*len + sizeof(*entry) + sizeof(u32) >= WCN36XX_HAL_BUF_SIZE) {
wcn36xx_err("Not enough room for TLV entry\n");
return -ENOMEM;
}
entry = (struct wcn36xx_hal_cfg *) (wcn->hal_buf + *len);
entry->id = id;
entry->len = sizeof(u32);
entry->pad_bytes = 0;
entry->reserve = 0;
val = (u32 *) (entry + 1);
*val = value;
*len += sizeof(*entry) + sizeof(u32);
return 0;
}
static void wcn36xx_smd_set_bss_nw_type(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_bss_params *bss_params)
{
if (NL80211_BAND_5GHZ == WCN36XX_BAND(wcn))
bss_params->nw_type = WCN36XX_HAL_11A_NW_TYPE;
else if (sta && sta->deflink.ht_cap.ht_supported)
bss_params->nw_type = WCN36XX_HAL_11N_NW_TYPE;
else if (sta && (sta->deflink.supp_rates[NL80211_BAND_2GHZ] & 0x7f))
bss_params->nw_type = WCN36XX_HAL_11G_NW_TYPE;
else
bss_params->nw_type = WCN36XX_HAL_11B_NW_TYPE;
}
static inline u8 is_cap_supported(unsigned long caps, unsigned long flag)
{
return caps & flag ? 1 : 0;
}
static void wcn36xx_smd_set_bss_ht_params(struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_bss_params *bss_params)
{
if (sta && sta->deflink.ht_cap.ht_supported) {
unsigned long caps = sta->deflink.ht_cap.cap;
bss_params->ht = sta->deflink.ht_cap.ht_supported;
bss_params->tx_channel_width_set = is_cap_supported(caps,
IEEE80211_HT_CAP_SUP_WIDTH_20_40);
bss_params->lsig_tx_op_protection_full_support =
is_cap_supported(caps,
IEEE80211_HT_CAP_LSIG_TXOP_PROT);
bss_params->ht_oper_mode = vif->bss_conf.ht_operation_mode;
bss_params->lln_non_gf_coexist =
!!(vif->bss_conf.ht_operation_mode &
IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
/* IEEE80211_HT_STBC_PARAM_DUAL_CTS_PROT */
bss_params->dual_cts_protection = 0;
/* IEEE80211_HT_OP_MODE_PROTECTION_20MHZ */
bss_params->ht20_coexist = 0;
}
}
static void
wcn36xx_smd_set_bss_vht_params(struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_bss_params_v1 *bss)
{
if (sta && sta->deflink.vht_cap.vht_supported)
bss->vht_capable = 1;
}
static void wcn36xx_smd_set_sta_ht_params(struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params *sta_params)
{
if (sta->deflink.ht_cap.ht_supported) {
unsigned long caps = sta->deflink.ht_cap.cap;
sta_params->ht_capable = sta->deflink.ht_cap.ht_supported;
sta_params->tx_channel_width_set = is_cap_supported(caps,
IEEE80211_HT_CAP_SUP_WIDTH_20_40);
sta_params->lsig_txop_protection = is_cap_supported(caps,
IEEE80211_HT_CAP_LSIG_TXOP_PROT);
sta_params->max_ampdu_size = sta->deflink.ht_cap.ampdu_factor;
sta_params->max_ampdu_density = sta->deflink.ht_cap.ampdu_density;
/* max_amsdu_size: 1 : 3839 bytes, 0 : 7935 bytes (max) */
sta_params->max_amsdu_size = !is_cap_supported(caps,
IEEE80211_HT_CAP_MAX_AMSDU);
sta_params->sgi_20Mhz = is_cap_supported(caps,
IEEE80211_HT_CAP_SGI_20);
sta_params->sgi_40mhz = is_cap_supported(caps,
IEEE80211_HT_CAP_SGI_40);
sta_params->green_field_capable = is_cap_supported(caps,
IEEE80211_HT_CAP_GRN_FLD);
sta_params->delayed_ba_support = is_cap_supported(caps,
IEEE80211_HT_CAP_DELAY_BA);
sta_params->dsss_cck_mode_40mhz = is_cap_supported(caps,
IEEE80211_HT_CAP_DSSSCCK40);
}
}
static void wcn36xx_smd_set_sta_vht_params(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params_v1 *sta_params)
{
if (sta->deflink.vht_cap.vht_supported) {
unsigned long caps = sta->deflink.vht_cap.cap;
sta_params->vht_capable = sta->deflink.vht_cap.vht_supported;
sta_params->vht_ldpc_enabled =
is_cap_supported(caps, IEEE80211_VHT_CAP_RXLDPC);
if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, MU_MIMO)) {
sta_params->vht_tx_mu_beamformee_capable =
is_cap_supported(caps, IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE);
if (sta_params->vht_tx_mu_beamformee_capable)
sta_params->vht_tx_bf_enabled = 1;
} else {
sta_params->vht_tx_mu_beamformee_capable = 0;
}
sta_params->vht_tx_channel_width_set = 0;
}
}
static void wcn36xx_smd_set_sta_ht_ldpc_params(struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params_v1 *sta_params)
{
if (sta->deflink.ht_cap.ht_supported) {
sta_params->ht_ldpc_enabled =
is_cap_supported(sta->deflink.ht_cap.cap,
IEEE80211_HT_CAP_LDPC_CODING);
}
}
static void wcn36xx_smd_set_sta_default_ht_params(
struct wcn36xx_hal_config_sta_params *sta_params)
{
sta_params->ht_capable = 1;
sta_params->tx_channel_width_set = 1;
sta_params->lsig_txop_protection = 1;
sta_params->max_ampdu_size = 3;
sta_params->max_ampdu_density = 5;
sta_params->max_amsdu_size = 0;
sta_params->sgi_20Mhz = 1;
sta_params->sgi_40mhz = 1;
sta_params->green_field_capable = 1;
sta_params->delayed_ba_support = 0;
sta_params->dsss_cck_mode_40mhz = 1;
}
static void wcn36xx_smd_set_sta_default_vht_params(struct wcn36xx *wcn,
struct wcn36xx_hal_config_sta_params_v1 *sta_params)
{
if (wcn->rf_id == RF_IRIS_WCN3680) {
sta_params->vht_capable = 1;
sta_params->vht_tx_mu_beamformee_capable = 1;
} else {
sta_params->vht_capable = 0;
sta_params->vht_tx_mu_beamformee_capable = 0;
}
sta_params->vht_ldpc_enabled = 0;
sta_params->vht_tx_channel_width_set = 0;
sta_params->vht_tx_bf_enabled = 0;
}
static void wcn36xx_smd_set_sta_default_ht_ldpc_params(struct wcn36xx *wcn,
struct wcn36xx_hal_config_sta_params_v1 *sta_params)
{
if (wcn->rf_id == RF_IRIS_WCN3680)
sta_params->ht_ldpc_enabled = 1;
else
sta_params->ht_ldpc_enabled = 0;
}
static void wcn36xx_smd_set_sta_params(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params *sta_params)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_sta *sta_priv = NULL;
if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT) {
sta_params->type = 1;
sta_params->sta_index = WCN36XX_HAL_STA_INVALID_IDX;
} else {
sta_params->type = 0;
sta_params->sta_index = vif_priv->self_sta_index;
}
sta_params->listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
/*
* In STA mode ieee80211_sta contains bssid and ieee80211_vif
* contains our mac address. In AP mode we are bssid so vif
* contains bssid and ieee80211_sta contains mac.
*/
if (NL80211_IFTYPE_STATION == vif->type)
memcpy(&sta_params->mac, vif->addr, ETH_ALEN);
else
memcpy(&sta_params->bssid, vif->addr, ETH_ALEN);
sta_params->encrypt_type = vif_priv->encrypt_type;
sta_params->short_preamble_supported = true;
sta_params->rifs_mode = 0;
sta_params->rmf = 0;
sta_params->action = 0;
sta_params->uapsd = 0;
sta_params->mimo_ps = WCN36XX_HAL_HT_MIMO_PS_STATIC;
sta_params->max_ampdu_duration = 0;
sta_params->bssid_index = vif_priv->bss_index;
sta_params->p2p = 0;
if (sta) {
sta_priv = wcn36xx_sta_to_priv(sta);
if (NL80211_IFTYPE_STATION == vif->type)
memcpy(&sta_params->bssid, sta->addr, ETH_ALEN);
else
memcpy(&sta_params->mac, sta->addr, ETH_ALEN);
sta_params->wmm_enabled = sta->wme;
sta_params->max_sp_len = sta->max_sp;
sta_params->aid = sta_priv->aid;
wcn36xx_smd_set_sta_ht_params(sta, sta_params);
memcpy(&sta_params->supported_rates, &sta_priv->supported_rates,
sizeof(struct wcn36xx_hal_supported_rates));
} else {
wcn36xx_set_default_rates((struct wcn36xx_hal_supported_rates *)
&sta_params->supported_rates);
wcn36xx_smd_set_sta_default_ht_params(sta_params);
}
}
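/* Send the prepared hal_buf over rpmsg/SMD and wait for the firmware
 * response, which is written back into hal_buf; callers must hold
 * hal_mutex around the whole exchange.
 */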
static int wcn36xx_smd_send_and_wait(struct wcn36xx *wcn, size_t len)
{
int ret;
unsigned long start;
struct wcn36xx_hal_msg_header *hdr =
(struct wcn36xx_hal_msg_header *)wcn->hal_buf;
u16 req_type = hdr->msg_type;
wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "HAL >>> ", wcn->hal_buf, len);
init_completion(&wcn->hal_rsp_compl);
start = jiffies;
ret = rpmsg_send(wcn->smd_channel, wcn->hal_buf, len);
if (ret) {
wcn36xx_err("HAL TX failed for req %d\n", req_type);
goto out;
}
if (wait_for_completion_timeout(&wcn->hal_rsp_compl,
msecs_to_jiffies(HAL_MSG_TIMEOUT)) <= 0) {
wcn36xx_err("Timeout! No SMD response to req %d in %dms\n",
req_type, HAL_MSG_TIMEOUT);
ret = -ETIME;
goto out;
}
wcn36xx_dbg(WCN36XX_DBG_SMD,
"SMD command (req %d, rsp %d) completed in %dms\n",
req_type, hdr->msg_type,
jiffies_to_msecs(jiffies - start));
out:
return ret;
}
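/* Zero a HAL request struct and fill in its message header. The length
 * defaults to sizeof(msg_body); callers that append variable-length
 * payloads adjust header.len afterwards.
 */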
#define __INIT_HAL_MSG(msg_body, type, version) \
do { \
memset(&(msg_body), 0, sizeof(msg_body)); \
(msg_body).header.msg_type = type; \
(msg_body).header.msg_version = version; \
(msg_body).header.len = sizeof(msg_body); \
} while (0)
#define INIT_HAL_MSG(msg_body, type) \
__INIT_HAL_MSG(msg_body, type, WCN36XX_HAL_MSG_VERSION0)
#define INIT_HAL_MSG_V1(msg_body, type) \
__INIT_HAL_MSG(msg_body, type, WCN36XX_HAL_MSG_VERSION1)
#define INIT_HAL_PTT_MSG(p_msg_body, ppt_msg_len) \
do { \
memset(p_msg_body, 0, sizeof(*p_msg_body) + ppt_msg_len); \
p_msg_body->header.msg_type = WCN36XX_HAL_PROCESS_PTT_REQ; \
p_msg_body->header.msg_version = WCN36XX_HAL_MSG_VERSION0; \
p_msg_body->header.len = sizeof(*p_msg_body) + ppt_msg_len; \
} while (0)
#define PREPARE_HAL_BUF(send_buf, msg_body) \
do { \
memcpy_and_pad(send_buf, msg_body.header.len, \
&msg_body, sizeof(msg_body), 0); \
} while (0)
#define PREPARE_HAL_PTT_MSG_BUF(send_buf, p_msg_body) \
do { \
memcpy(send_buf, p_msg_body, p_msg_body->header.len); \
} while (0)
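/*
 * Typical synchronous HAL exchange (a sketch mirroring
 * wcn36xx_smd_stop() below; every request/response pair in this file
 * follows the same pattern):
 *
 *	mutex_lock(&wcn->hal_mutex);
 *	INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
 *	PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
 *	ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
 *	if (!ret)
 *		ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
 *						   wcn->hal_rsp_len);
 *	mutex_unlock(&wcn->hal_mutex);
 */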
static int wcn36xx_smd_rsp_status_check(void *buf, size_t len)
{
struct wcn36xx_fw_msg_status_rsp *rsp;
if (len < sizeof(struct wcn36xx_hal_msg_header) +
sizeof(struct wcn36xx_fw_msg_status_rsp))
return -EIO;
rsp = (struct wcn36xx_fw_msg_status_rsp *)
(buf + sizeof(struct wcn36xx_hal_msg_header));
if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status)
return rsp->status;
return 0;
}
int wcn36xx_smd_load_nv(struct wcn36xx *wcn)
{
struct nv_data *nv_d;
struct wcn36xx_hal_nv_img_download_req_msg msg_body;
int fw_bytes_left;
int ret;
u16 fm_offset = 0;
if (!wcn->nv) {
ret = request_firmware(&wcn->nv, wcn->nv_file, wcn->dev);
if (ret) {
wcn36xx_err("Failed to load nv file %s: %d\n",
wcn->nv_file, ret);
goto out;
}
}
nv_d = (struct nv_data *)wcn->nv->data;
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DOWNLOAD_NV_REQ);
msg_body.header.len += WCN36XX_NV_FRAGMENT_SIZE;
msg_body.frag_number = 0;
/* hal_buf must be protected with mutex */
mutex_lock(&wcn->hal_mutex);
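/* Stream the NV blob to firmware in WCN36XX_NV_FRAGMENT_SIZE chunks;
 * the leading 4 bytes of the file are the struct nv_data header and
 * are not sent (hence the "- 4" below).
 */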
do {
fw_bytes_left = wcn->nv->size - fm_offset - 4;
if (fw_bytes_left > WCN36XX_NV_FRAGMENT_SIZE) {
msg_body.last_fragment = 0;
msg_body.nv_img_buffer_size = WCN36XX_NV_FRAGMENT_SIZE;
} else {
msg_body.last_fragment = 1;
msg_body.nv_img_buffer_size = fw_bytes_left;
/* Do not forget to update the overall message length */
msg_body.header.len = sizeof(msg_body) + fw_bytes_left;
}
/* Add load NV request message header */
memcpy(wcn->hal_buf, &msg_body, sizeof(msg_body));
/* Add NV body itself */
memcpy(wcn->hal_buf + sizeof(msg_body),
&nv_d->table + fm_offset,
msg_body.nv_img_buffer_size);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret)
goto out_unlock;
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf,
wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_load_nv response failed err=%d\n",
ret);
goto out_unlock;
}
msg_body.frag_number++;
fm_offset += WCN36XX_NV_FRAGMENT_SIZE;
} while (msg_body.last_fragment != 1);
out_unlock:
mutex_unlock(&wcn->hal_mutex);
out:
return ret;
}
static int wcn36xx_smd_start_rsp(struct wcn36xx *wcn, void *buf, size_t len)
{
struct wcn36xx_hal_mac_start_rsp_msg *rsp;
if (len < sizeof(*rsp))
return -EIO;
rsp = (struct wcn36xx_hal_mac_start_rsp_msg *)buf;
if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->start_rsp_params.status)
return -EIO;
memcpy(wcn->crm_version, rsp->start_rsp_params.crm_version,
WCN36XX_HAL_VERSION_LENGTH);
memcpy(wcn->wlan_version, rsp->start_rsp_params.wlan_version,
WCN36XX_HAL_VERSION_LENGTH);
/* null terminate the strings, just in case */
wcn->crm_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
wcn->wlan_version[WCN36XX_HAL_VERSION_LENGTH] = '\0';
wcn->fw_revision = rsp->start_rsp_params.version.revision;
wcn->fw_version = rsp->start_rsp_params.version.version;
wcn->fw_minor = rsp->start_rsp_params.version.minor;
wcn->fw_major = rsp->start_rsp_params.version.major;
if (wcn->first_boot) {
wcn->first_boot = false;
wcn36xx_info("firmware WLAN version '%s' and CRM version '%s'\n",
wcn->wlan_version, wcn->crm_version);
wcn36xx_info("firmware API %u.%u.%u.%u, %u stations, %u bssids\n",
wcn->fw_major, wcn->fw_minor,
wcn->fw_version, wcn->fw_revision,
rsp->start_rsp_params.stations,
rsp->start_rsp_params.bssids);
}
return 0;
}
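/* HAL_START_REQ: append the chip-specific configuration TLV table to
 * the request, then record the firmware version information carried in
 * the response.
 */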
int wcn36xx_smd_start(struct wcn36xx *wcn)
{
struct wcn36xx_hal_mac_start_req_msg msg_body, *body;
int ret;
int i;
size_t len;
int cfg_elements;
static struct wcn36xx_cfg_val *cfg_vals;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_REQ);
msg_body.params.type = DRIVER_TYPE_PRODUCTION;
msg_body.params.len = 0;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
body = (struct wcn36xx_hal_mac_start_req_msg *)wcn->hal_buf;
len = body->header.len;
if (wcn->rf_id == RF_IRIS_WCN3680) {
cfg_vals = wcn3680_cfg_vals;
cfg_elements = ARRAY_SIZE(wcn3680_cfg_vals);
} else {
cfg_vals = wcn36xx_cfg_vals;
cfg_elements = ARRAY_SIZE(wcn36xx_cfg_vals);
}
for (i = 0; i < cfg_elements; i++) {
ret = put_cfg_tlv_u32(wcn, &len, cfg_vals[i].cfg_id,
cfg_vals[i].value);
if (ret)
goto out;
}
body->header.len = len;
body->params.len = len - sizeof(*body);
wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start type %d\n",
msg_body.params.type);
ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
if (ret) {
wcn36xx_err("Sending hal_start failed\n");
goto out;
}
ret = wcn36xx_smd_start_rsp(wcn, wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_start response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_stop(struct wcn36xx *wcn)
{
struct wcn36xx_hal_mac_stop_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_REQ);
msg_body.stop_req_params.reason = HAL_STOP_TYPE_RF_KILL;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_stop failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_stop response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_init_scan(struct wcn36xx *wcn, enum wcn36xx_hal_sys_mode mode,
struct ieee80211_vif *vif)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_init_scan_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_INIT_SCAN_REQ);
msg_body.mode = mode;
if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX) {
/* Notify BSSID with null DATA packet */
msg_body.frame_type = 2;
msg_body.notify = 1;
msg_body.scan_entry.bss_index[0] = vif_priv->bss_index;
msg_body.scan_entry.active_bss_count = 1;
}
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL, "hal init scan mode %d\n", msg_body.mode);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_init_scan failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_init_scan response failed err=%d\n", ret);
goto out;
}
wcn->sw_scan_init = true;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_start_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_start_scan_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_START_SCAN_REQ);
msg_body.scan_channel = scan_channel;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL, "hal start scan channel %d\n",
msg_body.scan_channel);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_start_scan failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_start_scan response failed err=%d\n", ret);
goto out;
}
wcn->sw_scan_channel = scan_channel;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_end_scan(struct wcn36xx *wcn, u8 scan_channel)
{
struct wcn36xx_hal_end_scan_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_END_SCAN_REQ);
msg_body.scan_channel = scan_channel;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL, "hal end scan channel %d\n",
msg_body.scan_channel);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_end_scan failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_end_scan response failed err=%d\n", ret);
goto out;
}
wcn->sw_scan_channel = 0;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_finish_scan(struct wcn36xx *wcn,
enum wcn36xx_hal_sys_mode mode,
struct ieee80211_vif *vif)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_finish_scan_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FINISH_SCAN_REQ);
msg_body.mode = mode;
msg_body.oper_channel = WCN36XX_HW_CHANNEL(wcn);
if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX) {
/* Notify BSSID with null data packet */
msg_body.notify = 1;
msg_body.frame_type = 2;
msg_body.scan_entry.bss_index[0] = vif_priv->bss_index;
msg_body.scan_entry.active_bss_count = 1;
}
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL, "hal finish scan mode %d\n",
msg_body.mode);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_finish_scan failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_finish_scan response failed err=%d\n", ret);
goto out;
}
wcn->sw_scan_init = false;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_start_hw_scan(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct cfg80211_scan_request *req)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_start_scan_offload_req_msg *msg_body;
int ret, i;
if (req->ie_len > WCN36XX_MAX_SCAN_IE_LEN)
return -EINVAL;
mutex_lock(&wcn->hal_mutex);
msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL);
if (!msg_body) {
ret = -ENOMEM;
goto out;
}
INIT_HAL_MSG((*msg_body), WCN36XX_HAL_START_SCAN_OFFLOAD_REQ);
msg_body->scan_type = WCN36XX_HAL_SCAN_TYPE_ACTIVE;
msg_body->min_ch_time = 30;
msg_body->max_ch_time = 100;
msg_body->scan_hidden = 1;
memcpy(msg_body->mac, vif->addr, ETH_ALEN);
msg_body->bss_type = vif_priv->bss_type;
msg_body->p2p_search = vif->p2p;
msg_body->num_ssid = min_t(u8, req->n_ssids, ARRAY_SIZE(msg_body->ssids));
for (i = 0; i < msg_body->num_ssid; i++) {
msg_body->ssids[i].length = min_t(u8, req->ssids[i].ssid_len,
sizeof(msg_body->ssids[i].ssid));
memcpy(msg_body->ssids[i].ssid, req->ssids[i].ssid,
msg_body->ssids[i].length);
}
msg_body->num_channel = min_t(u8, req->n_channels,
sizeof(msg_body->channels));
for (i = 0; i < msg_body->num_channel; i++) {
msg_body->channels[i] =
HW_VALUE_CHANNEL(req->channels[i]->hw_value);
}
msg_body->header.len -= WCN36XX_MAX_SCAN_IE_LEN;
if (req->ie_len > 0) {
msg_body->ie_len = req->ie_len;
msg_body->header.len += req->ie_len;
memcpy(msg_body->ie, req->ie, req->ie_len);
}
PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body));
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal start hw-scan (channels: %u; ssids: %u; p2p: %s)\n",
msg_body->num_channel, msg_body->num_ssid,
msg_body->p2p_search ? "yes" : "no");
ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
if (ret) {
wcn36xx_err("Sending hal_start_scan_offload failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_start_scan_offload response failed err=%d\n",
ret);
goto out;
}
out:
kfree(msg_body);
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_stop_hw_scan(struct wcn36xx *wcn)
{
struct wcn36xx_hal_stop_scan_offload_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_STOP_SCAN_OFFLOAD_REQ);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL, "hal stop hw-scan\n");
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_stop_scan_offload failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_stop_scan_offload response failed err=%d\n",
ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
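/* Send WCN36XX_HAL_UPDATE_CHANNEL_LIST_REQ carrying per-channel
 * frequency, regulatory flags and power limits taken from a cfg80211
 * scan request.
 */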
int wcn36xx_smd_update_channel_list(struct wcn36xx *wcn, struct cfg80211_scan_request *req)
{
struct wcn36xx_hal_update_channel_list_req_msg *msg_body;
int ret, i;
msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL);
if (!msg_body)
return -ENOMEM;
INIT_HAL_MSG((*msg_body), WCN36XX_HAL_UPDATE_CHANNEL_LIST_REQ);
msg_body->num_channel = min_t(u8, req->n_channels, ARRAY_SIZE(msg_body->channels));
for (i = 0; i < msg_body->num_channel; i++) {
struct wcn36xx_hal_channel_param *param = &msg_body->channels[i];
u32 min_power = WCN36XX_HAL_DEFAULT_MIN_POWER;
u32 ant_gain = WCN36XX_HAL_DEFAULT_ANT_GAIN;
param->mhz = req->channels[i]->center_freq;
param->band_center_freq1 = req->channels[i]->center_freq;
param->band_center_freq2 = 0;
if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_PASSIVE;
if (req->channels[i]->flags & IEEE80211_CHAN_RADAR)
param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_DFS;
if (req->channels[i]->band == NL80211_BAND_5GHZ) {
param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_HT;
param->channel_info |= WCN36XX_HAL_CHAN_INFO_FLAG_VHT;
param->channel_info |= WCN36XX_HAL_CHAN_INFO_PHY_11A;
} else {
param->channel_info |= WCN36XX_HAL_CHAN_INFO_PHY_11BG;
}
if (min_power > req->channels[i]->max_power)
min_power = req->channels[i]->max_power;
if (req->channels[i]->max_antenna_gain)
ant_gain = req->channels[i]->max_antenna_gain;
u32p_replace_bits(&param->reg_info_1, min_power,
WCN36XX_HAL_CHAN_REG1_MIN_PWR_MASK);
u32p_replace_bits(&param->reg_info_1, req->channels[i]->max_power,
WCN36XX_HAL_CHAN_REG1_MAX_PWR_MASK);
u32p_replace_bits(&param->reg_info_1, req->channels[i]->max_reg_power,
WCN36XX_HAL_CHAN_REG1_REG_PWR_MASK);
u32p_replace_bits(&param->reg_info_1, 0,
WCN36XX_HAL_CHAN_REG1_CLASS_ID_MASK);
u32p_replace_bits(&param->reg_info_2, ant_gain,
WCN36XX_HAL_CHAN_REG2_ANT_GAIN_MASK);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"%s: freq=%u, channel_info=%08x, reg_info1=%08x, reg_info2=%08x\n",
__func__, param->mhz, param->channel_info, param->reg_info_1,
param->reg_info_2);
}
mutex_lock(&wcn->hal_mutex);
PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body));
ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
if (ret) {
wcn36xx_err("Sending hal_update_channel_list failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_update_channel_list response failed err=%d\n", ret);
goto out;
}
out:
kfree(msg_body);
mutex_unlock(&wcn->hal_mutex);
return ret;
}
static int wcn36xx_smd_switch_channel_rsp(void *buf, size_t len)
{
struct wcn36xx_hal_switch_channel_rsp_msg *rsp;
int ret;
ret = wcn36xx_smd_rsp_status_check(buf, len);
if (ret)
return ret;
rsp = (struct wcn36xx_hal_switch_channel_rsp_msg *)buf;
wcn36xx_dbg(WCN36XX_DBG_HAL, "channel switched to: %d, status: %d\n",
rsp->channel_number, rsp->status);
return ret;
}
int wcn36xx_smd_switch_channel(struct wcn36xx *wcn,
struct ieee80211_vif *vif, int ch)
{
struct wcn36xx_hal_switch_channel_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_CH_SWITCH_REQ);
msg_body.channel_number = (u8)ch;
msg_body.tx_mgmt_power = 0xbf;
msg_body.max_tx_power = 0xbf;
memcpy(msg_body.self_sta_mac_addr, vif->addr, ETH_ALEN);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_switch_channel failed\n");
goto out;
}
ret = wcn36xx_smd_switch_channel_rsp(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_switch_channel response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
static int wcn36xx_smd_process_ptt_msg_rsp(void *buf, size_t len,
void **p_ptt_rsp_msg)
{
struct wcn36xx_hal_process_ptt_msg_rsp_msg *rsp;
int ret;
ret = wcn36xx_smd_rsp_status_check(buf, len);
if (ret)
return ret;
rsp = (struct wcn36xx_hal_process_ptt_msg_rsp_msg *)buf;
wcn36xx_dbg(WCN36XX_DBG_HAL, "process ptt msg responded with length %d\n",
rsp->header.len);
wcn36xx_dbg_dump(WCN36XX_DBG_HAL_DUMP, "HAL_PTT_MSG_RSP:", rsp->ptt_msg,
rsp->header.len - sizeof(rsp->ptt_msg_resp_status));
if (rsp->header.len > 0) {
*p_ptt_rsp_msg = kmemdup(rsp->ptt_msg, rsp->header.len,
GFP_ATOMIC);
if (!*p_ptt_rsp_msg)
return -ENOMEM;
}
return ret;
}
int wcn36xx_smd_process_ptt_msg(struct wcn36xx *wcn,
struct ieee80211_vif *vif, void *ptt_msg, size_t len,
void **ptt_rsp_msg)
{
struct wcn36xx_hal_process_ptt_msg_req_msg *p_msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
p_msg_body = kmalloc(
sizeof(struct wcn36xx_hal_process_ptt_msg_req_msg) + len,
GFP_ATOMIC);
if (!p_msg_body) {
ret = -ENOMEM;
goto out_nomem;
}
INIT_HAL_PTT_MSG(p_msg_body, len);
memcpy(&p_msg_body->ptt_msg, ptt_msg, len);
PREPARE_HAL_PTT_MSG_BUF(wcn->hal_buf, p_msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, p_msg_body->header.len);
if (ret) {
wcn36xx_err("Sending hal_process_ptt_msg failed\n");
goto out;
}
ret = wcn36xx_smd_process_ptt_msg_rsp(wcn->hal_buf, wcn->hal_rsp_len,
ptt_rsp_msg);
if (ret) {
wcn36xx_err("process_ptt_msg response failed err=%d\n", ret);
goto out;
}
out:
kfree(p_msg_body);
out_nomem:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
static int wcn36xx_smd_update_scan_params_rsp(void *buf, size_t len)
{
struct wcn36xx_hal_update_scan_params_resp *rsp;
rsp = (struct wcn36xx_hal_update_scan_params_resp *)buf;
/* Remove the PNO version bit */
rsp->status &= (~(WCN36XX_FW_MSG_PNO_VERSION_MASK));
if (WCN36XX_FW_MSG_RESULT_SUCCESS != rsp->status) {
wcn36xx_warn("error response from update scan\n");
return rsp->status;
}
return 0;
}
int wcn36xx_smd_update_scan_params(struct wcn36xx *wcn,
u8 *channels, size_t channel_count)
{
struct wcn36xx_hal_update_scan_params_req_ex msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_SCAN_PARAM_REQ);
msg_body.dot11d_enabled = false;
msg_body.dot11d_resolved = true;
msg_body.channel_count = channel_count;
memcpy(msg_body.channels, channels, channel_count);
msg_body.active_min_ch_time = 60;
msg_body.active_max_ch_time = 120;
msg_body.passive_min_ch_time = 60;
msg_body.passive_max_ch_time = 110;
msg_body.state = PHY_SINGLE_CHANNEL_CENTERED;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal update scan params channel_count %d\n",
msg_body.channel_count);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_update_scan_params failed\n");
goto out;
}
ret = wcn36xx_smd_update_scan_params_rsp(wcn->hal_buf,
wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_update_scan_params response failed err=%d\n",
ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
static int wcn36xx_smd_add_sta_self_rsp(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
void *buf,
size_t len)
{
struct wcn36xx_hal_add_sta_self_rsp_msg *rsp;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (len < sizeof(*rsp))
return -EINVAL;
rsp = (struct wcn36xx_hal_add_sta_self_rsp_msg *)buf;
if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
wcn36xx_warn("hal add sta self failure: %d\n",
rsp->status);
return rsp->status;
}
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal add sta self status %d self_sta_index %d dpu_index %d\n",
rsp->status, rsp->self_sta_index, rsp->dpu_index);
vif_priv->self_sta_index = rsp->self_sta_index;
vif_priv->self_dpu_desc_index = rsp->dpu_index;
return 0;
}
int wcn36xx_smd_add_sta_self(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_add_sta_self_req msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_STA_SELF_REQ);
memcpy(&msg_body.self_addr, vif->addr, ETH_ALEN);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal add sta self self_addr %pM status %d\n",
msg_body.self_addr, msg_body.status);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_add_sta_self failed\n");
goto out;
}
ret = wcn36xx_smd_add_sta_self_rsp(wcn,
vif,
wcn->hal_buf,
wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_add_sta_self response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_delete_sta_self(struct wcn36xx *wcn, u8 *addr)
{
struct wcn36xx_hal_del_sta_self_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_STA_SELF_REQ);
memcpy(&msg_body.self_addr, addr, ETH_ALEN);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_delete_sta_self failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_delete_sta_self response failed err=%d\n",
ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_delete_sta(struct wcn36xx *wcn, u8 sta_index)
{
struct wcn36xx_hal_delete_sta_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_STA_REQ);
msg_body.sta_index = sta_index;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal delete sta sta_index %d\n",
msg_body.sta_index);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_delete_sta failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_delete_sta response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
static int wcn36xx_smd_join_rsp(void *buf, size_t len)
{
struct wcn36xx_hal_join_rsp_msg *rsp;
if (wcn36xx_smd_rsp_status_check(buf, len))
return -EIO;
rsp = (struct wcn36xx_hal_join_rsp_msg *)buf;
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal rsp join status %d tx_mgmt_power %d\n",
rsp->status, rsp->tx_mgmt_power);
return 0;
}
int wcn36xx_smd_join(struct wcn36xx *wcn, const u8 *bssid, u8 *vif, u8 ch)
{
struct wcn36xx_hal_join_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_JOIN_REQ);
memcpy(&msg_body.bssid, bssid, ETH_ALEN);
memcpy(&msg_body.self_sta_mac_addr, vif, ETH_ALEN);
msg_body.channel = ch;
if (conf_is_ht40_minus(&wcn->hw->conf))
msg_body.secondary_channel_offset =
PHY_DOUBLE_CHANNEL_HIGH_PRIMARY;
else if (conf_is_ht40_plus(&wcn->hw->conf))
msg_body.secondary_channel_offset =
PHY_DOUBLE_CHANNEL_LOW_PRIMARY;
else
msg_body.secondary_channel_offset =
PHY_SINGLE_CHANNEL_CENTERED;
msg_body.link_state = WCN36XX_HAL_LINK_PREASSOC_STATE;
msg_body.max_tx_power = 0xbf;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal join req bssid %pM self_sta_mac_addr %pM channel %d link_state %d\n",
msg_body.bssid, msg_body.self_sta_mac_addr,
msg_body.channel, msg_body.link_state);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_join failed\n");
goto out;
}
ret = wcn36xx_smd_join_rsp(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_join response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_set_link_st(struct wcn36xx *wcn, const u8 *bssid,
const u8 *sta_mac,
enum wcn36xx_hal_link_state state)
{
struct wcn36xx_hal_set_link_state_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_LINK_ST_REQ);
memcpy(&msg_body.bssid, bssid, ETH_ALEN);
memcpy(&msg_body.self_mac_addr, sta_mac, ETH_ALEN);
msg_body.state = state;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal set link state bssid %pM self_mac_addr %pM state %d\n",
msg_body.bssid, msg_body.self_mac_addr, msg_body.state);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_set_link_st failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_set_link_st response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
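/* Copy the common (v0) station parameters into the extended v1 layout;
 * the VHT and HT-LDPC fields are filled in separately by
 * wcn36xx_smd_set_sta_params_v1().
 */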
static void wcn36xx_smd_convert_sta_to_v1(struct wcn36xx *wcn,
const struct wcn36xx_hal_config_sta_params *orig,
struct wcn36xx_hal_config_sta_params_v1 *v1)
{
/* convert orig to v1 format */
memcpy(&v1->bssid, orig->bssid, ETH_ALEN);
memcpy(&v1->mac, orig->mac, ETH_ALEN);
v1->aid = orig->aid;
v1->type = orig->type;
v1->short_preamble_supported = orig->short_preamble_supported;
v1->listen_interval = orig->listen_interval;
v1->wmm_enabled = orig->wmm_enabled;
v1->ht_capable = orig->ht_capable;
v1->tx_channel_width_set = orig->tx_channel_width_set;
v1->rifs_mode = orig->rifs_mode;
v1->lsig_txop_protection = orig->lsig_txop_protection;
v1->max_ampdu_size = orig->max_ampdu_size;
v1->max_ampdu_density = orig->max_ampdu_density;
v1->sgi_40mhz = orig->sgi_40mhz;
v1->sgi_20Mhz = orig->sgi_20Mhz;
v1->rmf = orig->rmf;
v1->encrypt_type = orig->encrypt_type;
v1->action = orig->action;
v1->uapsd = orig->uapsd;
v1->max_sp_len = orig->max_sp_len;
v1->green_field_capable = orig->green_field_capable;
v1->mimo_ps = orig->mimo_ps;
v1->delayed_ba_support = orig->delayed_ba_support;
v1->max_ampdu_duration = orig->max_ampdu_duration;
v1->dsss_cck_mode_40mhz = orig->dsss_cck_mode_40mhz;
memcpy(&v1->supported_rates, &orig->supported_rates,
sizeof(orig->supported_rates));
v1->sta_index = orig->sta_index;
v1->bssid_index = orig->bssid_index;
v1->p2p = orig->p2p;
}
static void
wcn36xx_smd_set_sta_params_v1(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wcn36xx_hal_config_sta_params_v1 *sta_par)
{
struct wcn36xx_sta *sta_priv = NULL;
struct wcn36xx_hal_config_sta_params sta_par_v0;
wcn36xx_smd_set_sta_params(wcn, vif, sta, &sta_par_v0);
wcn36xx_smd_convert_sta_to_v1(wcn, &sta_par_v0, sta_par);
if (sta) {
sta_priv = wcn36xx_sta_to_priv(sta);
wcn36xx_smd_set_sta_vht_params(wcn, sta, sta_par);
wcn36xx_smd_set_sta_ht_ldpc_params(sta, sta_par);
memcpy(&sta_par->supported_rates, &sta_priv->supported_rates,
sizeof(sta_par->supported_rates));
} else {
wcn36xx_set_default_rates_v1(&sta_par->supported_rates);
wcn36xx_smd_set_sta_default_vht_params(wcn, sta_par);
wcn36xx_smd_set_sta_default_ht_ldpc_params(wcn, sta_par);
}
}
static int wcn36xx_smd_config_sta_rsp(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
void *buf,
size_t len)
{
struct wcn36xx_hal_config_sta_rsp_msg *rsp;
struct config_sta_rsp_params *params;
struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
if (len < sizeof(*rsp))
return -EINVAL;
rsp = (struct wcn36xx_hal_config_sta_rsp_msg *)buf;
params = &rsp->params;
if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
wcn36xx_warn("hal config sta response failure: %d\n",
params->status);
return -EIO;
}
sta_priv->sta_index = params->sta_index;
sta_priv->dpu_desc_index = params->dpu_index;
sta_priv->ucast_dpu_sign = params->uc_ucast_sig;
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config sta rsp status %d sta_index %d bssid_index %d uc_ucast_sig %d p2p %d\n",
params->status, params->sta_index, params->bssid_index,
params->uc_ucast_sig, params->p2p);
return 0;
}
static int wcn36xx_smd_config_sta_v1(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct wcn36xx_hal_config_sta_req_msg_v1 msg_body;
struct wcn36xx_hal_config_sta_params_v1 *sta_params;
if (wcn->rf_id == RF_IRIS_WCN3680) {
INIT_HAL_MSG_V1(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
} else {
INIT_HAL_MSG(msg_body, WCN36XX_HAL_CONFIG_STA_REQ);
msg_body.header.len -= WCN36XX_DIFF_STA_PARAMS_V1_NOVHT;
}
sta_params = &msg_body.sta_params;
wcn36xx_smd_set_sta_params_v1(wcn, vif, sta, sta_params);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config sta v1 action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
sta_params->action, sta_params->sta_index, sta_params->bssid_index,
sta_params->bssid, sta_params->type, sta_params->mac, sta_params->aid);
return wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
}
static int wcn36xx_smd_config_sta_v0(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct wcn36xx_hal_config_sta_req_msg msg;
struct wcn36xx_hal_config_sta_params *sta_params;
INIT_HAL_MSG(msg, WCN36XX_HAL_CONFIG_STA_REQ);
sta_params = &msg.sta_params;
wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
PREPARE_HAL_BUF(wcn->hal_buf, msg);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config sta action %d sta_index %d bssid_index %d bssid %pM type %d mac %pM aid %d\n",
sta_params->action, sta_params->sta_index,
sta_params->bssid_index, sta_params->bssid,
sta_params->type, sta_params->mac, sta_params->aid);
return wcn36xx_smd_send_and_wait(wcn, msg.header.len);
}
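/* Firmware 1.2.2.24 still expects the v0 STA layout; any other version
 * gets the v1 message.
 */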
int wcn36xx_smd_config_sta(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
int ret;
mutex_lock(&wcn->hal_mutex);
if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24))
ret = wcn36xx_smd_config_sta_v1(wcn, vif, sta);
else
ret = wcn36xx_smd_config_sta_v0(wcn, vif, sta);
if (ret) {
wcn36xx_err("Sending hal_config_sta failed\n");
goto out;
}
ret = wcn36xx_smd_config_sta_rsp(wcn,
sta,
wcn->hal_buf,
wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_config_sta response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
static void wcn36xx_smd_set_bss_params(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
const u8 *bssid,
bool update,
struct wcn36xx_hal_config_bss_params *bss)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
WARN_ON(is_zero_ether_addr(bssid));
memcpy(&bss->bssid, bssid, ETH_ALEN);
memcpy(bss->self_mac_addr, vif->addr, ETH_ALEN);
if (vif->type == NL80211_IFTYPE_STATION) {
bss->bss_type = WCN36XX_HAL_INFRASTRUCTURE_MODE;
/* STA */
bss->oper_mode = 1;
bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_MODE;
} else if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_MESH_POINT) {
bss->bss_type = WCN36XX_HAL_INFRA_AP_MODE;
/* AP */
bss->oper_mode = 0;
bss->wcn36xx_hal_persona = WCN36XX_HAL_STA_SAP_MODE;
} else if (vif->type == NL80211_IFTYPE_ADHOC) {
bss->bss_type = WCN36XX_HAL_IBSS_MODE;
/* STA */
bss->oper_mode = 1;
} else {
wcn36xx_warn("Unknown type for bss config: %d\n", vif->type);
}
if (vif->type == NL80211_IFTYPE_STATION)
wcn36xx_smd_set_bss_nw_type(wcn, sta, bss);
else
bss->nw_type = WCN36XX_HAL_11N_NW_TYPE;
bss->short_slot_time_supported = vif->bss_conf.use_short_slot;
bss->lla_coexist = 0;
bss->llb_coexist = 0;
bss->llg_coexist = 0;
bss->rifs_mode = 0;
bss->beacon_interval = vif->bss_conf.beacon_int;
bss->dtim_period = vif_priv->dtim_period;
wcn36xx_smd_set_bss_ht_params(vif, sta, bss);
bss->oper_channel = WCN36XX_HW_CHANNEL(wcn);
if (conf_is_ht40_minus(&wcn->hw->conf))
bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_BELOW;
else if (conf_is_ht40_plus(&wcn->hw->conf))
bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
else
bss->ext_channel = IEEE80211_HT_PARAM_CHA_SEC_NONE;
bss->reserved = 0;
/* vif_priv->ssid is only valid in AP and IBSS mode */
bss->ssid.length = vif_priv->ssid.length;
memcpy(bss->ssid.ssid, vif_priv->ssid.ssid, vif_priv->ssid.length);
bss->obss_prot_enabled = 0;
bss->rmf = 0;
bss->max_probe_resp_retry_limit = 0;
bss->hidden_ssid = vif->bss_conf.hidden_ssid;
bss->proxy_probe_resp = 0;
bss->edca_params_valid = 0;
/* FIXME: set acbe, acbk, acvi and acvo */
bss->ext_set_sta_key_param_valid = 0;
/* FIXME: set ext_set_sta_key_param */
bss->spectrum_mgt_enable = 0;
bss->tx_mgmt_power = 0;
bss->max_tx_power = WCN36XX_MAX_POWER(wcn);
bss->action = update;
vif_priv->bss_type = bss->bss_type;
}
static int wcn36xx_smd_config_bss_v1(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta_80211,
const u8 *bssid,
bool update)
{
struct wcn36xx_hal_config_bss_req_msg_v1 *msg_body;
struct wcn36xx_hal_config_bss_params_v1 *bss;
struct wcn36xx_hal_config_bss_params bss_v0;
struct wcn36xx_hal_config_sta_params_v1 *sta;
struct cfg80211_chan_def *chandef;
int ret;
msg_body = kzalloc(sizeof(*msg_body), GFP_KERNEL);
if (!msg_body)
return -ENOMEM;
if (wcn->rf_id == RF_IRIS_WCN3680) {
INIT_HAL_MSG_V1((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ);
} else {
INIT_HAL_MSG((*msg_body), WCN36XX_HAL_CONFIG_BSS_REQ);
msg_body->header.len -= WCN36XX_DIFF_BSS_PARAMS_V1_NOVHT;
}
bss = &msg_body->bss_params;
sta = &bss->sta;
memset(&bss_v0, 0x00, sizeof(bss_v0));
wcn36xx_smd_set_bss_params(wcn, vif, sta_80211, bssid, update, &bss_v0);
wcn36xx_smd_set_sta_params_v1(wcn, vif, sta_80211, sta);
/* convert orig to v1 */
memcpy(bss->bssid, &bss_v0.bssid, ETH_ALEN);
memcpy(bss->self_mac_addr, &bss_v0.self_mac_addr, ETH_ALEN);
bss->bss_type = bss_v0.bss_type;
bss->oper_mode = bss_v0.oper_mode;
bss->nw_type = bss_v0.nw_type;
bss->short_slot_time_supported =
bss_v0.short_slot_time_supported;
bss->lla_coexist = bss_v0.lla_coexist;
bss->llb_coexist = bss_v0.llb_coexist;
bss->llg_coexist = bss_v0.llg_coexist;
bss->ht20_coexist = bss_v0.ht20_coexist;
bss->lln_non_gf_coexist = bss_v0.lln_non_gf_coexist;
bss->lsig_tx_op_protection_full_support =
bss_v0.lsig_tx_op_protection_full_support;
bss->rifs_mode = bss_v0.rifs_mode;
bss->beacon_interval = bss_v0.beacon_interval;
bss->dtim_period = bss_v0.dtim_period;
bss->tx_channel_width_set = bss_v0.tx_channel_width_set;
bss->oper_channel = bss_v0.oper_channel;
if (wcn->hw->conf.chandef.width == NL80211_CHAN_WIDTH_80) {
chandef = &wcn->hw->conf.chandef;
bss->ext_channel = HW_VALUE_PHY(chandef->chan->hw_value);
} else {
bss->ext_channel = bss_v0.ext_channel;
}
bss->reserved = bss_v0.reserved;
memcpy(&bss->ssid, &bss_v0.ssid,
sizeof(bss_v0.ssid));
bss->action = bss_v0.action;
bss->rateset = bss_v0.rateset;
bss->ht = bss_v0.ht;
bss->obss_prot_enabled = bss_v0.obss_prot_enabled;
bss->rmf = bss_v0.rmf;
bss->ht_oper_mode = bss_v0.ht_oper_mode;
bss->dual_cts_protection = bss_v0.dual_cts_protection;
bss->max_probe_resp_retry_limit =
bss_v0.max_probe_resp_retry_limit;
bss->hidden_ssid = bss_v0.hidden_ssid;
bss->proxy_probe_resp = bss_v0.proxy_probe_resp;
bss->edca_params_valid = bss_v0.edca_params_valid;
memcpy(&bss->acbe, &bss_v0.acbe,
sizeof(bss_v0.acbe));
memcpy(&bss->acbk, &bss_v0.acbk,
sizeof(bss_v0.acbk));
memcpy(&bss->acvi, &bss_v0.acvi,
sizeof(bss_v0.acvi));
memcpy(&bss->acvo, &bss_v0.acvo,
sizeof(bss_v0.acvo));
bss->ext_set_sta_key_param_valid =
bss_v0.ext_set_sta_key_param_valid;
memcpy(&bss->ext_set_sta_key_param,
&bss_v0.ext_set_sta_key_param,
sizeof(bss_v0.ext_set_sta_key_param));
bss->wcn36xx_hal_persona = bss_v0.wcn36xx_hal_persona;
bss->spectrum_mgt_enable = bss_v0.spectrum_mgt_enable;
bss->tx_mgmt_power = bss_v0.tx_mgmt_power;
bss->max_tx_power = bss_v0.max_tx_power;
wcn36xx_smd_set_bss_vht_params(vif, sta_80211, bss);
PREPARE_HAL_BUF(wcn->hal_buf, (*msg_body));
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config bss v1 bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
bss->bssid, bss->self_mac_addr, bss->bss_type,
bss->oper_mode, bss->nw_type);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
sta->bssid, sta->action, sta->sta_index,
sta->bssid_index, sta->aid, sta->type, sta->mac);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
kfree(msg_body);
return ret;
}
static int wcn36xx_smd_config_bss_v0(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
const u8 *bssid,
bool update)
{
struct wcn36xx_hal_config_bss_req_msg *msg;
struct wcn36xx_hal_config_bss_params *bss;
struct wcn36xx_hal_config_sta_params *sta_params;
int ret;
msg = kzalloc(sizeof(*msg), GFP_KERNEL);
if (!msg)
return -ENOMEM;
INIT_HAL_MSG((*msg), WCN36XX_HAL_CONFIG_BSS_REQ);
bss = &msg->bss_params;
sta_params = &bss->sta;
wcn36xx_smd_set_bss_params(wcn, vif, sta, bssid, update, bss);
wcn36xx_smd_set_sta_params(wcn, vif, sta, sta_params);
PREPARE_HAL_BUF(wcn->hal_buf, (*msg));
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config bss bssid %pM self_mac_addr %pM bss_type %d oper_mode %d nw_type %d\n",
bss->bssid, bss->self_mac_addr, bss->bss_type,
bss->oper_mode, bss->nw_type);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"- sta bssid %pM action %d sta_index %d bssid_index %d aid %d type %d mac %pM\n",
sta_params->bssid, sta_params->action,
sta_params->sta_index, sta_params->bssid_index,
sta_params->aid, sta_params->type,
sta_params->mac);
ret = wcn36xx_smd_send_and_wait(wcn, msg->header.len);
kfree(msg);
return ret;
}
static int wcn36xx_smd_config_bss_rsp(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
void *buf,
size_t len)
{
struct wcn36xx_hal_config_bss_rsp_msg *rsp;
struct wcn36xx_hal_config_bss_rsp_params *params;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
if (len < sizeof(*rsp))
return -EINVAL;
rsp = (struct wcn36xx_hal_config_bss_rsp_msg *)buf;
params = &rsp->bss_rsp_params;
if (params->status != WCN36XX_FW_MSG_RESULT_SUCCESS) {
wcn36xx_warn("hal config bss response failure: %d\n",
params->status);
return -EIO;
}
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal config bss rsp status %d bss_idx %d dpu_desc_index %d"
" sta_idx %d self_idx %d bcast_idx %d mac %pM"
" power %d ucast_dpu_signature %d\n",
params->status, params->bss_index, params->dpu_desc_index,
params->bss_sta_index, params->bss_self_sta_index,
params->bss_bcast_sta_idx, params->mac,
params->tx_mgmt_power, params->ucast_dpu_signature);
vif_priv->bss_index = params->bss_index;
if (sta) {
struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
sta_priv->bss_sta_index = params->bss_sta_index;
sta_priv->bss_dpu_desc_index = params->dpu_desc_index;
}
vif_priv->self_ucast_dpu_sign = params->ucast_dpu_signature;
return 0;
}
int wcn36xx_smd_config_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, const u8 *bssid,
bool update)
{
int ret;
mutex_lock(&wcn->hal_mutex);
if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24))
ret = wcn36xx_smd_config_bss_v1(wcn, vif, sta, bssid, update);
else
ret = wcn36xx_smd_config_bss_v0(wcn, vif, sta, bssid, update);
if (ret) {
wcn36xx_err("Sending hal_config_bss failed\n");
goto out;
}
ret = wcn36xx_smd_config_bss_rsp(wcn,
vif,
sta,
wcn->hal_buf,
wcn->hal_rsp_len);
if (ret)
wcn36xx_err("hal_config_bss response failed err=%d\n", ret);
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_delete_bss(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_delete_bss_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret = 0;
mutex_lock(&wcn->hal_mutex);
if (vif_priv->bss_index == WCN36XX_HAL_BSS_INVALID_IDX)
goto out;
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DELETE_BSS_REQ);
msg_body.bss_index = vif_priv->bss_index;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL, "hal delete bss %d\n", msg_body.bss_index);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_delete_bss failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_delete_bss response failed err=%d\n", ret);
goto out;
}
vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_send_beacon(struct wcn36xx *wcn, struct ieee80211_vif *vif,
struct sk_buff *skb_beacon, u16 tim_off,
u16 p2p_off)
{
struct wcn36xx_hal_send_beacon_req_msg msg_body;
int ret, pad, pvm_len;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SEND_BEACON_REQ);
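/*
 * TIM element layout: eid, len, dtim_count, dtim_period, bitmap_ctrl,
 * then the partial virtual bitmap (PVM). data[tim_off + 1] is the IE
 * length octet and covers the three fixed octets plus the PVM, so
 * pvm_len = len - 3 and the PVM itself starts at tim_off + 5.
 */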
pvm_len = skb_beacon->data[tim_off + 1] - 3;
pad = TIM_MIN_PVM_SIZE - pvm_len;
/* Padding is irrelevant to mesh mode since tim_off is always 0. */
if (vif->type == NL80211_IFTYPE_MESH_POINT)
pad = 0;
msg_body.beacon_length = skb_beacon->len + pad;
/* TODO need to find out why + 6 is needed */
msg_body.beacon_length6 = msg_body.beacon_length + 6;
if (msg_body.beacon_length > BEACON_TEMPLATE_SIZE) {
wcn36xx_err("Beacon is too big: beacon size=%d\n",
msg_body.beacon_length);
ret = -E2BIG;
goto out;
}
memcpy(msg_body.beacon, skb_beacon->data, skb_beacon->len);
memcpy(msg_body.bssid, vif->addr, ETH_ALEN);
if (pad > 0) {
/*
 * The wcn36xx FW has a fixed size for the PVM in the TIM. If
 * given a beacon template from mac80211 with a PVM shorter
 * than the FW expects, it will overwrite the data after the
 * TIM.
 */
wcn36xx_dbg(WCN36XX_DBG_HAL, "Pad TIM PVM. %d bytes at %d\n",
pad, pvm_len);
memmove(&msg_body.beacon[tim_off + 5 + pvm_len + pad],
&msg_body.beacon[tim_off + 5 + pvm_len],
skb_beacon->len - (tim_off + 5 + pvm_len));
memset(&msg_body.beacon[tim_off + 5 + pvm_len], 0, pad);
msg_body.beacon[tim_off + 1] += pad;
}
/* TODO: find out why this is needed */
if (vif->type == NL80211_IFTYPE_MESH_POINT)
/* mesh beacons don't need this, so push it further down */
msg_body.tim_ie_offset = 256;
else
msg_body.tim_ie_offset = tim_off + 4;
msg_body.p2p_ie_offset = p2p_off;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal send beacon beacon_length %d\n",
msg_body.beacon_length);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_send_beacon failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_send_beacon response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_update_proberesp_tmpl(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct sk_buff *skb)
{
struct wcn36xx_hal_send_probe_resp_req_msg msg;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg, WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_REQ);
if (skb->len > BEACON_TEMPLATE_SIZE) {
wcn36xx_warn("probe response template is too big: %d\n",
skb->len);
ret = -E2BIG;
goto out;
}
msg.probe_resp_template_len = skb->len;
memcpy(&msg.probe_resp_template, skb->data, skb->len);
memcpy(msg.bssid, vif->addr, ETH_ALEN);
PREPARE_HAL_BUF(wcn->hal_buf, msg);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"hal update probe rsp len %d bssid %pM\n",
msg.probe_resp_template_len, msg.bssid);
ret = wcn36xx_smd_send_and_wait(wcn, msg.header.len);
if (ret) {
wcn36xx_err("Sending hal_update_proberesp_tmpl failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_update_proberesp_tmpl response failed err=%d\n",
ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_set_stakey(struct wcn36xx *wcn,
enum ani_ed_type enc_type,
u8 keyidx,
u8 keylen,
u8 *key,
u8 sta_index)
{
struct wcn36xx_hal_set_sta_key_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_STAKEY_REQ);
msg_body.set_sta_key_params.sta_index = sta_index;
msg_body.set_sta_key_params.enc_type = enc_type;
if (enc_type == WCN36XX_HAL_ED_WEP104 ||
enc_type == WCN36XX_HAL_ED_WEP40) {
/* Use bss key for wep (static) */
msg_body.set_sta_key_params.def_wep_idx = keyidx;
msg_body.set_sta_key_params.wep_type = 0;
} else {
msg_body.set_sta_key_params.key[0].id = keyidx;
msg_body.set_sta_key_params.key[0].unicast = 1;
msg_body.set_sta_key_params.key[0].direction = WCN36XX_HAL_TX_RX;
msg_body.set_sta_key_params.key[0].pae_role = 0;
msg_body.set_sta_key_params.key[0].length = keylen;
memcpy(msg_body.set_sta_key_params.key[0].key, key, keylen);
}
msg_body.set_sta_key_params.single_tid_rc = 1;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_set_stakey failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_set_stakey response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_set_bsskey(struct wcn36xx *wcn,
enum ani_ed_type enc_type,
u8 bssidx,
u8 keyidx,
u8 keylen,
u8 *key)
{
struct wcn36xx_hal_set_bss_key_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_BSSKEY_REQ);
msg_body.bss_idx = bssidx;
msg_body.enc_type = enc_type;
msg_body.num_keys = 1;
msg_body.keys[0].id = keyidx;
msg_body.keys[0].unicast = 0;
msg_body.keys[0].direction = WCN36XX_HAL_RX_ONLY;
msg_body.keys[0].pae_role = 0;
msg_body.keys[0].length = keylen;
memcpy(msg_body.keys[0].key, key, keylen);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_set_bsskey failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_set_bsskey response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_remove_stakey(struct wcn36xx *wcn,
enum ani_ed_type enc_type,
u8 keyidx,
u8 sta_index)
{
struct wcn36xx_hal_remove_sta_key_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_STAKEY_REQ);
msg_body.sta_idx = sta_index;
msg_body.enc_type = enc_type;
msg_body.key_id = keyidx;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_remove_stakey failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_remove_stakey response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_remove_bsskey(struct wcn36xx *wcn,
enum ani_ed_type enc_type,
u8 bssidx,
u8 keyidx)
{
struct wcn36xx_hal_remove_bss_key_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_RMV_BSSKEY_REQ);
msg_body.bss_idx = bssidx;
msg_body.enc_type = enc_type;
msg_body.key_id = keyidx;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_remove_bsskey failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_remove_bsskey response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_enter_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_enter_bmps_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_BMPS_REQ);
msg_body.bss_index = vif_priv->bss_index;
msg_body.tbtt = vif->bss_conf.sync_tsf;
msg_body.dtim_period = vif_priv->dtim_period;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_enter_bmps failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_enter_bmps response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_exit_bmps(struct wcn36xx *wcn, struct ieee80211_vif *vif)
{
struct wcn36xx_hal_exit_bmps_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_BMPS_REQ);
msg_body.bss_index = vif_priv->bss_index;
msg_body.send_data_null = 1;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_exit_bmps failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_exit_bmps response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_enter_imps(struct wcn36xx *wcn)
{
struct wcn36xx_hal_enter_imps_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ENTER_IMPS_REQ);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_enter_imps failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_enter_imps response failed err=%d\n", ret);
goto out;
}
wcn36xx_dbg(WCN36XX_DBG_HAL, "Entered idle mode\n");
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_exit_imps(struct wcn36xx *wcn)
{
struct wcn36xx_hal_exit_imps_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_EXIT_IMPS_REQ);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_exit_imps failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_exit_imps response failed err=%d\n", ret);
goto out;
}
wcn36xx_dbg(WCN36XX_DBG_HAL, "Exited idle mode\n");
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_set_power_params(struct wcn36xx *wcn, bool ignore_dtim)
{
struct wcn36xx_hal_set_power_params_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_SET_POWER_PARAMS_REQ);
/*
 * When the host is down, ignore every second DTIM
 */
if (ignore_dtim) {
msg_body.ignore_dtim = 1;
msg_body.dtim_period = 2;
}
msg_body.listen_interval = WCN36XX_LISTEN_INTERVAL(wcn);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_set_power_params failed\n");
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
/* Note: this function should only be called after association,
 * otherwise the request is invalid
 */
int wcn36xx_smd_keep_alive_req(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
int packet_type)
{
struct wcn36xx_hal_keep_alive_req_msg msg_body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_KEEP_ALIVE_REQ);
if (packet_type == WCN36XX_HAL_KEEP_ALIVE_NULL_PKT) {
msg_body.bss_index = vif_priv->bss_index;
msg_body.packet_type = WCN36XX_HAL_KEEP_ALIVE_NULL_PKT;
msg_body.time_period = WCN36XX_KEEP_ALIVE_TIME_PERIOD;
} else if (packet_type == WCN36XX_HAL_KEEP_ALIVE_UNSOLICIT_ARP_RSP) {
/* TODO: implement the unsolicited ARP response packet type */
} else {
wcn36xx_warn("unknown keep alive packet type %d\n", packet_type);
ret = -EINVAL;
goto out;
}
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_keep_alive failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_keep_alive response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_dump_cmd_req(struct wcn36xx *wcn, u32 arg1, u32 arg2,
u32 arg3, u32 arg4, u32 arg5)
{
struct wcn36xx_hal_dump_cmd_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DUMP_COMMAND_REQ);
msg_body.arg1 = arg1;
msg_body.arg2 = arg2;
msg_body.arg3 = arg3;
msg_body.arg4 = arg4;
msg_body.arg5 = arg5;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_dump_cmd failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_dump_cmd response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_feature_caps_exchange(struct wcn36xx *wcn)
{
struct wcn36xx_hal_feat_caps_msg msg_body, *rsp;
int ret, i;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_REQ);
wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, STA_POWERSAVE);
if (wcn->rf_id == RF_IRIS_WCN3680) {
wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, DOT11AC);
wcn36xx_firmware_set_feat_caps(msg_body.feat_caps, WLAN_CH144);
wcn36xx_firmware_set_feat_caps(msg_body.feat_caps,
ANTENNA_DIVERSITY_SELECTION);
}
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_feature_caps_exchange failed\n");
goto out;
}
if (wcn->hal_rsp_len != sizeof(*rsp)) {
wcn36xx_err("Invalid hal_feature_caps_exchange response");
goto out;
}
rsp = (struct wcn36xx_hal_feat_caps_msg *) wcn->hal_buf;
for (i = 0; i < WCN36XX_HAL_CAPS_SIZE; i++)
wcn->fw_feat_caps[i] = rsp->feat_caps[i];
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
static int wcn36xx_smd_add_ba_session_rsp(void *buf, int len, u8 *session)
{
struct wcn36xx_hal_add_ba_session_rsp_msg *rsp;
if (len < sizeof(*rsp))
return -EINVAL;
rsp = (struct wcn36xx_hal_add_ba_session_rsp_msg *)buf;
if (rsp->status != WCN36XX_FW_MSG_RESULT_SUCCESS)
return rsp->status;
*session = rsp->ba_session_id;
return 0;
}
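/*
 * Note: on success this returns the (non-negative) BA session id
 * assigned by the firmware, not 0; callers must treat any negative
 * value as an error and feed the session id to wcn36xx_smd_add_ba().
 */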
int wcn36xx_smd_add_ba_session(struct wcn36xx *wcn,
struct ieee80211_sta *sta,
u16 tid,
u16 *ssn,
u8 direction,
u8 sta_index)
{
struct wcn36xx_hal_add_ba_session_req_msg msg_body;
u8 session_id;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_SESSION_REQ);
msg_body.sta_index = sta_index;
memcpy(&msg_body.mac_addr, sta->addr, ETH_ALEN);
msg_body.dialog_token = 0x10;
msg_body.tid = tid;
/* Immediate BA because Delayed BA is not supported */
msg_body.policy = 1;
msg_body.buffer_size = WCN36XX_AGGR_BUFFER_SIZE;
msg_body.timeout = 0;
if (ssn)
msg_body.ssn = *ssn;
msg_body.direction = direction;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_add_ba_session failed\n");
goto out;
}
ret = wcn36xx_smd_add_ba_session_rsp(wcn->hal_buf, wcn->hal_rsp_len,
&session_id);
if (ret) {
wcn36xx_err("hal_add_ba_session response failed err=%d\n", ret);
ret = -EINVAL;
goto out;
}
ret = session_id;
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_add_ba(struct wcn36xx *wcn, u8 session_id)
{
struct wcn36xx_hal_add_ba_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BA_REQ);
msg_body.session_id = session_id;
msg_body.win_size = WCN36XX_AGGR_BUFFER_SIZE;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_add_ba failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_add_ba response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_del_ba(struct wcn36xx *wcn, u16 tid, u8 direction, u8 sta_index)
{
struct wcn36xx_hal_del_ba_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_DEL_BA_REQ);
msg_body.sta_index = sta_index;
msg_body.tid = tid;
msg_body.direction = direction;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_del_ba failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_del_ba response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_get_stats(struct wcn36xx *wcn, u8 sta_index, u32 stats_mask,
struct station_info *sinfo)
{
struct wcn36xx_hal_stats_req_msg msg_body;
struct wcn36xx_hal_stats_rsp_msg *rsp;
void *rsp_body;
int ret;
if (stats_mask & ~HAL_GLOBAL_CLASS_A_STATS_INFO) {
wcn36xx_err("stats_mask 0x%x contains unimplemented types\n",
stats_mask);
return -EINVAL;
}
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_GET_STATS_REQ);
msg_body.sta_id = sta_index;
msg_body.stats_mask = stats_mask;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("sending hal_get_stats failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_get_stats response failed err=%d\n", ret);
goto out;
}
rsp = (struct wcn36xx_hal_stats_rsp_msg *)wcn->hal_buf;
rsp_body = (wcn->hal_buf + sizeof(struct wcn36xx_hal_stats_rsp_msg));
if (rsp->stats_mask != stats_mask) {
wcn36xx_err("stats_mask 0x%x differs from requested 0x%x\n",
rsp->stats_mask, stats_mask);
goto out;
}
if (rsp->stats_mask & HAL_GLOBAL_CLASS_A_STATS_INFO) {
struct ani_global_class_a_stats_info *stats_info = rsp_body;
wcn36xx_process_tx_rate(stats_info, &sinfo->txrate);
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
rsp_body += sizeof(struct ani_global_class_a_stats_info);
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
static int wcn36xx_smd_trigger_ba_rsp(void *buf, int len, struct add_ba_info *ba_info)
{
struct wcn36xx_hal_trigger_ba_rsp_candidate *candidate;
struct wcn36xx_hal_trigger_ba_rsp_msg *rsp;
int i;
if (len < sizeof(*rsp))
return -EINVAL;
rsp = (struct wcn36xx_hal_trigger_ba_rsp_msg *) buf;
if (rsp->candidate_cnt < 1)
return rsp->status ? rsp->status : -EINVAL;
candidate = (struct wcn36xx_hal_trigger_ba_rsp_candidate *)(buf + sizeof(*rsp));
for (i = 0; i < STACFG_MAX_TC; i++) {
ba_info[i] = candidate->ba_info[i];
}
return rsp->status;
}
int wcn36xx_smd_trigger_ba(struct wcn36xx *wcn, u8 sta_index, u16 tid, u16 *ssn)
{
struct wcn36xx_hal_trigger_ba_req_msg msg_body;
struct wcn36xx_hal_trigger_ba_req_candidate *candidate;
struct add_ba_info ba_info[STACFG_MAX_TC];
int ret;
if (tid >= STACFG_MAX_TC)
return -EINVAL;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_TRIGGER_BA_REQ);
msg_body.session_id = 0; /* not really used */
msg_body.candidate_cnt = 1;
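/*
 * The request is variable-length: the fixed message is followed by
 * candidate_cnt candidate entries, so header.len must grow to cover
 * the single candidate written into hal_buf below.
 */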
msg_body.header.len += sizeof(*candidate);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
candidate = (struct wcn36xx_hal_trigger_ba_req_candidate *)
(wcn->hal_buf + sizeof(msg_body));
candidate->sta_index = sta_index;
candidate->tid_bitmap = 1 << tid;
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending hal_trigger_ba failed\n");
goto out;
}
ret = wcn36xx_smd_trigger_ba_rsp(wcn->hal_buf, wcn->hal_rsp_len, ba_info);
if (ret) {
wcn36xx_err("hal_trigger_ba response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
if (ssn)
*ssn = ba_info[tid].starting_seq_num;
return ret;
}
static int wcn36xx_smd_tx_compl_ind(struct wcn36xx *wcn, void *buf, size_t len)
{
struct wcn36xx_hal_tx_compl_ind_msg *rsp = buf;
if (len != sizeof(*rsp)) {
wcn36xx_warn("Bad TX complete indication\n");
return -EIO;
}
wcn36xx_dxe_tx_ack_ind(wcn, rsp->status);
return 0;
}
static int wcn36xx_smd_hw_scan_ind(struct wcn36xx *wcn, void *buf, size_t len)
{
struct wcn36xx_hal_scan_offload_ind *rsp = buf;
struct cfg80211_scan_info scan_info = {};
if (len != sizeof(*rsp)) {
wcn36xx_warn("Corrupted delete scan indication\n");
return -EIO;
}
wcn36xx_dbg(WCN36XX_DBG_HAL, "scan indication (type %x)\n", rsp->type);
switch (rsp->type) {
case WCN36XX_HAL_SCAN_IND_FAILED:
case WCN36XX_HAL_SCAN_IND_DEQUEUED:
scan_info.aborted = true;
fallthrough;
case WCN36XX_HAL_SCAN_IND_COMPLETED:
mutex_lock(&wcn->scan_lock);
wcn->scan_req = NULL;
if (wcn->scan_aborted)
scan_info.aborted = true;
mutex_unlock(&wcn->scan_lock);
ieee80211_scan_completed(wcn->hw, &scan_info);
break;
case WCN36XX_HAL_SCAN_IND_STARTED:
case WCN36XX_HAL_SCAN_IND_FOREIGN_CHANNEL:
case WCN36XX_HAL_SCAN_IND_PREEMPTED:
case WCN36XX_HAL_SCAN_IND_RESTARTED:
break;
default:
wcn36xx_warn("Unknown scan indication type %x\n", rsp->type);
}
return 0;
}
static int wcn36xx_smd_missed_beacon_ind(struct wcn36xx *wcn,
void *buf,
size_t len)
{
struct wcn36xx_hal_missed_beacon_ind_msg *rsp = buf;
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *tmp;
/* Old FW does not have bss index */
if (wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
list_for_each_entry(tmp, &wcn->vif_list, list) {
wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
tmp->bss_index);
vif = wcn36xx_priv_to_vif(tmp);
ieee80211_beacon_loss(vif);
}
return 0;
}
if (len != sizeof(*rsp)) {
wcn36xx_warn("Corrupted missed beacon indication\n");
return -EIO;
}
list_for_each_entry(tmp, &wcn->vif_list, list) {
if (tmp->bss_index == rsp->bss_index) {
wcn36xx_dbg(WCN36XX_DBG_HAL, "beacon missed bss_index %d\n",
rsp->bss_index);
vif = wcn36xx_priv_to_vif(tmp);
ieee80211_beacon_loss(vif);
return 0;
}
}
wcn36xx_warn("BSS index %d not found\n", rsp->bss_index);
return -ENOENT;
}
static int wcn36xx_smd_delete_sta_context_ind(struct wcn36xx *wcn,
void *buf,
size_t len)
{
struct wcn36xx_hal_delete_sta_context_ind_msg *rsp = buf;
struct wcn36xx_vif *vif_priv;
struct ieee80211_vif *vif;
struct ieee80211_bss_conf *bss_conf;
struct ieee80211_sta *sta;
bool found = false;
if (len != sizeof(*rsp)) {
wcn36xx_warn("Corrupted delete sta indication\n");
return -EIO;
}
wcn36xx_dbg(WCN36XX_DBG_HAL,
"delete station indication %pM index %d reason %d\n",
rsp->addr2, rsp->sta_id, rsp->reason_code);
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
rcu_read_lock();
vif = wcn36xx_priv_to_vif(vif_priv);
if (vif->type == NL80211_IFTYPE_STATION) {
/* We could call ieee80211_find_sta too, but checking
* bss_conf is clearer.
*/
bss_conf = &vif->bss_conf;
if (vif_priv->sta_assoc &&
!memcmp(bss_conf->bssid, rsp->addr2, ETH_ALEN)) {
found = true;
wcn36xx_dbg(WCN36XX_DBG_HAL,
"connection loss bss_index %d\n",
vif_priv->bss_index);
ieee80211_connection_loss(vif);
}
} else {
sta = ieee80211_find_sta(vif, rsp->addr2);
if (sta) {
found = true;
ieee80211_report_low_ack(sta, 0);
}
}
rcu_read_unlock();
if (found)
return 0;
}
wcn36xx_warn("BSS or STA with addr %pM not found\n", rsp->addr2);
return -ENOENT;
}
static int wcn36xx_smd_print_reg_info_ind(struct wcn36xx *wcn,
void *buf,
size_t len)
{
struct wcn36xx_hal_print_reg_info_ind *rsp = buf;
int i;
if (len < sizeof(*rsp)) {
wcn36xx_warn("Corrupted print reg info indication\n");
return -EIO;
}
wcn36xx_dbg(WCN36XX_DBG_HAL,
"reginfo indication, scenario: 0x%x reason: 0x%x\n",
rsp->scenario, rsp->reason);
for (i = 0; i < rsp->count; i++) {
wcn36xx_dbg(WCN36XX_DBG_HAL, "\t0x%x: 0x%x\n",
rsp->regs[i].addr, rsp->regs[i].value);
}
return 0;
}
int wcn36xx_smd_update_cfg(struct wcn36xx *wcn, u32 cfg_id, u32 value)
{
struct wcn36xx_hal_update_cfg_req_msg msg_body, *body;
size_t len;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_UPDATE_CFG_REQ);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
body = (struct wcn36xx_hal_update_cfg_req_msg *) wcn->hal_buf;
len = msg_body.header.len;
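/*
 * UPDATE_CFG is a TLV message: put_cfg_tlv_u32() appends one
 * (cfg_id, len, value) tuple at offset len inside hal_buf and
 * advances len, so the header length is patched afterwards to
 * include the payload.
 */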
put_cfg_tlv_u32(wcn, &len, cfg_id, value);
body->header.len = len;
body->len = len - sizeof(*body);
ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
if (ret) {
wcn36xx_err("Sending hal_update_cfg failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("hal_update_cfg response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_set_mc_list(struct wcn36xx *wcn,
struct ieee80211_vif *vif,
struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *msg_body = NULL;
int ret;
mutex_lock(&wcn->hal_mutex);
msg_body = (struct wcn36xx_hal_rcv_flt_pkt_set_mc_list_req_msg *)
wcn->hal_buf;
INIT_HAL_MSG(*msg_body, WCN36XX_HAL_8023_MULTICAST_LIST_REQ);
/* An empty list means all mc traffic will be received */
if (fp)
memcpy(&msg_body->mc_addr_list, fp,
sizeof(msg_body->mc_addr_list));
else
msg_body->mc_addr_list.mc_addr_count = 0;
msg_body->mc_addr_list.bss_index = vif_priv->bss_index;
ret = wcn36xx_smd_send_and_wait(wcn, msg_body->header.len);
if (ret) {
wcn36xx_err("Sending HAL_8023_MULTICAST_LIST failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("HAL_8023_MULTICAST_LIST rsp failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_arp_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
bool enable)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_host_offload_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_OFFLOAD_REQ);
msg_body.host_offload_params.offload_type =
WCN36XX_HAL_IPV4_ARP_REPLY_OFFLOAD;
if (enable) {
msg_body.host_offload_params.enable =
WCN36XX_HAL_OFFLOAD_ARP_AND_BCAST_FILTER_ENABLE;
memcpy(&msg_body.host_offload_params.u,
&vif->cfg.arp_addr_list[0], sizeof(__be32));
}
msg_body.ns_offload_params.bss_index = vif_priv->bss_index;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending host_offload_arp failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("host_offload_arp failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
int wcn36xx_smd_ipv6_ns_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
bool enable)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_host_offload_req_msg msg_body;
struct wcn36xx_hal_ns_offload_params *ns_params;
struct wcn36xx_hal_host_offload_req *ho_params;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_OFFLOAD_REQ);
ho_params = &msg_body.host_offload_params;
ns_params = &msg_body.ns_offload_params;
ho_params->offload_type = WCN36XX_HAL_IPV6_NS_OFFLOAD;
if (enable) {
ho_params->enable =
WCN36XX_HAL_OFFLOAD_NS_AND_MCAST_FILTER_ENABLE;
if (vif_priv->num_target_ipv6_addrs) {
memcpy(&ho_params->u,
&vif_priv->target_ipv6_addrs[0].in6_u,
sizeof(struct in6_addr));
memcpy(&ns_params->target_ipv6_addr1,
&vif_priv->target_ipv6_addrs[0].in6_u,
sizeof(struct in6_addr));
ns_params->target_ipv6_addr1_valid = 1;
}
if (vif_priv->num_target_ipv6_addrs > 1) {
memcpy(&ns_params->target_ipv6_addr2,
&vif_priv->target_ipv6_addrs[1].in6_u,
sizeof(struct in6_addr));
ns_params->target_ipv6_addr2_valid = 1;
}
}
memcpy(&ns_params->self_addr, vif->addr, ETH_ALEN);
ns_params->bss_index = vif_priv->bss_index;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending host_offload_arp failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("host_offload_arp failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
#else
int wcn36xx_smd_ipv6_ns_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
bool enable)
{
return 0;
}
#endif
int wcn36xx_smd_gtk_offload(struct wcn36xx *wcn, struct ieee80211_vif *vif,
bool enable)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_gtk_offload_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_GTK_OFFLOAD_REQ);
if (enable) {
memcpy(&msg_body.kek, vif_priv->rekey_data.kek, NL80211_KEK_LEN);
memcpy(&msg_body.kck, vif_priv->rekey_data.kck, NL80211_KCK_LEN);
msg_body.key_replay_counter =
le64_to_cpu(vif_priv->rekey_data.replay_ctr);
msg_body.bss_index = vif_priv->bss_index;
} else {
msg_body.flags = WCN36XX_HAL_GTK_OFFLOAD_FLAGS_DISABLE;
}
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending host_offload_arp failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("host_offload_arp failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
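/*
 * The replay counter is kept little-endian in rekey_data (matching
 * the HAL message) but mac80211 expects it big-endian, hence the
 * cpu_to_be64() copy handed to ieee80211_gtk_rekey_notify() below.
 */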
static int wcn36xx_smd_gtk_offload_get_info_rsp(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_gtk_offload_get_info_rsp_msg *rsp;
__be64 replay_ctr;
if (wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len))
return -EIO;
rsp = (struct wcn36xx_hal_gtk_offload_get_info_rsp_msg *)wcn->hal_buf;
if (rsp->bss_index != vif_priv->bss_index) {
wcn36xx_err("gtk_offload_info invalid response bss index %d\n",
rsp->bss_index);
return -ENOENT;
}
if (vif_priv->rekey_data.replay_ctr != cpu_to_le64(rsp->key_replay_counter)) {
replay_ctr = cpu_to_be64(rsp->key_replay_counter);
vif_priv->rekey_data.replay_ctr =
cpu_to_le64(rsp->key_replay_counter);
ieee80211_gtk_rekey_notify(vif, vif->bss_conf.bssid,
(void *)&replay_ctr, GFP_KERNEL);
wcn36xx_dbg(WCN36XX_DBG_HAL,
"GTK replay counter increment %llu\n",
rsp->key_replay_counter);
}
wcn36xx_dbg(WCN36XX_DBG_HAL,
"gtk offload info status %d last_rekey_status %d "
"replay_counter %llu total_rekey_count %d gtk_rekey_count %d "
"igtk_rekey_count %d bss_index %d\n",
rsp->status, rsp->last_rekey_status,
rsp->key_replay_counter, rsp->total_rekey_count,
rsp->gtk_rekey_count, rsp->igtk_rekey_count,
rsp->bss_index);
return 0;
}
int wcn36xx_smd_gtk_offload_get_info(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_hal_gtk_offload_get_info_req_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_GTK_OFFLOAD_GETINFO_REQ);
msg_body.bss_index = vif_priv->bss_index;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending gtk_offload_get_info failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("gtk_offload_get_info failed err=%d\n", ret);
goto out;
}
ret = wcn36xx_smd_gtk_offload_get_info_rsp(wcn, vif);
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_wlan_host_suspend_ind(struct wcn36xx *wcn)
{
struct wcn36xx_hal_wlan_host_suspend_ind_msg msg_body;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_SUSPEND_IND);
msg_body.configured_mcst_bcst_filter_setting = 0;
msg_body.active_session_count = 1;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = rpmsg_send(wcn->smd_channel, wcn->hal_buf, msg_body.header.len);
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_host_resume(struct wcn36xx *wcn)
{
struct wcn36xx_hal_wlan_host_resume_req_msg msg_body;
struct wcn36xx_hal_host_resume_rsp_msg *rsp;
int ret;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_HOST_RESUME_REQ);
msg_body.configured_mcst_bcst_filter_setting = 0;
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
ret = wcn36xx_smd_send_and_wait(wcn, msg_body.header.len);
if (ret) {
wcn36xx_err("Sending wlan_host_resume failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("wlan_host_resume err=%d\n", ret);
goto out;
}
rsp = (struct wcn36xx_hal_host_resume_rsp_msg *)wcn->hal_buf;
if (rsp->status)
wcn36xx_warn("wlan_host_resume status=%d\n", rsp->status);
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
#define BEACON_FILTER(eid, presence, offs, val, mask, ref_val) \
{ \
.element_id = eid, \
.check_ie_presence = presence, \
.offset = offs, \
.value = val, \
.bitmask = mask, \
.ref = ref_val, \
}
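/*
 * Each entry names an IE the firmware should track when filtering
 * beacons; offset/bitmask narrow the match to specific bytes of the
 * element, e.g. the three WLAN_EID_HT_OPERATION entries below watch
 * bytes 0, 2 and 5 of the HT Operation element.
 */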
static const struct beacon_filter_ie bcn_filter_ies[] = {
BEACON_FILTER(WLAN_EID_DS_PARAMS, 0, 0, 0,
WCN36XX_FILTER_IE_DS_CHANNEL_MASK, 0),
BEACON_FILTER(WLAN_EID_ERP_INFO, 0, 0, 0,
WCN36XX_FILTER_IE_ERP_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_EDCA_PARAM_SET, 0, 0, 0,
WCN36XX_FILTER_IE_EDCA_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_QOS_CAPA, 0, 0, 0,
WCN36XX_FILTER_IE_QOS_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_CHANNEL_SWITCH, 1, 0, 0,
WCN36XX_FILTER_IE_CHANNEL_SWITCH_MASK, 0),
BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 0, 0,
WCN36XX_FILTER_IE_HT_BYTE0_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 2, 0,
WCN36XX_FILTER_IE_HT_BYTE2_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_HT_OPERATION, 0, 5, 0,
WCN36XX_FILTER_IE_HT_BYTE5_FILTER_MASK, 0),
BEACON_FILTER(WLAN_EID_PWR_CONSTRAINT, 0, 0, 0,
WCN36XX_FILTER_IE_PWR_CONSTRAINT_MASK, 0),
BEACON_FILTER(WLAN_EID_OPMODE_NOTIF, 0, 0, 0,
WCN36XX_FILTER_IE_OPMODE_NOTIF_MASK, 0),
BEACON_FILTER(WLAN_EID_VHT_OPERATION, 0, 0, 0,
WCN36XX_FILTER_IE_VHTOP_CHWIDTH_MASK, 0),
BEACON_FILTER(WLAN_EID_RSN, 1, 0, 0,
WCN36XX_FILTER_IE_RSN_MASK, 0),
BEACON_FILTER(WLAN_EID_VENDOR_SPECIFIC, 1, 0, 0,
WCN36XX_FILTER_IE_VENDOR_MASK, 0),
};
int wcn36xx_smd_add_beacon_filter(struct wcn36xx *wcn,
struct ieee80211_vif *vif)
{
struct wcn36xx_hal_add_bcn_filter_req_msg msg_body, *body;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
u8 *payload;
size_t payload_size;
int ret;
if (!wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, BCN_FILTER))
return -EOPNOTSUPP;
mutex_lock(&wcn->hal_mutex);
INIT_HAL_MSG(msg_body, WCN36XX_HAL_ADD_BCN_FILTER_REQ);
PREPARE_HAL_BUF(wcn->hal_buf, msg_body);
body = (struct wcn36xx_hal_add_bcn_filter_req_msg *)wcn->hal_buf;
body->capability_info = vif->bss_conf.assoc_capability;
body->capability_mask = WCN36XX_FILTER_CAPABILITY_MASK;
body->beacon_interval = vif->bss_conf.beacon_int;
body->ie_num = ARRAY_SIZE(bcn_filter_ies);
body->bss_index = vif_priv->bss_index;
payload = ((u8 *)body) + body->header.len;
payload_size = sizeof(bcn_filter_ies);
memcpy(payload, &bcn_filter_ies, payload_size);
body->header.len += payload_size;
ret = wcn36xx_smd_send_and_wait(wcn, body->header.len);
if (ret) {
wcn36xx_err("Sending add bcn_filter failed\n");
goto out;
}
ret = wcn36xx_smd_rsp_status_check(wcn->hal_buf, wcn->hal_rsp_len);
if (ret) {
wcn36xx_err("add bcn filter response failed err=%d\n", ret);
goto out;
}
out:
mutex_unlock(&wcn->hal_mutex);
return ret;
}
int wcn36xx_smd_rsp_process(struct rpmsg_device *rpdev,
void *buf, int len, void *priv, u32 addr)
{
const struct wcn36xx_hal_msg_header *msg_header = buf;
struct ieee80211_hw *hw = priv;
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_hal_ind_msg *msg_ind;
wcn36xx_dbg_dump(WCN36XX_DBG_SMD_DUMP, "SMD <<< ", buf, len);
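/*
 * Two classes of messages arrive here: synchronous responses to a
 * request issued under hal_mutex, which are copied into hal_buf and
 * complete hal_rsp_compl, and unsolicited indications, which are
 * queued and handled later in process context by wcn36xx_ind_smd_work().
 */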
switch (msg_header->msg_type) {
case WCN36XX_HAL_START_RSP:
case WCN36XX_HAL_CONFIG_STA_RSP:
case WCN36XX_HAL_CONFIG_BSS_RSP:
case WCN36XX_HAL_ADD_STA_SELF_RSP:
case WCN36XX_HAL_STOP_RSP:
case WCN36XX_HAL_DEL_STA_SELF_RSP:
case WCN36XX_HAL_DELETE_STA_RSP:
case WCN36XX_HAL_INIT_SCAN_RSP:
case WCN36XX_HAL_START_SCAN_RSP:
case WCN36XX_HAL_END_SCAN_RSP:
case WCN36XX_HAL_FINISH_SCAN_RSP:
case WCN36XX_HAL_DOWNLOAD_NV_RSP:
case WCN36XX_HAL_DELETE_BSS_RSP:
case WCN36XX_HAL_SEND_BEACON_RSP:
case WCN36XX_HAL_SET_LINK_ST_RSP:
case WCN36XX_HAL_UPDATE_PROBE_RSP_TEMPLATE_RSP:
case WCN36XX_HAL_SET_BSSKEY_RSP:
case WCN36XX_HAL_SET_STAKEY_RSP:
case WCN36XX_HAL_RMV_STAKEY_RSP:
case WCN36XX_HAL_RMV_BSSKEY_RSP:
case WCN36XX_HAL_ENTER_BMPS_RSP:
case WCN36XX_HAL_SET_POWER_PARAMS_RSP:
case WCN36XX_HAL_EXIT_BMPS_RSP:
case WCN36XX_HAL_KEEP_ALIVE_RSP:
case WCN36XX_HAL_DUMP_COMMAND_RSP:
case WCN36XX_HAL_ADD_BA_SESSION_RSP:
case WCN36XX_HAL_ADD_BA_RSP:
case WCN36XX_HAL_DEL_BA_RSP:
case WCN36XX_HAL_GET_STATS_RSP:
case WCN36XX_HAL_TRIGGER_BA_RSP:
case WCN36XX_HAL_UPDATE_CFG_RSP:
case WCN36XX_HAL_JOIN_RSP:
case WCN36XX_HAL_UPDATE_SCAN_PARAM_RSP:
case WCN36XX_HAL_CH_SWITCH_RSP:
case WCN36XX_HAL_PROCESS_PTT_RSP:
case WCN36XX_HAL_FEATURE_CAPS_EXCHANGE_RSP:
case WCN36XX_HAL_8023_MULTICAST_LIST_RSP:
case WCN36XX_HAL_START_SCAN_OFFLOAD_RSP:
case WCN36XX_HAL_STOP_SCAN_OFFLOAD_RSP:
case WCN36XX_HAL_HOST_OFFLOAD_RSP:
case WCN36XX_HAL_GTK_OFFLOAD_RSP:
case WCN36XX_HAL_GTK_OFFLOAD_GETINFO_RSP:
case WCN36XX_HAL_HOST_RESUME_RSP:
case WCN36XX_HAL_ENTER_IMPS_RSP:
case WCN36XX_HAL_EXIT_IMPS_RSP:
case WCN36XX_HAL_UPDATE_CHANNEL_LIST_RSP:
case WCN36XX_HAL_ADD_BCN_FILTER_RSP:
memcpy(wcn->hal_buf, buf, len);
wcn->hal_rsp_len = len;
complete(&wcn->hal_rsp_compl);
break;
case WCN36XX_HAL_COEX_IND:
case WCN36XX_HAL_AVOID_FREQ_RANGE_IND:
case WCN36XX_HAL_DEL_BA_IND:
case WCN36XX_HAL_OTA_TX_COMPL_IND:
case WCN36XX_HAL_MISSED_BEACON_IND:
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
case WCN36XX_HAL_PRINT_REG_INFO_IND:
case WCN36XX_HAL_SCAN_OFFLOAD_IND:
msg_ind = kmalloc(struct_size(msg_ind, msg, len), GFP_ATOMIC);
if (!msg_ind) {
wcn36xx_err("Run out of memory while handling SMD_EVENT (%d)\n",
msg_header->msg_type);
return -ENOMEM;
}
msg_ind->msg_len = len;
memcpy(msg_ind->msg, buf, len);
spin_lock(&wcn->hal_ind_lock);
list_add_tail(&msg_ind->list, &wcn->hal_ind_queue);
queue_work(wcn->hal_ind_wq, &wcn->hal_ind_work);
spin_unlock(&wcn->hal_ind_lock);
wcn36xx_dbg(WCN36XX_DBG_HAL, "indication arrived\n");
break;
default:
wcn36xx_err("SMD_EVENT (%d) not supported\n",
msg_header->msg_type);
}
return 0;
}
static void wcn36xx_ind_smd_work(struct work_struct *work)
{
struct wcn36xx *wcn =
container_of(work, struct wcn36xx, hal_ind_work);
for (;;) {
struct wcn36xx_hal_msg_header *msg_header;
struct wcn36xx_hal_ind_msg *hal_ind_msg;
unsigned long flags;
spin_lock_irqsave(&wcn->hal_ind_lock, flags);
if (list_empty(&wcn->hal_ind_queue)) {
spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
return;
}
hal_ind_msg = list_first_entry(&wcn->hal_ind_queue,
struct wcn36xx_hal_ind_msg,
list);
list_del(&hal_ind_msg->list);
spin_unlock_irqrestore(&wcn->hal_ind_lock, flags);
msg_header = (struct wcn36xx_hal_msg_header *)hal_ind_msg->msg;
switch (msg_header->msg_type) {
case WCN36XX_HAL_COEX_IND:
case WCN36XX_HAL_DEL_BA_IND:
case WCN36XX_HAL_AVOID_FREQ_RANGE_IND:
break;
case WCN36XX_HAL_OTA_TX_COMPL_IND:
wcn36xx_smd_tx_compl_ind(wcn,
hal_ind_msg->msg,
hal_ind_msg->msg_len);
break;
case WCN36XX_HAL_MISSED_BEACON_IND:
wcn36xx_smd_missed_beacon_ind(wcn,
hal_ind_msg->msg,
hal_ind_msg->msg_len);
break;
case WCN36XX_HAL_DELETE_STA_CONTEXT_IND:
wcn36xx_smd_delete_sta_context_ind(wcn,
hal_ind_msg->msg,
hal_ind_msg->msg_len);
break;
case WCN36XX_HAL_PRINT_REG_INFO_IND:
wcn36xx_smd_print_reg_info_ind(wcn,
hal_ind_msg->msg,
hal_ind_msg->msg_len);
break;
case WCN36XX_HAL_SCAN_OFFLOAD_IND:
wcn36xx_smd_hw_scan_ind(wcn, hal_ind_msg->msg,
hal_ind_msg->msg_len);
break;
default:
wcn36xx_err("SMD_EVENT (%d) not supported\n",
msg_header->msg_type);
}
kfree(hal_ind_msg);
}
}
int wcn36xx_smd_open(struct wcn36xx *wcn)
{
wcn->hal_ind_wq = create_freezable_workqueue("wcn36xx_smd_ind");
if (!wcn->hal_ind_wq)
return -ENOMEM;
INIT_WORK(&wcn->hal_ind_work, wcn36xx_ind_smd_work);
INIT_LIST_HEAD(&wcn->hal_ind_queue);
spin_lock_init(&wcn->hal_ind_lock);
return 0;
}
void wcn36xx_smd_close(struct wcn36xx *wcn)
{
struct wcn36xx_hal_ind_msg *msg, *tmp;
cancel_work_sync(&wcn->hal_ind_work);
destroy_workqueue(wcn->hal_ind_wq);
list_for_each_entry_safe(msg, tmp, &wcn->hal_ind_queue, list)
kfree(msg);
}
|
linux-master
|
drivers/net/wireless/ath/wcn36xx/smd.c
|
/*
* Copyright (c) 2013 Eugene Krasnikov <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include "wcn36xx.h"
#include "debug.h"
#include "pmc.h"
#include "firmware.h"
#ifdef CONFIG_WCN36XX_DEBUGFS
static ssize_t read_file_bool_bmps(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wcn36xx *wcn = file->private_data;
struct wcn36xx_vif *vif_priv = NULL;
struct ieee80211_vif *vif = NULL;
char buf[3];
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(vif_priv);
if (NL80211_IFTYPE_STATION == vif->type) {
if (vif_priv->pw_state == WCN36XX_BMPS)
buf[0] = '1';
else
buf[0] = '0';
break;
}
}
buf[1] = '\n';
buf[2] = 0x00;
return simple_read_from_buffer(user_buf, count, ppos, buf, 2);
}
static ssize_t write_file_bool_bmps(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wcn36xx *wcn = file->private_data;
struct wcn36xx_vif *vif_priv = NULL;
struct ieee80211_vif *vif = NULL;
char buf[32];
int buf_size;
buf_size = min(count, sizeof(buf) - 1);
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
switch (buf[0]) {
case 'y':
case 'Y':
case '1':
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(vif_priv);
if (NL80211_IFTYPE_STATION == vif->type) {
wcn36xx_enable_keep_alive_null_packet(wcn, vif);
wcn36xx_pmc_enter_bmps_state(wcn, vif);
}
}
break;
case 'n':
case 'N':
case '0':
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(vif_priv);
if (NL80211_IFTYPE_STATION == vif->type)
wcn36xx_pmc_exit_bmps_state(wcn, vif);
}
break;
}
return count;
}
static const struct file_operations fops_wcn36xx_bmps = {
.open = simple_open,
.read = read_file_bool_bmps,
.write = write_file_bool_bmps,
};
static ssize_t write_file_dump(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wcn36xx *wcn = file->private_data;
char buf[255], *tmp;
int buf_size;
u32 arg[WCN36xx_MAX_DUMP_ARGS];
int i;
memset(buf, 0, sizeof(buf));
memset(arg, 0, sizeof(arg));
buf_size = min(count, (sizeof(buf) - 1));
if (copy_from_user(buf, user_buf, buf_size))
return -EFAULT;
tmp = buf;
for (i = 0; i < WCN36xx_MAX_DUMP_ARGS; i++) {
char *begin;
begin = strsep(&tmp, " ");
if (begin == NULL)
break;
if (kstrtos32(begin, 0, &arg[i]) != 0)
break;
}
wcn36xx_info("DUMP args is %d %d %d %d %d\n", arg[0], arg[1], arg[2],
arg[3], arg[4]);
wcn36xx_smd_dump_cmd_req(wcn, arg[0], arg[1], arg[2], arg[3], arg[4]);
return count;
}
static const struct file_operations fops_wcn36xx_dump = {
.open = simple_open,
.write = write_file_dump,
};
static ssize_t read_file_firmware_feature_caps(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wcn36xx *wcn = file->private_data;
size_t len = 0, buf_len = 2048;
char *buf;
int i;
int ret;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&wcn->hal_mutex);
for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, i)) {
len += scnprintf(buf + len, buf_len - len, "%s\n",
wcn36xx_firmware_get_cap_name(i));
}
if (len >= buf_len)
break;
}
mutex_unlock(&wcn->hal_mutex);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret;
}
static const struct file_operations fops_wcn36xx_firmware_feat_caps = {
.open = simple_open,
.read = read_file_firmware_feature_caps,
};
#define ADD_FILE(name, mode, fop, priv_data) \
do { \
struct dentry *d; \
d = debugfs_create_file(__stringify(name), \
mode, dfs->rootdir, \
priv_data, fop); \
dfs->file_##name.dentry = d; \
if (IS_ERR(d)) { \
wcn36xx_warn("Create the debugfs entry failed");\
dfs->file_##name.dentry = NULL; \
} \
} while (0)
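/*
 * Example: ADD_FILE(dump, 0200, &fops_wcn36xx_dump, wcn) creates a
 * "dump" file under the driver's debugfs root and stashes the dentry
 * in dfs->file_dump, so a failed create degrades gracefully.
 */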
void wcn36xx_debugfs_init(struct wcn36xx *wcn)
{
struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
dfs->rootdir = debugfs_create_dir(KBUILD_MODNAME,
wcn->hw->wiphy->debugfsdir);
if (IS_ERR(dfs->rootdir)) {
wcn36xx_warn("Create the debugfs failed\n");
dfs->rootdir = NULL;
}
ADD_FILE(bmps_switcher, 0600, &fops_wcn36xx_bmps, wcn);
ADD_FILE(dump, 0200, &fops_wcn36xx_dump, wcn);
ADD_FILE(firmware_feat_caps, 0200,
&fops_wcn36xx_firmware_feat_caps, wcn);
}
void wcn36xx_debugfs_exit(struct wcn36xx *wcn)
{
struct wcn36xx_dfs_entry *dfs = &wcn->dfs;
debugfs_remove_recursive(dfs->rootdir);
}
#endif /* CONFIG_WCN36XX_DEBUGFS */
|
linux-master
|
drivers/net/wireless/ath/wcn36xx/debug.c
|
/*
* Copyright (c) 2013 Eugene Krasnikov <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/rpmsg.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/soc/qcom/wcnss_ctrl.h>
#include <net/ipv6.h>
#include "wcn36xx.h"
#include "testmode.h"
#include "firmware.h"
unsigned int wcn36xx_dbg_mask;
module_param_named(debug_mask, wcn36xx_dbg_mask, uint, 0644);
MODULE_PARM_DESC(debug_mask, "Debugging mask");
#define CHAN2G(_freq, _idx) { \
.band = NL80211_BAND_2GHZ, \
.center_freq = (_freq), \
.hw_value = (_idx), \
.max_power = 25, \
}
#define CHAN5G(_freq, _idx, _phy_val) { \
.band = NL80211_BAND_5GHZ, \
.center_freq = (_freq), \
.hw_value = (_phy_val) << HW_VALUE_PHY_SHIFT | HW_VALUE_CHANNEL(_idx), \
.max_power = 25, \
}
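/*
 * For 5 GHz channels .hw_value packs two fields: the PHY channel
 * bonding state in the upper bits (<< HW_VALUE_PHY_SHIFT) and the
 * channel number in the lower bits, recovered elsewhere via
 * HW_VALUE_CHANNEL().
 */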
/* The wcn firmware expects channel values to match
 * their mnemonic values. So use these for .hw_value. */
static struct ieee80211_channel wcn_2ghz_channels[] = {
CHAN2G(2412, 1), /* Channel 1 */
CHAN2G(2417, 2), /* Channel 2 */
CHAN2G(2422, 3), /* Channel 3 */
CHAN2G(2427, 4), /* Channel 4 */
CHAN2G(2432, 5), /* Channel 5 */
CHAN2G(2437, 6), /* Channel 6 */
CHAN2G(2442, 7), /* Channel 7 */
CHAN2G(2447, 8), /* Channel 8 */
CHAN2G(2452, 9), /* Channel 9 */
CHAN2G(2457, 10), /* Channel 10 */
CHAN2G(2462, 11), /* Channel 11 */
CHAN2G(2467, 12), /* Channel 12 */
CHAN2G(2472, 13), /* Channel 13 */
CHAN2G(2484, 14) /* Channel 14 */
};
static struct ieee80211_channel wcn_5ghz_channels[] = {
CHAN5G(5180, 36, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
CHAN5G(5200, 40, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
CHAN5G(5220, 44, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
CHAN5G(5240, 48, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
CHAN5G(5260, 52, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
CHAN5G(5280, 56, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
CHAN5G(5300, 60, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
CHAN5G(5320, 64, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
CHAN5G(5500, 100, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
CHAN5G(5520, 104, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
CHAN5G(5540, 108, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
CHAN5G(5560, 112, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
CHAN5G(5580, 116, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
CHAN5G(5600, 120, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
CHAN5G(5620, 124, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
CHAN5G(5640, 128, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
CHAN5G(5660, 132, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
CHAN5G(5680, 136, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
CHAN5G(5700, 140, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
CHAN5G(5720, 144, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
CHAN5G(5745, 149, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW),
CHAN5G(5765, 153, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW),
CHAN5G(5785, 157, PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH),
CHAN5G(5805, 161, PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH),
CHAN5G(5825, 165, 0)
};
#define RATE(_bitrate, _hw_rate, _flags) { \
.bitrate = (_bitrate), \
.flags = (_flags), \
.hw_value = (_hw_rate), \
.hw_value_short = (_hw_rate) \
}
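/*
 * .bitrate is in units of 100 kbps per mac80211 convention, so
 * RATE(10, ...) is 1 Mbps and RATE(540, ...) is 54 Mbps.
 */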
static struct ieee80211_rate wcn_2ghz_rates[] = {
RATE(10, HW_RATE_INDEX_1MBPS, 0),
RATE(20, HW_RATE_INDEX_2MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(55, HW_RATE_INDEX_5_5MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(110, HW_RATE_INDEX_11MBPS, IEEE80211_RATE_SHORT_PREAMBLE),
RATE(60, HW_RATE_INDEX_6MBPS, 0),
RATE(90, HW_RATE_INDEX_9MBPS, 0),
RATE(120, HW_RATE_INDEX_12MBPS, 0),
RATE(180, HW_RATE_INDEX_18MBPS, 0),
RATE(240, HW_RATE_INDEX_24MBPS, 0),
RATE(360, HW_RATE_INDEX_36MBPS, 0),
RATE(480, HW_RATE_INDEX_48MBPS, 0),
RATE(540, HW_RATE_INDEX_54MBPS, 0)
};
static struct ieee80211_rate wcn_5ghz_rates[] = {
RATE(60, HW_RATE_INDEX_6MBPS, 0),
RATE(90, HW_RATE_INDEX_9MBPS, 0),
RATE(120, HW_RATE_INDEX_12MBPS, 0),
RATE(180, HW_RATE_INDEX_18MBPS, 0),
RATE(240, HW_RATE_INDEX_24MBPS, 0),
RATE(360, HW_RATE_INDEX_36MBPS, 0),
RATE(480, HW_RATE_INDEX_48MBPS, 0),
RATE(540, HW_RATE_INDEX_54MBPS, 0)
};
static struct ieee80211_supported_band wcn_band_2ghz = {
.channels = wcn_2ghz_channels,
.n_channels = ARRAY_SIZE(wcn_2ghz_channels),
.bitrates = wcn_2ghz_rates,
.n_bitrates = ARRAY_SIZE(wcn_2ghz_rates),
.ht_cap = {
.cap = IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_DSSSCCK40 |
IEEE80211_HT_CAP_LSIG_TXOP_PROT |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_SUP_WIDTH_20_40,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
.mcs = {
.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
.rx_highest = cpu_to_le16(72),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
}
}
};
static struct ieee80211_supported_band wcn_band_5ghz = {
.channels = wcn_5ghz_channels,
.n_channels = ARRAY_SIZE(wcn_5ghz_channels),
.bitrates = wcn_5ghz_rates,
.n_bitrates = ARRAY_SIZE(wcn_5ghz_rates),
.ht_cap = {
.cap = IEEE80211_HT_CAP_GRN_FLD |
IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_DSSSCCK40 |
IEEE80211_HT_CAP_LSIG_TXOP_PROT |
IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_SUP_WIDTH_20_40,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
.mcs = {
.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
.rx_highest = cpu_to_le16(150),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
}
}
};
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support wowlan_support = {
.flags = WIPHY_WOWLAN_ANY |
WIPHY_WOWLAN_MAGIC_PKT |
WIPHY_WOWLAN_SUPPORTS_GTK_REKEY
};
#endif
static inline u8 get_sta_index(struct ieee80211_vif *vif,
struct wcn36xx_sta *sta_priv)
{
return NL80211_IFTYPE_STATION == vif->type ?
sta_priv->bss_sta_index :
sta_priv->sta_index;
}
static void wcn36xx_feat_caps_info(struct wcn36xx *wcn)
{
int i;
for (i = 0; i < MAX_FEATURE_SUPPORTED; i++) {
if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, i)) {
wcn36xx_dbg(WCN36XX_DBG_MAC, "FW Cap %s\n",
wcn36xx_firmware_get_cap_name(i));
}
}
}
static int wcn36xx_start(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
int ret;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac start\n");
/* SMD initialization */
ret = wcn36xx_smd_open(wcn);
if (ret) {
wcn36xx_err("Failed to open smd channel: %d\n", ret);
goto out_err;
}
/* Allocate memory pools for Mgmt BD headers and Data BD headers */
ret = wcn36xx_dxe_allocate_mem_pools(wcn);
if (ret) {
wcn36xx_err("Failed to alloc DXE mempool: %d\n", ret);
goto out_smd_close;
}
ret = wcn36xx_dxe_alloc_ctl_blks(wcn);
if (ret) {
wcn36xx_err("Failed to alloc DXE ctl blocks: %d\n", ret);
goto out_free_dxe_pool;
}
ret = wcn36xx_smd_load_nv(wcn);
if (ret) {
wcn36xx_err("Failed to push NV to chip\n");
goto out_free_dxe_ctl;
}
ret = wcn36xx_smd_start(wcn);
if (ret) {
wcn36xx_err("Failed to start chip\n");
goto out_free_dxe_ctl;
}
if (!wcn36xx_is_fw_version(wcn, 1, 2, 2, 24)) {
ret = wcn36xx_smd_feature_caps_exchange(wcn);
if (ret)
wcn36xx_warn("Exchange feature caps failed\n");
else
wcn36xx_feat_caps_info(wcn);
}
/* DMA channel initialization */
ret = wcn36xx_dxe_init(wcn);
if (ret) {
wcn36xx_err("DXE init failed\n");
goto out_smd_stop;
}
wcn36xx_debugfs_init(wcn);
INIT_LIST_HEAD(&wcn->vif_list);
spin_lock_init(&wcn->dxe_lock);
spin_lock_init(&wcn->survey_lock);
return 0;
out_smd_stop:
wcn36xx_smd_stop(wcn);
out_free_dxe_ctl:
wcn36xx_dxe_free_ctl_blks(wcn);
out_free_dxe_pool:
wcn36xx_dxe_free_mem_pools(wcn);
out_smd_close:
wcn36xx_smd_close(wcn);
out_err:
return ret;
}
static void wcn36xx_stop(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac stop\n");
mutex_lock(&wcn->scan_lock);
if (wcn->scan_req) {
struct cfg80211_scan_info scan_info = {
.aborted = true,
};
ieee80211_scan_completed(wcn->hw, &scan_info);
}
wcn->scan_req = NULL;
mutex_unlock(&wcn->scan_lock);
wcn36xx_debugfs_exit(wcn);
wcn36xx_smd_stop(wcn);
wcn36xx_dxe_deinit(wcn);
wcn36xx_smd_close(wcn);
wcn36xx_dxe_free_mem_pools(wcn);
wcn36xx_dxe_free_ctl_blks(wcn);
}
static void wcn36xx_change_ps(struct wcn36xx *wcn, bool enable)
{
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *tmp;
list_for_each_entry(tmp, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(tmp);
if (enable && !wcn->sw_scan) {
if (vif->cfg.ps) /* ps allowed ? */
wcn36xx_pmc_enter_bmps_state(wcn, vif);
} else {
wcn36xx_pmc_exit_bmps_state(wcn, vif);
}
}
}
static void wcn36xx_change_opchannel(struct wcn36xx *wcn, int ch)
{
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *tmp;
struct ieee80211_supported_band *band;
struct ieee80211_channel *channel = NULL;
unsigned long flags;
int i, j;
for (i = 0; i < ARRAY_SIZE(wcn->hw->wiphy->bands); i++) {
band = wcn->hw->wiphy->bands[i];
if (!band)
break;
for (j = 0; j < band->n_channels; j++) {
if (HW_VALUE_CHANNEL(band->channels[j].hw_value) == ch) {
channel = &band->channels[j];
break;
}
}
if (channel)
break;
}
if (!channel) {
wcn36xx_err("Cannot tune to channel %d\n", ch);
return;
}
spin_lock_irqsave(&wcn->survey_lock, flags);
wcn->band = band;
wcn->channel = channel;
spin_unlock_irqrestore(&wcn->survey_lock, flags);
list_for_each_entry(tmp, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(tmp);
wcn36xx_smd_switch_channel(wcn, vif, ch);
}
return;
}
static int wcn36xx_config(struct ieee80211_hw *hw, u32 changed)
{
struct wcn36xx *wcn = hw->priv;
int ret;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac config changed 0x%08x\n", changed);
mutex_lock(&wcn->conf_mutex);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
int ch = WCN36XX_HW_CHANNEL(wcn);
wcn36xx_dbg(WCN36XX_DBG_MAC, "wcn36xx_config channel switch=%d\n",
ch);
if (wcn->sw_scan_opchannel == ch && wcn->sw_scan_channel) {
/* If the channel is the initial operating channel, we may
 * want to receive/transmit regular data packets; in that
 * case simply stop the scan session and exit PS mode.
 */
if (wcn->sw_scan_channel)
wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel);
if (wcn->sw_scan_init) {
wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
wcn->sw_scan_vif);
}
} else if (wcn->sw_scan) {
/* A scan is ongoing, do not change the operating
* channel, but start a scan session on the channel.
*/
if (wcn->sw_scan_channel)
wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel);
if (!wcn->sw_scan_init) {
/* This can fail if we are unable to notify the
 * firmware about the operating channel.
 */
ret = wcn36xx_smd_init_scan(wcn,
HAL_SYS_MODE_SCAN,
wcn->sw_scan_vif);
if (ret) {
mutex_unlock(&wcn->conf_mutex);
return -EIO;
}
}
wcn36xx_smd_start_scan(wcn, ch);
} else {
wcn36xx_change_opchannel(wcn, ch);
}
}
if (changed & IEEE80211_CONF_CHANGE_PS)
wcn36xx_change_ps(wcn, hw->conf.flags & IEEE80211_CONF_PS);
if (changed & IEEE80211_CONF_CHANGE_IDLE) {
if (hw->conf.flags & IEEE80211_CONF_IDLE)
wcn36xx_smd_enter_imps(wcn);
else
wcn36xx_smd_exit_imps(wcn);
}
mutex_unlock(&wcn->conf_mutex);
return 0;
}
static void wcn36xx_configure_filter(struct ieee80211_hw *hw,
unsigned int changed,
unsigned int *total, u64 multicast)
{
struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp;
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *tmp;
struct ieee80211_vif *vif = NULL;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac configure filter\n");
mutex_lock(&wcn->conf_mutex);
*total &= FIF_ALLMULTI;
fp = (void *)(unsigned long)multicast;
list_for_each_entry(tmp, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(tmp);
/* FW handles MC filtering only when connected as STA */
if (*total & FIF_ALLMULTI)
wcn36xx_smd_set_mc_list(wcn, vif, NULL);
else if (NL80211_IFTYPE_STATION == vif->type && tmp->sta_assoc)
wcn36xx_smd_set_mc_list(wcn, vif, fp);
}
mutex_unlock(&wcn->conf_mutex);
kfree(fp);
}
static u64 wcn36xx_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
struct wcn36xx_hal_rcv_flt_mc_addr_list_type *fp;
struct netdev_hw_addr *ha;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac prepare multicast list\n");
fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
if (!fp) {
wcn36xx_err("Out of memory setting filters.\n");
return 0;
}
fp->mc_addr_count = 0;
/* update multicast filtering parameters */
if (netdev_hw_addr_list_count(mc_list) <=
WCN36XX_HAL_MAX_NUM_MULTICAST_ADDRESS) {
netdev_hw_addr_list_for_each(ha, mc_list) {
memcpy(fp->mc_addr[fp->mc_addr_count],
ha->addr, ETH_ALEN);
fp->mc_addr_count++;
}
}
return (u64)(unsigned long)fp;
}
static void wcn36xx_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_sta *sta_priv = NULL;
if (control->sta)
sta_priv = wcn36xx_sta_to_priv(control->sta);
if (wcn36xx_start_tx(wcn, sta_priv, skb))
ieee80211_free_txskb(wcn->hw, skb);
}
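/* Install or remove pairwise and group keys in the firmware. TKIP
 * key material is reordered (see below), and WEP group keys are also
 * installed as per-station keys for every known station.
 */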
static int wcn36xx_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf)
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_sta *sta_priv = sta ? wcn36xx_sta_to_priv(sta) : NULL;
int ret = 0;
u8 key[WLAN_MAX_KEY_LEN];
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac80211 set key\n");
wcn36xx_dbg(WCN36XX_DBG_MAC, "Key: cmd=0x%x algo:0x%x, id:%d, len:%d flags 0x%x\n",
cmd, key_conf->cipher, key_conf->keyidx,
key_conf->keylen, key_conf->flags);
wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "KEY: ",
key_conf->key,
key_conf->keylen);
mutex_lock(&wcn->conf_mutex);
switch (key_conf->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP40;
break;
case WLAN_CIPHER_SUITE_WEP104:
vif_priv->encrypt_type = WCN36XX_HAL_ED_WEP104;
break;
case WLAN_CIPHER_SUITE_CCMP:
vif_priv->encrypt_type = WCN36XX_HAL_ED_CCMP;
break;
case WLAN_CIPHER_SUITE_TKIP:
vif_priv->encrypt_type = WCN36XX_HAL_ED_TKIP;
break;
default:
wcn36xx_err("Unsupported key type 0x%x\n",
key_conf->cipher);
ret = -EOPNOTSUPP;
goto out;
}
switch (cmd) {
case SET_KEY:
if (WCN36XX_HAL_ED_TKIP == vif_priv->encrypt_type) {
/*
* Supplicant is sending key in the wrong order:
* Temporal Key (16 b) - TX MIC (8 b) - RX MIC (8 b)
* but HW expects it to be in the order as described in
* IEEE 802.11 spec (see chapter 11.7) like this:
* Temporal Key (16 b) - RX MIC (8 b) - TX MIC (8 b)
*/
memcpy(key, key_conf->key, 16);
memcpy(key + 16, key_conf->key + 24, 8);
memcpy(key + 24, key_conf->key + 16, 8);
} else {
memcpy(key, key_conf->key, key_conf->keylen);
}
if (IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags) {
sta_priv->is_data_encrypted = true;
/* Reconfigure bss with encrypt_type */
if (NL80211_IFTYPE_STATION == vif->type) {
wcn36xx_smd_config_bss(wcn,
vif,
sta,
sta->addr,
true);
wcn36xx_smd_config_sta(wcn, vif, sta);
}
wcn36xx_smd_set_stakey(wcn,
vif_priv->encrypt_type,
key_conf->keyidx,
key_conf->keylen,
key,
get_sta_index(vif, sta_priv));
} else {
wcn36xx_smd_set_bsskey(wcn,
vif_priv->encrypt_type,
vif_priv->bss_index,
key_conf->keyidx,
key_conf->keylen,
key);
if ((WLAN_CIPHER_SUITE_WEP40 == key_conf->cipher) ||
(WLAN_CIPHER_SUITE_WEP104 == key_conf->cipher)) {
list_for_each_entry(sta_priv,
&vif_priv->sta_list, list) {
sta_priv->is_data_encrypted = true;
wcn36xx_smd_set_stakey(wcn,
vif_priv->encrypt_type,
key_conf->keyidx,
key_conf->keylen,
key,
get_sta_index(vif, sta_priv));
}
}
}
break;
case DISABLE_KEY:
if (!(IEEE80211_KEY_FLAG_PAIRWISE & key_conf->flags)) {
if (vif_priv->bss_index != WCN36XX_HAL_BSS_INVALID_IDX)
wcn36xx_smd_remove_bsskey(wcn,
vif_priv->encrypt_type,
vif_priv->bss_index,
key_conf->keyidx);
vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
} else {
sta_priv->is_data_encrypted = false;
/* do not remove key if disassociated */
if (sta_priv->aid)
wcn36xx_smd_remove_stakey(wcn,
vif_priv->encrypt_type,
key_conf->keyidx,
get_sta_index(vif, sta_priv));
}
break;
default:
wcn36xx_err("Unsupported key cmd 0x%x\n", cmd);
ret = -EOPNOTSUPP;
goto out;
}
out:
mutex_unlock(&wcn->conf_mutex);
return ret;
}
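/* Offloaded hardware scan. Returning 1 makes mac80211 fall back to
 * software scanning, which is done when the firmware lacks
 * SCAN_OFFLOAD support or the request exceeds the 48-channel
 * offload limit.
 */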
static int wcn36xx_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct wcn36xx *wcn = hw->priv;
if (!wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
/* fallback to mac80211 software scan */
return 1;
}
/* Firmware scan offload is limited to 48 channels, fallback to
* software driven scanning otherwise.
*/
if (hw_req->req.n_channels > 48) {
wcn36xx_warn("Offload scan aborted, n_channels=%u",
hw_req->req.n_channels);
return 1;
}
mutex_lock(&wcn->scan_lock);
if (wcn->scan_req) {
mutex_unlock(&wcn->scan_lock);
return -EBUSY;
}
wcn->scan_aborted = false;
wcn->scan_req = &hw_req->req;
mutex_unlock(&wcn->scan_lock);
wcn36xx_smd_update_channel_list(wcn, &hw_req->req);
return wcn36xx_smd_start_hw_scan(wcn, vif, &hw_req->req);
}
static void wcn36xx_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wcn36xx *wcn = hw->priv;
mutex_lock(&wcn->scan_lock);
wcn->scan_aborted = true;
mutex_unlock(&wcn->scan_lock);
if (wcn36xx_firmware_get_feat_caps(wcn->fw_feat_caps, SCAN_OFFLOAD)) {
/* ieee80211_scan_completed will be called on FW scan
 * indication
 */
wcn36xx_smd_stop_hw_scan(wcn);
}
}
static void wcn36xx_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
wcn36xx_dbg(WCN36XX_DBG_MAC, "sw_scan_start");
wcn->sw_scan = true;
wcn->sw_scan_vif = vif;
wcn->sw_scan_channel = 0;
if (vif_priv->sta_assoc)
wcn->sw_scan_opchannel = WCN36XX_HW_CHANNEL(wcn);
else
wcn->sw_scan_opchannel = 0;
}
static void wcn36xx_sw_scan_complete(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wcn36xx *wcn = hw->priv;
wcn36xx_dbg(WCN36XX_DBG_MAC, "sw_scan_complete");
/* ensure that any scan session is finished */
if (wcn->sw_scan_channel)
wcn36xx_smd_end_scan(wcn, wcn->sw_scan_channel);
if (wcn->sw_scan_init) {
wcn36xx_smd_finish_scan(wcn, HAL_SYS_MODE_SCAN,
wcn->sw_scan_vif);
}
wcn->sw_scan = false;
wcn->sw_scan_opchannel = 0;
}
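/* Translate the mac80211 rate bitmap and HT/VHT capabilities of a
 * station into the firmware's supported_rates representation.
 */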
static void wcn36xx_update_allowed_rates(struct ieee80211_sta *sta,
enum nl80211_band band)
{
int i, size;
u16 *rates_table;
struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
u32 rates = sta->deflink.supp_rates[band];
memset(&sta_priv->supported_rates, 0,
sizeof(sta_priv->supported_rates));
sta_priv->supported_rates.op_rate_mode = STA_11n;
size = ARRAY_SIZE(sta_priv->supported_rates.dsss_rates);
rates_table = sta_priv->supported_rates.dsss_rates;
if (band == NL80211_BAND_2GHZ) {
for (i = 0; i < size; i++) {
if (rates & 0x01) {
rates_table[i] = wcn_2ghz_rates[i].hw_value;
rates = rates >> 1;
}
}
}
size = ARRAY_SIZE(sta_priv->supported_rates.ofdm_rates);
rates_table = sta_priv->supported_rates.ofdm_rates;
for (i = 0; i < size; i++) {
if (rates & 0x01) {
rates_table[i] = wcn_5ghz_rates[i].hw_value;
rates = rates >> 1;
}
}
if (sta->deflink.ht_cap.ht_supported) {
BUILD_BUG_ON(sizeof(sta->deflink.ht_cap.mcs.rx_mask) >
sizeof(sta_priv->supported_rates.supported_mcs_set));
memcpy(sta_priv->supported_rates.supported_mcs_set,
sta->deflink.ht_cap.mcs.rx_mask,
sizeof(sta->deflink.ht_cap.mcs.rx_mask));
}
if (sta->deflink.vht_cap.vht_supported) {
sta_priv->supported_rates.op_rate_mode = STA_11ac;
sta_priv->supported_rates.vht_rx_mcs_map =
sta->deflink.vht_cap.vht_mcs.rx_mcs_map;
sta_priv->supported_rates.vht_tx_mcs_map =
sta->deflink.vht_cap.vht_mcs.tx_mcs_map;
}
}
void wcn36xx_set_default_rates(struct wcn36xx_hal_supported_rates *rates)
{
u16 ofdm_rates[WCN36XX_HAL_NUM_OFDM_RATES] = {
HW_RATE_INDEX_6MBPS,
HW_RATE_INDEX_9MBPS,
HW_RATE_INDEX_12MBPS,
HW_RATE_INDEX_18MBPS,
HW_RATE_INDEX_24MBPS,
HW_RATE_INDEX_36MBPS,
HW_RATE_INDEX_48MBPS,
HW_RATE_INDEX_54MBPS
};
u16 dsss_rates[WCN36XX_HAL_NUM_DSSS_RATES] = {
HW_RATE_INDEX_1MBPS,
HW_RATE_INDEX_2MBPS,
HW_RATE_INDEX_5_5MBPS,
HW_RATE_INDEX_11MBPS
};
rates->op_rate_mode = STA_11n;
memcpy(rates->dsss_rates, dsss_rates,
sizeof(*dsss_rates) * WCN36XX_HAL_NUM_DSSS_RATES);
memcpy(rates->ofdm_rates, ofdm_rates,
sizeof(*ofdm_rates) * WCN36XX_HAL_NUM_OFDM_RATES);
rates->supported_mcs_set[0] = 0xFF;
}
void wcn36xx_set_default_rates_v1(struct wcn36xx_hal_supported_rates_v1 *rates)
{
rates->op_rate_mode = STA_11ac;
rates->vht_rx_mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9;
rates->vht_tx_mcs_map = IEEE80211_VHT_MCS_SUPPORT_0_9;
}
static void wcn36xx_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u64 changed)
{
struct wcn36xx *wcn = hw->priv;
struct sk_buff *skb = NULL;
u16 tim_off, tim_len;
enum wcn36xx_hal_link_state link_state;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss info changed vif %p changed 0x%llx\n",
vif, changed);
mutex_lock(&wcn->conf_mutex);
if (changed & BSS_CHANGED_BEACON_INFO) {
wcn36xx_dbg(WCN36XX_DBG_MAC,
"mac bss changed dtim period %d\n",
bss_conf->dtim_period);
vif_priv->dtim_period = bss_conf->dtim_period;
}
if (changed & BSS_CHANGED_BSSID) {
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed_bssid %pM\n",
bss_conf->bssid);
if (!is_zero_ether_addr(bss_conf->bssid)) {
vif_priv->is_joining = true;
vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
wcn36xx_smd_set_link_st(wcn, bss_conf->bssid, vif->addr,
WCN36XX_HAL_LINK_PREASSOC_STATE);
wcn36xx_smd_join(wcn, bss_conf->bssid,
vif->addr, WCN36XX_HW_CHANNEL(wcn));
wcn36xx_smd_config_bss(wcn, vif, NULL,
bss_conf->bssid, false);
} else {
vif_priv->is_joining = false;
wcn36xx_smd_delete_bss(wcn, vif);
wcn36xx_smd_set_link_st(wcn, bss_conf->bssid, vif->addr,
WCN36XX_HAL_LINK_IDLE_STATE);
vif_priv->encrypt_type = WCN36XX_HAL_ED_NONE;
}
}
if (changed & BSS_CHANGED_SSID) {
wcn36xx_dbg(WCN36XX_DBG_MAC,
"mac bss changed ssid\n");
wcn36xx_dbg_dump(WCN36XX_DBG_MAC, "ssid ",
vif->cfg.ssid, vif->cfg.ssid_len);
vif_priv->ssid.length = vif->cfg.ssid_len;
memcpy(&vif_priv->ssid.ssid,
vif->cfg.ssid,
vif->cfg.ssid_len);
}
if (changed & BSS_CHANGED_ASSOC) {
vif_priv->is_joining = false;
if (vif->cfg.assoc) {
struct ieee80211_sta *sta;
struct wcn36xx_sta *sta_priv;
wcn36xx_dbg(WCN36XX_DBG_MAC,
"mac assoc bss %pM vif %pM AID=%d\n",
bss_conf->bssid,
vif->addr,
vif->cfg.aid);
vif_priv->sta_assoc = true;
/*
 * Holding conf_mutex ensures mutual exclusion with
 * wcn36xx_sta_remove() and thus guarantees that sta
 * won't be freed while we're operating on it, so we
 * do not need to hold the rcu_read_lock().
 */
sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (!sta) {
wcn36xx_err("sta %pM is not found\n",
bss_conf->bssid);
goto out;
}
sta_priv = wcn36xx_sta_to_priv(sta);
wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
wcn36xx_smd_set_link_st(wcn, bss_conf->bssid,
vif->addr,
WCN36XX_HAL_LINK_POSTASSOC_STATE);
wcn36xx_smd_config_bss(wcn, vif, sta,
bss_conf->bssid,
true);
sta_priv->aid = vif->cfg.aid;
/*
 * config_sta must be called here because this is the
 * first place where the AID is available.
 */
wcn36xx_smd_config_sta(wcn, vif, sta);
if (vif->type == NL80211_IFTYPE_STATION)
wcn36xx_smd_add_beacon_filter(wcn, vif);
wcn36xx_enable_keep_alive_null_packet(wcn, vif);
} else {
wcn36xx_dbg(WCN36XX_DBG_MAC,
"disassociated bss %pM vif %pM AID=%d\n",
bss_conf->bssid,
vif->addr,
vif->cfg.aid);
vif_priv->sta_assoc = false;
wcn36xx_smd_set_link_st(wcn,
bss_conf->bssid,
vif->addr,
WCN36XX_HAL_LINK_IDLE_STATE);
}
}
if (changed & BSS_CHANGED_AP_PROBE_RESP) {
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac bss changed ap probe resp\n");
skb = ieee80211_proberesp_get(hw, vif);
if (!skb) {
wcn36xx_err("failed to alloc probereq skb\n");
goto out;
}
wcn36xx_smd_update_proberesp_tmpl(wcn, vif, skb);
dev_kfree_skb(skb);
}
if (changed & BSS_CHANGED_BEACON_ENABLED ||
changed & BSS_CHANGED_BEACON) {
wcn36xx_dbg(WCN36XX_DBG_MAC,
"mac bss changed beacon enabled %d\n",
bss_conf->enable_beacon);
if (bss_conf->enable_beacon) {
vif_priv->dtim_period = bss_conf->dtim_period;
vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
wcn36xx_smd_config_bss(wcn, vif, NULL,
vif->addr, false);
skb = ieee80211_beacon_get_tim(hw, vif, &tim_off,
&tim_len, 0);
if (!skb) {
wcn36xx_err("failed to alloc beacon skb\n");
goto out;
}
wcn36xx_smd_send_beacon(wcn, vif, skb, tim_off, 0);
dev_kfree_skb(skb);
if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT)
link_state = WCN36XX_HAL_LINK_IBSS_STATE;
else
link_state = WCN36XX_HAL_LINK_AP_STATE;
wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
link_state);
} else {
wcn36xx_smd_delete_bss(wcn, vif);
wcn36xx_smd_set_link_st(wcn, vif->addr, vif->addr,
WCN36XX_HAL_LINK_IDLE_STATE);
}
}
out:
mutex_unlock(&wcn->conf_mutex);
}
/* this is required when using IEEE80211_HW_HAS_RATE_CONTROL */
static int wcn36xx_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wcn36xx *wcn = hw->priv;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac set RTS threshold %d\n", value);
mutex_lock(&wcn->conf_mutex);
wcn36xx_smd_update_cfg(wcn, WCN36XX_HAL_CFG_RTS_THRESHOLD, value);
mutex_unlock(&wcn->conf_mutex);
return 0;
}
static void wcn36xx_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac remove interface vif %p\n", vif);
mutex_lock(&wcn->conf_mutex);
list_del(&vif_priv->list);
wcn36xx_smd_delete_sta_self(wcn, vif->addr);
mutex_unlock(&wcn->conf_mutex);
}
static int wcn36xx_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac add interface vif %p type %d\n",
vif, vif->type);
if (!(NL80211_IFTYPE_STATION == vif->type ||
NL80211_IFTYPE_AP == vif->type ||
NL80211_IFTYPE_ADHOC == vif->type ||
NL80211_IFTYPE_MESH_POINT == vif->type)) {
wcn36xx_warn("Unsupported interface type requested: %d\n",
vif->type);
return -EOPNOTSUPP;
}
mutex_lock(&wcn->conf_mutex);
vif_priv->bss_index = WCN36XX_HAL_BSS_INVALID_IDX;
INIT_LIST_HEAD(&vif_priv->sta_list);
list_add(&vif_priv->list, &wcn->vif_list);
wcn36xx_smd_add_sta_self(wcn, vif);
mutex_unlock(&wcn->conf_mutex);
return 0;
}
static int wcn36xx_sta_add(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta add vif %p sta %pM\n",
vif, sta->addr);
mutex_lock(&wcn->conf_mutex);
spin_lock_init(&sta_priv->ampdu_lock);
sta_priv->vif = vif_priv;
list_add(&sta_priv->list, &vif_priv->sta_list);
/*
* For STA mode HW will be configured on BSS_CHANGED_ASSOC because
* at this stage AID is not available yet.
*/
if (NL80211_IFTYPE_STATION != vif->type) {
wcn36xx_update_allowed_rates(sta, WCN36XX_BAND(wcn));
sta_priv->aid = sta->aid;
wcn36xx_smd_config_sta(wcn, vif, sta);
}
mutex_unlock(&wcn->conf_mutex);
return 0;
}
static int wcn36xx_sta_remove(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(sta);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac sta remove vif %p sta %pM index %d\n",
vif, sta->addr, sta_priv->sta_index);
mutex_lock(&wcn->conf_mutex);
list_del(&sta_priv->list);
wcn36xx_smd_delete_sta(wcn, sta_priv->sta_index);
sta_priv->vif = NULL;
mutex_unlock(&wcn->conf_mutex);
return 0;
}
#ifdef CONFIG_PM
static struct ieee80211_vif *wcn36xx_get_first_assoc_vif(struct wcn36xx *wcn)
{
struct wcn36xx_vif *vif_priv = NULL;
struct ieee80211_vif *vif = NULL;
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
if (vif_priv->sta_assoc) {
vif = wcn36xx_priv_to_vif(vif_priv);
break;
}
}
return vif;
}
static int wcn36xx_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wow)
{
struct wcn36xx *wcn = hw->priv;
struct ieee80211_vif *vif = NULL;
int ret = 0;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac suspend\n");
mutex_lock(&wcn->conf_mutex);
vif = wcn36xx_get_first_assoc_vif(wcn);
if (vif) {
ret = wcn36xx_smd_arp_offload(wcn, vif, true);
if (ret)
goto out;
ret = wcn36xx_smd_ipv6_ns_offload(wcn, vif, true);
if (ret)
goto out;
ret = wcn36xx_smd_gtk_offload(wcn, vif, true);
if (ret)
goto out;
ret = wcn36xx_smd_set_power_params(wcn, true);
if (ret)
goto out;
ret = wcn36xx_smd_wlan_host_suspend_ind(wcn);
}
/* Disable IRQ, we don't want to handle any packet before mac80211 is
* resumed and ready to receive packets.
*/
disable_irq(wcn->tx_irq);
disable_irq(wcn->rx_irq);
out:
mutex_unlock(&wcn->conf_mutex);
return ret;
}
static int wcn36xx_resume(struct ieee80211_hw *hw)
{
struct wcn36xx *wcn = hw->priv;
struct ieee80211_vif *vif = NULL;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac resume\n");
mutex_lock(&wcn->conf_mutex);
vif = wcn36xx_get_first_assoc_vif(wcn);
if (vif) {
wcn36xx_smd_host_resume(wcn);
wcn36xx_smd_set_power_params(wcn, false);
wcn36xx_smd_gtk_offload_get_info(wcn, vif);
wcn36xx_smd_gtk_offload(wcn, vif, false);
wcn36xx_smd_ipv6_ns_offload(wcn, vif, false);
wcn36xx_smd_arp_offload(wcn, vif, false);
}
enable_irq(wcn->tx_irq);
enable_irq(wcn->rx_irq);
mutex_unlock(&wcn->conf_mutex);
return 0;
}
static void wcn36xx_set_rekey_data(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_gtk_rekey_data *data)
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
mutex_lock(&wcn->conf_mutex);
memcpy(vif_priv->rekey_data.kek, data->kek, NL80211_KEK_LEN);
memcpy(vif_priv->rekey_data.kck, data->kck, NL80211_KCK_LEN);
vif_priv->rekey_data.replay_ctr =
cpu_to_le64(be64_to_cpup((__be64 *)data->replay_ctr));
vif_priv->rekey_data.valid = true;
mutex_unlock(&wcn->conf_mutex);
}
#endif
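/* Mirror mac80211 A-MPDU state into firmware BA sessions. For TX the
 * mac80211 SSN is replaced by the firmware-provided one and
 * IEEE80211_AMPDU_TX_START_IMMEDIATE is returned, since the firmware
 * handles the block-ack negotiation itself.
 */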
static int wcn36xx_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
struct wcn36xx *wcn = hw->priv;
struct wcn36xx_sta *sta_priv = wcn36xx_sta_to_priv(params->sta);
struct ieee80211_sta *sta = params->sta;
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
int ret = 0;
int session;
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu action action %d tid %d\n",
action, tid);
mutex_lock(&wcn->conf_mutex);
switch (action) {
case IEEE80211_AMPDU_RX_START:
sta_priv->tid = tid;
session = wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 0,
get_sta_index(vif, sta_priv));
if (session < 0) {
ret = session;
goto out;
}
wcn36xx_smd_add_ba(wcn, session);
break;
case IEEE80211_AMPDU_RX_STOP:
wcn36xx_smd_del_ba(wcn, tid, 0, get_sta_index(vif, sta_priv));
break;
case IEEE80211_AMPDU_TX_START:
spin_lock_bh(&sta_priv->ampdu_lock);
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
spin_unlock_bh(&sta_priv->ampdu_lock);
/* Replace the mac80211 ssn with the firmware one */
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu ssn = %u\n", *ssn);
wcn36xx_smd_trigger_ba(wcn, get_sta_index(vif, sta_priv), tid, ssn);
wcn36xx_dbg(WCN36XX_DBG_MAC, "mac ampdu fw-ssn = %u\n", *ssn);
/* Start BA session */
session = wcn36xx_smd_add_ba_session(wcn, sta, tid, ssn, 1,
get_sta_index(vif, sta_priv));
if (session < 0) {
ret = session;
goto out;
}
ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
break;
case IEEE80211_AMPDU_TX_OPERATIONAL:
spin_lock_bh(&sta_priv->ampdu_lock);
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_OPERATIONAL;
spin_unlock_bh(&sta_priv->ampdu_lock);
break;
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
case IEEE80211_AMPDU_TX_STOP_CONT:
spin_lock_bh(&sta_priv->ampdu_lock);
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_NONE;
spin_unlock_bh(&sta_priv->ampdu_lock);
wcn36xx_smd_del_ba(wcn, tid, 1, get_sta_index(vif, sta_priv));
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
default:
wcn36xx_err("Unknown AMPDU action\n");
}
out:
mutex_unlock(&wcn->conf_mutex);
return ret;
}
#if IS_ENABLED(CONFIG_IPV6)
static void wcn36xx_ipv6_addr_change(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct inet6_dev *idev)
{
struct wcn36xx_vif *vif_priv = wcn36xx_vif_to_priv(vif);
struct inet6_ifaddr *ifa;
int idx = 0;
memset(vif_priv->tentative_addrs, 0, sizeof(vif_priv->tentative_addrs));
read_lock_bh(&idev->lock);
list_for_each_entry(ifa, &idev->addr_list, if_list) {
vif_priv->target_ipv6_addrs[idx] = ifa->addr;
if (ifa->flags & IFA_F_TENTATIVE)
__set_bit(idx, vif_priv->tentative_addrs);
idx++;
if (idx >= WCN36XX_HAL_IPV6_OFFLOAD_ADDR_MAX)
break;
wcn36xx_dbg(WCN36XX_DBG_MAC, "%pI6 %s\n", &ifa->addr,
(ifa->flags & IFA_F_TENTATIVE) ? "tentative" : "");
}
read_unlock_bh(&idev->lock);
vif_priv->num_target_ipv6_addrs = idx;
}
#endif
static void wcn36xx_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct wcn36xx *wcn = hw->priv;
if (wcn36xx_dxe_tx_flush(wcn)) {
wcn36xx_err("Failed to flush hardware tx queues\n");
}
}
static int wcn36xx_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct wcn36xx *wcn = hw->priv;
struct ieee80211_supported_band *sband;
struct wcn36xx_chan_survey *chan_survey;
int band_idx;
unsigned long flags;
sband = wcn->hw->wiphy->bands[NL80211_BAND_2GHZ];
band_idx = idx;
if (band_idx >= sband->n_channels) {
band_idx -= sband->n_channels;
sband = wcn->hw->wiphy->bands[NL80211_BAND_5GHZ];
}
if (!sband || band_idx >= sband->n_channels)
return -ENOENT;
spin_lock_irqsave(&wcn->survey_lock, flags);
chan_survey = &wcn->chan_survey[idx];
survey->channel = &sband->channels[band_idx];
survey->noise = chan_survey->rssi - chan_survey->snr;
survey->filled = 0;
if (chan_survey->rssi > -100 && chan_survey->rssi < 0)
survey->filled |= SURVEY_INFO_NOISE_DBM;
if (survey->channel == wcn->channel)
survey->filled |= SURVEY_INFO_IN_USE;
spin_unlock_irqrestore(&wcn->survey_lock, flags);
wcn36xx_dbg(WCN36XX_DBG_MAC,
"ch %d rssi %d snr %d noise %d filled %x freq %d\n",
HW_VALUE_CHANNEL(survey->channel->hw_value),
chan_survey->rssi, chan_survey->snr, survey->noise,
survey->filled, survey->channel->center_freq);
return 0;
}
static void wcn36xx_sta_statistics(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct station_info *sinfo)
{
struct wcn36xx *wcn;
u8 sta_index;
int status;
wcn = hw->priv;
sta_index = get_sta_index(vif, wcn36xx_sta_to_priv(sta));
status = wcn36xx_smd_get_stats(wcn, sta_index, HAL_GLOBAL_CLASS_A_STATS_INFO, sinfo);
if (status)
wcn36xx_err("wcn36xx_smd_get_stats failed\n");
}
static const struct ieee80211_ops wcn36xx_ops = {
.start = wcn36xx_start,
.stop = wcn36xx_stop,
.add_interface = wcn36xx_add_interface,
.remove_interface = wcn36xx_remove_interface,
#ifdef CONFIG_PM
.suspend = wcn36xx_suspend,
.resume = wcn36xx_resume,
.set_rekey_data = wcn36xx_set_rekey_data,
#endif
.config = wcn36xx_config,
.prepare_multicast = wcn36xx_prepare_multicast,
.configure_filter = wcn36xx_configure_filter,
.tx = wcn36xx_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.set_key = wcn36xx_set_key,
.hw_scan = wcn36xx_hw_scan,
.cancel_hw_scan = wcn36xx_cancel_hw_scan,
.sw_scan_start = wcn36xx_sw_scan_start,
.sw_scan_complete = wcn36xx_sw_scan_complete,
.bss_info_changed = wcn36xx_bss_info_changed,
.set_rts_threshold = wcn36xx_set_rts_threshold,
.sta_add = wcn36xx_sta_add,
.sta_remove = wcn36xx_sta_remove,
.sta_statistics = wcn36xx_sta_statistics,
.ampdu_action = wcn36xx_ampdu_action,
#if IS_ENABLED(CONFIG_IPV6)
.ipv6_addr_change = wcn36xx_ipv6_addr_change,
#endif
.flush = wcn36xx_flush,
.get_survey = wcn36xx_get_survey,
CFG80211_TESTMODE_CMD(wcn36xx_tm_cmd)
};
static void
wcn36xx_set_ieee80211_vht_caps(struct ieee80211_sta_vht_cap *vht_cap)
{
vht_cap->vht_supported = true;
vht_cap->cap = (IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_RXSTBC_1 |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT);
vht_cap->vht_mcs.rx_mcs_map =
cpu_to_le16(IEEE80211_VHT_MCS_SUPPORT_0_9 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 2 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 4 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 6 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 8 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 10 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
vht_cap->vht_mcs.rx_highest = cpu_to_le16(433);
vht_cap->vht_mcs.tx_highest = vht_cap->vht_mcs.rx_highest;
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
static int wcn36xx_init_ieee80211(struct wcn36xx *wcn)
{
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
};
ieee80211_hw_set(wcn->hw, TIMING_BEACON_ONLY);
ieee80211_hw_set(wcn->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(wcn->hw, SUPPORTS_PS);
ieee80211_hw_set(wcn->hw, SIGNAL_DBM);
ieee80211_hw_set(wcn->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(wcn->hw, SINGLE_SCAN_ON_ALL_BANDS);
ieee80211_hw_set(wcn->hw, REPORTS_TX_ACK_STATUS);
wcn->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
wcn->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wcn_band_2ghz;
if (wcn->rf_id != RF_IRIS_WCN3620)
wcn->hw->wiphy->bands[NL80211_BAND_5GHZ] = &wcn_band_5ghz;
if (wcn->rf_id == RF_IRIS_WCN3680)
wcn36xx_set_ieee80211_vht_caps(&wcn_band_5ghz.vht_cap);
wcn->hw->wiphy->max_scan_ssids = WCN36XX_MAX_SCAN_SSIDS;
wcn->hw->wiphy->max_scan_ie_len = WCN36XX_MAX_SCAN_IE_LEN;
wcn->hw->wiphy->cipher_suites = cipher_suites;
wcn->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
#ifdef CONFIG_PM
wcn->hw->wiphy->wowlan = &wowlan_support;
#endif
wcn->hw->max_listen_interval = 200;
wcn->hw->queues = 4;
SET_IEEE80211_DEV(wcn->hw, wcn->dev);
wcn->hw->sta_data_size = sizeof(struct wcn36xx_sta);
wcn->hw->vif_data_size = sizeof(struct wcn36xx_vif);
wiphy_ext_feature_set(wcn->hw->wiphy,
NL80211_EXT_FEATURE_CQM_RSSI_LIST);
return 0;
}
static int wcn36xx_platform_get_resources(struct wcn36xx *wcn,
struct platform_device *pdev)
{
struct device_node *mmio_node;
struct device_node *iris_node;
int index;
int ret;
/* Set TX IRQ */
ret = platform_get_irq_byname(pdev, "tx");
if (ret < 0)
return ret;
wcn->tx_irq = ret;
/* Set RX IRQ */
ret = platform_get_irq_byname(pdev, "rx");
if (ret < 0)
return ret;
wcn->rx_irq = ret;
/* Acquire SMSM tx enable handle */
wcn->tx_enable_state = qcom_smem_state_get(&pdev->dev,
"tx-enable", &wcn->tx_enable_state_bit);
if (IS_ERR(wcn->tx_enable_state)) {
wcn36xx_err("failed to get tx-enable state\n");
return PTR_ERR(wcn->tx_enable_state);
}
/* Acquire SMSM tx rings empty handle */
wcn->tx_rings_empty_state = qcom_smem_state_get(&pdev->dev,
"tx-rings-empty", &wcn->tx_rings_empty_state_bit);
if (IS_ERR(wcn->tx_rings_empty_state)) {
wcn36xx_err("failed to get tx-rings-empty state\n");
return PTR_ERR(wcn->tx_rings_empty_state);
}
mmio_node = of_parse_phandle(pdev->dev.parent->of_node, "qcom,mmio", 0);
if (!mmio_node) {
wcn36xx_err("failed to acquire qcom,mmio reference\n");
return -EINVAL;
}
wcn->is_pronto = !!of_device_is_compatible(mmio_node, "qcom,pronto");
wcn->is_pronto_v3 = !!of_device_is_compatible(mmio_node, "qcom,pronto-v3-pil");
/* Map the CCU memory */
index = of_property_match_string(mmio_node, "reg-names", "ccu");
wcn->ccu_base = of_iomap(mmio_node, index);
if (!wcn->ccu_base) {
wcn36xx_err("failed to map ccu memory\n");
ret = -ENOMEM;
goto put_mmio_node;
}
/* Map the DXE memory */
index = of_property_match_string(mmio_node, "reg-names", "dxe");
wcn->dxe_base = of_iomap(mmio_node, index);
if (!wcn->dxe_base) {
wcn36xx_err("failed to map dxe memory\n");
ret = -ENOMEM;
goto unmap_ccu;
}
/* External RF module */
iris_node = of_get_child_by_name(mmio_node, "iris");
if (iris_node) {
if (of_device_is_compatible(iris_node, "qcom,wcn3620"))
wcn->rf_id = RF_IRIS_WCN3620;
if (of_device_is_compatible(iris_node, "qcom,wcn3660") ||
of_device_is_compatible(iris_node, "qcom,wcn3660b"))
wcn->rf_id = RF_IRIS_WCN3660;
if (of_device_is_compatible(iris_node, "qcom,wcn3680"))
wcn->rf_id = RF_IRIS_WCN3680;
of_node_put(iris_node);
}
of_node_put(mmio_node);
return 0;
unmap_ccu:
iounmap(wcn->ccu_base);
put_mmio_node:
of_node_put(mmio_node);
return ret;
}
static int wcn36xx_probe(struct platform_device *pdev)
{
struct ieee80211_hw *hw;
struct wcn36xx *wcn;
void *wcnss;
int ret;
const u8 *addr;
int n_channels;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform probe\n");
wcnss = dev_get_drvdata(pdev->dev.parent);
hw = ieee80211_alloc_hw(sizeof(struct wcn36xx), &wcn36xx_ops);
if (!hw) {
wcn36xx_err("failed to alloc hw\n");
ret = -ENOMEM;
goto out_err;
}
platform_set_drvdata(pdev, hw);
wcn = hw->priv;
wcn->hw = hw;
wcn->dev = &pdev->dev;
wcn->first_boot = true;
mutex_init(&wcn->conf_mutex);
mutex_init(&wcn->hal_mutex);
mutex_init(&wcn->scan_lock);
__skb_queue_head_init(&wcn->amsdu);
wcn->hal_buf = devm_kmalloc(wcn->dev, WCN36XX_HAL_BUF_SIZE, GFP_KERNEL);
if (!wcn->hal_buf) {
ret = -ENOMEM;
goto out_wq;
}
n_channels = wcn_band_2ghz.n_channels + wcn_band_5ghz.n_channels;
/* allocate one wcn36xx_chan_survey per channel, not one byte */
wcn->chan_survey = devm_kcalloc(wcn->dev, n_channels,
sizeof(struct wcn36xx_chan_survey),
GFP_KERNEL);
if (!wcn->chan_survey) {
ret = -ENOMEM;
goto out_wq;
}
ret = dma_set_mask_and_coherent(wcn->dev, DMA_BIT_MASK(32));
if (ret < 0) {
wcn36xx_err("failed to set DMA mask: %d\n", ret);
goto out_wq;
}
wcn->nv_file = WLAN_NV_FILE;
ret = of_property_read_string(wcn->dev->parent->of_node, "firmware-name", &wcn->nv_file);
if (ret < 0 && ret != -EINVAL) {
wcn36xx_err("failed to read \"firmware-name\" property: %d\n", ret);
goto out_wq;
}
wcn->smd_channel = qcom_wcnss_open_channel(wcnss, "WLAN_CTRL", wcn36xx_smd_rsp_process, hw);
if (IS_ERR(wcn->smd_channel)) {
wcn36xx_err("failed to open WLAN_CTRL channel\n");
ret = PTR_ERR(wcn->smd_channel);
goto out_wq;
}
addr = of_get_property(pdev->dev.of_node, "local-mac-address", &ret);
if (addr && ret != ETH_ALEN) {
wcn36xx_err("invalid local-mac-address\n");
ret = -EINVAL;
goto out_destroy_ept;
} else if (addr) {
wcn36xx_info("mac address: %pM\n", addr);
SET_IEEE80211_PERM_ADDR(wcn->hw, addr);
}
ret = wcn36xx_platform_get_resources(wcn, pdev);
if (ret)
goto out_destroy_ept;
wcn36xx_init_ieee80211(wcn);
ret = ieee80211_register_hw(wcn->hw);
if (ret)
goto out_unmap;
return 0;
out_unmap:
iounmap(wcn->ccu_base);
iounmap(wcn->dxe_base);
out_destroy_ept:
rpmsg_destroy_ept(wcn->smd_channel);
out_wq:
ieee80211_free_hw(hw);
out_err:
return ret;
}
static int wcn36xx_remove(struct platform_device *pdev)
{
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
struct wcn36xx *wcn = hw->priv;
wcn36xx_dbg(WCN36XX_DBG_MAC, "platform remove\n");
release_firmware(wcn->nv);
ieee80211_unregister_hw(hw);
qcom_smem_state_put(wcn->tx_enable_state);
qcom_smem_state_put(wcn->tx_rings_empty_state);
rpmsg_destroy_ept(wcn->smd_channel);
iounmap(wcn->dxe_base);
iounmap(wcn->ccu_base);
__skb_queue_purge(&wcn->amsdu);
mutex_destroy(&wcn->hal_mutex);
ieee80211_free_hw(hw);
return 0;
}
static const struct of_device_id wcn36xx_of_match[] = {
{ .compatible = "qcom,wcnss-wlan" },
{}
};
MODULE_DEVICE_TABLE(of, wcn36xx_of_match);
static struct platform_driver wcn36xx_driver = {
.probe = wcn36xx_probe,
.remove = wcn36xx_remove,
.driver = {
.name = "wcn36xx",
.of_match_table = wcn36xx_of_match,
},
};
module_platform_driver(wcn36xx_driver);
MODULE_LICENSE("Dual BSD/GPL");
MODULE_AUTHOR("Eugene Krasnikov [email protected]");
MODULE_FIRMWARE(WLAN_NV_FILE);
|
linux-master
|
drivers/net/wireless/ath/wcn36xx/main.c
|
/*
* Copyright (c) 2013 Eugene Krasnikov <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
* SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION
* OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
* CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/random.h>
#include "txrx.h"
static inline int get_rssi0(struct wcn36xx_rx_bd *bd)
{
return 100 - ((bd->phy_stat0 >> 24) & 0xff);
}
static inline int get_snr(struct wcn36xx_rx_bd *bd)
{
return ((bd->phy_stat1 >> 24) & 0xff);
}
struct wcn36xx_rate {
u16 bitrate;
u16 mcs_or_legacy_index;
enum mac80211_rx_encoding encoding;
enum mac80211_rx_encoding_flags encoding_flags;
enum rate_info_bw bw;
};
/* Buffer descriptor rx_ch field is limited to 5-bit (4+1), a mapping is used
* for 11A Channels.
*/
static const u8 ab_rx_ch_map[] = { 36, 40, 44, 48, 52, 56, 60, 64, 100, 104,
108, 112, 116, 120, 124, 128, 132, 136, 140,
149, 153, 157, 161, 165, 144 };
static const struct wcn36xx_rate wcn36xx_rate_table[] = {
/* 11b rates */
{ 10, 0, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
{ 20, 1, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
{ 55, 2, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
{ 110, 3, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
/* 11b SP (short preamble) */
{ 10, 0, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
{ 20, 1, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
{ 55, 2, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
{ 110, 3, RX_ENC_LEGACY, RX_ENC_FLAG_SHORTPRE, RATE_INFO_BW_20 },
/* 11ag */
{ 60, 4, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
{ 90, 5, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
{ 120, 6, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
{ 180, 7, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
{ 240, 8, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
{ 360, 9, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
{ 480, 10, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
{ 540, 11, RX_ENC_LEGACY, 0, RATE_INFO_BW_20 },
/* 11n */
{ 65, 0, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 130, 1, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 195, 2, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 260, 3, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 390, 4, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 520, 5, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 585, 6, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 650, 7, RX_ENC_HT, 0, RATE_INFO_BW_20 },
/* 11n SGI */
{ 72, 0, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
{ 144, 1, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
{ 217, 2, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
{ 289, 3, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
{ 434, 4, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
{ 578, 5, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
{ 650, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
{ 722, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
/* 11n GF (greenfield) */
{ 65, 0, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
{ 130, 1, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
{ 195, 2, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
{ 260, 3, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
{ 390, 4, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
{ 520, 5, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
{ 585, 6, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
{ 650, 7, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_20 },
/* 11n CB (channel bonding) */
{ 135, 0, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 270, 1, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 405, 2, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 540, 3, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 810, 4, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 1080, 5, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 1215, 6, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 1350, 7, RX_ENC_HT, 0, RATE_INFO_BW_40 },
/* 11n CB + SGI */
{ 150, 0, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 300, 1, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 450, 2, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 600, 3, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 900, 4, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1200, 5, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1500, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11n GF + CB */
{ 135, 0, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
{ 270, 1, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
{ 405, 2, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
{ 540, 3, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
{ 810, 4, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
{ 1080, 5, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
{ 1215, 6, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
{ 1350, 7, RX_ENC_HT, RX_ENC_FLAG_HT_GF, RATE_INFO_BW_40 },
/* 11ac reserved indices */
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11ac 20 MHz 800ns GI MCS 0-8 */
{ 65, 0, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 130, 1, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 195, 2, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 260, 3, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 390, 4, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 520, 5, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 585, 6, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 650, 7, RX_ENC_HT, 0, RATE_INFO_BW_20 },
{ 780, 8, RX_ENC_HT, 0, RATE_INFO_BW_20 },
/* 11ac reserved indices */
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11ac 20 MHz 400ns SGI MCS 6-8 */
{ 655, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
{ 722, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
{ 866, 8, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_20 },
/* 11ac reserved indices */
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11ac 40 MHz 800ns GI MCS 0-9 */
{ 135, 0, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 270, 1, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 405, 2, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 540, 3, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 810, 4, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 1080, 5, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 1215, 6, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 1350, 7, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 1350, 7, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 1620, 8, RX_ENC_HT, 0, RATE_INFO_BW_40 },
{ 1800, 9, RX_ENC_HT, 0, RATE_INFO_BW_40 },
/* 11ac reserved indices */
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11ac 40 MHz 400ns SGI MCS 5-7 */
{ 1200, 5, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1500, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11ac reserved index */
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11ac 40 MHz 400ns SGI MCS 8-9 */
{ 1800, 8, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 2000, 9, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11ac reserved index */
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11ac 80 MHz 800ns GI MCS 0-7 */
{ 292, 0, RX_ENC_HT, 0, RATE_INFO_BW_80},
{ 585, 1, RX_ENC_HT, 0, RATE_INFO_BW_80},
{ 877, 2, RX_ENC_HT, 0, RATE_INFO_BW_80},
{ 1170, 3, RX_ENC_HT, 0, RATE_INFO_BW_80},
{ 1755, 4, RX_ENC_HT, 0, RATE_INFO_BW_80},
{ 2340, 5, RX_ENC_HT, 0, RATE_INFO_BW_80},
{ 2632, 6, RX_ENC_HT, 0, RATE_INFO_BW_80},
{ 2925, 7, RX_ENC_HT, 0, RATE_INFO_BW_80},
/* 11ac reserved index */
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11ac 80 MHz 800 ns GI MCS 8-9 */
{ 3510, 8, RX_ENC_HT, 0, RATE_INFO_BW_80},
{ 3900, 9, RX_ENC_HT, 0, RATE_INFO_BW_80},
/* 11ac reserved indices */
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11ac 80 MHz 400 ns SGI MCS 6-7 */
{ 2925, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
{ 3250, 7, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
/* 11ac reserved index */
{ 1350, 6, RX_ENC_HT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_40 },
/* 11ac 80 MHz 400ns SGI MCS 8-9 */
{ 3900, 8, RX_ENC_VHT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
{ 4333, 9, RX_ENC_VHT, RX_ENC_FLAG_SHORT_GI, RATE_INFO_BW_80 },
};
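/* Collapse a queue of chained A-MSDU buffers into the first skb: grow
 * the head's tailroom to fit the remaining chunks, then copy each
 * chunk in and free it. Returns NULL (requeueing the head) if the
 * head cannot be expanded.
 */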
static struct sk_buff *wcn36xx_unchain_msdu(struct sk_buff_head *amsdu)
{
struct sk_buff *skb, *first;
int total_len = 0;
int space;
first = __skb_dequeue(amsdu);
skb_queue_walk(amsdu, skb)
total_len += skb->len;
space = total_len - skb_tailroom(first);
if (space > 0 && pskb_expand_head(first, 0, space, GFP_ATOMIC) < 0) {
__skb_queue_head(amsdu, first);
return NULL;
}
/* Walk list again, copying contents into msdu_head */
while ((skb = __skb_dequeue(amsdu))) {
skb_copy_from_linear_data(skb, skb_put(first, skb->len),
skb->len);
dev_kfree_skb_irq(skb);
}
return first;
}
static void __skb_queue_purge_irq(struct sk_buff_head *list)
{
struct sk_buff *skb;
while ((skb = __skb_dequeue(list)) != NULL)
dev_kfree_skb_irq(skb);
}
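/* Record the latest RSSI/SNR sample for a channel. chan_survey is a
 * flat array holding the 2.4 GHz channels first, then the 5 GHz ones,
 * so 5 GHz indices are offset by the 2.4 GHz channel count. The SNR
 * byte also feeds the kernel entropy pool.
 */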
static void wcn36xx_update_survey(struct wcn36xx *wcn, int rssi, int snr,
int band, int freq)
{
struct ieee80211_channel *channel;
struct ieee80211_supported_band *sband;
int idx;
int i;
u8 snr_sample = snr & 0xff;
idx = 0;
if (band == NL80211_BAND_5GHZ)
idx = wcn->hw->wiphy->bands[NL80211_BAND_2GHZ]->n_channels;
sband = wcn->hw->wiphy->bands[band];
channel = sband->channels;
for (i = 0; i < sband->n_channels; i++, channel++) {
if (channel->center_freq == freq) {
idx += i;
break;
}
}
spin_lock(&wcn->survey_lock);
wcn->chan_survey[idx].rssi = rssi;
wcn->chan_survey[idx].snr = snr;
spin_unlock(&wcn->survey_lock);
add_device_randomness(&snr_sample, sizeof(snr_sample));
}
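/* Receive path: byteswap and strip the buffer descriptor, fill in
 * ieee80211_rx_status (signal, band/frequency, rate) and hand the
 * frame to mac80211. Chained A-MSDU chunks are accumulated on
 * wcn->amsdu until the last fragment arrives.
 */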
int wcn36xx_rx_skb(struct wcn36xx *wcn, struct sk_buff *skb)
{
struct ieee80211_rx_status status;
const struct wcn36xx_rate *rate;
struct ieee80211_hdr *hdr;
struct wcn36xx_rx_bd *bd;
u16 fc, sn;
/*
* All fields must be 0, otherwise it can lead to
* unexpected consequences.
*/
memset(&status, 0, sizeof(status));
bd = (struct wcn36xx_rx_bd *)skb->data;
buff_to_be((u32 *)bd, sizeof(*bd)/sizeof(u32));
wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP,
"BD <<< ", (char *)bd,
sizeof(struct wcn36xx_rx_bd));
if (bd->pdu.mpdu_data_off <= bd->pdu.mpdu_header_off ||
bd->pdu.mpdu_len < bd->pdu.mpdu_header_len)
goto drop;
if (bd->asf && !bd->esf) { /* chained A-MSDU chunks */
/* Sanity check */
if (bd->pdu.mpdu_data_off + bd->pdu.mpdu_len > WCN36XX_PKT_SIZE)
goto drop;
skb_put(skb, bd->pdu.mpdu_data_off + bd->pdu.mpdu_len);
skb_pull(skb, bd->pdu.mpdu_data_off);
/* Only set status for first chained BD (with mac header) */
goto done;
}
if (bd->pdu.mpdu_header_off < sizeof(*bd) ||
bd->pdu.mpdu_header_off + bd->pdu.mpdu_len > WCN36XX_PKT_SIZE)
goto drop;
skb_put(skb, bd->pdu.mpdu_header_off + bd->pdu.mpdu_len);
skb_pull(skb, bd->pdu.mpdu_header_off);
hdr = (struct ieee80211_hdr *) skb->data;
fc = __le16_to_cpu(hdr->frame_control);
sn = IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl));
status.mactime = 10;
status.signal = -get_rssi0(bd);
status.antenna = 1;
status.flag = 0;
status.rx_flags = 0;
status.flag |= RX_FLAG_IV_STRIPPED |
RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
wcn36xx_dbg(WCN36XX_DBG_RX, "status.flags=%x\n", status.flag);
if (bd->scan_learn) {
/* If packet originate from hardware scanning, extract the
* band/channel from bd descriptor.
*/
u8 hwch = (bd->reserved0 << 4) + bd->rx_ch;
if (bd->rf_band != 1 && hwch <= sizeof(ab_rx_ch_map) && hwch >= 1) {
status.band = NL80211_BAND_5GHZ;
status.freq = ieee80211_channel_to_frequency(ab_rx_ch_map[hwch - 1],
status.band);
} else {
status.band = NL80211_BAND_2GHZ;
status.freq = ieee80211_channel_to_frequency(hwch, status.band);
}
} else {
status.band = WCN36XX_BAND(wcn);
status.freq = WCN36XX_CENTER_FREQ(wcn);
}
wcn36xx_update_survey(wcn, status.signal, get_snr(bd),
status.band, status.freq);
if (bd->rate_id < ARRAY_SIZE(wcn36xx_rate_table)) {
rate = &wcn36xx_rate_table[bd->rate_id];
status.encoding = rate->encoding;
status.enc_flags = rate->encoding_flags;
status.bw = rate->bw;
status.rate_idx = rate->mcs_or_legacy_index;
status.nss = 1;
if (status.band == NL80211_BAND_5GHZ &&
status.encoding == RX_ENC_LEGACY &&
status.rate_idx >= 4) {
/* no DSSS rates in the 5 GHz rate table */
status.rate_idx -= 4;
}
} else {
status.encoding = 0;
status.bw = 0;
status.enc_flags = 0;
status.rate_idx = 0;
}
if (ieee80211_is_beacon(hdr->frame_control) ||
ieee80211_is_probe_resp(hdr->frame_control))
status.boottime_ns = ktime_get_boottime_ns();
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
if (ieee80211_is_beacon(hdr->frame_control)) {
wcn36xx_dbg(WCN36XX_DBG_BEACON, "beacon skb %p len %d fc %04x sn %d\n",
skb, skb->len, fc, sn);
wcn36xx_dbg_dump(WCN36XX_DBG_BEACON_DUMP, "SKB <<< ",
(char *)skb->data, skb->len);
} else {
wcn36xx_dbg(WCN36XX_DBG_RX, "rx skb %p len %d fc %04x sn %d\n",
skb, skb->len, fc, sn);
wcn36xx_dbg_dump(WCN36XX_DBG_RX_DUMP, "SKB <<< ",
(char *)skb->data, skb->len);
}
done:
/* Chained A-MSDU? Take the slow path */
if (unlikely(bd->asf && !(bd->lsf && bd->esf))) {
if (bd->esf && !skb_queue_empty(&wcn->amsdu)) {
wcn36xx_err("Discarding non complete chain");
__skb_queue_purge_irq(&wcn->amsdu);
}
__skb_queue_tail(&wcn->amsdu, skb);
if (!bd->lsf)
return 0; /* Not the last AMSDU, wait for more */
skb = wcn36xx_unchain_msdu(&wcn->amsdu);
if (!skb)
goto drop;
}
ieee80211_rx_irqsafe(wcn->hw, skb);
return 0;
drop: /* drop everything */
wcn36xx_err("Drop frame! skb:%p len:%u hoff:%u doff:%u asf=%u esf=%u lsf=%u\n",
skb, bd->pdu.mpdu_len, bd->pdu.mpdu_header_off,
bd->pdu.mpdu_data_off, bd->asf, bd->esf, bd->lsf);
dev_kfree_skb_irq(skb);
__skb_queue_purge_irq(&wcn->amsdu);
return -EINVAL;
}
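/* Fill the PDU portion of a TX buffer descriptor: the 802.11 header
 * immediately follows the BD and the payload follows the header.
 */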
static void wcn36xx_set_tx_pdu(struct wcn36xx_tx_bd *bd,
u32 mpdu_header_len,
u32 len,
u16 tid)
{
bd->pdu.mpdu_header_len = mpdu_header_len;
bd->pdu.mpdu_header_off = sizeof(*bd);
bd->pdu.mpdu_data_off = bd->pdu.mpdu_header_len +
bd->pdu.mpdu_header_off;
bd->pdu.mpdu_len = len;
bd->pdu.tid = tid;
}
static inline struct wcn36xx_vif *get_vif_by_addr(struct wcn36xx *wcn,
u8 *addr)
{
struct wcn36xx_vif *vif_priv = NULL;
struct ieee80211_vif *vif = NULL;
list_for_each_entry(vif_priv, &wcn->vif_list, list) {
vif = wcn36xx_priv_to_vif(vif_priv);
if (memcmp(vif->addr, addr, ETH_ALEN) == 0)
return vif_priv;
}
wcn36xx_warn("vif %pM not found\n", addr);
return NULL;
}
static void wcn36xx_tx_start_ampdu(struct wcn36xx *wcn,
struct wcn36xx_sta *sta_priv,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_sta *sta;
u8 *qc, tid;
if (!conf_is_ht(&wcn->hw->conf))
return;
sta = wcn36xx_priv_to_sta(sta_priv);
if (WARN_ON(!ieee80211_is_data_qos(hdr->frame_control)))
return;
if (skb_get_queue_mapping(skb) == IEEE80211_AC_VO)
return;
qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
spin_lock(&sta_priv->ampdu_lock);
if (sta_priv->ampdu_state[tid] != WCN36XX_AMPDU_NONE)
goto out_unlock;
if (sta_priv->non_agg_frame_ct++ >= WCN36XX_AMPDU_START_THRESH) {
sta_priv->ampdu_state[tid] = WCN36XX_AMPDU_START;
sta_priv->non_agg_frame_ct = 0;
ieee80211_start_tx_ba_session(sta, tid, 0);
}
out_unlock:
spin_unlock(&sta_priv->ampdu_lock);
}
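/* Prepare a TX BD for a data frame: pick STA/DPU indices from the
 * destination station (or the vif's self-STA for non-unicast frames),
 * map the QoS TID to a hardware queue, choose the SSN fill mode and
 * possibly kick off A-MPDU aggregation.
 */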
static void wcn36xx_set_tx_data(struct wcn36xx_tx_bd *bd,
struct wcn36xx *wcn,
struct wcn36xx_vif **vif_priv,
struct wcn36xx_sta *sta_priv,
struct sk_buff *skb,
bool bcast)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = NULL;
struct wcn36xx_vif *__vif_priv = NULL;
bool is_data_qos = ieee80211_is_data_qos(hdr->frame_control);
u16 tid = 0;
bd->bd_rate = WCN36XX_BD_RATE_DATA;
/*
 * For non-unicast frames mac80211 does not set the sta pointer, so
 * use self_sta_index instead.
 */
if (sta_priv) {
__vif_priv = sta_priv->vif;
vif = wcn36xx_priv_to_vif(__vif_priv);
bd->dpu_sign = sta_priv->ucast_dpu_sign;
if (vif->type == NL80211_IFTYPE_STATION) {
bd->sta_index = sta_priv->bss_sta_index;
bd->dpu_desc_idx = sta_priv->bss_dpu_desc_index;
} else if (vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_MESH_POINT) {
bd->sta_index = sta_priv->sta_index;
bd->dpu_desc_idx = sta_priv->dpu_desc_index;
}
} else {
__vif_priv = get_vif_by_addr(wcn, hdr->addr2);
bd->sta_index = __vif_priv->self_sta_index;
bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
bd->dpu_sign = __vif_priv->self_ucast_dpu_sign;
}
if (is_data_qos) {
tid = ieee80211_get_tid(hdr);
/* TID->QID is one-to-one mapping */
bd->queue_id = tid;
bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_QOS;
} else {
bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_NON_QOS;
}
if (info->flags & IEEE80211_TX_INTFL_DONT_ENCRYPT ||
(sta_priv && !sta_priv->is_data_encrypted)) {
bd->dpu_ne = 1;
}
if (ieee80211_is_any_nullfunc(hdr->frame_control)) {
/* Don't use a regular queue for null packet (no ampdu) */
bd->queue_id = WCN36XX_TX_U_WQ_ID;
bd->bd_rate = WCN36XX_BD_RATE_CTRL;
if (ieee80211_is_qos_nullfunc(hdr->frame_control))
bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_HOST;
}
if (bcast) {
bd->ub = 1;
bd->ack_policy = 1;
}
*vif_priv = __vif_priv;
wcn36xx_set_tx_pdu(bd,
is_data_qos ?
sizeof(struct ieee80211_qos_hdr) :
sizeof(struct ieee80211_hdr_3addr),
skb->len, tid);
if (sta_priv && is_data_qos)
wcn36xx_tx_start_ampdu(wcn, sta_priv, skb);
}
static void wcn36xx_set_tx_mgmt(struct wcn36xx_tx_bd *bd,
struct wcn36xx *wcn,
struct wcn36xx_vif **vif_priv,
struct sk_buff *skb,
bool bcast)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct wcn36xx_vif *__vif_priv =
get_vif_by_addr(wcn, hdr->addr2);
bd->sta_index = __vif_priv->self_sta_index;
bd->dpu_desc_idx = __vif_priv->self_dpu_desc_index;
bd->dpu_ne = 1;
/* default rate for unicast */
if (ieee80211_is_mgmt(hdr->frame_control))
bd->bd_rate = (WCN36XX_BAND(wcn) == NL80211_BAND_5GHZ) ?
WCN36XX_BD_RATE_CTRL :
WCN36XX_BD_RATE_MGMT;
else if (ieee80211_is_ctl(hdr->frame_control))
bd->bd_rate = WCN36XX_BD_RATE_CTRL;
else
wcn36xx_warn("frame control type unknown\n");
/*
 * While joining, trick the hardware into treating the probe
 * request as unicast even if the address is broadcast.
 */
if (__vif_priv->is_joining &&
ieee80211_is_probe_req(hdr->frame_control))
bcast = false;
if (bcast) {
/* broadcast */
bd->ub = 1;
/* No ack needed, not unicast */
bd->ack_policy = 1;
bd->queue_id = WCN36XX_TX_B_WQ_ID;
} else
bd->queue_id = WCN36XX_TX_U_WQ_ID;
*vif_priv = __vif_priv;
bd->pdu.bd_ssn = WCN36XX_TXBD_SSN_FILL_DPU_NON_QOS;
wcn36xx_set_tx_pdu(bd,
ieee80211_is_data_qos(hdr->frame_control) ?
sizeof(struct ieee80211_qos_hdr) :
sizeof(struct ieee80211_hdr_3addr),
skb->len, WCN36XX_TID);
}
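/* Common TX entry point: build the buffer descriptor via the data or
 * mgmt/ctrl path, optionally request a TX completion indication (only
 * one may be outstanding, so all queues are stopped until it returns),
 * byteswap the BD and pass the frame to the DXE engine.
 */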
int wcn36xx_start_tx(struct wcn36xx *wcn,
struct wcn36xx_sta *sta_priv,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct wcn36xx_vif *vif_priv = NULL;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool is_low = ieee80211_is_data(hdr->frame_control);
bool bcast = is_broadcast_ether_addr(hdr->addr1) ||
is_multicast_ether_addr(hdr->addr1);
bool ack_ind = (info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS) &&
!(info->flags & IEEE80211_TX_CTL_NO_ACK);
struct wcn36xx_tx_bd bd;
int ret;
memset(&bd, 0, sizeof(bd));
wcn36xx_dbg(WCN36XX_DBG_TX,
"tx skb %p len %d fc %04x sn %d %s %s\n",
skb, skb->len, __le16_to_cpu(hdr->frame_control),
IEEE80211_SEQ_TO_SN(__le16_to_cpu(hdr->seq_ctrl)),
is_low ? "low" : "high", bcast ? "bcast" : "ucast");
wcn36xx_dbg_dump(WCN36XX_DBG_TX_DUMP, "", skb->data, skb->len);
bd.dpu_rf = WCN36XX_BMU_WQ_TX;
if (unlikely(ack_ind)) {
wcn36xx_dbg(WCN36XX_DBG_DXE, "TX_ACK status requested\n");
/* Only one at a time is supported by fw. Stop the TX queues
* until the ack status gets back.
*/
ieee80211_stop_queues(wcn->hw);
/* Request ack indication from the firmware */
bd.tx_comp = 1;
}
/* Data frames are served first */
if (is_low)
wcn36xx_set_tx_data(&bd, wcn, &vif_priv, sta_priv, skb, bcast);
else
/* MGMT and CTRL frames are handled here */
wcn36xx_set_tx_mgmt(&bd, wcn, &vif_priv, skb, bcast);
buff_to_be((u32 *)&bd, sizeof(bd)/sizeof(u32));
bd.tx_bd_sign = 0xbdbdbdbd;
ret = wcn36xx_dxe_tx_frame(wcn, vif_priv, &bd, skb, is_low);
if (unlikely(ret && ack_ind)) {
/* If the skb has not been transmitted, resume TX queue */
ieee80211_wake_queues(wcn->hw);
}
return ret;
}
void wcn36xx_process_tx_rate(struct ani_global_class_a_stats_info *stats, struct rate_info *info)
{
/* tx_rate is in units of 500kbps; mac80211 wants them in 100kbps */
if (stats->tx_rate_flags & HAL_TX_RATE_LEGACY)
info->legacy = stats->tx_rate * 5;
info->flags = 0;
info->mcs = stats->mcs_index;
info->nss = 1;
if (stats->tx_rate_flags & (HAL_TX_RATE_HT20 | HAL_TX_RATE_HT40))
info->flags |= RATE_INFO_FLAGS_MCS;
if (stats->tx_rate_flags & (HAL_TX_RATE_VHT20 | HAL_TX_RATE_VHT40 | HAL_TX_RATE_VHT80))
info->flags |= RATE_INFO_FLAGS_VHT_MCS;
if (stats->tx_rate_flags & HAL_TX_RATE_SGI)
info->flags |= RATE_INFO_FLAGS_SHORT_GI;
if (stats->tx_rate_flags & (HAL_TX_RATE_HT20 | HAL_TX_RATE_VHT20))
info->bw = RATE_INFO_BW_20;
if (stats->tx_rate_flags & (HAL_TX_RATE_HT40 | HAL_TX_RATE_VHT40))
info->bw = RATE_INFO_BW_40;
if (stats->tx_rate_flags & HAL_TX_RATE_VHT80)
info->bw = RATE_INFO_BW_80;
}
|
linux-master
|
drivers/net/wireless/ath/wcn36xx/txrx.c
|
/*
* Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
* Copyright (c) 2004-2005 Atheros Communications, Inc.
* Copyright (c) 2007 Jiri Slaby <[email protected]>
* Copyright (c) 2009 Bob Copeland <[email protected]>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci.h>
#include "ath5k.h"
#define ATH_SDEVICE(subv, subd) \
.vendor = PCI_ANY_ID, .device = PCI_ANY_ID, \
.subvendor = (subv), .subdevice = (subd)
#define ATH_LED(pin, polarity) .driver_data = (((pin) << 8) | (polarity))
#define ATH_PIN(data) ((data) >> 8)
#define ATH_POLARITY(data) ((data) & 0xff)
/* Devices we match on for LED config info (typically laptops) */
static const struct pci_device_id ath5k_led_devices[] = {
/* AR5211 */
{ PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5211), ATH_LED(0, 0) },
/* HP Compaq nc6xx, nc4000, nx6000 */
{ ATH_SDEVICE(PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID), ATH_LED(1, 1) },
/* Acer Aspire One A150 ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe008), ATH_LED(3, 0) },
/* Acer Aspire One AO531h AO751h ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe00d), ATH_LED(3, 0) },
/* Acer Ferrari 5000 ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) },
/* E-machines E510 ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0428), ATH_LED(3, 0) },
/* BenQ Joybook R55v ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0100), ATH_LED(1, 0) },
/* Acer Extensa 5620z ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0105), ATH_LED(3, 0) },
/* Fukato Datacask Jupiter 1014a ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_AZWAVE, 0x1026), ATH_LED(3, 0) },
/* IBM ThinkPad AR5BXB6 ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_IBM, 0x058a), ATH_LED(1, 0) },
/* HP Compaq CQ60-206US ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137a), ATH_LED(3, 1) },
/* HP Compaq C700 ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_HP, 0x0137b), ATH_LED(3, 0) },
/* LiteOn AR5BXB63 ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_ATHEROS, 0x3067), ATH_LED(3, 0) },
/* IBM-specific AR5212 (all others) */
{ PCI_VDEVICE(ATHEROS, PCI_DEVICE_ID_ATHEROS_AR5212_IBM), ATH_LED(0, 0) },
/* Dell Vostro A860 ([email protected]) */
{ ATH_SDEVICE(PCI_VENDOR_ID_QMI, 0x0112), ATH_LED(3, 0) },
{ }
};
void ath5k_led_enable(struct ath5k_hw *ah)
{
if (IS_ENABLED(CONFIG_MAC80211_LEDS) &&
test_bit(ATH_STAT_LEDSOFT, ah->status)) {
ath5k_hw_set_gpio_output(ah, ah->led_pin);
ath5k_led_off(ah);
}
}
static void ath5k_led_on(struct ath5k_hw *ah)
{
if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
return;
ath5k_hw_set_gpio(ah, ah->led_pin, ah->led_on);
}
void ath5k_led_off(struct ath5k_hw *ah)
{
if (!IS_ENABLED(CONFIG_MAC80211_LEDS) ||
!test_bit(ATH_STAT_LEDSOFT, ah->status))
return;
ath5k_hw_set_gpio(ah, ah->led_pin, !ah->led_on);
}
static void
ath5k_led_brightness_set(struct led_classdev *led_dev,
enum led_brightness brightness)
{
struct ath5k_led *led = container_of(led_dev, struct ath5k_led,
led_dev);
if (brightness == LED_OFF)
ath5k_led_off(led->ah);
else
ath5k_led_on(led->ah);
}
static int
ath5k_register_led(struct ath5k_hw *ah, struct ath5k_led *led,
const char *name, const char *trigger)
{
int err;
led->ah = ah;
strncpy(led->name, name, sizeof(led->name));
led->name[sizeof(led->name)-1] = 0;
led->led_dev.name = led->name;
led->led_dev.default_trigger = trigger;
led->led_dev.brightness_set = ath5k_led_brightness_set;
err = led_classdev_register(ah->dev, &led->led_dev);
if (err) {
ATH5K_WARN(ah, "could not register LED %s\n", name);
led->ah = NULL;
}
return err;
}
static void
ath5k_unregister_led(struct ath5k_led *led)
{
if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !led->ah)
return;
led_classdev_unregister(&led->led_dev);
ath5k_led_off(led->ah);
led->ah = NULL;
}
void ath5k_unregister_leds(struct ath5k_hw *ah)
{
ath5k_unregister_led(&ah->rx_led);
ath5k_unregister_led(&ah->tx_led);
}
int ath5k_init_leds(struct ath5k_hw *ah)
{
int ret = 0;
struct ieee80211_hw *hw = ah->hw;
#ifndef CONFIG_ATH5K_AHB
struct pci_dev *pdev = ah->pdev;
#endif
char name[ATH5K_LED_MAX_NAME_LEN + 1];
const struct pci_device_id *match;
if (!IS_ENABLED(CONFIG_MAC80211_LEDS) || !ah->pdev)
return 0;
#ifdef CONFIG_ATH5K_AHB
match = NULL;
#else
match = pci_match_id(&ath5k_led_devices[0], pdev);
#endif
if (match) {
__set_bit(ATH_STAT_LEDSOFT, ah->status);
ah->led_pin = ATH_PIN(match->driver_data);
ah->led_on = ATH_POLARITY(match->driver_data);
}
if (!test_bit(ATH_STAT_LEDSOFT, ah->status))
goto out;
ath5k_led_enable(ah);
snprintf(name, sizeof(name), "ath5k-%s::rx", wiphy_name(hw->wiphy));
ret = ath5k_register_led(ah, &ah->rx_led, name,
ieee80211_get_rx_led_name(hw));
if (ret)
goto out;
snprintf(name, sizeof(name), "ath5k-%s::tx", wiphy_name(hw->wiphy));
ret = ath5k_register_led(ah, &ah->tx_led, name,
ieee80211_get_tx_led_name(hw));
out:
return ret;
}
|
linux-master
|
drivers/net/wireless/ath/ath5k/led.c
|
/*
* Initial register settings functions
*
* Copyright (c) 2004-2007 Reyk Floeter <[email protected]>
* Copyright (c) 2006-2009 Nick Kossifidis <[email protected]>
* Copyright (c) 2007-2008 Jiri Slaby <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* struct ath5k_ini - Mode-independent initial register writes
* @ini_register: Register address
* @ini_value: Default value
* @ini_mode: 0 to write, 1 to read (and clear)
*/
struct ath5k_ini {
u16 ini_register;
u32 ini_value;
enum {
AR5K_INI_WRITE = 0, /* Default */
AR5K_INI_READ = 1,
} ini_mode;
};
/**
* struct ath5k_ini_mode - Mode specific initial register values
* @mode_register: Register address
* @mode_value: Set of values for each enum ath5k_driver_mode
*/
struct ath5k_ini_mode {
u16 mode_register;
u32 mode_value[3];
};
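/*
 * Illustrative example (taken from the AR5211 table below): the entry
 *
 *	{ AR5K_TXCFG, { 0x00000015, 0x0000001d, 0x00000015 } }
 *
 * writes 0x15 to AR5K_TXCFG for 802.11a (mode_value[0]), 0x1d for
 * 802.11b (mode_value[1]) and 0x15 for 802.11g (mode_value[2]); the
 * "A B G" column comments in the tables follow the same order.
 */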
/* Initial register settings for AR5210 */
static const struct ath5k_ini ar5210_ini[] = {
/* PCU and MAC registers */
{ AR5K_NOQCU_TXDP0, 0 },
{ AR5K_NOQCU_TXDP1, 0 },
{ AR5K_RXDP, 0 },
{ AR5K_CR, 0 },
{ AR5K_ISR, 0, AR5K_INI_READ },
{ AR5K_IMR, 0 },
{ AR5K_IER, AR5K_IER_DISABLE },
{ AR5K_BSR, 0, AR5K_INI_READ },
{ AR5K_TXCFG, AR5K_DMASIZE_128B },
{ AR5K_RXCFG, AR5K_DMASIZE_128B },
{ AR5K_CFG, AR5K_INIT_CFG },
{ AR5K_TOPS, 8 },
{ AR5K_RXNOFRM, 8 },
{ AR5K_RPGTO, 0 },
{ AR5K_TXNOFRM, 0 },
{ AR5K_SFR, 0 },
{ AR5K_MIBC, 0 },
{ AR5K_MISC, 0 },
{ AR5K_RX_FILTER_5210, 0 },
{ AR5K_MCAST_FILTER0_5210, 0 },
{ AR5K_MCAST_FILTER1_5210, 0 },
{ AR5K_TX_MASK0, 0 },
{ AR5K_TX_MASK1, 0 },
{ AR5K_CLR_TMASK, 0 },
{ AR5K_TRIG_LVL, AR5K_TUNE_MIN_TX_FIFO_THRES },
{ AR5K_DIAG_SW_5210, 0 },
{ AR5K_RSSI_THR, AR5K_TUNE_RSSI_THRES },
{ AR5K_TSF_L32_5210, 0 },
{ AR5K_TIMER0_5210, 0 },
{ AR5K_TIMER1_5210, 0xffffffff },
{ AR5K_TIMER2_5210, 0xffffffff },
{ AR5K_TIMER3_5210, 1 },
{ AR5K_CFP_DUR_5210, 0 },
{ AR5K_CFP_PERIOD_5210, 0 },
/* PHY registers */
{ AR5K_PHY(0), 0x00000047 },
{ AR5K_PHY_AGC, 0x00000000 },
{ AR5K_PHY(3), 0x09848ea6 },
{ AR5K_PHY(4), 0x3d32e000 },
{ AR5K_PHY(5), 0x0000076b },
{ AR5K_PHY_ACT, AR5K_PHY_ACT_DISABLE },
{ AR5K_PHY(8), 0x02020200 },
{ AR5K_PHY(9), 0x00000e0e },
{ AR5K_PHY(10), 0x0a020201 },
{ AR5K_PHY(11), 0x00036ffc },
{ AR5K_PHY(12), 0x00000000 },
{ AR5K_PHY(13), 0x00000e0e },
{ AR5K_PHY(14), 0x00000007 },
{ AR5K_PHY(15), 0x00020100 },
{ AR5K_PHY(16), 0x89630000 },
{ AR5K_PHY(17), 0x1372169c },
{ AR5K_PHY(18), 0x0018b633 },
{ AR5K_PHY(19), 0x1284613c },
{ AR5K_PHY(20), 0x0de8b8e0 },
{ AR5K_PHY(21), 0x00074859 },
{ AR5K_PHY(22), 0x7e80beba },
{ AR5K_PHY(23), 0x313a665e },
{ AR5K_PHY_AGCCTL, 0x00001d08 },
{ AR5K_PHY(25), 0x0001ce00 },
{ AR5K_PHY(26), 0x409a4190 },
{ AR5K_PHY(28), 0x0000000f },
{ AR5K_PHY(29), 0x00000080 },
{ AR5K_PHY(30), 0x00000004 },
{ AR5K_PHY(31), 0x00000018 }, /* 0x987c */
{ AR5K_PHY(64), 0x00000000 }, /* 0x9900 */
{ AR5K_PHY(65), 0x00000000 },
{ AR5K_PHY(66), 0x00000000 },
{ AR5K_PHY(67), 0x00800000 },
{ AR5K_PHY(68), 0x00000003 },
/* BB gain table (64 bytes) */
{ AR5K_BB_GAIN(0), 0x00000000 },
{ AR5K_BB_GAIN(1), 0x00000020 },
{ AR5K_BB_GAIN(2), 0x00000010 },
{ AR5K_BB_GAIN(3), 0x00000030 },
{ AR5K_BB_GAIN(4), 0x00000008 },
{ AR5K_BB_GAIN(5), 0x00000028 },
{ AR5K_BB_GAIN(6), 0x00000028 },
{ AR5K_BB_GAIN(7), 0x00000004 },
{ AR5K_BB_GAIN(8), 0x00000024 },
{ AR5K_BB_GAIN(9), 0x00000014 },
{ AR5K_BB_GAIN(10), 0x00000034 },
{ AR5K_BB_GAIN(11), 0x0000000c },
{ AR5K_BB_GAIN(12), 0x0000002c },
{ AR5K_BB_GAIN(13), 0x00000002 },
{ AR5K_BB_GAIN(14), 0x00000022 },
{ AR5K_BB_GAIN(15), 0x00000012 },
{ AR5K_BB_GAIN(16), 0x00000032 },
{ AR5K_BB_GAIN(17), 0x0000000a },
{ AR5K_BB_GAIN(18), 0x0000002a },
{ AR5K_BB_GAIN(19), 0x00000001 },
{ AR5K_BB_GAIN(20), 0x00000021 },
{ AR5K_BB_GAIN(21), 0x00000011 },
{ AR5K_BB_GAIN(22), 0x00000031 },
{ AR5K_BB_GAIN(23), 0x00000009 },
{ AR5K_BB_GAIN(24), 0x00000029 },
{ AR5K_BB_GAIN(25), 0x00000005 },
{ AR5K_BB_GAIN(26), 0x00000025 },
{ AR5K_BB_GAIN(27), 0x00000015 },
{ AR5K_BB_GAIN(28), 0x00000035 },
{ AR5K_BB_GAIN(29), 0x0000000d },
{ AR5K_BB_GAIN(30), 0x0000002d },
{ AR5K_BB_GAIN(31), 0x00000003 },
{ AR5K_BB_GAIN(32), 0x00000023 },
{ AR5K_BB_GAIN(33), 0x00000013 },
{ AR5K_BB_GAIN(34), 0x00000033 },
{ AR5K_BB_GAIN(35), 0x0000000b },
{ AR5K_BB_GAIN(36), 0x0000002b },
{ AR5K_BB_GAIN(37), 0x00000007 },
{ AR5K_BB_GAIN(38), 0x00000027 },
{ AR5K_BB_GAIN(39), 0x00000017 },
{ AR5K_BB_GAIN(40), 0x00000037 },
{ AR5K_BB_GAIN(41), 0x0000000f },
{ AR5K_BB_GAIN(42), 0x0000002f },
{ AR5K_BB_GAIN(43), 0x0000002f },
{ AR5K_BB_GAIN(44), 0x0000002f },
{ AR5K_BB_GAIN(45), 0x0000002f },
{ AR5K_BB_GAIN(46), 0x0000002f },
{ AR5K_BB_GAIN(47), 0x0000002f },
{ AR5K_BB_GAIN(48), 0x0000002f },
{ AR5K_BB_GAIN(49), 0x0000002f },
{ AR5K_BB_GAIN(50), 0x0000002f },
{ AR5K_BB_GAIN(51), 0x0000002f },
{ AR5K_BB_GAIN(52), 0x0000002f },
{ AR5K_BB_GAIN(53), 0x0000002f },
{ AR5K_BB_GAIN(54), 0x0000002f },
{ AR5K_BB_GAIN(55), 0x0000002f },
{ AR5K_BB_GAIN(56), 0x0000002f },
{ AR5K_BB_GAIN(57), 0x0000002f },
{ AR5K_BB_GAIN(58), 0x0000002f },
{ AR5K_BB_GAIN(59), 0x0000002f },
{ AR5K_BB_GAIN(60), 0x0000002f },
{ AR5K_BB_GAIN(61), 0x0000002f },
{ AR5K_BB_GAIN(62), 0x0000002f },
{ AR5K_BB_GAIN(63), 0x0000002f },
/* 5110 RF gain table (64 bytes) */
{ AR5K_RF_GAIN(0), 0x0000001d },
{ AR5K_RF_GAIN(1), 0x0000005d },
{ AR5K_RF_GAIN(2), 0x0000009d },
{ AR5K_RF_GAIN(3), 0x000000dd },
{ AR5K_RF_GAIN(4), 0x0000011d },
{ AR5K_RF_GAIN(5), 0x00000021 },
{ AR5K_RF_GAIN(6), 0x00000061 },
{ AR5K_RF_GAIN(7), 0x000000a1 },
{ AR5K_RF_GAIN(8), 0x000000e1 },
{ AR5K_RF_GAIN(9), 0x00000031 },
{ AR5K_RF_GAIN(10), 0x00000071 },
{ AR5K_RF_GAIN(11), 0x000000b1 },
{ AR5K_RF_GAIN(12), 0x0000001c },
{ AR5K_RF_GAIN(13), 0x0000005c },
{ AR5K_RF_GAIN(14), 0x00000029 },
{ AR5K_RF_GAIN(15), 0x00000069 },
{ AR5K_RF_GAIN(16), 0x000000a9 },
{ AR5K_RF_GAIN(17), 0x00000020 },
{ AR5K_RF_GAIN(18), 0x00000019 },
{ AR5K_RF_GAIN(19), 0x00000059 },
{ AR5K_RF_GAIN(20), 0x00000099 },
{ AR5K_RF_GAIN(21), 0x00000030 },
{ AR5K_RF_GAIN(22), 0x00000005 },
{ AR5K_RF_GAIN(23), 0x00000025 },
{ AR5K_RF_GAIN(24), 0x00000065 },
{ AR5K_RF_GAIN(25), 0x000000a5 },
{ AR5K_RF_GAIN(26), 0x00000028 },
{ AR5K_RF_GAIN(27), 0x00000068 },
{ AR5K_RF_GAIN(28), 0x0000001f },
{ AR5K_RF_GAIN(29), 0x0000001e },
{ AR5K_RF_GAIN(30), 0x00000018 },
{ AR5K_RF_GAIN(31), 0x00000058 },
{ AR5K_RF_GAIN(32), 0x00000098 },
{ AR5K_RF_GAIN(33), 0x00000003 },
{ AR5K_RF_GAIN(34), 0x00000004 },
{ AR5K_RF_GAIN(35), 0x00000044 },
{ AR5K_RF_GAIN(36), 0x00000084 },
{ AR5K_RF_GAIN(37), 0x00000013 },
{ AR5K_RF_GAIN(38), 0x00000012 },
{ AR5K_RF_GAIN(39), 0x00000052 },
{ AR5K_RF_GAIN(40), 0x00000092 },
{ AR5K_RF_GAIN(41), 0x000000d2 },
{ AR5K_RF_GAIN(42), 0x0000002b },
{ AR5K_RF_GAIN(43), 0x0000002a },
{ AR5K_RF_GAIN(44), 0x0000006a },
{ AR5K_RF_GAIN(45), 0x000000aa },
{ AR5K_RF_GAIN(46), 0x0000001b },
{ AR5K_RF_GAIN(47), 0x0000001a },
{ AR5K_RF_GAIN(48), 0x0000005a },
{ AR5K_RF_GAIN(49), 0x0000009a },
{ AR5K_RF_GAIN(50), 0x000000da },
{ AR5K_RF_GAIN(51), 0x00000006 },
{ AR5K_RF_GAIN(52), 0x00000006 },
{ AR5K_RF_GAIN(53), 0x00000006 },
{ AR5K_RF_GAIN(54), 0x00000006 },
{ AR5K_RF_GAIN(55), 0x00000006 },
{ AR5K_RF_GAIN(56), 0x00000006 },
{ AR5K_RF_GAIN(57), 0x00000006 },
{ AR5K_RF_GAIN(58), 0x00000006 },
{ AR5K_RF_GAIN(59), 0x00000006 },
{ AR5K_RF_GAIN(60), 0x00000006 },
{ AR5K_RF_GAIN(61), 0x00000006 },
{ AR5K_RF_GAIN(62), 0x00000006 },
{ AR5K_RF_GAIN(63), 0x00000006 },
/* PHY activation */
{ AR5K_PHY(53), 0x00000020 },
{ AR5K_PHY(51), 0x00000004 },
{ AR5K_PHY(50), 0x00060106 },
{ AR5K_PHY(39), 0x0000006d },
{ AR5K_PHY(48), 0x00000000 },
{ AR5K_PHY(52), 0x00000014 },
{ AR5K_PHY_ACT, AR5K_PHY_ACT_ENABLE },
};
/* Initial register settings for AR5211 */
static const struct ath5k_ini ar5211_ini[] = {
{ AR5K_RXDP, 0x00000000 },
{ AR5K_RTSD0, 0x84849c9c },
{ AR5K_RTSD1, 0x7c7c7c7c },
{ AR5K_RXCFG, 0x00000005 },
{ AR5K_MIBC, 0x00000000 },
{ AR5K_TOPS, 0x00000008 },
{ AR5K_RXNOFRM, 0x00000008 },
{ AR5K_TXNOFRM, 0x00000010 },
{ AR5K_RPGTO, 0x00000000 },
{ AR5K_RFCNT, 0x0000001f },
{ AR5K_QUEUE_TXDP(0), 0x00000000 },
{ AR5K_QUEUE_TXDP(1), 0x00000000 },
{ AR5K_QUEUE_TXDP(2), 0x00000000 },
{ AR5K_QUEUE_TXDP(3), 0x00000000 },
{ AR5K_QUEUE_TXDP(4), 0x00000000 },
{ AR5K_QUEUE_TXDP(5), 0x00000000 },
{ AR5K_QUEUE_TXDP(6), 0x00000000 },
{ AR5K_QUEUE_TXDP(7), 0x00000000 },
{ AR5K_QUEUE_TXDP(8), 0x00000000 },
{ AR5K_QUEUE_TXDP(9), 0x00000000 },
{ AR5K_DCU_FP, 0x00000000 },
{ AR5K_STA_ID1, 0x00000000 },
{ AR5K_BSS_ID0, 0x00000000 },
{ AR5K_BSS_ID1, 0x00000000 },
{ AR5K_RSSI_THR, 0x00000000 },
{ AR5K_CFP_PERIOD_5211, 0x00000000 },
{ AR5K_TIMER0_5211, 0x00000030 },
{ AR5K_TIMER1_5211, 0x0007ffff },
{ AR5K_TIMER2_5211, 0x01ffffff },
{ AR5K_TIMER3_5211, 0x00000031 },
{ AR5K_CFP_DUR_5211, 0x00000000 },
{ AR5K_RX_FILTER_5211, 0x00000000 },
{ AR5K_MCAST_FILTER0_5211, 0x00000000 },
{ AR5K_MCAST_FILTER1_5211, 0x00000002 },
{ AR5K_DIAG_SW_5211, 0x00000000 },
{ AR5K_ADDAC_TEST, 0x00000000 },
{ AR5K_DEFAULT_ANTENNA, 0x00000000 },
/* PHY registers */
{ AR5K_PHY_AGC, 0x00000000 },
{ AR5K_PHY(3), 0x2d849093 },
{ AR5K_PHY(4), 0x7d32e000 },
{ AR5K_PHY(5), 0x00000f6b },
{ AR5K_PHY_ACT, 0x00000000 },
{ AR5K_PHY(11), 0x00026ffe },
{ AR5K_PHY(12), 0x00000000 },
{ AR5K_PHY(15), 0x00020100 },
{ AR5K_PHY(16), 0x206a017a },
{ AR5K_PHY(19), 0x1284613c },
{ AR5K_PHY(21), 0x00000859 },
{ AR5K_PHY(26), 0x409a4190 }, /* 0x9868 */
{ AR5K_PHY(27), 0x050cb081 },
{ AR5K_PHY(28), 0x0000000f },
{ AR5K_PHY(29), 0x00000080 },
{ AR5K_PHY(30), 0x0000000c },
{ AR5K_PHY(64), 0x00000000 },
{ AR5K_PHY(65), 0x00000000 },
{ AR5K_PHY(66), 0x00000000 },
{ AR5K_PHY(67), 0x00800000 },
{ AR5K_PHY(68), 0x00000001 },
{ AR5K_PHY(71), 0x0000092a },
{ AR5K_PHY_IQ, 0x00000000 },
{ AR5K_PHY(73), 0x00058a05 },
{ AR5K_PHY(74), 0x00000001 },
{ AR5K_PHY(75), 0x00000000 },
{ AR5K_PHY_PAPD_PROBE, 0x00000000 },
{ AR5K_PHY(77), 0x00000000 }, /* 0x9934 */
{ AR5K_PHY(78), 0x00000000 }, /* 0x9938 */
{ AR5K_PHY(79), 0x0000003f }, /* 0x993c */
{ AR5K_PHY(80), 0x00000004 },
{ AR5K_PHY(82), 0x00000000 },
{ AR5K_PHY(83), 0x00000000 },
{ AR5K_PHY(84), 0x00000000 },
{ AR5K_PHY_RADAR, 0x5d50f14c },
{ AR5K_PHY(86), 0x00000018 },
{ AR5K_PHY(87), 0x004b6a8e },
/* Initial power table (32 bytes),
* common to all cards/modes.
* Note: the table is rewritten later,
* during tx power setup, using
* calibration data etc., so the next
* write is not common to all modes */
{ AR5K_PHY_PCDAC_TXPOWER(1), 0x06ff05ff },
{ AR5K_PHY_PCDAC_TXPOWER(2), 0x07ff07ff },
{ AR5K_PHY_PCDAC_TXPOWER(3), 0x08ff08ff },
{ AR5K_PHY_PCDAC_TXPOWER(4), 0x09ff09ff },
{ AR5K_PHY_PCDAC_TXPOWER(5), 0x0aff0aff },
{ AR5K_PHY_PCDAC_TXPOWER(6), 0x0bff0bff },
{ AR5K_PHY_PCDAC_TXPOWER(7), 0x0cff0cff },
{ AR5K_PHY_PCDAC_TXPOWER(8), 0x0dff0dff },
{ AR5K_PHY_PCDAC_TXPOWER(9), 0x0fff0eff },
{ AR5K_PHY_PCDAC_TXPOWER(10), 0x12ff12ff },
{ AR5K_PHY_PCDAC_TXPOWER(11), 0x14ff13ff },
{ AR5K_PHY_PCDAC_TXPOWER(12), 0x16ff15ff },
{ AR5K_PHY_PCDAC_TXPOWER(13), 0x19ff17ff },
{ AR5K_PHY_PCDAC_TXPOWER(14), 0x1bff1aff },
{ AR5K_PHY_PCDAC_TXPOWER(15), 0x1eff1dff },
{ AR5K_PHY_PCDAC_TXPOWER(16), 0x23ff20ff },
{ AR5K_PHY_PCDAC_TXPOWER(17), 0x27ff25ff },
{ AR5K_PHY_PCDAC_TXPOWER(18), 0x2cff29ff },
{ AR5K_PHY_PCDAC_TXPOWER(19), 0x31ff2fff },
{ AR5K_PHY_PCDAC_TXPOWER(20), 0x37ff34ff },
{ AR5K_PHY_PCDAC_TXPOWER(21), 0x3aff3aff },
{ AR5K_PHY_PCDAC_TXPOWER(22), 0x3aff3aff },
{ AR5K_PHY_PCDAC_TXPOWER(23), 0x3aff3aff },
{ AR5K_PHY_PCDAC_TXPOWER(24), 0x3aff3aff },
{ AR5K_PHY_PCDAC_TXPOWER(25), 0x3aff3aff },
{ AR5K_PHY_PCDAC_TXPOWER(26), 0x3aff3aff },
{ AR5K_PHY_PCDAC_TXPOWER(27), 0x3aff3aff },
{ AR5K_PHY_PCDAC_TXPOWER(28), 0x3aff3aff },
{ AR5K_PHY_PCDAC_TXPOWER(29), 0x3aff3aff },
{ AR5K_PHY_PCDAC_TXPOWER(30), 0x3aff3aff },
{ AR5K_PHY_PCDAC_TXPOWER(31), 0x3aff3aff },
{ AR5K_PHY_CCKTXCTL, 0x00000000 },
{ AR5K_PHY(642), 0x503e4646 },
{ AR5K_PHY_GAIN_2GHZ, 0x6480416c },
{ AR5K_PHY(644), 0x0199a003 },
{ AR5K_PHY(645), 0x044cd610 },
{ AR5K_PHY(646), 0x13800040 },
{ AR5K_PHY(647), 0x1be00060 },
{ AR5K_PHY(648), 0x0c53800a },
{ AR5K_PHY(649), 0x0014df3b },
{ AR5K_PHY(650), 0x000001b5 },
{ AR5K_PHY(651), 0x00000020 },
};
/* Initial mode-specific settings for AR5211.
* The 5211 supports OFDM-only 802.11g (draft g),
* but this still needs testing! */
static const struct ath5k_ini_mode ar5211_ini_mode[] = {
{ AR5K_TXCFG,
/* A B G */
{ 0x00000015, 0x0000001d, 0x00000015 } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(1),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(2),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(3),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(4),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(5),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(6),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(7),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(8),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(9),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_DCU_GBL_IFS_SLOT,
{ 0x00000168, 0x000001b8, 0x00000168 } },
{ AR5K_DCU_GBL_IFS_SIFS,
{ 0x00000230, 0x000000b0, 0x00000230 } },
{ AR5K_DCU_GBL_IFS_EIFS,
{ 0x00000d98, 0x00001f48, 0x00000d98 } },
{ AR5K_DCU_GBL_IFS_MISC,
{ 0x0000a0e0, 0x00005880, 0x0000a0e0 } },
{ AR5K_TIME_OUT,
{ 0x04000400, 0x20003000, 0x04000400 } },
{ AR5K_USEC_5211,
{ 0x0e8d8fa7, 0x01608f95, 0x0e8d8fa7 } },
{ AR5K_PHY(8),
{ 0x02020200, 0x02010200, 0x02020200 } },
{ AR5K_PHY_RF_CTL2,
{ 0x00000e0e, 0x00000707, 0x00000e0e } },
{ AR5K_PHY_RF_CTL3,
{ 0x0a020001, 0x05010000, 0x0a020001 } },
{ AR5K_PHY_RF_CTL4,
{ 0x00000e0e, 0x00000e0e, 0x00000e0e } },
{ AR5K_PHY_PA_CTL,
{ 0x00000007, 0x0000000b, 0x0000000b } },
{ AR5K_PHY_SETTLING,
{ 0x1372169c, 0x137216a8, 0x1372169c } },
{ AR5K_PHY_GAIN,
{ 0x0018ba67, 0x0018ba69, 0x0018ba69 } },
{ AR5K_PHY_DESIRED_SIZE,
{ 0x0c28b4e0, 0x0c28b4e0, 0x0c28b4e0 } },
{ AR5K_PHY_SIG,
{ 0x7e800d2e, 0x7ec00d2e, 0x7e800d2e } },
{ AR5K_PHY_AGCCOARSE,
{ 0x31375d5e, 0x313a5d5e, 0x31375d5e } },
{ AR5K_PHY_AGCCTL,
{ 0x0000bd10, 0x0000bd38, 0x0000bd10 } },
{ AR5K_PHY_NF,
{ 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
{ AR5K_PHY_RX_DELAY,
{ 0x00002710, 0x0000157c, 0x00002710 } },
{ AR5K_PHY(70),
{ 0x00000190, 0x00000084, 0x00000190 } },
{ AR5K_PHY_FRAME_CTL_5211,
{ 0x6fe01020, 0x6fe00920, 0x6fe01020 } },
{ AR5K_PHY_PCDAC_TXPOWER_BASE,
{ 0x05ff14ff, 0x05ff14ff, 0x05ff19ff } },
{ AR5K_RF_BUFFER_CONTROL_4,
{ 0x00000010, 0x00000010, 0x00000010 } },
};
/* Initial register settings for AR5212 and newer chips */
static const struct ath5k_ini ar5212_ini_common_start[] = {
{ AR5K_RXDP, 0x00000000 },
{ AR5K_RXCFG, 0x00000005 },
{ AR5K_MIBC, 0x00000000 },
{ AR5K_TOPS, 0x00000008 },
{ AR5K_RXNOFRM, 0x00000008 },
{ AR5K_TXNOFRM, 0x00000010 },
{ AR5K_RPGTO, 0x00000000 },
{ AR5K_RFCNT, 0x0000001f },
{ AR5K_QUEUE_TXDP(0), 0x00000000 },
{ AR5K_QUEUE_TXDP(1), 0x00000000 },
{ AR5K_QUEUE_TXDP(2), 0x00000000 },
{ AR5K_QUEUE_TXDP(3), 0x00000000 },
{ AR5K_QUEUE_TXDP(4), 0x00000000 },
{ AR5K_QUEUE_TXDP(5), 0x00000000 },
{ AR5K_QUEUE_TXDP(6), 0x00000000 },
{ AR5K_QUEUE_TXDP(7), 0x00000000 },
{ AR5K_QUEUE_TXDP(8), 0x00000000 },
{ AR5K_QUEUE_TXDP(9), 0x00000000 },
{ AR5K_DCU_FP, 0x00000000 },
{ AR5K_DCU_TXP, 0x00000000 },
/* Tx filter table 0 (32 entries) */
{ AR5K_DCU_TX_FILTER_0(0), 0x00000000 }, /* DCU 0 */
{ AR5K_DCU_TX_FILTER_0(1), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(2), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(3), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(4), 0x00000000 }, /* DCU 1 */
{ AR5K_DCU_TX_FILTER_0(5), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(6), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(7), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(8), 0x00000000 }, /* DCU 2 */
{ AR5K_DCU_TX_FILTER_0(9), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(10), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(11), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(12), 0x00000000 }, /* DCU 3 */
{ AR5K_DCU_TX_FILTER_0(13), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(14), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(15), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(16), 0x00000000 }, /* DCU 4 */
{ AR5K_DCU_TX_FILTER_0(17), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(18), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(19), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(20), 0x00000000 }, /* DCU 5 */
{ AR5K_DCU_TX_FILTER_0(21), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(22), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(23), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(24), 0x00000000 }, /* DCU 6 */
{ AR5K_DCU_TX_FILTER_0(25), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(26), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(27), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(28), 0x00000000 }, /* DCU 7 */
{ AR5K_DCU_TX_FILTER_0(29), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(30), 0x00000000 },
{ AR5K_DCU_TX_FILTER_0(31), 0x00000000 },
/* Tx filter table 1 (16 entries) */
{ AR5K_DCU_TX_FILTER_1(0), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(1), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(2), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(3), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(4), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(5), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(6), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(7), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(8), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(9), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(10), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(11), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(12), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(13), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(14), 0x00000000 },
{ AR5K_DCU_TX_FILTER_1(15), 0x00000000 },
{ AR5K_DCU_TX_FILTER_CLR, 0x00000000 },
{ AR5K_DCU_TX_FILTER_SET, 0x00000000 },
{ AR5K_STA_ID1, 0x00000000 },
{ AR5K_BSS_ID0, 0x00000000 },
{ AR5K_BSS_ID1, 0x00000000 },
{ AR5K_BEACON_5211, 0x00000000 },
{ AR5K_CFP_PERIOD_5211, 0x00000000 },
{ AR5K_TIMER0_5211, 0x00000030 },
{ AR5K_TIMER1_5211, 0x0007ffff },
{ AR5K_TIMER2_5211, 0x01ffffff },
{ AR5K_TIMER3_5211, 0x00000031 },
{ AR5K_CFP_DUR_5211, 0x00000000 },
{ AR5K_RX_FILTER_5211, 0x00000000 },
{ AR5K_DIAG_SW_5211, 0x00000000 },
{ AR5K_ADDAC_TEST, 0x00000000 },
{ AR5K_DEFAULT_ANTENNA, 0x00000000 },
{ AR5K_FRAME_CTL_QOSM, 0x000fc78f },
{ AR5K_XRMODE, 0x2a82301a },
{ AR5K_XRDELAY, 0x05dc01e0 },
{ AR5K_XRTIMEOUT, 0x1f402710 },
{ AR5K_XRCHIRP, 0x01f40000 },
{ AR5K_XRSTOMP, 0x00001e1c },
{ AR5K_SLEEP0, 0x0002aaaa },
{ AR5K_SLEEP1, 0x02005555 },
{ AR5K_SLEEP2, 0x00000000 },
{ AR_BSSMSKL, 0xffffffff },
{ AR_BSSMSKU, 0x0000ffff },
{ AR5K_TXPC, 0x00000000 },
{ AR5K_PROFCNT_TX, 0x00000000 },
{ AR5K_PROFCNT_RX, 0x00000000 },
{ AR5K_PROFCNT_RXCLR, 0x00000000 },
{ AR5K_PROFCNT_CYCLE, 0x00000000 },
{ AR5K_QUIET_CTL1, 0x00000088 },
/* Initial rate duration table (32 entries) */
{ AR5K_RATE_DUR(0), 0x00000000 },
{ AR5K_RATE_DUR(1), 0x0000008c },
{ AR5K_RATE_DUR(2), 0x000000e4 },
{ AR5K_RATE_DUR(3), 0x000002d5 },
{ AR5K_RATE_DUR(4), 0x00000000 },
{ AR5K_RATE_DUR(5), 0x00000000 },
{ AR5K_RATE_DUR(6), 0x000000a0 },
{ AR5K_RATE_DUR(7), 0x000001c9 },
{ AR5K_RATE_DUR(8), 0x0000002c },
{ AR5K_RATE_DUR(9), 0x0000002c },
{ AR5K_RATE_DUR(10), 0x00000030 },
{ AR5K_RATE_DUR(11), 0x0000003c },
{ AR5K_RATE_DUR(12), 0x0000002c },
{ AR5K_RATE_DUR(13), 0x0000002c },
{ AR5K_RATE_DUR(14), 0x00000030 },
{ AR5K_RATE_DUR(15), 0x0000003c },
{ AR5K_RATE_DUR(16), 0x00000000 },
{ AR5K_RATE_DUR(17), 0x00000000 },
{ AR5K_RATE_DUR(18), 0x00000000 },
{ AR5K_RATE_DUR(19), 0x00000000 },
{ AR5K_RATE_DUR(20), 0x00000000 },
{ AR5K_RATE_DUR(21), 0x00000000 },
{ AR5K_RATE_DUR(22), 0x00000000 },
{ AR5K_RATE_DUR(23), 0x00000000 },
{ AR5K_RATE_DUR(24), 0x000000d5 },
{ AR5K_RATE_DUR(25), 0x000000df },
{ AR5K_RATE_DUR(26), 0x00000102 },
{ AR5K_RATE_DUR(27), 0x0000013a },
{ AR5K_RATE_DUR(28), 0x00000075 },
{ AR5K_RATE_DUR(29), 0x0000007f },
{ AR5K_RATE_DUR(30), 0x000000a2 },
{ AR5K_RATE_DUR(31), 0x00000000 },
{ AR5K_QUIET_CTL2, 0x00010002 },
{ AR5K_TSF_PARM, 0x00000001 },
{ AR5K_QOS_NOACK, 0x000000c0 },
{ AR5K_PHY_ERR_FIL, 0x00000000 },
{ AR5K_XRLAT_TX, 0x00000168 },
{ AR5K_ACKSIFS, 0x00000000 },
/* Rate -> dB table. Note the packing:
* each word holds four entries, ordered
* 03 <- 02 <- 01 <- 00, i.e. the lowest
* rate index sits in the least significant
* byte (0x03020100 maps rates 0-3 to dB
* values 0x00-0x03) */
{ AR5K_RATE2DB(0), 0x03020100 },
{ AR5K_RATE2DB(1), 0x07060504 },
{ AR5K_RATE2DB(2), 0x0b0a0908 },
{ AR5K_RATE2DB(3), 0x0f0e0d0c },
{ AR5K_RATE2DB(4), 0x13121110 },
{ AR5K_RATE2DB(5), 0x17161514 },
{ AR5K_RATE2DB(6), 0x1b1a1918 },
{ AR5K_RATE2DB(7), 0x1f1e1d1c },
/* dB -> rate table (same packing) */
{ AR5K_DB2RATE(0), 0x03020100 },
{ AR5K_DB2RATE(1), 0x07060504 },
{ AR5K_DB2RATE(2), 0x0b0a0908 },
{ AR5K_DB2RATE(3), 0x0f0e0d0c },
{ AR5K_DB2RATE(4), 0x13121110 },
{ AR5K_DB2RATE(5), 0x17161514 },
{ AR5K_DB2RATE(6), 0x1b1a1918 },
{ AR5K_DB2RATE(7), 0x1f1e1d1c },
/* PHY registers (Common settings
* for all chips/modes) */
{ AR5K_PHY(3), 0xad848e19 },
{ AR5K_PHY(4), 0x7d28e000 },
{ AR5K_PHY_TIMING_3, 0x9c0a9f6b },
{ AR5K_PHY_ACT, 0x00000000 },
{ AR5K_PHY(16), 0x206a017a },
{ AR5K_PHY(21), 0x00000859 },
{ AR5K_PHY_BIN_MASK_1, 0x00000000 },
{ AR5K_PHY_BIN_MASK_2, 0x00000000 },
{ AR5K_PHY_BIN_MASK_3, 0x00000000 },
{ AR5K_PHY_BIN_MASK_CTL, 0x00800000 },
{ AR5K_PHY_ANT_CTL, 0x00000001 },
/*{ AR5K_PHY(71), 0x0000092a },*/ /* Old value */
{ AR5K_PHY_MAX_RX_LEN, 0x00000c80 },
{ AR5K_PHY_IQ, 0x05100000 },
{ AR5K_PHY_WARM_RESET, 0x00000001 },
{ AR5K_PHY_CTL, 0x00000004 },
{ AR5K_PHY_TXPOWER_RATE1, 0x1e1f2022 },
{ AR5K_PHY_TXPOWER_RATE2, 0x0a0b0c0d },
{ AR5K_PHY_TXPOWER_RATE_MAX, 0x0000003f },
{ AR5K_PHY(82), 0x9280b212 },
{ AR5K_PHY_RADAR, 0x5d50e188 },
/*{ AR5K_PHY(86), 0x000000ff },*/
{ AR5K_PHY(87), 0x004b6a8e },
{ AR5K_PHY_NFTHRES, 0x000003ce },
{ AR5K_PHY_RESTART, 0x192fb515 },
{ AR5K_PHY(94), 0x00000001 },
{ AR5K_PHY_RFBUS_REQ, 0x00000000 },
/*{ AR5K_PHY(644), 0x0080a333 },*/ /* Old value */
/*{ AR5K_PHY(645), 0x00206c10 },*/ /* Old value */
{ AR5K_PHY(644), 0x00806333 },
{ AR5K_PHY(645), 0x00106c10 },
{ AR5K_PHY(646), 0x009c4060 },
/* { AR5K_PHY(647), 0x1483800a }, */
/* { AR5K_PHY(648), 0x01831061 }, */ /* Old value */
{ AR5K_PHY(648), 0x018830c6 },
{ AR5K_PHY(649), 0x00000400 },
/*{ AR5K_PHY(650), 0x000001b5 },*/
{ AR5K_PHY(651), 0x00000000 },
{ AR5K_PHY_TXPOWER_RATE3, 0x20202020 },
{ AR5K_PHY_TXPOWER_RATE4, 0x20202020 },
/*{ AR5K_PHY(655), 0x13c889af },*/
{ AR5K_PHY(656), 0x38490a20 },
{ AR5K_PHY(657), 0x00007bb6 },
{ AR5K_PHY(658), 0x0fff3ffc },
};
/* Initial mode-specific settings for AR5212 (Written before ar5212_ini_common_start) */
static const struct ath5k_ini_mode ar5212_ini_mode_start[] = {
{ AR5K_QUEUE_DFS_LOCAL_IFS(0),
/* A/XR B G */
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(1),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(2),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(3),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(4),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(5),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(6),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(7),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(8),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_QUEUE_DFS_LOCAL_IFS(9),
{ 0x002ffc0f, 0x002ffc1f, 0x002ffc0f } },
{ AR5K_DCU_GBL_IFS_SIFS,
{ 0x00000230, 0x000000b0, 0x00000160 } },
{ AR5K_DCU_GBL_IFS_SLOT,
{ 0x00000168, 0x000001b8, 0x0000018c } },
{ AR5K_DCU_GBL_IFS_EIFS,
{ 0x00000e60, 0x00001f1c, 0x00003e38 } },
{ AR5K_DCU_GBL_IFS_MISC,
{ 0x0000a0e0, 0x00005880, 0x0000b0e0 } },
{ AR5K_TIME_OUT,
{ 0x03e803e8, 0x04200420, 0x08400840 } },
{ AR5K_PHY(8),
{ 0x02020200, 0x02010200, 0x02020200 } },
{ AR5K_PHY_RF_CTL2,
{ 0x00000e0e, 0x00000707, 0x00000e0e } },
{ AR5K_PHY_SETTLING,
{ 0x1372161c, 0x13721722, 0x137216a2 } },
{ AR5K_PHY_AGCCTL,
{ 0x00009d10, 0x00009d18, 0x00009d18 } },
{ AR5K_PHY_NF,
{ 0x0001ce00, 0x0001ce00, 0x0001ce00 } },
{ AR5K_PHY_WEAK_OFDM_HIGH_THR,
{ 0x409a4190, 0x409a4190, 0x409a4190 } },
{ AR5K_PHY(70),
{ 0x000001b8, 0x00000084, 0x00000108 } },
{ AR5K_PHY_OFDM_SELFCORR,
{ 0x10058a05, 0x10058a05, 0x10058a05 } },
{ 0xa230,
{ 0x00000000, 0x00000000, 0x00000108 } },
};
/* Initial mode-specific settings for AR5212 + RF5111
* (Written after ar5212_ini_common_start) */
static const struct ath5k_ini_mode rf5111_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
{ 0x00008015, 0x00008015, 0x00008015 } },
{ AR5K_USEC_5211,
{ 0x128d8fa7, 0x04e00f95, 0x12e00fab } },
{ AR5K_PHY_RF_CTL3,
{ 0x0a020001, 0x05010100, 0x0a020001 } },
{ AR5K_PHY_RF_CTL4,
{ 0x00000e0e, 0x00000e0e, 0x00000e0e } },
{ AR5K_PHY_PA_CTL,
{ 0x00000007, 0x0000000b, 0x0000000b } },
{ AR5K_PHY_GAIN,
{ 0x0018da5a, 0x0018ca69, 0x0018ca69 } },
{ AR5K_PHY_DESIRED_SIZE,
{ 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
{ AR5K_PHY_SIG,
{ 0x7e800d2e, 0x7ee84d2e, 0x7ee84d2e } },
{ AR5K_PHY_AGCCOARSE,
{ 0x3137665e, 0x3137665e, 0x3137665e } },
{ AR5K_PHY_WEAK_OFDM_LOW_THR,
{ 0x050cb081, 0x050cb081, 0x050cb080 } },
{ AR5K_PHY_RX_DELAY,
{ 0x00002710, 0x0000157c, 0x00002af8 } },
{ AR5K_PHY_FRAME_CTL_5211,
{ 0xf7b81020, 0xf7b80d20, 0xf7b81020 } },
{ AR5K_PHY_GAIN_2GHZ,
{ 0x642c416a, 0x6440416a, 0x6440416a } },
{ AR5K_PHY_CCK_RX_CTL_4,
{ 0x1883800a, 0x1873800a, 0x1883800a } },
};
/* Common for all modes */
static const struct ath5k_ini rf5111_ini_common_end[] = {
{ AR5K_DCU_FP, 0x00000000 },
{ AR5K_PHY_AGC, 0x00000000 },
{ AR5K_PHY_ADC_CTL, 0x00022ffe },
{ 0x983c, 0x00020100 },
{ AR5K_PHY_GAIN_OFFSET, 0x1284613c },
{ AR5K_PHY_PAPD_PROBE, 0x00004883 },
{ 0x9940, 0x00000004 },
{ 0x9958, 0x000000ff },
{ 0x9974, 0x00000000 },
{ AR5K_PHY_SPENDING, 0x00000018 },
{ AR5K_PHY_CCKTXCTL, 0x00000000 },
{ AR5K_PHY_CCK_CROSSCORR, 0xd03e6788 },
{ AR5K_PHY_DAG_CCK_CTL, 0x000001b5 },
{ 0xa23c, 0x13c889af },
};
/* Initial mode-specific settings for AR5212 + RF5112
* (Written after ar5212_ini_common_start) */
static const struct ath5k_ini_mode rf5112_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
{ 0x00008015, 0x00008015, 0x00008015 } },
{ AR5K_USEC_5211,
{ 0x128d93a7, 0x04e01395, 0x12e013ab } },
{ AR5K_PHY_RF_CTL3,
{ 0x0a020001, 0x05020100, 0x0a020001 } },
{ AR5K_PHY_RF_CTL4,
{ 0x00000e0e, 0x00000e0e, 0x00000e0e } },
{ AR5K_PHY_PA_CTL,
{ 0x00000007, 0x0000000b, 0x0000000b } },
{ AR5K_PHY_GAIN,
{ 0x0018da6d, 0x0018ca75, 0x0018ca75 } },
{ AR5K_PHY_DESIRED_SIZE,
{ 0x0de8b4e0, 0x0de8b4e0, 0x0de8b4e0 } },
{ AR5K_PHY_SIG,
{ 0x7e800d2e, 0x7ee80d2e, 0x7ee80d2e } },
{ AR5K_PHY_AGCCOARSE,
{ 0x3137665e, 0x3137665e, 0x3137665e } },
{ AR5K_PHY_WEAK_OFDM_LOW_THR,
{ 0x050cb081, 0x050cb081, 0x050cb081 } },
{ AR5K_PHY_RX_DELAY,
{ 0x000007d0, 0x0000044c, 0x00000898 } },
{ AR5K_PHY_FRAME_CTL_5211,
{ 0xf7b81020, 0xf7b80d10, 0xf7b81010 } },
{ AR5K_PHY_CCKTXCTL,
{ 0x00000000, 0x00000008, 0x00000008 } },
{ AR5K_PHY_CCK_CROSSCORR,
{ 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
{ AR5K_PHY_GAIN_2GHZ,
{ 0x642c0140, 0x6442c160, 0x6442c160 } },
{ AR5K_PHY_CCK_RX_CTL_4,
{ 0x1883800a, 0x1873800a, 0x1883800a } },
};
static const struct ath5k_ini rf5112_ini_common_end[] = {
{ AR5K_DCU_FP, 0x00000000 },
{ AR5K_PHY_AGC, 0x00000000 },
{ AR5K_PHY_ADC_CTL, 0x00022ffe },
{ 0x983c, 0x00020100 },
{ AR5K_PHY_GAIN_OFFSET, 0x1284613c },
{ AR5K_PHY_PAPD_PROBE, 0x00004882 },
{ 0x9940, 0x00000004 },
{ 0x9958, 0x000000ff },
{ 0x9974, 0x00000000 },
{ AR5K_PHY_DAG_CCK_CTL, 0x000001b5 },
{ 0xa23c, 0x13c889af },
};
/* Initial mode-specific settings for RF5413/5414
* (Written after ar5212_ini_common_start) */
static const struct ath5k_ini_mode rf5413_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
{ 0x00000015, 0x00000015, 0x00000015 } },
{ AR5K_USEC_5211,
{ 0x128d93a7, 0x04e01395, 0x12e013ab } },
{ AR5K_PHY_RF_CTL3,
{ 0x0a020001, 0x05020100, 0x0a020001 } },
{ AR5K_PHY_RF_CTL4,
{ 0x00000e0e, 0x00000e0e, 0x00000e0e } },
{ AR5K_PHY_PA_CTL,
{ 0x00000007, 0x0000000b, 0x0000000b } },
{ AR5K_PHY_GAIN,
{ 0x0018fa61, 0x001a1a63, 0x001a1a63 } },
{ AR5K_PHY_DESIRED_SIZE,
{ 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } },
{ AR5K_PHY_SIG,
{ 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
{ AR5K_PHY_AGCCOARSE,
{ 0x3139605e, 0x3139605e, 0x3139605e } },
{ AR5K_PHY_WEAK_OFDM_LOW_THR,
{ 0x050cb081, 0x050cb081, 0x050cb081 } },
{ AR5K_PHY_RX_DELAY,
{ 0x000007d0, 0x0000044c, 0x00000898 } },
{ AR5K_PHY_FRAME_CTL_5211,
{ 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
{ AR5K_PHY_CCKTXCTL,
{ 0x00000000, 0x00000000, 0x00000000 } },
{ AR5K_PHY_CCK_CROSSCORR,
{ 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
{ AR5K_PHY_GAIN_2GHZ,
{ 0x002ec1e0, 0x002ac120, 0x002ac120 } },
{ AR5K_PHY_CCK_RX_CTL_4,
{ 0x1883800a, 0x1863800a, 0x1883800a } },
{ 0xa300,
{ 0x18010000, 0x18010000, 0x18010000 } },
{ 0xa304,
{ 0x30032602, 0x30032602, 0x30032602 } },
{ 0xa308,
{ 0x48073e06, 0x48073e06, 0x48073e06 } },
{ 0xa30c,
{ 0x560b4c0a, 0x560b4c0a, 0x560b4c0a } },
{ 0xa310,
{ 0x641a600f, 0x641a600f, 0x641a600f } },
{ 0xa314,
{ 0x784f6e1b, 0x784f6e1b, 0x784f6e1b } },
{ 0xa318,
{ 0x868f7c5a, 0x868f7c5a, 0x868f7c5a } },
{ 0xa31c,
{ 0x90cf865b, 0x8ecf865b, 0x8ecf865b } },
{ 0xa320,
{ 0x9d4f970f, 0x9b4f970f, 0x9b4f970f } },
{ 0xa324,
{ 0xa7cfa38f, 0xa3cf9f8f, 0xa3cf9f8f } },
{ 0xa328,
{ 0xb55faf1f, 0xb35faf1f, 0xb35faf1f } },
{ 0xa32c,
{ 0xbddfb99f, 0xbbdfb99f, 0xbbdfb99f } },
{ 0xa330,
{ 0xcb7fc53f, 0xcb7fc73f, 0xcb7fc73f } },
{ 0xa334,
{ 0xd5ffd1bf, 0xd3ffd1bf, 0xd3ffd1bf } },
};
static const struct ath5k_ini rf5413_ini_common_end[] = {
{ AR5K_DCU_FP, 0x000003e0 },
{ AR5K_5414_CBCFG, 0x00000010 },
{ AR5K_SEQ_MASK, 0x0000000f },
{ 0x809c, 0x00000000 },
{ 0x80a0, 0x00000000 },
{ AR5K_MIC_QOS_CTL, 0x00000000 },
{ AR5K_MIC_QOS_SEL, 0x00000000 },
{ AR5K_MISC_MODE, 0x00000000 },
{ AR5K_OFDM_FIL_CNT, 0x00000000 },
{ AR5K_CCK_FIL_CNT, 0x00000000 },
{ AR5K_PHYERR_CNT1, 0x00000000 },
{ AR5K_PHYERR_CNT1_MASK, 0x00000000 },
{ AR5K_PHYERR_CNT2, 0x00000000 },
{ AR5K_PHYERR_CNT2_MASK, 0x00000000 },
{ AR5K_TSF_THRES, 0x00000000 },
{ 0x8140, 0x800003f9 },
{ 0x8144, 0x00000000 },
{ AR5K_PHY_AGC, 0x00000000 },
{ AR5K_PHY_ADC_CTL, 0x0000a000 },
{ 0x983c, 0x00200400 },
{ AR5K_PHY_GAIN_OFFSET, 0x1284233c },
{ AR5K_PHY_SCR, 0x0000001f },
{ AR5K_PHY_SLMT, 0x00000080 },
{ AR5K_PHY_SCAL, 0x0000000e },
{ 0x9958, 0x00081fff },
{ AR5K_PHY_TIMING_7, 0x00000000 },
{ AR5K_PHY_TIMING_8, 0x02800000 },
{ AR5K_PHY_TIMING_11, 0x00000000 },
{ AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
{ 0x99e4, 0xaaaaaaaa },
{ 0x99e8, 0x3c466478 },
{ 0x99ec, 0x000000aa },
{ AR5K_PHY_SCLOCK, 0x0000000c },
{ AR5K_PHY_SDELAY, 0x000000ff },
{ AR5K_PHY_SPENDING, 0x00000014 },
{ AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
{ 0xa23c, 0x93c889af },
{ AR5K_PHY_FAST_ADC, 0x00000001 },
{ 0xa250, 0x0000a000 },
{ AR5K_PHY_BLUETOOTH, 0x00000000 },
{ AR5K_PHY_TPC_RG1, 0x0cc75380 },
{ 0xa25c, 0x0f0f0f01 },
{ 0xa260, 0x5f690f01 },
{ 0xa264, 0x00418a11 },
{ 0xa268, 0x00000000 },
{ AR5K_PHY_TPC_RG5, 0x0c30c16a },
{ 0xa270, 0x00820820 },
{ 0xa274, 0x081b7caa },
{ 0xa278, 0x1ce739ce },
{ 0xa27c, 0x051701ce },
{ 0xa338, 0x00000000 },
{ 0xa33c, 0x00000000 },
{ 0xa340, 0x00000000 },
{ 0xa344, 0x00000000 },
{ 0xa348, 0x3fffffff },
{ 0xa34c, 0x3fffffff },
{ 0xa350, 0x3fffffff },
{ 0xa354, 0x0003ffff },
{ 0xa358, 0x79a8aa1f },
{ 0xa35c, 0x066c420f },
{ 0xa360, 0x0f282207 },
{ 0xa364, 0x17601685 },
{ 0xa368, 0x1f801104 },
{ 0xa36c, 0x37a00c03 },
{ 0xa370, 0x3fc40883 },
{ 0xa374, 0x57c00803 },
{ 0xa378, 0x5fd80682 },
{ 0xa37c, 0x7fe00482 },
{ 0xa380, 0x7f3c7bba },
{ 0xa384, 0xf3307ff0 },
};
/* Initial mode-specific settings for RF2413/2414
* (Written after ar5212_ini_common_start) */
/* XXX: is there an 11a mode? */
static const struct ath5k_ini_mode rf2413_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
{ 0x00000015, 0x00000015, 0x00000015 } },
{ AR5K_USEC_5211,
{ 0x128d93a7, 0x04e01395, 0x12e013ab } },
{ AR5K_PHY_RF_CTL3,
{ 0x0a020001, 0x05020000, 0x0a020001 } },
{ AR5K_PHY_RF_CTL4,
{ 0x00000e00, 0x00000e00, 0x00000e00 } },
{ AR5K_PHY_PA_CTL,
{ 0x00000002, 0x0000000a, 0x0000000a } },
{ AR5K_PHY_GAIN,
{ 0x0018da6d, 0x001a6a64, 0x001a6a64 } },
{ AR5K_PHY_DESIRED_SIZE,
{ 0x0de8b4e0, 0x0de8b0da, 0x0c98b0da } },
{ AR5K_PHY_SIG,
{ 0x7e800d2e, 0x7ee80d2e, 0x7ec80d2e } },
{ AR5K_PHY_AGCCOARSE,
{ 0x3137665e, 0x3137665e, 0x3139605e } },
{ AR5K_PHY_WEAK_OFDM_LOW_THR,
{ 0x050cb081, 0x050cb081, 0x050cb081 } },
{ AR5K_PHY_RX_DELAY,
{ 0x000007d0, 0x0000044c, 0x00000898 } },
{ AR5K_PHY_FRAME_CTL_5211,
{ 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
{ AR5K_PHY_CCKTXCTL,
{ 0x00000000, 0x00000000, 0x00000000 } },
{ AR5K_PHY_CCK_CROSSCORR,
{ 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
{ AR5K_PHY_GAIN_2GHZ,
{ 0x002c0140, 0x0042c140, 0x0042c140 } },
{ AR5K_PHY_CCK_RX_CTL_4,
{ 0x1883800a, 0x1863800a, 0x1883800a } },
};
static const struct ath5k_ini rf2413_ini_common_end[] = {
{ AR5K_DCU_FP, 0x000003e0 },
{ AR5K_SEQ_MASK, 0x0000000f },
{ AR5K_MIC_QOS_CTL, 0x00000000 },
{ AR5K_MIC_QOS_SEL, 0x00000000 },
{ AR5K_MISC_MODE, 0x00000000 },
{ AR5K_OFDM_FIL_CNT, 0x00000000 },
{ AR5K_CCK_FIL_CNT, 0x00000000 },
{ AR5K_PHYERR_CNT1, 0x00000000 },
{ AR5K_PHYERR_CNT1_MASK, 0x00000000 },
{ AR5K_PHYERR_CNT2, 0x00000000 },
{ AR5K_PHYERR_CNT2_MASK, 0x00000000 },
{ AR5K_TSF_THRES, 0x00000000 },
{ 0x8140, 0x800000a8 },
{ 0x8144, 0x00000000 },
{ AR5K_PHY_AGC, 0x00000000 },
{ AR5K_PHY_ADC_CTL, 0x0000a000 },
{ 0x983c, 0x00200400 },
{ AR5K_PHY_GAIN_OFFSET, 0x1284233c },
{ AR5K_PHY_SCR, 0x0000001f },
{ AR5K_PHY_SLMT, 0x00000080 },
{ AR5K_PHY_SCAL, 0x0000000e },
{ 0x9958, 0x000000ff },
{ AR5K_PHY_TIMING_7, 0x00000000 },
{ AR5K_PHY_TIMING_8, 0x02800000 },
{ AR5K_PHY_TIMING_11, 0x00000000 },
{ AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
{ 0x99e4, 0xaaaaaaaa },
{ 0x99e8, 0x3c466478 },
{ 0x99ec, 0x000000aa },
{ AR5K_PHY_SCLOCK, 0x0000000c },
{ AR5K_PHY_SDELAY, 0x000000ff },
{ AR5K_PHY_SPENDING, 0x00000014 },
{ AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
{ 0xa23c, 0x93c889af },
{ AR5K_PHY_FAST_ADC, 0x00000001 },
{ 0xa250, 0x0000a000 },
{ AR5K_PHY_BLUETOOTH, 0x00000000 },
{ AR5K_PHY_TPC_RG1, 0x0cc75380 },
{ 0xa25c, 0x0f0f0f01 },
{ 0xa260, 0x5f690f01 },
{ 0xa264, 0x00418a11 },
{ 0xa268, 0x00000000 },
{ AR5K_PHY_TPC_RG5, 0x0c30c16a },
{ 0xa270, 0x00820820 },
{ 0xa274, 0x001b7caa },
{ 0xa278, 0x1ce739ce },
{ 0xa27c, 0x051701ce },
{ 0xa300, 0x18010000 },
{ 0xa304, 0x30032602 },
{ 0xa308, 0x48073e06 },
{ 0xa30c, 0x560b4c0a },
{ 0xa310, 0x641a600f },
{ 0xa314, 0x784f6e1b },
{ 0xa318, 0x868f7c5a },
{ 0xa31c, 0x8ecf865b },
{ 0xa320, 0x9d4f970f },
{ 0xa324, 0xa5cfa18f },
{ 0xa328, 0xb55faf1f },
{ 0xa32c, 0xbddfb99f },
{ 0xa330, 0xcd7fc73f },
{ 0xa334, 0xd5ffd1bf },
{ 0xa338, 0x00000000 },
{ 0xa33c, 0x00000000 },
{ 0xa340, 0x00000000 },
{ 0xa344, 0x00000000 },
{ 0xa348, 0x3fffffff },
{ 0xa34c, 0x3fffffff },
{ 0xa350, 0x3fffffff },
{ 0xa354, 0x0003ffff },
{ 0xa358, 0x79a8aa1f },
{ 0xa35c, 0x066c420f },
{ 0xa360, 0x0f282207 },
{ 0xa364, 0x17601685 },
{ 0xa368, 0x1f801104 },
{ 0xa36c, 0x37a00c03 },
{ 0xa370, 0x3fc40883 },
{ 0xa374, 0x57c00803 },
{ 0xa378, 0x5fd80682 },
{ 0xa37c, 0x7fe00482 },
{ 0xa380, 0x7f3c7bba },
{ 0xa384, 0xf3307ff0 },
};
/* Initial mode-specific settings for RF2425
* (Written after ar5212_ini_common_start) */
/* XXX: is there an 11a mode? */
static const struct ath5k_ini_mode rf2425_ini_mode_end[] = {
{ AR5K_TXCFG,
/* A/XR B G */
{ 0x00000015, 0x00000015, 0x00000015 } },
{ AR5K_USEC_5211,
{ 0x128d93a7, 0x04e01395, 0x12e013ab } },
{ AR5K_PHY_RF_CTL3,
{ 0x0a020001, 0x05020100, 0x0a020001 } },
{ AR5K_PHY_RF_CTL4,
{ 0x00000e0e, 0x00000e0e, 0x00000e0e } },
{ AR5K_PHY_PA_CTL,
{ 0x00000003, 0x0000000b, 0x0000000b } },
{ AR5K_PHY_SETTLING,
{ 0x1372161c, 0x13721722, 0x13721422 } },
{ AR5K_PHY_GAIN,
{ 0x0018fa61, 0x00199a65, 0x00199a65 } },
{ AR5K_PHY_DESIRED_SIZE,
{ 0x0c98b4e0, 0x0c98b0da, 0x0c98b0da } },
{ AR5K_PHY_SIG,
{ 0x7ec80d2e, 0x7ec80d2e, 0x7ec80d2e } },
{ AR5K_PHY_AGCCOARSE,
{ 0x3139605e, 0x3139605e, 0x3139605e } },
{ AR5K_PHY_WEAK_OFDM_LOW_THR,
{ 0x050cb081, 0x050cb081, 0x050cb081 } },
{ AR5K_PHY_RX_DELAY,
{ 0x000007d0, 0x0000044c, 0x00000898 } },
{ AR5K_PHY_FRAME_CTL_5211,
{ 0xf7b81000, 0xf7b80d00, 0xf7b81000 } },
{ AR5K_PHY_CCKTXCTL,
{ 0x00000000, 0x00000000, 0x00000000 } },
{ AR5K_PHY_CCK_CROSSCORR,
{ 0xd6be6788, 0xd03e6788, 0xd03e6788 } },
{ AR5K_PHY_GAIN_2GHZ,
{ 0x00000140, 0x0052c140, 0x0052c140 } },
{ AR5K_PHY_CCK_RX_CTL_4,
{ 0x1883800a, 0x1863800a, 0x1883800a } },
{ 0xa324,
{ 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
{ 0xa328,
{ 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
{ 0xa32c,
{ 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
{ 0xa330,
{ 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
{ 0xa334,
{ 0xa7cfa7cf, 0xa7cfa7cf, 0xa7cfa7cf } },
};
static const struct ath5k_ini rf2425_ini_common_end[] = {
{ AR5K_DCU_FP, 0x000003e0 },
{ AR5K_SEQ_MASK, 0x0000000f },
{ 0x809c, 0x00000000 },
{ 0x80a0, 0x00000000 },
{ AR5K_MIC_QOS_CTL, 0x00000000 },
{ AR5K_MIC_QOS_SEL, 0x00000000 },
{ AR5K_MISC_MODE, 0x00000000 },
{ AR5K_OFDM_FIL_CNT, 0x00000000 },
{ AR5K_CCK_FIL_CNT, 0x00000000 },
{ AR5K_PHYERR_CNT1, 0x00000000 },
{ AR5K_PHYERR_CNT1_MASK, 0x00000000 },
{ AR5K_PHYERR_CNT2, 0x00000000 },
{ AR5K_PHYERR_CNT2_MASK, 0x00000000 },
{ AR5K_TSF_THRES, 0x00000000 },
{ 0x8140, 0x800003f9 },
{ 0x8144, 0x00000000 },
{ AR5K_PHY_AGC, 0x00000000 },
{ AR5K_PHY_ADC_CTL, 0x0000a000 },
{ 0x983c, 0x00200400 },
{ AR5K_PHY_GAIN_OFFSET, 0x1284233c },
{ AR5K_PHY_SCR, 0x0000001f },
{ AR5K_PHY_SLMT, 0x00000080 },
{ AR5K_PHY_SCAL, 0x0000000e },
{ 0x9958, 0x00081fff },
{ AR5K_PHY_TIMING_7, 0x00000000 },
{ AR5K_PHY_TIMING_8, 0x02800000 },
{ AR5K_PHY_TIMING_11, 0x00000000 },
{ 0x99dc, 0xfebadbe8 },
{ AR5K_PHY_HEAVY_CLIP_ENABLE, 0x00000000 },
{ 0x99e4, 0xaaaaaaaa },
{ 0x99e8, 0x3c466478 },
{ 0x99ec, 0x000000aa },
{ AR5K_PHY_SCLOCK, 0x0000000c },
{ AR5K_PHY_SDELAY, 0x000000ff },
{ AR5K_PHY_SPENDING, 0x00000014 },
{ AR5K_PHY_DAG_CCK_CTL, 0x000009b5 },
{ AR5K_PHY_TXPOWER_RATE3, 0x20202020 },
{ AR5K_PHY_TXPOWER_RATE4, 0x20202020 },
{ 0xa23c, 0x93c889af },
{ AR5K_PHY_FAST_ADC, 0x00000001 },
{ 0xa250, 0x0000a000 },
{ AR5K_PHY_BLUETOOTH, 0x00000000 },
{ AR5K_PHY_TPC_RG1, 0x0cc75380 },
{ 0xa25c, 0x0f0f0f01 },
{ 0xa260, 0x5f690f01 },
{ 0xa264, 0x00418a11 },
{ 0xa268, 0x00000000 },
{ AR5K_PHY_TPC_RG5, 0x0c30c166 },
{ 0xa270, 0x00820820 },
{ 0xa274, 0x081a3caa },
{ 0xa278, 0x1ce739ce },
{ 0xa27c, 0x051701ce },
{ 0xa300, 0x16010000 },
{ 0xa304, 0x2c032402 },
{ 0xa308, 0x48433e42 },
{ 0xa30c, 0x5a0f500b },
{ 0xa310, 0x6c4b624a },
{ 0xa314, 0x7e8b748a },
{ 0xa318, 0x96cf8ccb },
{ 0xa31c, 0xa34f9d0f },
{ 0xa320, 0xa7cfa58f },
{ 0xa348, 0x3fffffff },
{ 0xa34c, 0x3fffffff },
{ 0xa350, 0x3fffffff },
{ 0xa354, 0x0003ffff },
{ 0xa358, 0x79a8aa1f },
{ 0xa35c, 0x066c420f },
{ 0xa360, 0x0f282207 },
{ 0xa364, 0x17601685 },
{ 0xa368, 0x1f801104 },
{ 0xa36c, 0x37a00c03 },
{ 0xa370, 0x3fc40883 },
{ 0xa374, 0x57c00803 },
{ 0xa378, 0x5fd80682 },
{ 0xa37c, 0x7fe00482 },
{ 0xa380, 0x7f3c7bba },
{ 0xa384, 0xf3307ff0 },
};
/*
* Initial baseband gain settings for RF5111/5112. (The AR5210 only comes
* with RF5110, so its initial BB gain settings live in ar5210_ini above.)
*/
/* RF5111 Initial BaseBand Gain settings */
static const struct ath5k_ini rf5111_ini_bbgain[] = {
{ AR5K_BB_GAIN(0), 0x00000000 },
{ AR5K_BB_GAIN(1), 0x00000020 },
{ AR5K_BB_GAIN(2), 0x00000010 },
{ AR5K_BB_GAIN(3), 0x00000030 },
{ AR5K_BB_GAIN(4), 0x00000008 },
{ AR5K_BB_GAIN(5), 0x00000028 },
{ AR5K_BB_GAIN(6), 0x00000004 },
{ AR5K_BB_GAIN(7), 0x00000024 },
{ AR5K_BB_GAIN(8), 0x00000014 },
{ AR5K_BB_GAIN(9), 0x00000034 },
{ AR5K_BB_GAIN(10), 0x0000000c },
{ AR5K_BB_GAIN(11), 0x0000002c },
{ AR5K_BB_GAIN(12), 0x00000002 },
{ AR5K_BB_GAIN(13), 0x00000022 },
{ AR5K_BB_GAIN(14), 0x00000012 },
{ AR5K_BB_GAIN(15), 0x00000032 },
{ AR5K_BB_GAIN(16), 0x0000000a },
{ AR5K_BB_GAIN(17), 0x0000002a },
{ AR5K_BB_GAIN(18), 0x00000006 },
{ AR5K_BB_GAIN(19), 0x00000026 },
{ AR5K_BB_GAIN(20), 0x00000016 },
{ AR5K_BB_GAIN(21), 0x00000036 },
{ AR5K_BB_GAIN(22), 0x0000000e },
{ AR5K_BB_GAIN(23), 0x0000002e },
{ AR5K_BB_GAIN(24), 0x00000001 },
{ AR5K_BB_GAIN(25), 0x00000021 },
{ AR5K_BB_GAIN(26), 0x00000011 },
{ AR5K_BB_GAIN(27), 0x00000031 },
{ AR5K_BB_GAIN(28), 0x00000009 },
{ AR5K_BB_GAIN(29), 0x00000029 },
{ AR5K_BB_GAIN(30), 0x00000005 },
{ AR5K_BB_GAIN(31), 0x00000025 },
{ AR5K_BB_GAIN(32), 0x00000015 },
{ AR5K_BB_GAIN(33), 0x00000035 },
{ AR5K_BB_GAIN(34), 0x0000000d },
{ AR5K_BB_GAIN(35), 0x0000002d },
{ AR5K_BB_GAIN(36), 0x00000003 },
{ AR5K_BB_GAIN(37), 0x00000023 },
{ AR5K_BB_GAIN(38), 0x00000013 },
{ AR5K_BB_GAIN(39), 0x00000033 },
{ AR5K_BB_GAIN(40), 0x0000000b },
{ AR5K_BB_GAIN(41), 0x0000002b },
{ AR5K_BB_GAIN(42), 0x0000002b },
{ AR5K_BB_GAIN(43), 0x0000002b },
{ AR5K_BB_GAIN(44), 0x0000002b },
{ AR5K_BB_GAIN(45), 0x0000002b },
{ AR5K_BB_GAIN(46), 0x0000002b },
{ AR5K_BB_GAIN(47), 0x0000002b },
{ AR5K_BB_GAIN(48), 0x0000002b },
{ AR5K_BB_GAIN(49), 0x0000002b },
{ AR5K_BB_GAIN(50), 0x0000002b },
{ AR5K_BB_GAIN(51), 0x0000002b },
{ AR5K_BB_GAIN(52), 0x0000002b },
{ AR5K_BB_GAIN(53), 0x0000002b },
{ AR5K_BB_GAIN(54), 0x0000002b },
{ AR5K_BB_GAIN(55), 0x0000002b },
{ AR5K_BB_GAIN(56), 0x0000002b },
{ AR5K_BB_GAIN(57), 0x0000002b },
{ AR5K_BB_GAIN(58), 0x0000002b },
{ AR5K_BB_GAIN(59), 0x0000002b },
{ AR5K_BB_GAIN(60), 0x0000002b },
{ AR5K_BB_GAIN(61), 0x0000002b },
{ AR5K_BB_GAIN(62), 0x00000002 },
{ AR5K_BB_GAIN(63), 0x00000016 },
};
/* RF5112 Initial BaseBand Gain settings (Same for RF5413/5414+) */
static const struct ath5k_ini rf5112_ini_bbgain[] = {
{ AR5K_BB_GAIN(0), 0x00000000 },
{ AR5K_BB_GAIN(1), 0x00000001 },
{ AR5K_BB_GAIN(2), 0x00000002 },
{ AR5K_BB_GAIN(3), 0x00000003 },
{ AR5K_BB_GAIN(4), 0x00000004 },
{ AR5K_BB_GAIN(5), 0x00000005 },
{ AR5K_BB_GAIN(6), 0x00000008 },
{ AR5K_BB_GAIN(7), 0x00000009 },
{ AR5K_BB_GAIN(8), 0x0000000a },
{ AR5K_BB_GAIN(9), 0x0000000b },
{ AR5K_BB_GAIN(10), 0x0000000c },
{ AR5K_BB_GAIN(11), 0x0000000d },
{ AR5K_BB_GAIN(12), 0x00000010 },
{ AR5K_BB_GAIN(13), 0x00000011 },
{ AR5K_BB_GAIN(14), 0x00000012 },
{ AR5K_BB_GAIN(15), 0x00000013 },
{ AR5K_BB_GAIN(16), 0x00000014 },
{ AR5K_BB_GAIN(17), 0x00000015 },
{ AR5K_BB_GAIN(18), 0x00000018 },
{ AR5K_BB_GAIN(19), 0x00000019 },
{ AR5K_BB_GAIN(20), 0x0000001a },
{ AR5K_BB_GAIN(21), 0x0000001b },
{ AR5K_BB_GAIN(22), 0x0000001c },
{ AR5K_BB_GAIN(23), 0x0000001d },
{ AR5K_BB_GAIN(24), 0x00000020 },
{ AR5K_BB_GAIN(25), 0x00000021 },
{ AR5K_BB_GAIN(26), 0x00000022 },
{ AR5K_BB_GAIN(27), 0x00000023 },
{ AR5K_BB_GAIN(28), 0x00000024 },
{ AR5K_BB_GAIN(29), 0x00000025 },
{ AR5K_BB_GAIN(30), 0x00000028 },
{ AR5K_BB_GAIN(31), 0x00000029 },
{ AR5K_BB_GAIN(32), 0x0000002a },
{ AR5K_BB_GAIN(33), 0x0000002b },
{ AR5K_BB_GAIN(34), 0x0000002c },
{ AR5K_BB_GAIN(35), 0x0000002d },
{ AR5K_BB_GAIN(36), 0x00000030 },
{ AR5K_BB_GAIN(37), 0x00000031 },
{ AR5K_BB_GAIN(38), 0x00000032 },
{ AR5K_BB_GAIN(39), 0x00000033 },
{ AR5K_BB_GAIN(40), 0x00000034 },
{ AR5K_BB_GAIN(41), 0x00000035 },
{ AR5K_BB_GAIN(42), 0x00000035 },
{ AR5K_BB_GAIN(43), 0x00000035 },
{ AR5K_BB_GAIN(44), 0x00000035 },
{ AR5K_BB_GAIN(45), 0x00000035 },
{ AR5K_BB_GAIN(46), 0x00000035 },
{ AR5K_BB_GAIN(47), 0x00000035 },
{ AR5K_BB_GAIN(48), 0x00000035 },
{ AR5K_BB_GAIN(49), 0x00000035 },
{ AR5K_BB_GAIN(50), 0x00000035 },
{ AR5K_BB_GAIN(51), 0x00000035 },
{ AR5K_BB_GAIN(52), 0x00000035 },
{ AR5K_BB_GAIN(53), 0x00000035 },
{ AR5K_BB_GAIN(54), 0x00000035 },
{ AR5K_BB_GAIN(55), 0x00000035 },
{ AR5K_BB_GAIN(56), 0x00000035 },
{ AR5K_BB_GAIN(57), 0x00000035 },
{ AR5K_BB_GAIN(58), 0x00000035 },
{ AR5K_BB_GAIN(59), 0x00000035 },
{ AR5K_BB_GAIN(60), 0x00000035 },
{ AR5K_BB_GAIN(61), 0x00000035 },
{ AR5K_BB_GAIN(62), 0x00000010 },
{ AR5K_BB_GAIN(63), 0x0000001a },
};
/**
* ath5k_hw_ini_registers() - Write initial register dump common for all modes
* @ah: The &struct ath5k_hw
* @size: Dump size
* @ini_regs: The array of &struct ath5k_ini
* @skip_pcu: Skip PCU registers
*/
static void
ath5k_hw_ini_registers(struct ath5k_hw *ah, unsigned int size,
const struct ath5k_ini *ini_regs, bool skip_pcu)
{
unsigned int i;
/* Write initial registers */
for (i = 0; i < size; i++) {
/* Skip PCU registers if
* requested */
if (skip_pcu &&
ini_regs[i].ini_register >= AR5K_PCU_MIN &&
ini_regs[i].ini_register <= AR5K_PCU_MAX)
continue;
switch (ini_regs[i].ini_mode) {
case AR5K_INI_READ:
/* Cleared on read */
ath5k_hw_reg_read(ah, ini_regs[i].ini_register);
break;
case AR5K_INI_WRITE:
default:
AR5K_REG_WAIT(i);
ath5k_hw_reg_write(ah, ini_regs[i].ini_value,
ini_regs[i].ini_register);
}
}
}
/**
* ath5k_hw_ini_mode_registers() - Write initial mode-specific register dump
* @ah: The &struct ath5k_hw
* @size: Dump size
* @ini_mode: The array of &struct ath5k_ini_mode
* @mode: One of enum ath5k_driver_mode
*/
static void
ath5k_hw_ini_mode_registers(struct ath5k_hw *ah,
unsigned int size, const struct ath5k_ini_mode *ini_mode,
u8 mode)
{
unsigned int i;
for (i = 0; i < size; i++) {
AR5K_REG_WAIT(i);
ath5k_hw_reg_write(ah, ini_mode[i].mode_value[mode],
(u32)ini_mode[i].mode_register);
}
}
/**
* ath5k_hw_write_initvals() - Write initial chip-specific register dump
* @ah: The &struct ath5k_hw
* @mode: One of enum ath5k_driver_mode
* @skip_pcu: Skip PCU registers
*
* Write the initial chip-specific register dump, to get the chipset
* into a clean, ready-to-work state after a warm reset.
*/
int
ath5k_hw_write_initvals(struct ath5k_hw *ah, u8 mode, bool skip_pcu)
{
/*
* Write initial register settings
*/
/* For AR5212 and compatible */
if (ah->ah_version == AR5K_AR5212) {
/* First set of mode-specific settings */
ath5k_hw_ini_mode_registers(ah,
ARRAY_SIZE(ar5212_ini_mode_start),
ar5212_ini_mode_start, mode);
/*
* Write initial settings common for all modes
*/
ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5212_ini_common_start),
ar5212_ini_common_start, skip_pcu);
/* Second set of mode-specific settings */
switch (ah->ah_radio) {
case AR5K_RF5111:
ath5k_hw_ini_mode_registers(ah,
ARRAY_SIZE(rf5111_ini_mode_end),
rf5111_ini_mode_end, mode);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5111_ini_common_end),
rf5111_ini_common_end, skip_pcu);
/* Baseband gain table */
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5111_ini_bbgain),
rf5111_ini_bbgain, skip_pcu);
break;
case AR5K_RF5112:
ath5k_hw_ini_mode_registers(ah,
ARRAY_SIZE(rf5112_ini_mode_end),
rf5112_ini_mode_end, mode);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_common_end),
rf5112_ini_common_end, skip_pcu);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_bbgain),
rf5112_ini_bbgain, skip_pcu);
break;
case AR5K_RF5413:
ath5k_hw_ini_mode_registers(ah,
ARRAY_SIZE(rf5413_ini_mode_end),
rf5413_ini_mode_end, mode);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5413_ini_common_end),
rf5413_ini_common_end, skip_pcu);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_bbgain),
rf5112_ini_bbgain, skip_pcu);
break;
case AR5K_RF2316:
case AR5K_RF2413:
ath5k_hw_ini_mode_registers(ah,
ARRAY_SIZE(rf2413_ini_mode_end),
rf2413_ini_mode_end, mode);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf2413_ini_common_end),
rf2413_ini_common_end, skip_pcu);
/* Override settings from rf2413_ini_common_end */
if (ah->ah_radio == AR5K_RF2316) {
ath5k_hw_reg_write(ah, 0x00004000,
AR5K_PHY_AGC);
ath5k_hw_reg_write(ah, 0x081b7caa,
0xa274);
}
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_bbgain),
rf5112_ini_bbgain, skip_pcu);
break;
case AR5K_RF2317:
ath5k_hw_ini_mode_registers(ah,
ARRAY_SIZE(rf2413_ini_mode_end),
rf2413_ini_mode_end, mode);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf2425_ini_common_end),
rf2425_ini_common_end, skip_pcu);
/* Override settings from rf2413_ini_mode_end */
ath5k_hw_reg_write(ah, 0x00180a65, AR5K_PHY_GAIN);
/* Override settings from rf2413_ini_common_end */
ath5k_hw_reg_write(ah, 0x00004000, AR5K_PHY_AGC);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TPC_RG5,
AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP, 0xa);
ath5k_hw_reg_write(ah, 0x800000a8, 0x8140);
ath5k_hw_reg_write(ah, 0x000000ff, 0x9958);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_bbgain),
rf5112_ini_bbgain, skip_pcu);
break;
case AR5K_RF2425:
ath5k_hw_ini_mode_registers(ah,
ARRAY_SIZE(rf2425_ini_mode_end),
rf2425_ini_mode_end, mode);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf2425_ini_common_end),
rf2425_ini_common_end, skip_pcu);
ath5k_hw_ini_registers(ah,
ARRAY_SIZE(rf5112_ini_bbgain),
rf5112_ini_bbgain, skip_pcu);
break;
default:
return -EINVAL;
}
/* For AR5211 */
} else if (ah->ah_version == AR5K_AR5211) {
/* AR5211 only supports modes 0-2 (11a/11b/11g) */
if (mode > 2) {
ATH5K_ERR(ah, "unsupported channel mode: %d\n", mode);
return -EINVAL;
}
/* Mode-specific settings */
ath5k_hw_ini_mode_registers(ah, ARRAY_SIZE(ar5211_ini_mode),
ar5211_ini_mode, mode);
/*
* Write initial settings common for all modes
*/
ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5211_ini),
ar5211_ini, skip_pcu);
/* AR5211 only comes with 5111 */
/* Baseband gain table */
ath5k_hw_ini_registers(ah, ARRAY_SIZE(rf5111_ini_bbgain),
rf5111_ini_bbgain, skip_pcu);
/* For AR5210 (for mode settings check out ath5k_hw_reset_tx_queue) */
} else if (ah->ah_version == AR5K_AR5210) {
ath5k_hw_ini_registers(ah, ARRAY_SIZE(ar5210_ini),
ar5210_ini, skip_pcu);
}
return 0;
}
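/*
 * Hypothetical call-site sketch (hedged; the real caller is the chip
 * reset path, presumably ath5k_hw_reset() in reset.c):
 *
 *	ret = ath5k_hw_write_initvals(ah, mode, skip_pcu);
 *	if (ret)
 *		return ret;
 */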
|
linux-master
|
drivers/net/wireless/ath/ath5k/initvals.c
|
// SPDX-License-Identifier: GPL-2.0
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/device.h>
#include <linux/pci.h>
#include "ath5k.h"
#include "reg.h"
#define SIMPLE_SHOW_STORE(name, get, set) \
static ssize_t ath5k_attr_show_##name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
struct ath5k_hw *ah = hw->priv; \
return sysfs_emit(buf, "%d\n", get); \
} \
\
static ssize_t ath5k_attr_store_##name(struct device *dev, \
struct device_attribute *attr, \
const char *buf, size_t count) \
{ \
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
struct ath5k_hw *ah = hw->priv; \
int val, ret; \
\
ret = kstrtoint(buf, 10, &val); \
if (ret < 0) \
return ret; \
set(ah, val); \
return count; \
} \
static DEVICE_ATTR(name, 0644, \
ath5k_attr_show_##name, ath5k_attr_store_##name)
#define SIMPLE_SHOW(name, get) \
static ssize_t ath5k_attr_show_##name(struct device *dev, \
struct device_attribute *attr, \
char *buf) \
{ \
struct ieee80211_hw *hw = dev_get_drvdata(dev); \
struct ath5k_hw *ah = hw->priv; \
return sysfs_emit(buf, "%d\n", get); \
} \
static DEVICE_ATTR(name, 0444, ath5k_attr_show_##name, NULL)
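/*
 * Expansion sketch (illustrative): SIMPLE_SHOW_STORE(ani_mode,
 * ah->ani_state.ani_mode, ath5k_ani_init) generates
 * ath5k_attr_show_ani_mode()/ath5k_attr_store_ani_mode() plus a 0644
 * "ani_mode" device attribute, so once the "ani" group is registered
 * below, a write such as
 *
 *	echo 0 > .../ani/ani_mode
 *
 * ends up calling ath5k_ani_init(ah, 0) (sysfs path shortened here).
 */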
/*** ANI ***/
SIMPLE_SHOW_STORE(ani_mode, ah->ani_state.ani_mode, ath5k_ani_init);
SIMPLE_SHOW_STORE(noise_immunity_level, ah->ani_state.noise_imm_level,
ath5k_ani_set_noise_immunity_level);
SIMPLE_SHOW_STORE(spur_level, ah->ani_state.spur_level,
ath5k_ani_set_spur_immunity_level);
SIMPLE_SHOW_STORE(firstep_level, ah->ani_state.firstep_level,
ath5k_ani_set_firstep_level);
SIMPLE_SHOW_STORE(ofdm_weak_signal_detection, ah->ani_state.ofdm_weak_sig,
ath5k_ani_set_ofdm_weak_signal_detection);
SIMPLE_SHOW_STORE(cck_weak_signal_detection, ah->ani_state.cck_weak_sig,
ath5k_ani_set_cck_weak_signal_detection);
SIMPLE_SHOW(spur_level_max, ah->ani_state.max_spur_level);
static ssize_t ath5k_attr_show_noise_immunity_level_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", ATH5K_ANI_MAX_NOISE_IMM_LVL);
}
static DEVICE_ATTR(noise_immunity_level_max, 0444,
ath5k_attr_show_noise_immunity_level_max, NULL);
static ssize_t ath5k_attr_show_firstep_level_max(struct device *dev,
struct device_attribute *attr,
char *buf)
{
return sysfs_emit(buf, "%d\n", ATH5K_ANI_MAX_FIRSTEP_LVL);
}
static DEVICE_ATTR(firstep_level_max, 0444,
ath5k_attr_show_firstep_level_max, NULL);
static struct attribute *ath5k_sysfs_entries_ani[] = {
&dev_attr_ani_mode.attr,
&dev_attr_noise_immunity_level.attr,
&dev_attr_spur_level.attr,
&dev_attr_firstep_level.attr,
&dev_attr_ofdm_weak_signal_detection.attr,
&dev_attr_cck_weak_signal_detection.attr,
&dev_attr_noise_immunity_level_max.attr,
&dev_attr_spur_level_max.attr,
&dev_attr_firstep_level_max.attr,
NULL
};
static struct attribute_group ath5k_attribute_group_ani = {
.name = "ani",
.attrs = ath5k_sysfs_entries_ani,
};
/*** register / unregister ***/
int
ath5k_sysfs_register(struct ath5k_hw *ah)
{
struct device *dev = ah->dev;
int err;
err = sysfs_create_group(&dev->kobj, &ath5k_attribute_group_ani);
if (err) {
ATH5K_ERR(ah, "failed to create sysfs group\n");
return err;
}
return 0;
}
void
ath5k_sysfs_unregister(struct ath5k_hw *ah)
{
struct device *dev = ah->dev;
sysfs_remove_group(&dev->kobj, &ath5k_attribute_group_ani);
}
|
linux-master
|
drivers/net/wireless/ath/ath5k/sysfs.c
|
/*
* Copyright (c) 2004-2008 Reyk Floeter <[email protected]>
* Copyright (c) 2006-2008 Nick Kossifidis <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/****************\
GPIO Functions
\****************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* DOC: GPIO/LED functions
*
* Here we control the 6 bidirectional GPIO pins provided by the hw.
* We can set a GPIO pin to be an input or an output pin on GPIO control
* register and then read or set its status from GPIO data input/output
* registers.
*
* We also control the two LED pins provided by the hw: LED_0 is our
* "power" LED and LED_1 is our "network activity" LED, but the hw
* supports more scenarios. Vendors might also provide LEDs connected
* to the GPIO pins; we handle those through the LED subsystem in led.c.
*/
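/*
* A minimal usage sketch of the helpers below (illustrative only; the
* pin numbers here are hypothetical, not tied to any real board wiring):
*
*	ath5k_hw_set_gpio_output(ah, 1);	// configure pin 1 as output
*	ath5k_hw_set_gpio(ah, 1, 1);		// drive pin 1 high
*
*	ath5k_hw_set_gpio_input(ah, 2);		// configure pin 2 as input
*	val = ath5k_hw_get_gpio(ah, 2);		// read pin 2 (0 or 1)
*/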
/**
* ath5k_hw_set_ledstate() - Set led state
* @ah: The &struct ath5k_hw
* @state: One of AR5K_LED_*
*
* Used to set the LED blinking state. This only works for LEDs
* connected to the LED_0/LED_1 pins, not for GPIO-based LEDs.
*/
void
ath5k_hw_set_ledstate(struct ath5k_hw *ah, unsigned int state)
{
u32 led;
/*5210 has different led mode handling*/
u32 led_5210;
/*Reset led status*/
if (ah->ah_version != AR5K_AR5210)
AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
AR5K_PCICFG_LEDMODE | AR5K_PCICFG_LED);
else
AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_LED);
/*
* Some blinking values, define at your wish
*/
switch (state) {
case AR5K_LED_SCAN:
case AR5K_LED_AUTH:
led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_PEND;
led_5210 = AR5K_PCICFG_LED_PEND | AR5K_PCICFG_LED_BCTL;
break;
case AR5K_LED_INIT:
led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_NONE;
led_5210 = AR5K_PCICFG_LED_PEND;
break;
case AR5K_LED_ASSOC:
case AR5K_LED_RUN:
led = AR5K_PCICFG_LEDMODE_PROP | AR5K_PCICFG_LED_ASSOC;
led_5210 = AR5K_PCICFG_LED_ASSOC;
break;
default:
led = AR5K_PCICFG_LEDMODE_PROM | AR5K_PCICFG_LED_NONE;
led_5210 = AR5K_PCICFG_LED_PEND;
break;
}
/*Write new status to the register*/
if (ah->ah_version != AR5K_AR5210)
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led);
else
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, led_5210);
}
/**
* ath5k_hw_set_gpio_input() - Set GPIO inputs
* @ah: The &struct ath5k_hw
* @gpio: GPIO pin to set as input
*/
int
ath5k_hw_set_gpio_input(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
ath5k_hw_reg_write(ah,
(ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
| AR5K_GPIOCR_IN(gpio), AR5K_GPIOCR);
return 0;
}
/**
* ath5k_hw_set_gpio_output() - Set GPIO outputs
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to set as output
*/
int
ath5k_hw_set_gpio_output(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
ath5k_hw_reg_write(ah,
(ath5k_hw_reg_read(ah, AR5K_GPIOCR) & ~AR5K_GPIOCR_OUT(gpio))
| AR5K_GPIOCR_OUT(gpio), AR5K_GPIOCR);
return 0;
}
/**
* ath5k_hw_get_gpio() - Get GPIO state
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to read
*/
u32
ath5k_hw_get_gpio(struct ath5k_hw *ah, u32 gpio)
{
if (gpio >= AR5K_NUM_GPIO)
return 0xffffffff;
/* GPIO input magic */
return ((ath5k_hw_reg_read(ah, AR5K_GPIODI) & AR5K_GPIODI_M) >> gpio) &
0x1;
}
/**
* ath5k_hw_set_gpio() - Set GPIO state
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to set
* @val: Value to set (boolean)
*/
int
ath5k_hw_set_gpio(struct ath5k_hw *ah, u32 gpio, u32 val)
{
u32 data;
if (gpio >= AR5K_NUM_GPIO)
return -EINVAL;
/* GPIO output magic */
data = ath5k_hw_reg_read(ah, AR5K_GPIODO);
data &= ~(1 << gpio);
data |= (val & 1) << gpio;
ath5k_hw_reg_write(ah, data, AR5K_GPIODO);
return 0;
}
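/*
* Worked example of the read-modify-write above (values are made up):
* with gpio = 3 and val = 1, data &= ~(1 << 3) clears bit 3 and
* data |= (1 & 1) << 3 sets it again, so e.g. 0x05 becomes 0x0d while
* all other pins keep their previous output levels.
*/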
/**
* ath5k_hw_set_gpio_intr() - Initialize the GPIO interrupt (RFKill switch)
* @ah: The &struct ath5k_hw
* @gpio: The GPIO pin to use
* @interrupt_level: True to generate interrupt on active pin (high)
*
* This function is used to set up the GPIO interrupt for the hw RFKill switch.
* That switch is connected to a GPIO pin and its number is stored in the
* EEPROM. It can either open or close the circuit to indicate that we
* should disable RF/Wireless to save power (the polarity is also read
* from the EEPROM).
*/
void
ath5k_hw_set_gpio_intr(struct ath5k_hw *ah, unsigned int gpio,
u32 interrupt_level)
{
u32 data;
if (gpio >= AR5K_NUM_GPIO)
return;
/*
* Set the GPIO interrupt
*/
data = (ath5k_hw_reg_read(ah, AR5K_GPIOCR) &
~(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_SELH |
AR5K_GPIOCR_INT_ENA | AR5K_GPIOCR_OUT(gpio))) |
(AR5K_GPIOCR_INT_SEL(gpio) | AR5K_GPIOCR_INT_ENA);
ath5k_hw_reg_write(ah, interrupt_level ? data :
(data | AR5K_GPIOCR_INT_SELH), AR5K_GPIOCR);
ah->ah_imr |= AR5K_IMR_GPIO;
/* Enable GPIO interrupts */
AR5K_REG_ENABLE_BITS(ah, AR5K_PIMR, AR5K_IMR_GPIO);
}
|
linux-master
|
drivers/net/wireless/ath/ath5k/gpio.c
|
/*
* Copyright (c) 2008-2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/nl80211.h>
#include <linux/pci.h>
#include <linux/etherdevice.h>
#include <linux/module.h>
#include "../ath.h"
#include "ath5k.h"
#include "debug.h"
#include "base.h"
#include "reg.h"
/* Known PCI ids */
static const struct pci_device_id ath5k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, 0x0207) }, /* 5210 early */
{ PCI_VDEVICE(ATHEROS, 0x0007) }, /* 5210 */
{ PCI_VDEVICE(ATHEROS, 0x0011) }, /* 5311 - this is on AHB bus !*/
{ PCI_VDEVICE(ATHEROS, 0x0012) }, /* 5211 */
{ PCI_VDEVICE(ATHEROS, 0x0013) }, /* 5212 */
{ PCI_VDEVICE(3COM_2, 0x0013) }, /* 3com 5212 */
{ PCI_VDEVICE(3COM, 0x0013) }, /* 3com 3CRDAG675 5212 */
{ PCI_VDEVICE(ATHEROS, 0x1014) }, /* IBM minipci 5212 */
{ PCI_VDEVICE(ATHEROS, 0x0014) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0015) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0016) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0017) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0018) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x0019) }, /* 5212 compatible */
{ PCI_VDEVICE(ATHEROS, 0x001a) }, /* 2413 Griffin-lite */
{ PCI_VDEVICE(ATHEROS, 0x001b) }, /* 5413 Eagle */
{ PCI_VDEVICE(ATHEROS, 0x001c) }, /* PCI-E cards */
{ PCI_VDEVICE(ATHEROS, 0x001d) }, /* 2417 Nala */
{ PCI_VDEVICE(ATHEROS, 0xff1b) }, /* AR5BXB63 */
{ 0 }
};
MODULE_DEVICE_TABLE(pci, ath5k_pci_id_table);
/* return bus cachesize in 4B word units */
static void ath5k_pci_read_cachesize(struct ath_common *common, int *csz)
{
struct ath5k_hw *ah = (struct ath5k_hw *) common->priv;
u8 u8tmp;
pci_read_config_byte(ah->pdev, PCI_CACHE_LINE_SIZE, &u8tmp);
*csz = (int)u8tmp;
/*
* This check was put in to avoid "unpleasant" consequences if
* the bootrom has not fully initialized all PCI devices.
* Sometimes the cache line size register is not set.
*/
if (*csz == 0)
*csz = L1_CACHE_BYTES >> 2; /* Use the default size */
}
/*
* Read from eeprom
*/
static bool
ath5k_pci_eeprom_read(struct ath_common *common, u32 offset, u16 *data)
{
struct ath5k_hw *ah = (struct ath5k_hw *) common->ah;
u32 status, timeout;
/*
* Initialize EEPROM access
*/
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_EEAE);
(void)ath5k_hw_reg_read(ah, AR5K_EEPROM_BASE + (4 * offset));
} else {
ath5k_hw_reg_write(ah, offset, AR5K_EEPROM_BASE);
AR5K_REG_ENABLE_BITS(ah, AR5K_EEPROM_CMD,
AR5K_EEPROM_CMD_READ);
}
for (timeout = AR5K_TUNE_REGISTER_TIMEOUT; timeout > 0; timeout--) {
status = ath5k_hw_reg_read(ah, AR5K_EEPROM_STATUS);
if (status & AR5K_EEPROM_STAT_RDDONE) {
if (status & AR5K_EEPROM_STAT_RDERR)
return false;
*data = (u16)(ath5k_hw_reg_read(ah, AR5K_EEPROM_DATA) &
0xffff);
return true;
}
usleep_range(15, 20);
}
return false;
}
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
ah->ah_mac_srev = ath5k_hw_reg_read(ah, AR5K_SREV);
return 0;
}
/*
* Read the MAC address from eeprom or platform_data
*/
static int ath5k_pci_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
u8 mac_d[ETH_ALEN] = {};
u32 total, offset;
u16 data;
int octet;
AR5K_EEPROM_READ(0x20, data);
for (offset = 0x1f, octet = 0, total = 0; offset >= 0x1d; offset--) {
AR5K_EEPROM_READ(offset, data);
total += data;
mac_d[octet + 1] = data & 0xff;
mac_d[octet] = data >> 8;
octet += 2;
}
if (!total || total == 3 * 0xffff)
return -EINVAL;
memcpy(mac, mac_d, ETH_ALEN);
return 0;
}
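/*
* Byte-order sketch for the loop above (hypothetical EEPROM contents):
* if the words at offsets 0x1f, 0x1e, 0x1d read 0xaabb, 0xccdd, 0xeeff
* respectively, mac_d[] becomes aa:bb:cc:dd:ee:ff - each 16-bit word
* stores two octets with the earlier octet in the high byte.
*/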
/* Common ath_bus_opts structure */
static const struct ath_bus_ops ath_pci_bus_ops = {
.ath_bus_type = ATH_PCI,
.read_cachesize = ath5k_pci_read_cachesize,
.eeprom_read = ath5k_pci_eeprom_read,
.eeprom_read_mac = ath5k_pci_eeprom_read_mac,
};
/********************\
* PCI Initialization *
\********************/
static int
ath5k_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
void __iomem *mem;
struct ath5k_hw *ah;
struct ieee80211_hw *hw;
int ret;
u8 csz;
/*
* L0s needs to be disabled on all ath5k cards.
*
* For distributions shipping with CONFIG_PCIEASPM (enabled by default
* since 2.6.36) this also means both L1 and L0s will be disabled when
* a pre-1.1 PCIe device is detected. We do know that L1 works correctly
* even on all pre-1.1 PCIe ath5k devices, but we cannot currently undo
* the effect of a blacklist; for details read pcie_aspm_sanity_check()
* and see how it adjusts the device link capability.
*
* It may be possible in the future to implement some PCI API to allow
* drivers to override blacklists for pre 1.1 PCIe but for now it is
* best to accept that both L0s and L1 will be disabled completely for
* distributions shipping with CONFIG_PCIEASPM rather than having this
* issue present. Motivation for adding this new API will be to help
* with power consumption for some of these devices.
*/
pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S);
ret = pci_enable_device(pdev);
if (ret) {
dev_err(&pdev->dev, "can't enable device\n");
goto err;
}
/* XXX 32-bit addressing only */
ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
if (ret) {
dev_err(&pdev->dev, "32-bit DMA not available\n");
goto err_dis;
}
/*
* Cache line size is used to size and align various
* structures used to communicate with the hardware.
*/
pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &csz);
if (csz == 0) {
/*
* Linux 2.4.18 (at least) writes the cache line size
* register as a 16-bit wide register which is wrong.
* We must have this setup properly for rx buffer
* DMA to work so force a reasonable value here if it
* comes up zero.
*/
csz = L1_CACHE_BYTES >> 2;
pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, csz);
}
/*
* The default setting of latency timer yields poor results,
* set it to the value used by other systems. It may be worth
* tweaking this setting more.
*/
pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xa8);
/* Enable bus mastering */
pci_set_master(pdev);
/*
* Disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state.
*/
pci_write_config_byte(pdev, 0x41, 0);
ret = pci_request_region(pdev, 0, "ath5k");
if (ret) {
dev_err(&pdev->dev, "cannot reserve PCI memory region\n");
goto err_dis;
}
mem = pci_iomap(pdev, 0, 0);
if (!mem) {
dev_err(&pdev->dev, "cannot remap PCI memory region\n");
ret = -EIO;
goto err_reg;
}
/*
* Allocate hw (mac80211 main struct)
* and hw->priv (driver private data)
*/
hw = ieee80211_alloc_hw(sizeof(*ah), &ath5k_hw_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "cannot allocate ieee80211_hw\n");
ret = -ENOMEM;
goto err_map;
}
dev_info(&pdev->dev, "registered as '%s'\n", wiphy_name(hw->wiphy));
ah = hw->priv;
ah->hw = hw;
ah->pdev = pdev;
ah->dev = &pdev->dev;
ah->irq = pdev->irq;
ah->devid = id->device;
ah->iobase = mem; /* So we can unmap it on detach */
/* Initialize */
ret = ath5k_init_ah(ah, &ath_pci_bus_ops);
if (ret)
goto err_free;
/* Set private data */
pci_set_drvdata(pdev, hw);
return 0;
err_free:
ieee80211_free_hw(hw);
err_map:
pci_iounmap(pdev, mem);
err_reg:
pci_release_region(pdev, 0);
err_dis:
pci_disable_device(pdev);
err:
return ret;
}
static void
ath5k_pci_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
ath5k_deinit_ah(ah);
pci_iounmap(pdev, ah->iobase);
pci_release_region(pdev, 0);
pci_disable_device(pdev);
ieee80211_free_hw(hw);
}
#ifdef CONFIG_PM_SLEEP
static int ath5k_pci_suspend(struct device *dev)
{
struct ieee80211_hw *hw = dev_get_drvdata(dev);
struct ath5k_hw *ah = hw->priv;
ath5k_led_off(ah);
return 0;
}
static int ath5k_pci_resume(struct device *dev)
{
struct pci_dev *pdev = to_pci_dev(dev);
struct ieee80211_hw *hw = pci_get_drvdata(pdev);
struct ath5k_hw *ah = hw->priv;
/*
* Suspend/Resume resets the PCI configuration space, so we have to
* re-disable the RETRY_TIMEOUT register (0x41) to keep
* PCI Tx retries from interfering with C3 CPU state
*/
pci_write_config_byte(pdev, 0x41, 0);
ath5k_led_enable(ah);
return 0;
}
static SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
#define ATH5K_PM_OPS (&ath5k_pm_ops)
#else
#define ATH5K_PM_OPS NULL
#endif /* CONFIG_PM_SLEEP */
static struct pci_driver ath5k_pci_driver = {
.name = KBUILD_MODNAME,
.id_table = ath5k_pci_id_table,
.probe = ath5k_pci_probe,
.remove = ath5k_pci_remove,
.driver.pm = ATH5K_PM_OPS,
};
module_pci_driver(ath5k_pci_driver);
|
linux-master
|
drivers/net/wireless/ath/ath5k/pci.c
|
/*
* RFKILL support for ath5k
*
* Copyright (c) 2009 Tobias Doerffel <[email protected]>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#include "ath5k.h"
static inline void ath5k_rfkill_disable(struct ath5k_hw *ah)
{
ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill disable (gpio:%d polarity:%d)\n",
ah->rf_kill.gpio, ah->rf_kill.polarity);
ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, !ah->rf_kill.polarity);
}
static inline void ath5k_rfkill_enable(struct ath5k_hw *ah)
{
ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "rfkill enable (gpio:%d polarity:%d)\n",
ah->rf_kill.gpio, ah->rf_kill.polarity);
ath5k_hw_set_gpio_output(ah, ah->rf_kill.gpio);
ath5k_hw_set_gpio(ah, ah->rf_kill.gpio, ah->rf_kill.polarity);
}
static inline void ath5k_rfkill_set_intr(struct ath5k_hw *ah, bool enable)
{
u32 curval;
ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);
curval = ath5k_hw_get_gpio(ah, ah->rf_kill.gpio);
ath5k_hw_set_gpio_intr(ah, ah->rf_kill.gpio, enable ?
!!curval : !curval);
}
static bool
ath5k_is_rfkill_set(struct ath5k_hw *ah)
{
/* configuring GPIO for input for some reason disables rfkill */
/*ath5k_hw_set_gpio_input(ah, ah->rf_kill.gpio);*/
return ath5k_hw_get_gpio(ah, ah->rf_kill.gpio) ==
ah->rf_kill.polarity;
}
static void
ath5k_tasklet_rfkill_toggle(struct tasklet_struct *t)
{
struct ath5k_hw *ah = from_tasklet(ah, t, rf_kill.toggleq);
bool blocked;
blocked = ath5k_is_rfkill_set(ah);
wiphy_rfkill_set_hw_state(ah->hw->wiphy, blocked);
}
void
ath5k_rfkill_hw_start(struct ath5k_hw *ah)
{
/* read rfkill GPIO configuration from EEPROM header */
ah->rf_kill.gpio = ah->ah_capabilities.cap_eeprom.ee_rfkill_pin;
ah->rf_kill.polarity = ah->ah_capabilities.cap_eeprom.ee_rfkill_pol;
tasklet_setup(&ah->rf_kill.toggleq, ath5k_tasklet_rfkill_toggle);
ath5k_rfkill_disable(ah);
/* enable interrupt for rfkill switch */
if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
ath5k_rfkill_set_intr(ah, true);
}
void
ath5k_rfkill_hw_stop(struct ath5k_hw *ah)
{
/* disable interrupt for rfkill switch */
if (AR5K_EEPROM_HDR_RFKILL(ah->ah_capabilities.cap_eeprom.ee_header))
ath5k_rfkill_set_intr(ah, false);
tasklet_kill(&ah->rf_kill.toggleq);
/* enable RFKILL when stopping HW so Wifi LED is turned off */
ath5k_rfkill_enable(ah);
}
|
linux-master
|
drivers/net/wireless/ath/ath5k/rfkill.c
|
/*-
* Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
* Copyright (c) 2004-2005 Atheros Communications, Inc.
* Copyright (c) 2006 Devicescape Software, Inc.
* Copyright (c) 2007 Jiri Slaby <[email protected]>
* Copyright (c) 2007 Luis R. Rodriguez <[email protected]>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hardirq.h>
#include <linux/if.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/cache.h>
#include <linux/ethtool.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/nl80211.h>
#include <net/cfg80211.h>
#include <net/ieee80211_radiotap.h>
#include <asm/unaligned.h>
#include <net/mac80211.h>
#include "base.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"
#include "ath5k.h"
#include "../regd.h"
#define CREATE_TRACE_POINTS
#include "trace.h"
bool ath5k_modparam_nohwcrypt;
module_param_named(nohwcrypt, ath5k_modparam_nohwcrypt, bool, 0444);
MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
static bool modparam_fastchanswitch;
module_param_named(fastchanswitch, modparam_fastchanswitch, bool, 0444);
MODULE_PARM_DESC(fastchanswitch, "Enable fast channel switching for AR2413/AR5413 radios.");
static bool ath5k_modparam_no_hw_rfkill_switch;
module_param_named(no_hw_rfkill_switch, ath5k_modparam_no_hw_rfkill_switch,
bool, 0444);
MODULE_PARM_DESC(no_hw_rfkill_switch, "Ignore the GPIO RFKill switch state");
/* Module info */
MODULE_AUTHOR("Jiri Slaby");
MODULE_AUTHOR("Nick Kossifidis");
MODULE_DESCRIPTION("Support for 5xxx series of Atheros 802.11 wireless LAN cards.");
MODULE_LICENSE("Dual BSD/GPL");
static int ath5k_init(struct ieee80211_hw *hw);
static int ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
bool skip_pcu);
/* Known SREVs */
static const struct ath5k_srev_name srev_names[] = {
#ifdef CONFIG_ATH5K_AHB
{ "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R2 },
{ "5312", AR5K_VERSION_MAC, AR5K_SREV_AR5312_R7 },
{ "2313", AR5K_VERSION_MAC, AR5K_SREV_AR2313_R8 },
{ "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R6 },
{ "2315", AR5K_VERSION_MAC, AR5K_SREV_AR2315_R7 },
{ "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R1 },
{ "2317", AR5K_VERSION_MAC, AR5K_SREV_AR2317_R2 },
#else
{ "5210", AR5K_VERSION_MAC, AR5K_SREV_AR5210 },
{ "5311", AR5K_VERSION_MAC, AR5K_SREV_AR5311 },
{ "5311A", AR5K_VERSION_MAC, AR5K_SREV_AR5311A },
{ "5311B", AR5K_VERSION_MAC, AR5K_SREV_AR5311B },
{ "5211", AR5K_VERSION_MAC, AR5K_SREV_AR5211 },
{ "5212", AR5K_VERSION_MAC, AR5K_SREV_AR5212 },
{ "5213", AR5K_VERSION_MAC, AR5K_SREV_AR5213 },
{ "5213A", AR5K_VERSION_MAC, AR5K_SREV_AR5213A },
{ "2413", AR5K_VERSION_MAC, AR5K_SREV_AR2413 },
{ "2414", AR5K_VERSION_MAC, AR5K_SREV_AR2414 },
{ "5424", AR5K_VERSION_MAC, AR5K_SREV_AR5424 },
{ "5413", AR5K_VERSION_MAC, AR5K_SREV_AR5413 },
{ "5414", AR5K_VERSION_MAC, AR5K_SREV_AR5414 },
{ "2415", AR5K_VERSION_MAC, AR5K_SREV_AR2415 },
{ "5416", AR5K_VERSION_MAC, AR5K_SREV_AR5416 },
{ "5418", AR5K_VERSION_MAC, AR5K_SREV_AR5418 },
{ "2425", AR5K_VERSION_MAC, AR5K_SREV_AR2425 },
{ "2417", AR5K_VERSION_MAC, AR5K_SREV_AR2417 },
#endif
{ "xxxxx", AR5K_VERSION_MAC, AR5K_SREV_UNKNOWN },
{ "5110", AR5K_VERSION_RAD, AR5K_SREV_RAD_5110 },
{ "5111", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111 },
{ "5111A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5111A },
{ "2111", AR5K_VERSION_RAD, AR5K_SREV_RAD_2111 },
{ "5112", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112 },
{ "5112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112A },
{ "5112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_5112B },
{ "2112", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112 },
{ "2112A", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112A },
{ "2112B", AR5K_VERSION_RAD, AR5K_SREV_RAD_2112B },
{ "2413", AR5K_VERSION_RAD, AR5K_SREV_RAD_2413 },
{ "5413", AR5K_VERSION_RAD, AR5K_SREV_RAD_5413 },
{ "5424", AR5K_VERSION_RAD, AR5K_SREV_RAD_5424 },
{ "5133", AR5K_VERSION_RAD, AR5K_SREV_RAD_5133 },
#ifdef CONFIG_ATH5K_AHB
{ "2316", AR5K_VERSION_RAD, AR5K_SREV_RAD_2316 },
{ "2317", AR5K_VERSION_RAD, AR5K_SREV_RAD_2317 },
#endif
{ "xxxxx", AR5K_VERSION_RAD, AR5K_SREV_UNKNOWN },
};
static const struct ieee80211_rate ath5k_rates[] = {
{ .bitrate = 10,
.hw_value = ATH5K_RATE_CODE_1M, },
{ .bitrate = 20,
.hw_value = ATH5K_RATE_CODE_2M,
.hw_value_short = ATH5K_RATE_CODE_2M | AR5K_SET_SHORT_PREAMBLE,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 55,
.hw_value = ATH5K_RATE_CODE_5_5M,
.hw_value_short = ATH5K_RATE_CODE_5_5M | AR5K_SET_SHORT_PREAMBLE,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 110,
.hw_value = ATH5K_RATE_CODE_11M,
.hw_value_short = ATH5K_RATE_CODE_11M | AR5K_SET_SHORT_PREAMBLE,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 60,
.hw_value = ATH5K_RATE_CODE_6M,
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 90,
.hw_value = ATH5K_RATE_CODE_9M,
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 120,
.hw_value = ATH5K_RATE_CODE_12M,
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 180,
.hw_value = ATH5K_RATE_CODE_18M,
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 240,
.hw_value = ATH5K_RATE_CODE_24M,
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 360,
.hw_value = ATH5K_RATE_CODE_36M,
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 480,
.hw_value = ATH5K_RATE_CODE_48M,
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
{ .bitrate = 540,
.hw_value = ATH5K_RATE_CODE_54M,
.flags = IEEE80211_RATE_SUPPORTS_5MHZ |
IEEE80211_RATE_SUPPORTS_10MHZ },
};
static inline u64 ath5k_extend_tsf(struct ath5k_hw *ah, u32 rstamp)
{
u64 tsf = ath5k_hw_get_tsf64(ah);
if ((tsf & 0x7fff) < rstamp)
tsf -= 0x8000;
return (tsf & ~0x7fff) | rstamp;
}
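/*
* Worked example for the 15-bit extension above (made-up numbers):
* with tsf = 0x10001 (low 15 bits = 0x0001) and rstamp = 0x7ffe the
* low bits of the TSF are smaller than rstamp, so the frame arrived
* before the last 0x8000 rollover; tsf is pulled back to 0x8001 and
* the result is (0x8000 | 0x7ffe) = 0xfffe.
*/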
const char *
ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
{
const char *name = "xxxxx";
unsigned int i;
for (i = 0; i < ARRAY_SIZE(srev_names); i++) {
if (srev_names[i].sr_type != type)
continue;
if ((val & 0xf0) == srev_names[i].sr_val)
name = srev_names[i].sr_name;
if ((val & 0xff) == srev_names[i].sr_val) {
name = srev_names[i].sr_name;
break;
}
}
return name;
}
static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
{
struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
return ath5k_hw_reg_read(ah, reg_offset);
}
static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
{
struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
ath5k_hw_reg_write(ah, val, reg_offset);
}
static const struct ath_ops ath5k_common_ops = {
.read = ath5k_ioread32,
.write = ath5k_iowrite32,
};
/***********************\
* Driver Initialization *
\***********************/
static void ath5k_reg_notifier(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct ath5k_hw *ah = hw->priv;
struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
ath_reg_notifier_apply(wiphy, request, regulatory);
}
/********************\
* Channel/mode setup *
\********************/
/*
* Returns true for the channel numbers we want to expose. Unless
* CONFIG_ATH5K_TEST_CHANNELS is set, only standard channels pass.
*/
#ifdef CONFIG_ATH5K_TEST_CHANNELS
static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
{
return true;
}
#else
static bool ath5k_is_standard_channel(short chan, enum nl80211_band band)
{
if (band == NL80211_BAND_2GHZ && chan <= 14)
return true;
return /* UNII 1,2 */
(((chan & 3) == 0 && chan >= 36 && chan <= 64) ||
/* midband */
((chan & 3) == 0 && chan >= 100 && chan <= 140) ||
/* UNII-3 */
((chan & 3) == 1 && chan >= 149 && chan <= 165) ||
/* 802.11j 5.030-5.080 GHz (20MHz) */
(chan == 8 || chan == 12 || chan == 16) ||
/* 802.11j 4.9GHz (20MHz) */
(chan == 184 || chan == 188 || chan == 192 || chan == 196));
}
#endif
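/*
* Examples for the predicate above: 5 GHz channel 36 passes the
* UNII-1/2 test ((36 & 3) == 0 and 36 <= 64), channel 149 passes the
* UNII-3 test ((149 & 3) == 1), while channel 38 matches none of the
* ranges and is filtered out.
*/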
static unsigned int
ath5k_setup_channels(struct ath5k_hw *ah, struct ieee80211_channel *channels,
unsigned int mode, unsigned int max)
{
unsigned int count, size, freq, ch;
enum nl80211_band band;
switch (mode) {
case AR5K_MODE_11A:
/* 1..220, but 2GHz frequencies are filtered by check_channel */
size = 220;
band = NL80211_BAND_5GHZ;
break;
case AR5K_MODE_11B:
case AR5K_MODE_11G:
size = 26;
band = NL80211_BAND_2GHZ;
break;
default:
ATH5K_WARN(ah, "bad mode, not copying channels\n");
return 0;
}
count = 0;
for (ch = 1; ch <= size && count < max; ch++) {
freq = ieee80211_channel_to_frequency(ch, band);
if (freq == 0) /* mapping failed - not a standard channel */
continue;
/* Write channel info, needed for ath5k_channel_ok() */
channels[count].center_freq = freq;
channels[count].band = band;
channels[count].hw_value = mode;
/* Check if channel is supported by the chipset */
if (!ath5k_channel_ok(ah, &channels[count]))
continue;
if (!ath5k_is_standard_channel(ch, band))
continue;
count++;
}
return count;
}
static void
ath5k_setup_rate_idx(struct ath5k_hw *ah, struct ieee80211_supported_band *b)
{
u8 i;
for (i = 0; i < AR5K_MAX_RATES; i++)
ah->rate_idx[b->band][i] = -1;
for (i = 0; i < b->n_bitrates; i++) {
ah->rate_idx[b->band][b->bitrates[i].hw_value] = i;
if (b->bitrates[i].hw_value_short)
ah->rate_idx[b->band][b->bitrates[i].hw_value_short] = i;
}
}
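/*
* Example mapping (a sketch): after this runs for the 2 GHz band,
* ah->rate_idx[NL80211_BAND_2GHZ][ATH5K_RATE_CODE_11M] holds the index
* of the 110 (11 Mbit/s) entry in that band's bitrate array, and the
* short-preamble code maps to the same index; unused codes stay -1.
*/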
static int
ath5k_setup_bands(struct ieee80211_hw *hw)
{
struct ath5k_hw *ah = hw->priv;
struct ieee80211_supported_band *sband;
int max_c, count_c = 0;
int i;
BUILD_BUG_ON(ARRAY_SIZE(ah->sbands) < NUM_NL80211_BANDS);
max_c = ARRAY_SIZE(ah->channels);
/* 2GHz band */
sband = &ah->sbands[NL80211_BAND_2GHZ];
sband->band = NL80211_BAND_2GHZ;
sband->bitrates = &ah->rates[NL80211_BAND_2GHZ][0];
if (test_bit(AR5K_MODE_11G, ah->ah_capabilities.cap_mode)) {
/* G mode */
memcpy(sband->bitrates, &ath5k_rates[0],
sizeof(struct ieee80211_rate) * 12);
sband->n_bitrates = 12;
sband->channels = ah->channels;
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11G, max_c);
hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
count_c = sband->n_channels;
max_c -= count_c;
} else if (test_bit(AR5K_MODE_11B, ah->ah_capabilities.cap_mode)) {
/* B mode */
memcpy(sband->bitrates, &ath5k_rates[0],
sizeof(struct ieee80211_rate) * 4);
sband->n_bitrates = 4;
/* 5211 only supports B rates and uses 4-bit rate codes
* (e.g. normally we have 0x1B for 1M, but on 5211 we have 0x0B),
* so fix them up here:
*/
if (ah->ah_version == AR5K_AR5211) {
for (i = 0; i < 4; i++) {
sband->bitrates[i].hw_value =
sband->bitrates[i].hw_value & 0xF;
sband->bitrates[i].hw_value_short =
sband->bitrates[i].hw_value_short & 0xF;
}
}
sband->channels = ah->channels;
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11B, max_c);
hw->wiphy->bands[NL80211_BAND_2GHZ] = sband;
count_c = sband->n_channels;
max_c -= count_c;
}
ath5k_setup_rate_idx(ah, sband);
/* 5GHz band, A mode */
if (test_bit(AR5K_MODE_11A, ah->ah_capabilities.cap_mode)) {
sband = &ah->sbands[NL80211_BAND_5GHZ];
sband->band = NL80211_BAND_5GHZ;
sband->bitrates = &ah->rates[NL80211_BAND_5GHZ][0];
memcpy(sband->bitrates, &ath5k_rates[4],
sizeof(struct ieee80211_rate) * 8);
sband->n_bitrates = 8;
sband->channels = &ah->channels[count_c];
sband->n_channels = ath5k_setup_channels(ah, sband->channels,
AR5K_MODE_11A, max_c);
hw->wiphy->bands[NL80211_BAND_5GHZ] = sband;
}
ath5k_setup_rate_idx(ah, sband);
ath5k_debug_dump_bands(ah);
return 0;
}
/*
* Set/change channels. We always reset the chip.
* To accomplish this we must first clean up any pending DMA,
* then restart things as in ath5k_init.
*
* Called with ah->lock.
*/
int
ath5k_chan_set(struct ath5k_hw *ah, struct cfg80211_chan_def *chandef)
{
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"channel set, resetting (%u -> %u MHz)\n",
ah->curchan->center_freq, chandef->chan->center_freq);
switch (chandef->width) {
case NL80211_CHAN_WIDTH_20:
case NL80211_CHAN_WIDTH_20_NOHT:
ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
break;
case NL80211_CHAN_WIDTH_5:
ah->ah_bwmode = AR5K_BWMODE_5MHZ;
break;
case NL80211_CHAN_WIDTH_10:
ah->ah_bwmode = AR5K_BWMODE_10MHZ;
break;
default:
WARN_ON(1);
return -EINVAL;
}
/*
* To switch channels clear any pending DMA operations;
* wait long enough for the RX fifo to drain, reset the
* hardware at the new frequency, and then re-enable
* the relevant bits of the h/w.
*/
return ath5k_reset(ah, chandef->chan, true);
}
void ath5k_vif_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct ath5k_vif_iter_data *iter_data = data;
int i;
struct ath5k_vif *avf = (void *)vif->drv_priv;
if (iter_data->hw_macaddr)
for (i = 0; i < ETH_ALEN; i++)
iter_data->mask[i] &=
~(iter_data->hw_macaddr[i] ^ mac[i]);
if (!iter_data->found_active) {
iter_data->found_active = true;
memcpy(iter_data->active_mac, mac, ETH_ALEN);
}
if (iter_data->need_set_hw_addr && iter_data->hw_macaddr)
if (ether_addr_equal(iter_data->hw_macaddr, mac))
iter_data->need_set_hw_addr = false;
if (!iter_data->any_assoc) {
if (avf->assoc)
iter_data->any_assoc = true;
}
/* Calculate combined mode - when APs are active, operate in AP mode.
* Otherwise use the mode of the new interface. This can currently
* only deal with combinations of APs and STAs. Only one ad-hoc
* interface is allowed.
*/
if (avf->opmode == NL80211_IFTYPE_AP)
iter_data->opmode = NL80211_IFTYPE_AP;
else {
if (avf->opmode == NL80211_IFTYPE_STATION)
iter_data->n_stas++;
if (iter_data->opmode == NL80211_IFTYPE_UNSPECIFIED)
iter_data->opmode = avf->opmode;
}
}
void
ath5k_update_bssid_mask_and_opmode(struct ath5k_hw *ah,
struct ieee80211_vif *vif)
{
struct ath_common *common = ath5k_hw_common(ah);
struct ath5k_vif_iter_data iter_data;
u32 rfilt;
/*
* Use the hardware MAC address as reference, the hardware uses it
* together with the BSSID mask when matching addresses.
*/
iter_data.hw_macaddr = common->macaddr;
eth_broadcast_addr(iter_data.mask);
iter_data.found_active = false;
iter_data.need_set_hw_addr = true;
iter_data.opmode = NL80211_IFTYPE_UNSPECIFIED;
iter_data.n_stas = 0;
if (vif)
ath5k_vif_iter(&iter_data, vif->addr, vif);
/* Get list of all active MAC addresses */
ieee80211_iterate_active_interfaces_atomic(
ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath5k_vif_iter, &iter_data);
memcpy(ah->bssidmask, iter_data.mask, ETH_ALEN);
ah->opmode = iter_data.opmode;
if (ah->opmode == NL80211_IFTYPE_UNSPECIFIED)
/* Nothing active, default to station mode */
ah->opmode = NL80211_IFTYPE_STATION;
ath5k_hw_set_opmode(ah, ah->opmode);
ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode setup opmode %d (%s)\n",
ah->opmode, ath_opmode_to_string(ah->opmode));
if (iter_data.need_set_hw_addr && iter_data.found_active)
ath5k_hw_set_lladdr(ah, iter_data.active_mac);
if (ath5k_hw_hasbssidmask(ah))
ath5k_hw_set_bssid_mask(ah, ah->bssidmask);
/* Set up RX Filter */
if (iter_data.n_stas > 1) {
/* If you have multiple STA interfaces connected to
* different APs, ARPs are not received (most of the time?)
* Enabling PROMISC appears to fix that problem.
*/
ah->filter_flags |= AR5K_RX_FILTER_PROM;
}
rfilt = ah->filter_flags;
ath5k_hw_set_rx_filter(ah, rfilt);
ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "RX filter 0x%x\n", rfilt);
}
static inline int
ath5k_hw_to_driver_rix(struct ath5k_hw *ah, int hw_rix)
{
int rix;
/* return base rate on errors */
if (WARN(hw_rix < 0 || hw_rix >= AR5K_MAX_RATES,
"hw_rix out of bounds: %x\n", hw_rix))
return 0;
rix = ah->rate_idx[ah->curchan->band][hw_rix];
if (WARN(rix < 0, "invalid hw_rix: %x\n", hw_rix))
rix = 0;
return rix;
}
/***************\
* Buffers setup *
\***************/
static
struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_hw *ah, dma_addr_t *skb_addr)
{
struct ath_common *common = ath5k_hw_common(ah);
struct sk_buff *skb;
/*
* Allocate buffer with headroom_needed space for the
* fake physical layer header at the start.
*/
skb = ath_rxbuf_alloc(common,
common->rx_bufsize,
GFP_ATOMIC);
if (!skb) {
ATH5K_ERR(ah, "can't alloc skbuff of size %u\n",
common->rx_bufsize);
return NULL;
}
*skb_addr = dma_map_single(ah->dev,
skb->data, common->rx_bufsize,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(ah->dev, *skb_addr))) {
ATH5K_ERR(ah, "%s: DMA mapping failed\n", __func__);
dev_kfree_skb(skb);
return NULL;
}
return skb;
}
static int
ath5k_rxbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
struct sk_buff *skb = bf->skb;
struct ath5k_desc *ds;
int ret;
if (!skb) {
skb = ath5k_rx_skb_alloc(ah, &bf->skbaddr);
if (!skb)
return -ENOMEM;
bf->skb = skb;
}
/*
* Setup descriptors. For receive we always terminate
* the descriptor list with a self-linked entry so we'll
* not get overrun under high load (as can happen with a
* 5212 when ANI processing enables PHY error frames).
*
* To ensure the last descriptor is self-linked we create
* each descriptor as self-linked and add it to the end. As
* each additional descriptor is added the previous self-linked
* entry is "fixed" naturally. This should be safe even
* if DMA is happening. When processing RX interrupts we
* never remove/process the last, self-linked, entry on the
* descriptor list. This ensures the hardware always has
* someplace to write a new frame.
*/
ds = bf->desc;
ds->ds_link = bf->daddr; /* link to self */
ds->ds_data = bf->skbaddr;
ret = ath5k_hw_setup_rx_desc(ah, ds, ah->common.rx_bufsize, 0);
if (ret) {
ATH5K_ERR(ah, "%s: could not setup RX desc\n", __func__);
return ret;
}
if (ah->rxlink != NULL)
*ah->rxlink = bf->daddr;
ah->rxlink = &ds->ds_link;
return 0;
}
static enum ath5k_pkt_type get_hw_packet_type(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
enum ath5k_pkt_type htype;
__le16 fc;
hdr = (struct ieee80211_hdr *)skb->data;
fc = hdr->frame_control;
if (ieee80211_is_beacon(fc))
htype = AR5K_PKT_TYPE_BEACON;
else if (ieee80211_is_probe_resp(fc))
htype = AR5K_PKT_TYPE_PROBE_RESP;
else if (ieee80211_is_atim(fc))
htype = AR5K_PKT_TYPE_ATIM;
else if (ieee80211_is_pspoll(fc))
htype = AR5K_PKT_TYPE_PSPOLL;
else
htype = AR5K_PKT_TYPE_NORMAL;
return htype;
}
static struct ieee80211_rate *
ath5k_get_rate(const struct ieee80211_hw *hw,
const struct ieee80211_tx_info *info,
struct ath5k_buf *bf, int idx)
{
/*
* convert an ieee80211_tx_rate RC-table entry to
* the respective ieee80211_rate struct
*/
if (bf->rates[idx].idx < 0)
return NULL;
return &hw->wiphy->bands[info->band]->bitrates[bf->rates[idx].idx];
}
static u16
ath5k_get_rate_hw_value(const struct ieee80211_hw *hw,
const struct ieee80211_tx_info *info,
struct ath5k_buf *bf, int idx)
{
struct ieee80211_rate *rate;
u16 hw_rate;
u8 rc_flags;
rate = ath5k_get_rate(hw, info, bf, idx);
if (!rate)
return 0;
rc_flags = bf->rates[idx].flags;
hw_rate = (rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) ?
rate->hw_value_short : rate->hw_value;
return hw_rate;
}
static bool ath5k_merge_ratetbl(struct ieee80211_sta *sta,
struct ath5k_buf *bf,
struct ieee80211_tx_info *tx_info)
{
struct ieee80211_sta_rates *ratetbl;
u8 i;
if (!sta)
return false;
ratetbl = rcu_dereference(sta->rates);
if (!ratetbl)
return false;
if (tx_info->control.rates[0].idx < 0 ||
tx_info->control.rates[0].count == 0) {
i = 0;
} else {
bf->rates[0] = tx_info->control.rates[0];
i = 1;
}
for ( ; i < IEEE80211_TX_MAX_RATES; i++) {
bf->rates[i].idx = ratetbl->rate[i].idx;
bf->rates[i].flags = ratetbl->rate[i].flags;
if (tx_info->control.use_rts)
bf->rates[i].count = ratetbl->rate[i].count_rts;
else if (tx_info->control.use_cts_prot)
bf->rates[i].count = ratetbl->rate[i].count_cts;
else
bf->rates[i].count = ratetbl->rate[i].count;
}
return true;
}
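/*
* Sketch of the merge above: if mac80211 handed us a valid first rate
* in tx_info it is kept in bf->rates[0] and the per-station rate table
* fills the remaining IEEE80211_TX_MAX_RATES - 1 slots; otherwise all
* slots come from the table, with retry counts picked according to
* whether RTS or CTS-to-self protection is in use.
*/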
static int
ath5k_txbuf_setup(struct ath5k_hw *ah, struct ath5k_buf *bf,
struct ath5k_txq *txq, int padsize,
struct ieee80211_tx_control *control)
{
struct ath5k_desc *ds = bf->desc;
struct sk_buff *skb = bf->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
unsigned int pktlen, flags, keyidx = AR5K_TXKEYIX_INVALID;
struct ieee80211_rate *rate;
struct ieee80211_sta *sta;
unsigned int mrr_rate[3], mrr_tries[3];
int i, ret;
u16 hw_rate;
u16 cts_rate = 0;
u16 duration = 0;
u8 rc_flags;
flags = AR5K_TXDESC_INTREQ | AR5K_TXDESC_CLRDMASK;
/* XXX endianness */
bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(ah->dev, bf->skbaddr))
return -ENOSPC;
if (control)
sta = control->sta;
else
sta = NULL;
if (!ath5k_merge_ratetbl(sta, bf, info)) {
ieee80211_get_tx_rates(info->control.vif,
sta, skb, bf->rates,
ARRAY_SIZE(bf->rates));
}
rate = ath5k_get_rate(ah->hw, info, bf, 0);
if (!rate) {
ret = -EINVAL;
goto err_unmap;
}
if (info->flags & IEEE80211_TX_CTL_NO_ACK)
flags |= AR5K_TXDESC_NOACK;
rc_flags = bf->rates[0].flags;
hw_rate = ath5k_get_rate_hw_value(ah->hw, info, bf, 0);
pktlen = skb->len;
/* FIXME: If we are in g mode and rate is a CCK rate
* subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
* from tx power (value is in dB units already) */
if (info->control.hw_key) {
keyidx = info->control.hw_key->hw_key_idx;
pktlen += info->control.hw_key->icv_len;
}
if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) {
flags |= AR5K_TXDESC_RTSENA;
cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
duration = le16_to_cpu(ieee80211_rts_duration(ah->hw,
info->control.vif, pktlen, info));
}
if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
flags |= AR5K_TXDESC_CTSENA;
cts_rate = ieee80211_get_rts_cts_rate(ah->hw, info)->hw_value;
duration = le16_to_cpu(ieee80211_ctstoself_duration(ah->hw,
info->control.vif, pktlen, info));
}
ret = ah->ah_setup_tx_desc(ah, ds, pktlen,
ieee80211_get_hdrlen_from_skb(skb), padsize,
get_hw_packet_type(skb),
(ah->ah_txpower.txp_requested * 2),
hw_rate,
bf->rates[0].count, keyidx, ah->ah_tx_ant, flags,
cts_rate, duration);
if (ret)
goto err_unmap;
/* Set up MRR descriptor */
if (ah->ah_capabilities.cap_has_mrr_support) {
memset(mrr_rate, 0, sizeof(mrr_rate));
memset(mrr_tries, 0, sizeof(mrr_tries));
for (i = 0; i < 3; i++) {
rate = ath5k_get_rate(ah->hw, info, bf, i);
if (!rate)
break;
mrr_rate[i] = ath5k_get_rate_hw_value(ah->hw, info, bf, i);
mrr_tries[i] = bf->rates[i].count;
}
ath5k_hw_setup_mrr_tx_desc(ah, ds,
mrr_rate[0], mrr_tries[0],
mrr_rate[1], mrr_tries[1],
mrr_rate[2], mrr_tries[2]);
}
ds->ds_link = 0;
ds->ds_data = bf->skbaddr;
spin_lock_bh(&txq->lock);
list_add_tail(&bf->list, &txq->q);
txq->txq_len++;
if (txq->link == NULL) /* is this first packet? */
ath5k_hw_set_txdp(ah, txq->qnum, bf->daddr);
else /* no, so only link it */
*txq->link = bf->daddr;
txq->link = &ds->ds_link;
ath5k_hw_start_tx_dma(ah, txq->qnum);
spin_unlock_bh(&txq->lock);
return 0;
err_unmap:
dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
return ret;
}
/*******************\
* Descriptors setup *
\*******************/
static int
ath5k_desc_alloc(struct ath5k_hw *ah)
{
struct ath5k_desc *ds;
struct ath5k_buf *bf;
dma_addr_t da;
unsigned int i;
int ret;
/* allocate descriptors */
ah->desc_len = sizeof(struct ath5k_desc) *
(ATH_TXBUF + ATH_RXBUF + ATH_BCBUF + 1);
ah->desc = dma_alloc_coherent(ah->dev, ah->desc_len,
&ah->desc_daddr, GFP_KERNEL);
if (ah->desc == NULL) {
ATH5K_ERR(ah, "can't allocate descriptors\n");
ret = -ENOMEM;
goto err;
}
ds = ah->desc;
da = ah->desc_daddr;
ATH5K_DBG(ah, ATH5K_DEBUG_ANY, "DMA map: %p (%zu) -> %llx\n",
ds, ah->desc_len, (unsigned long long)ah->desc_daddr);
bf = kcalloc(1 + ATH_TXBUF + ATH_RXBUF + ATH_BCBUF,
sizeof(struct ath5k_buf), GFP_KERNEL);
if (bf == NULL) {
ATH5K_ERR(ah, "can't allocate bufptr\n");
ret = -ENOMEM;
goto err_free;
}
ah->bufptr = bf;
INIT_LIST_HEAD(&ah->rxbuf);
for (i = 0; i < ATH_RXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
bf->desc = ds;
bf->daddr = da;
list_add_tail(&bf->list, &ah->rxbuf);
}
INIT_LIST_HEAD(&ah->txbuf);
ah->txbuf_len = ATH_TXBUF;
for (i = 0; i < ATH_TXBUF; i++, bf++, ds++, da += sizeof(*ds)) {
bf->desc = ds;
bf->daddr = da;
list_add_tail(&bf->list, &ah->txbuf);
}
/* beacon buffers */
INIT_LIST_HEAD(&ah->bcbuf);
for (i = 0; i < ATH_BCBUF; i++, bf++, ds++, da += sizeof(*ds)) {
bf->desc = ds;
bf->daddr = da;
list_add_tail(&bf->list, &ah->bcbuf);
}
return 0;
err_free:
dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
err:
ah->desc = NULL;
return ret;
}
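/*
* Resulting layout of the single coherent allocation above (a sketch):
*
*	[ ATH_RXBUF rx descs | ATH_TXBUF tx descs | ATH_BCBUF beacon descs | 1 spare ]
*
* bf->daddr of each buffer points at its slice, so the hardware can
* chain descriptors by physical address while the driver walks bufptr.
*/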
void
ath5k_txbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
BUG_ON(!bf);
if (!bf->skb)
return;
dma_unmap_single(ah->dev, bf->skbaddr, bf->skb->len,
DMA_TO_DEVICE);
ieee80211_free_txskb(ah->hw, bf->skb);
bf->skb = NULL;
bf->skbaddr = 0;
bf->desc->ds_data = 0;
}
void
ath5k_rxbuf_free_skb(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
struct ath_common *common = ath5k_hw_common(ah);
BUG_ON(!bf);
if (!bf->skb)
return;
dma_unmap_single(ah->dev, bf->skbaddr, common->rx_bufsize,
DMA_FROM_DEVICE);
dev_kfree_skb_any(bf->skb);
bf->skb = NULL;
bf->skbaddr = 0;
bf->desc->ds_data = 0;
}
static void
ath5k_desc_free(struct ath5k_hw *ah)
{
struct ath5k_buf *bf;
list_for_each_entry(bf, &ah->txbuf, list)
ath5k_txbuf_free_skb(ah, bf);
list_for_each_entry(bf, &ah->rxbuf, list)
ath5k_rxbuf_free_skb(ah, bf);
list_for_each_entry(bf, &ah->bcbuf, list)
ath5k_txbuf_free_skb(ah, bf);
/* Free memory associated with all descriptors */
dma_free_coherent(ah->dev, ah->desc_len, ah->desc, ah->desc_daddr);
ah->desc = NULL;
ah->desc_daddr = 0;
kfree(ah->bufptr);
ah->bufptr = NULL;
}
/**************\
* Queues setup *
\**************/
static struct ath5k_txq *
ath5k_txq_setup(struct ath5k_hw *ah,
int qtype, int subtype)
{
struct ath5k_txq *txq;
struct ath5k_txq_info qi = {
.tqi_subtype = subtype,
/* XXX: default values not correct for B and XR channels,
* but who cares? */
.tqi_aifs = AR5K_TUNE_AIFS,
.tqi_cw_min = AR5K_TUNE_CWMIN,
.tqi_cw_max = AR5K_TUNE_CWMAX
};
int qnum;
/*
* Enable interrupts only for EOL and DESC conditions.
* We mark tx descriptors to receive a DESC interrupt
* when a tx queue gets deep; otherwise we wait for the
* EOL to reap descriptors. Note that this is done to
* reduce interrupt load and this only defers reaping
* descriptors, never transmitting frames. Aside from
* reducing interrupts this also permits more concurrency.
* The only potential downside is if the tx queue backs
* up, in which case the top half of the kernel may back up
* due to a lack of tx descriptors.
*/
qi.tqi_flags = AR5K_TXQ_FLAG_TXEOLINT_ENABLE |
AR5K_TXQ_FLAG_TXDESCINT_ENABLE;
qnum = ath5k_hw_setup_tx_queue(ah, qtype, &qi);
if (qnum < 0) {
/*
* NB: don't print a message, this happens
* normally on parts with too few tx queues
*/
return ERR_PTR(qnum);
}
txq = &ah->txqs[qnum];
if (!txq->setup) {
txq->qnum = qnum;
txq->link = NULL;
INIT_LIST_HEAD(&txq->q);
spin_lock_init(&txq->lock);
txq->setup = true;
txq->txq_len = 0;
txq->txq_max = ATH5K_TXQ_LEN_MAX;
txq->txq_poll_mark = false;
txq->txq_stuck = 0;
}
return &ah->txqs[qnum];
}
static int
ath5k_beaconq_setup(struct ath5k_hw *ah)
{
struct ath5k_txq_info qi = {
/* XXX: default values not correct for B and XR channels,
* but who cares? */
.tqi_aifs = AR5K_TUNE_AIFS,
.tqi_cw_min = AR5K_TUNE_CWMIN,
.tqi_cw_max = AR5K_TUNE_CWMAX,
/* NB: for dynamic turbo, don't enable any other interrupts */
.tqi_flags = AR5K_TXQ_FLAG_TXDESCINT_ENABLE
};
return ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_BEACON, &qi);
}
static int
ath5k_beaconq_config(struct ath5k_hw *ah)
{
struct ath5k_txq_info qi;
int ret;
ret = ath5k_hw_get_tx_queueprops(ah, ah->bhalq, &qi);
if (ret)
goto err;
if (ah->opmode == NL80211_IFTYPE_AP ||
ah->opmode == NL80211_IFTYPE_MESH_POINT) {
/*
* Always burst out beacon and CAB traffic
* (aifs = cwmin = cwmax = 0)
*/
qi.tqi_aifs = 0;
qi.tqi_cw_min = 0;
qi.tqi_cw_max = 0;
} else if (ah->opmode == NL80211_IFTYPE_ADHOC) {
/*
* Adhoc mode; backoff between 0 and (2 * cw_min).
*/
qi.tqi_aifs = 0;
qi.tqi_cw_min = 0;
qi.tqi_cw_max = 2 * AR5K_TUNE_CWMIN;
}
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
"beacon queueprops tqi_aifs:%d tqi_cw_min:%d tqi_cw_max:%d\n",
qi.tqi_aifs, qi.tqi_cw_min, qi.tqi_cw_max);
ret = ath5k_hw_set_tx_queueprops(ah, ah->bhalq, &qi);
if (ret) {
ATH5K_ERR(ah, "%s: unable to update parameters for beacon "
"hardware queue!\n", __func__);
goto err;
}
ret = ath5k_hw_reset_tx_queue(ah, ah->bhalq); /* push to h/w */
if (ret)
goto err;
/* reconfigure cabq with ready time to 80% of beacon_interval */
ret = ath5k_hw_get_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
if (ret)
goto err;
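/* e.g. with a typical beacon interval of 100 TU this works out
* to a CAB ready time of 80 TU (the numbers are illustrative) */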
qi.tqi_ready_time = (ah->bintval * 80) / 100;
ret = ath5k_hw_set_tx_queueprops(ah, AR5K_TX_QUEUE_ID_CAB, &qi);
if (ret)
goto err;
ret = ath5k_hw_reset_tx_queue(ah, AR5K_TX_QUEUE_ID_CAB);
err:
return ret;
}
/**
* ath5k_drain_tx_buffs - Empty tx buffers
*
* @ah: The &struct ath5k_hw
*
* Empty tx buffers from all queues in preparation
* of a reset or during shutdown.
*
* NB: this assumes output has been stopped and
* we do not need to block ath5k_tx_tasklet
*/
static void
ath5k_drain_tx_buffs(struct ath5k_hw *ah)
{
struct ath5k_txq *txq;
struct ath5k_buf *bf, *bf0;
int i;
for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
if (ah->txqs[i].setup) {
txq = &ah->txqs[i];
spin_lock_bh(&txq->lock);
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
ath5k_debug_printtxbuf(ah, bf);
ath5k_txbuf_free_skb(ah, bf);
spin_lock(&ah->txbuflock);
list_move_tail(&bf->list, &ah->txbuf);
ah->txbuf_len++;
txq->txq_len--;
spin_unlock(&ah->txbuflock);
}
txq->link = NULL;
txq->txq_poll_mark = false;
spin_unlock_bh(&txq->lock);
}
}
}
static void
ath5k_txq_release(struct ath5k_hw *ah)
{
struct ath5k_txq *txq = ah->txqs;
unsigned int i;
for (i = 0; i < ARRAY_SIZE(ah->txqs); i++, txq++)
if (txq->setup) {
ath5k_hw_release_tx_queue(ah, txq->qnum);
txq->setup = false;
}
}
/*************\
* RX Handling *
\*************/
/*
* Enable the receive h/w following a reset.
*/
static int
ath5k_rx_start(struct ath5k_hw *ah)
{
struct ath_common *common = ath5k_hw_common(ah);
struct ath5k_buf *bf;
int ret;
common->rx_bufsize = roundup(IEEE80211_MAX_FRAME_LEN, common->cachelsz);
ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "cachelsz %u rx_bufsize %u\n",
common->cachelsz, common->rx_bufsize);
spin_lock_bh(&ah->rxbuflock);
ah->rxlink = NULL;
list_for_each_entry(bf, &ah->rxbuf, list) {
ret = ath5k_rxbuf_setup(ah, bf);
if (ret != 0) {
spin_unlock_bh(&ah->rxbuflock);
goto err;
}
}
bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
ath5k_hw_set_rxdp(ah, bf->daddr);
spin_unlock_bh(&ah->rxbuflock);
ath5k_hw_start_rx_dma(ah); /* enable recv descriptors */
ath5k_update_bssid_mask_and_opmode(ah, NULL); /* set filters, etc. */
ath5k_hw_start_rx_pcu(ah); /* re-enable PCU/DMA engine */
return 0;
err:
return ret;
}
/*
* Disable the receive logic on the PCU (DRU)
* in preparation for a shutdown.
*
* Note: this doesn't stop rx DMA; ath5k_hw_dma_stop
* does that.
*/
static void
ath5k_rx_stop(struct ath5k_hw *ah)
{
ath5k_hw_set_rx_filter(ah, 0); /* clear recv filter */
ath5k_hw_stop_rx_pcu(ah); /* disable PCU */
ath5k_debug_printrxbuffs(ah);
}
static unsigned int
ath5k_rx_decrypted(struct ath5k_hw *ah, struct sk_buff *skb,
struct ath5k_rx_status *rs)
{
struct ath_common *common = ath5k_hw_common(ah);
struct ieee80211_hdr *hdr = (void *)skb->data;
unsigned int keyix, hlen;
if (!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
rs->rs_keyix != AR5K_RXKEYIX_INVALID)
return RX_FLAG_DECRYPTED;
/* Apparently when a default key is used to decrypt the packet
the hw does not set the index used to decrypt. In such cases
get the index from the packet. */
hlen = ieee80211_hdrlen(hdr->frame_control);
if (ieee80211_has_protected(hdr->frame_control) &&
!(rs->rs_status & AR5K_RXERR_DECRYPT) &&
skb->len >= hlen + 4) {
keyix = skb->data[hlen + 3] >> 6;
if (test_bit(keyix, common->keymap))
return RX_FLAG_DECRYPTED;
}
return 0;
}
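/*
* IV layout sketch for the fallback above: in a protected frame the
* 4th IV byte (data[hlen + 3]) carries the key ID in its two most
* significant bits, so e.g. a byte of 0x40 selects default key 1.
*/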
static void
ath5k_check_ibss_tsf(struct ath5k_hw *ah, struct sk_buff *skb,
struct ieee80211_rx_status *rxs)
{
u64 tsf, bc_tstamp;
u32 hw_tu;
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
if (le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS) {
/*
* Received an IBSS beacon with the same BSSID. Hardware *must*
* have updated the local TSF. We have to work around various
* hardware bugs, though...
*/
tsf = ath5k_hw_get_tsf64(ah);
bc_tstamp = le64_to_cpu(mgmt->u.beacon.timestamp);
hw_tu = TSF_TO_TU(tsf);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"beacon %llx mactime %llx (diff %lld) tsf now %llx\n",
(unsigned long long)bc_tstamp,
(unsigned long long)rxs->mactime,
(unsigned long long)(rxs->mactime - bc_tstamp),
(unsigned long long)tsf);
/*
* Sometimes the HW will give us a wrong tstamp in the rx
* status, causing the timestamp extension to go wrong.
* (This seems to happen especially with beacon frames bigger
* than 78 bytes (incl. FCS).)
* But we know that the receive timestamp must be later than the
* timestamp of the beacon since HW must have synced to that.
*
* NOTE: here we assume mactime to be after the frame was
* received, not like mac80211 which defines it at the start.
*/
if (bc_tstamp > rxs->mactime) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"fixing mactime from %llx to %llx\n",
(unsigned long long)rxs->mactime,
(unsigned long long)tsf);
rxs->mactime = tsf;
}
/*
* Local TSF might have moved higher than our beacon timers,
* in that case we have to update them to continue sending
* beacons. This also takes care of synchronizing beacon sending
* times with other stations.
*/
if (hw_tu >= ah->nexttbtt)
ath5k_beacon_update_timers(ah, bc_tstamp);
/* Check if the beacon timers are still correct, because a TSF
* update might have created a window between them - for a
* longer description see the comment of this function: */
if (!ath5k_hw_check_beacon_timers(ah, ah->bintval)) {
ath5k_beacon_update_timers(ah, bc_tstamp);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"fixed beacon timers after beacon receive\n");
}
}
}
/*
* Compute padding position. skb must contain an IEEE 802.11 frame
*/
static int ath5k_common_padpos(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
__le16 frame_control = hdr->frame_control;
int padpos = 24;
if (ieee80211_has_a4(frame_control))
padpos += ETH_ALEN;
if (ieee80211_is_data_qos(frame_control))
padpos += IEEE80211_QOS_CTL_LEN;
return padpos;
}
/*
* This function expects an 802.11 frame and returns the number of
* bytes added, or -1 if we don't have enough header room.
*/
static int ath5k_add_padding(struct sk_buff *skb)
{
int padpos = ath5k_common_padpos(skb);
int padsize = padpos & 3;
if (padsize && skb->len > padpos) {
if (skb_headroom(skb) < padsize)
return -1;
skb_push(skb, padsize);
memmove(skb->data, skb->data + padsize, padpos);
return padsize;
}
return 0;
}
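/*
* Worked example for the two helpers above: a QoS data frame has a
* 26 byte header (24 + IEEE80211_QOS_CTL_LEN), so padpos = 26 and
* padsize = 26 & 3 = 2; two bytes are pushed in front and the header
* is moved down so the payload starts on a 32-bit boundary.
*/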
/*
* The MAC header is padded to have 32-bit boundary if the
* packet payload is non-zero. The general calculation for
* padsize would take into account odd header lengths:
* padsize = 4 - (hdrlen & 3); however, since only
* even-length headers are used, padding can only be 0 or 2
* bytes and we can optimize this a bit. We must not try to
* remove padding from short control frames that do not have a
* payload.
*
* This function expects an 802.11 frame and returns the number of
* bytes removed.
*/
static int ath5k_remove_padding(struct sk_buff *skb)
{
int padpos = ath5k_common_padpos(skb);
int padsize = padpos & 3;
if (padsize && skb->len >= padpos + padsize) {
memmove(skb->data + padsize, skb->data, padpos);
skb_pull(skb, padsize);
return padsize;
}
return 0;
}
static void
ath5k_receive_frame(struct ath5k_hw *ah, struct sk_buff *skb,
struct ath5k_rx_status *rs)
{
struct ieee80211_rx_status *rxs;
struct ath_common *common = ath5k_hw_common(ah);
ath5k_remove_padding(skb);
rxs = IEEE80211_SKB_RXCB(skb);
rxs->flag = 0;
if (unlikely(rs->rs_status & AR5K_RXERR_MIC))
rxs->flag |= RX_FLAG_MMIC_ERROR;
if (unlikely(rs->rs_status & AR5K_RXERR_CRC))
rxs->flag |= RX_FLAG_FAILED_FCS_CRC;
/*
* always extend the mac timestamp, since this information is
* also needed for proper IBSS merging.
*
* XXX: it might be too late to do it here, since rs_tstamp is
* 15-bit only. That means TSF extension has to be done within
* 32768 usec (about 32 ms). It might be necessary to move this to
* the interrupt handler, like it is done in madwifi.
*/
rxs->mactime = ath5k_extend_tsf(ah, rs->rs_tstamp);
rxs->flag |= RX_FLAG_MACTIME_END;
rxs->freq = ah->curchan->center_freq;
rxs->band = ah->curchan->band;
rxs->signal = ah->ah_noise_floor + rs->rs_rssi;
rxs->antenna = rs->rs_antenna;
if (rs->rs_antenna > 0 && rs->rs_antenna < 5)
ah->stats.antenna_rx[rs->rs_antenna]++;
else
ah->stats.antenna_rx[0]++; /* invalid */
rxs->rate_idx = ath5k_hw_to_driver_rix(ah, rs->rs_rate);
rxs->flag |= ath5k_rx_decrypted(ah, skb, rs);
switch (ah->ah_bwmode) {
case AR5K_BWMODE_5MHZ:
rxs->bw = RATE_INFO_BW_5;
break;
case AR5K_BWMODE_10MHZ:
rxs->bw = RATE_INFO_BW_10;
break;
default:
break;
}
if (rs->rs_rate ==
ah->sbands[ah->curchan->band].bitrates[rxs->rate_idx].hw_value_short)
rxs->enc_flags |= RX_ENC_FLAG_SHORTPRE;
trace_ath5k_rx(ah, skb);
if (ath_is_mybeacon(common, (struct ieee80211_hdr *)skb->data)) {
ewma_beacon_rssi_add(&ah->ah_beacon_rssi_avg, rs->rs_rssi);
/* check beacons in IBSS mode */
if (ah->opmode == NL80211_IFTYPE_ADHOC)
ath5k_check_ibss_tsf(ah, skb, rxs);
}
ieee80211_rx(ah->hw, skb);
}
/** ath5k_receive_frame_ok() - Do we want to receive this frame or not?
*
* Check if we want to further process this frame or not. Also update
* statistics. Return true if we want this frame, false if not.
*/
static bool
ath5k_receive_frame_ok(struct ath5k_hw *ah, struct ath5k_rx_status *rs)
{
ah->stats.rx_all_count++;
ah->stats.rx_bytes_count += rs->rs_datalen;
if (unlikely(rs->rs_status)) {
unsigned int filters;
if (rs->rs_status & AR5K_RXERR_CRC)
ah->stats.rxerr_crc++;
if (rs->rs_status & AR5K_RXERR_FIFO)
ah->stats.rxerr_fifo++;
if (rs->rs_status & AR5K_RXERR_PHY) {
ah->stats.rxerr_phy++;
if (rs->rs_phyerr > 0 && rs->rs_phyerr < 32)
ah->stats.rxerr_phy_code[rs->rs_phyerr]++;
/*
* Treat packets that underwent a CCK or OFDM reset as having a bad CRC.
* These restarts happen when the radio resynchronizes to a stronger frame
* while receiving a weaker frame. Here we receive the prefix of the weak
* frame. Since these are incomplete packets, mark their CRC as invalid.
*/
if (rs->rs_phyerr == AR5K_RX_PHY_ERROR_OFDM_RESTART ||
rs->rs_phyerr == AR5K_RX_PHY_ERROR_CCK_RESTART) {
rs->rs_status |= AR5K_RXERR_CRC;
rs->rs_status &= ~AR5K_RXERR_PHY;
} else {
return false;
}
}
if (rs->rs_status & AR5K_RXERR_DECRYPT) {
/*
* Decrypt error. If the error occurred
* because there was no hardware key, then
* let the frame through so the upper layers
* can process it. This is necessary for 5210
* parts which have no way to setup a ``clear''
* key cache entry.
*
* XXX do key cache faulting
*/
ah->stats.rxerr_decrypt++;
if (rs->rs_keyix == AR5K_RXKEYIX_INVALID &&
!(rs->rs_status & AR5K_RXERR_CRC))
return true;
}
if (rs->rs_status & AR5K_RXERR_MIC) {
ah->stats.rxerr_mic++;
return true;
}
/*
* Reject any frames with non-crypto errors, and take into account the
* current FIF_* filters.
*/
filters = AR5K_RXERR_DECRYPT;
if (ah->fif_filter_flags & FIF_FCSFAIL)
filters |= AR5K_RXERR_CRC;
if (rs->rs_status & ~filters)
return false;
}
if (unlikely(rs->rs_more)) {
ah->stats.rxerr_jumbo++;
return false;
}
return true;
}
static void
ath5k_set_current_imask(struct ath5k_hw *ah)
{
enum ath5k_int imask;
unsigned long flags;
if (test_bit(ATH_STAT_RESET, ah->status))
return;
spin_lock_irqsave(&ah->irqlock, flags);
imask = ah->imask;
if (ah->rx_pending)
imask &= ~AR5K_INT_RX_ALL;
if (ah->tx_pending)
imask &= ~AR5K_INT_TX_ALL;
ath5k_hw_set_imr(ah, imask);
spin_unlock_irqrestore(&ah->irqlock, flags);
}
static void
ath5k_tasklet_rx(struct tasklet_struct *t)
{
struct ath5k_rx_status rs = {};
struct sk_buff *skb, *next_skb;
dma_addr_t next_skb_addr;
struct ath5k_hw *ah = from_tasklet(ah, t, rxtq);
struct ath_common *common = ath5k_hw_common(ah);
struct ath5k_buf *bf;
struct ath5k_desc *ds;
int ret;
spin_lock(&ah->rxbuflock);
if (list_empty(&ah->rxbuf)) {
ATH5K_WARN(ah, "empty rx buf pool\n");
goto unlock;
}
do {
bf = list_first_entry(&ah->rxbuf, struct ath5k_buf, list);
BUG_ON(bf->skb == NULL);
skb = bf->skb;
ds = bf->desc;
/* bail if HW is still using self-linked descriptor */
if (ath5k_hw_get_rxdp(ah) == bf->daddr)
break;
ret = ah->ah_proc_rx_desc(ah, ds, &rs);
if (unlikely(ret == -EINPROGRESS))
break;
else if (unlikely(ret)) {
ATH5K_ERR(ah, "error in processing rx descriptor\n");
ah->stats.rxerr_proc++;
break;
}
if (ath5k_receive_frame_ok(ah, &rs)) {
next_skb = ath5k_rx_skb_alloc(ah, &next_skb_addr);
/*
* If we can't replace bf->skb with a new skb under
* memory pressure, just skip this packet
*/
if (!next_skb)
goto next;
dma_unmap_single(ah->dev, bf->skbaddr,
common->rx_bufsize,
DMA_FROM_DEVICE);
skb_put(skb, rs.rs_datalen);
ath5k_receive_frame(ah, skb, &rs);
bf->skb = next_skb;
bf->skbaddr = next_skb_addr;
}
next:
list_move_tail(&bf->list, &ah->rxbuf);
} while (ath5k_rxbuf_setup(ah, bf) == 0);
unlock:
spin_unlock(&ah->rxbuflock);
ah->rx_pending = false;
ath5k_set_current_imask(ah);
}
/*************\
* TX Handling *
\*************/
void
ath5k_tx_queue(struct ieee80211_hw *hw, struct sk_buff *skb,
struct ath5k_txq *txq, struct ieee80211_tx_control *control)
{
struct ath5k_hw *ah = hw->priv;
struct ath5k_buf *bf;
unsigned long flags;
int padsize;
trace_ath5k_tx(ah, skb, txq);
/*
* The hardware expects the header padded to 4 byte boundaries.
* If this is not the case, we add the padding after the header.
*/
padsize = ath5k_add_padding(skb);
if (padsize < 0) {
ATH5K_ERR(ah, "tx hdrlen not %%4: not enough"
" headroom to pad");
goto drop_packet;
}
if (txq->txq_len >= txq->txq_max &&
txq->qnum <= AR5K_TX_QUEUE_ID_DATA_MAX)
ieee80211_stop_queue(hw, txq->qnum);
spin_lock_irqsave(&ah->txbuflock, flags);
if (list_empty(&ah->txbuf)) {
ATH5K_ERR(ah, "no further txbuf available, dropping packet\n");
spin_unlock_irqrestore(&ah->txbuflock, flags);
ieee80211_stop_queues(hw);
goto drop_packet;
}
bf = list_first_entry(&ah->txbuf, struct ath5k_buf, list);
list_del(&bf->list);
ah->txbuf_len--;
if (list_empty(&ah->txbuf))
ieee80211_stop_queues(hw);
spin_unlock_irqrestore(&ah->txbuflock, flags);
bf->skb = skb;
if (ath5k_txbuf_setup(ah, bf, txq, padsize, control)) {
bf->skb = NULL;
spin_lock_irqsave(&ah->txbuflock, flags);
list_add_tail(&bf->list, &ah->txbuf);
ah->txbuf_len++;
spin_unlock_irqrestore(&ah->txbuflock, flags);
goto drop_packet;
}
return;
drop_packet:
ieee80211_free_txskb(hw, skb);
}
static void
ath5k_tx_frame_completed(struct ath5k_hw *ah, struct sk_buff *skb,
struct ath5k_txq *txq, struct ath5k_tx_status *ts,
struct ath5k_buf *bf)
{
struct ieee80211_tx_info *info;
u8 tries[3];
int i;
int size = 0;
ah->stats.tx_all_count++;
ah->stats.tx_bytes_count += skb->len;
info = IEEE80211_SKB_CB(skb);
size = min_t(int, sizeof(info->status.rates), sizeof(bf->rates));
memcpy(info->status.rates, bf->rates, size);
tries[0] = info->status.rates[0].count;
tries[1] = info->status.rates[1].count;
tries[2] = info->status.rates[2].count;
ieee80211_tx_info_clear_status(info);
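/*
 * ieee80211_tx_info_clear_status() zeroes the per-rate try counts
 * (keeping the rate indices), so restore the counts saved above for
 * every rate attempted before the final one.
 */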
for (i = 0; i < ts->ts_final_idx; i++) {
struct ieee80211_tx_rate *r =
&info->status.rates[i];
r->count = tries[i];
}
info->status.rates[ts->ts_final_idx].count = ts->ts_final_retry;
info->status.rates[ts->ts_final_idx + 1].idx = -1;
if (unlikely(ts->ts_status)) {
ah->stats.ack_fail++;
if (ts->ts_status & AR5K_TXERR_FILT) {
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
ah->stats.txerr_filt++;
}
if (ts->ts_status & AR5K_TXERR_XRETRY)
ah->stats.txerr_retry++;
if (ts->ts_status & AR5K_TXERR_FIFO)
ah->stats.txerr_fifo++;
} else {
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.ack_signal = ts->ts_rssi;
/* count the successful attempt as well */
info->status.rates[ts->ts_final_idx].count++;
}
/*
* Remove MAC header padding before giving the frame
* back to mac80211.
*/
ath5k_remove_padding(skb);
if (ts->ts_antenna > 0 && ts->ts_antenna < 5)
ah->stats.antenna_tx[ts->ts_antenna]++;
else
ah->stats.antenna_tx[0]++; /* invalid */
trace_ath5k_tx_complete(ah, skb, txq, ts);
ieee80211_tx_status(ah->hw, skb);
}
static void
ath5k_tx_processq(struct ath5k_hw *ah, struct ath5k_txq *txq)
{
struct ath5k_tx_status ts = {};
struct ath5k_buf *bf, *bf0;
struct ath5k_desc *ds;
struct sk_buff *skb;
int ret;
spin_lock(&txq->lock);
list_for_each_entry_safe(bf, bf0, &txq->q, list) {
txq->txq_poll_mark = false;
/* skb might already have been processed last time. */
if (bf->skb != NULL) {
ds = bf->desc;
ret = ah->ah_proc_tx_desc(ah, ds, &ts);
if (unlikely(ret == -EINPROGRESS))
break;
else if (unlikely(ret)) {
ATH5K_ERR(ah,
"error %d while processing "
"queue %u\n", ret, txq->qnum);
break;
}
skb = bf->skb;
bf->skb = NULL;
dma_unmap_single(ah->dev, bf->skbaddr, skb->len,
DMA_TO_DEVICE);
ath5k_tx_frame_completed(ah, skb, txq, &ts, bf);
}
/*
* It's possible that the hardware can say the buffer is
* completed when it hasn't yet loaded the ds_link from
* host memory and moved on.
* Always keep the last descriptor to avoid HW races...
*/
if (ath5k_hw_get_txdp(ah, txq->qnum) != bf->daddr) {
spin_lock(&ah->txbuflock);
list_move_tail(&bf->list, &ah->txbuf);
ah->txbuf_len++;
txq->txq_len--;
spin_unlock(&ah->txbuflock);
}
}
spin_unlock(&txq->lock);
if (txq->txq_len < ATH5K_TXQ_LEN_LOW && txq->qnum < 4)
ieee80211_wake_queue(ah->hw, txq->qnum);
}
static void
ath5k_tasklet_tx(struct tasklet_struct *t)
{
int i;
struct ath5k_hw *ah = from_tasklet(ah, t, txtq);
for (i = 0; i < AR5K_NUM_TX_QUEUES; i++)
if (ah->txqs[i].setup && (ah->ah_txq_isr_txok_all & BIT(i)))
ath5k_tx_processq(ah, &ah->txqs[i]);
ah->tx_pending = false;
ath5k_set_current_imask(ah);
}
/*****************\
* Beacon handling *
\*****************/
/*
* Setup the beacon frame for transmit.
*/
static int
ath5k_beacon_setup(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
struct sk_buff *skb = bf->skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ath5k_desc *ds;
int ret = 0;
u8 antenna;
u32 flags;
const int padsize = 0;
bf->skbaddr = dma_map_single(ah->dev, skb->data, skb->len,
DMA_TO_DEVICE);
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "skb %p [data %p len %u] "
"skbaddr %llx\n", skb, skb->data, skb->len,
(unsigned long long)bf->skbaddr);
if (dma_mapping_error(ah->dev, bf->skbaddr)) {
ATH5K_ERR(ah, "beacon DMA mapping failed\n");
dev_kfree_skb_any(skb);
bf->skb = NULL;
return -EIO;
}
ds = bf->desc;
antenna = ah->ah_tx_ant;
flags = AR5K_TXDESC_NOACK;
if (ah->opmode == NL80211_IFTYPE_ADHOC && ath5k_hw_hasveol(ah)) {
ds->ds_link = bf->daddr; /* self-linked */
flags |= AR5K_TXDESC_VEOL;
} else
ds->ds_link = 0;
/*
* If we use multiple antennas on AP and use
* the Sectored AP scenario, switch antenna every
* 4 beacons to make sure everybody hears our AP.
* When a client tries to associate, hw will keep
* track of the tx antenna to be used for this client
* automatically, based on ACKed packets.
*
* Note: AP still listens and transmits RTS on the
* default antenna which is supposed to be an omni.
*
* Note2: On sectored scenarios it's possible to have
* multiple antennas (1 omni -- the default -- and 14
* sectors), so if we choose to actually support this
* mode, we need to allow the user to set how many antennas
* we have and tweak the code below to send beacons
* on all of them.
*/
if (ah->ah_ant_mode == AR5K_ANTMODE_SECTOR_AP)
antenna = ah->bsent & 4 ? 2 : 1;
/* FIXME: If we are in g mode and rate is a CCK rate
* subtract ah->ah_txpower.txp_cck_ofdm_pwr_delta
* from tx power (value is in dB units already) */
ds->ds_data = bf->skbaddr;
ret = ah->ah_setup_tx_desc(ah, ds, skb->len,
ieee80211_get_hdrlen_from_skb(skb), padsize,
AR5K_PKT_TYPE_BEACON,
(ah->ah_txpower.txp_requested * 2),
ieee80211_get_tx_rate(ah->hw, info)->hw_value,
1, AR5K_TXKEYIX_INVALID,
antenna, flags, 0, 0);
if (ret)
goto err_unmap;
return 0;
err_unmap:
dma_unmap_single(ah->dev, bf->skbaddr, skb->len, DMA_TO_DEVICE);
return ret;
}
/*
* Updates the beacon that is sent by ath5k_beacon_send. For adhoc,
* this is called only once at config_bss time, for AP we do it every
* SWBA interrupt so that the TIM will reflect buffered frames.
*
* Called with the beacon lock.
*/
int
ath5k_beacon_update(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
int ret;
struct ath5k_hw *ah = hw->priv;
struct ath5k_vif *avf;
struct sk_buff *skb;
if (WARN_ON(!vif)) {
ret = -EINVAL;
goto out;
}
skb = ieee80211_beacon_get(hw, vif, 0);
if (!skb) {
ret = -ENOMEM;
goto out;
}
avf = (void *)vif->drv_priv;
ath5k_txbuf_free_skb(ah, avf->bbuf);
avf->bbuf->skb = skb;
ret = ath5k_beacon_setup(ah, avf->bbuf);
out:
return ret;
}
/*
* Transmit a beacon frame at SWBA. Dynamic updates to the
* frame contents are done as needed and the slot time is
* also adjusted based on current state.
*
* This is called from software irq context (beacontq tasklets)
* or user context from ath5k_beacon_config.
*/
static void
ath5k_beacon_send(struct ath5k_hw *ah)
{
struct ieee80211_vif *vif;
struct ath5k_vif *avf;
struct ath5k_buf *bf;
struct sk_buff *skb;
int err;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "in beacon_send\n");
/*
* Check if the previous beacon has gone out. If
* not, don't try to post another: skip this
* period and wait for the next. Missed beacons
* indicate a problem and should not occur. If we
* miss too many consecutive beacons reset the device.
*/
if (unlikely(ath5k_hw_num_tx_pending(ah, ah->bhalq) != 0)) {
ah->bmisscount++;
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
"missed %u consecutive beacons\n", ah->bmisscount);
if (ah->bmisscount > 10) { /* NB: 10 is a guess */
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
"stuck beacon time (%u missed)\n",
ah->bmisscount);
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"stuck beacon, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
}
return;
}
if (unlikely(ah->bmisscount != 0)) {
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
"resume beacon xmit after %u misses\n",
ah->bmisscount);
ah->bmisscount = 0;
}
if ((ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs +
ah->num_mesh_vifs > 1) ||
ah->opmode == NL80211_IFTYPE_MESH_POINT) {
u64 tsf = ath5k_hw_get_tsf64(ah);
u32 tsftu = TSF_TO_TU(tsf);
int slot = ((tsftu % ah->bintval) * ATH_BCBUF) / ah->bintval;
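/*
 * Illustration (assuming ATH_BCBUF == 4): with bintval == 100 TU and
 * tsftu % bintval == 30, slot = (30 * 4) / 100 = 1, so we pick the
 * vif in the *next* slot, slot 2, whose staggered TBTT is about to
 * arrive. TSF_TO_TU() converts the 64bit TSF (usec) to TUs (1024us).
 */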
vif = ah->bslot[(slot + 1) % ATH_BCBUF];
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
"tsf %llx tsftu %x intval %u slot %u vif %p\n",
(unsigned long long)tsf, tsftu, ah->bintval, slot, vif);
} else /* only one interface */
vif = ah->bslot[0];
if (!vif)
return;
avf = (void *)vif->drv_priv;
bf = avf->bbuf;
/*
* Stop any current dma and put the new frame on the queue.
* This should never fail since we check above that no frames
* are still pending on the queue.
*/
if (unlikely(ath5k_hw_stop_beacon_queue(ah, ah->bhalq))) {
ATH5K_WARN(ah, "beacon queue %u didn't start/stop ?\n", ah->bhalq);
/* NB: hw still stops DMA, so proceed */
}
/* refresh the beacon for AP or MESH mode */
if (ah->opmode == NL80211_IFTYPE_AP ||
ah->opmode == NL80211_IFTYPE_MESH_POINT) {
err = ath5k_beacon_update(ah->hw, vif);
if (err)
return;
}
if (unlikely(bf->skb == NULL || ah->opmode == NL80211_IFTYPE_STATION ||
ah->opmode == NL80211_IFTYPE_MONITOR)) {
ATH5K_WARN(ah, "bf=%p bf_skb=%p\n", bf, bf->skb);
return;
}
trace_ath5k_tx(ah, bf->skb, &ah->txqs[ah->bhalq]);
ath5k_hw_set_txdp(ah, ah->bhalq, bf->daddr);
ath5k_hw_start_tx_dma(ah, ah->bhalq);
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON, "TXDP[%u] = %llx (%p)\n",
ah->bhalq, (unsigned long long)bf->daddr, bf->desc);
skb = ieee80211_get_buffered_bc(ah->hw, vif);
while (skb) {
ath5k_tx_queue(ah->hw, skb, ah->cabq, NULL);
if (ah->cabq->txq_len >= ah->cabq->txq_max)
break;
skb = ieee80211_get_buffered_bc(ah->hw, vif);
}
ah->bsent++;
}
/**
* ath5k_beacon_update_timers - update beacon timers
*
* @ah: struct ath5k_hw pointer we are operating on
* @bc_tsf: the timestamp of the beacon. 0 to reset the TSF. -1 to perform a
* beacon timer update based on the current HW TSF.
*
* Calculate the next target beacon transmit time (TBTT) based on the timestamp
* of a received beacon or the current local hardware TSF and write it to the
* beacon timer registers.
*
* This is called in a variety of situations, e.g. when a beacon is received,
* when a TSF update has been detected, but also when a new IBSS is created or
* when we otherwise know we have to update the timers, but we keep it in this
* function to have it all together in one place.
*/
void
ath5k_beacon_update_timers(struct ath5k_hw *ah, u64 bc_tsf)
{
u32 nexttbtt, intval, hw_tu, bc_tu;
u64 hw_tsf;
intval = ah->bintval & AR5K_BEACON_PERIOD;
if (ah->opmode == NL80211_IFTYPE_AP && ah->num_ap_vifs
+ ah->num_mesh_vifs > 1) {
intval /= ATH_BCBUF; /* staggered multi-bss beacons */
if (intval < 15)
ATH5K_WARN(ah, "intval %u is too low, min 15\n",
intval);
}
if (WARN_ON(!intval))
return;
/* beacon TSF converted to TU */
bc_tu = TSF_TO_TU(bc_tsf);
/* current TSF converted to TU */
hw_tsf = ath5k_hw_get_tsf64(ah);
hw_tu = TSF_TO_TU(hw_tsf);
#define FUDGE (AR5K_TUNE_SW_BEACON_RESP + 3)
/* We use FUDGE to make sure the next TBTT is ahead of the current TU.
* Since we later subtract AR5K_TUNE_SW_BEACON_RESP (10) in the timer
* configuration we need to make sure it is bigger than that. */
if (bc_tsf == -1) {
/*
* no beacons received, called internally.
* just need to refresh timers based on HW TSF.
*/
nexttbtt = roundup(hw_tu + FUDGE, intval);
} else if (bc_tsf == 0) {
/*
* no beacon received, probably called by ath5k_reset_tsf().
* reset TSF to start with 0.
*/
nexttbtt = intval;
intval |= AR5K_BEACON_RESET_TSF;
} else if (bc_tsf > hw_tsf) {
/*
* beacon received, SW merge happened but HW TSF not yet updated.
* not possible to reconfigure timers yet, but next time we
* receive a beacon with the same BSSID, the hardware will
* automatically update the TSF and then we need to reconfigure
* the timers.
*/
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"need to wait for HW TSF sync\n");
return;
} else {
/*
* most important case for beacon synchronization between STAs.
*
* beacon received and HW TSF has been already updated by HW.
* update next TBTT based on the TSF of the beacon, but make
* sure it is ahead of our local TSF timer.
*/
nexttbtt = bc_tu + roundup(hw_tu + FUDGE - bc_tu, intval);
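/*
 * Worked example (FUDGE == 13, intval == 100 TU): for bc_tu == 1000
 * and hw_tu == 1234 this gives roundup(1234 + 13 - 1000, 100) == 300,
 * so nexttbtt == 1300, the next TBTT on the beacon's TU grid that is
 * safely ahead of our local timer.
 */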
}
#undef FUDGE
ah->nexttbtt = nexttbtt;
intval |= AR5K_BEACON_ENA;
ath5k_hw_init_beacon_timers(ah, nexttbtt, intval);
/*
* debugging output last in order to preserve the time critical aspect
* of this function
*/
if (bc_tsf == -1)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"reconfigured timers based on HW TSF\n");
else if (bc_tsf == 0)
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"reset HW TSF and timers\n");
else
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"updated timers based on beacon TSF\n");
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON,
"bc_tsf %llx hw_tsf %llx bc_tu %u hw_tu %u nexttbtt %u\n",
(unsigned long long) bc_tsf,
(unsigned long long) hw_tsf, bc_tu, hw_tu, nexttbtt);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_BEACON, "intval %u %s %s\n",
intval & AR5K_BEACON_PERIOD,
intval & AR5K_BEACON_ENA ? "AR5K_BEACON_ENA" : "",
intval & AR5K_BEACON_RESET_TSF ? "AR5K_BEACON_RESET_TSF" : "");
}
/**
* ath5k_beacon_config - Configure the beacon queues and interrupts
*
* @ah: struct ath5k_hw pointer we are operating on
*
* In IBSS mode we use a self-linked tx descriptor if possible. We enable SWBA
* interrupts to detect TSF updates only.
*/
void
ath5k_beacon_config(struct ath5k_hw *ah)
{
spin_lock_bh(&ah->block);
ah->bmisscount = 0;
ah->imask &= ~(AR5K_INT_BMISS | AR5K_INT_SWBA);
if (ah->enable_beacon) {
/*
* In IBSS mode we use a self-linked tx descriptor and let the
* hardware send the beacons automatically. We have to load it
* only once here.
* We use the SWBA interrupt only to keep track of the beacon
* timers in order to detect automatic TSF updates.
*/
ath5k_beaconq_config(ah);
ah->imask |= AR5K_INT_SWBA;
if (ah->opmode == NL80211_IFTYPE_ADHOC) {
if (ath5k_hw_hasveol(ah))
ath5k_beacon_send(ah);
} else
ath5k_beacon_update_timers(ah, -1);
} else {
ath5k_hw_stop_beacon_queue(ah, ah->bhalq);
}
ath5k_hw_set_imr(ah, ah->imask);
spin_unlock_bh(&ah->block);
}
static void ath5k_tasklet_beacon(struct tasklet_struct *t)
{
struct ath5k_hw *ah = from_tasklet(ah, t, beacontq);
/*
* Software beacon alert--time to send a beacon.
*
* In IBSS mode we use this interrupt just to
* keep track of the next TBTT (target beacon
* transmission time) in order to detect whether
* automatic TSF updates happened.
*/
if (ah->opmode == NL80211_IFTYPE_ADHOC) {
/* XXX: only if VEOL supported */
u64 tsf = ath5k_hw_get_tsf64(ah);
ah->nexttbtt += ah->bintval;
ATH5K_DBG(ah, ATH5K_DEBUG_BEACON,
"SWBA nexttbtt: %x hw_tu: %x "
"TSF: %llx\n",
ah->nexttbtt,
TSF_TO_TU(tsf),
(unsigned long long) tsf);
} else {
spin_lock(&ah->block);
ath5k_beacon_send(ah);
spin_unlock(&ah->block);
}
}
/********************\
* Interrupt handling *
\********************/
static void
ath5k_intr_calibration_poll(struct ath5k_hw *ah)
{
if (time_is_before_eq_jiffies(ah->ah_cal_next_ani) &&
!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
!(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
/* Run ANI only when calibration is not active */
ah->ah_cal_next_ani = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
tasklet_schedule(&ah->ani_tasklet);
} else if (time_is_before_eq_jiffies(ah->ah_cal_next_short) &&
!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
!(ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)) {
/* Run calibration only when another calibration
* is not running.
*
* Note: This is for both full/short calibration,
* if it's time for a full one, ath5k_calibrate_work will deal
* with it. */
ah->ah_cal_next_short = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
ieee80211_queue_work(ah->hw, &ah->calib_work);
}
/* we could use SWI to generate enough interrupts to meet our
* calibration interval requirements, if necessary:
* AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); */
}
static void
ath5k_schedule_rx(struct ath5k_hw *ah)
{
ah->rx_pending = true;
tasklet_schedule(&ah->rxtq);
}
static void
ath5k_schedule_tx(struct ath5k_hw *ah)
{
ah->tx_pending = true;
tasklet_schedule(&ah->txtq);
}
static irqreturn_t
ath5k_intr(int irq, void *dev_id)
{
struct ath5k_hw *ah = dev_id;
enum ath5k_int status;
unsigned int counter = 1000;
/*
* If hw is not ready (or detached) and we get an
* interrupt, or if we have no interrupts pending
* (that means it's not for us) skip it.
*
* NOTE: Group 0/1 PCI interface registers are not
* supported on WiSOCs, so we can't check for pending
* interrupts (ISR belongs to another register group
* so we are ok).
*/
if (unlikely(test_bit(ATH_STAT_INVALID, ah->status) ||
((ath5k_get_bus_type(ah) != ATH_AHB) &&
!ath5k_hw_is_intr_pending(ah))))
return IRQ_NONE;
/** Main loop **/
do {
ath5k_hw_get_isr(ah, &status); /* NB: clears IRQ too */
ATH5K_DBG(ah, ATH5K_DEBUG_INTR, "status 0x%x/0x%x\n",
status, ah->imask);
/*
* Fatal hw error -> Log and reset
*
* Fatal errors are unrecoverable so we have to
* reset the card. These errors include bus and
* dma errors.
*/
if (unlikely(status & AR5K_INT_FATAL)) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"fatal int, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
/*
* RX Overrun -> Count and reset if needed
*
* Receive buffers are full. Either the bus is busy or
* the CPU is not fast enough to process all received
* frames.
*/
} else if (unlikely(status & AR5K_INT_RXORN)) {
/*
* Older chipsets need a reset to come out of this
* condition, but we treat it as RX for newer chips.
* We don't know exactly which versions need a reset -
* this guess is copied from the HAL.
*/
ah->stats.rxorn_intr++;
if (ah->ah_mac_srev < AR5K_SREV_AR5212) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"rx overrun, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
} else
ath5k_schedule_rx(ah);
} else {
/* Software Beacon Alert -> Schedule beacon tasklet */
if (status & AR5K_INT_SWBA)
tasklet_hi_schedule(&ah->beacontq);
/*
* No more RX descriptors -> Just count
*
* NB: the hardware should re-read the link when
* RXE bit is written, but it doesn't work at
* least on older hardware revs.
*/
if (status & AR5K_INT_RXEOL)
ah->stats.rxeol_intr++;
/* TX Underrun -> Bump tx trigger level */
if (status & AR5K_INT_TXURN)
ath5k_hw_update_tx_triglevel(ah, true);
/* RX -> Schedule rx tasklet */
if (status & (AR5K_INT_RXOK | AR5K_INT_RXERR))
ath5k_schedule_rx(ah);
/* TX -> Schedule tx tasklet */
if (status & (AR5K_INT_TXOK
| AR5K_INT_TXDESC
| AR5K_INT_TXERR
| AR5K_INT_TXEOL))
ath5k_schedule_tx(ah);
/* Missed beacon -> TODO
if (status & AR5K_INT_BMISS)
*/
/* MIB event -> Update counters and notify ANI */
if (status & AR5K_INT_MIB) {
ah->stats.mib_intr++;
ath5k_hw_update_mib_counters(ah);
ath5k_ani_mib_intr(ah);
}
/* GPIO -> Notify RFKill layer */
if (status & AR5K_INT_GPIO)
tasklet_schedule(&ah->rf_kill.toggleq);
}
if (ath5k_get_bus_type(ah) == ATH_AHB)
break;
} while (ath5k_hw_is_intr_pending(ah) && --counter > 0);
/*
* Until we handle rx/tx interrupts mask them on IMR
*
* NOTE: ah->(rx/tx)_pending are set when scheduling the tasklets
* and unset after we've handled the interrupts.
*/
if (ah->rx_pending || ah->tx_pending)
ath5k_set_current_imask(ah);
if (unlikely(!counter))
ATH5K_WARN(ah, "too many interrupts, giving up for now\n");
/* Fire up calibration poll */
ath5k_intr_calibration_poll(ah);
return IRQ_HANDLED;
}
/*
* Periodically recalibrate the PHY to account
* for temperature/environment changes.
*/
static void
ath5k_calibrate_work(struct work_struct *work)
{
struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
calib_work);
/* Should we run a full calibration ? */
if (time_is_before_eq_jiffies(ah->ah_cal_next_full)) {
ah->ah_cal_next_full = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
ah->ah_cal_mask |= AR5K_CALIBRATION_FULL;
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"running full calibration\n");
if (ath5k_hw_gainf_calibrate(ah) == AR5K_RFGAIN_NEED_CHANGE) {
/*
* Rfgain is out of bounds, reset the chip
* to load new gain values.
*/
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"got new rfgain, resetting\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
}
} else
ah->ah_cal_mask |= AR5K_CALIBRATION_SHORT;
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE, "channel %u/%x\n",
ieee80211_frequency_to_channel(ah->curchan->center_freq),
ah->curchan->hw_value);
if (ath5k_hw_phy_calibrate(ah, ah->curchan))
ATH5K_ERR(ah, "calibration of channel %u failed\n",
ieee80211_frequency_to_channel(
ah->curchan->center_freq));
/* Clear calibration flags */
if (ah->ah_cal_mask & AR5K_CALIBRATION_FULL)
ah->ah_cal_mask &= ~AR5K_CALIBRATION_FULL;
else if (ah->ah_cal_mask & AR5K_CALIBRATION_SHORT)
ah->ah_cal_mask &= ~AR5K_CALIBRATION_SHORT;
}
static void
ath5k_tasklet_ani(struct tasklet_struct *t)
{
struct ath5k_hw *ah = from_tasklet(ah, t, ani_tasklet);
ah->ah_cal_mask |= AR5K_CALIBRATION_ANI;
ath5k_ani_calibration(ah);
ah->ah_cal_mask &= ~AR5K_CALIBRATION_ANI;
}
static void
ath5k_tx_complete_poll_work(struct work_struct *work)
{
struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
tx_complete_work.work);
struct ath5k_txq *txq;
int i;
bool needreset = false;
if (!test_bit(ATH_STAT_STARTED, ah->status))
return;
mutex_lock(&ah->lock);
for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
if (ah->txqs[i].setup) {
txq = &ah->txqs[i];
spin_lock_bh(&txq->lock);
if (txq->txq_len > 1) {
if (txq->txq_poll_mark) {
ATH5K_DBG(ah, ATH5K_DEBUG_XMIT,
"TX queue stuck %d\n",
txq->qnum);
needreset = true;
txq->txq_stuck++;
spin_unlock_bh(&txq->lock);
break;
} else {
txq->txq_poll_mark = true;
}
}
spin_unlock_bh(&txq->lock);
}
}
if (needreset) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"TX queues stuck, resetting\n");
ath5k_reset(ah, NULL, true);
}
mutex_unlock(&ah->lock);
ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
}
/*************************\
* Initialization routines *
\*************************/
static const struct ieee80211_iface_limit if_limits[] = {
{ .max = 2048, .types = BIT(NL80211_IFTYPE_STATION) },
{ .max = 4, .types =
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_AP) },
};
static const struct ieee80211_iface_combination if_comb = {
.limits = if_limits,
.n_limits = ARRAY_SIZE(if_limits),
.max_interfaces = 2048,
.num_different_channels = 1,
};
int
ath5k_init_ah(struct ath5k_hw *ah, const struct ath_bus_ops *bus_ops)
{
struct ieee80211_hw *hw = ah->hw;
struct ath_common *common;
int ret;
int csz;
/* Initialize driver private data */
SET_IEEE80211_DEV(hw, ah->dev);
ieee80211_hw_set(hw, SUPPORTS_RC_TABLE);
ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(hw, MFP_CAPABLE);
ieee80211_hw_set(hw, SIGNAL_DBM);
ieee80211_hw_set(hw, RX_INCLUDES_FCS);
ieee80211_hw_set(hw, HOST_BROADCAST_PS_BUFFERING);
hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC) |
BIT(NL80211_IFTYPE_MESH_POINT);
hw->wiphy->iface_combinations = &if_comb;
hw->wiphy->n_iface_combinations = 1;
/* SW support for IBSS_RSN is provided by mac80211 */
hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_5_10_MHZ;
/* both antennas can be configured as RX or TX */
hw->wiphy->available_antennas_tx = 0x3;
hw->wiphy->available_antennas_rx = 0x3;
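/* headroom for the 2-byte MAC header pad ath5k_add_padding() may push */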
hw->extra_tx_headroom = 2;
wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
/*
* Mark the device as detached to avoid processing
* interrupts until setup is complete.
*/
__set_bit(ATH_STAT_INVALID, ah->status);
ah->opmode = NL80211_IFTYPE_STATION;
ah->bintval = 1000;
mutex_init(&ah->lock);
spin_lock_init(&ah->rxbuflock);
spin_lock_init(&ah->txbuflock);
spin_lock_init(&ah->block);
spin_lock_init(&ah->irqlock);
/* Setup interrupt handler */
ret = request_irq(ah->irq, ath5k_intr, IRQF_SHARED, "ath", ah);
if (ret) {
ATH5K_ERR(ah, "request_irq failed\n");
goto err;
}
common = ath5k_hw_common(ah);
common->ops = &ath5k_common_ops;
common->bus_ops = bus_ops;
common->ah = ah;
common->hw = hw;
common->priv = ah;
common->clockrate = 40;
/*
* Cache line size is used to size and align various
* structures used to communicate with the hardware.
*/
ath5k_read_cachesize(common, &csz);
common->cachelsz = csz << 2; /* convert to bytes */
spin_lock_init(&common->cc_lock);
/* Initialize device */
ret = ath5k_hw_init(ah);
if (ret)
goto err_irq;
/* Set up multi-rate retry capabilities */
if (ah->ah_capabilities.cap_has_mrr_support) {
hw->max_rates = 4;
hw->max_rate_tries = max(AR5K_INIT_RETRY_SHORT,
AR5K_INIT_RETRY_LONG);
}
hw->vif_data_size = sizeof(struct ath5k_vif);
/* Finish private driver data initialization */
ret = ath5k_init(hw);
if (ret)
goto err_ah;
ATH5K_INFO(ah, "Atheros AR%s chip found (MAC: 0x%x, PHY: 0x%x)\n",
ath5k_chip_name(AR5K_VERSION_MAC, ah->ah_mac_srev),
ah->ah_mac_srev,
ah->ah_phy_revision);
if (!ah->ah_single_chip) {
/* Single chip radio (!RF5111) */
if (ah->ah_radio_5ghz_revision &&
!ah->ah_radio_2ghz_revision) {
/* No 5GHz support -> report 2GHz radio */
if (!test_bit(AR5K_MODE_11A,
ah->ah_capabilities.cap_mode)) {
ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
ath5k_chip_name(AR5K_VERSION_RAD,
ah->ah_radio_5ghz_revision),
ah->ah_radio_5ghz_revision);
/* No 2GHz support (5110 and some
* 5GHz only cards) -> report 5GHz radio */
} else if (!test_bit(AR5K_MODE_11B,
ah->ah_capabilities.cap_mode)) {
ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
ath5k_chip_name(AR5K_VERSION_RAD,
ah->ah_radio_5ghz_revision),
ah->ah_radio_5ghz_revision);
/* Multiband radio */
} else {
ATH5K_INFO(ah, "RF%s multiband radio found"
" (0x%x)\n",
ath5k_chip_name(AR5K_VERSION_RAD,
ah->ah_radio_5ghz_revision),
ah->ah_radio_5ghz_revision);
}
}
/* Multi chip radio (RF5111 - RF2111) ->
* report both 2GHz/5GHz radios */
else if (ah->ah_radio_5ghz_revision &&
ah->ah_radio_2ghz_revision) {
ATH5K_INFO(ah, "RF%s 5GHz radio found (0x%x)\n",
ath5k_chip_name(AR5K_VERSION_RAD,
ah->ah_radio_5ghz_revision),
ah->ah_radio_5ghz_revision);
ATH5K_INFO(ah, "RF%s 2GHz radio found (0x%x)\n",
ath5k_chip_name(AR5K_VERSION_RAD,
ah->ah_radio_2ghz_revision),
ah->ah_radio_2ghz_revision);
}
}
ath5k_debug_init_device(ah);
/* ready to process interrupts */
__clear_bit(ATH_STAT_INVALID, ah->status);
return 0;
err_ah:
ath5k_hw_deinit(ah);
err_irq:
free_irq(ah->irq, ah);
err:
return ret;
}
static int
ath5k_stop_locked(struct ath5k_hw *ah)
{
ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "invalid %u\n",
test_bit(ATH_STAT_INVALID, ah->status));
/*
* Shutdown the hardware and driver:
* stop output from above
* disable interrupts
* turn off timers
* turn off the radio
* clear transmit machinery
* clear receive machinery
* drain and release tx queues
* reclaim beacon resources
* power down hardware
*
* Note that some of this work is not possible if the
* hardware is gone (invalid).
*/
ieee80211_stop_queues(ah->hw);
if (!test_bit(ATH_STAT_INVALID, ah->status)) {
ath5k_led_off(ah);
ath5k_hw_set_imr(ah, 0);
synchronize_irq(ah->irq);
ath5k_rx_stop(ah);
ath5k_hw_dma_stop(ah);
ath5k_drain_tx_buffs(ah);
ath5k_hw_phy_disable(ah);
}
return 0;
}
int ath5k_start(struct ieee80211_hw *hw)
{
struct ath5k_hw *ah = hw->priv;
struct ath_common *common = ath5k_hw_common(ah);
int ret, i;
mutex_lock(&ah->lock);
ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "mode %d\n", ah->opmode);
/*
* Stop anything previously set up. This is safe
* whether or not this is the first time through.
*/
ath5k_stop_locked(ah);
/*
* The basic interface to setting the hardware in a good
* state is ``reset''. On return the hardware is known to
* be powered up and with interrupts disabled. This must
* be followed by initialization of the appropriate bits
* and then setup of the interrupt mask.
*/
ah->curchan = ah->hw->conf.chandef.chan;
ah->imask = AR5K_INT_RXOK
| AR5K_INT_RXERR
| AR5K_INT_RXEOL
| AR5K_INT_RXORN
| AR5K_INT_TXDESC
| AR5K_INT_TXEOL
| AR5K_INT_FATAL
| AR5K_INT_GLOBAL
| AR5K_INT_MIB;
ret = ath5k_reset(ah, NULL, false);
if (ret)
goto done;
if (!ath5k_modparam_no_hw_rfkill_switch)
ath5k_rfkill_hw_start(ah);
/*
* Reset the key cache since some parts do not reset the
* contents on initial power up or resume from suspend.
*/
for (i = 0; i < common->keymax; i++)
ath_hw_keyreset(common, (u16) i);
/* Use higher rates for acks instead of base
* rate */
ah->ah_ack_bitrate_high = true;
for (i = 0; i < ARRAY_SIZE(ah->bslot); i++)
ah->bslot[i] = NULL;
ret = 0;
done:
mutex_unlock(&ah->lock);
set_bit(ATH_STAT_STARTED, ah->status);
ieee80211_queue_delayed_work(ah->hw, &ah->tx_complete_work,
msecs_to_jiffies(ATH5K_TX_COMPLETE_POLL_INT));
return ret;
}
static void ath5k_stop_tasklets(struct ath5k_hw *ah)
{
ah->rx_pending = false;
ah->tx_pending = false;
tasklet_kill(&ah->rxtq);
tasklet_kill(&ah->txtq);
tasklet_kill(&ah->beacontq);
tasklet_kill(&ah->ani_tasklet);
}
/*
* Stop the device, grabbing the top-level lock to protect
* against concurrent entry through ath5k_init (which can happen
* if another thread does a system call and the thread doing the
* stop is preempted).
*/
void ath5k_stop(struct ieee80211_hw *hw)
{
struct ath5k_hw *ah = hw->priv;
int ret;
mutex_lock(&ah->lock);
ret = ath5k_stop_locked(ah);
if (ret == 0 && !test_bit(ATH_STAT_INVALID, ah->status)) {
/*
* Don't set the card in full sleep mode!
*
* a) When the device is in this state it must be carefully
* woken up or references to registers in the PCI clock
* domain may freeze the bus (and system). This varies
* by chip and is mostly an issue with newer parts
* (madwifi sources mentioned srev >= 0x78) that go to
* sleep more quickly.
*
* b) On older chips full sleep results in weird behaviour
* during wakeup. I tested various cards with srev < 0x78
* and they don't wake up after module reload, a second
* module reload is needed to bring the card up again.
*
* Until we figure out what's going on don't enable
* full chip reset on any chip (this is what Legacy HAL
* and Sam's HAL do anyway). Instead, perform a full reset
* on the device (same as initial state after attach) and
* leave it idle (keep MAC/BB on warm reset) */
ret = ath5k_hw_on_hold(ah);
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"putting device to sleep\n");
}
mutex_unlock(&ah->lock);
ath5k_stop_tasklets(ah);
clear_bit(ATH_STAT_STARTED, ah->status);
cancel_delayed_work_sync(&ah->tx_complete_work);
if (!ath5k_modparam_no_hw_rfkill_switch)
ath5k_rfkill_hw_stop(ah);
}
/*
* Reset the hardware. If chan is not NULL, then also pause rx/tx
* and change to the given channel.
*
* This should be called with ah->lock.
*/
static int
ath5k_reset(struct ath5k_hw *ah, struct ieee80211_channel *chan,
bool skip_pcu)
{
struct ath_common *common = ath5k_hw_common(ah);
int ret, ani_mode;
bool fast = chan && modparam_fastchanswitch ? 1 : 0;
ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "resetting\n");
__set_bit(ATH_STAT_RESET, ah->status);
ath5k_hw_set_imr(ah, 0);
synchronize_irq(ah->irq);
ath5k_stop_tasklets(ah);
/* Save ani mode and disable ANI during
* reset. If we don't we might get false
* PHY error interrupts. */
ani_mode = ah->ani_state.ani_mode;
ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
/* We are going to empty hw queues
* so we should also free any remaining
* tx buffers */
ath5k_drain_tx_buffs(ah);
/* Stop PCU */
ath5k_hw_stop_rx_pcu(ah);
/* Stop DMA
*
* Note: If DMA didn't stop continue
* since only a reset will fix it.
*/
ret = ath5k_hw_dma_stop(ah);
/* RF Bus grant won't work if we have pending
* frames
*/
if (ret && fast) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"DMA didn't stop, falling back to normal reset\n");
fast = false;
}
if (chan)
ah->curchan = chan;
ret = ath5k_hw_reset(ah, ah->opmode, ah->curchan, fast, skip_pcu);
if (ret) {
ATH5K_ERR(ah, "can't reset hardware (%d)\n", ret);
goto err;
}
ret = ath5k_rx_start(ah);
if (ret) {
ATH5K_ERR(ah, "can't start recv logic\n");
goto err;
}
ath5k_ani_init(ah, ani_mode);
/*
* Set calibration intervals
*
* Note: We don't need to run calibration immediately
* since some initial calibration is done on reset
* even for fast channel switching. Also on scanning
* this will get set again and again and it won't get
* executed unless we connect somewhere and spend some
* time on the channel (that's what calibration needs
* anyway to be accurate).
*/
ah->ah_cal_next_full = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_FULL);
ah->ah_cal_next_ani = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_ANI);
ah->ah_cal_next_short = jiffies +
msecs_to_jiffies(ATH5K_TUNE_CALIBRATION_INTERVAL_SHORT);
ewma_beacon_rssi_init(&ah->ah_beacon_rssi_avg);
/* clear survey data and cycle counters */
memset(&ah->survey, 0, sizeof(ah->survey));
spin_lock_bh(&common->cc_lock);
ath_hw_cycle_counters_update(common);
memset(&common->cc_survey, 0, sizeof(common->cc_survey));
memset(&common->cc_ani, 0, sizeof(common->cc_ani));
spin_unlock_bh(&common->cc_lock);
/*
* Change channels and update the h/w rate map if we're switching;
* e.g. 11a to 11b/g.
*
* We may be doing a reset in response to an ioctl that changes the
* channel so update any state that might change as a result.
*
* XXX needed?
*/
/* ath5k_chan_change(ah, c); */
__clear_bit(ATH_STAT_RESET, ah->status);
ath5k_beacon_config(ah);
/* intrs are enabled by ath5k_beacon_config */
ieee80211_wake_queues(ah->hw);
return 0;
err:
return ret;
}
static void ath5k_reset_work(struct work_struct *work)
{
struct ath5k_hw *ah = container_of(work, struct ath5k_hw,
reset_work);
mutex_lock(&ah->lock);
ath5k_reset(ah, NULL, true);
mutex_unlock(&ah->lock);
}
static int
ath5k_init(struct ieee80211_hw *hw)
{
struct ath5k_hw *ah = hw->priv;
struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
struct ath5k_txq *txq;
u8 mac[ETH_ALEN] = {};
int ret;
/*
* Collect the channel list. The 802.11 layer
* is responsible for filtering this list based
* on settings like the phy mode and regulatory
* domain restrictions.
*/
ret = ath5k_setup_bands(hw);
if (ret) {
ATH5K_ERR(ah, "can't get channels\n");
goto err;
}
/*
* Allocate tx+rx descriptors and populate the lists.
*/
ret = ath5k_desc_alloc(ah);
if (ret) {
ATH5K_ERR(ah, "can't allocate descriptors\n");
goto err;
}
/*
* Allocate hardware transmit queues: one queue for
* beacon frames and one data queue for each QoS
* priority. Note that hw functions handle resetting
* these queues at the needed time.
*/
ret = ath5k_beaconq_setup(ah);
if (ret < 0) {
ATH5K_ERR(ah, "can't setup a beacon xmit queue\n");
goto err_desc;
}
ah->bhalq = ret;
ah->cabq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_CAB, 0);
if (IS_ERR(ah->cabq)) {
ATH5K_ERR(ah, "can't setup cab queue\n");
ret = PTR_ERR(ah->cabq);
goto err_bhal;
}
/* 5211 and 5212 usually support 10 queues but we better rely on the
* capability information */
if (ah->ah_capabilities.cap_queues.q_tx_num >= 6) {
/* This order matches mac80211's queue priority, so we can
* directly use the mac80211 queue number without any mapping */
txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VO);
if (IS_ERR(txq)) {
ATH5K_ERR(ah, "can't setup xmit queue\n");
ret = PTR_ERR(txq);
goto err_queues;
}
txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_VI);
if (IS_ERR(txq)) {
ATH5K_ERR(ah, "can't setup xmit queue\n");
ret = PTR_ERR(txq);
goto err_queues;
}
txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
if (IS_ERR(txq)) {
ATH5K_ERR(ah, "can't setup xmit queue\n");
ret = PTR_ERR(txq);
goto err_queues;
}
txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BK);
if (IS_ERR(txq)) {
ATH5K_ERR(ah, "can't setup xmit queue\n");
ret = PTR_ERR(txq);
goto err_queues;
}
hw->queues = 4;
} else {
/* older hardware (5210) can only support one data queue */
txq = ath5k_txq_setup(ah, AR5K_TX_QUEUE_DATA, AR5K_WME_AC_BE);
if (IS_ERR(txq)) {
ATH5K_ERR(ah, "can't setup xmit queue\n");
ret = PTR_ERR(txq);
goto err_queues;
}
hw->queues = 1;
}
tasklet_setup(&ah->rxtq, ath5k_tasklet_rx);
tasklet_setup(&ah->txtq, ath5k_tasklet_tx);
tasklet_setup(&ah->beacontq, ath5k_tasklet_beacon);
tasklet_setup(&ah->ani_tasklet, ath5k_tasklet_ani);
INIT_WORK(&ah->reset_work, ath5k_reset_work);
INIT_WORK(&ah->calib_work, ath5k_calibrate_work);
INIT_DELAYED_WORK(&ah->tx_complete_work, ath5k_tx_complete_poll_work);
ret = ath5k_hw_common(ah)->bus_ops->eeprom_read_mac(ah, mac);
if (ret) {
ATH5K_ERR(ah, "unable to read address from EEPROM\n");
goto err_queues;
}
SET_IEEE80211_PERM_ADDR(hw, mac);
/* All MAC address bits matter for ACKs */
ath5k_update_bssid_mask_and_opmode(ah, NULL);
regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
ret = ath_regd_init(regulatory, hw->wiphy, ath5k_reg_notifier);
if (ret) {
ATH5K_ERR(ah, "can't initialize regulatory system\n");
goto err_queues;
}
ret = ieee80211_register_hw(hw);
if (ret) {
ATH5K_ERR(ah, "can't register ieee80211 hw\n");
goto err_queues;
}
if (!ath_is_world_regd(regulatory))
regulatory_hint(hw->wiphy, regulatory->alpha2);
ath5k_init_leds(ah);
ath5k_sysfs_register(ah);
return 0;
err_queues:
ath5k_txq_release(ah);
err_bhal:
ath5k_hw_release_tx_queue(ah, ah->bhalq);
err_desc:
ath5k_desc_free(ah);
err:
return ret;
}
void
ath5k_deinit_ah(struct ath5k_hw *ah)
{
struct ieee80211_hw *hw = ah->hw;
/*
* NB: the order of these is important:
* o call the 802.11 layer before detaching ath5k_hw to
* ensure callbacks into the driver to delete global
* key cache entries can be handled
* o reclaim the tx queue data structures after calling
* the 802.11 layer as we'll get called back to reclaim
* node state and potentially want to use them
* o to cleanup the tx queues the hal is called, so detach
* it last
* XXX: ??? detach ath5k_hw ???
* Other than that, it's straightforward...
*/
ieee80211_unregister_hw(hw);
ath5k_desc_free(ah);
ath5k_txq_release(ah);
ath5k_hw_release_tx_queue(ah, ah->bhalq);
ath5k_unregister_leds(ah);
ath5k_sysfs_unregister(ah);
/*
* NB: can't reclaim these until after ieee80211_ifdetach
* returns because we'll get called back to reclaim node
* state and potentially want to use them.
*/
ath5k_hw_deinit(ah);
free_irq(ah->irq, ah);
}
bool
ath5k_any_vif_assoc(struct ath5k_hw *ah)
{
struct ath5k_vif_iter_data iter_data;
iter_data.hw_macaddr = NULL;
iter_data.any_assoc = false;
iter_data.need_set_hw_addr = false;
iter_data.found_active = true;
ieee80211_iterate_active_interfaces_atomic(
ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath5k_vif_iter, &iter_data);
return iter_data.any_assoc;
}
void
ath5k_set_beacon_filter(struct ieee80211_hw *hw, bool enable)
{
struct ath5k_hw *ah = hw->priv;
u32 rfilt;
rfilt = ath5k_hw_get_rx_filter(ah);
if (enable)
rfilt |= AR5K_RX_FILTER_BEACON;
else
rfilt &= ~AR5K_RX_FILTER_BEACON;
ath5k_hw_set_rx_filter(ah, rfilt);
ah->filter_flags = rfilt;
}
void _ath5k_printk(const struct ath5k_hw *ah, const char *level,
const char *fmt, ...)
{
struct va_format vaf;
va_list args;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
if (ah && ah->hw)
printk("%s" pr_fmt("%s: %pV"),
level, wiphy_name(ah->hw->wiphy), &vaf);
else
printk("%s" pr_fmt("%pV"), level, &vaf);
va_end(args);
}
|
linux-master
|
drivers/net/wireless/ath/ath5k/base.c
|
/*
* Copyright (c) 2004-2008 Reyk Floeter <[email protected]>
* Copyright (c) 2006-2008 Nick Kossifidis <[email protected]>
* Copyright (c) 2007-2008 Matthew W. S. Bell <[email protected]>
* Copyright (c) 2007-2008 Luis Rodriguez <[email protected]>
* Copyright (c) 2007-2008 Pavel Roskin <[email protected]>
* Copyright (c) 2007-2008 Jiri Slaby <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/*********************************\
* Protocol Control Unit Functions *
\*********************************/
#include <asm/unaligned.h>
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* DOC: Protocol Control Unit (PCU) functions
*
* The protocol control unit is responsible for maintaining various protocol
* properties before a frame is sent to and after a frame is received from
* the baseband. To be more specific, the PCU handles:
*
* - Buffering of RX and TX frames (after QCU/DCUs)
*
* - Encrypting and decrypting (using the built-in engine)
*
* - Generating ACKs, RTS/CTS frames
*
* - Maintaining TSF
*
* - FCS
*
* - Updating beacon data (with TSF etc)
*
* - Generating virtual CCA
*
* - RX/Multicast filtering
*
* - BSSID filtering
*
* - Various statistics
*
* - Different operating modes: AP, STA, IBSS
*
* Note: Most of these functions can be tweaked/bypassed so you can do
* them in sw above for debugging or research. For more info check out the
* PCU registers in reg.h.
*/
/**
* DOC: ACK rates
*
* AR5212+ can use higher rates for ack transmission
* based on current tx rate instead of the base rate.
* It does this to better utilize channel usage.
* There is a mapping between G rates (that cover both
* CCK and OFDM) and ack rates that we use when setting
* rate -> duration table. This mapping is hw-based so
* don't change anything.
*
* To enable this functionality we must set
* ah->ah_ack_bitrate_high to true else base rate is
* used (1Mb for CCK, 6Mb for OFDM).
*/
static const unsigned int ack_rates_high[] =
/* Tx -> ACK */
/* 1Mb -> 1Mb */ { 0,
/* 2MB -> 2Mb */ 1,
/* 5.5Mb -> 2Mb */ 1,
/* 11Mb -> 2Mb */ 1,
/* 6Mb -> 6Mb */ 4,
/* 9Mb -> 6Mb */ 4,
/* 12Mb -> 12Mb */ 6,
/* 18Mb -> 12Mb */ 6,
/* 24Mb -> 24Mb */ 8,
/* 36Mb -> 24Mb */ 8,
/* 48Mb -> 24Mb */ 8,
/* 54Mb -> 24Mb */ 8 };
/*******************\
* Helper functions *
\*******************/
/**
* ath5k_hw_get_frame_duration() - Get tx time of a frame
* @ah: The &struct ath5k_hw
* @band: One of enum nl80211_band
* @len: Frame's length in bytes
* @rate: The @struct ieee80211_rate
* @shortpre: Indicates a short preamble
*
* Calculate the tx duration of a frame given its rate and length.
* It extends ieee80211_generic_frame_duration() for non-standard
* bwmodes.
*/
int
ath5k_hw_get_frame_duration(struct ath5k_hw *ah, enum nl80211_band band,
int len, struct ieee80211_rate *rate, bool shortpre)
{
int sifs, preamble, plcp_bits, sym_time;
int bitrate, bits, symbols, symbol_bits;
int dur;
/* Fallback */
if (!ah->ah_bwmode) {
__le16 raw_dur = ieee80211_generic_frame_duration(ah->hw,
NULL, band, len, rate);
/* subtract difference between long and short preamble */
dur = le16_to_cpu(raw_dur);
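/* ieee80211_generic_frame_duration() assumes a long CCK preamble
 * (144us preamble + 48us PLCP header = 192us); a short preamble
 * takes 96us in total, hence the 96us subtracted below */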
if (shortpre)
dur -= 96;
return dur;
}
bitrate = rate->bitrate;
preamble = AR5K_INIT_OFDM_PREAMBLE_TIME;
plcp_bits = AR5K_INIT_OFDM_PLCP_BITS;
sym_time = AR5K_INIT_OFDM_SYMBOL_TIME;
switch (ah->ah_bwmode) {
case AR5K_BWMODE_40MHZ:
sifs = AR5K_INIT_SIFS_TURBO;
preamble = AR5K_INIT_OFDM_PREAMBLE_TIME_MIN;
break;
case AR5K_BWMODE_10MHZ:
sifs = AR5K_INIT_SIFS_HALF_RATE;
preamble *= 2;
sym_time *= 2;
bitrate = DIV_ROUND_UP(bitrate, 2);
break;
case AR5K_BWMODE_5MHZ:
sifs = AR5K_INIT_SIFS_QUARTER_RATE;
preamble *= 4;
sym_time *= 4;
bitrate = DIV_ROUND_UP(bitrate, 4);
break;
default:
sifs = AR5K_INIT_SIFS_DEFAULT_BG;
break;
}
bits = plcp_bits + (len << 3);
/* Bit rate is in 100Kbits */
symbol_bits = bitrate * sym_time;
symbols = DIV_ROUND_UP(bits * 10, symbol_bits);
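/* Units: rate->bitrate is in 100kbit/s, i.e. tenths of a bit per
 * usec, so symbol_bits is ten times the real bits per symbol; the
 * * 10 on the dividend above keeps both sides in the same units */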
dur = sifs + preamble + (sym_time * symbols);
return dur;
}
/**
* ath5k_hw_get_default_slottime() - Get the default slot time for current mode
* @ah: The &struct ath5k_hw
*/
unsigned int
ath5k_hw_get_default_slottime(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int slot_time;
switch (ah->ah_bwmode) {
case AR5K_BWMODE_40MHZ:
slot_time = AR5K_INIT_SLOT_TIME_TURBO;
break;
case AR5K_BWMODE_10MHZ:
slot_time = AR5K_INIT_SLOT_TIME_HALF_RATE;
break;
case AR5K_BWMODE_5MHZ:
slot_time = AR5K_INIT_SLOT_TIME_QUARTER_RATE;
break;
case AR5K_BWMODE_DEFAULT:
default:
slot_time = AR5K_INIT_SLOT_TIME_DEFAULT;
if ((channel->hw_value == AR5K_MODE_11B) && !ah->ah_short_slot)
slot_time = AR5K_INIT_SLOT_TIME_B;
break;
}
return slot_time;
}
/**
* ath5k_hw_get_default_sifs() - Get the default SIFS for current mode
* @ah: The &struct ath5k_hw
*/
unsigned int
ath5k_hw_get_default_sifs(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
unsigned int sifs;
switch (ah->ah_bwmode) {
case AR5K_BWMODE_40MHZ:
sifs = AR5K_INIT_SIFS_TURBO;
break;
case AR5K_BWMODE_10MHZ:
sifs = AR5K_INIT_SIFS_HALF_RATE;
break;
case AR5K_BWMODE_5MHZ:
sifs = AR5K_INIT_SIFS_QUARTER_RATE;
break;
case AR5K_BWMODE_DEFAULT:
default:
sifs = AR5K_INIT_SIFS_DEFAULT_BG;
if (channel->band == NL80211_BAND_5GHZ)
sifs = AR5K_INIT_SIFS_DEFAULT_A;
break;
}
return sifs;
}
/**
* ath5k_hw_update_mib_counters() - Update MIB counters (mac layer statistics)
* @ah: The &struct ath5k_hw
*
* Reads MIB counters from PCU and updates sw statistics. Is called after a
* MIB interrupt, because one of these counters might have reached its maximum
* and triggered the MIB interrupt, to let us read and clear the counter.
*
* NOTE: Is called in interrupt context!
*/
void
ath5k_hw_update_mib_counters(struct ath5k_hw *ah)
{
struct ath5k_statistics *stats = &ah->stats;
/* Read-And-Clear */
stats->ack_fail += ath5k_hw_reg_read(ah, AR5K_ACK_FAIL);
stats->rts_fail += ath5k_hw_reg_read(ah, AR5K_RTS_FAIL);
stats->rts_ok += ath5k_hw_reg_read(ah, AR5K_RTS_OK);
stats->fcs_error += ath5k_hw_reg_read(ah, AR5K_FCS_FAIL);
stats->beacons += ath5k_hw_reg_read(ah, AR5K_BEACON_CNT);
}
/******************\
* ACK/CTS Timeouts *
\******************/
/**
* ath5k_hw_write_rate_duration() - Fill rate code to duration table
* @ah: The &struct ath5k_hw
*
* Write the rate code to duration table upon hw reset. This is a helper for
* ath5k_hw_pcu_init(). It seems all this is doing is setting an ACK timeout on
* the hardware, based on current mode, for each rate. The rates which are
* capable of short preamble (802.11b rates 2Mbps, 5.5Mbps, and 11Mbps) have
* a different rate code, so we write their value twice (once for long
* preamble and once for short).
*
* Note: Band doesn't matter here, if we set the values for OFDM it works
* on both a and g modes. So all we have to do is set values for all g rates
* that include all OFDM and CCK rates.
*
*/
static inline void
ath5k_hw_write_rate_duration(struct ath5k_hw *ah)
{
struct ieee80211_rate *rate;
unsigned int i;
/* 802.11g covers both OFDM and CCK */
u8 band = NL80211_BAND_2GHZ;
/* Write rate duration table */
for (i = 0; i < ah->sbands[band].n_bitrates; i++) {
u32 reg;
u16 tx_time;
if (ah->ah_ack_bitrate_high)
rate = &ah->sbands[band].bitrates[ack_rates_high[i]];
/* CCK -> 1Mb */
else if (i < 4)
rate = &ah->sbands[band].bitrates[0];
/* OFDM -> 6Mb */
else
rate = &ah->sbands[band].bitrates[4];
/* Set ACK timeout */
reg = AR5K_RATE_DUR(rate->hw_value);
/* An ACK frame consists of 10 bytes. If you add the FCS,
* which ieee80211_generic_frame_duration() adds,
* its 14 bytes. Note we use the control rate and not the
* actual rate for this rate. See mac80211 tx.c
* ieee80211_duration() for a brief description of
* what rate we should choose to TX ACKs. */
tx_time = ath5k_hw_get_frame_duration(ah, band, 10,
rate, false);
ath5k_hw_reg_write(ah, tx_time, reg);
if (!(rate->flags & IEEE80211_RATE_SHORT_PREAMBLE))
continue;
tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, true);
ath5k_hw_reg_write(ah, tx_time,
reg + (AR5K_SET_SHORT_PREAMBLE << 2));
}
}
/**
* ath5k_hw_set_ack_timeout() - Set ACK timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
static int
ath5k_hw_set_ack_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
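/* Reject timeouts larger than the AR5K_TIME_OUT_ACK field can hold:
 * the field's maximum value, converted from hw clock units to usec,
 * must still exceed the requested timeout */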
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_ACK))
<= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_ACK,
ath5k_hw_htoclock(ah, timeout));
return 0;
}
/**
* ath5k_hw_set_cts_timeout() - Set CTS timeout on PCU
* @ah: The &struct ath5k_hw
* @timeout: Timeout in usec
*/
static int
ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
{
if (ath5k_hw_clocktoh(ah, AR5K_REG_MS(0xffffffff, AR5K_TIME_OUT_CTS))
<= timeout)
return -EINVAL;
AR5K_REG_WRITE_BITS(ah, AR5K_TIME_OUT, AR5K_TIME_OUT_CTS,
ath5k_hw_htoclock(ah, timeout));
return 0;
}
/*******************\
* RX filter Control *
\*******************/
/**
* ath5k_hw_set_lladdr() - Set station id
* @ah: The &struct ath5k_hw
* @mac: The card's mac address (array of octets)
*
* Set station id on hw using the provided mac address
*/
int
ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 low_id, high_id;
u32 pcu_reg;
/* Set new station ID */
memcpy(common->macaddr, mac, ETH_ALEN);
pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
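/* STA_ID1 keeps its upper 16 flag bits (pcu_reg); the first four MAC
 * octets go to STA_ID0 and the last two into the low half of STA_ID1 */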
low_id = get_unaligned_le32(mac);
high_id = get_unaligned_le16(mac + 4);
ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
return 0;
}
/**
* ath5k_hw_set_bssid() - Set current BSSID on hw
* @ah: The &struct ath5k_hw
*
* Sets the current BSSID and BSSID mask we have from the
* common struct into the hardware
*/
void
ath5k_hw_set_bssid(struct ath5k_hw *ah)
{
struct ath_common *common = ath5k_hw_common(ah);
u16 tim_offset = 0;
/*
* Set BSSID mask on 5212
*/
if (ah->ah_version == AR5K_AR5212)
ath_hw_setbssidmask(common);
/*
* Set BSSID
*/
ath5k_hw_reg_write(ah,
get_unaligned_le32(common->curbssid),
AR5K_BSS_ID0);
ath5k_hw_reg_write(ah,
get_unaligned_le16(common->curbssid + 4) |
((common->curaid & 0x3fff) << AR5K_BSS_ID1_AID_S),
AR5K_BSS_ID1);
if (common->curaid == 0) {
ath5k_hw_disable_pspoll(ah);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
tim_offset ? tim_offset + 4 : 0);
ath5k_hw_enable_pspoll(ah, NULL, 0);
}
/**
* ath5k_hw_set_bssid_mask() - Set the BSSID mask used to filter received frames
* @ah: The &struct ath5k_hw
* @mask: The BSSID mask to set (array of octets)
*
* BSSID masking is a method used by AR5212 and newer hardware to inform PCU
* which bits of the interface's MAC address should be looked at when trying
* to decide which packets to ACK. In station mode and AP mode with a single
* BSS every bit matters since we lock to only one BSS. In AP mode with
* multiple BSSes (virtual interfaces) not every bit matters because hw must
* accept frames for all BSSes and so we tweak some bits of our mac address
* in order to have multiple BSSes.
*
* For more information check out ../hw.c of the common ath module.
*/
void
ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
{
struct ath_common *common = ath5k_hw_common(ah);
/* Cache bssid mask so that we can restore it
* on reset */
memcpy(common->bssidmask, mask, ETH_ALEN);
if (ah->ah_version == AR5K_AR5212)
ath_hw_setbssidmask(common);
}
/**
* ath5k_hw_set_mcast_filter() - Set multicast filter
* @ah: The &struct ath5k_hw
* @filter0: Lower 32bits of multicast filter
* @filter1: Higher 16bits of multicast filter
*/
void
ath5k_hw_set_mcast_filter(struct ath5k_hw *ah, u32 filter0, u32 filter1)
{
ath5k_hw_reg_write(ah, filter0, AR5K_MCAST_FILTER0);
ath5k_hw_reg_write(ah, filter1, AR5K_MCAST_FILTER1);
}
/**
* ath5k_hw_get_rx_filter() - Get current rx filter
* @ah: The &struct ath5k_hw
*
* Returns the RX filter by reading rx filter and
* phy error filter registers. RX filter is used
* to set the allowed frame types that PCU will accept
* and pass to the driver. For a list of frame types
* check out reg.h.
*/
u32
ath5k_hw_get_rx_filter(struct ath5k_hw *ah)
{
u32 data, filter = 0;
filter = ath5k_hw_reg_read(ah, AR5K_RX_FILTER);
/*Radar detection for 5212*/
if (ah->ah_version == AR5K_AR5212) {
data = ath5k_hw_reg_read(ah, AR5K_PHY_ERR_FIL);
if (data & AR5K_PHY_ERR_FIL_RADAR)
filter |= AR5K_RX_FILTER_RADARERR;
if (data & (AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK))
filter |= AR5K_RX_FILTER_PHYERR;
}
return filter;
}
/**
* ath5k_hw_set_rx_filter() - Set rx filter
* @ah: The &struct ath5k_hw
* @filter: RX filter mask (see reg.h)
*
* Sets RX filter register and also handles PHY error filter
* register on 5212 and newer chips so that we have proper PHY
* error reporting.
*/
void
ath5k_hw_set_rx_filter(struct ath5k_hw *ah, u32 filter)
{
u32 data = 0;
/* Set PHY error filter register on 5212*/
if (ah->ah_version == AR5K_AR5212) {
if (filter & AR5K_RX_FILTER_RADARERR)
data |= AR5K_PHY_ERR_FIL_RADAR;
if (filter & AR5K_RX_FILTER_PHYERR)
data |= AR5K_PHY_ERR_FIL_OFDM | AR5K_PHY_ERR_FIL_CCK;
}
/*
* The AR5210 uses promiscuous mode to detect radar activity
*/
if (ah->ah_version == AR5K_AR5210 &&
(filter & AR5K_RX_FILTER_RADARERR)) {
filter &= ~AR5K_RX_FILTER_RADARERR;
filter |= AR5K_RX_FILTER_PROM;
}
/*Zero length DMA (phy error reporting) */
if (data)
AR5K_REG_ENABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
else
AR5K_REG_DISABLE_BITS(ah, AR5K_RXCFG, AR5K_RXCFG_ZLFDMA);
/*Write RX Filter register*/
ath5k_hw_reg_write(ah, filter & 0xff, AR5K_RX_FILTER);
/*Write PHY error filter register on 5212*/
if (ah->ah_version == AR5K_AR5212)
ath5k_hw_reg_write(ah, data, AR5K_PHY_ERR_FIL);
}
/****************\
* Beacon control *
\****************/
#define ATH5K_MAX_TSF_READ 10
/**
* ath5k_hw_get_tsf64() - Get the full 64bit TSF
* @ah: The &struct ath5k_hw
*
* Returns the current TSF
*/
u64
ath5k_hw_get_tsf64(struct ath5k_hw *ah)
{
u32 tsf_lower, tsf_upper1, tsf_upper2;
int i;
unsigned long flags;
/* This code is time critical - we don't want to be interrupted here */
local_irq_save(flags);
/*
* While reading TSF upper and then lower part, the clock is still
* counting (or jumping in case of IBSS merge) so we might get
* inconsistent values. To avoid this, we read the upper part again
* and check it has not been changed. We make the hypothesis that a
 * maximum of 3 changes can happen in a row (we use 10 as a safe
* value).
*
* Impact on performance is pretty small, since in most cases, only
* 3 register reads are needed.
*/
tsf_upper1 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
for (i = 0; i < ATH5K_MAX_TSF_READ; i++) {
tsf_lower = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
tsf_upper2 = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
if (tsf_upper2 == tsf_upper1)
break;
tsf_upper1 = tsf_upper2;
}
local_irq_restore(flags);
WARN_ON(i == ATH5K_MAX_TSF_READ);
return ((u64)tsf_upper1 << 32) | tsf_lower;
}
#undef ATH5K_MAX_TSF_READ
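/*
 * Example (hedged sketch, illustrative only): since the TSF counts
 * microseconds and 1 TU = 1024 usec, a caller could measure elapsed
 * time in TUs with the helper above:
 *
 *	u64 t0 = ath5k_hw_get_tsf64(ah);
 *	... do something ...
 *	u64 elapsed_tu = (ath5k_hw_get_tsf64(ah) - t0) >> 10;
 */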
/**
* ath5k_hw_set_tsf64() - Set a new 64bit TSF
* @ah: The &struct ath5k_hw
* @tsf64: The new 64bit TSF
*
* Sets the new TSF
*/
void
ath5k_hw_set_tsf64(struct ath5k_hw *ah, u64 tsf64)
{
ath5k_hw_reg_write(ah, tsf64 & 0xffffffff, AR5K_TSF_L32);
ath5k_hw_reg_write(ah, (tsf64 >> 32) & 0xffffffff, AR5K_TSF_U32);
}
/**
* ath5k_hw_reset_tsf() - Force a TSF reset
* @ah: The &struct ath5k_hw
*
* Forces a TSF reset on PCU
*/
void
ath5k_hw_reset_tsf(struct ath5k_hw *ah)
{
u32 val;
val = ath5k_hw_reg_read(ah, AR5K_BEACON) | AR5K_BEACON_RESET_TSF;
/*
* Each write to the RESET_TSF bit toggles a hardware internal
* signal to reset TSF, but if left high it will cause a TSF reset
* on the next chip reset as well. Thus we always write the value
* twice to clear the signal.
*/
ath5k_hw_reg_write(ah, val, AR5K_BEACON);
ath5k_hw_reg_write(ah, val, AR5K_BEACON);
}
/**
* ath5k_hw_init_beacon_timers() - Initialize beacon timers
* @ah: The &struct ath5k_hw
* @next_beacon: Next TBTT
* @interval: Current beacon interval
*
* This function is used to initialize beacon timers based on current
* operation mode and settings.
*/
void
ath5k_hw_init_beacon_timers(struct ath5k_hw *ah, u32 next_beacon, u32 interval)
{
u32 timer1, timer2, timer3;
/*
* Set the additional timers by mode
*/
switch (ah->opmode) {
case NL80211_IFTYPE_MONITOR:
case NL80211_IFTYPE_STATION:
/* In STA mode timer1 is used as next wakeup
* timer and timer2 as next CFP duration start
* timer. Both in 1/8TUs. */
/* TODO: PCF handling */
if (ah->ah_version == AR5K_AR5210) {
timer1 = 0xffffffff;
timer2 = 0xffffffff;
} else {
timer1 = 0x0000ffff;
timer2 = 0x0007ffff;
}
/* Mark associated AP as PCF incapable for now */
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PCF);
break;
case NL80211_IFTYPE_ADHOC:
AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG, AR5K_TXCFG_ADHOC_BCN_ATIM);
fallthrough;
default:
/* On non-STA modes timer1 is used as next DMA
* beacon alert (DBA) timer and timer2 as next
* software beacon alert. Both in 1/8TUs. */
timer1 = (next_beacon - AR5K_TUNE_DMA_BEACON_RESP) << 3;
timer2 = (next_beacon - AR5K_TUNE_SW_BEACON_RESP) << 3;
break;
}
/* Timer3 marks the end of our ATIM window;
 * a zero-length window is not allowed because
 * we'll get no beacons */
timer3 = next_beacon + 1;
/*
* Set the beacon register and enable all timers.
*/
/* When in AP or Mesh Point mode zero timer0 to start TSF */
if (ah->opmode == NL80211_IFTYPE_AP ||
ah->opmode == NL80211_IFTYPE_MESH_POINT)
ath5k_hw_reg_write(ah, 0, AR5K_TIMER0);
ath5k_hw_reg_write(ah, next_beacon, AR5K_TIMER0);
ath5k_hw_reg_write(ah, timer1, AR5K_TIMER1);
ath5k_hw_reg_write(ah, timer2, AR5K_TIMER2);
ath5k_hw_reg_write(ah, timer3, AR5K_TIMER3);
/* Force a TSF reset if requested and enable beacons */
if (interval & AR5K_BEACON_RESET_TSF)
ath5k_hw_reset_tsf(ah);
ath5k_hw_reg_write(ah, interval & (AR5K_BEACON_PERIOD |
AR5K_BEACON_ENABLE),
AR5K_BEACON);
/* Flush any pending BMISS interrupts on ISR by
* performing a clear-on-write operation on PISR
* register for the BMISS bit (writing a bit on
* ISR toggles a reset for that bit and leaves
* the remaining bits intact) */
if (ah->ah_version == AR5K_AR5210)
ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_ISR);
else
ath5k_hw_reg_write(ah, AR5K_ISR_BMISS, AR5K_PISR);
/* TODO: Set enhanced sleep registers on AR5212
* based on vif->bss_conf params, until then
* disable power save reporting.*/
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_PWR_SV);
}
/**
* ath5k_check_timer_win() - Check if timer B is timer A + window
* @a: timer a (before b)
* @b: timer b (after a)
* @window: difference between a and b
* @intval: timers are increased by this interval
*
* This helper function checks if timer B is timer A + window and covers
* cases where timer A or B might have already been updated or wrapped
* around (Timers are 16 bit).
*
* Returns true if O.K.
*/
static inline bool
ath5k_check_timer_win(int a, int b, int window, int intval)
{
/*
* 1.) usually B should be A + window
* 2.) A already updated, B not updated yet
* 3.) A already updated and has wrapped around
* 4.) B has wrapped around
*/
if ((b - a == window) || /* 1.) */
(a - b == intval - window) || /* 2.) */
((a | 0x10000) - b == intval - window) || /* 3.) */
((b | 0x10000) - a == window)) /* 4.) */
return true; /* O.K. */
return false;
}
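/*
 * Worked example (illustrative values): with intval = 100 and window = 10,
 * normally b = a + window, e.g. a = 200, b = 210 (case 1). If only a was
 * updated: a = 300, b = 210, so a - b = 90 = intval - window (case 2).
 * If a was updated and wrapped at 16 bit: a = (65500 + 100) % 65536 = 64,
 * b = 65510, so (a | 0x10000) - b = 90 = intval - window (case 3). If b
 * wrapped instead: a = 65530, b = 4, so (b | 0x10000) - a = 10 = window
 * (case 4).
 */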
/**
* ath5k_hw_check_beacon_timers() - Check if the beacon timers are correct
* @ah: The &struct ath5k_hw
* @intval: beacon interval
*
* This is a workaround for IBSS mode
*
* The need for this function arises from the fact that we have 4 separate
* HW timer registers (TIMER0 - TIMER3), which are closely related to the
* next beacon target time (NBTT), and that the HW updates these timers
* separately based on the current TSF value. The hardware increments each
* timer by the beacon interval, when the local TSF converted to TU is equal
* to the value stored in the timer.
*
* The reception of a beacon with the same BSSID can update the local HW TSF
* at any time - this is something we can't avoid. If the TSF jumps to a
* time which is later than the time stored in a timer, this timer will not
* be updated until the TSF in TU wraps around at 16 bit (the size of the
* timers) and reaches the time which is stored in the timer.
*
* The problem is that these timers are closely related to TIMER0 (NBTT) and
* that they define a time "window". When the TSF jumps between two timers
* (e.g. ATIM and NBTT), the one in the past will be left behind (not
* updated), while the one in the future will be updated every beacon
* interval. This causes the window to get larger, until the TSF wraps
* around as described above and the timer which was left behind gets
 * updated again. But because the beacon interval is usually not an exact
 * divisor of the timer size (16 bit), an unwanted "window" between
 * these timers will have developed by then!
*
* This is especially important with the ATIM window, because during
* the ATIM window only ATIM frames and no data frames are allowed to be
* sent, which creates transmission pauses after each beacon. This symptom
* has been described as "ramping ping" because ping times increase linearly
* for some time and then drop down again. A wrong window on the DMA beacon
* timer has the same effect, so we check for these two conditions.
*
* Returns true if O.K.
*/
bool
ath5k_hw_check_beacon_timers(struct ath5k_hw *ah, int intval)
{
unsigned int nbtt, atim, dma;
nbtt = ath5k_hw_reg_read(ah, AR5K_TIMER0);
atim = ath5k_hw_reg_read(ah, AR5K_TIMER3);
dma = ath5k_hw_reg_read(ah, AR5K_TIMER1) >> 3;
/* NOTE: SWBA is different. Having a wrong window there does not
* stop us from sending data and this condition is caught by
* other means (SWBA interrupt) */
if (ath5k_check_timer_win(nbtt, atim, 1, intval) &&
ath5k_check_timer_win(dma, nbtt, AR5K_TUNE_DMA_BEACON_RESP,
intval))
return true; /* O.K. */
return false;
}
/**
* ath5k_hw_set_coverage_class() - Set IEEE 802.11 coverage class
* @ah: The &struct ath5k_hw
* @coverage_class: IEEE 802.11 coverage class number
*
* Sets IFS intervals and ACK/CTS timeouts for given coverage class.
*/
void
ath5k_hw_set_coverage_class(struct ath5k_hw *ah, u8 coverage_class)
{
/* As defined by IEEE 802.11-2007 17.3.8.6 */
int slot_time = ath5k_hw_get_default_slottime(ah) + 3 * coverage_class;
int ack_timeout = ath5k_hw_get_default_sifs(ah) + slot_time;
int cts_timeout = ack_timeout;
ath5k_hw_set_ifs_intervals(ah, slot_time);
ath5k_hw_set_ack_timeout(ah, ack_timeout);
ath5k_hw_set_cts_timeout(ah, cts_timeout);
ah->ah_coverage_class = coverage_class;
}
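/*
 * Worked example (illustrative, assuming the 802.11a defaults of a 9 us
 * slot time and a 16 us SIFS): for coverage_class = 2 we get
 * slot_time = 9 + 3 * 2 = 15 us and ack_timeout = cts_timeout =
 * 16 + 15 = 31 us.
 */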
/***************************\
* Init/Start/Stop functions *
\***************************/
/**
* ath5k_hw_start_rx_pcu() - Start RX engine
* @ah: The &struct ath5k_hw
*
* Starts RX engine on PCU so that hw can process RXed frames
* (ACK etc).
*
* NOTE: RX DMA should be already enabled using ath5k_hw_start_rx_dma
*/
void
ath5k_hw_start_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
* ath5k_hw_stop_rx_pcu() - Stop RX engine
* @ah: The &struct ath5k_hw
*
* Stops RX engine on PCU
*/
void
ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah)
{
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW, AR5K_DIAG_SW_DIS_RX);
}
/**
* ath5k_hw_set_opmode() - Set PCU operating mode
* @ah: The &struct ath5k_hw
* @op_mode: One of enum nl80211_iftype
*
* Configure PCU for the various operating modes (AP/STA etc)
*/
int
ath5k_hw_set_opmode(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
struct ath_common *common = ath5k_hw_common(ah);
u32 pcu_reg, beacon_reg, low_id, high_id;
ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "mode %d\n", op_mode);
/* Preserve rest settings */
pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
pcu_reg &= ~(AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_AP
| AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
(AR5K_STA_ID1_PWR_SV | AR5K_STA_ID1_NO_PSPOLL) : 0));
beacon_reg = 0;
switch (op_mode) {
case NL80211_IFTYPE_ADHOC:
pcu_reg |= AR5K_STA_ID1_ADHOC | AR5K_STA_ID1_KEYSRCH_MODE;
beacon_reg |= AR5K_BCR_ADHOC;
if (ah->ah_version == AR5K_AR5210)
pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
else
AR5K_REG_ENABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
pcu_reg |= AR5K_STA_ID1_AP | AR5K_STA_ID1_KEYSRCH_MODE;
beacon_reg |= AR5K_BCR_AP;
if (ah->ah_version == AR5K_AR5210)
pcu_reg |= AR5K_STA_ID1_NO_PSPOLL;
else
AR5K_REG_DISABLE_BITS(ah, AR5K_CFG, AR5K_CFG_IBSS);
break;
case NL80211_IFTYPE_STATION:
pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
AR5K_STA_ID1_PWR_SV : 0);
fallthrough;
case NL80211_IFTYPE_MONITOR:
pcu_reg |= AR5K_STA_ID1_KEYSRCH_MODE
| (ah->ah_version == AR5K_AR5210 ?
AR5K_STA_ID1_NO_PSPOLL : 0);
break;
default:
return -EINVAL;
}
/*
* Set PCU registers
*/
low_id = get_unaligned_le32(common->macaddr);
high_id = get_unaligned_le16(common->macaddr + 4);
ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
/*
* Set Beacon Control Register on 5210
*/
if (ah->ah_version == AR5K_AR5210)
ath5k_hw_reg_write(ah, beacon_reg, AR5K_BCR);
return 0;
}
/**
* ath5k_hw_pcu_init() - Initialize PCU
* @ah: The &struct ath5k_hw
* @op_mode: One of enum nl80211_iftype
*
* This function is used to initialize PCU by setting current
* operation mode and various other settings.
*/
void
ath5k_hw_pcu_init(struct ath5k_hw *ah, enum nl80211_iftype op_mode)
{
/* Set bssid and bssid mask */
ath5k_hw_set_bssid(ah);
/* Set PCU config */
ath5k_hw_set_opmode(ah, op_mode);
/* Write rate duration table only on AR5212 and if
* virtual interface has already been brought up
* XXX: rethink this after new mode changes to
* mac80211 are integrated */
if (ah->ah_version == AR5K_AR5212 &&
ah->nvifs)
ath5k_hw_write_rate_duration(ah);
/* Set RSSI/BRSSI thresholds
*
* Note: If we decide to set this value
	 * dynamically, keep in mind that when the AR5K_RSSI_THR
	 * register is read it might return 0x40 if we haven't
	 * written anything to it, and the BMISS RSSI threshold is zeroed.
* So doing a save/restore procedure here isn't the right
* choice. Instead store it on ath5k_hw */
ath5k_hw_reg_write(ah, (AR5K_TUNE_RSSI_THRES |
AR5K_TUNE_BMISS_THRES <<
AR5K_RSSI_THR_BMISS_S),
AR5K_RSSI_THR);
/* MIC QoS support */
if (ah->ah_mac_srev >= AR5K_SREV_AR2413) {
ath5k_hw_reg_write(ah, 0x000100aa, AR5K_MIC_QOS_CTL);
ath5k_hw_reg_write(ah, 0x00003210, AR5K_MIC_QOS_SEL);
}
/* QoS NOACK Policy */
if (ah->ah_version == AR5K_AR5212) {
ath5k_hw_reg_write(ah,
AR5K_REG_SM(2, AR5K_QOS_NOACK_2BIT_VALUES) |
AR5K_REG_SM(5, AR5K_QOS_NOACK_BIT_OFFSET) |
AR5K_REG_SM(0, AR5K_QOS_NOACK_BYTE_OFFSET),
AR5K_QOS_NOACK);
}
/* Restore slot time and ACK timeouts */
if (ah->ah_coverage_class > 0)
ath5k_hw_set_coverage_class(ah, ah->ah_coverage_class);
/* Set ACK bitrate mode (see ack_rates_high) */
if (ah->ah_version == AR5K_AR5212) {
u32 val = AR5K_STA_ID1_BASE_RATE_11B | AR5K_STA_ID1_ACKCTS_6MB;
if (ah->ah_ack_bitrate_high)
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, val);
else
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, val);
}
}
|
linux-master
|
drivers/net/wireless/ath/ath5k/pcu.c
|
/*
* Copyright (c) 2004-2008 Reyk Floeter <[email protected]>
* Copyright (c) 2006-2008 Nick Kossifidis <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/********************************************\
Queue Control Unit, DCF Control Unit Functions
\********************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include <linux/log2.h>
/**
* DOC: Queue Control Unit (QCU)/DCF Control Unit (DCU) functions
*
* Here we setup parameters for the 12 available TX queues. Note that
* on the various registers we can usually only map the first 10 of them so
* basically we have 10 queues to play with. Each queue has a matching
* QCU that controls when the queue will get triggered and multiple QCUs
 * can be mapped to a single DCU that controls the various DCF parameters
 * for the various queues. In our setup we have a 1:1 mapping between QCUs
 * and DCUs, allowing us to have different DCF settings for each queue.
 *
 * When a frame goes into a TX queue, the QCU decides when it'll trigger a
 * transmission based on various criteria (such as how much data we have inside
 * its buffer or -if it's a beacon queue- whether it's time to fire up the queue
 * based on TSF etc), the DCU adds backoff, IFSes etc and then a scheduler
 * (arbitrator) decides the priority of each QCU based on its configuration
 * (e.g. beacons are always transmitted when they leave the DCU, bypassing all other
* frames from other queues waiting to be transmitted). After a frame leaves
* the DCU it goes to PCU for further processing and then to PHY for
* the actual transmission.
*/
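/*
 * In short, the TX path described above (illustrative summary):
 *
 *	frame -> QCU (trigger/scheduling) -> DCU (backoff, IFS)
 *	      -> arbitrator (priority) -> PCU -> PHY (transmission)
 */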
/******************\
* Helper functions *
\******************/
/**
* ath5k_hw_num_tx_pending() - Get number of pending frames for a given queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*/
u32
ath5k_hw_num_tx_pending(struct ath5k_hw *ah, unsigned int queue)
{
u32 pending;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* Return if queue is declared inactive */
if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
return false;
/* XXX: How about AR5K_CFG_TXCNT ? */
if (ah->ah_version == AR5K_AR5210)
return false;
pending = ath5k_hw_reg_read(ah, AR5K_QUEUE_STATUS(queue));
pending &= AR5K_QCU_STS_FRMPENDCNT;
/* It's possible to have no frames pending even if TXE
 * is set. To indicate that the queue has not stopped, return
* true */
if (!pending && AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
return true;
return pending;
}
/**
* ath5k_hw_release_tx_queue() - Set a transmit queue inactive
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*/
void
ath5k_hw_release_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
if (WARN_ON(queue >= ah->ah_capabilities.cap_queues.q_tx_num))
return;
/* This queue will be skipped in further operations */
ah->ah_txq[queue].tqi_type = AR5K_TX_QUEUE_INACTIVE;
/* For SIMR setup */
AR5K_Q_DISABLE_BITS(ah->ah_txq_status, queue);
}
/**
* ath5k_cw_validate() - Make sure the given cw is valid
* @cw_req: The contention window value to check
*
* Make sure cw is a power of 2 minus 1 and smaller than 1024
*/
static u16
ath5k_cw_validate(u16 cw_req)
{
cw_req = min(cw_req, (u16)1023);
/* Check if cw_req + 1 is a power of 2 */
if (is_power_of_2(cw_req + 1))
return cw_req;
/* Check if cw_req is a power of 2 */
if (is_power_of_2(cw_req))
return cw_req - 1;
/* If neither of the above holds,
 * round up to the closest power of 2 */
cw_req = (u16) roundup_pow_of_two(cw_req) - 1;
return cw_req;
}
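/*
 * Worked examples (illustrative): 15 stays 15 (15 + 1 is a power of 2),
 * 16 becomes 15 (16 itself is a power of 2), 100 becomes 127
 * (roundup_pow_of_two(100) - 1), and 2000 is first clamped to 1023.
 */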
/**
* ath5k_hw_get_tx_queueprops() - Get properties for a transmit queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
* @queue_info: The &struct ath5k_txq_info to fill
*/
int
ath5k_hw_get_tx_queueprops(struct ath5k_hw *ah, int queue,
struct ath5k_txq_info *queue_info)
{
memcpy(queue_info, &ah->ah_txq[queue], sizeof(struct ath5k_txq_info));
return 0;
}
/**
* ath5k_hw_set_tx_queueprops() - Set properties for a transmit queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
* @qinfo: The &struct ath5k_txq_info to use
*
* Returns 0 on success or -EIO if queue is inactive
*/
int
ath5k_hw_set_tx_queueprops(struct ath5k_hw *ah, int queue,
const struct ath5k_txq_info *qinfo)
{
struct ath5k_txq_info *qi;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
qi = &ah->ah_txq[queue];
if (qi->tqi_type == AR5K_TX_QUEUE_INACTIVE)
return -EIO;
/* copy and validate values */
qi->tqi_type = qinfo->tqi_type;
qi->tqi_subtype = qinfo->tqi_subtype;
qi->tqi_flags = qinfo->tqi_flags;
/*
* According to the docs: Although the AIFS field is 8 bit wide,
* the maximum supported value is 0xFC. Setting it higher than that
* will cause the DCU to hang.
*/
qi->tqi_aifs = min(qinfo->tqi_aifs, (u8)0xFC);
qi->tqi_cw_min = ath5k_cw_validate(qinfo->tqi_cw_min);
qi->tqi_cw_max = ath5k_cw_validate(qinfo->tqi_cw_max);
qi->tqi_cbr_period = qinfo->tqi_cbr_period;
qi->tqi_cbr_overflow_limit = qinfo->tqi_cbr_overflow_limit;
qi->tqi_burst_time = qinfo->tqi_burst_time;
qi->tqi_ready_time = qinfo->tqi_ready_time;
/*XXX: Is this supported on 5210 ?*/
/*XXX: Is this correct for AR5K_WME_AC_VI,VO ???*/
if ((qinfo->tqi_type == AR5K_TX_QUEUE_DATA &&
((qinfo->tqi_subtype == AR5K_WME_AC_VI) ||
(qinfo->tqi_subtype == AR5K_WME_AC_VO))) ||
qinfo->tqi_type == AR5K_TX_QUEUE_UAPSD)
qi->tqi_flags |= AR5K_TXQ_FLAG_POST_FR_BKOFF_DIS;
return 0;
}
/**
* ath5k_hw_setup_tx_queue() - Initialize a transmit queue
* @ah: The &struct ath5k_hw
* @queue_type: One of enum ath5k_tx_queue
* @queue_info: The &struct ath5k_txq_info to use
*
* Returns 0 on success, -EINVAL on invalid arguments
*/
int
ath5k_hw_setup_tx_queue(struct ath5k_hw *ah, enum ath5k_tx_queue queue_type,
struct ath5k_txq_info *queue_info)
{
unsigned int queue;
int ret;
/*
* Get queue by type
*/
/* 5210 only has 2 queues */
if (ah->ah_capabilities.cap_queues.q_tx_num == 2) {
switch (queue_type) {
case AR5K_TX_QUEUE_DATA:
queue = AR5K_TX_QUEUE_ID_NOQCU_DATA;
break;
case AR5K_TX_QUEUE_BEACON:
case AR5K_TX_QUEUE_CAB:
queue = AR5K_TX_QUEUE_ID_NOQCU_BEACON;
break;
default:
return -EINVAL;
}
} else {
switch (queue_type) {
case AR5K_TX_QUEUE_DATA:
queue = queue_info->tqi_subtype;
break;
case AR5K_TX_QUEUE_UAPSD:
queue = AR5K_TX_QUEUE_ID_UAPSD;
break;
case AR5K_TX_QUEUE_BEACON:
queue = AR5K_TX_QUEUE_ID_BEACON;
break;
case AR5K_TX_QUEUE_CAB:
queue = AR5K_TX_QUEUE_ID_CAB;
break;
default:
return -EINVAL;
}
}
/*
* Setup internal queue structure
*/
memset(&ah->ah_txq[queue], 0, sizeof(struct ath5k_txq_info));
ah->ah_txq[queue].tqi_type = queue_type;
if (queue_info != NULL) {
queue_info->tqi_type = queue_type;
ret = ath5k_hw_set_tx_queueprops(ah, queue, queue_info);
if (ret)
return ret;
}
/*
* We use ah_txq_status to hold a temp value for
	 * the Secondary interrupt mask registers on 5211+;
	 * check out ath5k_hw_reset_tx_queue
*/
AR5K_Q_ENABLE_BITS(ah->ah_txq_status, queue);
return queue;
}
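/*
 * Example (hedged sketch, not from the driver): setting up a best-effort
 * data queue; tqi_subtype selects the WME AC for AR5K_TX_QUEUE_DATA and
 * the tuning defaults shown here are assumptions:
 *
 *	struct ath5k_txq_info qi = {
 *		.tqi_subtype = AR5K_WME_AC_BE,
 *		.tqi_aifs = AR5K_TUNE_AIFS,
 *		.tqi_cw_min = AR5K_TUNE_CWMIN,
 *		.tqi_cw_max = AR5K_TUNE_CWMAX,
 *	};
 *	int q = ath5k_hw_setup_tx_queue(ah, AR5K_TX_QUEUE_DATA, &qi);
 *	if (q < 0)
 *		return q;
 */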
/*******************************\
* Single QCU/DCU initialization *
\*******************************/
/**
* ath5k_hw_set_tx_retry_limits() - Set tx retry limits on DCU
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*
* This function is used when initializing a queue, to set
* retry limits based on ah->ah_retry_* and the chipset used.
*/
void
ath5k_hw_set_tx_retry_limits(struct ath5k_hw *ah,
unsigned int queue)
{
/* Single data queue on AR5210 */
if (ah->ah_version == AR5K_AR5210) {
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
if (queue > 0)
return;
ath5k_hw_reg_write(ah,
(tq->tqi_cw_min << AR5K_NODCU_RETRY_LMT_CW_MIN_S)
| AR5K_REG_SM(ah->ah_retry_long,
AR5K_NODCU_RETRY_LMT_SLG_RETRY)
| AR5K_REG_SM(ah->ah_retry_short,
AR5K_NODCU_RETRY_LMT_SSH_RETRY)
| AR5K_REG_SM(ah->ah_retry_long,
AR5K_NODCU_RETRY_LMT_LG_RETRY)
| AR5K_REG_SM(ah->ah_retry_short,
AR5K_NODCU_RETRY_LMT_SH_RETRY),
AR5K_NODCU_RETRY_LMT);
/* DCU on AR5211+ */
} else {
ath5k_hw_reg_write(ah,
AR5K_REG_SM(ah->ah_retry_long,
AR5K_DCU_RETRY_LMT_RTS)
| AR5K_REG_SM(ah->ah_retry_long,
AR5K_DCU_RETRY_LMT_STA_RTS)
| AR5K_REG_SM(max(ah->ah_retry_long, ah->ah_retry_short),
AR5K_DCU_RETRY_LMT_STA_DATA),
AR5K_QUEUE_DFS_RETRY_LIMIT(queue));
}
}
/**
* ath5k_hw_reset_tx_queue() - Initialize a single hw queue
* @ah: The &struct ath5k_hw
* @queue: One of enum ath5k_tx_queue_id
*
* Set DCF properties for the given transmit queue on DCU
* and configures all queue-specific parameters.
*/
int
ath5k_hw_reset_tx_queue(struct ath5k_hw *ah, unsigned int queue)
{
struct ath5k_txq_info *tq = &ah->ah_txq[queue];
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* Skip if queue inactive or if we are on AR5210
* that doesn't have QCU/DCU */
if ((ah->ah_version == AR5K_AR5210) ||
(tq->tqi_type == AR5K_TX_QUEUE_INACTIVE))
return 0;
/*
* Set contention window (cw_min/cw_max)
* and arbitrated interframe space (aifs)...
*/
ath5k_hw_reg_write(ah,
AR5K_REG_SM(tq->tqi_cw_min, AR5K_DCU_LCL_IFS_CW_MIN) |
AR5K_REG_SM(tq->tqi_cw_max, AR5K_DCU_LCL_IFS_CW_MAX) |
AR5K_REG_SM(tq->tqi_aifs, AR5K_DCU_LCL_IFS_AIFS),
AR5K_QUEUE_DFS_LOCAL_IFS(queue));
/*
* Set tx retry limits for this queue
*/
ath5k_hw_set_tx_retry_limits(ah, queue);
/*
* Set misc registers
*/
/* Enable DCU to wait for next fragment from QCU */
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
AR5K_DCU_MISC_FRAG_WAIT);
/* On Maui and Spirit use the global seqnum on DCU */
if (ah->ah_mac_version < AR5K_SREV_AR5211)
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
AR5K_DCU_MISC_SEQNUM_CTL);
/* Constant bit rate period */
if (tq->tqi_cbr_period) {
ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_cbr_period,
AR5K_QCU_CBRCFG_INTVAL) |
AR5K_REG_SM(tq->tqi_cbr_overflow_limit,
AR5K_QCU_CBRCFG_ORN_THRES),
AR5K_QUEUE_CBRCFG(queue));
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_FRSHED_CBR);
if (tq->tqi_cbr_overflow_limit)
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_CBR_THRES_ENABLE);
}
/* Ready time interval */
if (tq->tqi_ready_time && (tq->tqi_type != AR5K_TX_QUEUE_CAB))
ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_ready_time,
AR5K_QCU_RDYTIMECFG_INTVAL) |
AR5K_QCU_RDYTIMECFG_ENABLE,
AR5K_QUEUE_RDYTIMECFG(queue));
if (tq->tqi_burst_time) {
ath5k_hw_reg_write(ah, AR5K_REG_SM(tq->tqi_burst_time,
AR5K_DCU_CHAN_TIME_DUR) |
AR5K_DCU_CHAN_TIME_ENABLE,
AR5K_QUEUE_DFS_CHANNEL_TIME(queue));
if (tq->tqi_flags & AR5K_TXQ_FLAG_RDYTIME_EXP_POLICY_ENABLE)
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_RDY_VEOL_POLICY);
}
/* Enable/disable Post frame backoff */
if (tq->tqi_flags & AR5K_TXQ_FLAG_BACKOFF_DISABLE)
ath5k_hw_reg_write(ah, AR5K_DCU_MISC_POST_FR_BKOFF_DIS,
AR5K_QUEUE_DFS_MISC(queue));
/* Enable/disable fragmentation burst backoff */
if (tq->tqi_flags & AR5K_TXQ_FLAG_FRAG_BURST_BACKOFF_ENABLE)
ath5k_hw_reg_write(ah, AR5K_DCU_MISC_BACKOFF_FRAG,
AR5K_QUEUE_DFS_MISC(queue));
/*
* Set registers by queue type
*/
switch (tq->tqi_type) {
case AR5K_TX_QUEUE_BEACON:
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_FRSHED_DBA_GT |
AR5K_QCU_MISC_CBREXP_BCN_DIS |
AR5K_QCU_MISC_BCN_ENABLE);
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
AR5K_DCU_MISC_ARBLOCK_CTL_S) |
AR5K_DCU_MISC_ARBLOCK_IGNORE |
AR5K_DCU_MISC_POST_FR_BKOFF_DIS |
AR5K_DCU_MISC_BCN_ENABLE);
break;
case AR5K_TX_QUEUE_CAB:
/* XXX: use BCN_SENT_GT, if we can figure out how */
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_FRSHED_DBA_GT |
AR5K_QCU_MISC_CBREXP_DIS |
AR5K_QCU_MISC_CBREXP_BCN_DIS);
ath5k_hw_reg_write(ah, ((tq->tqi_ready_time -
(AR5K_TUNE_SW_BEACON_RESP -
AR5K_TUNE_DMA_BEACON_RESP) -
AR5K_TUNE_ADDITIONAL_SWBA_BACKOFF) * 1024) |
AR5K_QCU_RDYTIMECFG_ENABLE,
AR5K_QUEUE_RDYTIMECFG(queue));
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_DFS_MISC(queue),
(AR5K_DCU_MISC_ARBLOCK_CTL_GLOBAL <<
AR5K_DCU_MISC_ARBLOCK_CTL_S));
break;
case AR5K_TX_QUEUE_UAPSD:
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_CBREXP_DIS);
break;
case AR5K_TX_QUEUE_DATA:
default:
break;
}
/* TODO: Handle frame compression */
/*
* Enable interrupts for this tx queue
* in the secondary interrupt mask registers
*/
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXOKINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txok, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXERRINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txerr, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXURNINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txurn, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXDESCINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txdesc, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXEOLINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_txeol, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRORNINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrorn, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_CBRURNINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_cbrurn, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_QTRIGINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_qtrig, queue);
if (tq->tqi_flags & AR5K_TXQ_FLAG_TXNOFRMINT_ENABLE)
AR5K_Q_ENABLE_BITS(ah->ah_txq_imr_nofrm, queue);
/* Update secondary interrupt mask registers */
/* Filter out inactive queues */
ah->ah_txq_imr_txok &= ah->ah_txq_status;
ah->ah_txq_imr_txerr &= ah->ah_txq_status;
ah->ah_txq_imr_txurn &= ah->ah_txq_status;
ah->ah_txq_imr_txdesc &= ah->ah_txq_status;
ah->ah_txq_imr_txeol &= ah->ah_txq_status;
ah->ah_txq_imr_cbrorn &= ah->ah_txq_status;
ah->ah_txq_imr_cbrurn &= ah->ah_txq_status;
ah->ah_txq_imr_qtrig &= ah->ah_txq_status;
ah->ah_txq_imr_nofrm &= ah->ah_txq_status;
ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txok,
AR5K_SIMR0_QCU_TXOK) |
AR5K_REG_SM(ah->ah_txq_imr_txdesc,
AR5K_SIMR0_QCU_TXDESC),
AR5K_SIMR0);
ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_txerr,
AR5K_SIMR1_QCU_TXERR) |
AR5K_REG_SM(ah->ah_txq_imr_txeol,
AR5K_SIMR1_QCU_TXEOL),
AR5K_SIMR1);
/* Update SIMR2 but don't overwrite the rest of the SIMR2 settings */
AR5K_REG_DISABLE_BITS(ah, AR5K_SIMR2, AR5K_SIMR2_QCU_TXURN);
AR5K_REG_ENABLE_BITS(ah, AR5K_SIMR2,
AR5K_REG_SM(ah->ah_txq_imr_txurn,
AR5K_SIMR2_QCU_TXURN));
ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_cbrorn,
AR5K_SIMR3_QCBRORN) |
AR5K_REG_SM(ah->ah_txq_imr_cbrurn,
AR5K_SIMR3_QCBRURN),
AR5K_SIMR3);
ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_qtrig,
AR5K_SIMR4_QTRIG), AR5K_SIMR4);
/* Set TXNOFRM_QCU for the queues with TXNOFRM enabled */
ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txq_imr_nofrm,
AR5K_TXNOFRM_QCU), AR5K_TXNOFRM);
/* If no queue has TXNOFRM enabled, disable the interrupt
 * by setting AR5K_TXNOFRM to zero */
if (ah->ah_txq_imr_nofrm == 0)
ath5k_hw_reg_write(ah, 0, AR5K_TXNOFRM);
/* Set QCU mask for this DCU to save power */
AR5K_REG_WRITE_Q(ah, AR5K_QUEUE_QCUMASK(queue), queue);
return 0;
}
/**************************\
* Global QCU/DCU functions *
\**************************/
/**
* ath5k_hw_set_ifs_intervals() - Set global inter-frame spaces on DCU
* @ah: The &struct ath5k_hw
* @slot_time: Slot time in us
*
* Sets the global IFS intervals on DCU (also works on AR5210) for
* the given slot time and the current bwmode.
*/
int ath5k_hw_set_ifs_intervals(struct ath5k_hw *ah, unsigned int slot_time)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
enum nl80211_band band;
struct ieee80211_supported_band *sband;
struct ieee80211_rate *rate;
u32 ack_tx_time, eifs, eifs_clock, sifs, sifs_clock;
u32 slot_time_clock = ath5k_hw_htoclock(ah, slot_time);
u32 rate_flags, i;
if (slot_time < 6 || slot_time_clock > AR5K_SLOT_TIME_MAX)
return -EINVAL;
sifs = ath5k_hw_get_default_sifs(ah);
sifs_clock = ath5k_hw_htoclock(ah, sifs - 2);
/* EIFS
* Txtime of ack at lowest rate + SIFS + DIFS
* (DIFS = SIFS + 2 * Slot time)
*
* Note: HAL has some predefined values for EIFS
* Turbo: (37 + 2 * 6)
* Default: (74 + 2 * 9)
* Half: (149 + 2 * 13)
* Quarter: (298 + 2 * 21)
*
 * (74 + 2 * 6) for AR5210 default and turbo!
*
* According to the formula we have
* ack_tx_time = 25 for turbo and
* ack_tx_time = 42.5 * clock multiplier
* for default/half/quarter.
*
* This can't be right, 42 is what we would get
* from ath5k_hw_get_frame_dur_for_bwmode or
* ieee80211_generic_frame_duration for zero frame
 * length and without SIFS!
*
* Also we have different lowest rate for 802.11a
*/
if (channel->band == NL80211_BAND_5GHZ)
band = NL80211_BAND_5GHZ;
else
band = NL80211_BAND_2GHZ;
switch (ah->ah_bwmode) {
case AR5K_BWMODE_5MHZ:
rate_flags = IEEE80211_RATE_SUPPORTS_5MHZ;
break;
case AR5K_BWMODE_10MHZ:
rate_flags = IEEE80211_RATE_SUPPORTS_10MHZ;
break;
default:
rate_flags = 0;
break;
}
sband = &ah->sbands[band];
rate = NULL;
for (i = 0; i < sband->n_bitrates; i++) {
if ((rate_flags & sband->bitrates[i].flags) != rate_flags)
continue;
rate = &sband->bitrates[i];
break;
}
if (WARN_ON(!rate))
return -EINVAL;
ack_tx_time = ath5k_hw_get_frame_duration(ah, band, 10, rate, false);
/* ack_tx_time includes an SIFS already */
eifs = ack_tx_time + sifs + 2 * slot_time;
eifs_clock = ath5k_hw_htoclock(ah, eifs);
/* Set IFS settings on AR5210 */
if (ah->ah_version == AR5K_AR5210) {
u32 pifs, pifs_clock, difs, difs_clock;
/* Set slot time */
ath5k_hw_reg_write(ah, slot_time_clock, AR5K_SLOT_TIME);
/* Set EIFS */
eifs_clock = AR5K_REG_SM(eifs_clock, AR5K_IFS1_EIFS);
/* PIFS = Slot time + SIFS */
pifs = slot_time + sifs;
pifs_clock = ath5k_hw_htoclock(ah, pifs);
pifs_clock = AR5K_REG_SM(pifs_clock, AR5K_IFS1_PIFS);
/* DIFS = SIFS + 2 * Slot time */
difs = sifs + 2 * slot_time;
difs_clock = ath5k_hw_htoclock(ah, difs);
/* Set SIFS/DIFS */
ath5k_hw_reg_write(ah, (difs_clock <<
AR5K_IFS0_DIFS_S) | sifs_clock,
AR5K_IFS0);
/* Set PIFS/EIFS and preserve AR5K_INIT_CARR_SENSE_EN */
ath5k_hw_reg_write(ah, pifs_clock | eifs_clock |
(AR5K_INIT_CARR_SENSE_EN << AR5K_IFS1_CS_EN_S),
AR5K_IFS1);
return 0;
}
/* Set IFS slot time */
ath5k_hw_reg_write(ah, slot_time_clock, AR5K_DCU_GBL_IFS_SLOT);
/* Set EIFS interval */
ath5k_hw_reg_write(ah, eifs_clock, AR5K_DCU_GBL_IFS_EIFS);
/* Set SIFS interval in usecs */
AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
AR5K_DCU_GBL_IFS_MISC_SIFS_DUR_USEC,
sifs);
/* Set SIFS interval in clock cycles */
ath5k_hw_reg_write(ah, sifs_clock, AR5K_DCU_GBL_IFS_SIFS);
return 0;
}
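/*
 * Worked example (illustrative): with the 802.11b long-preamble defaults
 * of SIFS = 10 us and slot = 20 us, DIFS = 10 + 2 * 20 = 50 us and
 * EIFS = ack_tx_time + 10 + 50. For 802.11a (SIFS = 16 us, slot = 9 us)
 * DIFS comes out to 16 + 2 * 9 = 34 us.
 */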
/**
* ath5k_hw_init_queues() - Initialize tx queues
* @ah: The &struct ath5k_hw
*
* Initializes all tx queues based on information on
* ah->ah_txq* set by the driver
*/
int
ath5k_hw_init_queues(struct ath5k_hw *ah)
{
int i, ret;
/* TODO: HW Compression support for data queues */
/* TODO: Burst prefetch for data queues */
/*
* Reset queues and start beacon timers at the end of the reset routine
* This also sets QCU mask on each DCU for 1:1 qcu to dcu mapping
* Note: If we want we can assign multiple qcus on one dcu.
*/
if (ah->ah_version != AR5K_AR5210)
for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++) {
ret = ath5k_hw_reset_tx_queue(ah, i);
if (ret) {
ATH5K_ERR(ah,
"failed to reset TX queue #%d\n", i);
return ret;
}
}
else
/* No QCU/DCU on AR5210, just set tx
* retry limits. We set IFS parameters
 * in ath5k_hw_set_ifs_intervals */
ath5k_hw_set_tx_retry_limits(ah, 0);
/* Set the turbo flag when operating on 40MHz */
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
AR5K_REG_ENABLE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
AR5K_DCU_GBL_IFS_MISC_TURBO_MODE);
/* If we didn't set IFS timings through
* ath5k_hw_set_coverage_class make sure
* we set them here */
if (!ah->ah_coverage_class) {
unsigned int slot_time = ath5k_hw_get_default_slottime(ah);
ath5k_hw_set_ifs_intervals(ah, slot_time);
}
return 0;
}
|
linux-master
|
drivers/net/wireless/ath/ath5k/qcu.c
|
/*
* Copyright (c) 2004-2008 Reyk Floeter <[email protected]>
* Copyright (c) 2006-2008 Nick Kossifidis <[email protected]>
* Copyright (c) 2007-2008 Luis Rodriguez <[email protected]>
* Copyright (c) 2007-2008 Pavel Roskin <[email protected]>
* Copyright (c) 2007-2008 Jiri Slaby <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/****************************\
Reset function and helpers
\****************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <asm/unaligned.h>
#include <linux/pci.h> /* To determine if a card is pci-e */
#include <linux/log2.h>
#include <linux/platform_device.h>
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* DOC: Reset function and helpers
*
* Here we implement the main reset routine, used to bring the card
* to a working state and ready to receive. We also handle routines
 * that don't fit elsewhere, such as clock, sleep and power control
*/
/******************\
* Helper functions *
\******************/
/**
* ath5k_hw_register_timeout() - Poll a register for a flag/field change
* @ah: The &struct ath5k_hw
* @reg: The register to read
* @flag: The flag/field to check on the register
* @val: The field value we expect (if we check a field)
* @is_set: Instead of checking if the flag got cleared, check if it got set
*
* Some registers contain flags that indicate that an operation is
* running. We use this function to poll these registers and check
* if these flags get cleared. We also use it to poll a register
* field (containing multiple flags) until it gets a specific value.
*
 * Returns 0 on success or -EAGAIN if we exceed AR5K_TUNE_REGISTER_TIMEOUT * 15us
*/
int
ath5k_hw_register_timeout(struct ath5k_hw *ah, u32 reg, u32 flag, u32 val,
bool is_set)
{
int i;
u32 data;
for (i = AR5K_TUNE_REGISTER_TIMEOUT; i > 0; i--) {
data = ath5k_hw_reg_read(ah, reg);
if (is_set && (data & flag))
break;
else if ((data & flag) == val)
break;
udelay(15);
}
return (i <= 0) ? -EAGAIN : 0;
}
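/*
 * Example (hedged sketch, mirroring the call in ath5k_hw_nic_reset()
 * below): poll AR5K_RESET_CTL until the requested reset bits clear.
 *
 *	if (ath5k_hw_register_timeout(ah, AR5K_RESET_CTL, mask, 0, false))
 *		return -EAGAIN;
 */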
/*************************\
* Clock related functions *
\*************************/
/**
* ath5k_hw_htoclock() - Translate usec to hw clock units
* @ah: The &struct ath5k_hw
* @usec: value in microseconds
*
* Translate usecs to hw clock units based on the current
* hw clock rate.
*
* Returns number of clock units
*/
unsigned int
ath5k_hw_htoclock(struct ath5k_hw *ah, unsigned int usec)
{
struct ath_common *common = ath5k_hw_common(ah);
return usec * common->clockrate;
}
/**
* ath5k_hw_clocktoh() - Translate hw clock units to usec
* @ah: The &struct ath5k_hw
* @clock: value in hw clock units
*
* Translate hw clock units to usecs based on the current
* hw clock rate.
*
* Returns number of usecs
*/
unsigned int
ath5k_hw_clocktoh(struct ath5k_hw *ah, unsigned int clock)
{
struct ath_common *common = ath5k_hw_common(ah);
return clock / common->clockrate;
}
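/*
 * Worked example (illustrative): in 802.11g mode the core clock is 44 MHz
 * (see ath5k_hw_init_core_clock() below), so ath5k_hw_htoclock(ah, 10)
 * returns 440 clock units and ath5k_hw_clocktoh(ah, 880) returns 20 usec.
 */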
/**
* ath5k_hw_init_core_clock() - Initialize core clock
* @ah: The &struct ath5k_hw
*
* Initialize core clock parameters (usec, usec32, latencies etc),
* based on current bwmode and chipset properties.
*/
static void
ath5k_hw_init_core_clock(struct ath5k_hw *ah)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
struct ath_common *common = ath5k_hw_common(ah);
u32 usec_reg, txlat, rxlat, usec, clock, sclock, txf2txs;
/*
* Set core clock frequency
*/
switch (channel->hw_value) {
case AR5K_MODE_11A:
clock = 40;
break;
case AR5K_MODE_11B:
clock = 22;
break;
case AR5K_MODE_11G:
default:
clock = 44;
break;
}
/* Use clock multiplier for non-default
* bwmode */
switch (ah->ah_bwmode) {
case AR5K_BWMODE_40MHZ:
clock *= 2;
break;
case AR5K_BWMODE_10MHZ:
clock /= 2;
break;
case AR5K_BWMODE_5MHZ:
clock /= 4;
break;
default:
break;
}
common->clockrate = clock;
/*
* Set USEC parameters
*/
/* Set USEC counter on PCU*/
usec = clock - 1;
usec = AR5K_REG_SM(usec, AR5K_USEC_1);
/* Set usec duration on DCU */
if (ah->ah_version != AR5K_AR5210)
AR5K_REG_WRITE_BITS(ah, AR5K_DCU_GBL_IFS_MISC,
AR5K_DCU_GBL_IFS_MISC_USEC_DUR,
clock);
/* Set 32MHz USEC counter */
if ((ah->ah_radio == AR5K_RF5112) ||
(ah->ah_radio == AR5K_RF2413) ||
(ah->ah_radio == AR5K_RF5413) ||
(ah->ah_radio == AR5K_RF2316) ||
(ah->ah_radio == AR5K_RF2317))
/* Remain on 40MHz clock ? */
sclock = 40 - 1;
else
sclock = 32 - 1;
sclock = AR5K_REG_SM(sclock, AR5K_USEC_32);
/*
* Set tx/rx latencies
*/
usec_reg = ath5k_hw_reg_read(ah, AR5K_USEC_5211);
txlat = AR5K_REG_MS(usec_reg, AR5K_USEC_TX_LATENCY_5211);
rxlat = AR5K_REG_MS(usec_reg, AR5K_USEC_RX_LATENCY_5211);
/*
* Set default Tx frame to Tx data start delay
*/
txf2txs = AR5K_INIT_TXF2TXD_START_DEFAULT;
/*
* 5210 initvals don't include usec settings
* so we need to use magic values here for
* tx/rx latencies
*/
if (ah->ah_version == AR5K_AR5210) {
/* same for turbo */
txlat = AR5K_INIT_TX_LATENCY_5210;
rxlat = AR5K_INIT_RX_LATENCY_5210;
}
if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
/* 5311 has different tx/rx latency masks
		 * from 5211; since we treat 5311 the same
		 * as 5211 when setting initvals, shift
		 * values here to their proper locations
		 *
		 * Note: Initvals indicate tx/rx latencies
* are the same for turbo mode */
txlat = AR5K_REG_SM(txlat, AR5K_USEC_TX_LATENCY_5210);
rxlat = AR5K_REG_SM(rxlat, AR5K_USEC_RX_LATENCY_5210);
} else
switch (ah->ah_bwmode) {
case AR5K_BWMODE_10MHZ:
txlat = AR5K_REG_SM(txlat * 2,
AR5K_USEC_TX_LATENCY_5211);
rxlat = AR5K_REG_SM(AR5K_INIT_RX_LAT_MAX,
AR5K_USEC_RX_LATENCY_5211);
txf2txs = AR5K_INIT_TXF2TXD_START_DELAY_10MHZ;
break;
case AR5K_BWMODE_5MHZ:
txlat = AR5K_REG_SM(txlat * 4,
AR5K_USEC_TX_LATENCY_5211);
rxlat = AR5K_REG_SM(AR5K_INIT_RX_LAT_MAX,
AR5K_USEC_RX_LATENCY_5211);
txf2txs = AR5K_INIT_TXF2TXD_START_DELAY_5MHZ;
break;
case AR5K_BWMODE_40MHZ:
txlat = AR5K_INIT_TX_LAT_MIN;
rxlat = AR5K_REG_SM(rxlat / 2,
AR5K_USEC_RX_LATENCY_5211);
txf2txs = AR5K_INIT_TXF2TXD_START_DEFAULT;
break;
default:
break;
}
usec_reg = (usec | sclock | txlat | rxlat);
ath5k_hw_reg_write(ah, usec_reg, AR5K_USEC);
/* On 5112 set tx frame to tx data start delay */
if (ah->ah_radio == AR5K_RF5112) {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RF_CTL2,
AR5K_PHY_RF_CTL2_TXF2TXD_START,
txf2txs);
}
}
/**
* ath5k_hw_set_sleep_clock() - Setup sleep clock operation
* @ah: The &struct ath5k_hw
* @enable: Enable sleep clock operation (false to disable)
*
* If there is an external 32KHz crystal available, use it
* as ref. clock instead of 32/40MHz clock and baseband clocks
* to save power during sleep or restore normal 32/40MHz
* operation.
*
* NOTE: When operating on 32KHz certain PHY registers (27 - 31,
* 123 - 127) require delay on access.
*/
static void
ath5k_hw_set_sleep_clock(struct ath5k_hw *ah, bool enable)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 scal, spending, sclock;
/* Only set 32KHz settings if we have an external
* 32KHz crystal present */
if ((AR5K_EEPROM_HAS32KHZCRYSTAL(ee->ee_misc1) ||
AR5K_EEPROM_HAS32KHZCRYSTAL_OLD(ee->ee_misc1)) &&
enable) {
/* 1 usec/cycle */
AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, 1);
/* Set up tsf increment on each cycle */
AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 61);
/* Set baseband sleep control registers
* and sleep control rate */
ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
if ((ah->ah_radio == AR5K_RF5112) ||
(ah->ah_radio == AR5K_RF5413) ||
(ah->ah_radio == AR5K_RF2316) ||
(ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
spending = 0x14;
else
spending = 0x18;
ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
if ((ah->ah_radio == AR5K_RF5112) ||
(ah->ah_radio == AR5K_RF5413) ||
(ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
ath5k_hw_reg_write(ah, 0x26, AR5K_PHY_SLMT);
ath5k_hw_reg_write(ah, 0x0d, AR5K_PHY_SCAL);
ath5k_hw_reg_write(ah, 0x07, AR5K_PHY_SCLOCK);
ath5k_hw_reg_write(ah, 0x3f, AR5K_PHY_SDELAY);
AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x02);
} else {
ath5k_hw_reg_write(ah, 0x0a, AR5K_PHY_SLMT);
ath5k_hw_reg_write(ah, 0x0c, AR5K_PHY_SCAL);
ath5k_hw_reg_write(ah, 0x03, AR5K_PHY_SCLOCK);
ath5k_hw_reg_write(ah, 0x20, AR5K_PHY_SDELAY);
AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
AR5K_PCICFG_SLEEP_CLOCK_RATE, 0x03);
}
/* Enable sleep clock operation */
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG,
AR5K_PCICFG_SLEEP_CLOCK_EN);
} else {
/* Disable sleep clock operation and
* restore default parameters */
AR5K_REG_DISABLE_BITS(ah, AR5K_PCICFG,
AR5K_PCICFG_SLEEP_CLOCK_EN);
AR5K_REG_WRITE_BITS(ah, AR5K_PCICFG,
AR5K_PCICFG_SLEEP_CLOCK_RATE, 0);
/* Set DAC/ADC delays */
ath5k_hw_reg_write(ah, 0x1f, AR5K_PHY_SCR);
ath5k_hw_reg_write(ah, AR5K_PHY_SLMT_32MHZ, AR5K_PHY_SLMT);
if (ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))
scal = AR5K_PHY_SCAL_32MHZ_2417;
else if (ee->ee_is_hb63)
scal = AR5K_PHY_SCAL_32MHZ_HB63;
else
scal = AR5K_PHY_SCAL_32MHZ;
ath5k_hw_reg_write(ah, scal, AR5K_PHY_SCAL);
ath5k_hw_reg_write(ah, AR5K_PHY_SCLOCK_32MHZ, AR5K_PHY_SCLOCK);
ath5k_hw_reg_write(ah, AR5K_PHY_SDELAY_32MHZ, AR5K_PHY_SDELAY);
if ((ah->ah_radio == AR5K_RF5112) ||
(ah->ah_radio == AR5K_RF5413) ||
(ah->ah_radio == AR5K_RF2316) ||
(ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
spending = 0x14;
else
spending = 0x18;
ath5k_hw_reg_write(ah, spending, AR5K_PHY_SPENDING);
/* Set up tsf increment on each cycle */
AR5K_REG_WRITE_BITS(ah, AR5K_TSF_PARM, AR5K_TSF_PARM_INC, 1);
if ((ah->ah_radio == AR5K_RF5112) ||
(ah->ah_radio == AR5K_RF5413) ||
(ah->ah_radio == AR5K_RF2316) ||
(ah->ah_radio == AR5K_RF2317))
sclock = 40 - 1;
else
sclock = 32 - 1;
AR5K_REG_WRITE_BITS(ah, AR5K_USEC_5211, AR5K_USEC_32, sclock);
}
}
/*********************\
* Reset/Sleep control *
\*********************/
/**
* ath5k_hw_nic_reset() - Reset the various chipset units
* @ah: The &struct ath5k_hw
* @val: Mask to indicate what units to reset
*
* To reset the various chipset units we need to write
* the mask to AR5K_RESET_CTL and poll the register until
* all flags are cleared.
*
 * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
static int
ath5k_hw_nic_reset(struct ath5k_hw *ah, u32 val)
{
int ret;
u32 mask = val ? val : ~0U;
/* Read-and-clear RX Descriptor Pointer */
ath5k_hw_reg_read(ah, AR5K_RXDP);
/*
* Reset the device and wait until success
*/
ath5k_hw_reg_write(ah, val, AR5K_RESET_CTL);
/* Wait at least 128 PCI clocks */
usleep_range(15, 20);
if (ah->ah_version == AR5K_AR5210) {
val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
| AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY;
mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_DMA
| AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_PHY;
} else {
val &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
mask &= AR5K_RESET_CTL_PCU | AR5K_RESET_CTL_BASEBAND;
}
ret = ath5k_hw_register_timeout(ah, AR5K_RESET_CTL, mask, val, false);
/*
* Reset configuration register (for hw byte-swap). Note that this
* is only set for big endian. We do the necessary magic in
* AR5K_INIT_CFG.
*/
if ((val & AR5K_RESET_CTL_PCU) == 0)
ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
return ret;
}
/**
* ath5k_hw_wisoc_reset() - Reset AHB chipset
* @ah: The &struct ath5k_hw
* @flags: Mask to indicate what units to reset
*
* Same as ath5k_hw_nic_reset but for AHB based devices
*
 * Returns 0 if we are O.K. or -EAGAIN (from ath5k_hw_register_timeout)
*/
static int
ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
{
u32 mask = flags ? flags : ~0U;
u32 __iomem *reg;
u32 regval;
u32 val = 0;
/* ah->ah_mac_srev is not available at this point yet */
if (ah->devid >= AR5K_SREV_AR2315_R6) {
reg = (u32 __iomem *) AR5K_AR2315_RESET;
if (mask & AR5K_RESET_CTL_PCU)
val |= AR5K_AR2315_RESET_WMAC;
if (mask & AR5K_RESET_CTL_BASEBAND)
val |= AR5K_AR2315_RESET_BB_WARM;
} else {
reg = (u32 __iomem *) AR5K_AR5312_RESET;
if (to_platform_device(ah->dev)->id == 0) {
if (mask & AR5K_RESET_CTL_PCU)
val |= AR5K_AR5312_RESET_WMAC0;
if (mask & AR5K_RESET_CTL_BASEBAND)
val |= AR5K_AR5312_RESET_BB0_COLD |
AR5K_AR5312_RESET_BB0_WARM;
} else {
if (mask & AR5K_RESET_CTL_PCU)
val |= AR5K_AR5312_RESET_WMAC1;
if (mask & AR5K_RESET_CTL_BASEBAND)
val |= AR5K_AR5312_RESET_BB1_COLD |
AR5K_AR5312_RESET_BB1_WARM;
}
}
/* Put BB/MAC into reset */
regval = ioread32(reg);
iowrite32(regval | val, reg);
regval = ioread32(reg);
udelay(100); /* NB: should be atomic */
/* Bring BB/MAC out of reset */
iowrite32(regval & ~val, reg);
regval = ioread32(reg);
/*
* Reset configuration register (for hw byte-swap). Note that this
* is only set for big endian. We do the necessary magic in
* AR5K_INIT_CFG.
*/
if ((flags & AR5K_RESET_CTL_PCU) == 0)
ath5k_hw_reg_write(ah, AR5K_INIT_CFG, AR5K_CFG);
return 0;
}
/**
* ath5k_hw_set_power_mode() - Set power mode
* @ah: The &struct ath5k_hw
* @mode: One of enum ath5k_power_mode
* @set_chip: Set to true to write sleep control register
* @sleep_duration: How much time the device is allowed to sleep
* when sleep logic is enabled (in 128 microsecond increments).
*
* This function is used to configure sleep policy and allowed
* sleep modes. For more information check out the sleep control
* register on reg.h and STA_ID1.
*
* Returns 0 on success, -EIO if chip didn't wake up or -EINVAL if an invalid
* mode is requested.
*/
static int
ath5k_hw_set_power_mode(struct ath5k_hw *ah, enum ath5k_power_mode mode,
bool set_chip, u16 sleep_duration)
{
unsigned int i;
u32 staid, data;
staid = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
switch (mode) {
case AR5K_PM_AUTO:
staid &= ~AR5K_STA_ID1_DEFAULT_ANTENNA;
fallthrough;
case AR5K_PM_NETWORK_SLEEP:
if (set_chip)
ath5k_hw_reg_write(ah,
AR5K_SLEEP_CTL_SLE_ALLOW |
sleep_duration,
AR5K_SLEEP_CTL);
staid |= AR5K_STA_ID1_PWR_SV;
break;
case AR5K_PM_FULL_SLEEP:
if (set_chip)
ath5k_hw_reg_write(ah, AR5K_SLEEP_CTL_SLE_SLP,
AR5K_SLEEP_CTL);
staid |= AR5K_STA_ID1_PWR_SV;
break;
case AR5K_PM_AWAKE:
staid &= ~AR5K_STA_ID1_PWR_SV;
if (!set_chip)
goto commit;
data = ath5k_hw_reg_read(ah, AR5K_SLEEP_CTL);
/* If card is down we'll get 0xffff... so we
* need to clean this up before we write the register
*/
if (data & 0xffc00000)
data = 0;
else
/* Preserve sleep duration etc */
data = data & ~AR5K_SLEEP_CTL_SLE;
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
usleep_range(15, 20);
for (i = 200; i > 0; i--) {
/* Check if the chip did wake up */
if ((ath5k_hw_reg_read(ah, AR5K_PCICFG) &
AR5K_PCICFG_SPWR_DN) == 0)
break;
/* Wait a bit and retry */
usleep_range(50, 75);
ath5k_hw_reg_write(ah, data | AR5K_SLEEP_CTL_SLE_WAKE,
AR5K_SLEEP_CTL);
}
/* Fail if the chip didn't wake up */
if (i == 0)
return -EIO;
break;
default:
return -EINVAL;
}
commit:
ath5k_hw_reg_write(ah, staid, AR5K_STA_ID1);
return 0;
}
/**
* ath5k_hw_on_hold() - Put device on hold
* @ah: The &struct ath5k_hw
*
* Put MAC and Baseband on warm reset and keep that state
* (don't clean sleep control register). After this MAC
* and Baseband are disabled and a full reset is needed
* to come back. This way we save as much power as possible
* without putting the card on full sleep.
*
* Returns 0 on success or -EIO on error
*/
int
ath5k_hw_on_hold(struct ath5k_hw *ah)
{
struct pci_dev *pdev = ah->pdev;
u32 bus_flags;
int ret;
if (ath5k_get_bus_type(ah) == ATH_AHB)
return 0;
/* Make sure device is awake */
ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
}
/*
* Put chipset on warm reset...
*
* Note: putting PCI core on warm reset on PCI-E cards
 * causes the card to hang and always return 0xffff... so
* we ignore that flag for PCI-E cards. On PCI cards
* this flag gets cleared after 64 PCI clocks.
*/
bus_flags = (pdev && pci_is_pcie(pdev)) ? 0 : AR5K_RESET_CTL_PCI;
if (ah->ah_version == AR5K_AR5210) {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
usleep_range(2000, 2500);
} else {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_BASEBAND | bus_flags);
}
if (ret) {
ATH5K_ERR(ah, "failed to put device on warm reset\n");
return -EIO;
}
/* ...wakeup again!*/
ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to put device on hold\n");
return ret;
}
return ret;
}
/**
* ath5k_hw_nic_wakeup() - Force card out of sleep
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* Bring up MAC + PHY Chips and program PLL
* NOTE: Channel is NULL for the initial wakeup.
*
 * Returns 0 on success, -EIO on hw failure or -EINVAL for invalid channel info
*/
int
ath5k_hw_nic_wakeup(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
struct pci_dev *pdev = ah->pdev;
u32 turbo, mode, clock, bus_flags;
int ret;
turbo = 0;
mode = 0;
clock = 0;
if ((ath5k_get_bus_type(ah) != ATH_AHB) || channel) {
/* Wakeup the device */
ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to wakeup the MAC Chip\n");
return ret;
}
}
/*
* Put chipset on warm reset...
*
* Note: putting PCI core on warm reset on PCI-E cards
 * causes the card to hang and always return 0xffff... so
* we ignore that flag for PCI-E cards. On PCI cards
* this flag gets cleared after 64 PCI clocks.
*/
bus_flags = (pdev && pci_is_pcie(pdev)) ? 0 : AR5K_RESET_CTL_PCI;
if (ah->ah_version == AR5K_AR5210) {
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_MAC | AR5K_RESET_CTL_DMA |
AR5K_RESET_CTL_PHY | AR5K_RESET_CTL_PCI);
usleep_range(2000, 2500);
} else {
if (ath5k_get_bus_type(ah) == ATH_AHB)
ret = ath5k_hw_wisoc_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_BASEBAND);
else
ret = ath5k_hw_nic_reset(ah, AR5K_RESET_CTL_PCU |
AR5K_RESET_CTL_BASEBAND | bus_flags);
}
if (ret) {
ATH5K_ERR(ah, "failed to reset the MAC Chip\n");
return -EIO;
}
/* ...wakeup again!...*/
ret = ath5k_hw_set_power_mode(ah, AR5K_PM_AWAKE, true, 0);
if (ret) {
ATH5K_ERR(ah, "failed to resume the MAC Chip\n");
return ret;
}
/* ...reset configuration register on Wisoc ...
* ...clear reset control register and pull device out of
* warm reset on others */
if (ath5k_get_bus_type(ah) == ATH_AHB)
ret = ath5k_hw_wisoc_reset(ah, 0);
else
ret = ath5k_hw_nic_reset(ah, 0);
if (ret) {
ATH5K_ERR(ah, "failed to warm reset the MAC Chip\n");
return -EIO;
}
/* On initialization skip PLL programming since we don't have
* a channel / mode set yet */
if (!channel)
return 0;
if (ah->ah_version != AR5K_AR5210) {
/*
* Get channel mode flags
*/
if (ah->ah_radio >= AR5K_RF5112) {
mode = AR5K_PHY_MODE_RAD_RF5112;
clock = AR5K_PHY_PLL_RF5112;
} else {
mode = AR5K_PHY_MODE_RAD_RF5111; /*Zero*/
clock = AR5K_PHY_PLL_RF5111; /*Zero*/
}
if (channel->band == NL80211_BAND_2GHZ) {
mode |= AR5K_PHY_MODE_FREQ_2GHZ;
clock |= AR5K_PHY_PLL_44MHZ;
if (channel->hw_value == AR5K_MODE_11B) {
mode |= AR5K_PHY_MODE_MOD_CCK;
} else {
/* XXX Dynamic OFDM/CCK is not supported by the
* AR5211 so we set MOD_OFDM for plain g (no
* CCK headers) operation. We need to test
* this, 5211 might support ofdm-only g after
* all, there are also initial register values
* in the code for g mode (see initvals.c).
*/
if (ah->ah_version == AR5K_AR5211)
mode |= AR5K_PHY_MODE_MOD_OFDM;
else
mode |= AR5K_PHY_MODE_MOD_DYN;
}
} else if (channel->band == NL80211_BAND_5GHZ) {
mode |= (AR5K_PHY_MODE_FREQ_5GHZ |
AR5K_PHY_MODE_MOD_OFDM);
/* Different PLL setting for 5413 */
if (ah->ah_radio == AR5K_RF5413)
clock = AR5K_PHY_PLL_40MHZ_5413;
else
clock |= AR5K_PHY_PLL_40MHZ;
} else {
ATH5K_ERR(ah, "invalid radio frequency mode\n");
return -EINVAL;
}
/*XXX: Can bwmode be used with dynamic mode ?
* (I don't think it supports 44MHz) */
/* On 2425 initvals TURBO_SHORT is not present */
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
turbo = AR5K_PHY_TURBO_MODE;
if (ah->ah_radio != AR5K_RF2425)
turbo |= AR5K_PHY_TURBO_SHORT;
} else if (ah->ah_bwmode != AR5K_BWMODE_DEFAULT) {
if (ah->ah_radio == AR5K_RF5413) {
mode |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ?
AR5K_PHY_MODE_HALF_RATE :
AR5K_PHY_MODE_QUARTER_RATE;
} else if (ah->ah_version == AR5K_AR5212) {
clock |= (ah->ah_bwmode == AR5K_BWMODE_10MHZ) ?
AR5K_PHY_PLL_HALF_RATE :
AR5K_PHY_PLL_QUARTER_RATE;
}
}
} else { /* Reset the device */
/* ...enable Atheros turbo mode if requested */
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
ath5k_hw_reg_write(ah, AR5K_PHY_TURBO_MODE,
AR5K_PHY_TURBO);
}
if (ah->ah_version != AR5K_AR5210) {
/* ...update PLL if needed */
if (ath5k_hw_reg_read(ah, AR5K_PHY_PLL) != clock) {
ath5k_hw_reg_write(ah, clock, AR5K_PHY_PLL);
usleep_range(300, 350);
}
/* ...set the PHY operating mode */
ath5k_hw_reg_write(ah, mode, AR5K_PHY_MODE);
ath5k_hw_reg_write(ah, turbo, AR5K_PHY_TURBO);
}
return 0;
}
/**************************************\
* Post-initvals register modifications *
\**************************************/
/**
* ath5k_hw_tweak_initval_settings() - Tweak initial settings
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* Some settings are not handled on initvals, e.g. bwmode
* settings, some phy settings, workarounds etc that in general
* don't fit anywhere else or are too small to introduce a separate
* function for each one. So we have this function to handle
 * them all during reset and complete the card's initialization.
*/
static void
ath5k_hw_tweak_initval_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
if (ah->ah_version == AR5K_AR5212 &&
ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
/* Setup ADC control */
ath5k_hw_reg_write(ah,
(AR5K_REG_SM(2,
AR5K_PHY_ADC_CTL_INBUFGAIN_OFF) |
AR5K_REG_SM(2,
AR5K_PHY_ADC_CTL_INBUFGAIN_ON) |
AR5K_PHY_ADC_CTL_PWD_DAC_OFF |
AR5K_PHY_ADC_CTL_PWD_ADC_OFF),
AR5K_PHY_ADC_CTL);
/* Disable barker RSSI threshold */
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_DAG_CCK_CTL,
AR5K_PHY_DAG_CCK_CTL_EN_RSSI_THR);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DAG_CCK_CTL,
AR5K_PHY_DAG_CCK_CTL_RSSI_THR, 2);
/* Set the mute mask */
ath5k_hw_reg_write(ah, 0x0000000f, AR5K_SEQ_MASK);
}
/* Clear PHY_BLUETOOTH to allow RX_CLEAR line debug */
if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212B)
ath5k_hw_reg_write(ah, 0, AR5K_PHY_BLUETOOTH);
/* Enable DCU double buffering */
if (ah->ah_phy_revision > AR5K_SREV_PHY_5212B)
AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
AR5K_TXCFG_DCU_DBL_BUF_DIS);
/* Set fast ADC */
if ((ah->ah_radio == AR5K_RF5413) ||
(ah->ah_radio == AR5K_RF2317) ||
(ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4))) {
u32 fast_adc = true;
if (channel->center_freq == 2462 ||
channel->center_freq == 2467)
fast_adc = false;
/* Only update if needed */
if (ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ADC) != fast_adc)
ath5k_hw_reg_write(ah, fast_adc,
AR5K_PHY_FAST_ADC);
}
/* Fix for first revision of the RF5112 RF chipset */
if (ah->ah_radio == AR5K_RF5112 &&
ah->ah_radio_5ghz_revision <
AR5K_SREV_RAD_5112A) {
u32 data;
ath5k_hw_reg_write(ah, AR5K_PHY_CCKTXCTL_WORLD,
AR5K_PHY_CCKTXCTL);
if (channel->band == NL80211_BAND_5GHZ)
data = 0xffb81020;
else
data = 0xffb80d20;
ath5k_hw_reg_write(ah, data, AR5K_PHY_FRAME_CTL);
}
if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
/* Clear QCU/DCU clock gating register */
ath5k_hw_reg_write(ah, 0, AR5K_QCUDCU_CLKGT);
/* Set DAC/ADC delays */
ath5k_hw_reg_write(ah, AR5K_PHY_SCAL_32MHZ_5311,
AR5K_PHY_SCAL);
/* Enable PCU FIFO corruption ECO */
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
AR5K_DIAG_SW_ECO_ENABLE);
}
if (ah->ah_bwmode) {
/* Increase PHY switch and AGC settling time
* on turbo mode (ath5k_hw_commit_eeprom_settings
* will override settling time if available) */
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ) {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
AR5K_PHY_SETTLING_AGC,
AR5K_AGC_SETTLING_TURBO);
/* XXX: Initvals indicate we only increase
* the switch time on AR5212; 5211 and 5210
* only change the AGC time (bug?) */
if (ah->ah_version == AR5K_AR5212)
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
AR5K_PHY_SETTLING_SWITCH,
AR5K_SWITCH_SETTLING_TURBO);
if (ah->ah_version == AR5K_AR5210) {
/* Set Frame Control Register */
ath5k_hw_reg_write(ah,
(AR5K_PHY_FRAME_CTL_INI |
AR5K_PHY_TURBO_MODE |
AR5K_PHY_TURBO_SHORT | 0x2020),
AR5K_PHY_FRAME_CTL_5210);
}
/* On 5413 PHY force window length for half/quarter rate */
} else if ((ah->ah_mac_srev >= AR5K_SREV_AR5424) &&
(ah->ah_mac_srev <= AR5K_SREV_AR5414)) {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL_5211,
AR5K_PHY_FRAME_CTL_WIN_LEN,
3);
}
} else if (ah->ah_version == AR5K_AR5210) {
/* Set Frame Control Register for normal operation */
ath5k_hw_reg_write(ah, (AR5K_PHY_FRAME_CTL_INI | 0x1020),
AR5K_PHY_FRAME_CTL_5210);
}
}
/**
* ath5k_hw_commit_eeprom_settings() - Commit settings from EEPROM
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* Use settings stored in the EEPROM to properly initialize the card
* based on various EEPROM information and per-mode calibration data.
*/
static void
ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
s16 cck_ofdm_pwr_delta;
u8 ee_mode;
/* TODO: Add support for AR5210 EEPROM */
if (ah->ah_version == AR5K_AR5210)
return;
ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
/* Adjust power delta for channel 14 */
if (channel->center_freq == 2484)
cck_ofdm_pwr_delta =
((ee->ee_cck_ofdm_power_delta -
ee->ee_scaled_cck_delta) * 2) / 10;
else
cck_ofdm_pwr_delta =
(ee->ee_cck_ofdm_power_delta * 2) / 10;
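/* Worked example (editor's note, hypothetical EEPROM values): with
* ee_cck_ofdm_power_delta = 15 and ee_scaled_cck_delta = 5, channel 14
* yields ((15 - 5) * 2) / 10 = 2, while any other channel yields
* (15 * 2) / 10 = 3. */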
/* Set CCK to OFDM power delta on tx power
* adjustment register */
if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
if (channel->hw_value == AR5K_MODE_11G)
ath5k_hw_reg_write(ah,
AR5K_REG_SM((ee->ee_cck_ofdm_gain_delta * -1),
AR5K_PHY_TX_PWR_ADJ_CCK_GAIN_DELTA) |
AR5K_REG_SM((cck_ofdm_pwr_delta * -1),
AR5K_PHY_TX_PWR_ADJ_CCK_PCDAC_INDEX),
AR5K_PHY_TX_PWR_ADJ);
else
ath5k_hw_reg_write(ah, 0, AR5K_PHY_TX_PWR_ADJ);
} else {
/* For older revs we scale power on sw during tx power
* setup */
ah->ah_txpower.txp_cck_ofdm_pwr_delta = cck_ofdm_pwr_delta;
ah->ah_txpower.txp_cck_ofdm_gainf_delta =
ee->ee_cck_ofdm_gain_delta;
}
/* XXX: necessary here? It is also called from
* ath5k_hw_set_antenna_mode() */
ath5k_hw_set_antenna_switch(ah, ee_mode);
/* Noise floor threshold */
ath5k_hw_reg_write(ah,
AR5K_PHY_NF_SVAL(ee->ee_noise_floor_thr[ee_mode]),
AR5K_PHY_NFTHRES);
if ((ah->ah_bwmode == AR5K_BWMODE_40MHZ) &&
(ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0)) {
/* Switch settling time (Turbo) */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
AR5K_PHY_SETTLING_SWITCH,
ee->ee_switch_settling_turbo[ee_mode]);
/* Tx/Rx attenuation (Turbo) */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN,
AR5K_PHY_GAIN_TXRX_ATTEN,
ee->ee_atn_tx_rx_turbo[ee_mode]);
/* ADC/PGA desired size (Turbo) */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
AR5K_PHY_DESIRED_SIZE_ADC,
ee->ee_adc_desired_size_turbo[ee_mode]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
AR5K_PHY_DESIRED_SIZE_PGA,
ee->ee_pga_desired_size_turbo[ee_mode]);
/* Tx/Rx margin (Turbo) */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
ee->ee_margin_tx_rx_turbo[ee_mode]);
} else {
/* Switch settling time */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SETTLING,
AR5K_PHY_SETTLING_SWITCH,
ee->ee_switch_settling[ee_mode]);
/* Tx/Rx attenuation */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN,
AR5K_PHY_GAIN_TXRX_ATTEN,
ee->ee_atn_tx_rx[ee_mode]);
/* ADC/PGA desired size */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
AR5K_PHY_DESIRED_SIZE_ADC,
ee->ee_adc_desired_size[ee_mode]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
AR5K_PHY_DESIRED_SIZE_PGA,
ee->ee_pga_desired_size[ee_mode]);
/* Tx/Rx margin */
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_GAIN_2GHZ,
AR5K_PHY_GAIN_2GHZ_MARGIN_TXRX,
ee->ee_margin_tx_rx[ee_mode]);
}
/* XPA delays */
ath5k_hw_reg_write(ah,
(ee->ee_tx_end2xpa_disable[ee_mode] << 24) |
(ee->ee_tx_end2xpa_disable[ee_mode] << 16) |
(ee->ee_tx_frm2xpa_enable[ee_mode] << 8) |
(ee->ee_tx_frm2xpa_enable[ee_mode]), AR5K_PHY_RF_CTL4);
/* XLNA delay */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RF_CTL3,
AR5K_PHY_RF_CTL3_TXE2XLNA_ON,
ee->ee_tx_end2xlna_enable[ee_mode]);
/* Thresh62 (ANI) */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_NF,
AR5K_PHY_NF_THRESH62,
ee->ee_thr_62[ee_mode]);
/* False detect backoff for channels
* that have spur noise. Write the new
* cyclic power RSSI threshold. */
if (ath5k_hw_chan_has_spur_noise(ah, channel))
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1,
AR5K_INIT_CYCRSSI_THR1 +
ee->ee_false_detect[ee_mode]);
else
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1,
AR5K_INIT_CYCRSSI_THR1);
/* I/Q correction (set enable bit last to match HAL sources) */
/* TODO: Per channel i/q infos ? */
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_I_COFF,
ee->ee_i_cal[ee_mode]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_Q_COFF,
ee->ee_q_cal[ee_mode]);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE);
}
/* Heavy clipping - disable for now */
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_1)
ath5k_hw_reg_write(ah, 0, AR5K_PHY_HEAVY_CLIP_ENABLE);
}
/*********************\
* Main reset function *
\*********************/
/**
* ath5k_hw_reset() - The main reset function
* @ah: The &struct ath5k_hw
* @op_mode: One of enum nl80211_iftype
* @channel: The &struct ieee80211_channel
* @fast: Enable fast channel switching
* @skip_pcu: Skip pcu initialization
*
* This is the function we call each time we want to (re)initialize the
* card and pass new settings to hw. We also call it when hw runs into
* trouble to make it come back to a working state.
*
* Returns 0 on success, -EINVAL on invalid op_mode or channel info, or
* -EIO on failure.
*/
int
ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
struct ieee80211_channel *channel, bool fast, bool skip_pcu)
{
u32 s_seq[10], s_led[3], tsf_up, tsf_lo;
u8 mode;
int i, ret;
tsf_up = 0;
tsf_lo = 0;
mode = 0;
/*
* Sanity check for fast flag
* Fast channel change only available
* on AR2413/AR5413.
*/
if (fast && (ah->ah_radio != AR5K_RF2413) &&
(ah->ah_radio != AR5K_RF5413))
fast = false;
/* Disable sleep clock operation
* to avoid register access delay on certain
* PHY registers */
if (ah->ah_version == AR5K_AR5212)
ath5k_hw_set_sleep_clock(ah, false);
mode = channel->hw_value;
switch (mode) {
case AR5K_MODE_11A:
break;
case AR5K_MODE_11G:
if (ah->ah_version <= AR5K_AR5211) {
ATH5K_ERR(ah,
"G mode not available on 5210/5211");
return -EINVAL;
}
break;
case AR5K_MODE_11B:
if (ah->ah_version < AR5K_AR5211) {
ATH5K_ERR(ah,
"B mode not available on 5210");
return -EINVAL;
}
break;
default:
ATH5K_ERR(ah,
"invalid channel: %d\n", channel->center_freq);
return -EINVAL;
}
/*
* If the driver requested a fast channel change and DMA has
* stopped, go on with it. If it fails, continue with a normal reset.
*/
if (fast) {
ret = ath5k_hw_phy_init(ah, channel, mode, true);
if (ret) {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"fast chan change failed, falling back to normal reset\n");
/* Non-fatal, can happen e.g.
* on mode change */
ret = 0;
} else {
ATH5K_DBG(ah, ATH5K_DEBUG_RESET,
"fast chan change successful\n");
return 0;
}
}
/*
* Save some registers before a reset
*/
if (ah->ah_version != AR5K_AR5210) {
/*
* Save frame sequence count
* For revs. after Oahu, only save
* seq num for DCU 0 (Global seq num)
*/
if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
for (i = 0; i < 10; i++)
s_seq[i] = ath5k_hw_reg_read(ah,
AR5K_QUEUE_DCU_SEQNUM(i));
} else {
s_seq[0] = ath5k_hw_reg_read(ah,
AR5K_QUEUE_DCU_SEQNUM(0));
}
/* TSF accelerates on AR5211 during reset.
* As a workaround, save it here and restore
* it later so that it's back in time after
* reset. This way it'll get re-synced on the
* next beacon without breaking ad-hoc.
*
* On AR5212 TSF is almost preserved across a
* reset so it stays back in time anyway and
* we don't have to save/restore it.
*
* XXX: Since this breaks power saving we have
* to disable power saving until we receive the
* next beacon, so we can resync beacon timers */
if (ah->ah_version == AR5K_AR5211) {
tsf_up = ath5k_hw_reg_read(ah, AR5K_TSF_U32);
tsf_lo = ath5k_hw_reg_read(ah, AR5K_TSF_L32);
}
}
/*GPIOs*/
s_led[0] = ath5k_hw_reg_read(ah, AR5K_PCICFG) &
AR5K_PCICFG_LEDSTATE;
s_led[1] = ath5k_hw_reg_read(ah, AR5K_GPIOCR);
s_led[2] = ath5k_hw_reg_read(ah, AR5K_GPIODO);
/*
* Since we are going to write the RF buffer,
* check if we have any pending gain_F
* optimization settings
*/
if (ah->ah_version == AR5K_AR5212 &&
(ah->ah_radio <= AR5K_RF5112)) {
if (!fast && ah->ah_rf_banks != NULL)
ath5k_hw_gainf_calibrate(ah);
}
/* Wakeup the device */
ret = ath5k_hw_nic_wakeup(ah, channel);
if (ret)
return ret;
/* PHY access enable */
if (ah->ah_mac_srev >= AR5K_SREV_AR5211)
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
else
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ | 0x40,
AR5K_PHY(0));
/* Write initial settings */
ret = ath5k_hw_write_initvals(ah, mode, skip_pcu);
if (ret)
return ret;
/* Initialize core clock settings */
ath5k_hw_init_core_clock(ah);
/*
* Tweak initval settings for revised
* chipsets and add some more config
* bits
*/
ath5k_hw_tweak_initval_settings(ah, channel);
/* Commit values from EEPROM */
ath5k_hw_commit_eeprom_settings(ah, channel);
/*
* Restore saved values
*/
/* Seqnum, TSF */
if (ah->ah_version != AR5K_AR5210) {
if (ah->ah_mac_srev < AR5K_SREV_AR5211) {
for (i = 0; i < 10; i++)
ath5k_hw_reg_write(ah, s_seq[i],
AR5K_QUEUE_DCU_SEQNUM(i));
} else {
ath5k_hw_reg_write(ah, s_seq[0],
AR5K_QUEUE_DCU_SEQNUM(0));
}
if (ah->ah_version == AR5K_AR5211) {
ath5k_hw_reg_write(ah, tsf_up, AR5K_TSF_U32);
ath5k_hw_reg_write(ah, tsf_lo, AR5K_TSF_L32);
}
}
/* Ledstate */
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, s_led[0]);
/* Gpio settings */
ath5k_hw_reg_write(ah, s_led[1], AR5K_GPIOCR);
ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
/*
* Initialize PCU
*/
ath5k_hw_pcu_init(ah, op_mode);
/*
* Initialize PHY
*/
ret = ath5k_hw_phy_init(ah, channel, mode, false);
if (ret) {
ATH5K_ERR(ah,
"failed to initialize PHY (%i) !\n", ret);
return ret;
}
/*
* Configure QCUs/DCUs
*/
ret = ath5k_hw_init_queues(ah);
if (ret)
return ret;
/*
* Initialize DMA/Interrupts
*/
ath5k_hw_dma_init(ah);
/*
* Enable 32KHz clock function for AR5212+ chips
* Set clocks to 32KHz operation and use an
* external 32KHz crystal when sleeping if one
* exists.
* Disabled by default because it is also disabled in
* other drivers and it is known to cause stability
* issues on some devices
*/
if (ah->ah_use_32khz_clock && ah->ah_version == AR5K_AR5212 &&
op_mode != NL80211_IFTYPE_AP)
ath5k_hw_set_sleep_clock(ah, true);
/*
* Disable beacons and reset the TSF
*/
AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
ath5k_hw_reset_tsf(ah);
return 0;
}
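/* Example call (editor's illustration, hypothetical context): a full
* reset to station mode on the current channel, without fast switching
* and with PCU initialization:
*
* ret = ath5k_hw_reset(ah, NL80211_IFTYPE_STATION, channel, false, false);
*/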
| linux-master | drivers/net/wireless/ath/ath5k/reset.c |
/*
* Copyright (c) 2007-2008 Bruno Randolf <[email protected]>
*
* This file is free software: you may copy, redistribute and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation, either version 2 of the License, or (at your
* option) any later version.
*
* This file is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*
*
* This file incorporates work covered by the following copyright and
* permission notice:
*
* Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
* Copyright (c) 2004-2005 Atheros Communications, Inc.
* Copyright (c) 2006 Devicescape Software, Inc.
* Copyright (c) 2007 Jiri Slaby <[email protected]>
* Copyright (c) 2007 Luis R. Rodriguez <[email protected]>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/export.h>
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/seq_file.h>
#include <linux/list.h>
#include "debug.h"
#include "ath5k.h"
#include "reg.h"
#include "base.h"
static unsigned int ath5k_debug;
module_param_named(debug, ath5k_debug, uint, 0);
/* debugfs: registers */
struct reg {
const char *name;
int addr;
};
#define REG_STRUCT_INIT(r) { #r, r }
/* just a few random registers, might want to add more */
static const struct reg regs[] = {
REG_STRUCT_INIT(AR5K_CR),
REG_STRUCT_INIT(AR5K_RXDP),
REG_STRUCT_INIT(AR5K_CFG),
REG_STRUCT_INIT(AR5K_IER),
REG_STRUCT_INIT(AR5K_BCR),
REG_STRUCT_INIT(AR5K_RTSD0),
REG_STRUCT_INIT(AR5K_RTSD1),
REG_STRUCT_INIT(AR5K_TXCFG),
REG_STRUCT_INIT(AR5K_RXCFG),
REG_STRUCT_INIT(AR5K_RXJLA),
REG_STRUCT_INIT(AR5K_MIBC),
REG_STRUCT_INIT(AR5K_TOPS),
REG_STRUCT_INIT(AR5K_RXNOFRM),
REG_STRUCT_INIT(AR5K_TXNOFRM),
REG_STRUCT_INIT(AR5K_RPGTO),
REG_STRUCT_INIT(AR5K_RFCNT),
REG_STRUCT_INIT(AR5K_MISC),
REG_STRUCT_INIT(AR5K_QCUDCU_CLKGT),
REG_STRUCT_INIT(AR5K_ISR),
REG_STRUCT_INIT(AR5K_PISR),
REG_STRUCT_INIT(AR5K_SISR0),
REG_STRUCT_INIT(AR5K_SISR1),
REG_STRUCT_INIT(AR5K_SISR2),
REG_STRUCT_INIT(AR5K_SISR3),
REG_STRUCT_INIT(AR5K_SISR4),
REG_STRUCT_INIT(AR5K_IMR),
REG_STRUCT_INIT(AR5K_PIMR),
REG_STRUCT_INIT(AR5K_SIMR0),
REG_STRUCT_INIT(AR5K_SIMR1),
REG_STRUCT_INIT(AR5K_SIMR2),
REG_STRUCT_INIT(AR5K_SIMR3),
REG_STRUCT_INIT(AR5K_SIMR4),
REG_STRUCT_INIT(AR5K_DCM_ADDR),
REG_STRUCT_INIT(AR5K_DCCFG),
REG_STRUCT_INIT(AR5K_CCFG),
REG_STRUCT_INIT(AR5K_CPC0),
REG_STRUCT_INIT(AR5K_CPC1),
REG_STRUCT_INIT(AR5K_CPC2),
REG_STRUCT_INIT(AR5K_CPC3),
REG_STRUCT_INIT(AR5K_CPCOVF),
REG_STRUCT_INIT(AR5K_RESET_CTL),
REG_STRUCT_INIT(AR5K_SLEEP_CTL),
REG_STRUCT_INIT(AR5K_INTPEND),
REG_STRUCT_INIT(AR5K_SFR),
REG_STRUCT_INIT(AR5K_PCICFG),
REG_STRUCT_INIT(AR5K_GPIOCR),
REG_STRUCT_INIT(AR5K_GPIODO),
REG_STRUCT_INIT(AR5K_SREV),
};
static void *reg_start(struct seq_file *seq, loff_t *pos)
{
return *pos < ARRAY_SIZE(regs) ? (void *)&regs[*pos] : NULL;
}
static void reg_stop(struct seq_file *seq, void *p)
{
/* nothing to do */
}
static void *reg_next(struct seq_file *seq, void *p, loff_t *pos)
{
++*pos;
return *pos < ARRAY_SIZE(regs) ? (void *)&regs[*pos] : NULL;
}
static int reg_show(struct seq_file *seq, void *p)
{
struct ath5k_hw *ah = seq->private;
struct reg *r = p;
seq_printf(seq, "%-25s0x%08x\n", r->name,
ath5k_hw_reg_read(ah, r->addr));
return 0;
}
static const struct seq_operations registers_sops = {
.start = reg_start,
.next = reg_next,
.stop = reg_stop,
.show = reg_show
};
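/* DEFINE_SEQ_ATTRIBUTE(registers) below expands to a registers_open()
* helper plus a "registers_fops" file_operations wired to registers_sops;
* registers_fops is what ath5k_debug_init_device() registers later on. */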
DEFINE_SEQ_ATTRIBUTE(registers);
/* debugfs: beacons */
static ssize_t read_file_beacon(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
char buf[500];
unsigned int len = 0;
unsigned int v;
u64 tsf;
v = ath5k_hw_reg_read(ah, AR5K_BEACON);
len += scnprintf(buf + len, sizeof(buf) - len,
"%-24s0x%08x\tintval: %d\tTIM: 0x%x\n",
"AR5K_BEACON", v, v & AR5K_BEACON_PERIOD,
(v & AR5K_BEACON_TIM) >> AR5K_BEACON_TIM_S);
len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n",
"AR5K_LAST_TSTP", ath5k_hw_reg_read(ah, AR5K_LAST_TSTP));
len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\n\n",
"AR5K_BEACON_CNT", ath5k_hw_reg_read(ah, AR5K_BEACON_CNT));
v = ath5k_hw_reg_read(ah, AR5K_TIMER0);
len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER0 (TBTT)", v, v);
v = ath5k_hw_reg_read(ah, AR5K_TIMER1);
len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER1 (DMA)", v, v >> 3);
v = ath5k_hw_reg_read(ah, AR5K_TIMER2);
len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER2 (SWBA)", v, v >> 3);
v = ath5k_hw_reg_read(ah, AR5K_TIMER3);
len += scnprintf(buf + len, sizeof(buf) - len, "%-24s0x%08x\tTU: %08x\n",
"AR5K_TIMER3 (ATIM)", v, v);
tsf = ath5k_hw_get_tsf64(ah);
len += scnprintf(buf + len, sizeof(buf) - len,
"TSF\t\t0x%016llx\tTU: %08x\n",
(unsigned long long)tsf, TSF_TO_TU(tsf));
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_beacon(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
char buf[20];
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
if (strncmp(buf, "disable", 7) == 0) {
AR5K_REG_DISABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
pr_info("debugfs disable beacons\n");
} else if (strncmp(buf, "enable", 6) == 0) {
AR5K_REG_ENABLE_BITS(ah, AR5K_BEACON, AR5K_BEACON_ENABLE);
pr_info("debugfs enable beacons\n");
}
return count;
}
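/* Example usage (editor's note; "phy0" is a placeholder for the actual
* phy name, assuming debugfs is mounted at /sys/kernel/debug):
*
* echo disable > /sys/kernel/debug/ieee80211/phy0/ath5k/beacon
* echo enable > /sys/kernel/debug/ieee80211/phy0/ath5k/beacon
*/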
static const struct file_operations fops_beacon = {
.read = read_file_beacon,
.write = write_file_beacon,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
/* debugfs: reset */
static ssize_t write_file_reset(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
ATH5K_DBG(ah, ATH5K_DEBUG_RESET, "debug file triggered reset\n");
ieee80211_queue_work(ah->hw, &ah->reset_work);
return count;
}
static const struct file_operations fops_reset = {
.write = write_file_reset,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = noop_llseek,
};
/* debugfs: debug level */
static const struct {
enum ath5k_debug_level level;
const char *name;
const char *desc;
} dbg_info[] = {
{ ATH5K_DEBUG_RESET, "reset", "reset and initialization" },
{ ATH5K_DEBUG_INTR, "intr", "interrupt handling" },
{ ATH5K_DEBUG_MODE, "mode", "mode init/setup" },
{ ATH5K_DEBUG_XMIT, "xmit", "basic xmit operation" },
{ ATH5K_DEBUG_BEACON, "beacon", "beacon handling" },
{ ATH5K_DEBUG_CALIBRATE, "calib", "periodic calibration" },
{ ATH5K_DEBUG_TXPOWER, "txpower", "transmit power setting" },
{ ATH5K_DEBUG_LED, "led", "LED management" },
{ ATH5K_DEBUG_DUMPBANDS, "dumpbands", "dump bands" },
{ ATH5K_DEBUG_DMA, "dma", "dma start/stop" },
{ ATH5K_DEBUG_ANI, "ani", "adaptive noise immunity" },
{ ATH5K_DEBUG_DESC, "desc", "descriptor chains" },
{ ATH5K_DEBUG_ANY, "all", "show all debug levels" },
};
static ssize_t read_file_debug(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
unsigned int i;
len += scnprintf(buf + len, sizeof(buf) - len,
"DEBUG LEVEL: 0x%08x\n\n", ah->debug.level);
for (i = 0; i < ARRAY_SIZE(dbg_info) - 1; i++) {
len += scnprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
ah->debug.level & dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);
}
len += scnprintf(buf + len, sizeof(buf) - len,
"%10s %c 0x%08x - %s\n", dbg_info[i].name,
ah->debug.level == dbg_info[i].level ? '+' : ' ',
dbg_info[i].level, dbg_info[i].desc);
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_debug(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
unsigned int i;
char buf[20];
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
for (i = 0; i < ARRAY_SIZE(dbg_info); i++) {
if (strncmp(buf, dbg_info[i].name,
strlen(dbg_info[i].name)) == 0) {
ah->debug.level ^= dbg_info[i].level; /* toggle bit */
break;
}
}
return count;
}
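/* Example usage (editor's note, hypothetical phy name): each write XORs
* the named bit into ah->debug.level, so the same command toggles a
* level on and off:
*
* echo reset > /sys/kernel/debug/ieee80211/phy0/ath5k/debug
*/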
static const struct file_operations fops_debug = {
.read = read_file_debug,
.write = write_file_debug,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
/* debugfs: antenna */
static ssize_t read_file_antenna(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
unsigned int i;
unsigned int v;
len += scnprintf(buf + len, sizeof(buf) - len, "antenna mode\t%d\n",
ah->ah_ant_mode);
len += scnprintf(buf + len, sizeof(buf) - len, "default antenna\t%d\n",
ah->ah_def_ant);
len += scnprintf(buf + len, sizeof(buf) - len, "tx antenna\t%d\n",
ah->ah_tx_ant);
len += scnprintf(buf + len, sizeof(buf) - len, "\nANTENNA\t\tRX\tTX\n");
for (i = 1; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
len += scnprintf(buf + len, sizeof(buf) - len,
"[antenna %d]\t%d\t%d\n",
i, ah->stats.antenna_rx[i], ah->stats.antenna_tx[i]);
}
len += scnprintf(buf + len, sizeof(buf) - len, "[invalid]\t%d\t%d\n",
ah->stats.antenna_rx[0], ah->stats.antenna_tx[0]);
v = ath5k_hw_reg_read(ah, AR5K_DEFAULT_ANTENNA);
len += scnprintf(buf + len, sizeof(buf) - len,
"\nAR5K_DEFAULT_ANTENNA\t0x%08x\n", v);
v = ath5k_hw_reg_read(ah, AR5K_STA_ID1);
len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_DEFAULT_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_DEFAULT_ANTENNA) != 0);
len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_DESC_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_DESC_ANTENNA) != 0);
len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_RTS_DEF_ANTENNA\t%d\n",
(v & AR5K_STA_ID1_RTS_DEF_ANTENNA) != 0);
len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_STA_ID1_SELFGEN_DEF_ANT\t%d\n",
(v & AR5K_STA_ID1_SELFGEN_DEF_ANT) != 0);
v = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL);
len += scnprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_AGCCTL_OFDM_DIV_DIS\t%d\n",
(v & AR5K_PHY_AGCCTL_OFDM_DIV_DIS) != 0);
v = ath5k_hw_reg_read(ah, AR5K_PHY_RESTART);
len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_RESTART_DIV_GC\t\t%x\n",
(v & AR5K_PHY_RESTART_DIV_GC) >> AR5K_PHY_RESTART_DIV_GC_S);
v = ath5k_hw_reg_read(ah, AR5K_PHY_FAST_ANT_DIV);
len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_FAST_ANT_DIV_EN\t%d\n",
(v & AR5K_PHY_FAST_ANT_DIV_EN) != 0);
v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_0);
len += scnprintf(buf + len, sizeof(buf) - len,
"\nAR5K_PHY_ANT_SWITCH_TABLE_0\t0x%08x\n", v);
v = ath5k_hw_reg_read(ah, AR5K_PHY_ANT_SWITCH_TABLE_1);
len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHY_ANT_SWITCH_TABLE_1\t0x%08x\n", v);
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_antenna(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
unsigned int i;
char buf[20];
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
if (strncmp(buf, "diversity", 9) == 0) {
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
pr_info("debug: enable diversity\n");
} else if (strncmp(buf, "fixed-a", 7) == 0) {
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
pr_info("debug: fixed antenna A\n");
} else if (strncmp(buf, "fixed-b", 7) == 0) {
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
pr_info("debug: fixed antenna B\n");
} else if (strncmp(buf, "clear", 5) == 0) {
for (i = 0; i < ARRAY_SIZE(ah->stats.antenna_rx); i++) {
ah->stats.antenna_rx[i] = 0;
ah->stats.antenna_tx[i] = 0;
}
pr_info("debug: cleared antenna stats\n");
}
return count;
}
static const struct file_operations fops_antenna = {
.read = read_file_antenna,
.write = write_file_antenna,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
/* debugfs: misc */
static ssize_t read_file_misc(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
u32 filt = ath5k_hw_get_rx_filter(ah);
len += scnprintf(buf + len, sizeof(buf) - len, "bssid-mask: %pM\n",
ah->bssidmask);
len += scnprintf(buf + len, sizeof(buf) - len, "filter-flags: 0x%x ",
filt);
if (filt & AR5K_RX_FILTER_UCAST)
len += scnprintf(buf + len, sizeof(buf) - len, " UCAST");
if (filt & AR5K_RX_FILTER_MCAST)
len += scnprintf(buf + len, sizeof(buf) - len, " MCAST");
if (filt & AR5K_RX_FILTER_BCAST)
len += scnprintf(buf + len, sizeof(buf) - len, " BCAST");
if (filt & AR5K_RX_FILTER_CONTROL)
len += scnprintf(buf + len, sizeof(buf) - len, " CONTROL");
if (filt & AR5K_RX_FILTER_BEACON)
len += scnprintf(buf + len, sizeof(buf) - len, " BEACON");
if (filt & AR5K_RX_FILTER_PROM)
len += scnprintf(buf + len, sizeof(buf) - len, " PROM");
if (filt & AR5K_RX_FILTER_XRPOLL)
len += scnprintf(buf + len, sizeof(buf) - len, " XRPOLL");
if (filt & AR5K_RX_FILTER_PROBEREQ)
len += scnprintf(buf + len, sizeof(buf) - len, " PROBEREQ");
if (filt & AR5K_RX_FILTER_PHYERR_5212)
len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR-5212");
if (filt & AR5K_RX_FILTER_RADARERR_5212)
len += scnprintf(buf + len, sizeof(buf) - len, " RADARERR-5212");
if (filt & AR5K_RX_FILTER_PHYERR_5211)
len += scnprintf(buf + len, sizeof(buf) - len, " PHYERR-5211");
if (filt & AR5K_RX_FILTER_RADARERR_5211)
len += scnprintf(buf + len, sizeof(buf) - len, " RADARERR-5211");
len += scnprintf(buf + len, sizeof(buf) - len, "\nopmode: %s (%d)\n",
ath_opmode_to_string(ah->opmode), ah->opmode);
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_misc = {
.read = read_file_misc,
.open = simple_open,
.owner = THIS_MODULE,
};
/* debugfs: frameerrors */
static ssize_t read_file_frameerrors(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
struct ath5k_statistics *st = &ah->stats;
char buf[700];
unsigned int len = 0;
int i;
len += scnprintf(buf + len, sizeof(buf) - len,
"RX\n---------------------\n");
len += scnprintf(buf + len, sizeof(buf) - len, "CRC\t%u\t(%u%%)\n",
st->rxerr_crc,
st->rx_all_count > 0 ?
st->rxerr_crc * 100 / st->rx_all_count : 0);
len += scnprintf(buf + len, sizeof(buf) - len, "PHY\t%u\t(%u%%)\n",
st->rxerr_phy,
st->rx_all_count > 0 ?
st->rxerr_phy * 100 / st->rx_all_count : 0);
for (i = 0; i < 32; i++) {
if (st->rxerr_phy_code[i])
len += scnprintf(buf + len, sizeof(buf) - len,
" phy_err[%u]\t%u\n",
i, st->rxerr_phy_code[i]);
}
len += scnprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
st->rxerr_fifo,
st->rx_all_count > 0 ?
st->rxerr_fifo * 100 / st->rx_all_count : 0);
len += scnprintf(buf + len, sizeof(buf) - len, "decrypt\t%u\t(%u%%)\n",
st->rxerr_decrypt,
st->rx_all_count > 0 ?
st->rxerr_decrypt * 100 / st->rx_all_count : 0);
len += scnprintf(buf + len, sizeof(buf) - len, "MIC\t%u\t(%u%%)\n",
st->rxerr_mic,
st->rx_all_count > 0 ?
st->rxerr_mic * 100 / st->rx_all_count : 0);
len += scnprintf(buf + len, sizeof(buf) - len, "process\t%u\t(%u%%)\n",
st->rxerr_proc,
st->rx_all_count > 0 ?
st->rxerr_proc * 100 / st->rx_all_count : 0);
len += scnprintf(buf + len, sizeof(buf) - len, "jumbo\t%u\t(%u%%)\n",
st->rxerr_jumbo,
st->rx_all_count > 0 ?
st->rxerr_jumbo * 100 / st->rx_all_count : 0);
len += scnprintf(buf + len, sizeof(buf) - len, "[RX all\t%u]\n",
st->rx_all_count);
len += scnprintf(buf + len, sizeof(buf) - len, "RX-all-bytes\t%u\n",
st->rx_bytes_count);
len += scnprintf(buf + len, sizeof(buf) - len,
"\nTX\n---------------------\n");
len += scnprintf(buf + len, sizeof(buf) - len, "retry\t%u\t(%u%%)\n",
st->txerr_retry,
st->tx_all_count > 0 ?
st->txerr_retry * 100 / st->tx_all_count : 0);
len += scnprintf(buf + len, sizeof(buf) - len, "FIFO\t%u\t(%u%%)\n",
st->txerr_fifo,
st->tx_all_count > 0 ?
st->txerr_fifo * 100 / st->tx_all_count : 0);
len += scnprintf(buf + len, sizeof(buf) - len, "filter\t%u\t(%u%%)\n",
st->txerr_filt,
st->tx_all_count > 0 ?
st->txerr_filt * 100 / st->tx_all_count : 0);
len += scnprintf(buf + len, sizeof(buf) - len, "[TX all\t%u]\n",
st->tx_all_count);
len += scnprintf(buf + len, sizeof(buf) - len, "TX-all-bytes\t%u\n",
st->tx_bytes_count);
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_frameerrors(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
struct ath5k_statistics *st = &ah->stats;
char buf[20];
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
if (strncmp(buf, "clear", 5) == 0) {
st->rxerr_crc = 0;
st->rxerr_phy = 0;
st->rxerr_fifo = 0;
st->rxerr_decrypt = 0;
st->rxerr_mic = 0;
st->rxerr_proc = 0;
st->rxerr_jumbo = 0;
st->rx_all_count = 0;
st->txerr_retry = 0;
st->txerr_fifo = 0;
st->txerr_filt = 0;
st->tx_all_count = 0;
pr_info("debug: cleared frameerrors stats\n");
}
return count;
}
static const struct file_operations fops_frameerrors = {
.read = read_file_frameerrors,
.write = write_file_frameerrors,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
/* debugfs: ani */
static ssize_t read_file_ani(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
struct ath5k_statistics *st = &ah->stats;
struct ath5k_ani_state *as = &ah->ani_state;
char buf[700];
unsigned int len = 0;
len += scnprintf(buf + len, sizeof(buf) - len,
"HW has PHY error counters:\t%s\n",
ah->ah_capabilities.cap_has_phyerr_counters ?
"yes" : "no");
len += scnprintf(buf + len, sizeof(buf) - len,
"HW max spur immunity level:\t%d\n",
as->max_spur_level);
len += scnprintf(buf + len, sizeof(buf) - len,
"\nANI state\n--------------------------------------------\n");
len += scnprintf(buf + len, sizeof(buf) - len, "operating mode:\t\t\t");
switch (as->ani_mode) {
case ATH5K_ANI_MODE_OFF:
len += scnprintf(buf + len, sizeof(buf) - len, "OFF\n");
break;
case ATH5K_ANI_MODE_MANUAL_LOW:
len += scnprintf(buf + len, sizeof(buf) - len,
"MANUAL LOW\n");
break;
case ATH5K_ANI_MODE_MANUAL_HIGH:
len += scnprintf(buf + len, sizeof(buf) - len,
"MANUAL HIGH\n");
break;
case ATH5K_ANI_MODE_AUTO:
len += scnprintf(buf + len, sizeof(buf) - len, "AUTO\n");
break;
default:
len += scnprintf(buf + len, sizeof(buf) - len,
"??? (not good)\n");
break;
}
len += scnprintf(buf + len, sizeof(buf) - len,
"noise immunity level:\t\t%d\n",
as->noise_imm_level);
len += scnprintf(buf + len, sizeof(buf) - len,
"spur immunity level:\t\t%d\n",
as->spur_level);
len += scnprintf(buf + len, sizeof(buf) - len,
"firstep level:\t\t\t%d\n",
as->firstep_level);
len += scnprintf(buf + len, sizeof(buf) - len,
"OFDM weak signal detection:\t%s\n",
as->ofdm_weak_sig ? "on" : "off");
len += scnprintf(buf + len, sizeof(buf) - len,
"CCK weak signal detection:\t%s\n",
as->cck_weak_sig ? "on" : "off");
len += scnprintf(buf + len, sizeof(buf) - len,
"\nMIB INTERRUPTS:\t\t%u\n",
st->mib_intr);
len += scnprintf(buf + len, sizeof(buf) - len,
"beacon RSSI average:\t%d\n",
(int)ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg));
#define CC_PRINT(_struct, _field) \
_struct._field, \
_struct.cycles > 0 ? \
_struct._field * 100 / _struct.cycles : 0
len += scnprintf(buf + len, sizeof(buf) - len,
"profcnt tx\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, tx_frame));
len += scnprintf(buf + len, sizeof(buf) - len,
"profcnt rx\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, rx_frame));
len += scnprintf(buf + len, sizeof(buf) - len,
"profcnt busy\t\t%u\t(%d%%)\n",
CC_PRINT(as->last_cc, rx_busy));
#undef CC_PRINT
len += scnprintf(buf + len, sizeof(buf) - len, "profcnt cycles\t\t%u\n",
as->last_cc.cycles);
len += scnprintf(buf + len, sizeof(buf) - len,
"listen time\t\t%d\tlast: %d\n",
as->listen_time, as->last_listen);
len += scnprintf(buf + len, sizeof(buf) - len,
"OFDM errors\t\t%u\tlast: %u\tsum: %u\n",
as->ofdm_errors, as->last_ofdm_errors,
as->sum_ofdm_errors);
len += scnprintf(buf + len, sizeof(buf) - len,
"CCK errors\t\t%u\tlast: %u\tsum: %u\n",
as->cck_errors, as->last_cck_errors,
as->sum_cck_errors);
len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHYERR_CNT1\t%x\t(=%d)\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1),
ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1)));
len += scnprintf(buf + len, sizeof(buf) - len,
"AR5K_PHYERR_CNT2\t%x\t(=%d)\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2),
ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX -
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2)));
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_ani(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
char buf[20];
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
if (strncmp(buf, "sens-low", 8) == 0) {
ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_HIGH);
} else if (strncmp(buf, "sens-high", 9) == 0) {
ath5k_ani_init(ah, ATH5K_ANI_MODE_MANUAL_LOW);
} else if (strncmp(buf, "ani-off", 7) == 0) {
ath5k_ani_init(ah, ATH5K_ANI_MODE_OFF);
} else if (strncmp(buf, "ani-on", 6) == 0) {
ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);
} else if (strncmp(buf, "noise-low", 9) == 0) {
ath5k_ani_set_noise_immunity_level(ah, 0);
} else if (strncmp(buf, "noise-high", 10) == 0) {
ath5k_ani_set_noise_immunity_level(ah,
ATH5K_ANI_MAX_NOISE_IMM_LVL);
} else if (strncmp(buf, "spur-low", 8) == 0) {
ath5k_ani_set_spur_immunity_level(ah, 0);
} else if (strncmp(buf, "spur-high", 9) == 0) {
ath5k_ani_set_spur_immunity_level(ah,
ah->ani_state.max_spur_level);
} else if (strncmp(buf, "fir-low", 7) == 0) {
ath5k_ani_set_firstep_level(ah, 0);
} else if (strncmp(buf, "fir-high", 8) == 0) {
ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
} else if (strncmp(buf, "ofdm-off", 8) == 0) {
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
} else if (strncmp(buf, "ofdm-on", 7) == 0) {
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
} else if (strncmp(buf, "cck-off", 7) == 0) {
ath5k_ani_set_cck_weak_signal_detection(ah, false);
} else if (strncmp(buf, "cck-on", 6) == 0) {
ath5k_ani_set_cck_weak_signal_detection(ah, true);
}
return count;
}
static const struct file_operations fops_ani = {
.read = read_file_ani,
.write = write_file_ani,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
/* debugfs: queues etc */
static ssize_t read_file_queue(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
char buf[700];
unsigned int len = 0;
struct ath5k_txq *txq;
struct ath5k_buf *bf, *bf0;
int i, n;
len += scnprintf(buf + len, sizeof(buf) - len,
"available txbuffers: %d\n", ah->txbuf_len);
for (i = 0; i < ARRAY_SIZE(ah->txqs); i++) {
txq = &ah->txqs[i];
len += scnprintf(buf + len, sizeof(buf) - len,
"%02d: %ssetup\n", i, txq->setup ? "" : "not ");
if (!txq->setup)
continue;
n = 0;
spin_lock_bh(&txq->lock);
list_for_each_entry_safe(bf, bf0, &txq->q, list)
n++;
spin_unlock_bh(&txq->lock);
len += scnprintf(buf + len, sizeof(buf) - len,
" len: %d bufs: %d\n", txq->txq_len, n);
len += scnprintf(buf + len, sizeof(buf) - len,
" stuck: %d\n", txq->txq_stuck);
}
if (len > sizeof(buf))
len = sizeof(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t write_file_queue(struct file *file,
const char __user *userbuf,
size_t count, loff_t *ppos)
{
struct ath5k_hw *ah = file->private_data;
char buf[20];
count = min_t(size_t, count, sizeof(buf) - 1);
if (copy_from_user(buf, userbuf, count))
return -EFAULT;
buf[count] = '\0';
if (strncmp(buf, "start", 5) == 0)
ieee80211_wake_queues(ah->hw);
else if (strncmp(buf, "stop", 4) == 0)
ieee80211_stop_queues(ah->hw);
return count;
}
static const struct file_operations fops_queue = {
.read = read_file_queue,
.write = write_file_queue,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
/* debugfs: eeprom */
struct eeprom_private {
u16 *buf;
int len;
};
static int open_file_eeprom(struct inode *inode, struct file *file)
{
struct eeprom_private *ep;
struct ath5k_hw *ah = inode->i_private;
bool res;
int i, ret;
u32 eesize; /* NB: in 16-bit words */
u16 val, *buf;
/* Get eeprom size */
res = ath5k_hw_nvram_read(ah, AR5K_EEPROM_SIZE_UPPER, &val);
if (!res)
return -EACCES;
if (val == 0) {
eesize = AR5K_EEPROM_INFO_MAX + AR5K_EEPROM_INFO_BASE;
} else {
eesize = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
ath5k_hw_nvram_read(ah, AR5K_EEPROM_SIZE_LOWER, &val);
eesize = eesize | val;
}
if (eesize > 4096)
return -EINVAL;
/* Create buffer and read in eeprom */
buf = vmalloc(array_size(eesize, 2));
if (!buf) {
ret = -ENOMEM;
goto err;
}
for (i = 0; i < eesize; ++i) {
if (!ath5k_hw_nvram_read(ah, i, &val)) {
ret = -EIO;
goto freebuf;
}
buf[i] = val;
}
/* Create private struct and assign to file */
ep = kmalloc(sizeof(*ep), GFP_KERNEL);
if (!ep) {
ret = -ENOMEM;
goto freebuf;
}
ep->buf = buf;
ep->len = eesize * 2;
file->private_data = (void *)ep;
return 0;
freebuf:
vfree(buf);
err:
return ret;
}
static ssize_t read_file_eeprom(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct eeprom_private *ep = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos, ep->buf, ep->len);
}
static int release_file_eeprom(struct inode *inode, struct file *file)
{
struct eeprom_private *ep = file->private_data;
vfree(ep->buf);
kfree(ep);
return 0;
}
static const struct file_operations fops_eeprom = {
.open = open_file_eeprom,
.read = read_file_eeprom,
.release = release_file_eeprom,
.owner = THIS_MODULE,
};
void
ath5k_debug_init_device(struct ath5k_hw *ah)
{
struct dentry *phydir;
ah->debug.level = ath5k_debug;
phydir = debugfs_create_dir("ath5k", ah->hw->wiphy->debugfsdir);
debugfs_create_file("debug", 0600, phydir, ah, &fops_debug);
debugfs_create_file("registers", 0400, phydir, ah, ®isters_fops);
debugfs_create_file("beacon", 0600, phydir, ah, &fops_beacon);
debugfs_create_file("reset", 0200, phydir, ah, &fops_reset);
debugfs_create_file("antenna", 0600, phydir, ah, &fops_antenna);
debugfs_create_file("misc", 0400, phydir, ah, &fops_misc);
debugfs_create_file("eeprom", 0400, phydir, ah, &fops_eeprom);
debugfs_create_file("frameerrors", 0600, phydir, ah, &fops_frameerrors);
debugfs_create_file("ani", 0600, phydir, ah, &fops_ani);
debugfs_create_file("queue", 0600, phydir, ah, &fops_queue);
debugfs_create_bool("32khz_clock", 0600, phydir,
&ah->ah_use_32khz_clock);
}
/* functions used in other places */
void
ath5k_debug_dump_bands(struct ath5k_hw *ah)
{
unsigned int b, i;
if (likely(!(ah->debug.level & ATH5K_DEBUG_DUMPBANDS)))
return;
for (b = 0; b < NUM_NL80211_BANDS; b++) {
struct ieee80211_supported_band *band = &ah->sbands[b];
char bname[6];
switch (band->band) {
case NL80211_BAND_2GHZ:
strcpy(bname, "2 GHz");
break;
case NL80211_BAND_5GHZ:
strcpy(bname, "5 GHz");
break;
default:
printk(KERN_DEBUG "Band not supported: %d\n",
band->band);
return;
}
printk(KERN_DEBUG "Band %s: channels %d, rates %d\n", bname,
band->n_channels, band->n_bitrates);
printk(KERN_DEBUG " channels:\n");
for (i = 0; i < band->n_channels; i++)
printk(KERN_DEBUG " %3d %d %.4x %.4x\n",
ieee80211_frequency_to_channel(
band->channels[i].center_freq),
band->channels[i].center_freq,
band->channels[i].hw_value,
band->channels[i].flags);
printk(KERN_DEBUG " rates:\n");
for (i = 0; i < band->n_bitrates; i++)
printk(KERN_DEBUG " %4d %.4x %.4x %.4x\n",
band->bitrates[i].bitrate,
band->bitrates[i].hw_value,
band->bitrates[i].flags,
band->bitrates[i].hw_value_short);
}
}
static inline void
ath5k_debug_printrxbuf(struct ath5k_buf *bf, int done,
struct ath5k_rx_status *rs)
{
struct ath5k_desc *ds = bf->desc;
struct ath5k_hw_all_rx_desc *rd = &ds->ud.ds_rx;
printk(KERN_DEBUG "R (%p %llx) %08x %08x %08x %08x %08x %08x %c\n",
ds, (unsigned long long)bf->daddr,
ds->ds_link, ds->ds_data,
rd->rx_ctl.rx_control_0, rd->rx_ctl.rx_control_1,
rd->rx_stat.rx_status_0, rd->rx_stat.rx_status_1,
!done ? ' ' : (rs->rs_status == 0) ? '*' : '!');
}
void
ath5k_debug_printrxbuffs(struct ath5k_hw *ah)
{
struct ath5k_desc *ds;
struct ath5k_buf *bf;
struct ath5k_rx_status rs = {};
int status;
if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
return;
printk(KERN_DEBUG "rxdp %x, rxlink %p\n",
ath5k_hw_get_rxdp(ah), ah->rxlink);
spin_lock_bh(&ah->rxbuflock);
list_for_each_entry(bf, &ah->rxbuf, list) {
ds = bf->desc;
status = ah->ah_proc_rx_desc(ah, ds, &rs);
if (!status)
ath5k_debug_printrxbuf(bf, status == 0, &rs);
}
spin_unlock_bh(&ah->rxbuflock);
}
void
ath5k_debug_printtxbuf(struct ath5k_hw *ah, struct ath5k_buf *bf)
{
struct ath5k_desc *ds = bf->desc;
struct ath5k_hw_5212_tx_desc *td = &ds->ud.ds_tx5212;
struct ath5k_tx_status ts = {};
int done;
if (likely(!(ah->debug.level & ATH5K_DEBUG_DESC)))
return;
done = ah->ah_proc_tx_desc(ah, bf->desc, &ts);
printk(KERN_DEBUG "T (%p %llx) %08x %08x %08x %08x %08x %08x %08x "
"%08x %c\n", ds, (unsigned long long)bf->daddr, ds->ds_link,
ds->ds_data, td->tx_ctl.tx_control_0, td->tx_ctl.tx_control_1,
td->tx_ctl.tx_control_2, td->tx_ctl.tx_control_3,
td->tx_stat.tx_status_0, td->tx_stat.tx_status_1,
done ? ' ' : (ts.ts_status == 0) ? '*' : '!');
}
| linux-master | drivers/net/wireless/ath/ath5k/debug.c |
/*
* Copyright (c) 2004-2008 Reyk Floeter <[email protected]>
* Copyright (c) 2006-2009 Nick Kossifidis <[email protected]>
* Copyright (c) 2008-2009 Felix Fietkau <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/*************************************\
* EEPROM access functions and helpers *
\*************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/slab.h>
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/******************\
* Helper functions *
\******************/
/*
* Translate binary channel representation in EEPROM to frequency
*/
static u16 ath5k_eeprom_bin2freq(struct ath5k_eeprom_info *ee, u16 bin,
unsigned int mode)
{
u16 val;
if (bin == AR5K_EEPROM_CHANNEL_DIS)
return bin;
if (mode == AR5K_EEPROM_MODE_11A) {
if (ee->ee_version > AR5K_EEPROM_VERSION_3_2)
val = (5 * bin) + 4800;
else
val = bin > 62 ? (10 * 62) + (5 * (bin - 62)) + 5100 :
(bin * 10) + 5100;
} else {
if (ee->ee_version > AR5K_EEPROM_VERSION_3_2)
val = bin + 2300;
else
val = bin + 2400;
}
return val;
}
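/* Worked example (editor's note): on a > v3.2 EEPROM in 11a mode, bin 96
* maps to (5 * 96) + 4800 = 5280 MHz; in the 2 GHz modes, bin 112 maps to
* 112 + 2300 = 2412 MHz (channel 1). */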
/*********\
* Parsers *
\*********/
/*
* Initialize eeprom & capabilities structs
*/
static int
ath5k_eeprom_init_header(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u16 val;
u32 cksum, offset, eep_max = AR5K_EEPROM_INFO_MAX;
/*
* Read values from EEPROM and store them in the capability structure
*/
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MAGIC, ee_magic);
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_PROTECT, ee_protect);
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_REG_DOMAIN, ee_regdomain);
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_VERSION, ee_version);
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_HDR, ee_header);
/* Return if we have an old EEPROM */
if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_0)
return 0;
/*
* Validate the checksum of the EEPROM data. There are some
* devices with invalid EEPROMs.
*/
AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_UPPER, val);
if (val) {
eep_max = (val & AR5K_EEPROM_SIZE_UPPER_MASK) <<
AR5K_EEPROM_SIZE_ENDLOC_SHIFT;
AR5K_EEPROM_READ(AR5K_EEPROM_SIZE_LOWER, val);
eep_max = (eep_max | val) - AR5K_EEPROM_INFO_BASE;
/*
* Fail-safe check to prevent endless loops due
* to busted EEPROMs. XXX: This value is likely still
* too big, waiting on a better value.
*/
if (eep_max > (3 * AR5K_EEPROM_INFO_MAX)) {
ATH5K_ERR(ah, "Invalid max custom EEPROM size: "
"%d (0x%04x) max expected: %d (0x%04x)\n",
eep_max, eep_max,
3 * AR5K_EEPROM_INFO_MAX,
3 * AR5K_EEPROM_INFO_MAX);
return -EIO;
}
}
for (cksum = 0, offset = 0; offset < eep_max; offset++) {
AR5K_EEPROM_READ(AR5K_EEPROM_INFO(offset), val);
cksum ^= val;
}
if (cksum != AR5K_EEPROM_INFO_CKSUM) {
ATH5K_ERR(ah, "Invalid EEPROM "
"checksum: 0x%04x eep_max: 0x%04x (%s)\n",
cksum, eep_max,
eep_max == AR5K_EEPROM_INFO_MAX ?
"default size" : "custom size");
return -EIO;
}
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_ANT_GAIN(ah->ah_ee_version),
ee_ant_gain);
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) {
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC0, ee_misc0);
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC1, ee_misc1);
/* XXX: Don't know which versions include these two */
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC2, ee_misc2);
if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3)
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC3, ee_misc3);
if (ee->ee_version >= AR5K_EEPROM_VERSION_5_0) {
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC4, ee_misc4);
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC5, ee_misc5);
AR5K_EEPROM_READ_HDR(AR5K_EEPROM_MISC6, ee_misc6);
}
}
if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_3) {
AR5K_EEPROM_READ(AR5K_EEPROM_OBDB0_2GHZ, val);
ee->ee_ob[AR5K_EEPROM_MODE_11B][0] = val & 0x7;
ee->ee_db[AR5K_EEPROM_MODE_11B][0] = (val >> 3) & 0x7;
AR5K_EEPROM_READ(AR5K_EEPROM_OBDB1_2GHZ, val);
ee->ee_ob[AR5K_EEPROM_MODE_11G][0] = val & 0x7;
ee->ee_db[AR5K_EEPROM_MODE_11G][0] = (val >> 3) & 0x7;
}
AR5K_EEPROM_READ(AR5K_EEPROM_IS_HB63, val);
if ((ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4)) && val)
ee->ee_is_hb63 = true;
else
ee->ee_is_hb63 = false;
AR5K_EEPROM_READ(AR5K_EEPROM_RFKILL, val);
ee->ee_rfkill_pin = (u8) AR5K_REG_MS(val, AR5K_EEPROM_RFKILL_GPIO_SEL);
ee->ee_rfkill_pol = val & AR5K_EEPROM_RFKILL_POLARITY ? true : false;
/* Check if PCIE_OFFSET points to PCIE_SERDES_SECTION
* and enable serdes programming if needed.
*
* XXX: Serdes values seem to be fixed so
* no need to read them here, we write them
* during ath5k_hw_init */
AR5K_EEPROM_READ(AR5K_EEPROM_PCIE_OFFSET, val);
ee->ee_serdes = (val == AR5K_EEPROM_PCIE_SERDES_SECTION) ?
true : false;
return 0;
}
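/*
* Editor's sketch (illustrative only, not driver code): the header
* checksum validated above is a plain 16-bit XOR over all info words,
* expected to equal AR5K_EEPROM_INFO_CKSUM. The same check over a
* dumped image, with a hypothetical helper name, would look like:
*/
#if 0
static bool eeprom_image_cksum_ok(const u16 *words, u32 nwords)
{
u16 cksum = 0;
u32 i;
/* XOR all 16-bit info words together */
for (i = 0; i < nwords; i++)
cksum ^= words[i];
return cksum == AR5K_EEPROM_INFO_CKSUM;
}
#endif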
/*
* Read antenna infos from eeprom
*/
static int ath5k_eeprom_read_ants(struct ath5k_hw *ah, u32 *offset,
unsigned int mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 o = *offset;
u16 val;
int i = 0;
AR5K_EEPROM_READ(o++, val);
ee->ee_switch_settling[mode] = (val >> 8) & 0x7f;
ee->ee_atn_tx_rx[mode] = (val >> 2) & 0x3f;
ee->ee_ant_control[mode][i] = (val << 4) & 0x3f;
AR5K_EEPROM_READ(o++, val);
ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf;
ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f;
ee->ee_ant_control[mode][i++] = val & 0x3f;
AR5K_EEPROM_READ(o++, val);
ee->ee_ant_control[mode][i++] = (val >> 10) & 0x3f;
ee->ee_ant_control[mode][i++] = (val >> 4) & 0x3f;
ee->ee_ant_control[mode][i] = (val << 2) & 0x3f;
AR5K_EEPROM_READ(o++, val);
ee->ee_ant_control[mode][i++] |= (val >> 14) & 0x3;
ee->ee_ant_control[mode][i++] = (val >> 8) & 0x3f;
ee->ee_ant_control[mode][i++] = (val >> 2) & 0x3f;
ee->ee_ant_control[mode][i] = (val << 4) & 0x3f;
AR5K_EEPROM_READ(o++, val);
ee->ee_ant_control[mode][i++] |= (val >> 12) & 0xf;
ee->ee_ant_control[mode][i++] = (val >> 6) & 0x3f;
ee->ee_ant_control[mode][i++] = val & 0x3f;
/* Get antenna switch tables */
ah->ah_ant_ctl[mode][AR5K_ANT_CTL] =
(ee->ee_ant_control[mode][0] << 4);
ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_A] =
ee->ee_ant_control[mode][1] |
(ee->ee_ant_control[mode][2] << 6) |
(ee->ee_ant_control[mode][3] << 12) |
(ee->ee_ant_control[mode][4] << 18) |
(ee->ee_ant_control[mode][5] << 24);
ah->ah_ant_ctl[mode][AR5K_ANT_SWTABLE_B] =
ee->ee_ant_control[mode][6] |
(ee->ee_ant_control[mode][7] << 6) |
(ee->ee_ant_control[mode][8] << 12) |
(ee->ee_ant_control[mode][9] << 18) |
(ee->ee_ant_control[mode][10] << 24);
/* return new offset */
*offset = o;
return 0;
}
/*
* Read supported modes and some mode-specific calibration data
* from eeprom
*/
static int ath5k_eeprom_read_modes(struct ath5k_hw *ah, u32 *offset,
unsigned int mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 o = *offset;
u16 val;
ee->ee_n_piers[mode] = 0;
AR5K_EEPROM_READ(o++, val);
ee->ee_adc_desired_size[mode] = (s8)((val >> 8) & 0xff);
switch (mode) {
case AR5K_EEPROM_MODE_11A:
ee->ee_ob[mode][3] = (val >> 5) & 0x7;
ee->ee_db[mode][3] = (val >> 2) & 0x7;
ee->ee_ob[mode][2] = (val << 1) & 0x7;
AR5K_EEPROM_READ(o++, val);
ee->ee_ob[mode][2] |= (val >> 15) & 0x1;
ee->ee_db[mode][2] = (val >> 12) & 0x7;
ee->ee_ob[mode][1] = (val >> 9) & 0x7;
ee->ee_db[mode][1] = (val >> 6) & 0x7;
ee->ee_ob[mode][0] = (val >> 3) & 0x7;
ee->ee_db[mode][0] = val & 0x7;
break;
case AR5K_EEPROM_MODE_11G:
case AR5K_EEPROM_MODE_11B:
ee->ee_ob[mode][1] = (val >> 4) & 0x7;
ee->ee_db[mode][1] = val & 0x7;
break;
}
AR5K_EEPROM_READ(o++, val);
ee->ee_tx_end2xlna_enable[mode] = (val >> 8) & 0xff;
ee->ee_thr_62[mode] = val & 0xff;
if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
ee->ee_thr_62[mode] = mode == AR5K_EEPROM_MODE_11A ? 15 : 28;
AR5K_EEPROM_READ(o++, val);
ee->ee_tx_end2xpa_disable[mode] = (val >> 8) & 0xff;
ee->ee_tx_frm2xpa_enable[mode] = val & 0xff;
AR5K_EEPROM_READ(o++, val);
ee->ee_pga_desired_size[mode] = (val >> 8) & 0xff;
if ((val & 0xff) & 0x80)
ee->ee_noise_floor_thr[mode] = -((((val & 0xff) ^ 0xff)) + 1);
else
ee->ee_noise_floor_thr[mode] = val & 0xff;
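/* Worked example (editor's note): a stored byte of 0xca has bit 7 set,
* so the threshold becomes -((0xca ^ 0xff) + 1) = -54, i.e. plain
* two's complement sign extension of the 8-bit value. */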
if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2)
ee->ee_noise_floor_thr[mode] =
mode == AR5K_EEPROM_MODE_11A ? -54 : -1;
AR5K_EEPROM_READ(o++, val);
ee->ee_xlna_gain[mode] = (val >> 5) & 0xff;
ee->ee_x_gain[mode] = (val >> 1) & 0xf;
ee->ee_xpd[mode] = val & 0x1;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
mode != AR5K_EEPROM_MODE_11B)
ee->ee_fixed_bias[mode] = (val >> 13) & 0x1;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_3) {
AR5K_EEPROM_READ(o++, val);
ee->ee_false_detect[mode] = (val >> 6) & 0x7f;
if (mode == AR5K_EEPROM_MODE_11A)
ee->ee_xr_power[mode] = val & 0x3f;
else {
/* b_DB_11[bg] and b_OB_11[bg] */
ee->ee_ob[mode][0] = val & 0x7;
ee->ee_db[mode][0] = (val >> 3) & 0x7;
}
}
if (ah->ah_ee_version < AR5K_EEPROM_VERSION_3_4) {
ee->ee_i_gain[mode] = AR5K_EEPROM_I_GAIN;
ee->ee_cck_ofdm_power_delta = AR5K_EEPROM_CCK_OFDM_DELTA;
} else {
ee->ee_i_gain[mode] = (val >> 13) & 0x7;
AR5K_EEPROM_READ(o++, val);
ee->ee_i_gain[mode] |= (val << 3) & 0x38;
if (mode == AR5K_EEPROM_MODE_11G) {
ee->ee_cck_ofdm_power_delta = (val >> 3) & 0xff;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_6)
ee->ee_scaled_cck_delta = (val >> 11) & 0x1f;
}
}
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0 &&
mode == AR5K_EEPROM_MODE_11A) {
ee->ee_i_cal[mode] = (val >> 8) & 0x3f;
ee->ee_q_cal[mode] = (val >> 3) & 0x1f;
}
if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_0)
goto done;
/* Note: EEPROM versions >= 5 have the bg freq piers at another
* location, so these freq piers are ignored for >= v5 (they should
* be 0xff anyway) */
switch (mode) {
case AR5K_EEPROM_MODE_11A:
if (ah->ah_ee_version < AR5K_EEPROM_VERSION_4_1)
break;
AR5K_EEPROM_READ(o++, val);
ee->ee_margin_tx_rx[mode] = val & 0x3f;
break;
case AR5K_EEPROM_MODE_11B:
AR5K_EEPROM_READ(o++, val);
ee->ee_pwr_cal_b[0].freq =
ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
if (ee->ee_pwr_cal_b[0].freq != AR5K_EEPROM_CHANNEL_DIS)
ee->ee_n_piers[mode]++;
ee->ee_pwr_cal_b[1].freq =
ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode);
if (ee->ee_pwr_cal_b[1].freq != AR5K_EEPROM_CHANNEL_DIS)
ee->ee_n_piers[mode]++;
AR5K_EEPROM_READ(o++, val);
ee->ee_pwr_cal_b[2].freq =
ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
if (ee->ee_pwr_cal_b[2].freq != AR5K_EEPROM_CHANNEL_DIS)
ee->ee_n_piers[mode]++;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
break;
case AR5K_EEPROM_MODE_11G:
AR5K_EEPROM_READ(o++, val);
ee->ee_pwr_cal_g[0].freq =
ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
if (ee->ee_pwr_cal_g[0].freq != AR5K_EEPROM_CHANNEL_DIS)
ee->ee_n_piers[mode]++;
ee->ee_pwr_cal_g[1].freq =
ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode);
if (ee->ee_pwr_cal_g[1].freq != AR5K_EEPROM_CHANNEL_DIS)
ee->ee_n_piers[mode]++;
AR5K_EEPROM_READ(o++, val);
ee->ee_turbo_max_power[mode] = val & 0x7f;
ee->ee_xr_power[mode] = (val >> 7) & 0x3f;
AR5K_EEPROM_READ(o++, val);
ee->ee_pwr_cal_g[2].freq =
ath5k_eeprom_bin2freq(ee, val & 0xff, mode);
if (ee->ee_pwr_cal_g[2].freq != AR5K_EEPROM_CHANNEL_DIS)
ee->ee_n_piers[mode]++;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_1)
ee->ee_margin_tx_rx[mode] = (val >> 8) & 0x3f;
AR5K_EEPROM_READ(o++, val);
ee->ee_i_cal[mode] = (val >> 5) & 0x3f;
ee->ee_q_cal[mode] = val & 0x1f;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_2) {
AR5K_EEPROM_READ(o++, val);
ee->ee_cck_ofdm_gain_delta = val & 0xff;
}
break;
}
/*
* Read turbo mode information on newer EEPROM versions
*/
if (ee->ee_version < AR5K_EEPROM_VERSION_5_0)
goto done;
switch (mode) {
case AR5K_EEPROM_MODE_11A:
ee->ee_switch_settling_turbo[mode] = (val >> 6) & 0x7f;
ee->ee_atn_tx_rx_turbo[mode] = (val >> 13) & 0x7;
AR5K_EEPROM_READ(o++, val);
ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x7) << 3;
ee->ee_margin_tx_rx_turbo[mode] = (val >> 3) & 0x3f;
ee->ee_adc_desired_size_turbo[mode] = (val >> 9) & 0x7f;
AR5K_EEPROM_READ(o++, val);
ee->ee_adc_desired_size_turbo[mode] |= (val & 0x1) << 7;
ee->ee_pga_desired_size_turbo[mode] = (val >> 1) & 0xff;
if (AR5K_EEPROM_EEMAP(ee->ee_misc0) >= 2)
ee->ee_pd_gain_overlap = (val >> 9) & 0xf;
break;
case AR5K_EEPROM_MODE_11G:
ee->ee_switch_settling_turbo[mode] = (val >> 8) & 0x7f;
ee->ee_atn_tx_rx_turbo[mode] = (val >> 15) & 0x7;
AR5K_EEPROM_READ(o++, val);
ee->ee_atn_tx_rx_turbo[mode] |= (val & 0x1f) << 1;
ee->ee_margin_tx_rx_turbo[mode] = (val >> 5) & 0x3f;
ee->ee_adc_desired_size_turbo[mode] = (val >> 11) & 0x7f;
AR5K_EEPROM_READ(o++, val);
ee->ee_adc_desired_size_turbo[mode] |= (val & 0x7) << 5;
ee->ee_pga_desired_size_turbo[mode] = (val >> 3) & 0xff;
break;
}
done:
/* return new offset */
*offset = o;
return 0;
}
/* Read mode-specific data (except power calibration data) */
static int
ath5k_eeprom_init_modes(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 mode_offset[3];
unsigned int mode;
u32 offset;
int ret;
/*
* Get values for all modes
*/
mode_offset[AR5K_EEPROM_MODE_11A] = AR5K_EEPROM_MODES_11A(ah->ah_ee_version);
mode_offset[AR5K_EEPROM_MODE_11B] = AR5K_EEPROM_MODES_11B(ah->ah_ee_version);
mode_offset[AR5K_EEPROM_MODE_11G] = AR5K_EEPROM_MODES_11G(ah->ah_ee_version);
ee->ee_turbo_max_power[AR5K_EEPROM_MODE_11A] =
AR5K_EEPROM_HDR_T_5GHZ_DBM(ee->ee_header);
for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++) {
offset = mode_offset[mode];
ret = ath5k_eeprom_read_ants(ah, &offset, mode);
if (ret)
return ret;
ret = ath5k_eeprom_read_modes(ah, &offset, mode);
if (ret)
return ret;
}
/* override for older eeprom versions for better performance */
if (ah->ah_ee_version <= AR5K_EEPROM_VERSION_3_2) {
ee->ee_thr_62[AR5K_EEPROM_MODE_11A] = 15;
ee->ee_thr_62[AR5K_EEPROM_MODE_11B] = 28;
ee->ee_thr_62[AR5K_EEPROM_MODE_11G] = 28;
}
return 0;
}
/* Read the frequency piers for each mode (mostly used on newer eeproms with 0xff
* frequency mask) */
static inline int
ath5k_eeprom_read_freq_list(struct ath5k_hw *ah, int *offset, int max,
struct ath5k_chan_pcal_info *pc, unsigned int mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
int o = *offset;
int i = 0;
u8 freq1, freq2;
u16 val;
ee->ee_n_piers[mode] = 0;
while (i < max) {
AR5K_EEPROM_READ(o++, val);
freq1 = val & 0xff;
if (!freq1)
break;
pc[i++].freq = ath5k_eeprom_bin2freq(ee,
freq1, mode);
ee->ee_n_piers[mode]++;
freq2 = (val >> 8) & 0xff;
if (!freq2 || i >= max)
break;
pc[i++].freq = ath5k_eeprom_bin2freq(ee,
freq2, mode);
ee->ee_n_piers[mode]++;
}
/* return new offset */
*offset = o;
return 0;
}
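/* Decoding example (hypothetical word value): each 16-bit eeprom word
* packs two frequency bytes, so a read of val = 0x2319 yields
* freq1 = 0x19 and freq2 = 0x23, each handed to ath5k_eeprom_bin2freq()
* which maps the raw byte to MHz based on eeprom version and mode.
* A zero byte terminates the list early. */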
/* Read frequency piers for 802.11a */
static int
ath5k_eeprom_init_11a_pcal_freq(struct ath5k_hw *ah, int offset)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info *pcal = ee->ee_pwr_cal_a;
int i;
u16 val;
u8 mask;
if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) {
ath5k_eeprom_read_freq_list(ah, &offset,
AR5K_EEPROM_N_5GHZ_CHAN, pcal,
AR5K_EEPROM_MODE_11A);
} else {
mask = AR5K_EEPROM_FREQ_M(ah->ah_ee_version);
AR5K_EEPROM_READ(offset++, val);
pcal[0].freq = (val >> 9) & mask;
pcal[1].freq = (val >> 2) & mask;
pcal[2].freq = (val << 5) & mask;
AR5K_EEPROM_READ(offset++, val);
pcal[2].freq |= (val >> 11) & 0x1f;
pcal[3].freq = (val >> 4) & mask;
pcal[4].freq = (val << 3) & mask;
AR5K_EEPROM_READ(offset++, val);
pcal[4].freq |= (val >> 13) & 0x7;
pcal[5].freq = (val >> 6) & mask;
pcal[6].freq = (val << 1) & mask;
AR5K_EEPROM_READ(offset++, val);
pcal[6].freq |= (val >> 15) & 0x1;
pcal[7].freq = (val >> 8) & mask;
pcal[8].freq = (val >> 1) & mask;
pcal[9].freq = (val << 6) & mask;
AR5K_EEPROM_READ(offset++, val);
pcal[9].freq |= (val >> 10) & 0x3f;
/* Fixed number of piers */
ee->ee_n_piers[AR5K_EEPROM_MODE_11A] = 10;
for (i = 0; i < AR5K_EEPROM_N_5GHZ_CHAN; i++) {
pcal[i].freq = ath5k_eeprom_bin2freq(ee,
pcal[i].freq, AR5K_EEPROM_MODE_11A);
}
}
return 0;
}
/* Read frequency piers for 802.11bg on eeprom versions >= 5 and eemap >= 2 */
static inline int
ath5k_eeprom_init_11bg_2413(struct ath5k_hw *ah, unsigned int mode, int offset)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info *pcal;
switch (mode) {
case AR5K_EEPROM_MODE_11B:
pcal = ee->ee_pwr_cal_b;
break;
case AR5K_EEPROM_MODE_11G:
pcal = ee->ee_pwr_cal_g;
break;
default:
return -EINVAL;
}
ath5k_eeprom_read_freq_list(ah, &offset,
AR5K_EEPROM_N_2GHZ_CHAN_2413, pcal,
mode);
return 0;
}
/*
* Read power calibration for RF5111 chips
*
* For RF5111 we have an XPD -eXternal Power Detector- curve
* for each calibrated channel. Each curve has 0.5 dB power steps
* on the x axis and PCDAC steps (offsets) on the y axis and looks like an
* exponential function. To recreate the curve we read 11 points
* here and interpolate later.
*/
/* Used to match PCDAC steps with power values on RF5111 chips
* (eeprom versions < 4). For RF5111 we have 11 pre-defined PCDAC
* steps that match with the power values we read from eeprom. On
* newer eeprom versions (>= 3.2) these steps are equally spaced at
* 10% of the pcdac curve -until the curve reaches its maximum-
* (11 steps from 0 to 100%), while on older eeprom versions (< 3.2)
* the 11 steps are spaced unevenly (note the intercept tables below).
* This function returns the pcdac steps based on eeprom version and
* curve min/max so that we can have pcdac/pwr points.
*/
static inline void
ath5k_get_pcdac_intercepts(struct ath5k_hw *ah, u8 min, u8 max, u8 *vp)
{
static const u16 intercepts3[] = {
0, 5, 10, 20, 30, 50, 70, 85, 90, 95, 100
};
static const u16 intercepts3_2[] = {
0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100
};
const u16 *ip;
int i;
if (ah->ah_ee_version >= AR5K_EEPROM_VERSION_3_2)
ip = intercepts3_2;
else
ip = intercepts3;
for (i = 0; i < ARRAY_SIZE(intercepts3); i++)
vp[i] = (ip[i] * max + (100 - ip[i]) * min) / 100;
}
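/* Worked example with hypothetical curve limits min = 10, max = 60:
* on a >= 3.2 eeprom the 4th intercept is 30%, so
* vp[3] = (30 * 60 + 70 * 10) / 100 = 25, while on an older eeprom
* the 4th intercept is 20%, giving
* vp[3] = (20 * 60 + 80 * 10) / 100 = 20. Each vp[i] is a weighted
* average that slides from min (at 0%) up to max (at 100%). */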
static int
ath5k_eeprom_free_pcal_info(struct ath5k_hw *ah, int mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info *chinfo;
u8 pier, pdg;
switch (mode) {
case AR5K_EEPROM_MODE_11A:
if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
return 0;
chinfo = ee->ee_pwr_cal_a;
break;
case AR5K_EEPROM_MODE_11B:
if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
return 0;
chinfo = ee->ee_pwr_cal_b;
break;
case AR5K_EEPROM_MODE_11G:
if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
return 0;
chinfo = ee->ee_pwr_cal_g;
break;
default:
return -EINVAL;
}
for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
if (!chinfo[pier].pd_curves)
continue;
for (pdg = 0; pdg < AR5K_EEPROM_N_PD_CURVES; pdg++) {
struct ath5k_pdgain_info *pd =
&chinfo[pier].pd_curves[pdg];
kfree(pd->pd_step);
kfree(pd->pd_pwr);
}
kfree(chinfo[pier].pd_curves);
}
return 0;
}
/* Convert RF5111 specific data to generic raw data
* used by interpolation code */
static int
ath5k_eeprom_convert_pcal_info_5111(struct ath5k_hw *ah, int mode,
struct ath5k_chan_pcal_info *chinfo)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info_rf5111 *pcinfo;
struct ath5k_pdgain_info *pd;
u8 pier, point, idx;
u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
/* Fill raw data for each calibration pier */
for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
pcinfo = &chinfo[pier].rf5111_info;
/* Allocate pd_curves for this cal pier */
chinfo[pier].pd_curves =
kcalloc(AR5K_EEPROM_N_PD_CURVES,
sizeof(struct ath5k_pdgain_info),
GFP_KERNEL);
if (!chinfo[pier].pd_curves)
goto err_out;
/* There is only one curve for RF5111;
* find out which one it is and place
* it in pd_curves.
* Note: ee_x_gain is reversed here */
for (idx = 0; idx < AR5K_EEPROM_N_PD_CURVES; idx++) {
if (!((ee->ee_x_gain[mode] >> idx) & 0x1)) {
pdgain_idx[0] = idx;
break;
}
}
if (idx == AR5K_EEPROM_N_PD_CURVES)
goto err_out;
ee->ee_pd_gains[mode] = 1;
pd = &chinfo[pier].pd_curves[idx];
pd->pd_points = AR5K_EEPROM_N_PWR_POINTS_5111;
/* Allocate pd points for this curve */
pd->pd_step = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
sizeof(u8), GFP_KERNEL);
if (!pd->pd_step)
goto err_out;
pd->pd_pwr = kcalloc(AR5K_EEPROM_N_PWR_POINTS_5111,
sizeof(s16), GFP_KERNEL);
if (!pd->pd_pwr)
goto err_out;
/* Fill raw dataset
* (convert power to 0.25dB units
* for RF5112 compatibility) */
for (point = 0; point < pd->pd_points; point++) {
/* Absolute values */
pd->pd_pwr[point] = 2 * pcinfo->pwr[point];
/* Already sorted */
pd->pd_step[point] = pcinfo->pcdac[point];
}
/* Set min/max pwr */
chinfo[pier].min_pwr = pd->pd_pwr[0];
chinfo[pier].max_pwr = pd->pd_pwr[10];
}
return 0;
err_out:
ath5k_eeprom_free_pcal_info(ah, mode);
return -ENOMEM;
}
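/* Unit-conversion example (hypothetical reading): the RF5111 eeprom
* stores power in 0.5 dB units, so a raw pwr[point] of 30 (15 dBm)
* becomes pd_pwr[point] = 2 * 30 = 60 in the 0.25 dB units that the
* RF5112-style interpolation code expects (60 * 0.25 = 15 dBm). */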
/* Parse EEPROM data */
static int
ath5k_eeprom_read_pcal_info_5111(struct ath5k_hw *ah, int mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info *pcal;
int offset, ret;
int i;
u16 val;
offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
switch (mode) {
case AR5K_EEPROM_MODE_11A:
if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
return 0;
ret = ath5k_eeprom_init_11a_pcal_freq(ah,
offset + AR5K_EEPROM_GROUP1_OFFSET);
if (ret < 0)
return ret;
offset += AR5K_EEPROM_GROUP2_OFFSET;
pcal = ee->ee_pwr_cal_a;
break;
case AR5K_EEPROM_MODE_11B:
if (!AR5K_EEPROM_HDR_11B(ee->ee_header) &&
!AR5K_EEPROM_HDR_11G(ee->ee_header))
return 0;
pcal = ee->ee_pwr_cal_b;
offset += AR5K_EEPROM_GROUP3_OFFSET;
/* fixed piers */
pcal[0].freq = 2412;
pcal[1].freq = 2447;
pcal[2].freq = 2484;
ee->ee_n_piers[mode] = 3;
break;
case AR5K_EEPROM_MODE_11G:
if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
return 0;
pcal = ee->ee_pwr_cal_g;
offset += AR5K_EEPROM_GROUP4_OFFSET;
/* fixed piers */
pcal[0].freq = 2312;
pcal[1].freq = 2412;
pcal[2].freq = 2484;
ee->ee_n_piers[mode] = 3;
break;
default:
return -EINVAL;
}
for (i = 0; i < ee->ee_n_piers[mode]; i++) {
struct ath5k_chan_pcal_info_rf5111 *cdata =
&pcal[i].rf5111_info;
AR5K_EEPROM_READ(offset++, val);
cdata->pcdac_max = ((val >> 10) & AR5K_EEPROM_PCDAC_M);
cdata->pcdac_min = ((val >> 4) & AR5K_EEPROM_PCDAC_M);
cdata->pwr[0] = ((val << 2) & AR5K_EEPROM_POWER_M);
AR5K_EEPROM_READ(offset++, val);
cdata->pwr[0] |= ((val >> 14) & 0x3);
cdata->pwr[1] = ((val >> 8) & AR5K_EEPROM_POWER_M);
cdata->pwr[2] = ((val >> 2) & AR5K_EEPROM_POWER_M);
cdata->pwr[3] = ((val << 4) & AR5K_EEPROM_POWER_M);
AR5K_EEPROM_READ(offset++, val);
cdata->pwr[3] |= ((val >> 12) & 0xf);
cdata->pwr[4] = ((val >> 6) & AR5K_EEPROM_POWER_M);
cdata->pwr[5] = (val & AR5K_EEPROM_POWER_M);
AR5K_EEPROM_READ(offset++, val);
cdata->pwr[6] = ((val >> 10) & AR5K_EEPROM_POWER_M);
cdata->pwr[7] = ((val >> 4) & AR5K_EEPROM_POWER_M);
cdata->pwr[8] = ((val << 2) & AR5K_EEPROM_POWER_M);
AR5K_EEPROM_READ(offset++, val);
cdata->pwr[8] |= ((val >> 14) & 0x3);
cdata->pwr[9] = ((val >> 8) & AR5K_EEPROM_POWER_M);
cdata->pwr[10] = ((val >> 2) & AR5K_EEPROM_POWER_M);
ath5k_get_pcdac_intercepts(ah, cdata->pcdac_min,
cdata->pcdac_max, cdata->pcdac);
}
return ath5k_eeprom_convert_pcal_info_5111(ah, mode, pcal);
}
/*
* Read power calibration for RF5112 chips
*
* For RF5112 we have 4 XPD -eXternal Power Detector- curves
* for each calibrated channel, at 0, -6, -12 and -18 dBm, but we only
* use the highest (3) and the lowest (0) curves. Each curve has 0.5 dB
* power steps on the x axis and PCDAC steps on the y axis and looks like a
* linear function. To recreate the curve and pass the power values
* on to hw, we read 4 points for xpd 0 (lower gain -> max power)
* and 3 points for xpd 3 (higher gain -> lower power) here and
* interpolate later.
*
* Note: Many vendors just use xpd 0 so xpd 3 is zeroed.
*/
/* Convert RF5112 specific data to generic raw data
* used by interpolation code */
static int
ath5k_eeprom_convert_pcal_info_5112(struct ath5k_hw *ah, int mode,
struct ath5k_chan_pcal_info *chinfo)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info_rf5112 *pcinfo;
u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
unsigned int pier, pdg, point;
/* Fill raw data for each calibration pier */
for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
pcinfo = &chinfo[pier].rf5112_info;
/* Allocate pd_curves for this cal pier */
chinfo[pier].pd_curves =
kcalloc(AR5K_EEPROM_N_PD_CURVES,
sizeof(struct ath5k_pdgain_info),
GFP_KERNEL);
if (!chinfo[pier].pd_curves)
goto err_out;
/* Fill pd_curves */
for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
u8 idx = pdgain_idx[pdg];
struct ath5k_pdgain_info *pd =
&chinfo[pier].pd_curves[idx];
/* Lowest gain curve (max power) */
if (pdg == 0) {
/* One more point for better accuracy */
pd->pd_points = AR5K_EEPROM_N_XPD0_POINTS;
/* Allocate pd points for this curve */
pd->pd_step = kcalloc(pd->pd_points,
sizeof(u8), GFP_KERNEL);
if (!pd->pd_step)
goto err_out;
pd->pd_pwr = kcalloc(pd->pd_points,
sizeof(s16), GFP_KERNEL);
if (!pd->pd_pwr)
goto err_out;
/* Fill raw dataset
* (all power levels are in 0.25dB units) */
pd->pd_step[0] = pcinfo->pcdac_x0[0];
pd->pd_pwr[0] = pcinfo->pwr_x0[0];
for (point = 1; point < pd->pd_points;
point++) {
/* Absolute values */
pd->pd_pwr[point] =
pcinfo->pwr_x0[point];
/* Deltas */
pd->pd_step[point] =
pd->pd_step[point - 1] +
pcinfo->pcdac_x0[point];
}
/* Set min power for this frequency */
chinfo[pier].min_pwr = pd->pd_pwr[0];
/* Highest gain curve (min power) */
} else if (pdg == 1) {
pd->pd_points = AR5K_EEPROM_N_XPD3_POINTS;
/* Allocate pd points for this curve */
pd->pd_step = kcalloc(pd->pd_points,
sizeof(u8), GFP_KERNEL);
if (!pd->pd_step)
goto err_out;
pd->pd_pwr = kcalloc(pd->pd_points,
sizeof(s16), GFP_KERNEL);
if (!pd->pd_pwr)
goto err_out;
/* Fill raw dataset
* (all power levels are in 0.25dB units) */
for (point = 0; point < pd->pd_points;
point++) {
/* Absolute values */
pd->pd_pwr[point] =
pcinfo->pwr_x3[point];
/* Fixed points */
pd->pd_step[point] =
pcinfo->pcdac_x3[point];
}
/* Since we have a higher gain curve
* override min power */
chinfo[pier].min_pwr = pd->pd_pwr[0];
}
}
}
return 0;
err_out:
ath5k_eeprom_free_pcal_info(ah, mode);
return -ENOMEM;
}
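/* Delta-decoding example (hypothetical values): for the xpd 0 curve the
* eeprom stores the first PCDAC step absolutely and the rest as deltas,
* so pcdac_x0 = {5, 10, 10, 11} unpacks to pd_step = {5, 15, 25, 36}
* via pd_step[n] = pd_step[n - 1] + pcdac_x0[n]; the pwr_x0 values are
* already absolute 0.25 dB readings. */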
/* Parse EEPROM data */
static int
ath5k_eeprom_read_pcal_info_5112(struct ath5k_hw *ah, int mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info_rf5112 *chan_pcal_info;
struct ath5k_chan_pcal_info *gen_chan_info;
u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
u32 offset;
u8 i, c;
u16 val;
u8 pd_gains = 0;
/* Count how many curves we have and
* identify which of the 4 available
* curves each one is.
* Curves are stored from lower (x0) to
* higher (x3) gain */
for (i = 0; i < AR5K_EEPROM_N_PD_CURVES; i++) {
/* ee_x_gain[mode] is x gain mask */
if ((ee->ee_x_gain[mode] >> i) & 0x1)
pdgain_idx[pd_gains++] = i;
}
ee->ee_pd_gains[mode] = pd_gains;
if (pd_gains == 0 || pd_gains > 2)
return -EINVAL;
switch (mode) {
case AR5K_EEPROM_MODE_11A:
/*
* Read 5GHz EEPROM channels
*/
offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
ath5k_eeprom_init_11a_pcal_freq(ah, offset);
offset += AR5K_EEPROM_GROUP2_OFFSET;
gen_chan_info = ee->ee_pwr_cal_a;
break;
case AR5K_EEPROM_MODE_11B:
offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
if (AR5K_EEPROM_HDR_11A(ee->ee_header))
offset += AR5K_EEPROM_GROUP3_OFFSET;
/* NB: frequency piers parsed during mode init */
gen_chan_info = ee->ee_pwr_cal_b;
break;
case AR5K_EEPROM_MODE_11G:
offset = AR5K_EEPROM_GROUPS_START(ee->ee_version);
if (AR5K_EEPROM_HDR_11A(ee->ee_header))
offset += AR5K_EEPROM_GROUP4_OFFSET;
else if (AR5K_EEPROM_HDR_11B(ee->ee_header))
offset += AR5K_EEPROM_GROUP2_OFFSET;
/* NB: frequency piers parsed during mode init */
gen_chan_info = ee->ee_pwr_cal_g;
break;
default:
return -EINVAL;
}
for (i = 0; i < ee->ee_n_piers[mode]; i++) {
chan_pcal_info = &gen_chan_info[i].rf5112_info;
/* Power values in quarter dB
* for the lower xpd gain curve
* (0 dBm -> higher output power) */
for (c = 0; c < AR5K_EEPROM_N_XPD0_POINTS; c++) {
AR5K_EEPROM_READ(offset++, val);
chan_pcal_info->pwr_x0[c] = (s8) (val & 0xff);
chan_pcal_info->pwr_x0[++c] = (s8) ((val >> 8) & 0xff);
}
/* PCDAC steps
* corresponding to the above power
* measurements */
AR5K_EEPROM_READ(offset++, val);
chan_pcal_info->pcdac_x0[1] = (val & 0x1f);
chan_pcal_info->pcdac_x0[2] = ((val >> 5) & 0x1f);
chan_pcal_info->pcdac_x0[3] = ((val >> 10) & 0x1f);
/* Power values in quarter dB
* for the higher xpd gain curve
* (18 dBm -> lower output power) */
AR5K_EEPROM_READ(offset++, val);
chan_pcal_info->pwr_x3[0] = (s8) (val & 0xff);
chan_pcal_info->pwr_x3[1] = (s8) ((val >> 8) & 0xff);
AR5K_EEPROM_READ(offset++, val);
chan_pcal_info->pwr_x3[2] = (val & 0xff);
/* PCDAC steps
* corresponding to the above power
* measurements (fixed) */
chan_pcal_info->pcdac_x3[0] = 20;
chan_pcal_info->pcdac_x3[1] = 35;
chan_pcal_info->pcdac_x3[2] = 63;
if (ee->ee_version >= AR5K_EEPROM_VERSION_4_3) {
chan_pcal_info->pcdac_x0[0] = ((val >> 8) & 0x3f);
/* Last xpd0 power level is also channel maximum */
gen_chan_info[i].max_pwr = chan_pcal_info->pwr_x0[3];
} else {
chan_pcal_info->pcdac_x0[0] = 1;
gen_chan_info[i].max_pwr = (s8) ((val >> 8) & 0xff);
}
}
return ath5k_eeprom_convert_pcal_info_5112(ah, mode, gen_chan_info);
}
/*
* Read power calibration for RF2413 chips
*
* For RF2413 we have a Power to PDDAC table (Power Detector)
* instead of a PCDAC and 4 pd gain curves for each calibrated channel.
* Each curve has power on the x axis in 0.5 dB steps and PDDAC steps on
* the y axis and looks like an exponential function, like the RF5111 curve.
*
* To recreate the curves we read the points here and interpolate
* later. Note that in most cases only 2 (the higher and lower) curves are
* used (like RF5112) but vendors have the option to include all
* 4 curves on eeprom. The final curve (higher power) has an extra
* point for better accuracy, like RF5112.
*/
/* For RF2413, power calibration data doesn't start at a fixed location and,
* if a mode is not supported, its section is missing -not zeroed-.
* So we need to calculate the starting offset for each section by using
* these two functions */
/* Return the size of each section based on the mode and the number of pd
* gains available (maximum 4). */
static inline unsigned int
ath5k_pdgains_size_2413(struct ath5k_eeprom_info *ee, unsigned int mode)
{
static const unsigned int pdgains_size[] = { 4, 6, 9, 12 };
unsigned int sz;
sz = pdgains_size[ee->ee_pd_gains[mode] - 1];
sz *= ee->ee_n_piers[mode];
return sz;
}
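/* Size example (hypothetical numbers): with 2 pd gain curves and
* 5 calibration piers in a mode, pdgains_size[1] = 6 words per pier,
* so the section occupies 6 * 5 = 30 eeprom words (the same units the
* offsets below are counted in). */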
/* Return the starting offset for a section based on the modes supported
* and each section's size. */
static unsigned int
ath5k_cal_data_offset_2413(struct ath5k_eeprom_info *ee, int mode)
{
u32 offset = AR5K_EEPROM_CAL_DATA_START(ee->ee_misc4);
switch (mode) {
case AR5K_EEPROM_MODE_11G:
if (AR5K_EEPROM_HDR_11B(ee->ee_header))
offset += ath5k_pdgains_size_2413(ee,
AR5K_EEPROM_MODE_11B) +
AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
fallthrough;
case AR5K_EEPROM_MODE_11B:
if (AR5K_EEPROM_HDR_11A(ee->ee_header))
offset += ath5k_pdgains_size_2413(ee,
AR5K_EEPROM_MODE_11A) +
AR5K_EEPROM_N_5GHZ_CHAN / 2;
fallthrough;
case AR5K_EEPROM_MODE_11A:
break;
default:
break;
}
return offset;
}
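/* Offset example: asking for the 11g section on an eeprom that also
* carries 11b and 11a data falls through both cases above, so the
* result is
*   CAL_DATA_START + size(11b) + AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2
*                  + size(11a) + AR5K_EEPROM_N_5GHZ_CHAN / 2
* i.e. the 11b and 11a pd-gain sections plus their frequency pier
* lists (two pier bytes per word) are skipped. */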
/* Convert RF2413 specific data to generic raw data
* used by interpolation code */
static int
ath5k_eeprom_convert_pcal_info_2413(struct ath5k_hw *ah, int mode,
struct ath5k_chan_pcal_info *chinfo)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info_rf2413 *pcinfo;
u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
unsigned int pier, pdg, point;
/* Fill raw data for each calibration pier */
for (pier = 0; pier < ee->ee_n_piers[mode]; pier++) {
pcinfo = &chinfo[pier].rf2413_info;
/* Allocate pd_curves for this cal pier */
chinfo[pier].pd_curves =
kcalloc(AR5K_EEPROM_N_PD_CURVES,
sizeof(struct ath5k_pdgain_info),
GFP_KERNEL);
if (!chinfo[pier].pd_curves)
goto err_out;
/* Fill pd_curves */
for (pdg = 0; pdg < ee->ee_pd_gains[mode]; pdg++) {
u8 idx = pdgain_idx[pdg];
struct ath5k_pdgain_info *pd =
&chinfo[pier].pd_curves[idx];
/* One more point for the highest power
* curve (lowest gain) */
if (pdg == ee->ee_pd_gains[mode] - 1)
pd->pd_points = AR5K_EEPROM_N_PD_POINTS;
else
pd->pd_points = AR5K_EEPROM_N_PD_POINTS - 1;
/* Allocate pd points for this curve */
pd->pd_step = kcalloc(pd->pd_points,
sizeof(u8), GFP_KERNEL);
if (!pd->pd_step)
goto err_out;
pd->pd_pwr = kcalloc(pd->pd_points,
sizeof(s16), GFP_KERNEL);
if (!pd->pd_pwr)
goto err_out;
/* Fill raw dataset
* convert all pwr levels to
* quarter dB for RF5112 compatibility */
pd->pd_step[0] = pcinfo->pddac_i[pdg];
pd->pd_pwr[0] = 4 * pcinfo->pwr_i[pdg];
for (point = 1; point < pd->pd_points; point++) {
pd->pd_pwr[point] = pd->pd_pwr[point - 1] +
2 * pcinfo->pwr[pdg][point - 1];
pd->pd_step[point] = pd->pd_step[point - 1] +
pcinfo->pddac[pdg][point - 1];
}
/* Highest gain curve -> min power */
if (pdg == 0)
chinfo[pier].min_pwr = pd->pd_pwr[0];
/* Lowest gain curve -> max power */
if (pdg == ee->ee_pd_gains[mode] - 1)
chinfo[pier].max_pwr =
pd->pd_pwr[pd->pd_points - 1];
}
}
return 0;
err_out:
ath5k_eeprom_free_pcal_info(ah, mode);
return -ENOMEM;
}
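/* Cumulative-decoding example (hypothetical values): with pwr_i = 5 and
* stored 0.5 dB deltas {2, 3, 1} for one curve, the loop above yields
* pd_pwr = {20, 24, 30, 32} in 0.25 dB units (5, 6, 7.5 and 8 dBm),
* since pd_pwr[0] = 4 * pwr_i and each later point adds twice the
* stored delta. */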
/* Parse EEPROM data */
static int
ath5k_eeprom_read_pcal_info_2413(struct ath5k_hw *ah, int mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info_rf2413 *pcinfo;
struct ath5k_chan_pcal_info *chinfo;
u8 *pdgain_idx = ee->ee_pdc_to_idx[mode];
u32 offset;
int idx, i;
u16 val;
u8 pd_gains = 0;
/* Count how many curves we have and
* identify which of the 4 available
* curves each one is.
* Curves are stored from higher to
* lower gain so we go backwards */
for (idx = AR5K_EEPROM_N_PD_CURVES - 1; idx >= 0; idx--) {
/* ee_x_gain[mode] is x gain mask */
if ((ee->ee_x_gain[mode] >> idx) & 0x1)
pdgain_idx[pd_gains++] = idx;
}
ee->ee_pd_gains[mode] = pd_gains;
if (pd_gains == 0)
return -EINVAL;
offset = ath5k_cal_data_offset_2413(ee, mode);
switch (mode) {
case AR5K_EEPROM_MODE_11A:
if (!AR5K_EEPROM_HDR_11A(ee->ee_header))
return 0;
ath5k_eeprom_init_11a_pcal_freq(ah, offset);
offset += AR5K_EEPROM_N_5GHZ_CHAN / 2;
chinfo = ee->ee_pwr_cal_a;
break;
case AR5K_EEPROM_MODE_11B:
if (!AR5K_EEPROM_HDR_11B(ee->ee_header))
return 0;
ath5k_eeprom_init_11bg_2413(ah, mode, offset);
offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
chinfo = ee->ee_pwr_cal_b;
break;
case AR5K_EEPROM_MODE_11G:
if (!AR5K_EEPROM_HDR_11G(ee->ee_header))
return 0;
ath5k_eeprom_init_11bg_2413(ah, mode, offset);
offset += AR5K_EEPROM_N_2GHZ_CHAN_2413 / 2;
chinfo = ee->ee_pwr_cal_g;
break;
default:
return -EINVAL;
}
for (i = 0; i < ee->ee_n_piers[mode]; i++) {
pcinfo = &chinfo[i].rf2413_info;
/*
* Read pwr_i, pddac_i and the first
* 2 pd points (pwr, pddac)
*/
AR5K_EEPROM_READ(offset++, val);
pcinfo->pwr_i[0] = val & 0x1f;
pcinfo->pddac_i[0] = (val >> 5) & 0x7f;
pcinfo->pwr[0][0] = (val >> 12) & 0xf;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pddac[0][0] = val & 0x3f;
pcinfo->pwr[0][1] = (val >> 6) & 0xf;
pcinfo->pddac[0][1] = (val >> 10) & 0x3f;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pwr[0][2] = val & 0xf;
pcinfo->pddac[0][2] = (val >> 4) & 0x3f;
pcinfo->pwr[0][3] = 0;
pcinfo->pddac[0][3] = 0;
if (pd_gains > 1) {
/*
* Pd gain 0 is not the last pd gain
* so it only has 3 pd points (no
* extra 4th point).
* Continue with pd gain 1.
*/
pcinfo->pwr_i[1] = (val >> 10) & 0x1f;
pcinfo->pddac_i[1] = (val >> 15) & 0x1;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pddac_i[1] |= (val & 0x3F) << 1;
pcinfo->pwr[1][0] = (val >> 6) & 0xf;
pcinfo->pddac[1][0] = (val >> 10) & 0x3f;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pwr[1][1] = val & 0xf;
pcinfo->pddac[1][1] = (val >> 4) & 0x3f;
pcinfo->pwr[1][2] = (val >> 10) & 0xf;
pcinfo->pddac[1][2] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pddac[1][2] |= (val & 0xF) << 2;
pcinfo->pwr[1][3] = 0;
pcinfo->pddac[1][3] = 0;
} else if (pd_gains == 1) {
/*
* Pd gain 0 is the last one so
* read the extra point.
*/
pcinfo->pwr[0][3] = (val >> 10) & 0xf;
pcinfo->pddac[0][3] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pddac[0][3] |= (val & 0xF) << 2;
}
/*
* Proceed with the other pd_gains
* as above.
*/
if (pd_gains > 2) {
pcinfo->pwr_i[2] = (val >> 4) & 0x1f;
pcinfo->pddac_i[2] = (val >> 9) & 0x7f;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pwr[2][0] = (val >> 0) & 0xf;
pcinfo->pddac[2][0] = (val >> 4) & 0x3f;
pcinfo->pwr[2][1] = (val >> 10) & 0xf;
pcinfo->pddac[2][1] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pddac[2][1] |= (val & 0xF) << 2;
pcinfo->pwr[2][2] = (val >> 4) & 0xf;
pcinfo->pddac[2][2] = (val >> 8) & 0x3f;
pcinfo->pwr[2][3] = 0;
pcinfo->pddac[2][3] = 0;
} else if (pd_gains == 2) {
pcinfo->pwr[1][3] = (val >> 4) & 0xf;
pcinfo->pddac[1][3] = (val >> 8) & 0x3f;
}
if (pd_gains > 3) {
pcinfo->pwr_i[3] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pwr_i[3] |= ((val >> 0) & 0x7) << 2;
pcinfo->pddac_i[3] = (val >> 3) & 0x7f;
pcinfo->pwr[3][0] = (val >> 10) & 0xf;
pcinfo->pddac[3][0] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pddac[3][0] |= (val & 0xF) << 2;
pcinfo->pwr[3][1] = (val >> 4) & 0xf;
pcinfo->pddac[3][1] = (val >> 8) & 0x3f;
pcinfo->pwr[3][2] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pwr[3][2] |= ((val >> 0) & 0x3) << 2;
pcinfo->pddac[3][2] = (val >> 2) & 0x3f;
pcinfo->pwr[3][3] = (val >> 8) & 0xf;
pcinfo->pddac[3][3] = (val >> 12) & 0xF;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pddac[3][3] |= ((val >> 0) & 0x3) << 4;
} else if (pd_gains == 3) {
pcinfo->pwr[2][3] = (val >> 14) & 0x3;
AR5K_EEPROM_READ(offset++, val);
pcinfo->pwr[2][3] |= ((val >> 0) & 0x3) << 2;
pcinfo->pddac[2][3] = (val >> 2) & 0x3f;
}
}
return ath5k_eeprom_convert_pcal_info_2413(ah, mode, chinfo);
}
/*
* Read per rate target power (this is the maximum tx power
* supported by the card). This info is used when setting
* tx power, no matter the channel.
*
* This also works for v5 EEPROMs.
*/
static int
ath5k_eeprom_read_target_rate_pwr_info(struct ath5k_hw *ah, unsigned int mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_rate_pcal_info *rate_pcal_info;
u8 *rate_target_pwr_num;
u32 offset;
u16 val;
int i;
offset = AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1);
rate_target_pwr_num = &ee->ee_rate_target_pwr_num[mode];
switch (mode) {
case AR5K_EEPROM_MODE_11A:
offset += AR5K_EEPROM_TARGET_PWR_OFF_11A(ee->ee_version);
rate_pcal_info = ee->ee_rate_tpwr_a;
ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_5GHZ_RATE_CHAN;
break;
case AR5K_EEPROM_MODE_11B:
offset += AR5K_EEPROM_TARGET_PWR_OFF_11B(ee->ee_version);
rate_pcal_info = ee->ee_rate_tpwr_b;
ee->ee_rate_target_pwr_num[mode] = 2; /* 3rd is g mode's 1st */
break;
case AR5K_EEPROM_MODE_11G:
offset += AR5K_EEPROM_TARGET_PWR_OFF_11G(ee->ee_version);
rate_pcal_info = ee->ee_rate_tpwr_g;
ee->ee_rate_target_pwr_num[mode] = AR5K_EEPROM_N_2GHZ_CHAN;
break;
default:
return -EINVAL;
}
/* Different freq mask for older eeproms (<= v3.2) */
if (ee->ee_version <= AR5K_EEPROM_VERSION_3_2) {
for (i = 0; i < (*rate_target_pwr_num); i++) {
AR5K_EEPROM_READ(offset++, val);
rate_pcal_info[i].freq =
ath5k_eeprom_bin2freq(ee, (val >> 9) & 0x7f, mode);
rate_pcal_info[i].target_power_6to24 = ((val >> 3) & 0x3f);
rate_pcal_info[i].target_power_36 = (val << 3) & 0x3f;
AR5K_EEPROM_READ(offset++, val);
if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS ||
val == 0) {
(*rate_target_pwr_num) = i;
break;
}
rate_pcal_info[i].target_power_36 |= ((val >> 13) & 0x7);
rate_pcal_info[i].target_power_48 = ((val >> 7) & 0x3f);
rate_pcal_info[i].target_power_54 = ((val >> 1) & 0x3f);
}
} else {
for (i = 0; i < (*rate_target_pwr_num); i++) {
AR5K_EEPROM_READ(offset++, val);
rate_pcal_info[i].freq =
ath5k_eeprom_bin2freq(ee, (val >> 8) & 0xff, mode);
rate_pcal_info[i].target_power_6to24 = ((val >> 2) & 0x3f);
rate_pcal_info[i].target_power_36 = (val << 4) & 0x3f;
AR5K_EEPROM_READ(offset++, val);
if (rate_pcal_info[i].freq == AR5K_EEPROM_CHANNEL_DIS ||
val == 0) {
(*rate_target_pwr_num) = i;
break;
}
rate_pcal_info[i].target_power_36 |= (val >> 12) & 0xf;
rate_pcal_info[i].target_power_48 = ((val >> 6) & 0x3f);
rate_pcal_info[i].target_power_54 = (val & 0x3f);
}
}
return 0;
}
/*
* Read per channel calibration info from EEPROM
*
* This info is used to calibrate the baseband power table. Imagine
* that for each channel there is a power curve that's hw-specific
* (depends on the amplifier etc.) and we try to "correct" this curve using
* offsets we pass on to the phy chip (baseband -> before the amplifier) so
* that it can use accurate power values when setting tx power (taking the
* amplifier's performance on each channel into account).
*
* The EEPROM provides us with the offsets for some pre-calibrated channels
* and we have to interpolate to create the full table for these channels and
* also the table for any other channel.
*/
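/* Interpolation sketch (hypothetical piers): if the eeprom carries
* calibration piers at 5180 and 5320 MHz, the correction for 5240 MHz
* is obtained by linearly interpolating between the two pier datasets,
* weighted by (5240 - 5180) / (5320 - 5180); the same idea fills in
* full tables for channels that were never calibrated. */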
static int
ath5k_eeprom_read_pcal_info(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
int (*read_pcal)(struct ath5k_hw *hw, int mode);
int mode;
int err;
if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_4_0) &&
(AR5K_EEPROM_EEMAP(ee->ee_misc0) == 1))
read_pcal = ath5k_eeprom_read_pcal_info_5112;
else if ((ah->ah_ee_version >= AR5K_EEPROM_VERSION_5_0) &&
(AR5K_EEPROM_EEMAP(ee->ee_misc0) == 2))
read_pcal = ath5k_eeprom_read_pcal_info_2413;
else
read_pcal = ath5k_eeprom_read_pcal_info_5111;
for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G;
mode++) {
err = read_pcal(ah, mode);
if (err)
return err;
err = ath5k_eeprom_read_target_rate_pwr_info(ah, mode);
if (err < 0)
return err;
}
return 0;
}
/* Read conformance test limits used for regulatory control */
static int
ath5k_eeprom_read_ctl_info(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_edge_power *rep;
unsigned int fmask, pmask;
unsigned int ctl_mode;
int i, j;
u32 offset;
u16 val;
pmask = AR5K_EEPROM_POWER_M;
fmask = AR5K_EEPROM_FREQ_M(ee->ee_version);
offset = AR5K_EEPROM_CTL(ee->ee_version);
ee->ee_ctls = AR5K_EEPROM_N_CTLS(ee->ee_version);
for (i = 0; i < ee->ee_ctls; i += 2) {
AR5K_EEPROM_READ(offset++, val);
ee->ee_ctl[i] = (val >> 8) & 0xff;
ee->ee_ctl[i + 1] = val & 0xff;
}
offset = AR5K_EEPROM_GROUP8_OFFSET;
if (ee->ee_version >= AR5K_EEPROM_VERSION_4_0)
offset += AR5K_EEPROM_TARGET_PWRSTART(ee->ee_misc1) -
AR5K_EEPROM_GROUP5_OFFSET;
else
offset += AR5K_EEPROM_GROUPS_START(ee->ee_version);
rep = ee->ee_ctl_pwr;
for (i = 0; i < ee->ee_ctls; i++) {
switch (ee->ee_ctl[i] & AR5K_CTL_MODE_M) {
case AR5K_CTL_11A:
case AR5K_CTL_TURBO:
ctl_mode = AR5K_EEPROM_MODE_11A;
break;
default:
ctl_mode = AR5K_EEPROM_MODE_11G;
break;
}
if (ee->ee_ctl[i] == 0) {
if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3)
offset += 8;
else
offset += 7;
rep += AR5K_EEPROM_N_EDGES;
continue;
}
if (ee->ee_version >= AR5K_EEPROM_VERSION_3_3) {
for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) {
AR5K_EEPROM_READ(offset++, val);
rep[j].freq = (val >> 8) & fmask;
rep[j + 1].freq = val & fmask;
}
for (j = 0; j < AR5K_EEPROM_N_EDGES; j += 2) {
AR5K_EEPROM_READ(offset++, val);
rep[j].edge = (val >> 8) & pmask;
rep[j].flag = (val >> 14) & 1;
rep[j + 1].edge = val & pmask;
rep[j + 1].flag = (val >> 6) & 1;
}
} else {
AR5K_EEPROM_READ(offset++, val);
rep[0].freq = (val >> 9) & fmask;
rep[1].freq = (val >> 2) & fmask;
rep[2].freq = (val << 5) & fmask;
AR5K_EEPROM_READ(offset++, val);
rep[2].freq |= (val >> 11) & 0x1f;
rep[3].freq = (val >> 4) & fmask;
rep[4].freq = (val << 3) & fmask;
AR5K_EEPROM_READ(offset++, val);
rep[4].freq |= (val >> 13) & 0x7;
rep[5].freq = (val >> 6) & fmask;
rep[6].freq = (val << 1) & fmask;
AR5K_EEPROM_READ(offset++, val);
rep[6].freq |= (val >> 15) & 0x1;
rep[7].freq = (val >> 8) & fmask;
rep[0].edge = (val >> 2) & pmask;
rep[1].edge = (val << 4) & pmask;
AR5K_EEPROM_READ(offset++, val);
rep[1].edge |= (val >> 12) & 0xf;
rep[2].edge = (val >> 6) & pmask;
rep[3].edge = val & pmask;
AR5K_EEPROM_READ(offset++, val);
rep[4].edge = (val >> 10) & pmask;
rep[5].edge = (val >> 4) & pmask;
rep[6].edge = (val << 2) & pmask;
AR5K_EEPROM_READ(offset++, val);
rep[6].edge |= (val >> 14) & 0x3;
rep[7].edge = (val >> 8) & pmask;
}
for (j = 0; j < AR5K_EEPROM_N_EDGES; j++) {
rep[j].freq = ath5k_eeprom_bin2freq(ee,
rep[j].freq, ctl_mode);
}
rep += AR5K_EEPROM_N_EDGES;
}
return 0;
}
static int
ath5k_eeprom_read_spur_chans(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 offset;
u16 val;
int i;
offset = AR5K_EEPROM_CTL(ee->ee_version) +
AR5K_EEPROM_N_CTLS(ee->ee_version);
if (ee->ee_version < AR5K_EEPROM_VERSION_5_3) {
/* No spur info for 5GHz */
ee->ee_spur_chans[0][0] = AR5K_EEPROM_NO_SPUR;
/* 2 channels for 2GHz (2464/2420) */
ee->ee_spur_chans[0][1] = AR5K_EEPROM_5413_SPUR_CHAN_1;
ee->ee_spur_chans[1][1] = AR5K_EEPROM_5413_SPUR_CHAN_2;
ee->ee_spur_chans[2][1] = AR5K_EEPROM_NO_SPUR;
} else {
for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) {
AR5K_EEPROM_READ(offset, val);
ee->ee_spur_chans[i][0] = val;
AR5K_EEPROM_READ(offset + AR5K_EEPROM_N_SPUR_CHANS,
val);
ee->ee_spur_chans[i][1] = val;
offset++;
}
}
return 0;
}
/***********************\
* Init/Detach functions *
\***********************/
/*
* Initialize eeprom data structure
*/
int
ath5k_eeprom_init(struct ath5k_hw *ah)
{
int err;
err = ath5k_eeprom_init_header(ah);
if (err < 0)
return err;
err = ath5k_eeprom_init_modes(ah);
if (err < 0)
return err;
err = ath5k_eeprom_read_pcal_info(ah);
if (err < 0)
return err;
err = ath5k_eeprom_read_ctl_info(ah);
if (err < 0)
return err;
err = ath5k_eeprom_read_spur_chans(ah);
if (err < 0)
return err;
return 0;
}
void
ath5k_eeprom_detach(struct ath5k_hw *ah)
{
u8 mode;
for (mode = AR5K_EEPROM_MODE_11A; mode <= AR5K_EEPROM_MODE_11G; mode++)
ath5k_eeprom_free_pcal_info(ah, mode);
}
int
ath5k_eeprom_mode_from_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
switch (channel->hw_value) {
case AR5K_MODE_11A:
return AR5K_EEPROM_MODE_11A;
case AR5K_MODE_11G:
return AR5K_EEPROM_MODE_11G;
case AR5K_MODE_11B:
return AR5K_EEPROM_MODE_11B;
default:
ATH5K_WARN(ah, "channel is not A/B/G!");
return AR5K_EEPROM_MODE_11A;
}
}
drivers/net/wireless/ath/ath5k/eeprom.c
/*
* Copyright (c) 2004-2008 Reyk Floeter <[email protected]>
* Copyright (c) 2006-2008 Nick Kossifidis <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/*************************************\
* DMA and interrupt masking functions *
\*************************************/
/**
* DOC: DMA and interrupt masking functions
*
* Here we set up descriptor pointers (rxdp/txdp), start/stop the dma engine
* and handle queue setup for the 5210 chipset (the rest are handled in qcu.c).
* We also set up the interrupt mask register (IMR) and read the various
* interrupt status registers (ISR).
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/*********\
* Receive *
\*********/
/**
* ath5k_hw_start_rx_dma() - Start DMA receive
* @ah: The &struct ath5k_hw
*/
void
ath5k_hw_start_rx_dma(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, AR5K_CR_RXE, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
}
/**
* ath5k_hw_stop_rx_dma() - Stop DMA receive
* @ah: The &struct ath5k_hw
*/
static int
ath5k_hw_stop_rx_dma(struct ath5k_hw *ah)
{
unsigned int i;
ath5k_hw_reg_write(ah, AR5K_CR_RXD, AR5K_CR);
/*
* It may take some time to disable the DMA receive unit
*/
for (i = 1000; i > 0 &&
(ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) != 0;
i--)
udelay(100);
if (!i)
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"failed to stop RX DMA !\n");
return i ? 0 : -EBUSY;
}
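/* Note on the timeout above: 1000 iterations of udelay(100) give the
* receive unit up to 100ms to drain before we report -EBUSY. */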
/**
* ath5k_hw_get_rxdp() - Get RX Descriptor's address
* @ah: The &struct ath5k_hw
*/
u32
ath5k_hw_get_rxdp(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_RXDP);
}
/**
* ath5k_hw_set_rxdp() - Set RX Descriptor's address
* @ah: The &struct ath5k_hw
* @phys_addr: RX descriptor address
*
* Returns -EIO if rx is active
*/
int
ath5k_hw_set_rxdp(struct ath5k_hw *ah, u32 phys_addr)
{
if (ath5k_hw_reg_read(ah, AR5K_CR) & AR5K_CR_RXE) {
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"tried to set RXDP while rx was active !\n");
return -EIO;
}
ath5k_hw_reg_write(ah, phys_addr, AR5K_RXDP);
return 0;
}
/**********\
* Transmit *
\**********/
/**
* ath5k_hw_start_tx_dma() - Start DMA transmit for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
* Start DMA transmit for a specific queue. Since the 5210 doesn't have
* QCU/DCU, queue parameters for 5210 are also set up here based on queue
* type (one queue for normal data and one queue for beacons). For queue
* setup on newer chips check out qcu.c. Returns -EINVAL if the queue number
* is out of range or the queue is inactive, and -EIO if the queue is disabled.
*
* NOTE: Must be called after setting up tx control descriptor for that
* queue (see below).
*/
int
ath5k_hw_start_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
u32 tx_queue;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* Return if queue is declared inactive */
if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
return -EINVAL;
if (ah->ah_version == AR5K_AR5210) {
tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
/*
* Set the queue by type on 5210
*/
switch (ah->ah_txq[queue].tqi_type) {
case AR5K_TX_QUEUE_DATA:
tx_queue |= AR5K_CR_TXE0 & ~AR5K_CR_TXD0;
break;
case AR5K_TX_QUEUE_BEACON:
tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
ath5k_hw_reg_write(ah, AR5K_BCR_TQ1V | AR5K_BCR_BDMAE,
AR5K_BSR);
break;
case AR5K_TX_QUEUE_CAB:
tx_queue |= AR5K_CR_TXE1 & ~AR5K_CR_TXD1;
ath5k_hw_reg_write(ah, AR5K_BCR_TQ1FV | AR5K_BCR_TQ1V |
AR5K_BCR_BDMAE, AR5K_BSR);
break;
default:
return -EINVAL;
}
/* Start queue */
ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
} else {
/* Return if queue is disabled */
if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXD, queue))
return -EIO;
/* Start queue */
AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXE, queue);
}
return 0;
}
/**
* ath5k_hw_stop_tx_dma() - Stop DMA transmit on a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
* Stop DMA transmit on a specific hw queue and drain queue so we don't
* have any pending frames. Returns -EBUSY if we still have pending frames,
* -EINVAL if queue number is out of range or inactive.
*/
static int
ath5k_hw_stop_tx_dma(struct ath5k_hw *ah, unsigned int queue)
{
unsigned int i = 40;
u32 tx_queue, pending;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/* Return if queue is declared inactive */
if (ah->ah_txq[queue].tqi_type == AR5K_TX_QUEUE_INACTIVE)
return -EINVAL;
if (ah->ah_version == AR5K_AR5210) {
tx_queue = ath5k_hw_reg_read(ah, AR5K_CR);
/*
* Set by queue type
*/
switch (ah->ah_txq[queue].tqi_type) {
case AR5K_TX_QUEUE_DATA:
tx_queue |= AR5K_CR_TXD0 & ~AR5K_CR_TXE0;
break;
case AR5K_TX_QUEUE_BEACON:
case AR5K_TX_QUEUE_CAB:
/* The original expression here was
* AR5K_CR_TXD1 & ~AR5K_CR_TXD1, which is
* always 0; mirroring the start path, the
* intent appears to be setting TXD1 while
* keeping TXE1 clear */
tx_queue |= AR5K_CR_TXD1 & ~AR5K_CR_TXE1;
ath5k_hw_reg_write(ah, 0, AR5K_BSR);
break;
default:
return -EINVAL;
}
/* Stop queue */
ath5k_hw_reg_write(ah, tx_queue, AR5K_CR);
ath5k_hw_reg_read(ah, AR5K_CR);
} else {
/*
* Enable DCU early termination to quickly
* flush any pending frames from QCU
*/
AR5K_REG_ENABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_DCU_EARLY);
/*
* Schedule TX disable and wait until queue is empty
*/
AR5K_REG_WRITE_Q(ah, AR5K_QCU_TXD, queue);
/* Wait for queue to stop */
for (i = 1000; i > 0 &&
(AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue) != 0);
i--)
udelay(100);
if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"queue %i didn't stop !\n", queue);
/* Check for pending frames */
i = 1000;
do {
pending = ath5k_hw_reg_read(ah,
AR5K_QUEUE_STATUS(queue)) &
AR5K_QCU_STS_FRMPENDCNT;
udelay(100);
} while (--i && pending);
/* For 2413+, order the PCU to drop packets
* using the QUIET mechanism */
if (ah->ah_mac_version >= (AR5K_SREV_AR2414 >> 4) &&
pending) {
/* Set periodicity and duration */
ath5k_hw_reg_write(ah,
AR5K_REG_SM(100, AR5K_QUIET_CTL2_QT_PER)|
AR5K_REG_SM(10, AR5K_QUIET_CTL2_QT_DUR),
AR5K_QUIET_CTL2);
/* Enable quiet period for current TSF */
ath5k_hw_reg_write(ah,
AR5K_QUIET_CTL1_QT_EN |
AR5K_REG_SM(ath5k_hw_reg_read(ah,
AR5K_TSF_L32_5211) >> 10,
AR5K_QUIET_CTL1_NEXT_QT_TSF),
AR5K_QUIET_CTL1);
/* Force channel idle high */
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5211,
AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
/* Wait a while and disable mechanism */
udelay(400);
AR5K_REG_DISABLE_BITS(ah, AR5K_QUIET_CTL1,
AR5K_QUIET_CTL1_QT_EN);
/* Re-check for pending frames */
i = 100;
do {
pending = ath5k_hw_reg_read(ah,
AR5K_QUEUE_STATUS(queue)) &
AR5K_QCU_STS_FRMPENDCNT;
udelay(100);
} while (--i && pending);
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5211,
AR5K_DIAG_SW_CHANNEL_IDLE_HIGH);
if (pending)
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"quiet mechanism didn't work q:%i !\n",
queue);
}
/*
* Disable DCU early termination
*/
AR5K_REG_DISABLE_BITS(ah, AR5K_QUEUE_MISC(queue),
AR5K_QCU_MISC_DCU_EARLY);
/* Clear register */
ath5k_hw_reg_write(ah, 0, AR5K_QCU_TXD);
if (pending) {
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"tx dma didn't stop (q:%i, frm:%i) !\n",
queue, pending);
return -EBUSY;
}
}
/* TODO: Check for success on 5210 else return error */
return 0;
}
/**
* ath5k_hw_stop_beacon_queue() - Stop beacon queue
* @ah: The &struct ath5k_hw
* @queue: The queue number
*
* Returns -EIO if queue didn't stop
*/
int
ath5k_hw_stop_beacon_queue(struct ath5k_hw *ah, unsigned int queue)
{
int ret;
ret = ath5k_hw_stop_tx_dma(ah, queue);
if (ret) {
ATH5K_DBG(ah, ATH5K_DEBUG_DMA,
"beacon queue didn't stop !\n");
return -EIO;
}
return 0;
}
/**
* ath5k_hw_get_txdp() - Get TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
*
* Get TX descriptor's address for a specific queue. For 5210 we ignore
* the queue number and use tx queue type since we only have 2 queues.
* We use TXDP0 for normal data queue and TXDP1 for beacon queue.
* For newer chips with QCU/DCU we just read the corresponding TXDP register.
*
* XXX: Is TXDP read and clear ?
*/
u32
ath5k_hw_get_txdp(struct ath5k_hw *ah, unsigned int queue)
{
u16 tx_reg;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/*
* Get the transmit queue descriptor pointer from the selected queue
*/
/*5210 doesn't have QCU*/
if (ah->ah_version == AR5K_AR5210) {
switch (ah->ah_txq[queue].tqi_type) {
case AR5K_TX_QUEUE_DATA:
tx_reg = AR5K_NOQCU_TXDP0;
break;
case AR5K_TX_QUEUE_BEACON:
case AR5K_TX_QUEUE_CAB:
tx_reg = AR5K_NOQCU_TXDP1;
break;
default:
return 0xffffffff;
}
} else {
tx_reg = AR5K_QUEUE_TXDP(queue);
}
return ath5k_hw_reg_read(ah, tx_reg);
}
/**
* ath5k_hw_set_txdp() - Set TX Descriptor's address for a specific queue
* @ah: The &struct ath5k_hw
* @queue: The hw queue number
* @phys_addr: The physical address
*
* Set TX descriptor's address for a specific queue. For 5210 we ignore
* the queue number and use the tx queue type since we only have 2 queues;
* as above, we use TXDP0 for the normal data queue and TXDP1 for the beacon queue.
* For newer chips with QCU/DCU we just set the corresponding TXDP register.
* Returns -EINVAL if queue type is invalid for 5210 and -EIO if queue is still
* active.
*/
int
ath5k_hw_set_txdp(struct ath5k_hw *ah, unsigned int queue, u32 phys_addr)
{
u16 tx_reg;
AR5K_ASSERT_ENTRY(queue, ah->ah_capabilities.cap_queues.q_tx_num);
/*
* Set the transmit queue descriptor pointer register by type
* on 5210
*/
if (ah->ah_version == AR5K_AR5210) {
switch (ah->ah_txq[queue].tqi_type) {
case AR5K_TX_QUEUE_DATA:
tx_reg = AR5K_NOQCU_TXDP0;
break;
case AR5K_TX_QUEUE_BEACON:
case AR5K_TX_QUEUE_CAB:
tx_reg = AR5K_NOQCU_TXDP1;
break;
default:
return -EINVAL;
}
} else {
/*
* Set the transmit queue descriptor pointer for
* the selected queue on QCU for 5211+
* (this won't work if the queue is still active)
*/
if (AR5K_REG_READ_Q(ah, AR5K_QCU_TXE, queue))
return -EIO;
tx_reg = AR5K_QUEUE_TXDP(queue);
}
/* Set descriptor pointer */
ath5k_hw_reg_write(ah, phys_addr, tx_reg);
return 0;
}
/**
* ath5k_hw_update_tx_triglevel() - Update tx trigger level
* @ah: The &struct ath5k_hw
* @increase: Flag to force increase of trigger level
*
* This function increases/decreases the tx trigger level for the tx fifo
* buffer (aka FIFO threshold) that is used to indicate when the PCU flushes
* the buffer and transmits its data. Lowering this value results in sending
* small frames more quickly but can lead to tx underruns; raising it a lot
* can result in other problems. Right now we start with the lowest possible
* value (64 bytes) and if we get a tx underrun we increase it using the
* increase flag. Returns -EIO if we have reached the maximum/minimum.
*
* XXX: Link this with tx DMA size ?
* XXX2: Use it to save interrupts ?
*/
int
ath5k_hw_update_tx_triglevel(struct ath5k_hw *ah, bool increase)
{
u32 trigger_level, imr;
int ret = -EIO;
/*
* Disable interrupts by setting the mask
*/
imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
trigger_level = AR5K_REG_MS(ath5k_hw_reg_read(ah, AR5K_TXCFG),
AR5K_TXCFG_TXFULL);
if (!increase) {
if (--trigger_level < AR5K_TUNE_MIN_TX_FIFO_THRES)
goto done;
} else
trigger_level +=
((AR5K_TUNE_MAX_TX_FIFO_THRES - trigger_level) / 2);
/*
* Update trigger level on success
*/
if (ah->ah_version == AR5K_AR5210)
ath5k_hw_reg_write(ah, trigger_level, AR5K_TRIG_LVL);
else
AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
AR5K_TXCFG_TXFULL, trigger_level);
ret = 0;
done:
/*
* Restore interrupt mask
*/
ath5k_hw_set_imr(ah, imr);
return ret;
}
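/* Growth example (hypothetical register values): if the maximum
* threshold were 512 and the current level 64, each underrun-driven
* call with increase set would move the level halfway to the maximum:
* 64 -> 288 -> 400 -> 456 -> ..., approaching but never exceeding
* AR5K_TUNE_MAX_TX_FIFO_THRES. */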
/*******************\
* Interrupt masking *
\*******************/
/**
* ath5k_hw_is_intr_pending() - Check if we have pending interrupts
* @ah: The &struct ath5k_hw
*
* Check if we have pending interrupts to process. Returns 1 if we
* have pending interrupts and 0 if we don't.
*/
bool
ath5k_hw_is_intr_pending(struct ath5k_hw *ah)
{
return ath5k_hw_reg_read(ah, AR5K_INTPEND) == 1 ? 1 : 0;
}
/**
* ath5k_hw_get_isr() - Get interrupt status
* @ah: The &struct ath5k_hw
* @interrupt_mask: Driver's interrupt mask used to filter out
* interrupts in sw.
*
* This function is used inside our interrupt handler to determine the reason
* for the interrupt by reading Primary Interrupt Status Register. Returns an
* abstract interrupt status mask which is mostly ISR with some uncommon bits
* being mapped on some standard non hw-specific positions
* (check out &ath5k_int).
*
* NOTE: We do write-to-clear, so the active PISR/SISR bits at the time this
* function gets called are cleared on return.
*/
int
ath5k_hw_get_isr(struct ath5k_hw *ah, enum ath5k_int *interrupt_mask)
{
u32 data = 0;
/*
* Read interrupt status from Primary Interrupt
* Register.
*
* Note: PISR/SISR Not available on 5210
*/
if (ah->ah_version == AR5K_AR5210) {
u32 isr = 0;
isr = ath5k_hw_reg_read(ah, AR5K_ISR);
if (unlikely(isr == AR5K_INT_NOCARD)) {
*interrupt_mask = isr;
return -ENODEV;
}
/*
* Filter out the non-common bits from the interrupt
* status.
*/
*interrupt_mask = (isr & AR5K_INT_COMMON) & ah->ah_imr;
/* Handle INT_FATAL */
if (unlikely(isr & (AR5K_ISR_SSERR | AR5K_ISR_MCABT
| AR5K_ISR_DPERR)))
*interrupt_mask |= AR5K_INT_FATAL;
/*
* XXX: BMISS interrupts may occur after association.
* I found this on 5210 code but it needs testing. If this is
* true we should disable them before assoc and re-enable them
* after a successful assoc + some jiffies.
interrupt_mask &= ~AR5K_INT_BMISS;
*/
data = isr;
} else {
u32 pisr = 0;
u32 pisr_clear = 0;
u32 sisr0 = 0;
u32 sisr1 = 0;
u32 sisr2 = 0;
u32 sisr3 = 0;
u32 sisr4 = 0;
/* Read PISR and SISRs... */
pisr = ath5k_hw_reg_read(ah, AR5K_PISR);
if (unlikely(pisr == AR5K_INT_NOCARD)) {
*interrupt_mask = pisr;
return -ENODEV;
}
sisr0 = ath5k_hw_reg_read(ah, AR5K_SISR0);
sisr1 = ath5k_hw_reg_read(ah, AR5K_SISR1);
sisr2 = ath5k_hw_reg_read(ah, AR5K_SISR2);
sisr3 = ath5k_hw_reg_read(ah, AR5K_SISR3);
sisr4 = ath5k_hw_reg_read(ah, AR5K_SISR4);
/*
* PISR holds the logical OR of interrupt bits
* from SISR registers:
*
* TXOK and TXDESC -> Logical OR of TXOK and TXDESC
* per-queue bits on SISR0
*
* TXERR and TXEOL -> Logical OR of TXERR and TXEOL
* per-queue bits on SISR1
*
* TXURN -> Logical OR of TXURN per-queue bits on SISR2
*
* HIUERR -> Logical OR of MCABT, SSERR and DPER bits on SISR2
*
* BCNMISC -> Logical OR of TIM, CAB_END, DTIM_SYNC
* BCN_TIMEOUT, CAB_TIMEOUT and DTIM
* (and TSFOOR ?) bits on SISR2
*
* QCBRORN and QCBRURN -> Logical OR of QCBRORN and
* QCBRURN per-queue bits on SISR3
* QTRIG -> Logical OR of QTRIG per-queue bits on SISR4
*
* If we clear these bits on PISR we'll also clear all
* related bits from the SISRs, e.g. if we write the TXOK bit on
* PISR we'll clear all TXOK bits from SISR0, so if a new TXOK
* interrupt fired for another queue while we were reading
* the interrupt registers and we write back the TXOK bit on
* PISR we'll lose it. So make sure that we don't write back
* on PISR any bits that come from SISRs. Clearing them from
* SISRs will also clear PISR so no need to worry here.
*/
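/* Example of the race described above (hypothetical): suppose SISR0
* shows TXOK for queue 0 when we read it and queue 3 completes a frame
* just after; writing TXOK back to PISR would wipe queue 3's bit from
* SISR0 before we ever saw it. That's why SISR-derived bits are masked
* out of pisr_clear below (the tx bits are a deliberate exception, see
* the next note). */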
/* XXX: There seems to be an issue on some cards
* with tx interrupt flags not being updated
* on PISR despite that all Tx interrupt bits
* are cleared on SISRs. Since we handle all
* Tx queues all together it shouldn't be an
* issue if we clear Tx interrupt flags also
* on PISR to avoid that.
*/
pisr_clear = (pisr & ~AR5K_ISR_BITS_FROM_SISRS) |
(pisr & AR5K_INT_TX_ALL);
/*
* Write to clear them...
* Note: This means that each bit we write back
* to the registers will get cleared, leaving the
* rest unaffected. So this won't affect new interrupts
* we didn't catch while reading/processing; we'll get
* them the next time get_isr gets called.
*/
ath5k_hw_reg_write(ah, sisr0, AR5K_SISR0);
ath5k_hw_reg_write(ah, sisr1, AR5K_SISR1);
ath5k_hw_reg_write(ah, sisr2, AR5K_SISR2);
ath5k_hw_reg_write(ah, sisr3, AR5K_SISR3);
ath5k_hw_reg_write(ah, sisr4, AR5K_SISR4);
ath5k_hw_reg_write(ah, pisr_clear, AR5K_PISR);
/* Flush previous write */
ath5k_hw_reg_read(ah, AR5K_PISR);
/*
* Filter out the non-common bits from the interrupt
* status.
*/
*interrupt_mask = (pisr & AR5K_INT_COMMON) & ah->ah_imr;
ah->ah_txq_isr_txok_all = 0;
/* We treat TXOK, TXDESC, TXERR and TXEOL
* the same way (schedule the tx tasklet)
* so we track them all together per queue */
if (pisr & AR5K_ISR_TXOK)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
AR5K_SISR0_QCU_TXOK);
if (pisr & AR5K_ISR_TXDESC)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr0,
AR5K_SISR0_QCU_TXDESC);
if (pisr & AR5K_ISR_TXERR)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXERR);
if (pisr & AR5K_ISR_TXEOL)
ah->ah_txq_isr_txok_all |= AR5K_REG_MS(sisr1,
AR5K_SISR1_QCU_TXEOL);
/* Misc Beacon related interrupts */
/* For AR5211 */
if (pisr & AR5K_ISR_TIM)
*interrupt_mask |= AR5K_INT_TIM;
/* For AR5212+ */
if (pisr & AR5K_ISR_BCNMISC) {
if (sisr2 & AR5K_SISR2_TIM)
*interrupt_mask |= AR5K_INT_TIM;
if (sisr2 & AR5K_SISR2_DTIM)
*interrupt_mask |= AR5K_INT_DTIM;
if (sisr2 & AR5K_SISR2_DTIM_SYNC)
*interrupt_mask |= AR5K_INT_DTIM_SYNC;
if (sisr2 & AR5K_SISR2_BCN_TIMEOUT)
*interrupt_mask |= AR5K_INT_BCN_TIMEOUT;
if (sisr2 & AR5K_SISR2_CAB_TIMEOUT)
*interrupt_mask |= AR5K_INT_CAB_TIMEOUT;
}
/* Below interrupts are unlikely to happen */
/* HIU = Host Interface Unit (PCI etc)
* Can be one of MCABT, SSERR, DPERR from SISR2 */
if (unlikely(pisr & (AR5K_ISR_HIUERR)))
*interrupt_mask |= AR5K_INT_FATAL;
/*Beacon Not Ready*/
if (unlikely(pisr & (AR5K_ISR_BNR)))
*interrupt_mask |= AR5K_INT_BNR;
/* A queue got CBR overrun */
if (unlikely(pisr & (AR5K_ISR_QCBRORN)))
*interrupt_mask |= AR5K_INT_QCBRORN;
/* A queue got CBR underrun */
if (unlikely(pisr & (AR5K_ISR_QCBRURN)))
*interrupt_mask |= AR5K_INT_QCBRURN;
/* A queue got triggered */
if (unlikely(pisr & (AR5K_ISR_QTRIG)))
*interrupt_mask |= AR5K_INT_QTRIG;
data = pisr;
}
/*
* In case we didn't handle anything,
* print the register value.
*/
if (unlikely(*interrupt_mask == 0 && net_ratelimit()))
ATH5K_PRINTF("ISR: 0x%08x IMR: 0x%08x\n", data, ah->ah_imr);
return 0;
}
/**
* ath5k_hw_set_imr() - Set interrupt mask
* @ah: The &struct ath5k_hw
* @new_mask: The new interrupt mask to be set
*
* Set the interrupt mask in hw to save interrupts. We do that by mapping
* ath5k_int bits to hw-specific bits to remove the abstraction, and writing
* the Interrupt Mask Register.
*/
enum ath5k_int
ath5k_hw_set_imr(struct ath5k_hw *ah, enum ath5k_int new_mask)
{
enum ath5k_int old_mask, int_mask;
old_mask = ah->ah_imr;
/*
* Disable card interrupts to prevent any race conditions
* (they will be re-enabled afterwards if AR5K_INT_GLOBAL
* is set again on the new mask).
*/
if (old_mask & AR5K_INT_GLOBAL) {
ath5k_hw_reg_write(ah, AR5K_IER_DISABLE, AR5K_IER);
ath5k_hw_reg_read(ah, AR5K_IER);
}
/*
* Add additional, chipset-dependent interrupt mask flags
* and write them to the IMR (interrupt mask register).
*/
int_mask = new_mask & AR5K_INT_COMMON;
if (ah->ah_version != AR5K_AR5210) {
/* Preserve per queue TXURN interrupt mask */
u32 simr2 = ath5k_hw_reg_read(ah, AR5K_SIMR2)
& AR5K_SIMR2_QCU_TXURN;
/* Fatal interrupt abstraction for 5211+ */
if (new_mask & AR5K_INT_FATAL) {
int_mask |= AR5K_IMR_HIUERR;
simr2 |= (AR5K_SIMR2_MCABT | AR5K_SIMR2_SSERR
| AR5K_SIMR2_DPERR);
}
/* Misc beacon related interrupts */
if (new_mask & AR5K_INT_TIM) {
int_mask |= AR5K_IMR_TIM;
simr2 |= AR5K_SISR2_TIM;
}
if (new_mask & AR5K_INT_DTIM)
simr2 |= AR5K_SISR2_DTIM;
if (new_mask & AR5K_INT_DTIM_SYNC)
simr2 |= AR5K_SISR2_DTIM_SYNC;
if (new_mask & AR5K_INT_BCN_TIMEOUT)
simr2 |= AR5K_SISR2_BCN_TIMEOUT;
if (new_mask & AR5K_INT_CAB_TIMEOUT)
simr2 |= AR5K_SISR2_CAB_TIMEOUT;
/*Beacon Not Ready*/
if (new_mask & AR5K_INT_BNR)
int_mask |= AR5K_INT_BNR;
/* Note: Per queue interrupt masks
* are set via ath5k_hw_reset_tx_queue() (qcu.c) */
ath5k_hw_reg_write(ah, int_mask, AR5K_PIMR);
ath5k_hw_reg_write(ah, simr2, AR5K_SIMR2);
} else {
/* Fatal interrupt abstraction for 5210 */
if (new_mask & AR5K_INT_FATAL)
int_mask |= (AR5K_IMR_SSERR | AR5K_IMR_MCABT
| AR5K_IMR_HIUERR | AR5K_IMR_DPERR);
/* Only common interrupts left for 5210 (no SIMRs) */
ath5k_hw_reg_write(ah, int_mask, AR5K_IMR);
}
/* If the RXNOFRM interrupt is masked, disable it
* by setting AR5K_RXNOFRM to zero */
if (!(new_mask & AR5K_INT_RXNOFRM))
ath5k_hw_reg_write(ah, 0, AR5K_RXNOFRM);
/* Store new interrupt mask */
ah->ah_imr = new_mask;
/* ..re-enable interrupts if AR5K_INT_GLOBAL is set */
if (new_mask & AR5K_INT_GLOBAL) {
ath5k_hw_reg_write(ah, AR5K_IER_ENABLE, AR5K_IER);
ath5k_hw_reg_read(ah, AR5K_IER);
}
return old_mask;
}
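/* Typical usage (as in ath5k_hw_update_tx_triglevel() above): the old
* mask is returned so callers can save/restore around a critical
* section, e.g.
*   imr = ath5k_hw_set_imr(ah, ah->ah_imr & ~AR5K_INT_GLOBAL);
*   ... touch registers ...
*   ath5k_hw_set_imr(ah, imr);
*/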
/********************\
Init/Stop functions
\********************/
/**
* ath5k_hw_dma_init() - Initialize DMA unit
* @ah: The &struct ath5k_hw
*
* Set DMA size and pre-enable interrupts
* (driver handles tx/rx buffer setup and
* dma start/stop)
*
* XXX: Save/restore RXDP/TXDP registers ?
*/
void
ath5k_hw_dma_init(struct ath5k_hw *ah)
{
/*
* Set Rx/Tx DMA Configuration
*
* Set standard DMA size (128). Note that
* a DMA size of 512 causes rx overruns and tx errors
* on pci-e cards (tested on 5424 but since rx overruns
* also occur on 5416/5418 with madwifi we set 128
* for all PCI-E cards to be safe).
*
* XXX: need to check 5210 for this
* TODO: Check out tx trigger level, it's always 64 on dumps but I
* guess we can tweak it and see how it goes ;-)
*/
if (ah->ah_version != AR5K_AR5210) {
AR5K_REG_WRITE_BITS(ah, AR5K_TXCFG,
AR5K_TXCFG_SDMAMR, AR5K_DMASIZE_128B);
AR5K_REG_WRITE_BITS(ah, AR5K_RXCFG,
AR5K_RXCFG_SDMAMW, AR5K_DMASIZE_128B);
}
/* Pre-enable interrupts on 5211/5212*/
if (ah->ah_version != AR5K_AR5210)
ath5k_hw_set_imr(ah, ah->ah_imr);
}
/**
* ath5k_hw_dma_stop() - stop DMA unit
* @ah: The &struct ath5k_hw
*
* Stop tx/rx DMA and interrupts. Returns
* -EBUSY if tx or rx dma failed to stop.
*
* XXX: Sometimes DMA unit hangs and we have
* stuck frames on tx queues, only a reset
* can fix that.
*/
int
ath5k_hw_dma_stop(struct ath5k_hw *ah)
{
int i, qmax, err;
err = 0;
/* Disable interrupts */
ath5k_hw_set_imr(ah, 0);
/* Stop rx dma */
err = ath5k_hw_stop_rx_dma(ah);
if (err)
return err;
/* Clear any pending interrupts
* and disable tx dma */
if (ah->ah_version != AR5K_AR5210) {
ath5k_hw_reg_write(ah, 0xffffffff, AR5K_PISR);
qmax = AR5K_NUM_TX_QUEUES;
} else {
/* PISR/SISR Not available on 5210 */
ath5k_hw_reg_read(ah, AR5K_ISR);
qmax = AR5K_NUM_TX_QUEUES_NOQCU;
}
for (i = 0; i < qmax; i++) {
err = ath5k_hw_stop_tx_dma(ah, i);
/* -EINVAL -> queue inactive */
if (err && err != -EINVAL)
return err;
}
return 0;
}
drivers/net/wireless/ath/ath5k/dma.c
/*
* Copyright (c) 2004-2008 Reyk Floeter <[email protected]>
* Copyright (c) 2006-2008 Nick Kossifidis <[email protected]>
* Copyright (c) 2007-2008 Jiri Slaby <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/**************\
* Capabilities *
\**************/
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "../regd.h"
/*
* Fill the capabilities struct
* TODO: Merge this with EEPROM code when we are done with it
*/
int ath5k_hw_set_capabilities(struct ath5k_hw *ah)
{
struct ath5k_capabilities *caps = &ah->ah_capabilities;
u16 ee_header;
/* Capabilities stored in the EEPROM */
ee_header = caps->cap_eeprom.ee_header;
if (ah->ah_version == AR5K_AR5210) {
/*
* Set radio capabilities
* (The AR5110 only supports the middle 5GHz band)
*/
caps->cap_range.range_5ghz_min = 5120;
caps->cap_range.range_5ghz_max = 5430;
caps->cap_range.range_2ghz_min = 0;
caps->cap_range.range_2ghz_max = 0;
/* Set supported modes */
__set_bit(AR5K_MODE_11A, caps->cap_mode);
} else {
/*
* XXX The transceiver supports frequencies from 4920 to 6100MHz
* XXX and from 2312 to 2732MHz. There are problems with the
* XXX current ieee80211 implementation because the IEEE
* XXX channel mapping does not support negative channel
* XXX numbers (2312MHz is channel -19). Of course, this
* XXX doesn't matter because these channels are out of the
* XXX legal range.
*/
/*
* Set radio capabilities
*/
if (AR5K_EEPROM_HDR_11A(ee_header)) {
if (ath_is_49ghz_allowed(caps->cap_eeprom.ee_regdomain))
caps->cap_range.range_5ghz_min = 4920;
else
caps->cap_range.range_5ghz_min = 5005;
caps->cap_range.range_5ghz_max = 6100;
/* Set supported modes */
__set_bit(AR5K_MODE_11A, caps->cap_mode);
}
/* Enable 802.11b if a 2GHz capable radio (2111/5112) is
* connected */
if (AR5K_EEPROM_HDR_11B(ee_header) ||
(AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)) {
/* hw can go down to 2312 (see the XXX note above) but we start at 2412 */
caps->cap_range.range_2ghz_min = 2412;
caps->cap_range.range_2ghz_max = 2732;
/* Override 2GHz modes on SoCs that need it
* NOTE: cap_needs_2GHz_ovr gets set from
* ath_ahb_probe */
if (!caps->cap_needs_2GHz_ovr) {
if (AR5K_EEPROM_HDR_11B(ee_header))
__set_bit(AR5K_MODE_11B,
caps->cap_mode);
if (AR5K_EEPROM_HDR_11G(ee_header) &&
ah->ah_version != AR5K_AR5211)
__set_bit(AR5K_MODE_11G,
caps->cap_mode);
}
}
}
if ((ah->ah_radio_5ghz_revision & 0xf0) == AR5K_SREV_RAD_2112)
__clear_bit(AR5K_MODE_11A, caps->cap_mode);
/* Set number of supported TX queues */
if (ah->ah_version == AR5K_AR5210)
caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES_NOQCU;
else
caps->cap_queues.q_tx_num = AR5K_NUM_TX_QUEUES;
/* Newer hardware has PHY error counters */
if (ah->ah_mac_srev >= AR5K_SREV_AR5213A)
caps->cap_has_phyerr_counters = true;
else
caps->cap_has_phyerr_counters = false;
/* MACs since AR5212 have MRR support */
if (ah->ah_version == AR5K_AR5212)
caps->cap_has_mrr_support = true;
else
caps->cap_has_mrr_support = false;
return 0;
}
/*
* TODO: Following functions should be part of a new function
* set_capability
*/
int ath5k_hw_enable_pspoll(struct ath5k_hw *ah, u8 *bssid,
u16 assoc_id)
{
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1,
AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
return 0;
}
return -EIO;
}
int ath5k_hw_disable_pspoll(struct ath5k_hw *ah)
{
if (ah->ah_version == AR5K_AR5210) {
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1,
AR5K_STA_ID1_NO_PSPOLL | AR5K_STA_ID1_DEFAULT_ANTENNA);
return 0;
}
return -EIO;
}
| linux-master | drivers/net/wireless/ath/ath5k/caps.c |
/*
* Copyright (c) 2004-2007 Reyk Floeter <[email protected]>
* Copyright (c) 2006-2009 Nick Kossifidis <[email protected]>
* Copyright (c) 2007-2008 Jiri Slaby <[email protected]>
* Copyright (c) 2008-2009 Felix Fietkau <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/***********************\
* PHY related functions *
\***********************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/sort.h>
#include <asm/unaligned.h>
#include "ath5k.h"
#include "reg.h"
#include "rfbuffer.h"
#include "rfgain.h"
#include "../regd.h"
/**
* DOC: PHY related functions
*
* Here we handle the low-level functions related to baseband
* and analog frontend (RF) parts. This is by far the most complex
* part of the hw code so make sure you know what you are doing.
*
* Here is a list of what this is all about:
*
* - Channel setting/switching
*
* - Automatic Gain Control (AGC) calibration
*
* - Noise Floor calibration
*
* - I/Q imbalance calibration (QAM correction)
*
* - Calibration due to thermal changes (gain_F)
*
* - Spur noise mitigation
*
* - RF/PHY initialization for the various operating modes and bwmodes
*
* - Antenna control
*
* - TX power control per channel/rate/packet type
*
* Also keep in mind that we never got documentation for most of these
* functions; what we have comes mostly from Atheros's code, reverse
* engineering and patent docs/presentations etc.
*/
/******************\
* Helper functions *
\******************/
/**
* ath5k_hw_radio_revision() - Get the PHY Chip revision
* @ah: The &struct ath5k_hw
* @band: One of enum nl80211_band
*
* Returns the revision number of a 2GHz, 5GHz or single chip
* radio.
*/
u16
ath5k_hw_radio_revision(struct ath5k_hw *ah, enum nl80211_band band)
{
unsigned int i;
u32 srev;
u16 ret;
/*
* Set the radio chip access register
*/
switch (band) {
case NL80211_BAND_2GHZ:
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_2GHZ, AR5K_PHY(0));
break;
case NL80211_BAND_5GHZ:
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
break;
default:
return 0;
}
usleep_range(2000, 2500);
/* ...wait until PHY is ready and read the selected radio revision */
ath5k_hw_reg_write(ah, 0x00001c16, AR5K_PHY(0x34));
for (i = 0; i < 8; i++)
ath5k_hw_reg_write(ah, 0x00010000, AR5K_PHY(0x20));
if (ah->ah_version == AR5K_AR5210) {
srev = (ath5k_hw_reg_read(ah, AR5K_PHY(256)) >> 28) & 0xf;
ret = (u16)ath5k_hw_bitswap(srev, 4) + 1;
} else {
srev = (ath5k_hw_reg_read(ah, AR5K_PHY(0x100)) >> 24) & 0xff;
ret = (u16)ath5k_hw_bitswap(((srev & 0xf0) >> 4) |
((srev & 0x0f) << 4), 8);
}
/* Reset to the 5GHz mode */
ath5k_hw_reg_write(ah, AR5K_PHY_SHIFT_5GHZ, AR5K_PHY(0));
return ret;
}
/**
* ath5k_channel_ok() - Check if a channel is supported by the hw
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* Note: We don't do any regulatory domain checks here, it's just
* a sanity check.
*/
bool
ath5k_channel_ok(struct ath5k_hw *ah, struct ieee80211_channel *channel)
{
u16 freq = channel->center_freq;
/* Check if the channel is in our supported range */
if (channel->band == NL80211_BAND_2GHZ) {
if ((freq >= ah->ah_capabilities.cap_range.range_2ghz_min) &&
(freq <= ah->ah_capabilities.cap_range.range_2ghz_max))
return true;
} else if (channel->band == NL80211_BAND_5GHZ)
if ((freq >= ah->ah_capabilities.cap_range.range_5ghz_min) &&
(freq <= ah->ah_capabilities.cap_range.range_5ghz_max))
return true;
return false;
}
/**
* ath5k_hw_chan_has_spur_noise() - Check if channel is sensitive to spur noise
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*/
bool
ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u8 refclk_freq;
if ((ah->ah_radio == AR5K_RF5112) ||
(ah->ah_radio == AR5K_RF5413) ||
(ah->ah_radio == AR5K_RF2413) ||
(ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4)))
refclk_freq = 40;
else
refclk_freq = 32;
if ((channel->center_freq % refclk_freq != 0) &&
((channel->center_freq % refclk_freq < 10) ||
(channel->center_freq % refclk_freq > 22)))
return true;
else
return false;
}
/**
* ath5k_hw_rfb_op() - Perform an operation on the given RF Buffer
* @ah: The &struct ath5k_hw
* @rf_regs: The struct ath5k_rf_reg
* @val: New value
* @reg_id: RF register ID
* @set: True to write @val to the RF Buffer, false to read it back
*
* This is an internal function used to modify RF Banks before
* writing them to AR5K_RF_BUFFER. Check out rfbuffer.h for more
* info.
*/
static unsigned int
ath5k_hw_rfb_op(struct ath5k_hw *ah, const struct ath5k_rf_reg *rf_regs,
u32 val, u8 reg_id, bool set)
{
const struct ath5k_rf_reg *rfreg = NULL;
u8 offset, bank, num_bits, col, position;
u16 entry;
u32 mask, data, last_bit, bits_shifted, first_bit;
u32 *rfb;
s32 bits_left;
int i;
data = 0;
rfb = ah->ah_rf_banks;
for (i = 0; i < ah->ah_rf_regs_count; i++) {
if (rf_regs[i].index == reg_id) {
rfreg = &rf_regs[i];
break;
}
}
if (rfb == NULL || rfreg == NULL) {
ATH5K_PRINTF("Rf register not found!\n");
/* should not happen */
return 0;
}
bank = rfreg->bank;
num_bits = rfreg->field.len;
first_bit = rfreg->field.pos;
col = rfreg->field.col;
/* first_bit is an offset from bank's
* start. Since we have all banks on
* the same array, we use this offset
* to mark each bank's start */
offset = ah->ah_offset[bank];
/* Boundary check */
if (!(col <= 3 && num_bits <= 32 && first_bit + num_bits <= 319)) {
ATH5K_PRINTF("invalid values at offset %u\n", offset);
return 0;
}
entry = ((first_bit - 1) / 8) + offset;
position = (first_bit - 1) % 8;
if (set)
data = ath5k_hw_bitswap(val, num_bits);
for (bits_shifted = 0, bits_left = num_bits; bits_left > 0;
position = 0, entry++) {
last_bit = (position + bits_left > 8) ? 8 :
position + bits_left;
mask = (((1 << last_bit) - 1) ^ ((1 << position) - 1)) <<
(col * 8);
if (set) {
rfb[entry] &= ~mask;
rfb[entry] |= ((data << position) << (col * 8)) & mask;
data >>= (8 - position);
} else {
data |= (((rfb[entry] & mask) >> (col * 8)) >> position)
<< bits_shifted;
bits_shifted += last_bit - position;
}
bits_left -= 8 - position;
}
data = set ? 1 : ath5k_hw_bitswap(data, num_bits);
return data;
}
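/*
 * Illustrative sketch (not part of the driver): how the loop above
 * assembles a field that crosses entry boundaries. Take a hypothetical
 * field with first_bit = 5, num_bits = 12, col = 0 and bank offset 0,
 * read back (set = false):
 * entry = (5 - 1) / 8 = 0, position = (5 - 1) % 8 = 4
 * pass 1: last_bit = 8, mask = 0xf0 -> takes rfb[0] bits [7:4]
 * pass 2: entry = 1, position = 0, mask = 0xff -> takes rfb[1] bits [7:0]
 * so the field (before the final bitswap) is rfb[1][7:0]:rfb[0][7:4].
 */
#if 0
/* Standalone copy of the mask construction above, for experimentation */
static u32 example_rfb_mask(u8 position, u8 last_bit, u8 col)
{
return (((1 << last_bit) - 1) ^ ((1 << position) - 1)) << (col * 8);
}
#endif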
/**
* ath5k_hw_write_ofdm_timings() - set OFDM timings on AR5212
* @ah: the &struct ath5k_hw
* @channel: the currently set channel upon reset
*
* Write the delta slope coefficient (used for pilot tracking?) for OFDM
* operation on the AR5212 upon reset. This is a helper for ath5k_hw_phy_init.
*
* Since delta slope is floating point we split it into its exponent and
* mantissa and provide these values to hw.
*
* For more info, this patent is probably related:
* "http://www.freepatentsonline.com/7184495.html"
*/
static inline int
ath5k_hw_write_ofdm_timings(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
/* Get exponent and mantissa and set it */
u32 coef_scaled, coef_exp, coef_man,
ds_coef_exp, ds_coef_man, clock;
BUG_ON(!(ah->ah_version == AR5K_AR5212) ||
(channel->hw_value == AR5K_MODE_11B));
/* Get coefficient
* ALGO: coef = (5 * clock / carrier_freq) / 2
* we scale coef by shifting clock value by 24 for
* better precision since we use integers */
switch (ah->ah_bwmode) {
case AR5K_BWMODE_40MHZ:
clock = 40 * 2;
break;
case AR5K_BWMODE_10MHZ:
clock = 40 / 2;
break;
case AR5K_BWMODE_5MHZ:
clock = 40 / 4;
break;
default:
clock = 40;
break;
}
coef_scaled = ((5 * (clock << 24)) / 2) / channel->center_freq;
/* Get exponent
* ALGO: coef_exp = 14 - highest set bit position */
coef_exp = ilog2(coef_scaled);
/* Doesn't make sense if it's zero */
if (!coef_scaled || !coef_exp)
return -EINVAL;
/* Note: we've shifted coef_scaled by 24 */
coef_exp = 14 - (coef_exp - 24);
/* Get mantissa (significant digits)
* ALGO: coef_mant = floor(coef_scaled * 2^coef_exp + 0.5) */
coef_man = coef_scaled +
(1 << (24 - coef_exp - 1));
/* Calculate delta slope coefficient exponent
* and mantissa (remove scaling) and set them on hw */
ds_coef_man = coef_man >> (24 - coef_exp);
ds_coef_exp = coef_exp - 16;
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
AR5K_PHY_TIMING_3_DSC_MAN, ds_coef_man);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_3,
AR5K_PHY_TIMING_3_DSC_EXP, ds_coef_exp);
return 0;
}
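/*
 * Worked example (hypothetical channel, default 20MHz bwmode): for a
 * 2412MHz channel the math above gives
 * coef_scaled = ((5 * (40 << 24)) / 2) / 2412 = 695572
 * ilog2(695572) = 19 -> coef_exp = 14 - (19 - 24) = 19
 * coef_man = 695572 + (1 << (24 - 19 - 1)) = 695588
 * ds_coef_man = 695588 >> (24 - 19) = 21737, ds_coef_exp = 19 - 16 = 3
 */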
/**
* ath5k_hw_phy_disable() - Disable PHY
* @ah: The &struct ath5k_hw
*/
int ath5k_hw_phy_disable(struct ath5k_hw *ah)
{
/* Just a try M.F. */
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
return 0;
}
/**
* ath5k_hw_wait_for_synth() - Wait for synth to settle
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*/
static void
ath5k_hw_wait_for_synth(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
/*
* On 5211+ read activation -> rx delay
* and use it (100ns steps).
*/
if (ah->ah_version != AR5K_AR5210) {
u32 delay;
delay = ath5k_hw_reg_read(ah, AR5K_PHY_RX_DELAY) &
AR5K_PHY_RX_DELAY_M;
delay = (channel->hw_value == AR5K_MODE_11B) ?
((delay << 2) / 22) : (delay / 10);
if (ah->ah_bwmode == AR5K_BWMODE_10MHZ)
delay = delay << 1;
if (ah->ah_bwmode == AR5K_BWMODE_5MHZ)
delay = delay << 2;
/* XXX: /2 on turbo ? Let's be safe
* for now */
usleep_range(100 + delay, 100 + (2 * delay));
} else {
usleep_range(1000, 1500);
}
}
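/*
 * Illustrative numbers (hypothetical register value): if the
 * activation -> rx delay field reads 350 (100ns steps), an OFDM channel
 * gives delay = 350 / 10 = 35 -> usleep_range(135, 170), while 11b gives
 * delay = (350 << 2) / 22 = 63 -> usleep_range(163, 226).
 */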
/**********************\
* RF Gain optimization *
\**********************/
/**
* DOC: RF Gain optimization
*
* This code is used to optimize RF gain on different environments
* (temperature mostly) based on feedback from a power detector.
*
* It's only used on RF5111 and RF5112; later RF chips seem to have
* automatic adjustment on hw (notice they have a much smaller BANK 7
* and no gain optimization ladder).
*
* For more info check out this patent doc
* "http://www.freepatentsonline.com/7400691.html"
*
* This paper describes power drops as seen on the receiver due to
* probe packets
* "http://www.cnri.dit.ie/publications/ICT08%20-%20Practical%20Issues
* %20of%20Power%20Control.pdf"
*
* And this is the MadWiFi bug entry related to the above
* "http://madwifi-project.org/ticket/1659"
* with various measurements and diagrams
*/
/**
* ath5k_hw_rfgain_opt_init() - Initialize ah_gain during attach
* @ah: The &struct ath5k_hw
*/
int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah)
{
/* Initialize the gain optimization values */
switch (ah->ah_radio) {
case AR5K_RF5111:
ah->ah_gain.g_step_idx = rfgain_opt_5111.go_default;
ah->ah_gain.g_low = 20;
ah->ah_gain.g_high = 35;
ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
break;
case AR5K_RF5112:
ah->ah_gain.g_step_idx = rfgain_opt_5112.go_default;
ah->ah_gain.g_low = 20;
ah->ah_gain.g_high = 85;
ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
break;
default:
return -EINVAL;
}
return 0;
}
/**
* ath5k_hw_request_rfgain_probe() - Request a PAPD probe packet
* @ah: The &struct ath5k_hw
*
* Schedules a gain probe check on the next transmitted packet.
* That means our next packet is going to be sent with lower
* tx power and a Peak to Average Power Detector (PAPD) will try
* to measure the gain.
*
* TODO: Force a tx packet (bypassing PCU arbitrator etc)
* just after we enable the probe so that we don't mess with
* standard traffic.
*/
static void
ath5k_hw_request_rfgain_probe(struct ath5k_hw *ah)
{
/* Skip if gain calibration is inactive or
* we are already handling a probe request */
if (ah->ah_gain.g_state != AR5K_RFGAIN_ACTIVE)
return;
/* Send the packet 2dB below max power as the
* patent doc suggests (txpower is in 0.5dB units, hence the 4) */
ath5k_hw_reg_write(ah, AR5K_REG_SM(ah->ah_txpower.txp_ofdm - 4,
AR5K_PHY_PAPD_PROBE_TXPOWER) |
AR5K_PHY_PAPD_PROBE_TX_NEXT, AR5K_PHY_PAPD_PROBE);
ah->ah_gain.g_state = AR5K_RFGAIN_READ_REQUESTED;
}
/**
* ath5k_hw_rf_gainf_corr() - Calculate Gain_F measurement correction
* @ah: The &struct ath5k_hw
*
* Calculate Gain_F measurement correction
* based on the current step for RF5112 rev. 2
*/
static u32
ath5k_hw_rf_gainf_corr(struct ath5k_hw *ah)
{
u32 mix, step;
const struct ath5k_gain_opt *go;
const struct ath5k_gain_opt_step *g_step;
const struct ath5k_rf_reg *rf_regs;
/* Only RF5112 Rev. 2 supports it */
if ((ah->ah_radio != AR5K_RF5112) ||
(ah->ah_radio_5ghz_revision <= AR5K_SREV_RAD_5112A))
return 0;
go = &rfgain_opt_5112;
rf_regs = rf_regs_5112a;
ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112a);
g_step = &go->go_step[ah->ah_gain.g_step_idx];
if (ah->ah_rf_banks == NULL)
return 0;
ah->ah_gain.g_f_corr = 0;
/* No VGA (Variable Gain Amplifier) override, skip */
if (ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXVGA_OVR, false) != 1)
return 0;
/* Mix gain stepping */
step = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXGAIN_STEP, false);
/* Mix gain override */
mix = g_step->gos_param[0];
switch (mix) {
case 3:
ah->ah_gain.g_f_corr = step * 2;
break;
case 2:
ah->ah_gain.g_f_corr = (step - 5) * 2;
break;
case 1:
ah->ah_gain.g_f_corr = step;
break;
default:
ah->ah_gain.g_f_corr = 0;
break;
}
return ah->ah_gain.g_f_corr;
}
/**
* ath5k_hw_rf_check_gainf_readback() - Validate Gain_F feedback from detector
* @ah: The &struct ath5k_hw
*
* Check if current gain_F measurement is in the range of our
* power detector windows. If we get a measurement outside range
* we know it's not accurate (detectors can't measure anything outside
* their detection window) so we must ignore it.
*
* Returns true if readback was O.K. or false on failure
*/
static bool
ath5k_hw_rf_check_gainf_readback(struct ath5k_hw *ah)
{
const struct ath5k_rf_reg *rf_regs;
u32 step, mix_ovr, level[4];
if (ah->ah_rf_banks == NULL)
return false;
if (ah->ah_radio == AR5K_RF5111) {
rf_regs = rf_regs_5111;
ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5111);
step = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_RFGAIN_STEP,
false);
level[0] = 0;
level[1] = (step == 63) ? 50 : step + 4;
level[2] = (step != 63) ? 64 : level[0];
level[3] = level[2] + 50;
ah->ah_gain.g_high = level[3] -
(step == 63 ? AR5K_GAIN_DYN_ADJUST_HI_MARGIN : -5);
ah->ah_gain.g_low = level[0] +
(step == 63 ? AR5K_GAIN_DYN_ADJUST_LO_MARGIN : 0);
} else {
rf_regs = rf_regs_5112;
ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112);
mix_ovr = ath5k_hw_rfb_op(ah, rf_regs, 0, AR5K_RF_MIXVGA_OVR,
false);
level[0] = level[2] = 0;
if (mix_ovr == 1) {
level[1] = level[3] = 83;
} else {
level[1] = level[3] = 107;
ah->ah_gain.g_high = 55;
}
}
return (ah->ah_gain.g_current >= level[0] &&
ah->ah_gain.g_current <= level[1]) ||
(ah->ah_gain.g_current >= level[2] &&
ah->ah_gain.g_current <= level[3]);
}
/**
* ath5k_hw_rf_gainf_adjust() - Perform Gain_F adjustment
* @ah: The &struct ath5k_hw
*
* Choose the right target gain based on current gain
* and RF gain optimization ladder
*/
static s8
ath5k_hw_rf_gainf_adjust(struct ath5k_hw *ah)
{
const struct ath5k_gain_opt *go;
const struct ath5k_gain_opt_step *g_step;
int ret = 0;
switch (ah->ah_radio) {
case AR5K_RF5111:
go = &rfgain_opt_5111;
break;
case AR5K_RF5112:
go = &rfgain_opt_5112;
break;
default:
return 0;
}
g_step = &go->go_step[ah->ah_gain.g_step_idx];
if (ah->ah_gain.g_current >= ah->ah_gain.g_high) {
/* Reached maximum */
if (ah->ah_gain.g_step_idx == 0)
return -1;
for (ah->ah_gain.g_target = ah->ah_gain.g_current;
ah->ah_gain.g_target >= ah->ah_gain.g_high &&
ah->ah_gain.g_step_idx > 0;
g_step = &go->go_step[ah->ah_gain.g_step_idx])
ah->ah_gain.g_target -= 2 *
(go->go_step[--(ah->ah_gain.g_step_idx)].gos_gain -
g_step->gos_gain);
ret = 1;
goto done;
}
if (ah->ah_gain.g_current <= ah->ah_gain.g_low) {
/* Reached minimum */
if (ah->ah_gain.g_step_idx == (go->go_steps_count - 1))
return -2;
for (ah->ah_gain.g_target = ah->ah_gain.g_current;
ah->ah_gain.g_target <= ah->ah_gain.g_low &&
ah->ah_gain.g_step_idx < go->go_steps_count - 1;
g_step = &go->go_step[ah->ah_gain.g_step_idx])
ah->ah_gain.g_target -= 2 *
(go->go_step[++ah->ah_gain.g_step_idx].gos_gain -
g_step->gos_gain);
ret = 2;
goto done;
}
done:
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"ret %d, gain step %u, current gain %u, target gain %u\n",
ret, ah->ah_gain.g_step_idx, ah->ah_gain.g_current,
ah->ah_gain.g_target);
return ret;
}
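/*
 * Illustrative walk (hypothetical ladder, not the real 5111/5112 tables):
 * with gos_gain = {8, 6, 4, 2} and g_step_idx = 2, a reading at/above
 * g_high moves us to index 1 and lowers g_target by 2 * (6 - 4) = 4;
 * the loop repeats until g_target drops below g_high or we hit index 0.
 */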
/**
* ath5k_hw_gainf_calibrate() - Do a gain_F calibration
* @ah: The &struct ath5k_hw
*
* Main callback for thermal RF gain calibration engine
* Check for a new gain reading and schedule an adjustment
* if needed.
*
* Returns one of enum ath5k_rfgain codes
*/
enum ath5k_rfgain
ath5k_hw_gainf_calibrate(struct ath5k_hw *ah)
{
u32 data, type;
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
if (ah->ah_rf_banks == NULL ||
ah->ah_gain.g_state == AR5K_RFGAIN_INACTIVE)
return AR5K_RFGAIN_INACTIVE;
/* No check requested, either engine is inactive
* or an adjustment is already requested */
if (ah->ah_gain.g_state != AR5K_RFGAIN_READ_REQUESTED)
goto done;
/* Read the PAPD (Peak to Average Power Detector)
* register */
data = ath5k_hw_reg_read(ah, AR5K_PHY_PAPD_PROBE);
/* No probe is scheduled, read gain_F measurement */
if (!(data & AR5K_PHY_PAPD_PROBE_TX_NEXT)) {
ah->ah_gain.g_current = data >> AR5K_PHY_PAPD_PROBE_GAINF_S;
type = AR5K_REG_MS(data, AR5K_PHY_PAPD_PROBE_TYPE);
/* If tx packet is CCK correct the gain_F measurement
* by cck ofdm gain delta */
if (type == AR5K_PHY_PAPD_PROBE_TYPE_CCK) {
if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A)
ah->ah_gain.g_current +=
ee->ee_cck_ofdm_gain_delta;
else
ah->ah_gain.g_current +=
AR5K_GAIN_CCK_PROBE_CORR;
}
/* Further correct gain_F measurement for
* RF5112A radios */
if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A) {
ath5k_hw_rf_gainf_corr(ah);
ah->ah_gain.g_current =
ah->ah_gain.g_current >= ah->ah_gain.g_f_corr ?
(ah->ah_gain.g_current - ah->ah_gain.g_f_corr) :
0;
}
/* If the measurement is ok and we need to
* adjust the gain, schedule a gain adjustment;
* else switch back to the active state */
if (ath5k_hw_rf_check_gainf_readback(ah) &&
AR5K_GAIN_CHECK_ADJUST(&ah->ah_gain) &&
ath5k_hw_rf_gainf_adjust(ah)) {
ah->ah_gain.g_state = AR5K_RFGAIN_NEED_CHANGE;
} else {
ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
}
}
done:
return ah->ah_gain.g_state;
}
/**
* ath5k_hw_rfgain_init() - Write initial RF gain settings to hw
* @ah: The &struct ath5k_hw
* @band: One of enum nl80211_band
*
* Write initial RF gain table to set the RF sensitivity.
*
* NOTE: This one works on all RF chips and has nothing to do
* with Gain_F calibration
*/
static int
ath5k_hw_rfgain_init(struct ath5k_hw *ah, enum nl80211_band band)
{
const struct ath5k_ini_rfgain *ath5k_rfg;
unsigned int i, size, index;
switch (ah->ah_radio) {
case AR5K_RF5111:
ath5k_rfg = rfgain_5111;
size = ARRAY_SIZE(rfgain_5111);
break;
case AR5K_RF5112:
ath5k_rfg = rfgain_5112;
size = ARRAY_SIZE(rfgain_5112);
break;
case AR5K_RF2413:
ath5k_rfg = rfgain_2413;
size = ARRAY_SIZE(rfgain_2413);
break;
case AR5K_RF2316:
ath5k_rfg = rfgain_2316;
size = ARRAY_SIZE(rfgain_2316);
break;
case AR5K_RF5413:
ath5k_rfg = rfgain_5413;
size = ARRAY_SIZE(rfgain_5413);
break;
case AR5K_RF2317:
case AR5K_RF2425:
ath5k_rfg = rfgain_2425;
size = ARRAY_SIZE(rfgain_2425);
break;
default:
return -EINVAL;
}
index = (band == NL80211_BAND_2GHZ) ? 1 : 0;
for (i = 0; i < size; i++) {
AR5K_REG_WAIT(i);
ath5k_hw_reg_write(ah, ath5k_rfg[i].rfg_value[index],
(u32)ath5k_rfg[i].rfg_register);
}
return 0;
}
/********************\
* RF Registers setup *
\********************/
/**
* ath5k_hw_rfregs_init() - Initialize RF register settings
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
* @mode: One of enum ath5k_driver_mode
*
* Set up RF registers by writing the RF buffer to hw. For
* more info on this, check out rfbuffer.h
*/
static int
ath5k_hw_rfregs_init(struct ath5k_hw *ah,
struct ieee80211_channel *channel,
unsigned int mode)
{
const struct ath5k_rf_reg *rf_regs;
const struct ath5k_ini_rfbuffer *ini_rfb;
const struct ath5k_gain_opt *go = NULL;
const struct ath5k_gain_opt_step *g_step;
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u8 ee_mode = 0;
u32 *rfb;
int i, obdb = -1, bank = -1;
switch (ah->ah_radio) {
case AR5K_RF5111:
rf_regs = rf_regs_5111;
ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5111);
ini_rfb = rfb_5111;
ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5111);
go = &rfgain_opt_5111;
break;
case AR5K_RF5112:
if (ah->ah_radio_5ghz_revision >= AR5K_SREV_RAD_5112A) {
rf_regs = rf_regs_5112a;
ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112a);
ini_rfb = rfb_5112a;
ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5112a);
} else {
rf_regs = rf_regs_5112;
ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5112);
ini_rfb = rfb_5112;
ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5112);
}
go = &rfgain_opt_5112;
break;
case AR5K_RF2413:
rf_regs = rf_regs_2413;
ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2413);
ini_rfb = rfb_2413;
ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2413);
break;
case AR5K_RF2316:
rf_regs = rf_regs_2316;
ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2316);
ini_rfb = rfb_2316;
ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2316);
break;
case AR5K_RF5413:
rf_regs = rf_regs_5413;
ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_5413);
ini_rfb = rfb_5413;
ah->ah_rf_banks_size = ARRAY_SIZE(rfb_5413);
break;
case AR5K_RF2317:
rf_regs = rf_regs_2425;
ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2425);
ini_rfb = rfb_2317;
ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2317);
break;
case AR5K_RF2425:
rf_regs = rf_regs_2425;
ah->ah_rf_regs_count = ARRAY_SIZE(rf_regs_2425);
if (ah->ah_mac_srev < AR5K_SREV_AR2417) {
ini_rfb = rfb_2425;
ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2425);
} else {
ini_rfb = rfb_2417;
ah->ah_rf_banks_size = ARRAY_SIZE(rfb_2417);
}
break;
default:
return -EINVAL;
}
/* If it's the first time we set RF buffer, allocate
* ah->ah_rf_banks based on ah->ah_rf_banks_size
* we set above */
if (ah->ah_rf_banks == NULL) {
ah->ah_rf_banks = kmalloc_array(ah->ah_rf_banks_size,
sizeof(u32),
GFP_KERNEL);
if (ah->ah_rf_banks == NULL) {
ATH5K_ERR(ah, "out of memory\n");
return -ENOMEM;
}
}
/* Copy values to modify them */
rfb = ah->ah_rf_banks;
for (i = 0; i < ah->ah_rf_banks_size; i++) {
if (ini_rfb[i].rfb_bank >= AR5K_MAX_RF_BANKS) {
ATH5K_ERR(ah, "invalid bank\n");
return -EINVAL;
}
/* Bank changed, write down the offset */
if (bank != ini_rfb[i].rfb_bank) {
bank = ini_rfb[i].rfb_bank;
ah->ah_offset[bank] = i;
}
rfb[i] = ini_rfb[i].rfb_mode_data[mode];
}
/* Set Output and Driver bias current (OB/DB) */
if (channel->band == NL80211_BAND_2GHZ) {
if (channel->hw_value == AR5K_MODE_11B)
ee_mode = AR5K_EEPROM_MODE_11B;
else
ee_mode = AR5K_EEPROM_MODE_11G;
/* For RF511X/RF211X combination we
* use b_OB and b_DB parameters stored
* in eeprom on ee->ee_ob[ee_mode][0]
*
* For all other chips we use OB/DB for 2GHz
* stored in the b/g modal section just like
* 802.11a on ee->ee_ob[ee_mode][1] */
if ((ah->ah_radio == AR5K_RF5111) ||
(ah->ah_radio == AR5K_RF5112))
obdb = 0;
else
obdb = 1;
ath5k_hw_rfb_op(ah, rf_regs, ee->ee_ob[ee_mode][obdb],
AR5K_RF_OB_2GHZ, true);
ath5k_hw_rfb_op(ah, rf_regs, ee->ee_db[ee_mode][obdb],
AR5K_RF_DB_2GHZ, true);
/* RF5111 always needs OB/DB for 5GHz, even if we use 2GHz */
} else if ((channel->band == NL80211_BAND_5GHZ) ||
(ah->ah_radio == AR5K_RF5111)) {
/* For 11a, Turbo and XR we need to choose
* OB/DB based on frequency range */
ee_mode = AR5K_EEPROM_MODE_11A;
obdb = channel->center_freq >= 5725 ? 3 :
(channel->center_freq >= 5500 ? 2 :
(channel->center_freq >= 5260 ? 1 :
(channel->center_freq > 4000 ? 0 : -1)));
if (obdb < 0)
return -EINVAL;
ath5k_hw_rfb_op(ah, rf_regs, ee->ee_ob[ee_mode][obdb],
AR5K_RF_OB_5GHZ, true);
ath5k_hw_rfb_op(ah, rf_regs, ee->ee_db[ee_mode][obdb],
AR5K_RF_DB_5GHZ, true);
}
g_step = &go->go_step[ah->ah_gain.g_step_idx];
/* Set turbo mode (N/A on RF5413) */
if ((ah->ah_bwmode == AR5K_BWMODE_40MHZ) &&
(ah->ah_radio != AR5K_RF5413))
ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_TURBO, false);
/* Bank Modifications (chip-specific) */
if (ah->ah_radio == AR5K_RF5111) {
/* Set gain_F settings according to current step */
if (channel->hw_value != AR5K_MODE_11B) {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_FRAME_CTL,
AR5K_PHY_FRAME_CTL_TX_CLIP,
g_step->gos_param[0]);
ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[1],
AR5K_RF_PWD_90, true);
ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[2],
AR5K_RF_PWD_84, true);
ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[3],
AR5K_RF_RFGAIN_SEL, true);
/* We programmed gain_F parameters, switch back
* to active state */
ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
}
/* Bank 6/7 setup */
ath5k_hw_rfb_op(ah, rf_regs, !ee->ee_xpd[ee_mode],
AR5K_RF_PWD_XPD, true);
ath5k_hw_rfb_op(ah, rf_regs, ee->ee_x_gain[ee_mode],
AR5K_RF_XPD_GAIN, true);
ath5k_hw_rfb_op(ah, rf_regs, ee->ee_i_gain[ee_mode],
AR5K_RF_GAIN_I, true);
ath5k_hw_rfb_op(ah, rf_regs, ee->ee_xpd[ee_mode],
AR5K_RF_PLO_SEL, true);
/* Tweak power detectors for half/quarter rate support */
if (ah->ah_bwmode == AR5K_BWMODE_5MHZ ||
ah->ah_bwmode == AR5K_BWMODE_10MHZ) {
u8 wait_i;
ath5k_hw_rfb_op(ah, rf_regs, 0x1f,
AR5K_RF_WAIT_S, true);
wait_i = (ah->ah_bwmode == AR5K_BWMODE_5MHZ) ?
0x1f : 0x10;
ath5k_hw_rfb_op(ah, rf_regs, wait_i,
AR5K_RF_WAIT_I, true);
ath5k_hw_rfb_op(ah, rf_regs, 3,
AR5K_RF_MAX_TIME, true);
}
}
if (ah->ah_radio == AR5K_RF5112) {
/* Set gain_F settings according to current step */
if (channel->hw_value != AR5K_MODE_11B) {
ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[0],
AR5K_RF_MIXGAIN_OVR, true);
ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[1],
AR5K_RF_PWD_138, true);
ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[2],
AR5K_RF_PWD_137, true);
ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[3],
AR5K_RF_PWD_136, true);
ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[4],
AR5K_RF_PWD_132, true);
ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[5],
AR5K_RF_PWD_131, true);
ath5k_hw_rfb_op(ah, rf_regs, g_step->gos_param[6],
AR5K_RF_PWD_130, true);
/* We programmed gain_F parameters, switch back
* to active state */
ah->ah_gain.g_state = AR5K_RFGAIN_ACTIVE;
}
/* Bank 6/7 setup */
ath5k_hw_rfb_op(ah, rf_regs, ee->ee_xpd[ee_mode],
AR5K_RF_XPD_SEL, true);
if (ah->ah_radio_5ghz_revision < AR5K_SREV_RAD_5112A) {
/* Rev. 1 supports only one xpd */
ath5k_hw_rfb_op(ah, rf_regs,
ee->ee_x_gain[ee_mode],
AR5K_RF_XPD_GAIN, true);
} else {
u8 *pdg_curve_to_idx = ee->ee_pdc_to_idx[ee_mode];
if (ee->ee_pd_gains[ee_mode] > 1) {
ath5k_hw_rfb_op(ah, rf_regs,
pdg_curve_to_idx[0],
AR5K_RF_PD_GAIN_LO, true);
ath5k_hw_rfb_op(ah, rf_regs,
pdg_curve_to_idx[1],
AR5K_RF_PD_GAIN_HI, true);
} else {
ath5k_hw_rfb_op(ah, rf_regs,
pdg_curve_to_idx[0],
AR5K_RF_PD_GAIN_LO, true);
ath5k_hw_rfb_op(ah, rf_regs,
pdg_curve_to_idx[0],
AR5K_RF_PD_GAIN_HI, true);
}
/* Lower synth voltage on Rev 2 */
if (ah->ah_radio == AR5K_RF5112 &&
(ah->ah_radio_5ghz_revision & AR5K_SREV_REV) > 0) {
ath5k_hw_rfb_op(ah, rf_regs, 2,
AR5K_RF_HIGH_VC_CP, true);
ath5k_hw_rfb_op(ah, rf_regs, 2,
AR5K_RF_MID_VC_CP, true);
ath5k_hw_rfb_op(ah, rf_regs, 2,
AR5K_RF_LOW_VC_CP, true);
ath5k_hw_rfb_op(ah, rf_regs, 2,
AR5K_RF_PUSH_UP, true);
}
/* Decrease power consumption on 5213+ BaseBand */
if (ah->ah_phy_revision >= AR5K_SREV_PHY_5212A) {
ath5k_hw_rfb_op(ah, rf_regs, 1,
AR5K_RF_PAD2GND, true);
ath5k_hw_rfb_op(ah, rf_regs, 1,
AR5K_RF_XB2_LVL, true);
ath5k_hw_rfb_op(ah, rf_regs, 1,
AR5K_RF_XB5_LVL, true);
ath5k_hw_rfb_op(ah, rf_regs, 1,
AR5K_RF_PWD_167, true);
ath5k_hw_rfb_op(ah, rf_regs, 1,
AR5K_RF_PWD_166, true);
}
}
ath5k_hw_rfb_op(ah, rf_regs, ee->ee_i_gain[ee_mode],
AR5K_RF_GAIN_I, true);
/* Tweak power detector for half/quarter rates */
if (ah->ah_bwmode == AR5K_BWMODE_5MHZ ||
ah->ah_bwmode == AR5K_BWMODE_10MHZ) {
u8 pd_delay;
pd_delay = (ah->ah_bwmode == AR5K_BWMODE_5MHZ) ?
0xf : 0x8;
ath5k_hw_rfb_op(ah, rf_regs, pd_delay,
AR5K_RF_PD_PERIOD_A, true);
ath5k_hw_rfb_op(ah, rf_regs, 0xf,
AR5K_RF_PD_DELAY_A, true);
}
}
if (ah->ah_radio == AR5K_RF5413 &&
channel->band == NL80211_BAND_2GHZ) {
ath5k_hw_rfb_op(ah, rf_regs, 1, AR5K_RF_DERBY_CHAN_SEL_MODE,
true);
/* Set optimum value for early revisions (on pci-e chips) */
if (ah->ah_mac_srev >= AR5K_SREV_AR5424 &&
ah->ah_mac_srev < AR5K_SREV_AR5413)
ath5k_hw_rfb_op(ah, rf_regs, ath5k_hw_bitswap(6, 3),
AR5K_RF_PWD_ICLOBUF_2G, true);
}
/* Write RF banks on hw */
for (i = 0; i < ah->ah_rf_banks_size; i++) {
AR5K_REG_WAIT(i);
ath5k_hw_reg_write(ah, rfb[i], ini_rfb[i].rfb_ctrl_register);
}
return 0;
}
/**************************\
PHY/RF channel functions
\**************************/
/**
* ath5k_hw_rf5110_chan2athchan() - Convert channel freq on RF5110
* @channel: The &struct ieee80211_channel
*
* Map channel frequency to IEEE channel number and convert it
* to an internal channel value used by the RF5110 chipset.
*/
static u32
ath5k_hw_rf5110_chan2athchan(struct ieee80211_channel *channel)
{
u32 athchan;
athchan = (ath5k_hw_bitswap(
(ieee80211_frequency_to_channel(
channel->center_freq) - 24) / 2, 5)
<< 1) | (1 << 6) | 0x1;
return athchan;
}
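/*
 * Worked example (hypothetical input): a 5180MHz channel is IEEE
 * channel 36, so (36 - 24) / 2 = 6 = 0b00110, bitswapped over 5 bits
 * -> 0b01100 = 12, and athchan = (12 << 1) | (1 << 6) | 0x1 = 0x59.
 */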
/**
* ath5k_hw_rf5110_channel() - Set channel frequency on RF5110
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*/
static int
ath5k_hw_rf5110_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data;
/*
* Set the channel and wait
*/
data = ath5k_hw_rf5110_chan2athchan(channel);
ath5k_hw_reg_write(ah, data, AR5K_RF_BUFFER);
ath5k_hw_reg_write(ah, 0, AR5K_RF_BUFFER_CONTROL_0);
usleep_range(1000, 1500);
return 0;
}
/**
* ath5k_hw_rf5111_chan2athchan() - Handle 2GHz channels on RF5111/2111
* @ieee: IEEE channel number
* @athchan: The &struct ath5k_athchan_2ghz
*
* In order to enable the RF2111 frequency converter on RF5111/2111 setups
* we need to add some offsets and extra flags to the data values we pass
* on to the PHY. So for every 2GHz channel this function gets called
* to do the conversion.
*/
static int
ath5k_hw_rf5111_chan2athchan(unsigned int ieee,
struct ath5k_athchan_2ghz *athchan)
{
int channel;
/* Cast this value to catch negative channel numbers (>= -19) */
channel = (int)ieee;
/*
* Map 2GHz IEEE channel to 5GHz Atheros channel
*/
if (channel <= 13) {
athchan->a2_athchan = 115 + channel;
athchan->a2_flags = 0x46;
} else if (channel == 14) {
athchan->a2_athchan = 124;
athchan->a2_flags = 0x44;
} else if (channel >= 15 && channel <= 26) {
athchan->a2_athchan = ((channel - 14) * 4) + 132;
athchan->a2_flags = 0x46;
} else
return -EINVAL;
return 0;
}
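/*
 * Quick reference for the mapping above: channel 1 -> athchan 116,
 * flags 0x46; channel 14 -> athchan 124, flags 0x44; channel 15 ->
 * (15 - 14) * 4 + 132 = 136, flags 0x46.
 */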
/**
* ath5k_hw_rf5111_channel() - Set channel frequency on RF5111/2111
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*/
static int
ath5k_hw_rf5111_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_athchan_2ghz ath5k_channel_2ghz;
unsigned int ath5k_channel =
ieee80211_frequency_to_channel(channel->center_freq);
u32 data0, data1, clock;
int ret;
/*
* Set the channel on the RF5111 radio
*/
data0 = data1 = 0;
if (channel->band == NL80211_BAND_2GHZ) {
/* Map 2GHz channel to 5GHz Atheros channel ID */
ret = ath5k_hw_rf5111_chan2athchan(
ieee80211_frequency_to_channel(channel->center_freq),
&ath5k_channel_2ghz);
if (ret)
return ret;
ath5k_channel = ath5k_channel_2ghz.a2_athchan;
data0 = ((ath5k_hw_bitswap(ath5k_channel_2ghz.a2_flags, 8) & 0xff)
<< 5) | (1 << 4);
}
if (ath5k_channel < 145 || !(ath5k_channel & 1)) {
clock = 1;
data1 = ((ath5k_hw_bitswap(ath5k_channel - 24, 8) & 0xff) << 2) |
(clock << 1) | (1 << 10) | 1;
} else {
clock = 0;
data1 = ((ath5k_hw_bitswap((ath5k_channel - 24) / 2, 8) & 0xff)
<< 2) | (clock << 1) | (1 << 10) | 1;
}
ath5k_hw_reg_write(ah, (data1 & 0xff) | ((data0 & 0xff) << 8),
AR5K_RF_BUFFER);
ath5k_hw_reg_write(ah, ((data1 >> 8) & 0xff) | (data0 & 0xff00),
AR5K_RF_BUFFER_CONTROL_3);
return 0;
}
/**
* ath5k_hw_rf5112_channel() - Set channel frequency on 5112 and newer
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* On RF5112/2112 and newer we don't need to do any conversion.
* We pass the frequency value after a few modifications to the
* chip directly.
*
* NOTE: Make sure the channel frequency given is within our range or else
* we might damage the chip! Use ath5k_channel_ok before calling this one.
*/
static int
ath5k_hw_rf5112_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data, data0, data1, data2;
u16 c;
data = data0 = data1 = data2 = 0;
c = channel->center_freq;
/* My guess based on code:
* 2GHz RF has 2 synth modes, one with a Local Oscillator
* at 2224MHz and one with a LO at 2192MHz. IF is 1520MHz
* (3040/2). data0 is used to set the PLL divider and data1
* selects synth mode. */
if (c < 4800) {
/* Channel 14 and all frequencies with 2MHz spacing
* below/above (non-standard channels) */
if (!((c - 2224) % 5)) {
/* Same as (c - 2224) / 5 */
data0 = ((2 * (c - 704)) - 3040) / 10;
data1 = 1;
/* Channel 1 and all frequencies with 5MHz spacing
* below/above (standard channels without channel 14) */
} else if (!((c - 2192) % 5)) {
/* Same as (c - 2192) / 5 */
data0 = ((2 * (c - 672)) - 3040) / 10;
data1 = 0;
} else
return -EINVAL;
data0 = ath5k_hw_bitswap((data0 << 2) & 0xff, 8);
/* This is more complex, we have a single synthesizer with
* 4 reference clock settings (?) based on frequency spacing
* and set using data2. LO is at 4800MHz and data0 is again used
* to set some divider.
*
* NOTE: There is an old atheros presentation at Stanford
* that mentions a method called dual direct conversion
* with 1GHz sliding IF for RF5110. Maybe that's what we
* have here, or an updated version. */
} else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c >= 5120) {
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
data2 = ath5k_hw_bitswap(3, 2);
} else if (!(c % 10)) {
data0 = ath5k_hw_bitswap(((c - 4800) / 10 << 1), 8);
data2 = ath5k_hw_bitswap(2, 2);
} else if (!(c % 5)) {
data0 = ath5k_hw_bitswap((c - 4800) / 5, 8);
data2 = ath5k_hw_bitswap(1, 2);
} else
return -EINVAL;
} else {
data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
data = (data0 << 4) | (data1 << 1) | (data2 << 2) | 0x1001;
ath5k_hw_reg_write(ah, data & 0xff, AR5K_RF_BUFFER);
ath5k_hw_reg_write(ah, (data >> 8) & 0x7f, AR5K_RF_BUFFER_CONTROL_5);
return 0;
}
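/*
 * Worked example (hypothetical input): c = 2412 (channel 1) hits the
 * (c - 2192) % 5 == 0 branch, so data0 = (2412 - 2192) / 5 = 44 and
 * data1 = 0. Then data0 = ath5k_hw_bitswap((44 << 2) & 0xff, 8) =
 * bitswap(0xb0) = 0x0d and data = (0x0d << 4) | 0x1001 = 0x10d1,
 * i.e. 0xd1 goes to AR5K_RF_BUFFER and 0x10 to AR5K_RF_BUFFER_CONTROL_5.
 */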
/**
* ath5k_hw_rf2425_channel() - Set channel frequency on RF2425
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* AR2425/2417 have a different 2GHz RF so code changes
* a little bit from RF5112.
*/
static int
ath5k_hw_rf2425_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 data, data0, data2;
u16 c;
data = data0 = data2 = 0;
c = channel->center_freq;
if (c < 4800) {
data0 = ath5k_hw_bitswap((c - 2272), 8);
data2 = 0;
/* ? 5GHz ? */
} else if ((c % 5) != 2 || c > 5435) {
if (!(c % 20) && c < 5120)
data0 = ath5k_hw_bitswap(((c - 4800) / 20 << 2), 8);
else if (!(c % 10))
data0 = ath5k_hw_bitswap(((c - 4800) / 10 << 1), 8);
else if (!(c % 5))
data0 = ath5k_hw_bitswap((c - 4800) / 5, 8);
else
return -EINVAL;
data2 = ath5k_hw_bitswap(1, 2);
} else {
data0 = ath5k_hw_bitswap((10 * (c - 2 - 4800)) / 25 + 1, 8);
data2 = ath5k_hw_bitswap(0, 2);
}
data = (data0 << 4) | data2 << 2 | 0x1001;
ath5k_hw_reg_write(ah, data & 0xff, AR5K_RF_BUFFER);
ath5k_hw_reg_write(ah, (data >> 8) & 0x7f, AR5K_RF_BUFFER_CONTROL_5);
return 0;
}
/**
* ath5k_hw_channel() - Set a channel on the radio chip
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* This is the main function called to set a channel on the
* radio chip based on the radio chip version.
*/
static int
ath5k_hw_channel(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
int ret;
/*
* Check bounds supported by the PHY (we don't care about regulatory
* restrictions at this point).
*/
if (!ath5k_channel_ok(ah, channel)) {
ATH5K_ERR(ah,
"channel frequency (%u MHz) out of supported "
"band range\n",
channel->center_freq);
return -EINVAL;
}
/*
* Set the channel and wait
*/
switch (ah->ah_radio) {
case AR5K_RF5110:
ret = ath5k_hw_rf5110_channel(ah, channel);
break;
case AR5K_RF5111:
ret = ath5k_hw_rf5111_channel(ah, channel);
break;
case AR5K_RF2317:
case AR5K_RF2425:
ret = ath5k_hw_rf2425_channel(ah, channel);
break;
default:
ret = ath5k_hw_rf5112_channel(ah, channel);
break;
}
if (ret)
return ret;
/* Set JAPAN setting for channel 14 */
if (channel->center_freq == 2484) {
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL,
AR5K_PHY_CCKTXCTL_JAPAN);
} else {
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_CCKTXCTL,
AR5K_PHY_CCKTXCTL_WORLD);
}
ah->ah_current_channel = channel;
return 0;
}
/*****************\
PHY calibration
\*****************/
/**
* DOC: PHY Calibration routines
*
* Noise floor calibration: When we tell the hardware to
* perform a noise floor calibration by setting the
* AR5K_PHY_AGCCTL_NF bit on AR5K_PHY_AGCCTL, it will periodically
* sample-and-hold the minimum noise level seen at the antennas.
* This value is then stored in a ring buffer of recently measured
* noise floor values so we have a moving window of the last few
* samples. The median of the values in the history is then loaded
* into the hardware for its own use for RSSI and CCA measurements.
* This type of calibration doesn't interfere with traffic.
*
* AGC calibration: When we tell the hardware to perform
* an AGC (Automatic Gain Control) calibration by setting the
* AR5K_PHY_AGCCTL_CAL bit, hw disconnects the antennas and does
* a calibration on the DC offsets of ADCs. During this period
* rx/tx gets disabled so we have to deal with it on the driver
* part.
*
* I/Q calibration: When we tell the hardware to perform
* an I/Q calibration, it tries to correct I/Q imbalance and
* fix QAM constellation by sampling data from rxed frames.
* It doesn't interfere with traffic.
*
* For more info on AGC and I/Q calibration check out patent doc
* #03/094463.
*/
/**
* ath5k_hw_read_measured_noise_floor() - Read measured NF from hw
* @ah: The &struct ath5k_hw
*/
static s32
ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
{
s32 val;
val = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
return sign_extend32(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 8);
}
/**
* ath5k_hw_init_nfcal_hist() - Initialize NF calibration history buffer
* @ah: The &struct ath5k_hw
*/
void
ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
{
int i;
ah->ah_nfcal_hist.index = 0;
for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++)
ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
}
/**
* ath5k_hw_update_nfcal_hist() - Update NF calibration history buffer
* @ah: The &struct ath5k_hw
* @noise_floor: The NF we got from hw
*/
static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
{
struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
hist->index = (hist->index + 1) & (ATH5K_NF_CAL_HIST_MAX - 1);
hist->nfval[hist->index] = noise_floor;
}
static int cmps16(const void *a, const void *b)
{
return *(s16 *)a - *(s16 *)b;
}
/**
* ath5k_hw_get_median_noise_floor() - Get median NF from history buffer
* @ah: The &struct ath5k_hw
*/
static s16
ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
{
s16 sorted_nfval[ATH5K_NF_CAL_HIST_MAX];
int i;
memcpy(sorted_nfval, ah->ah_nfcal_hist.nfval, sizeof(sorted_nfval));
sort(sorted_nfval, ATH5K_NF_CAL_HIST_MAX, sizeof(s16), cmps16, NULL);
for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) {
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"cal %d:%d\n", i, sorted_nfval[i]);
}
return sorted_nfval[(ATH5K_NF_CAL_HIST_MAX - 1) / 2];
}
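/*
 * Illustrative sketch (hypothetical values, assuming the usual 8-entry
 * history): with nfval = {-90, -94, -96, -92, -95, -91, -93, -97} the
 * sorted copy is {-97, -96, -95, -94, -93, -92, -91, -90} and we pick
 * sorted_nfval[(8 - 1) / 2] = sorted_nfval[3] = -94 as the median.
 */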
/**
* ath5k_hw_update_noise_floor() - Update NF on hardware
* @ah: The &struct ath5k_hw
*
* This is the main function we call to perform a NF calibration;
* it reads the NF from hardware, calculates the median and updates
* the NF on hw.
*/
void
ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 val;
s16 nf, threshold;
u8 ee_mode;
/* keep last value if calibration hasn't completed */
if (ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL) & AR5K_PHY_AGCCTL_NF) {
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"NF did not complete in calibration window\n");
return;
}
ah->ah_cal_mask |= AR5K_CALIBRATION_NF;
ee_mode = ath5k_eeprom_mode_from_channel(ah, ah->ah_current_channel);
/* completed NF calibration, test threshold */
nf = ath5k_hw_read_measured_noise_floor(ah);
threshold = ee->ee_noise_floor_thr[ee_mode];
if (nf > threshold) {
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"noise floor failure detected; "
"read %d, threshold %d\n",
nf, threshold);
nf = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
}
ath5k_hw_update_nfcal_hist(ah, nf);
nf = ath5k_hw_get_median_noise_floor(ah);
/* load noise floor (in .5 dBm) so the hardware will use it */
val = ath5k_hw_reg_read(ah, AR5K_PHY_NF) & ~AR5K_PHY_NF_M;
val |= (nf * 2) & AR5K_PHY_NF_M;
ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
AR5K_REG_MASKED_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
~(AR5K_PHY_AGCCTL_NF_EN | AR5K_PHY_AGCCTL_NF_NOUPDATE));
ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
0, false);
/*
* Load a high max CCA Power value (-50 dBm in .5 dBm units)
* so that we're not capped by the median we just loaded.
* This will be used as the initial value for the next noise
* floor calibration.
*/
val = (val & ~AR5K_PHY_NF_M) | ((-50 * 2) & AR5K_PHY_NF_M);
ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_NF_EN |
AR5K_PHY_AGCCTL_NF_NOUPDATE |
AR5K_PHY_AGCCTL_NF);
ah->ah_noise_floor = nf;
ah->ah_cal_mask &= ~AR5K_CALIBRATION_NF;
ATH5K_DBG(ah, ATH5K_DEBUG_CALIBRATE,
"noise floor calibrated: %d\n", nf);
}
/**
* ath5k_hw_rf5110_calibrate() - Perform a PHY calibration on RF5110
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* Do a complete PHY calibration (AGC + NF + I/Q) on RF5110
*/
static int
ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
u32 phy_sig, phy_agc, phy_sat, beacon;
int ret;
if (!(ah->ah_cal_mask & AR5K_CALIBRATION_FULL))
return 0;
/*
* Disable beacons and RX/TX queues, wait
*/
AR5K_REG_ENABLE_BITS(ah, AR5K_DIAG_SW_5210,
AR5K_DIAG_SW_DIS_TX_5210 | AR5K_DIAG_SW_DIS_RX_5210);
beacon = ath5k_hw_reg_read(ah, AR5K_BEACON_5210);
ath5k_hw_reg_write(ah, beacon & ~AR5K_BEACON_ENABLE, AR5K_BEACON_5210);
usleep_range(2000, 2500);
/*
* Set the channel (with AGC turned off)
*/
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
udelay(10);
ret = ath5k_hw_channel(ah, channel);
/*
* Activate PHY and wait
*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
usleep_range(1000, 1500);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
if (ret)
return ret;
/*
* Calibrate the radio chip
*/
/* Remember normal state */
phy_sig = ath5k_hw_reg_read(ah, AR5K_PHY_SIG);
phy_agc = ath5k_hw_reg_read(ah, AR5K_PHY_AGCCOARSE);
phy_sat = ath5k_hw_reg_read(ah, AR5K_PHY_ADCSAT);
/* Update radio registers */
ath5k_hw_reg_write(ah, (phy_sig & ~(AR5K_PHY_SIG_FIRPWR)) |
AR5K_REG_SM(-1, AR5K_PHY_SIG_FIRPWR), AR5K_PHY_SIG);
ath5k_hw_reg_write(ah, (phy_agc & ~(AR5K_PHY_AGCCOARSE_HI |
AR5K_PHY_AGCCOARSE_LO)) |
AR5K_REG_SM(-1, AR5K_PHY_AGCCOARSE_HI) |
AR5K_REG_SM(-127, AR5K_PHY_AGCCOARSE_LO), AR5K_PHY_AGCCOARSE);
ath5k_hw_reg_write(ah, (phy_sat & ~(AR5K_PHY_ADCSAT_ICNT |
AR5K_PHY_ADCSAT_THR)) |
AR5K_REG_SM(2, AR5K_PHY_ADCSAT_ICNT) |
AR5K_REG_SM(12, AR5K_PHY_ADCSAT_THR), AR5K_PHY_ADCSAT);
udelay(20);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
udelay(10);
ath5k_hw_reg_write(ah, AR5K_PHY_RFSTG_DISABLE, AR5K_PHY_RFSTG);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGC, AR5K_PHY_AGC_DISABLE);
usleep_range(1000, 1500);
/*
* Enable calibration and wait until completion
*/
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_CAL);
ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_CAL, 0, false);
/* Reset to normal state */
ath5k_hw_reg_write(ah, phy_sig, AR5K_PHY_SIG);
ath5k_hw_reg_write(ah, phy_agc, AR5K_PHY_AGCCOARSE);
ath5k_hw_reg_write(ah, phy_sat, AR5K_PHY_ADCSAT);
if (ret) {
ATH5K_ERR(ah, "calibration timeout (%uMHz)\n",
channel->center_freq);
return ret;
}
/*
* Re-enable RX/TX and beacons
*/
AR5K_REG_DISABLE_BITS(ah, AR5K_DIAG_SW_5210,
AR5K_DIAG_SW_DIS_TX_5210 | AR5K_DIAG_SW_DIS_RX_5210);
ath5k_hw_reg_write(ah, beacon, AR5K_BEACON_5210);
return 0;
}
/**
* ath5k_hw_rf511x_iq_calibrate() - Perform I/Q calibration on RF5111 and newer
* @ah: The &struct ath5k_hw
*/
static int
ath5k_hw_rf511x_iq_calibrate(struct ath5k_hw *ah)
{
u32 i_pwr, q_pwr;
s32 iq_corr, i_coff, i_coffd, q_coff, q_coffd;
int i;
/* Skip if I/Q calibration is not needed or if it's still running */
if (!ah->ah_iq_cal_needed)
return -EINVAL;
else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) & AR5K_PHY_IQ_RUN) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
"I/Q calibration still running");
return -EBUSY;
}
/* Calibration has finished, get the results and re-run */
/* Workaround for empty results which can apparently happen on 5212:
* read the registers up to 10 times until we get both i_pwr and q_pwr */
for (i = 0; i <= 10; i++) {
iq_corr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_CORR);
i_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_I);
q_pwr = ath5k_hw_reg_read(ah, AR5K_PHY_IQRES_CAL_PWR_Q);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
"iq_corr:%x i_pwr:%x q_pwr:%x", iq_corr, i_pwr, q_pwr);
if (i_pwr && q_pwr)
break;
}
i_coffd = ((i_pwr >> 1) + (q_pwr >> 1)) >> 7;
if (ah->ah_version == AR5K_AR5211)
q_coffd = q_pwr >> 6;
else
q_coffd = q_pwr >> 7;
/* If i_coffd became zero, cancel the calibration:
* not only is it too small, it would also result in a
* divide by zero later on. */
if (i_coffd == 0 || q_coffd < 2)
return -ECANCELED;
/* Protect against loss of sign bits */
i_coff = (-iq_corr) / i_coffd;
i_coff = clamp(i_coff, -32, 31); /* signed 6 bit */
if (ah->ah_version == AR5K_AR5211)
q_coff = (i_pwr / q_coffd) - 64;
else
q_coff = (i_pwr / q_coffd) - 128;
q_coff = clamp(q_coff, -16, 15); /* signed 5 bit */
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
"new I:%d Q:%d (i_coffd:%x q_coffd:%x)",
i_coff, q_coff, i_coffd, q_coffd);
/* Commit new I/Q values (set enable bit last to match HAL sources) */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_I_COFF, i_coff);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_Q_Q_COFF, q_coff);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_CORR_ENABLE);
/* Re-enable calibration (if we don't, we'll commit
* the same values again and again) */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ, AR5K_PHY_IQ_RUN);
return 0;
}
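/*
 * Worked example (hypothetical register values, 5212 path):
 * i_pwr = 0x8000, q_pwr = 0x7000, iq_corr = -1000 gives
 * i_coffd = (0x4000 + 0x3800) >> 7 = 240 and q_coffd = 0x7000 >> 7 = 224,
 * so i_coff = 1000 / 240 = 4 and q_coff = 0x8000 / 224 - 128 = 18,
 * clamped to the signed 5 bit maximum of 15.
 */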
/**
* ath5k_hw_phy_calibrate() - Perform a PHY calibration
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* The main function we call from above to perform
* a short or full PHY calibration based on RF chip
* and current channel
*/
int
ath5k_hw_phy_calibrate(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
int ret;
if (ah->ah_radio == AR5K_RF5110)
return ath5k_hw_rf5110_calibrate(ah, channel);
ret = ath5k_hw_rf511x_iq_calibrate(ah);
if (ret) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_CALIBRATE,
"No I/Q correction performed (%uMHz)\n",
channel->center_freq);
/* Happens all the time if there is not much
* traffic, consider it normal behaviour. */
ret = 0;
}
/* On full calibration request a PAPD probe for
* gainf calibration if needed */
if ((ah->ah_cal_mask & AR5K_CALIBRATION_FULL) &&
(ah->ah_radio == AR5K_RF5111 ||
ah->ah_radio == AR5K_RF5112) &&
channel->hw_value != AR5K_MODE_11B)
ath5k_hw_request_rfgain_probe(ah);
/* Update noise floor */
if (!(ah->ah_cal_mask & AR5K_CALIBRATION_NF))
ath5k_hw_update_noise_floor(ah);
return ret;
}
/***************************\
* Spur mitigation functions *
\***************************/
/**
* ath5k_hw_set_spur_mitigation_filter() - Configure SPUR filter
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* This function gets called during PHY initialization to
* configure the spur filter for the given channel. Spur is noise
* generated due to "reflection" effects; for more information on this
* method check out patent US7643810
*/
static void
ath5k_hw_set_spur_mitigation_filter(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u32 mag_mask[4] = {0, 0, 0, 0};
u32 pilot_mask[2] = {0, 0};
/* Note: fbin values are scaled up by 2 */
u16 spur_chan_fbin, chan_fbin, symbol_width, spur_detection_window;
s32 spur_delta_phase, spur_freq_sigma_delta;
s32 spur_offset, num_symbols_x16;
u8 num_symbol_offsets, i, freq_band;
/* Convert current frequency to fbin value (the same way channels
* are stored on EEPROM, check out ath5k_eeprom_bin2freq) and scale
* up by 2 so we can compare it later */
if (channel->band == NL80211_BAND_2GHZ) {
chan_fbin = (channel->center_freq - 2300) * 10;
freq_band = AR5K_EEPROM_BAND_2GHZ;
} else {
chan_fbin = (channel->center_freq - 4900) * 10;
freq_band = AR5K_EEPROM_BAND_5GHZ;
}
/* Check if any spur_chan_fbin from EEPROM is
* within our current channel's spur detection range */
spur_chan_fbin = AR5K_EEPROM_NO_SPUR;
spur_detection_window = AR5K_SPUR_CHAN_WIDTH;
/* XXX: Half/Quarter channels? */
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
spur_detection_window *= 2;
for (i = 0; i < AR5K_EEPROM_N_SPUR_CHANS; i++) {
spur_chan_fbin = ee->ee_spur_chans[i][freq_band];
/* Note: mask cleans AR5K_EEPROM_NO_SPUR flag
* so it's zero if we got nothing from EEPROM */
if (spur_chan_fbin == AR5K_EEPROM_NO_SPUR) {
spur_chan_fbin &= AR5K_EEPROM_SPUR_CHAN_MASK;
break;
}
if ((chan_fbin - spur_detection_window <=
(spur_chan_fbin & AR5K_EEPROM_SPUR_CHAN_MASK)) &&
(chan_fbin + spur_detection_window >=
(spur_chan_fbin & AR5K_EEPROM_SPUR_CHAN_MASK))) {
spur_chan_fbin &= AR5K_EEPROM_SPUR_CHAN_MASK;
break;
}
}
/* We need to enable spur filter for this channel */
if (spur_chan_fbin) {
spur_offset = spur_chan_fbin - chan_fbin;
/*
* Calculate deltas:
* spur_freq_sigma_delta -> spur_offset / sample_freq << 21
* spur_delta_phase -> spur_offset / chip_freq << 11
* Note: Both values have 100Hz resolution
*/
switch (ah->ah_bwmode) {
case AR5K_BWMODE_40MHZ:
/* Both sample_freq and chip_freq are 80MHz */
spur_delta_phase = (spur_offset << 16) / 25;
spur_freq_sigma_delta = (spur_delta_phase >> 10);
symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz * 2;
break;
case AR5K_BWMODE_10MHZ:
/* Both sample_freq and chip_freq are 20MHz (?) */
spur_delta_phase = (spur_offset << 18) / 25;
spur_freq_sigma_delta = (spur_delta_phase >> 10);
symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 2;
break;
case AR5K_BWMODE_5MHZ:
/* Both sample_freq and chip_freq are 10MHz (?) */
spur_delta_phase = (spur_offset << 19) / 25;
spur_freq_sigma_delta = (spur_delta_phase >> 10);
symbol_width = AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz / 4;
break;
default:
if (channel->band == NL80211_BAND_5GHZ) {
/* Both sample_freq and chip_freq are 40MHz */
spur_delta_phase = (spur_offset << 17) / 25;
spur_freq_sigma_delta =
(spur_delta_phase >> 10);
symbol_width =
AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
} else {
/* sample_freq -> 40MHz chip_freq -> 44MHz
* (for b compatibility) */
spur_delta_phase = (spur_offset << 17) / 25;
spur_freq_sigma_delta =
(spur_offset << 8) / 55;
symbol_width =
AR5K_SPUR_SYMBOL_WIDTH_BASE_100Hz;
}
break;
}
/* Calculate pilot and magnitude masks */
/* Scale up spur_offset by 1000 to switch to 100HZ resolution
* and divide by symbol_width to find how many symbols we have
* Note: number of symbols is scaled up by 16 */
num_symbols_x16 = ((spur_offset * 1000) << 4) / symbol_width;
/* Spur is on a symbol if num_symbols_x16 % 16 is zero */
if (!(num_symbols_x16 & 0xF))
/* _X_ */
num_symbol_offsets = 3;
else
/* _xx_ */
num_symbol_offsets = 4;
for (i = 0; i < num_symbol_offsets; i++) {
/* Calculate pilot mask */
s32 curr_sym_off =
(num_symbols_x16 / 16) + i + 25;
/* Pilot magnitude mask seems to be a way to
* declare the boundaries for our detection
* window or something, it's 2 for the middle
* value(s) where the symbol is expected to be
* and 1 on the boundary values */
u8 plt_mag_map =
(i == 0 || i == (num_symbol_offsets - 1))
? 1 : 2;
if (curr_sym_off >= 0 && curr_sym_off <= 32) {
if (curr_sym_off <= 25)
pilot_mask[0] |= 1 << curr_sym_off;
else if (curr_sym_off >= 27)
pilot_mask[0] |= 1 << (curr_sym_off - 1);
} else if (curr_sym_off >= 33 && curr_sym_off <= 52)
pilot_mask[1] |= 1 << (curr_sym_off - 33);
/* Calculate magnitude mask (for viterbi decoder) */
if (curr_sym_off >= -1 && curr_sym_off <= 14)
mag_mask[0] |=
plt_mag_map << (curr_sym_off + 1) * 2;
else if (curr_sym_off >= 15 && curr_sym_off <= 30)
mag_mask[1] |=
plt_mag_map << (curr_sym_off - 15) * 2;
else if (curr_sym_off >= 31 && curr_sym_off <= 46)
mag_mask[2] |=
plt_mag_map << (curr_sym_off - 31) * 2;
else if (curr_sym_off >= 47 && curr_sym_off <= 53)
mag_mask[3] |=
plt_mag_map << (curr_sym_off - 47) * 2;
}
/* Write settings on hw to enable spur filter */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
AR5K_PHY_BIN_MASK_CTL_RATE, 0xff);
/* XXX: Self correlator also ? */
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
AR5K_PHY_IQ_PILOT_MASK_EN |
AR5K_PHY_IQ_CHAN_MASK_EN |
AR5K_PHY_IQ_SPUR_FILT_EN);
/* Set delta phase and freq sigma delta */
ath5k_hw_reg_write(ah,
AR5K_REG_SM(spur_delta_phase,
AR5K_PHY_TIMING_11_SPUR_DELTA_PHASE) |
AR5K_REG_SM(spur_freq_sigma_delta,
AR5K_PHY_TIMING_11_SPUR_FREQ_SD) |
AR5K_PHY_TIMING_11_USE_SPUR_IN_AGC,
AR5K_PHY_TIMING_11);
/* Write pilot masks */
ath5k_hw_reg_write(ah, pilot_mask[0], AR5K_PHY_TIMING_7);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_8,
AR5K_PHY_TIMING_8_PILOT_MASK_2,
pilot_mask[1]);
ath5k_hw_reg_write(ah, pilot_mask[0], AR5K_PHY_TIMING_9);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_10,
AR5K_PHY_TIMING_10_PILOT_MASK_2,
pilot_mask[1]);
/* Write magnitude masks */
ath5k_hw_reg_write(ah, mag_mask[0], AR5K_PHY_BIN_MASK_1);
ath5k_hw_reg_write(ah, mag_mask[1], AR5K_PHY_BIN_MASK_2);
ath5k_hw_reg_write(ah, mag_mask[2], AR5K_PHY_BIN_MASK_3);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
AR5K_PHY_BIN_MASK_CTL_MASK_4,
mag_mask[3]);
ath5k_hw_reg_write(ah, mag_mask[0], AR5K_PHY_BIN_MASK2_1);
ath5k_hw_reg_write(ah, mag_mask[1], AR5K_PHY_BIN_MASK2_2);
ath5k_hw_reg_write(ah, mag_mask[2], AR5K_PHY_BIN_MASK2_3);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK2_4,
AR5K_PHY_BIN_MASK2_4_MASK_4,
mag_mask[3]);
} else if (ath5k_hw_reg_read(ah, AR5K_PHY_IQ) &
AR5K_PHY_IQ_SPUR_FILT_EN) {
/* Clean up spur mitigation settings and disable filter */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
AR5K_PHY_BIN_MASK_CTL_RATE, 0);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_IQ,
AR5K_PHY_IQ_PILOT_MASK_EN |
AR5K_PHY_IQ_CHAN_MASK_EN |
AR5K_PHY_IQ_SPUR_FILT_EN);
ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_11);
/* Clear pilot masks */
ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_7);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_8,
AR5K_PHY_TIMING_8_PILOT_MASK_2,
0);
ath5k_hw_reg_write(ah, 0, AR5K_PHY_TIMING_9);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_TIMING_10,
AR5K_PHY_TIMING_10_PILOT_MASK_2,
0);
/* Clear magnitude masks */
ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_1);
ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_2);
ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK_3);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK_CTL,
AR5K_PHY_BIN_MASK_CTL_MASK_4,
0);
ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_1);
ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_2);
ath5k_hw_reg_write(ah, 0, AR5K_PHY_BIN_MASK2_3);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_BIN_MASK2_4,
AR5K_PHY_BIN_MASK2_4_MASK_4,
0);
}
}
/*****************\
* Antenna control *
\*****************/
/**
* DOC: Antenna control
*
* Hw supports up to 14 antennas! I haven't found any card that implements
* that. The maximum number of antennas I've seen is 4 (2 for 2GHz and 2
* for 5GHz). Antenna 1 (MAIN) should be omnidirectional, 2 (AUX)
* omnidirectional or sectorial and antennas 3-14 sectorial (or directional).
*
* We can have a single antenna for RX and multiple antennas for TX.
* The RX antenna is our "default" antenna (usually antenna 1), set via the
* DEFAULT_ANTENNA register, and the TX antenna is set on each TX control
* descriptor (0 for automatic selection, 1-14 for a specific antenna).
*
* We can let hw do all the work doing fast antenna diversity for both
* tx and rx or we can do things manually. Here are the options we have
* (all are bits of STA_ID1 register):
*
* AR5K_STA_ID1_DEFAULT_ANTENNA -> When 0 is set as the TX antenna on TX
* control descriptor, use the default antenna to transmit or else use the last
* antenna on which we received an ACK.
*
* AR5K_STA_ID1_DESC_ANTENNA -> Update default antenna after each TX frame to
* the antenna on which we got the ACK for that frame.
*
* AR5K_STA_ID1_RTS_DEF_ANTENNA -> Use default antenna for RTS or else use the
* one on the TX descriptor.
*
* AR5K_STA_ID1_SELFGEN_DEF_ANT -> Use default antenna for self generated frames
* (ACKs etc), or else use current antenna (the one we just used for TX).
*
* Using the above we support the following scenarios:
*
* AR5K_ANTMODE_DEFAULT -> Hw handles antenna diversity etc automatically
*
* AR5K_ANTMODE_FIXED_A -> Only antenna A (MAIN) is present
*
* AR5K_ANTMODE_FIXED_B -> Only antenna B (AUX) is present
*
* AR5K_ANTMODE_SINGLE_AP -> Sta locked on a single ap
*
* AR5K_ANTMODE_SECTOR_AP -> AP with tx antenna set on tx desc
*
* AR5K_ANTMODE_SECTOR_STA -> STA with tx antenna set on tx desc
*
* AR5K_ANTMODE_DEBUG -> Debug mode (A -> Rx, B -> Tx)
*
* Also note that when setting the antenna to F on the tx descriptor the
* card inverts the current tx antenna.
*/
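/*
 * Example (derived from ath5k_hw_set_antenna_mode() below, illustrative
 * only): for AR5K_ANTMODE_FIXED_A every TX path is pinned to the default
 * antenna, so STA_ID1 ends up with
 *
 *	AR5K_STA_ID1_DEFAULT_ANTENNA |	(TX uses the default antenna)
 *	AR5K_STA_ID1_RTS_DEF_ANTENNA |	(RTS uses the default antenna)
 *	AR5K_STA_ID1_SELFGEN_DEF_ANT	(ACKs etc use the default antenna)
 *
 * while AR5K_STA_ID1_DESC_ANTENNA stays clear (the default antenna is
 * never updated from TX feedback) and fast diversity is disabled.
 */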
/**
* ath5k_hw_set_def_antenna() - Set default rx antenna on AR5211/5212 and newer
* @ah: The &struct ath5k_hw
* @ant: Antenna number
*/
static void
ath5k_hw_set_def_antenna(struct ath5k_hw *ah, u8 ant)
{
if (ah->ah_version != AR5K_AR5210)
ath5k_hw_reg_write(ah, ant & 0x7, AR5K_DEFAULT_ANTENNA);
}
/**
* ath5k_hw_set_fast_div() - Enable/disable fast rx antenna diversity
* @ah: The &struct ath5k_hw
* @ee_mode: One of enum ath5k_driver_mode
* @enable: True to enable, false to disable
*/
static void
ath5k_hw_set_fast_div(struct ath5k_hw *ah, u8 ee_mode, bool enable)
{
switch (ee_mode) {
case AR5K_EEPROM_MODE_11G:
/* XXX: This is set to
* disabled in the initvals !!! */
case AR5K_EEPROM_MODE_11A:
if (enable)
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_OFDM_DIV_DIS);
else
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_OFDM_DIV_DIS);
break;
case AR5K_EEPROM_MODE_11B:
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_OFDM_DIV_DIS);
break;
default:
return;
}
if (enable) {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
AR5K_PHY_RESTART_DIV_GC, 4);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
AR5K_PHY_FAST_ANT_DIV_EN);
} else {
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_RESTART,
AR5K_PHY_RESTART_DIV_GC, 0);
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_FAST_ANT_DIV,
AR5K_PHY_FAST_ANT_DIV_EN);
}
}
/**
* ath5k_hw_set_antenna_switch() - Set up antenna switch table
* @ah: The &struct ath5k_hw
* @ee_mode: One of enum ath5k_driver_mode
*
* Switch table comes from EEPROM and includes information on controlling
* the 2 antenna RX attenuators
*/
void
ath5k_hw_set_antenna_switch(struct ath5k_hw *ah, u8 ee_mode)
{
u8 ant0, ant1;
/*
* In case a fixed antenna was set as default
* use the same switch table twice.
*/
if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_A)
ant0 = ant1 = AR5K_ANT_SWTABLE_A;
else if (ah->ah_ant_mode == AR5K_ANTMODE_FIXED_B)
ant0 = ant1 = AR5K_ANT_SWTABLE_B;
else {
ant0 = AR5K_ANT_SWTABLE_A;
ant1 = AR5K_ANT_SWTABLE_B;
}
/* Set antenna idle switch table */
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_ANT_CTL,
AR5K_PHY_ANT_CTL_SWTABLE_IDLE,
(ah->ah_ant_ctl[ee_mode][AR5K_ANT_CTL] |
AR5K_PHY_ANT_CTL_TXRX_EN));
/* Set antenna switch tables */
ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant0],
AR5K_PHY_ANT_SWITCH_TABLE_0);
ath5k_hw_reg_write(ah, ah->ah_ant_ctl[ee_mode][ant1],
AR5K_PHY_ANT_SWITCH_TABLE_1);
}
/**
* ath5k_hw_set_antenna_mode() - Set antenna operating mode
* @ah: The &struct ath5k_hw
* @ant_mode: One of enum ath5k_ant_mode
*/
void
ath5k_hw_set_antenna_mode(struct ath5k_hw *ah, u8 ant_mode)
{
struct ieee80211_channel *channel = ah->ah_current_channel;
bool use_def_for_tx, update_def_on_tx, use_def_for_rts, fast_div;
bool use_def_for_sg;
int ee_mode;
u8 def_ant, tx_ant;
u32 sta_id1 = 0;
/* If the channel is not initialized yet we can't set the antennas,
* so just store the mode; it will be applied on the next reset */
if (channel == NULL) {
ah->ah_ant_mode = ant_mode;
return;
}
def_ant = ah->ah_def_ant;
ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
switch (ant_mode) {
case AR5K_ANTMODE_DEFAULT:
tx_ant = 0;
use_def_for_tx = false;
update_def_on_tx = false;
use_def_for_rts = false;
use_def_for_sg = false;
fast_div = true;
break;
case AR5K_ANTMODE_FIXED_A:
def_ant = 1;
tx_ant = 1;
use_def_for_tx = true;
update_def_on_tx = false;
use_def_for_rts = true;
use_def_for_sg = true;
fast_div = false;
break;
case AR5K_ANTMODE_FIXED_B:
def_ant = 2;
tx_ant = 2;
use_def_for_tx = true;
update_def_on_tx = false;
use_def_for_rts = true;
use_def_for_sg = true;
fast_div = false;
break;
case AR5K_ANTMODE_SINGLE_AP:
def_ant = 1; /* updated on tx */
tx_ant = 0;
use_def_for_tx = true;
update_def_on_tx = true;
use_def_for_rts = true;
use_def_for_sg = true;
fast_div = true;
break;
case AR5K_ANTMODE_SECTOR_AP:
tx_ant = 1; /* variable */
use_def_for_tx = false;
update_def_on_tx = false;
use_def_for_rts = true;
use_def_for_sg = false;
fast_div = false;
break;
case AR5K_ANTMODE_SECTOR_STA:
tx_ant = 1; /* variable */
use_def_for_tx = true;
update_def_on_tx = false;
use_def_for_rts = true;
use_def_for_sg = false;
fast_div = true;
break;
case AR5K_ANTMODE_DEBUG:
def_ant = 1;
tx_ant = 2;
use_def_for_tx = false;
update_def_on_tx = false;
use_def_for_rts = false;
use_def_for_sg = false;
fast_div = false;
break;
default:
return;
}
ah->ah_tx_ant = tx_ant;
ah->ah_ant_mode = ant_mode;
ah->ah_def_ant = def_ant;
sta_id1 |= use_def_for_tx ? AR5K_STA_ID1_DEFAULT_ANTENNA : 0;
sta_id1 |= update_def_on_tx ? AR5K_STA_ID1_DESC_ANTENNA : 0;
sta_id1 |= use_def_for_rts ? AR5K_STA_ID1_RTS_DEF_ANTENNA : 0;
sta_id1 |= use_def_for_sg ? AR5K_STA_ID1_SELFGEN_DEF_ANT : 0;
AR5K_REG_DISABLE_BITS(ah, AR5K_STA_ID1, AR5K_STA_ID1_ANTENNA_SETTINGS);
if (sta_id1)
AR5K_REG_ENABLE_BITS(ah, AR5K_STA_ID1, sta_id1);
ath5k_hw_set_antenna_switch(ah, ee_mode);
/* Note: set diversity before the default antenna,
* otherwise it won't work correctly */
ath5k_hw_set_fast_div(ah, ee_mode, fast_div);
ath5k_hw_set_def_antenna(ah, def_ant);
}
/****************\
* TX power setup *
\****************/
/*
* Helper functions
*/
/**
* ath5k_get_interpolated_value() - Get interpolated Y val between two points
* @target: X value of the middle point
* @x_left: X value of the left point
* @x_right: X value of the right point
* @y_left: Y value of the left point
* @y_right: Y value of the right point
*/
static s16
ath5k_get_interpolated_value(s16 target, s16 x_left, s16 x_right,
s16 y_left, s16 y_right)
{
s16 ratio, result;
/* Avoid divide by zero and skip interpolation
* if we have the same point */
if ((x_left == x_right) || (y_left == y_right))
return y_left;
/*
* Since we use ints and not fps, we need to scale up in
* order to get a sane ratio value (or else we'll e.g. always
* get 1 instead of 1.25, 1.75 etc). We scale up by 100
* to have some accuracy both for 0.5 and 0.25 steps.
*/
ratio = ((100 * y_right - 100 * y_left) / (x_right - x_left));
/* Now scale down to be in range */
result = y_left + (ratio * (target - x_left) / 100);
return result;
}
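/*
 * Worked example (illustrative only, not part of the driver): for two
 * calibration points (x_left = 5180, y_left = 20) and (x_right = 5200,
 * y_right = 28) with target = 5190 we get
 *
 *	ratio  = (100 * 28 - 100 * 20) / (5200 - 5180) = 40
 *	result = 20 + (40 * (5190 - 5180)) / 100 = 24
 *
 * i.e. the midpoint of the two y values, as expected. Without the x100
 * scaling the integer ratio would have collapsed to 0 here.
 */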
/**
* ath5k_get_linear_pcdac_min() - Find vertical boundary (min pwr) for the
* linear PCDAC curve
* @stepL: Left array with y values (pcdac steps)
* @stepR: Right array with y values (pcdac steps)
* @pwrL: Left array with x values (power steps)
* @pwrR: Right array with x values (power steps)
*
* Since we have the top of the curve and we draw the line below
* until we reach 1 (1 pcdac step) we need to know which point
* (x value) that is so that we don't go below x axis and have negative
* pcdac values when creating the curve, or fill the table with zeros.
*/
static s16
ath5k_get_linear_pcdac_min(const u8 *stepL, const u8 *stepR,
const s16 *pwrL, const s16 *pwrR)
{
s8 tmp;
s16 min_pwrL, min_pwrR;
s16 pwr_i;
/* Some vendors write the same pcdac value twice !!! */
if (stepL[0] == stepL[1] || stepR[0] == stepR[1])
return max(pwrL[0], pwrR[0]);
if (pwrL[0] == pwrL[1])
min_pwrL = pwrL[0];
else {
pwr_i = pwrL[0];
do {
pwr_i--;
tmp = (s8) ath5k_get_interpolated_value(pwr_i,
pwrL[0], pwrL[1],
stepL[0], stepL[1]);
} while (tmp > 1);
min_pwrL = pwr_i;
}
if (pwrR[0] == pwrR[1])
min_pwrR = pwrR[0];
else {
pwr_i = pwrR[0];
do {
pwr_i--;
tmp = (s8) ath5k_get_interpolated_value(pwr_i,
pwrR[0], pwrR[1],
stepR[0], stepR[1]);
} while (tmp > 1);
min_pwrR = pwr_i;
}
/* Keep the right boundary so that it works for both curves */
return max(min_pwrL, min_pwrR);
}
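/*
 * Worked example (illustrative only, numbers made up): with the left
 * curve sampled at pwrL = {10, 20} and stepL = {5, 15} the line has a
 * slope of one pcdac step per power unit, so walking downwards gives
 *
 *	pwr_i = 9 -> step 4,  8 -> 3,  7 -> 2,  6 -> 1 (loop stops)
 *
 * and min_pwrL = 6, the lowest power for which the extrapolated line
 * still yields a pcdac step of at least 1.
 */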
/**
* ath5k_create_power_curve() - Create a Power to PDADC or PCDAC curve
* @pmin: Minimum power value (xmin)
* @pmax: Maximum power value (xmax)
* @pwr: Array of power steps (x values)
* @vpd: Array of matching PCDAC/PDADC steps (y values)
* @num_points: Number of provided points
* @vpd_table: Array to fill with the full PCDAC/PDADC values (y values)
* @type: One of enum ath5k_powertable_type (eeprom.h)
*
* Interpolate (pwr,vpd) points to create a Power to PDADC or a
* Power to PCDAC curve.
*
* Each curve has power on x axis (in 0.5dB units) and PCDAC/PDADC
* steps (offsets) on y axis. Power can go up to 31.5dB and max
* PCDAC/PDADC step for each curve is 64 but we can write more than
* one curves on hw so we can go up to 128 (which is the max step we
* can write on the final table).
*
* We write y values (PCDAC/PDADC steps) on hw.
*/
static void
ath5k_create_power_curve(s16 pmin, s16 pmax,
const s16 *pwr, const u8 *vpd,
u8 num_points,
u8 *vpd_table, u8 type)
{
u8 idx[2] = { 0, 1 };
s16 pwr_i = 2 * pmin;
int i;
if (num_points < 2)
return;
/* We want the whole line, so adjust boundaries
* to cover the entire power range. Note that
* power values are already 0.25dB so no need
* to multiply pwr_i by 2 */
if (type == AR5K_PWRTABLE_LINEAR_PCDAC) {
pwr_i = pmin;
pmin = 0;
pmax = 63;
}
/* Find surrounding turning points (TPs)
* and interpolate between them */
for (i = 0; (i <= (u16) (pmax - pmin)) &&
(i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) {
/* We passed the right TP, move to the next set of TPs
* if we pass the last TP, extrapolate above using the last
* two TPs for ratio */
if ((pwr_i > pwr[idx[1]]) && (idx[1] < num_points - 1)) {
idx[0]++;
idx[1]++;
}
vpd_table[i] = (u8) ath5k_get_interpolated_value(pwr_i,
pwr[idx[0]], pwr[idx[1]],
vpd[idx[0]], vpd[idx[1]]);
/* Increase by 0.5dB
* (0.25 dB units) */
pwr_i += 2;
}
}
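/*
 * Worked example (illustrative only, numbers made up): two eeprom points
 * pwr = {0, 8} (0.25dB units) and vpd = {0, 16}, with pmin = 0 and
 * pmax = 4 (0.5dB units), produce
 *
 *	pwr_i:     0  2  4   6   8
 *	vpd_table: 0  4  8  12  16
 *
 * one entry per 0.5dB step, linearly interpolated between the two
 * turning points.
 */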
/**
* ath5k_get_chan_pcal_surrounding_piers() - Get surrounding calibration piers
* for a given channel.
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
* @pcinfo_l: The &struct ath5k_chan_pcal_info to put the left cal. pier
* @pcinfo_r: The &struct ath5k_chan_pcal_info to put the right cal. pier
*
* Get the surrounding per-channel power calibration piers
* for a given frequency so that we can interpolate between
* them and come up with an appropriate dataset for our current
* channel.
*/
static void
ath5k_get_chan_pcal_surrounding_piers(struct ath5k_hw *ah,
struct ieee80211_channel *channel,
struct ath5k_chan_pcal_info **pcinfo_l,
struct ath5k_chan_pcal_info **pcinfo_r)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_chan_pcal_info *pcinfo;
u8 idx_l, idx_r;
u8 mode, max, i;
u32 target = channel->center_freq;
idx_l = 0;
idx_r = 0;
switch (channel->hw_value) {
case AR5K_EEPROM_MODE_11A:
pcinfo = ee->ee_pwr_cal_a;
mode = AR5K_EEPROM_MODE_11A;
break;
case AR5K_EEPROM_MODE_11B:
pcinfo = ee->ee_pwr_cal_b;
mode = AR5K_EEPROM_MODE_11B;
break;
case AR5K_EEPROM_MODE_11G:
default:
pcinfo = ee->ee_pwr_cal_g;
mode = AR5K_EEPROM_MODE_11G;
break;
}
max = ee->ee_n_piers[mode] - 1;
/* Frequency is below our calibrated
* range. Use the lowest power curve
* we have */
if (target < pcinfo[0].freq) {
idx_l = idx_r = 0;
goto done;
}
/* Frequency is above our calibrated
* range. Use the highest power curve
* we have */
if (target > pcinfo[max].freq) {
idx_l = idx_r = max;
goto done;
}
/* Frequency is inside our calibrated
* channel range. Pick the surrounding
* calibration piers so that we can
* interpolate */
for (i = 0; i <= max; i++) {
/* Frequency matches one of our calibration
* piers, no need to interpolate, just use
* that calibration pier */
if (pcinfo[i].freq == target) {
idx_l = idx_r = i;
goto done;
}
/* We found a calibration pier that's above
* frequency, use this pier and the previous
* one to interpolate */
if (target < pcinfo[i].freq) {
idx_r = i;
idx_l = idx_r - 1;
goto done;
}
}
done:
*pcinfo_l = &pcinfo[idx_l];
*pcinfo_r = &pcinfo[idx_r];
}
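/*
 * Example (illustrative only): with calibration piers at {5180, 5240,
 * 5320}MHz a target of 5260MHz is first exceeded at the 5320MHz pier,
 * so we return idx_l = 1 (5240MHz) and idx_r = 2 (5320MHz) and let the
 * caller interpolate between the two datasets.
 */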
/**
* ath5k_get_rate_pcal_data() - Get the interpolated per-rate power
* calibration data
* @ah: The &struct ath5k_hw *ah,
* @channel: The &struct ieee80211_channel
* @rates: The &struct ath5k_rate_pcal_info to fill
*
* Get the surrounding per-rate power calibration data
* for a given frequency and interpolate between power
* values to set max target power supported by hw for
* each rate on this frequency.
*/
static void
ath5k_get_rate_pcal_data(struct ath5k_hw *ah,
struct ieee80211_channel *channel,
struct ath5k_rate_pcal_info *rates)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_rate_pcal_info *rpinfo;
u8 idx_l, idx_r;
u8 mode, max, i;
u32 target = channel->center_freq;
idx_l = 0;
idx_r = 0;
switch (channel->hw_value) {
case AR5K_MODE_11A:
rpinfo = ee->ee_rate_tpwr_a;
mode = AR5K_EEPROM_MODE_11A;
break;
case AR5K_MODE_11B:
rpinfo = ee->ee_rate_tpwr_b;
mode = AR5K_EEPROM_MODE_11B;
break;
case AR5K_MODE_11G:
default:
rpinfo = ee->ee_rate_tpwr_g;
mode = AR5K_EEPROM_MODE_11G;
break;
}
max = ee->ee_rate_target_pwr_num[mode] - 1;
/* Get the surrounding calibration
* piers - same as above */
if (target < rpinfo[0].freq) {
idx_l = idx_r = 0;
goto done;
}
if (target > rpinfo[max].freq) {
idx_l = idx_r = max;
goto done;
}
for (i = 0; i <= max; i++) {
if (rpinfo[i].freq == target) {
idx_l = idx_r = i;
goto done;
}
if (target < rpinfo[i].freq) {
idx_r = i;
idx_l = idx_r - 1;
goto done;
}
}
done:
/* Now interpolate power value, based on the frequency */
rates->freq = target;
rates->target_power_6to24 =
ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
rpinfo[idx_r].freq,
rpinfo[idx_l].target_power_6to24,
rpinfo[idx_r].target_power_6to24);
rates->target_power_36 =
ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
rpinfo[idx_r].freq,
rpinfo[idx_l].target_power_36,
rpinfo[idx_r].target_power_36);
rates->target_power_48 =
ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
rpinfo[idx_r].freq,
rpinfo[idx_l].target_power_48,
rpinfo[idx_r].target_power_48);
rates->target_power_54 =
ath5k_get_interpolated_value(target, rpinfo[idx_l].freq,
rpinfo[idx_r].freq,
rpinfo[idx_l].target_power_54,
rpinfo[idx_r].target_power_54);
}
/**
* ath5k_get_max_ctl_power() - Get max edge power for a given frequency
* @ah: the &struct ath5k_hw
* @channel: The &struct ieee80211_channel
*
* Get the max edge power for this channel if
* we have such data from EEPROM's Conformance Test
* Limits (CTL), and limit max power if needed.
*/
static void
ath5k_get_max_ctl_power(struct ath5k_hw *ah,
struct ieee80211_channel *channel)
{
struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
struct ath5k_edge_power *rep = ee->ee_ctl_pwr;
u8 *ctl_val = ee->ee_ctl;
s16 max_chan_pwr = ah->ah_txpower.txp_max_pwr / 4;
s16 edge_pwr = 0;
u8 rep_idx;
u8 i, ctl_mode;
u8 ctl_idx = 0xFF;
u32 target = channel->center_freq;
ctl_mode = ath_regd_get_band_ctl(regulatory, channel->band);
switch (channel->hw_value) {
case AR5K_MODE_11A:
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
ctl_mode |= AR5K_CTL_TURBO;
else
ctl_mode |= AR5K_CTL_11A;
break;
case AR5K_MODE_11G:
if (ah->ah_bwmode == AR5K_BWMODE_40MHZ)
ctl_mode |= AR5K_CTL_TURBOG;
else
ctl_mode |= AR5K_CTL_11G;
break;
case AR5K_MODE_11B:
ctl_mode |= AR5K_CTL_11B;
break;
default:
return;
}
for (i = 0; i < ee->ee_ctls; i++) {
if (ctl_val[i] == ctl_mode) {
ctl_idx = i;
break;
}
}
/* Bail out if we don't have a CTL dataset for this mode,
* else use it to find the edge power for our frequency */
if (ctl_idx == 0xFF)
return;
/* Edge powers are sorted by frequency from lower
* to higher. Each CTL corresponds to 8 edge power
* measurements. */
rep_idx = ctl_idx * AR5K_EEPROM_N_EDGES;
/* Don't do a boundaries check because we
* might have more than one band defined
* for this mode */
/* Get the edge power that's closer to our
* frequency */
for (i = 0; i < AR5K_EEPROM_N_EDGES; i++) {
rep_idx += i;
if (target <= rep[rep_idx].freq)
edge_pwr = (s16) rep[rep_idx].edge;
}
if (edge_pwr)
ah->ah_txpower.txp_max_pwr = 4 * min(edge_pwr, max_chan_pwr);
}
/*
* Power to PCDAC table functions
*/
/**
* DOC: Power to PCDAC table functions
*
* For RF5111 we have an XPD -eXternal Power Detector- curve
* for each calibrated channel. Each curve has 0.5dB power steps
* on x axis and PCDAC steps (offsets) on y axis and looks like an
* exponential function. To recreate the curve we read 11 points
* from eeprom (eeprom.c) and interpolate here.
*
* For RF5112 we have 4 XPD -eXternal Power Detector- curves
* for each calibrated channel on 0, -6, -12 and -18dBm but we only
* use the higher (3) and the lower (0) curves. Each curve again has 0.5dB
* power steps on x axis and PCDAC steps on y axis and looks like a
* linear function. To recreate the curve and pass the power values
* on hw, we get 4 points for xpd 0 (lower gain -> max power)
* and 3 points for xpd 3 (higher gain -> lower power) from eeprom (eeprom.c)
* and interpolate here.
*
* For a given channel we use its calibrated points (piers) if we have them;
* if we don't have calibration data for this specific channel we take the
* surrounding channels we do have calibration data for and do a linear
* interpolation between them. Then, since we now have calibrated points for
* this channel, we do another linear interpolation between them to get the
* whole curve.
*
* We finally write the Y values of the curve(s) (the PCDAC values) on hw
*/
/**
* ath5k_fill_pwr_to_pcdac_table() - Fill Power to PCDAC table on RF5111
* @ah: The &struct ath5k_hw
* @table_min: Minimum power (x min)
* @table_max: Maximum power (x max)
*
* No further processing is needed for RF5111, the only thing we have to
* do is fill the values below and above calibration range since eeprom data
* may not cover the entire PCDAC table.
*/
static void
ath5k_fill_pwr_to_pcdac_table(struct ath5k_hw *ah, s16* table_min,
s16 *table_max)
{
u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
u8 *pcdac_tmp = ah->ah_txpower.tmpL[0];
u8 pcdac_0, pcdac_n, pcdac_i, pwr_idx, i;
s16 min_pwr, max_pwr;
/* Get table boundaries */
min_pwr = table_min[0];
pcdac_0 = pcdac_tmp[0];
max_pwr = table_max[0];
pcdac_n = pcdac_tmp[table_max[0] - table_min[0]];
/* Extrapolate below minimum using pcdac_0 */
pcdac_i = 0;
for (i = 0; i < min_pwr; i++)
pcdac_out[pcdac_i++] = pcdac_0;
/* Copy values from pcdac_tmp */
pwr_idx = min_pwr;
for (i = 0; pwr_idx <= max_pwr &&
pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE; i++) {
pcdac_out[pcdac_i++] = pcdac_tmp[i];
pwr_idx++;
}
/* Extrapolate above maximum */
while (pcdac_i < AR5K_EEPROM_POWER_TABLE_SIZE)
pcdac_out[pcdac_i++] = pcdac_n;
}
/**
* ath5k_combine_linear_pcdac_curves() - Combine available PCDAC Curves
* @ah: The &struct ath5k_hw
* @table_min: Minimum power (x min)
* @table_max: Maximum power (x max)
* @pdcurves: Number of pd curves
*
* Combine available XPD Curves and fill Linear Power to PCDAC table on RF5112
* RF5112 can have up to 2 curves (one for the low txpower range and one for
* higher txpower range). We need to put them both on pcdac_out and place
* them in the correct location. In case we only have one curve available
* just fit it on pcdac_out (it's supposed to cover the entire range of
* available pwr levels since it's always the higher power curve). Extrapolate
* below and above final table if needed.
*/
static void
ath5k_combine_linear_pcdac_curves(struct ath5k_hw *ah, s16* table_min,
s16 *table_max, u8 pdcurves)
{
u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
u8 *pcdac_low_pwr;
u8 *pcdac_high_pwr;
u8 *pcdac_tmp;
u8 pwr;
s16 max_pwr_idx;
s16 min_pwr_idx;
s16 mid_pwr_idx = 0;
/* Edge flag turns on the 7th bit on the PCDAC
* to declare the higher power curve (force values
* to be greater than 64). If we only have one curve
* we don't need to set this, if we have 2 curves and
* fill the table backwards this can also be used to
* switch from higher power curve to lower power curve */
u8 edge_flag;
int i;
/* When we have only one curve available
* that's the higher power curve. If we have
* two curves the first is the high power curve
* and the next is the low power curve. */
if (pdcurves > 1) {
pcdac_low_pwr = ah->ah_txpower.tmpL[1];
pcdac_high_pwr = ah->ah_txpower.tmpL[0];
mid_pwr_idx = table_max[1] - table_min[1] - 1;
max_pwr_idx = (table_max[0] - table_min[0]) / 2;
/* If table size goes beyond 31.5dB, keep the
* upper 31.5dB range when setting tx power.
* Note: 126 = 31.5 dB in quarter dB steps */
if (table_max[0] - table_min[1] > 126)
min_pwr_idx = table_max[0] - 126;
else
min_pwr_idx = table_min[1];
/* Since we fill table backwards
* start from high power curve */
pcdac_tmp = pcdac_high_pwr;
edge_flag = 0x40;
} else {
pcdac_low_pwr = ah->ah_txpower.tmpL[1]; /* Zeroed */
pcdac_high_pwr = ah->ah_txpower.tmpL[0];
min_pwr_idx = table_min[0];
max_pwr_idx = (table_max[0] - table_min[0]) / 2;
pcdac_tmp = pcdac_high_pwr;
edge_flag = 0;
}
/* This is used when setting tx power */
ah->ah_txpower.txp_min_idx = min_pwr_idx / 2;
/* Fill Power to PCDAC table backwards */
pwr = max_pwr_idx;
for (i = 63; i >= 0; i--) {
/* Entering lower power range, reset
* edge flag and set pcdac_tmp to lower
* power curve.*/
if (edge_flag == 0x40 &&
(2 * pwr <= (table_max[1] - table_min[0]) || pwr == 0)) {
edge_flag = 0x00;
pcdac_tmp = pcdac_low_pwr;
pwr = mid_pwr_idx / 2;
}
/* Don't go below 1, extrapolate below if we have
* already switched to the lower power curve -or
* we only have one curve and edge_flag is zero
* anyway */
if (pcdac_tmp[pwr] < 1 && (edge_flag == 0x00)) {
while (i >= 0) {
pcdac_out[i] = pcdac_out[i + 1];
i--;
}
break;
}
pcdac_out[i] = pcdac_tmp[pwr] | edge_flag;
/* Cap the value at 126 -it can get higher than
* that because we OR pcdac_out with edge_flag
* on the high power curve */
if (pcdac_out[i] > 126)
pcdac_out[i] = 126;
/* Decrease by a 0.5dB step */
pwr--;
}
}
/**
* ath5k_write_pcdac_table() - Write the PCDAC values on hw
* @ah: The &struct ath5k_hw
*/
static void
ath5k_write_pcdac_table(struct ath5k_hw *ah)
{
u8 *pcdac_out = ah->ah_txpower.txp_pd_table;
int i;
/*
* Write TX power values
*/
for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
ath5k_hw_reg_write(ah,
(((pcdac_out[2 * i + 0] << 8 | 0xff) & 0xffff) << 0) |
(((pcdac_out[2 * i + 1] << 8 | 0xff) & 0xffff) << 16),
AR5K_PHY_PCDAC_TXPOWER(i));
}
}
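/*
 * Illustrative example (not part of the driver): each 32bit register
 * holds two table entries, one per 16bit half, with the PCDAC value in
 * the upper byte and 0xff in the lower one. For pcdac_out[0] = 0x20 and
 * pcdac_out[1] = 0x24 the first register gets
 *
 *	(0x20 << 8 | 0xff) | (((0x24 << 8 | 0xff) & 0xffff) << 16)
 *	= 0x24ff20ff
 */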
/*
* Power to PDADC table functions
*/
/**
* DOC: Power to PDADC table functions
*
* For RF2413 and later we have a Power to PDADC table (Power Detector)
* instead of a PCDAC (Power Control) and 4 pd gain curves for each
* calibrated channel. Each curve has power on x axis in 0.5 db steps and
* PDADC steps on y axis and looks like an exponential function like the
* RF5111 curve.
*
* To recreate the curves we read the points from eeprom (eeprom.c)
* and interpolate here. Note that in most cases only 2 (higher and lower)
* curves are used (like RF5112) but vendors have the opportunity to include
* all 4 curves on eeprom. The final curve (higher power) has an extra
* point for better accuracy like RF5112.
*
* The process is similar to what we do above for RF5111/5112
*/
/**
* ath5k_combine_pwr_to_pdadc_curves() - Combine the various PDADC curves
* @ah: The &struct ath5k_hw
* @pwr_min: Minimum power (x min)
* @pwr_max: Maximum power (x max)
* @pdcurves: Number of available curves
*
* Combine the various pd curves and create the final Power to PDADC table
* We can have up to 4 pd curves, we need to do a similar process
* as we do for RF5112. This time we don't have an edge_flag but we
* set the gain boundaries on a separate register.
*/
static void
ath5k_combine_pwr_to_pdadc_curves(struct ath5k_hw *ah,
s16 *pwr_min, s16 *pwr_max, u8 pdcurves)
{
u8 gain_boundaries[AR5K_EEPROM_N_PD_GAINS];
u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
u8 *pdadc_tmp;
s16 pdadc_0;
u8 pdadc_i, pdadc_n, pwr_step, pdg, max_idx, table_size;
u8 pd_gain_overlap;
/* Note: Register value is initialized by the initvals,
* there is no feedback from hw.
* XXX: What about pd_gain_overlap from EEPROM ? */
pd_gain_overlap = (u8) ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG5) &
AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP;
/* Create final PDADC table */
for (pdg = 0, pdadc_i = 0; pdg < pdcurves; pdg++) {
pdadc_tmp = ah->ah_txpower.tmpL[pdg];
if (pdg == pdcurves - 1)
/* 2 dB boundary stretch for last
* (higher power) curve */
gain_boundaries[pdg] = pwr_max[pdg] + 4;
else
/* Set gain boundary in the middle
* between this curve and the next one */
gain_boundaries[pdg] =
(pwr_max[pdg] + pwr_min[pdg + 1]) / 2;
/* Sanity check in case our 2dB stretch went out
* of range. */
if (gain_boundaries[pdg] > AR5K_TUNE_MAX_TXPOWER)
gain_boundaries[pdg] = AR5K_TUNE_MAX_TXPOWER;
/* For the first curve (lower power)
* start from 0 dB */
if (pdg == 0)
pdadc_0 = 0;
else
/* For the other curves use the gain overlap */
pdadc_0 = (gain_boundaries[pdg - 1] - pwr_min[pdg]) -
pd_gain_overlap;
/* Force each power step to be at least 0.5 dB */
if ((pdadc_tmp[1] - pdadc_tmp[0]) > 1)
pwr_step = pdadc_tmp[1] - pdadc_tmp[0];
else
pwr_step = 1;
/* If pdadc_0 is negative, we need to extrapolate
* below this pdgain by a number of pwr_steps */
while ((pdadc_0 < 0) && (pdadc_i < 128)) {
s16 tmp = pdadc_tmp[0] + pdadc_0 * pwr_step;
pdadc_out[pdadc_i++] = (tmp < 0) ? 0 : (u8) tmp;
pdadc_0++;
}
/* Set last pwr level, using gain boundaries */
pdadc_n = gain_boundaries[pdg] + pd_gain_overlap - pwr_min[pdg];
/* Limit it to be inside pwr range */
table_size = pwr_max[pdg] - pwr_min[pdg];
max_idx = min(pdadc_n, table_size);
/* Fill pdadc_out table */
while (pdadc_0 < max_idx && pdadc_i < 128)
pdadc_out[pdadc_i++] = pdadc_tmp[pdadc_0++];
/* Need to extrapolate above this pdgain? */
if (pdadc_n <= max_idx)
continue;
/* Force each power step to be at least 0.5 dB */
if ((pdadc_tmp[table_size - 1] - pdadc_tmp[table_size - 2]) > 1)
pwr_step = pdadc_tmp[table_size - 1] -
pdadc_tmp[table_size - 2];
else
pwr_step = 1;
/* Extrapolate above */
while ((pdadc_0 < (s16) pdadc_n) &&
(pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2)) {
s16 tmp = pdadc_tmp[table_size - 1] +
(pdadc_0 - max_idx) * pwr_step;
pdadc_out[pdadc_i++] = (tmp > 127) ? 127 : (u8) tmp;
pdadc_0++;
}
}
while (pdg < AR5K_EEPROM_N_PD_GAINS) {
gain_boundaries[pdg] = gain_boundaries[pdg - 1];
pdg++;
}
while (pdadc_i < AR5K_EEPROM_POWER_TABLE_SIZE * 2) {
pdadc_out[pdadc_i] = pdadc_out[pdadc_i - 1];
pdadc_i++;
}
/* Set gain boundaries */
ath5k_hw_reg_write(ah,
AR5K_REG_SM(pd_gain_overlap,
AR5K_PHY_TPC_RG5_PD_GAIN_OVERLAP) |
AR5K_REG_SM(gain_boundaries[0],
AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_1) |
AR5K_REG_SM(gain_boundaries[1],
AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_2) |
AR5K_REG_SM(gain_boundaries[2],
AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_3) |
AR5K_REG_SM(gain_boundaries[3],
AR5K_PHY_TPC_RG5_PD_GAIN_BOUNDARY_4),
AR5K_PHY_TPC_RG5);
/* Used for setting rate power table */
ah->ah_txpower.txp_min_idx = pwr_min[0];
}
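/*
 * Worked example (illustrative only, numbers made up): with two curves
 * where pwr_max[0] = 20 and pwr_min[1] = 16 (0.5dB units) the boundary
 * lands in the middle,
 *
 *	gain_boundaries[0] = (20 + 16) / 2 = 18
 *
 * and for the second curve, assuming pd_gain_overlap = 6,
 *
 *	pdadc_0 = (18 - 16) - 6 = -4
 *
 * i.e. we extrapolate 4 power steps below the second curve before we
 * start copying its real values into the final table.
 */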
/**
* ath5k_write_pwr_to_pdadc_table() - Write the PDADC values on hw
* @ah: The &struct ath5k_hw
* @ee_mode: One of enum ath5k_driver_mode
*/
static void
ath5k_write_pwr_to_pdadc_table(struct ath5k_hw *ah, u8 ee_mode)
{
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u8 *pdadc_out = ah->ah_txpower.txp_pd_table;
u8 *pdg_to_idx = ee->ee_pdc_to_idx[ee_mode];
u8 pdcurves = ee->ee_pd_gains[ee_mode];
u32 reg;
u8 i;
/* Select the right pdgain curves */
/* Clear current settings */
reg = ath5k_hw_reg_read(ah, AR5K_PHY_TPC_RG1);
reg &= ~(AR5K_PHY_TPC_RG1_PDGAIN_1 |
AR5K_PHY_TPC_RG1_PDGAIN_2 |
AR5K_PHY_TPC_RG1_PDGAIN_3 |
AR5K_PHY_TPC_RG1_NUM_PD_GAIN);
/*
* Use pd_gains curve from eeprom
*
* This overrides the default setting from initvals
* in case some vendors (e.g. Zcomax) don't use the default
* curves. If we don't honor their settings we'll get a
* 5dB (1 * gain overlap ?) drop.
*/
reg |= AR5K_REG_SM(pdcurves, AR5K_PHY_TPC_RG1_NUM_PD_GAIN);
switch (pdcurves) {
case 3:
reg |= AR5K_REG_SM(pdg_to_idx[2], AR5K_PHY_TPC_RG1_PDGAIN_3);
fallthrough;
case 2:
reg |= AR5K_REG_SM(pdg_to_idx[1], AR5K_PHY_TPC_RG1_PDGAIN_2);
fallthrough;
case 1:
reg |= AR5K_REG_SM(pdg_to_idx[0], AR5K_PHY_TPC_RG1_PDGAIN_1);
break;
}
ath5k_hw_reg_write(ah, reg, AR5K_PHY_TPC_RG1);
/*
* Write TX power values
*/
for (i = 0; i < (AR5K_EEPROM_POWER_TABLE_SIZE / 2); i++) {
u32 val = get_unaligned_le32(&pdadc_out[4 * i]);
ath5k_hw_reg_write(ah, val, AR5K_PHY_PDADC_TXPOWER(i));
}
}
/*
* Common code for PCDAC/PDADC tables
*/
/**
* ath5k_setup_channel_powertable() - Set up power table for this channel
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
* @ee_mode: One of enum ath5k_driver_mode
* @type: One of enum ath5k_powertable_type (eeprom.h)
*
* This is the main function that uses all of the above
* to set PCDAC/PDADC table on hw for the current channel.
* This table is used for tx power calibration on the baseband,
* without it we get weird tx power levels and in some cases
* distorted spectral mask
*/
static int
ath5k_setup_channel_powertable(struct ath5k_hw *ah,
struct ieee80211_channel *channel,
u8 ee_mode, u8 type)
{
struct ath5k_pdgain_info *pdg_L, *pdg_R;
struct ath5k_chan_pcal_info *pcinfo_L;
struct ath5k_chan_pcal_info *pcinfo_R;
struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
u8 *pdg_curve_to_idx = ee->ee_pdc_to_idx[ee_mode];
s16 table_min[AR5K_EEPROM_N_PD_GAINS];
s16 table_max[AR5K_EEPROM_N_PD_GAINS];
u8 *tmpL;
u8 *tmpR;
u32 target = channel->center_freq;
int pdg, i;
/* Get surrounding freq piers for this channel */
ath5k_get_chan_pcal_surrounding_piers(ah, channel,
&pcinfo_L,
&pcinfo_R);
/* Loop over pd gain curves on
* surrounding freq piers by index */
for (pdg = 0; pdg < ee->ee_pd_gains[ee_mode]; pdg++) {
/* Fill curves in reverse order
* from lower power (max gain)
* to higher power. Use curve -> idx
* backmapping we did on eeprom init */
u8 idx = pdg_curve_to_idx[pdg];
/* Grab the needed curves by index */
pdg_L = &pcinfo_L->pd_curves[idx];
pdg_R = &pcinfo_R->pd_curves[idx];
/* Initialize the temp tables */
tmpL = ah->ah_txpower.tmpL[pdg];
tmpR = ah->ah_txpower.tmpR[pdg];
/* Set curve's x boundaries and create
* curves so that they cover the same
* range (if we don't do that one table
* will have values on some range and the
* other one won't have any so interpolation
* will fail) */
table_min[pdg] = min(pdg_L->pd_pwr[0],
pdg_R->pd_pwr[0]) / 2;
table_max[pdg] = max(pdg_L->pd_pwr[pdg_L->pd_points - 1],
pdg_R->pd_pwr[pdg_R->pd_points - 1]) / 2;
/* Now create the curves on surrounding channels
* and interpolate if needed to get the final
* curve for this gain on this channel */
switch (type) {
case AR5K_PWRTABLE_LINEAR_PCDAC:
/* Override min/max so that we don't lose
* accuracy (don't divide by 2) */
table_min[pdg] = min(pdg_L->pd_pwr[0],
pdg_R->pd_pwr[0]);
table_max[pdg] =
max(pdg_L->pd_pwr[pdg_L->pd_points - 1],
pdg_R->pd_pwr[pdg_R->pd_points - 1]);
/* Override minimum so that we don't get
* out of bounds while extrapolating
* below. Don't do this when we have 2
* curves and we are on the high power curve
* because table_min is ok in this case */
if (!(ee->ee_pd_gains[ee_mode] > 1 && pdg == 0)) {
table_min[pdg] =
ath5k_get_linear_pcdac_min(pdg_L->pd_step,
pdg_R->pd_step,
pdg_L->pd_pwr,
pdg_R->pd_pwr);
/* Don't go too low because we will
* miss the upper part of the curve.
* Note: 126 = 31.5dB (max power supported)
* in 0.25dB units */
if (table_max[pdg] - table_min[pdg] > 126)
table_min[pdg] = table_max[pdg] - 126;
}
fallthrough;
case AR5K_PWRTABLE_PWR_TO_PCDAC:
case AR5K_PWRTABLE_PWR_TO_PDADC:
ath5k_create_power_curve(table_min[pdg],
table_max[pdg],
pdg_L->pd_pwr,
pdg_L->pd_step,
pdg_L->pd_points, tmpL, type);
/* We are in a calibration
* pier, no need to interpolate
* between freq piers */
if (pcinfo_L == pcinfo_R)
continue;
ath5k_create_power_curve(table_min[pdg],
table_max[pdg],
pdg_R->pd_pwr,
pdg_R->pd_step,
pdg_R->pd_points, tmpR, type);
break;
default:
return -EINVAL;
}
/* Interpolate between curves
* of surrounding freq piers to
* get the final curve for this
* pd gain. Re-use tmpL for interpolation
* output */
for (i = 0; (i < (u16) (table_max[pdg] - table_min[pdg])) &&
(i < AR5K_EEPROM_POWER_TABLE_SIZE); i++) {
tmpL[i] = (u8) ath5k_get_interpolated_value(target,
(s16) pcinfo_L->freq,
(s16) pcinfo_R->freq,
(s16) tmpL[i],
(s16) tmpR[i]);
}
}
/* Now we have a set of curves for this
* channel on tmpL (x range is table_max - table_min
* and y values are tmpL[pdg][]) sorted in the same
* order as EEPROM (because we've used the backmapping).
* So for RF5112 it's from higher power to lower power
* and for RF2413 it's from lower power to higher power.
* For RF5111 we only have one curve. */
/* Fill min and max power levels for this
* channel by interpolating the values on
* surrounding channels to complete the dataset */
ah->ah_txpower.txp_min_pwr = ath5k_get_interpolated_value(target,
(s16) pcinfo_L->freq,
(s16) pcinfo_R->freq,
pcinfo_L->min_pwr, pcinfo_R->min_pwr);
ah->ah_txpower.txp_max_pwr = ath5k_get_interpolated_value(target,
(s16) pcinfo_L->freq,
(s16) pcinfo_R->freq,
pcinfo_L->max_pwr, pcinfo_R->max_pwr);
/* Fill PCDAC/PDADC table */
switch (type) {
case AR5K_PWRTABLE_LINEAR_PCDAC:
/* For RF5112 we can have one or two curves
* and each curve covers a certain power level
* range so we need to do some more processing */
ath5k_combine_linear_pcdac_curves(ah, table_min, table_max,
ee->ee_pd_gains[ee_mode]);
/* Set txp.offset so that we can
* match max power value with max
* table index */
ah->ah_txpower.txp_offset = 64 - (table_max[0] / 2);
break;
case AR5K_PWRTABLE_PWR_TO_PCDAC:
/* We are done for RF5111 since it has only
* one curve, just fit the curve on the table */
ath5k_fill_pwr_to_pcdac_table(ah, table_min, table_max);
/* No rate powertable adjustment for RF5111 */
ah->ah_txpower.txp_min_idx = 0;
ah->ah_txpower.txp_offset = 0;
break;
case AR5K_PWRTABLE_PWR_TO_PDADC:
/* Set PDADC boundaries and fill
* final PDADC table */
ath5k_combine_pwr_to_pdadc_curves(ah, table_min, table_max,
ee->ee_pd_gains[ee_mode]);
/* Set txp.offset, note that table_min
* can be negative */
ah->ah_txpower.txp_offset = table_min[0];
break;
default:
return -EINVAL;
}
ah->ah_txpower.txp_setup = true;
return 0;
}
/**
* ath5k_write_channel_powertable() - Set power table for current channel on hw
* @ah: The &struct ath5k_hw
* @ee_mode: One of enum ath5k_driver_mode
* @type: One of enum ath5k_powertable_type (eeprom.h)
*/
static void
ath5k_write_channel_powertable(struct ath5k_hw *ah, u8 ee_mode, u8 type)
{
if (type == AR5K_PWRTABLE_PWR_TO_PDADC)
ath5k_write_pwr_to_pdadc_table(ah, ee_mode);
else
ath5k_write_pcdac_table(ah);
}
/**
* DOC: Per-rate tx power setting
*
* This is the code that sets the desired tx power limit (below
* maximum) on hw for each rate (we also have TPC that sets
* power per packet type). We do that by providing an index on the
* PCDAC/PDADC table we set up above, for each rate.
*
* For now we only limit txpower based on maximum tx power
* supported by hw (what's inside rate_info) + conformance test
* limits. We need to limit this even more, based on regulatory domain
* etc to be safe. Normally this is done from above so we don't care
* here; all we care about is that the tx power we set will be O.K.
* for the hw (e.g. won't create noise on the PA etc).
*
* Rate power table contains indices to PCDAC/PDADC table (0.5dB steps -
* x values) and is indexed as follows:
* rates[0] - rates[7] -> OFDM rates
* rates[8] - rates[14] -> CCK rates
* rates[15] -> XR rates (they all have the same power)
*/
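/*
 * A sketch of the resulting layout (derived from
 * ath5k_setup_rate_powertable() below):
 *
 *	rates[0..4]  -> OFDM 6, 9, 12, 18, 24 Mbit/s (common limit)
 *	rates[5..7]  -> OFDM 36, 48, 54 Mbit/s
 *	rates[8..14] -> CCK 1L, 2L, 2S, 5.5L, 5.5S, 11L, 11S
 *	rates[15]    -> XR rates
 */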
/**
* ath5k_setup_rate_powertable() - Set up rate power table for a given tx power
* @ah: The &struct ath5k_hw
* @max_pwr: The maximum tx power requested in 0.5dB steps
* @rate_info: The &struct ath5k_rate_pcal_info to fill
* @ee_mode: One of enum ath5k_driver_mode
*/
static void
ath5k_setup_rate_powertable(struct ath5k_hw *ah, u16 max_pwr,
struct ath5k_rate_pcal_info *rate_info,
u8 ee_mode)
{
unsigned int i;
u16 *rates;
s16 rate_idx_scaled = 0;
/* max_pwr is power level we got from driver/user in 0.5dB
* units, switch to 0.25dB units so we can compare */
max_pwr *= 2;
max_pwr = min(max_pwr, (u16) ah->ah_txpower.txp_max_pwr) / 2;
/* apply rate limits */
rates = ah->ah_txpower.txp_rates_power_table;
/* OFDM rates 6 to 24Mb/s */
for (i = 0; i < 5; i++)
rates[i] = min(max_pwr, rate_info->target_power_6to24);
/* Remaining OFDM rates */
rates[5] = min(rates[0], rate_info->target_power_36);
rates[6] = min(rates[0], rate_info->target_power_48);
rates[7] = min(rates[0], rate_info->target_power_54);
/* CCK rates */
/* 1L */
rates[8] = min(rates[0], rate_info->target_power_6to24);
/* 2L */
rates[9] = min(rates[0], rate_info->target_power_36);
/* 2S */
rates[10] = min(rates[0], rate_info->target_power_36);
/* 5L */
rates[11] = min(rates[0], rate_info->target_power_48);
/* 5S */
rates[12] = min(rates[0], rate_info->target_power_48);
/* 11L */
rates[13] = min(rates[0], rate_info->target_power_54);
/* 11S */
rates[14] = min(rates[0], rate_info->target_power_54);
/* XR rates */
rates[15] = min(rates[0], rate_info->target_power_6to24);
/* CCK rates have a different peak-to-average ratio
* so we have to tweak their power so that gainf
* correction works ok. For this we use the OFDM to
* CCK delta from eeprom */
if ((ee_mode == AR5K_EEPROM_MODE_11G) &&
(ah->ah_phy_revision < AR5K_SREV_PHY_5212A))
for (i = 8; i <= 15; i++)
rates[i] -= ah->ah_txpower.txp_cck_ofdm_gainf_delta;
/* Save min/max and current tx power for this channel
* in 0.25dB units.
*
* Note: We use rates[0] for current tx power because
* it covers most of the rates, in most cases. It's our
* tx power limit and what the user expects to see. */
ah->ah_txpower.txp_min_pwr = 2 * rates[7];
ah->ah_txpower.txp_cur_pwr = 2 * rates[0];
/* Set max txpower for correct OFDM operation on all rates
* -that is the txpower for 54Mbit-, it's used for the PAPD
* gain probe and it's in 0.5dB units */
ah->ah_txpower.txp_ofdm = rates[7];
/* Now that we have all rates setup use table offset to
* match the power range set by user with the power indices
* on PCDAC/PDADC table */
for (i = 0; i < 16; i++) {
rate_idx_scaled = rates[i] + ah->ah_txpower.txp_offset;
/* Don't get out of bounds */
if (rate_idx_scaled > 63)
rate_idx_scaled = 63;
if (rate_idx_scaled < 0)
rate_idx_scaled = 0;
rates[i] = rate_idx_scaled;
}
}
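/*
 * Worked example (illustrative only, numbers made up): with a linear
 * PCDAC offset of txp_offset = 14, a per-rate power of 40 (0.5dB steps)
 * maps to table index 54, while a value of 60 would map to 74 and gets
 * clamped to 63, the last valid index on the powertable.
 */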
/**
* ath5k_hw_txpower() - Set transmission power limit for a given channel
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
* @txpower: Requested tx power in 0.5dB steps
*
* Combines all of the above to set the requested tx power limit
* on hw.
*/
static int
ath5k_hw_txpower(struct ath5k_hw *ah, struct ieee80211_channel *channel,
u8 txpower)
{
struct ath5k_rate_pcal_info rate_info;
struct ieee80211_channel *curr_channel = ah->ah_current_channel;
int ee_mode;
u8 type;
int ret;
if (txpower > AR5K_TUNE_MAX_TXPOWER) {
ATH5K_ERR(ah, "invalid tx power: %u\n", txpower);
return -EINVAL;
}
ee_mode = ath5k_eeprom_mode_from_channel(ah, channel);
/* Initialize TX power table */
switch (ah->ah_radio) {
case AR5K_RF5110:
/* TODO */
return 0;
case AR5K_RF5111:
type = AR5K_PWRTABLE_PWR_TO_PCDAC;
break;
case AR5K_RF5112:
type = AR5K_PWRTABLE_LINEAR_PCDAC;
break;
case AR5K_RF2413:
case AR5K_RF5413:
case AR5K_RF2316:
case AR5K_RF2317:
case AR5K_RF2425:
type = AR5K_PWRTABLE_PWR_TO_PDADC;
break;
default:
return -EINVAL;
}
/*
* If we don't change channel/mode skip tx powertable calculation
* and use the cached one.
*/
if (!ah->ah_txpower.txp_setup ||
(channel->hw_value != curr_channel->hw_value) ||
(channel->center_freq != curr_channel->center_freq)) {
/* Reset TX power values but preserve requested
* tx power from above */
int requested_txpower = ah->ah_txpower.txp_requested;
memset(&ah->ah_txpower, 0, sizeof(ah->ah_txpower));
/* Restore TPC setting and requested tx power */
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
ah->ah_txpower.txp_requested = requested_txpower;
/* Calculate the powertable */
ret = ath5k_setup_channel_powertable(ah, channel,
ee_mode, type);
if (ret)
return ret;
}
/* Write table on hw */
ath5k_write_channel_powertable(ah, ee_mode, type);
/* Limit max power if we have a CTL available */
ath5k_get_max_ctl_power(ah, channel);
/* FIXME: Antenna reduction stuff */
/* FIXME: Limit power on turbo modes */
/* FIXME: TPC scale reduction */
/* Get surrounding channels for per-rate power table
* calibration */
ath5k_get_rate_pcal_data(ah, channel, &rate_info);
/* Setup rate power table */
ath5k_setup_rate_powertable(ah, txpower, &rate_info, ee_mode);
/* Write rate power table on hw */
ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(3, 24) |
AR5K_TXPOWER_OFDM(2, 16) | AR5K_TXPOWER_OFDM(1, 8) |
AR5K_TXPOWER_OFDM(0, 0), AR5K_PHY_TXPOWER_RATE1);
ath5k_hw_reg_write(ah, AR5K_TXPOWER_OFDM(7, 24) |
AR5K_TXPOWER_OFDM(6, 16) | AR5K_TXPOWER_OFDM(5, 8) |
AR5K_TXPOWER_OFDM(4, 0), AR5K_PHY_TXPOWER_RATE2);
ath5k_hw_reg_write(ah, AR5K_TXPOWER_CCK(10, 24) |
AR5K_TXPOWER_CCK(9, 16) | AR5K_TXPOWER_CCK(15, 8) |
AR5K_TXPOWER_CCK(8, 0), AR5K_PHY_TXPOWER_RATE3);
ath5k_hw_reg_write(ah, AR5K_TXPOWER_CCK(14, 24) |
AR5K_TXPOWER_CCK(13, 16) | AR5K_TXPOWER_CCK(12, 8) |
AR5K_TXPOWER_CCK(11, 0), AR5K_PHY_TXPOWER_RATE4);
/* FIXME: TPC support */
if (ah->ah_txpower.txp_tpc) {
ath5k_hw_reg_write(ah, AR5K_PHY_TXPOWER_RATE_MAX_TPC_ENABLE |
AR5K_TUNE_MAX_TXPOWER, AR5K_PHY_TXPOWER_RATE_MAX);
ath5k_hw_reg_write(ah,
AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_ACK) |
AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CTS) |
AR5K_REG_MS(AR5K_TUNE_MAX_TXPOWER, AR5K_TPC_CHIRP),
AR5K_TPC);
} else {
ath5k_hw_reg_write(ah, AR5K_TUNE_MAX_TXPOWER,
AR5K_PHY_TXPOWER_RATE_MAX);
}
return 0;
}
/**
* ath5k_hw_set_txpower_limit() - Set txpower limit for the current channel
* @ah: The &struct ath5k_hw
* @txpower: The requested tx power limit in 0.5dB steps
*
* This function provides access to ath5k_hw_txpower to the driver in
* case user or an application changes it while PHY is running.
*/
int
ath5k_hw_set_txpower_limit(struct ath5k_hw *ah, u8 txpower)
{
ATH5K_DBG(ah, ATH5K_DEBUG_TXPOWER,
"changing txpower to %d\n", txpower);
return ath5k_hw_txpower(ah, ah->ah_current_channel, txpower);
}
/*************\
Init function
\*************/
/**
* ath5k_hw_phy_init() - Initialize PHY
* @ah: The &struct ath5k_hw
* @channel: The &struct ieee80211_channel
* @mode: One of enum ath5k_driver_mode
* @fast: Try a fast channel switch instead
*
* This is the main function used during reset to initialize PHY
* or do a fast channel change if possible.
*
* NOTE: Do not call this one from the driver, it assumes PHY is in a
* warm reset state!
*/
int
ath5k_hw_phy_init(struct ath5k_hw *ah, struct ieee80211_channel *channel,
u8 mode, bool fast)
{
struct ieee80211_channel *curr_channel;
int ret, i;
u32 phy_tst1;
ret = 0;
/*
* Sanity check for fast flag
* Don't try fast channel change when changing modulation
* mode/band. We check for chip compatibility on
* ath5k_hw_reset.
*/
curr_channel = ah->ah_current_channel;
if (fast && (channel->hw_value != curr_channel->hw_value))
return -EINVAL;
/*
* On fast channel change we only set the synth parameters
* while PHY is running, enable calibration and skip the rest.
*/
if (fast) {
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
AR5K_PHY_RFBUS_REQ_REQUEST);
for (i = 0; i < 100; i++) {
if (ath5k_hw_reg_read(ah, AR5K_PHY_RFBUS_GRANT))
break;
udelay(5);
}
/* Failed */
if (i >= 100)
return -EIO;
/* Set channel and wait for synth */
ret = ath5k_hw_channel(ah, channel);
if (ret)
return ret;
ath5k_hw_wait_for_synth(ah, channel);
}
/*
* Set TX power
*
* Note: We need to do that before we set
* RF buffer settings on 5211/5212+ so that we
* properly set curve indices.
*/
ret = ath5k_hw_txpower(ah, channel, ah->ah_txpower.txp_requested ?
ah->ah_txpower.txp_requested * 2 :
AR5K_TUNE_MAX_TXPOWER);
if (ret)
return ret;
/* Write OFDM timings on 5212*/
if (ah->ah_version == AR5K_AR5212 &&
channel->hw_value != AR5K_MODE_11B) {
ret = ath5k_hw_write_ofdm_timings(ah, channel);
if (ret)
return ret;
/* Spur info is available only from EEPROM versions
* greater than 5.3, but the EEPROM routines will use
* static values for older versions */
if (ah->ah_mac_srev >= AR5K_SREV_AR5424)
ath5k_hw_set_spur_mitigation_filter(ah,
channel);
}
/* If we used fast channel switching
* we are done, release RF bus and
* fire up NF calibration.
*
* Note: Only NF calibration due to
* channel change, not AGC calibration
* since AGC is still running!
*/
if (fast) {
/*
* Release RF Bus grant
*/
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_RFBUS_REQ,
AR5K_PHY_RFBUS_REQ_REQUEST);
/*
* Start NF calibration
*/
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_NF);
return ret;
}
/*
* For 5210 we do all initialization using
* initvals, so we don't have to modify
* any settings (5210 also only supports
* a/aturbo modes)
*/
if (ah->ah_version != AR5K_AR5210) {
/*
* Write initial RF gain settings
* This should work for both 5111/5112
*/
ret = ath5k_hw_rfgain_init(ah, channel->band);
if (ret)
return ret;
usleep_range(1000, 1500);
/*
* Write RF buffer
*/
ret = ath5k_hw_rfregs_init(ah, channel, mode);
if (ret)
return ret;
/* Enable/disable 802.11b mode on 5111
* (enable 2111 frequency converter + CCK) */
if (ah->ah_radio == AR5K_RF5111) {
if (mode == AR5K_MODE_11B)
AR5K_REG_ENABLE_BITS(ah, AR5K_TXCFG,
AR5K_TXCFG_B_MODE);
else
AR5K_REG_DISABLE_BITS(ah, AR5K_TXCFG,
AR5K_TXCFG_B_MODE);
}
} else if (ah->ah_version == AR5K_AR5210) {
usleep_range(1000, 1500);
/* Disable phy and wait */
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_DISABLE, AR5K_PHY_ACT);
usleep_range(1000, 1500);
}
/* Set channel on PHY */
ret = ath5k_hw_channel(ah, channel);
if (ret)
return ret;
/*
* Enable the PHY and wait until completion
* This includes BaseBand and Synthesizer
* activation.
*/
ath5k_hw_reg_write(ah, AR5K_PHY_ACT_ENABLE, AR5K_PHY_ACT);
ath5k_hw_wait_for_synth(ah, channel);
/*
* Perform ADC test to see if baseband is ready
* Set tx hold and check adc test register
*/
phy_tst1 = ath5k_hw_reg_read(ah, AR5K_PHY_TST1);
ath5k_hw_reg_write(ah, AR5K_PHY_TST1_TXHOLD, AR5K_PHY_TST1);
for (i = 0; i <= 20; i++) {
if (!(ath5k_hw_reg_read(ah, AR5K_PHY_ADC_TEST) & 0x10))
break;
usleep_range(200, 250);
}
ath5k_hw_reg_write(ah, phy_tst1, AR5K_PHY_TST1);
/*
* Start automatic gain control calibration
*
* During AGC calibration RX path is re-routed to
* a power detector so we don't receive anything.
*
* This method is used to calibrate some static offsets
* used together with on-the-fly I/Q calibration (the
* one performed via ath5k_hw_phy_calibrate), which doesn't
* interrupt the rx path.
*
* While rx path is re-routed to the power detector we also
* start a noise floor calibration to measure the
* card's noise floor (the noise we measure when we are not
* transmitting or receiving anything).
*
* If we are in a noisy environment, AGC calibration may time
* out and/or noise floor calibration might time out.
*/
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF);
/* At the same time start I/Q calibration for QAM constellation
* -no need for CCK- */
ah->ah_iq_cal_needed = false;
if (mode != AR5K_MODE_11B) {
ah->ah_iq_cal_needed = true;
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_IQ,
AR5K_PHY_IQ_CAL_NUM_LOG_MAX, 15);
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_IQ,
AR5K_PHY_IQ_RUN);
}
/* Wait for gain calibration to finish (we check for I/Q calibration
* during ath5k_phy_calibrate) */
if (ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL,
AR5K_PHY_AGCCTL_CAL, 0, false)) {
ATH5K_ERR(ah, "gain calibration timeout (%uMHz)\n",
channel->center_freq);
}
/* Restore antenna mode */
ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
return ret;
}
|
linux-master
|
drivers/net/wireless/ath/ath5k/phy.c
|
/*
* Copyright (c) 2004-2008 Reyk Floeter <[email protected]>
* Copyright (c) 2006-2008 Nick Kossifidis <[email protected]>
* Copyright (c) 2007-2008 Pavel Roskin <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/******************************\
Hardware Descriptor Functions
\******************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* DOC: Hardware descriptor functions
*
* Here we handle the processing of the low-level hw descriptors
* that hw reads and writes via DMA for each TX and RX attempt (that means
* we can also have descriptors for failed TX/RX tries). We have two kinds
* of descriptors for RX and TX: control descriptors, which tell the hw how
* to send or receive a packet and where to read/write it from/to etc, and
* status descriptors, which contain information about how the packet was
* sent or received (errors included).
*
* Descriptor format is not exactly the same for each MAC chip version so we
* have function pointers on &struct ath5k_hw we initialize at runtime based on
* the chip used.
*/
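/*
 * A minimal sketch of the dispatch described above (illustrative only,
 * the actual hook setup happens in the driver's init path):
 *
 *	if (ah->ah_version == AR5K_AR5212)
 *		ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
 *	else
 *		ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
 *
 * so callers always go through the &struct ath5k_hw hooks and never
 * need to know which descriptor format the MAC expects.
 */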
/************************\
* TX Control descriptors *
\************************/
/**
* ath5k_hw_setup_2word_tx_desc() - Initialize a 2-word tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @pkt_len: Frame length in bytes
* @hdr_len: Header length in bytes (only used on AR5210)
* @padsize: Any padding we've added to the frame length
* @type: One of enum ath5k_pkt_type
* @tx_power: Tx power in 0.5dB steps
* @tx_rate0: HW idx for transmission rate
* @tx_tries0: Max number of retransmissions
* @key_index: Index on key table to use for encryption
* @antenna_mode: Which antenna to use (0 for auto)
* @flags: One of AR5K_TXDESC_* flags (desc.h)
* @rtscts_rate: HW idx for RTS/CTS transmission rate
* @rtscts_duration: What to put on duration field on the header of RTS/CTS
*
* Internal function to initialize a 2-Word TX control descriptor
* found on AR5210 and AR5211 MAC chips.
*
* Returns 0 on success or -EINVAL on invalid input
*/
static int
ath5k_hw_setup_2word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
unsigned int pkt_len, unsigned int hdr_len,
int padsize,
enum ath5k_pkt_type type,
unsigned int tx_power,
unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index,
unsigned int antenna_mode,
unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
{
u32 frame_type;
struct ath5k_hw_2w_tx_ctl *tx_ctl;
unsigned int frame_len;
tx_ctl = &desc->ud.ds_tx5210.tx_ctl;
/*
* Validate input
* - Zero retries don't make sense.
* - A zero rate will put the HW into a mode where it continuously sends
* noise on the channel, so it is important to avoid this.
*/
if (unlikely(tx_tries0 == 0)) {
ATH5K_ERR(ah, "zero retries\n");
WARN_ON(1);
return -EINVAL;
}
if (unlikely(tx_rate0 == 0)) {
ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
/* Clear descriptor */
memset(&desc->ud.ds_tx5210, 0, sizeof(struct ath5k_hw_5210_tx_desc));
/* Setup control descriptor */
/* Verify and set frame length */
/* remove padding we might have added before */
frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_2W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
tx_ctl->tx_control_0 = frame_len & AR5K_2W_TX_DESC_CTL0_FRAME_LEN;
/* Verify and set buffer length */
/* NB: beacon's BufLen must be a multiple of 4 bytes */
if (type == AR5K_PKT_TYPE_BEACON)
pkt_len = roundup(pkt_len, 4);
if (pkt_len & ~AR5K_2W_TX_DESC_CTL1_BUF_LEN)
return -EINVAL;
tx_ctl->tx_control_1 = pkt_len & AR5K_2W_TX_DESC_CTL1_BUF_LEN;
/*
* Verify and set header length (only 5210)
*/
if (ah->ah_version == AR5K_AR5210) {
if (hdr_len & ~AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210)
return -EINVAL;
tx_ctl->tx_control_0 |=
AR5K_REG_SM(hdr_len, AR5K_2W_TX_DESC_CTL0_HEADER_LEN_5210);
}
/*Differences between 5210-5211*/
if (ah->ah_version == AR5K_AR5210) {
switch (type) {
case AR5K_PKT_TYPE_BEACON:
case AR5K_PKT_TYPE_PROBE_RESP:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_NO_DELAY;
break;
case AR5K_PKT_TYPE_PIFS:
frame_type = AR5K_AR5210_TX_DESC_FRAME_TYPE_PIFS;
break;
default:
frame_type = type;
break;
}
tx_ctl->tx_control_0 |=
AR5K_REG_SM(frame_type, AR5K_2W_TX_DESC_CTL0_FRAME_TYPE_5210) |
AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE);
} else {
tx_ctl->tx_control_0 |=
AR5K_REG_SM(tx_rate0, AR5K_2W_TX_DESC_CTL0_XMIT_RATE) |
AR5K_REG_SM(antenna_mode,
AR5K_2W_TX_DESC_CTL0_ANT_MODE_XMIT);
tx_ctl->tx_control_1 |=
AR5K_REG_SM(type, AR5K_2W_TX_DESC_CTL1_FRAME_TYPE_5211);
}
#define _TX_FLAGS(_c, _flag) \
if (flags & AR5K_TXDESC_##_flag) { \
tx_ctl->tx_control_##_c |= \
AR5K_2W_TX_DESC_CTL##_c##_##_flag; \
}
#define _TX_FLAGS_5211(_c, _flag) \
if (flags & AR5K_TXDESC_##_flag) { \
tx_ctl->tx_control_##_c |= \
AR5K_2W_TX_DESC_CTL##_c##_##_flag##_5211; \
}
_TX_FLAGS(0, CLRDMASK);
_TX_FLAGS(0, INTREQ);
_TX_FLAGS(0, RTSENA);
if (ah->ah_version == AR5K_AR5211) {
_TX_FLAGS_5211(0, VEOL);
_TX_FLAGS_5211(1, NOACK);
}
#undef _TX_FLAGS
#undef _TX_FLAGS_5211
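/*
 * For reference, _TX_FLAGS(0, INTREQ) above expands (via token pasting)
 * to:
 *
 *	if (flags & AR5K_TXDESC_INTREQ) {
 *		tx_ctl->tx_control_0 |= AR5K_2W_TX_DESC_CTL0_INTREQ;
 *	}
 */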
/*
* WEP crap
*/
if (key_index != AR5K_TXKEYIX_INVALID) {
tx_ctl->tx_control_0 |=
AR5K_2W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
tx_ctl->tx_control_1 |=
AR5K_REG_SM(key_index,
AR5K_2W_TX_DESC_CTL1_ENC_KEY_IDX);
}
/*
* RTS/CTS Duration [5210 ?]
*/
if ((ah->ah_version == AR5K_AR5210) &&
(flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)))
tx_ctl->tx_control_1 |= rtscts_duration &
AR5K_2W_TX_DESC_CTL1_RTS_DURATION_5210;
return 0;
}
/**
* ath5k_hw_setup_4word_tx_desc() - Initialize a 4-word tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @pkt_len: Frame length in bytes
* @hdr_len: Header length in bytes (only used on AR5210)
* @padsize: Any padding we've added to the frame length
* @type: One of enum ath5k_pkt_type
* @tx_power: Tx power in 0.5dB steps
* @tx_rate0: HW idx for transmission rate
* @tx_tries0: Max number of retransmissions
* @key_index: Index on key table to use for encryption
* @antenna_mode: Which antenna to use (0 for auto)
* @flags: One of AR5K_TXDESC_* flags (desc.h)
* @rtscts_rate: HW idx for RTS/CTS transmission rate
* @rtscts_duration: What to put on duration field on the header of RTS/CTS
*
* Internal function to initialize a 4-Word TX control descriptor
* found on AR5212 and later MAC chips.
*
* Returns 0 on success or -EINVAL on invalid input
*/
static int
ath5k_hw_setup_4word_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
unsigned int pkt_len, unsigned int hdr_len,
int padsize,
enum ath5k_pkt_type type,
unsigned int tx_power,
unsigned int tx_rate0, unsigned int tx_tries0,
unsigned int key_index,
unsigned int antenna_mode,
unsigned int flags,
unsigned int rtscts_rate, unsigned int rtscts_duration)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
unsigned int frame_len;
/*
* Use local variables for these to reduce load/store access on
* uncached memory
*/
u32 txctl0 = 0, txctl1 = 0, txctl2 = 0, txctl3 = 0;
tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
/*
* Validate input
* - Zero retries don't make sense.
* - A zero rate will put the HW into a mode where it continuously sends
* noise on the channel, so it is important to avoid this.
*/
if (unlikely(tx_tries0 == 0)) {
ATH5K_ERR(ah, "zero retries\n");
WARN_ON(1);
return -EINVAL;
}
if (unlikely(tx_rate0 == 0)) {
ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
tx_power += ah->ah_txpower.txp_offset;
if (tx_power > AR5K_TUNE_MAX_TXPOWER)
tx_power = AR5K_TUNE_MAX_TXPOWER;
/* Clear descriptor status area */
memset(&desc->ud.ds_tx5212.tx_stat, 0,
sizeof(desc->ud.ds_tx5212.tx_stat));
/* Setup control descriptor */
/* Verify and set frame length */
/* remove padding we might have added before */
frame_len = pkt_len - padsize + FCS_LEN;
if (frame_len & ~AR5K_4W_TX_DESC_CTL0_FRAME_LEN)
return -EINVAL;
txctl0 = frame_len & AR5K_4W_TX_DESC_CTL0_FRAME_LEN;
/* Verify and set buffer length */
/* NB: beacon's BufLen must be a multiple of 4 bytes */
if (type == AR5K_PKT_TYPE_BEACON)
pkt_len = roundup(pkt_len, 4);
if (pkt_len & ~AR5K_4W_TX_DESC_CTL1_BUF_LEN)
return -EINVAL;
txctl1 = pkt_len & AR5K_4W_TX_DESC_CTL1_BUF_LEN;
txctl0 |= AR5K_REG_SM(tx_power, AR5K_4W_TX_DESC_CTL0_XMIT_POWER) |
AR5K_REG_SM(antenna_mode, AR5K_4W_TX_DESC_CTL0_ANT_MODE_XMIT);
txctl1 |= AR5K_REG_SM(type, AR5K_4W_TX_DESC_CTL1_FRAME_TYPE);
txctl2 = AR5K_REG_SM(tx_tries0, AR5K_4W_TX_DESC_CTL2_XMIT_TRIES0);
txctl3 = tx_rate0 & AR5K_4W_TX_DESC_CTL3_XMIT_RATE0;
#define _TX_FLAGS(_c, _flag) \
if (flags & AR5K_TXDESC_##_flag) { \
txctl##_c |= AR5K_4W_TX_DESC_CTL##_c##_##_flag; \
}
_TX_FLAGS(0, CLRDMASK);
_TX_FLAGS(0, VEOL);
_TX_FLAGS(0, INTREQ);
_TX_FLAGS(0, RTSENA);
_TX_FLAGS(0, CTSENA);
_TX_FLAGS(1, NOACK);
#undef _TX_FLAGS
/*
 * Encryption (WEP) key setup
 */
if (key_index != AR5K_TXKEYIX_INVALID) {
txctl0 |= AR5K_4W_TX_DESC_CTL0_ENCRYPT_KEY_VALID;
txctl1 |= AR5K_REG_SM(key_index,
AR5K_4W_TX_DESC_CTL1_ENCRYPT_KEY_IDX);
}
/*
* RTS/CTS
*/
if (flags & (AR5K_TXDESC_RTSENA | AR5K_TXDESC_CTSENA)) {
if ((flags & AR5K_TXDESC_RTSENA) &&
(flags & AR5K_TXDESC_CTSENA))
return -EINVAL;
txctl2 |= rtscts_duration & AR5K_4W_TX_DESC_CTL2_RTS_DURATION;
txctl3 |= AR5K_REG_SM(rtscts_rate,
AR5K_4W_TX_DESC_CTL3_RTS_CTS_RATE);
}
tx_ctl->tx_control_0 = txctl0;
tx_ctl->tx_control_1 = txctl1;
tx_ctl->tx_control_2 = txctl2;
tx_ctl->tx_control_3 = txctl3;
return 0;
}
/**
* ath5k_hw_setup_mrr_tx_desc() - Initialize an MRR tx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @tx_rate1: HW idx for rate used on transmission series 1
* @tx_tries1: Max number of retransmissions for transmission series 1
* @tx_rate2: HW idx for rate used on transmission series 2
* @tx_tries2: Max number of retransmissions for transmission series 2
* @tx_rate3: HW idx for rate used on transmission series 3
* @tx_tries3: Max number of retransmissions for transmission series 3
*
 * Multi rate retry (MRR) tx control descriptors are available only on AR5212
 * MACs; they are part of the normal 4-word tx control descriptor (see above)
 * but we handle them through a separate function for better abstraction.
 *
 * Returns 1 if the MRR fields were set up, 0 on MACs without MRR support,
 * or -EINVAL on invalid input
*/
int
ath5k_hw_setup_mrr_tx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
u_int tx_rate1, u_int tx_tries1,
u_int tx_rate2, u_int tx_tries2,
u_int tx_rate3, u_int tx_tries3)
{
struct ath5k_hw_4w_tx_ctl *tx_ctl;
/* no mrr support for cards older than 5212 */
if (ah->ah_version < AR5K_AR5212)
return 0;
/*
* Rates can be 0 as long as the retry count is 0 too.
* A zero rate and nonzero retry count will put the HW into a mode where
* it continuously sends noise on the channel, so it is important to
* avoid this.
*/
if (unlikely((tx_rate1 == 0 && tx_tries1 != 0) ||
(tx_rate2 == 0 && tx_tries2 != 0) ||
(tx_rate3 == 0 && tx_tries3 != 0))) {
ATH5K_ERR(ah, "zero rate\n");
WARN_ON(1);
return -EINVAL;
}
if (ah->ah_version == AR5K_AR5212) {
tx_ctl = &desc->ud.ds_tx5212.tx_ctl;
#define _XTX_TRIES(_n) \
if (tx_tries##_n) { \
tx_ctl->tx_control_2 |= \
AR5K_REG_SM(tx_tries##_n, \
AR5K_4W_TX_DESC_CTL2_XMIT_TRIES##_n); \
tx_ctl->tx_control_3 |= \
AR5K_REG_SM(tx_rate##_n, \
AR5K_4W_TX_DESC_CTL3_XMIT_RATE##_n); \
}
_XTX_TRIES(1);
_XTX_TRIES(2);
_XTX_TRIES(3);
#undef _XTX_TRIES
return 1;
}
return 0;
}
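/*
 * Illustrative sketch (not part of the driver): a caller that wants up
 * to two retries on each of three fallback series could fill the MRR
 * fields like this; the rateN_hw_idx names are hypothetical HW rate
 * table indices and bf is assumed to be a mapped &struct ath5k_buf:
 *
 *	struct ath5k_desc *ds = bf->desc;
 *
 *	ath5k_hw_setup_mrr_tx_desc(ah, ds,
 *				   rate1_hw_idx, 2,
 *				   rate2_hw_idx, 2,
 *				   rate3_hw_idx, 2);
 *
 * On pre-5212 MACs this is a no-op returning 0; on AR5212 it fills
 * tx_control_2/3 and returns 1.
 */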
/***********************\
* TX Status descriptors *
\***********************/
/**
* ath5k_hw_proc_2word_tx_status() - Process a tx status descriptor on 5210/1
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @ts: The &struct ath5k_tx_status
*/
static int
ath5k_hw_proc_2word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_tx_status *ts)
{
struct ath5k_hw_tx_status *tx_status;
tx_status = &desc->ud.ds_tx5210.tx_stat;
/* No frame has been sent or an error occurred */
if (unlikely((tx_status->tx_status_1 & AR5K_DESC_TX_STATUS1_DONE) == 0))
return -EINPROGRESS;
/*
* Get descriptor status
*/
ts->ts_tstamp = AR5K_REG_MS(tx_status->tx_status_0,
AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
ts->ts_shortretry = AR5K_REG_MS(tx_status->tx_status_0,
AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
ts->ts_final_retry = AR5K_REG_MS(tx_status->tx_status_0,
AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
/* TODO: ts->ts_virtcol + test */
ts->ts_seqnum = AR5K_REG_MS(tx_status->tx_status_1,
AR5K_DESC_TX_STATUS1_SEQ_NUM);
ts->ts_rssi = AR5K_REG_MS(tx_status->tx_status_1,
AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
ts->ts_antenna = 1;
ts->ts_status = 0;
ts->ts_final_idx = 0;
if (!(tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
if (tx_status->tx_status_0 &
AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
ts->ts_status |= AR5K_TXERR_XRETRY;
if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
ts->ts_status |= AR5K_TXERR_FIFO;
if (tx_status->tx_status_0 & AR5K_DESC_TX_STATUS0_FILTERED)
ts->ts_status |= AR5K_TXERR_FILT;
}
return 0;
}
/**
* ath5k_hw_proc_4word_tx_status() - Process a tx status descriptor on 5212
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @ts: The &struct ath5k_tx_status
*/
static int
ath5k_hw_proc_4word_tx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_tx_status *ts)
{
struct ath5k_hw_tx_status *tx_status;
u32 txstat0, txstat1;
tx_status = &desc->ud.ds_tx5212.tx_stat;
txstat1 = READ_ONCE(tx_status->tx_status_1);
/* No frame has been sent or an error occurred */
if (unlikely(!(txstat1 & AR5K_DESC_TX_STATUS1_DONE)))
return -EINPROGRESS;
txstat0 = READ_ONCE(tx_status->tx_status_0);
/*
* Get descriptor status
*/
ts->ts_tstamp = AR5K_REG_MS(txstat0,
AR5K_DESC_TX_STATUS0_SEND_TIMESTAMP);
ts->ts_shortretry = AR5K_REG_MS(txstat0,
AR5K_DESC_TX_STATUS0_SHORT_RETRY_COUNT);
ts->ts_final_retry = AR5K_REG_MS(txstat0,
AR5K_DESC_TX_STATUS0_LONG_RETRY_COUNT);
ts->ts_seqnum = AR5K_REG_MS(txstat1,
AR5K_DESC_TX_STATUS1_SEQ_NUM);
ts->ts_rssi = AR5K_REG_MS(txstat1,
AR5K_DESC_TX_STATUS1_ACK_SIG_STRENGTH);
ts->ts_antenna = (txstat1 &
AR5K_DESC_TX_STATUS1_XMIT_ANTENNA_5212) ? 2 : 1;
ts->ts_status = 0;
ts->ts_final_idx = AR5K_REG_MS(txstat1,
AR5K_DESC_TX_STATUS1_FINAL_TS_IX_5212);
/* TX error */
if (!(txstat0 & AR5K_DESC_TX_STATUS0_FRAME_XMIT_OK)) {
if (txstat0 & AR5K_DESC_TX_STATUS0_EXCESSIVE_RETRIES)
ts->ts_status |= AR5K_TXERR_XRETRY;
if (txstat0 & AR5K_DESC_TX_STATUS0_FIFO_UNDERRUN)
ts->ts_status |= AR5K_TXERR_FIFO;
if (txstat0 & AR5K_DESC_TX_STATUS0_FILTERED)
ts->ts_status |= AR5K_TXERR_FILT;
}
return 0;
}
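/*
 * Illustrative sketch (not part of the driver): tx completion handling
 * dispatches through the ah_proc_tx_desc pointer and retries while the
 * hardware still owns the descriptor; increment_retry_stats() is a
 * hypothetical helper and bf an assumed &struct ath5k_buf:
 *
 *	struct ath5k_tx_status ts = { 0 };
 *	int ret;
 *
 *	ret = ah->ah_proc_tx_desc(ah, bf->desc, &ts);
 *	if (ret == -EINPROGRESS)
 *		return;	 // HW not done with this descriptor yet
 *	if (!ret && (ts.ts_status & AR5K_TXERR_XRETRY))
 *		increment_retry_stats();
 */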
/****************\
* RX Descriptors *
\****************/
/**
* ath5k_hw_setup_rx_desc() - Initialize an rx control descriptor
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @size: RX buffer length in bytes
* @flags: One of AR5K_RXDESC_* flags
*/
int
ath5k_hw_setup_rx_desc(struct ath5k_hw *ah,
struct ath5k_desc *desc,
u32 size, unsigned int flags)
{
struct ath5k_hw_rx_ctl *rx_ctl;
rx_ctl = &desc->ud.ds_rx.rx_ctl;
/*
 * Clear the descriptor.
 * If we don't clear the status descriptor we get too many
 * results while scanning, most of them virtual; after a few
 * seconds of scanning the system hangs. M.F.
 */
memset(&desc->ud.ds_rx, 0, sizeof(struct ath5k_hw_all_rx_desc));
if (unlikely(size & ~AR5K_DESC_RX_CTL1_BUF_LEN))
return -EINVAL;
/* Setup descriptor */
rx_ctl->rx_control_1 = size & AR5K_DESC_RX_CTL1_BUF_LEN;
if (flags & AR5K_RXDESC_INTREQ)
rx_ctl->rx_control_1 |= AR5K_DESC_RX_CTL1_INTREQ;
return 0;
}
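/*
 * Illustrative sketch (not part of the driver): when refilling the RX
 * ring each descriptor is pointed at a fresh DMA-mapped skb and then
 * re-initialized; bufsize stands for whatever RX buffer size the driver
 * uses and skbaddr is the assumed DMA address of the skb data:
 *
 *	ds->ds_data = skbaddr;
 *	ret = ath5k_hw_setup_rx_desc(ah, ds, bufsize, 0);
 *
 * Pass AR5K_RXDESC_INTREQ as flags instead of 0 to request an interrupt
 * when this descriptor completes.
 */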
/**
* ath5k_hw_proc_5210_rx_status() - Process the rx status descriptor on 5210/1
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @rs: The &struct ath5k_rx_status
*
* Internal function used to process an RX status descriptor
* on AR5210/5211 MAC.
*
 * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
 * frame yet.
*/
static int
ath5k_hw_proc_5210_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
rx_status = &desc->ud.ds_rx.rx_stat;
/* No frame received / not ready */
if (unlikely(!(rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
memset(rs, 0, sizeof(struct ath5k_rx_status));
/*
* Frame receive status
*/
rs->rs_datalen = rx_status->rx_status_0 &
AR5K_5210_RX_DESC_STATUS0_DATA_LEN;
rs->rs_rssi = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_SIGNAL);
rs->rs_rate = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_RATE);
rs->rs_more = !!(rx_status->rx_status_0 &
AR5K_5210_RX_DESC_STATUS0_MORE);
/* TODO: this timestamp is 13 bit, but later on we assume 15 bit!
 * Also the HAL code for 5210 says the timestamp is bits [10..22] of the
 * TSF and extends the timestamp here to 15 bit.
 * We need to check on 5210...
 */
rs->rs_tstamp = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
if (ah->ah_version == AR5K_AR5211)
rs->rs_antenna = AR5K_REG_MS(rx_status->rx_status_0,
AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5211);
else
rs->rs_antenna = (rx_status->rx_status_0 &
AR5K_5210_RX_DESC_STATUS0_RECEIVE_ANT_5210)
? 2 : 1;
/*
* Key table status
*/
if (rx_status->rx_status_1 & AR5K_5210_RX_DESC_STATUS1_KEY_INDEX_VALID)
rs->rs_keyix = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_KEY_INDEX);
else
rs->rs_keyix = AR5K_RXKEYIX_INVALID;
/*
* Receive/descriptor errors
*/
if (!(rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_CRC;
/* only on 5210 */
if ((ah->ah_version == AR5K_AR5210) &&
(rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_FIFO_OVERRUN_5210))
rs->rs_status |= AR5K_RXERR_FIFO;
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_PHY_ERROR) {
rs->rs_status |= AR5K_RXERR_PHY;
rs->rs_phyerr = AR5K_REG_MS(rx_status->rx_status_1,
AR5K_5210_RX_DESC_STATUS1_PHY_ERROR);
}
if (rx_status->rx_status_1 &
AR5K_5210_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_DECRYPT;
}
return 0;
}
/**
* ath5k_hw_proc_5212_rx_status() - Process the rx status descriptor on 5212
* @ah: The &struct ath5k_hw
* @desc: The &struct ath5k_desc
* @rs: The &struct ath5k_rx_status
*
* Internal function used to process an RX status descriptor
* on AR5212 and later MAC.
*
 * Returns 0 on success or -EINPROGRESS in case we haven't received the whole
 * frame yet.
*/
static int
ath5k_hw_proc_5212_rx_status(struct ath5k_hw *ah,
struct ath5k_desc *desc,
struct ath5k_rx_status *rs)
{
struct ath5k_hw_rx_status *rx_status;
u32 rxstat0, rxstat1;
rx_status = &desc->ud.ds_rx.rx_stat;
rxstat1 = READ_ONCE(rx_status->rx_status_1);
/* No frame received / not ready */
if (unlikely(!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_DONE)))
return -EINPROGRESS;
memset(rs, 0, sizeof(struct ath5k_rx_status));
rxstat0 = READ_ONCE(rx_status->rx_status_0);
/*
* Frame receive status
*/
rs->rs_datalen = rxstat0 & AR5K_5212_RX_DESC_STATUS0_DATA_LEN;
rs->rs_rssi = AR5K_REG_MS(rxstat0,
AR5K_5212_RX_DESC_STATUS0_RECEIVE_SIGNAL);
rs->rs_rate = AR5K_REG_MS(rxstat0,
AR5K_5212_RX_DESC_STATUS0_RECEIVE_RATE);
rs->rs_antenna = AR5K_REG_MS(rxstat0,
AR5K_5212_RX_DESC_STATUS0_RECEIVE_ANTENNA);
rs->rs_more = !!(rxstat0 & AR5K_5212_RX_DESC_STATUS0_MORE);
rs->rs_tstamp = AR5K_REG_MS(rxstat1,
AR5K_5212_RX_DESC_STATUS1_RECEIVE_TIMESTAMP);
/*
* Key table status
*/
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_KEY_INDEX_VALID)
rs->rs_keyix = AR5K_REG_MS(rxstat1,
AR5K_5212_RX_DESC_STATUS1_KEY_INDEX);
else
rs->rs_keyix = AR5K_RXKEYIX_INVALID;
/*
* Receive/descriptor errors
*/
if (!(rxstat1 & AR5K_5212_RX_DESC_STATUS1_FRAME_RECEIVE_OK)) {
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_CRC;
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_PHY_ERROR) {
rs->rs_status |= AR5K_RXERR_PHY;
rs->rs_phyerr = AR5K_REG_MS(rxstat1,
AR5K_5212_RX_DESC_STATUS1_PHY_ERROR_CODE);
if (!ah->ah_capabilities.cap_has_phyerr_counters)
ath5k_ani_phy_error_report(ah, rs->rs_phyerr);
}
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_DECRYPT_CRC_ERROR)
rs->rs_status |= AR5K_RXERR_DECRYPT;
if (rxstat1 & AR5K_5212_RX_DESC_STATUS1_MIC_ERROR)
rs->rs_status |= AR5K_RXERR_MIC;
}
return 0;
}
/********\
* Attach *
\********/
/**
* ath5k_hw_init_desc_functions() - Init function pointers inside ah
* @ah: The &struct ath5k_hw
*
* Maps the internal descriptor functions to the function pointers on ah, used
* from above. This is used as an abstraction layer to handle the various chips
* the same way.
*/
int
ath5k_hw_init_desc_functions(struct ath5k_hw *ah)
{
if (ah->ah_version == AR5K_AR5212) {
ah->ah_setup_tx_desc = ath5k_hw_setup_4word_tx_desc;
ah->ah_proc_tx_desc = ath5k_hw_proc_4word_tx_status;
ah->ah_proc_rx_desc = ath5k_hw_proc_5212_rx_status;
} else if (ah->ah_version <= AR5K_AR5211) {
ah->ah_setup_tx_desc = ath5k_hw_setup_2word_tx_desc;
ah->ah_proc_tx_desc = ath5k_hw_proc_2word_tx_status;
ah->ah_proc_rx_desc = ath5k_hw_proc_5210_rx_status;
} else
return -ENOTSUPP;
return 0;
}
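/*
 * With these pointers in place, callers never branch on ah_version
 * themselves; e.g. (illustrative)
 *
 *	ret = ah->ah_setup_tx_desc(ah, ds, pktlen, hdrlen, padsize, type,
 *				   txpower, rate, tries, keyidx, antenna,
 *				   flags, ctsrate, ctsduration);
 *
 * resolves to the 2-word variant on 5210/5211 and to the 4-word variant
 * on 5212, with identical semantics at the call site.
 */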
|
linux-master
|
drivers/net/wireless/ath/ath5k/desc.c
|
/*-
* Copyright (c) 2002-2005 Sam Leffler, Errno Consulting
* Copyright (c) 2004-2005 Atheros Communications, Inc.
* Copyright (c) 2006 Devicescape Software, Inc.
* Copyright (c) 2007 Jiri Slaby <[email protected]>
* Copyright (c) 2007 Luis R. Rodriguez <[email protected]>
* Copyright (c) 2010 Bruno Randolf <[email protected]>
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer,
* without modification.
* 2. Redistributions in binary form must reproduce at minimum a disclaimer
* similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
* redistribution must be conditioned upon including a substantially
* similar Disclaimer requirement for further binary redistribution.
* 3. Neither the names of the above-listed copyright holders nor the names
* of any contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Alternatively, this software may be distributed under the terms of the
* GNU General Public License ("GPL") version 2 as published by the Free
* Software Foundation.
*
* NO WARRANTY
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
* AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
* THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
* OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
* IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
* THE POSSIBILITY OF SUCH DAMAGES.
*
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <net/mac80211.h>
#include <asm/unaligned.h>
#include "ath5k.h"
#include "base.h"
#include "reg.h"
/********************\
* Mac80211 functions *
\********************/
static void
ath5k_tx(struct ieee80211_hw *hw, struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct ath5k_hw *ah = hw->priv;
u16 qnum = skb_get_queue_mapping(skb);
if (WARN_ON(qnum >= ah->ah_capabilities.cap_queues.q_tx_num)) {
ieee80211_free_txskb(hw, skb);
return;
}
ath5k_tx_queue(hw, skb, &ah->txqs[qnum], control);
}
static int
ath5k_add_interface(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
int ret;
struct ath5k_vif *avf = (void *)vif->drv_priv;
mutex_lock(&ah->lock);
if ((vif->type == NL80211_IFTYPE_AP ||
vif->type == NL80211_IFTYPE_ADHOC)
&& (ah->num_ap_vifs + ah->num_adhoc_vifs) >= ATH_BCBUF) {
ret = -ELNRNG;
goto end;
}
/* Don't allow other interfaces if one ad-hoc is configured.
* TODO: Fix the problems with ad-hoc and multiple other interfaces.
* We would need to operate the HW in ad-hoc mode to allow TSF updates
* for the IBSS, but this breaks with additional AP or STA interfaces
* at the moment. */
if (ah->num_adhoc_vifs ||
(ah->nvifs && vif->type == NL80211_IFTYPE_ADHOC)) {
ATH5K_ERR(ah, "Only one ad-hoc interface is allowed.\n");
ret = -ELNRNG;
goto end;
}
switch (vif->type) {
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_ADHOC:
case NL80211_IFTYPE_MESH_POINT:
avf->opmode = vif->type;
break;
default:
ret = -EOPNOTSUPP;
goto end;
}
ah->nvifs++;
ATH5K_DBG(ah, ATH5K_DEBUG_MODE, "add interface mode %d\n", avf->opmode);
/* Assign the vap/adhoc to a beacon xmit slot. */
if ((avf->opmode == NL80211_IFTYPE_AP) ||
(avf->opmode == NL80211_IFTYPE_ADHOC) ||
(avf->opmode == NL80211_IFTYPE_MESH_POINT)) {
int slot;
WARN_ON(list_empty(&ah->bcbuf));
avf->bbuf = list_first_entry(&ah->bcbuf, struct ath5k_buf,
list);
list_del(&avf->bbuf->list);
avf->bslot = 0;
for (slot = 0; slot < ATH_BCBUF; slot++) {
if (!ah->bslot[slot]) {
avf->bslot = slot;
break;
}
}
BUG_ON(ah->bslot[avf->bslot] != NULL);
ah->bslot[avf->bslot] = vif;
if (avf->opmode == NL80211_IFTYPE_AP)
ah->num_ap_vifs++;
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
ah->num_adhoc_vifs++;
else if (avf->opmode == NL80211_IFTYPE_MESH_POINT)
ah->num_mesh_vifs++;
}
/* Any MAC address is fine, all others are included through the
* filter.
*/
ath5k_hw_set_lladdr(ah, vif->addr);
ath5k_update_bssid_mask_and_opmode(ah, vif);
ret = 0;
end:
mutex_unlock(&ah->lock);
return ret;
}
static void
ath5k_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
struct ath5k_vif *avf = (void *)vif->drv_priv;
unsigned int i;
mutex_lock(&ah->lock);
ah->nvifs--;
if (avf->bbuf) {
ath5k_txbuf_free_skb(ah, avf->bbuf);
list_add_tail(&avf->bbuf->list, &ah->bcbuf);
for (i = 0; i < ATH_BCBUF; i++) {
if (ah->bslot[i] == vif) {
ah->bslot[i] = NULL;
break;
}
}
avf->bbuf = NULL;
}
if (avf->opmode == NL80211_IFTYPE_AP)
ah->num_ap_vifs--;
else if (avf->opmode == NL80211_IFTYPE_ADHOC)
ah->num_adhoc_vifs--;
else if (avf->opmode == NL80211_IFTYPE_MESH_POINT)
ah->num_mesh_vifs--;
ath5k_update_bssid_mask_and_opmode(ah, NULL);
mutex_unlock(&ah->lock);
}
/*
* TODO: Phy disable/diversity etc
*/
static int
ath5k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath5k_hw *ah = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
int i;
mutex_lock(&ah->lock);
if (changed & IEEE80211_CONF_CHANGE_CHANNEL) {
ret = ath5k_chan_set(ah, &conf->chandef);
if (ret < 0)
goto unlock;
}
if ((changed & IEEE80211_CONF_CHANGE_POWER) &&
(ah->ah_txpower.txp_requested != conf->power_level)) {
ah->ah_txpower.txp_requested = conf->power_level;
/* Half dB steps */
ath5k_hw_set_txpower_limit(ah, (conf->power_level * 2));
}
if (changed & IEEE80211_CONF_CHANGE_RETRY_LIMITS) {
ah->ah_retry_long = conf->long_frame_max_tx_count;
ah->ah_retry_short = conf->short_frame_max_tx_count;
for (i = 0; i < ah->ah_capabilities.cap_queues.q_tx_num; i++)
ath5k_hw_set_tx_retry_limits(ah, i);
}
/* TODO:
* 1) Move this on config_interface and handle each case
* separately eg. when we have only one STA vif, use
* AR5K_ANTMODE_SINGLE_AP
*
* 2) Allow the user to change antenna mode eg. when only
* one antenna is present
*
* 3) Allow the user to set default/tx antenna when possible
*
* 4) Default mode should handle 90% of the cases, together
* with fixed a/b and single AP modes we should be able to
 * handle 99%. Sectored modes are extreme cases and I still
* haven't found a usage for them. If we decide to support them,
* then we must allow the user to set how many tx antennas we
* have available
*/
ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
unlock:
mutex_unlock(&ah->lock);
return ret;
}
static void
ath5k_bss_info_changed(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf, u64 changes)
{
struct ath5k_vif *avf = (void *)vif->drv_priv;
struct ath5k_hw *ah = hw->priv;
struct ath_common *common = ath5k_hw_common(ah);
mutex_lock(&ah->lock);
if (changes & BSS_CHANGED_BSSID) {
/* Cache for later use during resets */
memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
common->curaid = 0;
ath5k_hw_set_bssid(ah);
}
if (changes & BSS_CHANGED_BEACON_INT)
ah->bintval = bss_conf->beacon_int;
if (changes & BSS_CHANGED_ERP_SLOT) {
int slot_time;
ah->ah_short_slot = bss_conf->use_short_slot;
slot_time = ath5k_hw_get_default_slottime(ah) +
3 * ah->ah_coverage_class;
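/* Each coverage class step adds 3 us of air propagation time
 * (per the IEEE 802.11 coverage class definition), so e.g. with
 * a 9 us default slot time and coverage class 2:
 * slot_time = 9 + 3 * 2 = 15 us.
 */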
ath5k_hw_set_ifs_intervals(ah, slot_time);
}
if (changes & BSS_CHANGED_ASSOC) {
avf->assoc = vif->cfg.assoc;
if (vif->cfg.assoc)
ah->assoc = vif->cfg.assoc;
else
ah->assoc = ath5k_any_vif_assoc(ah);
if (ah->opmode == NL80211_IFTYPE_STATION)
ath5k_set_beacon_filter(hw, ah->assoc);
ath5k_hw_set_ledstate(ah, ah->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
if (vif->cfg.assoc) {
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
"Bss Info ASSOC %d, bssid: %pM\n",
vif->cfg.aid, common->curbssid);
common->curaid = vif->cfg.aid;
ath5k_hw_set_bssid(ah);
/* Once ANI is available you would start it here */
}
}
if (changes & BSS_CHANGED_BEACON) {
spin_lock_bh(&ah->block);
ath5k_beacon_update(hw, vif);
spin_unlock_bh(&ah->block);
}
if (changes & BSS_CHANGED_BEACON_ENABLED)
ah->enable_beacon = bss_conf->enable_beacon;
if (changes & (BSS_CHANGED_BEACON | BSS_CHANGED_BEACON_ENABLED |
BSS_CHANGED_BEACON_INT))
ath5k_beacon_config(ah);
mutex_unlock(&ah->lock);
}
static u64
ath5k_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
u32 mfilt[2], val;
u8 pos;
struct netdev_hw_addr *ha;
mfilt[0] = 0;
mfilt[1] = 0;
netdev_hw_addr_list_for_each(ha, mc_list) {
/* calculate XOR of eight 6-bit values */
val = get_unaligned_le32(ha->addr + 0);
pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
val = get_unaligned_le32(ha->addr + 3);
pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
pos &= 0x3f;
mfilt[pos / 32] |= (1 << (pos % 32));
/* XXX: we might be able to just do this instead, but it
 * needs testing; if we do use it we'd need to inform the
 * code below not to reset the mcast filter */
/* ath5k_hw_set_mcast_filterindex(ah,
* ha->addr[5]); */
}
return ((u64)(mfilt[1]) << 32) | mfilt[0];
}
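/*
 * Illustrative sketch (not part of the driver): the hash above folds a
 * 48-bit multicast address into a 6-bit bucket by XOR-ing eight 6-bit
 * slices of it. The same computation as a standalone (hypothetical)
 * helper:
 *
 *	static u8 ath5k_mcast_hash(const u8 *addr)
 *	{
 *		u32 val;
 *		u8 pos;
 *
 *		val = get_unaligned_le32(addr + 0);
 *		pos = (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
 *		val = get_unaligned_le32(addr + 3);
 *		pos ^= (val >> 18) ^ (val >> 12) ^ (val >> 6) ^ val;
 *		return pos & 0x3f;
 *	}
 *
 * The returned bucket selects one bit in the 64-bit mfilt[] pair.
 */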
/*
* o always accept unicast, broadcast, and multicast traffic
* o multicast traffic for all BSSIDs will be enabled if mac80211
* says it should be
 * o maintain current state of phy ofdm or phy cck error reception.
 * If the hardware detects any of these types of errors then
 * ath5k_hw_get_rx_filter() will pass to us the respective
 * hardware filters to be able to receive these types of frames.
* o probe request frames are accepted only when operating in
* hostap, adhoc, or monitor modes
* o enable promiscuous mode according to the interface state
* o accept beacons:
* - when operating in adhoc mode so the 802.11 layer creates
* node table entries for peers,
* - when operating in station mode for collecting rssi data when
* the station is otherwise quiet, or
* - when scanning
*/
static void
ath5k_configure_filter(struct ieee80211_hw *hw, unsigned int changed_flags,
unsigned int *new_flags, u64 multicast)
{
#define SUPPORTED_FIF_FLAGS \
(FIF_ALLMULTI | FIF_FCSFAIL | \
FIF_PLCPFAIL | FIF_CONTROL | FIF_OTHER_BSS | \
FIF_BCN_PRBRESP_PROMISC)
struct ath5k_hw *ah = hw->priv;
u32 mfilt[2], rfilt;
struct ath5k_vif_iter_data iter_data; /* to count STA interfaces */
mutex_lock(&ah->lock);
mfilt[0] = multicast;
mfilt[1] = multicast >> 32;
/* Only deal with supported flags */
*new_flags &= SUPPORTED_FIF_FLAGS;
/* If HW detects any phy or radar errors, leave those filters on.
* Also, always enable Unicast, Broadcasts and Multicast
* XXX: move unicast, bssid broadcasts and multicast to mac80211 */
rfilt = (ath5k_hw_get_rx_filter(ah) & (AR5K_RX_FILTER_PHYERR)) |
(AR5K_RX_FILTER_UCAST | AR5K_RX_FILTER_BCAST |
AR5K_RX_FILTER_MCAST);
/* Note, AR5K_RX_FILTER_MCAST is already enabled */
if (*new_flags & FIF_ALLMULTI) {
mfilt[0] = ~0;
mfilt[1] = ~0;
}
/* This is the best we can do */
if (*new_flags & (FIF_FCSFAIL | FIF_PLCPFAIL))
rfilt |= AR5K_RX_FILTER_PHYERR;
/* FIF_BCN_PRBRESP_PROMISC really means to enable beacons
* and probes for any BSSID */
if ((*new_flags & FIF_BCN_PRBRESP_PROMISC) || (ah->nvifs > 1))
rfilt |= AR5K_RX_FILTER_BEACON;
/* FIF_CONTROL doc says we should only pass on control frames for this
* station. This needs testing. I believe right now this
 * enables *all* control frames, which is OK, but
* we should see if we can improve on granularity */
if (*new_flags & FIF_CONTROL)
rfilt |= AR5K_RX_FILTER_CONTROL;
/* Additional settings per mode -- this is per ath5k */
/* XXX move these to mac80211, and add a beacon IFF flag to mac80211 */
switch (ah->opmode) {
case NL80211_IFTYPE_MESH_POINT:
rfilt |= AR5K_RX_FILTER_CONTROL |
AR5K_RX_FILTER_BEACON |
AR5K_RX_FILTER_PROBEREQ |
AR5K_RX_FILTER_PROM;
break;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_ADHOC:
rfilt |= AR5K_RX_FILTER_PROBEREQ |
AR5K_RX_FILTER_BEACON;
break;
case NL80211_IFTYPE_STATION:
if (ah->assoc)
rfilt |= AR5K_RX_FILTER_BEACON;
break;
default:
break;
}
iter_data.hw_macaddr = NULL;
iter_data.n_stas = 0;
iter_data.need_set_hw_addr = false;
ieee80211_iterate_active_interfaces_atomic(
ah->hw, IEEE80211_IFACE_ITER_RESUME_ALL,
ath5k_vif_iter, &iter_data);
/* Set up RX Filter */
if (iter_data.n_stas > 1) {
/* If you have multiple STA interfaces connected to
* different APs, ARPs are not received (most of the time?)
* Enabling PROMISC appears to fix that problem.
*/
rfilt |= AR5K_RX_FILTER_PROM;
}
/* Set filters */
ath5k_hw_set_rx_filter(ah, rfilt);
/* Set multicast bits */
ath5k_hw_set_mcast_filter(ah, mfilt[0], mfilt[1]);
/* Set the cached hw filter flags; these will later actually
 * be set in HW */
ah->filter_flags = rfilt;
/* Store current FIF filter flags */
ah->fif_filter_flags = *new_flags;
mutex_unlock(&ah->lock);
}
static int
ath5k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct ath5k_hw *ah = hw->priv;
struct ath_common *common = ath5k_hw_common(ah);
int ret = 0;
if (ath5k_modparam_nohwcrypt)
return -EOPNOTSUPP;
if (key->flags & IEEE80211_KEY_FLAG_RX_MGMT)
return -EOPNOTSUPP;
if (vif->type == NL80211_IFTYPE_ADHOC &&
(key->cipher == WLAN_CIPHER_SUITE_TKIP ||
key->cipher == WLAN_CIPHER_SUITE_CCMP) &&
!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
/* don't program group keys when using IBSS_RSN */
return -EOPNOTSUPP;
}
switch (key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
case WLAN_CIPHER_SUITE_TKIP:
break;
case WLAN_CIPHER_SUITE_CCMP:
if (common->crypt_caps & ATH_CRYPT_CAP_CIPHER_AESCCM)
break;
return -EOPNOTSUPP;
default:
return -EOPNOTSUPP;
}
mutex_lock(&ah->lock);
switch (cmd) {
case SET_KEY:
ret = ath_key_config(common, vif, sta, key);
if (ret >= 0) {
key->hw_key_idx = ret;
/* push IV and Michael MIC generation to stack */
key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC;
if (key->cipher == WLAN_CIPHER_SUITE_CCMP)
key->flags |= IEEE80211_KEY_FLAG_SW_MGMT_TX;
ret = 0;
}
break;
case DISABLE_KEY:
ath_key_delete(common, key->hw_key_idx);
break;
default:
ret = -EINVAL;
}
mutex_unlock(&ah->lock);
return ret;
}
static void
ath5k_sw_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const u8 *mac_addr)
{
struct ath5k_hw *ah = hw->priv;
if (!ah->assoc)
ath5k_hw_set_ledstate(ah, AR5K_LED_SCAN);
}
static void
ath5k_sw_scan_complete(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
ath5k_hw_set_ledstate(ah, ah->assoc ?
AR5K_LED_ASSOC : AR5K_LED_INIT);
}
static int
ath5k_get_stats(struct ieee80211_hw *hw,
struct ieee80211_low_level_stats *stats)
{
struct ath5k_hw *ah = hw->priv;
/* Force update */
ath5k_hw_update_mib_counters(ah);
stats->dot11ACKFailureCount = ah->stats.ack_fail;
stats->dot11RTSFailureCount = ah->stats.rts_fail;
stats->dot11RTSSuccessCount = ah->stats.rts_ok;
stats->dot11FCSErrorCount = ah->stats.fcs_error;
return 0;
}
static int
ath5k_conf_tx(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct ath5k_hw *ah = hw->priv;
struct ath5k_txq_info qi;
int ret = 0;
if (queue >= ah->ah_capabilities.cap_queues.q_tx_num)
return 0;
mutex_lock(&ah->lock);
ath5k_hw_get_tx_queueprops(ah, queue, &qi);
qi.tqi_aifs = params->aifs;
qi.tqi_cw_min = params->cw_min;
qi.tqi_cw_max = params->cw_max;
qi.tqi_burst_time = params->txop * 32;
ATH5K_DBG(ah, ATH5K_DEBUG_ANY,
"Configure tx [queue %d], "
"aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
queue, params->aifs, params->cw_min,
params->cw_max, params->txop);
if (ath5k_hw_set_tx_queueprops(ah, queue, &qi)) {
ATH5K_ERR(ah,
"Unable to update hardware queue %u!\n", queue);
ret = -EIO;
} else
ath5k_hw_reset_tx_queue(ah, queue);
mutex_unlock(&ah->lock);
return ret;
}
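/*
 * Note on units: mac80211 hands us params->txop in 32 us units while the
 * hardware burst time is programmed in microseconds, hence the * 32
 * above. E.g. the default WMM TXOP limit of 94 for AC_VI becomes
 * 94 * 32 = 3008 us of burst time.
 */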
static u64
ath5k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
return ath5k_hw_get_tsf64(ah);
}
static void
ath5k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif, u64 tsf)
{
struct ath5k_hw *ah = hw->priv;
ath5k_hw_set_tsf64(ah, tsf);
}
static void
ath5k_reset_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
{
struct ath5k_hw *ah = hw->priv;
/*
 * In IBSS mode we need to update the beacon timers too.
 * This will also reset the TSF if we call it with 0.
 */
if (ah->opmode == NL80211_IFTYPE_ADHOC)
ath5k_beacon_update_timers(ah, 0);
else
ath5k_hw_reset_tsf(ah);
}
static int
ath5k_get_survey(struct ieee80211_hw *hw, int idx, struct survey_info *survey)
{
struct ath5k_hw *ah = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
struct ath_common *common = ath5k_hw_common(ah);
struct ath_cycle_counters *cc = &common->cc_survey;
unsigned int div = common->clockrate * 1000;
if (idx != 0)
return -ENOENT;
spin_lock_bh(&common->cc_lock);
ath_hw_cycle_counters_update(common);
if (cc->cycles > 0) {
ah->survey.time += cc->cycles / div;
ah->survey.time_busy += cc->rx_busy / div;
ah->survey.time_rx += cc->rx_frame / div;
ah->survey.time_tx += cc->tx_frame / div;
}
memset(cc, 0, sizeof(*cc));
spin_unlock_bh(&common->cc_lock);
memcpy(survey, &ah->survey, sizeof(*survey));
survey->channel = conf->chandef.chan;
survey->noise = ah->ah_noise_floor;
survey->filled = SURVEY_INFO_NOISE_DBM |
SURVEY_INFO_IN_USE |
SURVEY_INFO_TIME |
SURVEY_INFO_TIME_BUSY |
SURVEY_INFO_TIME_RX |
SURVEY_INFO_TIME_TX;
return 0;
}
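/*
 * Note on the divisor above: common->clockrate is in MHz, so
 * div = clockrate * 1000 is the number of clock cycles per millisecond
 * and the survey times end up in ms; e.g. at a 44 MHz clock, 44000
 * cycles correspond to 1 ms of air time.
 */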
/**
* ath5k_set_coverage_class - Set IEEE 802.11 coverage class
*
* @hw: struct ieee80211_hw pointer
* @coverage_class: IEEE 802.11 coverage class number
*
* Mac80211 callback. Sets slot time, ACK timeout and CTS timeout for given
* coverage class. The values are persistent, they are restored after device
* reset.
*/
static void
ath5k_set_coverage_class(struct ieee80211_hw *hw, s16 coverage_class)
{
struct ath5k_hw *ah = hw->priv;
mutex_lock(&ah->lock);
ath5k_hw_set_coverage_class(ah, coverage_class);
mutex_unlock(&ah->lock);
}
static int
ath5k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
{
struct ath5k_hw *ah = hw->priv;
if (tx_ant == 1 && rx_ant == 1)
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_A);
else if (tx_ant == 2 && rx_ant == 2)
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_FIXED_B);
else if ((tx_ant & 3) == 3 && (rx_ant & 3) == 3)
ath5k_hw_set_antenna_mode(ah, AR5K_ANTMODE_DEFAULT);
else
return -EINVAL;
return 0;
}
static int
ath5k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
{
struct ath5k_hw *ah = hw->priv;
switch (ah->ah_ant_mode) {
case AR5K_ANTMODE_FIXED_A:
*tx_ant = 1; *rx_ant = 1; break;
case AR5K_ANTMODE_FIXED_B:
*tx_ant = 2; *rx_ant = 2; break;
case AR5K_ANTMODE_DEFAULT:
*tx_ant = 3; *rx_ant = 3; break;
}
return 0;
}
static void ath5k_get_ringparam(struct ieee80211_hw *hw,
u32 *tx, u32 *tx_max, u32 *rx, u32 *rx_max)
{
struct ath5k_hw *ah = hw->priv;
*tx = ah->txqs[AR5K_TX_QUEUE_ID_DATA_MIN].txq_max;
*tx_max = ATH5K_TXQ_LEN_MAX;
*rx = *rx_max = ATH_RXBUF;
}
static int ath5k_set_ringparam(struct ieee80211_hw *hw, u32 tx, u32 rx)
{
struct ath5k_hw *ah = hw->priv;
u16 qnum;
/* only support setting tx ring size for now */
if (rx != ATH_RXBUF)
return -EINVAL;
/* restrict tx ring size min/max */
if (!tx || tx > ATH5K_TXQ_LEN_MAX)
return -EINVAL;
for (qnum = 0; qnum < ARRAY_SIZE(ah->txqs); qnum++) {
if (!ah->txqs[qnum].setup)
continue;
if (ah->txqs[qnum].qnum < AR5K_TX_QUEUE_ID_DATA_MIN ||
ah->txqs[qnum].qnum > AR5K_TX_QUEUE_ID_DATA_MAX)
continue;
ah->txqs[qnum].txq_max = tx;
if (ah->txqs[qnum].txq_len >= ah->txqs[qnum].txq_max)
ieee80211_stop_queue(hw, ah->txqs[qnum].qnum);
}
return 0;
}
const struct ieee80211_ops ath5k_hw_ops = {
.tx = ath5k_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = ath5k_start,
.stop = ath5k_stop,
.add_interface = ath5k_add_interface,
/* .change_interface = not implemented */
.remove_interface = ath5k_remove_interface,
.config = ath5k_config,
.bss_info_changed = ath5k_bss_info_changed,
.prepare_multicast = ath5k_prepare_multicast,
.configure_filter = ath5k_configure_filter,
/* .set_tim = not implemented */
.set_key = ath5k_set_key,
/* .update_tkip_key = not implemented */
/* .hw_scan = not implemented */
.sw_scan_start = ath5k_sw_scan_start,
.sw_scan_complete = ath5k_sw_scan_complete,
.get_stats = ath5k_get_stats,
/* .set_frag_threshold = not implemented */
/* .set_rts_threshold = not implemented */
/* .sta_add = not implemented */
/* .sta_remove = not implemented */
/* .sta_notify = not implemented */
.conf_tx = ath5k_conf_tx,
.get_tsf = ath5k_get_tsf,
.set_tsf = ath5k_set_tsf,
.reset_tsf = ath5k_reset_tsf,
/* .tx_last_beacon = not implemented */
/* .ampdu_action = not needed */
.get_survey = ath5k_get_survey,
.set_coverage_class = ath5k_set_coverage_class,
/* .rfkill_poll = not implemented */
/* .flush = not implemented */
/* .channel_switch = not implemented */
/* .napi_poll = not implemented */
.set_antenna = ath5k_set_antenna,
.get_antenna = ath5k_get_antenna,
.set_ringparam = ath5k_set_ringparam,
.get_ringparam = ath5k_get_ringparam,
};
|
linux-master
|
drivers/net/wireless/ath/ath5k/mac80211-ops.c
|
/*
* Copyright (c) 2004-2008 Reyk Floeter <[email protected]>
* Copyright (c) 2006-2008 Nick Kossifidis <[email protected]>
*
* Permission to use, copy, modify, and distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*
*/
/*************************************\
* Attach/Detach Functions and helpers *
\*************************************/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/pci.h>
#include <linux/slab.h>
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
/**
* ath5k_hw_post() - Power On Self Test helper function
* @ah: The &struct ath5k_hw
*/
static int ath5k_hw_post(struct ath5k_hw *ah)
{
static const u32 static_pattern[4] = {
0x55555555, 0xaaaaaaaa,
0x66666666, 0x99999999
};
static const u16 regs[2] = { AR5K_STA_ID0, AR5K_PHY(8) };
int i, c;
u16 cur_reg;
u32 var_pattern;
u32 init_val;
u32 cur_val;
for (c = 0; c < 2; c++) {
cur_reg = regs[c];
/* Save previous value */
init_val = ath5k_hw_reg_read(ah, cur_reg);
for (i = 0; i < 256; i++) {
var_pattern = i << 16 | i;
ath5k_hw_reg_write(ah, var_pattern, cur_reg);
cur_val = ath5k_hw_reg_read(ah, cur_reg);
if (cur_val != var_pattern) {
ATH5K_ERR(ah, "POST Failed !!!\n");
return -EAGAIN;
}
/* Found on ndiswrapper dumps */
var_pattern = 0x0039080f;
ath5k_hw_reg_write(ah, var_pattern, cur_reg);
}
for (i = 0; i < 4; i++) {
var_pattern = static_pattern[i];
ath5k_hw_reg_write(ah, var_pattern, cur_reg);
cur_val = ath5k_hw_reg_read(ah, cur_reg);
if (cur_val != var_pattern) {
ATH5K_ERR(ah, "POST Failed !!!\n");
return -EAGAIN;
}
/* Found on ndiswrapper dumps */
var_pattern = 0x003b080f;
ath5k_hw_reg_write(ah, var_pattern, cur_reg);
}
/* Restore previous value */
ath5k_hw_reg_write(ah, init_val, cur_reg);
}
return 0;
}
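/*
 * The loops above implement a simple write/read-back register test:
 * for every i in [0, 255] the mirrored pattern (i << 16 | i) is written
 * (for example i = 0xa5 yields 0x00a500a5), then four fixed patterns
 * are checked, and the original register value is restored at the end.
 * Any mismatch fails the POST with -EAGAIN.
 */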
/**
* ath5k_hw_init() - Check if hw is supported and init the needed structs
* @ah: The &struct ath5k_hw associated with the device
*
 * Check if the device is supported, perform a POST and initialize the needed
 * structs. Returns 0 on success, -ENOMEM if we don't have memory for the
 * needed structs, or -ENODEV if the device is not supported; prints an error
 * msg if something else went wrong.
*/
int ath5k_hw_init(struct ath5k_hw *ah)
{
static const u8 zero_mac[ETH_ALEN] = { };
struct ath_common *common = ath5k_hw_common(ah);
struct pci_dev *pdev = ah->pdev;
struct ath5k_eeprom_info *ee;
int ret;
u32 srev;
/*
* HW information
*/
ah->ah_bwmode = AR5K_BWMODE_DEFAULT;
ah->ah_txpower.txp_tpc = AR5K_TUNE_TPC_TXPOWER;
ah->ah_imr = 0;
ah->ah_retry_short = AR5K_INIT_RETRY_SHORT;
ah->ah_retry_long = AR5K_INIT_RETRY_LONG;
ah->ah_ant_mode = AR5K_ANTMODE_DEFAULT;
ah->ah_noise_floor = -95; /* until first NF calibration is run */
ah->ani_state.ani_mode = ATH5K_ANI_MODE_AUTO;
ah->ah_current_channel = &ah->channels[0];
/*
* Find the mac version
*/
ath5k_hw_read_srev(ah);
srev = ah->ah_mac_srev;
if (srev < AR5K_SREV_AR5311)
ah->ah_version = AR5K_AR5210;
else if (srev < AR5K_SREV_AR5212)
ah->ah_version = AR5K_AR5211;
else
ah->ah_version = AR5K_AR5212;
/* Get the MAC version */
ah->ah_mac_version = AR5K_REG_MS(srev, AR5K_SREV_VER);
/* Fill the ath5k_hw struct with the needed functions */
ret = ath5k_hw_init_desc_functions(ah);
if (ret)
goto err;
/* Bring device out of sleep and reset its units */
ret = ath5k_hw_nic_wakeup(ah, NULL);
if (ret)
goto err;
/* Get PHY and RADIO revisions */
ah->ah_phy_revision = ath5k_hw_reg_read(ah, AR5K_PHY_CHIP_ID) &
0xffffffff;
ah->ah_radio_5ghz_revision = ath5k_hw_radio_revision(ah,
NL80211_BAND_5GHZ);
/* Try to identify radio chip based on its srev */
switch (ah->ah_radio_5ghz_revision & 0xf0) {
case AR5K_SREV_RAD_5111:
ah->ah_radio = AR5K_RF5111;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
NL80211_BAND_2GHZ);
break;
case AR5K_SREV_RAD_5112:
case AR5K_SREV_RAD_2112:
ah->ah_radio = AR5K_RF5112;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
NL80211_BAND_2GHZ);
break;
case AR5K_SREV_RAD_2413:
ah->ah_radio = AR5K_RF2413;
ah->ah_single_chip = true;
break;
case AR5K_SREV_RAD_5413:
ah->ah_radio = AR5K_RF5413;
ah->ah_single_chip = true;
break;
case AR5K_SREV_RAD_2316:
ah->ah_radio = AR5K_RF2316;
ah->ah_single_chip = true;
break;
case AR5K_SREV_RAD_2317:
ah->ah_radio = AR5K_RF2317;
ah->ah_single_chip = true;
break;
case AR5K_SREV_RAD_5424:
if (ah->ah_mac_version == AR5K_SREV_AR2425 ||
ah->ah_mac_version == AR5K_SREV_AR2417) {
ah->ah_radio = AR5K_RF2425;
ah->ah_single_chip = true;
} else {
ah->ah_radio = AR5K_RF5413;
ah->ah_single_chip = true;
}
break;
default:
/* Identify radio based on mac/phy srev */
if (ah->ah_version == AR5K_AR5210) {
ah->ah_radio = AR5K_RF5110;
ah->ah_single_chip = false;
} else if (ah->ah_version == AR5K_AR5211) {
ah->ah_radio = AR5K_RF5111;
ah->ah_single_chip = false;
ah->ah_radio_2ghz_revision = ath5k_hw_radio_revision(ah,
NL80211_BAND_2GHZ);
} else if (ah->ah_mac_version == (AR5K_SREV_AR2425 >> 4) ||
ah->ah_mac_version == (AR5K_SREV_AR2417 >> 4) ||
ah->ah_phy_revision == AR5K_SREV_PHY_2425) {
ah->ah_radio = AR5K_RF2425;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2425;
} else if (srev == AR5K_SREV_AR5213A &&
ah->ah_phy_revision == AR5K_SREV_PHY_5212B) {
ah->ah_radio = AR5K_RF5112;
ah->ah_single_chip = false;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5112B;
} else if (ah->ah_mac_version == (AR5K_SREV_AR2415 >> 4) ||
ah->ah_mac_version == (AR5K_SREV_AR2315_R6 >> 4)) {
ah->ah_radio = AR5K_RF2316;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2316;
} else if (ah->ah_mac_version == (AR5K_SREV_AR5414 >> 4) ||
ah->ah_phy_revision == AR5K_SREV_PHY_5413) {
ah->ah_radio = AR5K_RF5413;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_5413;
} else if (ah->ah_mac_version == (AR5K_SREV_AR2414 >> 4) ||
ah->ah_phy_revision == AR5K_SREV_PHY_2413) {
ah->ah_radio = AR5K_RF2413;
ah->ah_single_chip = true;
ah->ah_radio_5ghz_revision = AR5K_SREV_RAD_2413;
} else {
ATH5K_ERR(ah, "Couldn't identify radio revision.\n");
ret = -ENODEV;
goto err;
}
}
/* Return on unsupported chips (unsupported eeprom etc) */
if ((srev >= AR5K_SREV_AR5416) && (srev < AR5K_SREV_AR2425)) {
ATH5K_ERR(ah, "Device not yet supported.\n");
ret = -ENODEV;
goto err;
}
/*
* POST
*/
ret = ath5k_hw_post(ah);
if (ret)
goto err;
/* Enable pci core retry fix on Hainan (5213A) and later chips */
if (srev >= AR5K_SREV_AR5213A)
AR5K_REG_ENABLE_BITS(ah, AR5K_PCICFG, AR5K_PCICFG_RETRY_FIX);
/*
* Get card capabilities, calibration values etc
* TODO: EEPROM work
*/
ret = ath5k_eeprom_init(ah);
if (ret) {
ATH5K_ERR(ah, "unable to init EEPROM\n");
goto err;
}
ee = &ah->ah_capabilities.cap_eeprom;
/*
* Write PCI-E power save settings
*/
if ((ah->ah_version == AR5K_AR5212) && pdev && (pci_is_pcie(pdev))) {
ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES);
ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES);
/* Shut off RX when elecidle is asserted */
ath5k_hw_reg_write(ah, 0x28000039, AR5K_PCIE_SERDES);
ath5k_hw_reg_write(ah, 0x53160824, AR5K_PCIE_SERDES);
/* If serdes programming is enabled, increase PCI-E
 * tx power for systems with a long trace from the host
 * to the minicard connector. */
if (ee->ee_serdes)
ath5k_hw_reg_write(ah, 0xe5980579, AR5K_PCIE_SERDES);
else
ath5k_hw_reg_write(ah, 0xf6800579, AR5K_PCIE_SERDES);
/* Shut off PLL and CLKREQ active in L1 */
ath5k_hw_reg_write(ah, 0x001defff, AR5K_PCIE_SERDES);
/* Preserve other settings */
ath5k_hw_reg_write(ah, 0x1aaabe40, AR5K_PCIE_SERDES);
ath5k_hw_reg_write(ah, 0xbe105554, AR5K_PCIE_SERDES);
ath5k_hw_reg_write(ah, 0x000e3007, AR5K_PCIE_SERDES);
/* Reset SERDES to load new settings */
ath5k_hw_reg_write(ah, 0x00000000, AR5K_PCIE_SERDES_RESET);
usleep_range(1000, 1500);
}
/* Get misc capabilities */
ret = ath5k_hw_set_capabilities(ah);
if (ret) {
ATH5K_ERR(ah, "unable to get device capabilities\n");
goto err;
}
/* Crypto settings */
common->keymax = (ah->ah_version == AR5K_AR5210 ?
AR5K_KEYTABLE_SIZE_5210 : AR5K_KEYTABLE_SIZE_5211);
if (srev >= AR5K_SREV_AR5212_V4 &&
(ee->ee_version < AR5K_EEPROM_VERSION_5_0 ||
!AR5K_EEPROM_AES_DIS(ee->ee_misc5)))
common->crypt_caps |= ATH_CRYPT_CAP_CIPHER_AESCCM;
if (srev >= AR5K_SREV_AR2414) {
common->crypt_caps |= ATH_CRYPT_CAP_MIC_COMBINED;
AR5K_REG_ENABLE_BITS(ah, AR5K_MISC_MODE,
AR5K_MISC_MODE_COMBINED_MIC);
}
/* MAC address is cleared until add_interface */
ath5k_hw_set_lladdr(ah, zero_mac);
/* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
eth_broadcast_addr(common->curbssid);
ath5k_hw_set_bssid(ah);
ath5k_hw_set_opmode(ah, ah->opmode);
ath5k_hw_rfgain_opt_init(ah);
ath5k_hw_init_nfcal_hist(ah);
/* turn on HW LEDs */
ath5k_hw_set_ledstate(ah, AR5K_LED_INIT);
return 0;
err:
return ret;
}
/**
* ath5k_hw_deinit() - Free the &struct ath5k_hw
* @ah: The &struct ath5k_hw
*/
void ath5k_hw_deinit(struct ath5k_hw *ah)
{
__set_bit(ATH_STAT_INVALID, ah->status);
kfree(ah->ah_rf_banks);
ath5k_eeprom_detach(ah);
/* assume interrupts are down */
}
|
linux-master
|
drivers/net/wireless/ath/ath5k/attach.c
|
/*
* Copyright (c) 2008-2009 Atheros Communications Inc.
* Copyright (c) 2009 Gabor Juhos <[email protected]>
* Copyright (c) 2009 Imre Kaloz <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/module.h>
#include <linux/nl80211.h>
#include <linux/platform_device.h>
#include <linux/etherdevice.h>
#include <ath25_platform.h>
#include "ath5k.h"
#include "debug.h"
#include "base.h"
#include "reg.h"
/* return bus cachesize in 4B word units */
static void ath5k_ahb_read_cachesize(struct ath_common *common, int *csz)
{
*csz = L1_CACHE_BYTES >> 2;
}
static bool
ath5k_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
{
struct ath5k_hw *ah = common->priv;
struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u16 *eeprom, *eeprom_end;
eeprom = (u16 *) bcfg->radio;
eeprom_end = ((void *) bcfg->config) + BOARD_CONFIG_BUFSZ;
eeprom += off;
if (eeprom > eeprom_end)
return false;
*data = *eeprom;
return true;
}
int ath5k_hw_read_srev(struct ath5k_hw *ah)
{
struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
ah->ah_mac_srev = bcfg->devid;
return 0;
}
static int ath5k_ahb_eeprom_read_mac(struct ath5k_hw *ah, u8 *mac)
{
struct platform_device *pdev = to_platform_device(ah->dev);
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
u8 *cfg_mac;
if (to_platform_device(ah->dev)->id == 0)
cfg_mac = bcfg->config->wlan0_mac;
else
cfg_mac = bcfg->config->wlan1_mac;
memcpy(mac, cfg_mac, ETH_ALEN);
return 0;
}
static const struct ath_bus_ops ath_ahb_bus_ops = {
.ath_bus_type = ATH_AHB,
.read_cachesize = ath5k_ahb_read_cachesize,
.eeprom_read = ath5k_ahb_eeprom_read,
.eeprom_read_mac = ath5k_ahb_eeprom_read_mac,
};
/* Initialization */
static int ath_ahb_probe(struct platform_device *pdev)
{
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ath5k_hw *ah;
struct ieee80211_hw *hw;
struct resource *res;
void __iomem *mem;
int irq;
int ret = 0;
u32 reg;
if (!dev_get_platdata(&pdev->dev)) {
dev_err(&pdev->dev, "no platform data specified\n");
ret = -EINVAL;
goto err_out;
}
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (res == NULL) {
dev_err(&pdev->dev, "no memory resource found\n");
ret = -ENXIO;
goto err_out;
}
mem = ioremap(res->start, resource_size(res));
if (mem == NULL) {
dev_err(&pdev->dev, "ioremap failed\n");
ret = -ENOMEM;
goto err_out;
}
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
ret = irq;
goto err_iounmap;
}
hw = ieee80211_alloc_hw(sizeof(struct ath5k_hw), &ath5k_hw_ops);
if (hw == NULL) {
dev_err(&pdev->dev, "no memory for ieee80211_hw\n");
ret = -ENOMEM;
goto err_iounmap;
}
ah = hw->priv;
ah->hw = hw;
ah->dev = &pdev->dev;
ah->iobase = mem;
ah->irq = irq;
ah->devid = bcfg->devid;
if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
/* Enable WMAC AHB arbitration */
reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
reg |= AR5K_AR2315_AHB_ARB_CTL_WLAN;
iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
/* Enable global WMAC swapping */
reg = ioread32((void __iomem *) AR5K_AR2315_BYTESWAP);
reg |= AR5K_AR2315_BYTESWAP_WMAC;
iowrite32(reg, (void __iomem *) AR5K_AR2315_BYTESWAP);
} else {
/* Enable WMAC DMA access (assuming 5312 or 231x) */
/* TODO: check other platforms */
reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE);
if (to_platform_device(ah->dev)->id == 0)
reg |= AR5K_AR5312_ENABLE_WLAN0;
else
reg |= AR5K_AR5312_ENABLE_WLAN1;
iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE);
/*
* On a dual-band AR5312, the multiband radio is only
* used as pass-through. Disable 2 GHz support in the
* driver for it
*/
if (to_platform_device(ah->dev)->id == 0 &&
(bcfg->config->flags & (BD_WLAN0 | BD_WLAN1)) ==
(BD_WLAN1 | BD_WLAN0))
ah->ah_capabilities.cap_needs_2GHz_ovr = true;
else
ah->ah_capabilities.cap_needs_2GHz_ovr = false;
}
ret = ath5k_init_ah(ah, &ath_ahb_bus_ops);
if (ret != 0) {
dev_err(&pdev->dev, "failed to attach device, err=%d\n", ret);
ret = -ENODEV;
goto err_free_hw;
}
platform_set_drvdata(pdev, hw);
return 0;
err_free_hw:
ieee80211_free_hw(hw);
err_iounmap:
iounmap(mem);
err_out:
return ret;
}
static int ath_ahb_remove(struct platform_device *pdev)
{
struct ar231x_board_config *bcfg = dev_get_platdata(&pdev->dev);
struct ieee80211_hw *hw = platform_get_drvdata(pdev);
struct ath5k_hw *ah;
u32 reg;
if (!hw)
return 0;
ah = hw->priv;
if (bcfg->devid >= AR5K_SREV_AR2315_R6) {
/* Disable WMAC AHB arbitration */
reg = ioread32((void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
reg &= ~AR5K_AR2315_AHB_ARB_CTL_WLAN;
iowrite32(reg, (void __iomem *) AR5K_AR2315_AHB_ARB_CTL);
} else {
/* Stop DMA access */
reg = ioread32((void __iomem *) AR5K_AR5312_ENABLE);
if (to_platform_device(ah->dev)->id == 0)
reg &= ~AR5K_AR5312_ENABLE_WLAN0;
else
reg &= ~AR5K_AR5312_ENABLE_WLAN1;
iowrite32(reg, (void __iomem *) AR5K_AR5312_ENABLE);
}
ath5k_deinit_ah(ah);
iounmap(ah->iobase);
ieee80211_free_hw(hw);
return 0;
}
static struct platform_driver ath_ahb_driver = {
.probe = ath_ahb_probe,
.remove = ath_ahb_remove,
.driver = {
.name = "ar231x-wmac",
},
};
module_platform_driver(ath_ahb_driver);
|
linux-master
|
drivers/net/wireless/ath/ath5k/ahb.c
|
/*
* Copyright (C) 2010 Bruno Randolf <[email protected]>
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include "ath5k.h"
#include "reg.h"
#include "debug.h"
#include "ani.h"
/**
* DOC: Basic ANI Operation
*
* Adaptive Noise Immunity (ANI) controls five noise immunity parameters
* depending on the amount of interference in the environment, increasing
* or reducing sensitivity as necessary.
*
* The parameters are:
*
* - "noise immunity"
*
* - "spur immunity"
*
* - "firstep level"
*
* - "OFDM weak signal detection"
*
* - "CCK weak signal detection"
*
 * Basically we look at the amount of OFDM and CCK timing errors we get and then
* raise or lower immunity accordingly by setting one or more of these
* parameters.
*
* Newer chipsets have PHY error counters in hardware which will generate a MIB
 * interrupt when they overflow. Older hardware has to enable PHY error frames
 * by setting an RX flag and then count every single PHY error. When a specified
 * threshold of errors has been reached we raise immunity.
 * We also regularly check the number of errors and lower or raise immunity as
 * necessary.
*/
/***********************\
* ANI parameter control *
\***********************/
/**
* ath5k_ani_set_noise_immunity_level() - Set noise immunity level
* @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_NOISE_IMM_LVL
*/
void
ath5k_ani_set_noise_immunity_level(struct ath5k_hw *ah, int level)
{
/* TODO:
* ANI documents suggest the following five levels to use, but the HAL
* and ath9k use only the last two levels, making this
* essentially an on/off option. There *may* be a reason for this (???),
 * so I stick with the HAL version for now...
*/
#if 0
static const s8 lo[] = { -52, -56, -60, -64, -70 };
static const s8 hi[] = { -18, -18, -16, -14, -12 };
static const s8 sz[] = { -34, -41, -48, -55, -62 };
static const s8 fr[] = { -70, -72, -75, -78, -80 };
#else
static const s8 lo[] = { -64, -70 };
static const s8 hi[] = { -14, -12 };
static const s8 sz[] = { -55, -62 };
static const s8 fr[] = { -78, -80 };
#endif
if (level < 0 || level >= ARRAY_SIZE(sz)) {
ATH5K_ERR(ah, "noise immunity level %d out of range",
level);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_DESIRED_SIZE,
AR5K_PHY_DESIRED_SIZE_TOT, sz[level]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
AR5K_PHY_AGCCOARSE_LO, lo[level]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_AGCCOARSE,
AR5K_PHY_AGCCOARSE_HI, hi[level]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
AR5K_PHY_SIG_FIRPWR, fr[level]);
ah->ani_state.noise_imm_level = level;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_spur_immunity_level() - Set spur immunity level
* @ah: The &struct ath5k_hw
* @level: level between 0 and @max_spur_level (the maximum level is dependent
* on the chip revision).
*/
void
ath5k_ani_set_spur_immunity_level(struct ath5k_hw *ah, int level)
{
static const int val[] = { 2, 4, 6, 8, 10, 12, 14, 16 };
if (level < 0 || level >= ARRAY_SIZE(val) ||
level > ah->ani_state.max_spur_level) {
ATH5K_ERR(ah, "spur immunity level %d out of range",
level);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_OFDM_SELFCORR,
AR5K_PHY_OFDM_SELFCORR_CYPWR_THR1, val[level]);
ah->ani_state.spur_level = level;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_firstep_level() - Set "firstep" level
* @ah: The &struct ath5k_hw
* @level: level between 0 and @ATH5K_ANI_MAX_FIRSTEP_LVL
*/
void
ath5k_ani_set_firstep_level(struct ath5k_hw *ah, int level)
{
static const int val[] = { 0, 4, 8 };
if (level < 0 || level >= ARRAY_SIZE(val)) {
ATH5K_ERR(ah, "firstep level %d out of range", level);
return;
}
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_SIG,
AR5K_PHY_SIG_FIRSTEP, val[level]);
ah->ani_state.firstep_level = level;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "new level %d", level);
}
/**
* ath5k_ani_set_ofdm_weak_signal_detection() - Set OFDM weak signal detection
* @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
ath5k_ani_set_ofdm_weak_signal_detection(struct ath5k_hw *ah, bool on)
{
static const int m1l[] = { 127, 50 };
static const int m2l[] = { 127, 40 };
static const int m1[] = { 127, 0x4d };
static const int m2[] = { 127, 0x40 };
static const int m2cnt[] = { 31, 16 };
static const int m2lcnt[] = { 63, 48 };
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_M1, m1l[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_M2, m2l[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
AR5K_PHY_WEAK_OFDM_HIGH_THR_M1, m1[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
AR5K_PHY_WEAK_OFDM_HIGH_THR_M2, m2[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_HIGH_THR,
AR5K_PHY_WEAK_OFDM_HIGH_THR_M2_COUNT, m2cnt[on]);
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_M2_COUNT, m2lcnt[on]);
if (on)
AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
else
AR5K_REG_DISABLE_BITS(ah, AR5K_PHY_WEAK_OFDM_LOW_THR,
AR5K_PHY_WEAK_OFDM_LOW_THR_SELFCOR_EN);
ah->ani_state.ofdm_weak_sig = on;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
on ? "on" : "off");
}
/**
* ath5k_ani_set_cck_weak_signal_detection() - Set CCK weak signal detection
* @ah: The &struct ath5k_hw
* @on: turn on or off
*/
void
ath5k_ani_set_cck_weak_signal_detection(struct ath5k_hw *ah, bool on)
{
static const int val[] = { 8, 6 };
AR5K_REG_WRITE_BITS(ah, AR5K_PHY_CCK_CROSSCORR,
AR5K_PHY_CCK_CROSSCORR_WEAK_SIG_THR, val[on]);
ah->ani_state.cck_weak_sig = on;
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "turned %s",
on ? "on" : "off");
}
/***************\
* ANI algorithm *
\***************/
/**
* ath5k_ani_raise_immunity() - Increase noise immunity
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
* @ofdm_trigger: True if we are called because of too many OFDM errors; in
* that case the algorithm tunes additional parameters.
*
* Try to raise noise immunity (=decrease sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
*/
static void
ath5k_ani_raise_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as,
bool ofdm_trigger)
{
int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "raise immunity (%s)",
ofdm_trigger ? "ODFM" : "CCK");
/* first: raise noise immunity */
if (as->noise_imm_level < ATH5K_ANI_MAX_NOISE_IMM_LVL) {
ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level + 1);
return;
}
/* only OFDM: raise spur immunity level */
if (ofdm_trigger &&
as->spur_level < ah->ani_state.max_spur_level) {
ath5k_ani_set_spur_immunity_level(ah, as->spur_level + 1);
return;
}
/* AP mode */
if (ah->opmode == NL80211_IFTYPE_AP) {
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
}
/* STA and IBSS mode */
/* TODO: for IBSS mode it would be better to keep a beacon RSSI average
* for each neighbour node and use the minimum of these, to make sure we
* don't shut out a remote node by raising immunity too high. */
if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"beacon RSSI high");
/* only OFDM: beacon RSSI is high, we can disable OFDM weak
* signal detection */
if (ofdm_trigger && as->ofdm_weak_sig) {
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
ath5k_ani_set_spur_immunity_level(ah, 0);
return;
}
/* as a last resort or CCK: raise firstep level */
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL) {
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
}
} else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
/* beacon RSSI in mid range, we need OFDM weak signal detect,
* but can raise firstep level */
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"beacon RSSI mid");
if (ofdm_trigger && !as->ofdm_weak_sig)
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
if (as->firstep_level < ATH5K_ANI_MAX_FIRSTEP_LVL)
ath5k_ani_set_firstep_level(ah, as->firstep_level + 1);
return;
} else if (ah->ah_current_channel->band == NL80211_BAND_2GHZ) {
/* beacon RSSI is low. in B/G mode turn off OFDM weak signal
* detection and zero the firstep level to maximize CCK sensitivity */
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"beacon RSSI low, 2GHz");
if (ofdm_trigger && as->ofdm_weak_sig)
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
if (as->firstep_level > 0)
ath5k_ani_set_firstep_level(ah, 0);
return;
}
/* TODO: why not?:
if (as->cck_weak_sig == true) {
ath5k_ani_set_cck_weak_signal_detection(ah, false);
}
*/
}
/**
* ath5k_ani_lower_immunity() - Decrease noise immunity
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
*
* Try to lower noise immunity (=increase sensitivity) in several steps
* depending on the average RSSI of the beacons we received.
*/
static void
ath5k_ani_lower_immunity(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
int rssi = ewma_beacon_rssi_read(&ah->ah_beacon_rssi_avg);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "lower immunity");
if (ah->opmode == NL80211_IFTYPE_AP) {
/* AP mode */
if (as->firstep_level > 0) {
ath5k_ani_set_firstep_level(ah, as->firstep_level - 1);
return;
}
} else {
/* STA and IBSS mode (see TODO above) */
if (rssi > ATH5K_ANI_RSSI_THR_HIGH) {
/* beacon signal is high, leave OFDM weak signal
* detection off or it may oscillate
* TODO: who said it's off??? */
} else if (rssi > ATH5K_ANI_RSSI_THR_LOW) {
/* beacon RSSI is mid-range: turn on OFDM weak signal
* detection first and lower the firstep level next */
if (!as->ofdm_weak_sig) {
ath5k_ani_set_ofdm_weak_signal_detection(ah,
true);
return;
}
if (as->firstep_level > 0) {
ath5k_ani_set_firstep_level(ah,
as->firstep_level - 1);
return;
}
} else {
/* beacon signal is low: only reduce firstep level */
if (as->firstep_level > 0) {
ath5k_ani_set_firstep_level(ah,
as->firstep_level - 1);
return;
}
}
}
/* all modes */
if (as->spur_level > 0) {
ath5k_ani_set_spur_immunity_level(ah, as->spur_level - 1);
return;
}
/* finally, reduce noise immunity */
if (as->noise_imm_level > 0) {
ath5k_ani_set_noise_immunity_level(ah, as->noise_imm_level - 1);
return;
}
}
/**
* ath5k_hw_ani_get_listen_time() - Update counters and return listening time
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
*
* Return an approximation of the time spent "listening" in milliseconds (ms)
* since the last call of this function.
* Save a snapshot of the counter values for debugging/statistics.
*/
static int
ath5k_hw_ani_get_listen_time(struct ath5k_hw *ah, struct ath5k_ani_state *as)
{
struct ath_common *common = ath5k_hw_common(ah);
int listen;
spin_lock_bh(&common->cc_lock);
ath_hw_cycle_counters_update(common);
memcpy(&as->last_cc, &common->cc_ani, sizeof(as->last_cc));
/* clears common->cc_ani */
listen = ath_hw_get_listen_time(common);
spin_unlock_bh(&common->cc_lock);
return listen;
}
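/* A rough sketch of what ath_hw_get_listen_time() computes (the exact
* arithmetic lives in the shared ath layer and may differ in detail):
* listen time is the share of MAC clock cycles spent neither transmitting
* nor receiving, roughly
*
*	listen_ms ~= (cycles - rx_busy - tx_busy) / (clockrate_MHz * 1000)
*
* so at an assumed 40 MHz MAC clock, 40000000 idle cycles correspond to
* about 1000 ms of listen time. */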
/**
* ath5k_ani_save_and_clear_phy_errors() - Clear and save PHY error counters
* @ah: The &struct ath5k_hw
* @as: The &struct ath5k_ani_state
*
* Clear the PHY error counters as soon as possible, since this might be called
* from a MIB interrupt and we want to make sure we don't get interrupted again.
* Add the count of CCK and OFDM errors to our internal state, so it can be used
* by the algorithm later.
*
* Will be called from interrupt and tasklet context.
* Returns 0 if both counters are zero.
*/
static int
ath5k_ani_save_and_clear_phy_errors(struct ath5k_hw *ah,
struct ath5k_ani_state *as)
{
unsigned int ofdm_err, cck_err;
if (!ah->ah_capabilities.cap_has_phyerr_counters)
return 0;
ofdm_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1);
cck_err = ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2);
/* reset counters first, we might be in a hurry (interrupt) */
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
AR5K_PHYERR_CNT1);
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
AR5K_PHYERR_CNT2);
ofdm_err = ATH5K_ANI_OFDM_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - ofdm_err);
cck_err = ATH5K_ANI_CCK_TRIG_HIGH - (ATH5K_PHYERR_CNT_MAX - cck_err);
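/* Example of the arithmetic above: the registers were preloaded with
* (ATH5K_PHYERR_CNT_MAX - trigger), so they saturate - and raise the MIB
* interrupt - after exactly "trigger" errors. Reading back a raw value
* "raw", the errors accumulated since the preload are therefore
* trigger - (ATH5K_PHYERR_CNT_MAX - raw). */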
/* sometimes both can be zero, especially when there is a superfluous
* second interrupt. detect that here and return 0, as documented above. */
if (ofdm_err <= 0 && cck_err <= 0)
return 0;
/* avoid negative values should one of the registers overflow */
if (ofdm_err > 0) {
as->ofdm_errors += ofdm_err;
as->sum_ofdm_errors += ofdm_err;
}
if (cck_err > 0) {
as->cck_errors += cck_err;
as->sum_cck_errors += cck_err;
}
return 1;
}
/**
* ath5k_ani_period_restart() - Restart ANI period
* @as: The &struct ath5k_ani_state
*
* Just reset counters, so they are clear for the next "ani period".
*/
static void
ath5k_ani_period_restart(struct ath5k_ani_state *as)
{
/* keep last values for debugging */
as->last_ofdm_errors = as->ofdm_errors;
as->last_cck_errors = as->cck_errors;
as->last_listen = as->listen_time;
as->ofdm_errors = 0;
as->cck_errors = 0;
as->listen_time = 0;
}
/**
* ath5k_ani_calibration() - The main ANI calibration function
* @ah: The &struct ath5k_hw
*
* We count OFDM and CCK errors relative to the time where we did not send or
* receive ("listen" time) and raise or lower immunity accordingly.
* This is called regularly (every second) from the calibration timer, but also
* when an error threshold has been reached.
*
* In order to synchronize access from different contexts, this should be
* called only indirectly by scheduling the ANI tasklet!
*/
void
ath5k_ani_calibration(struct ath5k_hw *ah)
{
struct ath5k_ani_state *as = &ah->ani_state;
int listen, ofdm_high, ofdm_low, cck_high, cck_low;
/* get the listen time since the last call and add it to the counter,
* because we might not have restarted the "ani period" last time.
* do this unconditionally so the busy time is also computed in manual mode */
listen = ath5k_hw_ani_get_listen_time(ah, as);
as->listen_time += listen;
if (as->ani_mode != ATH5K_ANI_MODE_AUTO)
return;
ath5k_ani_save_and_clear_phy_errors(ah, as);
ofdm_high = as->listen_time * ATH5K_ANI_OFDM_TRIG_HIGH / 1000;
cck_high = as->listen_time * ATH5K_ANI_CCK_TRIG_HIGH / 1000;
ofdm_low = as->listen_time * ATH5K_ANI_OFDM_TRIG_LOW / 1000;
cck_low = as->listen_time * ATH5K_ANI_CCK_TRIG_LOW / 1000;
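/* Worked example, assuming the default trigger levels from ani.h (an OFDM
* high trigger of 500 errors per second, for instance): after 1000 ms of
* listen time ofdm_high becomes 1000 * 500 / 1000 = 500, i.e. the
* per-second trigger levels are simply scaled to the listen time that was
* actually accumulated. */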
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"listen %d (now %d)", as->listen_time, listen);
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"check high ofdm %d/%d cck %d/%d",
as->ofdm_errors, ofdm_high, as->cck_errors, cck_high);
if (as->ofdm_errors > ofdm_high || as->cck_errors > cck_high) {
/* too many PHY errors - we have to raise immunity */
bool ofdm_flag = as->ofdm_errors > ofdm_high;
ath5k_ani_raise_immunity(ah, as, ofdm_flag);
ath5k_ani_period_restart(as);
} else if (as->listen_time > 5 * ATH5K_ANI_LISTEN_PERIOD) {
/* If more than 5 (TODO: why 5?) periods have passed and we got
* relatively few errors we can try to lower immunity */
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"check low ofdm %d/%d cck %d/%d",
as->ofdm_errors, ofdm_low, as->cck_errors, cck_low);
if (as->ofdm_errors <= ofdm_low && as->cck_errors <= cck_low)
ath5k_ani_lower_immunity(ah, as);
ath5k_ani_period_restart(as);
}
}
/*******************\
* Interrupt handler *
\*******************/
/**
* ath5k_ani_mib_intr() - Interrupt handler for ANI MIB counters
* @ah: The &struct ath5k_hw
*
* Just read & reset the registers quickly, so they don't generate more
* interrupts, save the counters and schedule the tasklet to decide whether
* to raise immunity or not.
*
* We just need to handle PHY error counters, ath5k_hw_update_mib_counters()
* should take care of all "normal" MIB interrupts.
*/
void
ath5k_ani_mib_intr(struct ath5k_hw *ah)
{
struct ath5k_ani_state *as = &ah->ani_state;
/* nothing to do here if HW does not have PHY error counters - they
* can't be the reason for the MIB interrupt then */
if (!ah->ah_capabilities.cap_has_phyerr_counters)
return;
/* not in use, but clear them anyway */
ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
if (ah->ani_state.ani_mode != ATH5K_ANI_MODE_AUTO)
return;
/* If one of the errors triggered, we can get a superfluous second
* interrupt, even though we have already reset the register. The
* function detects that so we can return early. */
if (ath5k_ani_save_and_clear_phy_errors(ah, as) == 0)
return;
if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH ||
as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
tasklet_schedule(&ah->ani_tasklet);
}
/**
* ath5k_ani_phy_error_report() - Used by older HW to report PHY errors
*
* @ah: The &struct ath5k_hw
* @phyerr: One of enum ath5k_phy_error_code
*
* This is used by hardware without PHY error counters to report PHY errors
* on a frame-by-frame basis, instead of via the MIB interrupt.
*/
void
ath5k_ani_phy_error_report(struct ath5k_hw *ah,
enum ath5k_phy_error_code phyerr)
{
struct ath5k_ani_state *as = &ah->ani_state;
if (phyerr == AR5K_RX_PHY_ERROR_OFDM_TIMING) {
as->ofdm_errors++;
if (as->ofdm_errors > ATH5K_ANI_OFDM_TRIG_HIGH)
tasklet_schedule(&ah->ani_tasklet);
} else if (phyerr == AR5K_RX_PHY_ERROR_CCK_TIMING) {
as->cck_errors++;
if (as->cck_errors > ATH5K_ANI_CCK_TRIG_HIGH)
tasklet_schedule(&ah->ani_tasklet);
}
}
/****************\
* Initialization *
\****************/
/**
* ath5k_enable_phy_err_counters() - Enable PHY error counters
* @ah: The &struct ath5k_hw
*
* Enable PHY error counters for OFDM and CCK timing errors.
*/
static void
ath5k_enable_phy_err_counters(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_OFDM_TRIG_HIGH,
AR5K_PHYERR_CNT1);
ath5k_hw_reg_write(ah, ATH5K_PHYERR_CNT_MAX - ATH5K_ANI_CCK_TRIG_HIGH,
AR5K_PHYERR_CNT2);
ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_OFDM, AR5K_PHYERR_CNT1_MASK);
ath5k_hw_reg_write(ah, AR5K_PHY_ERR_FIL_CCK, AR5K_PHYERR_CNT2_MASK);
/* not in use */
ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
/**
* ath5k_disable_phy_err_counters() - Disable PHY error counters
* @ah: The &struct ath5k_hw
*
* Disable PHY error counters for OFDM and CCK timing errors.
*/
static void
ath5k_disable_phy_err_counters(struct ath5k_hw *ah)
{
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1);
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2);
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT1_MASK);
ath5k_hw_reg_write(ah, 0, AR5K_PHYERR_CNT2_MASK);
/* not in use */
ath5k_hw_reg_write(ah, 0, AR5K_OFDM_FIL_CNT);
ath5k_hw_reg_write(ah, 0, AR5K_CCK_FIL_CNT);
}
/**
* ath5k_ani_init() - Initialize ANI
* @ah: The &struct ath5k_hw
* @mode: One of enum ath5k_ani_mode
*
* Initialize ANI according to mode.
*/
void
ath5k_ani_init(struct ath5k_hw *ah, enum ath5k_ani_mode mode)
{
/* ANI is only possible on 5212 and newer */
if (ah->ah_version < AR5K_AR5212)
return;
if (mode < ATH5K_ANI_MODE_OFF || mode > ATH5K_ANI_MODE_AUTO) {
ATH5K_ERR(ah, "ANI mode %d out of range", mode);
return;
}
/* clear old state information */
memset(&ah->ani_state, 0, sizeof(ah->ani_state));
/* older hardware has more spur levels than newer */
if (ah->ah_mac_srev < AR5K_SREV_AR2414)
ah->ani_state.max_spur_level = 7;
else
ah->ani_state.max_spur_level = 2;
/* initial values for our ani parameters */
if (mode == ATH5K_ANI_MODE_OFF) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI off\n");
} else if (mode == ATH5K_ANI_MODE_MANUAL_LOW) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"ANI manual low -> high sensitivity\n");
ath5k_ani_set_noise_immunity_level(ah, 0);
ath5k_ani_set_spur_immunity_level(ah, 0);
ath5k_ani_set_firstep_level(ah, 0);
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
ath5k_ani_set_cck_weak_signal_detection(ah, true);
} else if (mode == ATH5K_ANI_MODE_MANUAL_HIGH) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI,
"ANI manual high -> low sensitivity\n");
ath5k_ani_set_noise_immunity_level(ah,
ATH5K_ANI_MAX_NOISE_IMM_LVL);
ath5k_ani_set_spur_immunity_level(ah,
ah->ani_state.max_spur_level);
ath5k_ani_set_firstep_level(ah, ATH5K_ANI_MAX_FIRSTEP_LVL);
ath5k_ani_set_ofdm_weak_signal_detection(ah, false);
ath5k_ani_set_cck_weak_signal_detection(ah, false);
} else if (mode == ATH5K_ANI_MODE_AUTO) {
ATH5K_DBG_UNLIMIT(ah, ATH5K_DEBUG_ANI, "ANI auto\n");
ath5k_ani_set_noise_immunity_level(ah, 0);
ath5k_ani_set_spur_immunity_level(ah, 0);
ath5k_ani_set_firstep_level(ah, 0);
ath5k_ani_set_ofdm_weak_signal_detection(ah, true);
ath5k_ani_set_cck_weak_signal_detection(ah, false);
}
/* newer hardware has PHY error counter registers which we can use to
* get OFDM and CCK error counts. older hardware has to set rxfilter and
* report every single PHY error by calling ath5k_ani_phy_error_report()
*/
if (mode == ATH5K_ANI_MODE_AUTO) {
if (ah->ah_capabilities.cap_has_phyerr_counters)
ath5k_enable_phy_err_counters(ah);
else
ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) |
AR5K_RX_FILTER_PHYERR);
} else {
if (ah->ah_capabilities.cap_has_phyerr_counters)
ath5k_disable_phy_err_counters(ah);
else
ath5k_hw_set_rx_filter(ah, ath5k_hw_get_rx_filter(ah) &
~AR5K_RX_FILTER_PHYERR);
}
ah->ani_state.ani_mode = mode;
}
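/* Usage sketch: a caller would typically (re)enable automatic ANI right
* after a chip reset, e.g.
*
*	ath5k_ani_init(ah, ATH5K_ANI_MODE_AUTO);
*
* and pass ATH5K_ANI_MODE_OFF to disable it again; the two manual modes pin
* the parameters at maximum or minimum sensitivity, which is mainly useful
* for debugging. */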
/**************\
* Debug output *
\**************/
#ifdef CONFIG_ATH5K_DEBUG
/**
* ath5k_ani_print_counters() - Print ANI counters
* @ah: The &struct ath5k_hw
*
* Used for debugging ANI
*/
void
ath5k_ani_print_counters(struct ath5k_hw *ah)
{
/* clears too */
pr_notice("ACK fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_ACK_FAIL));
pr_notice("RTS fail\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_FAIL));
pr_notice("RTS success\t%d\n", ath5k_hw_reg_read(ah, AR5K_RTS_OK));
pr_notice("FCS error\t%d\n", ath5k_hw_reg_read(ah, AR5K_FCS_FAIL));
/* no clear */
pr_notice("tx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_TX));
pr_notice("rx\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RX));
pr_notice("busy\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_RXCLR));
pr_notice("cycles\t%d\n", ath5k_hw_reg_read(ah, AR5K_PROFCNT_CYCLE));
pr_notice("AR5K_PHYERR_CNT1\t%d\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT1));
pr_notice("AR5K_PHYERR_CNT2\t%d\n",
ath5k_hw_reg_read(ah, AR5K_PHYERR_CNT2));
pr_notice("AR5K_OFDM_FIL_CNT\t%d\n",
ath5k_hw_reg_read(ah, AR5K_OFDM_FIL_CNT));
pr_notice("AR5K_CCK_FIL_CNT\t%d\n",
ath5k_hw_reg_read(ah, AR5K_CCK_FIL_CNT));
}
#endif
|
linux-master
|
drivers/net/wireless/ath/ath5k/ani.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Linux device driver for ADMtek ADM8211 (IEEE 802.11b MAC/BBP)
*
* Copyright (c) 2003, Jouni Malinen <[email protected]>
* Copyright (c) 2004-2007, Michael Wu <[email protected]>
* Some parts copyright (c) 2003 by David Young <[email protected]>
* and used with permission.
*
* Much thanks to Infineon-ADMtek for their support of this driver.
*/
#include <linux/interrupt.h>
#include <linux/if.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/crc32.h>
#include <linux/eeprom_93cx6.h>
#include <linux/module.h>
#include <net/mac80211.h>
#include "adm8211.h"
MODULE_AUTHOR("Michael Wu <[email protected]>");
MODULE_AUTHOR("Jouni Malinen <[email protected]>");
MODULE_DESCRIPTION("Driver for IEEE 802.11b wireless cards based on ADMtek ADM8211");
MODULE_LICENSE("GPL");
static unsigned int tx_ring_size __read_mostly = 16;
static unsigned int rx_ring_size __read_mostly = 16;
module_param(tx_ring_size, uint, 0);
module_param(rx_ring_size, uint, 0);
static const struct pci_device_id adm8211_pci_id_table[] = {
/* ADMtek ADM8211 */
{ PCI_DEVICE(0x10B7, 0x6000) }, /* 3Com 3CRSHPW796 */
{ PCI_DEVICE(0x1200, 0x8201) }, /* ? */
{ PCI_DEVICE(0x1317, 0x8201) }, /* ADM8211A */
{ PCI_DEVICE(0x1317, 0x8211) }, /* ADM8211B/C */
{ 0 }
};
static struct ieee80211_rate adm8211_rates[] = {
{ .bitrate = 10, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 20, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 220, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, /* XX ?? */
};
static const struct ieee80211_channel adm8211_channels[] = {
{ .center_freq = 2412},
{ .center_freq = 2417},
{ .center_freq = 2422},
{ .center_freq = 2427},
{ .center_freq = 2432},
{ .center_freq = 2437},
{ .center_freq = 2442},
{ .center_freq = 2447},
{ .center_freq = 2452},
{ .center_freq = 2457},
{ .center_freq = 2462},
{ .center_freq = 2467},
{ .center_freq = 2472},
{ .center_freq = 2484},
};
static void adm8211_eeprom_register_read(struct eeprom_93cx6 *eeprom)
{
struct adm8211_priv *priv = eeprom->data;
u32 reg = ADM8211_CSR_READ(SPR);
eeprom->reg_data_in = reg & ADM8211_SPR_SDI;
eeprom->reg_data_out = reg & ADM8211_SPR_SDO;
eeprom->reg_data_clock = reg & ADM8211_SPR_SCLK;
eeprom->reg_chip_select = reg & ADM8211_SPR_SCS;
}
static void adm8211_eeprom_register_write(struct eeprom_93cx6 *eeprom)
{
struct adm8211_priv *priv = eeprom->data;
u32 reg = 0x4000 | ADM8211_SPR_SRS;
if (eeprom->reg_data_in)
reg |= ADM8211_SPR_SDI;
if (eeprom->reg_data_out)
reg |= ADM8211_SPR_SDO;
if (eeprom->reg_data_clock)
reg |= ADM8211_SPR_SCLK;
if (eeprom->reg_chip_select)
reg |= ADM8211_SPR_SCS;
ADM8211_CSR_WRITE(SPR, reg);
ADM8211_CSR_READ(SPR); /* eeprom_delay */
}
static int adm8211_read_eeprom(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
unsigned int words, i;
struct ieee80211_chan_range chan_range;
u16 cr49;
struct eeprom_93cx6 eeprom = {
.data = priv,
.register_read = adm8211_eeprom_register_read,
.register_write = adm8211_eeprom_register_write
};
if (ADM8211_CSR_READ(CSR_TEST0) & ADM8211_CSR_TEST0_EPTYP) {
/* 256 * 16-bit = 512 bytes */
eeprom.width = PCI_EEPROM_WIDTH_93C66;
words = 256;
} else {
/* 64 * 16-bit = 128 bytes */
eeprom.width = PCI_EEPROM_WIDTH_93C46;
words = 64;
}
priv->eeprom_len = words * 2;
priv->eeprom = kmalloc(priv->eeprom_len, GFP_KERNEL);
if (!priv->eeprom)
return -ENOMEM;
eeprom_93cx6_multiread(&eeprom, 0, (__le16 *)priv->eeprom, words);
cr49 = le16_to_cpu(priv->eeprom->cr49);
priv->rf_type = (cr49 >> 3) & 0x7;
switch (priv->rf_type) {
case ADM8211_TYPE_INTERSIL:
case ADM8211_TYPE_RFMD:
case ADM8211_TYPE_MARVEL:
case ADM8211_TYPE_AIROHA:
case ADM8211_TYPE_ADMTEK:
break;
default:
if (priv->pdev->revision < ADM8211_REV_CA)
priv->rf_type = ADM8211_TYPE_RFMD;
else
priv->rf_type = ADM8211_TYPE_AIROHA;
printk(KERN_WARNING "%s (adm8211): Unknown RFtype %d\n",
pci_name(priv->pdev), (cr49 >> 3) & 0x7);
}
priv->bbp_type = cr49 & 0x7;
switch (priv->bbp_type) {
case ADM8211_TYPE_INTERSIL:
case ADM8211_TYPE_RFMD:
case ADM8211_TYPE_MARVEL:
case ADM8211_TYPE_AIROHA:
case ADM8211_TYPE_ADMTEK:
break;
default:
if (priv->pdev->revision < ADM8211_REV_CA)
priv->bbp_type = ADM8211_TYPE_RFMD;
else
priv->bbp_type = ADM8211_TYPE_ADMTEK;
printk(KERN_WARNING "%s (adm8211): Unknown BBPtype: %d\n",
pci_name(priv->pdev), cr49 & 0x7);
}
if (priv->eeprom->country_code >= ARRAY_SIZE(cranges)) {
printk(KERN_WARNING "%s (adm8211): Invalid country code (%d)\n",
pci_name(priv->pdev), priv->eeprom->country_code);
chan_range = cranges[2];
} else
chan_range = cranges[priv->eeprom->country_code];
printk(KERN_DEBUG "%s (adm8211): Channel range: %d - %d\n",
pci_name(priv->pdev), (int)chan_range.min, (int)chan_range.max);
BUILD_BUG_ON(sizeof(priv->channels) != sizeof(adm8211_channels));
memcpy(priv->channels, adm8211_channels, sizeof(priv->channels));
priv->band.channels = priv->channels;
priv->band.n_channels = ARRAY_SIZE(adm8211_channels);
priv->band.bitrates = adm8211_rates;
priv->band.n_bitrates = ARRAY_SIZE(adm8211_rates);
for (i = 1; i <= ARRAY_SIZE(adm8211_channels); i++)
if (i < chan_range.min || i > chan_range.max)
priv->channels[i - 1].flags |= IEEE80211_CHAN_DISABLED;
switch (priv->eeprom->specific_bbptype) {
case ADM8211_BBP_RFMD3000:
case ADM8211_BBP_RFMD3002:
case ADM8211_BBP_ADM8011:
priv->specific_bbptype = priv->eeprom->specific_bbptype;
break;
default:
if (priv->pdev->revision < ADM8211_REV_CA)
priv->specific_bbptype = ADM8211_BBP_RFMD3000;
else
priv->specific_bbptype = ADM8211_BBP_ADM8011;
printk(KERN_WARNING "%s (adm8211): Unknown specific BBP: %d\n",
pci_name(priv->pdev), priv->eeprom->specific_bbptype);
}
switch (priv->eeprom->specific_rftype) {
case ADM8211_RFMD2948:
case ADM8211_RFMD2958:
case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
case ADM8211_MAX2820:
case ADM8211_AL2210L:
priv->transceiver_type = priv->eeprom->specific_rftype;
break;
default:
if (priv->pdev->revision == ADM8211_REV_BA)
priv->transceiver_type = ADM8211_RFMD2958_RF3000_CONTROL_POWER;
else if (priv->pdev->revision == ADM8211_REV_CA)
priv->transceiver_type = ADM8211_AL2210L;
else if (priv->pdev->revision == ADM8211_REV_AB)
priv->transceiver_type = ADM8211_RFMD2948;
printk(KERN_WARNING "%s (adm8211): Unknown transceiver: %d\n",
pci_name(priv->pdev), priv->eeprom->specific_rftype);
break;
}
printk(KERN_DEBUG "%s (adm8211): RFtype=%d BBPtype=%d Specific BBP=%d "
"Transceiver=%d\n", pci_name(priv->pdev), priv->rf_type,
priv->bbp_type, priv->specific_bbptype, priv->transceiver_type);
return 0;
}
static inline void adm8211_write_sram(struct ieee80211_hw *dev,
u32 addr, u32 data)
{
struct adm8211_priv *priv = dev->priv;
ADM8211_CSR_WRITE(WEPCTL, addr | ADM8211_WEPCTL_TABLE_WR |
(priv->pdev->revision < ADM8211_REV_BA ?
0 : ADM8211_WEPCTL_SEL_WEPTABLE ));
ADM8211_CSR_READ(WEPCTL);
msleep(1);
ADM8211_CSR_WRITE(WESK, data);
ADM8211_CSR_READ(WESK);
msleep(1);
}
static void adm8211_write_sram_bytes(struct ieee80211_hw *dev,
unsigned int addr, u8 *buf,
unsigned int len)
{
struct adm8211_priv *priv = dev->priv;
u32 reg = ADM8211_CSR_READ(WEPCTL);
unsigned int i;
if (priv->pdev->revision < ADM8211_REV_BA) {
for (i = 0; i < len; i += 2) {
u16 val = buf[i] | (buf[i + 1] << 8);
adm8211_write_sram(dev, addr + i / 2, val);
}
} else {
for (i = 0; i < len; i += 4) {
u32 val = (buf[i + 0] << 0 ) | (buf[i + 1] << 8 ) |
(buf[i + 2] << 16) | (buf[i + 3] << 24);
adm8211_write_sram(dev, addr + i / 4, val);
}
}
ADM8211_CSR_WRITE(WEPCTL, reg);
}
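/* Example of the revision split above: writing the four bytes 01 02 03 04
* at SRAM address 0 issues two 16-bit words on pre-BA chips (0x0201 at
* word 0, 0x0403 at word 1), but a single little-endian 32-bit word
* (0x04030201 at word 0) on revision BA and later. */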
static void adm8211_clear_sram(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
u32 reg = ADM8211_CSR_READ(WEPCTL);
unsigned int addr;
for (addr = 0; addr < ADM8211_SRAM_SIZE; addr++)
adm8211_write_sram(dev, addr, 0);
ADM8211_CSR_WRITE(WEPCTL, reg);
}
static int adm8211_get_stats(struct ieee80211_hw *dev,
struct ieee80211_low_level_stats *stats)
{
struct adm8211_priv *priv = dev->priv;
memcpy(stats, &priv->stats, sizeof(*stats));
return 0;
}
static void adm8211_interrupt_tci(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
unsigned int dirty_tx;
spin_lock(&priv->lock);
for (dirty_tx = priv->dirty_tx; priv->cur_tx - dirty_tx; dirty_tx++) {
unsigned int entry = dirty_tx % priv->tx_ring_size;
u32 status = le32_to_cpu(priv->tx_ring[entry].status);
struct ieee80211_tx_info *txi;
struct adm8211_tx_ring_info *info;
struct sk_buff *skb;
if (status & TDES0_CONTROL_OWN ||
!(status & TDES0_CONTROL_DONE))
break;
info = &priv->tx_buffers[entry];
skb = info->skb;
txi = IEEE80211_SKB_CB(skb);
/* TODO: check TDES0_STATUS_TUF and TDES0_STATUS_TRO */
dma_unmap_single(&priv->pdev->dev, info->mapping,
info->skb->len, DMA_TO_DEVICE);
ieee80211_tx_info_clear_status(txi);
skb_pull(skb, sizeof(struct adm8211_tx_hdr));
memcpy(skb_push(skb, info->hdrlen), skb->cb, info->hdrlen);
if (!(txi->flags & IEEE80211_TX_CTL_NO_ACK) &&
!(status & TDES0_STATUS_ES))
txi->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(dev, skb);
info->skb = NULL;
}
if (priv->cur_tx - dirty_tx < priv->tx_ring_size - 2)
ieee80211_wake_queue(dev, 0);
priv->dirty_tx = dirty_tx;
spin_unlock(&priv->lock);
}
static void adm8211_interrupt_rci(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
unsigned int entry = priv->cur_rx % priv->rx_ring_size;
u32 status;
unsigned int pktlen;
struct sk_buff *skb, *newskb;
unsigned int limit = priv->rx_ring_size;
u8 rssi, rate;
while (!(priv->rx_ring[entry].status & cpu_to_le32(RDES0_STATUS_OWN))) {
if (!limit--)
break;
status = le32_to_cpu(priv->rx_ring[entry].status);
rate = (status & RDES0_STATUS_RXDR) >> 12;
rssi = le32_to_cpu(priv->rx_ring[entry].length) &
RDES1_STATUS_RSSI;
pktlen = status & RDES0_STATUS_FL;
if (pktlen > RX_PKT_SIZE) {
if (net_ratelimit())
wiphy_debug(dev->wiphy, "frame too long (%d)\n",
pktlen);
pktlen = RX_PKT_SIZE;
}
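/* Three paths from here: on an uncorrectable RX error the old buffer is
* simply recycled; frames shorter than RX_COPY_BREAK are copied into a
* freshly allocated skb so the mapped DMA buffer can be reused in place;
* anything longer swaps the full-size buffer out for a new one to avoid
* the copy. */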
if (!priv->soft_rx_crc && status & RDES0_STATUS_ES) {
skb = NULL; /* old buffer will be reused */
/* TODO: update RX error stats */
/* TODO: check RDES0_STATUS_CRC*E */
} else if (pktlen < RX_COPY_BREAK) {
skb = dev_alloc_skb(pktlen);
if (skb) {
dma_sync_single_for_cpu(&priv->pdev->dev,
priv->rx_buffers[entry].mapping,
pktlen,
DMA_FROM_DEVICE);
skb_put_data(skb,
skb_tail_pointer(priv->rx_buffers[entry].skb),
pktlen);
dma_sync_single_for_device(&priv->pdev->dev,
priv->rx_buffers[entry].mapping,
RX_PKT_SIZE,
DMA_FROM_DEVICE);
}
} else {
newskb = dev_alloc_skb(RX_PKT_SIZE);
if (newskb) {
skb = priv->rx_buffers[entry].skb;
skb_put(skb, pktlen);
dma_unmap_single(&priv->pdev->dev,
priv->rx_buffers[entry].mapping,
RX_PKT_SIZE, DMA_FROM_DEVICE);
priv->rx_buffers[entry].skb = newskb;
priv->rx_buffers[entry].mapping =
dma_map_single(&priv->pdev->dev,
skb_tail_pointer(newskb),
RX_PKT_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&priv->pdev->dev,
priv->rx_buffers[entry].mapping)) {
priv->rx_buffers[entry].skb = NULL;
dev_kfree_skb(newskb);
skb = NULL;
/* TODO: update rx dropped stats */
}
} else {
skb = NULL;
/* TODO: update rx dropped stats */
}
priv->rx_ring[entry].buffer1 =
cpu_to_le32(priv->rx_buffers[entry].mapping);
}
priv->rx_ring[entry].status = cpu_to_le32(RDES0_STATUS_OWN |
RDES0_STATUS_SQL);
priv->rx_ring[entry].length =
cpu_to_le32(RX_PKT_SIZE |
(entry == priv->rx_ring_size - 1 ?
RDES1_CONTROL_RER : 0));
if (skb) {
struct ieee80211_rx_status rx_status = {0};
if (priv->pdev->revision < ADM8211_REV_CA)
rx_status.signal = rssi;
else
rx_status.signal = 100 - rssi;
rx_status.rate_idx = rate;
rx_status.freq = adm8211_channels[priv->channel - 1].center_freq;
rx_status.band = NL80211_BAND_2GHZ;
memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(dev, skb);
}
entry = (++priv->cur_rx) % priv->rx_ring_size;
}
/* TODO: check LPC and update stats? */
}
static irqreturn_t adm8211_interrupt(int irq, void *dev_id)
{
#define ADM8211_INT(x) \
do { \
if (unlikely(stsr & ADM8211_STSR_ ## x)) \
wiphy_debug(dev->wiphy, "%s\n", #x); \
} while (0)
struct ieee80211_hw *dev = dev_id;
struct adm8211_priv *priv = dev->priv;
u32 stsr = ADM8211_CSR_READ(STSR);
ADM8211_CSR_WRITE(STSR, stsr);
if (stsr == 0xffffffff)
return IRQ_HANDLED;
if (!(stsr & (ADM8211_STSR_NISS | ADM8211_STSR_AISS)))
return IRQ_HANDLED;
if (stsr & ADM8211_STSR_RCI)
adm8211_interrupt_rci(dev);
if (stsr & ADM8211_STSR_TCI)
adm8211_interrupt_tci(dev);
ADM8211_INT(PCF);
ADM8211_INT(BCNTC);
ADM8211_INT(GPINT);
ADM8211_INT(ATIMTC);
ADM8211_INT(TSFTF);
ADM8211_INT(TSCZ);
ADM8211_INT(SQL);
ADM8211_INT(WEPTD);
ADM8211_INT(ATIME);
ADM8211_INT(TEIS);
ADM8211_INT(FBE);
ADM8211_INT(REIS);
ADM8211_INT(GPTT);
ADM8211_INT(RPS);
ADM8211_INT(RDU);
ADM8211_INT(TUF);
ADM8211_INT(TPS);
return IRQ_HANDLED;
#undef ADM8211_INT
}
#define WRITE_SYN(name,v_mask,v_shift,a_mask,a_shift,bits,prewrite,postwrite)\
static void adm8211_rf_write_syn_ ## name (struct ieee80211_hw *dev, \
u16 addr, u32 value) { \
struct adm8211_priv *priv = dev->priv; \
unsigned int i; \
u32 reg, bitbuf; \
\
value &= v_mask; \
addr &= a_mask; \
bitbuf = (value << v_shift) | (addr << a_shift); \
\
ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_1); \
ADM8211_CSR_READ(SYNRF); \
ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_IF_SELECT_0); \
ADM8211_CSR_READ(SYNRF); \
\
if (prewrite) { \
ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_WRITE_SYNDATA_0); \
ADM8211_CSR_READ(SYNRF); \
} \
\
for (i = 0; i <= bits; i++) { \
if (bitbuf & (1 << (bits - i))) \
reg = ADM8211_SYNRF_WRITE_SYNDATA_1; \
else \
reg = ADM8211_SYNRF_WRITE_SYNDATA_0; \
\
ADM8211_CSR_WRITE(SYNRF, reg); \
ADM8211_CSR_READ(SYNRF); \
\
ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_1); \
ADM8211_CSR_READ(SYNRF); \
ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_WRITE_CLOCK_0); \
ADM8211_CSR_READ(SYNRF); \
} \
\
if (postwrite == 1) { \
ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_0); \
ADM8211_CSR_READ(SYNRF); \
} \
if (postwrite == 2) { \
ADM8211_CSR_WRITE(SYNRF, reg | ADM8211_SYNRF_IF_SELECT_1); \
ADM8211_CSR_READ(SYNRF); \
} \
\
ADM8211_CSR_WRITE(SYNRF, 0); \
ADM8211_CSR_READ(SYNRF); \
}
WRITE_SYN(max2820, 0x00FFF, 0, 0x0F, 12, 15, 1, 1)
WRITE_SYN(al2210l, 0xFFFFF, 4, 0x0F, 0, 23, 1, 1)
WRITE_SYN(rfmd2958, 0x3FFFF, 0, 0x1F, 18, 23, 0, 1)
WRITE_SYN(rfmd2948, 0x0FFFF, 4, 0x0F, 0, 21, 0, 2)
#undef WRITE_SYN
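/* The macro above stamps out one bit-banging register writer per
* synthesizer type; each shifts (value << v_shift) | (addr << a_shift) out
* MSB first over the SYNRF data/clock lines. The rfmd2958 variant, for
* example, clocks out 24 bits with the 5-bit register address in bits
* 22..18, so
*
*	adm8211_rf_write_syn_rfmd2958(dev, 0x05, 0x22BD);
*
* programs synthesizer register 5 with the value 0x22BD. */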
static int adm8211_write_bbp(struct ieee80211_hw *dev, u8 addr, u8 data)
{
struct adm8211_priv *priv = dev->priv;
unsigned int timeout;
u32 reg;
timeout = 10;
while (timeout > 0) {
reg = ADM8211_CSR_READ(BBPCTL);
if (!(reg & (ADM8211_BBPCTL_WR | ADM8211_BBPCTL_RD)))
break;
timeout--;
msleep(2);
}
if (timeout == 0) {
wiphy_debug(dev->wiphy,
"adm8211_write_bbp(%d,%d) failed prewrite (reg=0x%08x)\n",
addr, data, reg);
return -ETIMEDOUT;
}
switch (priv->bbp_type) {
case ADM8211_TYPE_INTERSIL:
reg = ADM8211_BBPCTL_MMISEL; /* three wire interface */
break;
case ADM8211_TYPE_RFMD:
reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP |
(0x01 << 18);
break;
case ADM8211_TYPE_ADMTEK:
reg = (0x20 << 24) | ADM8211_BBPCTL_TXCE | ADM8211_BBPCTL_CCAP |
(0x05 << 18);
break;
}
reg |= ADM8211_BBPCTL_WR | (addr << 8) | data;
ADM8211_CSR_WRITE(BBPCTL, reg);
timeout = 10;
while (timeout > 0) {
reg = ADM8211_CSR_READ(BBPCTL);
if (!(reg & ADM8211_BBPCTL_WR))
break;
timeout--;
msleep(2);
}
if (timeout == 0) {
ADM8211_CSR_WRITE(BBPCTL, ADM8211_CSR_READ(BBPCTL) &
~ADM8211_BBPCTL_WR);
wiphy_debug(dev->wiphy,
"adm8211_write_bbp(%d,%d) failed postwrite (reg=0x%08x)\n",
addr, data, reg);
return -ETIMEDOUT;
}
return 0;
}
static int adm8211_rf_set_channel(struct ieee80211_hw *dev, unsigned int chan)
{
static const u32 adm8211_rfmd2958_reg5[] =
{0x22BD, 0x22D2, 0x22E8, 0x22FE, 0x2314, 0x232A, 0x2340,
0x2355, 0x236B, 0x2381, 0x2397, 0x23AD, 0x23C2, 0x23F7};
static const u32 adm8211_rfmd2958_reg6[] =
{0x05D17, 0x3A2E8, 0x2E8BA, 0x22E8B, 0x1745D, 0x0BA2E, 0x00000,
0x345D1, 0x28BA2, 0x1D174, 0x11745, 0x05D17, 0x3A2E8, 0x11745};
struct adm8211_priv *priv = dev->priv;
u8 ant_power = priv->ant_power > 0x3F ?
priv->eeprom->antenna_power[chan - 1] : priv->ant_power;
u8 tx_power = priv->tx_power > 0x3F ?
priv->eeprom->tx_power[chan - 1] : priv->tx_power;
u8 lpf_cutoff = priv->lpf_cutoff == 0xFF ?
priv->eeprom->lpf_cutoff[chan - 1] : priv->lpf_cutoff;
u8 lnags_thresh = priv->lnags_threshold == 0xFF ?
priv->eeprom->lnags_threshold[chan - 1] : priv->lnags_threshold;
u32 reg;
ADM8211_IDLE();
/* Program synthesizer to new channel */
switch (priv->transceiver_type) {
case ADM8211_RFMD2958:
case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
adm8211_rf_write_syn_rfmd2958(dev, 0x00, 0x04007);
adm8211_rf_write_syn_rfmd2958(dev, 0x02, 0x00033);
adm8211_rf_write_syn_rfmd2958(dev, 0x05,
adm8211_rfmd2958_reg5[chan - 1]);
adm8211_rf_write_syn_rfmd2958(dev, 0x06,
adm8211_rfmd2958_reg6[chan - 1]);
break;
case ADM8211_RFMD2948:
adm8211_rf_write_syn_rfmd2948(dev, SI4126_MAIN_CONF,
SI4126_MAIN_XINDIV2);
adm8211_rf_write_syn_rfmd2948(dev, SI4126_POWERDOWN,
SI4126_POWERDOWN_PDIB |
SI4126_POWERDOWN_PDRB);
adm8211_rf_write_syn_rfmd2948(dev, SI4126_PHASE_DET_GAIN, 0);
adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_N_DIV,
(chan == 14 ?
2110 : (2033 + (chan * 5))));
adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_N_DIV, 1496);
adm8211_rf_write_syn_rfmd2948(dev, SI4126_RF2_R_DIV, 44);
adm8211_rf_write_syn_rfmd2948(dev, SI4126_IF_R_DIV, 44);
break;
case ADM8211_MAX2820:
adm8211_rf_write_syn_max2820(dev, 0x3,
(chan == 14 ? 0x054 : (0x7 + (chan * 5))));
break;
case ADM8211_AL2210L:
adm8211_rf_write_syn_al2210l(dev, 0x0,
(chan == 14 ? 0x229B4 : (0x22967 + (chan * 5))));
break;
default:
wiphy_debug(dev->wiphy, "unsupported transceiver type %d\n",
priv->transceiver_type);
break;
}
/* write BBP regs */
if (priv->bbp_type == ADM8211_TYPE_RFMD) {
/* SMC 2635W specific? adm8211b doesn't use the 2948 though.. */
/* TODO: remove if SMC 2635W doesn't need this */
if (priv->transceiver_type == ADM8211_RFMD2948) {
reg = ADM8211_CSR_READ(GPIO);
reg &= 0xfffc0000;
reg |= ADM8211_CSR_GPIO_EN0;
if (chan != 14)
reg |= ADM8211_CSR_GPIO_O0;
ADM8211_CSR_WRITE(GPIO, reg);
}
if (priv->transceiver_type == ADM8211_RFMD2958) {
/* set PCNT2 */
adm8211_rf_write_syn_rfmd2958(dev, 0x0B, 0x07100);
/* set PCNT1 P_DESIRED/MID_BIAS */
reg = le16_to_cpu(priv->eeprom->cr49);
reg >>= 13;
reg <<= 15;
reg |= ant_power << 9;
adm8211_rf_write_syn_rfmd2958(dev, 0x0A, reg);
/* set TXRX TX_GAIN */
adm8211_rf_write_syn_rfmd2958(dev, 0x09, 0x00050 |
(priv->pdev->revision < ADM8211_REV_CA ? tx_power : 0));
} else {
reg = ADM8211_CSR_READ(PLCPHD);
reg &= 0xff00ffff;
reg |= tx_power << 18;
ADM8211_CSR_WRITE(PLCPHD, reg);
}
ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF |
ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST);
ADM8211_CSR_READ(SYNRF);
msleep(30);
/* RF3000 BBP */
if (priv->transceiver_type != ADM8211_RFMD2958)
adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT,
tx_power<<2);
adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, lpf_cutoff);
adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, lnags_thresh);
adm8211_write_bbp(dev, 0x1c, priv->pdev->revision == ADM8211_REV_BA ?
priv->eeprom->cr28 : 0);
adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29);
ADM8211_CSR_WRITE(SYNRF, 0);
/* Nothing to do for ADMtek BBP */
} else if (priv->bbp_type != ADM8211_TYPE_ADMTEK)
wiphy_debug(dev->wiphy, "unsupported BBP type %d\n",
priv->bbp_type);
ADM8211_RESTORE();
/* update current channel for adhoc (and maybe AP mode) */
reg = ADM8211_CSR_READ(CAP0);
reg &= ~0xF;
reg |= chan;
ADM8211_CSR_WRITE(CAP0, reg);
return 0;
}
static void adm8211_update_mode(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
ADM8211_IDLE();
priv->soft_rx_crc = 0;
switch (priv->mode) {
case NL80211_IFTYPE_STATION:
priv->nar &= ~(ADM8211_NAR_PR | ADM8211_NAR_EA);
priv->nar |= ADM8211_NAR_ST | ADM8211_NAR_SR;
break;
case NL80211_IFTYPE_ADHOC:
priv->nar &= ~ADM8211_NAR_PR;
priv->nar |= ADM8211_NAR_EA | ADM8211_NAR_ST | ADM8211_NAR_SR;
/* don't trust the error bits on rev 0x20 and up in adhoc */
if (priv->pdev->revision >= ADM8211_REV_BA)
priv->soft_rx_crc = 1;
break;
case NL80211_IFTYPE_MONITOR:
priv->nar &= ~(ADM8211_NAR_EA | ADM8211_NAR_ST);
priv->nar |= ADM8211_NAR_PR | ADM8211_NAR_SR;
break;
}
ADM8211_RESTORE();
}
static void adm8211_hw_init_syn(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
switch (priv->transceiver_type) {
case ADM8211_RFMD2958:
case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
/* comments taken from ADMtek vendor driver */
/* Reset RF2958 after power on */
adm8211_rf_write_syn_rfmd2958(dev, 0x1F, 0x00000);
/* Initialize RF VCO Core Bias to maximum */
adm8211_rf_write_syn_rfmd2958(dev, 0x0C, 0x3001F);
/* Initialize IF PLL */
adm8211_rf_write_syn_rfmd2958(dev, 0x01, 0x29C03);
/* Initialize IF PLL Coarse Tuning */
adm8211_rf_write_syn_rfmd2958(dev, 0x03, 0x1FF6F);
/* Initialize RF PLL */
adm8211_rf_write_syn_rfmd2958(dev, 0x04, 0x29403);
/* Initialize RF PLL Coarse Tuning */
adm8211_rf_write_syn_rfmd2958(dev, 0x07, 0x1456F);
/* Initialize TX gain and filter BW (R9) */
adm8211_rf_write_syn_rfmd2958(dev, 0x09,
(priv->transceiver_type == ADM8211_RFMD2958 ?
0x10050 : 0x00050));
/* Initialize CAL register */
adm8211_rf_write_syn_rfmd2958(dev, 0x08, 0x3FFF8);
break;
case ADM8211_MAX2820:
adm8211_rf_write_syn_max2820(dev, 0x1, 0x01E);
adm8211_rf_write_syn_max2820(dev, 0x2, 0x001);
adm8211_rf_write_syn_max2820(dev, 0x3, 0x054);
adm8211_rf_write_syn_max2820(dev, 0x4, 0x310);
adm8211_rf_write_syn_max2820(dev, 0x5, 0x000);
break;
case ADM8211_AL2210L:
adm8211_rf_write_syn_al2210l(dev, 0x0, 0x0196C);
adm8211_rf_write_syn_al2210l(dev, 0x1, 0x007CB);
adm8211_rf_write_syn_al2210l(dev, 0x2, 0x3582F);
adm8211_rf_write_syn_al2210l(dev, 0x3, 0x010A9);
adm8211_rf_write_syn_al2210l(dev, 0x4, 0x77280);
adm8211_rf_write_syn_al2210l(dev, 0x5, 0x45641);
adm8211_rf_write_syn_al2210l(dev, 0x6, 0xEA130);
adm8211_rf_write_syn_al2210l(dev, 0x7, 0x80000);
adm8211_rf_write_syn_al2210l(dev, 0x8, 0x7850F);
adm8211_rf_write_syn_al2210l(dev, 0x9, 0xF900C);
adm8211_rf_write_syn_al2210l(dev, 0xA, 0x00000);
adm8211_rf_write_syn_al2210l(dev, 0xB, 0x00000);
break;
case ADM8211_RFMD2948:
default:
break;
}
}
static int adm8211_hw_init_bbp(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
u32 reg;
/* write addresses */
if (priv->bbp_type == ADM8211_TYPE_INTERSIL) {
ADM8211_CSR_WRITE(MMIWA, 0x100E0C0A);
ADM8211_CSR_WRITE(MMIRD0, 0x00007C7E);
ADM8211_CSR_WRITE(MMIRD1, 0x00100000);
} else if (priv->bbp_type == ADM8211_TYPE_RFMD ||
priv->bbp_type == ADM8211_TYPE_ADMTEK) {
/* check specific BBP type */
switch (priv->specific_bbptype) {
case ADM8211_BBP_RFMD3000:
case ADM8211_BBP_RFMD3002:
ADM8211_CSR_WRITE(MMIWA, 0x00009101);
ADM8211_CSR_WRITE(MMIRD0, 0x00000301);
break;
case ADM8211_BBP_ADM8011:
ADM8211_CSR_WRITE(MMIWA, 0x00008903);
ADM8211_CSR_WRITE(MMIRD0, 0x00001716);
reg = ADM8211_CSR_READ(BBPCTL);
reg &= ~ADM8211_BBPCTL_TYPE;
reg |= 0x5 << 18;
ADM8211_CSR_WRITE(BBPCTL, reg);
break;
}
switch (priv->pdev->revision) {
case ADM8211_REV_CA:
if (priv->transceiver_type == ADM8211_RFMD2958 ||
priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER ||
priv->transceiver_type == ADM8211_RFMD2948)
ADM8211_CSR_WRITE(SYNCTL, 0x1 << 22);
else if (priv->transceiver_type == ADM8211_MAX2820 ||
priv->transceiver_type == ADM8211_AL2210L)
ADM8211_CSR_WRITE(SYNCTL, 0x3 << 22);
break;
case ADM8211_REV_BA:
reg = ADM8211_CSR_READ(MMIRD1);
reg &= 0x0000FFFF;
reg |= 0x7e100000;
ADM8211_CSR_WRITE(MMIRD1, reg);
break;
case ADM8211_REV_AB:
case ADM8211_REV_AF:
default:
ADM8211_CSR_WRITE(MMIRD1, 0x7e100000);
break;
}
/* For RFMD */
ADM8211_CSR_WRITE(MACTEST, 0x800);
}
adm8211_hw_init_syn(dev);
/* Set RF Power control IF pin to PE1+PHYRST# */
ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_SELRF |
ADM8211_SYNRF_PE1 | ADM8211_SYNRF_PHYRST);
ADM8211_CSR_READ(SYNRF);
msleep(20);
/* write BBP regs */
if (priv->bbp_type == ADM8211_TYPE_RFMD) {
/* RF3000 BBP */
/* another set:
* 11: c8
* 14: 14
* 15: 50 (chan 1..13; chan 14: d0)
* 1c: 00
* 1d: 84
*/
adm8211_write_bbp(dev, RF3000_CCA_CTRL, 0x80);
/* antenna selection: diversity */
adm8211_write_bbp(dev, RF3000_DIVERSITY__RSSI, 0x80);
adm8211_write_bbp(dev, RF3000_TX_VAR_GAIN__TX_LEN_EXT, 0x74);
adm8211_write_bbp(dev, RF3000_LOW_GAIN_CALIB, 0x38);
adm8211_write_bbp(dev, RF3000_HIGH_GAIN_CALIB, 0x40);
if (priv->eeprom->major_version < 2) {
adm8211_write_bbp(dev, 0x1c, 0x00);
adm8211_write_bbp(dev, 0x1d, 0x80);
} else {
if (priv->pdev->revision == ADM8211_REV_BA)
adm8211_write_bbp(dev, 0x1c, priv->eeprom->cr28);
else
adm8211_write_bbp(dev, 0x1c, 0x00);
adm8211_write_bbp(dev, 0x1d, priv->eeprom->cr29);
}
} else if (priv->bbp_type == ADM8211_TYPE_ADMTEK) {
/* reset baseband */
adm8211_write_bbp(dev, 0x00, 0xFF);
/* antenna selection: diversity */
adm8211_write_bbp(dev, 0x07, 0x0A);
/* TODO: find documentation for this */
switch (priv->transceiver_type) {
case ADM8211_RFMD2958:
case ADM8211_RFMD2958_RF3000_CONTROL_POWER:
adm8211_write_bbp(dev, 0x00, 0x00);
adm8211_write_bbp(dev, 0x01, 0x00);
adm8211_write_bbp(dev, 0x02, 0x00);
adm8211_write_bbp(dev, 0x03, 0x00);
adm8211_write_bbp(dev, 0x06, 0x0f);
adm8211_write_bbp(dev, 0x09, 0x00);
adm8211_write_bbp(dev, 0x0a, 0x00);
adm8211_write_bbp(dev, 0x0b, 0x00);
adm8211_write_bbp(dev, 0x0c, 0x00);
adm8211_write_bbp(dev, 0x0f, 0xAA);
adm8211_write_bbp(dev, 0x10, 0x8c);
adm8211_write_bbp(dev, 0x11, 0x43);
adm8211_write_bbp(dev, 0x18, 0x40);
adm8211_write_bbp(dev, 0x20, 0x23);
adm8211_write_bbp(dev, 0x21, 0x02);
adm8211_write_bbp(dev, 0x22, 0x28);
adm8211_write_bbp(dev, 0x23, 0x30);
adm8211_write_bbp(dev, 0x24, 0x2d);
adm8211_write_bbp(dev, 0x28, 0x35);
adm8211_write_bbp(dev, 0x2a, 0x8c);
adm8211_write_bbp(dev, 0x2b, 0x81);
adm8211_write_bbp(dev, 0x2c, 0x44);
adm8211_write_bbp(dev, 0x2d, 0x0A);
adm8211_write_bbp(dev, 0x29, 0x40);
adm8211_write_bbp(dev, 0x60, 0x08);
adm8211_write_bbp(dev, 0x64, 0x01);
break;
case ADM8211_MAX2820:
adm8211_write_bbp(dev, 0x00, 0x00);
adm8211_write_bbp(dev, 0x01, 0x00);
adm8211_write_bbp(dev, 0x02, 0x00);
adm8211_write_bbp(dev, 0x03, 0x00);
adm8211_write_bbp(dev, 0x06, 0x0f);
adm8211_write_bbp(dev, 0x09, 0x05);
adm8211_write_bbp(dev, 0x0a, 0x02);
adm8211_write_bbp(dev, 0x0b, 0x00);
adm8211_write_bbp(dev, 0x0c, 0x0f);
adm8211_write_bbp(dev, 0x0f, 0x55);
adm8211_write_bbp(dev, 0x10, 0x8d);
adm8211_write_bbp(dev, 0x11, 0x43);
adm8211_write_bbp(dev, 0x18, 0x4a);
adm8211_write_bbp(dev, 0x20, 0x20);
adm8211_write_bbp(dev, 0x21, 0x02);
adm8211_write_bbp(dev, 0x22, 0x23);
adm8211_write_bbp(dev, 0x23, 0x30);
adm8211_write_bbp(dev, 0x24, 0x2d);
adm8211_write_bbp(dev, 0x2a, 0x8c);
adm8211_write_bbp(dev, 0x2b, 0x81);
adm8211_write_bbp(dev, 0x2c, 0x44);
adm8211_write_bbp(dev, 0x29, 0x4a);
adm8211_write_bbp(dev, 0x60, 0x2b);
adm8211_write_bbp(dev, 0x64, 0x01);
break;
case ADM8211_AL2210L:
adm8211_write_bbp(dev, 0x00, 0x00);
adm8211_write_bbp(dev, 0x01, 0x00);
adm8211_write_bbp(dev, 0x02, 0x00);
adm8211_write_bbp(dev, 0x03, 0x00);
adm8211_write_bbp(dev, 0x06, 0x0f);
adm8211_write_bbp(dev, 0x07, 0x05);
adm8211_write_bbp(dev, 0x08, 0x03);
adm8211_write_bbp(dev, 0x09, 0x00);
adm8211_write_bbp(dev, 0x0a, 0x00);
adm8211_write_bbp(dev, 0x0b, 0x00);
adm8211_write_bbp(dev, 0x0c, 0x10);
adm8211_write_bbp(dev, 0x0f, 0x55);
adm8211_write_bbp(dev, 0x10, 0x8d);
adm8211_write_bbp(dev, 0x11, 0x43);
adm8211_write_bbp(dev, 0x18, 0x4a);
adm8211_write_bbp(dev, 0x20, 0x20);
adm8211_write_bbp(dev, 0x21, 0x02);
adm8211_write_bbp(dev, 0x22, 0x23);
adm8211_write_bbp(dev, 0x23, 0x30);
adm8211_write_bbp(dev, 0x24, 0x2d);
adm8211_write_bbp(dev, 0x2a, 0xaa);
adm8211_write_bbp(dev, 0x2b, 0x81);
adm8211_write_bbp(dev, 0x2c, 0x44);
adm8211_write_bbp(dev, 0x29, 0xfa);
adm8211_write_bbp(dev, 0x60, 0x2d);
adm8211_write_bbp(dev, 0x64, 0x01);
break;
case ADM8211_RFMD2948:
break;
default:
wiphy_debug(dev->wiphy, "unsupported transceiver %d\n",
priv->transceiver_type);
break;
}
} else
wiphy_debug(dev->wiphy, "unsupported BBP %d\n", priv->bbp_type);
ADM8211_CSR_WRITE(SYNRF, 0);
/* Set RF CAL control source to MAC control */
reg = ADM8211_CSR_READ(SYNCTL);
reg |= ADM8211_SYNCTL_SELCAL;
ADM8211_CSR_WRITE(SYNCTL, reg);
return 0;
}
/* configures hw beacons/probe responses */
static int adm8211_set_rate(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
u32 reg;
int i = 0;
u8 rate_buf[12] = {0};
/* write supported rates */
if (priv->pdev->revision != ADM8211_REV_BA) {
rate_buf[0] = ARRAY_SIZE(adm8211_rates);
for (i = 0; i < ARRAY_SIZE(adm8211_rates); i++)
rate_buf[i + 1] = (adm8211_rates[i].bitrate / 5) | 0x80;
} else {
/* workaround for rev BA specific bug */
rate_buf[0] = 0x04;
rate_buf[1] = 0x82;
rate_buf[2] = 0x04;
rate_buf[3] = 0x0b;
rate_buf[4] = 0x16;
}
adm8211_write_sram_bytes(dev, ADM8211_SRAM_SUPP_RATE, rate_buf,
ARRAY_SIZE(adm8211_rates) + 1);
reg = ADM8211_CSR_READ(PLCPHD) & 0x00FFFFFF; /* keep bits 0-23 */
reg |= 1 << 15; /* short preamble */
reg |= 110 << 24;
ADM8211_CSR_WRITE(PLCPHD, reg);
/* MTMLT = 512 TU (max TX MSDU lifetime)
* BCNTSIG = plcp_signal (beacon, probe resp, and atim TX rate)
* SRTYLIM = 224 (short retry limit, TX header value is default) */
ADM8211_CSR_WRITE(TXLMT, (512 << 16) | (110 << 8) | (224 << 0));
return 0;
}
static void adm8211_hw_init(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
u32 reg;
u8 cline;
reg = ADM8211_CSR_READ(PAR);
reg |= ADM8211_PAR_MRLE | ADM8211_PAR_MRME;
reg &= ~(ADM8211_PAR_BAR | ADM8211_PAR_CAL);
if (!pci_set_mwi(priv->pdev)) {
reg |= 0x1 << 24;
pci_read_config_byte(priv->pdev, PCI_CACHE_LINE_SIZE, &cline);
switch (cline) {
case 0x8:
reg |= (0x1 << 14);
break;
case 0x10:
reg |= (0x2 << 14);
break;
case 0x20:
reg |= (0x3 << 14);
break;
default:
reg |= (0x0 << 14);
break;
}
}
ADM8211_CSR_WRITE(PAR, reg);
reg = ADM8211_CSR_READ(CSR_TEST1);
reg &= ~(0xF << 28);
reg |= (1 << 28) | (1 << 31);
ADM8211_CSR_WRITE(CSR_TEST1, reg);
/* lose link after 4 lost beacons */
reg = (0x04 << 21) | ADM8211_WCSR_TSFTWE | ADM8211_WCSR_LSOE;
ADM8211_CSR_WRITE(WCSR, reg);
/* Disable APM, enable receive FIFO threshold, and set drain receive
* threshold to store-and-forward */
reg = ADM8211_CSR_READ(CMDR);
reg &= ~(ADM8211_CMDR_APM | ADM8211_CMDR_DRT);
reg |= ADM8211_CMDR_RTE | ADM8211_CMDR_DRT_SF;
ADM8211_CSR_WRITE(CMDR, reg);
adm8211_set_rate(dev);
/* 4-bit values:
* PWR1UP = 8 * 2 ms
* PWR0PAPE = 8 us or 5 us
* PWR1PAPE = 1 us or 3 us
* PWR0TRSW = 5 us
* PWR1TRSW = 12 us
* PWR0PE2 = 13 us
* PWR1PE2 = 1 us
* PWR0TXPE = 8 or 6 */
if (priv->pdev->revision < ADM8211_REV_CA)
ADM8211_CSR_WRITE(TOFS2, 0x8815cd18);
else
ADM8211_CSR_WRITE(TOFS2, 0x8535cd16);
/* Enable store and forward for transmit */
priv->nar = ADM8211_NAR_SF | ADM8211_NAR_PB;
ADM8211_CSR_WRITE(NAR, priv->nar);
/* Reset RF */
ADM8211_CSR_WRITE(SYNRF, ADM8211_SYNRF_RADIO);
ADM8211_CSR_READ(SYNRF);
msleep(10);
ADM8211_CSR_WRITE(SYNRF, 0);
ADM8211_CSR_READ(SYNRF);
msleep(5);
/* Set CFP Max Duration to 0x10 TU */
reg = ADM8211_CSR_READ(CFPP);
reg &= ~(0xffff << 8);
reg |= 0x0010 << 8;
ADM8211_CSR_WRITE(CFPP, reg);
/* USCNT = 0x16 (number of system clocks, 22 MHz, in 1 us)
* TUCNT = 0x3ff - TU counter, 1024 us */
ADM8211_CSR_WRITE(TOFS0, (0x16 << 24) | 0x3ff);
/* SLOT=20 us, SIFS=110 cycles of 22 MHz (5 us),
* DIFS=50 us, EIFS=100 us */
if (priv->pdev->revision < ADM8211_REV_CA)
ADM8211_CSR_WRITE(IFST, (20 << 23) | (110 << 15) |
(50 << 9) | 100);
else
ADM8211_CSR_WRITE(IFST, (20 << 23) | (24 << 15) |
(50 << 9) | 100);
/* PCNT = 1 (MAC idle time awake/sleep, unit S)
* RMRD = 2346 * 8 + 1 us (max RX duration) */
ADM8211_CSR_WRITE(RMD, (1 << 16) | 18769);
/* MART=65535 us, MIRT=256 us, TSFTOFST=0 us */
ADM8211_CSR_WRITE(RSPT, 0xffffff00);
/* Initialize BBP (and SYN) */
adm8211_hw_init_bbp(dev);
/* make sure interrupts are off */
ADM8211_CSR_WRITE(IER, 0);
/* ACK interrupts */
ADM8211_CSR_WRITE(STSR, ADM8211_CSR_READ(STSR));
/* Setup WEP (turns it off for now) */
reg = ADM8211_CSR_READ(MACTEST);
reg &= ~(7 << 20);
ADM8211_CSR_WRITE(MACTEST, reg);
reg = ADM8211_CSR_READ(WEPCTL);
reg &= ~ADM8211_WEPCTL_WEPENABLE;
reg |= ADM8211_WEPCTL_WEPRXBYP;
ADM8211_CSR_WRITE(WEPCTL, reg);
/* Clear the missed-packet counter. */
ADM8211_CSR_READ(LPC);
}
static int adm8211_hw_reset(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
u32 reg, tmp;
int timeout = 100;
/* Power-on issue */
/* TODO: check if this is necessary */
ADM8211_CSR_WRITE(FRCTL, 0);
/* Reset the chip */
tmp = ADM8211_CSR_READ(PAR);
ADM8211_CSR_WRITE(PAR, ADM8211_PAR_SWR);
while ((ADM8211_CSR_READ(PAR) & ADM8211_PAR_SWR) && timeout--)
msleep(50);
if (timeout <= 0)
return -ETIMEDOUT;
ADM8211_CSR_WRITE(PAR, tmp);
if (priv->pdev->revision == ADM8211_REV_BA &&
(priv->transceiver_type == ADM8211_RFMD2958_RF3000_CONTROL_POWER ||
priv->transceiver_type == ADM8211_RFMD2958)) {
reg = ADM8211_CSR_READ(CSR_TEST1);
reg |= (1 << 4) | (1 << 5);
ADM8211_CSR_WRITE(CSR_TEST1, reg);
} else if (priv->pdev->revision == ADM8211_REV_CA) {
reg = ADM8211_CSR_READ(CSR_TEST1);
reg &= ~((1 << 4) | (1 << 5));
ADM8211_CSR_WRITE(CSR_TEST1, reg);
}
ADM8211_CSR_WRITE(FRCTL, 0);
reg = ADM8211_CSR_READ(CSR_TEST0);
reg |= ADM8211_CSR_TEST0_EPRLD; /* EEPROM Recall */
ADM8211_CSR_WRITE(CSR_TEST0, reg);
adm8211_clear_sram(dev);
return 0;
}
static u64 adm8211_get_tsft(struct ieee80211_hw *dev,
struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
u32 tsftl;
u64 tsft;
tsftl = ADM8211_CSR_READ(TSFTL);
tsft = ADM8211_CSR_READ(TSFTH);
tsft <<= 32;
tsft |= tsftl;
return tsft;
}
static void adm8211_set_interval(struct ieee80211_hw *dev,
unsigned short bi, unsigned short li)
{
struct adm8211_priv *priv = dev->priv;
u32 reg;
/* BP (beacon interval) = data->beacon_interval
* LI (listen interval) = data->listen_interval (in beacon intervals) */
reg = (bi << 16) | li;
ADM8211_CSR_WRITE(BPLI, reg);
}
static void adm8211_set_bssid(struct ieee80211_hw *dev, const u8 *bssid)
{
struct adm8211_priv *priv = dev->priv;
u32 reg;
ADM8211_CSR_WRITE(BSSID0, le32_to_cpu(*(__le32 *)bssid));
reg = ADM8211_CSR_READ(ABDA1);
reg &= 0x0000ffff;
reg |= (bssid[4] << 16) | (bssid[5] << 24);
ADM8211_CSR_WRITE(ABDA1, reg);
}
static int adm8211_config(struct ieee80211_hw *dev, u32 changed)
{
struct adm8211_priv *priv = dev->priv;
struct ieee80211_conf *conf = &dev->conf;
int channel =
ieee80211_frequency_to_channel(conf->chandef.chan->center_freq);
if (channel != priv->channel) {
priv->channel = channel;
adm8211_rf_set_channel(dev, priv->channel);
}
return 0;
}
static void adm8211_bss_info_changed(struct ieee80211_hw *dev,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
u64 changes)
{
struct adm8211_priv *priv = dev->priv;
if (!(changes & BSS_CHANGED_BSSID))
return;
if (!ether_addr_equal(conf->bssid, priv->bssid)) {
adm8211_set_bssid(dev, conf->bssid);
memcpy(priv->bssid, conf->bssid, ETH_ALEN);
}
}
static u64 adm8211_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
unsigned int bit_nr;
u32 mc_filter[2];
struct netdev_hw_addr *ha;
mc_filter[1] = mc_filter[0] = 0;
netdev_hw_addr_list_for_each(ha, mc_list) {
bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
bit_nr &= 0x3F;
mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
}
return mc_filter[0] | ((u64)(mc_filter[1]) << 32);
}
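/* The hash above is the classic tulip-style multicast filter: the top six
* bits of the Ethernet CRC of each address select one of 64 bits, with
* bits 0..31 landing in MAR0 and bits 32..63 in MAR1. An address whose CRC
* yields bit_nr 33, for example, sets bit 1 of MAR1. */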
static void adm8211_configure_filter(struct ieee80211_hw *dev,
unsigned int changed_flags,
unsigned int *total_flags,
u64 multicast)
{
static const u8 bcast[ETH_ALEN] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
struct adm8211_priv *priv = dev->priv;
unsigned int new_flags;
u32 mc_filter[2];
mc_filter[0] = multicast;
mc_filter[1] = multicast >> 32;
new_flags = 0;
if (*total_flags & FIF_ALLMULTI || multicast == ~(0ULL)) {
new_flags |= FIF_ALLMULTI;
priv->nar &= ~ADM8211_NAR_PR;
priv->nar |= ADM8211_NAR_MM;
mc_filter[1] = mc_filter[0] = ~0;
} else {
priv->nar &= ~(ADM8211_NAR_MM | ADM8211_NAR_PR);
}
ADM8211_IDLE_RX();
ADM8211_CSR_WRITE(MAR0, mc_filter[0]);
ADM8211_CSR_WRITE(MAR1, mc_filter[1]);
ADM8211_CSR_READ(NAR);
if (priv->nar & ADM8211_NAR_PR)
ieee80211_hw_set(dev, RX_INCLUDES_FCS);
else
__clear_bit(IEEE80211_HW_RX_INCLUDES_FCS, dev->flags);
if (*total_flags & FIF_BCN_PRBRESP_PROMISC)
adm8211_set_bssid(dev, bcast);
else
adm8211_set_bssid(dev, priv->bssid);
ADM8211_RESTORE();
*total_flags = new_flags;
}
static int adm8211_add_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
if (priv->mode != NL80211_IFTYPE_MONITOR)
return -EOPNOTSUPP;
switch (vif->type) {
case NL80211_IFTYPE_STATION:
priv->mode = vif->type;
break;
default:
return -EOPNOTSUPP;
}
ADM8211_IDLE();
ADM8211_CSR_WRITE(PAR0, le32_to_cpu(*(__le32 *)vif->addr));
ADM8211_CSR_WRITE(PAR1, le16_to_cpu(*(__le16 *)(vif->addr + 4)));
adm8211_update_mode(dev);
ADM8211_RESTORE();
return 0;
}
static void adm8211_remove_interface(struct ieee80211_hw *dev,
struct ieee80211_vif *vif)
{
struct adm8211_priv *priv = dev->priv;
priv->mode = NL80211_IFTYPE_MONITOR;
}
static int adm8211_init_rings(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
struct adm8211_desc *desc = NULL;
struct adm8211_rx_ring_info *rx_info;
struct adm8211_tx_ring_info *tx_info;
unsigned int i;
for (i = 0; i < priv->rx_ring_size; i++) {
desc = &priv->rx_ring[i];
desc->status = 0;
desc->length = cpu_to_le32(RX_PKT_SIZE);
priv->rx_buffers[i].skb = NULL;
}
/* Mark the end of RX ring; hw returns to base address after this
* descriptor */
desc->length |= cpu_to_le32(RDES1_CONTROL_RER);
for (i = 0; i < priv->rx_ring_size; i++) {
desc = &priv->rx_ring[i];
rx_info = &priv->rx_buffers[i];
rx_info->skb = dev_alloc_skb(RX_PKT_SIZE);
if (rx_info->skb == NULL)
break;
rx_info->mapping = dma_map_single(&priv->pdev->dev,
skb_tail_pointer(rx_info->skb),
RX_PKT_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&priv->pdev->dev, rx_info->mapping)) {
dev_kfree_skb(rx_info->skb);
rx_info->skb = NULL;
break;
}
desc->buffer1 = cpu_to_le32(rx_info->mapping);
desc->status = cpu_to_le32(RDES0_STATUS_OWN | RDES0_STATUS_SQL);
}
/* Setup TX ring. TX buffers descriptors will be filled in as needed */
for (i = 0; i < priv->tx_ring_size; i++) {
desc = &priv->tx_ring[i];
tx_info = &priv->tx_buffers[i];
tx_info->skb = NULL;
tx_info->mapping = 0;
desc->status = 0;
}
desc->length = cpu_to_le32(TDES1_CONTROL_TER);
priv->cur_rx = priv->cur_tx = priv->dirty_tx = 0;
ADM8211_CSR_WRITE(RDB, priv->rx_ring_dma);
ADM8211_CSR_WRITE(TDBD, priv->tx_ring_dma);
return 0;
}
static void adm8211_free_rings(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
unsigned int i;
for (i = 0; i < priv->rx_ring_size; i++) {
if (!priv->rx_buffers[i].skb)
continue;
dma_unmap_single(&priv->pdev->dev,
priv->rx_buffers[i].mapping, RX_PKT_SIZE,
DMA_FROM_DEVICE);
dev_kfree_skb(priv->rx_buffers[i].skb);
}
for (i = 0; i < priv->tx_ring_size; i++) {
if (!priv->tx_buffers[i].skb)
continue;
dma_unmap_single(&priv->pdev->dev,
priv->tx_buffers[i].mapping,
priv->tx_buffers[i].skb->len, DMA_TO_DEVICE);
dev_kfree_skb(priv->tx_buffers[i].skb);
}
}
static int adm8211_start(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
int retval;
/* Power up MAC and RF chips */
retval = adm8211_hw_reset(dev);
if (retval) {
wiphy_err(dev->wiphy, "hardware reset failed\n");
goto fail;
}
retval = adm8211_init_rings(dev);
if (retval) {
wiphy_err(dev->wiphy, "failed to initialize rings\n");
goto fail;
}
/* Init hardware */
adm8211_hw_init(dev);
adm8211_rf_set_channel(dev, priv->channel);
retval = request_irq(priv->pdev->irq, adm8211_interrupt,
IRQF_SHARED, "adm8211", dev);
if (retval) {
wiphy_err(dev->wiphy, "failed to register IRQ handler\n");
goto fail;
}
ADM8211_CSR_WRITE(IER, ADM8211_IER_NIE | ADM8211_IER_AIE |
ADM8211_IER_RCIE | ADM8211_IER_TCIE |
ADM8211_IER_TDUIE | ADM8211_IER_GPTIE);
priv->mode = NL80211_IFTYPE_MONITOR;
adm8211_update_mode(dev);
ADM8211_CSR_WRITE(RDR, 0);
adm8211_set_interval(dev, 100, 10);
return 0;
fail:
return retval;
}
static void adm8211_stop(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
priv->mode = NL80211_IFTYPE_UNSPECIFIED;
priv->nar = 0;
ADM8211_CSR_WRITE(NAR, 0);
ADM8211_CSR_WRITE(IER, 0);
ADM8211_CSR_READ(NAR);
free_irq(priv->pdev->irq, dev);
adm8211_free_rings(dev);
}
static void adm8211_calc_durations(int *dur, int *plcp, size_t payload_len, int len,
int plcp_signal, int short_preamble)
{
/* Alternative calculation from NetBSD: */
/* IEEE 802.11b durations for DSSS PHY in microseconds */
#define IEEE80211_DUR_DS_LONG_PREAMBLE 144
#define IEEE80211_DUR_DS_SHORT_PREAMBLE 72
#define IEEE80211_DUR_DS_FAST_PLCPHDR 24
#define IEEE80211_DUR_DS_SLOW_PLCPHDR 48
#define IEEE80211_DUR_DS_SLOW_ACK 112
#define IEEE80211_DUR_DS_FAST_ACK 56
#define IEEE80211_DUR_DS_SLOW_CTS 112
#define IEEE80211_DUR_DS_FAST_CTS 56
#define IEEE80211_DUR_DS_SLOT 20
#define IEEE80211_DUR_DS_SIFS 10
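/*
 * plcp_signal is the PLCP SIGNAL field, i.e. the rate in 100 kb/s units
 * (e.g. 110 for 11 Mb/s), so 80 * bytes / plcp_signal is the airtime of
 * that many octets in microseconds, rounded up below; the extra 24 bytes
 * cover the 802.11 MAC header. Example: a 1500-byte payload at 11 Mb/s
 * gives (80 * 1524 + 109) / 110 = 1109 us before the ACK/CTS, SIFS and
 * preamble overheads are added.
 */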
int remainder;
*dur = (80 * (24 + payload_len) + plcp_signal - 1)
/ plcp_signal;
if (plcp_signal <= PLCP_SIGNAL_2M)
/* 1-2Mbps WLAN: send ACK/CTS at 1Mbps */
*dur += 3 * (IEEE80211_DUR_DS_SIFS +
IEEE80211_DUR_DS_SHORT_PREAMBLE +
IEEE80211_DUR_DS_FAST_PLCPHDR) +
IEEE80211_DUR_DS_SLOW_CTS + IEEE80211_DUR_DS_SLOW_ACK;
else
/* 5-11Mbps WLAN: send ACK/CTS at 2Mbps */
*dur += 3 * (IEEE80211_DUR_DS_SIFS +
IEEE80211_DUR_DS_SHORT_PREAMBLE +
IEEE80211_DUR_DS_FAST_PLCPHDR) +
IEEE80211_DUR_DS_FAST_CTS + IEEE80211_DUR_DS_FAST_ACK;
/* lengthen duration if long preamble */
if (!short_preamble)
*dur += 3 * (IEEE80211_DUR_DS_LONG_PREAMBLE -
IEEE80211_DUR_DS_SHORT_PREAMBLE) +
3 * (IEEE80211_DUR_DS_SLOW_PLCPHDR -
IEEE80211_DUR_DS_FAST_PLCPHDR);
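/*
 * *plcp is the PLCP LENGTH field: the frame's airtime in microseconds,
 * rounded up. At 11 Mb/s, whenever rounding up adds at least 8/11 us
 * (i.e. 80 * len % 110 is between 1 and 30), the length-extension bit
 * must be set so the receiver can recover the exact octet count; it
 * appears to be carried here in bit 15 (0x8000) of the value handed to
 * the hardware.
 */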
*plcp = (80 * len) / plcp_signal;
remainder = (80 * len) % plcp_signal;
if (plcp_signal == PLCP_SIGNAL_11M &&
remainder <= 30 && remainder > 0)
*plcp = (*plcp | 0x8000) + 1;
else if (remainder)
(*plcp)++;
}
/* Transmit skb w/adm8211_tx_hdr (802.11 header created by hardware) */
static int adm8211_tx_raw(struct ieee80211_hw *dev, struct sk_buff *skb,
u16 plcp_signal,
size_t hdrlen)
{
struct adm8211_priv *priv = dev->priv;
unsigned long flags;
dma_addr_t mapping;
unsigned int entry;
u32 flag;
mapping = dma_map_single(&priv->pdev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
if (dma_mapping_error(&priv->pdev->dev, mapping))
return -ENOMEM;
spin_lock_irqsave(&priv->lock, flags);
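/*
 * Request a TX-complete interrupt (IC) only on the descriptor queued at
 * the ring's half-full point, coalescing completion interrupts; stop the
 * queue once only two descriptors remain free.
 */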
if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size / 2)
flag = TDES1_CONTROL_IC | TDES1_CONTROL_LS | TDES1_CONTROL_FS;
else
flag = TDES1_CONTROL_LS | TDES1_CONTROL_FS;
if (priv->cur_tx - priv->dirty_tx == priv->tx_ring_size - 2)
ieee80211_stop_queue(dev, 0);
entry = priv->cur_tx % priv->tx_ring_size;
priv->tx_buffers[entry].skb = skb;
priv->tx_buffers[entry].mapping = mapping;
priv->tx_buffers[entry].hdrlen = hdrlen;
priv->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
if (entry == priv->tx_ring_size - 1)
flag |= TDES1_CONTROL_TER;
priv->tx_ring[entry].length = cpu_to_le32(flag | skb->len);
/* Set TX rate (SIGNAL field in PLCP PPDU format) */
flag = TDES0_CONTROL_OWN | (plcp_signal << 20) | 8 /* ? */;
priv->tx_ring[entry].status = cpu_to_le32(flag);
priv->cur_tx++;
spin_unlock_irqrestore(&priv->lock, flags);
/* Trigger transmit poll */
ADM8211_CSR_WRITE(TDR, 0);
return 0;
}
/* Put adm8211_tx_hdr on skb and transmit */
static void adm8211_tx(struct ieee80211_hw *dev,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct adm8211_tx_hdr *txhdr;
size_t payload_len, hdrlen;
int plcp, dur, len, plcp_signal, short_preamble;
struct ieee80211_hdr *hdr;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_rate *txrate = ieee80211_get_tx_rate(dev, info);
u8 rc_flags;
rc_flags = info->control.rates[0].flags;
short_preamble = !!(rc_flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE);
plcp_signal = txrate->bitrate;
hdr = (struct ieee80211_hdr *)skb->data;
hdrlen = ieee80211_hdrlen(hdr->frame_control);
memcpy(skb->cb, skb->data, hdrlen);
hdr = (struct ieee80211_hdr *)skb->cb;
skb_pull(skb, hdrlen);
payload_len = skb->len;
txhdr = skb_push(skb, sizeof(*txhdr));
memset(txhdr, 0, sizeof(*txhdr));
memcpy(txhdr->da, ieee80211_get_DA(hdr), ETH_ALEN);
txhdr->signal = plcp_signal;
txhdr->frame_body_size = cpu_to_le16(payload_len);
txhdr->frame_control = hdr->frame_control;
len = hdrlen + payload_len + FCS_LEN;
txhdr->frag = cpu_to_le16(0x0FFF);
adm8211_calc_durations(&dur, &plcp, payload_len,
len, plcp_signal, short_preamble);
txhdr->plcp_frag_head_len = cpu_to_le16(plcp);
txhdr->plcp_frag_tail_len = cpu_to_le16(plcp);
txhdr->dur_frag_head = cpu_to_le16(dur);
txhdr->dur_frag_tail = cpu_to_le16(dur);
txhdr->header_control = cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_EXTEND_HEADER);
if (short_preamble)
txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_SHORT_PREAMBLE);
if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS)
txhdr->header_control |= cpu_to_le16(ADM8211_TXHDRCTL_ENABLE_RTS);
txhdr->retry_limit = info->control.rates[0].count;
if (adm8211_tx_raw(dev, skb, plcp_signal, hdrlen)) {
/* Drop packet */
ieee80211_free_txskb(dev, skb);
}
}
static int adm8211_alloc_rings(struct ieee80211_hw *dev)
{
struct adm8211_priv *priv = dev->priv;
unsigned int ring_size;
priv->rx_buffers = kmalloc(sizeof(*priv->rx_buffers) * priv->rx_ring_size +
sizeof(*priv->tx_buffers) * priv->tx_ring_size, GFP_KERNEL);
if (!priv->rx_buffers)
return -ENOMEM;
priv->tx_buffers = (void *)priv->rx_buffers +
sizeof(*priv->rx_buffers) * priv->rx_ring_size;
/* Allocate TX/RX descriptors */
ring_size = sizeof(struct adm8211_desc) * priv->rx_ring_size +
sizeof(struct adm8211_desc) * priv->tx_ring_size;
priv->rx_ring = dma_alloc_coherent(&priv->pdev->dev, ring_size,
&priv->rx_ring_dma, GFP_KERNEL);
if (!priv->rx_ring) {
kfree(priv->rx_buffers);
priv->rx_buffers = NULL;
priv->tx_buffers = NULL;
return -ENOMEM;
}
priv->tx_ring = priv->rx_ring + priv->rx_ring_size;
priv->tx_ring_dma = priv->rx_ring_dma +
sizeof(struct adm8211_desc) * priv->rx_ring_size;
return 0;
}
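/*
 * Resulting layout: one kmalloc holds the RX buffer info array followed
 * by the TX buffer info array, and one coherent DMA block holds the RX
 * descriptors followed by the TX descriptors, so tx_ring/tx_ring_dma are
 * simply rx_ring/rx_ring_dma offset by the RX descriptor area.
 */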
static const struct ieee80211_ops adm8211_ops = {
.tx = adm8211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.start = adm8211_start,
.stop = adm8211_stop,
.add_interface = adm8211_add_interface,
.remove_interface = adm8211_remove_interface,
.config = adm8211_config,
.bss_info_changed = adm8211_bss_info_changed,
.prepare_multicast = adm8211_prepare_multicast,
.configure_filter = adm8211_configure_filter,
.get_stats = adm8211_get_stats,
.get_tsf = adm8211_get_tsft
};
static int adm8211_probe(struct pci_dev *pdev,
const struct pci_device_id *id)
{
struct ieee80211_hw *dev;
struct adm8211_priv *priv;
unsigned long mem_len;
unsigned int io_len;
int err;
u32 reg;
u8 perm_addr[ETH_ALEN];
err = pci_enable_device(pdev);
if (err) {
printk(KERN_ERR "%s (adm8211): Cannot enable new PCI device\n",
pci_name(pdev));
return err;
}
io_len = pci_resource_len(pdev, 0);
mem_len = pci_resource_len(pdev, 1);
if (io_len < 256 || mem_len < 1024) {
printk(KERN_ERR "%s (adm8211): Too short PCI resources\n",
pci_name(pdev));
err = -ENOMEM;
goto err_disable_pdev;
}
/* check signature */
pci_read_config_dword(pdev, 0x80 /* CR32 */, &reg);
if (reg != ADM8211_SIG1 && reg != ADM8211_SIG2) {
printk(KERN_ERR "%s (adm8211): Invalid signature (0x%x)\n",
pci_name(pdev), reg);
err = -EINVAL;
goto err_disable_pdev;
}
err = pci_request_regions(pdev, "adm8211");
if (err) {
printk(KERN_ERR "%s (adm8211): Cannot obtain PCI resources\n",
pci_name(pdev));
return err; /* someone else grabbed it? don't disable it */
}
err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
if (err) {
printk(KERN_ERR "%s (adm8211): No suitable DMA available\n",
pci_name(pdev));
goto err_free_reg;
}
pci_set_master(pdev);
dev = ieee80211_alloc_hw(sizeof(*priv), &adm8211_ops);
if (!dev) {
printk(KERN_ERR "%s (adm8211): ieee80211 alloc failed\n",
pci_name(pdev));
err = -ENOMEM;
goto err_free_reg;
}
priv = dev->priv;
priv->pdev = pdev;
spin_lock_init(&priv->lock);
SET_IEEE80211_DEV(dev, &pdev->dev);
pci_set_drvdata(pdev, dev);
priv->map = pci_iomap(pdev, 1, mem_len);
if (!priv->map)
priv->map = pci_iomap(pdev, 0, io_len);
if (!priv->map) {
printk(KERN_ERR "%s (adm8211): Cannot map device memory\n",
pci_name(pdev));
err = -ENOMEM;
goto err_free_dev;
}
priv->rx_ring_size = rx_ring_size;
priv->tx_ring_size = tx_ring_size;
err = adm8211_alloc_rings(dev);
if (err) {
printk(KERN_ERR "%s (adm8211): Cannot allocate TX/RX ring\n",
pci_name(pdev));
goto err_iounmap;
}
*(__le32 *)perm_addr = cpu_to_le32(ADM8211_CSR_READ(PAR0));
*(__le16 *)&perm_addr[4] =
cpu_to_le16(ADM8211_CSR_READ(PAR1) & 0xFFFF);
if (!is_valid_ether_addr(perm_addr)) {
printk(KERN_WARNING "%s (adm8211): Invalid hwaddr in EEPROM!\n",
pci_name(pdev));
eth_random_addr(perm_addr);
}
SET_IEEE80211_PERM_ADDR(dev, perm_addr);
dev->extra_tx_headroom = sizeof(struct adm8211_tx_hdr);
/* dev->flags = RX_INCLUDES_FCS in promisc mode */
ieee80211_hw_set(dev, SIGNAL_UNSPEC);
dev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
dev->max_signal = 100; /* FIXME: find better value */
dev->queues = 1; /* ADM8211C supports more, maybe ADM8211B too */
priv->retry_limit = 3;
priv->ant_power = 0x40;
priv->tx_power = 0x40;
priv->lpf_cutoff = 0xFF;
priv->lnags_threshold = 0xFF;
priv->mode = NL80211_IFTYPE_UNSPECIFIED;
/* Power-on issue: the EEPROM won't read correctly without this toggle */
if (pdev->revision >= ADM8211_REV_BA) {
ADM8211_CSR_WRITE(FRCTL, 0);
ADM8211_CSR_READ(FRCTL);
ADM8211_CSR_WRITE(FRCTL, 1);
ADM8211_CSR_READ(FRCTL);
msleep(100);
}
err = adm8211_read_eeprom(dev);
if (err) {
printk(KERN_ERR "%s (adm8211): Can't alloc eeprom buffer\n",
pci_name(pdev));
goto err_free_desc;
}
priv->channel = 1;
dev->wiphy->bands[NL80211_BAND_2GHZ] = &priv->band;
wiphy_ext_feature_set(dev->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
err = ieee80211_register_hw(dev);
if (err) {
printk(KERN_ERR "%s (adm8211): Cannot register device\n",
pci_name(pdev));
goto err_free_eeprom;
}
wiphy_info(dev->wiphy, "hwaddr %pM, Rev 0x%02x\n",
dev->wiphy->perm_addr, pdev->revision);
return 0;
err_free_eeprom:
kfree(priv->eeprom);
err_free_desc:
dma_free_coherent(&pdev->dev,
sizeof(struct adm8211_desc) * priv->rx_ring_size +
sizeof(struct adm8211_desc) * priv->tx_ring_size,
priv->rx_ring, priv->rx_ring_dma);
kfree(priv->rx_buffers);
err_iounmap:
pci_iounmap(pdev, priv->map);
err_free_dev:
ieee80211_free_hw(dev);
err_free_reg:
pci_release_regions(pdev);
err_disable_pdev:
pci_disable_device(pdev);
return err;
}
static void adm8211_remove(struct pci_dev *pdev)
{
struct ieee80211_hw *dev = pci_get_drvdata(pdev);
struct adm8211_priv *priv;
if (!dev)
return;
ieee80211_unregister_hw(dev);
priv = dev->priv;
dma_free_coherent(&pdev->dev,
sizeof(struct adm8211_desc) * priv->rx_ring_size +
sizeof(struct adm8211_desc) * priv->tx_ring_size,
priv->rx_ring, priv->rx_ring_dma);
kfree(priv->rx_buffers);
kfree(priv->eeprom);
pci_iounmap(pdev, priv->map);
pci_release_regions(pdev);
pci_disable_device(pdev);
ieee80211_free_hw(dev);
}
#define adm8211_suspend NULL
#define adm8211_resume NULL
MODULE_DEVICE_TABLE(pci, adm8211_pci_id_table);
static SIMPLE_DEV_PM_OPS(adm8211_pm_ops, adm8211_suspend, adm8211_resume);
/* TODO: implement enable_wake */
static struct pci_driver adm8211_driver = {
.name = "adm8211",
.id_table = adm8211_pci_id_table,
.probe = adm8211_probe,
.remove = adm8211_remove,
.driver.pm = &adm8211_pm_ops,
};
module_pci_driver(adm8211_driver);
|
linux-master
|
drivers/net/wireless/admtek/adm8211.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl18xx
*
* Copyright (C) 2011 Texas Instruments Inc.
*/
#include "../wlcore/cmd.h"
#include "../wlcore/debug.h"
#include "../wlcore/acx.h"
#include "acx.h"
#include "wl18xx.h"
int wl18xx_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap,
u32 sdio_blk_size, u32 extra_mem_blks,
u32 len_field_size)
{
struct wl18xx_acx_host_config_bitmap *bitmap_conf;
int ret;
wl1271_debug(DEBUG_ACX, "acx cfg bitmap %d blk %d spare %d field %d",
host_cfg_bitmap, sdio_blk_size, extra_mem_blks,
len_field_size);
bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
if (!bitmap_conf) {
ret = -ENOMEM;
goto out;
}
bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
bitmap_conf->host_sdio_block_size = cpu_to_le32(sdio_blk_size);
bitmap_conf->extra_mem_blocks = cpu_to_le32(extra_mem_blks);
bitmap_conf->length_field_size = cpu_to_le32(len_field_size);
ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
bitmap_conf, sizeof(*bitmap_conf));
if (ret < 0) {
wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
goto out;
}
out:
kfree(bitmap_conf);
return ret;
}
int wl18xx_acx_set_checksum_state(struct wl1271 *wl)
{
struct wl18xx_acx_checksum_state *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx checksum state");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->checksum_state = CHECKSUM_OFFLOAD_ENABLED;
ret = wl1271_cmd_configure(wl, ACX_CSUM_CONFIG, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("failed to set Tx checksum state: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl18xx_acx_clear_statistics(struct wl1271 *wl)
{
struct wl18xx_acx_clear_statistics *acx;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx clear statistics");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
ret = wl1271_cmd_configure(wl, ACX_CLEAR_STATISTICS, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("failed to clear firmware statistics: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl18xx_acx_peer_ht_operation_mode(struct wl1271 *wl, u8 hlid, bool wide)
{
struct wlcore_peer_ht_operation_mode *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx peer ht operation mode hlid %d bw %d",
hlid, wide);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->hlid = hlid;
acx->bandwidth = wide ? WLCORE_BANDWIDTH_40MHZ : WLCORE_BANDWIDTH_20MHZ;
ret = wl1271_cmd_configure(wl, ACX_PEER_HT_OPERATION_MODE_CFG, acx,
sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx peer ht operation mode failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
/*
* this command is basically the same as wl1271_acx_ht_capabilities,
* with the addition of supported rates. they should be unified in
* the next fw api change
*/
int wl18xx_acx_set_peer_cap(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation,
u32 rate_set, u8 hlid)
{
struct wlcore_acx_peer_cap *acx;
int ret = 0;
u32 ht_capabilites = 0;
wl1271_debug(DEBUG_ACX,
"acx set cap ht_supp: %d ht_cap: %d rates: 0x%x",
ht_cap->ht_supported, ht_cap->cap, rate_set);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
if (allow_ht_operation && ht_cap->ht_supported) {
/* no need to translate capabilities - use the spec values */
ht_capabilites = ht_cap->cap;
/*
* this bit is not employed by the spec but only by FW to
* indicate peer HT support
*/
ht_capabilites |= WL12XX_HT_CAP_HT_OPERATION;
/* get data from A-MPDU parameters field */
acx->ampdu_max_length = ht_cap->ampdu_factor;
acx->ampdu_min_spacing = ht_cap->ampdu_density;
}
acx->hlid = hlid;
acx->ht_capabilites = cpu_to_le32(ht_capabilites);
acx->supported_rates = cpu_to_le32(rate_set);
ret = wl1271_cmd_configure(wl, ACX_PEER_CAP, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx ht capabilities setting failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
/*
* When the host is suspended, we don't want to get any fast-link/PSM
* notifications
*/
int wl18xx_acx_interrupt_notify_config(struct wl1271 *wl,
bool action)
{
struct wl18xx_acx_interrupt_notify *acx;
int ret = 0;
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->enable = action;
ret = wl1271_cmd_configure(wl, ACX_INTERRUPT_NOTIFY, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx interrupt notify setting failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
/*
* When the host is suspended, we can configure the FW to disable RX BA
* notifications.
*/
int wl18xx_acx_rx_ba_filter(struct wl1271 *wl, bool action)
{
struct wl18xx_acx_rx_ba_filter *acx;
int ret = 0;
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->enable = (u32)action;
ret = wl1271_cmd_configure(wl, ACX_RX_BA_FILTER, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx rx ba activity filter setting failed: %d",
ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl18xx_acx_ap_sleep(struct wl1271 *wl)
{
struct wl18xx_priv *priv = wl->priv;
struct acx_ap_sleep_cfg *acx;
struct conf_ap_sleep_settings *conf = &priv->conf.ap_sleep;
int ret;
wl1271_debug(DEBUG_ACX, "acx config ap sleep");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->idle_duty_cycle = conf->idle_duty_cycle;
acx->connected_duty_cycle = conf->connected_duty_cycle;
acx->max_stations_thresh = conf->max_stations_thresh;
acx->idle_conn_thresh = conf->idle_conn_thresh;
ret = wl1271_cmd_configure(wl, ACX_AP_SLEEP_CFG, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx config ap-sleep failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl18xx_acx_dynamic_fw_traces(struct wl1271 *wl)
{
struct acx_dynamic_fw_traces_cfg *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx dynamic fw traces config %d",
wl->dynamic_fw_traces);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->dynamic_fw_traces = cpu_to_le32(wl->dynamic_fw_traces);
ret = wl1271_cmd_configure(wl, ACX_DYNAMIC_TRACES_CFG,
acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx config dynamic fw traces failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl18xx_acx_time_sync_cfg(struct wl1271 *wl)
{
struct acx_time_sync_cfg *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx time sync cfg: mode %d, addr: %pM",
wl->conf.sg.params[WL18XX_CONF_SG_TIME_SYNC],
wl->zone_master_mac_addr);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->sync_mode = wl->conf.sg.params[WL18XX_CONF_SG_TIME_SYNC];
memcpy(acx->zone_mac_addr, wl->zone_master_mac_addr, ETH_ALEN);
ret = wl1271_cmd_configure(wl, ACX_TIME_SYNC_CFG,
acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx time sync cfg failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
|
linux-master
|
drivers/net/wireless/ti/wl18xx/acx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl18xx
*
* Copyright (C) 2011 Texas Instruments
*/
#include "../wlcore/wlcore.h"
#include "../wlcore/io.h"
#include "io.h"
int wl18xx_top_reg_write(struct wl1271 *wl, int addr, u16 val)
{
u32 tmp;
int ret;
if (WARN_ON(addr % 2))
return -EINVAL;
if ((addr % 4) == 0) {
ret = wlcore_read32(wl, addr, &tmp);
if (ret < 0)
goto out;
tmp = (tmp & 0xffff0000) | val;
ret = wlcore_write32(wl, addr, tmp);
} else {
ret = wlcore_read32(wl, addr - 2, &tmp);
if (ret < 0)
goto out;
tmp = (tmp & 0xffff) | (val << 16);
ret = wlcore_write32(wl, addr - 2, tmp);
}
out:
return ret;
}
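/*
 * Packing example: a 16-bit register at addr % 4 == 2 lives in the high
 * half of its 32-bit word, so writing val = 0xBEEF reads the word at
 * addr - 2 and stores (tmp & 0xffff) | (0xBEEF << 16) back, leaving the
 * neighbouring register in the low half untouched.
 */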
int wl18xx_top_reg_read(struct wl1271 *wl, int addr, u16 *out)
{
u32 val = 0;
int ret;
if (WARN_ON(addr % 2))
return -EINVAL;
if ((addr % 4) == 0) {
/* address is 4-bytes aligned */
ret = wlcore_read32(wl, addr, &val);
if (ret >= 0 && out)
*out = val & 0xffff;
} else {
ret = wlcore_read32(wl, addr - 2, &val);
if (ret >= 0 && out)
*out = (val & 0xffff0000) >> 16;
}
return ret;
}
|
linux-master
|
drivers/net/wireless/ti/wl18xx/io.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl18xx
*
* Copyright (C) 2009 Nokia Corporation
* Copyright (C) 2011-2012 Texas Instruments
*/
#include <linux/pm_runtime.h>
#include "../wlcore/debugfs.h"
#include "../wlcore/wlcore.h"
#include "../wlcore/debug.h"
#include "../wlcore/ps.h"
#include "wl18xx.h"
#include "acx.h"
#include "cmd.h"
#include "debugfs.h"
#define WL18XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
DEBUGFS_FWSTATS_FILE(a, b, c, wl18xx_acx_statistics)
#define WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c) \
DEBUGFS_FWSTATS_FILE_ARRAY(a, b, c, wl18xx_acx_statistics)
WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_non_ctrl, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_ctrl, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, error_frame_during_protection, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_tx_start, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, null_frame_cts_start, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, bar_retry, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, num_frame_cts_nul_flid, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_abort_failure, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, tx_resume_failure, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_cmplt_db_overflow_cnt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_rx_exch, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx_exch, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_tx, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, elp_while_nvic_pending, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, rx_excessive_frame_len, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, burst_mismatch, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(error, tbc_exch_mismatch, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_prepared_descs, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cmplt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_prepared, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_prepared, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_template_programmed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_data_programmed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_burst_programmed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_starts, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_stop, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_templates, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_int_templates, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_fw_gen, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_data, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_start_null_frame, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_template, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_retry_data, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(tx, tx_retry_per_rate,
NUM_OF_RATES_INDEXES);
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_pending, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_exch_expiry, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_template, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_data, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_done_int_template, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe1, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, tx_cfe2, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_mpdu_alloc_failed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_init_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_in_process_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_tkip_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_key_not_found, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_need_fragmentation, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_bad_mblk_num, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_failed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_hit, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(tx, frag_cache_miss, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_beacon_early_term, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_out_of_mpdu_nodes, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_hdr_overflow, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_dropped_frame, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_done, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_defrag, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_defrag_end, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_pre_complt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_cmplt_task, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_phy_hdr, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_rts_timeout, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_timeout_wa, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_init_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_in_process_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_tkip_called, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_defrag, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_decrypt_failed, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, decrypt_key_not_found, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, defrag_need_decrypt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_tkip_replays, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx, rx_xfr, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, missing_bcns_cnt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_bcns_cnt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, connection_out_of_sync, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pwr, cont_miss_bcns_spread,
PWR_STAT_MAX_CONT_MISSED_BCNS_SPREAD);
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_bcns_cnt, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_count, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_time_avg, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_cycle_avg, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, sleep_percent, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_active_conf, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_user_conf, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pwr, ap_sleep_counter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, beacon_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, arp_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, mc_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, dup_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, data_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, ibss_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, protection_filter, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, accum_arp_pend_requests, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(rx_filter, max_arp_queue_dep, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(rx_rate, rx_frames_per_rates, 50);
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_rate,
AGGR_STATS_TX_AGG);
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, tx_agg_len,
AGGR_STATS_TX_AGG);
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(aggr_size, rx_size,
AGGR_STATS_RX_SIZE_LEN);
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, hs_tx_stat_fifo_int, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_tx_stat_fifo_int, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, enc_rx_stat_fifo_int, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, rx_complete_stat_fifo_int, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_proc_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, post_proc_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, sec_frag_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, pre_to_defrag_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, defrag_to_rx_xfer_swi, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_in_fifo_full, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(pipeline, dec_packet_out, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(pipeline, pipeline_fifo_full,
PIPE_STATS_HW_FIFO);
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(diversity, num_of_packets_per_ant,
DIVERSITY_STATS_NUM_OF_ANT);
WL18XX_DEBUGFS_FWSTATS_FILE(diversity, total_num_of_toggles, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_low, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(thermal, irq_thr_high, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_stop, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(thermal, tx_resume, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(thermal, false_irq, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(thermal, adc_source_unexpected, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE_ARRAY(calib, fail_count,
WL18XX_NUM_OF_CALIBRATIONS_ERRORS);
WL18XX_DEBUGFS_FWSTATS_FILE(calib, calib_count, "%u");
WL18XX_DEBUGFS_FWSTATS_FILE(roaming, rssi_level, "%d");
WL18XX_DEBUGFS_FWSTATS_FILE(dfs, num_of_radar_detections, "%d");
static ssize_t conf_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
struct wl18xx_priv *priv = wl->priv;
struct wlcore_conf_header header;
char *buf, *pos;
size_t len;
int ret;
len = WL18XX_CONF_SIZE;
buf = kmalloc(len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
header.magic = cpu_to_le32(WL18XX_CONF_MAGIC);
header.version = cpu_to_le32(WL18XX_CONF_VERSION);
header.checksum = 0;
mutex_lock(&wl->mutex);
pos = buf;
memcpy(pos, &header, sizeof(header));
pos += sizeof(header);
memcpy(pos, &wl->conf, sizeof(wl->conf));
pos += sizeof(wl->conf);
memcpy(pos, &priv->conf, sizeof(priv->conf));
mutex_unlock(&wl->mutex);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret;
}
static const struct file_operations conf_ops = {
.read = conf_read,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t clear_fw_stats_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
int ret;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = wl18xx_acx_clear_statistics(wl);
if (ret < 0) {
count = ret;
goto out;
}
out:
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations clear_fw_stats_ops = {
.write = clear_fw_stats_write,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t radar_detection_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
int ret;
u8 channel;
ret = kstrtou8_from_user(user_buf, count, 10, &channel);
if (ret < 0) {
wl1271_warning("illegal channel");
return -EINVAL;
}
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl18xx_cmd_radar_detection_debug(wl, channel);
if (ret < 0)
count = ret;
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations radar_detection_ops = {
.write = radar_detection_write,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t dynamic_fw_traces_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 0, &value);
if (ret < 0)
return ret;
mutex_lock(&wl->mutex);
wl->dynamic_fw_traces = value;
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl18xx_acx_dynamic_fw_traces(wl);
if (ret < 0)
count = ret;
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
}
static ssize_t dynamic_fw_traces_read(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
return wl1271_format_buffer(userbuf, count, ppos,
"%d\n", wl->dynamic_fw_traces);
}
static const struct file_operations dynamic_fw_traces_ops = {
.read = dynamic_fw_traces_read,
.write = dynamic_fw_traces_write,
.open = simple_open,
.llseek = default_llseek,
};
#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS
static ssize_t radar_debug_mode_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal radar_debug_mode value!");
return -EINVAL;
}
/* valid values: 0/1 */
if (!(value == 0 || value == 1)) {
wl1271_warning("value is not in valid!");
return -EINVAL;
}
mutex_lock(&wl->mutex);
wl->radar_debug_mode = value;
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl12xx_for_each_wlvif_ap(wl, wlvif) {
wlcore_cmd_generic_cfg(wl, wlvif,
WLCORE_CFG_FEATURE_RADAR_DEBUG,
wl->radar_debug_mode, 0);
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
}
static ssize_t radar_debug_mode_read(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
return wl1271_format_buffer(userbuf, count, ppos,
"%d\n", wl->radar_debug_mode);
}
static const struct file_operations radar_debug_mode_ops = {
.write = radar_debug_mode_write,
.read = radar_debug_mode_read,
.open = simple_open,
.llseek = default_llseek,
};
#endif /* CFG80211_CERTIFICATION_ONUS */
int wl18xx_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
struct dentry *stats, *moddir;
moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
stats = debugfs_create_dir("fw_stats", moddir);
DEBUGFS_ADD(clear_fw_stats, stats);
DEBUGFS_FWSTATS_ADD(error, error_frame_non_ctrl);
DEBUGFS_FWSTATS_ADD(error, error_frame_ctrl);
DEBUGFS_FWSTATS_ADD(error, error_frame_during_protection);
DEBUGFS_FWSTATS_ADD(error, null_frame_tx_start);
DEBUGFS_FWSTATS_ADD(error, null_frame_cts_start);
DEBUGFS_FWSTATS_ADD(error, bar_retry);
DEBUGFS_FWSTATS_ADD(error, num_frame_cts_nul_flid);
DEBUGFS_FWSTATS_ADD(error, tx_abort_failure);
DEBUGFS_FWSTATS_ADD(error, tx_resume_failure);
DEBUGFS_FWSTATS_ADD(error, rx_cmplt_db_overflow_cnt);
DEBUGFS_FWSTATS_ADD(error, elp_while_rx_exch);
DEBUGFS_FWSTATS_ADD(error, elp_while_tx_exch);
DEBUGFS_FWSTATS_ADD(error, elp_while_tx);
DEBUGFS_FWSTATS_ADD(error, elp_while_nvic_pending);
DEBUGFS_FWSTATS_ADD(error, rx_excessive_frame_len);
DEBUGFS_FWSTATS_ADD(error, burst_mismatch);
DEBUGFS_FWSTATS_ADD(error, tbc_exch_mismatch);
DEBUGFS_FWSTATS_ADD(tx, tx_prepared_descs);
DEBUGFS_FWSTATS_ADD(tx, tx_cmplt);
DEBUGFS_FWSTATS_ADD(tx, tx_template_prepared);
DEBUGFS_FWSTATS_ADD(tx, tx_data_prepared);
DEBUGFS_FWSTATS_ADD(tx, tx_template_programmed);
DEBUGFS_FWSTATS_ADD(tx, tx_data_programmed);
DEBUGFS_FWSTATS_ADD(tx, tx_burst_programmed);
DEBUGFS_FWSTATS_ADD(tx, tx_starts);
DEBUGFS_FWSTATS_ADD(tx, tx_stop);
DEBUGFS_FWSTATS_ADD(tx, tx_start_templates);
DEBUGFS_FWSTATS_ADD(tx, tx_start_int_templates);
DEBUGFS_FWSTATS_ADD(tx, tx_start_fw_gen);
DEBUGFS_FWSTATS_ADD(tx, tx_start_data);
DEBUGFS_FWSTATS_ADD(tx, tx_start_null_frame);
DEBUGFS_FWSTATS_ADD(tx, tx_exch);
DEBUGFS_FWSTATS_ADD(tx, tx_retry_template);
DEBUGFS_FWSTATS_ADD(tx, tx_retry_data);
DEBUGFS_FWSTATS_ADD(tx, tx_retry_per_rate);
DEBUGFS_FWSTATS_ADD(tx, tx_exch_pending);
DEBUGFS_FWSTATS_ADD(tx, tx_exch_expiry);
DEBUGFS_FWSTATS_ADD(tx, tx_done_template);
DEBUGFS_FWSTATS_ADD(tx, tx_done_data);
DEBUGFS_FWSTATS_ADD(tx, tx_done_int_template);
DEBUGFS_FWSTATS_ADD(tx, tx_cfe1);
DEBUGFS_FWSTATS_ADD(tx, tx_cfe2);
DEBUGFS_FWSTATS_ADD(tx, frag_called);
DEBUGFS_FWSTATS_ADD(tx, frag_mpdu_alloc_failed);
DEBUGFS_FWSTATS_ADD(tx, frag_init_called);
DEBUGFS_FWSTATS_ADD(tx, frag_in_process_called);
DEBUGFS_FWSTATS_ADD(tx, frag_tkip_called);
DEBUGFS_FWSTATS_ADD(tx, frag_key_not_found);
DEBUGFS_FWSTATS_ADD(tx, frag_need_fragmentation);
DEBUGFS_FWSTATS_ADD(tx, frag_bad_mblk_num);
DEBUGFS_FWSTATS_ADD(tx, frag_failed);
DEBUGFS_FWSTATS_ADD(tx, frag_cache_hit);
DEBUGFS_FWSTATS_ADD(tx, frag_cache_miss);
DEBUGFS_FWSTATS_ADD(rx, rx_beacon_early_term);
DEBUGFS_FWSTATS_ADD(rx, rx_out_of_mpdu_nodes);
DEBUGFS_FWSTATS_ADD(rx, rx_hdr_overflow);
DEBUGFS_FWSTATS_ADD(rx, rx_dropped_frame);
DEBUGFS_FWSTATS_ADD(rx, rx_done);
DEBUGFS_FWSTATS_ADD(rx, rx_defrag);
DEBUGFS_FWSTATS_ADD(rx, rx_defrag_end);
DEBUGFS_FWSTATS_ADD(rx, rx_cmplt);
DEBUGFS_FWSTATS_ADD(rx, rx_pre_complt);
DEBUGFS_FWSTATS_ADD(rx, rx_cmplt_task);
DEBUGFS_FWSTATS_ADD(rx, rx_phy_hdr);
DEBUGFS_FWSTATS_ADD(rx, rx_timeout);
DEBUGFS_FWSTATS_ADD(rx, rx_rts_timeout);
DEBUGFS_FWSTATS_ADD(rx, rx_timeout_wa);
DEBUGFS_FWSTATS_ADD(rx, defrag_called);
DEBUGFS_FWSTATS_ADD(rx, defrag_init_called);
DEBUGFS_FWSTATS_ADD(rx, defrag_in_process_called);
DEBUGFS_FWSTATS_ADD(rx, defrag_tkip_called);
DEBUGFS_FWSTATS_ADD(rx, defrag_need_defrag);
DEBUGFS_FWSTATS_ADD(rx, defrag_decrypt_failed);
DEBUGFS_FWSTATS_ADD(rx, decrypt_key_not_found);
DEBUGFS_FWSTATS_ADD(rx, defrag_need_decrypt);
DEBUGFS_FWSTATS_ADD(rx, rx_tkip_replays);
DEBUGFS_FWSTATS_ADD(rx, rx_xfr);
DEBUGFS_FWSTATS_ADD(isr, irqs);
DEBUGFS_FWSTATS_ADD(pwr, missing_bcns_cnt);
DEBUGFS_FWSTATS_ADD(pwr, rcvd_bcns_cnt);
DEBUGFS_FWSTATS_ADD(pwr, connection_out_of_sync);
DEBUGFS_FWSTATS_ADD(pwr, cont_miss_bcns_spread);
DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_bcns_cnt);
DEBUGFS_FWSTATS_ADD(pwr, sleep_time_count);
DEBUGFS_FWSTATS_ADD(pwr, sleep_time_avg);
DEBUGFS_FWSTATS_ADD(pwr, sleep_cycle_avg);
DEBUGFS_FWSTATS_ADD(pwr, sleep_percent);
DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_active_conf);
DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_user_conf);
DEBUGFS_FWSTATS_ADD(pwr, ap_sleep_counter);
DEBUGFS_FWSTATS_ADD(rx_filter, beacon_filter);
DEBUGFS_FWSTATS_ADD(rx_filter, arp_filter);
DEBUGFS_FWSTATS_ADD(rx_filter, mc_filter);
DEBUGFS_FWSTATS_ADD(rx_filter, dup_filter);
DEBUGFS_FWSTATS_ADD(rx_filter, data_filter);
DEBUGFS_FWSTATS_ADD(rx_filter, ibss_filter);
DEBUGFS_FWSTATS_ADD(rx_filter, protection_filter);
DEBUGFS_FWSTATS_ADD(rx_filter, accum_arp_pend_requests);
DEBUGFS_FWSTATS_ADD(rx_filter, max_arp_queue_dep);
DEBUGFS_FWSTATS_ADD(rx_rate, rx_frames_per_rates);
DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_rate);
DEBUGFS_FWSTATS_ADD(aggr_size, tx_agg_len);
DEBUGFS_FWSTATS_ADD(aggr_size, rx_size);
DEBUGFS_FWSTATS_ADD(pipeline, hs_tx_stat_fifo_int);
DEBUGFS_FWSTATS_ADD(pipeline, enc_tx_stat_fifo_int);
DEBUGFS_FWSTATS_ADD(pipeline, enc_rx_stat_fifo_int);
DEBUGFS_FWSTATS_ADD(pipeline, rx_complete_stat_fifo_int);
DEBUGFS_FWSTATS_ADD(pipeline, pre_proc_swi);
DEBUGFS_FWSTATS_ADD(pipeline, post_proc_swi);
DEBUGFS_FWSTATS_ADD(pipeline, sec_frag_swi);
DEBUGFS_FWSTATS_ADD(pipeline, pre_to_defrag_swi);
DEBUGFS_FWSTATS_ADD(pipeline, defrag_to_rx_xfer_swi);
DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in);
DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_in_fifo_full);
DEBUGFS_FWSTATS_ADD(pipeline, dec_packet_out);
DEBUGFS_FWSTATS_ADD(pipeline, pipeline_fifo_full);
DEBUGFS_FWSTATS_ADD(diversity, num_of_packets_per_ant);
DEBUGFS_FWSTATS_ADD(diversity, total_num_of_toggles);
DEBUGFS_FWSTATS_ADD(thermal, irq_thr_low);
DEBUGFS_FWSTATS_ADD(thermal, irq_thr_high);
DEBUGFS_FWSTATS_ADD(thermal, tx_stop);
DEBUGFS_FWSTATS_ADD(thermal, tx_resume);
DEBUGFS_FWSTATS_ADD(thermal, false_irq);
DEBUGFS_FWSTATS_ADD(thermal, adc_source_unexpected);
DEBUGFS_FWSTATS_ADD(calib, fail_count);
DEBUGFS_FWSTATS_ADD(calib, calib_count);
DEBUGFS_FWSTATS_ADD(roaming, rssi_level);
DEBUGFS_FWSTATS_ADD(dfs, num_of_radar_detections);
DEBUGFS_ADD(conf, moddir);
DEBUGFS_ADD(radar_detection, moddir);
#ifdef CONFIG_CFG80211_CERTIFICATION_ONUS
DEBUGFS_ADD(radar_debug_mode, moddir);
#endif
DEBUGFS_ADD(dynamic_fw_traces, moddir);
return 0;
}
|
linux-master
|
drivers/net/wireless/ti/wl18xx/debugfs.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl18xx
*
* Copyright (C) 2012 Texas Instruments. All rights reserved.
*/
#include <linux/ieee80211.h>
#include "scan.h"
#include "../wlcore/debug.h"
static void wl18xx_adjust_channels(struct wl18xx_cmd_scan_params *cmd,
struct wlcore_scan_channels *cmd_channels)
{
memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive));
memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active));
cmd->dfs = cmd_channels->dfs;
cmd->passive_active = cmd_channels->passive_active;
memcpy(cmd->channels_2, cmd_channels->channels_2,
sizeof(cmd->channels_2));
memcpy(cmd->channels_5, cmd_channels->channels_5,
sizeof(cmd->channels_5));
/* channels_4 are not supported, so no need to copy them */
}
static int wl18xx_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct cfg80211_scan_request *req)
{
struct wl18xx_cmd_scan_params *cmd;
struct wlcore_scan_channels *cmd_channels = NULL;
int ret;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
/* scan on the dev role if the regular one is not started */
if (wlcore_is_p2p_mgmt(wlvif))
cmd->role_id = wlvif->dev_role_id;
else
cmd->role_id = wlvif->role_id;
if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
goto out;
}
cmd->scan_type = SCAN_TYPE_SEARCH;
cmd->rssi_threshold = -127;
cmd->snr_threshold = 0;
cmd->bss_type = SCAN_BSS_TYPE_ANY;
cmd->ssid_from_list = 0;
cmd->filter = 0;
cmd->add_broadcast = 0;
cmd->urgency = 0;
cmd->protect = 0;
cmd->n_probe_reqs = wl->conf.scan.num_probe_reqs;
cmd->terminate_after = 0;
/* configure channels */
WARN_ON(req->n_ssids > 1);
cmd_channels = kzalloc(sizeof(*cmd_channels), GFP_KERNEL);
if (!cmd_channels) {
ret = -ENOMEM;
goto out;
}
wlcore_set_scan_chan_params(wl, cmd_channels, req->channels,
req->n_channels, req->n_ssids,
SCAN_TYPE_SEARCH);
wl18xx_adjust_channels(cmd, cmd_channels);
/*
* all the cycles params (except total cycles) should
* remain 0 for normal scan
*/
cmd->total_cycles = 1;
if (req->no_cck)
cmd->rate = WL18XX_SCAN_RATE_6;
cmd->tag = WL1271_SCAN_DEFAULT_TAG;
if (req->n_ssids) {
cmd->ssid_len = req->ssids[0].ssid_len;
memcpy(cmd->ssid, req->ssids[0].ssid, cmd->ssid_len);
}
/* TODO: per-band ies? */
if (cmd->active[0]) {
u8 band = NL80211_BAND_2GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
req->ssids ? req->ssids[0].ssid_len : 0,
req->ie,
req->ie_len,
NULL,
0,
false);
if (ret < 0) {
wl1271_error("2.4GHz PROBE request template failed");
goto out;
}
}
if (cmd->active[1] || cmd->dfs) {
u8 band = NL80211_BAND_5GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
req->ssids ? req->ssids[0].ssid_len : 0,
req->ie,
req->ie_len,
NULL,
0,
false);
if (ret < 0) {
wl1271_error("5GHz PROBE request template failed");
goto out;
}
}
wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("SCAN failed");
goto out;
}
out:
kfree(cmd_channels);
kfree(cmd);
return ret;
}
void wl18xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
wl->scan.failed = false;
cancel_delayed_work(&wl->scan_complete_work);
ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
msecs_to_jiffies(0));
}
static
int wl18xx_scan_sched_scan_config(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies)
{
struct wl18xx_cmd_scan_params *cmd;
struct wlcore_scan_channels *cmd_channels = NULL;
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int ret;
int filter_type;
wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
filter_type = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
if (filter_type < 0)
return filter_type;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->role_id = wlvif->role_id;
if (WARN_ON(cmd->role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
goto out;
}
cmd->scan_type = SCAN_TYPE_PERIODIC;
cmd->rssi_threshold = c->rssi_threshold;
cmd->snr_threshold = c->snr_threshold;
/* don't filter on BSS type */
cmd->bss_type = SCAN_BSS_TYPE_ANY;
cmd->ssid_from_list = 1;
if (filter_type == SCAN_SSID_FILTER_LIST)
cmd->filter = 1;
cmd->add_broadcast = 0;
cmd->urgency = 0;
cmd->protect = 0;
cmd->n_probe_reqs = c->num_probe_reqs;
/* don't stop scanning automatically when something is found */
cmd->terminate_after = 0;
cmd_channels = kzalloc(sizeof(*cmd_channels), GFP_KERNEL);
if (!cmd_channels) {
ret = -ENOMEM;
goto out;
}
/* configure channels */
wlcore_set_scan_chan_params(wl, cmd_channels, req->channels,
req->n_channels, req->n_ssids,
SCAN_TYPE_PERIODIC);
wl18xx_adjust_channels(cmd, cmd_channels);
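/*
 * If the driver configuration defines a distinct long interval that is
 * larger than the interval mac80211 requested, run num_short_intervals
 * cycles at the requested (short) interval before falling back to the
 * long one; otherwise every cycle simply uses the requested interval.
 */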
if (c->num_short_intervals && c->long_interval &&
c->long_interval > req->scan_plans[0].interval * MSEC_PER_SEC) {
cmd->short_cycles_msec =
cpu_to_le16(req->scan_plans[0].interval * MSEC_PER_SEC);
cmd->long_cycles_msec = cpu_to_le16(c->long_interval);
cmd->short_cycles_count = c->num_short_intervals;
} else {
cmd->short_cycles_msec = 0;
cmd->long_cycles_msec =
cpu_to_le16(req->scan_plans[0].interval * MSEC_PER_SEC);
cmd->short_cycles_count = 0;
}
wl1271_debug(DEBUG_SCAN, "short_interval: %d, long_interval: %d, num_short: %d",
le16_to_cpu(cmd->short_cycles_msec),
le16_to_cpu(cmd->long_cycles_msec),
cmd->short_cycles_count);
cmd->total_cycles = 0;
cmd->tag = WL1271_SCAN_DEFAULT_TAG;
/* create a PERIODIC_SCAN_REPORT_EVENT whenever we've got a match */
cmd->report_threshold = 1;
cmd->terminate_on_report = 0;
if (cmd->active[0]) {
u8 band = NL80211_BAND_2GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
req->ssids ? req->ssids[0].ssid_len : 0,
ies->ies[band],
ies->len[band],
ies->common_ies,
ies->common_ie_len,
true);
if (ret < 0) {
wl1271_error("2.4GHz PROBE request template failed");
goto out;
}
}
if (cmd->active[1] || cmd->dfs) {
u8 band = NL80211_BAND_5GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->role_id, band,
req->ssids ? req->ssids[0].ssid : NULL,
req->ssids ? req->ssids[0].ssid_len : 0,
ies->ies[band],
ies->len[band],
ies->common_ies,
ies->common_ie_len,
true);
if (ret < 0) {
wl1271_error("5GHz PROBE request template failed");
goto out;
}
}
wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("SCAN failed");
goto out;
}
out:
kfree(cmd_channels);
kfree(cmd);
return ret;
}
int wl18xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies)
{
return wl18xx_scan_sched_scan_config(wl, wlvif, req, ies);
}
static int __wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 scan_type)
{
struct wl18xx_cmd_scan_stop *stop;
int ret;
wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
stop = kzalloc(sizeof(*stop), GFP_KERNEL);
if (!stop) {
wl1271_error("failed to alloc memory to send sched scan stop");
return -ENOMEM;
}
stop->role_id = wlvif->role_id;
stop->scan_type = scan_type;
ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, stop, sizeof(*stop), 0);
if (ret < 0) {
wl1271_error("failed to send sched scan stop command");
goto out_free;
}
out_free:
kfree(stop);
return ret;
}
void wl18xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
__wl18xx_scan_stop(wl, wlvif, SCAN_TYPE_PERIODIC);
}
int wl18xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct cfg80211_scan_request *req)
{
return wl18xx_scan_send(wl, wlvif, req);
}
int wl18xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
return __wl18xx_scan_stop(wl, wlvif, SCAN_TYPE_SEARCH);
}
|
linux-master
|
drivers/net/wireless/ti/wl18xx/scan.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl18xx
*
* Copyright (C) 2011 Texas Instruments Inc.
*/
#include "../wlcore/cmd.h"
#include "../wlcore/debug.h"
#include "../wlcore/hw_ops.h"
#include "cmd.h"
int wl18xx_cmd_channel_switch(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_channel_switch *ch_switch)
{
struct wl18xx_cmd_channel_switch *cmd;
u32 supported_rates;
int ret;
wl1271_debug(DEBUG_ACX, "cmd channel switch (count=%d)",
ch_switch->count);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->role_id = wlvif->role_id;
cmd->channel = ch_switch->chandef.chan->hw_value;
cmd->switch_time = ch_switch->count;
cmd->stop_tx = ch_switch->block_tx;
switch (ch_switch->chandef.chan->band) {
case NL80211_BAND_2GHZ:
cmd->band = WLCORE_BAND_2_4GHZ;
break;
case NL80211_BAND_5GHZ:
cmd->band = WLCORE_BAND_5GHZ;
break;
default:
wl1271_error("invalid channel switch band: %d",
ch_switch->chandef.chan->band);
ret = -EINVAL;
goto out_free;
}
supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES;
if (wlvif->bss_type == BSS_TYPE_STA_BSS)
supported_rates |= wlcore_hw_sta_get_ap_rate_mask(wl, wlvif);
else
supported_rates |=
wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
if (wlvif->p2p)
supported_rates &= ~CONF_TX_CCK_RATES;
cmd->local_supported_rates = cpu_to_le32(supported_rates);
cmd->channel_type = wlvif->channel_type;
ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send channel switch command");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
int wl18xx_cmd_smart_config_start(struct wl1271 *wl, u32 group_bitmap)
{
struct wl18xx_cmd_smart_config_start *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd smart config start group_bitmap=0x%x",
group_bitmap);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->group_id_bitmask = cpu_to_le32(group_bitmap);
ret = wl1271_cmd_send(wl, CMD_SMART_CONFIG_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send smart config start command");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
int wl18xx_cmd_smart_config_stop(struct wl1271 *wl)
{
struct wl1271_cmd_header *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd smart config stop");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
ret = wl1271_cmd_send(wl, CMD_SMART_CONFIG_STOP, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send smart config stop command");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
int wl18xx_cmd_smart_config_set_group_key(struct wl1271 *wl, u16 group_id,
u8 key_len, u8 *key)
{
struct wl18xx_cmd_smart_config_set_group_key *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd smart config set group key id=0x%x",
group_id);
if (key_len != sizeof(cmd->key)) {
wl1271_error("invalid group key size: %d", key_len);
return -E2BIG;
}
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->group_id = cpu_to_le32(group_id);
memcpy(cmd->key, key, key_len);
ret = wl1271_cmd_send(wl, CMD_SMART_CONFIG_SET_GROUP_KEY, cmd,
sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send smart config set group key cmd");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
int wl18xx_cmd_set_cac(struct wl1271 *wl, struct wl12xx_vif *wlvif, bool start)
{
struct wlcore_cmd_cac_start *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd cac (channel %d) %s",
wlvif->channel, start ? "start" : "stop");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->role_id = wlvif->role_id;
cmd->channel = wlvif->channel;
if (wlvif->band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->bandwidth = wlcore_get_native_channel_type(wlvif->channel_type);
ret = wl1271_cmd_send(wl,
start ? CMD_CAC_START : CMD_CAC_STOP,
cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send cac command");
goto out_free;
}
out_free:
kfree(cmd);
return ret;
}
int wl18xx_cmd_radar_detection_debug(struct wl1271 *wl, u8 channel)
{
struct wl18xx_cmd_dfs_radar_debug *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd radar detection debug (chan %d)",
channel);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->channel = channel;
ret = wl1271_cmd_send(wl, CMD_DFS_RADAR_DETECTION_DEBUG,
cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send radar detection debug command");
goto out_free;
}
out_free:
kfree(cmd);
return ret;
}
int wl18xx_cmd_dfs_master_restart(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl18xx_cmd_dfs_master_restart *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd dfs master restart (role %d)",
wlvif->role_id);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_DFS_MASTER_RESTART,
cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send dfs master restart command");
goto out_free;
}
out_free:
kfree(cmd);
return ret;
}
|
linux-master
|
drivers/net/wireless/ti/wl18xx/cmd.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl18xx
*
* Copyright (C) 2011 Texas Instruments
*/
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/ip.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/irq.h>
#include "../wlcore/wlcore.h"
#include "../wlcore/debug.h"
#include "../wlcore/io.h"
#include "../wlcore/acx.h"
#include "../wlcore/tx.h"
#include "../wlcore/rx.h"
#include "../wlcore/boot.h"
#include "reg.h"
#include "conf.h"
#include "cmd.h"
#include "acx.h"
#include "tx.h"
#include "wl18xx.h"
#include "io.h"
#include "scan.h"
#include "event.h"
#include "debugfs.h"
#define WL18XX_RX_CHECKSUM_MASK 0x40
static char *ht_mode_param = NULL;
static char *board_type_param = NULL;
static bool checksum_param = false;
static int num_rx_desc_param = -1;
/* phy parameters */
static int dc2dc_param = -1;
static int n_antennas_2_param = -1;
static int n_antennas_5_param = -1;
static int low_band_component_param = -1;
static int low_band_component_type_param = -1;
static int high_band_component_param = -1;
static int high_band_component_type_param = -1;
static int pwr_limit_reference_11_abg_param = -1;
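/*
 * These tables translate the firmware rate index (enum wl18xx_hw_rates
 * below) into mac80211's per-band rate index; MCS entries map directly
 * to the MCS number, and entries a band cannot use are marked
 * CONF_HW_RXTX_RATE_UNSUPPORTED.
 */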
static const u8 wl18xx_rate_to_idx_2ghz[] = {
/* MCS rates are used only with 11n */
15, /* WL18XX_CONF_HW_RXTX_RATE_MCS15 */
14, /* WL18XX_CONF_HW_RXTX_RATE_MCS14 */
13, /* WL18XX_CONF_HW_RXTX_RATE_MCS13 */
12, /* WL18XX_CONF_HW_RXTX_RATE_MCS12 */
11, /* WL18XX_CONF_HW_RXTX_RATE_MCS11 */
10, /* WL18XX_CONF_HW_RXTX_RATE_MCS10 */
9, /* WL18XX_CONF_HW_RXTX_RATE_MCS9 */
8, /* WL18XX_CONF_HW_RXTX_RATE_MCS8 */
7, /* WL18XX_CONF_HW_RXTX_RATE_MCS7 */
6, /* WL18XX_CONF_HW_RXTX_RATE_MCS6 */
5, /* WL18XX_CONF_HW_RXTX_RATE_MCS5 */
4, /* WL18XX_CONF_HW_RXTX_RATE_MCS4 */
3, /* WL18XX_CONF_HW_RXTX_RATE_MCS3 */
2, /* WL18XX_CONF_HW_RXTX_RATE_MCS2 */
1, /* WL18XX_CONF_HW_RXTX_RATE_MCS1 */
0, /* WL18XX_CONF_HW_RXTX_RATE_MCS0 */
11, /* WL18XX_CONF_HW_RXTX_RATE_54 */
10, /* WL18XX_CONF_HW_RXTX_RATE_48 */
9, /* WL18XX_CONF_HW_RXTX_RATE_36 */
8, /* WL18XX_CONF_HW_RXTX_RATE_24 */
/* TI-specific rate */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_22 */
7, /* WL18XX_CONF_HW_RXTX_RATE_18 */
6, /* WL18XX_CONF_HW_RXTX_RATE_12 */
3, /* WL18XX_CONF_HW_RXTX_RATE_11 */
5, /* WL18XX_CONF_HW_RXTX_RATE_9 */
4, /* WL18XX_CONF_HW_RXTX_RATE_6 */
2, /* WL18XX_CONF_HW_RXTX_RATE_5_5 */
1, /* WL18XX_CONF_HW_RXTX_RATE_2 */
0 /* WL18XX_CONF_HW_RXTX_RATE_1 */
};
static const u8 wl18xx_rate_to_idx_5ghz[] = {
/* MCS rates are used only with 11n */
15, /* WL18XX_CONF_HW_RXTX_RATE_MCS15 */
14, /* WL18XX_CONF_HW_RXTX_RATE_MCS14 */
13, /* WL18XX_CONF_HW_RXTX_RATE_MCS13 */
12, /* WL18XX_CONF_HW_RXTX_RATE_MCS12 */
11, /* WL18XX_CONF_HW_RXTX_RATE_MCS11 */
10, /* WL18XX_CONF_HW_RXTX_RATE_MCS10 */
9, /* WL18XX_CONF_HW_RXTX_RATE_MCS9 */
8, /* WL18XX_CONF_HW_RXTX_RATE_MCS8 */
7, /* WL18XX_CONF_HW_RXTX_RATE_MCS7 */
6, /* WL18XX_CONF_HW_RXTX_RATE_MCS6 */
5, /* WL18XX_CONF_HW_RXTX_RATE_MCS5 */
4, /* WL18XX_CONF_HW_RXTX_RATE_MCS4 */
3, /* WL18XX_CONF_HW_RXTX_RATE_MCS3 */
2, /* WL18XX_CONF_HW_RXTX_RATE_MCS2 */
1, /* WL18XX_CONF_HW_RXTX_RATE_MCS1 */
0, /* WL18XX_CONF_HW_RXTX_RATE_MCS0 */
7, /* WL18XX_CONF_HW_RXTX_RATE_54 */
6, /* WL18XX_CONF_HW_RXTX_RATE_48 */
5, /* WL18XX_CONF_HW_RXTX_RATE_36 */
4, /* WL18XX_CONF_HW_RXTX_RATE_24 */
/* TI-specific rate */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_22 */
3, /* WL18XX_CONF_HW_RXTX_RATE_18 */
2, /* WL18XX_CONF_HW_RXTX_RATE_12 */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_11 */
1, /* WL18XX_CONF_HW_RXTX_RATE_9 */
0, /* WL18XX_CONF_HW_RXTX_RATE_6 */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_5_5 */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_2 */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL18XX_CONF_HW_RXTX_RATE_1 */
};
static const u8 *wl18xx_band_rate_to_idx[] = {
[NL80211_BAND_2GHZ] = wl18xx_rate_to_idx_2ghz,
[NL80211_BAND_5GHZ] = wl18xx_rate_to_idx_5ghz
};
enum wl18xx_hw_rates {
WL18XX_CONF_HW_RXTX_RATE_MCS15 = 0,
WL18XX_CONF_HW_RXTX_RATE_MCS14,
WL18XX_CONF_HW_RXTX_RATE_MCS13,
WL18XX_CONF_HW_RXTX_RATE_MCS12,
WL18XX_CONF_HW_RXTX_RATE_MCS11,
WL18XX_CONF_HW_RXTX_RATE_MCS10,
WL18XX_CONF_HW_RXTX_RATE_MCS9,
WL18XX_CONF_HW_RXTX_RATE_MCS8,
WL18XX_CONF_HW_RXTX_RATE_MCS7,
WL18XX_CONF_HW_RXTX_RATE_MCS6,
WL18XX_CONF_HW_RXTX_RATE_MCS5,
WL18XX_CONF_HW_RXTX_RATE_MCS4,
WL18XX_CONF_HW_RXTX_RATE_MCS3,
WL18XX_CONF_HW_RXTX_RATE_MCS2,
WL18XX_CONF_HW_RXTX_RATE_MCS1,
WL18XX_CONF_HW_RXTX_RATE_MCS0,
WL18XX_CONF_HW_RXTX_RATE_54,
WL18XX_CONF_HW_RXTX_RATE_48,
WL18XX_CONF_HW_RXTX_RATE_36,
WL18XX_CONF_HW_RXTX_RATE_24,
WL18XX_CONF_HW_RXTX_RATE_22,
WL18XX_CONF_HW_RXTX_RATE_18,
WL18XX_CONF_HW_RXTX_RATE_12,
WL18XX_CONF_HW_RXTX_RATE_11,
WL18XX_CONF_HW_RXTX_RATE_9,
WL18XX_CONF_HW_RXTX_RATE_6,
WL18XX_CONF_HW_RXTX_RATE_5_5,
WL18XX_CONF_HW_RXTX_RATE_2,
WL18XX_CONF_HW_RXTX_RATE_1,
WL18XX_CONF_HW_RXTX_RATE_MAX,
};
static struct wlcore_conf wl18xx_conf = {
.sg = {
.params = {
[WL18XX_CONF_SG_PARAM_0] = 0,
/* Configuration Parameters */
[WL18XX_CONF_SG_ANTENNA_CONFIGURATION] = 0,
[WL18XX_CONF_SG_ZIGBEE_COEX] = 0,
[WL18XX_CONF_SG_TIME_SYNC] = 0,
[WL18XX_CONF_SG_PARAM_4] = 0,
[WL18XX_CONF_SG_PARAM_5] = 0,
[WL18XX_CONF_SG_PARAM_6] = 0,
[WL18XX_CONF_SG_PARAM_7] = 0,
[WL18XX_CONF_SG_PARAM_8] = 0,
[WL18XX_CONF_SG_PARAM_9] = 0,
[WL18XX_CONF_SG_PARAM_10] = 0,
[WL18XX_CONF_SG_PARAM_11] = 0,
[WL18XX_CONF_SG_PARAM_12] = 0,
[WL18XX_CONF_SG_PARAM_13] = 0,
[WL18XX_CONF_SG_PARAM_14] = 0,
[WL18XX_CONF_SG_PARAM_15] = 0,
[WL18XX_CONF_SG_PARAM_16] = 0,
[WL18XX_CONF_SG_PARAM_17] = 0,
[WL18XX_CONF_SG_PARAM_18] = 0,
[WL18XX_CONF_SG_PARAM_19] = 0,
[WL18XX_CONF_SG_PARAM_20] = 0,
[WL18XX_CONF_SG_PARAM_21] = 0,
[WL18XX_CONF_SG_PARAM_22] = 0,
[WL18XX_CONF_SG_PARAM_23] = 0,
[WL18XX_CONF_SG_PARAM_24] = 0,
[WL18XX_CONF_SG_PARAM_25] = 0,
/* Active Scan Parameters */
[WL18XX_CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
[WL18XX_CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
[WL18XX_CONF_SG_PARAM_28] = 0,
/* Passive Scan Parameters */
[WL18XX_CONF_SG_PARAM_29] = 0,
[WL18XX_CONF_SG_PARAM_30] = 0,
[WL18XX_CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
/* Passive Scan in Dual Antenna Parameters */
[WL18XX_CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
[WL18XX_CONF_SG_BEACON_HV3_COLL_TH_IN_PASSIVE_SCAN] = 0,
[WL18XX_CONF_SG_TX_RX_PROTECT_BW_IN_PASSIVE_SCAN] = 0,
/* General Parameters */
[WL18XX_CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
[WL18XX_CONF_SG_PARAM_36] = 0,
[WL18XX_CONF_SG_BEACON_MISS_PERCENT] = 60,
[WL18XX_CONF_SG_PARAM_38] = 0,
[WL18XX_CONF_SG_RXT] = 1200,
[WL18XX_CONF_SG_UNUSED] = 0,
[WL18XX_CONF_SG_ADAPTIVE_RXT_TXT] = 1,
[WL18XX_CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
[WL18XX_CONF_SG_HV3_MAX_SERVED] = 6,
[WL18XX_CONF_SG_PARAM_44] = 0,
[WL18XX_CONF_SG_PARAM_45] = 0,
[WL18XX_CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
[WL18XX_CONF_SG_GEMINI_PARAM_47] = 0,
[WL18XX_CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 0,
/* AP Parameters */
[WL18XX_CONF_SG_AP_BEACON_MISS_TX] = 3,
[WL18XX_CONF_SG_PARAM_50] = 0,
[WL18XX_CONF_SG_AP_BEACON_WINDOW_INTERVAL] = 2,
[WL18XX_CONF_SG_AP_CONNECTION_PROTECTION_TIME] = 30,
[WL18XX_CONF_SG_PARAM_53] = 0,
[WL18XX_CONF_SG_PARAM_54] = 0,
/* CTS Diluting Parameters */
[WL18XX_CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
[WL18XX_CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
[WL18XX_CONF_SG_TEMP_PARAM_1] = 0,
[WL18XX_CONF_SG_TEMP_PARAM_2] = 0,
[WL18XX_CONF_SG_TEMP_PARAM_3] = 0,
[WL18XX_CONF_SG_TEMP_PARAM_4] = 0,
[WL18XX_CONF_SG_TEMP_PARAM_5] = 0,
[WL18XX_CONF_SG_TEMP_PARAM_6] = 0,
[WL18XX_CONF_SG_TEMP_PARAM_7] = 0,
[WL18XX_CONF_SG_TEMP_PARAM_8] = 0,
[WL18XX_CONF_SG_TEMP_PARAM_9] = 0,
[WL18XX_CONF_SG_TEMP_PARAM_10] = 0,
},
.state = CONF_SG_PROTECTIVE,
},
.rx = {
.rx_msdu_life_time = 512000,
.packet_detection_threshold = 0,
.ps_poll_timeout = 15,
.upsd_timeout = 15,
.rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
.rx_cca_threshold = 0,
.irq_blk_threshold = 0xFFFF,
.irq_pkt_threshold = 0,
.irq_timeout = 600,
.queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
},
.tx = {
.tx_energy_detection = 0,
.sta_rc_conf = {
.enabled_rates = 0,
.short_retry_limit = 10,
.long_retry_limit = 10,
.aflags = 0,
},
.ac_conf_count = 4,
.ac_conf = {
[CONF_TX_AC_BE] = {
.ac = CONF_TX_AC_BE,
.cw_min = 15,
.cw_max = 63,
.aifsn = 3,
.tx_op_limit = 0,
},
[CONF_TX_AC_BK] = {
.ac = CONF_TX_AC_BK,
.cw_min = 15,
.cw_max = 63,
.aifsn = 7,
.tx_op_limit = 0,
},
[CONF_TX_AC_VI] = {
.ac = CONF_TX_AC_VI,
.cw_min = 15,
.cw_max = 63,
.aifsn = CONF_TX_AIFS_PIFS,
.tx_op_limit = 3008,
},
[CONF_TX_AC_VO] = {
.ac = CONF_TX_AC_VO,
.cw_min = 15,
.cw_max = 63,
.aifsn = CONF_TX_AIFS_PIFS,
.tx_op_limit = 1504,
},
},
.max_tx_retries = 100,
.ap_aging_period = 300,
.tid_conf_count = 4,
.tid_conf = {
[CONF_TX_AC_BE] = {
.queue_id = CONF_TX_AC_BE,
.channel_type = CONF_CHANNEL_TYPE_EDCF,
.tsid = CONF_TX_AC_BE,
.ps_scheme = CONF_PS_SCHEME_LEGACY,
.ack_policy = CONF_ACK_POLICY_LEGACY,
.apsd_conf = {0, 0},
},
[CONF_TX_AC_BK] = {
.queue_id = CONF_TX_AC_BK,
.channel_type = CONF_CHANNEL_TYPE_EDCF,
.tsid = CONF_TX_AC_BK,
.ps_scheme = CONF_PS_SCHEME_LEGACY,
.ack_policy = CONF_ACK_POLICY_LEGACY,
.apsd_conf = {0, 0},
},
[CONF_TX_AC_VI] = {
.queue_id = CONF_TX_AC_VI,
.channel_type = CONF_CHANNEL_TYPE_EDCF,
.tsid = CONF_TX_AC_VI,
.ps_scheme = CONF_PS_SCHEME_LEGACY,
.ack_policy = CONF_ACK_POLICY_LEGACY,
.apsd_conf = {0, 0},
},
[CONF_TX_AC_VO] = {
.queue_id = CONF_TX_AC_VO,
.channel_type = CONF_CHANNEL_TYPE_EDCF,
.tsid = CONF_TX_AC_VO,
.ps_scheme = CONF_PS_SCHEME_LEGACY,
.ack_policy = CONF_ACK_POLICY_LEGACY,
.apsd_conf = {0, 0},
},
},
.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
.tx_compl_timeout = 350,
.tx_compl_threshold = 10,
.basic_rate = CONF_HW_BIT_RATE_1MBPS,
.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
.tmpl_short_retry_limit = 10,
.tmpl_long_retry_limit = 10,
.tx_watchdog_timeout = 5000,
.slow_link_thold = 3,
.fast_link_thold = 30,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
.listen_interval = 1,
.suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
.suspend_listen_interval = 3,
.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
.bcn_filt_ie_count = 3,
.bcn_filt_ie = {
[0] = {
.ie = WLAN_EID_CHANNEL_SWITCH,
.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
},
[1] = {
.ie = WLAN_EID_HT_OPERATION,
.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
},
[2] = {
.ie = WLAN_EID_ERP_INFO,
.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
},
},
.synch_fail_thold = 12,
.bss_lose_timeout = 400,
.beacon_rx_timeout = 10000,
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
.ps_poll_threshold = 10,
.bet_enable = CONF_BET_MODE_ENABLE,
.bet_max_consecutive = 50,
.psm_entry_retries = 8,
.psm_exit_retries = 16,
.psm_entry_nullfunc_retries = 3,
.dynamic_ps_timeout = 1500,
.forced_ps = false,
.keep_alive_interval = 55000,
.max_listen_interval = 20,
.sta_sleep_auth = WL1271_PSM_ILLEGAL,
.suspend_rx_ba_activity = 0,
},
.itrim = {
.enable = false,
.timeout = 50000,
},
.pm_config = {
.host_clk_settling_time = 5000,
.host_fast_wakeup_support = CONF_FAST_WAKEUP_DISABLE,
},
.roam_trigger = {
.trigger_pacing = 1,
.avg_weight_rssi_beacon = 20,
.avg_weight_rssi_data = 10,
.avg_weight_snr_beacon = 20,
.avg_weight_snr_data = 10,
},
.scan = {
.min_dwell_time_active = 7500,
.max_dwell_time_active = 30000,
.min_dwell_time_active_long = 25000,
.max_dwell_time_active_long = 50000,
.dwell_time_passive = 100000,
.dwell_time_dfs = 150000,
.num_probe_reqs = 2,
.split_scan_timeout = 50000,
},
.sched_scan = {
/*
* Values are in TU/1000, but since the sched scan FW command
* params are in TUs, rounding up may occur.
*/
.base_dwell_time = 7500,
.max_dwell_time_delta = 22500,
/* based on 250 bits per probe @1Mbps */
.dwell_time_delta_per_probe = 2000,
/* based on 250 bits per probe @6Mbps (plus a bit more) */
.dwell_time_delta_per_probe_5 = 350,
.dwell_time_passive = 100000,
.dwell_time_dfs = 150000,
.num_probe_reqs = 2,
.rssi_threshold = -90,
.snr_threshold = 0,
.num_short_intervals = SCAN_MAX_SHORT_INTERVALS,
.long_interval = 30000,
},
.ht = {
.rx_ba_win_size = 32,
.tx_ba_win_size = 64,
.inactivity_timeout = 10000,
.tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
},
.mem = {
.num_stations = 1,
.ssid_profiles = 1,
.rx_block_num = 40,
.tx_min_block_num = 40,
.dynamic_memory = 1,
.min_req_tx_blocks = 45,
.min_req_rx_blocks = 22,
.tx_min = 27,
},
.fm_coex = {
.enable = true,
.swallow_period = 5,
.n_divider_fref_set_1 = 0xff, /* default */
.n_divider_fref_set_2 = 12,
.m_divider_fref_set_1 = 0xffff,
.m_divider_fref_set_2 = 148, /* default */
.coex_pll_stabilization_time = 0xffffffff, /* default */
.ldo_stabilization_time = 0xffff, /* default */
.fm_disturbed_band_margin = 0xff, /* default */
.swallow_clk_diff = 0xff, /* default */
},
.rx_streaming = {
.duration = 150,
.queues = 0x1,
.interval = 20,
.always = 0,
},
.fwlog = {
.mode = WL12XX_FWLOG_CONTINUOUS,
.mem_blocks = 0,
.severity = 0,
.timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
.output = WL12XX_FWLOG_OUTPUT_DBG_PINS,
.threshold = 0,
},
.rate = {
.rate_retry_score = 32000,
.per_add = 8192,
.per_th1 = 2048,
.per_th2 = 4096,
.max_per = 8100,
.inverse_curiosity_factor = 5,
.tx_fail_low_th = 4,
.tx_fail_high_th = 10,
.per_alpha_shift = 4,
.per_add_shift = 13,
.per_beta1_shift = 10,
.per_beta2_shift = 8,
.rate_check_up = 2,
.rate_check_down = 12,
.rate_retry_policy = {
0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00,
},
},
.hangover = {
.recover_time = 0,
.hangover_period = 20,
.dynamic_mode = 1,
.early_termination_mode = 1,
.max_period = 20,
.min_period = 1,
.increase_delta = 1,
.decrease_delta = 2,
.quiet_time = 4,
.increase_time = 1,
.window_size = 16,
},
.recovery = {
.bug_on_recovery = 0,
.no_recovery = 0,
},
};
static struct wl18xx_priv_conf wl18xx_default_priv_conf = {
.ht = {
.mode = HT_MODE_WIDE,
},
.phy = {
.phy_standalone = 0x00,
.primary_clock_setting_time = 0x05,
.clock_valid_on_wake_up = 0x00,
.secondary_clock_setting_time = 0x05,
.board_type = BOARD_TYPE_HDK_18XX,
.auto_detect = 0x00,
.dedicated_fem = FEM_NONE,
.low_band_component = COMPONENT_3_WAY_SWITCH,
.low_band_component_type = 0x05,
.high_band_component = COMPONENT_2_WAY_SWITCH,
.high_band_component_type = 0x09,
.tcxo_ldo_voltage = 0x00,
.xtal_itrim_val = 0x04,
.srf_state = 0x00,
.io_configuration = 0x01,
.sdio_configuration = 0x00,
.settings = 0x00,
.enable_clpc = 0x00,
.enable_tx_low_pwr_on_siso_rdl = 0x00,
.rx_profile = 0x00,
.pwr_limit_reference_11_abg = 0x64,
.per_chan_pwr_limit_arr_11abg = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff, 0xff, 0xff, 0xff },
.pwr_limit_reference_11p = 0x64,
.per_chan_bo_mode_11_abg = { 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00,
0x00 },
.per_chan_bo_mode_11_p = { 0x00, 0x00, 0x00, 0x00 },
.per_chan_pwr_limit_arr_11p = { 0xff, 0xff, 0xff, 0xff,
0xff, 0xff, 0xff },
.psat = 0,
.external_pa_dc2dc = 0,
.number_of_assembled_ant2_4 = 2,
.number_of_assembled_ant5 = 1,
.low_power_val = 0xff,
.med_power_val = 0xff,
.high_power_val = 0xff,
.low_power_val_2nd = 0xff,
.med_power_val_2nd = 0xff,
.high_power_val_2nd = 0xff,
.tx_rf_margin = 1,
},
.ap_sleep = { /* disabled by default */
.idle_duty_cycle = 0,
.connected_duty_cycle = 0,
.max_stations_thresh = 0,
.idle_conn_thresh = 0,
},
};
static const struct wlcore_partition_set wl18xx_ptable[PART_TABLE_LEN] = {
[PART_TOP_PRCM_ELP_SOC] = {
.mem = { .start = 0x00A00000, .size = 0x00012000 },
.reg = { .start = 0x00807000, .size = 0x00005000 },
.mem2 = { .start = 0x00800000, .size = 0x0000B000 },
.mem3 = { .start = 0x00401594, .size = 0x00001020 },
},
[PART_DOWN] = {
.mem = { .start = 0x00000000, .size = 0x00014000 },
.reg = { .start = 0x00810000, .size = 0x0000BFFF },
.mem2 = { .start = 0x00000000, .size = 0x00000000 },
.mem3 = { .start = 0x00000000, .size = 0x00000000 },
},
[PART_BOOT] = {
.mem = { .start = 0x00700000, .size = 0x0000030c },
.reg = { .start = 0x00802000, .size = 0x00014578 },
.mem2 = { .start = 0x00B00404, .size = 0x00001000 },
.mem3 = { .start = 0x00C00000, .size = 0x00000400 },
},
[PART_WORK] = {
.mem = { .start = 0x00800000, .size = 0x000050FC },
.reg = { .start = 0x00B00404, .size = 0x00001000 },
.mem2 = { .start = 0x00C00000, .size = 0x00000400 },
.mem3 = { .start = 0x00401594, .size = 0x00001020 },
},
[PART_PHY_INIT] = {
.mem = { .start = WL18XX_PHY_INIT_MEM_ADDR,
.size = WL18XX_PHY_INIT_MEM_SIZE },
.reg = { .start = 0x00000000, .size = 0x00000000 },
.mem2 = { .start = 0x00000000, .size = 0x00000000 },
.mem3 = { .start = 0x00000000, .size = 0x00000000 },
},
};
static const int wl18xx_rtable[REG_TABLE_LEN] = {
[REG_ECPU_CONTROL] = WL18XX_REG_ECPU_CONTROL,
[REG_INTERRUPT_NO_CLEAR] = WL18XX_REG_INTERRUPT_NO_CLEAR,
[REG_INTERRUPT_ACK] = WL18XX_REG_INTERRUPT_ACK,
[REG_COMMAND_MAILBOX_PTR] = WL18XX_REG_COMMAND_MAILBOX_PTR,
[REG_EVENT_MAILBOX_PTR] = WL18XX_REG_EVENT_MAILBOX_PTR,
[REG_INTERRUPT_TRIG] = WL18XX_REG_INTERRUPT_TRIG_H,
[REG_INTERRUPT_MASK] = WL18XX_REG_INTERRUPT_MASK,
[REG_PC_ON_RECOVERY] = WL18XX_SCR_PAD4,
[REG_CHIP_ID_B] = WL18XX_REG_CHIP_ID_B,
[REG_CMD_MBOX_ADDRESS] = WL18XX_CMD_MBOX_ADDRESS,
/* data access memory addresses, used with partition translation */
[REG_SLV_MEM_DATA] = WL18XX_SLV_MEM_DATA,
[REG_SLV_REG_DATA] = WL18XX_SLV_REG_DATA,
/* raw data access memory addresses */
[REG_RAW_FW_STATUS_ADDR] = WL18XX_FW_STATUS_ADDR,
};
static const struct wl18xx_clk_cfg wl18xx_clk_table_coex[NUM_CLOCK_CONFIGS] = {
[CLOCK_CONFIG_16_2_M] = { 8, 121, 0, 0, false },
[CLOCK_CONFIG_16_368_M] = { 8, 120, 0, 0, false },
[CLOCK_CONFIG_16_8_M] = { 8, 117, 0, 0, false },
[CLOCK_CONFIG_19_2_M] = { 10, 128, 0, 0, false },
[CLOCK_CONFIG_26_M] = { 11, 104, 0, 0, false },
[CLOCK_CONFIG_32_736_M] = { 8, 120, 0, 0, false },
[CLOCK_CONFIG_33_6_M] = { 8, 117, 0, 0, false },
[CLOCK_CONFIG_38_468_M] = { 10, 128, 0, 0, false },
[CLOCK_CONFIG_52_M] = { 11, 104, 0, 0, false },
};
static const struct wl18xx_clk_cfg wl18xx_clk_table[NUM_CLOCK_CONFIGS] = {
[CLOCK_CONFIG_16_2_M] = { 7, 104, 801, 4, true },
[CLOCK_CONFIG_16_368_M] = { 9, 132, 3751, 4, true },
[CLOCK_CONFIG_16_8_M] = { 7, 100, 0, 0, false },
[CLOCK_CONFIG_19_2_M] = { 8, 100, 0, 0, false },
[CLOCK_CONFIG_26_M] = { 13, 120, 0, 0, false },
[CLOCK_CONFIG_32_736_M] = { 9, 132, 3751, 4, true },
[CLOCK_CONFIG_33_6_M] = { 7, 100, 0, 0, false },
[CLOCK_CONFIG_38_468_M] = { 8, 100, 0, 0, false },
[CLOCK_CONFIG_52_M] = { 13, 120, 0, 0, false },
};
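/*
 * Illustrative note: each { n, m, p, q, swallow } entry above is
 * consumed by wl18xx_set_clk() below, indexed by the clock type read
 * back from PRIMARY_CLK_DETECT. The coex table programs the coex PLL
 * N/M dividers; the WCS table programs the WCS PLL, and when .swallow
 * is true the fractional P and Q factors are each written as two
 * 16-bit halves. For example CLOCK_CONFIG_16_368_M = { 9, 132, 3751,
 * 4, true } yields N=9, M=132, P=3751 and Q=4.
 */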
/* TODO: maybe move to a new header file? */
#define WL18XX_FW_NAME "ti-connectivity/wl18xx-fw-4.bin"
static int wl18xx_identify_chip(struct wl1271 *wl)
{
int ret = 0;
switch (wl->chip.id) {
case CHIP_ID_185x_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (185x PG20)",
wl->chip.id);
wl->sr_fw_name = WL18XX_FW_NAME;
/* wl18xx uses the same firmware for PLT */
wl->plt_fw_name = WL18XX_FW_NAME;
wl->quirks |= WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN |
WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN |
WLCORE_QUIRK_TX_PAD_LAST_FRAME |
WLCORE_QUIRK_REGDOMAIN_CONF |
WLCORE_QUIRK_DUAL_PROBE_TMPL;
wlcore_set_min_fw_ver(wl, WL18XX_CHIP_VER,
WL18XX_IFTYPE_VER, WL18XX_MAJOR_VER,
WL18XX_SUBTYPE_VER, WL18XX_MINOR_VER,
/* there's no separate multi-role FW */
0, 0, 0, 0);
break;
case CHIP_ID_185x_PG10:
wl1271_warning("chip id 0x%x (185x PG10) is deprecated",
wl->chip.id);
ret = -ENODEV;
goto out;
default:
wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
ret = -ENODEV;
goto out;
}
wl->fw_mem_block_size = 272;
wl->fwlog_end = 0x40000000;
wl->scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
wl->scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
wl->sched_scan_templ_id_2_4 = CMD_TEMPL_PROBE_REQ_2_4_PERIODIC;
wl->sched_scan_templ_id_5 = CMD_TEMPL_PROBE_REQ_5_PERIODIC;
wl->max_channels_5 = WL18XX_MAX_CHANNELS_5GHZ;
wl->ba_rx_session_count_max = WL18XX_RX_BA_MAX_SESSIONS;
out:
return ret;
}
static int wl18xx_set_clk(struct wl1271 *wl)
{
u16 clk_freq;
int ret;
ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
if (ret < 0)
goto out;
/* TODO: PG2: apparently we need to read the clk type */
ret = wl18xx_top_reg_read(wl, PRIMARY_CLK_DETECT, &clk_freq);
if (ret < 0)
goto out;
wl1271_debug(DEBUG_BOOT, "clock freq %d (%d, %d, %d, %d, %s)", clk_freq,
wl18xx_clk_table[clk_freq].n, wl18xx_clk_table[clk_freq].m,
wl18xx_clk_table[clk_freq].p, wl18xx_clk_table[clk_freq].q,
wl18xx_clk_table[clk_freq].swallow ? "swallow" : "spit");
/* coex PLL configuration */
ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_N,
wl18xx_clk_table_coex[clk_freq].n);
if (ret < 0)
goto out;
ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_M,
wl18xx_clk_table_coex[clk_freq].m);
if (ret < 0)
goto out;
/* bypass the swallowing logic */
ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
PLLSH_COEX_PLL_SWALLOW_EN_VAL1);
if (ret < 0)
goto out;
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_N,
wl18xx_clk_table[clk_freq].n);
if (ret < 0)
goto out;
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_M,
wl18xx_clk_table[clk_freq].m);
if (ret < 0)
goto out;
if (wl18xx_clk_table[clk_freq].swallow) {
/* first the 16 lower bits */
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_Q_FACTOR_CFG_1,
wl18xx_clk_table[clk_freq].q &
PLLSH_WCS_PLL_Q_FACTOR_CFG_1_MASK);
if (ret < 0)
goto out;
/* then the upper 16 bits, masked */
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_Q_FACTOR_CFG_2,
(wl18xx_clk_table[clk_freq].q >> 16) &
PLLSH_WCS_PLL_Q_FACTOR_CFG_2_MASK);
if (ret < 0)
goto out;
/* first the 16 lower bits */
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_P_FACTOR_CFG_1,
wl18xx_clk_table[clk_freq].p &
PLLSH_WCS_PLL_P_FACTOR_CFG_1_MASK);
if (ret < 0)
goto out;
/* then the upper 16 bits, masked */
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_P_FACTOR_CFG_2,
(wl18xx_clk_table[clk_freq].p >> 16) &
PLLSH_WCS_PLL_P_FACTOR_CFG_2_MASK);
if (ret < 0)
goto out;
} else {
ret = wl18xx_top_reg_write(wl, PLLSH_WCS_PLL_SWALLOW_EN,
PLLSH_WCS_PLL_SWALLOW_EN_VAL2);
if (ret < 0)
goto out;
}
/* choose WCS PLL */
ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_SEL,
PLLSH_WL_PLL_SEL_WCS_PLL);
if (ret < 0)
goto out;
/* enable both PLLs */
ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL1);
if (ret < 0)
goto out;
udelay(1000);
/* disable coex PLL */
ret = wl18xx_top_reg_write(wl, PLLSH_WL_PLL_EN, PLLSH_WL_PLL_EN_VAL2);
if (ret < 0)
goto out;
/* reset the swallowing logic */
ret = wl18xx_top_reg_write(wl, PLLSH_COEX_PLL_SWALLOW_EN,
PLLSH_COEX_PLL_SWALLOW_EN_VAL2);
out:
return ret;
}
static int wl18xx_boot_soft_reset(struct wl1271 *wl)
{
int ret;
/* disable Rx/Tx */
ret = wlcore_write32(wl, WL18XX_ENABLE, 0x0);
if (ret < 0)
goto out;
/* disable auto calibration on start */
ret = wlcore_write32(wl, WL18XX_SPARE_A2, 0xffff);
out:
return ret;
}
static int wl18xx_pre_boot(struct wl1271 *wl)
{
int ret;
ret = wl18xx_set_clk(wl);
if (ret < 0)
goto out;
/* Continue the ELP wake up sequence */
ret = wlcore_write32(wl, WL18XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
if (ret < 0)
goto out;
udelay(500);
ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
if (ret < 0)
goto out;
/* Disable interrupts */
ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
if (ret < 0)
goto out;
ret = wl18xx_boot_soft_reset(wl);
out:
return ret;
}
static int wl18xx_pre_upload(struct wl1271 *wl)
{
u32 tmp;
int ret;
u16 irq_invert;
BUILD_BUG_ON(sizeof(struct wl18xx_mac_and_phy_params) >
WL18XX_PHY_INIT_MEM_SIZE);
ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
if (ret < 0)
goto out;
/* TODO: check if this is all needed */
ret = wlcore_write32(wl, WL18XX_EEPROMLESS_IND, WL18XX_EEPROMLESS_IND);
if (ret < 0)
goto out;
ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &tmp);
if (ret < 0)
goto out;
wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
ret = wlcore_read32(wl, WL18XX_SCR_PAD2, &tmp);
if (ret < 0)
goto out;
/*
* Workaround for FDSP code RAM corruption (needed for PG2.1
* and newer; for older chips it's a NOP). Change FDSP clock
* settings so that it's muxed to the ATGP clock instead of
* its own clock.
*/
ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
if (ret < 0)
goto out;
/* disable FDSP clock */
ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
MEM_FDSP_CLK_120_DISABLE);
if (ret < 0)
goto out;
/* set ATPG clock toward FDSP Code RAM rather than its own clock */
ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
MEM_FDSP_CODERAM_FUNC_CLK_SEL);
if (ret < 0)
goto out;
/* re-enable FDSP clock */
ret = wlcore_write32(wl, WL18XX_PHY_FPGA_SPARE_1,
MEM_FDSP_CLK_120_ENABLE);
if (ret < 0)
goto out;
ret = irq_get_trigger_type(wl->irq);
if ((ret == IRQ_TYPE_LEVEL_LOW) || (ret == IRQ_TYPE_EDGE_FALLING)) {
wl1271_info("using inverted interrupt logic: %d", ret);
ret = wlcore_set_partition(wl,
&wl->ptable[PART_TOP_PRCM_ELP_SOC]);
if (ret < 0)
goto out;
ret = wl18xx_top_reg_read(wl, TOP_FN0_CCCR_REG_32, &irq_invert);
if (ret < 0)
goto out;
irq_invert |= BIT(1);
ret = wl18xx_top_reg_write(wl, TOP_FN0_CCCR_REG_32, irq_invert);
if (ret < 0)
goto out;
ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
}
out:
return ret;
}
static int wl18xx_set_mac_and_phy(struct wl1271 *wl)
{
struct wl18xx_priv *priv = wl->priv;
struct wl18xx_mac_and_phy_params *params;
int ret;
params = kmemdup(&priv->conf.phy, sizeof(*params), GFP_KERNEL);
if (!params) {
ret = -ENOMEM;
goto out;
}
ret = wlcore_set_partition(wl, &wl->ptable[PART_PHY_INIT]);
if (ret < 0)
goto out;
ret = wlcore_write(wl, WL18XX_PHY_INIT_MEM_ADDR, params,
sizeof(*params), false);
out:
kfree(params);
return ret;
}
static int wl18xx_enable_interrupts(struct wl1271 *wl)
{
u32 event_mask, intr_mask;
int ret;
event_mask = WL18XX_ACX_EVENTS_VECTOR;
intr_mask = WL18XX_INTR_MASK;
ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, event_mask);
if (ret < 0)
goto out;
wlcore_enable_interrupts(wl);
ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
WL1271_ACX_INTR_ALL & ~intr_mask);
if (ret < 0)
goto disable_interrupts;
return ret;
disable_interrupts:
wlcore_disable_interrupts(wl);
out:
return ret;
}
static int wl18xx_boot(struct wl1271 *wl)
{
int ret;
ret = wl18xx_pre_boot(wl);
if (ret < 0)
goto out;
ret = wl18xx_pre_upload(wl);
if (ret < 0)
goto out;
ret = wlcore_boot_upload_firmware(wl);
if (ret < 0)
goto out;
ret = wl18xx_set_mac_and_phy(wl);
if (ret < 0)
goto out;
wl->event_mask = BSS_LOSS_EVENT_ID |
SCAN_COMPLETE_EVENT_ID |
RADAR_DETECTED_EVENT_ID |
RSSI_SNR_TRIGGER_0_EVENT_ID |
PERIODIC_SCAN_COMPLETE_EVENT_ID |
PERIODIC_SCAN_REPORT_EVENT_ID |
DUMMY_PACKET_EVENT_ID |
PEER_REMOVE_COMPLETE_EVENT_ID |
BA_SESSION_RX_CONSTRAINT_EVENT_ID |
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
INACTIVE_STA_EVENT_ID |
CHANNEL_SWITCH_COMPLETE_EVENT_ID |
DFS_CHANNELS_CONFIG_COMPLETE_EVENT |
SMART_CONFIG_SYNC_EVENT_ID |
SMART_CONFIG_DECODE_EVENT_ID |
TIME_SYNC_EVENT_ID |
FW_LOGGER_INDICATION |
RX_BA_WIN_SIZE_CHANGE_EVENT_ID;
wl->ap_event_mask = MAX_TX_FAILURE_EVENT_ID;
ret = wlcore_boot_run_firmware(wl);
if (ret < 0)
goto out;
ret = wl18xx_enable_interrupts(wl);
out:
return ret;
}
static int wl18xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
void *buf, size_t len)
{
struct wl18xx_priv *priv = wl->priv;
memcpy(priv->cmd_buf, buf, len);
memset(priv->cmd_buf + len, 0, WL18XX_CMD_MAX_SIZE - len);
return wlcore_write(wl, cmd_box_addr, priv->cmd_buf,
WL18XX_CMD_MAX_SIZE, false);
}
static int wl18xx_ack_event(struct wl1271 *wl)
{
return wlcore_write_reg(wl, REG_INTERRUPT_TRIG,
WL18XX_INTR_TRIG_EVENT_ACK);
}
static u32 wl18xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
{
u32 blk_size = WL18XX_TX_HW_BLOCK_SIZE;
return (len + blk_size - 1) / blk_size + spare_blks;
}
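/*
 * Worked example (illustrative; the real WL18XX_TX_HW_BLOCK_SIZE is
 * defined elsewhere): assuming a 256-byte block, a 300-byte frame
 * needs (300 + 255) / 256 = 2 data blocks, so with one spare block
 * this returns 3. The expression is the usual round-up division,
 * i.e. DIV_ROUND_UP(len, blk_size) + spare_blks.
 */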
static void
wl18xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
u32 blks, u32 spare_blks)
{
desc->wl18xx_mem.total_mem_blocks = blks;
}
static void
wl18xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
struct sk_buff *skb)
{
desc->length = cpu_to_le16(skb->len);
/* if only the last frame is to be padded, we unset this bit on Tx */
if (wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME)
desc->wl18xx_mem.ctrl = WL18XX_TX_CTRL_NOT_PADDED;
else
desc->wl18xx_mem.ctrl = 0;
wl1271_debug(DEBUG_TX, "tx_fill_hdr: hlid: %d "
"len: %d life: %d mem: %d", desc->hlid,
le16_to_cpu(desc->length),
le16_to_cpu(desc->life_time),
desc->wl18xx_mem.total_mem_blocks);
}
static enum wl_rx_buf_align
wl18xx_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
{
if (rx_desc & RX_BUF_PADDED_PAYLOAD)
return WLCORE_RX_BUF_PADDED;
return WLCORE_RX_BUF_ALIGNED;
}
static u32 wl18xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
u32 data_len)
{
struct wl1271_rx_descriptor *desc = rx_data;
/* invalid packet */
if (data_len < sizeof(*desc))
return 0;
return data_len - sizeof(*desc);
}
static void wl18xx_tx_immediate_completion(struct wl1271 *wl)
{
wl18xx_tx_immediate_complete(wl);
}
static int wl18xx_set_host_cfg_bitmap(struct wl1271 *wl, u32 extra_mem_blk)
{
int ret;
u32 sdio_align_size = 0;
u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE |
HOST_IF_CFG_ADD_RX_ALIGNMENT;
/* Enable Tx SDIO padding */
if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN) {
host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
sdio_align_size = WL12XX_BUS_BLOCK_SIZE;
}
/* Enable Rx SDIO padding */
if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN) {
host_cfg_bitmap |= HOST_IF_CFG_RX_PAD_TO_SDIO_BLK;
sdio_align_size = WL12XX_BUS_BLOCK_SIZE;
}
ret = wl18xx_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap,
sdio_align_size, extra_mem_blk,
WL18XX_HOST_IF_LEN_SIZE_FIELD);
if (ret < 0)
return ret;
return 0;
}
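/*
 * Illustrative note: for 185x PG20 both blocksize-align quirks are set
 * in wl18xx_identify_chip(), so the bitmap sent to the firmware
 * carries HOST_IF_CFG_RX_FIFO_ENABLE, HOST_IF_CFG_ADD_RX_ALIGNMENT and
 * both TX/RX pad-to-SDIO-block flags, with sdio_align_size set to
 * WL12XX_BUS_BLOCK_SIZE.
 */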
static int wl18xx_hw_init(struct wl1271 *wl)
{
int ret;
struct wl18xx_priv *priv = wl->priv;
/* (re)init private structures. Relevant on recovery as well. */
priv->last_fw_rls_idx = 0;
priv->extra_spare_key_count = 0;
/* set the default amount of spare blocks in the bitmap */
ret = wl18xx_set_host_cfg_bitmap(wl, WL18XX_TX_HW_BLOCK_SPARE);
if (ret < 0)
return ret;
/* set the dynamic fw traces bitmap */
ret = wl18xx_acx_dynamic_fw_traces(wl);
if (ret < 0)
return ret;
if (checksum_param) {
ret = wl18xx_acx_set_checksum_state(wl);
if (ret != 0)
return ret;
}
return ret;
}
static void wl18xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
struct wl_fw_status *fw_status)
{
struct wl18xx_fw_status *int_fw_status = raw_fw_status;
fw_status->intr = le32_to_cpu(int_fw_status->intr);
fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
fw_status->tx_results_counter = int_fw_status->tx_results_counter;
fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
fw_status->link_fast_bitmap =
le32_to_cpu(int_fw_status->link_fast_bitmap);
fw_status->total_released_blks =
le32_to_cpu(int_fw_status->total_released_blks);
fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
fw_status->counters.tx_released_pkts =
int_fw_status->counters.tx_released_pkts;
fw_status->counters.tx_lnk_free_pkts =
int_fw_status->counters.tx_lnk_free_pkts;
fw_status->counters.tx_voice_released_blks =
int_fw_status->counters.tx_voice_released_blks;
fw_status->counters.tx_last_rate =
int_fw_status->counters.tx_last_rate;
fw_status->counters.tx_last_rate_mbps =
int_fw_status->counters.tx_last_rate_mbps;
fw_status->counters.hlid =
int_fw_status->counters.hlid;
fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
fw_status->priv = &int_fw_status->priv;
}
static void wl18xx_set_tx_desc_csum(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
struct sk_buff *skb)
{
u32 ip_hdr_offset;
struct iphdr *ip_hdr;
if (!checksum_param) {
desc->wl18xx_checksum_data = 0;
return;
}
if (skb->ip_summed != CHECKSUM_PARTIAL) {
desc->wl18xx_checksum_data = 0;
return;
}
ip_hdr_offset = skb_network_header(skb) - skb_mac_header(skb);
if (WARN_ON(ip_hdr_offset >= (1<<7))) {
desc->wl18xx_checksum_data = 0;
return;
}
desc->wl18xx_checksum_data = ip_hdr_offset << 1;
/* FW is interested only in the LSB of the protocol field: TCP=0, UDP=1 */
ip_hdr = (void *)skb_network_header(skb);
desc->wl18xx_checksum_data |= (ip_hdr->protocol & 0x01);
}
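/*
 * Encoding sketch (illustrative): for a TCP frame whose IP header
 * starts, say, 26 bytes after the MAC header, the field becomes
 * (26 << 1) | 0 = 0x34; for UDP (protocol 17, LSB 1) it would be
 * (26 << 1) | 1 = 0x35. Offsets of 128 or more do not fit the 7-bit
 * field and fall back to no offload via the WARN_ON above.
 */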
static void wl18xx_set_rx_csum(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct sk_buff *skb)
{
if (desc->status & WL18XX_RX_CHECKSUM_MASK)
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
static bool wl18xx_is_mimo_supported(struct wl1271 *wl)
{
struct wl18xx_priv *priv = wl->priv;
/* only support MIMO with multiple antennas, and when SISO
* is not forced through config
*/
return (priv->conf.phy.number_of_assembled_ant2_4 >= 2) &&
(priv->conf.ht.mode != HT_MODE_WIDE) &&
(priv->conf.ht.mode != HT_MODE_SISO20);
}
/*
* TODO: instead of having these two functions to get the rate mask,
* we should modify the wlvif->rate_set instead
*/
static u32 wl18xx_sta_get_ap_rate_mask(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
u32 hw_rate_set = wlvif->rate_set;
if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
hw_rate_set |= CONF_TX_RATE_USE_WIDE_CHAN;
/* we don't support MIMO in wide-channel mode */
hw_rate_set &= ~CONF_TX_MIMO_RATES;
} else if (wl18xx_is_mimo_supported(wl)) {
wl1271_debug(DEBUG_ACX, "using MIMO channel rate mask");
hw_rate_set |= CONF_TX_MIMO_RATES;
}
return hw_rate_set;
}
static u32 wl18xx_ap_get_mimo_wide_rate_mask(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
wl1271_debug(DEBUG_ACX, "using wide channel rate mask");
/* sanity check - we don't support this */
if (WARN_ON(wlvif->band != NL80211_BAND_5GHZ))
return 0;
return CONF_TX_RATE_USE_WIDE_CHAN;
} else if (wl18xx_is_mimo_supported(wl) &&
wlvif->band == NL80211_BAND_2GHZ) {
wl1271_debug(DEBUG_ACX, "using MIMO rate mask");
/*
* we don't care about HT channel here - if a peer doesn't
* support MIMO, we won't enable it in its rates
*/
return CONF_TX_MIMO_RATES;
} else {
return 0;
}
}
static const char *wl18xx_rdl_name(enum wl18xx_rdl_num rdl_num)
{
switch (rdl_num) {
case RDL_1_HP:
return "183xH";
case RDL_2_SP:
return "183x or 180x";
case RDL_3_HP:
return "187xH";
case RDL_4_SP:
return "187x";
case RDL_5_SP:
return "RDL11 - Not Supported";
case RDL_6_SP:
return "180xD";
case RDL_7_SP:
return "RDL13 - Not Supported (1893Q)";
case RDL_8_SP:
return "18xxQ";
case RDL_NONE:
return "UNTRIMMED";
default:
return "UNKNOWN";
}
}
static int wl18xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
{
u32 fuse;
s8 rom = 0, metal = 0, pg_ver = 0, rdl_ver = 0, package_type = 0;
int ret;
ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
if (ret < 0)
goto out;
ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_2_3, &fuse);
if (ret < 0)
goto out;
package_type = (fuse >> WL18XX_PACKAGE_TYPE_OFFSET) & 1;
ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_1_3, &fuse);
if (ret < 0)
goto out;
pg_ver = (fuse & WL18XX_PG_VER_MASK) >> WL18XX_PG_VER_OFFSET;
rom = (fuse & WL18XX_ROM_VER_MASK) >> WL18XX_ROM_VER_OFFSET;
if ((rom <= 0xE) && (package_type == WL18XX_PACKAGE_TYPE_WSP))
metal = (fuse & WL18XX_METAL_VER_MASK) >>
WL18XX_METAL_VER_OFFSET;
else
metal = (fuse & WL18XX_NEW_METAL_VER_MASK) >>
WL18XX_NEW_METAL_VER_OFFSET;
ret = wlcore_read32(wl, WL18XX_REG_FUSE_DATA_2_3, &fuse);
if (ret < 0)
goto out;
rdl_ver = (fuse & WL18XX_RDL_VER_MASK) >> WL18XX_RDL_VER_OFFSET;
wl1271_info("wl18xx HW: %s, PG %d.%d (ROM 0x%x)",
wl18xx_rdl_name(rdl_ver), pg_ver, metal, rom);
if (ver)
*ver = pg_ver;
ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
out:
return ret;
}
static int wl18xx_load_conf_file(struct device *dev, struct wlcore_conf *conf,
struct wl18xx_priv_conf *priv_conf,
const char *file)
{
struct wlcore_conf_file *conf_file;
const struct firmware *fw;
int ret;
ret = request_firmware(&fw, file, dev);
if (ret < 0) {
wl1271_error("could not get configuration binary %s: %d",
file, ret);
return ret;
}
if (fw->size != WL18XX_CONF_SIZE) {
wl1271_error("%s configuration binary size is wrong, expected %zu got %zu",
file, WL18XX_CONF_SIZE, fw->size);
ret = -EINVAL;
goto out_release;
}
conf_file = (struct wlcore_conf_file *) fw->data;
if (conf_file->header.magic != cpu_to_le32(WL18XX_CONF_MAGIC)) {
wl1271_error("configuration binary file magic number mismatch, "
"expected 0x%0x got 0x%0x", WL18XX_CONF_MAGIC,
conf_file->header.magic);
ret = -EINVAL;
goto out_release;
}
if (conf_file->header.version != cpu_to_le32(WL18XX_CONF_VERSION)) {
wl1271_error("configuration binary file version not supported, "
"expected 0x%08x got 0x%08x",
WL18XX_CONF_VERSION, conf_file->header.version);
ret = -EINVAL;
goto out_release;
}
memcpy(conf, &conf_file->core, sizeof(*conf));
memcpy(priv_conf, &conf_file->priv, sizeof(*priv_conf));
out_release:
release_firmware(fw);
return ret;
}
static int wl18xx_conf_init(struct wl1271 *wl, struct device *dev)
{
struct platform_device *pdev = wl->pdev;
struct wlcore_platdev_data *pdata = dev_get_platdata(&pdev->dev);
struct wl18xx_priv *priv = wl->priv;
if (wl18xx_load_conf_file(dev, &wl->conf, &priv->conf,
pdata->family->cfg_name) < 0) {
wl1271_warning("falling back to default config");
/* apply driver default configuration */
memcpy(&wl->conf, &wl18xx_conf, sizeof(wl->conf));
/* apply default private configuration */
memcpy(&priv->conf, &wl18xx_default_priv_conf,
sizeof(priv->conf));
}
return 0;
}
static int wl18xx_plt_init(struct wl1271 *wl)
{
int ret;
/* calibrator based auto/fem detect not supported for 18xx */
if (wl->plt_mode == PLT_FEM_DETECT) {
wl1271_error("wl18xx_plt_init: PLT FEM_DETECT not supported");
return -EINVAL;
}
ret = wlcore_write32(wl, WL18XX_SCR_PAD8, WL18XX_SCR_PAD8_PLT);
if (ret < 0)
return ret;
return wl->ops->boot(wl);
}
static int wl18xx_get_mac(struct wl1271 *wl)
{
u32 mac1, mac2;
int ret;
ret = wlcore_set_partition(wl, &wl->ptable[PART_TOP_PRCM_ELP_SOC]);
if (ret < 0)
goto out;
ret = wlcore_read32(wl, WL18XX_REG_FUSE_BD_ADDR_1, &mac1);
if (ret < 0)
goto out;
ret = wlcore_read32(wl, WL18XX_REG_FUSE_BD_ADDR_2, &mac2);
if (ret < 0)
goto out;
/* these are the two parts of the BD_ADDR */
wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
((mac1 & 0xff000000) >> 24);
wl->fuse_nic_addr = (mac1 & 0xffffff);
if (!wl->fuse_oui_addr && !wl->fuse_nic_addr) {
u8 mac[ETH_ALEN];
eth_random_addr(mac);
wl->fuse_oui_addr = (mac[0] << 16) + (mac[1] << 8) + mac[2];
wl->fuse_nic_addr = (mac[3] << 16) + (mac[4] << 8) + mac[5];
wl1271_warning("MAC address from fuse not available, using random locally administered addresses.");
}
ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
out:
return ret;
}
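/*
 * Fuse layout sketch (illustrative): for BD_ADDR 00:11:22:33:44:55 the
 * fuses would hold mac2 low half = 0x0011 and mac1 = 0x22334455,
 * giving fuse_oui_addr = (0x0011 << 8) + 0x22 = 0x001122 and
 * fuse_nic_addr = 0x334455.
 */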
static int wl18xx_handle_static_data(struct wl1271 *wl,
struct wl1271_static_data *static_data)
{
struct wl18xx_static_data_priv *static_data_priv =
(struct wl18xx_static_data_priv *) static_data->priv;
strncpy(wl->chip.phy_fw_ver_str, static_data_priv->phy_version,
sizeof(wl->chip.phy_fw_ver_str));
/* make sure the string is NULL-terminated */
wl->chip.phy_fw_ver_str[sizeof(wl->chip.phy_fw_ver_str) - 1] = '\0';
wl1271_info("PHY firmware version: %s", static_data_priv->phy_version);
return 0;
}
static int wl18xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
{
struct wl18xx_priv *priv = wl->priv;
/* If we have keys requiring extra spare, indulge them */
if (priv->extra_spare_key_count)
return WL18XX_TX_HW_EXTRA_BLOCK_SPARE;
return WL18XX_TX_HW_BLOCK_SPARE;
}
static int wl18xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf)
{
struct wl18xx_priv *priv = wl->priv;
bool change_spare = false, special_enc;
int ret;
wl1271_debug(DEBUG_CRYPT, "extra spare keys before: %d",
priv->extra_spare_key_count);
special_enc = key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
ret = wlcore_set_key(wl, cmd, vif, sta, key_conf);
if (ret < 0)
goto out;
/*
* when adding the first or removing the last GEM/TKIP key,
* we have to adjust the number of spare blocks.
*/
if (special_enc) {
if (cmd == SET_KEY) {
/* first key */
change_spare = (priv->extra_spare_key_count == 0);
priv->extra_spare_key_count++;
} else if (cmd == DISABLE_KEY) {
/* last key */
change_spare = (priv->extra_spare_key_count == 1);
priv->extra_spare_key_count--;
}
}
wl1271_debug(DEBUG_CRYPT, "extra spare keys after: %d",
priv->extra_spare_key_count);
if (!change_spare)
goto out;
/* key is now set, change the spare blocks */
if (priv->extra_spare_key_count)
ret = wl18xx_set_host_cfg_bitmap(wl,
WL18XX_TX_HW_EXTRA_BLOCK_SPARE);
else
ret = wl18xx_set_host_cfg_bitmap(wl,
WL18XX_TX_HW_BLOCK_SPARE);
out:
return ret;
}
static u32 wl18xx_pre_pkt_send(struct wl1271 *wl,
u32 buf_offset, u32 last_len)
{
if (wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) {
struct wl1271_tx_hw_descr *last_desc;
/* get the last TX HW descriptor written to the aggr buf */
last_desc = (struct wl1271_tx_hw_descr *)(wl->aggr_buf +
buf_offset - last_len);
/* the last frame is padded up to an SDIO block */
last_desc->wl18xx_mem.ctrl &= ~WL18XX_TX_CTRL_NOT_PADDED;
return ALIGN(buf_offset, WL12XX_BUS_BLOCK_SIZE);
}
/* no modifications */
return buf_offset;
}
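/*
 * Example (illustrative, assuming the usual 512-byte
 * WL12XX_BUS_BLOCK_SIZE): with buf_offset = 1300 the aggregate is
 * padded up to ALIGN(1300, 512) = 1536 bytes, and clearing
 * WL18XX_TX_CTRL_NOT_PADDED on the last descriptor tells the firmware
 * to expect that padding.
 */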
static void wl18xx_sta_rc_update(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
bool wide = wlvif->rc_update_bw >= IEEE80211_STA_RX_BW_40;
wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update wide %d", wide);
/* sanity */
if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
return;
/* ignore the change before association */
if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
return;
/*
* If we started out as wide, we can change the operation mode. If we
* thought this was a 20MHz AP, we have to reconnect.
*/
if (wlvif->sta.role_chan_type == NL80211_CHAN_HT40MINUS ||
wlvif->sta.role_chan_type == NL80211_CHAN_HT40PLUS)
wl18xx_acx_peer_ht_operation_mode(wl, wlvif->sta.hlid, wide);
else
ieee80211_connection_loss(wl12xx_wlvif_to_vif(wlvif));
}
static int wl18xx_set_peer_cap(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation,
u32 rate_set, u8 hlid)
{
return wl18xx_acx_set_peer_cap(wl, ht_cap, allow_ht_operation,
rate_set, hlid);
}
static bool wl18xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
struct wl1271_link *lnk)
{
u8 thold;
struct wl18xx_fw_status_priv *status_priv =
(struct wl18xx_fw_status_priv *)wl->fw_status->priv;
unsigned long suspend_bitmap;
/* if we don't have the link map yet, assume they are all low prio */
if (!status_priv)
return false;
/* suspended links are never high priority */
suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
if (test_bit(hlid, &suspend_bitmap))
return false;
/* the priority thresholds are taken from FW */
if (test_bit(hlid, &wl->fw_fast_lnk_map) &&
!test_bit(hlid, &wl->ap_fw_ps_map))
thold = status_priv->tx_fast_link_prio_threshold;
else
thold = status_priv->tx_slow_link_prio_threshold;
return lnk->allocated_pkts < thold;
}
static bool wl18xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
struct wl1271_link *lnk)
{
u8 thold;
struct wl18xx_fw_status_priv *status_priv =
(struct wl18xx_fw_status_priv *)wl->fw_status->priv;
unsigned long suspend_bitmap;
/* if we don't have the link map yet, assume they are all low prio */
if (!status_priv)
return true;
suspend_bitmap = le32_to_cpu(status_priv->link_suspend_bitmap);
if (test_bit(hlid, &suspend_bitmap))
thold = status_priv->tx_suspend_threshold;
else if (test_bit(hlid, &wl->fw_fast_lnk_map) &&
!test_bit(hlid, &wl->ap_fw_ps_map))
thold = status_priv->tx_fast_stop_threshold;
else
thold = status_priv->tx_slow_stop_threshold;
return lnk->allocated_pkts < thold;
}
static u32 wl18xx_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
{
return hwaddr & ~0x80000000;
}
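/*
 * e.g. (illustrative) wl18xx_convert_hwaddr(wl, 0x80C00000) returns
 * 0x00C00000: bit 31 is a firmware-side marker and must be cleared
 * before the host uses the address.
 */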
static int wl18xx_setup(struct wl1271 *wl);
static struct wlcore_ops wl18xx_ops = {
.setup = wl18xx_setup,
.identify_chip = wl18xx_identify_chip,
.boot = wl18xx_boot,
.plt_init = wl18xx_plt_init,
.trigger_cmd = wl18xx_trigger_cmd,
.ack_event = wl18xx_ack_event,
.wait_for_event = wl18xx_wait_for_event,
.process_mailbox_events = wl18xx_process_mailbox_events,
.calc_tx_blocks = wl18xx_calc_tx_blocks,
.set_tx_desc_blocks = wl18xx_set_tx_desc_blocks,
.set_tx_desc_data_len = wl18xx_set_tx_desc_data_len,
.get_rx_buf_align = wl18xx_get_rx_buf_align,
.get_rx_packet_len = wl18xx_get_rx_packet_len,
.tx_immediate_compl = wl18xx_tx_immediate_completion,
.tx_delayed_compl = NULL,
.hw_init = wl18xx_hw_init,
.convert_fw_status = wl18xx_convert_fw_status,
.set_tx_desc_csum = wl18xx_set_tx_desc_csum,
.get_pg_ver = wl18xx_get_pg_ver,
.set_rx_csum = wl18xx_set_rx_csum,
.sta_get_ap_rate_mask = wl18xx_sta_get_ap_rate_mask,
.ap_get_mimo_wide_rate_mask = wl18xx_ap_get_mimo_wide_rate_mask,
.get_mac = wl18xx_get_mac,
.debugfs_init = wl18xx_debugfs_add_files,
.scan_start = wl18xx_scan_start,
.scan_stop = wl18xx_scan_stop,
.sched_scan_start = wl18xx_sched_scan_start,
.sched_scan_stop = wl18xx_scan_sched_scan_stop,
.handle_static_data = wl18xx_handle_static_data,
.get_spare_blocks = wl18xx_get_spare_blocks,
.set_key = wl18xx_set_key,
.channel_switch = wl18xx_cmd_channel_switch,
.pre_pkt_send = wl18xx_pre_pkt_send,
.sta_rc_update = wl18xx_sta_rc_update,
.set_peer_cap = wl18xx_set_peer_cap,
.convert_hwaddr = wl18xx_convert_hwaddr,
.lnk_high_prio = wl18xx_lnk_high_prio,
.lnk_low_prio = wl18xx_lnk_low_prio,
.smart_config_start = wl18xx_cmd_smart_config_start,
.smart_config_stop = wl18xx_cmd_smart_config_stop,
.smart_config_set_group_key = wl18xx_cmd_smart_config_set_group_key,
.interrupt_notify = wl18xx_acx_interrupt_notify_config,
.rx_ba_filter = wl18xx_acx_rx_ba_filter,
.ap_sleep = wl18xx_acx_ap_sleep,
.set_cac = wl18xx_cmd_set_cac,
.dfs_master_restart = wl18xx_cmd_dfs_master_restart,
};
/* HT cap appropriate for wide channels in 2GHz */
static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_2ghz = {
.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_SUP_WIDTH_20_40 | IEEE80211_HT_CAP_DSSSCCK40 |
IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
.mcs = {
.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
.rx_highest = cpu_to_le16(150),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
},
};
/* HT cap appropriate for wide channels in 5GHz */
static struct ieee80211_sta_ht_cap wl18xx_siso40_ht_cap_5ghz = {
.cap = IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 |
IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
.mcs = {
.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
.rx_highest = cpu_to_le16(150),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
},
};
/* HT cap appropriate for SISO 20 */
static struct ieee80211_sta_ht_cap wl18xx_siso20_ht_cap = {
.cap = IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
.mcs = {
.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
.rx_highest = cpu_to_le16(72),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
},
};
/* HT cap appropriate for MIMO rates in a 20MHz channel */
static struct ieee80211_sta_ht_cap wl18xx_mimo_ht_cap_2ghz = {
.cap = IEEE80211_HT_CAP_SGI_20 |
IEEE80211_HT_CAP_GRN_FLD,
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_16,
.mcs = {
.rx_mask = { 0xff, 0xff, 0, 0, 0, 0, 0, 0, 0, 0, },
.rx_highest = cpu_to_le16(144),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
},
};
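/*
 * Illustrative note: in the caps above, rx_mask[0] = 0xff advertises
 * MCS 0-7 (one stream) and rx_mask[1] = 0xff adds MCS 8-15 (two
 * streams). The rx_highest values match the top short-GI PHY rates:
 * 150 Mbps is MCS7 at 40MHz, 144 Mbps is (rounded) MCS15 at 20MHz and
 * 72 Mbps is MCS7 at 20MHz.
 */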
static const struct ieee80211_iface_limit wl18xx_iface_limits[] = {
{
.max = 2,
.types = BIT(NL80211_IFTYPE_STATION),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_AP)
| BIT(NL80211_IFTYPE_P2P_GO)
| BIT(NL80211_IFTYPE_P2P_CLIENT)
#ifdef CONFIG_MAC80211_MESH
| BIT(NL80211_IFTYPE_MESH_POINT)
#endif
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
},
};
static const struct ieee80211_iface_limit wl18xx_iface_ap_limits[] = {
{
.max = 2,
.types = BIT(NL80211_IFTYPE_AP),
},
#ifdef CONFIG_MAC80211_MESH
{
.max = 1,
.types = BIT(NL80211_IFTYPE_MESH_POINT),
},
#endif
{
.max = 1,
.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
},
};
static const struct ieee80211_iface_combination
wl18xx_iface_combinations[] = {
{
.max_interfaces = 3,
.limits = wl18xx_iface_limits,
.n_limits = ARRAY_SIZE(wl18xx_iface_limits),
.num_different_channels = 2,
},
{
.max_interfaces = 2,
.limits = wl18xx_iface_ap_limits,
.n_limits = ARRAY_SIZE(wl18xx_iface_ap_limits),
.num_different_channels = 1,
.radar_detect_widths = BIT(NL80211_CHAN_NO_HT) |
BIT(NL80211_CHAN_HT20) |
BIT(NL80211_CHAN_HT40MINUS) |
BIT(NL80211_CHAN_HT40PLUS),
}
};
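/*
 * Illustrative note: the first combination allows e.g. two STA
 * interfaces plus a P2P device across two channels; the second allows
 * dual-AP (or AP + mesh) on a single channel, with radar detection
 * supported for the listed widths so those interfaces may operate on
 * DFS channels.
 */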
static int wl18xx_setup(struct wl1271 *wl)
{
struct wl18xx_priv *priv = wl->priv;
int ret;
BUILD_BUG_ON(WL18XX_MAX_LINKS > WLCORE_MAX_LINKS);
BUILD_BUG_ON(WL18XX_MAX_AP_STATIONS > WL18XX_MAX_LINKS);
BUILD_BUG_ON(WL18XX_CONF_SG_PARAMS_MAX > WLCORE_CONF_SG_PARAMS_MAX);
wl->rtable = wl18xx_rtable;
wl->num_tx_desc = WL18XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL18XX_NUM_RX_DESCRIPTORS;
wl->num_links = WL18XX_MAX_LINKS;
wl->max_ap_stations = WL18XX_MAX_AP_STATIONS;
wl->iface_combinations = wl18xx_iface_combinations;
wl->n_iface_combinations = ARRAY_SIZE(wl18xx_iface_combinations);
wl->num_mac_addr = WL18XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl18xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL18XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL18XX_CONF_HW_RXTX_RATE_MCS0;
wl->fw_status_len = sizeof(struct wl18xx_fw_status);
wl->fw_status_priv_len = sizeof(struct wl18xx_fw_status_priv);
wl->stats.fw_stats_len = sizeof(struct wl18xx_acx_statistics);
wl->static_data_priv_len = sizeof(struct wl18xx_static_data_priv);
if (num_rx_desc_param != -1)
wl->num_rx_desc = num_rx_desc_param;
ret = wl18xx_conf_init(wl, wl->dev);
if (ret < 0)
return ret;
/* If the module param is set, update it in conf */
if (board_type_param) {
if (!strcmp(board_type_param, "fpga")) {
priv->conf.phy.board_type = BOARD_TYPE_FPGA_18XX;
} else if (!strcmp(board_type_param, "hdk")) {
priv->conf.phy.board_type = BOARD_TYPE_HDK_18XX;
} else if (!strcmp(board_type_param, "dvp")) {
priv->conf.phy.board_type = BOARD_TYPE_DVP_18XX;
} else if (!strcmp(board_type_param, "evb")) {
priv->conf.phy.board_type = BOARD_TYPE_EVB_18XX;
} else if (!strcmp(board_type_param, "com8")) {
priv->conf.phy.board_type = BOARD_TYPE_COM8_18XX;
} else {
wl1271_error("invalid board type '%s'",
board_type_param);
return -EINVAL;
}
}
if (priv->conf.phy.board_type >= NUM_BOARD_TYPES) {
wl1271_error("invalid board type '%d'",
priv->conf.phy.board_type);
return -EINVAL;
}
if (low_band_component_param != -1)
priv->conf.phy.low_band_component = low_band_component_param;
if (low_band_component_type_param != -1)
priv->conf.phy.low_band_component_type =
low_band_component_type_param;
if (high_band_component_param != -1)
priv->conf.phy.high_band_component = high_band_component_param;
if (high_band_component_type_param != -1)
priv->conf.phy.high_band_component_type =
high_band_component_type_param;
if (pwr_limit_reference_11_abg_param != -1)
priv->conf.phy.pwr_limit_reference_11_abg =
pwr_limit_reference_11_abg_param;
if (n_antennas_2_param != -1)
priv->conf.phy.number_of_assembled_ant2_4 = n_antennas_2_param;
if (n_antennas_5_param != -1)
priv->conf.phy.number_of_assembled_ant5 = n_antennas_5_param;
if (dc2dc_param != -1)
priv->conf.phy.external_pa_dc2dc = dc2dc_param;
if (ht_mode_param) {
if (!strcmp(ht_mode_param, "default"))
priv->conf.ht.mode = HT_MODE_DEFAULT;
else if (!strcmp(ht_mode_param, "wide"))
priv->conf.ht.mode = HT_MODE_WIDE;
else if (!strcmp(ht_mode_param, "siso20"))
priv->conf.ht.mode = HT_MODE_SISO20;
else {
wl1271_error("invalid ht_mode '%s'", ht_mode_param);
return -EINVAL;
}
}
if (priv->conf.ht.mode == HT_MODE_DEFAULT) {
/*
* Only support MIMO with multiple antennas. Fall back to
* SISO40 otherwise.
*/
if (wl18xx_is_mimo_supported(wl))
wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_mimo_ht_cap_2ghz);
else
wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_siso40_ht_cap_2ghz);
/* 5GHz is always wide */
wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
&wl18xx_siso40_ht_cap_5ghz);
} else if (priv->conf.ht.mode == HT_MODE_WIDE) {
wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_siso40_ht_cap_2ghz);
wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
&wl18xx_siso40_ht_cap_5ghz);
} else if (priv->conf.ht.mode == HT_MODE_SISO20) {
wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ,
&wl18xx_siso20_ht_cap);
wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ,
&wl18xx_siso20_ht_cap);
}
if (!checksum_param) {
wl18xx_ops.set_rx_csum = NULL;
wl18xx_ops.init_vif = NULL;
}
/* Enable 11a Band only if we have 5G antennas */
wl->enable_11a = (priv->conf.phy.number_of_assembled_ant5 != 0);
return 0;
}
static int wl18xx_probe(struct platform_device *pdev)
{
struct wl1271 *wl;
struct ieee80211_hw *hw;
int ret;
hw = wlcore_alloc_hw(sizeof(struct wl18xx_priv),
WL18XX_AGGR_BUFFER_SIZE,
sizeof(struct wl18xx_event_mailbox));
if (IS_ERR(hw)) {
wl1271_error("can't allocate hw");
ret = PTR_ERR(hw);
goto out;
}
wl = hw->priv;
wl->ops = &wl18xx_ops;
wl->ptable = wl18xx_ptable;
ret = wlcore_probe(wl, pdev);
if (ret)
goto out_free;
return ret;
out_free:
wlcore_free_hw(wl);
out:
return ret;
}
static const struct platform_device_id wl18xx_id_table[] = {
{ "wl18xx", 0 },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(platform, wl18xx_id_table);
static struct platform_driver wl18xx_driver = {
.probe = wl18xx_probe,
.remove = wlcore_remove,
.id_table = wl18xx_id_table,
.driver = {
.name = "wl18xx_driver",
}
};
module_platform_driver(wl18xx_driver);
module_param_named(ht_mode, ht_mode_param, charp, 0400);
MODULE_PARM_DESC(ht_mode, "Force HT mode: default, wide or siso20");
module_param_named(board_type, board_type_param, charp, 0400);
MODULE_PARM_DESC(board_type, "Board type: fpga, hdk (default), evb, com8 or "
"dvp");
module_param_named(checksum, checksum_param, bool, 0400);
MODULE_PARM_DESC(checksum, "Enable TCP checksum: boolean (defaults to false)");
module_param_named(dc2dc, dc2dc_param, int, 0400);
MODULE_PARM_DESC(dc2dc, "External DC2DC: u8 (defaults to 0)");
module_param_named(n_antennas_2, n_antennas_2_param, int, 0400);
MODULE_PARM_DESC(n_antennas_2,
"Number of installed 2.4GHz antennas: 1 (default) or 2");
module_param_named(n_antennas_5, n_antennas_5_param, int, 0400);
MODULE_PARM_DESC(n_antennas_5,
"Number of installed 5GHz antennas: 1 (default) or 2");
module_param_named(low_band_component, low_band_component_param, int, 0400);
MODULE_PARM_DESC(low_band_component, "Low band component: u8 "
"(default is 0x01)");
module_param_named(low_band_component_type, low_band_component_type_param,
int, 0400);
MODULE_PARM_DESC(low_band_component_type, "Low band component type: u8 "
"(default is 0x05 or 0x06 depending on the board_type)");
module_param_named(high_band_component, high_band_component_param, int, 0400);
MODULE_PARM_DESC(high_band_component, "High band component: u8, "
"(default is 0x01)");
module_param_named(high_band_component_type, high_band_component_type_param,
int, 0400);
MODULE_PARM_DESC(high_band_component_type, "High band component type: u8 "
"(default is 0x09)");
module_param_named(pwr_limit_reference_11_abg,
pwr_limit_reference_11_abg_param, int, 0400);
MODULE_PARM_DESC(pwr_limit_reference_11_abg, "Power limit reference: u8 "
"(default is 0xc8)");
module_param_named(num_rx_desc, num_rx_desc_param, int, 0400);
MODULE_PARM_DESC(num_rx_desc,
"Number of Rx descriptors: u8 (default is 32)");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <[email protected]>");
MODULE_FIRMWARE(WL18XX_FW_NAME);
|
linux-master
|
drivers/net/wireless/ti/wl18xx/main.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl12xx
*
* Copyright (C) 2012 Texas Instruments. All rights reserved.
*/
#include <net/genetlink.h>
#include "event.h"
#include "scan.h"
#include "conf.h"
#include "../wlcore/cmd.h"
#include "../wlcore/debug.h"
#include "../wlcore/vendor_cmd.h"
int wl18xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
bool *timeout)
{
u32 local_event;
switch (event) {
case WLCORE_EVENT_PEER_REMOVE_COMPLETE:
local_event = PEER_REMOVE_COMPLETE_EVENT_ID;
break;
case WLCORE_EVENT_DFS_CONFIG_COMPLETE:
local_event = DFS_CHANNELS_CONFIG_COMPLETE_EVENT;
break;
default:
/* event not implemented */
return 0;
}
return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout);
}
static const char *wl18xx_radar_type_decode(u8 radar_type)
{
switch (radar_type) {
case RADAR_TYPE_REGULAR:
return "REGULAR";
case RADAR_TYPE_CHIRP:
return "CHIRP";
case RADAR_TYPE_NONE:
default:
return "N/A";
}
}
static int wlcore_smart_config_sync_event(struct wl1271 *wl, u8 sync_channel,
u8 sync_band)
{
struct sk_buff *skb;
enum nl80211_band band;
int freq;
if (sync_band == WLCORE_BAND_5GHZ)
band = NL80211_BAND_5GHZ;
else
band = NL80211_BAND_2GHZ;
freq = ieee80211_channel_to_frequency(sync_channel, band);
wl1271_debug(DEBUG_EVENT,
"SMART_CONFIG_SYNC_EVENT_ID, freq: %d (chan: %d band %d)",
freq, sync_channel, sync_band);
skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, NULL, 20,
WLCORE_VENDOR_EVENT_SC_SYNC,
GFP_KERNEL);
	/* vendor event allocation can fail; don't dereference a NULL skb */
	if (!skb)
		return -ENOMEM;
	if (nla_put_u32(skb, WLCORE_VENDOR_ATTR_FREQ, freq)) {
kfree_skb(skb);
return -EMSGSIZE;
}
cfg80211_vendor_event(skb, GFP_KERNEL);
return 0;
}
static int wlcore_smart_config_decode_event(struct wl1271 *wl,
u8 ssid_len, u8 *ssid,
u8 pwd_len, u8 *pwd)
{
struct sk_buff *skb;
wl1271_debug(DEBUG_EVENT, "SMART_CONFIG_DECODE_EVENT_ID");
wl1271_dump_ascii(DEBUG_EVENT, "SSID:", ssid, ssid_len);
skb = cfg80211_vendor_event_alloc(wl->hw->wiphy, NULL,
ssid_len + pwd_len + 20,
WLCORE_VENDOR_EVENT_SC_DECODE,
GFP_KERNEL);
	/* vendor event allocation can fail; don't dereference a NULL skb */
	if (!skb)
		return -ENOMEM;
	if (nla_put(skb, WLCORE_VENDOR_ATTR_SSID, ssid_len, ssid) ||
nla_put(skb, WLCORE_VENDOR_ATTR_PSK, pwd_len, pwd)) {
kfree_skb(skb);
return -EMSGSIZE;
}
cfg80211_vendor_event(skb, GFP_KERNEL);
return 0;
}
static void wlcore_event_time_sync(struct wl1271 *wl,
u16 tsf_high_msb, u16 tsf_high_lsb,
u16 tsf_low_msb, u16 tsf_low_lsb)
{
u32 clock_low;
u32 clock_high;
clock_high = (tsf_high_msb << 16) | tsf_high_lsb;
clock_low = (tsf_low_msb << 16) | tsf_low_lsb;
wl1271_info("TIME_SYNC_EVENT_ID: clock_high %u, clock low %u",
clock_high, clock_low);
}
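/*
 * e.g. (illustrative) tsf_high_msb = 0x0001 and tsf_high_lsb = 0x2345
 * combine to clock_high = 0x00012345: the firmware reports the 64-bit
 * TSF as four 16-bit mailbox fields.
 */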
int wl18xx_process_mailbox_events(struct wl1271 *wl)
{
struct wl18xx_event_mailbox *mbox = wl->mbox;
u32 vector;
vector = le32_to_cpu(mbox->events_vector);
wl1271_debug(DEBUG_EVENT, "MBOX vector: 0x%x", vector);
if (vector & SCAN_COMPLETE_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "scan results: %d",
mbox->number_of_scan_results);
if (wl->scan_wlvif)
wl18xx_scan_completed(wl, wl->scan_wlvif);
}
if (vector & TIME_SYNC_EVENT_ID)
wlcore_event_time_sync(wl,
le16_to_cpu(mbox->time_sync_tsf_high_msb),
le16_to_cpu(mbox->time_sync_tsf_high_lsb),
le16_to_cpu(mbox->time_sync_tsf_low_msb),
le16_to_cpu(mbox->time_sync_tsf_low_lsb));
if (vector & RADAR_DETECTED_EVENT_ID) {
wl1271_info("radar event: channel %d type %s",
mbox->radar_channel,
wl18xx_radar_type_decode(mbox->radar_type));
if (!wl->radar_debug_mode)
ieee80211_radar_detected(wl->hw);
}
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
wl1271_debug(DEBUG_EVENT,
"PERIODIC_SCAN_REPORT_EVENT (results %d)",
mbox->number_of_sched_scan_results);
wlcore_scan_sched_scan_results(wl);
}
if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID)
wlcore_event_sched_scan_completed(wl, 1);
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID)
wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric);
if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)
wlcore_event_ba_rx_constraint(wl,
le16_to_cpu(mbox->rx_ba_role_id_bitmap),
le16_to_cpu(mbox->rx_ba_allowed_bitmap));
if (vector & BSS_LOSS_EVENT_ID)
wlcore_event_beacon_loss(wl,
le16_to_cpu(mbox->bss_loss_bitmap));
if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID)
wlcore_event_channel_switch(wl,
le16_to_cpu(mbox->channel_switch_role_id_bitmap),
true);
if (vector & DUMMY_PACKET_EVENT_ID)
wlcore_event_dummy_packet(wl);
/*
* "TX retries exceeded" has a different meaning according to mode.
* In AP mode the offending station is disconnected.
*/
if (vector & MAX_TX_FAILURE_EVENT_ID)
wlcore_event_max_tx_failure(wl,
le16_to_cpu(mbox->tx_retry_exceeded_bitmap));
if (vector & INACTIVE_STA_EVENT_ID)
wlcore_event_inactive_sta(wl,
le16_to_cpu(mbox->inactive_sta_bitmap));
if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID)
wlcore_event_roc_complete(wl);
if (vector & SMART_CONFIG_SYNC_EVENT_ID)
wlcore_smart_config_sync_event(wl, mbox->sc_sync_channel,
mbox->sc_sync_band);
if (vector & SMART_CONFIG_DECODE_EVENT_ID)
wlcore_smart_config_decode_event(wl,
mbox->sc_ssid_len,
mbox->sc_ssid,
mbox->sc_pwd_len,
mbox->sc_pwd);
if (vector & FW_LOGGER_INDICATION)
wlcore_event_fw_logger(wl);
if (vector & RX_BA_WIN_SIZE_CHANGE_EVENT_ID) {
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
u8 link_id = mbox->rx_ba_link_id;
u8 win_size = mbox->rx_ba_win_size;
const u8 *addr;
wlvif = wl->links[link_id].wlvif;
vif = wl12xx_wlvif_to_vif(wlvif);
/* Update RX aggregation window size and call
* MAC routine to stop active RX aggregations for this link
*/
if (wlvif->bss_type != BSS_TYPE_AP_BSS)
addr = vif->bss_conf.bssid;
else
addr = wl->links[link_id].addr;
sta = ieee80211_find_sta(vif, addr);
if (sta) {
sta->max_rx_aggregation_subframes = win_size;
ieee80211_stop_rx_ba_session(vif,
wl->links[link_id].ba_bitmap,
addr);
}
}
return 0;
}
|
linux-master
|
drivers/net/wireless/ti/wl18xx/event.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl18xx
*
* Copyright (C) 2011 Texas Instruments Inc.
*/
#include "../wlcore/wlcore.h"
#include "../wlcore/cmd.h"
#include "../wlcore/debug.h"
#include "../wlcore/acx.h"
#include "../wlcore/tx.h"
#include "wl18xx.h"
#include "tx.h"
static
void wl18xx_get_last_tx_rate(struct wl1271 *wl, struct ieee80211_vif *vif,
u8 band, struct ieee80211_tx_rate *rate, u8 hlid)
{
u8 fw_rate = wl->links[hlid].fw_rate_idx;
if (fw_rate > CONF_HW_RATE_INDEX_MAX) {
wl1271_error("last Tx rate invalid: %d", fw_rate);
rate->idx = 0;
rate->flags = 0;
return;
}
if (fw_rate <= CONF_HW_RATE_INDEX_54MBPS) {
rate->idx = fw_rate;
if (band == NL80211_BAND_5GHZ)
rate->idx -= CONF_HW_RATE_INDEX_6MBPS;
rate->flags = 0;
} else {
rate->flags = IEEE80211_TX_RC_MCS;
rate->idx = fw_rate - CONF_HW_RATE_INDEX_MCS0;
/* SGI modifier is counted as a separate rate */
if (fw_rate >= CONF_HW_RATE_INDEX_MCS7_SGI)
(rate->idx)--;
if (fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI)
(rate->idx)--;
/* this also covers the 40MHz SGI case (= MCS15) */
if (fw_rate == CONF_HW_RATE_INDEX_MCS7_SGI ||
fw_rate == CONF_HW_RATE_INDEX_MCS15_SGI)
rate->flags |= IEEE80211_TX_RC_SHORT_GI;
if (fw_rate > CONF_HW_RATE_INDEX_MCS7_SGI && vif) {
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
if (wlvif->channel_type == NL80211_CHAN_HT40MINUS ||
wlvif->channel_type == NL80211_CHAN_HT40PLUS) {
/* adjustment needed for range 0-7 */
rate->idx -= 8;
rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
}
}
}
}
static void wl18xx_tx_complete_packet(struct wl1271 *wl, u8 tx_stat_byte)
{
struct ieee80211_tx_info *info;
struct sk_buff *skb;
int id = tx_stat_byte & WL18XX_TX_STATUS_DESC_ID_MASK;
bool tx_success;
struct wl1271_tx_hw_descr *tx_desc;
/* check that the descriptor id is valid */
if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
wl1271_warning("illegal id in tx completion: %d", id);
return;
}
/* a zero bit indicates Tx success */
tx_success = !(tx_stat_byte & BIT(WL18XX_TX_STATUS_STAT_BIT_IDX));
skb = wl->tx_frames[id];
info = IEEE80211_SKB_CB(skb);
tx_desc = (struct wl1271_tx_hw_descr *)skb->data;
if (wl12xx_is_dummy_packet(wl, skb)) {
wl1271_free_tx_id(wl, id);
return;
}
/* update the TX status info */
if (tx_success && !(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
/*
 * pass info->control.vif while it is still valid, before filling out
 * the info->status structures that share the same memory
 */
wl18xx_get_last_tx_rate(wl, info->control.vif,
info->band,
&info->status.rates[0],
tx_desc->hlid);
info->status.rates[0].count = 1; /* no data about retries */
info->status.ack_signal = -1;
if (!tx_success)
wl->stats.retry_count++;
/*
* TODO: update sequence number for encryption? seems to be
* unsupported for now. needed for recovery with encryption.
*/
/* remove private header from packet */
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
/* remove TKIP header space if present */
if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data, hdrlen);
skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
}
wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p success %d",
id, skb, tx_success);
/* return the packet to the stack */
skb_queue_tail(&wl->deferred_tx_queue, skb);
queue_work(wl->freezable_wq, &wl->netstack_work);
wl1271_free_tx_id(wl, id);
}
void wl18xx_tx_immediate_complete(struct wl1271 *wl)
{
struct wl18xx_fw_status_priv *status_priv =
(struct wl18xx_fw_status_priv *)wl->fw_status->priv;
struct wl18xx_priv *priv = wl->priv;
u8 i, hlid;
/* nothing to do here */
if (priv->last_fw_rls_idx == status_priv->fw_release_idx)
return;
/* update rates per link */
hlid = wl->fw_status->counters.hlid;
if (hlid < WLCORE_MAX_LINKS) {
wl->links[hlid].fw_rate_idx =
wl->fw_status->counters.tx_last_rate;
wl->links[hlid].fw_rate_mbps =
wl->fw_status->counters.tx_last_rate_mbps;
}
/* freed Tx descriptors */
wl1271_debug(DEBUG_TX, "last released desc = %d, current idx = %d",
priv->last_fw_rls_idx, status_priv->fw_release_idx);
if (status_priv->fw_release_idx >= WL18XX_FW_MAX_TX_STATUS_DESC) {
wl1271_error("invalid desc release index %d",
status_priv->fw_release_idx);
WARN_ON(1);
return;
}
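/* Walk the released-descriptor ring as a circular buffer, from the
 * last index we handled up to (but not including) the firmware's
 * current release index, wrapping at WL18XX_FW_MAX_TX_STATUS_DESC. */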
for (i = priv->last_fw_rls_idx;
i != status_priv->fw_release_idx;
i = (i + 1) % WL18XX_FW_MAX_TX_STATUS_DESC) {
wl18xx_tx_complete_packet(wl,
status_priv->released_tx_desc[i]);
wl->tx_results_count++;
}
priv->last_fw_rls_idx = status_priv->fw_release_idx;
}
|
linux-master
|
drivers/net/wireless/ti/wl18xx/tx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl12xx
*
* Copyright (C) 2008-2009 Nokia Corporation
* Copyright (C) 2011 Texas Instruments Inc.
*/
#include "../wlcore/cmd.h"
#include "../wlcore/debug.h"
#include "../wlcore/acx.h"
#include "acx.h"
int wl1271_acx_host_if_cfg_bitmap(struct wl1271 *wl, u32 host_cfg_bitmap)
{
struct wl1271_acx_host_config_bitmap *bitmap_conf;
int ret;
bitmap_conf = kzalloc(sizeof(*bitmap_conf), GFP_KERNEL);
if (!bitmap_conf) {
ret = -ENOMEM;
goto out;
}
bitmap_conf->host_cfg_bitmap = cpu_to_le32(host_cfg_bitmap);
ret = wl1271_cmd_configure(wl, ACX_HOST_IF_CFG_BITMAP,
bitmap_conf, sizeof(*bitmap_conf));
if (ret < 0) {
wl1271_warning("wl1271 bitmap config opt failed: %d", ret);
goto out;
}
out:
kfree(bitmap_conf);
return ret;
}
|
linux-master
|
drivers/net/wireless/ti/wl12xx/acx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl12xx
*
* Copyright (C) 2009 Nokia Corporation
* Copyright (C) 2011-2012 Texas Instruments
*/
#include "../wlcore/debugfs.h"
#include "../wlcore/wlcore.h"
#include "wl12xx.h"
#include "acx.h"
#include "debugfs.h"
#define WL12XX_DEBUGFS_FWSTATS_FILE(a, b, c) \
DEBUGFS_FWSTATS_FILE(a, b, c, wl12xx_acx_statistics)
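/* Each WL12XX_DEBUGFS_FWSTATS_FILE(a, b, c) line below expands (via
 * DEBUGFS_FWSTATS_FILE) into the read-only debugfs file ops for the
 * wl12xx_acx_statistics field a.b, formatted with c. */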
WL12XX_DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rx, out_of_mem, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rx, hw_stuck, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rx, dropped, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rx, fcs_err, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rx, path_reset, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rx, reset_counter, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(dma, rx_requested, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(dma, rx_errors, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(dma, tx_requested, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(dma, tx_errors, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, fiqs, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_headers, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_rdys, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, irqs, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, tx_procs, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, decrypt_done, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, dma0_done, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, dma1_done, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, commands, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, rx_procs, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, pci_pm, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, wakeups, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(isr, low_rssi, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(wep, addr_key_count, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(wep, default_key_count, "%u");
/* skipping wep.reserved */
WL12XX_DEBUGFS_FWSTATS_FILE(wep, key_not_found, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(wep, packets, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(wep, interrupt, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, ps_enter, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, elp_enter, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, power_save_off, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, enable_ps, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, disable_ps, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, "%u");
/* skipping cont_miss_bcns_spread for now */
WL12XX_DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(mic, rx_pkts, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(mic, calc_failure, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(event, heart_beat, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(event, calibration, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mismatch, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(event, rx_pool, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(event, oom_late, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(event, tx_stuck, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data,
"%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, "%u");
WL12XX_DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, "%u");
int wl12xx_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
struct dentry *stats, *moddir;
moddir = debugfs_create_dir(KBUILD_MODNAME, rootdir);
stats = debugfs_create_dir("fw_stats", moddir);
DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
DEBUGFS_FWSTATS_ADD(rx, dropped);
DEBUGFS_FWSTATS_ADD(rx, fcs_err);
DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
DEBUGFS_FWSTATS_ADD(rx, path_reset);
DEBUGFS_FWSTATS_ADD(rx, reset_counter);
DEBUGFS_FWSTATS_ADD(dma, rx_requested);
DEBUGFS_FWSTATS_ADD(dma, rx_errors);
DEBUGFS_FWSTATS_ADD(dma, tx_requested);
DEBUGFS_FWSTATS_ADD(dma, tx_errors);
DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
DEBUGFS_FWSTATS_ADD(isr, fiqs);
DEBUGFS_FWSTATS_ADD(isr, rx_headers);
DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
DEBUGFS_FWSTATS_ADD(isr, irqs);
DEBUGFS_FWSTATS_ADD(isr, tx_procs);
DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
DEBUGFS_FWSTATS_ADD(isr, dma0_done);
DEBUGFS_FWSTATS_ADD(isr, dma1_done);
DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
DEBUGFS_FWSTATS_ADD(isr, commands);
DEBUGFS_FWSTATS_ADD(isr, rx_procs);
DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
DEBUGFS_FWSTATS_ADD(isr, pci_pm);
DEBUGFS_FWSTATS_ADD(isr, wakeups);
DEBUGFS_FWSTATS_ADD(isr, low_rssi);
DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
DEBUGFS_FWSTATS_ADD(wep, default_key_count);
/* skipping wep.reserved */
DEBUGFS_FWSTATS_ADD(wep, key_not_found);
DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
DEBUGFS_FWSTATS_ADD(wep, packets);
DEBUGFS_FWSTATS_ADD(wep, interrupt);
DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
/* skipping cont_miss_bcns_spread for now */
DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
DEBUGFS_FWSTATS_ADD(mic, calc_failure);
DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
DEBUGFS_FWSTATS_ADD(event, heart_beat);
DEBUGFS_FWSTATS_ADD(event, calibration);
DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
DEBUGFS_FWSTATS_ADD(event, rx_pool);
DEBUGFS_FWSTATS_ADD(event, oom_late);
DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
DEBUGFS_FWSTATS_ADD(event, tx_stuck);
DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
return 0;
}
|
linux-master
|
drivers/net/wireless/ti/wl12xx/debugfs.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl12xx
*
* Copyright (C) 2012 Texas Instruments. All rights reserved.
*/
#include <linux/ieee80211.h>
#include "scan.h"
#include "../wlcore/debug.h"
#include "../wlcore/tx.h"
static int wl1271_get_scan_channels(struct wl1271 *wl,
struct cfg80211_scan_request *req,
struct basic_scan_channel_params *channels,
enum nl80211_band band, bool passive)
{
struct conf_scan_settings *c = &wl->conf.scan;
int i, j;
u32 flags;
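/* i walks the cfg80211 request channels; j counts the entries
 * actually copied out, capped at WL1271_SCAN_MAX_CHANNELS. */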
for (i = 0, j = 0;
i < req->n_channels && j < WL1271_SCAN_MAX_CHANNELS;
i++) {
flags = req->channels[i]->flags;
if (!test_bit(i, wl->scan.scanned_ch) &&
!(flags & IEEE80211_CHAN_DISABLED) &&
(req->channels[i]->band == band) &&
/*
* In passive scans, we scan all remaining
* channels, even if not marked as such.
* In active scans, we only scan channels not
* marked as passive.
*/
(passive || !(flags & IEEE80211_CHAN_NO_IR))) {
wl1271_debug(DEBUG_SCAN, "band %d, center_freq %d ",
req->channels[i]->band,
req->channels[i]->center_freq);
wl1271_debug(DEBUG_SCAN, "hw_value %d, flags %X",
req->channels[i]->hw_value,
req->channels[i]->flags);
wl1271_debug(DEBUG_SCAN,
"max_antenna_gain %d, max_power %d",
req->channels[i]->max_antenna_gain,
req->channels[i]->max_power);
wl1271_debug(DEBUG_SCAN, "beacon_found %d",
req->channels[i]->beacon_found);
if (!passive) {
channels[j].min_duration =
cpu_to_le32(c->min_dwell_time_active);
channels[j].max_duration =
cpu_to_le32(c->max_dwell_time_active);
} else {
channels[j].min_duration =
cpu_to_le32(c->dwell_time_passive);
channels[j].max_duration =
cpu_to_le32(c->dwell_time_passive);
}
channels[j].early_termination = 0;
channels[j].tx_power_att = req->channels[i]->max_power;
channels[j].channel = req->channels[i]->hw_value;
memset(&channels[j].bssid_lsb, 0xff, 4);
memset(&channels[j].bssid_msb, 0xff, 2);
/* Mark the channels we already used */
set_bit(i, wl->scan.scanned_ch);
j++;
}
}
return j;
}
#define WL1271_NOTHING_TO_SCAN 1
static int wl1271_scan_send(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum nl80211_band band,
bool passive, u32 basic_rate)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl1271_cmd_scan *cmd;
struct wl1271_cmd_trigger_scan_to *trigger;
int ret;
u16 scan_options = 0;
/* skip active scans if we don't have SSIDs */
if (!passive && wl->scan.req->n_ssids == 0)
return WL1271_NOTHING_TO_SCAN;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
trigger = kzalloc(sizeof(*trigger), GFP_KERNEL);
if (!cmd || !trigger) {
ret = -ENOMEM;
goto out;
}
if (wl->conf.scan.split_scan_timeout)
scan_options |= WL1271_SCAN_OPT_SPLIT_SCAN;
if (passive)
scan_options |= WL1271_SCAN_OPT_PASSIVE;
/* scan on the dev role if the regular one is not started */
if (wlcore_is_p2p_mgmt(wlvif))
cmd->params.role_id = wlvif->dev_role_id;
else
cmd->params.role_id = wlvif->role_id;
if (WARN_ON(cmd->params.role_id == WL12XX_INVALID_ROLE_ID)) {
ret = -EINVAL;
goto out;
}
cmd->params.scan_options = cpu_to_le16(scan_options);
cmd->params.n_ch = wl1271_get_scan_channels(wl, wl->scan.req,
cmd->channels,
band, passive);
if (cmd->params.n_ch == 0) {
ret = WL1271_NOTHING_TO_SCAN;
goto out;
}
cmd->params.tx_rate = cpu_to_le32(basic_rate);
cmd->params.n_probe_reqs = wl->conf.scan.num_probe_reqs;
cmd->params.tid_trigger = CONF_TX_AC_ANY_TID;
cmd->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
if (band == NL80211_BAND_2GHZ)
cmd->params.band = WL1271_SCAN_BAND_2_4_GHZ;
else
cmd->params.band = WL1271_SCAN_BAND_5_GHZ;
if (wl->scan.ssid_len) {
cmd->params.ssid_len = wl->scan.ssid_len;
memcpy(cmd->params.ssid, wl->scan.ssid, wl->scan.ssid_len);
}
memcpy(cmd->addr, vif->addr, ETH_ALEN);
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
cmd->params.role_id, band,
wl->scan.ssid, wl->scan.ssid_len,
wl->scan.req->ie,
wl->scan.req->ie_len, NULL, 0, false);
if (ret < 0) {
wl1271_error("PROBE request template failed");
goto out;
}
trigger->timeout = cpu_to_le32(wl->conf.scan.split_scan_timeout);
ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
sizeof(*trigger), 0);
if (ret < 0) {
wl1271_error("trigger scan to failed for hw scan");
goto out;
}
wl1271_dump(DEBUG_SCAN, "SCAN: ", cmd, sizeof(*cmd));
ret = wl1271_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("SCAN failed");
goto out;
}
out:
kfree(cmd);
kfree(trigger);
return ret;
}
int wl12xx_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_header *cmd = NULL;
int ret = 0;
if (WARN_ON(wl->scan.state == WL1271_SCAN_STATE_IDLE))
return -EINVAL;
wl1271_debug(DEBUG_CMD, "cmd scan stop");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
ret = wl1271_cmd_send(wl, CMD_STOP_SCAN, cmd,
sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("cmd stop_scan failed");
goto out;
}
out:
kfree(cmd);
return ret;
}
void wl1271_scan_stm(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret = 0;
enum nl80211_band band;
u32 rate, mask;
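/* Scan state machine: 2.4GHz active -> 2.4GHz passive ->
 * 5GHz active (if 11a is enabled) -> 5GHz passive -> DONE.
 * A WL1271_NOTHING_TO_SCAN result advances to the next state by
 * recursing into this function. */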
switch (wl->scan.state) {
case WL1271_SCAN_STATE_IDLE:
break;
case WL1271_SCAN_STATE_2GHZ_ACTIVE:
band = NL80211_BAND_2GHZ;
mask = wlvif->bitrate_masks[band];
if (wl->scan.req->no_cck) {
mask &= ~CONF_TX_CCK_RATES;
if (!mask)
mask = CONF_TX_RATE_MASK_BASIC_P2P;
}
rate = wl1271_tx_min_rate_get(wl, mask);
ret = wl1271_scan_send(wl, wlvif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_2GHZ_PASSIVE;
wl1271_scan_stm(wl, wlvif);
}
break;
case WL1271_SCAN_STATE_2GHZ_PASSIVE:
band = NL80211_BAND_2GHZ;
mask = wlvif->bitrate_masks[band];
if (wl->scan.req->no_cck) {
mask &= ~CONF_TX_CCK_RATES;
if (!mask)
mask = CONF_TX_RATE_MASK_BASIC_P2P;
}
rate = wl1271_tx_min_rate_get(wl, mask);
ret = wl1271_scan_send(wl, wlvif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
if (wl->enable_11a)
wl->scan.state = WL1271_SCAN_STATE_5GHZ_ACTIVE;
else
wl->scan.state = WL1271_SCAN_STATE_DONE;
wl1271_scan_stm(wl, wlvif);
}
break;
case WL1271_SCAN_STATE_5GHZ_ACTIVE:
band = NL80211_BAND_5GHZ;
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
ret = wl1271_scan_send(wl, wlvif, band, false, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_5GHZ_PASSIVE;
wl1271_scan_stm(wl, wlvif);
}
break;
case WL1271_SCAN_STATE_5GHZ_PASSIVE:
band = NL80211_BAND_5GHZ;
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
ret = wl1271_scan_send(wl, wlvif, band, true, rate);
if (ret == WL1271_NOTHING_TO_SCAN) {
wl->scan.state = WL1271_SCAN_STATE_DONE;
wl1271_scan_stm(wl, wlvif);
}
break;
case WL1271_SCAN_STATE_DONE:
wl->scan.failed = false;
cancel_delayed_work(&wl->scan_complete_work);
ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
msecs_to_jiffies(0));
break;
default:
wl1271_error("invalid scan state");
break;
}
if (ret < 0) {
cancel_delayed_work(&wl->scan_complete_work);
ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
msecs_to_jiffies(0));
}
}
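/* Copy the generic wlcore channel lists into the wl12xx-specific
 * sched scan command layout. */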
static void wl12xx_adjust_channels(struct wl1271_cmd_sched_scan_config *cmd,
struct wlcore_scan_channels *cmd_channels)
{
memcpy(cmd->passive, cmd_channels->passive, sizeof(cmd->passive));
memcpy(cmd->active, cmd_channels->active, sizeof(cmd->active));
cmd->dfs = cmd_channels->dfs;
cmd->n_pactive_ch = cmd_channels->passive_active;
memcpy(cmd->channels_2, cmd_channels->channels_2,
sizeof(cmd->channels_2));
memcpy(cmd->channels_5, cmd_channels->channels_5,
sizeof(cmd->channels_5));
/* channels_4 are not supported, so no need to copy them */
}
int wl1271_scan_sched_scan_config(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies)
{
struct wl1271_cmd_sched_scan_config *cfg = NULL;
struct wlcore_scan_channels *cfg_channels = NULL;
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
int i, ret;
bool force_passive = !req->n_ssids;
wl1271_debug(DEBUG_CMD, "cmd sched_scan scan config");
cfg = kzalloc(sizeof(*cfg), GFP_KERNEL);
if (!cfg)
return -ENOMEM;
cfg->role_id = wlvif->role_id;
cfg->rssi_threshold = c->rssi_threshold;
cfg->snr_threshold = c->snr_threshold;
cfg->n_probe_reqs = c->num_probe_reqs;
/* cycles set to 0 means scan indefinitely (until manually stopped) */
cfg->cycles = 0;
/* report APs when at least 1 is found */
cfg->report_after = 1;
/* don't stop scanning automatically when something is found */
cfg->terminate = 0;
cfg->tag = WL1271_SCAN_DEFAULT_TAG;
/* don't filter on BSS type */
cfg->bss_type = SCAN_BSS_TYPE_ANY;
/* currently NL80211 supports only a single interval */
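/* scan_plans[0].interval is in seconds while the firmware expects
 * milliseconds, hence the MSEC_PER_SEC scaling below. */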
for (i = 0; i < SCAN_MAX_CYCLE_INTERVALS; i++)
cfg->intervals[i] = cpu_to_le32(req->scan_plans[0].interval *
MSEC_PER_SEC);
cfg->ssid_len = 0;
ret = wlcore_scan_sched_scan_ssid_list(wl, wlvif, req);
if (ret < 0)
goto out;
cfg->filter_type = ret;
wl1271_debug(DEBUG_SCAN, "filter_type = %d", cfg->filter_type);
cfg_channels = kzalloc(sizeof(*cfg_channels), GFP_KERNEL);
if (!cfg_channels) {
ret = -ENOMEM;
goto out;
}
if (!wlcore_set_scan_chan_params(wl, cfg_channels, req->channels,
req->n_channels, req->n_ssids,
SCAN_TYPE_PERIODIC)) {
wl1271_error("scan channel list is empty");
ret = -EINVAL;
goto out;
}
wl12xx_adjust_channels(cfg, cfg_channels);
if (!force_passive && cfg->active[0]) {
u8 band = NL80211_BAND_2GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
wlvif->role_id, band,
req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ies[band],
ies->len[band],
ies->common_ies,
ies->common_ie_len,
true);
if (ret < 0) {
wl1271_error("2.4GHz PROBE request template failed");
goto out;
}
}
if (!force_passive && cfg->active[1]) {
u8 band = NL80211_BAND_5GHZ;
ret = wl12xx_cmd_build_probe_req(wl, wlvif,
wlvif->role_id, band,
req->ssids[0].ssid,
req->ssids[0].ssid_len,
ies->ies[band],
ies->len[band],
ies->common_ies,
ies->common_ie_len,
true);
if (ret < 0) {
wl1271_error("5GHz PROBE request template failed");
goto out;
}
}
wl1271_dump(DEBUG_SCAN, "SCAN_CFG: ", cfg, sizeof(*cfg));
ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_CFG, cfg,
sizeof(*cfg), 0);
if (ret < 0) {
wl1271_error("SCAN configuration failed");
goto out;
}
out:
kfree(cfg_channels);
kfree(cfg);
return ret;
}
int wl1271_scan_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_sched_scan_start *start;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd periodic scan start");
if (wlvif->bss_type != BSS_TYPE_STA_BSS)
return -EOPNOTSUPP;
if ((wl->quirks & WLCORE_QUIRK_NO_SCHED_SCAN_WHILE_CONN) &&
test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
return -EBUSY;
start = kzalloc(sizeof(*start), GFP_KERNEL);
if (!start)
return -ENOMEM;
start->role_id = wlvif->role_id;
start->tag = WL1271_SCAN_DEFAULT_TAG;
ret = wl1271_cmd_send(wl, CMD_START_PERIODIC_SCAN, start,
sizeof(*start), 0);
if (ret < 0) {
wl1271_error("failed to send scan start command");
goto out_free;
}
out_free:
kfree(start);
return ret;
}
int wl12xx_sched_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies)
{
int ret;
ret = wl1271_scan_sched_scan_config(wl, wlvif, req, ies);
if (ret < 0)
return ret;
return wl1271_scan_sched_scan_start(wl, wlvif);
}
void wl12xx_scan_sched_scan_stop(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_cmd_sched_scan_stop *stop;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd periodic scan stop");
/* FIXME: what to do if alloc'ing to stop fails? */
stop = kzalloc(sizeof(*stop), GFP_KERNEL);
if (!stop) {
wl1271_error("failed to alloc memory to send sched scan stop");
return;
}
stop->role_id = wlvif->role_id;
stop->tag = WL1271_SCAN_DEFAULT_TAG;
ret = wl1271_cmd_send(wl, CMD_STOP_PERIODIC_SCAN, stop,
sizeof(*stop), 0);
if (ret < 0) {
wl1271_error("failed to send sched scan stop command");
goto out_free;
}
out_free:
kfree(stop);
}
int wl12xx_scan_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct cfg80211_scan_request *req)
{
wl1271_scan_stm(wl, wlvif);
return 0;
}
void wl12xx_scan_completed(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
wl1271_scan_stm(wl, wlvif);
}
|
linux-master
|
drivers/net/wireless/ti/wl12xx/scan.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl12xx
*
* Copyright (C) 2009-2010 Nokia Corporation
* Copyright (C) 2011 Texas Instruments Inc.
*/
#include "../wlcore/cmd.h"
#include "../wlcore/debug.h"
#include "wl12xx.h"
#include "cmd.h"
int wl1271_cmd_ext_radio_parms(struct wl1271 *wl)
{
struct wl1271_ext_radio_parms_cmd *ext_radio_parms;
struct wl12xx_priv *priv = wl->priv;
struct wl12xx_conf_rf *rf = &priv->conf.rf;
int ret;
if (!wl->nvs)
return -ENODEV;
ext_radio_parms = kzalloc(sizeof(*ext_radio_parms), GFP_KERNEL);
if (!ext_radio_parms)
return -ENOMEM;
ext_radio_parms->test.id = TEST_CMD_INI_FILE_RF_EXTENDED_PARAM;
memcpy(ext_radio_parms->tx_per_channel_power_compensation_2,
rf->tx_per_channel_power_compensation_2,
CONF_TX_PWR_COMPENSATION_LEN_2);
memcpy(ext_radio_parms->tx_per_channel_power_compensation_5,
rf->tx_per_channel_power_compensation_5,
CONF_TX_PWR_COMPENSATION_LEN_5);
wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_EXT_RADIO_PARAM: ",
ext_radio_parms, sizeof(*ext_radio_parms));
ret = wl1271_cmd_test(wl, ext_radio_parms, sizeof(*ext_radio_parms), 0);
if (ret < 0)
wl1271_warning("TEST_CMD_INI_FILE_RF_EXTENDED_PARAM failed");
kfree(ext_radio_parms);
return ret;
}
int wl1271_cmd_general_parms(struct wl1271 *wl)
{
struct wl1271_general_parms_cmd *gen_parms;
struct wl1271_ini_general_params *gp =
&((struct wl1271_nvs_file *)wl->nvs)->general_params;
struct wl12xx_priv *priv = wl->priv;
bool answer = false;
int ret;
if (!wl->nvs)
return -ENODEV;
if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
wl1271_warning("FEM index from INI out of bounds");
return -EINVAL;
}
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
memcpy(&gen_parms->general_params, gp, sizeof(*gp));
/* If we started in PLT FEM_DETECT mode, force auto detect */
if (wl->plt_mode == PLT_FEM_DETECT)
gen_parms->general_params.tx_bip_fem_auto_detect = true;
if (gen_parms->general_params.tx_bip_fem_auto_detect)
answer = true;
/* Override the REF CLK from the NVS with the one from platform data */
gen_parms->general_params.ref_clock = priv->ref_clock;
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
if (ret < 0) {
wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
goto out;
}
gp->tx_bip_fem_manufacturer =
gen_parms->general_params.tx_bip_fem_manufacturer;
if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
wl1271_warning("FEM index from FW out of bounds");
ret = -EINVAL;
goto out;
}
/* If we are in calibrator-based FEM auto detect, save the FEM number */
if (wl->plt_mode == PLT_FEM_DETECT)
wl->fem_manuf = gp->tx_bip_fem_manufacturer;
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
answer == false ?
"manual" :
wl->plt_mode == PLT_FEM_DETECT ?
"calibrator_fem_detect" :
"auto",
gp->tx_bip_fem_manufacturer);
out:
kfree(gen_parms);
return ret;
}
int wl128x_cmd_general_parms(struct wl1271 *wl)
{
struct wl128x_general_parms_cmd *gen_parms;
struct wl128x_ini_general_params *gp =
&((struct wl128x_nvs_file *)wl->nvs)->general_params;
struct wl12xx_priv *priv = wl->priv;
bool answer = false;
int ret;
if (!wl->nvs)
return -ENODEV;
if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
wl1271_warning("FEM index from ini out of bounds");
return -EINVAL;
}
gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
if (!gen_parms)
return -ENOMEM;
gen_parms->test.id = TEST_CMD_INI_FILE_GENERAL_PARAM;
memcpy(&gen_parms->general_params, gp, sizeof(*gp));
/* If we started in PLT FEM_DETECT mode, force auto detect */
if (wl->plt_mode == PLT_FEM_DETECT)
gen_parms->general_params.tx_bip_fem_auto_detect = true;
if (gen_parms->general_params.tx_bip_fem_auto_detect)
answer = true;
/* Replace REF and TCXO CLKs with the ones from platform data */
gen_parms->general_params.ref_clock = priv->ref_clock;
gen_parms->general_params.tcxo_ref_clock = priv->tcxo_clock;
ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), answer);
if (ret < 0) {
wl1271_warning("CMD_INI_FILE_GENERAL_PARAM failed");
goto out;
}
gp->tx_bip_fem_manufacturer =
gen_parms->general_params.tx_bip_fem_manufacturer;
if (gp->tx_bip_fem_manufacturer >= WL1271_INI_FEM_MODULE_COUNT) {
wl1271_warning("FEM index from FW out of bounds");
ret = -EINVAL;
goto out;
}
/* If we are in calibrator-based FEM auto detect, save the FEM number */
if (wl->plt_mode == PLT_FEM_DETECT)
wl->fem_manuf = gp->tx_bip_fem_manufacturer;
wl1271_debug(DEBUG_CMD, "FEM autodetect: %s, manufacturer: %d\n",
answer == false ?
"manual" :
wl->plt_mode == PLT_FEM_DETECT ?
"calibrator_fem_detect" :
"auto",
gp->tx_bip_fem_manufacturer);
out:
kfree(gen_parms);
return ret;
}
int wl1271_cmd_radio_parms(struct wl1271 *wl)
{
struct wl1271_nvs_file *nvs = (struct wl1271_nvs_file *)wl->nvs;
struct wl1271_radio_parms_cmd *radio_parms;
struct wl1271_ini_general_params *gp = &nvs->general_params;
int ret, fem_idx;
if (!wl->nvs)
return -ENODEV;
radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
if (!radio_parms)
return -ENOMEM;
radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
fem_idx = WL12XX_FEM_TO_NVS_ENTRY(gp->tx_bip_fem_manufacturer);
/* 2.4GHz parameters */
memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
sizeof(struct wl1271_ini_band_params_2));
memcpy(&radio_parms->dyn_params_2,
&nvs->dyn_radio_params_2[fem_idx].params,
sizeof(struct wl1271_ini_fem_params_2));
/* 5GHz parameters */
memcpy(&radio_parms->static_params_5,
&nvs->stat_radio_params_5,
sizeof(struct wl1271_ini_band_params_5));
memcpy(&radio_parms->dyn_params_5,
&nvs->dyn_radio_params_5[fem_idx].params,
sizeof(struct wl1271_ini_fem_params_5));
wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
radio_parms, sizeof(*radio_parms));
ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
if (ret < 0)
wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
kfree(radio_parms);
return ret;
}
int wl128x_cmd_radio_parms(struct wl1271 *wl)
{
struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
struct wl128x_radio_parms_cmd *radio_parms;
struct wl128x_ini_general_params *gp = &nvs->general_params;
int ret, fem_idx;
if (!wl->nvs)
return -ENODEV;
radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
if (!radio_parms)
return -ENOMEM;
radio_parms->test.id = TEST_CMD_INI_FILE_RADIO_PARAM;
fem_idx = WL12XX_FEM_TO_NVS_ENTRY(gp->tx_bip_fem_manufacturer);
/* 2.4GHz parameters */
memcpy(&radio_parms->static_params_2, &nvs->stat_radio_params_2,
sizeof(struct wl128x_ini_band_params_2));
memcpy(&radio_parms->dyn_params_2,
&nvs->dyn_radio_params_2[fem_idx].params,
sizeof(struct wl128x_ini_fem_params_2));
/* 5GHz parameters */
memcpy(&radio_parms->static_params_5,
&nvs->stat_radio_params_5,
sizeof(struct wl128x_ini_band_params_5));
memcpy(&radio_parms->dyn_params_5,
&nvs->dyn_radio_params_5[fem_idx].params,
sizeof(struct wl128x_ini_fem_params_5));
radio_parms->fem_vendor_and_options = nvs->fem_vendor_and_options;
wl1271_dump(DEBUG_CMD, "TEST_CMD_INI_FILE_RADIO_PARAM: ",
radio_parms, sizeof(*radio_parms));
ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
if (ret < 0)
wl1271_warning("CMD_INI_FILE_RADIO_PARAM failed");
kfree(radio_parms);
return ret;
}
int wl12xx_cmd_channel_switch(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_channel_switch *ch_switch)
{
struct wl12xx_cmd_channel_switch *cmd;
int ret;
wl1271_debug(DEBUG_ACX, "cmd channel switch");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->role_id = wlvif->role_id;
cmd->channel = ch_switch->chandef.chan->hw_value;
cmd->switch_time = ch_switch->count;
cmd->stop_tx = ch_switch->block_tx;
/* FIXME: control from mac80211 in the future */
/* Enable TX on the target channel */
cmd->post_switch_tx_disable = 0;
ret = wl1271_cmd_send(wl, CMD_CHANNEL_SWITCH, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send channel switch command");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
|
linux-master
|
drivers/net/wireless/ti/wl12xx/cmd.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2008-2010 Nokia Corporation
*/
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/err.h>
#include "../wlcore/wlcore.h"
#include "../wlcore/debug.h"
#include "../wlcore/io.h"
#include "../wlcore/acx.h"
#include "../wlcore/tx.h"
#include "../wlcore/rx.h"
#include "../wlcore/boot.h"
#include "wl12xx.h"
#include "reg.h"
#include "cmd.h"
#include "acx.h"
#include "scan.h"
#include "event.h"
#include "debugfs.h"
#include "conf.h"
static char *fref_param;
static char *tcxo_param;
static struct wlcore_conf wl12xx_conf = {
.sg = {
.params = {
[WL12XX_CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
[WL12XX_CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
[WL12XX_CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
[WL12XX_CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
[WL12XX_CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
[WL12XX_CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
[WL12XX_CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
[WL12XX_CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
[WL12XX_CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
[WL12XX_CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
[WL12XX_CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
[WL12XX_CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
[WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
[WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
[WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
[WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
[WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
[WL12XX_CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
[WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
[WL12XX_CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
[WL12XX_CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
[WL12XX_CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
[WL12XX_CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
[WL12XX_CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
[WL12XX_CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
[WL12XX_CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
/* active scan params */
[WL12XX_CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
[WL12XX_CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
[WL12XX_CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
/* passive scan params */
[WL12XX_CONF_SG_PASSIVE_SCAN_DUR_FACTOR_A2DP_BR] = 800,
[WL12XX_CONF_SG_PASSIVE_SCAN_DUR_FACTOR_A2DP_EDR] = 200,
[WL12XX_CONF_SG_PASSIVE_SCAN_DUR_FACTOR_HV3] = 200,
/* passive scan in dual antenna params */
[WL12XX_CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
[WL12XX_CONF_SG_BCN_HV3_COLL_THR_IN_PASSIVE_SCAN] = 0,
[WL12XX_CONF_SG_TX_RX_PROTECT_BW_IN_PASSIVE_SCAN] = 0,
/* general params */
[WL12XX_CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
[WL12XX_CONF_SG_ANTENNA_CONFIGURATION] = 0,
[WL12XX_CONF_SG_BEACON_MISS_PERCENT] = 60,
[WL12XX_CONF_SG_DHCP_TIME] = 5000,
[WL12XX_CONF_SG_RXT] = 1200,
[WL12XX_CONF_SG_TXT] = 1000,
[WL12XX_CONF_SG_ADAPTIVE_RXT_TXT] = 1,
[WL12XX_CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
[WL12XX_CONF_SG_HV3_MAX_SERVED] = 6,
[WL12XX_CONF_SG_PS_POLL_TIMEOUT] = 10,
[WL12XX_CONF_SG_UPSD_TIMEOUT] = 10,
[WL12XX_CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
[WL12XX_CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
[WL12XX_CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
/* AP params */
[WL12XX_CONF_AP_BEACON_MISS_TX] = 3,
[WL12XX_CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
[WL12XX_CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
[WL12XX_CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
[WL12XX_CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
[WL12XX_CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
/* CTS Diluting params */
[WL12XX_CONF_SG_CTS_DILUTED_BAD_RX_PACKETS_TH] = 0,
[WL12XX_CONF_SG_CTS_CHOP_IN_DUAL_ANT_SCO_MASTER] = 0,
},
.state = CONF_SG_PROTECTIVE,
},
.rx = {
.rx_msdu_life_time = 512000,
.packet_detection_threshold = 0,
.ps_poll_timeout = 15,
.upsd_timeout = 15,
.rts_threshold = IEEE80211_MAX_RTS_THRESHOLD,
.rx_cca_threshold = 0,
.irq_blk_threshold = 0xFFFF,
.irq_pkt_threshold = 0,
.irq_timeout = 600,
.queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
},
.tx = {
.tx_energy_detection = 0,
.sta_rc_conf = {
.enabled_rates = 0,
.short_retry_limit = 10,
.long_retry_limit = 10,
.aflags = 0,
},
.ac_conf_count = 4,
.ac_conf = {
[CONF_TX_AC_BE] = {
.ac = CONF_TX_AC_BE,
.cw_min = 15,
.cw_max = 63,
.aifsn = 3,
.tx_op_limit = 0,
},
[CONF_TX_AC_BK] = {
.ac = CONF_TX_AC_BK,
.cw_min = 15,
.cw_max = 63,
.aifsn = 7,
.tx_op_limit = 0,
},
[CONF_TX_AC_VI] = {
.ac = CONF_TX_AC_VI,
.cw_min = 15,
.cw_max = 63,
.aifsn = CONF_TX_AIFS_PIFS,
.tx_op_limit = 3008,
},
[CONF_TX_AC_VO] = {
.ac = CONF_TX_AC_VO,
.cw_min = 15,
.cw_max = 63,
.aifsn = CONF_TX_AIFS_PIFS,
.tx_op_limit = 1504,
},
},
.max_tx_retries = 100,
.ap_aging_period = 300,
.tid_conf_count = 4,
.tid_conf = {
[CONF_TX_AC_BE] = {
.queue_id = CONF_TX_AC_BE,
.channel_type = CONF_CHANNEL_TYPE_EDCF,
.tsid = CONF_TX_AC_BE,
.ps_scheme = CONF_PS_SCHEME_LEGACY,
.ack_policy = CONF_ACK_POLICY_LEGACY,
.apsd_conf = {0, 0},
},
[CONF_TX_AC_BK] = {
.queue_id = CONF_TX_AC_BK,
.channel_type = CONF_CHANNEL_TYPE_EDCF,
.tsid = CONF_TX_AC_BK,
.ps_scheme = CONF_PS_SCHEME_LEGACY,
.ack_policy = CONF_ACK_POLICY_LEGACY,
.apsd_conf = {0, 0},
},
[CONF_TX_AC_VI] = {
.queue_id = CONF_TX_AC_VI,
.channel_type = CONF_CHANNEL_TYPE_EDCF,
.tsid = CONF_TX_AC_VI,
.ps_scheme = CONF_PS_SCHEME_LEGACY,
.ack_policy = CONF_ACK_POLICY_LEGACY,
.apsd_conf = {0, 0},
},
[CONF_TX_AC_VO] = {
.queue_id = CONF_TX_AC_VO,
.channel_type = CONF_CHANNEL_TYPE_EDCF,
.tsid = CONF_TX_AC_VO,
.ps_scheme = CONF_PS_SCHEME_LEGACY,
.ack_policy = CONF_ACK_POLICY_LEGACY,
.apsd_conf = {0, 0},
},
},
.frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
.tx_compl_timeout = 700,
.tx_compl_threshold = 4,
.basic_rate = CONF_HW_BIT_RATE_1MBPS,
.basic_rate_5 = CONF_HW_BIT_RATE_6MBPS,
.tmpl_short_retry_limit = 10,
.tmpl_long_retry_limit = 10,
.tx_watchdog_timeout = 5000,
.slow_link_thold = 3,
.fast_link_thold = 10,
},
.conn = {
.wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
.listen_interval = 1,
.suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM,
.suspend_listen_interval = 3,
.bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
.bcn_filt_ie_count = 3,
.bcn_filt_ie = {
[0] = {
.ie = WLAN_EID_CHANNEL_SWITCH,
.rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
},
[1] = {
.ie = WLAN_EID_HT_OPERATION,
.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
},
[2] = {
.ie = WLAN_EID_ERP_INFO,
.rule = CONF_BCN_RULE_PASS_ON_CHANGE,
},
},
.synch_fail_thold = 12,
.bss_lose_timeout = 400,
.beacon_rx_timeout = 10000,
.broadcast_timeout = 20000,
.rx_broadcast_in_ps = 1,
.ps_poll_threshold = 10,
.bet_enable = CONF_BET_MODE_ENABLE,
.bet_max_consecutive = 50,
.psm_entry_retries = 8,
.psm_exit_retries = 16,
.psm_entry_nullfunc_retries = 3,
.dynamic_ps_timeout = 1500,
.forced_ps = false,
.keep_alive_interval = 55000,
.max_listen_interval = 20,
.sta_sleep_auth = WL1271_PSM_ILLEGAL,
.suspend_rx_ba_activity = 0,
},
.itrim = {
.enable = false,
.timeout = 50000,
},
.pm_config = {
.host_clk_settling_time = 5000,
.host_fast_wakeup_support = CONF_FAST_WAKEUP_DISABLE,
},
.roam_trigger = {
.trigger_pacing = 1,
.avg_weight_rssi_beacon = 20,
.avg_weight_rssi_data = 10,
.avg_weight_snr_beacon = 20,
.avg_weight_snr_data = 10,
},
.scan = {
.min_dwell_time_active = 7500,
.max_dwell_time_active = 30000,
.min_dwell_time_active_long = 25000,
.max_dwell_time_active_long = 50000,
.dwell_time_passive = 100000,
.dwell_time_dfs = 150000,
.num_probe_reqs = 2,
.split_scan_timeout = 50000,
},
.sched_scan = {
/*
 * Values are in TU/1000, but since the sched scan FW command
 * params are in TUs, rounding up may occur.
 */
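/* e.g. base_dwell_time = 7500 below is 7.5 TU, which the command
 * encoding would round up to a whole 8 TU. */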
.base_dwell_time = 7500,
.max_dwell_time_delta = 22500,
/* based on 250 bits per probe @ 1Mbps */
.dwell_time_delta_per_probe = 2000,
/* based on 250 bits per probe @ 6Mbps (plus a bit more) */
.dwell_time_delta_per_probe_5 = 350,
.dwell_time_passive = 100000,
.dwell_time_dfs = 150000,
.num_probe_reqs = 2,
.rssi_threshold = -90,
.snr_threshold = 0,
},
.ht = {
.rx_ba_win_size = 8,
.tx_ba_win_size = 64,
.inactivity_timeout = 10000,
.tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
},
/*
* Memory config for wl127x chips is given in the
* wl12xx_default_priv_conf struct. The below configuration is
* for wl128x chips.
*/
.mem = {
.num_stations = 1,
.ssid_profiles = 1,
.rx_block_num = 40,
.tx_min_block_num = 40,
.dynamic_memory = 1,
.min_req_tx_blocks = 45,
.min_req_rx_blocks = 22,
.tx_min = 27,
},
.fm_coex = {
.enable = true,
.swallow_period = 5,
.n_divider_fref_set_1 = 0xff, /* default */
.n_divider_fref_set_2 = 12,
.m_divider_fref_set_1 = 0xffff,
.m_divider_fref_set_2 = 148, /* default */
.coex_pll_stabilization_time = 0xffffffff, /* default */
.ldo_stabilization_time = 0xffff, /* default */
.fm_disturbed_band_margin = 0xff, /* default */
.swallow_clk_diff = 0xff, /* default */
},
.rx_streaming = {
.duration = 150,
.queues = 0x1,
.interval = 20,
.always = 0,
},
.fwlog = {
.mode = WL12XX_FWLOG_CONTINUOUS,
.mem_blocks = 2,
.severity = 0,
.timestamp = WL12XX_FWLOG_TIMESTAMP_DISABLED,
.output = WL12XX_FWLOG_OUTPUT_DBG_PINS,
.threshold = 0,
},
.rate = {
.rate_retry_score = 32000,
.per_add = 8192,
.per_th1 = 2048,
.per_th2 = 4096,
.max_per = 8100,
.inverse_curiosity_factor = 5,
.tx_fail_low_th = 4,
.tx_fail_high_th = 10,
.per_alpha_shift = 4,
.per_add_shift = 13,
.per_beta1_shift = 10,
.per_beta2_shift = 8,
.rate_check_up = 2,
.rate_check_down = 12,
.rate_retry_policy = {
0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00,
},
},
.hangover = {
.recover_time = 0,
.hangover_period = 20,
.dynamic_mode = 1,
.early_termination_mode = 1,
.max_period = 20,
.min_period = 1,
.increase_delta = 1,
.decrease_delta = 2,
.quiet_time = 4,
.increase_time = 1,
.window_size = 16,
},
.recovery = {
.bug_on_recovery = 0,
.no_recovery = 0,
},
};
static struct wl12xx_priv_conf wl12xx_default_priv_conf = {
.rf = {
.tx_per_channel_power_compensation_2 = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
.tx_per_channel_power_compensation_5 = {
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
},
},
.mem_wl127x = {
.num_stations = 1,
.ssid_profiles = 1,
.rx_block_num = 70,
.tx_min_block_num = 40,
.dynamic_memory = 1,
.min_req_tx_blocks = 100,
.min_req_rx_blocks = 22,
.tx_min = 27,
},
};
#define WL12XX_TX_HW_BLOCK_SPARE_DEFAULT 1
#define WL12XX_TX_HW_BLOCK_GEM_SPARE 2
#define WL12XX_TX_HW_BLOCK_SIZE 252
static const u8 wl12xx_rate_to_idx_2ghz[] = {
/* MCS rates are used only with 11n */
7, /* WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI */
7, /* WL12XX_CONF_HW_RXTX_RATE_MCS7 */
6, /* WL12XX_CONF_HW_RXTX_RATE_MCS6 */
5, /* WL12XX_CONF_HW_RXTX_RATE_MCS5 */
4, /* WL12XX_CONF_HW_RXTX_RATE_MCS4 */
3, /* WL12XX_CONF_HW_RXTX_RATE_MCS3 */
2, /* WL12XX_CONF_HW_RXTX_RATE_MCS2 */
1, /* WL12XX_CONF_HW_RXTX_RATE_MCS1 */
0, /* WL12XX_CONF_HW_RXTX_RATE_MCS0 */
11, /* WL12XX_CONF_HW_RXTX_RATE_54 */
10, /* WL12XX_CONF_HW_RXTX_RATE_48 */
9, /* WL12XX_CONF_HW_RXTX_RATE_36 */
8, /* WL12XX_CONF_HW_RXTX_RATE_24 */
/* TI-specific rate */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_22 */
7, /* WL12XX_CONF_HW_RXTX_RATE_18 */
6, /* WL12XX_CONF_HW_RXTX_RATE_12 */
3, /* WL12XX_CONF_HW_RXTX_RATE_11 */
5, /* WL12XX_CONF_HW_RXTX_RATE_9 */
4, /* WL12XX_CONF_HW_RXTX_RATE_6 */
2, /* WL12XX_CONF_HW_RXTX_RATE_5_5 */
1, /* WL12XX_CONF_HW_RXTX_RATE_2 */
0 /* WL12XX_CONF_HW_RXTX_RATE_1 */
};
static const u8 wl12xx_rate_to_idx_5ghz[] = {
/* MCS rates are used only with 11n */
7, /* WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI */
7, /* WL12XX_CONF_HW_RXTX_RATE_MCS7 */
6, /* WL12XX_CONF_HW_RXTX_RATE_MCS6 */
5, /* WL12XX_CONF_HW_RXTX_RATE_MCS5 */
4, /* WL12XX_CONF_HW_RXTX_RATE_MCS4 */
3, /* WL12XX_CONF_HW_RXTX_RATE_MCS3 */
2, /* WL12XX_CONF_HW_RXTX_RATE_MCS2 */
1, /* WL12XX_CONF_HW_RXTX_RATE_MCS1 */
0, /* WL12XX_CONF_HW_RXTX_RATE_MCS0 */
7, /* WL12XX_CONF_HW_RXTX_RATE_54 */
6, /* WL12XX_CONF_HW_RXTX_RATE_48 */
5, /* WL12XX_CONF_HW_RXTX_RATE_36 */
4, /* WL12XX_CONF_HW_RXTX_RATE_24 */
/* TI-specific rate */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_22 */
3, /* WL12XX_CONF_HW_RXTX_RATE_18 */
2, /* WL12XX_CONF_HW_RXTX_RATE_12 */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_11 */
1, /* WL12XX_CONF_HW_RXTX_RATE_9 */
0, /* WL12XX_CONF_HW_RXTX_RATE_6 */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_5_5 */
CONF_HW_RXTX_RATE_UNSUPPORTED, /* WL12XX_CONF_HW_RXTX_RATE_2 */
CONF_HW_RXTX_RATE_UNSUPPORTED /* WL12XX_CONF_HW_RXTX_RATE_1 */
};
static const u8 *wl12xx_band_rate_to_idx[] = {
[NL80211_BAND_2GHZ] = wl12xx_rate_to_idx_2ghz,
[NL80211_BAND_5GHZ] = wl12xx_rate_to_idx_5ghz
};
enum wl12xx_hw_rates {
WL12XX_CONF_HW_RXTX_RATE_MCS7_SGI = 0,
WL12XX_CONF_HW_RXTX_RATE_MCS7,
WL12XX_CONF_HW_RXTX_RATE_MCS6,
WL12XX_CONF_HW_RXTX_RATE_MCS5,
WL12XX_CONF_HW_RXTX_RATE_MCS4,
WL12XX_CONF_HW_RXTX_RATE_MCS3,
WL12XX_CONF_HW_RXTX_RATE_MCS2,
WL12XX_CONF_HW_RXTX_RATE_MCS1,
WL12XX_CONF_HW_RXTX_RATE_MCS0,
WL12XX_CONF_HW_RXTX_RATE_54,
WL12XX_CONF_HW_RXTX_RATE_48,
WL12XX_CONF_HW_RXTX_RATE_36,
WL12XX_CONF_HW_RXTX_RATE_24,
WL12XX_CONF_HW_RXTX_RATE_22,
WL12XX_CONF_HW_RXTX_RATE_18,
WL12XX_CONF_HW_RXTX_RATE_12,
WL12XX_CONF_HW_RXTX_RATE_11,
WL12XX_CONF_HW_RXTX_RATE_9,
WL12XX_CONF_HW_RXTX_RATE_6,
WL12XX_CONF_HW_RXTX_RATE_5_5,
WL12XX_CONF_HW_RXTX_RATE_2,
WL12XX_CONF_HW_RXTX_RATE_1,
WL12XX_CONF_HW_RXTX_RATE_MAX,
};
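/* The wl12xx_rate_to_idx_2ghz/5ghz tables above are indexed by this
 * enum, so the two orderings must stay in sync. */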
static struct wlcore_partition_set wl12xx_ptable[PART_TABLE_LEN] = {
[PART_DOWN] = {
.mem = {
.start = 0x00000000,
.size = 0x000177c0
},
.reg = {
.start = REGISTERS_BASE,
.size = 0x00008800
},
.mem2 = {
.start = 0x00000000,
.size = 0x00000000
},
.mem3 = {
.start = 0x00000000,
.size = 0x00000000
},
},
[PART_BOOT] = { /* in wl12xx we can use a mix of work and down
* partition here */
.mem = {
.start = 0x00040000,
.size = 0x00014fc0
},
.reg = {
.start = REGISTERS_BASE,
.size = 0x00008800
},
.mem2 = {
.start = 0x00000000,
.size = 0x00000000
},
.mem3 = {
.start = 0x00000000,
.size = 0x00000000
},
},
[PART_WORK] = {
.mem = {
.start = 0x00040000,
.size = 0x00014fc0
},
.reg = {
.start = REGISTERS_BASE,
.size = 0x0000a000
},
.mem2 = {
.start = 0x003004f8,
.size = 0x00000004
},
.mem3 = {
.start = 0x00000000,
.size = 0x00040404
},
},
[PART_DRPW] = {
.mem = {
.start = 0x00040000,
.size = 0x00014fc0
},
.reg = {
.start = DRPW_BASE,
.size = 0x00006000
},
.mem2 = {
.start = 0x00000000,
.size = 0x00000000
},
.mem3 = {
.start = 0x00000000,
.size = 0x00000000
}
}
};
static const int wl12xx_rtable[REG_TABLE_LEN] = {
[REG_ECPU_CONTROL] = WL12XX_REG_ECPU_CONTROL,
[REG_INTERRUPT_NO_CLEAR] = WL12XX_REG_INTERRUPT_NO_CLEAR,
[REG_INTERRUPT_ACK] = WL12XX_REG_INTERRUPT_ACK,
[REG_COMMAND_MAILBOX_PTR] = WL12XX_REG_COMMAND_MAILBOX_PTR,
[REG_EVENT_MAILBOX_PTR] = WL12XX_REG_EVENT_MAILBOX_PTR,
[REG_INTERRUPT_TRIG] = WL12XX_REG_INTERRUPT_TRIG,
[REG_INTERRUPT_MASK] = WL12XX_REG_INTERRUPT_MASK,
[REG_PC_ON_RECOVERY] = WL12XX_SCR_PAD4,
[REG_CHIP_ID_B] = WL12XX_CHIP_ID_B,
[REG_CMD_MBOX_ADDRESS] = WL12XX_CMD_MBOX_ADDRESS,
/* data access memory addresses, used with partition translation */
[REG_SLV_MEM_DATA] = WL1271_SLV_MEM_DATA,
[REG_SLV_REG_DATA] = WL1271_SLV_REG_DATA,
/* raw data access memory addresses */
[REG_RAW_FW_STATUS_ADDR] = FW_STATUS_ADDR,
};
/* TODO: maybe move to a new header file? */
#define WL127X_FW_NAME_MULTI "ti-connectivity/wl127x-fw-5-mr.bin"
#define WL127X_FW_NAME_SINGLE "ti-connectivity/wl127x-fw-5-sr.bin"
#define WL127X_PLT_FW_NAME "ti-connectivity/wl127x-fw-5-plt.bin"
#define WL128X_FW_NAME_MULTI "ti-connectivity/wl128x-fw-5-mr.bin"
#define WL128X_FW_NAME_SINGLE "ti-connectivity/wl128x-fw-5-sr.bin"
#define WL128X_PLT_FW_NAME "ti-connectivity/wl128x-fw-5-plt.bin"
static int wl127x_prepare_read(struct wl1271 *wl, u32 rx_desc, u32 len)
{
int ret;
if (wl->chip.id != CHIP_ID_128X_PG20) {
struct wl1271_acx_mem_map *wl_mem_map = wl->target_mem_map;
struct wl12xx_priv *priv = wl->priv;
/*
* Choose the block we want to read
* For aggregated packets, only the first memory block
* should be retrieved. The FW takes care of the rest.
*/
u32 mem_block = rx_desc & RX_MEM_BLOCK_MASK;
priv->rx_mem_addr->addr = (mem_block << 8) +
le32_to_cpu(wl_mem_map->packet_memory_pool_start);
priv->rx_mem_addr->addr_extra = priv->rx_mem_addr->addr + 4;
ret = wlcore_write(wl, WL1271_SLV_REG_DATA, priv->rx_mem_addr,
sizeof(*priv->rx_mem_addr), false);
if (ret < 0)
return ret;
}
return 0;
}
static int wl12xx_identify_chip(struct wl1271 *wl)
{
int ret = 0;
switch (wl->chip.id) {
case CHIP_ID_127X_PG10:
wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
wl->chip.id);
wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
WLCORE_QUIRK_TKIP_HEADER_SPACE |
WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
sizeof(wl->conf.mem));
/* read data preparation is only needed by wl127x */
wl->ops->prepare_read = wl127x_prepare_read;
wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
WL127X_SUBTYPE_SR_VER, WL127X_MINOR_SR_VER,
WL127X_IFTYPE_MR_VER, WL127X_MAJOR_MR_VER,
WL127X_SUBTYPE_MR_VER, WL127X_MINOR_MR_VER);
break;
case CHIP_ID_127X_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
wl->chip.id);
wl->quirks |= WLCORE_QUIRK_LEGACY_NVS |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
WLCORE_QUIRK_TKIP_HEADER_SPACE |
WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wl->plt_fw_name = WL127X_PLT_FW_NAME;
wl->sr_fw_name = WL127X_FW_NAME_SINGLE;
wl->mr_fw_name = WL127X_FW_NAME_MULTI;
memcpy(&wl->conf.mem, &wl12xx_default_priv_conf.mem_wl127x,
sizeof(wl->conf.mem));
/* read data preparation is only needed by wl127x */
wl->ops->prepare_read = wl127x_prepare_read;
wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
WL127X_SUBTYPE_SR_VER, WL127X_MINOR_SR_VER,
WL127X_IFTYPE_MR_VER, WL127X_MAJOR_MR_VER,
WL127X_SUBTYPE_MR_VER, WL127X_MINOR_MR_VER);
break;
case CHIP_ID_128X_PG20:
wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
wl->chip.id);
wl->plt_fw_name = WL128X_PLT_FW_NAME;
wl->sr_fw_name = WL128X_FW_NAME_SINGLE;
wl->mr_fw_name = WL128X_FW_NAME_MULTI;
/* wl128x requires TX blocksize alignment */
wl->quirks |= WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN |
WLCORE_QUIRK_DUAL_PROBE_TMPL |
WLCORE_QUIRK_TKIP_HEADER_SPACE |
WLCORE_QUIRK_AP_ZERO_SESSION_ID;
wlcore_set_min_fw_ver(wl, WL128X_CHIP_VER,
WL128X_IFTYPE_SR_VER, WL128X_MAJOR_SR_VER,
WL128X_SUBTYPE_SR_VER, WL128X_MINOR_SR_VER,
WL128X_IFTYPE_MR_VER, WL128X_MAJOR_MR_VER,
WL128X_SUBTYPE_MR_VER, WL128X_MINOR_MR_VER);
break;
case CHIP_ID_128X_PG10:
default:
wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
ret = -ENODEV;
goto out;
}
wl->fw_mem_block_size = 256;
wl->fwlog_end = 0x2000000;
/* common settings */
wl->scan_templ_id_2_4 = CMD_TEMPL_APP_PROBE_REQ_2_4_LEGACY;
wl->scan_templ_id_5 = CMD_TEMPL_APP_PROBE_REQ_5_LEGACY;
wl->sched_scan_templ_id_2_4 = CMD_TEMPL_CFG_PROBE_REQ_2_4;
wl->sched_scan_templ_id_5 = CMD_TEMPL_CFG_PROBE_REQ_5;
wl->max_channels_5 = WL12XX_MAX_CHANNELS_5GHZ;
wl->ba_rx_session_count_max = WL12XX_RX_BA_MAX_SESSIONS;
out:
return ret;
}
static int __must_check wl12xx_top_reg_write(struct wl1271 *wl, int addr,
u16 val)
{
int ret;
/* write address >> 1 + 0x30000 to OCP_POR_CTR */
addr = (addr >> 1) + 0x30000;
ret = wlcore_write32(wl, WL12XX_OCP_POR_CTR, addr);
if (ret < 0)
goto out;
/* write value to OCP_POR_WDATA */
ret = wlcore_write32(wl, WL12XX_OCP_DATA_WRITE, val);
if (ret < 0)
goto out;
/* write 1 to OCP_CMD */
ret = wlcore_write32(wl, WL12XX_OCP_CMD, OCP_CMD_WRITE);
if (ret < 0)
goto out;
out:
return ret;
}
static int __must_check wl12xx_top_reg_read(struct wl1271 *wl, int addr,
u16 *out)
{
u32 val;
int timeout = OCP_CMD_LOOP;
int ret;
/* write address >> 1 + 0x30000 to OCP_POR_CTR */
addr = (addr >> 1) + 0x30000;
ret = wlcore_write32(wl, WL12XX_OCP_POR_CTR, addr);
if (ret < 0)
return ret;
/* write 2 to OCP_CMD */
ret = wlcore_write32(wl, WL12XX_OCP_CMD, OCP_CMD_READ);
if (ret < 0)
return ret;
/* poll for data ready */
do {
ret = wlcore_read32(wl, WL12XX_OCP_DATA_READ, &val);
if (ret < 0)
return ret;
} while (!(val & OCP_READY_MASK) && --timeout);
if (!timeout) {
wl1271_warning("Top register access timed out.");
return -ETIMEDOUT;
}
/* check data status and return if OK */
if ((val & OCP_STATUS_MASK) != OCP_STATUS_OK) {
wl1271_warning("Top register access returned error.");
return -EIO;
}
if (out)
*out = val & 0xffff;
return 0;
}
static int wl128x_switch_tcxo_to_fref(struct wl1271 *wl)
{
u16 spare_reg;
int ret;
/* Mask bits [2] & [8:4] in the sys_clk_cfg register */
ret = wl12xx_top_reg_read(wl, WL_SPARE_REG, &spare_reg);
if (ret < 0)
return ret;
if (spare_reg == 0xFFFF)
return -EFAULT;
spare_reg |= (BIT(3) | BIT(5) | BIT(6));
ret = wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
if (ret < 0)
return ret;
/* Enable FREF_CLK_REQ & mux MCS and coex PLLs to FREF */
ret = wl12xx_top_reg_write(wl, SYS_CLK_CFG_REG,
WL_CLK_REQ_TYPE_PG2 | MCS_PLL_CLK_SEL_FREF);
if (ret < 0)
return ret;
/* Delay execution for 15msec, to let the HW settle */
mdelay(15);
return 0;
}
static bool wl128x_is_tcxo_valid(struct wl1271 *wl)
{
u16 tcxo_detection;
int ret;
ret = wl12xx_top_reg_read(wl, TCXO_CLK_DETECT_REG, &tcxo_detection);
if (ret < 0)
return false;
if (tcxo_detection & TCXO_DET_FAILED)
return false;
return true;
}
static bool wl128x_is_fref_valid(struct wl1271 *wl)
{
u16 fref_detection;
int ret;
ret = wl12xx_top_reg_read(wl, FREF_CLK_DETECT_REG, &fref_detection);
if (ret < 0)
return false;
if (fref_detection & FREF_CLK_DETECT_FAIL)
return false;
return true;
}
static int wl128x_manually_configure_mcs_pll(struct wl1271 *wl)
{
int ret;
ret = wl12xx_top_reg_write(wl, MCS_PLL_M_REG, MCS_PLL_M_REG_VAL);
if (ret < 0)
goto out;
ret = wl12xx_top_reg_write(wl, MCS_PLL_N_REG, MCS_PLL_N_REG_VAL);
if (ret < 0)
goto out;
ret = wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG,
MCS_PLL_CONFIG_REG_VAL);
out:
return ret;
}
static int wl128x_configure_mcs_pll(struct wl1271 *wl, int clk)
{
u16 spare_reg;
u16 pll_config;
u8 input_freq;
struct wl12xx_priv *priv = wl->priv;
int ret;
/* Mask bits [3:1] in the sys_clk_cfg register */
ret = wl12xx_top_reg_read(wl, WL_SPARE_REG, &spare_reg);
if (ret < 0)
return ret;
if (spare_reg == 0xFFFF)
return -EFAULT;
spare_reg |= BIT(2);
ret = wl12xx_top_reg_write(wl, WL_SPARE_REG, spare_reg);
if (ret < 0)
return ret;
/* Handle special cases of the TCXO clock */
if (priv->tcxo_clock == WL12XX_TCXOCLOCK_16_8 ||
priv->tcxo_clock == WL12XX_TCXOCLOCK_33_6)
return wl128x_manually_configure_mcs_pll(wl);
/* Set the input frequency according to the selected clock source */
input_freq = (clk & 1) + 1;
ret = wl12xx_top_reg_read(wl, MCS_PLL_CONFIG_REG, &pll_config);
if (ret < 0)
return ret;
if (pll_config == 0xFFFF)
return -EFAULT;
pll_config |= (input_freq << MCS_SEL_IN_FREQ_SHIFT);
pll_config |= MCS_PLL_ENABLE_HP;
ret = wl12xx_top_reg_write(wl, MCS_PLL_CONFIG_REG, pll_config);
return ret;
}
/*
* WL128x has two clock inputs - TCXO and FREF.
* TCXO is the main clock of the device, while FREF is used to sync
* between the GPS and the cellular modem.
* In cases where TCXO is 32.736MHz or 16.368MHz, the FREF will be used
* as the WLAN/BT main clock.
*/
static int wl128x_boot_clk(struct wl1271 *wl, int *selected_clock)
{
struct wl12xx_priv *priv = wl->priv;
u16 sys_clk_cfg;
int ret;
/* For XTAL-only modes, FREF will be used after switching from TCXO */
if (priv->ref_clock == WL12XX_REFCLOCK_26_XTAL ||
priv->ref_clock == WL12XX_REFCLOCK_38_XTAL) {
if (!wl128x_switch_tcxo_to_fref(wl))
return -EINVAL;
goto fref_clk;
}
/* Query the HW, to determine which clock source we should use */
ret = wl12xx_top_reg_read(wl, SYS_CLK_CFG_REG, &sys_clk_cfg);
if (ret < 0)
return ret;
if (sys_clk_cfg == 0xFFFF)
return -EINVAL;
if (sys_clk_cfg & PRCM_CM_EN_MUX_WLAN_FREF)
goto fref_clk;
/* If TCXO is either 32.736MHz or 16.368MHz, switch to FREF */
if (priv->tcxo_clock == WL12XX_TCXOCLOCK_16_368 ||
priv->tcxo_clock == WL12XX_TCXOCLOCK_32_736) {
if (!wl128x_switch_tcxo_to_fref(wl))
return -EINVAL;
goto fref_clk;
}
/* TCXO clock is selected */
if (!wl128x_is_tcxo_valid(wl))
return -EINVAL;
*selected_clock = priv->tcxo_clock;
goto config_mcs_pll;
fref_clk:
/* FREF clock is selected */
if (!wl128x_is_fref_valid(wl))
return -EINVAL;
*selected_clock = priv->ref_clock;
config_mcs_pll:
return wl128x_configure_mcs_pll(wl, *selected_clock);
}
static int wl127x_boot_clk(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
u32 pause;
u32 clk;
int ret;
if (WL127X_PG_GET_MAJOR(wl->hw_pg_ver) < 3)
wl->quirks |= WLCORE_QUIRK_END_OF_TRANSACTION;
if (priv->ref_clock == CONF_REF_CLK_19_2_E ||
priv->ref_clock == CONF_REF_CLK_38_4_E ||
priv->ref_clock == CONF_REF_CLK_38_4_M_XTAL)
/* ref clk: 19.2/38.4/38.4-XTAL */
clk = 0x3;
else if (priv->ref_clock == CONF_REF_CLK_26_E ||
priv->ref_clock == CONF_REF_CLK_26_M_XTAL ||
priv->ref_clock == CONF_REF_CLK_52_E)
/* ref clk: 26/52 */
clk = 0x5;
else
return -EINVAL;
if (priv->ref_clock != CONF_REF_CLK_19_2_E) {
u16 val;
/* Set clock type (open drain) */
ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_TYPE, &val);
if (ret < 0)
goto out;
val &= FREF_CLK_TYPE_BITS;
ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
if (ret < 0)
goto out;
/* Set clock pull mode (no pull) */
ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_PULL, &val);
if (ret < 0)
goto out;
val |= NO_PULL;
ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_PULL, val);
if (ret < 0)
goto out;
} else {
u16 val;
/* Set clock polarity */
ret = wl12xx_top_reg_read(wl, OCP_REG_CLK_POLARITY, &val);
if (ret < 0)
goto out;
val &= FREF_CLK_POLARITY_BITS;
val |= CLK_REQ_OUTN_SEL;
ret = wl12xx_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
if (ret < 0)
goto out;
}
ret = wlcore_write32(wl, WL12XX_PLL_PARAMETERS, clk);
if (ret < 0)
goto out;
ret = wlcore_read32(wl, WL12XX_PLL_PARAMETERS, &pause);
if (ret < 0)
goto out;
wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
/* clear-then-set of the same mask simply forces these bits on */
pause &= ~(WU_COUNTER_PAUSE_VAL);
pause |= WU_COUNTER_PAUSE_VAL;
ret = wlcore_write32(wl, WL12XX_WU_COUNTER_PAUSE, pause);
out:
return ret;
}
static int wl1271_boot_soft_reset(struct wl1271 *wl)
{
unsigned long timeout;
u32 boot_data;
int ret = 0;
/* perform soft reset */
ret = wlcore_write32(wl, WL12XX_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
if (ret < 0)
goto out;
/* SOFT_RESET is self clearing */
timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
while (1) {
ret = wlcore_read32(wl, WL12XX_SLV_SOFT_RESET, &boot_data);
if (ret < 0)
goto out;
wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
break;
if (time_after(jiffies, timeout)) {
/* the reset bit did not self-clear within
* SOFT_RESET_MAX_TIME */
wl1271_error("soft reset timeout");
return -ETIMEDOUT;
}
udelay(SOFT_RESET_STALL_TIME);
}
/* disable Rx/Tx */
ret = wlcore_write32(wl, WL12XX_ENABLE, 0x0);
if (ret < 0)
goto out;
/* disable auto calibration on start */
ret = wlcore_write32(wl, WL12XX_SPARE_A2, 0xffff);
out:
return ret;
}
static int wl12xx_pre_boot(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
int ret = 0;
u32 clk;
int selected_clock = -1;
if (wl->chip.id == CHIP_ID_128X_PG20) {
ret = wl128x_boot_clk(wl, &selected_clock);
if (ret < 0)
goto out;
} else {
ret = wl127x_boot_clk(wl);
if (ret < 0)
goto out;
}
/* Continue the ELP wake up sequence */
ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
if (ret < 0)
goto out;
udelay(500);
ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
if (ret < 0)
goto out;
/*
* Read-modify-write DRPW_SCRATCH_START register (see next state)
* to be used by DRPw FW. The RTRIM value will be added by the FW
* before taking DRPw out of reset.
*/
ret = wlcore_read32(wl, WL12XX_DRPW_SCRATCH_START, &clk);
if (ret < 0)
goto out;
wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
if (wl->chip.id == CHIP_ID_128X_PG20)
clk |= ((selected_clock & 0x3) << 1) << 4;
else
clk |= (priv->ref_clock << 1) << 4;
ret = wlcore_write32(wl, WL12XX_DRPW_SCRATCH_START, clk);
if (ret < 0)
goto out;
ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
if (ret < 0)
goto out;
/* Disable interrupts */
ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
if (ret < 0)
goto out;
ret = wl1271_boot_soft_reset(wl);
if (ret < 0)
goto out;
out:
return ret;
}
static int wl12xx_pre_upload(struct wl1271 *wl)
{
u32 tmp;
u16 polarity;
int ret;
/* write the firmware's last address (i.e. its length) to
* ACX_EEPROMLESS_IND_REG */
wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
ret = wlcore_write32(wl, WL12XX_EEPROMLESS_IND, WL12XX_EEPROMLESS_IND);
if (ret < 0)
goto out;
ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &tmp);
if (ret < 0)
goto out;
wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
/* 6. read the EEPROM parameters */
ret = wlcore_read32(wl, WL12XX_SCR_PAD2, &tmp);
if (ret < 0)
goto out;
/* WL1271: The reference driver skips steps 7 to 10 (jumps directly
* to upload_fw) */
if (wl->chip.id == CHIP_ID_128X_PG20) {
ret = wl12xx_top_reg_write(wl, SDIO_IO_DS, HCI_IO_DS_6MA);
if (ret < 0)
goto out;
}
/* polarity must be set before the firmware is loaded */
ret = wl12xx_top_reg_read(wl, OCP_REG_POLARITY, &polarity);
if (ret < 0)
goto out;
/* We use HIGH polarity, so unset the LOW bit */
polarity &= ~POLARITY_LOW;
ret = wl12xx_top_reg_write(wl, OCP_REG_POLARITY, polarity);
out:
return ret;
}
static int wl12xx_enable_interrupts(struct wl1271 *wl)
{
int ret;
ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
WL12XX_ACX_ALL_EVENTS_VECTOR);
if (ret < 0)
goto out;
wlcore_enable_interrupts(wl);
ret = wlcore_write_reg(wl, REG_INTERRUPT_MASK,
WL1271_ACX_INTR_ALL & ~(WL12XX_INTR_MASK));
if (ret < 0)
goto disable_interrupts;
ret = wlcore_write32(wl, WL12XX_HI_CFG, HI_CFG_DEF_VAL);
if (ret < 0)
goto disable_interrupts;
return ret;
disable_interrupts:
wlcore_disable_interrupts(wl);
out:
return ret;
}
static int wl12xx_boot(struct wl1271 *wl)
{
int ret;
ret = wl12xx_pre_boot(wl);
if (ret < 0)
goto out;
ret = wlcore_boot_upload_nvs(wl);
if (ret < 0)
goto out;
ret = wl12xx_pre_upload(wl);
if (ret < 0)
goto out;
ret = wlcore_boot_upload_firmware(wl);
if (ret < 0)
goto out;
wl->event_mask = BSS_LOSE_EVENT_ID |
REGAINED_BSS_EVENT_ID |
SCAN_COMPLETE_EVENT_ID |
ROLE_STOP_COMPLETE_EVENT_ID |
RSSI_SNR_TRIGGER_0_EVENT_ID |
PSPOLL_DELIVERY_FAILURE_EVENT_ID |
SOFT_GEMINI_SENSE_EVENT_ID |
PERIODIC_SCAN_REPORT_EVENT_ID |
PERIODIC_SCAN_COMPLETE_EVENT_ID |
DUMMY_PACKET_EVENT_ID |
PEER_REMOVE_COMPLETE_EVENT_ID |
BA_SESSION_RX_CONSTRAINT_EVENT_ID |
REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID |
INACTIVE_STA_EVENT_ID |
CHANNEL_SWITCH_COMPLETE_EVENT_ID;
wl->ap_event_mask = MAX_TX_RETRY_EVENT_ID;
ret = wlcore_boot_run_firmware(wl);
if (ret < 0)
goto out;
ret = wl12xx_enable_interrupts(wl);
out:
return ret;
}
static int wl12xx_trigger_cmd(struct wl1271 *wl, int cmd_box_addr,
void *buf, size_t len)
{
int ret;
ret = wlcore_write(wl, cmd_box_addr, buf, len, false);
if (ret < 0)
return ret;
ret = wlcore_write_reg(wl, REG_INTERRUPT_TRIG, WL12XX_INTR_TRIG_CMD);
return ret;
}
static int wl12xx_ack_event(struct wl1271 *wl)
{
return wlcore_write_reg(wl, REG_INTERRUPT_TRIG,
WL12XX_INTR_TRIG_EVENT_ACK);
}
static u32 wl12xx_calc_tx_blocks(struct wl1271 *wl, u32 len, u32 spare_blks)
{
u32 blk_size = WL12XX_TX_HW_BLOCK_SIZE;
u32 align_len = wlcore_calc_packet_alignment(wl, len);
return (align_len + blk_size - 1) / blk_size + spare_blks;
}
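/*
* A minimal illustration of the block math above, not part of the
* driver: the aligned length is ceiling-divided into HW blocks and
* the spare blocks are added on top. The sample numbers are invented
* (e.g. with a 256-byte block: (1000 + 255) / 256 = 4 blocks, plus spare).
*/
static inline u32 demo_ceil_div_blocks(u32 align_len, u32 blk_size,
u32 spare_blks)
{
/* same rounding-up idiom as wl12xx_calc_tx_blocks() above */
return (align_len + blk_size - 1) / blk_size + spare_blks;
}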
static void
wl12xx_set_tx_desc_blocks(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
u32 blks, u32 spare_blks)
{
if (wl->chip.id == CHIP_ID_128X_PG20) {
desc->wl128x_mem.total_mem_blocks = blks;
} else {
desc->wl127x_mem.extra_blocks = spare_blks;
desc->wl127x_mem.total_mem_blocks = blks;
}
}
static void
wl12xx_set_tx_desc_data_len(struct wl1271 *wl, struct wl1271_tx_hw_descr *desc,
struct sk_buff *skb)
{
u32 aligned_len = wlcore_calc_packet_alignment(wl, skb->len);
if (wl->chip.id == CHIP_ID_128X_PG20) {
desc->wl128x_mem.extra_bytes = aligned_len - skb->len;
desc->length = cpu_to_le16(aligned_len >> 2);
wl1271_debug(DEBUG_TX,
"tx_fill_hdr: hlid: %d len: %d life: %d mem: %d extra: %d",
desc->hlid,
le16_to_cpu(desc->length),
le16_to_cpu(desc->life_time),
desc->wl128x_mem.total_mem_blocks,
desc->wl128x_mem.extra_bytes);
} else {
/* calculate number of padding bytes */
int pad = aligned_len - skb->len;
desc->tx_attr |=
cpu_to_le16(pad << TX_HW_ATTR_OFST_LAST_WORD_PAD);
/* Store the aligned length in terms of words */
desc->length = cpu_to_le16(aligned_len >> 2);
wl1271_debug(DEBUG_TX,
"tx_fill_hdr: pad: %d hlid: %d len: %d life: %d mem: %d",
pad, desc->hlid,
le16_to_cpu(desc->length),
le16_to_cpu(desc->life_time),
desc->wl127x_mem.total_mem_blocks);
}
}
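/*
* A minimal illustration of the padding math above, not part of the
* driver: the descriptor carries the aligned length in 32-bit words
* (hence the >> 2) and the pad is the gap between the aligned and the
* real frame length. The sample values in the comments are invented.
*/
static inline void demo_pad_and_words(u32 skb_len, u32 aligned_len,
u32 *pad, u16 *len_words)
{
*pad = aligned_len - skb_len; /* e.g. 1504 - 1500 = 4 pad bytes */
*len_words = aligned_len >> 2; /* e.g. 1504 / 4 = 376 words */
}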
static enum wl_rx_buf_align
wl12xx_get_rx_buf_align(struct wl1271 *wl, u32 rx_desc)
{
if (rx_desc & RX_BUF_UNALIGNED_PAYLOAD)
return WLCORE_RX_BUF_UNALIGNED;
return WLCORE_RX_BUF_ALIGNED;
}
static u32 wl12xx_get_rx_packet_len(struct wl1271 *wl, void *rx_data,
u32 data_len)
{
struct wl1271_rx_descriptor *desc = rx_data;
/* invalid packet */
if (data_len < sizeof(*desc) ||
data_len < sizeof(*desc) + desc->pad_len)
return 0;
return data_len - sizeof(*desc) - desc->pad_len;
}
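/*
* A minimal illustration of the length check above, not part of the
* driver: the payload is whatever follows the RX descriptor minus the
* firmware pad, with both bounds validated first. Sample numbers are
* invented for the example.
*/
static inline u32 demo_rx_payload_len(u32 data_len, u32 desc_len,
u8 pad_len)
{
if (data_len < desc_len || data_len < desc_len + pad_len)
return 0; /* invalid packet */
/* e.g. 1550 - 24 - 2 = 1524 payload bytes */
return data_len - desc_len - pad_len;
}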
static int wl12xx_tx_delayed_compl(struct wl1271 *wl)
{
if (wl->fw_status->tx_results_counter ==
(wl->tx_results_count & 0xff))
return 0;
return wlcore_tx_complete(wl);
}
static int wl12xx_hw_init(struct wl1271 *wl)
{
int ret;
if (wl->chip.id == CHIP_ID_128X_PG20) {
u32 host_cfg_bitmap = HOST_IF_CFG_RX_FIFO_ENABLE;
ret = wl128x_cmd_general_parms(wl);
if (ret < 0)
goto out;
/*
* In calibrator-based auto-detect mode the FEM number is already
* in wl->fem_manuf; no need to continue further.
*/
if (wl->plt_mode == PLT_FEM_DETECT)
goto out;
ret = wl128x_cmd_radio_parms(wl);
if (ret < 0)
goto out;
if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
/* Enable SDIO padding */
host_cfg_bitmap |= HOST_IF_CFG_TX_PAD_TO_SDIO_BLK;
/* Must be before wl1271_acx_init_mem_config() */
ret = wl1271_acx_host_if_cfg_bitmap(wl, host_cfg_bitmap);
if (ret < 0)
goto out;
} else {
ret = wl1271_cmd_general_parms(wl);
if (ret < 0)
goto out;
/*
* In calibrator-based auto-detect mode the FEM number is already
* in wl->fem_manuf; no need to continue further.
*/
if (wl->plt_mode == PLT_FEM_DETECT)
goto out;
ret = wl1271_cmd_radio_parms(wl);
if (ret < 0)
goto out;
ret = wl1271_cmd_ext_radio_parms(wl);
if (ret < 0)
goto out;
}
out:
return ret;
}
static void wl12xx_convert_fw_status(struct wl1271 *wl, void *raw_fw_status,
struct wl_fw_status *fw_status)
{
struct wl12xx_fw_status *int_fw_status = raw_fw_status;
fw_status->intr = le32_to_cpu(int_fw_status->intr);
fw_status->fw_rx_counter = int_fw_status->fw_rx_counter;
fw_status->drv_rx_counter = int_fw_status->drv_rx_counter;
fw_status->tx_results_counter = int_fw_status->tx_results_counter;
fw_status->rx_pkt_descs = int_fw_status->rx_pkt_descs;
fw_status->fw_localtime = le32_to_cpu(int_fw_status->fw_localtime);
fw_status->link_ps_bitmap = le32_to_cpu(int_fw_status->link_ps_bitmap);
fw_status->link_fast_bitmap =
le32_to_cpu(int_fw_status->link_fast_bitmap);
fw_status->total_released_blks =
le32_to_cpu(int_fw_status->total_released_blks);
fw_status->tx_total = le32_to_cpu(int_fw_status->tx_total);
fw_status->counters.tx_released_pkts =
int_fw_status->counters.tx_released_pkts;
fw_status->counters.tx_lnk_free_pkts =
int_fw_status->counters.tx_lnk_free_pkts;
fw_status->counters.tx_voice_released_blks =
int_fw_status->counters.tx_voice_released_blks;
fw_status->counters.tx_last_rate =
int_fw_status->counters.tx_last_rate;
fw_status->log_start_addr = le32_to_cpu(int_fw_status->log_start_addr);
}
static u32 wl12xx_sta_get_ap_rate_mask(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
return wlvif->rate_set;
}
static void wl12xx_conf_init(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
/* apply driver default configuration */
memcpy(&wl->conf, &wl12xx_conf, sizeof(wl12xx_conf));
/* apply default private configuration */
memcpy(&priv->conf, &wl12xx_default_priv_conf, sizeof(priv->conf));
}
static bool wl12xx_mac_in_fuse(struct wl1271 *wl)
{
bool supported = false;
u8 major, minor;
if (wl->chip.id == CHIP_ID_128X_PG20) {
major = WL128X_PG_GET_MAJOR(wl->hw_pg_ver);
minor = WL128X_PG_GET_MINOR(wl->hw_pg_ver);
/* in wl128x we have the MAC address if the PG is >= (2, 1) */
if (major > 2 || (major == 2 && minor >= 1))
supported = true;
} else {
major = WL127X_PG_GET_MAJOR(wl->hw_pg_ver);
minor = WL127X_PG_GET_MINOR(wl->hw_pg_ver);
/* in wl127x we have the MAC address if the PG is >= (3, 1) */
if (major == 3 && minor >= 1)
supported = true;
}
wl1271_debug(DEBUG_PROBE,
"PG Ver major = %d minor = %d, MAC %s present",
major, minor, supported ? "is" : "is not");
return supported;
}
static int wl12xx_get_fuse_mac(struct wl1271 *wl)
{
u32 mac1, mac2;
int ret;
/* Device may be in ELP from the bootloader or kexec */
ret = wlcore_write32(wl, WL12XX_WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
if (ret < 0)
goto out;
usleep_range(500000, 700000);
ret = wlcore_set_partition(wl, &wl->ptable[PART_DRPW]);
if (ret < 0)
goto out;
ret = wlcore_read32(wl, WL12XX_REG_FUSE_BD_ADDR_1, &mac1);
if (ret < 0)
goto out;
ret = wlcore_read32(wl, WL12XX_REG_FUSE_BD_ADDR_2, &mac2);
if (ret < 0)
goto out;
/* these are the two parts of the BD_ADDR */
wl->fuse_oui_addr = ((mac2 & 0xffff) << 8) +
((mac1 & 0xff000000) >> 24);
wl->fuse_nic_addr = mac1 & 0xffffff;
ret = wlcore_set_partition(wl, &wl->ptable[PART_DOWN]);
out:
return ret;
}
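/*
* A minimal illustration of the fuse BD_ADDR assembly above, not part
* of the driver: mac1[23:0] holds the NIC part, while the OUI is
* rebuilt from mac2[15:0] and mac1[31:24]. The register values in the
* comment are invented for the example.
*/
static inline void demo_fuse_bd_addr(u32 mac1, u32 mac2,
u32 *oui, u32 *nic)
{
*oui = ((mac2 & 0xffff) << 8) + ((mac1 & 0xff000000) >> 24);
*nic = mac1 & 0xffffff;
/* mac1 = 0x08123456, mac2 = 0x00000080 -> oui 0x8008, nic 0x123456 */
}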
static int wl12xx_get_pg_ver(struct wl1271 *wl, s8 *ver)
{
u16 die_info;
int ret;
if (wl->chip.id == CHIP_ID_128X_PG20)
ret = wl12xx_top_reg_read(wl, WL128X_REG_FUSE_DATA_2_1,
&die_info);
else
ret = wl12xx_top_reg_read(wl, WL127X_REG_FUSE_DATA_2_1,
&die_info);
if (ret >= 0 && ver)
*ver = (s8)((die_info & PG_VER_MASK) >> PG_VER_OFFSET);
return ret;
}
static int wl12xx_get_mac(struct wl1271 *wl)
{
if (wl12xx_mac_in_fuse(wl))
return wl12xx_get_fuse_mac(wl);
return 0;
}
static void wl12xx_set_tx_desc_csum(struct wl1271 *wl,
struct wl1271_tx_hw_descr *desc,
struct sk_buff *skb)
{
desc->wl12xx_reserved = 0;
}
static int wl12xx_plt_init(struct wl1271 *wl)
{
int ret;
ret = wl->ops->boot(wl);
if (ret < 0)
goto out;
ret = wl->ops->hw_init(wl);
if (ret < 0)
goto out_irq_disable;
/*
* In calibrator-based auto-detect mode the FEM number is already
* in wl->fem_manuf; no need to continue further.
*/
if (wl->plt_mode == PLT_FEM_DETECT)
goto out;
ret = wl1271_acx_init_mem_config(wl);
if (ret < 0)
goto out_irq_disable;
ret = wl12xx_acx_mem_cfg(wl);
if (ret < 0)
goto out_free_memmap;
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
/* Configure for CAM power saving (i.e. always active) */
ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
if (ret < 0)
goto out_free_memmap;
/* configure PM */
ret = wl1271_acx_pm_config(wl);
if (ret < 0)
goto out_free_memmap;
goto out;
out_free_memmap:
kfree(wl->target_mem_map);
wl->target_mem_map = NULL;
out_irq_disable:
mutex_unlock(&wl->mutex);
/*
* Unlocking the mutex in the middle of handling is
* inherently unsafe. In this case we deem it safe to do,
* because we need to let any possibly pending IRQ out of
* the system (and while we are WL1271_STATE_OFF the IRQ
* work function will not do anything.) Also, any other
* possible concurrent operations will fail due to the
* current state, hence the wl1271 struct should be safe.
*/
wlcore_disable_interrupts(wl);
mutex_lock(&wl->mutex);
out:
return ret;
}
static int wl12xx_get_spare_blocks(struct wl1271 *wl, bool is_gem)
{
if (is_gem)
return WL12XX_TX_HW_BLOCK_GEM_SPARE;
return WL12XX_TX_HW_BLOCK_SPARE_DEFAULT;
}
static int wl12xx_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf)
{
return wlcore_set_key(wl, cmd, vif, sta, key_conf);
}
static int wl12xx_set_peer_cap(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation,
u32 rate_set, u8 hlid)
{
return wl1271_acx_set_ht_capabilities(wl, ht_cap, allow_ht_operation,
hlid);
}
static bool wl12xx_lnk_high_prio(struct wl1271 *wl, u8 hlid,
struct wl1271_link *lnk)
{
u8 thold;
if (test_bit(hlid, &wl->fw_fast_lnk_map))
thold = wl->conf.tx.fast_link_thold;
else
thold = wl->conf.tx.slow_link_thold;
return lnk->allocated_pkts < thold;
}
static bool wl12xx_lnk_low_prio(struct wl1271 *wl, u8 hlid,
struct wl1271_link *lnk)
{
/* any link is good for low priority */
return true;
}
static u32 wl12xx_convert_hwaddr(struct wl1271 *wl, u32 hwaddr)
{
return hwaddr << 5;
}
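/*
* A minimal illustration, not part of the driver: the << 5 above
* multiplies by 32, which suggests the firmware reports addresses in
* 32-byte units that the host converts to byte offsets.
*/
static inline u32 demo_hwaddr_to_bytes(u32 hwaddr)
{
return hwaddr << 5; /* e.g. 0x100 << 5 == 0x2000 */
}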
static int wl12xx_setup(struct wl1271 *wl);
static struct wlcore_ops wl12xx_ops = {
.setup = wl12xx_setup,
.identify_chip = wl12xx_identify_chip,
.boot = wl12xx_boot,
.plt_init = wl12xx_plt_init,
.trigger_cmd = wl12xx_trigger_cmd,
.ack_event = wl12xx_ack_event,
.wait_for_event = wl12xx_wait_for_event,
.process_mailbox_events = wl12xx_process_mailbox_events,
.calc_tx_blocks = wl12xx_calc_tx_blocks,
.set_tx_desc_blocks = wl12xx_set_tx_desc_blocks,
.set_tx_desc_data_len = wl12xx_set_tx_desc_data_len,
.get_rx_buf_align = wl12xx_get_rx_buf_align,
.get_rx_packet_len = wl12xx_get_rx_packet_len,
.tx_immediate_compl = NULL,
.tx_delayed_compl = wl12xx_tx_delayed_compl,
.hw_init = wl12xx_hw_init,
.init_vif = NULL,
.convert_fw_status = wl12xx_convert_fw_status,
.sta_get_ap_rate_mask = wl12xx_sta_get_ap_rate_mask,
.get_pg_ver = wl12xx_get_pg_ver,
.get_mac = wl12xx_get_mac,
.set_tx_desc_csum = wl12xx_set_tx_desc_csum,
.set_rx_csum = NULL,
.ap_get_mimo_wide_rate_mask = NULL,
.debugfs_init = wl12xx_debugfs_add_files,
.scan_start = wl12xx_scan_start,
.scan_stop = wl12xx_scan_stop,
.sched_scan_start = wl12xx_sched_scan_start,
.sched_scan_stop = wl12xx_scan_sched_scan_stop,
.get_spare_blocks = wl12xx_get_spare_blocks,
.set_key = wl12xx_set_key,
.channel_switch = wl12xx_cmd_channel_switch,
.pre_pkt_send = NULL,
.set_peer_cap = wl12xx_set_peer_cap,
.convert_hwaddr = wl12xx_convert_hwaddr,
.lnk_high_prio = wl12xx_lnk_high_prio,
.lnk_low_prio = wl12xx_lnk_low_prio,
.interrupt_notify = NULL,
.rx_ba_filter = NULL,
.ap_sleep = NULL,
};
static struct ieee80211_sta_ht_cap wl12xx_ht_cap = {
.cap = IEEE80211_HT_CAP_GRN_FLD | IEEE80211_HT_CAP_SGI_20 |
(1 << IEEE80211_HT_CAP_RX_STBC_SHIFT),
.ht_supported = true,
.ampdu_factor = IEEE80211_HT_MAX_AMPDU_8K,
.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8,
.mcs = {
.rx_mask = { 0xff, 0, 0, 0, 0, 0, 0, 0, 0, 0, },
.rx_highest = cpu_to_le16(72),
.tx_params = IEEE80211_HT_MCS_TX_DEFINED,
},
};
static const struct ieee80211_iface_limit wl12xx_iface_limits[] = {
{
.max = 3,
.types = BIT(NL80211_IFTYPE_STATION),
},
{
.max = 1,
.types = BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_GO) |
BIT(NL80211_IFTYPE_P2P_CLIENT),
},
};
static const struct ieee80211_iface_combination
wl12xx_iface_combinations[] = {
{
.max_interfaces = 3,
.limits = wl12xx_iface_limits,
.n_limits = ARRAY_SIZE(wl12xx_iface_limits),
.num_different_channels = 1,
},
};
static const struct wl12xx_clock wl12xx_refclock_table[] = {
{ 19200000, false, WL12XX_REFCLOCK_19 },
{ 26000000, false, WL12XX_REFCLOCK_26 },
{ 26000000, true, WL12XX_REFCLOCK_26_XTAL },
{ 38400000, false, WL12XX_REFCLOCK_38 },
{ 38400000, true, WL12XX_REFCLOCK_38_XTAL },
{ 52000000, false, WL12XX_REFCLOCK_52 },
{ 0, false, 0 }
};
static const struct wl12xx_clock wl12xx_tcxoclock_table[] = {
{ 16368000, true, WL12XX_TCXOCLOCK_16_368 },
{ 16800000, true, WL12XX_TCXOCLOCK_16_8 },
{ 19200000, true, WL12XX_TCXOCLOCK_19_2 },
{ 26000000, true, WL12XX_TCXOCLOCK_26 },
{ 32736000, true, WL12XX_TCXOCLOCK_32_736 },
{ 33600000, true, WL12XX_TCXOCLOCK_33_6 },
{ 38400000, true, WL12XX_TCXOCLOCK_38_4 },
{ 52000000, true, WL12XX_TCXOCLOCK_52 },
{ 0, false, 0 }
};
static int wl12xx_get_clock_idx(const struct wl12xx_clock *table,
u32 freq, bool xtal)
{
int i;
for (i = 0; table[i].freq != 0; i++)
if ((table[i].freq == freq) && (table[i].xtal == xtal))
return table[i].hw_idx;
return -EINVAL;
}
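/*
* A minimal usage sketch of the lookup above, not part of the driver:
* resolving a 38.4 MHz crystal to its hw index from the refclock
* table; -EINVAL is returned when no row matches.
*/
static inline int demo_lookup_refclock(void)
{
/* matches the { 38400000, true, WL12XX_REFCLOCK_38_XTAL } row */
return wl12xx_get_clock_idx(wl12xx_refclock_table, 38400000, true);
}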
static int wl12xx_setup(struct wl1271 *wl)
{
struct wl12xx_priv *priv = wl->priv;
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&wl->pdev->dev);
BUILD_BUG_ON(WL12XX_MAX_LINKS > WLCORE_MAX_LINKS);
BUILD_BUG_ON(WL12XX_MAX_AP_STATIONS > WL12XX_MAX_LINKS);
BUILD_BUG_ON(WL12XX_CONF_SG_PARAMS_MAX > WLCORE_CONF_SG_PARAMS_MAX);
wl->rtable = wl12xx_rtable;
wl->num_tx_desc = WL12XX_NUM_TX_DESCRIPTORS;
wl->num_rx_desc = WL12XX_NUM_RX_DESCRIPTORS;
wl->num_links = WL12XX_MAX_LINKS;
wl->max_ap_stations = WL12XX_MAX_AP_STATIONS;
wl->iface_combinations = wl12xx_iface_combinations;
wl->n_iface_combinations = ARRAY_SIZE(wl12xx_iface_combinations);
wl->num_mac_addr = WL12XX_NUM_MAC_ADDRESSES;
wl->band_rate_to_idx = wl12xx_band_rate_to_idx;
wl->hw_tx_rate_tbl_size = WL12XX_CONF_HW_RXTX_RATE_MAX;
wl->hw_min_ht_rate = WL12XX_CONF_HW_RXTX_RATE_MCS0;
wl->fw_status_len = sizeof(struct wl12xx_fw_status);
wl->fw_status_priv_len = 0;
wl->stats.fw_stats_len = sizeof(struct wl12xx_acx_statistics);
wl->ofdm_only_ap = true;
wlcore_set_ht_cap(wl, NL80211_BAND_2GHZ, &wl12xx_ht_cap);
wlcore_set_ht_cap(wl, NL80211_BAND_5GHZ, &wl12xx_ht_cap);
wl12xx_conf_init(wl);
if (!fref_param) {
priv->ref_clock = wl12xx_get_clock_idx(wl12xx_refclock_table,
pdev_data->ref_clock_freq,
pdev_data->ref_clock_xtal);
if (priv->ref_clock < 0) {
wl1271_error("Invalid ref_clock frequency (%d Hz, %s)",
pdev_data->ref_clock_freq,
pdev_data->ref_clock_xtal ?
"XTAL" : "not XTAL");
return priv->ref_clock;
}
} else {
if (!strcmp(fref_param, "19.2"))
priv->ref_clock = WL12XX_REFCLOCK_19;
else if (!strcmp(fref_param, "26"))
priv->ref_clock = WL12XX_REFCLOCK_26;
else if (!strcmp(fref_param, "26x"))
priv->ref_clock = WL12XX_REFCLOCK_26_XTAL;
else if (!strcmp(fref_param, "38.4"))
priv->ref_clock = WL12XX_REFCLOCK_38;
else if (!strcmp(fref_param, "38.4x"))
priv->ref_clock = WL12XX_REFCLOCK_38_XTAL;
else if (!strcmp(fref_param, "52"))
priv->ref_clock = WL12XX_REFCLOCK_52;
else
wl1271_error("Invalid fref parameter %s", fref_param);
}
if (!tcxo_param && pdev_data->tcxo_clock_freq) {
priv->tcxo_clock = wl12xx_get_clock_idx(wl12xx_tcxoclock_table,
pdev_data->tcxo_clock_freq,
true);
if (priv->tcxo_clock < 0) {
wl1271_error("Invalid tcxo_clock frequency (%d Hz)",
pdev_data->tcxo_clock_freq);
return priv->tcxo_clock;
}
} else if (tcxo_param) {
if (!strcmp(tcxo_param, "19.2"))
priv->tcxo_clock = WL12XX_TCXOCLOCK_19_2;
else if (!strcmp(tcxo_param, "26"))
priv->tcxo_clock = WL12XX_TCXOCLOCK_26;
else if (!strcmp(tcxo_param, "38.4"))
priv->tcxo_clock = WL12XX_TCXOCLOCK_38_4;
else if (!strcmp(tcxo_param, "52"))
priv->tcxo_clock = WL12XX_TCXOCLOCK_52;
else if (!strcmp(tcxo_param, "16.368"))
priv->tcxo_clock = WL12XX_TCXOCLOCK_16_368;
else if (!strcmp(tcxo_param, "32.736"))
priv->tcxo_clock = WL12XX_TCXOCLOCK_32_736;
else if (!strcmp(tcxo_param, "16.8"))
priv->tcxo_clock = WL12XX_TCXOCLOCK_16_8;
else if (!strcmp(tcxo_param, "33.6"))
priv->tcxo_clock = WL12XX_TCXOCLOCK_33_6;
else
wl1271_error("Invalid tcxo parameter %s", tcxo_param);
}
priv->rx_mem_addr = kmalloc(sizeof(*priv->rx_mem_addr), GFP_KERNEL);
if (!priv->rx_mem_addr)
return -ENOMEM;
return 0;
}
static int wl12xx_probe(struct platform_device *pdev)
{
struct wl1271 *wl;
struct ieee80211_hw *hw;
int ret;
hw = wlcore_alloc_hw(sizeof(struct wl12xx_priv),
WL12XX_AGGR_BUFFER_SIZE,
sizeof(struct wl12xx_event_mailbox));
if (IS_ERR(hw)) {
wl1271_error("can't allocate hw");
ret = PTR_ERR(hw);
goto out;
}
wl = hw->priv;
wl->ops = &wl12xx_ops;
wl->ptable = wl12xx_ptable;
ret = wlcore_probe(wl, pdev);
if (ret)
goto out_free;
return ret;
out_free:
wlcore_free_hw(wl);
out:
return ret;
}
static int wl12xx_remove(struct platform_device *pdev)
{
struct wl1271 *wl = platform_get_drvdata(pdev);
struct wl12xx_priv *priv;
priv = wl->priv;
kfree(priv->rx_mem_addr);
return wlcore_remove(pdev);
}
static const struct platform_device_id wl12xx_id_table[] = {
{ "wl12xx", 0 },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(platform, wl12xx_id_table);
static struct platform_driver wl12xx_driver = {
.probe = wl12xx_probe,
.remove = wl12xx_remove,
.id_table = wl12xx_id_table,
.driver = {
.name = "wl12xx_driver",
}
};
module_platform_driver(wl12xx_driver);
module_param_named(fref, fref_param, charp, 0);
MODULE_PARM_DESC(fref, "FREF clock: 19.2, 26, 26x, 38.4, 38.4x, 52");
module_param_named(tcxo, tcxo_param, charp, 0);
MODULE_PARM_DESC(tcxo,
"TCXO clock: 19.2, 26, 38.4, 52, 16.368, 32.736, 16.8, 33.6");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Luciano Coelho <[email protected]>");
MODULE_FIRMWARE(WL127X_FW_NAME_SINGLE);
MODULE_FIRMWARE(WL127X_FW_NAME_MULTI);
MODULE_FIRMWARE(WL127X_PLT_FW_NAME);
MODULE_FIRMWARE(WL128X_FW_NAME_SINGLE);
MODULE_FIRMWARE(WL128X_FW_NAME_MULTI);
MODULE_FIRMWARE(WL128X_PLT_FW_NAME);
|
linux-master
|
drivers/net/wireless/ti/wl12xx/main.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl12xx
*
* Copyright (C) 2012 Texas Instruments. All rights reserved.
*/
#include "event.h"
#include "scan.h"
#include "../wlcore/cmd.h"
#include "../wlcore/debug.h"
int wl12xx_wait_for_event(struct wl1271 *wl, enum wlcore_wait_event event,
bool *timeout)
{
u32 local_event;
switch (event) {
case WLCORE_EVENT_ROLE_STOP_COMPLETE:
local_event = ROLE_STOP_COMPLETE_EVENT_ID;
break;
case WLCORE_EVENT_PEER_REMOVE_COMPLETE:
local_event = PEER_REMOVE_COMPLETE_EVENT_ID;
break;
default:
/* event not implemented */
return 0;
}
return wlcore_cmd_wait_for_event_or_timeout(wl, local_event, timeout);
}
int wl12xx_process_mailbox_events(struct wl1271 *wl)
{
struct wl12xx_event_mailbox *mbox = wl->mbox;
u32 vector;
vector = le32_to_cpu(mbox->events_vector);
vector &= ~(le32_to_cpu(mbox->events_mask));
wl1271_debug(DEBUG_EVENT, "MBOX vector: 0x%x", vector);
if (vector & SCAN_COMPLETE_EVENT_ID) {
wl1271_debug(DEBUG_EVENT, "status: 0x%x",
mbox->scheduled_scan_status);
if (wl->scan_wlvif)
wl12xx_scan_completed(wl, wl->scan_wlvif);
}
if (vector & PERIODIC_SCAN_REPORT_EVENT_ID) {
wl1271_debug(DEBUG_EVENT,
"PERIODIC_SCAN_REPORT_EVENT (status 0x%0x)",
mbox->scheduled_scan_status);
wlcore_scan_sched_scan_results(wl);
}
if (vector & PERIODIC_SCAN_COMPLETE_EVENT_ID)
wlcore_event_sched_scan_completed(wl,
mbox->scheduled_scan_status);
if (vector & SOFT_GEMINI_SENSE_EVENT_ID)
wlcore_event_soft_gemini_sense(wl,
mbox->soft_gemini_sense_info);
if (vector & BSS_LOSE_EVENT_ID)
wlcore_event_beacon_loss(wl, 0xff);
if (vector & RSSI_SNR_TRIGGER_0_EVENT_ID)
wlcore_event_rssi_trigger(wl, mbox->rssi_snr_trigger_metric);
if (vector & BA_SESSION_RX_CONSTRAINT_EVENT_ID)
wlcore_event_ba_rx_constraint(wl,
BIT(mbox->role_id),
mbox->rx_ba_allowed);
if (vector & CHANNEL_SWITCH_COMPLETE_EVENT_ID)
wlcore_event_channel_switch(wl, 0xff,
mbox->channel_switch_status);
if (vector & DUMMY_PACKET_EVENT_ID)
wlcore_event_dummy_packet(wl);
/*
* "TX retries exceeded" has a different meaning according to mode.
* In AP mode the offending station is disconnected.
*/
if (vector & MAX_TX_RETRY_EVENT_ID)
wlcore_event_max_tx_failure(wl,
le16_to_cpu(mbox->sta_tx_retry_exceeded));
if (vector & INACTIVE_STA_EVENT_ID)
wlcore_event_inactive_sta(wl,
le16_to_cpu(mbox->sta_aging_status));
if (vector & REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID)
wlcore_event_roc_complete(wl);
return 0;
}
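/*
* A minimal illustration of the masking above, not part of the driver:
* only events that are both raised and not masked are dispatched. The
* sample values are invented.
*/
static inline u32 demo_pending_events(u32 events_vector, u32 events_mask)
{
/* e.g. 0x11 & ~0x01 -> 0x10 pending */
return events_vector & ~events_mask;
}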
|
linux-master
|
drivers/net/wireless/ti/wl12xx/event.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include "acx.h"
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/slab.h>
#include "wlcore.h"
#include "debug.h"
#include "wl12xx_80211.h"
#include "hw_ops.h"
int wl1271_acx_wake_up_conditions(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 wake_up_event, u8 listen_interval)
{
struct acx_wake_up_condition *wake_up;
int ret;
wl1271_debug(DEBUG_ACX, "acx wake up conditions (wake_up_event %d listen_interval %d)",
wake_up_event, listen_interval);
wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL);
if (!wake_up) {
ret = -ENOMEM;
goto out;
}
wake_up->role_id = wlvif->role_id;
wake_up->wake_up_event = wake_up_event;
wake_up->listen_interval = listen_interval;
ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS,
wake_up, sizeof(*wake_up));
if (ret < 0) {
wl1271_warning("could not set wake up conditions: %d", ret);
goto out;
}
out:
kfree(wake_up);
return ret;
}
int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth)
{
struct acx_sleep_auth *auth;
int ret;
wl1271_debug(DEBUG_ACX, "acx sleep auth %d", sleep_auth);
auth = kzalloc(sizeof(*auth), GFP_KERNEL);
if (!auth) {
ret = -ENOMEM;
goto out;
}
auth->sleep_auth = sleep_auth;
ret = wl1271_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
if (ret < 0) {
wl1271_error("could not configure sleep_auth to %d: %d",
sleep_auth, ret);
goto out;
}
wl->sleep_auth = sleep_auth;
out:
kfree(auth);
return ret;
}
EXPORT_SYMBOL_GPL(wl1271_acx_sleep_auth);
int wl1271_acx_tx_power(struct wl1271 *wl, struct wl12xx_vif *wlvif,
int power)
{
struct acx_current_tx_power *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr %d", power);
if (power < 0 || power > 25)
return -EINVAL;
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
acx->current_tx_power = power * 10;
ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("configure of tx power failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
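/*
* A minimal illustration, not part of the driver: the * 10 above
* suggests the firmware takes TX power in units of 0.1 dBm, after the
* 0..25 dBm range check.
*/
static inline int demo_tx_power_units(int power_dbm)
{
if (power_dbm < 0 || power_dbm > 25)
return -EINVAL;
return power_dbm * 10; /* e.g. 14 dBm -> 140 */
}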
int wl1271_acx_feature_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_feature_config *feature;
int ret;
wl1271_debug(DEBUG_ACX, "acx feature cfg");
feature = kzalloc(sizeof(*feature), GFP_KERNEL);
if (!feature) {
ret = -ENOMEM;
goto out;
}
/* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE are disabled */
feature->role_id = wlvif->role_id;
feature->data_flow_options = 0;
feature->options = 0;
ret = wl1271_cmd_configure(wl, ACX_FEATURE_CFG,
feature, sizeof(*feature));
if (ret < 0) {
wl1271_error("Couldn't set HW encryption");
goto out;
}
out:
kfree(feature);
return ret;
}
int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map,
size_t len)
{
int ret;
wl1271_debug(DEBUG_ACX, "acx mem map");
ret = wl1271_cmd_interrogate(wl, ACX_MEM_MAP, mem_map,
sizeof(struct acx_header), len);
if (ret < 0)
return ret;
return 0;
}
int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl)
{
struct acx_rx_msdu_lifetime *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx rx msdu life time");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->lifetime = cpu_to_le32(wl->conf.rx.rx_msdu_life_time);
ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME,
acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("failed to set rx msdu life time: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_slot(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_slot_type slot_time)
{
struct acx_slot *slot;
int ret;
wl1271_debug(DEBUG_ACX, "acx slot");
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot) {
ret = -ENOMEM;
goto out;
}
slot->role_id = wlvif->role_id;
slot->wone_index = STATION_WONE_INDEX;
slot->slot_time = slot_time;
ret = wl1271_cmd_configure(wl, ACX_SLOT, slot, sizeof(*slot));
if (ret < 0) {
wl1271_warning("failed to set slot time: %d", ret);
goto out;
}
out:
kfree(slot);
return ret;
}
int wl1271_acx_group_address_tbl(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable, void *mc_list, u32 mc_list_len)
{
struct acx_dot11_grp_addr_tbl *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx group address tbl");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
/* MAC filtering */
acx->role_id = wlvif->role_id;
acx->enabled = enable;
acx->num_groups = mc_list_len;
memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL,
acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("failed to set group addr table: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_service_period_timeout(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
struct acx_rx_timeout *rx_timeout;
int ret;
rx_timeout = kzalloc(sizeof(*rx_timeout), GFP_KERNEL);
if (!rx_timeout) {
ret = -ENOMEM;
goto out;
}
wl1271_debug(DEBUG_ACX, "acx service period timeout");
rx_timeout->role_id = wlvif->role_id;
rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT,
rx_timeout, sizeof(*rx_timeout));
if (ret < 0) {
wl1271_warning("failed to set service period timeout: %d",
ret);
goto out;
}
out:
kfree(rx_timeout);
return ret;
}
int wl1271_acx_rts_threshold(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u32 rts_threshold)
{
struct acx_rts_threshold *rts;
int ret;
/*
* If the RTS threshold is not configured or out of range, use the
* default value.
*/
if (rts_threshold > IEEE80211_MAX_RTS_THRESHOLD)
rts_threshold = wl->conf.rx.rts_threshold;
wl1271_debug(DEBUG_ACX, "acx rts threshold: %d", rts_threshold);
rts = kzalloc(sizeof(*rts), GFP_KERNEL);
if (!rts) {
ret = -ENOMEM;
goto out;
}
rts->role_id = wlvif->role_id;
rts->threshold = cpu_to_le16((u16)rts_threshold);
ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
if (ret < 0) {
wl1271_warning("failed to set rts threshold: %d", ret);
goto out;
}
out:
kfree(rts);
return ret;
}
int wl1271_acx_dco_itrim_params(struct wl1271 *wl)
{
struct acx_dco_itrim_params *dco;
struct conf_itrim_settings *c = &wl->conf.itrim;
int ret;
wl1271_debug(DEBUG_ACX, "acx dco itrim parameters");
dco = kzalloc(sizeof(*dco), GFP_KERNEL);
if (!dco) {
ret = -ENOMEM;
goto out;
}
dco->enable = c->enable;
dco->timeout = cpu_to_le32(c->timeout);
ret = wl1271_cmd_configure(wl, ACX_SET_DCO_ITRIM_PARAMS,
dco, sizeof(*dco));
if (ret < 0) {
wl1271_warning("failed to set dco itrim parameters: %d", ret);
goto out;
}
out:
kfree(dco);
return ret;
}
int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable_filter)
{
struct acx_beacon_filter_option *beacon_filter = NULL;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx beacon filter opt enable=%d",
enable_filter);
if (enable_filter &&
wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
goto out;
beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL);
if (!beacon_filter) {
ret = -ENOMEM;
goto out;
}
beacon_filter->role_id = wlvif->role_id;
beacon_filter->enable = enable_filter;
/*
* When set to zero, and the filter is enabled, beacons
* without the unicast TIM bit set are dropped.
*/
beacon_filter->max_num_beacons = 0;
ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT,
beacon_filter, sizeof(*beacon_filter));
if (ret < 0) {
wl1271_warning("failed to set beacon filter opt: %d", ret);
goto out;
}
out:
kfree(beacon_filter);
return ret;
}
int wl1271_acx_beacon_filter_table(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
struct acx_beacon_filter_ie_table *ie_table;
int i, idx = 0;
int ret;
bool vendor_spec = false;
wl1271_debug(DEBUG_ACX, "acx beacon filter table");
ie_table = kzalloc(sizeof(*ie_table), GFP_KERNEL);
if (!ie_table) {
ret = -ENOMEM;
goto out;
}
/* configure default beacon pass-through rules */
ie_table->role_id = wlvif->role_id;
ie_table->num_ie = 0;
for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
ie_table->table[idx++] = r->ie;
ie_table->table[idx++] = r->rule;
if (r->ie == WLAN_EID_VENDOR_SPECIFIC) {
/* only one vendor specific ie allowed */
if (vendor_spec)
continue;
/* for vendor specific rules configure the
* additional fields */
memcpy(&(ie_table->table[idx]), r->oui,
CONF_BCN_IE_OUI_LEN);
idx += CONF_BCN_IE_OUI_LEN;
ie_table->table[idx++] = r->type;
memcpy(&(ie_table->table[idx]), r->version,
CONF_BCN_IE_VER_LEN);
idx += CONF_BCN_IE_VER_LEN;
vendor_spec = true;
}
ie_table->num_ie++;
}
ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE,
ie_table, sizeof(*ie_table));
if (ret < 0) {
wl1271_warning("failed to set beacon filter table: %d", ret);
goto out;
}
out:
kfree(ie_table);
return ret;
}
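/*
* A minimal sketch of the table layout built above, not part of the
* driver: the flat byte stream is a series of {ie, rule} pairs, and a
* vendor-specific IE entry is additionally followed by its OUI, type
* and version bytes:
*
* [ie0][rule0] ... [221][rule][oui][type][version]
*/
static inline int demo_vendor_entry_len(void)
{
/* ie + rule + OUI + type + version, lengths as used in the loop */
return 2 + CONF_BCN_IE_OUI_LEN + 1 + CONF_BCN_IE_VER_LEN;
}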
#define ACX_CONN_MONIT_DISABLE_VALUE 0xffffffff
int wl1271_acx_conn_monit_params(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable)
{
struct acx_conn_monit_params *acx;
u32 threshold = ACX_CONN_MONIT_DISABLE_VALUE;
u32 timeout = ACX_CONN_MONIT_DISABLE_VALUE;
int ret;
wl1271_debug(DEBUG_ACX, "acx connection monitor parameters: %s",
enable ? "enabled" : "disabled");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
if (enable) {
threshold = wl->conf.conn.synch_fail_thold;
timeout = wl->conf.conn.bss_lose_timeout;
}
acx->role_id = wlvif->role_id;
acx->synch_fail_thold = cpu_to_le32(threshold);
acx->bss_lose_timeout = cpu_to_le32(timeout);
ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("failed to set connection monitor "
"parameters: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_sg_enable(struct wl1271 *wl, bool enable)
{
struct acx_bt_wlan_coex *pta;
int ret;
wl1271_debug(DEBUG_ACX, "acx sg enable");
pta = kzalloc(sizeof(*pta), GFP_KERNEL);
if (!pta) {
ret = -ENOMEM;
goto out;
}
if (enable)
pta->enable = wl->conf.sg.state;
else
pta->enable = CONF_SG_DISABLE;
ret = wl1271_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta));
if (ret < 0) {
wl1271_warning("failed to set softgemini enable: %d", ret);
goto out;
}
out:
kfree(pta);
return ret;
}
int wl12xx_acx_sg_cfg(struct wl1271 *wl)
{
struct acx_bt_wlan_coex_param *param;
struct conf_sg_settings *c = &wl->conf.sg;
int i, ret;
wl1271_debug(DEBUG_ACX, "acx sg cfg");
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param) {
ret = -ENOMEM;
goto out;
}
/* BT-WLAN coext parameters */
for (i = 0; i < WLCORE_CONF_SG_PARAMS_MAX; i++)
param->params[i] = cpu_to_le32(c->params[i]);
param->param_idx = WLCORE_CONF_SG_PARAMS_ALL;
ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
if (ret < 0) {
wl1271_warning("failed to set sg config: %d", ret);
goto out;
}
out:
kfree(param);
return ret;
}
int wl1271_acx_cca_threshold(struct wl1271 *wl)
{
struct acx_energy_detection *detection;
int ret;
wl1271_debug(DEBUG_ACX, "acx cca threshold");
detection = kzalloc(sizeof(*detection), GFP_KERNEL);
if (!detection) {
ret = -ENOMEM;
goto out;
}
detection->rx_cca_threshold = cpu_to_le16(wl->conf.rx.rx_cca_threshold);
detection->tx_energy_detection = wl->conf.tx.tx_energy_detection;
ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
detection, sizeof(*detection));
if (ret < 0)
wl1271_warning("failed to set cca threshold: %d", ret);
out:
kfree(detection);
return ret;
}
int wl1271_acx_bcn_dtim_options(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_beacon_broadcast *bb;
int ret;
wl1271_debug(DEBUG_ACX, "acx bcn dtim options");
bb = kzalloc(sizeof(*bb), GFP_KERNEL);
if (!bb) {
ret = -ENOMEM;
goto out;
}
bb->role_id = wlvif->role_id;
bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
bb->ps_poll_threshold = wl->conf.conn.ps_poll_threshold;
ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb));
if (ret < 0) {
wl1271_warning("failed to set rx config: %d", ret);
goto out;
}
out:
kfree(bb);
return ret;
}
int wl1271_acx_aid(struct wl1271 *wl, struct wl12xx_vif *wlvif, u16 aid)
{
struct acx_aid *acx_aid;
int ret;
wl1271_debug(DEBUG_ACX, "acx aid");
acx_aid = kzalloc(sizeof(*acx_aid), GFP_KERNEL);
if (!acx_aid) {
ret = -ENOMEM;
goto out;
}
acx_aid->role_id = wlvif->role_id;
acx_aid->aid = cpu_to_le16(aid);
ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
if (ret < 0) {
wl1271_warning("failed to set aid: %d", ret);
goto out;
}
out:
kfree(acx_aid);
return ret;
}
int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask)
{
struct acx_event_mask *mask;
int ret;
wl1271_debug(DEBUG_ACX, "acx event mbox mask");
mask = kzalloc(sizeof(*mask), GFP_KERNEL);
if (!mask) {
ret = -ENOMEM;
goto out;
}
/* high event mask is unused */
mask->high_event_mask = cpu_to_le32(0xffffffff);
mask->event_mask = cpu_to_le32(event_mask);
ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK,
mask, sizeof(*mask));
if (ret < 0) {
wl1271_warning("failed to set acx_event_mbox_mask: %d", ret);
goto out;
}
out:
kfree(mask);
return ret;
}
int wl1271_acx_set_preamble(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_preamble_type preamble)
{
struct acx_preamble *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx_set_preamble");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
acx->preamble = preamble;
ret = wl1271_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of preamble failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_cts_protect(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum acx_ctsprotect_type ctsprotect)
{
struct acx_ctsprotect *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx_set_ctsprotect");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
acx->ctsprotect = ctsprotect;
ret = wl1271_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of ctsprotect failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_statistics(struct wl1271 *wl, void *stats)
{
int ret;
wl1271_debug(DEBUG_ACX, "acx statistics");
ret = wl1271_cmd_interrogate(wl, ACX_STATISTICS, stats,
sizeof(struct acx_header),
wl->stats.fw_stats_len);
if (ret < 0) {
wl1271_warning("acx statistics failed: %d", ret);
return -ENOMEM;
}
return 0;
}
int wl1271_acx_sta_rate_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct acx_rate_policy *acx;
struct conf_tx_rate_class *c = &wl->conf.tx.sta_rc_conf;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx rate policies");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
wl1271_debug(DEBUG_ACX, "basic_rate: 0x%x, full_rate: 0x%x",
wlvif->basic_rate, wlvif->rate_set);
/* configure one basic rate class */
acx->rate_policy_idx = cpu_to_le32(wlvif->sta.basic_rate_idx);
acx->rate_policy.enabled_rates = cpu_to_le32(wlvif->basic_rate);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of rate policies failed: %d", ret);
goto out;
}
/* configure one AP supported rate class */
acx->rate_policy_idx = cpu_to_le32(wlvif->sta.ap_rate_idx);
/* the AP policy is HW specific */
acx->rate_policy.enabled_rates =
cpu_to_le32(wlcore_hw_sta_get_ap_rate_mask(wl, wlvif));
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of rate policies failed: %d", ret);
goto out;
}
/*
* configure one rate class for basic p2p operations.
* (p2p packets should always go out with OFDM rates, even
* if we are currently connected to an 11b AP)
*/
acx->rate_policy_idx = cpu_to_le32(wlvif->sta.p2p_rate_idx);
acx->rate_policy.enabled_rates =
cpu_to_le32(CONF_TX_RATE_MASK_BASIC_P2P);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of rate policies failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_ap_rate_policy(struct wl1271 *wl, struct conf_tx_rate_class *c,
u8 idx)
{
struct acx_rate_policy *acx;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx ap rate policy %d rates 0x%x",
idx, c->enabled_rates);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->rate_policy.enabled_rates = cpu_to_le32(c->enabled_rates);
acx->rate_policy.short_retry_limit = c->short_retry_limit;
acx->rate_policy.long_retry_limit = c->long_retry_limit;
acx->rate_policy.aflags = c->aflags;
acx->rate_policy_idx = cpu_to_le32(idx);
ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of ap rate policy failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_ac_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 ac, u8 cw_min, u16 cw_max, u8 aifsn, u16 txop)
{
struct acx_ac_cfg *acx;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx ac cfg %d cw_ming %d cw_max %d "
"aifs %d txop %d", ac, cw_min, cw_max, aifsn, txop);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
acx->ac = ac;
acx->cw_min = cw_min;
acx->cw_max = cpu_to_le16(cw_max);
acx->aifsn = aifsn;
acx->tx_op_limit = cpu_to_le16(txop);
ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx ac cfg failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_tid_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 queue_id, u8 channel_type,
u8 tsid, u8 ps_scheme, u8 ack_policy,
u32 apsd_conf0, u32 apsd_conf1)
{
struct acx_tid_config *acx;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx tid config");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
acx->queue_id = queue_id;
acx->channel_type = channel_type;
acx->tsid = tsid;
acx->ps_scheme = ps_scheme;
acx->ack_policy = ack_policy;
acx->apsd_conf[0] = cpu_to_le32(apsd_conf0);
acx->apsd_conf[1] = cpu_to_le32(apsd_conf1);
ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of tid config failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_frag_threshold(struct wl1271 *wl, u32 frag_threshold)
{
struct acx_frag_threshold *acx;
int ret = 0;
/*
* If the fragmentation threshold is not configured or out of range,
* use the
* default value.
*/
if (frag_threshold > IEEE80211_MAX_FRAG_THRESHOLD)
frag_threshold = wl->conf.tx.frag_threshold;
wl1271_debug(DEBUG_ACX, "acx frag threshold: %d", frag_threshold);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->frag_threshold = cpu_to_le16((u16)frag_threshold);
ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of frag threshold failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_tx_config_options(struct wl1271 *wl)
{
struct acx_tx_config_options *acx;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx tx config options");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->tx_compl_timeout = cpu_to_le16(wl->conf.tx.tx_compl_timeout);
acx->tx_compl_threshold = cpu_to_le16(wl->conf.tx.tx_compl_threshold);
ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("Setting of tx options failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl12xx_acx_mem_cfg(struct wl1271 *wl)
{
struct wl12xx_acx_config_memory *mem_conf;
struct conf_memory_settings *mem;
int ret;
wl1271_debug(DEBUG_ACX, "wl1271 mem cfg");
mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
if (!mem_conf) {
ret = -ENOMEM;
goto out;
}
mem = &wl->conf.mem;
/* memory config */
mem_conf->num_stations = mem->num_stations;
mem_conf->rx_mem_block_num = mem->rx_block_num;
mem_conf->tx_min_mem_block_num = mem->tx_min_block_num;
mem_conf->num_ssid_profiles = mem->ssid_profiles;
mem_conf->total_tx_descriptors = cpu_to_le32(wl->num_tx_desc);
mem_conf->dyn_mem_enable = mem->dynamic_memory;
mem_conf->tx_free_req = mem->min_req_tx_blocks;
mem_conf->rx_free_req = mem->min_req_rx_blocks;
mem_conf->tx_min = mem->tx_min;
mem_conf->fwlog_blocks = wl->conf.fwlog.mem_blocks;
ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
sizeof(*mem_conf));
if (ret < 0) {
wl1271_warning("wl1271 mem config failed: %d", ret);
goto out;
}
out:
kfree(mem_conf);
return ret;
}
EXPORT_SYMBOL_GPL(wl12xx_acx_mem_cfg);
int wl1271_acx_init_mem_config(struct wl1271 *wl)
{
int ret;
wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
GFP_KERNEL);
if (!wl->target_mem_map) {
wl1271_error("couldn't allocate target memory map");
return -ENOMEM;
}
/* now ask for the firmware-built memory map */
ret = wl1271_acx_mem_map(wl, (void *)wl->target_mem_map,
sizeof(struct wl1271_acx_mem_map));
if (ret < 0) {
wl1271_error("couldn't retrieve firmware memory map");
kfree(wl->target_mem_map);
wl->target_mem_map = NULL;
return ret;
}
/* initialize TX block bookkeeping */
wl->tx_blocks_available =
le32_to_cpu(wl->target_mem_map->num_tx_mem_blocks);
wl1271_debug(DEBUG_TX, "available tx blocks: %d",
wl->tx_blocks_available);
return 0;
}
EXPORT_SYMBOL_GPL(wl1271_acx_init_mem_config);
int wl1271_acx_init_rx_interrupt(struct wl1271 *wl)
{
struct wl1271_acx_rx_config_opt *rx_conf;
int ret;
wl1271_debug(DEBUG_ACX, "wl1271 rx interrupt config");
rx_conf = kzalloc(sizeof(*rx_conf), GFP_KERNEL);
if (!rx_conf) {
ret = -ENOMEM;
goto out;
}
rx_conf->threshold = cpu_to_le16(wl->conf.rx.irq_pkt_threshold);
rx_conf->timeout = cpu_to_le16(wl->conf.rx.irq_timeout);
rx_conf->mblk_threshold = cpu_to_le16(wl->conf.rx.irq_blk_threshold);
rx_conf->queue_type = wl->conf.rx.queue_type;
ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf,
sizeof(*rx_conf));
if (ret < 0) {
wl1271_warning("wl1271 rx config opt failed: %d", ret);
goto out;
}
out:
kfree(rx_conf);
return ret;
}
int wl1271_acx_bet_enable(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable)
{
struct wl1271_acx_bet_enable *acx = NULL;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx bet enable");
if (enable && wl->conf.conn.bet_enable == CONF_BET_MODE_DISABLE)
goto out;
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
ret = wl1271_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx bet enable failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_arp_ip_filter(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 enable, __be32 address)
{
struct wl1271_acx_arp_filter *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
acx->version = ACX_IPV4_VERSION;
acx->enable = enable;
if (enable)
memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE);
ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER,
acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("failed to set arp ip filter: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_pm_config(struct wl1271 *wl)
{
struct wl1271_acx_pm_config *acx = NULL;
struct conf_pm_config_settings *c = &wl->conf.pm_config;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx pm config");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->host_clk_settling_time = cpu_to_le32(c->host_clk_settling_time);
acx->host_fast_wakeup_support = c->host_fast_wakeup_support;
ret = wl1271_cmd_configure(wl, ACX_PM_CONFIG, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx pm config failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
EXPORT_SYMBOL_GPL(wl1271_acx_pm_config);
int wl1271_acx_keep_alive_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable)
{
struct wl1271_acx_keep_alive_mode *acx = NULL;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx keep alive mode: %d", enable);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
acx->enabled = enable;
ret = wl1271_cmd_configure(wl, ACX_KEEP_ALIVE_MODE, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx keep alive mode failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_keep_alive_config(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 index, u8 tpl_valid)
{
struct wl1271_acx_keep_alive_config *acx = NULL;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx keep alive config");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
acx->period = cpu_to_le32(wl->conf.conn.keep_alive_interval);
acx->index = index;
acx->tpl_validation = tpl_valid;
acx->trigger = ACX_KEEP_ALIVE_NO_TX;
ret = wl1271_cmd_configure(wl, ACX_SET_KEEP_ALIVE_CONFIG,
acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx keep alive config failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_rssi_snr_trigger(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable, s16 thold, u8 hyst)
{
struct wl1271_acx_rssi_snr_trigger *acx = NULL;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx rssi snr trigger");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
wlvif->last_rssi_event = -1;
acx->role_id = wlvif->role_id;
acx->pacing = cpu_to_le16(wl->conf.roam_trigger.trigger_pacing);
acx->metric = WL1271_ACX_TRIG_METRIC_RSSI_BEACON;
acx->type = WL1271_ACX_TRIG_TYPE_EDGE;
if (enable)
acx->enable = WL1271_ACX_TRIG_ENABLE;
else
acx->enable = WL1271_ACX_TRIG_DISABLE;
acx->index = WL1271_ACX_TRIG_IDX_RSSI;
acx->dir = WL1271_ACX_TRIG_DIR_BIDIR;
acx->threshold = cpu_to_le16(thold);
acx->hysteresis = hyst;
ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_TRIGGER, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx rssi snr trigger setting failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_rssi_snr_avg_weights(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
struct wl1271_acx_rssi_snr_avg_weights *acx = NULL;
struct conf_roam_trigger_settings *c = &wl->conf.roam_trigger;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx rssi snr avg weights");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
acx->rssi_beacon = c->avg_weight_rssi_beacon;
acx->rssi_data = c->avg_weight_rssi_data;
acx->snr_beacon = c->avg_weight_snr_beacon;
acx->snr_data = c->avg_weight_snr_data;
ret = wl1271_cmd_configure(wl, ACX_RSSI_SNR_WEIGHTS, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx rssi snr trigger weights failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_set_ht_capabilities(struct wl1271 *wl,
struct ieee80211_sta_ht_cap *ht_cap,
bool allow_ht_operation, u8 hlid)
{
struct wl1271_acx_ht_capabilities *acx;
int ret = 0;
u32 ht_capabilites = 0;
wl1271_debug(DEBUG_ACX, "acx ht capabilities setting "
"sta supp: %d sta cap: %d", ht_cap->ht_supported,
ht_cap->cap);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
if (allow_ht_operation && ht_cap->ht_supported) {
/* no need to translate capabilities - use the spec values */
ht_capabilites = ht_cap->cap;
/*
* this bit is not employed by the spec but only by FW to
* indicate peer HT support
*/
ht_capabilites |= WL12XX_HT_CAP_HT_OPERATION;
/* get data from A-MPDU parameters field */
acx->ampdu_max_length = ht_cap->ampdu_factor;
acx->ampdu_min_spacing = ht_cap->ampdu_density;
}
acx->hlid = hlid;
acx->ht_capabilites = cpu_to_le32(ht_capabilites);
ret = wl1271_cmd_configure(wl, ACX_PEER_HT_CAP, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx ht capabilities setting failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
EXPORT_SYMBOL_GPL(wl1271_acx_set_ht_capabilities);
int wl1271_acx_set_ht_information(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u16 ht_operation_mode)
{
struct wl1271_acx_ht_information *acx;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx ht information setting");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
acx->ht_protection =
(u8)(ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION);
acx->rifs_mode = 0;
acx->gf_protection =
!!(ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
acx->ht_tx_burst_limit = 0;
acx->dual_cts_protection = 0;
ret = wl1271_cmd_configure(wl, ACX_HT_BSS_OPERATION, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx ht information setting failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
/* Configure the BA session initiator policy in the FW. */
int wl12xx_acx_set_ba_initiator_policy(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
struct wl1271_acx_ba_initiator_policy *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx ba initiator policy");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
/* set for the current role */
acx->role_id = wlvif->role_id;
acx->tid_bitmap = wl->conf.ht.tx_ba_tid_bitmap;
acx->win_size = wl->conf.ht.tx_ba_win_size;
acx->inactivity_timeout = wl->conf.ht.inactivity_timeout;
ret = wl1271_cmd_configure(wl,
ACX_BA_SESSION_INIT_POLICY,
acx,
sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx ba initiator policy failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
/* Set up the BA session receiver parameters in the FW. */
int wl12xx_acx_set_ba_receiver_session(struct wl1271 *wl, u8 tid_index,
u16 ssn, bool enable, u8 peer_hlid,
u8 win_size)
{
struct wl1271_acx_ba_receiver_setup *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx ba receiver session setting");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->hlid = peer_hlid;
acx->tid = tid_index;
acx->enable = enable;
acx->win_size = win_size;
acx->ssn = ssn;
ret = wlcore_cmd_configure_failsafe(wl, ACX_BA_SESSION_RX_SETUP, acx,
sizeof(*acx),
BIT(CMD_STATUS_NO_RX_BA_SESSION));
if (ret < 0) {
wl1271_warning("acx ba receiver session failed: %d", ret);
goto out;
}
/* sometimes we can't start the session */
if (ret == CMD_STATUS_NO_RX_BA_SESSION) {
wl1271_warning("no fw rx ba on tid %d", tid_index);
ret = -EBUSY;
goto out;
}
ret = 0;
out:
kfree(acx);
return ret;
}
int wl12xx_acx_tsf_info(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u64 *mactime)
{
struct wl12xx_acx_fw_tsf_information *tsf_info;
int ret;
tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL);
if (!tsf_info) {
ret = -ENOMEM;
goto out;
}
tsf_info->role_id = wlvif->role_id;
ret = wl1271_cmd_interrogate(wl, ACX_TSF_INFO, tsf_info,
sizeof(struct acx_header), sizeof(*tsf_info));
if (ret < 0) {
wl1271_warning("acx tsf info interrogate failed");
goto out;
}
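/* The FW reports the TSF as two 32-bit halves; combine them into the
 * 64-bit mactime value expected by the caller.
 */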
*mactime = le32_to_cpu(tsf_info->current_tsf_low) |
((u64) le32_to_cpu(tsf_info->current_tsf_high) << 32);
out:
kfree(tsf_info);
return ret;
}
int wl1271_acx_ps_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable)
{
struct wl1271_acx_ps_rx_streaming *rx_streaming;
u32 conf_queues, enable_queues;
int i, ret = 0;
wl1271_debug(DEBUG_ACX, "acx ps rx streaming");
rx_streaming = kzalloc(sizeof(*rx_streaming), GFP_KERNEL);
if (!rx_streaming) {
ret = -ENOMEM;
goto out;
}
conf_queues = wl->conf.rx_streaming.queues;
if (enable)
enable_queues = conf_queues;
else
enable_queues = 0;
for (i = 0; i < 8; i++) {
/*
* Skip unchanged queues to avoid redundant ACX commands.
* This check assumes conf.rx_streaming.queues can't
* be changed while rx_streaming is enabled.
*/
if (!(conf_queues & BIT(i)))
continue;
rx_streaming->role_id = wlvif->role_id;
rx_streaming->tid = i;
rx_streaming->enable = enable_queues & BIT(i);
rx_streaming->period = wl->conf.rx_streaming.interval;
rx_streaming->timeout = wl->conf.rx_streaming.interval;
ret = wl1271_cmd_configure(wl, ACX_PS_RX_STREAMING,
rx_streaming,
sizeof(*rx_streaming));
if (ret < 0) {
wl1271_warning("acx ps rx streaming failed: %d", ret);
goto out;
}
}
out:
kfree(rx_streaming);
return ret;
}
int wl1271_acx_ap_max_tx_retry(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_acx_ap_max_tx_retry *acx = NULL;
int ret;
wl1271_debug(DEBUG_ACX, "acx ap max tx retry");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->role_id = wlvif->role_id;
acx->max_tx_retry = cpu_to_le16(wl->conf.tx.max_tx_retries);
ret = wl1271_cmd_configure(wl, ACX_MAX_TX_FAILURE, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx ap max tx retry failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl12xx_acx_config_ps(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl1271_acx_config_ps *config_ps;
int ret;
wl1271_debug(DEBUG_ACX, "acx config ps");
config_ps = kzalloc(sizeof(*config_ps), GFP_KERNEL);
if (!config_ps) {
ret = -ENOMEM;
goto out;
}
config_ps->exit_retries = wl->conf.conn.psm_exit_retries;
config_ps->enter_retries = wl->conf.conn.psm_entry_retries;
config_ps->null_data_rate = cpu_to_le32(wlvif->basic_rate);
ret = wl1271_cmd_configure(wl, ACX_CONFIG_PS, config_ps,
sizeof(*config_ps));
if (ret < 0) {
wl1271_warning("acx config ps failed: %d", ret);
goto out;
}
out:
kfree(config_ps);
return ret;
}
int wl1271_acx_set_inconnection_sta(struct wl1271 *wl,
struct wl12xx_vif *wlvif, u8 *addr)
{
struct wl1271_acx_inconnection_sta *acx = NULL;
int ret;
wl1271_debug(DEBUG_ACX, "acx set inconnaction sta %pM", addr);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
memcpy(acx->addr, addr, ETH_ALEN);
acx->role_id = wlvif->role_id;
ret = wl1271_cmd_configure(wl, ACX_UPDATE_INCONNECTION_STA_LIST,
acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx set inconnaction sta failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1271_acx_fm_coex(struct wl1271 *wl)
{
struct wl1271_acx_fm_coex *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx fm coex setting");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->enable = wl->conf.fm_coex.enable;
acx->swallow_period = wl->conf.fm_coex.swallow_period;
acx->n_divider_fref_set_1 = wl->conf.fm_coex.n_divider_fref_set_1;
acx->n_divider_fref_set_2 = wl->conf.fm_coex.n_divider_fref_set_2;
acx->m_divider_fref_set_1 =
cpu_to_le16(wl->conf.fm_coex.m_divider_fref_set_1);
acx->m_divider_fref_set_2 =
cpu_to_le16(wl->conf.fm_coex.m_divider_fref_set_2);
acx->coex_pll_stabilization_time =
cpu_to_le32(wl->conf.fm_coex.coex_pll_stabilization_time);
acx->ldo_stabilization_time =
cpu_to_le16(wl->conf.fm_coex.ldo_stabilization_time);
acx->fm_disturbed_band_margin =
wl->conf.fm_coex.fm_disturbed_band_margin;
acx->swallow_clk_diff = wl->conf.fm_coex.swallow_clk_diff;
ret = wl1271_cmd_configure(wl, ACX_FM_COEX_CFG, acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx fm coex setting failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl12xx_acx_set_rate_mgmt_params(struct wl1271 *wl)
{
struct wl12xx_acx_set_rate_mgmt_params *acx = NULL;
struct conf_rate_policy_settings *conf = &wl->conf.rate;
int ret;
wl1271_debug(DEBUG_ACX, "acx set rate mgmt params");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->index = ACX_RATE_MGMT_ALL_PARAMS;
acx->rate_retry_score = cpu_to_le16(conf->rate_retry_score);
acx->per_add = cpu_to_le16(conf->per_add);
acx->per_th1 = cpu_to_le16(conf->per_th1);
acx->per_th2 = cpu_to_le16(conf->per_th2);
acx->max_per = cpu_to_le16(conf->max_per);
acx->inverse_curiosity_factor = conf->inverse_curiosity_factor;
acx->tx_fail_low_th = conf->tx_fail_low_th;
acx->tx_fail_high_th = conf->tx_fail_high_th;
acx->per_alpha_shift = conf->per_alpha_shift;
acx->per_add_shift = conf->per_add_shift;
acx->per_beta1_shift = conf->per_beta1_shift;
acx->per_beta2_shift = conf->per_beta2_shift;
acx->rate_check_up = conf->rate_check_up;
acx->rate_check_down = conf->rate_check_down;
memcpy(acx->rate_retry_policy, conf->rate_retry_policy,
sizeof(acx->rate_retry_policy));
ret = wl1271_cmd_configure(wl, ACX_SET_RATE_MGMT_PARAMS,
acx, sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx set rate mgmt params failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl12xx_acx_config_hangover(struct wl1271 *wl)
{
struct wl12xx_acx_config_hangover *acx;
struct conf_hangover_settings *conf = &wl->conf.hangover;
int ret;
wl1271_debug(DEBUG_ACX, "acx config hangover");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->recover_time = cpu_to_le32(conf->recover_time);
acx->hangover_period = conf->hangover_period;
acx->dynamic_mode = conf->dynamic_mode;
acx->early_termination_mode = conf->early_termination_mode;
acx->max_period = conf->max_period;
acx->min_period = conf->min_period;
acx->increase_delta = conf->increase_delta;
acx->decrease_delta = conf->decrease_delta;
acx->quiet_time = conf->quiet_time;
acx->increase_time = conf->increase_time;
acx->window_size = conf->window_size;
ret = wl1271_cmd_configure(wl, ACX_CONFIG_HANGOVER, acx,
sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx config hangover failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wlcore_acx_average_rssi(struct wl1271 *wl, struct wl12xx_vif *wlvif,
s8 *avg_rssi)
{
struct acx_roaming_stats *acx;
int ret = 0;
wl1271_debug(DEBUG_ACX, "acx roaming statistics");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx) {
ret = -ENOMEM;
goto out;
}
acx->role_id = wlvif->role_id;
ret = wl1271_cmd_interrogate(wl, ACX_ROAMING_STATISTICS_TBL,
acx, sizeof(*acx), sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx roaming statistics failed: %d", ret);
ret = -ENOMEM;
goto out;
}
*avg_rssi = acx->rssi_beacon;
out:
kfree(acx);
return ret;
}
#ifdef CONFIG_PM
/* Set the global behaviour of RX filters - On/Off + default action */
int wl1271_acx_default_rx_filter_enable(struct wl1271 *wl, bool enable,
enum rx_filter_action action)
{
struct acx_default_rx_filter *acx;
int ret;
wl1271_debug(DEBUG_ACX, "acx default rx filter en: %d act: %d",
enable, action);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->enable = enable;
acx->default_action = action;
ret = wl1271_cmd_configure(wl, ACX_ENABLE_RX_DATA_FILTER, acx,
sizeof(*acx));
if (ret < 0) {
wl1271_warning("acx default rx filter enable failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
/* Configure or disable a specific RX filter pattern */
int wl1271_acx_set_rx_filter(struct wl1271 *wl, u8 index, bool enable,
struct wl12xx_rx_filter *filter)
{
struct acx_rx_filter_cfg *acx;
int fields_size = 0;
int acx_size;
int ret;
WARN_ON(enable && !filter);
WARN_ON(index >= WL1271_MAX_RX_FILTERS);
wl1271_debug(DEBUG_ACX,
"acx set rx filter idx: %d enable: %d filter: %p",
index, enable, filter);
if (enable) {
fields_size = wl1271_rx_filter_get_fields_size(filter);
wl1271_debug(DEBUG_ACX, "act: %d num_fields: %d field_size: %d",
filter->action, filter->num_fields, fields_size);
}
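/* The variable-length field array follows the fixed header; the total
 * command length is padded to a 4-byte boundary for the FW.
 */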
acx_size = ALIGN(sizeof(*acx) + fields_size, 4);
acx = kzalloc(acx_size, GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->enable = enable;
acx->index = index;
if (enable) {
acx->num_fields = filter->num_fields;
acx->action = filter->action;
wl1271_rx_filter_flatten_fields(filter, acx->fields);
}
wl1271_dump(DEBUG_ACX, "RX_FILTER: ", acx, acx_size);
ret = wl1271_cmd_configure(wl, ACX_SET_RX_DATA_FILTER, acx, acx_size);
if (ret < 0) {
wl1271_warning("setting rx filter failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
#endif /* CONFIG_PM */
|
linux-master
|
drivers/net/wireless/ti/wlcore/acx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2009-2010 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/platform_device.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/mmc/card.h>
#include <linux/mmc/host.h>
#include <linux/gpio.h>
#include <linux/pm_runtime.h>
#include <linux/printk.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include "wlcore.h"
#include "wl12xx_80211.h"
#include "io.h"
static bool dump;
struct wl12xx_sdio_glue {
struct device *dev;
struct platform_device *core;
};
static const struct sdio_device_id wl1271_devices[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_TI, SDIO_DEVICE_ID_TI_WL1271) },
{}
};
MODULE_DEVICE_TABLE(sdio, wl1271_devices);
static void wl1271_sdio_set_block_size(struct device *child,
unsigned int blksz)
{
struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
struct sdio_func *func = dev_to_sdio_func(glue->dev);
sdio_claim_host(func);
sdio_set_block_size(func, blksz);
sdio_release_host(func);
}
static int __must_check wl12xx_sdio_raw_read(struct device *child, int addr,
void *buf, size_t len, bool fixed)
{
int ret;
struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
struct sdio_func *func = dev_to_sdio_func(glue->dev);
sdio_claim_host(func);
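/* The ELP control register lives in function 0 and is accessed with a
 * single-byte CMD52 transfer; everything else uses CMD53 block I/O.
 */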
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
((u8 *)buf)[0] = sdio_f0_readb(func, addr, &ret);
dev_dbg(child->parent, "sdio read 52 addr 0x%x, byte 0x%02x\n",
addr, ((u8 *)buf)[0]);
} else {
if (fixed)
ret = sdio_readsb(func, buf, addr, len);
else
ret = sdio_memcpy_fromio(func, buf, addr, len);
dev_dbg(child->parent, "sdio read 53 addr 0x%x, %zu bytes\n",
addr, len);
}
sdio_release_host(func);
if (WARN_ON(ret))
dev_err(child->parent, "sdio read failed (%d)\n", ret);
if (unlikely(dump)) {
printk(KERN_DEBUG "wlcore_sdio: READ from 0x%04x\n", addr);
print_hex_dump(KERN_DEBUG, "wlcore_sdio: READ ",
DUMP_PREFIX_OFFSET, 16, 1,
buf, len, false);
}
return ret;
}
static int __must_check wl12xx_sdio_raw_write(struct device *child, int addr,
void *buf, size_t len, bool fixed)
{
int ret;
struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
struct sdio_func *func = dev_to_sdio_func(glue->dev);
sdio_claim_host(func);
if (unlikely(dump)) {
printk(KERN_DEBUG "wlcore_sdio: WRITE to 0x%04x\n", addr);
print_hex_dump(KERN_DEBUG, "wlcore_sdio: WRITE ",
DUMP_PREFIX_OFFSET, 16, 1,
buf, len, false);
}
if (unlikely(addr == HW_ACCESS_ELP_CTRL_REG)) {
sdio_f0_writeb(func, ((u8 *)buf)[0], addr, &ret);
dev_dbg(child->parent, "sdio write 52 addr 0x%x, byte 0x%02x\n",
addr, ((u8 *)buf)[0]);
} else {
dev_dbg(child->parent, "sdio write 53 addr 0x%x, %zu bytes\n",
addr, len);
if (fixed)
ret = sdio_writesb(func, addr, buf, len);
else
ret = sdio_memcpy_toio(func, addr, buf, len);
}
sdio_release_host(func);
if (WARN_ON(ret))
dev_err(child->parent, "sdio write failed (%d)\n", ret);
return ret;
}
static int wl12xx_sdio_power_on(struct wl12xx_sdio_glue *glue)
{
int ret;
struct sdio_func *func = dev_to_sdio_func(glue->dev);
struct mmc_card *card = func->card;
ret = pm_runtime_resume_and_get(&card->dev);
if (ret < 0) {
dev_err(glue->dev, "%s: failed to get_sync(%d)\n",
__func__, ret);
return ret;
}
sdio_claim_host(func);
/*
* To guarantee that the SDIO card is power cycled, as required for
* the FW programming to succeed, let's do a brute-force HW reset.
*/
mmc_hw_reset(card);
sdio_enable_func(func);
sdio_release_host(func);
return 0;
}
static int wl12xx_sdio_power_off(struct wl12xx_sdio_glue *glue)
{
struct sdio_func *func = dev_to_sdio_func(glue->dev);
struct mmc_card *card = func->card;
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
/* Let runtime PM know the card is powered off */
pm_runtime_put(&card->dev);
return 0;
}
static int wl12xx_sdio_set_power(struct device *child, bool enable)
{
struct wl12xx_sdio_glue *glue = dev_get_drvdata(child->parent);
if (enable)
return wl12xx_sdio_power_on(glue);
else
return wl12xx_sdio_power_off(glue);
}
static struct wl1271_if_operations sdio_ops = {
.read = wl12xx_sdio_raw_read,
.write = wl12xx_sdio_raw_write,
.power = wl12xx_sdio_set_power,
.set_block_size = wl1271_sdio_set_block_size,
};
#ifdef CONFIG_OF
static const struct wilink_family_data wl127x_data = {
.name = "wl127x",
.nvs_name = "ti-connectivity/wl127x-nvs.bin",
};
static const struct wilink_family_data wl128x_data = {
.name = "wl128x",
.nvs_name = "ti-connectivity/wl128x-nvs.bin",
};
static const struct wilink_family_data wl18xx_data = {
.name = "wl18xx",
.cfg_name = "ti-connectivity/wl18xx-conf.bin",
.nvs_name = "ti-connectivity/wl1271-nvs.bin",
};
static const struct of_device_id wlcore_sdio_of_match_table[] = {
{ .compatible = "ti,wl1271", .data = &wl127x_data },
{ .compatible = "ti,wl1273", .data = &wl127x_data },
{ .compatible = "ti,wl1281", .data = &wl128x_data },
{ .compatible = "ti,wl1283", .data = &wl128x_data },
{ .compatible = "ti,wl1285", .data = &wl128x_data },
{ .compatible = "ti,wl1801", .data = &wl18xx_data },
{ .compatible = "ti,wl1805", .data = &wl18xx_data },
{ .compatible = "ti,wl1807", .data = &wl18xx_data },
{ .compatible = "ti,wl1831", .data = &wl18xx_data },
{ .compatible = "ti,wl1835", .data = &wl18xx_data },
{ .compatible = "ti,wl1837", .data = &wl18xx_data },
{ }
};
static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq,
struct wlcore_platdev_data *pdev_data)
{
struct device_node *np = dev->of_node;
const struct of_device_id *of_id;
of_id = of_match_node(wlcore_sdio_of_match_table, np);
if (!of_id)
return -ENODEV;
pdev_data->family = of_id->data;
*irq = irq_of_parse_and_map(np, 0);
if (!*irq) {
dev_err(dev, "No irq in platform data\n");
return -EINVAL;
}
*wakeirq = irq_of_parse_and_map(np, 1);
/* optional clock frequency params */
of_property_read_u32(np, "ref-clock-frequency",
&pdev_data->ref_clock_freq);
of_property_read_u32(np, "tcxo-clock-frequency",
&pdev_data->tcxo_clock_freq);
return 0;
}
#else
static int wlcore_probe_of(struct device *dev, int *irq, int *wakeirq,
struct wlcore_platdev_data *pdev_data)
{
return -ENODATA;
}
#endif
static int wl1271_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
struct wlcore_platdev_data *pdev_data;
struct wl12xx_sdio_glue *glue;
struct resource res[2];
mmc_pm_flag_t mmcflags;
int ret = -ENOMEM;
int irq, wakeirq, num_irqs;
const char *chip_family;
/* We are only able to handle the wlan function */
if (func->num != 0x02)
return -ENODEV;
pdev_data = devm_kzalloc(&func->dev, sizeof(*pdev_data), GFP_KERNEL);
if (!pdev_data)
return -ENOMEM;
pdev_data->if_ops = &sdio_ops;
glue = devm_kzalloc(&func->dev, sizeof(*glue), GFP_KERNEL);
if (!glue)
return -ENOMEM;
glue->dev = &func->dev;
/* Grab access to FN0 for ELP reg. */
func->card->quirks |= MMC_QUIRK_LENIENT_FN0;
/* Use block mode for transferring over one block size of data */
func->card->quirks |= MMC_QUIRK_BLKSZ_FOR_BYTE_MODE;
ret = wlcore_probe_of(&func->dev, &irq, &wakeirq, pdev_data);
if (ret)
goto out;
/* if sdio can keep power while host is suspended, enable wow */
mmcflags = sdio_get_host_pm_caps(func);
dev_dbg(glue->dev, "sdio PM caps = 0x%x\n", mmcflags);
if (mmcflags & MMC_PM_KEEP_POWER)
pdev_data->pwr_in_suspend = true;
sdio_set_drvdata(func, glue);
/* Tell PM core that we don't need the card to be powered now */
pm_runtime_put_noidle(&func->dev);
/*
* Due to a hardware bug, we can't differentiate wl18xx from
* wl12xx, because both report the same device ID. The only
* way to differentiate is by checking the SDIO revision,
* which is 3.00 on the wl18xx chips.
*/
if (func->card->cccr.sdio_vsn == SDIO_SDIO_REV_3_00)
chip_family = "wl18xx";
else
chip_family = "wl12xx";
glue->core = platform_device_alloc(chip_family, PLATFORM_DEVID_AUTO);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device");
ret = -ENOMEM;
goto out;
}
glue->core->dev.parent = &func->dev;
memset(res, 0x00, sizeof(res));
res[0].start = irq;
res[0].flags = IORESOURCE_IRQ |
irqd_get_trigger_type(irq_get_irq_data(irq));
res[0].name = "irq";
if (wakeirq > 0) {
res[1].start = wakeirq;
res[1].flags = IORESOURCE_IRQ |
irqd_get_trigger_type(irq_get_irq_data(wakeirq));
res[1].name = "wakeirq";
num_irqs = 2;
} else {
num_irqs = 1;
}
ret = platform_device_add_resources(glue->core, res, num_irqs);
if (ret) {
dev_err(glue->dev, "can't add resources\n");
goto out_dev_put;
}
ret = platform_device_add_data(glue->core, pdev_data,
sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
}
ret = platform_device_add(glue->core);
if (ret) {
dev_err(glue->dev, "can't add platform device\n");
goto out_dev_put;
}
return 0;
out_dev_put:
platform_device_put(glue->core);
out:
return ret;
}
static void wl1271_remove(struct sdio_func *func)
{
struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
/* Undo the pm_runtime_put_noidle() done in wl1271_probe */
pm_runtime_get_noresume(&func->dev);
platform_device_unregister(glue->core);
}
#ifdef CONFIG_PM
static int wl1271_suspend(struct device *dev)
{
/* Tell MMC/SDIO core it's OK to power down the card
* (if it isn't already), but not to remove it completely */
struct sdio_func *func = dev_to_sdio_func(dev);
struct wl12xx_sdio_glue *glue = sdio_get_drvdata(func);
struct wl1271 *wl = platform_get_drvdata(glue->core);
mmc_pm_flag_t sdio_flags;
int ret = 0;
if (!wl) {
dev_err(dev, "no wilink module was probed\n");
goto out;
}
dev_dbg(dev, "wl1271 suspend. wow_enabled: %d\n",
wl->wow_enabled);
/* check whether sdio should keep power */
if (wl->wow_enabled) {
sdio_flags = sdio_get_host_pm_caps(func);
if (!(sdio_flags & MMC_PM_KEEP_POWER)) {
dev_err(dev, "can't keep power while host "
"is suspended\n");
ret = -EINVAL;
goto out;
}
/* keep power while host suspended */
ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER);
if (ret) {
dev_err(dev, "error while trying to keep power\n");
goto out;
}
}
out:
return ret;
}
static int wl1271_resume(struct device *dev)
{
dev_dbg(dev, "wl1271 resume\n");
return 0;
}
static const struct dev_pm_ops wl1271_sdio_pm_ops = {
.suspend = wl1271_suspend,
.resume = wl1271_resume,
};
#endif
static struct sdio_driver wl1271_sdio_driver = {
.name = "wl1271_sdio",
.id_table = wl1271_devices,
.probe = wl1271_probe,
.remove = wl1271_remove,
#ifdef CONFIG_PM
.drv = {
.pm = &wl1271_sdio_pm_ops,
},
#endif
};
module_sdio_driver(wl1271_sdio_driver);
module_param(dump, bool, 0600);
MODULE_PARM_DESC(dump, "Enable sdio read/write dumps.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <[email protected]>");
MODULE_AUTHOR("Juuso Oikarinen <[email protected]>");
|
linux-master
|
drivers/net/wireless/ti/wlcore/sdio.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2009 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include <linux/gfp.h>
#include <linux/sched.h>
#include "wlcore.h"
#include "debug.h"
#include "acx.h"
#include "rx.h"
#include "tx.h"
#include "io.h"
#include "hw_ops.h"
/*
* TODO: this is here just for now, it must be removed when the data
* operations are in place.
*/
#include "../wl12xx/reg.h"
static u32 wlcore_rx_get_buf_size(struct wl1271 *wl,
u32 rx_pkt_desc)
{
if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN)
return (rx_pkt_desc & ALIGNED_RX_BUF_SIZE_MASK) >>
ALIGNED_RX_BUF_SIZE_SHIFT;
return (rx_pkt_desc & RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
}
static u32 wlcore_rx_get_align_buf_size(struct wl1271 *wl, u32 pkt_len)
{
if (wl->quirks & WLCORE_QUIRK_RX_BLOCKSIZE_ALIGN)
return ALIGN(pkt_len, WL12XX_BUS_BLOCK_SIZE);
return pkt_len;
}
static void wl1271_rx_status(struct wl1271 *wl,
struct wl1271_rx_descriptor *desc,
struct ieee80211_rx_status *status,
u8 beacon, u8 probe_rsp)
{
memset(status, 0, sizeof(struct ieee80211_rx_status));
if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG)
status->band = NL80211_BAND_2GHZ;
else
status->band = NL80211_BAND_5GHZ;
status->rate_idx = wlcore_rate_to_idx(wl, desc->rate, status->band);
/* 11n support */
if (desc->rate <= wl->hw_min_ht_rate)
status->encoding = RX_ENC_HT;
/*
* Read the signal level and antenna diversity indication.
* The msb in the signal level is always set as it is a
* negative number.
* The antenna indication is the msb of the rssi.
*/
status->signal = ((desc->rssi & RSSI_LEVEL_BITMASK) | BIT(7));
status->antenna = ((desc->rssi & ANT_DIVERSITY_BITMASK) >> 7);
/*
* FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
* need to divide by two for now, but TI has been discussing
* changing it. This needs to be rechecked.
*/
wl->noise = desc->rssi - (desc->snr >> 1);
status->freq = ieee80211_channel_to_frequency(desc->channel,
status->band);
if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
u8 desc_err_code = desc->status & WL1271_RX_DESC_STATUS_MASK;
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED |
RX_FLAG_DECRYPTED;
if (unlikely(desc_err_code & WL1271_RX_DESC_MIC_FAIL)) {
status->flag |= RX_FLAG_MMIC_ERROR;
wl1271_warning("Michael MIC error. Desc: 0x%x",
desc_err_code);
}
}
if (beacon || probe_rsp)
status->boottime_ns = ktime_get_boottime_ns();
if (beacon)
wlcore_set_pending_regdomain_ch(wl, (u16)desc->channel,
status->band);
}
static int wl1271_rx_handle_data(struct wl1271 *wl, u8 *data, u32 length,
enum wl_rx_buf_align rx_align, u8 *hlid)
{
struct wl1271_rx_descriptor *desc;
struct sk_buff *skb;
struct ieee80211_hdr *hdr;
u8 beacon = 0;
u8 is_data = 0;
u8 reserved = 0, offset_to_data = 0;
u16 seq_num;
u32 pkt_data_len;
/*
* In PLT mode we seem to get frames and mac80211 warns about them;
* work around this by not retrieving them at all.
*/
if (unlikely(wl->plt))
return -EINVAL;
pkt_data_len = wlcore_hw_get_rx_packet_len(wl, data, length);
if (!pkt_data_len) {
wl1271_error("Invalid packet arrived from HW. length %d",
length);
return -EINVAL;
}
if (rx_align == WLCORE_RX_BUF_UNALIGNED)
reserved = RX_BUF_ALIGN;
else if (rx_align == WLCORE_RX_BUF_PADDED)
offset_to_data = RX_BUF_ALIGN;
/* the data read starts with the descriptor */
desc = (struct wl1271_rx_descriptor *) data;
if (desc->packet_class == WL12XX_RX_CLASS_LOGGER) {
size_t len = length - sizeof(*desc);
wl12xx_copy_fwlog(wl, data + sizeof(*desc), len);
return 0;
}
/* discard corrupted packets */
if (desc->status & WL1271_RX_DESC_DECRYPT_FAIL) {
hdr = (void *)(data + sizeof(*desc) + offset_to_data);
wl1271_warning("corrupted packet in RX: status: 0x%x len: %d",
desc->status & WL1271_RX_DESC_STATUS_MASK,
pkt_data_len);
wl1271_dump((DEBUG_RX|DEBUG_CMD), "PKT: ", data + sizeof(*desc),
min(pkt_data_len,
ieee80211_hdrlen(hdr->frame_control)));
return -EINVAL;
}
/* skb length not including rx descriptor */
skb = __dev_alloc_skb(pkt_data_len + reserved, GFP_KERNEL);
if (!skb) {
wl1271_error("Couldn't allocate RX frame");
return -ENOMEM;
}
/* reserve the unaligned payload(if any) */
skb_reserve(skb, reserved);
/*
* Copy packets from the aggregation buffer into the skbs, skipping the
* rx descriptor and taking care of payload alignment. For unaligned
* packets, copy at an offset of 2 bytes to guarantee the IP header
* payload is aligned to 4 bytes.
*/
skb_put_data(skb, data + sizeof(*desc), pkt_data_len);
if (rx_align == WLCORE_RX_BUF_PADDED)
skb_pull(skb, RX_BUF_ALIGN);
*hlid = desc->hlid;
hdr = (struct ieee80211_hdr *)skb->data;
if (ieee80211_is_beacon(hdr->frame_control))
beacon = 1;
if (ieee80211_is_data_present(hdr->frame_control))
is_data = 1;
wl1271_rx_status(wl, desc, IEEE80211_SKB_RXCB(skb), beacon,
ieee80211_is_probe_resp(hdr->frame_control));
wlcore_hw_set_rx_csum(wl, desc, skb);
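/* The sequence number occupies bits 15:4 of the sequence control field. */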
seq_num = (le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_SEQ) >> 4;
wl1271_debug(DEBUG_RX, "rx skb 0x%p: %d B %s seq %d hlid %d", skb,
skb->len - desc->pad_len,
beacon ? "beacon" : "",
seq_num, *hlid);
skb_queue_tail(&wl->deferred_rx_queue, skb);
queue_work(wl->freezable_wq, &wl->netstack_work);
return is_data;
}
int wlcore_rx(struct wl1271 *wl, struct wl_fw_status *status)
{
unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
u32 buf_size;
u32 fw_rx_counter = status->fw_rx_counter % wl->num_rx_desc;
u32 drv_rx_counter = wl->rx_counter % wl->num_rx_desc;
u32 rx_counter;
u32 pkt_len, align_pkt_len;
u32 pkt_offset, des;
u8 hlid;
enum wl_rx_buf_align rx_align;
int ret = 0;
/* update rates per link */
hlid = status->counters.hlid;
if (hlid < WLCORE_MAX_LINKS)
wl->links[hlid].fw_rate_mbps =
status->counters.tx_last_rate_mbps;
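/*
 * Drain the FW rx ring in two steps per iteration: first count how many
 * consecutive packets fit into the aggregation buffer, then fetch them
 * with a single bus read and split the buffer into individual frames.
 */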
while (drv_rx_counter != fw_rx_counter) {
buf_size = 0;
rx_counter = drv_rx_counter;
while (rx_counter != fw_rx_counter) {
des = le32_to_cpu(status->rx_pkt_descs[rx_counter]);
pkt_len = wlcore_rx_get_buf_size(wl, des);
align_pkt_len = wlcore_rx_get_align_buf_size(wl,
pkt_len);
if (buf_size + align_pkt_len > wl->aggr_buf_size)
break;
buf_size += align_pkt_len;
rx_counter++;
rx_counter %= wl->num_rx_desc;
}
if (buf_size == 0) {
wl1271_warning("received empty data");
break;
}
/* Read all available packets at once */
des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
ret = wlcore_hw_prepare_read(wl, des, buf_size);
if (ret < 0)
goto out;
ret = wlcore_read_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
buf_size, true);
if (ret < 0)
goto out;
/* Split data into separate packets */
pkt_offset = 0;
while (pkt_offset < buf_size) {
des = le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]);
pkt_len = wlcore_rx_get_buf_size(wl, des);
rx_align = wlcore_hw_get_rx_buf_align(wl, des);
/*
* the handle data call can only fail in memory-outage
* conditions, in that case the received frame will just
* be dropped.
*/
if (wl1271_rx_handle_data(wl,
wl->aggr_buf + pkt_offset,
pkt_len, rx_align,
&hlid) == 1) {
if (hlid < wl->num_links)
__set_bit(hlid, active_hlids);
else
WARN(1,
"hlid (%d) exceeded MAX_LINKS\n",
hlid);
}
wl->rx_counter++;
drv_rx_counter++;
drv_rx_counter %= wl->num_rx_desc;
pkt_offset += wlcore_rx_get_align_buf_size(wl, pkt_len);
}
}
/*
* Write the driver's packet counter to the FW. This is only required
* for older hardware revisions
*/
if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
ret = wlcore_write32(wl, WL12XX_REG_RX_DRIVER_COUNTER,
wl->rx_counter);
if (ret < 0)
goto out;
}
wl12xx_rearm_rx_streaming(wl, active_hlids);
out:
return ret;
}
#ifdef CONFIG_PM
int wl1271_rx_filter_enable(struct wl1271 *wl,
int index, bool enable,
struct wl12xx_rx_filter *filter)
{
int ret;
if (!!test_bit(index, wl->rx_filter_enabled) == enable) {
wl1271_warning("Request to enable an already "
"enabled rx filter %d", index);
return 0;
}
ret = wl1271_acx_set_rx_filter(wl, index, enable, filter);
if (ret) {
wl1271_error("Failed to %s rx data filter %d (err=%d)",
enable ? "enable" : "disable", index, ret);
return ret;
}
if (enable)
__set_bit(index, wl->rx_filter_enabled);
else
__clear_bit(index, wl->rx_filter_enabled);
return 0;
}
int wl1271_rx_filter_clear_all(struct wl1271 *wl)
{
int i, ret = 0;
for (i = 0; i < WL1271_MAX_RX_FILTERS; i++) {
if (!test_bit(i, wl->rx_filter_enabled))
continue;
ret = wl1271_rx_filter_enable(wl, i, 0, NULL);
if (ret)
goto out;
}
out:
return ret;
}
#endif /* CONFIG_PM */
|
linux-master
|
drivers/net/wireless/ti/wlcore/rx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wlcore
*
* Copyright (C) 2013 Texas Instruments Inc.
*/
#include <linux/pm_runtime.h>
#include "acx.h"
#include "wlcore.h"
#include "debug.h"
#include "sysfs.h"
static ssize_t bt_coex_state_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct wl1271 *wl = dev_get_drvdata(dev);
ssize_t len;
len = PAGE_SIZE;
mutex_lock(&wl->mutex);
len = snprintf(buf, len, "%d\n\n0 - off\n1 - on\n",
wl->sg_enabled);
mutex_unlock(&wl->mutex);
return len;
}
static ssize_t bt_coex_state_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
struct wl1271 *wl = dev_get_drvdata(dev);
unsigned long res;
int ret;
ret = kstrtoul(buf, 10, &res);
if (ret < 0) {
wl1271_warning("incorrect value written to bt_coex_mode");
return count;
}
mutex_lock(&wl->mutex);
res = !!res;
if (res == wl->sg_enabled)
goto out;
wl->sg_enabled = res;
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl1271_acx_sg_enable(wl, wl->sg_enabled);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
}
static DEVICE_ATTR_RW(bt_coex_state);
static ssize_t hw_pg_ver_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct wl1271 *wl = dev_get_drvdata(dev);
ssize_t len;
len = PAGE_SIZE;
mutex_lock(&wl->mutex);
if (wl->hw_pg_ver >= 0)
len = snprintf(buf, len, "%d\n", wl->hw_pg_ver);
else
len = snprintf(buf, len, "n/a\n");
mutex_unlock(&wl->mutex);
return len;
}
static DEVICE_ATTR_RO(hw_pg_ver);
static ssize_t wl1271_sysfs_read_fwlog(struct file *filp, struct kobject *kobj,
struct bin_attribute *bin_attr,
char *buffer, loff_t pos, size_t count)
{
struct device *dev = kobj_to_dev(kobj);
struct wl1271 *wl = dev_get_drvdata(dev);
ssize_t len;
int ret;
ret = mutex_lock_interruptible(&wl->mutex);
if (ret < 0)
return -ERESTARTSYS;
/* Check if the fwlog is still valid */
if (wl->fwlog_size < 0) {
mutex_unlock(&wl->mutex);
return 0;
}
/* Seeking is not supported - old logs are not kept. Disregard pos. */
len = min_t(size_t, count, wl->fwlog_size);
wl->fwlog_size -= len;
memcpy(buffer, wl->fwlog, len);
/* Make room for new messages */
memmove(wl->fwlog, wl->fwlog + len, wl->fwlog_size);
mutex_unlock(&wl->mutex);
return len;
}
static const struct bin_attribute fwlog_attr = {
.attr = { .name = "fwlog", .mode = 0400 },
.read = wl1271_sysfs_read_fwlog,
};
int wlcore_sysfs_init(struct wl1271 *wl)
{
int ret;
/* Create sysfs file to control bt coex state */
ret = device_create_file(wl->dev, &dev_attr_bt_coex_state);
if (ret < 0) {
wl1271_error("failed to create sysfs file bt_coex_state");
goto out;
}
/* Create sysfs file to get HW PG version */
ret = device_create_file(wl->dev, &dev_attr_hw_pg_ver);
if (ret < 0) {
wl1271_error("failed to create sysfs file hw_pg_ver");
goto out_bt_coex_state;
}
/* Create sysfs file for the FW log */
ret = device_create_bin_file(wl->dev, &fwlog_attr);
if (ret < 0) {
wl1271_error("failed to create sysfs file fwlog");
goto out_hw_pg_ver;
}
goto out;
out_hw_pg_ver:
device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
out_bt_coex_state:
device_remove_file(wl->dev, &dev_attr_bt_coex_state);
out:
return ret;
}
void wlcore_sysfs_free(struct wl1271 *wl)
{
device_remove_bin_file(wl->dev, &fwlog_attr);
device_remove_file(wl->dev, &dev_attr_hw_pg_ver);
device_remove_file(wl->dev, &dev_attr_bt_coex_state);
}
|
linux-master
|
drivers/net/wireless/ti/wlcore/sysfs.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/platform_device.h>
#include <linux/of_irq.h>
#include <linux/regulator/consumer.h>
#include "wlcore.h"
#include "wl12xx_80211.h"
#include "io.h"
#define WSPI_CMD_READ 0x40000000
#define WSPI_CMD_WRITE 0x00000000
#define WSPI_CMD_FIXED 0x20000000
#define WSPI_CMD_BYTE_LENGTH 0x1FFE0000
#define WSPI_CMD_BYTE_LENGTH_OFFSET 17
#define WSPI_CMD_BYTE_ADDR 0x0001FFFF
#define WSPI_INIT_CMD_CRC_LEN 5
#define WSPI_INIT_CMD_START 0x00
#define WSPI_INIT_CMD_TX 0x40
/* the extra bypass bit is sampled by the TNET as '1' */
#define WSPI_INIT_CMD_BYPASS_BIT 0x80
#define WSPI_INIT_CMD_FIXEDBUSY_LEN 0x07
#define WSPI_INIT_CMD_EN_FIXEDBUSY 0x80
#define WSPI_INIT_CMD_DIS_FIXEDBUSY 0x00
#define WSPI_INIT_CMD_IOD 0x40
#define WSPI_INIT_CMD_IP 0x20
#define WSPI_INIT_CMD_CS 0x10
#define WSPI_INIT_CMD_WS 0x08
#define WSPI_INIT_CMD_WSPI 0x01
#define WSPI_INIT_CMD_END 0x01
#define WSPI_INIT_CMD_LEN 8
#define HW_ACCESS_WSPI_FIXED_BUSY_LEN \
((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
/* HW limitation: maximum possible chunk size is 4095 bytes */
#define WSPI_MAX_CHUNK_SIZE 4092
/*
* wl18xx driver aggregation buffer size is (13 * 4K) compared to
* (4 * 4K) for wl12xx, so use the larger buffer needed for wl18xx
*/
#define SPI_AGGR_BUFFER_SIZE (13 * SZ_4K)
/* Maximum number of SPI write chunks */
#define WSPI_MAX_NUM_OF_CHUNKS \
((SPI_AGGR_BUFFER_SIZE / WSPI_MAX_CHUNK_SIZE) + 1)
static const struct wilink_family_data wl127x_data = {
.name = "wl127x",
.nvs_name = "ti-connectivity/wl127x-nvs.bin",
};
static const struct wilink_family_data wl128x_data = {
.name = "wl128x",
.nvs_name = "ti-connectivity/wl128x-nvs.bin",
};
static const struct wilink_family_data wl18xx_data = {
.name = "wl18xx",
.cfg_name = "ti-connectivity/wl18xx-conf.bin",
.nvs_name = "ti-connectivity/wl1271-nvs.bin",
};
struct wl12xx_spi_glue {
struct device *dev;
struct platform_device *core;
struct regulator *reg; /* Power regulator */
};
static void wl12xx_spi_reset(struct device *child)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
u8 *cmd;
struct spi_transfer t;
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
dev_err(child->parent,
"could not allocate cmd for spi reset\n");
return;
}
memset(&t, 0, sizeof(t));
spi_message_init(&m);
memset(cmd, 0xff, WSPI_INIT_CMD_LEN);
t.tx_buf = cmd;
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
spi_sync(to_spi_device(glue->dev), &m);
kfree(cmd);
}
static void wl12xx_spi_init(struct device *child)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct spi_transfer t;
struct spi_message m;
struct spi_device *spi = to_spi_device(glue->dev);
u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
dev_err(child->parent,
"could not allocate cmd for spi init\n");
return;
}
memset(&t, 0, sizeof(t));
spi_message_init(&m);
/*
* Set WSPI_INIT_COMMAND;
* the data is sent MSB first.
*/
cmd[0] = 0xff;
cmd[1] = 0xff;
cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
cmd[3] = 0;
cmd[4] = 0;
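/* cmd[5]: init command mask in bits 7:3, fixed-busy word count in bits 2:0 */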
cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
| WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
else
cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
/*
* The above is the logical order; it must actually be stored
* in the buffer byte-swapped.
*/
__swab32s((u32 *)cmd);
__swab32s((u32 *)cmd+1);
t.tx_buf = cmd;
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
spi_sync(to_spi_device(glue->dev), &m);
/* Send extra clocks with inverted CS (high). This is required
* by the wilink family in order to successfully enter WSPI mode.
*/
spi->mode ^= SPI_CS_HIGH;
memset(&m, 0, sizeof(m));
spi_message_init(&m);
cmd[0] = 0xff;
cmd[1] = 0xff;
cmd[2] = 0xff;
cmd[3] = 0xff;
__swab32s((u32 *)cmd);
t.tx_buf = cmd;
t.len = 4;
spi_message_add_tail(&t, &m);
spi_sync(to_spi_device(glue->dev), &m);
/* Restore chip select configuration to normal */
spi->mode ^= SPI_CS_HIGH;
kfree(cmd);
}
#define WL1271_BUSY_WORD_TIMEOUT 1000
static int wl12xx_spi_read_busy(struct device *child)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct wl1271 *wl = dev_get_drvdata(child);
struct spi_transfer t[1];
struct spi_message m;
u32 *busy_buf;
int num_busy_bytes = 0;
/*
* Read further busy words from SPI until a non-busy word is
* encountered, then read the data itself into the buffer.
*/
num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT;
busy_buf = wl->buffer_busyword;
while (num_busy_bytes) {
num_busy_bytes--;
spi_message_init(&m);
memset(t, 0, sizeof(t));
t[0].rx_buf = busy_buf;
t[0].len = sizeof(u32);
t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
spi_sync(to_spi_device(glue->dev), &m);
if (*busy_buf & 0x1)
return 0;
}
/* The SPI bus is unresponsive; the read failed. */
dev_err(child->parent, "SPI read busy-word timeout!\n");
return -ETIMEDOUT;
}
static int __must_check wl12xx_spi_raw_read(struct device *child, int addr,
void *buf, size_t len, bool fixed)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct wl1271 *wl = dev_get_drvdata(child);
struct spi_transfer t[2];
struct spi_message m;
u32 *busy_buf;
u32 *cmd;
u32 chunk_len;
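/* Split the read into chunks; one WSPI command can transfer at most
 * WSPI_MAX_CHUNK_SIZE bytes.
 */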
while (len > 0) {
chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len);
cmd = &wl->buffer_cmd;
busy_buf = wl->buffer_busyword;
*cmd = 0;
*cmd |= WSPI_CMD_READ;
*cmd |= (chunk_len << WSPI_CMD_BYTE_LENGTH_OFFSET) &
WSPI_CMD_BYTE_LENGTH;
*cmd |= addr & WSPI_CMD_BYTE_ADDR;
if (fixed)
*cmd |= WSPI_CMD_FIXED;
spi_message_init(&m);
memset(t, 0, sizeof(t));
t[0].tx_buf = cmd;
t[0].len = 4;
t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
/* Busy and non busy words read */
t[1].rx_buf = busy_buf;
t[1].len = WL1271_BUSY_WORD_LEN;
t[1].cs_change = true;
spi_message_add_tail(&t[1], &m);
spi_sync(to_spi_device(glue->dev), &m);
if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1) &&
wl12xx_spi_read_busy(child)) {
memset(buf, 0, chunk_len);
return 0;
}
spi_message_init(&m);
memset(t, 0, sizeof(t));
t[0].rx_buf = buf;
t[0].len = chunk_len;
t[0].cs_change = true;
spi_message_add_tail(&t[0], &m);
spi_sync(to_spi_device(glue->dev), &m);
if (!fixed)
addr += chunk_len;
buf += chunk_len;
len -= chunk_len;
}
return 0;
}
static int __wl12xx_spi_raw_write(struct device *child, int addr,
void *buf, size_t len, bool fixed)
{
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
struct spi_transfer *t;
struct spi_message m;
u32 commands[WSPI_MAX_NUM_OF_CHUNKS]; /* 1 command per chunk */
u32 *cmd;
u32 chunk_len;
int i;
/* SPI write buffers - 2 for each chunk */
t = kzalloc(sizeof(*t) * 2 * WSPI_MAX_NUM_OF_CHUNKS, GFP_KERNEL);
if (!t)
return -ENOMEM;
WARN_ON(len > SPI_AGGR_BUFFER_SIZE);
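/* Build a single SPI message carrying a command transfer followed by a
 * data transfer for each chunk.
 */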
spi_message_init(&m);
cmd = &commands[0];
i = 0;
while (len > 0) {
chunk_len = min_t(size_t, WSPI_MAX_CHUNK_SIZE, len);
*cmd = 0;
*cmd |= WSPI_CMD_WRITE;
*cmd |= (chunk_len << WSPI_CMD_BYTE_LENGTH_OFFSET) &
WSPI_CMD_BYTE_LENGTH;
*cmd |= addr & WSPI_CMD_BYTE_ADDR;
if (fixed)
*cmd |= WSPI_CMD_FIXED;
t[i].tx_buf = cmd;
t[i].len = sizeof(*cmd);
spi_message_add_tail(&t[i++], &m);
t[i].tx_buf = buf;
t[i].len = chunk_len;
spi_message_add_tail(&t[i++], &m);
if (!fixed)
addr += chunk_len;
buf += chunk_len;
len -= chunk_len;
cmd++;
}
spi_sync(to_spi_device(glue->dev), &m);
kfree(t);
return 0;
}
static int __must_check wl12xx_spi_raw_write(struct device *child, int addr,
void *buf, size_t len, bool fixed)
{
/* The ELP wakeup write may fail the first time due to internal
* hardware latency. It is safer to send the wakeup command twice to
* avoid unexpected failures.
*/
if (addr == HW_ACCESS_ELP_CTRL_REG)
__wl12xx_spi_raw_write(child, addr, buf, len, fixed);
return __wl12xx_spi_raw_write(child, addr, buf, len, fixed);
}
/**
* wl12xx_spi_set_power - power on/off the wl12xx unit
* @child: wl12xx device handle.
* @enable: true/false to power on/off the unit.
*
* use the WiFi enable regulator to enable/disable the WiFi unit.
*/
static int wl12xx_spi_set_power(struct device *child, bool enable)
{
int ret = 0;
struct wl12xx_spi_glue *glue = dev_get_drvdata(child->parent);
WARN_ON(!glue->reg);
/* Update regulator state */
if (enable) {
ret = regulator_enable(glue->reg);
if (ret)
dev_err(child, "Power enable failure\n");
} else {
ret = regulator_disable(glue->reg);
if (ret)
dev_err(child, "Power disable failure\n");
}
return ret;
}
/*
* wl12xx_spi_set_block_size
*
* This function is not needed for SPI mode, but it must be present.
* Without it defined, wlcore falls back to using the wrong packet
* alignment on tx.
*/
static void wl12xx_spi_set_block_size(struct device *child,
unsigned int blksz)
{
}
static struct wl1271_if_operations spi_ops = {
.read = wl12xx_spi_raw_read,
.write = wl12xx_spi_raw_write,
.reset = wl12xx_spi_reset,
.init = wl12xx_spi_init,
.power = wl12xx_spi_set_power,
.set_block_size = wl12xx_spi_set_block_size,
};
static const struct of_device_id wlcore_spi_of_match_table[] = {
{ .compatible = "ti,wl1271", .data = &wl127x_data},
{ .compatible = "ti,wl1273", .data = &wl127x_data},
{ .compatible = "ti,wl1281", .data = &wl128x_data},
{ .compatible = "ti,wl1283", .data = &wl128x_data},
{ .compatible = "ti,wl1285", .data = &wl128x_data},
{ .compatible = "ti,wl1801", .data = &wl18xx_data},
{ .compatible = "ti,wl1805", .data = &wl18xx_data},
{ .compatible = "ti,wl1807", .data = &wl18xx_data},
{ .compatible = "ti,wl1831", .data = &wl18xx_data},
{ .compatible = "ti,wl1835", .data = &wl18xx_data},
{ .compatible = "ti,wl1837", .data = &wl18xx_data},
{ }
};
MODULE_DEVICE_TABLE(of, wlcore_spi_of_match_table);
/**
* wlcore_probe_of - DT node parsing.
* @spi: SPI slave device parameters.
* @glue: wl12xx SPI bus to slave device glue parameters.
* @pdev_data: wlcore device parameters
*/
static int wlcore_probe_of(struct spi_device *spi, struct wl12xx_spi_glue *glue,
struct wlcore_platdev_data *pdev_data)
{
struct device_node *dt_node = spi->dev.of_node;
const struct of_device_id *of_id;
of_id = of_match_node(wlcore_spi_of_match_table, dt_node);
if (!of_id)
return -ENODEV;
pdev_data->family = of_id->data;
dev_info(&spi->dev, "selected chip family is %s\n",
pdev_data->family->name);
pdev_data->ref_clock_xtal = of_property_read_bool(dt_node, "clock-xtal");
/* optional clock frequency params */
of_property_read_u32(dt_node, "ref-clock-frequency",
&pdev_data->ref_clock_freq);
of_property_read_u32(dt_node, "tcxo-clock-frequency",
&pdev_data->tcxo_clock_freq);
return 0;
}
static int wl1271_probe(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue;
struct wlcore_platdev_data *pdev_data;
struct resource res[1];
int ret;
pdev_data = devm_kzalloc(&spi->dev, sizeof(*pdev_data), GFP_KERNEL);
if (!pdev_data)
return -ENOMEM;
pdev_data->if_ops = &spi_ops;
glue = devm_kzalloc(&spi->dev, sizeof(*glue), GFP_KERNEL);
if (!glue) {
dev_err(&spi->dev, "can't allocate glue\n");
return -ENOMEM;
}
glue->dev = &spi->dev;
spi_set_drvdata(spi, glue);
/* This is the only SPI value that we need to set here; the rest
* comes from the board-peripherals file */
spi->bits_per_word = 32;
glue->reg = devm_regulator_get(&spi->dev, "vwlan");
if (IS_ERR(glue->reg))
return dev_err_probe(glue->dev, PTR_ERR(glue->reg),
"can't get regulator\n");
ret = wlcore_probe_of(spi, glue, pdev_data);
if (ret) {
dev_err(glue->dev,
"can't get device tree parameters (%d)\n", ret);
return ret;
}
ret = spi_setup(spi);
if (ret < 0) {
dev_err(glue->dev, "spi_setup failed\n");
return ret;
}
glue->core = platform_device_alloc(pdev_data->family->name,
PLATFORM_DEVID_AUTO);
if (!glue->core) {
dev_err(glue->dev, "can't allocate platform_device\n");
return -ENOMEM;
}
glue->core->dev.parent = &spi->dev;
memset(res, 0x00, sizeof(res));
res[0].start = spi->irq;
res[0].flags = IORESOURCE_IRQ | irq_get_trigger_type(spi->irq);
res[0].name = "irq";
ret = platform_device_add_resources(glue->core, res, ARRAY_SIZE(res));
if (ret) {
dev_err(glue->dev, "can't add resources\n");
goto out_dev_put;
}
ret = platform_device_add_data(glue->core, pdev_data,
sizeof(*pdev_data));
if (ret) {
dev_err(glue->dev, "can't add platform data\n");
goto out_dev_put;
}
ret = platform_device_add(glue->core);
if (ret) {
dev_err(glue->dev, "can't register platform device\n");
goto out_dev_put;
}
return 0;
out_dev_put:
platform_device_put(glue->core);
return ret;
}
static void wl1271_remove(struct spi_device *spi)
{
struct wl12xx_spi_glue *glue = spi_get_drvdata(spi);
platform_device_unregister(glue->core);
}
static struct spi_driver wl1271_spi_driver = {
.driver = {
.name = "wl1271_spi",
.of_match_table = wlcore_spi_of_match_table,
},
.probe = wl1271_probe,
.remove = wl1271_remove,
};
module_spi_driver(wl1271_spi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <[email protected]>");
MODULE_AUTHOR("Juuso Oikarinen <[email protected]>");
MODULE_ALIAS("spi:wl1271");
|
linux-master
|
drivers/net/wireless/ti/wlcore/spi.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include "ps.h"
#include "io.h"
#include "tx.h"
#include "debug.h"
int wl1271_ps_set_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum wl1271_cmd_ps_mode mode)
{
int ret;
u16 timeout = wl->conf.conn.dynamic_ps_timeout;
switch (mode) {
case STATION_AUTO_PS_MODE:
case STATION_POWER_SAVE_MODE:
wl1271_debug(DEBUG_PSM, "entering psm (mode=%d,timeout=%u)",
mode, timeout);
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.wake_up_event,
wl->conf.conn.listen_interval);
if (ret < 0) {
wl1271_error("couldn't set wake up conditions");
return ret;
}
ret = wl1271_cmd_ps_mode(wl, wlvif, mode, timeout);
if (ret < 0)
return ret;
set_bit(WLVIF_FLAG_IN_PS, &wlvif->flags);
/*
* enable beacon early termination.
* Not relevant for 5GHz and for high rates.
*/
if ((wlvif->band == NL80211_BAND_2GHZ) &&
(wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
ret = wl1271_acx_bet_enable(wl, wlvif, true);
if (ret < 0)
return ret;
}
break;
case STATION_ACTIVE_MODE:
wl1271_debug(DEBUG_PSM, "leaving psm");
/* disable beacon early termination */
if ((wlvif->band == NL80211_BAND_2GHZ) &&
(wlvif->basic_rate < CONF_HW_BIT_RATE_9MBPS)) {
ret = wl1271_acx_bet_enable(wl, wlvif, false);
if (ret < 0)
return ret;
}
ret = wl1271_cmd_ps_mode(wl, wlvif, mode, 0);
if (ret < 0)
return ret;
clear_bit(WLVIF_FLAG_IN_PS, &wlvif->flags);
break;
default:
wl1271_warning("trying to set ps to unsupported mode %d", mode);
ret = -EINVAL;
}
return ret;
}
static void wl1271_ps_filter_frames(struct wl1271 *wl, u8 hlid)
{
int i;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
unsigned long flags;
int filtered[NUM_TX_QUEUES];
struct wl1271_link *lnk = &wl->links[hlid];
/* filter all frames currently in the low level queues for this hlid */
for (i = 0; i < NUM_TX_QUEUES; i++) {
filtered[i] = 0;
while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
filtered[i]++;
if (WARN_ON(wl12xx_is_dummy_packet(wl, skb)))
continue;
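/* Hand the frame back to mac80211 as TX-filtered so it can be
 * retransmitted once the station wakes up.
 */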
info = IEEE80211_SKB_CB(skb);
info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
info->status.rates[0].idx = -1;
ieee80211_tx_status_ni(wl->hw, skb);
}
}
spin_lock_irqsave(&wl->wl_lock, flags);
for (i = 0; i < NUM_TX_QUEUES; i++) {
wl->tx_queue_count[i] -= filtered[i];
if (lnk->wlvif)
lnk->wlvif->tx_queue_count[i] -= filtered[i];
}
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_handle_tx_low_watermark(wl);
}
void wl12xx_ps_link_start(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid, bool clean_queues)
{
struct ieee80211_sta *sta;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
return;
if (!test_bit(hlid, wlvif->ap.sta_hlid_map) ||
test_bit(hlid, &wl->ap_ps_map))
return;
wl1271_debug(DEBUG_PSM, "start mac80211 PSM on hlid %d pkts %d "
"clean_queues %d", hlid, wl->links[hlid].allocated_pkts,
clean_queues);
rcu_read_lock();
sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
if (!sta) {
wl1271_error("could not find sta %pM for starting ps",
wl->links[hlid].addr);
rcu_read_unlock();
return;
}
ieee80211_sta_ps_transition_ni(sta, true);
rcu_read_unlock();
/* do we want to filter all frames from this link's queues? */
if (clean_queues)
wl1271_ps_filter_frames(wl, hlid);
__set_bit(hlid, &wl->ap_ps_map);
}
void wl12xx_ps_link_end(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
struct ieee80211_sta *sta;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (!test_bit(hlid, &wl->ap_ps_map))
return;
wl1271_debug(DEBUG_PSM, "end mac80211 PSM on hlid %d", hlid);
__clear_bit(hlid, &wl->ap_ps_map);
rcu_read_lock();
sta = ieee80211_find_sta(vif, wl->links[hlid].addr);
if (!sta) {
wl1271_error("could not find sta %pM for ending ps",
wl->links[hlid].addr);
goto end;
}
ieee80211_sta_ps_transition_ni(sta, false);
end:
rcu_read_unlock();
}
|
linux-master
|
drivers/net/wireless/ti/wlcore/ps.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2008-2010 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spi/spi.h>
#include <linux/interrupt.h>
#include "wlcore.h"
#include "debug.h"
#include "wl12xx_80211.h"
#include "io.h"
#include "tx.h"
bool wl1271_set_block_size(struct wl1271 *wl)
{
if (wl->if_ops->set_block_size) {
wl->if_ops->set_block_size(wl->dev, WL12XX_BUS_BLOCK_SIZE);
return true;
}
return false;
}
void wlcore_disable_interrupts(struct wl1271 *wl)
{
disable_irq(wl->irq);
}
EXPORT_SYMBOL_GPL(wlcore_disable_interrupts);
void wlcore_disable_interrupts_nosync(struct wl1271 *wl)
{
disable_irq_nosync(wl->irq);
}
EXPORT_SYMBOL_GPL(wlcore_disable_interrupts_nosync);
void wlcore_enable_interrupts(struct wl1271 *wl)
{
enable_irq(wl->irq);
}
EXPORT_SYMBOL_GPL(wlcore_enable_interrupts);
void wlcore_synchronize_interrupts(struct wl1271 *wl)
{
synchronize_irq(wl->irq);
}
EXPORT_SYMBOL_GPL(wlcore_synchronize_interrupts);
int wlcore_translate_addr(struct wl1271 *wl, int addr)
{
struct wlcore_partition_set *part = &wl->curr_part;
/*
* To translate, first check to which window of addresses the
* particular address belongs. Then subtract the starting address
* of that window from the address. Then, add offset of the
* translated region.
*
* The translated regions occur next to each other in physical device
* memory, so just add the sizes of the preceding address regions to
* get the offset to the new region.
*/
if ((addr >= part->mem.start) &&
(addr < part->mem.start + part->mem.size))
return addr - part->mem.start;
else if ((addr >= part->reg.start) &&
(addr < part->reg.start + part->reg.size))
return addr - part->reg.start + part->mem.size;
else if ((addr >= part->mem2.start) &&
(addr < part->mem2.start + part->mem2.size))
return addr - part->mem2.start + part->mem.size +
part->reg.size;
else if ((addr >= part->mem3.start) &&
(addr < part->mem3.start + part->mem3.size))
return addr - part->mem3.start + part->mem.size +
part->reg.size + part->mem2.size;
WARN(1, "HW address 0x%x out of range", addr);
return 0;
}
EXPORT_SYMBOL_GPL(wlcore_translate_addr);
/* Set the partitions to access the chip addresses
*
* To simplify driver code, a fixed (virtual) memory map is defined for
* register and memory addresses. Because in the chipset, in different stages
* of operation, those addresses will move around, an address translation
* mechanism is required.
*
* There are four partitions (three memory and one register partition),
* which are mapped to two different areas of the hardware memory.
*
* Virtual address
* space
*
* | |
* ...+----+--> mem.start
* Physical address ... | |
* space ... | | [PART_0]
* ... | |
* 00000000 <--+----+... ...+----+--> mem.start + mem.size
* | | ... | |
* |MEM | ... | |
* | | ... | |
* mem.size <--+----+... | | (unused area)
* | | ... | |
* |REG | ... | |
* mem.size | | ... | |
* + <--+----+... ...+----+--> reg.start
* reg.size | | ... | |
* |MEM2| ... | | [PART_1]
* | | ... | |
* ...+----+--> reg.start + reg.size
* | |
*
*/
int wlcore_set_partition(struct wl1271 *wl,
const struct wlcore_partition_set *p)
{
int ret;
/* copy partition info */
memcpy(&wl->curr_part, p, sizeof(*p));
wl1271_debug(DEBUG_IO, "mem_start %08X mem_size %08X",
p->mem.start, p->mem.size);
wl1271_debug(DEBUG_IO, "reg_start %08X reg_size %08X",
p->reg.start, p->reg.size);
wl1271_debug(DEBUG_IO, "mem2_start %08X mem2_size %08X",
p->mem2.start, p->mem2.size);
wl1271_debug(DEBUG_IO, "mem3_start %08X mem3_size %08X",
p->mem3.start, p->mem3.size);
ret = wlcore_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
if (ret < 0)
goto out;
ret = wlcore_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
if (ret < 0)
goto out;
ret = wlcore_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
if (ret < 0)
goto out;
ret = wlcore_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
if (ret < 0)
goto out;
ret = wlcore_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
if (ret < 0)
goto out;
ret = wlcore_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
if (ret < 0)
goto out;
/* wl12xx only: We don't need the size of the last partition,
* as it is automatically calculated based on the total memory
* size and the sizes of the previous partitions.
*
* wl18xx re-defines the HW_PART3 addresses for logger over
* SDIO support. wl12xx is expecting the write to
* HW_PART3_START_ADDR at offset 24. This creates a conflict
* between the addresses.
* In order to fix this the expected value is written to
* HW_PART3_SIZE_ADDR instead which is at offset 24 after changes.
*/
ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
if (ret < 0)
goto out;
ret = wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size);
if (ret < 0)
goto out;
out:
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_set_partition);
void wl1271_io_reset(struct wl1271 *wl)
{
if (wl->if_ops->reset)
wl->if_ops->reset(wl->dev);
}
void wl1271_io_init(struct wl1271 *wl)
{
if (wl->if_ops->init)
wl->if_ops->init(wl->dev);
}
|
linux-master
|
drivers/net/wireless/ti/wlcore/io.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2009 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include "debugfs.h"
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/pm_runtime.h>
#include "wlcore.h"
#include "debug.h"
#include "acx.h"
#include "ps.h"
#include "io.h"
#include "tx.h"
#include "hw_ops.h"
/* ms */
#define WL1271_DEBUGFS_STATS_LIFETIME 1000
#define WLCORE_MAX_BLOCK_SIZE ((size_t)(4*PAGE_SIZE))
/* debugfs macros idea from mac80211 */
int wl1271_format_buffer(char __user *userbuf, size_t count,
loff_t *ppos, char *fmt, ...)
{
va_list args;
char buf[DEBUGFS_FORMAT_BUFFER_SIZE];
int res;
va_start(args, fmt);
res = vscnprintf(buf, sizeof(buf), fmt, args);
va_end(args);
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
EXPORT_SYMBOL_GPL(wl1271_format_buffer);
void wl1271_debugfs_update_stats(struct wl1271 *wl)
{
int ret;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
if (!wl->plt &&
time_after(jiffies, wl->stats.fw_stats_update +
msecs_to_jiffies(WL1271_DEBUGFS_STATS_LIFETIME))) {
wl1271_acx_statistics(wl, wl->stats.fw_stats);
wl->stats.fw_stats_update = jiffies;
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
EXPORT_SYMBOL_GPL(wl1271_debugfs_update_stats);
DEBUGFS_READONLY_FILE(retry_count, "%u", wl->stats.retry_count);
DEBUGFS_READONLY_FILE(excessive_retries, "%u",
wl->stats.excessive_retries);
static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
u32 queue_len;
char buf[20];
int res;
queue_len = wl1271_tx_total_queue_count(wl);
res = scnprintf(buf, sizeof(buf), "%u\n", queue_len);
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
static const struct file_operations tx_queue_len_ops = {
.read = tx_queue_len_read,
.open = simple_open,
.llseek = default_llseek,
};
static void chip_op_handler(struct wl1271 *wl, unsigned long value,
void *arg)
{
int ret;
int (*chip_op) (struct wl1271 *wl);
if (!arg) {
wl1271_warning("debugfs chip_op_handler with no callback");
return;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
return;
chip_op = arg;
chip_op(wl);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
}
#define WL12XX_CONF_DEBUGFS(param, conf_sub_struct, \
min_val, max_val, write_handler_locked, \
write_handler_arg) \
static ssize_t param##_read(struct file *file, \
char __user *user_buf, \
size_t count, loff_t *ppos) \
{ \
struct wl1271 *wl = file->private_data; \
return wl1271_format_buffer(user_buf, count, \
ppos, "%d\n", \
wl->conf.conf_sub_struct.param); \
} \
\
static ssize_t param##_write(struct file *file, \
const char __user *user_buf, \
size_t count, loff_t *ppos) \
{ \
struct wl1271 *wl = file->private_data; \
unsigned long value; \
int ret; \
\
ret = kstrtoul_from_user(user_buf, count, 10, &value); \
if (ret < 0) { \
wl1271_warning("illegal value for " #param); \
return -EINVAL; \
} \
\
if (value < min_val || value > max_val) { \
wl1271_warning(#param " is not in valid range"); \
return -ERANGE; \
} \
\
mutex_lock(&wl->mutex); \
wl->conf.conf_sub_struct.param = value; \
\
write_handler_locked(wl, value, write_handler_arg); \
\
mutex_unlock(&wl->mutex); \
return count; \
} \
\
static const struct file_operations param##_ops = { \
.read = param##_read, \
.write = param##_write, \
.open = simple_open, \
.llseek = default_llseek, \
};
WL12XX_CONF_DEBUGFS(irq_pkt_threshold, rx, 0, 65535,
chip_op_handler, wl1271_acx_init_rx_interrupt)
WL12XX_CONF_DEBUGFS(irq_blk_threshold, rx, 0, 65535,
chip_op_handler, wl1271_acx_init_rx_interrupt)
WL12XX_CONF_DEBUGFS(irq_timeout, rx, 0, 100,
chip_op_handler, wl1271_acx_init_rx_interrupt)
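/*
 * For reference: each WL12XX_CONF_DEBUGFS() instantiation above expands
 * to a param##_read()/param##_write() pair plus a param##_ops table. A
 * write stores the value into wl->conf.rx.<param> under wl->mutex and
 * then calls chip_op_handler(), which resumes the device and invokes
 * the ACX function passed as write_handler_arg - here
 * wl1271_acx_init_rx_interrupt(wl) - so the new setting reaches the
 * firmware immediately.
 */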
static ssize_t gpio_power_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
bool state = test_bit(WL1271_FLAG_GPIO_POWER, &wl->flags);
int res;
char buf[10];
res = scnprintf(buf, sizeof(buf), "%d\n", state);
return simple_read_from_buffer(user_buf, count, ppos, buf, res);
}
static ssize_t gpio_power_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value in gpio_power");
return -EINVAL;
}
mutex_lock(&wl->mutex);
if (value)
wl1271_power_on(wl);
else
wl1271_power_off(wl);
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations gpio_power_ops = {
.read = gpio_power_read,
.write = gpio_power_write,
.open = simple_open,
.llseek = default_llseek,
};
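/*
 * Usage sketch (path assumes the default debugfs mount and the "wlcore"
 * module-name directory created in wl1271_debugfs_init() below):
 *
 * echo 1 > /sys/kernel/debug/ieee80211/phy0/wlcore/gpio_power
 * cat /sys/kernel/debug/ieee80211/phy0/wlcore/gpio_power
 */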
static ssize_t start_recovery_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
mutex_lock(&wl->mutex);
wl12xx_queue_recovery_work(wl);
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations start_recovery_ops = {
.write = start_recovery_write,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t dynamic_ps_timeout_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
return wl1271_format_buffer(user_buf, count,
ppos, "%d\n",
wl->conf.conn.dynamic_ps_timeout);
}
static ssize_t dynamic_ps_timeout_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value in dynamic_ps");
return -EINVAL;
}
if (value < 1 || value > 65535) {
wl1271_warning("dynamic_ps_timeout is not in valid range");
return -ERANGE;
}
mutex_lock(&wl->mutex);
wl->conf.conn.dynamic_ps_timeout = value;
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
/* In case we're already in PSM, trigger it again to set new timeout
* immediately without waiting for re-association
*/
wl12xx_for_each_wlvif_sta(wl, wlvif) {
if (test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags))
wl1271_ps_set_mode(wl, wlvif, STATION_AUTO_PS_MODE);
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations dynamic_ps_timeout_ops = {
.read = dynamic_ps_timeout_read,
.write = dynamic_ps_timeout_write,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t forced_ps_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
return wl1271_format_buffer(user_buf, count,
ppos, "%d\n",
wl->conf.conn.forced_ps);
}
static ssize_t forced_ps_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
struct wl12xx_vif *wlvif;
unsigned long value;
int ret, ps_mode;
ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value in forced_ps");
return -EINVAL;
}
if (value != 1 && value != 0) {
wl1271_warning("forced_ps should be either 0 or 1");
return -ERANGE;
}
mutex_lock(&wl->mutex);
if (wl->conf.conn.forced_ps == value)
goto out;
wl->conf.conn.forced_ps = value;
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
/* In case we're already in PSM, trigger it again to switch mode
* immediately without waiting for re-association
*/
ps_mode = value ? STATION_POWER_SAVE_MODE : STATION_AUTO_PS_MODE;
wl12xx_for_each_wlvif_sta(wl, wlvif) {
if (test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags))
wl1271_ps_set_mode(wl, wlvif, ps_mode);
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations forced_ps_ops = {
.read = forced_ps_read,
.write = forced_ps_write,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t split_scan_timeout_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
return wl1271_format_buffer(user_buf, count,
ppos, "%d\n",
wl->conf.scan.split_scan_timeout / 1000);
}
static ssize_t split_scan_timeout_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value in split_scan_timeout");
return -EINVAL;
}
if (value == 0)
wl1271_info("split scan will be disabled");
mutex_lock(&wl->mutex);
wl->conf.scan.split_scan_timeout = value * 1000;
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations split_scan_timeout_ops = {
.read = split_scan_timeout_read,
.write = split_scan_timeout_write,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t driver_state_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
int res = 0;
ssize_t ret;
char *buf;
struct wl12xx_vif *wlvif;
#define DRIVER_STATE_BUF_LEN 1024
buf = kmalloc(DRIVER_STATE_BUF_LEN, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&wl->mutex);
#define DRIVER_STATE_PRINT(x, fmt) \
(res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
#x " = " fmt "\n", wl->x))
#define DRIVER_STATE_PRINT_GENERIC(x, fmt, args...) \
(res += scnprintf(buf + res, DRIVER_STATE_BUF_LEN - res,\
#x " = " fmt "\n", args))
#define DRIVER_STATE_PRINT_LONG(x) DRIVER_STATE_PRINT(x, "%ld")
#define DRIVER_STATE_PRINT_INT(x) DRIVER_STATE_PRINT(x, "%d")
#define DRIVER_STATE_PRINT_STR(x) DRIVER_STATE_PRINT(x, "%s")
#define DRIVER_STATE_PRINT_LHEX(x) DRIVER_STATE_PRINT(x, "0x%lx")
#define DRIVER_STATE_PRINT_HEX(x) DRIVER_STATE_PRINT(x, "0x%x")
wl12xx_for_each_wlvif_sta(wl, wlvif) {
if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
continue;
DRIVER_STATE_PRINT_GENERIC(channel, "%d (%s)", wlvif->channel,
wlvif->p2p ? "P2P-CL" : "STA");
}
wl12xx_for_each_wlvif_ap(wl, wlvif)
DRIVER_STATE_PRINT_GENERIC(channel, "%d (%s)", wlvif->channel,
wlvif->p2p ? "P2P-GO" : "AP");
DRIVER_STATE_PRINT_INT(tx_blocks_available);
DRIVER_STATE_PRINT_INT(tx_allocated_blocks);
DRIVER_STATE_PRINT_INT(tx_allocated_pkts[0]);
DRIVER_STATE_PRINT_INT(tx_allocated_pkts[1]);
DRIVER_STATE_PRINT_INT(tx_allocated_pkts[2]);
DRIVER_STATE_PRINT_INT(tx_allocated_pkts[3]);
DRIVER_STATE_PRINT_INT(tx_frames_cnt);
DRIVER_STATE_PRINT_LHEX(tx_frames_map[0]);
DRIVER_STATE_PRINT_INT(tx_queue_count[0]);
DRIVER_STATE_PRINT_INT(tx_queue_count[1]);
DRIVER_STATE_PRINT_INT(tx_queue_count[2]);
DRIVER_STATE_PRINT_INT(tx_queue_count[3]);
DRIVER_STATE_PRINT_INT(tx_packets_count);
DRIVER_STATE_PRINT_INT(tx_results_count);
DRIVER_STATE_PRINT_LHEX(flags);
DRIVER_STATE_PRINT_INT(tx_blocks_freed);
DRIVER_STATE_PRINT_INT(rx_counter);
DRIVER_STATE_PRINT_INT(state);
DRIVER_STATE_PRINT_INT(band);
DRIVER_STATE_PRINT_INT(power_level);
DRIVER_STATE_PRINT_INT(sg_enabled);
DRIVER_STATE_PRINT_INT(enable_11a);
DRIVER_STATE_PRINT_INT(noise);
DRIVER_STATE_PRINT_LHEX(ap_fw_ps_map);
DRIVER_STATE_PRINT_LHEX(ap_ps_map);
DRIVER_STATE_PRINT_HEX(quirks);
DRIVER_STATE_PRINT_HEX(irq);
/* TODO: ref_clock and tcxo_clock were moved to wl12xx priv */
DRIVER_STATE_PRINT_HEX(hw_pg_ver);
DRIVER_STATE_PRINT_HEX(irq_flags);
DRIVER_STATE_PRINT_HEX(chip.id);
DRIVER_STATE_PRINT_STR(chip.fw_ver_str);
DRIVER_STATE_PRINT_STR(chip.phy_fw_ver_str);
DRIVER_STATE_PRINT_INT(recovery_count);
#undef DRIVER_STATE_PRINT_INT
#undef DRIVER_STATE_PRINT_LONG
#undef DRIVER_STATE_PRINT_HEX
#undef DRIVER_STATE_PRINT_LHEX
#undef DRIVER_STATE_PRINT_STR
#undef DRIVER_STATE_PRINT
#undef DRIVER_STATE_BUF_LEN
mutex_unlock(&wl->mutex);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
kfree(buf);
return ret;
}
static const struct file_operations driver_state_ops = {
.read = driver_state_read,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t vifs_state_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
struct wl12xx_vif *wlvif;
int ret, res = 0;
const int buf_size = 4096;
char *buf;
char tmp_buf[64];
buf = kzalloc(buf_size, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&wl->mutex);
#define VIF_STATE_PRINT(x, fmt) \
(res += scnprintf(buf + res, buf_size - res, \
#x " = " fmt "\n", wlvif->x))
#define VIF_STATE_PRINT_LONG(x) VIF_STATE_PRINT(x, "%ld")
#define VIF_STATE_PRINT_INT(x) VIF_STATE_PRINT(x, "%d")
#define VIF_STATE_PRINT_STR(x) VIF_STATE_PRINT(x, "%s")
#define VIF_STATE_PRINT_LHEX(x) VIF_STATE_PRINT(x, "0x%lx")
#define VIF_STATE_PRINT_LLHEX(x) VIF_STATE_PRINT(x, "0x%llx")
#define VIF_STATE_PRINT_HEX(x) VIF_STATE_PRINT(x, "0x%x")
#define VIF_STATE_PRINT_NSTR(x, len) \
do { \
memset(tmp_buf, 0, sizeof(tmp_buf)); \
memcpy(tmp_buf, wlvif->x, \
min_t(u8, len, sizeof(tmp_buf) - 1)); \
res += scnprintf(buf + res, buf_size - res, \
#x " = %s\n", tmp_buf); \
} while (0)
wl12xx_for_each_wlvif(wl, wlvif) {
VIF_STATE_PRINT_INT(role_id);
VIF_STATE_PRINT_INT(bss_type);
VIF_STATE_PRINT_LHEX(flags);
VIF_STATE_PRINT_INT(p2p);
VIF_STATE_PRINT_INT(dev_role_id);
VIF_STATE_PRINT_INT(dev_hlid);
if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS) {
VIF_STATE_PRINT_INT(sta.hlid);
VIF_STATE_PRINT_INT(sta.basic_rate_idx);
VIF_STATE_PRINT_INT(sta.ap_rate_idx);
VIF_STATE_PRINT_INT(sta.p2p_rate_idx);
VIF_STATE_PRINT_INT(sta.qos);
} else {
VIF_STATE_PRINT_INT(ap.global_hlid);
VIF_STATE_PRINT_INT(ap.bcast_hlid);
VIF_STATE_PRINT_LHEX(ap.sta_hlid_map[0]);
VIF_STATE_PRINT_INT(ap.mgmt_rate_idx);
VIF_STATE_PRINT_INT(ap.bcast_rate_idx);
VIF_STATE_PRINT_INT(ap.ucast_rate_idx[0]);
VIF_STATE_PRINT_INT(ap.ucast_rate_idx[1]);
VIF_STATE_PRINT_INT(ap.ucast_rate_idx[2]);
VIF_STATE_PRINT_INT(ap.ucast_rate_idx[3]);
}
VIF_STATE_PRINT_INT(last_tx_hlid);
VIF_STATE_PRINT_INT(tx_queue_count[0]);
VIF_STATE_PRINT_INT(tx_queue_count[1]);
VIF_STATE_PRINT_INT(tx_queue_count[2]);
VIF_STATE_PRINT_INT(tx_queue_count[3]);
VIF_STATE_PRINT_LHEX(links_map[0]);
VIF_STATE_PRINT_NSTR(ssid, wlvif->ssid_len);
VIF_STATE_PRINT_INT(band);
VIF_STATE_PRINT_INT(channel);
VIF_STATE_PRINT_HEX(bitrate_masks[0]);
VIF_STATE_PRINT_HEX(bitrate_masks[1]);
VIF_STATE_PRINT_HEX(basic_rate_set);
VIF_STATE_PRINT_HEX(basic_rate);
VIF_STATE_PRINT_HEX(rate_set);
VIF_STATE_PRINT_INT(beacon_int);
VIF_STATE_PRINT_INT(default_key);
VIF_STATE_PRINT_INT(aid);
VIF_STATE_PRINT_INT(psm_entry_retry);
VIF_STATE_PRINT_INT(power_level);
VIF_STATE_PRINT_INT(rssi_thold);
VIF_STATE_PRINT_INT(last_rssi_event);
VIF_STATE_PRINT_INT(ba_support);
VIF_STATE_PRINT_INT(ba_allowed);
VIF_STATE_PRINT_LLHEX(total_freed_pkts);
}
#undef VIF_STATE_PRINT_INT
#undef VIF_STATE_PRINT_LONG
#undef VIF_STATE_PRINT_HEX
#undef VIF_STATE_PRINT_LHEX
#undef VIF_STATE_PRINT_LLHEX
#undef VIF_STATE_PRINT_STR
#undef VIF_STATE_PRINT_NSTR
#undef VIF_STATE_PRINT
mutex_unlock(&wl->mutex);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, res);
kfree(buf);
return ret;
}
static const struct file_operations vifs_state_ops = {
.read = vifs_state_read,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t dtim_interval_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
u8 value;
if (wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_DTIM ||
wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_N_DTIM)
value = wl->conf.conn.listen_interval;
else
value = 0;
return wl1271_format_buffer(user_buf, count, ppos, "%d\n", value);
}
static ssize_t dtim_interval_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value for dtim_interval");
return -EINVAL;
}
if (value < 1 || value > 10) {
wl1271_warning("dtim value is not in valid range");
return -ERANGE;
}
mutex_lock(&wl->mutex);
wl->conf.conn.listen_interval = value;
/* for some reason there are different event types for 1 and >1 */
if (value == 1)
wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_DTIM;
else
wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM;
/*
* we don't reconfigure ACX_WAKE_UP_CONDITIONS now, so it will only
* take effect the next time we enter psm.
*/
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations dtim_interval_ops = {
.read = dtim_interval_read,
.write = dtim_interval_write,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t suspend_dtim_interval_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
u8 value;
if (wl->conf.conn.suspend_wake_up_event == CONF_WAKE_UP_EVENT_DTIM ||
wl->conf.conn.suspend_wake_up_event == CONF_WAKE_UP_EVENT_N_DTIM)
value = wl->conf.conn.suspend_listen_interval;
else
value = 0;
return wl1271_format_buffer(user_buf, count, ppos, "%d\n", value);
}
static ssize_t suspend_dtim_interval_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value for suspend_dtim_interval");
return -EINVAL;
}
if (value < 1 || value > 10) {
wl1271_warning("suspend_dtim value is not in valid range");
return -ERANGE;
}
mutex_lock(&wl->mutex);
wl->conf.conn.suspend_listen_interval = value;
/* for some reason there are different event types for 1 and >1 */
if (value == 1)
wl->conf.conn.suspend_wake_up_event = CONF_WAKE_UP_EVENT_DTIM;
else
wl->conf.conn.suspend_wake_up_event = CONF_WAKE_UP_EVENT_N_DTIM;
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations suspend_dtim_interval_ops = {
.read = suspend_dtim_interval_read,
.write = suspend_dtim_interval_write,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t beacon_interval_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
u8 value;
if (wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_BEACON ||
wl->conf.conn.wake_up_event == CONF_WAKE_UP_EVENT_N_BEACONS)
value = wl->conf.conn.listen_interval;
else
value = 0;
return wl1271_format_buffer(user_buf, count, ppos, "%d\n", value);
}
static ssize_t beacon_interval_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value for beacon_interval");
return -EINVAL;
}
if (value < 1 || value > 255) {
wl1271_warning("beacon interval value is not in valid range");
return -ERANGE;
}
mutex_lock(&wl->mutex);
wl->conf.conn.listen_interval = value;
/* for some reason there are different event types for 1 and >1 */
if (value == 1)
wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_BEACON;
else
wl->conf.conn.wake_up_event = CONF_WAKE_UP_EVENT_N_BEACONS;
/*
* we don't reconfigure ACX_WAKE_UP_CONDITIONS now, so it will only
* take effect the next time we enter psm.
*/
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations beacon_interval_ops = {
.read = beacon_interval_read,
.write = beacon_interval_write,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t rx_streaming_interval_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value in rx_streaming_interval!");
return -EINVAL;
}
/* valid values: 0, 10-100 */
if (value && (value < 10 || value > 100)) {
wl1271_warning("value is not in range!");
return -ERANGE;
}
mutex_lock(&wl->mutex);
wl->conf.rx_streaming.interval = value;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
}
static ssize_t rx_streaming_interval_read(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
return wl1271_format_buffer(userbuf, count, ppos,
"%d\n", wl->conf.rx_streaming.interval);
}
static const struct file_operations rx_streaming_interval_ops = {
.read = rx_streaming_interval_read,
.write = rx_streaming_interval_write,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t rx_streaming_always_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 10, &value);
if (ret < 0) {
wl1271_warning("illegal value in rx_streaming_write!");
return -EINVAL;
}
/* valid values: 0, 1 */
if (!(value == 0 || value == 1)) {
wl1271_warning("value is not valid!");
return -EINVAL;
}
mutex_lock(&wl->mutex);
wl->conf.rx_streaming.always = value;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
}
static ssize_t rx_streaming_always_read(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
return wl1271_format_buffer(userbuf, count, ppos,
"%d\n", wl->conf.rx_streaming.always);
}
static const struct file_operations rx_streaming_always_ops = {
.read = rx_streaming_always_read,
.write = rx_streaming_always_write,
.open = simple_open,
.llseek = default_llseek,
};
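/*
 * Usage sketch (illustrative paths): both rx streaming knobs live in
 * the "rx_streaming" subdirectory created in wl1271_debugfs_add_files()
 * below:
 *
 * echo 50 > .../wlcore/rx_streaming/interval  (ms; 0 disables, else 10-100)
 * echo 1 > .../wlcore/rx_streaming/always     (0 or 1)
 */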
static ssize_t beacon_filtering_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
struct wl12xx_vif *wlvif;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 0, &value);
if (ret < 0) {
wl1271_warning("illegal value for beacon_filtering!");
return -EINVAL;
}
mutex_lock(&wl->mutex);
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, !!value);
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations beacon_filtering_ops = {
.write = beacon_filtering_write,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t fw_stats_raw_read(struct file *file,
char __user *userbuf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
wl1271_debugfs_update_stats(wl);
return simple_read_from_buffer(userbuf, count, ppos,
wl->stats.fw_stats,
wl->stats.fw_stats_len);
}
static const struct file_operations fw_stats_raw_ops = {
.read = fw_stats_raw_read,
.open = simple_open,
.llseek = default_llseek,
};
static ssize_t sleep_auth_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
return wl1271_format_buffer(user_buf, count,
ppos, "%d\n",
wl->sleep_auth);
}
static ssize_t sleep_auth_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 0, &value);
if (ret < 0) {
wl1271_warning("illegal value in sleep_auth");
return -EINVAL;
}
if (value > WL1271_PSM_MAX) {
wl1271_warning("sleep_auth must be between 0 and %d",
WL1271_PSM_MAX);
return -ERANGE;
}
mutex_lock(&wl->mutex);
wl->conf.conn.sta_sleep_auth = value;
if (unlikely(wl->state != WLCORE_STATE_ON)) {
/* this will show up on "read" in case we are off */
wl->sleep_auth = value;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl1271_acx_sleep_auth(wl, value);
if (ret < 0)
goto out_sleep;
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations sleep_auth_ops = {
.read = sleep_auth_read,
.write = sleep_auth_write,
.open = simple_open,
.llseek = default_llseek,
};
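/*
 * Usage sketch (path assumes the default debugfs layout): sleep_auth
 * takes a power-save authorization level in 0..WL1271_PSM_MAX. The
 * value is cached in wl->conf.conn.sta_sleep_auth and, when the
 * interface is up, applied right away through wl1271_acx_sleep_auth():
 *
 * echo 2 > /sys/kernel/debug/ieee80211/phy0/wlcore/sleep_auth
 */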
static ssize_t dev_mem_read(struct file *file,
char __user *user_buf, size_t count,
loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
struct wlcore_partition_set part, old_part;
size_t bytes = count;
int ret;
char *buf;
/* only requests of dword-aligned size and offset are supported */
if (bytes % 4)
return -EINVAL;
if (*ppos % 4)
return -EINVAL;
/* function should return in reasonable time */
bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE);
if (bytes == 0)
return -EINVAL;
memset(&part, 0, sizeof(part));
part.mem.start = *ppos;
part.mem.size = bytes;
buf = kmalloc(bytes, GFP_KERNEL);
if (!buf)
return -ENOMEM;
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WLCORE_STATE_OFF)) {
ret = -EFAULT;
goto skip_read;
}
/*
* Don't fail if elp_wakeup returns an error, so the device's memory
* could be read even if the FW crashed
*/
pm_runtime_get_sync(wl->dev);
/* store current partition and switch partition */
memcpy(&old_part, &wl->curr_part, sizeof(old_part));
ret = wlcore_set_partition(wl, &part);
if (ret < 0)
goto part_err;
ret = wlcore_raw_read(wl, 0, buf, bytes, false);
if (ret < 0)
goto read_err;
read_err:
/* recover partition */
ret = wlcore_set_partition(wl, &old_part);
if (ret < 0)
goto part_err;
part_err:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
skip_read:
mutex_unlock(&wl->mutex);
if (ret == 0) {
ret = copy_to_user(user_buf, buf, bytes);
if (ret < bytes) {
bytes -= ret;
*ppos += bytes;
ret = 0;
} else {
ret = -EFAULT;
}
}
kfree(buf);
return ((ret == 0) ? bytes : ret);
}
static ssize_t dev_mem_write(struct file *file, const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
struct wlcore_partition_set part, old_part;
size_t bytes = count;
int ret;
char *buf;
/* only requests of dword-aligned size and offset are supported */
if (bytes % 4)
return -EINVAL;
if (*ppos % 4)
return -EINVAL;
/* function should return in reasonable time */
bytes = min(bytes, WLCORE_MAX_BLOCK_SIZE);
if (bytes == 0)
return -EINVAL;
memset(&part, 0, sizeof(part));
part.mem.start = *ppos;
part.mem.size = bytes;
buf = memdup_user(user_buf, bytes);
if (IS_ERR(buf))
return PTR_ERR(buf);
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WLCORE_STATE_OFF)) {
ret = -EFAULT;
goto skip_write;
}
/*
* Don't fail if elp_wakeup returns an error, so the device's memory
* could be written even if the FW crashed
*/
pm_runtime_get_sync(wl->dev);
/* store current partition and switch partition */
memcpy(&old_part, &wl->curr_part, sizeof(old_part));
ret = wlcore_set_partition(wl, &part);
if (ret < 0)
goto part_err;
ret = wlcore_raw_write(wl, 0, buf, bytes, false);
if (ret < 0)
goto write_err;
write_err:
/* recover partition */
ret = wlcore_set_partition(wl, &old_part);
if (ret < 0)
goto part_err;
part_err:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
skip_write:
mutex_unlock(&wl->mutex);
if (ret == 0)
*ppos += bytes;
kfree(buf);
return ((ret == 0) ? bytes : ret);
}
static loff_t dev_mem_seek(struct file *file, loff_t offset, int orig)
{
/* only requests of dword-aligned size and offset are supported */
if (offset % 4)
return -EINVAL;
return no_seek_end_llseek(file, offset, orig);
}
static const struct file_operations dev_mem_ops = {
.open = simple_open,
.read = dev_mem_read,
.write = dev_mem_write,
.llseek = dev_mem_seek,
};
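/*
 * Usage sketch (illustrative offset): dev_mem exposes raw chip memory
 * through a temporary partition. Reads and writes must be multiples of
 * 4 bytes at dword-aligned offsets, at most WLCORE_MAX_BLOCK_SIZE per
 * call:
 *
 * dd if=/sys/kernel/debug/ieee80211/phy0/wlcore/dev_mem \
 *    bs=4 skip=$((0x40000 / 4)) count=16 | xxd
 */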
static ssize_t fw_logger_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
return wl1271_format_buffer(user_buf, count,
ppos, "%d\n",
wl->conf.fwlog.output);
}
static ssize_t fw_logger_write(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct wl1271 *wl = file->private_data;
unsigned long value;
int ret;
ret = kstrtoul_from_user(user_buf, count, 0, &value);
if (ret < 0) {
wl1271_warning("illegal value in fw_logger");
return -EINVAL;
}
if ((value > 2) || (value == 0)) {
wl1271_warning("fw_logger value must be 1-UART 2-SDIO");
return -ERANGE;
}
if (wl->conf.fwlog.output == 0) {
wl1271_warning("invalid operation - fw logger disabled by default, please change mode via wlconf");
return -EINVAL;
}
mutex_lock(&wl->mutex);
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0) {
count = ret;
goto out;
}
wl->conf.fwlog.output = value;
ret = wl12xx_cmd_config_fwlog(wl);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return count;
}
static const struct file_operations fw_logger_ops = {
.open = simple_open,
.read = fw_logger_read,
.write = fw_logger_write,
.llseek = default_llseek,
};
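/*
 * Usage sketch: fw_logger selects the firmware log output, provided
 * logging was already enabled via wlconf (wl->conf.fwlog.output != 0):
 *
 * echo 1 > .../wlcore/fw_logger   (UART)
 * echo 2 > .../wlcore/fw_logger   (SDIO)
 */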
static void wl1271_debugfs_add_files(struct wl1271 *wl,
struct dentry *rootdir)
{
struct dentry *streaming;
DEBUGFS_ADD(tx_queue_len, rootdir);
DEBUGFS_ADD(retry_count, rootdir);
DEBUGFS_ADD(excessive_retries, rootdir);
DEBUGFS_ADD(gpio_power, rootdir);
DEBUGFS_ADD(start_recovery, rootdir);
DEBUGFS_ADD(driver_state, rootdir);
DEBUGFS_ADD(vifs_state, rootdir);
DEBUGFS_ADD(dtim_interval, rootdir);
DEBUGFS_ADD(suspend_dtim_interval, rootdir);
DEBUGFS_ADD(beacon_interval, rootdir);
DEBUGFS_ADD(beacon_filtering, rootdir);
DEBUGFS_ADD(dynamic_ps_timeout, rootdir);
DEBUGFS_ADD(forced_ps, rootdir);
DEBUGFS_ADD(split_scan_timeout, rootdir);
DEBUGFS_ADD(irq_pkt_threshold, rootdir);
DEBUGFS_ADD(irq_blk_threshold, rootdir);
DEBUGFS_ADD(irq_timeout, rootdir);
DEBUGFS_ADD(fw_stats_raw, rootdir);
DEBUGFS_ADD(sleep_auth, rootdir);
DEBUGFS_ADD(fw_logger, rootdir);
streaming = debugfs_create_dir("rx_streaming", rootdir);
DEBUGFS_ADD_PREFIX(rx_streaming, interval, streaming);
DEBUGFS_ADD_PREFIX(rx_streaming, always, streaming);
DEBUGFS_ADD_PREFIX(dev, mem, rootdir);
}
void wl1271_debugfs_reset(struct wl1271 *wl)
{
if (!wl->stats.fw_stats)
return;
memset(wl->stats.fw_stats, 0, wl->stats.fw_stats_len);
wl->stats.retry_count = 0;
wl->stats.excessive_retries = 0;
}
int wl1271_debugfs_init(struct wl1271 *wl)
{
int ret;
struct dentry *rootdir;
rootdir = debugfs_create_dir(KBUILD_MODNAME,
wl->hw->wiphy->debugfsdir);
wl->stats.fw_stats = kzalloc(wl->stats.fw_stats_len, GFP_KERNEL);
if (!wl->stats.fw_stats) {
ret = -ENOMEM;
goto out_remove;
}
wl->stats.fw_stats_update = jiffies;
wl1271_debugfs_add_files(wl, rootdir);
ret = wlcore_debugfs_init(wl, rootdir);
if (ret < 0)
goto out_exit;
goto out;
out_exit:
wl1271_debugfs_exit(wl);
out_remove:
debugfs_remove_recursive(rootdir);
out:
return ret;
}
void wl1271_debugfs_exit(struct wl1271 *wl)
{
kfree(wl->stats.fw_stats);
wl->stats.fw_stats = NULL;
}
|
linux-master
|
drivers/net/wireless/ti/wlcore/debugfs.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2009 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "debug.h"
#include "init.h"
#include "wl12xx_80211.h"
#include "acx.h"
#include "cmd.h"
#include "tx.h"
#include "io.h"
#include "hw_ops.h"
int wl1271_init_templates_config(struct wl1271 *wl)
{
int ret, i;
size_t max_size;
/* send empty templates for fw memory reservation */
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
wl->scan_templ_id_2_4, NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
wl->scan_templ_id_5,
NULL, WL1271_CMD_TEMPL_MAX_SIZE, 0,
WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
if (wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL) {
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
wl->sched_scan_templ_id_2_4,
NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
wl->sched_scan_templ_id_5,
NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
}
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_NULL_DATA, NULL,
sizeof(struct wl12xx_null_data_template),
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_PS_POLL, NULL,
sizeof(struct wl12xx_ps_poll_template),
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_QOS_NULL_DATA, NULL,
sizeof
(struct ieee80211_qos_hdr),
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_PROBE_RESPONSE, NULL,
WL1271_CMD_TEMPL_DFLT_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_BEACON, NULL,
WL1271_CMD_TEMPL_DFLT_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
max_size = sizeof(struct wl12xx_arp_rsp_template) +
WL1271_EXTRA_SPACE_MAX;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_ARP_RSP, NULL,
max_size,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
/*
* Put very large empty placeholders for all templates. These
* reserve memory for later.
*/
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_AP_PROBE_RESPONSE, NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_AP_BEACON, NULL,
WL1271_CMD_TEMPL_MAX_SIZE,
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_DEAUTH_AP, NULL,
sizeof
(struct wl12xx_disconn_template),
0, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
for (i = 0; i < WLCORE_MAX_KLV_TEMPLATES; i++) {
ret = wl1271_cmd_template_set(wl, WL12XX_INVALID_ROLE_ID,
CMD_TEMPL_KLV, NULL,
sizeof(struct ieee80211_qos_hdr),
i, WL1271_RATE_AUTOMATIC);
if (ret < 0)
return ret;
}
return 0;
}
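/*
 * Note: every template above is set with a NULL buffer - this only
 * reserves firmware memory for the largest frame of each kind. The real
 * contents are programmed later; e.g. wl1271_ap_init_deauth_template()
 * below re-sets CMD_TEMPL_DEAUTH_AP with an actual deauth frame.
 */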
static int wl1271_ap_init_deauth_template(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
struct wl12xx_disconn_template *tmpl;
int ret;
u32 rate;
tmpl = kzalloc(sizeof(*tmpl), GFP_KERNEL);
if (!tmpl) {
ret = -ENOMEM;
goto out;
}
tmpl->header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_DEAUTH);
rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_DEAUTH_AP,
tmpl, sizeof(*tmpl), 0, rate);
out:
kfree(tmpl);
return ret;
}
static int wl1271_ap_init_null_template(struct wl1271 *wl,
struct ieee80211_vif *vif)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_hdr_3addr *nullfunc;
int ret;
u32 rate;
nullfunc = kzalloc(sizeof(*nullfunc), GFP_KERNEL);
if (!nullfunc) {
ret = -ENOMEM;
goto out;
}
nullfunc->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_FROMDS);
/* nullfunc->addr1 is filled by FW */
memcpy(nullfunc->addr2, vif->addr, ETH_ALEN);
memcpy(nullfunc->addr3, vif->addr, ETH_ALEN);
rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_NULL_DATA, nullfunc,
sizeof(*nullfunc), 0, rate);
out:
kfree(nullfunc);
return ret;
}
static int wl1271_ap_init_qos_null_template(struct wl1271 *wl,
struct ieee80211_vif *vif)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_qos_hdr *qosnull;
int ret;
u32 rate;
qosnull = kzalloc(sizeof(*qosnull), GFP_KERNEL);
if (!qosnull) {
ret = -ENOMEM;
goto out;
}
qosnull->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_QOS_NULLFUNC |
IEEE80211_FCTL_FROMDS);
/* qosnull->addr1 is filled by FW */
memcpy(qosnull->addr2, vif->addr, ETH_ALEN);
memcpy(qosnull->addr3, vif->addr, ETH_ALEN);
rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_QOS_NULL_DATA, qosnull,
sizeof(*qosnull), 0, rate);
out:
kfree(qosnull);
return ret;
}
static int wl12xx_init_rx_config(struct wl1271 *wl)
{
int ret;
ret = wl1271_acx_rx_msdu_life_time(wl);
if (ret < 0)
return ret;
return 0;
}
static int wl12xx_init_phy_vif_config(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
int ret;
ret = wl1271_acx_slot(wl, wlvif, DEFAULT_SLOT_TIME);
if (ret < 0)
return ret;
ret = wl1271_acx_service_period_timeout(wl, wlvif);
if (ret < 0)
return ret;
ret = wl1271_acx_rts_threshold(wl, wlvif, wl->hw->wiphy->rts_threshold);
if (ret < 0)
return ret;
return 0;
}
static int wl1271_init_sta_beacon_filter(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
int ret;
ret = wl1271_acx_beacon_filter_table(wl, wlvif);
if (ret < 0)
return ret;
/* disable beacon filtering until we get the first beacon */
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
if (ret < 0)
return ret;
return 0;
}
int wl1271_init_pta(struct wl1271 *wl)
{
int ret;
ret = wl12xx_acx_sg_cfg(wl);
if (ret < 0)
return ret;
ret = wl1271_acx_sg_enable(wl, wl->sg_enabled);
if (ret < 0)
return ret;
return 0;
}
int wl1271_init_energy_detection(struct wl1271 *wl)
{
int ret;
ret = wl1271_acx_cca_threshold(wl);
if (ret < 0)
return ret;
return 0;
}
static int wl1271_init_beacon_broadcast(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
int ret;
ret = wl1271_acx_bcn_dtim_options(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
static int wl12xx_init_fwlog(struct wl1271 *wl)
{
int ret;
if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
return 0;
ret = wl12xx_cmd_config_fwlog(wl);
if (ret < 0)
return ret;
return 0;
}
/* generic sta initialization (non vif-specific) */
int wl1271_sta_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
/* PS config */
ret = wl12xx_acx_config_ps(wl, wlvif);
if (ret < 0)
return ret;
/* FM WLAN coexistence */
ret = wl1271_acx_fm_coex(wl);
if (ret < 0)
return ret;
ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
static int wl1271_sta_hw_init_post_mem(struct wl1271 *wl,
struct ieee80211_vif *vif)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
/* disable the keep-alive feature */
ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
return ret;
return 0;
}
/* generic ap initialization (non vif-specific) */
static int wl1271_ap_hw_init(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
ret = wl1271_init_ap_rates(wl, wlvif);
if (ret < 0)
return ret;
/* configure AP sleep, if enabled */
ret = wlcore_hw_ap_sleep(wl);
if (ret < 0)
return ret;
return 0;
}
int wl1271_ap_init_templates(struct wl1271 *wl, struct ieee80211_vif *vif)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
ret = wl1271_ap_init_deauth_template(wl, wlvif);
if (ret < 0)
return ret;
ret = wl1271_ap_init_null_template(wl, vif);
if (ret < 0)
return ret;
ret = wl1271_ap_init_qos_null_template(wl, vif);
if (ret < 0)
return ret;
/*
* when operating as AP we want to receive external beacons for
* configuring ERP protection.
*/
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
if (ret < 0)
return ret;
return 0;
}
static int wl1271_ap_hw_init_post_mem(struct wl1271 *wl,
struct ieee80211_vif *vif)
{
return wl1271_ap_init_templates(wl, vif);
}
int wl1271_init_ap_rates(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i, ret;
struct conf_tx_rate_class rc;
u32 supported_rates;
wl1271_debug(DEBUG_AP, "AP basic rate set: 0x%x",
wlvif->basic_rate_set);
if (wlvif->basic_rate_set == 0)
return -EINVAL;
rc.enabled_rates = wlvif->basic_rate_set;
rc.long_retry_limit = 10;
rc.short_retry_limit = 10;
rc.aflags = 0;
ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.mgmt_rate_idx);
if (ret < 0)
return ret;
/* use the min basic rate for AP broadcast/multicast */
rc.enabled_rates = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
rc.short_retry_limit = 10;
rc.long_retry_limit = 10;
rc.aflags = 0;
ret = wl1271_acx_ap_rate_policy(wl, &rc, wlvif->ap.bcast_rate_idx);
if (ret < 0)
return ret;
/*
* If the basic rates contain OFDM rates, use OFDM only
* rates for unicast TX as well. Else use all supported rates.
*/
if (wl->ofdm_only_ap && (wlvif->basic_rate_set & CONF_TX_OFDM_RATES))
supported_rates = CONF_TX_OFDM_RATES;
else
supported_rates = CONF_TX_ENABLED_RATES;
/* unconditionally enable HT rates */
supported_rates |= CONF_TX_MCS_RATES;
/* get extra MIMO or wide-chan rates where the HW supports it */
supported_rates |= wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
/* configure unicast TX rate classes */
for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
rc.enabled_rates = supported_rates;
rc.short_retry_limit = 10;
rc.long_retry_limit = 10;
rc.aflags = 0;
ret = wl1271_acx_ap_rate_policy(wl, &rc,
wlvif->ap.ucast_rate_idx[i]);
if (ret < 0)
return ret;
}
return 0;
}
static int wl1271_set_ba_policies(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
/* Reset the BA RX indicators */
wlvif->ba_allowed = true;
wl->ba_rx_session_count = 0;
/* BA is supported in STA/AP modes */
if (wlvif->bss_type != BSS_TYPE_AP_BSS &&
wlvif->bss_type != BSS_TYPE_STA_BSS) {
wlvif->ba_support = false;
return 0;
}
wlvif->ba_support = true;
/* 802.11n initiator BA session setting */
return wl12xx_acx_set_ba_initiator_policy(wl, wlvif);
}
/* vif-specific initialization */
static int wl12xx_init_sta_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
ret = wl1271_acx_group_address_tbl(wl, wlvif, true, NULL, 0);
if (ret < 0)
return ret;
/* Initialize connection monitoring thresholds */
ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
if (ret < 0)
return ret;
/* Beacon filtering */
ret = wl1271_init_sta_beacon_filter(wl, wlvif);
if (ret < 0)
return ret;
/* Beacons and broadcast settings */
ret = wl1271_init_beacon_broadcast(wl, wlvif);
if (ret < 0)
return ret;
/* Configure rssi/snr averaging weights */
ret = wl1271_acx_rssi_snr_avg_weights(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
/* vif-specific initialization */
static int wl12xx_init_ap_role(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
ret = wl1271_acx_ap_max_tx_retry(wl, wlvif);
if (ret < 0)
return ret;
/* initialize Tx power */
ret = wl1271_acx_tx_power(wl, wlvif, wlvif->power_level);
if (ret < 0)
return ret;
if (wl->radar_debug_mode)
wlcore_cmd_generic_cfg(wl, wlvif,
WLCORE_CFG_FEATURE_RADAR_DEBUG,
wl->radar_debug_mode, 0);
return 0;
}
int wl1271_init_vif_specific(struct wl1271 *wl, struct ieee80211_vif *vif)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct conf_tx_ac_category *conf_ac;
struct conf_tx_tid *conf_tid;
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret, i;
/* consider all existing roles before configuring psm. */
if (wl->ap_count == 0 && is_ap) { /* first AP */
ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
if (ret < 0)
return ret;
/* unmask ap events */
wl->event_mask |= wl->ap_event_mask;
ret = wl1271_event_unmask(wl);
if (ret < 0)
return ret;
/* first STA, no APs */
} else if (wl->sta_count == 0 && wl->ap_count == 0 && !is_ap) {
u8 sta_auth = wl->conf.conn.sta_sleep_auth;
/* Configure for power according to debugfs */
if (sta_auth != WL1271_PSM_ILLEGAL)
ret = wl1271_acx_sleep_auth(wl, sta_auth);
/* Configure for ELP power saving */
else
ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
if (ret < 0)
return ret;
}
/* Mode specific init */
if (is_ap) {
ret = wl1271_ap_hw_init(wl, wlvif);
if (ret < 0)
return ret;
ret = wl12xx_init_ap_role(wl, wlvif);
if (ret < 0)
return ret;
} else {
ret = wl1271_sta_hw_init(wl, wlvif);
if (ret < 0)
return ret;
ret = wl12xx_init_sta_role(wl, wlvif);
if (ret < 0)
return ret;
}
wl12xx_init_phy_vif_config(wl, wlvif);
/* Default TID/AC configuration */
BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count);
for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
conf_ac = &wl->conf.tx.ac_conf[i];
ret = wl1271_acx_ac_cfg(wl, wlvif, conf_ac->ac,
conf_ac->cw_min, conf_ac->cw_max,
conf_ac->aifsn, conf_ac->tx_op_limit);
if (ret < 0)
return ret;
conf_tid = &wl->conf.tx.tid_conf[i];
ret = wl1271_acx_tid_cfg(wl, wlvif,
conf_tid->queue_id,
conf_tid->channel_type,
conf_tid->tsid,
conf_tid->ps_scheme,
conf_tid->ack_policy,
conf_tid->apsd_conf[0],
conf_tid->apsd_conf[1]);
if (ret < 0)
return ret;
}
/* Configure HW encryption */
ret = wl1271_acx_feature_cfg(wl, wlvif);
if (ret < 0)
return ret;
/* Mode specific init - post mem init */
if (is_ap)
ret = wl1271_ap_hw_init_post_mem(wl, vif);
else
ret = wl1271_sta_hw_init_post_mem(wl, vif);
if (ret < 0)
return ret;
/* Configure initiator BA sessions policies */
ret = wl1271_set_ba_policies(wl, wlvif);
if (ret < 0)
return ret;
ret = wlcore_hw_init_vif(wl, wlvif);
if (ret < 0)
return ret;
return 0;
}
int wl1271_hw_init(struct wl1271 *wl)
{
int ret;
/* Chip-specific hw init */
ret = wl->ops->hw_init(wl);
if (ret < 0)
return ret;
/* Init templates */
ret = wl1271_init_templates_config(wl);
if (ret < 0)
return ret;
ret = wl12xx_acx_mem_cfg(wl);
if (ret < 0)
return ret;
/* Configure the FW logger */
ret = wl12xx_init_fwlog(wl);
if (ret < 0)
return ret;
ret = wlcore_cmd_regdomain_config_locked(wl);
if (ret < 0)
return ret;
/* Bluetooth WLAN coexistence */
ret = wl1271_init_pta(wl);
if (ret < 0)
return ret;
/* Default memory configuration */
ret = wl1271_acx_init_mem_config(wl);
if (ret < 0)
return ret;
/* RX config */
ret = wl12xx_init_rx_config(wl);
if (ret < 0)
goto out_free_memmap;
ret = wl1271_acx_dco_itrim_params(wl);
if (ret < 0)
goto out_free_memmap;
/* Configure TX patch complete interrupt behavior */
ret = wl1271_acx_tx_config_options(wl);
if (ret < 0)
goto out_free_memmap;
/* RX complete interrupt pacing */
ret = wl1271_acx_init_rx_interrupt(wl);
if (ret < 0)
goto out_free_memmap;
/* Energy detection */
ret = wl1271_init_energy_detection(wl);
if (ret < 0)
goto out_free_memmap;
/* Default fragmentation threshold */
ret = wl1271_acx_frag_threshold(wl, wl->hw->wiphy->frag_threshold);
if (ret < 0)
goto out_free_memmap;
/* Enable data path */
ret = wl1271_cmd_data_path(wl, 1);
if (ret < 0)
goto out_free_memmap;
/* configure PM */
ret = wl1271_acx_pm_config(wl);
if (ret < 0)
goto out_free_memmap;
ret = wl12xx_acx_set_rate_mgmt_params(wl);
if (ret < 0)
goto out_free_memmap;
/* configure hangover */
ret = wl12xx_acx_config_hangover(wl);
if (ret < 0)
goto out_free_memmap;
return 0;
out_free_memmap:
kfree(wl->target_mem_map);
wl->target_mem_map = NULL;
return ret;
}
|
linux-master
|
drivers/net/wireless/ti/wlcore/init.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2010 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include "testmode.h"
#include <linux/pm_runtime.h>
#include <linux/slab.h>
#include <net/genetlink.h>
#include "wlcore.h"
#include "debug.h"
#include "acx.h"
#include "io.h"
#define WL1271_TM_MAX_DATA_LENGTH 1024
enum wl1271_tm_commands {
WL1271_TM_CMD_UNSPEC,
WL1271_TM_CMD_TEST,
WL1271_TM_CMD_INTERROGATE,
WL1271_TM_CMD_CONFIGURE,
WL1271_TM_CMD_NVS_PUSH, /* Not in use. Keep to not break ABI */
WL1271_TM_CMD_SET_PLT_MODE,
WL1271_TM_CMD_RECOVER, /* Not in use. Keep to not break ABI */
WL1271_TM_CMD_GET_MAC,
__WL1271_TM_CMD_AFTER_LAST
};
#define WL1271_TM_CMD_MAX (__WL1271_TM_CMD_AFTER_LAST - 1)
enum wl1271_tm_attrs {
WL1271_TM_ATTR_UNSPEC,
WL1271_TM_ATTR_CMD_ID,
WL1271_TM_ATTR_ANSWER,
WL1271_TM_ATTR_DATA,
WL1271_TM_ATTR_IE_ID,
WL1271_TM_ATTR_PLT_MODE,
__WL1271_TM_ATTR_AFTER_LAST
};
#define WL1271_TM_ATTR_MAX (__WL1271_TM_ATTR_AFTER_LAST - 1)
static struct nla_policy wl1271_tm_policy[WL1271_TM_ATTR_MAX + 1] = {
[WL1271_TM_ATTR_CMD_ID] = { .type = NLA_U32 },
[WL1271_TM_ATTR_ANSWER] = { .type = NLA_U8 },
[WL1271_TM_ATTR_DATA] = { .type = NLA_BINARY,
.len = WL1271_TM_MAX_DATA_LENGTH },
[WL1271_TM_ATTR_IE_ID] = { .type = NLA_U32 },
[WL1271_TM_ATTR_PLT_MODE] = { .type = NLA_U32 },
};
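/*
 * For reference (assumed userspace flow): these attributes arrive
 * nested in NL80211_ATTR_TESTDATA of an NL80211_CMD_TESTMODE request
 * and are parsed by wl1271_tm_cmd() below. A minimal "test" payload
 * carries:
 *
 * WL1271_TM_ATTR_CMD_ID = WL1271_TM_CMD_TEST  (u32)
 * WL1271_TM_ATTR_DATA   = <command blob>      (binary)
 * WL1271_TM_ATTR_ANSWER = 0 or 1              (u8, optional)
 */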
static int wl1271_tm_cmd_test(struct wl1271 *wl, struct nlattr *tb[])
{
int buf_len, ret, len;
struct sk_buff *skb;
void *buf;
u8 answer = 0;
wl1271_debug(DEBUG_TESTMODE, "testmode cmd test");
if (!tb[WL1271_TM_ATTR_DATA])
return -EINVAL;
buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);
if (tb[WL1271_TM_ATTR_ANSWER])
answer = nla_get_u8(tb[WL1271_TM_ATTR_ANSWER]);
if (buf_len > sizeof(struct wl1271_command))
return -EMSGSIZE;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EINVAL;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl1271_cmd_test(wl, buf, buf_len, answer);
if (ret < 0) {
wl1271_warning("testmode cmd test failed: %d", ret);
goto out_sleep;
}
if (answer) {
/* If we got a BIP calibration answer, print the radio status */
struct wl1271_cmd_cal_p2g *params =
(struct wl1271_cmd_cal_p2g *) buf;
s16 radio_status = (s16) le16_to_cpu(params->radio_status);
if (params->test.id == TEST_CMD_P2G_CAL &&
radio_status < 0)
wl1271_warning("testmode cmd: radio status=%d",
radio_status);
else
wl1271_info("testmode cmd: radio status=%d",
radio_status);
len = nla_total_size(buf_len);
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
if (!skb) {
ret = -ENOMEM;
goto out_sleep;
}
if (nla_put(skb, WL1271_TM_ATTR_DATA, buf_len, buf)) {
kfree_skb(skb);
ret = -EMSGSIZE;
goto out_sleep;
}
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
goto out_sleep;
}
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static int wl1271_tm_cmd_interrogate(struct wl1271 *wl, struct nlattr *tb[])
{
int ret;
struct wl1271_command *cmd;
struct sk_buff *skb;
u8 ie_id;
wl1271_debug(DEBUG_TESTMODE, "testmode cmd interrogate");
if (!tb[WL1271_TM_ATTR_IE_ID])
return -EINVAL;
ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EINVAL;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out_sleep;
}
ret = wl1271_cmd_interrogate(wl, ie_id, cmd,
sizeof(struct acx_header), sizeof(*cmd));
if (ret < 0) {
wl1271_warning("testmode cmd interrogate failed: %d", ret);
goto out_free;
}
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, sizeof(*cmd));
if (!skb) {
ret = -ENOMEM;
goto out_free;
}
if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(*cmd), cmd)) {
kfree_skb(skb);
ret = -EMSGSIZE;
goto out_free;
}
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
goto out_free;
out_free:
kfree(cmd);
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static int wl1271_tm_cmd_configure(struct wl1271 *wl, struct nlattr *tb[])
{
int buf_len, ret;
void *buf;
u8 ie_id;
wl1271_debug(DEBUG_TESTMODE, "testmode cmd configure");
if (!tb[WL1271_TM_ATTR_DATA])
return -EINVAL;
if (!tb[WL1271_TM_ATTR_IE_ID])
return -EINVAL;
ie_id = nla_get_u8(tb[WL1271_TM_ATTR_IE_ID]);
buf = nla_data(tb[WL1271_TM_ATTR_DATA]);
buf_len = nla_len(tb[WL1271_TM_ATTR_DATA]);
if (buf_len > sizeof(struct wl1271_command))
return -EMSGSIZE;
mutex_lock(&wl->mutex);
ret = wl1271_cmd_configure(wl, ie_id, buf, buf_len);
mutex_unlock(&wl->mutex);
if (ret < 0) {
wl1271_warning("testmode cmd configure failed: %d", ret);
return ret;
}
return 0;
}
static int wl1271_tm_detect_fem(struct wl1271 *wl, struct nlattr *tb[])
{
/* return FEM type */
int ret, len;
struct sk_buff *skb;
ret = wl1271_plt_start(wl, PLT_FEM_DETECT);
if (ret < 0)
goto out;
mutex_lock(&wl->mutex);
len = nla_total_size(sizeof(wl->fem_manuf));
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, len);
if (!skb) {
ret = -ENOMEM;
goto out_mutex;
}
if (nla_put(skb, WL1271_TM_ATTR_DATA, sizeof(wl->fem_manuf),
&wl->fem_manuf)) {
kfree_skb(skb);
ret = -EMSGSIZE;
goto out_mutex;
}
ret = cfg80211_testmode_reply(skb);
out_mutex:
mutex_unlock(&wl->mutex);
/* We always stop plt after DETECT mode */
wl1271_plt_stop(wl);
out:
return ret;
}
static int wl1271_tm_cmd_set_plt_mode(struct wl1271 *wl, struct nlattr *tb[])
{
u32 val;
int ret;
wl1271_debug(DEBUG_TESTMODE, "testmode cmd set plt mode");
if (!tb[WL1271_TM_ATTR_PLT_MODE])
return -EINVAL;
val = nla_get_u32(tb[WL1271_TM_ATTR_PLT_MODE]);
switch (val) {
case PLT_OFF:
ret = wl1271_plt_stop(wl);
break;
case PLT_ON:
case PLT_CHIP_AWAKE:
ret = wl1271_plt_start(wl, val);
break;
case PLT_FEM_DETECT:
ret = wl1271_tm_detect_fem(wl, tb);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
static int wl12xx_tm_cmd_get_mac(struct wl1271 *wl, struct nlattr *tb[])
{
struct sk_buff *skb;
u8 mac_addr[ETH_ALEN];
int ret = 0;
mutex_lock(&wl->mutex);
if (!wl->plt) {
ret = -EINVAL;
goto out;
}
if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
ret = -EOPNOTSUPP;
goto out;
}
mac_addr[0] = (u8)(wl->fuse_oui_addr >> 16);
mac_addr[1] = (u8)(wl->fuse_oui_addr >> 8);
mac_addr[2] = (u8) wl->fuse_oui_addr;
mac_addr[3] = (u8)(wl->fuse_nic_addr >> 16);
mac_addr[4] = (u8)(wl->fuse_nic_addr >> 8);
mac_addr[5] = (u8) wl->fuse_nic_addr;
skb = cfg80211_testmode_alloc_reply_skb(wl->hw->wiphy, ETH_ALEN);
if (!skb) {
ret = -ENOMEM;
goto out;
}
if (nla_put(skb, WL1271_TM_ATTR_DATA, ETH_ALEN, mac_addr)) {
kfree_skb(skb);
ret = -EMSGSIZE;
goto out;
}
ret = cfg80211_testmode_reply(skb);
if (ret < 0)
goto out;
out:
mutex_unlock(&wl->mutex);
return ret;
}
int wl1271_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct wl1271 *wl = hw->priv;
struct nlattr *tb[WL1271_TM_ATTR_MAX + 1];
u32 nla_cmd;
int err;
err = nla_parse_deprecated(tb, WL1271_TM_ATTR_MAX, data, len,
wl1271_tm_policy, NULL);
if (err)
return err;
if (!tb[WL1271_TM_ATTR_CMD_ID])
return -EINVAL;
nla_cmd = nla_get_u32(tb[WL1271_TM_ATTR_CMD_ID]);
/* Only SET_PLT_MODE is allowed in case of mode PLT_CHIP_AWAKE */
if (wl->plt_mode == PLT_CHIP_AWAKE &&
nla_cmd != WL1271_TM_CMD_SET_PLT_MODE)
return -EOPNOTSUPP;
switch (nla_cmd) {
case WL1271_TM_CMD_TEST:
return wl1271_tm_cmd_test(wl, tb);
case WL1271_TM_CMD_INTERROGATE:
return wl1271_tm_cmd_interrogate(wl, tb);
case WL1271_TM_CMD_CONFIGURE:
return wl1271_tm_cmd_configure(wl, tb);
case WL1271_TM_CMD_SET_PLT_MODE:
return wl1271_tm_cmd_set_plt_mode(wl, tb);
case WL1271_TM_CMD_GET_MAC:
return wl12xx_tm_cmd_get_mac(wl, tb);
default:
return -EOPNOTSUPP;
}
}
|
linux-master
|
drivers/net/wireless/ti/wlcore/testmode.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2009-2010 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include <linux/ieee80211.h>
#include <linux/pm_runtime.h>
#include "wlcore.h"
#include "debug.h"
#include "cmd.h"
#include "scan.h"
#include "acx.h"
#include "tx.h"
void wl1271_scan_complete_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
struct wl12xx_vif *wlvif;
struct cfg80211_scan_info info = {
.aborted = false,
};
int ret;
dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1271, scan_complete_work);
wl1271_debug(DEBUG_SCAN, "Scanning complete");
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
wlvif = wl->scan_wlvif;
/*
* Rearm the tx watchdog just before idling scan. This
* prevents just-finished scans from triggering the watchdog
*/
wl12xx_rearm_tx_watchdog_locked(wl);
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan.req = NULL;
wl->scan_wlvif = NULL;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
/* restore hardware connection monitoring template */
wl1271_cmd_build_ap_probe_req(wl, wlvif, wlvif->probereq);
}
if (wl->scan.failed) {
wl1271_info("Scan completed due to error.");
wl12xx_queue_recovery_work(wl);
}
wlcore_cmd_regdomain_config_locked(wl);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
ieee80211_scan_completed(wl->hw, &info);
out:
mutex_unlock(&wl->mutex);
}
static void wlcore_started_vifs_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
bool active = false;
int *count = (int *)data;
/*
* count active interfaces according to interface type.
* checking only bss_conf.idle is bad for some cases, e.g.
* we don't want to count sta in p2p_find as active interface.
*/
switch (wlvif->bss_type) {
case BSS_TYPE_STA_BSS:
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
active = true;
break;
case BSS_TYPE_AP_BSS:
if (wlvif->wl->active_sta_count > 0)
active = true;
break;
default:
break;
}
if (active)
(*count)++;
}
static int wlcore_count_started_vifs(struct wl1271 *wl)
{
int count = 0;
ieee80211_iterate_active_interfaces_atomic(wl->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
wlcore_started_vifs_iter, &count);
return count;
}
static int
wlcore_scan_get_channels(struct wl1271 *wl,
struct ieee80211_channel *req_channels[],
u32 n_channels,
u32 n_ssids,
struct conn_scan_ch_params *channels,
u32 band, bool radar, bool passive,
int start, int max_channels,
u8 *n_pactive_ch,
int scan_type)
{
int i, j;
u32 flags;
bool force_passive = !n_ssids;
u32 min_dwell_time_active, max_dwell_time_active;
u32 dwell_time_passive, dwell_time_dfs;
/* configure dwell times according to scan type */
if (scan_type == SCAN_TYPE_SEARCH) {
struct conf_scan_settings *c = &wl->conf.scan;
bool active_vif_exists = !!wlcore_count_started_vifs(wl);
min_dwell_time_active = active_vif_exists ?
c->min_dwell_time_active :
c->min_dwell_time_active_long;
max_dwell_time_active = active_vif_exists ?
c->max_dwell_time_active :
c->max_dwell_time_active_long;
dwell_time_passive = c->dwell_time_passive;
dwell_time_dfs = c->dwell_time_dfs;
} else {
struct conf_sched_scan_settings *c = &wl->conf.sched_scan;
u32 delta_per_probe;
if (band == NL80211_BAND_5GHZ)
delta_per_probe = c->dwell_time_delta_per_probe_5;
else
delta_per_probe = c->dwell_time_delta_per_probe;
min_dwell_time_active = c->base_dwell_time +
n_ssids * c->num_probe_reqs * delta_per_probe;
max_dwell_time_active = min_dwell_time_active +
c->max_dwell_time_delta;
dwell_time_passive = c->dwell_time_passive;
dwell_time_dfs = c->dwell_time_dfs;
}
min_dwell_time_active = DIV_ROUND_UP(min_dwell_time_active, 1000);
max_dwell_time_active = DIV_ROUND_UP(max_dwell_time_active, 1000);
dwell_time_passive = DIV_ROUND_UP(dwell_time_passive, 1000);
dwell_time_dfs = DIV_ROUND_UP(dwell_time_dfs, 1000);
for (i = 0, j = start;
i < n_channels && j < max_channels;
i++) {
flags = req_channels[i]->flags;
if (force_passive)
flags |= IEEE80211_CHAN_NO_IR;
if ((req_channels[i]->band == band) &&
!(flags & IEEE80211_CHAN_DISABLED) &&
(!!(flags & IEEE80211_CHAN_RADAR) == radar) &&
/* if radar is set, we ignore the passive flag */
(radar ||
!!(flags & IEEE80211_CHAN_NO_IR) == passive)) {
if (flags & IEEE80211_CHAN_RADAR) {
channels[j].flags |= SCAN_CHANNEL_FLAGS_DFS;
channels[j].passive_duration =
cpu_to_le16(dwell_time_dfs);
} else {
channels[j].passive_duration =
cpu_to_le16(dwell_time_passive);
}
channels[j].min_duration =
cpu_to_le16(min_dwell_time_active);
channels[j].max_duration =
cpu_to_le16(max_dwell_time_active);
channels[j].tx_power_att = req_channels[i]->max_power;
channels[j].channel = req_channels[i]->hw_value;
if (n_pactive_ch &&
(band == NL80211_BAND_2GHZ) &&
(channels[j].channel >= 12) &&
(channels[j].channel <= 14) &&
(flags & IEEE80211_CHAN_NO_IR) &&
!force_passive) {
/* pactive channels treated as DFS */
channels[j].flags = SCAN_CHANNEL_FLAGS_DFS;
/*
* n_pactive_ch is counted down from the end of
* the passive channel list
*/
(*n_pactive_ch)++;
wl1271_debug(DEBUG_SCAN, "n_pactive_ch = %d",
*n_pactive_ch);
}
wl1271_debug(DEBUG_SCAN, "freq %d, ch. %d, flags 0x%x, power %d, min/max_dwell %d/%d%s%s",
req_channels[i]->center_freq,
req_channels[i]->hw_value,
req_channels[i]->flags,
req_channels[i]->max_power,
min_dwell_time_active,
max_dwell_time_active,
flags & IEEE80211_CHAN_RADAR ?
", DFS" : "",
flags & IEEE80211_CHAN_NO_IR ?
", NO-IR" : "");
j++;
}
}
return j - start;
}
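/*
 * Illustrative sketch (not part of the driver): how the sched-scan
 * dwell times above are derived. The config values below are
 * hypothetical; the real ones come from wl->conf.sched_scan, and the
 * DIV_ROUND_UP(..., 1000) above suggests they are kept in microseconds.
 */
#if 0
static u32 example_sched_scan_min_dwell(void)
{
	u32 base_dwell_time = 7500;	/* hypothetical, usec */
	u32 num_probe_reqs = 2;		/* hypothetical */
	u32 delta_per_probe = 2000;	/* hypothetical, usec */
	u32 n_ssids = 1;

	/* same formula as in wlcore_scan_get_channels() */
	return base_dwell_time + n_ssids * num_probe_reqs * delta_per_probe;
	/* 7500 + 1 * 2 * 2000 = 11500 -> DIV_ROUND_UP(11500, 1000) = 12 */
}
#endif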
bool
wlcore_set_scan_chan_params(struct wl1271 *wl,
struct wlcore_scan_channels *cfg,
struct ieee80211_channel *channels[],
u32 n_channels,
u32 n_ssids,
int scan_type)
{
u8 n_pactive_ch = 0;
cfg->passive[0] =
wlcore_scan_get_channels(wl,
channels,
n_channels,
n_ssids,
cfg->channels_2,
NL80211_BAND_2GHZ,
false, true, 0,
MAX_CHANNELS_2GHZ,
&n_pactive_ch,
scan_type);
cfg->active[0] =
wlcore_scan_get_channels(wl,
channels,
n_channels,
n_ssids,
cfg->channels_2,
NL80211_BAND_2GHZ,
false, false,
cfg->passive[0],
MAX_CHANNELS_2GHZ,
&n_pactive_ch,
scan_type);
cfg->passive[1] =
wlcore_scan_get_channels(wl,
channels,
n_channels,
n_ssids,
cfg->channels_5,
NL80211_BAND_5GHZ,
false, true, 0,
wl->max_channels_5,
&n_pactive_ch,
scan_type);
cfg->dfs =
wlcore_scan_get_channels(wl,
channels,
n_channels,
n_ssids,
cfg->channels_5,
NL80211_BAND_5GHZ,
true, true,
cfg->passive[1],
wl->max_channels_5,
&n_pactive_ch,
scan_type);
cfg->active[1] =
wlcore_scan_get_channels(wl,
channels,
n_channels,
n_ssids,
cfg->channels_5,
NL80211_BAND_5GHZ,
false, false,
cfg->passive[1] + cfg->dfs,
wl->max_channels_5,
&n_pactive_ch,
scan_type);
/* 802.11j channels are not supported yet */
cfg->passive[2] = 0;
cfg->active[2] = 0;
cfg->passive_active = n_pactive_ch;
wl1271_debug(DEBUG_SCAN, " 2.4GHz: active %d passive %d",
cfg->active[0], cfg->passive[0]);
wl1271_debug(DEBUG_SCAN, " 5GHz: active %d passive %d",
cfg->active[1], cfg->passive[1]);
wl1271_debug(DEBUG_SCAN, " DFS: %d", cfg->dfs);
return cfg->passive[0] || cfg->active[0] ||
cfg->passive[1] || cfg->active[1] || cfg->dfs ||
cfg->passive[2] || cfg->active[2];
}
EXPORT_SYMBOL_GPL(wlcore_set_scan_chan_params);
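/*
 * Resulting layout of the channel arrays (illustrative summary):
 *
 *   cfg->channels_2: [ passive[0] | active[0] ]
 *   cfg->channels_5: [ passive[1] | dfs | active[1] ]
 *
 * Each wlcore_scan_get_channels() call appends at 'start', so the
 * count returned by one call becomes the start index of the next
 * chunk in the same array.
 */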
int wlcore_scan(struct wl1271 *wl, struct ieee80211_vif *vif,
const u8 *ssid, size_t ssid_len,
struct cfg80211_scan_request *req)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
/*
* cfg80211 should guarantee that we don't get more channels
* than what we have registered.
*/
BUG_ON(req->n_channels > WL1271_MAX_CHANNELS);
if (wl->scan.state != WL1271_SCAN_STATE_IDLE)
return -EBUSY;
wl->scan.state = WL1271_SCAN_STATE_2GHZ_ACTIVE;
if (ssid_len && ssid) {
wl->scan.ssid_len = ssid_len;
memcpy(wl->scan.ssid, ssid, ssid_len);
} else {
wl->scan.ssid_len = 0;
}
wl->scan_wlvif = wlvif;
wl->scan.req = req;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
/* we assume failure so that timeout scenarios are handled correctly */
wl->scan.failed = true;
ieee80211_queue_delayed_work(wl->hw, &wl->scan_complete_work,
msecs_to_jiffies(WL1271_SCAN_TIMEOUT));
wl->ops->scan_start(wl, wlvif, req);
return 0;
}
/* Returns the scan type to be used or a negative value on error */
int
wlcore_scan_sched_scan_ssid_list(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct cfg80211_sched_scan_request *req)
{
struct wl1271_cmd_sched_scan_ssid_list *cmd = NULL;
struct cfg80211_match_set *sets = req->match_sets;
struct cfg80211_ssid *ssids = req->ssids;
int ret = 0, type, i, j, n_match_ssids = 0;
wl1271_debug((DEBUG_CMD | DEBUG_SCAN), "cmd sched scan ssid list");
/* count the match sets that contain SSIDs */
for (i = 0; i < req->n_match_sets; i++)
if (sets[i].ssid.ssid_len > 0)
n_match_ssids++;
/* No filter, no ssids or only bcast ssid */
if (!n_match_ssids &&
(!req->n_ssids ||
(req->n_ssids == 1 && req->ssids[0].ssid_len == 0))) {
type = SCAN_SSID_FILTER_ANY;
goto out;
}
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->role_id = wlvif->role_id;
if (!n_match_ssids) {
/* No filter, with ssids */
type = SCAN_SSID_FILTER_DISABLED;
for (i = 0; i < req->n_ssids; i++) {
cmd->ssids[cmd->n_ssids].type = (ssids[i].ssid_len) ?
SCAN_SSID_TYPE_HIDDEN : SCAN_SSID_TYPE_PUBLIC;
cmd->ssids[cmd->n_ssids].len = ssids[i].ssid_len;
memcpy(cmd->ssids[cmd->n_ssids].ssid, ssids[i].ssid,
ssids[i].ssid_len);
cmd->n_ssids++;
}
} else {
type = SCAN_SSID_FILTER_LIST;
/* Add all SSIDs from the filters */
for (i = 0; i < req->n_match_sets; i++) {
/* ignore sets without SSIDs */
if (!sets[i].ssid.ssid_len)
continue;
cmd->ssids[cmd->n_ssids].type = SCAN_SSID_TYPE_PUBLIC;
cmd->ssids[cmd->n_ssids].len = sets[i].ssid.ssid_len;
memcpy(cmd->ssids[cmd->n_ssids].ssid,
sets[i].ssid.ssid, sets[i].ssid.ssid_len);
cmd->n_ssids++;
}
if ((req->n_ssids > 1) ||
(req->n_ssids == 1 && req->ssids[0].ssid_len > 0)) {
/*
* Mark all the SSIDs passed in the SSID list as HIDDEN,
* so they're used in probe requests.
*/
for (i = 0; i < req->n_ssids; i++) {
if (!req->ssids[i].ssid_len)
continue;
for (j = 0; j < cmd->n_ssids; j++)
if ((req->ssids[i].ssid_len ==
cmd->ssids[j].len) &&
!memcmp(req->ssids[i].ssid,
cmd->ssids[j].ssid,
req->ssids[i].ssid_len)) {
cmd->ssids[j].type =
SCAN_SSID_TYPE_HIDDEN;
break;
}
/* Fail if SSID isn't present in the filters */
if (j == cmd->n_ssids) {
ret = -EINVAL;
goto out_free;
}
}
}
}
ret = wl1271_cmd_send(wl, CMD_CONNECTION_SCAN_SSID_CFG, cmd,
sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("cmd sched scan ssid list failed");
goto out_free;
}
out_free:
kfree(cmd);
out:
if (ret < 0)
return ret;
return type;
}
EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_ssid_list);
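/*
 * Summary of the filter-type decision above (illustrative):
 *
 *   match sets with SSIDs?  requested SSIDs?     -> type
 *   no                      none / bcast only    -> SCAN_SSID_FILTER_ANY
 *   no                      yes                  -> SCAN_SSID_FILTER_DISABLED
 *   yes                     (any)                -> SCAN_SSID_FILTER_LIST
 *
 * In the LIST case every requested SSID must also appear in the match
 * sets, otherwise the function fails with -EINVAL.
 */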
void wlcore_scan_sched_scan_results(struct wl1271 *wl)
{
wl1271_debug(DEBUG_SCAN, "got periodic scan results");
ieee80211_sched_scan_results(wl->hw);
}
EXPORT_SYMBOL_GPL(wlcore_scan_sched_scan_results);
|
linux-master
|
drivers/net/wireless/ti/wlcore/scan.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wlcore
*
* Copyright (C) 2014 Texas Instruments. All rights reserved.
*/
#include <linux/pm_runtime.h>
#include <net/mac80211.h>
#include <net/netlink.h>
#include "wlcore.h"
#include "debug.h"
#include "hw_ops.h"
#include "vendor_cmd.h"
static const
struct nla_policy wlcore_vendor_attr_policy[NUM_WLCORE_VENDOR_ATTR] = {
[WLCORE_VENDOR_ATTR_FREQ] = { .type = NLA_U32 },
[WLCORE_VENDOR_ATTR_GROUP_ID] = { .type = NLA_U32 },
[WLCORE_VENDOR_ATTR_GROUP_KEY] = { .type = NLA_BINARY,
.len = WLAN_MAX_KEY_LEN },
};
static int
wlcore_vendor_cmd_smart_config_start(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wl1271 *wl = hw->priv;
struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR];
int ret;
wl1271_debug(DEBUG_CMD, "vendor cmd smart config start");
if (!data)
return -EINVAL;
ret = nla_parse_deprecated(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
wlcore_vendor_attr_policy, NULL);
if (ret)
return ret;
if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID])
return -EINVAL;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EINVAL;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wlcore_smart_config_start(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]));
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static int
wlcore_vendor_cmd_smart_config_stop(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wl1271 *wl = hw->priv;
int ret;
wl1271_debug(DEBUG_CMD, "testmode cmd smart config stop");
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EINVAL;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wlcore_smart_config_stop(wl);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static int
wlcore_vendor_cmd_smart_config_set_group_key(struct wiphy *wiphy,
struct wireless_dev *wdev,
const void *data, int data_len)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wl1271 *wl = hw->priv;
struct nlattr *tb[NUM_WLCORE_VENDOR_ATTR];
int ret;
wl1271_debug(DEBUG_CMD, "testmode cmd smart config set group key");
if (!data)
return -EINVAL;
ret = nla_parse_deprecated(tb, MAX_WLCORE_VENDOR_ATTR, data, data_len,
wlcore_vendor_attr_policy, NULL);
if (ret)
return ret;
if (!tb[WLCORE_VENDOR_ATTR_GROUP_ID] ||
!tb[WLCORE_VENDOR_ATTR_GROUP_KEY])
return -EINVAL;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EINVAL;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wlcore_smart_config_set_group_key(wl,
nla_get_u32(tb[WLCORE_VENDOR_ATTR_GROUP_ID]),
nla_len(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]),
nla_data(tb[WLCORE_VENDOR_ATTR_GROUP_KEY]));
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static const struct wiphy_vendor_command wlcore_vendor_commands[] = {
{
.info = {
.vendor_id = TI_OUI,
.subcmd = WLCORE_VENDOR_CMD_SMART_CONFIG_START,
},
.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wlcore_vendor_cmd_smart_config_start,
.policy = wlcore_vendor_attr_policy,
},
{
.info = {
.vendor_id = TI_OUI,
.subcmd = WLCORE_VENDOR_CMD_SMART_CONFIG_STOP,
},
.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wlcore_vendor_cmd_smart_config_stop,
.policy = wlcore_vendor_attr_policy,
},
{
.info = {
.vendor_id = TI_OUI,
.subcmd = WLCORE_VENDOR_CMD_SMART_CONFIG_SET_GROUP_KEY,
},
.flags = WIPHY_VENDOR_CMD_NEED_NETDEV |
WIPHY_VENDOR_CMD_NEED_RUNNING,
.doit = wlcore_vendor_cmd_smart_config_set_group_key,
.policy = wlcore_vendor_attr_policy,
},
};
static const struct nl80211_vendor_cmd_info wlcore_vendor_events[] = {
{
.vendor_id = TI_OUI,
.subcmd = WLCORE_VENDOR_EVENT_SC_SYNC,
},
{
.vendor_id = TI_OUI,
.subcmd = WLCORE_VENDOR_EVENT_SC_DECODE,
},
};
void wlcore_set_vendor_commands(struct wiphy *wiphy)
{
wiphy->vendor_commands = wlcore_vendor_commands;
wiphy->n_vendor_commands = ARRAY_SIZE(wlcore_vendor_commands);
wiphy->vendor_events = wlcore_vendor_events;
wiphy->n_vendor_events = ARRAY_SIZE(wlcore_vendor_events);
}
|
linux-master
|
drivers/net/wireless/ti/wlcore/vendor_cmd.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2009-2010 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "acx.h"
#include "wl12xx_80211.h"
#include "cmd.h"
#include "event.h"
#include "tx.h"
#include "hw_ops.h"
#define WL1271_CMD_FAST_POLL_COUNT 50
#define WL1271_WAIT_EVENT_FAST_POLL_COUNT 20
/*
* send command to firmware
*
 * @wl: wl struct
 * @id: command id
 * @buf: buffer containing the command, must work with dma
 * @len: length of the buffer
 * @res_len: length of the response to read back (0 reads back only the
 *	command header)
 * return the cmd status code on success.
*/
static int __wlcore_cmd_send(struct wl1271 *wl, u16 id, void *buf,
size_t len, size_t res_len)
{
struct wl1271_cmd_header *cmd;
unsigned long timeout;
u32 intr;
int ret;
u16 status;
u16 poll_count = 0;
if (unlikely(wl->state == WLCORE_STATE_RESTARTING &&
id != CMD_STOP_FWLOGGER))
return -EIO;
if (WARN_ON_ONCE(len < sizeof(*cmd)))
return -EIO;
cmd = buf;
cmd->id = cpu_to_le16(id);
cmd->status = 0;
WARN_ON(len % 4 != 0);
WARN_ON(test_bit(WL1271_FLAG_IN_ELP, &wl->flags));
ret = wlcore_write(wl, wl->cmd_box_addr, buf, len, false);
if (ret < 0)
return ret;
/*
* TODO: we just need this because one bit is in a different
* place. Is there any better way?
*/
ret = wl->ops->trigger_cmd(wl, wl->cmd_box_addr, buf, len);
if (ret < 0)
return ret;
timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
if (ret < 0)
return ret;
while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
if (time_after(jiffies, timeout)) {
wl1271_error("command complete timeout");
return -ETIMEDOUT;
}
poll_count++;
if (poll_count < WL1271_CMD_FAST_POLL_COUNT)
udelay(10);
else
msleep(1);
ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
if (ret < 0)
return ret;
}
/* read back the status code of the command */
if (res_len == 0)
res_len = sizeof(struct wl1271_cmd_header);
ret = wlcore_read(wl, wl->cmd_box_addr, cmd, res_len, false);
if (ret < 0)
return ret;
status = le16_to_cpu(cmd->status);
ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
WL1271_ACX_INTR_CMD_COMPLETE);
if (ret < 0)
return ret;
return status;
}
/*
* send command to fw and return cmd status on success
* valid_rets contains a bitmap of allowed error codes
*/
static int wlcore_cmd_send_failsafe(struct wl1271 *wl, u16 id, void *buf,
size_t len, size_t res_len,
unsigned long valid_rets)
{
int ret = __wlcore_cmd_send(wl, id, buf, len, res_len);
if (ret < 0)
goto fail;
/* success is always a valid status */
valid_rets |= BIT(CMD_STATUS_SUCCESS);
if (ret >= MAX_COMMAND_STATUS ||
!test_bit(ret, &valid_rets)) {
wl1271_error("command execute failure %d", ret);
ret = -EIO;
goto fail;
}
return ret;
fail:
wl12xx_queue_recovery_work(wl);
return ret;
}
/*
 * wrapper for wlcore_cmd_send that accepts only CMD_STATUS_SUCCESS.
* return 0 on success.
*/
int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
size_t res_len)
{
int ret = wlcore_cmd_send_failsafe(wl, id, buf, len, res_len, 0);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(wl1271_cmd_send);
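/*
 * Minimal usage sketch for wl1271_cmd_send() (struct wl12xx_cmd_foo
 * and CMD_FOO are hypothetical). Command buffers are kzalloc'd so
 * they are DMA-able, and res_len is 0 when only the status header
 * needs to be read back.
 */
#if 0
static int example_send_cmd(struct wl1271 *wl)
{
	struct wl12xx_cmd_foo *cmd;
	int ret;

	cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	/* fill in command-specific fields here */
	ret = wl1271_cmd_send(wl, CMD_FOO, cmd, sizeof(*cmd), 0);
	if (ret < 0)
		wl1271_error("example command failed");

	kfree(cmd);
	return ret;
}
#endif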
/*
* Poll the mailbox event field until any of the bits in the mask is set or a
* timeout occurs (WL1271_EVENT_TIMEOUT in msecs)
*/
int wlcore_cmd_wait_for_event_or_timeout(struct wl1271 *wl,
u32 mask, bool *timeout)
{
u32 *events_vector;
u32 event;
unsigned long timeout_time;
u16 poll_count = 0;
int ret = 0;
*timeout = false;
events_vector = kmalloc(sizeof(*events_vector), GFP_KERNEL | GFP_DMA);
if (!events_vector)
return -ENOMEM;
timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto free_vector;
do {
if (time_after(jiffies, timeout_time)) {
wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
(int)mask);
*timeout = true;
goto out;
}
poll_count++;
if (poll_count < WL1271_WAIT_EVENT_FAST_POLL_COUNT)
usleep_range(50, 51);
else
usleep_range(1000, 5000);
/* read from both event fields */
ret = wlcore_read(wl, wl->mbox_ptr[0], events_vector,
sizeof(*events_vector), false);
if (ret < 0)
goto out;
event = *events_vector & mask;
ret = wlcore_read(wl, wl->mbox_ptr[1], events_vector,
sizeof(*events_vector), false);
if (ret < 0)
goto out;
event |= *events_vector & mask;
} while (!event);
out:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
free_vector:
kfree(events_vector);
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_cmd_wait_for_event_or_timeout);
int wl12xx_cmd_role_enable(struct wl1271 *wl, u8 *addr, u8 role_type,
u8 *role_id)
{
struct wl12xx_cmd_role_enable *cmd;
int ret;
wl1271_debug(DEBUG_CMD, "cmd role enable");
if (WARN_ON(*role_id != WL12XX_INVALID_ROLE_ID))
return -EBUSY;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
/* get role id */
cmd->role_id = find_first_zero_bit(wl->roles_map, WL12XX_MAX_ROLES);
if (cmd->role_id >= WL12XX_MAX_ROLES) {
ret = -EBUSY;
goto out_free;
}
memcpy(cmd->mac_address, addr, ETH_ALEN);
cmd->role_type = role_type;
ret = wl1271_cmd_send(wl, CMD_ROLE_ENABLE, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd role enable");
goto out_free;
}
__set_bit(cmd->role_id, wl->roles_map);
*role_id = cmd->role_id;
out_free:
kfree(cmd);
out:
return ret;
}
int wl12xx_cmd_role_disable(struct wl1271 *wl, u8 *role_id)
{
struct wl12xx_cmd_role_disable *cmd;
int ret;
wl1271_debug(DEBUG_CMD, "cmd role disable");
if (WARN_ON(*role_id == WL12XX_INVALID_ROLE_ID))
return -ENOENT;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->role_id = *role_id;
ret = wl1271_cmd_send(wl, CMD_ROLE_DISABLE, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd role disable");
goto out_free;
}
__clear_bit(*role_id, wl->roles_map);
*role_id = WL12XX_INVALID_ROLE_ID;
out_free:
kfree(cmd);
out:
return ret;
}
static int wlcore_get_new_session_id(struct wl1271 *wl, u8 hlid)
{
if (wl->session_ids[hlid] >= SESSION_COUNTER_MAX)
wl->session_ids[hlid] = 0;
wl->session_ids[hlid]++;
return wl->session_ids[hlid];
}
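/*
 * Illustrative wrap-around: with a hypothetical SESSION_COUNTER_MAX of
 * 7, the per-hlid session ids cycle 1, 2, ..., 7, 1, ...; 0 is only
 * ever seen before the first allocation.
 */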
int wl12xx_allocate_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
unsigned long flags;
u8 link = find_first_zero_bit(wl->links_map, wl->num_links);
if (link >= wl->num_links)
return -EBUSY;
wl->session_ids[link] = wlcore_get_new_session_id(wl, link);
/* these bits are used by op_tx */
spin_lock_irqsave(&wl->wl_lock, flags);
__set_bit(link, wl->links_map);
__set_bit(link, wlvif->links_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
/*
 * Take the last "freed packets" value from the current FW status.
 * On recovery we might not have fw_status yet, and
 * tx_lnk_free_pkts will be NULL, so check for it.
*/
if (wl->fw_status->counters.tx_lnk_free_pkts)
wl->links[link].prev_freed_pkts =
wl->fw_status->counters.tx_lnk_free_pkts[link];
wl->links[link].wlvif = wlvif;
/*
* Take saved value for total freed packets from wlvif, in case this is
* recovery/resume
*/
if (wlvif->bss_type != BSS_TYPE_AP_BSS)
wl->links[link].total_freed_pkts = wlvif->total_freed_pkts;
*hlid = link;
wl->active_link_count++;
return 0;
}
void wl12xx_free_link(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 *hlid)
{
unsigned long flags;
if (*hlid == WL12XX_INVALID_LINK_ID)
return;
/* these bits are used by op_tx */
spin_lock_irqsave(&wl->wl_lock, flags);
__clear_bit(*hlid, wl->links_map);
__clear_bit(*hlid, wlvif->links_map);
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl->links[*hlid].allocated_pkts = 0;
wl->links[*hlid].prev_freed_pkts = 0;
wl->links[*hlid].ba_bitmap = 0;
eth_zero_addr(wl->links[*hlid].addr);
/*
* At this point op_tx() will not add more packets to the queues. We
* can purge them.
*/
wl1271_tx_reset_link_queues(wl, *hlid);
wl->links[*hlid].wlvif = NULL;
if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
*hlid == wlvif->ap.bcast_hlid) {
u32 sqn_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
/*
* save the total freed packets in the wlvif, in case this is
* recovery or suspend
*/
wlvif->total_freed_pkts = wl->links[*hlid].total_freed_pkts;
/*
* increment the initial seq number on recovery to account for
* transmitted packets that we haven't yet got in the FW status
*/
if (wlvif->encryption_type == KEY_GEM)
sqn_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
wlvif->total_freed_pkts += sqn_padding;
}
wl->links[*hlid].total_freed_pkts = 0;
*hlid = WL12XX_INVALID_LINK_ID;
wl->active_link_count--;
WARN_ON_ONCE(wl->active_link_count < 0);
}
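/*
 * hlid lifecycle sketch (illustrative): every role start path claims a
 * link and session id via wl12xx_allocate_link(), and every error and
 * stop path must hand the hlid back through wl12xx_free_link(), which
 * also purges the per-link tx queues and saves the freed-packets
 * counter for recovery.
 */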
u8 wlcore_get_native_channel_type(u8 nl_channel_type)
{
switch (nl_channel_type) {
case NL80211_CHAN_NO_HT:
return WLCORE_CHAN_NO_HT;
case NL80211_CHAN_HT20:
return WLCORE_CHAN_HT20;
case NL80211_CHAN_HT40MINUS:
return WLCORE_CHAN_HT40MINUS;
case NL80211_CHAN_HT40PLUS:
return WLCORE_CHAN_HT40PLUS;
default:
WARN_ON(1);
return WLCORE_CHAN_NO_HT;
}
}
EXPORT_SYMBOL_GPL(wlcore_get_native_channel_type);
static int wl12xx_cmd_role_start_dev(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
enum nl80211_band band,
int channel)
{
struct wl12xx_cmd_role_start *cmd;
int ret;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
wl1271_debug(DEBUG_CMD, "cmd role start dev %d", wlvif->dev_role_id);
cmd->role_id = wlvif->dev_role_id;
if (band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->channel = channel;
if (wlvif->dev_hlid == WL12XX_INVALID_LINK_ID) {
ret = wl12xx_allocate_link(wl, wlvif, &wlvif->dev_hlid);
if (ret)
goto out_free;
}
cmd->device.hlid = wlvif->dev_hlid;
cmd->device.session = wl->session_ids[wlvif->dev_hlid];
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d",
cmd->role_id, cmd->device.hlid, cmd->device.session);
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd role enable");
goto err_hlid;
}
goto out_free;
err_hlid:
/* clear links on error */
wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
kfree(cmd);
out:
return ret;
}
static int wl12xx_cmd_role_stop_dev(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
if (WARN_ON(wlvif->dev_hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
wl1271_debug(DEBUG_CMD, "cmd role stop dev");
cmd->role_id = wlvif->dev_role_id;
cmd->disc_type = DISCONNECT_IMMEDIATE;
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd role stop");
goto out_free;
}
wl12xx_free_link(wl, wlvif, &wlvif->dev_hlid);
out_free:
kfree(cmd);
out:
return ret;
}
int wl12xx_cmd_role_start_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
u32 supported_rates;
int ret;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
wl1271_debug(DEBUG_CMD, "cmd role start sta %d", wlvif->role_id);
cmd->role_id = wlvif->role_id;
if (wlvif->band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->channel = wlvif->channel;
cmd->sta.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
cmd->sta.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->sta.ssid_type = WL12XX_SSID_TYPE_ANY;
cmd->sta.ssid_len = wlvif->ssid_len;
memcpy(cmd->sta.ssid, wlvif->ssid, wlvif->ssid_len);
memcpy(cmd->sta.bssid, vif->bss_conf.bssid, ETH_ALEN);
supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
wlcore_hw_sta_get_ap_rate_mask(wl, wlvif);
if (wlvif->p2p)
supported_rates &= ~CONF_TX_CCK_RATES;
cmd->sta.local_rates = cpu_to_le32(supported_rates);
cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
if (ret)
goto out_free;
}
cmd->sta.hlid = wlvif->sta.hlid;
cmd->sta.session = wl->session_ids[wlvif->sta.hlid];
/*
* We don't have the correct remote rates in this stage. The
* rates will be reconfigured later, after association, if the
* firmware supports ACX_PEER_CAP. Otherwise, there's nothing
* we can do, so use all supported_rates here.
*/
cmd->sta.remote_rates = cpu_to_le32(supported_rates);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
wlvif->basic_rate_set, wlvif->rate_set);
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd role start sta");
goto err_hlid;
}
wlvif->sta.role_chan_type = wlvif->channel_type;
goto out_free;
err_hlid:
/* clear links on error. */
wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
out:
return ret;
}
/* use this function to stop ibss as well */
int wl12xx_cmd_role_stop_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
if (WARN_ON(wlvif->sta.hlid == WL12XX_INVALID_LINK_ID))
return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
wl1271_debug(DEBUG_CMD, "cmd role stop sta %d", wlvif->role_id);
cmd->role_id = wlvif->role_id;
cmd->disc_type = DISCONNECT_IMMEDIATE;
cmd->reason = cpu_to_le16(WLAN_REASON_UNSPECIFIED);
ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd role stop sta");
goto out_free;
}
wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
out:
return ret;
}
int wl12xx_cmd_role_start_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_start *cmd;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
u32 supported_rates;
int ret;
wl1271_debug(DEBUG_CMD, "cmd role start ap %d", wlvif->role_id);
/* for mesh interfaces, ssid_len is always 0 */
if (!ieee80211_vif_is_mesh(vif)) {
/* trying to use hidden SSID with an old hostapd version */
if (wlvif->ssid_len == 0 && !bss_conf->hidden_ssid) {
wl1271_error("got a null SSID from beacon/bss");
ret = -EINVAL;
goto out;
}
}
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.global_hlid);
if (ret < 0)
goto out_free;
ret = wl12xx_allocate_link(wl, wlvif, &wlvif->ap.bcast_hlid);
if (ret < 0)
goto out_free_global;
/* use the previous security seq, if this is a recovery/resume */
wl->links[wlvif->ap.bcast_hlid].total_freed_pkts =
wlvif->total_freed_pkts;
cmd->role_id = wlvif->role_id;
cmd->ap.aging_period = cpu_to_le16(wl->conf.tx.ap_aging_period);
cmd->ap.bss_index = WL1271_AP_BSS_INDEX;
cmd->ap.global_hlid = wlvif->ap.global_hlid;
cmd->ap.broadcast_hlid = wlvif->ap.bcast_hlid;
cmd->ap.global_session_id = wl->session_ids[wlvif->ap.global_hlid];
cmd->ap.bcast_session_id = wl->session_ids[wlvif->ap.bcast_hlid];
cmd->ap.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
cmd->ap.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ap.dtim_interval = bss_conf->dtim_period;
cmd->ap.beacon_expiry = WL1271_AP_DEF_BEACON_EXP;
/* FIXME: Change when adding DFS */
cmd->ap.reset_tsf = 1; /* By default reset AP TSF */
cmd->ap.wmm = wlvif->wmm_enabled;
cmd->channel = wlvif->channel;
cmd->channel_type = wlcore_get_native_channel_type(wlvif->channel_type);
if (!bss_conf->hidden_ssid) {
/* take the SSID from the beacon for backward compatibility */
cmd->ap.ssid_type = WL12XX_SSID_TYPE_PUBLIC;
cmd->ap.ssid_len = wlvif->ssid_len;
memcpy(cmd->ap.ssid, wlvif->ssid, wlvif->ssid_len);
} else {
cmd->ap.ssid_type = WL12XX_SSID_TYPE_HIDDEN;
cmd->ap.ssid_len = vif->cfg.ssid_len;
memcpy(cmd->ap.ssid, vif->cfg.ssid, vif->cfg.ssid_len);
}
supported_rates = CONF_TX_ENABLED_RATES | CONF_TX_MCS_RATES |
wlcore_hw_ap_get_mimo_wide_rate_mask(wl, wlvif);
if (wlvif->p2p)
supported_rates &= ~CONF_TX_CCK_RATES;
wl1271_debug(DEBUG_CMD, "cmd role start ap with supported_rates 0x%08x",
supported_rates);
cmd->ap.local_rates = cpu_to_le32(supported_rates);
switch (wlvif->band) {
case NL80211_BAND_2GHZ:
cmd->band = WLCORE_BAND_2_4GHZ;
break;
case NL80211_BAND_5GHZ:
cmd->band = WLCORE_BAND_5GHZ;
break;
default:
wl1271_warning("ap start - unknown band: %d", (int)wlvif->band);
cmd->band = WLCORE_BAND_2_4GHZ;
break;
}
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd role start ap");
goto out_free_bcast;
}
goto out_free;
out_free_bcast:
wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
out_free_global:
wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
out_free:
kfree(cmd);
out:
return ret;
}
int wl12xx_cmd_role_stop_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_role_stop *cmd;
int ret;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
wl1271_debug(DEBUG_CMD, "cmd role stop ap %d", wlvif->role_id);
cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_ROLE_STOP, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd role stop ap");
goto out_free;
}
wl12xx_free_link(wl, wlvif, &wlvif->ap.bcast_hlid);
wl12xx_free_link(wl, wlvif, &wlvif->ap.global_hlid);
out_free:
kfree(cmd);
out:
return ret;
}
int wl12xx_cmd_role_start_ibss(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wl12xx_cmd_role_start *cmd;
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
int ret;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
wl1271_debug(DEBUG_CMD, "cmd role start ibss %d", wlvif->role_id);
cmd->role_id = wlvif->role_id;
if (wlvif->band == NL80211_BAND_5GHZ)
cmd->band = WLCORE_BAND_5GHZ;
cmd->channel = wlvif->channel;
cmd->ibss.basic_rate_set = cpu_to_le32(wlvif->basic_rate_set);
cmd->ibss.beacon_interval = cpu_to_le16(wlvif->beacon_int);
cmd->ibss.dtim_interval = bss_conf->dtim_period;
cmd->ibss.ssid_type = WL12XX_SSID_TYPE_ANY;
cmd->ibss.ssid_len = wlvif->ssid_len;
memcpy(cmd->ibss.ssid, wlvif->ssid, wlvif->ssid_len);
memcpy(cmd->ibss.bssid, vif->bss_conf.bssid, ETH_ALEN);
cmd->sta.local_rates = cpu_to_le32(wlvif->rate_set);
if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID) {
ret = wl12xx_allocate_link(wl, wlvif, &wlvif->sta.hlid);
if (ret)
goto out_free;
}
cmd->ibss.hlid = wlvif->sta.hlid;
cmd->ibss.remote_rates = cpu_to_le32(wlvif->rate_set);
wl1271_debug(DEBUG_CMD, "role start: roleid=%d, hlid=%d, session=%d "
"basic_rate_set: 0x%x, remote_rates: 0x%x",
wlvif->role_id, cmd->sta.hlid, cmd->sta.session,
wlvif->basic_rate_set, wlvif->rate_set);
wl1271_debug(DEBUG_CMD, "vif->bss_conf.bssid = %pM",
vif->bss_conf.bssid);
ret = wl1271_cmd_send(wl, CMD_ROLE_START, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd role enable");
goto err_hlid;
}
goto out_free;
err_hlid:
/* clear links on error. */
wl12xx_free_link(wl, wlvif, &wlvif->sta.hlid);
out_free:
kfree(cmd);
out:
return ret;
}
/**
* wl1271_cmd_test - send test command to firmware
*
* @wl: wl struct
* @buf: buffer containing the command, with all headers, must work with dma
* @buf_len: length of the buffer
 * @answer: whether the firmware's answer should be read back into @buf
*/
int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
{
int ret;
size_t res_len = 0;
wl1271_debug(DEBUG_CMD, "cmd test");
if (answer)
res_len = buf_len;
ret = wl1271_cmd_send(wl, CMD_TEST, buf, buf_len, res_len);
if (ret < 0) {
wl1271_warning("TEST command failed");
return ret;
}
return ret;
}
EXPORT_SYMBOL_GPL(wl1271_cmd_test);
/**
* wl1271_cmd_interrogate - read acx from firmware
*
* @wl: wl struct
* @id: acx id
* @buf: buffer for the response, including all headers, must work with dma
* @cmd_len: length of command
 * @res_len: length of the response, including all headers
*/
int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf,
size_t cmd_len, size_t res_len)
{
struct acx_header *acx = buf;
int ret;
wl1271_debug(DEBUG_CMD, "cmd interrogate");
acx->id = cpu_to_le16(id);
/* response payload length, does not include any headers */
acx->len = cpu_to_le16(res_len - sizeof(*acx));
ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, cmd_len, res_len);
if (ret < 0)
wl1271_error("INTERROGATE command failed");
return ret;
}
/**
* wlcore_cmd_configure_failsafe - write acx value to firmware
*
* @wl: wl struct
* @id: acx id
* @buf: buffer containing acx, including all headers, must work with dma
* @len: length of buf
* @valid_rets: bitmap of valid cmd status codes (i.e. return values).
* return the cmd status on success.
*/
int wlcore_cmd_configure_failsafe(struct wl1271 *wl, u16 id, void *buf,
size_t len, unsigned long valid_rets)
{
struct acx_header *acx = buf;
int ret;
wl1271_debug(DEBUG_CMD, "cmd configure (%d)", id);
if (WARN_ON_ONCE(len < sizeof(*acx)))
return -EIO;
acx->id = cpu_to_le16(id);
/* payload length, does not include any headers */
acx->len = cpu_to_le16(len - sizeof(*acx));
ret = wlcore_cmd_send_failsafe(wl, CMD_CONFIGURE, acx, len, 0,
valid_rets);
if (ret < 0) {
wl1271_warning("CONFIGURE command NOK");
return ret;
}
return ret;
}
/*
* wrapper for wlcore_cmd_configure that accepts only success status.
* return 0 on success
*/
int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
{
int ret = wlcore_cmd_configure_failsafe(wl, id, buf, len, 0);
if (ret < 0)
return ret;
return 0;
}
EXPORT_SYMBOL_GPL(wl1271_cmd_configure);
int wl1271_cmd_data_path(struct wl1271 *wl, bool enable)
{
struct cmd_enabledisable_path *cmd;
int ret;
u16 cmd_rx, cmd_tx;
wl1271_debug(DEBUG_CMD, "cmd data path");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
/* the channel here is only used for calibration, so hardcoded to 1 */
cmd->channel = 1;
if (enable) {
cmd_rx = CMD_ENABLE_RX;
cmd_tx = CMD_ENABLE_TX;
} else {
cmd_rx = CMD_DISABLE_RX;
cmd_tx = CMD_DISABLE_TX;
}
ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("rx %s cmd for channel %d failed",
enable ? "start" : "stop", cmd->channel);
goto out;
}
wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d",
enable ? "start" : "stop", cmd->channel);
ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("tx %s cmd for channel %d failed",
enable ? "start" : "stop", cmd->channel);
goto out;
}
wl1271_debug(DEBUG_BOOT, "tx %s cmd channel %d",
enable ? "start" : "stop", cmd->channel);
out:
kfree(cmd);
return ret;
}
EXPORT_SYMBOL_GPL(wl1271_cmd_data_path);
int wl1271_cmd_ps_mode(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 ps_mode, u16 auto_ps_timeout)
{
struct wl1271_cmd_ps_params *ps_params = NULL;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd set ps mode");
ps_params = kzalloc(sizeof(*ps_params), GFP_KERNEL);
if (!ps_params) {
ret = -ENOMEM;
goto out;
}
ps_params->role_id = wlvif->role_id;
ps_params->ps_mode = ps_mode;
ps_params->auto_ps_timeout = auto_ps_timeout;
ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
sizeof(*ps_params), 0);
if (ret < 0) {
wl1271_error("cmd set_ps_mode failed");
goto out;
}
out:
kfree(ps_params);
return ret;
}
int wl1271_cmd_template_set(struct wl1271 *wl, u8 role_id,
u16 template_id, void *buf, size_t buf_len,
int index, u32 rates)
{
struct wl1271_cmd_template_set *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd template_set %d (role %d)",
template_id, role_id);
WARN_ON(buf_len > WL1271_CMD_TEMPL_MAX_SIZE);
buf_len = min_t(size_t, buf_len, WL1271_CMD_TEMPL_MAX_SIZE);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
/* during initialization wlvif is NULL */
cmd->role_id = role_id;
cmd->len = cpu_to_le16(buf_len);
cmd->template_type = template_id;
cmd->enabled_rates = cpu_to_le32(rates);
cmd->short_retry_limit = wl->conf.tx.tmpl_short_retry_limit;
cmd->long_retry_limit = wl->conf.tx.tmpl_long_retry_limit;
cmd->index = index;
if (buf)
memcpy(cmd->template_data, buf, buf_len);
ret = wl1271_cmd_send(wl, CMD_SET_TEMPLATE, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_warning("cmd set_template failed: %d", ret);
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
int wl12xx_cmd_build_null_data(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct sk_buff *skb = NULL;
int size;
void *ptr;
int ret = -ENOMEM;
if (wlvif->bss_type == BSS_TYPE_IBSS) {
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
skb = ieee80211_nullfunc_get(wl->hw,
wl12xx_wlvif_to_vif(wlvif),
-1, false);
if (!skb)
goto out;
size = skb->len;
ptr = skb->data;
}
ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_NULL_DATA, ptr, size, 0,
wlvif->basic_rate);
out:
dev_kfree_skb(skb);
if (ret)
wl1271_warning("cmd build null data failed %d", ret);
return ret;
}
int wl12xx_cmd_build_klv_null_data(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb = NULL;
int ret = -ENOMEM;
skb = ieee80211_nullfunc_get(wl->hw, vif, -1, false);
if (!skb)
goto out;
ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_KLV,
skb->data, skb->len,
wlvif->sta.klv_template_id,
wlvif->basic_rate);
out:
dev_kfree_skb(skb);
if (ret)
wl1271_warning("cmd build klv null data failed %d", ret);
return ret;
}
int wl1271_cmd_build_ps_poll(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 aid)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret = 0;
skb = ieee80211_pspoll_get(wl->hw, vif);
if (!skb)
goto out;
ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_PS_POLL, skb->data,
skb->len, 0, wlvif->basic_rate_set);
out:
dev_kfree_skb(skb);
return ret;
}
int wl12xx_cmd_build_probe_req(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 role_id, u8 band,
const u8 *ssid, size_t ssid_len,
const u8 *ie0, size_t ie0_len, const u8 *ie1,
size_t ie1_len, bool sched_scan)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ret;
u32 rate;
u16 template_id_2_4 = wl->scan_templ_id_2_4;
u16 template_id_5 = wl->scan_templ_id_5;
wl1271_debug(DEBUG_SCAN, "build probe request band %d", band);
skb = ieee80211_probereq_get(wl->hw, vif->addr, ssid, ssid_len,
ie0_len + ie1_len);
if (!skb) {
ret = -ENOMEM;
goto out;
}
if (ie0_len)
skb_put_data(skb, ie0, ie0_len);
if (ie1_len)
skb_put_data(skb, ie1, ie1_len);
if (sched_scan &&
(wl->quirks & WLCORE_QUIRK_DUAL_PROBE_TMPL)) {
template_id_2_4 = wl->sched_scan_templ_id_2_4;
template_id_5 = wl->sched_scan_templ_id_5;
}
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[band]);
if (band == NL80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, role_id,
template_id_2_4,
skb->data, skb->len, 0, rate);
else
ret = wl1271_cmd_template_set(wl, role_id,
template_id_5,
skb->data, skb->len, 0, rate);
out:
dev_kfree_skb(skb);
return ret;
}
EXPORT_SYMBOL_GPL(wl12xx_cmd_build_probe_req);
struct sk_buff *wl1271_cmd_build_ap_probe_req(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct sk_buff *skb)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
int ret;
u32 rate;
if (!skb)
skb = ieee80211_ap_probereq_get(wl->hw, vif);
if (!skb)
goto out;
wl1271_debug(DEBUG_SCAN, "set ap probe request template");
rate = wl1271_tx_min_rate_get(wl, wlvif->bitrate_masks[wlvif->band]);
if (wlvif->band == NL80211_BAND_2GHZ)
ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_CFG_PROBE_REQ_2_4,
skb->data, skb->len, 0, rate);
else
ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_CFG_PROBE_REQ_5,
skb->data, skb->len, 0, rate);
if (ret < 0)
wl1271_error("Unable to set ap probe request template.");
out:
return skb;
}
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret, extra = 0;
u16 fc;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
struct wl12xx_arp_rsp_template *tmpl;
struct ieee80211_hdr_3addr *hdr;
struct arphdr *arp_hdr;
skb = dev_alloc_skb(sizeof(*hdr) + sizeof(__le16) + sizeof(*tmpl) +
WL1271_EXTRA_SPACE_MAX);
if (!skb) {
wl1271_error("failed to allocate buffer for arp rsp template");
return -ENOMEM;
}
skb_reserve(skb, sizeof(*hdr) + WL1271_EXTRA_SPACE_MAX);
tmpl = skb_put_zero(skb, sizeof(*tmpl));
/* llc layer */
memcpy(tmpl->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
tmpl->llc_type = cpu_to_be16(ETH_P_ARP);
/* arp header */
arp_hdr = &tmpl->arp_hdr;
arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
arp_hdr->ar_hln = ETH_ALEN;
arp_hdr->ar_pln = 4;
arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);
/* arp payload */
memcpy(tmpl->sender_hw, vif->addr, ETH_ALEN);
tmpl->sender_ip = wlvif->ip_addr;
/* encryption space */
switch (wlvif->encryption_type) {
case KEY_TKIP:
if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
extra = WL1271_EXTRA_SPACE_TKIP;
break;
case KEY_AES:
extra = WL1271_EXTRA_SPACE_AES;
break;
case KEY_NONE:
case KEY_WEP:
case KEY_GEM:
extra = 0;
break;
default:
wl1271_warning("Unknown encryption type: %d",
wlvif->encryption_type);
ret = -EINVAL;
goto out;
}
if (extra) {
u8 *space = skb_push(skb, extra);
memset(space, 0, extra);
}
/* QoS header - BE */
if (wlvif->sta.qos)
memset(skb_push(skb, sizeof(__le16)), 0, sizeof(__le16));
/* mac80211 header */
hdr = skb_push(skb, sizeof(*hdr));
memset(hdr, 0, sizeof(*hdr));
fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS;
if (wlvif->sta.qos)
fc |= IEEE80211_STYPE_QOS_DATA;
else
fc |= IEEE80211_STYPE_DATA;
if (wlvif->encryption_type != KEY_NONE)
fc |= IEEE80211_FCTL_PROTECTED;
hdr->frame_control = cpu_to_le16(fc);
memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
memcpy(hdr->addr2, vif->addr, ETH_ALEN);
eth_broadcast_addr(hdr->addr3);
ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP,
skb->data, skb->len, 0,
wlvif->basic_rate);
out:
dev_kfree_skb(skb);
return ret;
}
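/*
 * Resulting ARP response template layout (built back to front with
 * skb_push() above; the skb_reserve() keeps enough headroom for every
 * push):
 *
 *   [ 802.11 hdr ][ QoS ctl (opt) ][ IV space (opt) ][ LLC ][ ARP ]
 *
 * The "extra" encryption space is zeroed padding, presumably filled in
 * by the firmware when it protects the frame.
 */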
int wl1271_build_qos_null_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_qos_hdr template;
memset(&template, 0, sizeof(template));
memcpy(template.addr1, vif->bss_conf.bssid, ETH_ALEN);
memcpy(template.addr2, vif->addr, ETH_ALEN);
memcpy(template.addr3, vif->bss_conf.bssid, ETH_ALEN);
template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_QOS_NULLFUNC |
IEEE80211_FCTL_TODS);
/* FIXME: not sure what priority to use here */
template.qos_ctrl = cpu_to_le16(0);
return wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_QOS_NULL_DATA, &template,
sizeof(template), 0,
wlvif->basic_rate);
}
int wl12xx_cmd_set_default_wep_key(struct wl1271 *wl, u8 id, u8 hlid)
{
struct wl1271_cmd_set_keys *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd set_default_wep_key %d", id);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->hlid = hlid;
cmd->key_id = id;
cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
cmd->key_action = cpu_to_le16(KEY_SET_ID);
cmd->key_type = KEY_WEP;
ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_warning("cmd set_default_wep_key failed: %d", ret);
goto out;
}
out:
kfree(cmd);
return ret;
}
int wl1271_cmd_set_sta_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, const u8 *addr,
u32 tx_seq_32, u16 tx_seq_16)
{
struct wl1271_cmd_set_keys *cmd;
int ret = 0;
/* hlid might have already been deleted */
if (wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
return 0;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->hlid = wlvif->sta.hlid;
if (key_type == KEY_WEP)
cmd->lid_key_type = WEP_DEFAULT_LID_TYPE;
else if (is_broadcast_ether_addr(addr))
cmd->lid_key_type = BROADCAST_LID_TYPE;
else
cmd->lid_key_type = UNICAST_LID_TYPE;
cmd->key_action = cpu_to_le16(action);
cmd->key_size = key_size;
cmd->key_type = key_type;
cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
cmd->key_id = id;
if (key_type == KEY_TKIP) {
/*
* We get the key in the following form:
* TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
* but the target is expecting:
* TKIP - RX MIC - TX MIC
*/
memcpy(cmd->key, key, 16);
memcpy(cmd->key + 16, key + 24, 8);
memcpy(cmd->key + 24, key + 16, 8);
} else {
memcpy(cmd->key, key, key_size);
}
wl1271_dump(DEBUG_CRYPT, "TARGET KEY: ", cmd, sizeof(*cmd));
ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_warning("could not set keys");
goto out;
}
out:
kfree(cmd);
return ret;
}
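/*
 * Illustrative helper (not used by the driver): the TKIP reordering
 * performed above, from the host layout KEY | TX MIC | RX MIC to the
 * target layout KEY | RX MIC | TX MIC.
 */
#if 0
static void example_tkip_reorder(u8 dst[32], const u8 key[32])
{
	memcpy(dst, key, 16);		/* 128-bit temporal key */
	memcpy(dst + 16, key + 24, 8);	/* RX MIC moves forward */
	memcpy(dst + 24, key + 16, 8);	/* TX MIC moves back */
}
#endif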
/*
 * TODO: merge with the sta/ibss variants into one set_key function;
 * note there are slight differences.
*/
int wl1271_cmd_set_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32,
u16 tx_seq_16, bool is_pairwise)
{
struct wl1271_cmd_set_keys *cmd;
int ret = 0;
u8 lid_type;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
if (hlid == wlvif->ap.bcast_hlid) {
if (key_type == KEY_WEP)
lid_type = WEP_DEFAULT_LID_TYPE;
else
lid_type = BROADCAST_LID_TYPE;
} else if (is_pairwise) {
lid_type = UNICAST_LID_TYPE;
} else {
lid_type = BROADCAST_LID_TYPE;
}
wl1271_debug(DEBUG_CRYPT, "ap key action: %d id: %d lid: %d type: %d"
" hlid: %d", (int)action, (int)id, (int)lid_type,
(int)key_type, (int)hlid);
cmd->lid_key_type = lid_type;
cmd->hlid = hlid;
cmd->key_action = cpu_to_le16(action);
cmd->key_size = key_size;
cmd->key_type = key_type;
cmd->key_id = id;
cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
if (key_type == KEY_TKIP) {
/*
* We get the key in the following form:
* TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
* but the target is expecting:
* TKIP - RX MIC - TX MIC
*/
memcpy(cmd->key, key, 16);
memcpy(cmd->key + 16, key + 24, 8);
memcpy(cmd->key + 24, key + 16, 8);
} else {
memcpy(cmd->key, key, key_size);
}
wl1271_dump(DEBUG_CRYPT, "TARGET AP KEY: ", cmd, sizeof(*cmd));
ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_warning("could not set ap keys");
goto out;
}
out:
kfree(cmd);
return ret;
}
int wl12xx_cmd_set_peer_state(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid)
{
struct wl12xx_cmd_set_peer_state *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd set peer state (hlid=%d)", hlid);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->hlid = hlid;
cmd->state = WL1271_CMD_STA_STATE_CONNECTED;
/* wmm param is valid only for station role */
if (wlvif->bss_type == BSS_TYPE_STA_BSS)
cmd->wmm = wlvif->wmm_enabled;
ret = wl1271_cmd_send(wl, CMD_SET_PEER_STATE, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send set peer state command");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
int wl12xx_cmd_add_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta, u8 hlid)
{
struct wl12xx_cmd_add_peer *cmd;
int i, ret;
u32 sta_rates;
wl1271_debug(DEBUG_CMD, "cmd add peer %d", (int)hlid);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
memcpy(cmd->addr, sta->addr, ETH_ALEN);
cmd->bss_index = WL1271_AP_BSS_INDEX;
cmd->aid = sta->aid;
cmd->hlid = hlid;
cmd->sp_len = sta->max_sp;
cmd->wmm = sta->wme ? 1 : 0;
cmd->session_id = wl->session_ids[hlid];
cmd->role_id = wlvif->role_id;
for (i = 0; i < NUM_ACCESS_CATEGORIES_COPY; i++)
if (sta->wme && (sta->uapsd_queues & BIT(i)))
cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] =
WL1271_PSD_UPSD_TRIGGER;
else
cmd->psd_type[NUM_ACCESS_CATEGORIES_COPY-1-i] =
WL1271_PSD_LEGACY;
sta_rates = sta->deflink.supp_rates[wlvif->band];
if (sta->deflink.ht_cap.ht_supported)
sta_rates |=
(sta->deflink.ht_cap.mcs.rx_mask[0] << HW_HT_RATES_OFFSET) |
(sta->deflink.ht_cap.mcs.rx_mask[1] << HW_MIMO_RATES_OFFSET);
cmd->supported_rates =
cpu_to_le32(wl1271_tx_enabled_rates_get(wl, sta_rates,
wlvif->band));
if (!cmd->supported_rates) {
wl1271_debug(DEBUG_CMD,
"peer has no supported rates yet, configuring basic rates: 0x%x",
wlvif->basic_rate_set);
cmd->supported_rates = cpu_to_le32(wlvif->basic_rate_set);
}
wl1271_debug(DEBUG_CMD, "new peer rates=0x%x queues=0x%x",
cmd->supported_rates, sta->uapsd_queues);
ret = wl1271_cmd_send(wl, CMD_ADD_PEER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd add peer");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
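/*
 * Note on the psd_type indexing above: uapsd_queues bit i is stored at
 * index NUM_ACCESS_CATEGORIES_COPY - 1 - i, presumably because
 * mac80211 and the firmware enumerate the access categories in
 * opposite orders.
 */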
int wl12xx_cmd_remove_peer(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid)
{
struct wl12xx_cmd_remove_peer *cmd;
int ret;
bool timeout = false;
wl1271_debug(DEBUG_CMD, "cmd remove peer %d", (int)hlid);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->hlid = hlid;
/* We never send a deauth; mac80211 is in charge of this */
cmd->reason_opcode = 0;
cmd->send_deauth_flag = 0;
cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_REMOVE_PEER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to initiate cmd remove peer");
goto out_free;
}
ret = wl->ops->wait_for_event(wl,
WLCORE_EVENT_PEER_REMOVE_COMPLETE,
&timeout);
/*
* We are ok with a timeout here. The event is sometimes not sent
* due to a firmware bug. In case of another error (like SDIO timeout)
* queue a recovery.
*/
if (ret)
wl12xx_queue_recovery_work(wl);
out_free:
kfree(cmd);
out:
return ret;
}
static int wlcore_get_reg_conf_ch_idx(enum nl80211_band band, u16 ch)
{
/*
* map the given band/channel to the respective predefined
* bit expected by the fw
*/
switch (band) {
case NL80211_BAND_2GHZ:
/* channels 1..14 are mapped to 0..13 */
if (ch >= 1 && ch <= 14)
return ch - 1;
break;
case NL80211_BAND_5GHZ:
switch (ch) {
case 8 ... 16:
/* channels 8,12,16 are mapped to 18,19,20 */
return 18 + (ch-8)/4;
case 34 ... 48:
/* channels 34,36..48 are mapped to 21..28 */
return 21 + (ch-34)/2;
case 52 ... 64:
/* channels 52,56..64 are mapped to 29..32 */
return 29 + (ch-52)/4;
case 100 ... 140:
/* channels 100,104..140 are mapped to 33..43 */
return 33 + (ch-100)/4;
case 149 ... 165:
/* channels 149,153..165 are mapped to 44..48 */
return 44 + (ch-149)/4;
default:
break;
}
break;
default:
break;
}
wl1271_error("%s: unknown band/channel: %d/%d", __func__, band, ch);
return -1;
}
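/*
 * Worked examples for the mapping above (illustrative):
 *   2.4 GHz ch 6   -> 6 - 1              = bit 5
 *   5 GHz   ch 36  -> 21 + (36 - 34)/2   = bit 22
 *   5 GHz   ch 100 -> 33 + (100 - 100)/4 = bit 33
 *   5 GHz   ch 149 -> 44 + (149 - 149)/4 = bit 44
 */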
void wlcore_set_pending_regdomain_ch(struct wl1271 *wl, u16 channel,
enum nl80211_band band)
{
int ch_bit_idx = 0;
if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
return;
ch_bit_idx = wlcore_get_reg_conf_ch_idx(band, channel);
if (ch_bit_idx >= 0 && ch_bit_idx <= WL1271_MAX_CHANNELS)
__set_bit_le(ch_bit_idx, (long *)wl->reg_ch_conf_pending);
}
int wlcore_cmd_regdomain_config_locked(struct wl1271 *wl)
{
struct wl12xx_cmd_regdomain_dfs_config *cmd = NULL;
int ret = 0, i, b, ch_bit_idx;
__le32 tmp_ch_bitmap[2] __aligned(sizeof(unsigned long));
struct wiphy *wiphy = wl->hw->wiphy;
struct ieee80211_supported_band *band;
bool timeout = false;
if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
return 0;
wl1271_debug(DEBUG_CMD, "cmd reg domain config");
memcpy(tmp_ch_bitmap, wl->reg_ch_conf_pending, sizeof(tmp_ch_bitmap));
for (b = NL80211_BAND_2GHZ; b <= NL80211_BAND_5GHZ; b++) {
band = wiphy->bands[b];
for (i = 0; i < band->n_channels; i++) {
struct ieee80211_channel *channel = &band->channels[i];
u16 ch = channel->hw_value;
u32 flags = channel->flags;
if (flags & (IEEE80211_CHAN_DISABLED |
IEEE80211_CHAN_NO_IR))
continue;
if ((flags & IEEE80211_CHAN_RADAR) &&
channel->dfs_state != NL80211_DFS_AVAILABLE)
continue;
ch_bit_idx = wlcore_get_reg_conf_ch_idx(b, ch);
if (ch_bit_idx < 0)
continue;
__set_bit_le(ch_bit_idx, (long *)tmp_ch_bitmap);
}
}
if (!memcmp(tmp_ch_bitmap, wl->reg_ch_conf_last, sizeof(tmp_ch_bitmap)))
goto out;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->ch_bit_map1 = tmp_ch_bitmap[0];
cmd->ch_bit_map2 = tmp_ch_bitmap[1];
cmd->dfs_region = wl->dfs_region;
wl1271_debug(DEBUG_CMD,
"cmd reg domain bitmap1: 0x%08x, bitmap2: 0x%08x",
cmd->ch_bit_map1, cmd->ch_bit_map2);
ret = wl1271_cmd_send(wl, CMD_DFS_CHANNEL_CONFIG, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send reg domain dfs config");
goto out;
}
ret = wl->ops->wait_for_event(wl,
WLCORE_EVENT_DFS_CONFIG_COMPLETE,
&timeout);
if (ret < 0 || timeout) {
wl1271_error("reg domain conf %serror",
timeout ? "completion " : "");
ret = timeout ? -ETIMEDOUT : ret;
goto out;
}
memcpy(wl->reg_ch_conf_last, tmp_ch_bitmap, sizeof(tmp_ch_bitmap));
memset(wl->reg_ch_conf_pending, 0, sizeof(wl->reg_ch_conf_pending));
out:
kfree(cmd);
return ret;
}
int wl12xx_cmd_config_fwlog(struct wl1271 *wl)
{
struct wl12xx_cmd_config_fwlog *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd config firmware logger");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->logger_mode = wl->conf.fwlog.mode;
cmd->log_severity = wl->conf.fwlog.severity;
cmd->timestamp = wl->conf.fwlog.timestamp;
cmd->output = wl->conf.fwlog.output;
cmd->threshold = wl->conf.fwlog.threshold;
ret = wl1271_cmd_send(wl, CMD_CONFIG_FWLOGGER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send config firmware logger command");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
int wl12xx_cmd_start_fwlog(struct wl1271 *wl)
{
struct wl12xx_cmd_start_fwlog *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd start firmware logger");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
ret = wl1271_cmd_send(wl, CMD_START_FWLOGGER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send start firmware logger command");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
int wl12xx_cmd_stop_fwlog(struct wl1271 *wl)
{
struct wl12xx_cmd_stop_fwlog *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd stop firmware logger");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
ret = wl1271_cmd_send(wl, CMD_STOP_FWLOGGER, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send stop firmware logger command");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
static int wl12xx_cmd_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 role_id, enum nl80211_band band, u8 channel)
{
struct wl12xx_cmd_roc *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd roc %d (%d)", channel, role_id);
if (WARN_ON(role_id == WL12XX_INVALID_ROLE_ID))
return -EINVAL;
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->role_id = role_id;
cmd->channel = channel;
switch (band) {
case NL80211_BAND_2GHZ:
cmd->band = WLCORE_BAND_2_4GHZ;
break;
case NL80211_BAND_5GHZ:
cmd->band = WLCORE_BAND_5GHZ;
break;
default:
wl1271_error("roc - unknown band: %d", (int)wlvif->band);
ret = -EINVAL;
goto out_free;
}
ret = wl1271_cmd_send(wl, CMD_REMAIN_ON_CHANNEL, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send ROC command");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
static int wl12xx_cmd_croc(struct wl1271 *wl, u8 role_id)
{
struct wl12xx_cmd_croc *cmd;
int ret = 0;
wl1271_debug(DEBUG_CMD, "cmd croc (%d)", role_id);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->role_id = role_id;
ret = wl1271_cmd_send(wl, CMD_CANCEL_REMAIN_ON_CHANNEL, cmd,
sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send ROC command");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
int wl12xx_roc(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 role_id,
enum nl80211_band band, u8 channel)
{
int ret = 0;
if (WARN_ON(test_bit(role_id, wl->roc_map)))
return 0;
ret = wl12xx_cmd_roc(wl, wlvif, role_id, band, channel);
if (ret < 0)
goto out;
__set_bit(role_id, wl->roc_map);
out:
return ret;
}
int wl12xx_croc(struct wl1271 *wl, u8 role_id)
{
int ret = 0;
if (WARN_ON(!test_bit(role_id, wl->roc_map)))
return 0;
ret = wl12xx_cmd_croc(wl, role_id);
if (ret < 0)
goto out;
__clear_bit(role_id, wl->roc_map);
/*
* Rearm the tx watchdog when removing the last ROC. This prevents
* recoveries due to just finished ROCs - when Tx hasn't yet had
* a chance to get out.
*/
if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES)
wl12xx_rearm_tx_watchdog_locked(wl);
out:
return ret;
}
int wl12xx_cmd_stop_channel_switch(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct wl12xx_cmd_stop_channel_switch *cmd;
int ret;
wl1271_debug(DEBUG_ACX, "cmd stop channel switch");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd) {
ret = -ENOMEM;
goto out;
}
cmd->role_id = wlvif->role_id;
ret = wl1271_cmd_send(wl, CMD_STOP_CHANNEL_SWICTH, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to stop channel switch command");
goto out_free;
}
out_free:
kfree(cmd);
out:
return ret;
}
/* start dev role and roc on its channel */
int wl12xx_start_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif,
enum nl80211_band band, int channel)
{
int ret;
if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS)))
return -EINVAL;
/* the dev role is already started for p2p mgmt interfaces */
if (!wlcore_is_p2p_mgmt(wlvif)) {
ret = wl12xx_cmd_role_enable(wl,
wl12xx_wlvif_to_vif(wlvif)->addr,
WL1271_ROLE_DEVICE,
&wlvif->dev_role_id);
if (ret < 0)
goto out;
}
ret = wl12xx_cmd_role_start_dev(wl, wlvif, band, channel);
if (ret < 0)
goto out_disable;
ret = wl12xx_roc(wl, wlvif, wlvif->dev_role_id, band, channel);
if (ret < 0)
goto out_stop;
return 0;
out_stop:
wl12xx_cmd_role_stop_dev(wl, wlvif);
out_disable:
if (!wlcore_is_p2p_mgmt(wlvif))
wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
out:
return ret;
}
/* croc dev hlid, and stop the role */
int wl12xx_stop_dev(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
if (WARN_ON(!(wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS)))
return -EINVAL;
/* flush all pending packets */
ret = wlcore_tx_work_locked(wl);
if (ret < 0)
goto out;
if (test_bit(wlvif->dev_role_id, wl->roc_map)) {
ret = wl12xx_croc(wl, wlvif->dev_role_id);
if (ret < 0)
goto out;
}
ret = wl12xx_cmd_role_stop_dev(wl, wlvif);
if (ret < 0)
goto out;
if (!wlcore_is_p2p_mgmt(wlvif)) {
ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
if (ret < 0)
goto out;
}
out:
return ret;
}
int wlcore_cmd_generic_cfg(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 feature, u8 enable, u8 value)
{
struct wlcore_cmd_generic_cfg *cmd;
int ret;
wl1271_debug(DEBUG_CMD,
"cmd generic cfg (role %d feature %d enable %d value %d)",
wlvif->role_id, feature, enable, value);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->role_id = wlvif->role_id;
cmd->feature = feature;
cmd->enable = enable;
cmd->value = value;
ret = wl1271_cmd_send(wl, CMD_GENERIC_CFG, cmd, sizeof(*cmd), 0);
if (ret < 0) {
wl1271_error("failed to send generic cfg command");
goto out_free;
}
out_free:
kfree(cmd);
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_cmd_generic_cfg);
|
linux-master
|
drivers/net/wireless/ti/wlcore/cmd.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wlcore
*
* Copyright (C) 2008-2010 Nokia Corporation
* Copyright (C) 2011-2013 Texas Instruments Inc.
*/
#include <linux/module.h>
#include <linux/firmware.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeirq.h>
#include "wlcore.h"
#include "debug.h"
#include "wl12xx_80211.h"
#include "io.h"
#include "tx.h"
#include "ps.h"
#include "init.h"
#include "debugfs.h"
#include "testmode.h"
#include "vendor_cmd.h"
#include "scan.h"
#include "hw_ops.h"
#include "sysfs.h"
#define WL1271_BOOT_RETRIES 3
#define WL1271_WAKEUP_TIMEOUT 500
static char *fwlog_param;
static int fwlog_mem_blocks = -1;
static int bug_on_recovery = -1;
static int no_recovery = -1;
static void __wl1271_op_remove_interface(struct wl1271 *wl,
struct ieee80211_vif *vif,
bool reset_tx_queues);
static void wlcore_op_stop_locked(struct wl1271 *wl);
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif);
static int wl12xx_set_authorized(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
if (WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS))
return -EINVAL;
if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
return 0;
if (test_and_set_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags))
return 0;
ret = wl12xx_cmd_set_peer_state(wl, wlvif, wlvif->sta.hlid);
if (ret < 0)
return ret;
wl1271_info("Association completed.");
return 0;
}
static void wl1271_reg_notify(struct wiphy *wiphy,
struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wl1271 *wl = hw->priv;
/* copy the current dfs region */
if (request)
wl->dfs_region = request->dfs_region;
wlcore_regdomain_config(wl);
}
static int wl1271_set_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool enable)
{
int ret = 0;
/* we should hold wl->mutex */
ret = wl1271_acx_ps_rx_streaming(wl, wlvif, enable);
if (ret < 0)
goto out;
if (enable)
set_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
else
clear_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags);
out:
return ret;
}
/*
* this function is called when the rx_streaming interval
* has been changed or rx_streaming should be disabled
*/
int wl1271_recalc_rx_streaming(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret = 0;
int period = wl->conf.rx_streaming.interval;
/* don't reconfigure if rx_streaming is disabled */
if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
/* reconfigure/disable according to new streaming_period */
if (period &&
test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
(wl->conf.rx_streaming.always ||
test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
ret = wl1271_set_rx_streaming(wl, wlvif, true);
else {
ret = wl1271_set_rx_streaming(wl, wlvif, false);
/* don't cancel_work_sync since we might deadlock */
del_timer_sync(&wlvif->rx_streaming_timer);
}
out:
return ret;
}
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
int ret;
struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
rx_streaming_enable_work);
struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
if (test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags) ||
!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
(!wl->conf.rx_streaming.always &&
!test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
goto out;
if (!wl->conf.rx_streaming.interval)
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl1271_set_rx_streaming(wl, wlvif, true);
if (ret < 0)
goto out_sleep;
/* stop it after some time of inactivity */
mod_timer(&wlvif->rx_streaming_timer,
jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static void wl1271_rx_streaming_disable_work(struct work_struct *work)
{
int ret;
struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
rx_streaming_disable_work);
struct wl1271 *wl = wlvif->wl;
mutex_lock(&wl->mutex);
if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl1271_set_rx_streaming(wl, wlvif, false);
if (ret)
goto out_sleep;
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static void wl1271_rx_streaming_timer(struct timer_list *t)
{
struct wl12xx_vif *wlvif = from_timer(wlvif, t, rx_streaming_timer);
struct wl1271 *wl = wlvif->wl;
ieee80211_queue_work(wl->hw, &wlvif->rx_streaming_disable_work);
}
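/*
 * Editor's note: the timer callback above runs in atomic context and
 * therefore cannot take wl->mutex or touch runtime PM; it only queues
 * rx_streaming_disable_work, which does the sleeping part.
 */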
/* wl->mutex must be taken */
void wl12xx_rearm_tx_watchdog_locked(struct wl1271 *wl)
{
/* if the watchdog is not armed, don't do anything */
if (wl->tx_allocated_blocks == 0)
return;
cancel_delayed_work(&wl->tx_watchdog_work);
ieee80211_queue_delayed_work(wl->hw, &wl->tx_watchdog_work,
msecs_to_jiffies(wl->conf.tx.tx_watchdog_timeout));
}
static void wlcore_rc_update_work(struct work_struct *work)
{
int ret;
struct wl12xx_vif *wlvif = container_of(work, struct wl12xx_vif,
rc_update_work);
struct wl1271 *wl = wlvif->wl;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
if (ieee80211_vif_is_mesh(vif)) {
ret = wl1271_acx_set_ht_capabilities(wl, &wlvif->rc_ht_cap,
true, wlvif->sta.hlid);
if (ret < 0)
goto out_sleep;
} else {
wlcore_hw_sta_rc_update(wl, wlvif);
}
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static void wl12xx_tx_watchdog_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1271, tx_watchdog_work);
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
/* Tx went out in the meantime - everything is ok */
if (unlikely(wl->tx_allocated_blocks == 0))
goto out;
/*
* if a ROC is in progress, we might not have any Tx for a long
* time (e.g. pending Tx on the non-ROC channels)
*/
if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to ROC",
wl->conf.tx.tx_watchdog_timeout);
wl12xx_rearm_tx_watchdog_locked(wl);
goto out;
}
/*
* if a scan is in progress, we might not have any Tx for a long
* time
*/
if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms due to scan",
wl->conf.tx.tx_watchdog_timeout);
wl12xx_rearm_tx_watchdog_locked(wl);
goto out;
}
/*
* AP might cache a frame for a long time for a sleeping station,
* so rearm the timer if there's an AP interface with stations. If
* Tx is genuinely stuck we will eventually discover it when all
* stations are removed due to inactivity.
*/
if (wl->active_sta_count) {
wl1271_debug(DEBUG_TX, "No Tx (in FW) for %d ms. AP has "
" %d stations",
wl->conf.tx.tx_watchdog_timeout,
wl->active_sta_count);
wl12xx_rearm_tx_watchdog_locked(wl);
goto out;
}
wl1271_error("Tx stuck (in FW) for %d ms. Starting recovery",
wl->conf.tx.tx_watchdog_timeout);
wl12xx_queue_recovery_work(wl);
out:
mutex_unlock(&wl->mutex);
}
static void wlcore_adjust_conf(struct wl1271 *wl)
{
if (fwlog_param) {
if (!strcmp(fwlog_param, "continuous")) {
wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_HOST;
} else if (!strcmp(fwlog_param, "dbgpins")) {
wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS;
wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS;
} else if (!strcmp(fwlog_param, "disable")) {
wl->conf.fwlog.mem_blocks = 0;
wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE;
} else {
wl1271_error("Unknown fwlog parameter %s", fwlog_param);
}
}
if (bug_on_recovery != -1)
wl->conf.recovery.bug_on_recovery = (u8) bug_on_recovery;
if (no_recovery != -1)
wl->conf.recovery.no_recovery = (u8) no_recovery;
}
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u8 hlid, u8 tx_pkts)
{
bool fw_ps;
fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
/*
* Wake up from high-level PS if the STA is asleep with too few
* packets in FW or if the STA is awake.
*/
if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_end(wl, wlvif, hlid);
/*
* Start high-level PS if the STA is asleep with enough blocks in FW.
* Make an exception if this is the only connected link. In this
* case FW-memory congestion is less of a problem.
* Note that a single connected STA means 2*ap_count + 1 active links,
* since we must account for the global and broadcast AP links
* for each AP. The "fw_ps" check assures us the other link is a STA
* connected to the AP. Otherwise the FW would not set the PSM bit.
*/
else if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
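/*
 * Worked example for the threshold above (editor's addition): with one
 * AP interface (ap_count == 1), the global and broadcast links plus a
 * single connected STA give active_link_count == 3, which equals
 * 2*ap_count + 1 - exactly the "only connected link" exception - so
 * high-level PS is not started until a second data link appears.
 */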
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct wl_fw_status *status)
{
unsigned long cur_fw_ps_map;
u8 hlid;
cur_fw_ps_map = status->link_ps_bitmap;
if (wl->ap_fw_ps_map != cur_fw_ps_map) {
wl1271_debug(DEBUG_PSM,
"link ps prev 0x%lx cur 0x%lx changed 0x%lx",
wl->ap_fw_ps_map, cur_fw_ps_map,
wl->ap_fw_ps_map ^ cur_fw_ps_map);
wl->ap_fw_ps_map = cur_fw_ps_map;
}
for_each_set_bit(hlid, wlvif->ap.sta_hlid_map, wl->num_links)
wl12xx_irq_ps_regulate_link(wl, wlvif, hlid,
wl->links[hlid].allocated_pkts);
}
static int wlcore_fw_status(struct wl1271 *wl, struct wl_fw_status *status)
{
struct wl12xx_vif *wlvif;
u32 old_tx_blk_count = wl->tx_blocks_available;
int avail, freed_blocks;
int i;
int ret;
struct wl1271_link *lnk;
ret = wlcore_raw_read_data(wl, REG_RAW_FW_STATUS_ADDR,
wl->raw_fw_status,
wl->fw_status_len, false);
if (ret < 0)
return ret;
wlcore_hw_convert_fw_status(wl, wl->raw_fw_status, wl->fw_status);
wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
"drv_rx_counter = %d, tx_results_counter = %d)",
status->intr,
status->fw_rx_counter,
status->drv_rx_counter,
status->tx_results_counter);
for (i = 0; i < NUM_TX_QUEUES; i++) {
/* prevent wrap-around in freed-packets counter */
wl->tx_allocated_pkts[i] -=
(status->counters.tx_released_pkts[i] -
wl->tx_pkts_freed[i]) & 0xff;
wl->tx_pkts_freed[i] = status->counters.tx_released_pkts[i];
}
for_each_set_bit(i, wl->links_map, wl->num_links) {
u8 diff;
lnk = &wl->links[i];
/* prevent wrap-around in freed-packets counter */
diff = (status->counters.tx_lnk_free_pkts[i] -
lnk->prev_freed_pkts) & 0xff;
if (diff == 0)
continue;
lnk->allocated_pkts -= diff;
lnk->prev_freed_pkts = status->counters.tx_lnk_free_pkts[i];
/* accumulate the prev_freed_pkts counter */
lnk->total_freed_pkts += diff;
}
/* prevent wrap-around in total blocks counter */
if (likely(wl->tx_blocks_freed <= status->total_released_blks))
freed_blocks = status->total_released_blks -
wl->tx_blocks_freed;
else
freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
status->total_released_blks;
wl->tx_blocks_freed = status->total_released_blks;
wl->tx_allocated_blocks -= freed_blocks;
/*
* If the FW freed some blocks:
* If we still have allocated blocks - re-arm the timer, Tx is
* not stuck. Otherwise, cancel the timer (no Tx currently).
*/
if (freed_blocks) {
if (wl->tx_allocated_blocks)
wl12xx_rearm_tx_watchdog_locked(wl);
else
cancel_delayed_work(&wl->tx_watchdog_work);
}
avail = status->tx_total - wl->tx_allocated_blocks;
/*
* The FW might change the total number of TX memblocks before
* we get a notification about blocks being released. Thus, the
* available blocks calculation might yield a temporary result
* which is lower than the actual available blocks. Keeping in
* mind that only blocks that were allocated can be moved from
* TX to RX, tx_blocks_available should never decrease here.
*/
wl->tx_blocks_available = max((int)wl->tx_blocks_available,
avail);
/* if more blocks are available now, tx work can be scheduled */
if (wl->tx_blocks_available > old_tx_blk_count)
clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
/* for AP update num of allocated TX blocks per link and ps status */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
wl12xx_irq_update_links_status(wl, wlvif, status);
}
/* update the host-chipset time offset */
wl->time_offset = (ktime_get_boottime_ns() >> 10) -
(s64)(status->fw_localtime);
wl->fw_fast_lnk_map = status->link_fast_bitmap;
return 0;
}
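/*
 * Illustrative sketch (editor's addition, not part of the original file):
 * the FW counters read in wlcore_fw_status() are narrow and wrap. For the
 * per-link u8 counters, subtraction modulo 256 recovers the delta even
 * across a wrap; the 0x100000000LL term plays the same role for the u32
 * total-released-blocks counter, using 64-bit arithmetic.
 */
#ifdef WLCORE_DOC_EXAMPLES /* hypothetical guard, never defined */
static u8 wlcore_example_u8_delta(u8 cur, u8 prev)
{
	/* correct even across a wrap, e.g. prev = 250, cur = 4 -> 10 freed */
	return (cur - prev) & 0xff;
}
#endif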
static void wl1271_flush_deferred_work(struct wl1271 *wl)
{
struct sk_buff *skb;
/* Pass all received frames to the network stack */
while ((skb = skb_dequeue(&wl->deferred_rx_queue)))
ieee80211_rx_ni(wl->hw, skb);
/* Return sent skbs to the network stack */
while ((skb = skb_dequeue(&wl->deferred_tx_queue)))
ieee80211_tx_status_ni(wl->hw, skb);
}
static void wl1271_netstack_work(struct work_struct *work)
{
struct wl1271 *wl =
container_of(work, struct wl1271, netstack_work);
do {
wl1271_flush_deferred_work(wl);
} while (skb_queue_len(&wl->deferred_rx_queue));
}
#define WL1271_IRQ_MAX_LOOPS 256
static int wlcore_irq_locked(struct wl1271 *wl)
{
int ret = 0;
u32 intr;
int loopcount = WL1271_IRQ_MAX_LOOPS;
bool run_tx_queue = true;
bool done = false;
unsigned int defer_count;
unsigned long flags;
/*
* In case an edge-triggered interrupt must be used, we cannot iterate
* more than once without introducing race conditions with the hardirq.
*/
if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
loopcount = 1;
wl1271_debug(DEBUG_IRQ, "IRQ work");
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
while (!done && loopcount--) {
smp_mb__after_atomic();
ret = wlcore_fw_status(wl, wl->fw_status);
if (ret < 0)
goto err_ret;
wlcore_hw_tx_immediate_compl(wl);
intr = wl->fw_status->intr;
intr &= WLCORE_ALL_INTR_MASK;
if (!intr) {
done = true;
continue;
}
if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
wl1271_error("HW watchdog interrupt received! starting recovery.");
wl->watchdog_recovery = true;
ret = -EIO;
/* restarting the chip. ignore any other interrupt. */
goto err_ret;
}
if (unlikely(intr & WL1271_ACX_SW_INTR_WATCHDOG)) {
wl1271_error("SW watchdog interrupt received! "
"starting recovery.");
wl->watchdog_recovery = true;
ret = -EIO;
/* restarting the chip. ignore any other interrupt. */
goto err_ret;
}
if (likely(intr & WL1271_ACX_INTR_DATA)) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
ret = wlcore_rx(wl, wl->fw_status);
if (ret < 0)
goto err_ret;
/* Check if any tx blocks were freed */
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
if (!wl1271_tx_total_queue_count(wl))
run_tx_queue = false;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
/*
* In order to avoid starvation of the TX path,
* call the work function directly.
*/
if (run_tx_queue) {
ret = wlcore_tx_work_locked(wl);
if (ret < 0)
goto err_ret;
}
}
/* check for tx results */
ret = wlcore_hw_tx_delayed_compl(wl);
if (ret < 0)
goto err_ret;
/* Make sure the deferred queues don't get too long */
defer_count = skb_queue_len(&wl->deferred_tx_queue) +
skb_queue_len(&wl->deferred_rx_queue);
if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
wl1271_flush_deferred_work(wl);
}
if (intr & WL1271_ACX_INTR_EVENT_A) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
ret = wl1271_event_handle(wl, 0);
if (ret < 0)
goto err_ret;
}
if (intr & WL1271_ACX_INTR_EVENT_B) {
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
ret = wl1271_event_handle(wl, 1);
if (ret < 0)
goto err_ret;
}
if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
wl1271_debug(DEBUG_IRQ,
"WL1271_ACX_INTR_INIT_COMPLETE");
if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
}
err_ret:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
return ret;
}
static irqreturn_t wlcore_irq(int irq, void *cookie)
{
int ret;
unsigned long flags;
struct wl1271 *wl = cookie;
bool queue_tx_work = true;
set_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
/* complete the ELP completion */
if (test_bit(WL1271_FLAG_IN_ELP, &wl->flags)) {
spin_lock_irqsave(&wl->wl_lock, flags);
if (wl->elp_compl)
complete(wl->elp_compl);
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
if (test_bit(WL1271_FLAG_SUSPENDED, &wl->flags)) {
/* don't enqueue work right now; mark it as pending */
set_bit(WL1271_FLAG_PENDING_WORK, &wl->flags);
wl1271_debug(DEBUG_IRQ, "should not enqueue work");
spin_lock_irqsave(&wl->wl_lock, flags);
disable_irq_nosync(wl->irq);
pm_wakeup_event(wl->dev, 0);
spin_unlock_irqrestore(&wl->wl_lock, flags);
goto out_handled;
}
/* TX might be handled here, avoid redundant work */
set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
cancel_work_sync(&wl->tx_work);
mutex_lock(&wl->mutex);
ret = wlcore_irq_locked(wl);
if (ret)
wl12xx_queue_recovery_work(wl);
/* In case TX was not handled in wlcore_irq_locked(), queue TX work */
clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags)) {
if (spin_trylock_irqsave(&wl->wl_lock, flags)) {
if (!wl1271_tx_total_queue_count(wl))
queue_tx_work = false;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
if (queue_tx_work)
ieee80211_queue_work(wl->hw, &wl->tx_work);
}
mutex_unlock(&wl->mutex);
out_handled:
clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
return IRQ_HANDLED;
}
struct vif_counter_data {
u8 counter;
struct ieee80211_vif *cur_vif;
bool cur_vif_running;
};
static void wl12xx_vif_count_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct vif_counter_data *counter = data;
counter->counter++;
if (counter->cur_vif == vif)
counter->cur_vif_running = true;
}
/* caller must not hold wl->mutex, as it might deadlock */
static void wl12xx_get_vif_count(struct ieee80211_hw *hw,
struct ieee80211_vif *cur_vif,
struct vif_counter_data *data)
{
memset(data, 0, sizeof(*data));
data->cur_vif = cur_vif;
ieee80211_iterate_active_interfaces(hw, IEEE80211_IFACE_ITER_RESUME_ALL,
wl12xx_vif_count_iter, data);
}
static int wl12xx_fetch_firmware(struct wl1271 *wl, bool plt)
{
const struct firmware *fw;
const char *fw_name;
enum wl12xx_fw_type fw_type;
int ret;
if (plt) {
fw_type = WL12XX_FW_TYPE_PLT;
fw_name = wl->plt_fw_name;
} else {
/*
* we can't call wl12xx_get_vif_count() here because
* wl->mutex is taken, so use the cached last_vif_count value
*/
if (wl->last_vif_count > 1 && wl->mr_fw_name) {
fw_type = WL12XX_FW_TYPE_MULTI;
fw_name = wl->mr_fw_name;
} else {
fw_type = WL12XX_FW_TYPE_NORMAL;
fw_name = wl->sr_fw_name;
}
}
if (wl->fw_type == fw_type)
return 0;
wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name);
ret = request_firmware(&fw, fw_name, wl->dev);
if (ret < 0) {
wl1271_error("could not get firmware %s: %d", fw_name, ret);
return ret;
}
if (fw->size % 4) {
wl1271_error("firmware size is not multiple of 32 bits: %zu",
fw->size);
ret = -EILSEQ;
goto out;
}
vfree(wl->fw);
wl->fw_type = WL12XX_FW_TYPE_NONE;
wl->fw_len = fw->size;
wl->fw = vmalloc(wl->fw_len);
if (!wl->fw) {
wl1271_error("could not allocate memory for the firmware");
ret = -ENOMEM;
goto out;
}
memcpy(wl->fw, fw->data, wl->fw_len);
ret = 0;
wl->fw_type = fw_type;
out:
release_firmware(fw);
return ret;
}
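/*
 * Editor's note: wl12xx_fetch_firmware() is effectively a cache - the
 * early "wl->fw_type == fw_type" return means the image is re-requested
 * only when switching between the PLT, single-role and multi-role
 * firmware variants, not on every boot.
 */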
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
/* Avoid a recursive recovery */
if (wl->state == WLCORE_STATE_ON) {
WARN_ON(!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY,
&wl->flags));
wl->state = WLCORE_STATE_RESTARTING;
set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
}
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
{
size_t len;
/* Make sure we have enough room */
len = min_t(size_t, maxlen, PAGE_SIZE - wl->fwlog_size);
/* Fill the FW log file, consumed by the sysfs fwlog entry */
memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
wl->fwlog_size += len;
return len;
}
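/*
 * Editor's note: wl->fwlog is a single page, so the min_t() clamp above
 * guarantees fwlog_size never exceeds PAGE_SIZE; once the page is full,
 * further copies degenerate to zero-length writes.
 */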
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
u32 end_of_log = 0;
int error;
if (wl->quirks & WLCORE_QUIRK_FWLOG_NOT_IMPLEMENTED)
return;
wl1271_info("Reading FW panic log");
/*
* Make sure the chip is awake and the logger isn't active.
* Do not send a stop fwlog command if the fw is hung or if
* dbgpins are used (due to some fw bug).
*/
error = pm_runtime_resume_and_get(wl->dev);
if (error < 0)
return;
if (!wl->watchdog_recovery &&
wl->conf.fwlog.output != WL12XX_FWLOG_OUTPUT_DBG_PINS)
wl12xx_cmd_stop_fwlog(wl);
/* Traverse the memory blocks linked list */
do {
end_of_log = wlcore_event_fw_logger(wl);
if (end_of_log == 0) {
msleep(100);
end_of_log = wlcore_event_fw_logger(wl);
}
} while (end_of_log != 0);
}
static void wlcore_save_freed_pkts(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 hlid, struct ieee80211_sta *sta)
{
struct wl1271_station *wl_sta;
u32 sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING;
wl_sta = (void *)sta->drv_priv;
wl_sta->total_freed_pkts = wl->links[hlid].total_freed_pkts;
/*
* increment the initial seq number on recovery to account for
* transmitted packets that we haven't yet got in the FW status
*/
if (wlvif->encryption_type == KEY_GEM)
sqn_recovery_padding = WL1271_TX_SQN_POST_RECOVERY_PADDING_GEM;
if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
wl_sta->total_freed_pkts += sqn_recovery_padding;
}
static void wlcore_save_freed_pkts_addr(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u8 hlid, const u8 *addr)
{
struct ieee80211_sta *sta;
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (WARN_ON(hlid == WL12XX_INVALID_LINK_ID ||
is_zero_ether_addr(addr)))
return;
rcu_read_lock();
sta = ieee80211_find_sta(vif, addr);
if (sta)
wlcore_save_freed_pkts(wl, wlvif, hlid, sta);
rcu_read_unlock();
}
static void wlcore_print_recovery(struct wl1271 *wl)
{
u32 pc = 0;
u32 hint_sts = 0;
int ret;
wl1271_info("Hardware recovery in progress. FW ver: %s",
wl->chip.fw_ver_str);
/* change partitions momentarily so we can read the FW pc */
ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
if (ret < 0)
return;
ret = wlcore_read_reg(wl, REG_PC_ON_RECOVERY, &pc);
if (ret < 0)
return;
ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &hint_sts);
if (ret < 0)
return;
wl1271_info("pc: 0x%x, hint_sts: 0x%08x count: %d",
pc, hint_sts, ++wl->recovery_count);
wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
}
static void wl1271_recovery_work(struct work_struct *work)
{
struct wl1271 *wl =
container_of(work, struct wl1271, recovery_work);
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
int error;
mutex_lock(&wl->mutex);
if (wl->state == WLCORE_STATE_OFF || wl->plt)
goto out_unlock;
error = pm_runtime_resume_and_get(wl->dev);
if (error < 0)
wl1271_warning("Enable for recovery failed");
wlcore_disable_interrupts_nosync(wl);
if (!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags)) {
if (wl->conf.fwlog.output == WL12XX_FWLOG_OUTPUT_HOST)
wl12xx_read_fwlog_panic(wl);
wlcore_print_recovery(wl);
}
BUG_ON(wl->conf.recovery.bug_on_recovery &&
!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags));
clear_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
if (wl->conf.recovery.no_recovery) {
wl1271_info("No recovery (chosen on module load). Fw will remain stuck.");
goto out_unlock;
}
/* Prevent spurious TX during FW restart */
wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
/* reboot the chipset */
while (!list_empty(&wl->wlvif_list)) {
wlvif = list_first_entry(&wl->wlvif_list,
struct wl12xx_vif, list);
vif = wl12xx_wlvif_to_vif(wlvif);
if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
wlcore_save_freed_pkts_addr(wl, wlvif, wlvif->sta.hlid,
vif->bss_conf.bssid);
}
__wl1271_op_remove_interface(wl, vif, false);
}
wlcore_op_stop_locked(wl);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
ieee80211_restart_hw(wl->hw);
/*
* It's safe to enable TX now - the queues are stopped after a request
* to restart the HW.
*/
wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FW_RESTART);
out_unlock:
wl->watchdog_recovery = false;
clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);
mutex_unlock(&wl->mutex);
}
static int wlcore_fw_wakeup(struct wl1271 *wl)
{
return wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
}
static int wl1271_setup(struct wl1271 *wl)
{
wl->raw_fw_status = kzalloc(wl->fw_status_len, GFP_KERNEL);
if (!wl->raw_fw_status)
goto err;
wl->fw_status = kzalloc(sizeof(*wl->fw_status), GFP_KERNEL);
if (!wl->fw_status)
goto err;
wl->tx_res_if = kzalloc(sizeof(*wl->tx_res_if), GFP_KERNEL);
if (!wl->tx_res_if)
goto err;
return 0;
err:
kfree(wl->fw_status);
kfree(wl->raw_fw_status);
return -ENOMEM;
}
static int wl12xx_set_power_on(struct wl1271 *wl)
{
int ret;
msleep(WL1271_PRE_POWER_ON_SLEEP);
ret = wl1271_power_on(wl);
if (ret < 0)
goto out;
msleep(WL1271_POWER_ON_SLEEP);
wl1271_io_reset(wl);
wl1271_io_init(wl);
ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
if (ret < 0)
goto fail;
/* ELP module wake up */
ret = wlcore_fw_wakeup(wl);
if (ret < 0)
goto fail;
out:
return ret;
fail:
wl1271_power_off(wl);
return ret;
}
static int wl12xx_chip_wakeup(struct wl1271 *wl, bool plt)
{
int ret = 0;
ret = wl12xx_set_power_on(wl);
if (ret < 0)
goto out;
/*
* For wl127x based devices we could use the default block
* size (512 bytes), but due to a bug in the sdio driver, we
* need to set it explicitly after the chip is powered on. To
* simplify the code and since the performance impact is
* negligible, we use the same block size for all different
* chip types.
*
* Check if the bus supports blocksize alignment and, if it
* doesn't, make sure we don't have the quirk.
*/
if (!wl1271_set_block_size(wl))
wl->quirks &= ~WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN;
/* TODO: make sure the lower driver has set things up correctly */
ret = wl1271_setup(wl);
if (ret < 0)
goto out;
ret = wl12xx_fetch_firmware(wl, plt);
if (ret < 0) {
kfree(wl->fw_status);
kfree(wl->raw_fw_status);
kfree(wl->tx_res_if);
}
out:
return ret;
}
int wl1271_plt_start(struct wl1271 *wl, const enum plt_mode plt_mode)
{
int retries = WL1271_BOOT_RETRIES;
struct wiphy *wiphy = wl->hw->wiphy;
static const char* const PLT_MODE[] = {
"PLT_OFF",
"PLT_ON",
"PLT_FEM_DETECT",
"PLT_CHIP_AWAKE"
};
int ret;
mutex_lock(&wl->mutex);
wl1271_notice("power up");
if (wl->state != WLCORE_STATE_OFF) {
wl1271_error("cannot go into PLT state because not "
"in off state: %d", wl->state);
ret = -EBUSY;
goto out;
}
/* Indicate to lower levels that we are now in PLT mode */
wl->plt = true;
wl->plt_mode = plt_mode;
while (retries) {
retries--;
ret = wl12xx_chip_wakeup(wl, true);
if (ret < 0)
goto power_off;
if (plt_mode != PLT_CHIP_AWAKE) {
ret = wl->ops->plt_init(wl);
if (ret < 0)
goto power_off;
}
wl->state = WLCORE_STATE_ON;
wl1271_notice("firmware booted in PLT mode %s (%s)",
PLT_MODE[plt_mode],
wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
wiphy->hw_version = wl->chip.id;
strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
sizeof(wiphy->fw_version));
goto out;
power_off:
wl1271_power_off(wl);
}
wl->plt = false;
wl->plt_mode = PLT_OFF;
wl1271_error("firmware boot in PLT mode failed despite %d retries",
WL1271_BOOT_RETRIES);
out:
mutex_unlock(&wl->mutex);
return ret;
}
int wl1271_plt_stop(struct wl1271 *wl)
{
int ret = 0;
wl1271_notice("power down");
/*
* Interrupts must be disabled before setting the state to OFF.
* Otherwise, the interrupt handler might be called and exit without
* reading the interrupt status.
*/
wlcore_disable_interrupts(wl);
mutex_lock(&wl->mutex);
if (!wl->plt) {
mutex_unlock(&wl->mutex);
/*
* This will not necessarily enable interrupts as interrupts
* may have been disabled when op_stop was called. It will,
* however, balance the above call to disable_interrupts().
*/
wlcore_enable_interrupts(wl);
wl1271_error("cannot power down because not in PLT "
"state: %d", wl->state);
ret = -EBUSY;
goto out;
}
mutex_unlock(&wl->mutex);
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->recovery_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
mutex_lock(&wl->mutex);
wl1271_power_off(wl);
wl->flags = 0;
wl->sleep_auth = WL1271_PSM_ILLEGAL;
wl->state = WLCORE_STATE_OFF;
wl->plt = false;
wl->plt_mode = PLT_OFF;
wl->rx_counter = 0;
mutex_unlock(&wl->mutex);
out:
return ret;
}
static void wl1271_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct wl1271 *wl = hw->priv;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_vif *vif = info->control.vif;
struct wl12xx_vif *wlvif = NULL;
unsigned long flags;
int q, mapping;
u8 hlid;
if (!vif) {
wl1271_debug(DEBUG_TX, "DROP skb with no vif");
ieee80211_free_txskb(hw, skb);
return;
}
wlvif = wl12xx_vif_to_data(vif);
mapping = skb_get_queue_mapping(skb);
q = wl1271_tx_get_queue(mapping);
hlid = wl12xx_tx_get_hlid(wl, wlvif, skb, control->sta);
spin_lock_irqsave(&wl->wl_lock, flags);
/*
* drop the packet if the link is invalid or the queue is stopped
* for any reason but watermark. Watermark is a "soft"-stop so we
* allow these packets through.
*/
if (hlid == WL12XX_INVALID_LINK_ID ||
(!test_bit(hlid, wlvif->links_map)) ||
(wlcore_is_queue_stopped_locked(wl, wlvif, q) &&
!wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK))) {
wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d", hlid, q);
ieee80211_free_txskb(hw, skb);
goto out;
}
wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d len %d",
hlid, q, skb->len);
skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
wl->tx_queue_count[q]++;
wlvif->tx_queue_count[q]++;
/*
* The workqueue is slow to process the tx_queue and we need to stop
* the queue here, otherwise the queue will get too long.
*/
if (wlvif->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK &&
!wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK)) {
wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
wlcore_stop_queue_locked(wl, wlvif, q,
WLCORE_QUEUE_STOP_REASON_WATERMARK);
}
/*
* The chip-specific setup must run before the first TX packet -
* before that, the tx_work will not be initialized!
*/
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
!test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
ieee80211_queue_work(wl->hw, &wl->tx_work);
out:
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
unsigned long flags;
int q;
/* no need to queue a new dummy packet if one is already pending */
if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
return 0;
q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));
spin_lock_irqsave(&wl->wl_lock, flags);
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
wl->tx_queue_count[q]++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
/* The FW is low on RX memory blocks, so send the dummy packet asap */
if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
return wlcore_tx_work_locked(wl);
/*
* If the FW TX is busy, TX work will be scheduled by the threaded
* interrupt handler function
*/
return 0;
}
/*
* The size of the dummy packet should be at least 1400 bytes. However, in
* order to minimize the number of bus transactions, aligning it to 512-byte
* boundaries could be beneficial, performance-wise
*/
#define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512))
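/*
 * Editor's note: ALIGN(1400, 512) rounds 1400 up to the next multiple of
 * 512, so the dummy packet occupies 1536 bytes on the bus.
 */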
static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl)
{
struct sk_buff *skb;
struct ieee80211_hdr_3addr *hdr;
unsigned int dummy_packet_size;
dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE -
sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr);
skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE);
if (!skb) {
wl1271_warning("Failed to allocate a dummy packet skb");
return NULL;
}
skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr));
hdr = skb_put_zero(skb, sizeof(*hdr));
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_NULLFUNC |
IEEE80211_FCTL_TODS);
skb_put_zero(skb, dummy_packet_size);
/* Dummy packets require the TID to be management */
skb->priority = WL1271_TID_MGMT;
/* Initialize all fields that might be used */
skb_set_queue_mapping(skb, 0);
memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info));
return skb;
}
static int
wl1271_validate_wowlan_pattern(struct cfg80211_pkt_pattern *p)
{
int num_fields = 0, in_field = 0, fields_size = 0;
int i, pattern_len = 0;
if (!p->mask) {
wl1271_warning("No mask in WoWLAN pattern");
return -EINVAL;
}
/*
* The pattern is broken up into segments of bytes at different offsets
* that need to be checked by the FW filter. Each segment is called
* a field in the FW API. We verify that the total number of fields
* required for this pattern won't exceed FW limits (8)
* as well as the total fields buffer won't exceed the FW limit.
* Note that if there's a pattern which crosses Ethernet/IP header
* boundary a new field is required.
*/
for (i = 0; i < p->pattern_len; i++) {
if (test_bit(i, (unsigned long *)p->mask)) {
if (!in_field) {
in_field = 1;
pattern_len = 1;
} else {
if (i == WL1271_RX_FILTER_ETH_HEADER_SIZE) {
num_fields++;
fields_size += pattern_len +
RX_FILTER_FIELD_OVERHEAD;
pattern_len = 1;
} else
pattern_len++;
}
} else {
if (in_field) {
in_field = 0;
fields_size += pattern_len +
RX_FILTER_FIELD_OVERHEAD;
num_fields++;
}
}
}
if (in_field) {
fields_size += pattern_len + RX_FILTER_FIELD_OVERHEAD;
num_fields++;
}
if (num_fields > WL1271_RX_FILTER_MAX_FIELDS) {
wl1271_warning("RX Filter too complex. Too many segments");
return -EINVAL;
}
if (fields_size > WL1271_RX_FILTER_MAX_FIELDS_SIZE) {
wl1271_warning("RX filter pattern is too big");
return -E2BIG;
}
return 0;
}
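/*
 * Worked example for the validation above (editor's addition): a mask
 * selecting a contiguous run of bytes that straddles
 * WL1271_RX_FILTER_ETH_HEADER_SIZE is counted as two FW fields - one in
 * the Ethernet header and one in the IP header - each contributing
 * RX_FILTER_FIELD_OVERHEAD to fields_size.
 */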
struct wl12xx_rx_filter *wl1271_rx_filter_alloc(void)
{
return kzalloc(sizeof(struct wl12xx_rx_filter), GFP_KERNEL);
}
void wl1271_rx_filter_free(struct wl12xx_rx_filter *filter)
{
int i;
if (filter == NULL)
return;
for (i = 0; i < filter->num_fields; i++)
kfree(filter->fields[i].pattern);
kfree(filter);
}
int wl1271_rx_filter_alloc_field(struct wl12xx_rx_filter *filter,
u16 offset, u8 flags,
const u8 *pattern, u8 len)
{
struct wl12xx_rx_filter_field *field;
if (filter->num_fields == WL1271_RX_FILTER_MAX_FIELDS) {
wl1271_warning("Max fields per RX filter. can't alloc another");
return -EINVAL;
}
field = &filter->fields[filter->num_fields];
field->pattern = kmemdup(pattern, len, GFP_KERNEL);
if (!field->pattern) {
wl1271_warning("Failed to allocate RX filter pattern");
return -ENOMEM;
}
filter->num_fields++;
field->offset = cpu_to_le16(offset);
field->flags = flags;
field->len = len;
return 0;
}
int wl1271_rx_filter_get_fields_size(struct wl12xx_rx_filter *filter)
{
int i, fields_size = 0;
for (i = 0; i < filter->num_fields; i++)
fields_size += filter->fields[i].len +
sizeof(struct wl12xx_rx_filter_field) -
sizeof(u8 *);
return fields_size;
}
void wl1271_rx_filter_flatten_fields(struct wl12xx_rx_filter *filter,
u8 *buf)
{
int i;
struct wl12xx_rx_filter_field *field;
for (i = 0; i < filter->num_fields; i++) {
field = (struct wl12xx_rx_filter_field *)buf;
field->offset = filter->fields[i].offset;
field->flags = filter->fields[i].flags;
field->len = filter->fields[i].len;
memcpy(&field->pattern, filter->fields[i].pattern, field->len);
buf += sizeof(struct wl12xx_rx_filter_field) -
sizeof(u8 *) + field->len;
}
}
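/*
 * Editor's note on the serialized layout above: each flattened field is
 * the wl12xx_rx_filter_field struct minus its pattern pointer, followed
 * immediately by the pattern bytes inline - which is why both the size
 * calculation and the copy subtract sizeof(u8 *).
 */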
/*
* Allocates an RX filter, returned through f,
* which needs to be freed using wl1271_rx_filter_free()
*/
static int
wl1271_convert_wowlan_pattern_to_rx_filter(struct cfg80211_pkt_pattern *p,
struct wl12xx_rx_filter **f)
{
int i, j, ret = 0;
struct wl12xx_rx_filter *filter;
u16 offset;
u8 flags, len;
filter = wl1271_rx_filter_alloc();
if (!filter) {
wl1271_warning("Failed to alloc rx filter");
ret = -ENOMEM;
goto err;
}
i = 0;
while (i < p->pattern_len) {
if (!test_bit(i, (unsigned long *)p->mask)) {
i++;
continue;
}
for (j = i; j < p->pattern_len; j++) {
if (!test_bit(j, (unsigned long *)p->mask))
break;
if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE &&
j >= WL1271_RX_FILTER_ETH_HEADER_SIZE)
break;
}
if (i < WL1271_RX_FILTER_ETH_HEADER_SIZE) {
offset = i;
flags = WL1271_RX_FILTER_FLAG_ETHERNET_HEADER;
} else {
offset = i - WL1271_RX_FILTER_ETH_HEADER_SIZE;
flags = WL1271_RX_FILTER_FLAG_IP_HEADER;
}
len = j - i;
ret = wl1271_rx_filter_alloc_field(filter,
offset,
flags,
&p->pattern[i], len);
if (ret)
goto err;
i = j;
}
filter->action = FILTER_SIGNAL;
*f = filter;
return 0;
err:
wl1271_rx_filter_free(filter);
*f = NULL;
return ret;
}
static int wl1271_configure_wowlan(struct wl1271 *wl,
struct cfg80211_wowlan *wow)
{
int i, ret;
if (!wow || wow->any || !wow->n_patterns) {
ret = wl1271_acx_default_rx_filter_enable(wl, 0,
FILTER_SIGNAL);
if (ret)
goto out;
ret = wl1271_rx_filter_clear_all(wl);
if (ret)
goto out;
return 0;
}
if (WARN_ON(wow->n_patterns > WL1271_MAX_RX_FILTERS))
return -EINVAL;
/* Validate all incoming patterns before clearing current FW state */
for (i = 0; i < wow->n_patterns; i++) {
ret = wl1271_validate_wowlan_pattern(&wow->patterns[i]);
if (ret) {
wl1271_warning("Bad wowlan pattern %d", i);
return ret;
}
}
ret = wl1271_acx_default_rx_filter_enable(wl, 0, FILTER_SIGNAL);
if (ret)
goto out;
ret = wl1271_rx_filter_clear_all(wl);
if (ret)
goto out;
/* Translate WoWLAN patterns into filters */
for (i = 0; i < wow->n_patterns; i++) {
struct cfg80211_pkt_pattern *p;
struct wl12xx_rx_filter *filter = NULL;
p = &wow->patterns[i];
ret = wl1271_convert_wowlan_pattern_to_rx_filter(p, &filter);
if (ret) {
wl1271_warning("Failed to create an RX filter from "
"wowlan pattern %d", i);
goto out;
}
ret = wl1271_rx_filter_enable(wl, i, 1, filter);
wl1271_rx_filter_free(filter);
if (ret)
goto out;
}
ret = wl1271_acx_default_rx_filter_enable(wl, 1, FILTER_DROP);
out:
return ret;
}
static int wl1271_configure_suspend_sta(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct cfg80211_wowlan *wow)
{
int ret = 0;
if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
ret = wl1271_configure_wowlan(wl, wow);
if (ret < 0)
goto out;
if ((wl->conf.conn.suspend_wake_up_event ==
wl->conf.conn.wake_up_event) &&
(wl->conf.conn.suspend_listen_interval ==
wl->conf.conn.listen_interval))
goto out;
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.suspend_wake_up_event,
wl->conf.conn.suspend_listen_interval);
if (ret < 0)
wl1271_error("suspend: set wake up conditions failed: %d", ret);
out:
return ret;
}
static int wl1271_configure_suspend_ap(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct cfg80211_wowlan *wow)
{
int ret = 0;
if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
goto out;
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
if (ret < 0)
goto out;
ret = wl1271_configure_wowlan(wl, wow);
if (ret < 0)
goto out;
out:
return ret;
}
static int wl1271_configure_suspend(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct cfg80211_wowlan *wow)
{
if (wlvif->bss_type == BSS_TYPE_STA_BSS)
return wl1271_configure_suspend_sta(wl, wlvif, wow);
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
return wl1271_configure_suspend_ap(wl, wlvif, wow);
return 0;
}
static void wl1271_configure_resume(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret = 0;
bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
if ((!is_ap) && (!is_sta))
return;
if ((is_sta && !test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) ||
(is_ap && !test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)))
return;
wl1271_configure_wowlan(wl, NULL);
if (is_sta) {
if ((wl->conf.conn.suspend_wake_up_event ==
wl->conf.conn.wake_up_event) &&
(wl->conf.conn.suspend_listen_interval ==
wl->conf.conn.listen_interval))
return;
ret = wl1271_acx_wake_up_conditions(wl, wlvif,
wl->conf.conn.wake_up_event,
wl->conf.conn.listen_interval);
if (ret < 0)
wl1271_error("resume: wake up conditions failed: %d",
ret);
} else if (is_ap) {
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
}
}
static int __maybe_unused wl1271_op_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wow)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
unsigned long flags;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
WARN_ON(!wow);
/* we want to perform the recovery before suspending */
if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
wl1271_warning("postponing suspend to perform recovery");
return -EBUSY;
}
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0) {
mutex_unlock(&wl->mutex);
return ret;
}
wl->wow_enabled = true;
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
continue;
ret = wl1271_configure_suspend(wl, wlvif, wow);
if (ret < 0) {
goto out_sleep;
}
}
/* disable fast link flow control notifications from FW */
ret = wlcore_hw_interrupt_notify(wl, false);
if (ret < 0)
goto out_sleep;
/* if filtering is enabled, configure the FW to drop all RX BA frames */
ret = wlcore_hw_rx_ba_filter(wl,
!!wl->conf.conn.suspend_rx_ba_activity);
if (ret < 0)
goto out_sleep;
out_sleep:
pm_runtime_put_noidle(wl->dev);
mutex_unlock(&wl->mutex);
if (ret < 0) {
wl1271_warning("couldn't prepare device to suspend");
return ret;
}
/* flush any remaining work */
wl1271_debug(DEBUG_MAC80211, "flushing remaining works");
flush_work(&wl->tx_work);
/*
* Cancel the watchdog even if the above tx_flush failed. We will detect
* it on resume anyway.
*/
cancel_delayed_work(&wl->tx_watchdog_work);
/*
* set suspended flag to avoid triggering a new threaded_irq
* work.
*/
spin_lock_irqsave(&wl->wl_lock, flags);
set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
spin_unlock_irqrestore(&wl->wl_lock, flags);
return pm_runtime_force_suspend(wl->dev);
}
static int __maybe_unused wl1271_op_resume(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
unsigned long flags;
bool run_irq_work = false, pending_recovery;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
wl->wow_enabled);
WARN_ON(!wl->wow_enabled);
ret = pm_runtime_force_resume(wl->dev);
if (ret < 0) {
wl1271_error("ELP wakeup failure!");
goto out_sleep;
}
/*
* re-enable irq_work enqueuing, and call irq_work directly if
* there is a pending work.
*/
spin_lock_irqsave(&wl->wl_lock, flags);
clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
run_irq_work = true;
spin_unlock_irqrestore(&wl->wl_lock, flags);
mutex_lock(&wl->mutex);
/* test the recovery flag before calling any SDIO functions */
pending_recovery = test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
&wl->flags);
if (run_irq_work) {
wl1271_debug(DEBUG_MAC80211,
"run postponed irq_work directly");
/* don't talk to the HW if recovery is pending */
if (!pending_recovery) {
ret = wlcore_irq_locked(wl);
if (ret)
wl12xx_queue_recovery_work(wl);
}
wlcore_enable_interrupts(wl);
}
if (pending_recovery) {
wl1271_warning("queuing forgotten recovery on resume");
ieee80211_queue_work(wl->hw, &wl->recovery_work);
goto out_sleep;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
continue;
wl1271_configure_resume(wl, wlvif);
}
ret = wlcore_hw_interrupt_notify(wl, true);
if (ret < 0)
goto out_sleep;
/* if filtering is enabled, configure the FW to drop all RX BA frames */
ret = wlcore_hw_rx_ba_filter(wl, false);
if (ret < 0)
goto out_sleep;
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
wl->wow_enabled = false;
/*
* Set a flag to re-init the watchdog on the first Tx after resume.
* That way we avoid possible conditions where Tx-complete interrupts
* fail to arrive and we perform a spurious recovery.
*/
set_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags);
mutex_unlock(&wl->mutex);
return 0;
}
static int wl1271_op_start(struct ieee80211_hw *hw)
{
wl1271_debug(DEBUG_MAC80211, "mac80211 start");
/*
* We have to delay the booting of the hardware because
* we need to know the local MAC address before downloading and
* initializing the firmware. The MAC address cannot be changed
* after boot, and without the proper MAC address, the firmware
* will not function properly.
*
* The MAC address is first known when the corresponding interface
* is added. That is where we will initialize the hardware.
*/
return 0;
}
static void wlcore_op_stop_locked(struct wl1271 *wl)
{
int i;
if (wl->state == WLCORE_STATE_OFF) {
if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS,
&wl->flags))
wlcore_enable_interrupts(wl);
return;
}
/*
* this must be before the cancel_work calls below, so that the work
* functions don't perform further work.
*/
wl->state = WLCORE_STATE_OFF;
/*
* Use the nosync variant to disable interrupts, so the mutex could be
* held while doing so without deadlocking.
*/
wlcore_disable_interrupts_nosync(wl);
mutex_unlock(&wl->mutex);
wlcore_synchronize_interrupts(wl);
if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
cancel_work_sync(&wl->recovery_work);
wl1271_flush_deferred_work(wl);
cancel_delayed_work_sync(&wl->scan_complete_work);
cancel_work_sync(&wl->netstack_work);
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->tx_watchdog_work);
/* let's notify MAC80211 about the remaining pending TX frames */
mutex_lock(&wl->mutex);
wl12xx_tx_reset(wl);
wl1271_power_off(wl);
/*
* In case a recovery was scheduled, interrupts were disabled to avoid
* an interrupt storm. Now that the power is down, it is safe to
* re-enable interrupts to balance the disable depth
*/
if (test_and_clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
wlcore_enable_interrupts(wl);
wl->band = NL80211_BAND_2GHZ;
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->channel_type = NL80211_CHAN_NO_HT;
wl->tx_blocks_available = 0;
wl->tx_allocated_blocks = 0;
wl->tx_results_count = 0;
wl->tx_packets_count = 0;
wl->time_offset = 0;
wl->ap_fw_ps_map = 0;
wl->ap_ps_map = 0;
wl->sleep_auth = WL1271_PSM_ILLEGAL;
memset(wl->roles_map, 0, sizeof(wl->roles_map));
memset(wl->links_map, 0, sizeof(wl->links_map));
memset(wl->roc_map, 0, sizeof(wl->roc_map));
memset(wl->session_ids, 0, sizeof(wl->session_ids));
memset(wl->rx_filter_enabled, 0, sizeof(wl->rx_filter_enabled));
wl->active_sta_count = 0;
wl->active_link_count = 0;
/* The system link is always allocated */
wl->links[WL12XX_SYSTEM_HLID].allocated_pkts = 0;
wl->links[WL12XX_SYSTEM_HLID].prev_freed_pkts = 0;
__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
/*
* this is performed after the cancel_work calls and the associated
* mutex_lock, so that wl1271_op_add_interface does not accidentally
* get executed before all these vars have been reset.
*/
wl->flags = 0;
wl->tx_blocks_freed = 0;
for (i = 0; i < NUM_TX_QUEUES; i++) {
wl->tx_pkts_freed[i] = 0;
wl->tx_allocated_pkts[i] = 0;
}
wl1271_debugfs_reset(wl);
kfree(wl->raw_fw_status);
wl->raw_fw_status = NULL;
kfree(wl->fw_status);
wl->fw_status = NULL;
kfree(wl->tx_res_if);
wl->tx_res_if = NULL;
kfree(wl->target_mem_map);
wl->target_mem_map = NULL;
/*
* FW channels must be re-calibrated after recovery,
* save current Reg-Domain channel configuration and clear it.
*/
memcpy(wl->reg_ch_conf_pending, wl->reg_ch_conf_last,
sizeof(wl->reg_ch_conf_pending));
memset(wl->reg_ch_conf_last, 0, sizeof(wl->reg_ch_conf_last));
}
static void wlcore_op_stop(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
mutex_lock(&wl->mutex);
wlcore_op_stop_locked(wl);
mutex_unlock(&wl->mutex);
}
static void wlcore_channel_switch_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
int ret;
dwork = to_delayed_work(work);
wlvif = container_of(dwork, struct wl12xx_vif, channel_switch_work);
wl = wlvif->wl;
wl1271_info("channel switch failed (role_id: %d).", wlvif->role_id);
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
/* check the channel switch is still ongoing */
if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags))
goto out;
vif = wl12xx_wlvif_to_vif(wlvif);
ieee80211_chswitch_done(vif, false);
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl12xx_cmd_stop_channel_switch(wl, wlvif);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static void wlcore_connection_loss_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
dwork = to_delayed_work(work);
wlvif = container_of(dwork, struct wl12xx_vif, connection_loss_work);
wl = wlvif->wl;
wl1271_info("Connection loss work (role_id: %d).", wlvif->role_id);
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
/* Call mac80211 connection loss */
if (!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
goto out;
vif = wl12xx_wlvif_to_vif(wlvif);
ieee80211_connection_loss(vif);
out:
mutex_unlock(&wl->mutex);
}
static void wlcore_pending_auth_complete_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
struct wl12xx_vif *wlvif;
unsigned long time_spare;
int ret;
dwork = to_delayed_work(work);
wlvif = container_of(dwork, struct wl12xx_vif,
pending_auth_complete_work);
wl = wlvif->wl;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
/*
* Make sure a second has really passed since the last auth reply. Maybe
* a second auth reply arrived while we were stuck on the mutex.
* Check for a little less than the timeout to protect from scheduler
* irregularities.
*/
time_spare = jiffies +
msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT - 50);
if (!time_after(time_spare, wlvif->pending_auth_reply_time))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
/* cancel the ROC if active */
wlcore_update_inconn_sta(wl, wlvif, NULL, false);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static int wl12xx_allocate_rate_policy(struct wl1271 *wl, u8 *idx)
{
u8 policy = find_first_zero_bit(wl->rate_policies_map,
WL12XX_MAX_RATE_POLICIES);
if (policy >= WL12XX_MAX_RATE_POLICIES)
return -EBUSY;
__set_bit(policy, wl->rate_policies_map);
*idx = policy;
return 0;
}
static void wl12xx_free_rate_policy(struct wl1271 *wl, u8 *idx)
{
if (WARN_ON(*idx >= WL12XX_MAX_RATE_POLICIES))
return;
__clear_bit(*idx, wl->rate_policies_map);
*idx = WL12XX_MAX_RATE_POLICIES;
}
static int wlcore_allocate_klv_template(struct wl1271 *wl, u8 *idx)
{
u8 policy = find_first_zero_bit(wl->klv_templates_map,
WLCORE_MAX_KLV_TEMPLATES);
if (policy >= WLCORE_MAX_KLV_TEMPLATES)
return -EBUSY;
__set_bit(policy, wl->klv_templates_map);
*idx = policy;
return 0;
}
static void wlcore_free_klv_template(struct wl1271 *wl, u8 *idx)
{
if (WARN_ON(*idx >= WLCORE_MAX_KLV_TEMPLATES))
return;
__clear_bit(*idx, wl->klv_templates_map);
*idx = WLCORE_MAX_KLV_TEMPLATES;
}
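/*
 * Editor's note: the two allocate/free pairs above implement the same
 * bitmap-ID allocator pattern - find_first_zero_bit() claims the lowest
 * free slot, __set_bit()/__clear_bit() (safe here, serialized by
 * wl->mutex) mark ownership, and the "one past the end" value doubles
 * as the invalid-index marker.
 */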
static u8 wl12xx_get_role_type(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
switch (wlvif->bss_type) {
case BSS_TYPE_AP_BSS:
if (wlvif->p2p)
return WL1271_ROLE_P2P_GO;
else if (ieee80211_vif_is_mesh(vif))
return WL1271_ROLE_MESH_POINT;
else
return WL1271_ROLE_AP;
case BSS_TYPE_STA_BSS:
if (wlvif->p2p)
return WL1271_ROLE_P2P_CL;
else
return WL1271_ROLE_STA;
case BSS_TYPE_IBSS:
return WL1271_ROLE_IBSS;
default:
wl1271_error("invalid bss_type: %d", wlvif->bss_type);
}
return WL12XX_INVALID_ROLE_TYPE;
}
static int wl12xx_init_vif_data(struct wl1271 *wl, struct ieee80211_vif *vif)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int i;
/* clear everything but the persistent data */
memset(wlvif, 0, offsetof(struct wl12xx_vif, persistent));
switch (ieee80211_vif_type_p2p(vif)) {
case NL80211_IFTYPE_P2P_CLIENT:
wlvif->p2p = 1;
fallthrough;
case NL80211_IFTYPE_STATION:
case NL80211_IFTYPE_P2P_DEVICE:
wlvif->bss_type = BSS_TYPE_STA_BSS;
break;
case NL80211_IFTYPE_ADHOC:
wlvif->bss_type = BSS_TYPE_IBSS;
break;
case NL80211_IFTYPE_P2P_GO:
wlvif->p2p = 1;
fallthrough;
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_MESH_POINT:
wlvif->bss_type = BSS_TYPE_AP_BSS;
break;
default:
wlvif->bss_type = MAX_BSS_TYPE;
return -EOPNOTSUPP;
}
wlvif->role_id = WL12XX_INVALID_ROLE_ID;
wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS) {
/* init sta/ibss data */
wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
wl12xx_allocate_rate_policy(wl, &wlvif->sta.basic_rate_idx);
wl12xx_allocate_rate_policy(wl, &wlvif->sta.ap_rate_idx);
wl12xx_allocate_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
wlcore_allocate_klv_template(wl, &wlvif->sta.klv_template_id);
wlvif->basic_rate_set = CONF_TX_RATE_MASK_BASIC;
wlvif->basic_rate = CONF_TX_RATE_MASK_BASIC;
wlvif->rate_set = CONF_TX_RATE_MASK_BASIC;
} else {
/* init ap data */
wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
wl12xx_allocate_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
wl12xx_allocate_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
wl12xx_allocate_rate_policy(wl,
&wlvif->ap.ucast_rate_idx[i]);
wlvif->basic_rate_set = CONF_TX_ENABLED_RATES;
/*
* TODO: check if basic_rate shouldn't be
* wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
* instead (the same thing for STA above).
*/
wlvif->basic_rate = CONF_TX_ENABLED_RATES;
/* TODO: this seems to be used only for STA, check it */
wlvif->rate_set = CONF_TX_ENABLED_RATES;
}
wlvif->bitrate_masks[NL80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
wlvif->bitrate_masks[NL80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
wlvif->beacon_int = WL1271_DEFAULT_BEACON_INT;
/*
* mac80211 configures some values globally, while we treat them
* per-interface. thus, on init, we have to copy them from wl
*/
wlvif->band = wl->band;
wlvif->channel = wl->channel;
wlvif->power_level = wl->power_level;
wlvif->channel_type = wl->channel_type;
INIT_WORK(&wlvif->rx_streaming_enable_work,
wl1271_rx_streaming_enable_work);
INIT_WORK(&wlvif->rx_streaming_disable_work,
wl1271_rx_streaming_disable_work);
INIT_WORK(&wlvif->rc_update_work, wlcore_rc_update_work);
INIT_DELAYED_WORK(&wlvif->channel_switch_work,
wlcore_channel_switch_work);
INIT_DELAYED_WORK(&wlvif->connection_loss_work,
wlcore_connection_loss_work);
INIT_DELAYED_WORK(&wlvif->pending_auth_complete_work,
wlcore_pending_auth_complete_work);
INIT_LIST_HEAD(&wlvif->list);
timer_setup(&wlvif->rx_streaming_timer, wl1271_rx_streaming_timer, 0);
return 0;
}
static int wl12xx_init_fw(struct wl1271 *wl)
{
int retries = WL1271_BOOT_RETRIES;
bool booted = false;
struct wiphy *wiphy = wl->hw->wiphy;
int ret;
while (retries) {
retries--;
ret = wl12xx_chip_wakeup(wl, false);
if (ret < 0)
goto power_off;
ret = wl->ops->boot(wl);
if (ret < 0)
goto power_off;
ret = wl1271_hw_init(wl);
if (ret < 0)
goto irq_disable;
booted = true;
break;
irq_disable:
mutex_unlock(&wl->mutex);
/*
 * Unlocking the mutex in the middle of handling is
 * inherently unsafe. In this case we deem it safe to do,
 * because we need to let any possibly pending IRQ out of
 * the system (and while we are WLCORE_STATE_OFF the IRQ
 * work function will not do anything.) Also, any other
 * possible concurrent operations will fail due to the
 * current state, hence the wl1271 struct should be safe.
 */
wlcore_disable_interrupts(wl);
wl1271_flush_deferred_work(wl);
cancel_work_sync(&wl->netstack_work);
mutex_lock(&wl->mutex);
power_off:
wl1271_power_off(wl);
}
if (!booted) {
wl1271_error("firmware boot failed despite %d retries",
WL1271_BOOT_RETRIES);
goto out;
}
wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);
/* update hw/fw version info in wiphy struct */
wiphy->hw_version = wl->chip.id;
strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
sizeof(wiphy->fw_version));
/*
* Now we know if 11a is supported (info from the NVS), so disable
* 11a channels if not supported
*/
if (!wl->enable_11a)
wiphy->bands[NL80211_BAND_5GHZ]->n_channels = 0;
wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
wl->enable_11a ? "" : "not ");
wl->state = WLCORE_STATE_ON;
out:
return ret;
}
static bool wl12xx_dev_role_started(struct wl12xx_vif *wlvif)
{
return wlvif->dev_hlid != WL12XX_INVALID_LINK_ID;
}
/*
* Check whether a fw switch (i.e. moving from one loaded
* fw to another) is needed. This function is also responsible
* for updating wl->last_vif_count, so it must be called before
* loading a non-plt fw (so the correct fw (single-role/multi-role)
* will be used).
*/
static bool wl12xx_need_fw_change(struct wl1271 *wl,
struct vif_counter_data vif_counter_data,
bool add)
{
enum wl12xx_fw_type current_fw = wl->fw_type;
u8 vif_count = vif_counter_data.counter;
if (test_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags))
return false;
/* increase the vif count if this is a new vif */
if (add && !vif_counter_data.cur_vif_running)
vif_count++;
wl->last_vif_count = vif_count;
/* no need for fw change if the device is OFF */
if (wl->state == WLCORE_STATE_OFF)
return false;
/* no need for fw change if a single fw is used */
if (!wl->mr_fw_name)
return false;
if (vif_count > 1 && current_fw == WL12XX_FW_TYPE_NORMAL)
return true;
if (vif_count <= 1 && current_fw == WL12XX_FW_TYPE_MULTI)
return true;
return false;
}
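/*
* In other words (illustrative summary of the checks above): with the
* single-role fw (WL12XX_FW_TYPE_NORMAL) loaded, growing to two or
* more vifs requires switching to the multi-role fw, and with the
* multi-role fw (WL12XX_FW_TYPE_MULTI) loaded, dropping back to at
* most one vif switches to the single-role fw. A device that is OFF,
* or a chip without a separate multi-role image (!wl->mr_fw_name),
* never switches.
*/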
/*
* Enter "forced psm". Make sure the sta is in psm against the ap,
* to make the fw switch a bit more disconnection-persistent.
*/
static void wl12xx_force_active_psm(struct wl1271 *wl)
{
struct wl12xx_vif *wlvif;
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_ps_set_mode(wl, wlvif, STATION_POWER_SAVE_MODE);
}
}
struct wlcore_hw_queue_iter_data {
unsigned long hw_queue_map[BITS_TO_LONGS(WLCORE_NUM_MAC_ADDRESSES)];
/* current vif */
struct ieee80211_vif *vif;
/* is the current vif among those iterated */
bool cur_running;
};
static void wlcore_hw_queue_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
struct wlcore_hw_queue_iter_data *iter_data = data;
if (vif->type == NL80211_IFTYPE_P2P_DEVICE ||
WARN_ON_ONCE(vif->hw_queue[0] == IEEE80211_INVAL_HW_QUEUE))
return;
if (iter_data->cur_running || vif == iter_data->vif) {
iter_data->cur_running = true;
return;
}
__set_bit(vif->hw_queue[0] / NUM_TX_QUEUES, iter_data->hw_queue_map);
}
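/*
* Each vif owns a contiguous block of NUM_TX_QUEUES hw queues, so a
* "queue base" is simply the block index times NUM_TX_QUEUES. Worked
* example (assuming NUM_TX_QUEUES == 4 and WLCORE_NUM_MAC_ADDRESSES
* == 3, matching the bitmap size above): block 0 -> queues 0-3,
* block 1 -> queues 4-7, block 2 -> queues 8-11, and the per-vif cab
* queue of block n lives after all of them at 12 + n. The iterator
* above marks the blocks of all running vifs in hw_queue_map, and
* find_first_zero_bit() below picks the first free block.
*/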
static int wlcore_allocate_hw_queue_base(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct wlcore_hw_queue_iter_data iter_data = {};
int i, q_base;
if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
return 0;
}
iter_data.vif = vif;
/* mark all bits taken by active interfaces */
ieee80211_iterate_active_interfaces_atomic(wl->hw,
IEEE80211_IFACE_ITER_RESUME_ALL,
wlcore_hw_queue_iter, &iter_data);
/* the current vif is already running in mac80211 (resume/recovery) */
if (iter_data.cur_running) {
wlvif->hw_queue_base = vif->hw_queue[0];
wl1271_debug(DEBUG_MAC80211,
"using pre-allocated hw queue base %d",
wlvif->hw_queue_base);
/* the interface might have changed type */
goto adjust_cab_queue;
}
q_base = find_first_zero_bit(iter_data.hw_queue_map,
WLCORE_NUM_MAC_ADDRESSES);
if (q_base >= WLCORE_NUM_MAC_ADDRESSES)
return -EBUSY;
wlvif->hw_queue_base = q_base * NUM_TX_QUEUES;
wl1271_debug(DEBUG_MAC80211, "allocating hw queue base: %d",
wlvif->hw_queue_base);
for (i = 0; i < NUM_TX_QUEUES; i++) {
wl->queue_stop_reasons[wlvif->hw_queue_base + i] = 0;
/* register hw queues in mac80211 */
vif->hw_queue[i] = wlvif->hw_queue_base + i;
}
adjust_cab_queue:
/* the last places are reserved for cab queues per interface */
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
vif->cab_queue = NUM_TX_QUEUES * WLCORE_NUM_MAC_ADDRESSES +
wlvif->hw_queue_base / NUM_TX_QUEUES;
else
vif->cab_queue = IEEE80211_INVAL_HW_QUEUE;
return 0;
}
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct vif_counter_data vif_count;
int ret = 0;
u8 role_type;
if (wl->plt) {
wl1271_error("Adding Interface not allowed while in PLT mode");
return -EBUSY;
}
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_UAPSD |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
ieee80211_vif_type_p2p(vif), vif->addr);
wl12xx_get_vif_count(hw, vif, &vif_count);
mutex_lock(&wl->mutex);
/*
* In some rare corner-case HW recovery scenarios it's possible to
* get here before __wl1271_op_remove_interface is complete, so
* opt out if that is the case.
*/
if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) ||
test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)) {
ret = -EBUSY;
/* no runtime PM reference held yet - skip the put */
goto out_unlock;
}
ret = wl12xx_init_vif_data(wl, vif);
if (ret < 0)
goto out_unlock;
wlvif->wl = wl;
role_type = wl12xx_get_role_type(wl, wlvif);
if (role_type == WL12XX_INVALID_ROLE_TYPE) {
ret = -EINVAL;
goto out_unlock;
}
ret = wlcore_allocate_hw_queue_base(wl, wlvif);
if (ret < 0)
goto out_unlock;
/*
* TODO: after the nvs issue is solved, move this block
* to start(), and make sure the driver is ON here.
*/
if (wl->state == WLCORE_STATE_OFF) {
/*
* we still need this in order to configure the fw
* while uploading the nvs
*/
memcpy(wl->addresses[0].addr, vif->addr, ETH_ALEN);
ret = wl12xx_init_fw(wl);
if (ret < 0)
goto out_unlock;
}
/*
* Call runtime PM only after possible wl12xx_init_fw() above
* is done. Otherwise we do not have interrupts enabled.
*/
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out_unlock;
if (wl12xx_need_fw_change(wl, vif_count, true)) {
wl12xx_force_active_psm(wl);
set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
mutex_unlock(&wl->mutex);
wl1271_recovery_work(&wl->recovery_work);
return 0;
}
if (!wlcore_is_p2p_mgmt(wlvif)) {
ret = wl12xx_cmd_role_enable(wl, vif->addr,
role_type, &wlvif->role_id);
if (ret < 0)
goto out;
ret = wl1271_init_vif_specific(wl, vif);
if (ret < 0)
goto out;
} else {
ret = wl12xx_cmd_role_enable(wl, vif->addr, WL1271_ROLE_DEVICE,
&wlvif->dev_role_id);
if (ret < 0)
goto out;
/* needed mainly for configuring rate policies */
ret = wl1271_sta_hw_init(wl, wlvif);
if (ret < 0)
goto out;
}
list_add(&wlvif->list, &wl->wlvif_list);
set_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags);
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
wl->ap_count++;
else
wl->sta_count++;
out:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out_unlock:
mutex_unlock(&wl->mutex);
return ret;
}
static void __wl1271_op_remove_interface(struct wl1271 *wl,
struct ieee80211_vif *vif,
bool reset_tx_queues)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int i, ret;
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
if (!test_and_clear_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
return;
/* because of hardware recovery, we may get here twice */
if (wl->state == WLCORE_STATE_OFF)
return;
wl1271_info("down");
if (wl->scan.state != WL1271_SCAN_STATE_IDLE &&
wl->scan_wlvif == wlvif) {
struct cfg80211_scan_info info = {
.aborted = true,
};
/*
* Rearm the tx watchdog just before idling scan. This
* prevents just-finished scans from triggering the watchdog.
*/
wl12xx_rearm_tx_watchdog_locked(wl);
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan_wlvif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, &info);
}
if (wl->sched_vif == wlvif)
wl->sched_vif = NULL;
if (wl->roc_vif == vif) {
wl->roc_vif = NULL;
ieee80211_remain_on_channel_expired(wl->hw);
}
if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
/* disable active roles */
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto deinit;
if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS) {
if (wl12xx_dev_role_started(wlvif))
wl12xx_stop_dev(wl, wlvif);
}
if (!wlcore_is_p2p_mgmt(wlvif)) {
ret = wl12xx_cmd_role_disable(wl, &wlvif->role_id);
if (ret < 0) {
pm_runtime_put_noidle(wl->dev);
goto deinit;
}
} else {
ret = wl12xx_cmd_role_disable(wl, &wlvif->dev_role_id);
if (ret < 0) {
pm_runtime_put_noidle(wl->dev);
goto deinit;
}
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
}
deinit:
wl12xx_tx_reset_wlvif(wl, wlvif);
/* clear all hlids (except system_hlid) */
wlvif->dev_hlid = WL12XX_INVALID_LINK_ID;
if (wlvif->bss_type == BSS_TYPE_STA_BSS ||
wlvif->bss_type == BSS_TYPE_IBSS) {
wlvif->sta.hlid = WL12XX_INVALID_LINK_ID;
wl12xx_free_rate_policy(wl, &wlvif->sta.basic_rate_idx);
wl12xx_free_rate_policy(wl, &wlvif->sta.ap_rate_idx);
wl12xx_free_rate_policy(wl, &wlvif->sta.p2p_rate_idx);
wlcore_free_klv_template(wl, &wlvif->sta.klv_template_id);
} else {
wlvif->ap.bcast_hlid = WL12XX_INVALID_LINK_ID;
wlvif->ap.global_hlid = WL12XX_INVALID_LINK_ID;
wl12xx_free_rate_policy(wl, &wlvif->ap.mgmt_rate_idx);
wl12xx_free_rate_policy(wl, &wlvif->ap.bcast_rate_idx);
for (i = 0; i < CONF_TX_MAX_AC_COUNT; i++)
wl12xx_free_rate_policy(wl,
&wlvif->ap.ucast_rate_idx[i]);
wl1271_free_ap_keys(wl, wlvif);
}
dev_kfree_skb(wlvif->probereq);
wlvif->probereq = NULL;
if (wl->last_wlvif == wlvif)
wl->last_wlvif = NULL;
list_del(&wlvif->list);
memset(wlvif->ap.sta_hlid_map, 0, sizeof(wlvif->ap.sta_hlid_map));
wlvif->role_id = WL12XX_INVALID_ROLE_ID;
wlvif->dev_role_id = WL12XX_INVALID_ROLE_ID;
if (is_ap)
wl->ap_count--;
else
wl->sta_count--;
/*
* Last AP, have more stations. Configure sleep auth according to STA.
* Don't do this on unintended recovery.
*/
if (test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags) &&
!test_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags))
goto unlock;
if (wl->ap_count == 0 && is_ap) {
/* mask ap events */
wl->event_mask &= ~wl->ap_event_mask;
wl1271_event_unmask(wl);
}
if (wl->ap_count == 0 && is_ap && wl->sta_count) {
u8 sta_auth = wl->conf.conn.sta_sleep_auth;
/* Configure for power according to debugfs */
if (sta_auth != WL1271_PSM_ILLEGAL)
wl1271_acx_sleep_auth(wl, sta_auth);
/* Configure for ELP power saving */
else
wl1271_acx_sleep_auth(wl, WL1271_PSM_ELP);
}
unlock:
mutex_unlock(&wl->mutex);
del_timer_sync(&wlvif->rx_streaming_timer);
cancel_work_sync(&wlvif->rx_streaming_enable_work);
cancel_work_sync(&wlvif->rx_streaming_disable_work);
cancel_work_sync(&wlvif->rc_update_work);
cancel_delayed_work_sync(&wlvif->connection_loss_work);
cancel_delayed_work_sync(&wlvif->channel_switch_work);
cancel_delayed_work_sync(&wlvif->pending_auth_complete_work);
mutex_lock(&wl->mutex);
}
static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl12xx_vif *iter;
struct vif_counter_data vif_count;
wl12xx_get_vif_count(hw, vif, &vif_count);
mutex_lock(&wl->mutex);
if (wl->state == WLCORE_STATE_OFF ||
!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
goto out;
/*
* wl->vif can be null here if someone shuts down the interface
* just when hardware recovery has been started.
*/
wl12xx_for_each_wlvif(wl, iter) {
if (iter != wlvif)
continue;
__wl1271_op_remove_interface(wl, vif, true);
break;
}
WARN_ON(iter != wlvif);
if (wl12xx_need_fw_change(wl, vif_count, false)) {
wl12xx_force_active_psm(wl);
set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
wl12xx_queue_recovery_work(wl);
}
out:
mutex_unlock(&wl->mutex);
}
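/*
* Changing an interface's type is implemented as a remove + re-add
* of the same vif. WL1271_FLAG_VIF_CHANGE_IN_PROGRESS makes
* wl12xx_need_fw_change() a no-op in the meantime, so the temporary
* drop in the vif count cannot trigger a spurious single-/multi-role
* fw switch.
*/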
static int wl12xx_op_change_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
enum nl80211_iftype new_type, bool p2p)
{
struct wl1271 *wl = hw->priv;
int ret;
set_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
wl1271_op_remove_interface(hw, vif);
vif->type = new_type;
vif->p2p = p2p;
ret = wl1271_op_add_interface(hw, vif);
clear_bit(WL1271_FLAG_VIF_CHANGE_IN_PROGRESS, &wl->flags);
return ret;
}
static int wlcore_join(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
/*
* One of the side effects of the JOIN command is that it clears
* WPA/WPA2 keys from the chipset. Performing a JOIN while associated
* to a WPA/WPA2 access point will therefore kill the data-path.
* Currently the only valid scenario for JOIN during association
* is on roaming, in which case we will also be given new keys.
* Keep the below message for now, unless it starts bothering
* users who really like to roam a lot :)
*/
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
wl1271_info("JOIN while associated.");
/* clear encryption type */
wlvif->encryption_type = KEY_NONE;
if (is_ibss)
ret = wl12xx_cmd_role_start_ibss(wl, wlvif);
else
ret = wl12xx_cmd_role_start_sta(wl, wlvif);
return ret;
}
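/*
* 802.11 information elements are TLV-encoded: ptr[0] is the element
* ID, ptr[1] the payload length, and the payload starts at ptr + 2.
* A minimal sketch of the SSID element parsed below (illustrative
* only):
*
*   u8 ie[] = { WLAN_EID_SSID, 7, 'e', 'x', 'a', 'm', 'p', 'l', 'e' };
*   // ie[1] == 7, SSID payload at ie + 2
*/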
static int wl1271_ssid_set(struct wl12xx_vif *wlvif, struct sk_buff *skb,
int offset)
{
u8 ssid_len;
const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset,
skb->len - offset);
if (!ptr) {
wl1271_error("No SSID in IEs!");
return -ENOENT;
}
ssid_len = ptr[1];
if (ssid_len > IEEE80211_MAX_SSID_LEN) {
wl1271_error("SSID is too long!");
return -EINVAL;
}
wlvif->ssid_len = ssid_len;
memcpy(wlvif->ssid, ptr+2, ssid_len);
return 0;
}
static int wlcore_set_ssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
struct sk_buff *skb;
int ieoffset;
/* we currently only support setting the ssid from the ap probe req */
if (wlvif->bss_type != BSS_TYPE_STA_BSS)
return -EINVAL;
skb = ieee80211_ap_probereq_get(wl->hw, vif);
if (!skb)
return -EINVAL;
ieoffset = offsetof(struct ieee80211_mgmt,
u.probe_req.variable);
wl1271_ssid_set(wlvif, skb, ieoffset);
dev_kfree_skb(skb);
return 0;
}
static int wlcore_set_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_bss_conf *bss_conf,
u32 sta_rate_set)
{
struct ieee80211_vif *vif = container_of(bss_conf, struct ieee80211_vif,
bss_conf);
int ieoffset;
int ret;
wlvif->aid = vif->cfg.aid;
wlvif->channel_type = cfg80211_get_chandef_type(&bss_conf->chandef);
wlvif->beacon_int = bss_conf->beacon_int;
wlvif->wmm_enabled = bss_conf->qos;
set_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags);
/*
* with wl1271, we don't need to update the
* beacon_int and dtim_period, because the firmware
* updates them by itself when the first beacon is
* received after a join.
*/
ret = wl1271_cmd_build_ps_poll(wl, wlvif, wlvif->aid);
if (ret < 0)
return ret;
/*
* Get a template for hardware connection maintenance
*/
dev_kfree_skb(wlvif->probereq);
wlvif->probereq = wl1271_cmd_build_ap_probe_req(wl,
wlvif,
NULL);
ieoffset = offsetof(struct ieee80211_mgmt,
u.probe_req.variable);
wl1271_ssid_set(wlvif, wlvif->probereq, ieoffset);
/* enable the connection monitoring feature */
ret = wl1271_acx_conn_monit_params(wl, wlvif, true);
if (ret < 0)
return ret;
/*
* The join command disables the keep-alive mode, shuts down its
* process, and also clears the template config, so we need to reset
* it all after the join. The acx_aid starts the keep-alive process,
* and the order of the commands below is relevant.
*/
ret = wl1271_acx_keep_alive_mode(wl, wlvif, true);
if (ret < 0)
return ret;
ret = wl1271_acx_aid(wl, wlvif, wlvif->aid);
if (ret < 0)
return ret;
ret = wl12xx_cmd_build_klv_null_data(wl, wlvif);
if (ret < 0)
return ret;
ret = wl1271_acx_keep_alive_config(wl, wlvif,
wlvif->sta.klv_template_id,
ACX_KEEP_ALIVE_TPL_VALID);
if (ret < 0)
return ret;
/*
* The default fw psm configuration is AUTO, while mac80211 default
* setting is off (ACTIVE), so sync the fw with the correct value.
*/
ret = wl1271_ps_set_mode(wl, wlvif, STATION_ACTIVE_MODE);
if (ret < 0)
return ret;
if (sta_rate_set) {
wlvif->rate_set =
wl1271_tx_enabled_rates_get(wl,
sta_rate_set,
wlvif->band);
ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
return ret;
}
return ret;
}
static int wlcore_unset_assoc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
bool sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
/* make sure we are associated (sta) */
if (sta &&
!test_and_clear_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
return 0;
/* make sure we are joined (ibss) */
if (!sta &&
test_and_clear_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags))
return 0;
if (sta) {
/* use defaults when not associated */
wlvif->aid = 0;
/* free probe-request template */
dev_kfree_skb(wlvif->probereq);
wlvif->probereq = NULL;
/* disable connection monitor features */
ret = wl1271_acx_conn_monit_params(wl, wlvif, false);
if (ret < 0)
return ret;
/* Disable the keep-alive feature */
ret = wl1271_acx_keep_alive_mode(wl, wlvif, false);
if (ret < 0)
return ret;
/* disable beacon filtering */
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, false);
if (ret < 0)
return ret;
}
if (test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags)) {
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
wl12xx_cmd_stop_channel_switch(wl, wlvif);
ieee80211_chswitch_done(vif, false);
cancel_delayed_work(&wlvif->channel_switch_work);
}
/* invalidate keep-alive template */
wl1271_acx_keep_alive_config(wl, wlvif,
wlvif->sta.klv_template_id,
ACX_KEEP_ALIVE_TPL_INVALID);
return 0;
}
static void wl1271_set_band_rate(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
wlvif->basic_rate_set = wlvif->bitrate_masks[wlvif->band];
wlvif->rate_set = wlvif->basic_rate_set;
}
static void wl1271_sta_handle_idle(struct wl1271 *wl, struct wl12xx_vif *wlvif,
bool idle)
{
bool cur_idle = !test_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
if (idle == cur_idle)
return;
if (idle) {
clear_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
} else {
/* The current firmware only supports sched_scan in idle */
if (wl->sched_vif == wlvif)
wl->ops->sched_scan_stop(wl, wlvif);
set_bit(WLVIF_FLAG_ACTIVE, &wlvif->flags);
}
}
static int wl12xx_config_vif(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_conf *conf, u32 changed)
{
int ret;
if (wlcore_is_p2p_mgmt(wlvif))
return 0;
if (conf->power_level != wlvif->power_level) {
ret = wl1271_acx_tx_power(wl, wlvif, conf->power_level);
if (ret < 0)
return ret;
wlvif->power_level = conf->power_level;
}
return 0;
}
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 config psm %s power %d %s"
" changed 0x%x",
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
conf->power_level,
conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
changed);
mutex_lock(&wl->mutex);
if (changed & IEEE80211_CONF_CHANGE_POWER)
wl->power_level = conf->power_level;
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
/* configure each interface */
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl12xx_config_vif(wl, wlvif, conf, changed);
if (ret < 0)
goto out_sleep;
}
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
struct wl1271_filter_params {
bool enabled;
int mc_list_length;
u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
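/*
* mac80211's prepare_multicast() returns an opaque u64 cookie that
* is later handed back to configure_filter(). We use it to carry a
* heap pointer across the two callbacks: the kzalloc'ed filter
* params below are cast to u64 on return and cast back (and freed)
* in wl1271_op_configure_filter(). GFP_ATOMIC is used, presumably
* because the callback may run in atomic context.
*/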
static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
struct wl1271_filter_params *fp;
struct netdev_hw_addr *ha;
fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
if (!fp) {
wl1271_error("Out of memory setting filters.");
return 0;
}
/* update multicast filtering parameters */
fp->mc_list_length = 0;
if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
fp->enabled = false;
} else {
fp->enabled = true;
netdev_hw_addr_list_for_each(ha, mc_list) {
memcpy(fp->mc_list[fp->mc_list_length],
ha->addr, ETH_ALEN);
fp->mc_list_length++;
}
}
return (u64)(unsigned long)fp;
}
#define WL1271_SUPPORTED_FILTERS (FIF_ALLMULTI | \
FIF_FCSFAIL | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_CONTROL | \
FIF_OTHER_BSS)
static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed,
unsigned int *total, u64 multicast)
{
struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x"
" total %x", changed, *total);
mutex_lock(&wl->mutex);
*total &= WL1271_SUPPORTED_FILTERS;
changed &= WL1271_SUPPORTED_FILTERS;
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlcore_is_p2p_mgmt(wlvif))
continue;
if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
if (*total & FIF_ALLMULTI)
ret = wl1271_acx_group_address_tbl(wl, wlvif,
false,
NULL, 0);
else if (fp)
ret = wl1271_acx_group_address_tbl(wl, wlvif,
fp->enabled,
fp->mc_list,
fp->mc_list_length);
if (ret < 0)
goto out_sleep;
}
/*
* If the interface is in AP mode and was created with allmulticast,
* disable the firmware filters so that all multicast packets are
* passed. This is mandatory for mDNS-based discovery protocols.
*/
if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
if (*total & FIF_ALLMULTI) {
ret = wl1271_acx_group_address_tbl(wl, wlvif,
false,
NULL, 0);
if (ret < 0)
goto out_sleep;
}
}
}
/*
* The fw doesn't provide an API to configure the filters. Instead,
* the filter configuration is based on the active roles / ROC
* state.
*/
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
kfree(fp);
}
static int wl1271_record_ap_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 id, u8 key_type, u8 key_size,
const u8 *key, u8 hlid, u32 tx_seq_32,
u16 tx_seq_16, bool is_pairwise)
{
struct wl1271_ap_key *ap_key;
int i;
wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id);
if (key_size > MAX_KEY_SIZE)
return -EINVAL;
/*
* Find next free entry in ap_keys. Also check we are not replacing
* an existing key.
*/
for (i = 0; i < MAX_NUM_KEYS; i++) {
if (wlvif->ap.recorded_keys[i] == NULL)
break;
if (wlvif->ap.recorded_keys[i]->id == id) {
wl1271_warning("trying to record key replacement");
return -EINVAL;
}
}
if (i == MAX_NUM_KEYS)
return -EBUSY;
ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL);
if (!ap_key)
return -ENOMEM;
ap_key->id = id;
ap_key->key_type = key_type;
ap_key->key_size = key_size;
memcpy(ap_key->key, key, key_size);
ap_key->hlid = hlid;
ap_key->tx_seq_32 = tx_seq_32;
ap_key->tx_seq_16 = tx_seq_16;
ap_key->is_pairwise = is_pairwise;
wlvif->ap.recorded_keys[i] = ap_key;
return 0;
}
static void wl1271_free_ap_keys(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i;
for (i = 0; i < MAX_NUM_KEYS; i++) {
kfree(wlvif->ap.recorded_keys[i]);
wlvif->ap.recorded_keys[i] = NULL;
}
}
static int wl1271_ap_init_hwenc(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i, ret = 0;
struct wl1271_ap_key *key;
bool wep_key_added = false;
for (i = 0; i < MAX_NUM_KEYS; i++) {
u8 hlid;
if (wlvif->ap.recorded_keys[i] == NULL)
break;
key = wlvif->ap.recorded_keys[i];
hlid = key->hlid;
if (hlid == WL12XX_INVALID_LINK_ID)
hlid = wlvif->ap.bcast_hlid;
ret = wl1271_cmd_set_ap_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key->id, key->key_type,
key->key_size, key->key,
hlid, key->tx_seq_32,
key->tx_seq_16, key->is_pairwise);
if (ret < 0)
goto out;
if (key->key_type == KEY_WEP)
wep_key_added = true;
}
if (wep_key_added) {
ret = wl12xx_cmd_set_default_wep_key(wl, wlvif->default_key,
wlvif->ap.bcast_hlid);
if (ret < 0)
goto out;
}
out:
wl1271_free_ap_keys(wl, wlvif);
return ret;
}
static int wl1271_set_key(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u16 action, u8 id, u8 key_type,
u8 key_size, const u8 *key, u32 tx_seq_32,
u16 tx_seq_16, struct ieee80211_sta *sta,
bool is_pairwise)
{
int ret;
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
if (is_ap) {
struct wl1271_station *wl_sta;
u8 hlid;
if (sta) {
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
} else {
hlid = wlvif->ap.bcast_hlid;
}
if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
/*
* We do not support removing keys after AP shutdown.
* Pretend we do to make mac80211 happy.
*/
if (action != KEY_ADD_OR_REPLACE)
return 0;
ret = wl1271_record_ap_key(wl, wlvif, id,
key_type, key_size,
key, hlid, tx_seq_32,
tx_seq_16, is_pairwise);
} else {
ret = wl1271_cmd_set_ap_key(wl, wlvif, action,
id, key_type, key_size,
key, hlid, tx_seq_32,
tx_seq_16, is_pairwise);
}
if (ret < 0)
return ret;
} else {
const u8 *addr;
static const u8 bcast_addr[ETH_ALEN] = {
0xff, 0xff, 0xff, 0xff, 0xff, 0xff
};
addr = sta ? sta->addr : bcast_addr;
if (is_zero_ether_addr(addr)) {
/* We don't support TX-only encryption */
return -EOPNOTSUPP;
}
/*
* The wl1271 does not allow removing unicast keys - they
* will be cleared automatically on next CMD_JOIN. Ignore the
* request silently, as we don't want mac80211 to emit
* an error message.
*/
if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
return 0;
/* don't remove key if hlid was already deleted */
if (action == KEY_REMOVE &&
wlvif->sta.hlid == WL12XX_INVALID_LINK_ID)
return 0;
ret = wl1271_cmd_set_sta_key(wl, wlvif, action,
id, key_type, key_size,
key, addr, tx_seq_32,
tx_seq_16);
if (ret < 0)
return ret;
}
return 0;
}
static int wlcore_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf)
{
struct wl1271 *wl = hw->priv;
int ret;
bool might_change_spare =
key_conf->cipher == WL1271_CIPHER_SUITE_GEM ||
key_conf->cipher == WLAN_CIPHER_SUITE_TKIP;
if (might_change_spare) {
/*
* stop the queues and flush to ensure the next packets are
* in sync with FW spare block accounting
*/
wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
wl1271_tx_flush(wl);
}
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EAGAIN;
goto out_wake_queues;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out_wake_queues;
ret = wlcore_hw_set_key(wl, cmd, vif, sta, key_conf);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out_wake_queues:
if (might_change_spare)
wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_SPARE_BLK);
mutex_unlock(&wl->mutex);
return ret;
}
int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key_conf)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u32 tx_seq_32 = 0;
u16 tx_seq_16 = 0;
u8 key_type;
u8 hlid;
bool is_pairwise;
wl1271_debug(DEBUG_MAC80211, "mac80211 set key");
wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
key_conf->cipher, key_conf->keyidx,
key_conf->keylen, key_conf->flags);
wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
if (sta) {
struct wl1271_station *wl_sta = (void *)sta->drv_priv;
hlid = wl_sta->hlid;
} else {
hlid = wlvif->ap.bcast_hlid;
}
else
hlid = wlvif->sta.hlid;
if (hlid != WL12XX_INVALID_LINK_ID) {
u64 tx_seq = wl->links[hlid].total_freed_pkts;
tx_seq_32 = WL1271_TX_SECURITY_HI32(tx_seq);
tx_seq_16 = WL1271_TX_SECURITY_LO16(tx_seq);
}
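/*
* The link's total_freed_pkts is used as the key's initial security
* sequence number and is handed to the fw split in two. A sketch of
* the intended split, assuming the usual shift/mask definitions of
* the two macros (illustrative only):
*
*   tx_seq_32 = (u32)(tx_seq >> 16);    // WL1271_TX_SECURITY_HI32
*   tx_seq_16 = (u16)(tx_seq & 0xffff); // WL1271_TX_SECURITY_LO16
*/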
switch (key_conf->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
key_type = KEY_WEP;
key_conf->hw_key_idx = key_conf->keyidx;
break;
case WLAN_CIPHER_SUITE_TKIP:
key_type = KEY_TKIP;
key_conf->hw_key_idx = key_conf->keyidx;
break;
case WLAN_CIPHER_SUITE_CCMP:
key_type = KEY_AES;
key_conf->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE;
break;
case WL1271_CIPHER_SUITE_GEM:
key_type = KEY_GEM;
break;
default:
wl1271_error("Unknown key algo 0x%x", key_conf->cipher);
return -EOPNOTSUPP;
}
is_pairwise = key_conf->flags & IEEE80211_KEY_FLAG_PAIRWISE;
switch (cmd) {
case SET_KEY:
ret = wl1271_set_key(wl, wlvif, KEY_ADD_OR_REPLACE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
tx_seq_32, tx_seq_16, sta, is_pairwise);
if (ret < 0) {
wl1271_error("Could not add or replace key");
return ret;
}
/*
* reconfigure the arp response if the unicast (or common)
* encryption key type has changed
*/
if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
(sta || key_type == KEY_WEP) &&
wlvif->encryption_type != key_type) {
wlvif->encryption_type = key_type;
ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
if (ret < 0) {
wl1271_warning("build arp rsp failed: %d", ret);
return ret;
}
}
break;
case DISABLE_KEY:
ret = wl1271_set_key(wl, wlvif, KEY_REMOVE,
key_conf->keyidx, key_type,
key_conf->keylen, key_conf->key,
0, 0, sta, is_pairwise);
if (ret < 0) {
wl1271_error("Could not remove key");
return ret;
}
break;
default:
wl1271_error("Unsupported key cmd 0x%x", cmd);
return -EOPNOTSUPP;
}
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_set_key);
static void wl1271_op_set_default_key_idx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
int key_idx)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 set default key idx %d",
key_idx);
/* we don't handle unsetting of default key */
if (key_idx == -1)
return;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EAGAIN;
goto out_unlock;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out_unlock;
wlvif->default_key = key_idx;
/* the default WEP key needs to be configured at least once */
if (wlvif->encryption_type == KEY_WEP) {
ret = wl12xx_cmd_set_default_wep_key(wl,
key_idx,
wlvif->sta.hlid);
if (ret < 0)
goto out_sleep;
}
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out_unlock:
mutex_unlock(&wl->mutex);
}
void wlcore_regdomain_config(struct wl1271 *wl)
{
int ret;
if (!(wl->quirks & WLCORE_QUIRK_REGDOMAIN_CONF))
return;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wlcore_cmd_regdomain_config_locked(wl);
if (ret < 0) {
wl12xx_queue_recovery_work(wl);
goto out;
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct cfg80211_scan_request *req = &hw_req->req;
struct wl1271 *wl = hw->priv;
int ret;
u8 *ssid = NULL;
size_t len = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
if (req->n_ssids) {
ssid = req->ssids[0].ssid;
len = req->ssids[0].ssid_len;
}
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
/*
* We cannot return -EBUSY here because cfg80211 will expect
* a call to ieee80211_scan_completed if we do - in this case
* there won't be any call.
*/
ret = -EAGAIN;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
/* fail if there is any role in ROC */
if (find_first_bit(wl->roc_map, WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES) {
/* don't allow scanning right now */
ret = -EBUSY;
goto out_sleep;
}
ret = wlcore_scan(hw->priv, vif, ssid, len, req);
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct cfg80211_scan_info info = {
.aborted = true,
};
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
ret = wl->ops->scan_stop(wl, wlvif);
if (ret < 0)
goto out_sleep;
}
/*
* Rearm the tx watchdog just before idling scan. This
* prevents just-finished scans from triggering the watchdog.
*/
wl12xx_rearm_tx_watchdog_locked(wl);
wl->scan.state = WL1271_SCAN_STATE_IDLE;
memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
wl->scan_wlvif = NULL;
wl->scan.req = NULL;
ieee80211_scan_completed(wl->hw, &info);
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
cancel_delayed_work_sync(&wl->scan_complete_work);
}
static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct ieee80211_scan_ies *ies)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start");
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EAGAIN;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl->ops->sched_scan_start(wl, wlvif, req, ies);
if (ret < 0)
goto out_sleep;
wl->sched_vif = wlvif;
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static int wl1271_op_sched_scan_stop(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop");
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl->ops->sched_scan_stop(wl, wlvif);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return 0;
}
static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wl1271 *wl = hw->priv;
int ret = 0;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EAGAIN;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl1271_acx_frag_threshold(wl, value);
if (ret < 0)
wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
int ret = 0;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EAGAIN;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl12xx_for_each_wlvif(wl, wlvif) {
ret = wl1271_acx_rts_threshold(wl, wlvif, value);
if (ret < 0)
wl1271_warning("set rts threshold failed: %d", ret);
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
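/*
* The two IE-removal helpers below share one technique: locate the
* element, compute its full length (payload length ie[1] plus the
* 2-byte ID/length header), memmove() the tail of the frame over it
* and shrink the skb. Sketch (illustrative only):
*
*   before: | ... | IE (hdr + payload, len bytes) | tail ... |
*   after:  | ... | tail ... |          (skb->len shrinks by len)
*/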
static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset)
{
int len;
const u8 *next, *end = skb->data + skb->len;
u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset,
skb->len - ieoffset);
if (!ie)
return;
len = ie[1] + 2;
next = ie + len;
memmove(ie, next, end - next);
skb_trim(skb, skb->len - len);
}
static void wl12xx_remove_vendor_ie(struct sk_buff *skb,
unsigned int oui, u8 oui_type,
int ieoffset)
{
int len;
const u8 *next, *end = skb->data + skb->len;
u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
skb->data + ieoffset,
skb->len - ieoffset);
if (!ie)
return;
len = ie[1] + 2;
next = ie + len;
memmove(ie, next, end - next);
skb_trim(skb, skb->len - len);
}
static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, u32 rates,
struct ieee80211_vif *vif)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct sk_buff *skb;
int ret;
skb = ieee80211_proberesp_get(wl->hw, vif);
if (!skb)
return -EOPNOTSUPP;
ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_AP_PROBE_RESPONSE,
skb->data,
skb->len, 0,
rates);
dev_kfree_skb(skb);
if (ret < 0)
goto out;
wl1271_debug(DEBUG_AP, "probe response updated");
set_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags);
out:
return ret;
}
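/*
* The legacy path below rebuilds the probe response from the beacon
* when the beacon carries no usable SSID (e.g. hidden SSID): copy
* everything up to the SSID IE, splice in the real SSID from
* vif->cfg, then append the rest of the frame verbatim. The
* resulting template layout is:
*
*   [hdr + IEs before SSID][EID, len, SSID bytes][IEs after SSID]
*/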
static int wl1271_ap_set_probe_resp_tmpl_legacy(struct wl1271 *wl,
struct ieee80211_vif *vif,
u8 *probe_rsp_data,
size_t probe_rsp_len,
u32 rates)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u8 probe_rsp_templ[WL1271_CMD_TEMPL_MAX_SIZE];
int ssid_ie_offset, ie_offset, templ_len;
const u8 *ptr;
/* no need to change probe response if the SSID is set correctly */
if (wlvif->ssid_len > 0)
return wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_AP_PROBE_RESPONSE,
probe_rsp_data,
probe_rsp_len, 0,
rates);
if (probe_rsp_len + vif->cfg.ssid_len > WL1271_CMD_TEMPL_MAX_SIZE) {
wl1271_error("probe_rsp template too big");
return -EINVAL;
}
/* start searching from IE offset */
ie_offset = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
ptr = cfg80211_find_ie(WLAN_EID_SSID, probe_rsp_data + ie_offset,
probe_rsp_len - ie_offset);
if (!ptr) {
wl1271_error("No SSID in beacon!");
return -EINVAL;
}
ssid_ie_offset = ptr - probe_rsp_data;
ptr += (ptr[1] + 2);
memcpy(probe_rsp_templ, probe_rsp_data, ssid_ie_offset);
/* insert SSID from bss_conf */
probe_rsp_templ[ssid_ie_offset] = WLAN_EID_SSID;
probe_rsp_templ[ssid_ie_offset + 1] = vif->cfg.ssid_len;
memcpy(probe_rsp_templ + ssid_ie_offset + 2,
vif->cfg.ssid, vif->cfg.ssid_len);
templ_len = ssid_ie_offset + 2 + vif->cfg.ssid_len;
memcpy(probe_rsp_templ + ssid_ie_offset + 2 + vif->cfg.ssid_len,
ptr, probe_rsp_len - (ptr - probe_rsp_data));
templ_len += probe_rsp_len - (ptr - probe_rsp_data);
return wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_AP_PROBE_RESPONSE,
probe_rsp_templ,
templ_len, 0,
rates);
}
static int wl1271_bss_erp_info_changed(struct wl1271 *wl,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_SHORT);
else
ret = wl1271_acx_slot(wl, wlvif, SLOT_TIME_LONG);
if (ret < 0) {
wl1271_warning("Set slot time failed %d", ret);
goto out;
}
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
if (bss_conf->use_short_preamble)
wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_SHORT);
else
wl1271_acx_set_preamble(wl, wlvif, ACX_PREAMBLE_LONG);
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
if (bss_conf->use_cts_prot)
ret = wl1271_acx_cts_protect(wl, wlvif,
CTSPROTECT_ENABLE);
else
ret = wl1271_acx_cts_protect(wl, wlvif,
CTSPROTECT_DISABLE);
if (ret < 0) {
wl1271_warning("Set ctsprotect failed %d", ret);
goto out;
}
}
out:
return ret;
}
static int wlcore_set_beacon_template(struct wl1271 *wl,
struct ieee80211_vif *vif,
bool is_ap)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_hdr *hdr;
u32 min_rate;
int ret;
int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
struct sk_buff *beacon = ieee80211_beacon_get(wl->hw, vif, 0);
u16 tmpl_id;
if (!beacon) {
ret = -EINVAL;
goto out;
}
wl1271_debug(DEBUG_MASTER, "beacon updated");
ret = wl1271_ssid_set(wlvif, beacon, ieoffset);
if (ret < 0) {
dev_kfree_skb(beacon);
goto out;
}
min_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
tmpl_id = is_ap ? CMD_TEMPL_AP_BEACON :
CMD_TEMPL_BEACON;
ret = wl1271_cmd_template_set(wl, wlvif->role_id, tmpl_id,
beacon->data,
beacon->len, 0,
min_rate);
if (ret < 0) {
dev_kfree_skb(beacon);
goto out;
}
wlvif->wmm_enabled =
cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
WLAN_OUI_TYPE_MICROSOFT_WMM,
beacon->data + ieoffset,
beacon->len - ieoffset);
/*
* In case we already have a probe-resp template set explicitly
* by userspace, don't use the beacon data.
*/
if (test_bit(WLVIF_FLAG_AP_PROBE_RESP_SET, &wlvif->flags))
goto end_bcn;
/* remove TIM ie from probe response */
wl12xx_remove_ie(beacon, WLAN_EID_TIM, ieoffset);
/*
* Remove the p2p ie from the probe response. The fw responds to
* probe requests that don't include the p2p ie. Probe requests
* with a p2p ie will be passed up and answered by the supplicant
* (the spec forbids including the p2p ie when responding to probe
* requests that didn't include it).
*/
wl12xx_remove_vendor_ie(beacon, WLAN_OUI_WFA,
WLAN_OUI_TYPE_WFA_P2P, ieoffset);
hdr = (struct ieee80211_hdr *) beacon->data;
hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
IEEE80211_STYPE_PROBE_RESP);
if (is_ap)
ret = wl1271_ap_set_probe_resp_tmpl_legacy(wl, vif,
beacon->data,
beacon->len,
min_rate);
else
ret = wl1271_cmd_template_set(wl, wlvif->role_id,
CMD_TEMPL_PROBE_RESPONSE,
beacon->data,
beacon->len, 0,
min_rate);
end_bcn:
dev_kfree_skb(beacon);
out:
return ret;
}
static int wl1271_bss_beacon_info_changed(struct wl1271 *wl,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret = 0;
if (changed & BSS_CHANGED_BEACON_INT) {
wl1271_debug(DEBUG_MASTER, "beacon interval updated: %d",
bss_conf->beacon_int);
wlvif->beacon_int = bss_conf->beacon_int;
}
if ((changed & BSS_CHANGED_AP_PROBE_RESP) && is_ap) {
u32 rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
wl1271_ap_set_probe_resp_tmpl(wl, rate, vif);
}
if (changed & BSS_CHANGED_BEACON) {
ret = wlcore_set_beacon_template(wl, vif, is_ap);
if (ret < 0)
goto out;
if (test_and_clear_bit(WLVIF_FLAG_BEACON_DISABLED,
&wlvif->flags)) {
ret = wlcore_hw_dfs_master_restart(wl, wlvif);
if (ret < 0)
goto out;
}
}
out:
if (ret != 0)
wl1271_error("beacon info change failed: %d", ret);
return ret;
}
/* AP mode changes */
static void wl1271_bss_info_changed_ap(struct wl1271 *wl,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret = 0;
if (changed & BSS_CHANGED_BASIC_RATES) {
u32 rates = bss_conf->basic_rates;
wlvif->basic_rate_set = wl1271_tx_enabled_rates_get(wl, rates,
wlvif->band);
wlvif->basic_rate = wl1271_tx_min_rate_get(wl,
wlvif->basic_rate_set);
ret = wl1271_init_ap_rates(wl, wlvif);
if (ret < 0) {
wl1271_error("AP rate policy change failed %d", ret);
goto out;
}
ret = wl1271_ap_init_templates(wl, vif);
if (ret < 0)
goto out;
/* No need to set probe resp template for mesh */
if (!ieee80211_vif_is_mesh(vif)) {
ret = wl1271_ap_set_probe_resp_tmpl(wl,
wlvif->basic_rate,
vif);
if (ret < 0)
goto out;
}
ret = wlcore_set_beacon_template(wl, vif, true);
if (ret < 0)
goto out;
}
ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
if (changed & BSS_CHANGED_BEACON_ENABLED) {
if (bss_conf->enable_beacon) {
if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
ret = wl12xx_cmd_role_start_ap(wl, wlvif);
if (ret < 0)
goto out;
ret = wl1271_ap_init_hwenc(wl, wlvif);
if (ret < 0)
goto out;
set_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
wl1271_debug(DEBUG_AP, "started AP");
}
} else {
if (test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags)) {
/*
* The AP might be in ROC in case we have just
* sent an auth reply. Handle it.
*/
if (test_bit(wlvif->role_id, wl->roc_map))
wl12xx_croc(wl, wlvif->role_id);
ret = wl12xx_cmd_role_stop_ap(wl, wlvif);
if (ret < 0)
goto out;
clear_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags);
clear_bit(WLVIF_FLAG_AP_PROBE_RESP_SET,
&wlvif->flags);
wl1271_debug(DEBUG_AP, "stopped AP");
}
}
}
ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
/* Handle HT information change */
if ((changed & BSS_CHANGED_HT) &&
(bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT)) {
ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
wl1271_warning("Set ht information failed %d", ret);
goto out;
}
}
out:
return;
}
static int wlcore_set_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct ieee80211_vif *vif, u32 sta_rate_set)
{
struct ieee80211_bss_conf *bss_conf = &vif->bss_conf;
u32 rates;
int ret;
wl1271_debug(DEBUG_MAC80211,
"changed_bssid: %pM, aid: %d, bcn_int: %d, brates: 0x%x sta_rate_set: 0x%x",
bss_conf->bssid, vif->cfg.aid,
bss_conf->beacon_int,
bss_conf->basic_rates, sta_rate_set);
wlvif->beacon_int = bss_conf->beacon_int;
rates = bss_conf->basic_rates;
wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
wlvif->band);
wlvif->basic_rate =
wl1271_tx_min_rate_get(wl,
wlvif->basic_rate_set);
if (sta_rate_set)
wlvif->rate_set =
wl1271_tx_enabled_rates_get(wl,
sta_rate_set,
wlvif->band);
/* we only support sched_scan while not connected */
if (wl->sched_vif == wlvif)
wl->ops->sched_scan_stop(wl, wlvif);
ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
return ret;
ret = wl12xx_cmd_build_null_data(wl, wlvif);
if (ret < 0)
return ret;
ret = wl1271_build_qos_null_data(wl, wl12xx_wlvif_to_vif(wlvif));
if (ret < 0)
return ret;
wlcore_set_ssid(wl, wlvif);
set_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
return 0;
}
static int wlcore_clear_bssid(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int ret;
/* revert to minimum rates for the current band */
wl1271_set_band_rate(wl, wlvif);
wlvif->basic_rate = wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
return ret;
if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags)) {
ret = wl12xx_cmd_role_stop_sta(wl, wlvif);
if (ret < 0)
return ret;
}
clear_bit(WLVIF_FLAG_IN_USE, &wlvif->flags);
return 0;
}
/* STA/IBSS mode changes */
static void wl1271_bss_info_changed_sta(struct wl1271 *wl,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u32 changed)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
bool do_join = false;
bool is_ibss = (wlvif->bss_type == BSS_TYPE_IBSS);
bool ibss_joined = false;
u32 sta_rate_set = 0;
int ret;
struct ieee80211_sta *sta;
bool sta_exists = false;
struct ieee80211_sta_ht_cap sta_ht_cap;
if (is_ibss) {
ret = wl1271_bss_beacon_info_changed(wl, vif, bss_conf,
changed);
if (ret < 0)
goto out;
}
if (changed & BSS_CHANGED_IBSS) {
if (vif->cfg.ibss_joined) {
set_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags);
ibss_joined = true;
} else {
wlcore_unset_assoc(wl, wlvif);
wl12xx_cmd_role_stop_sta(wl, wlvif);
}
}
if ((changed & BSS_CHANGED_BEACON_INT) && ibss_joined)
do_join = true;
/* Need to update the SSID (for filtering etc) */
if ((changed & BSS_CHANGED_BEACON) && ibss_joined)
do_join = true;
if ((changed & BSS_CHANGED_BEACON_ENABLED) && ibss_joined) {
wl1271_debug(DEBUG_ADHOC, "ad-hoc beaconing: %s",
bss_conf->enable_beacon ? "enabled" : "disabled");
do_join = true;
}
if (changed & BSS_CHANGED_IDLE && !is_ibss)
wl1271_sta_handle_idle(wl, wlvif, vif->cfg.idle);
if (changed & BSS_CHANGED_CQM) {
bool enable = false;
if (bss_conf->cqm_rssi_thold)
enable = true;
ret = wl1271_acx_rssi_snr_trigger(wl, wlvif, enable,
bss_conf->cqm_rssi_thold,
bss_conf->cqm_rssi_hyst);
if (ret < 0)
goto out;
wlvif->rssi_thold = bss_conf->cqm_rssi_thold;
}
if (changed & (BSS_CHANGED_BSSID | BSS_CHANGED_HT |
BSS_CHANGED_ASSOC)) {
rcu_read_lock();
sta = ieee80211_find_sta(vif, bss_conf->bssid);
if (sta) {
u8 *rx_mask = sta->deflink.ht_cap.mcs.rx_mask;
/* save the supp_rates of the ap */
sta_rate_set = sta->deflink.supp_rates[wlvif->band];
if (sta->deflink.ht_cap.ht_supported)
sta_rate_set |=
(rx_mask[0] << HW_HT_RATES_OFFSET) |
(rx_mask[1] << HW_MIMO_RATES_OFFSET);
sta_ht_cap = sta->deflink.ht_cap;
sta_exists = true;
}
rcu_read_unlock();
}
if (changed & BSS_CHANGED_BSSID) {
if (!is_zero_ether_addr(bss_conf->bssid)) {
ret = wlcore_set_bssid(wl, wlvif, vif,
sta_rate_set);
if (ret < 0)
goto out;
/* Need to update the BSSID (for filtering etc) */
do_join = true;
} else {
ret = wlcore_clear_bssid(wl, wlvif);
if (ret < 0)
goto out;
}
}
if (changed & BSS_CHANGED_IBSS) {
wl1271_debug(DEBUG_ADHOC, "ibss_joined: %d",
vif->cfg.ibss_joined);
if (vif->cfg.ibss_joined) {
u32 rates = bss_conf->basic_rates;
wlvif->basic_rate_set =
wl1271_tx_enabled_rates_get(wl, rates,
wlvif->band);
wlvif->basic_rate =
wl1271_tx_min_rate_get(wl,
wlvif->basic_rate_set);
/* by default, use 11b + OFDM rates */
wlvif->rate_set = CONF_TX_IBSS_DEFAULT_RATES;
ret = wl1271_acx_sta_rate_policies(wl, wlvif);
if (ret < 0)
goto out;
}
}
if ((changed & BSS_CHANGED_BEACON_INFO) && bss_conf->dtim_period) {
/* enable beacon filtering */
ret = wl1271_acx_beacon_filter_opt(wl, wlvif, true);
if (ret < 0)
goto out;
}
ret = wl1271_bss_erp_info_changed(wl, vif, bss_conf, changed);
if (ret < 0)
goto out;
if (do_join) {
ret = wlcore_join(wl, wlvif);
if (ret < 0) {
wl1271_warning("cmd join failed %d", ret);
goto out;
}
}
if (changed & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc) {
ret = wlcore_set_assoc(wl, wlvif, bss_conf,
sta_rate_set);
if (ret < 0)
goto out;
if (test_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags))
wl12xx_set_authorized(wl, wlvif);
} else {
wlcore_unset_assoc(wl, wlvif);
}
}
if (changed & BSS_CHANGED_PS) {
if (vif->cfg.ps &&
test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) &&
!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
int ps_mode;
char *ps_mode_str;
if (wl->conf.conn.forced_ps) {
ps_mode = STATION_POWER_SAVE_MODE;
ps_mode_str = "forced";
} else {
ps_mode = STATION_AUTO_PS_MODE;
ps_mode_str = "auto";
}
wl1271_debug(DEBUG_PSM, "%s ps enabled", ps_mode_str);
ret = wl1271_ps_set_mode(wl, wlvif, ps_mode);
if (ret < 0)
wl1271_warning("enter %s ps failed %d",
ps_mode_str, ret);
} else if (!vif->cfg.ps &&
test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags)) {
wl1271_debug(DEBUG_PSM, "auto ps disabled");
ret = wl1271_ps_set_mode(wl, wlvif,
STATION_ACTIVE_MODE);
if (ret < 0)
wl1271_warning("exit auto ps failed %d", ret);
}
}
/* Handle new association with HT. Do this after join. */
if (sta_exists) {
bool enabled =
bss_conf->chandef.width != NL80211_CHAN_WIDTH_20_NOHT;
ret = wlcore_hw_set_peer_cap(wl,
&sta_ht_cap,
enabled,
wlvif->rate_set,
wlvif->sta.hlid);
if (ret < 0) {
wl1271_warning("Set ht cap failed %d", ret);
goto out;
}
if (enabled) {
ret = wl1271_acx_set_ht_information(wl, wlvif,
bss_conf->ht_operation_mode);
if (ret < 0) {
wl1271_warning("Set ht information failed %d",
ret);
goto out;
}
}
}
/* Handle arp filtering. Done after join. */
if ((changed & BSS_CHANGED_ARP_FILTER) ||
(!is_ibss && (changed & BSS_CHANGED_QOS))) {
__be32 addr = vif->cfg.arp_addr_list[0];
wlvif->sta.qos = bss_conf->qos;
WARN_ON(wlvif->bss_type != BSS_TYPE_STA_BSS);
if (vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc) {
wlvif->ip_addr = addr;
/*
* The template should have been configured only upon
* association. However, it seems that the correct IP
* isn't being set (when sending), so we have to
* reconfigure the template upon every IP change.
*/
ret = wl1271_cmd_build_arp_rsp(wl, wlvif);
if (ret < 0) {
wl1271_warning("build arp rsp failed: %d", ret);
goto out;
}
ret = wl1271_acx_arp_ip_filter(wl, wlvif,
(ACX_ARP_FILTER_ARP_FILTERING |
ACX_ARP_FILTER_AUTO_ARP),
addr);
} else {
wlvif->ip_addr = 0;
ret = wl1271_acx_arp_ip_filter(wl, wlvif, 0, addr);
}
if (ret < 0)
goto out;
}
out:
return;
}
static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u64 changed)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 bss info role %d changed 0x%x",
wlvif->role_id, (int)changed);
/*
* make sure to cancel pending disconnections if our association
* state changed
*/
if (!is_ap && (changed & BSS_CHANGED_ASSOC))
cancel_delayed_work_sync(&wlvif->connection_loss_work);
if (is_ap && (changed & BSS_CHANGED_BEACON_ENABLED) &&
!bss_conf->enable_beacon)
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
if ((changed & BSS_CHANGED_TXPOWER) &&
bss_conf->txpower != wlvif->power_level) {
ret = wl1271_acx_tx_power(wl, wlvif, bss_conf->txpower);
if (ret < 0)
goto out;
wlvif->power_level = bss_conf->txpower;
}
if (is_ap)
wl1271_bss_info_changed_ap(wl, vif, bss_conf, changed);
else
wl1271_bss_info_changed_sta(wl, vif, bss_conf, changed);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static int wlcore_op_add_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
wl1271_debug(DEBUG_MAC80211, "mac80211 add chanctx %d (type %d)",
ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
cfg80211_get_chandef_type(&ctx->def));
return 0;
}
static void wlcore_op_remove_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx)
{
wl1271_debug(DEBUG_MAC80211, "mac80211 remove chanctx %d (type %d)",
ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
cfg80211_get_chandef_type(&ctx->def));
}
static void wlcore_op_change_chanctx(struct ieee80211_hw *hw,
struct ieee80211_chanctx_conf *ctx,
u32 changed)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif;
int ret;
int channel = ieee80211_frequency_to_channel(
ctx->def.chan->center_freq);
wl1271_debug(DEBUG_MAC80211,
"mac80211 change chanctx %d (type %d) changed 0x%x",
channel, cfg80211_get_chandef_type(&ctx->def), changed);
mutex_lock(&wl->mutex);
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl12xx_for_each_wlvif(wl, wlvif) {
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
rcu_read_lock();
if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != ctx) {
rcu_read_unlock();
continue;
}
rcu_read_unlock();
/* start radar if needed */
if (changed & IEEE80211_CHANCTX_CHANGE_RADAR &&
wlvif->bss_type == BSS_TYPE_AP_BSS &&
ctx->radar_enabled && !wlvif->radar_enabled &&
ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
wl1271_debug(DEBUG_MAC80211, "Start radar detection");
wlcore_hw_set_cac(wl, wlvif, true);
wlvif->radar_enabled = true;
}
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static int wlcore_op_assign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int channel = ieee80211_frequency_to_channel(
ctx->def.chan->center_freq);
int ret = -EINVAL;
wl1271_debug(DEBUG_MAC80211,
"mac80211 assign chanctx (role %d) %d (type %d) (radar %d dfs_state %d)",
wlvif->role_id, channel,
cfg80211_get_chandef_type(&ctx->def),
ctx->radar_enabled, ctx->def.chan->dfs_state);
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wlvif->band = ctx->def.chan->band;
wlvif->channel = channel;
wlvif->channel_type = cfg80211_get_chandef_type(&ctx->def);
/* update default rates according to the band */
wl1271_set_band_rate(wl, wlvif);
if (ctx->radar_enabled &&
ctx->def.chan->dfs_state == NL80211_DFS_USABLE) {
wl1271_debug(DEBUG_MAC80211, "Start radar detection");
wlcore_hw_set_cac(wl, wlvif, true);
wlvif->radar_enabled = true;
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return 0;
}
static void wlcore_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *link_conf,
struct ieee80211_chanctx_conf *ctx)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211,
"mac80211 unassign chanctx (role %d) %d (type %d)",
wlvif->role_id,
ieee80211_frequency_to_channel(ctx->def.chan->center_freq),
cfg80211_get_chandef_type(&ctx->def));
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
if (unlikely(!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags)))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
if (wlvif->radar_enabled) {
wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
wlcore_hw_set_cac(wl, wlvif, false);
wlvif->radar_enabled = false;
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
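/*
* Channel switch for an AP vif, in order: stop any running CAC on
* the old channel, adopt the new band/channel/channel_type, then
* restart radar detection if the new context requires it. Beaconing
* is expected to be disabled around the switch (see the
* WLVIF_FLAG_BEACON_DISABLED warning below).
*/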
static int __wlcore_switch_vif_chan(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_chanctx_conf *new_ctx)
{
int channel = ieee80211_frequency_to_channel(
new_ctx->def.chan->center_freq);
wl1271_debug(DEBUG_MAC80211,
"switch vif (role %d) %d -> %d chan_type: %d",
wlvif->role_id, wlvif->channel, channel,
cfg80211_get_chandef_type(&new_ctx->def));
if (WARN_ON_ONCE(wlvif->bss_type != BSS_TYPE_AP_BSS))
return 0;
WARN_ON(!test_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags));
if (wlvif->radar_enabled) {
wl1271_debug(DEBUG_MAC80211, "Stop radar detection");
wlcore_hw_set_cac(wl, wlvif, false);
wlvif->radar_enabled = false;
}
wlvif->band = new_ctx->def.chan->band;
wlvif->channel = channel;
wlvif->channel_type = cfg80211_get_chandef_type(&new_ctx->def);
/* start radar if needed */
if (new_ctx->radar_enabled) {
wl1271_debug(DEBUG_MAC80211, "Start radar detection");
wlcore_hw_set_cac(wl, wlvif, true);
wlvif->radar_enabled = true;
}
return 0;
}
static int
wlcore_op_switch_vif_chanctx(struct ieee80211_hw *hw,
struct ieee80211_vif_chanctx_switch *vifs,
int n_vifs,
enum ieee80211_chanctx_switch_mode mode)
{
struct wl1271 *wl = hw->priv;
int i, ret;
wl1271_debug(DEBUG_MAC80211,
"mac80211 switch chanctx n_vifs %d mode %d",
n_vifs, mode);
mutex_lock(&wl->mutex);
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
for (i = 0; i < n_vifs; i++) {
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vifs[i].vif);
ret = __wlcore_switch_vif_chan(wl, wlvif, vifs[i].new_ctx);
if (ret)
goto out_sleep;
}
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return 0;
}
static int wl1271_op_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u8 ps_scheme;
int ret = 0;
if (wlcore_is_p2p_mgmt(wlvif))
return 0;
mutex_lock(&wl->mutex);
wl1271_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
if (params->uapsd)
ps_scheme = CONF_PS_SCHEME_UPSD_TRIGGER;
else
ps_scheme = CONF_PS_SCHEME_LEGACY;
if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
/*
* mac80211 hands us the txop in units of 32us, but the fw
* wants microseconds - hence the << 5 (i.e. * 32) below.
*/
ret = wl1271_acx_ac_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
params->cw_min, params->cw_max,
params->aifs, params->txop << 5);
if (ret < 0)
goto out_sleep;
ret = wl1271_acx_tid_cfg(wl, wlvif, wl1271_tx_get_queue(queue),
CONF_CHANNEL_TYPE_EDCF,
wl1271_tx_get_queue(queue),
ps_scheme, CONF_ACK_POLICY_LEGACY,
0, 0);
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static u64 wl1271_op_get_tsf(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
u64 mactime = ULLONG_MAX;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 get tsf");
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl12xx_acx_tsf_info(wl, wlvif, &mactime);
if (ret < 0)
goto out_sleep;
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return mactime;
}
static int wl1271_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct ieee80211_conf *conf = &hw->conf;
if (idx != 0)
return -ENOENT;
survey->channel = conf->chandef.chan;
survey->filled = 0;
return 0;
}
static int wl1271_allocate_sta(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta)
{
struct wl1271_station *wl_sta;
int ret;
if (wl->active_sta_count >= wl->max_ap_stations) {
wl1271_warning("could not allocate HLID - too much stations");
return -EBUSY;
}
wl_sta = (struct wl1271_station *)sta->drv_priv;
ret = wl12xx_allocate_link(wl, wlvif, &wl_sta->hlid);
if (ret < 0) {
wl1271_warning("could not allocate HLID - too many links");
return -EBUSY;
}
/* use the previous security seq, if this is a recovery/resume */
wl->links[wl_sta->hlid].total_freed_pkts = wl_sta->total_freed_pkts;
set_bit(wl_sta->hlid, wlvif->ap.sta_hlid_map);
memcpy(wl->links[wl_sta->hlid].addr, sta->addr, ETH_ALEN);
wl->active_sta_count++;
return 0;
}
void wl1271_free_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 hlid)
{
if (!test_bit(hlid, wlvif->ap.sta_hlid_map))
return;
clear_bit(hlid, wlvif->ap.sta_hlid_map);
__clear_bit(hlid, &wl->ap_ps_map);
__clear_bit(hlid, &wl->ap_fw_ps_map);
/*
* save the last used PN in the private part of ieee80211_sta,
* in case of recovery/suspend
*/
wlcore_save_freed_pkts_addr(wl, wlvif, hlid, wl->links[hlid].addr);
wl12xx_free_link(wl, wlvif, &hlid);
wl->active_sta_count--;
/*
* rearm the tx watchdog when the last STA is freed - give the FW a
* chance to return STA-buffered packets before complaining.
*/
if (wl->active_sta_count == 0)
wl12xx_rearm_tx_watchdog_locked(wl);
}
static int wl12xx_sta_add(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta)
{
struct wl1271_station *wl_sta;
int ret = 0;
u8 hlid;
wl1271_debug(DEBUG_MAC80211, "mac80211 add sta %d", (int)sta->aid);
ret = wl1271_allocate_sta(wl, wlvif, sta);
if (ret < 0)
return ret;
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
ret = wl12xx_cmd_add_peer(wl, wlvif, sta, hlid);
if (ret < 0)
wl1271_free_sta(wl, wlvif, hlid);
return ret;
}
static int wl12xx_sta_remove(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta)
{
struct wl1271_station *wl_sta;
int ret = 0, id;
wl1271_debug(DEBUG_MAC80211, "mac80211 remove sta %d", (int)sta->aid);
wl_sta = (struct wl1271_station *)sta->drv_priv;
id = wl_sta->hlid;
if (WARN_ON(!test_bit(id, wlvif->ap.sta_hlid_map)))
return -EINVAL;
ret = wl12xx_cmd_remove_peer(wl, wlvif, wl_sta->hlid);
if (ret < 0)
return ret;
wl1271_free_sta(wl, wlvif, wl_sta->hlid);
return ret;
}
static void wlcore_roc_if_possible(struct wl1271 *wl,
struct wl12xx_vif *wlvif)
{
if (find_first_bit(wl->roc_map,
WL12XX_MAX_ROLES) < WL12XX_MAX_ROLES)
return;
if (WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID))
return;
wl12xx_roc(wl, wlvif, wlvif->role_id, wlvif->band, wlvif->channel);
}
/*
* When wl_sta is NULL, we treat this call as if coming from a
* pending auth reply.
* wl->mutex must be taken and the FW must be awake when the call
* takes place.
*/
void wlcore_update_inconn_sta(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct wl1271_station *wl_sta, bool in_conn)
{
if (in_conn) {
if (WARN_ON(wl_sta && wl_sta->in_connection))
return;
if (!wlvif->ap_pending_auth_reply &&
!wlvif->inconn_count)
wlcore_roc_if_possible(wl, wlvif);
if (wl_sta) {
wl_sta->in_connection = true;
wlvif->inconn_count++;
} else {
wlvif->ap_pending_auth_reply = true;
}
} else {
if (wl_sta && !wl_sta->in_connection)
return;
if (WARN_ON(!wl_sta && !wlvif->ap_pending_auth_reply))
return;
if (WARN_ON(wl_sta && !wlvif->inconn_count))
return;
if (wl_sta) {
wl_sta->in_connection = false;
wlvif->inconn_count--;
} else {
wlvif->ap_pending_auth_reply = false;
}
if (!wlvif->inconn_count && !wlvif->ap_pending_auth_reply &&
test_bit(wlvif->role_id, wl->roc_map))
wl12xx_croc(wl, wlvif->role_id);
}
}
static int wl12xx_update_sta_state(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
struct wl1271_station *wl_sta;
bool is_ap = wlvif->bss_type == BSS_TYPE_AP_BSS;
bool is_sta = wlvif->bss_type == BSS_TYPE_STA_BSS;
int ret;
wl_sta = (struct wl1271_station *)sta->drv_priv;
/* Add station (AP mode) */
if (is_ap &&
old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
ret = wl12xx_sta_add(wl, wlvif, sta);
if (ret)
return ret;
wlcore_update_inconn_sta(wl, wlvif, wl_sta, true);
}
/* Remove station (AP mode) */
if (is_ap &&
old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST) {
/* must not fail */
wl12xx_sta_remove(wl, wlvif, sta);
wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
}
/* Authorize station (AP mode) */
if (is_ap &&
new_state == IEEE80211_STA_AUTHORIZED) {
ret = wl12xx_cmd_set_peer_state(wl, wlvif, wl_sta->hlid);
if (ret < 0)
return ret;
/* reconfigure rates */
ret = wl12xx_cmd_add_peer(wl, wlvif, sta, wl_sta->hlid);
if (ret < 0)
return ret;
ret = wl1271_acx_set_ht_capabilities(wl, &sta->deflink.ht_cap,
true,
wl_sta->hlid);
if (ret)
return ret;
wlcore_update_inconn_sta(wl, wlvif, wl_sta, false);
}
/* Authorize station */
if (is_sta &&
new_state == IEEE80211_STA_AUTHORIZED) {
set_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
ret = wl12xx_set_authorized(wl, wlvif);
if (ret)
return ret;
}
if (is_sta &&
old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
clear_bit(WLVIF_FLAG_STA_AUTHORIZED, &wlvif->flags);
clear_bit(WLVIF_FLAG_STA_STATE_SENT, &wlvif->flags);
}
/* save seq number on disassoc (suspend) */
if (is_sta &&
old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTH) {
wlcore_save_freed_pkts(wl, wlvif, wlvif->sta.hlid, sta);
wlvif->total_freed_pkts = 0;
}
/* restore seq number on assoc (resume) */
if (is_sta &&
old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC) {
wlvif->total_freed_pkts = wl_sta->total_freed_pkts;
}
/* clear ROCs on failure or authorization */
if (is_sta &&
(new_state == IEEE80211_STA_AUTHORIZED ||
new_state == IEEE80211_STA_NOTEXIST)) {
if (test_bit(wlvif->role_id, wl->roc_map))
wl12xx_croc(wl, wlvif->role_id);
}
if (is_sta &&
old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE) {
if (find_first_bit(wl->roc_map,
WL12XX_MAX_ROLES) >= WL12XX_MAX_ROLES) {
WARN_ON(wlvif->role_id == WL12XX_INVALID_ROLE_ID);
wl12xx_roc(wl, wlvif, wlvif->role_id,
wlvif->band, wlvif->channel);
}
}
return 0;
}
static int wl12xx_op_sta_state(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
enum ieee80211_sta_state old_state,
enum ieee80211_sta_state new_state)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 sta %d state=%d->%d",
sta->aid, old_state, new_state);
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EBUSY;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl12xx_update_sta_state(wl, wlvif, sta, old_state, new_state);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
if (new_state < old_state)
return 0;
return ret;
}
static int wl1271_op_ampdu_action(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_ampdu_params *params)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
u8 hlid, *ba_bitmap;
struct ieee80211_sta *sta = params->sta;
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu action %d tid %d", action,
tid);
/* sanity check - the fields in FW are only 8 bits wide */
if (WARN_ON(tid > 0xFF))
return -ENOTSUPP;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EAGAIN;
goto out;
}
if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
hlid = wlvif->sta.hlid;
} else if (wlvif->bss_type == BSS_TYPE_AP_BSS) {
struct wl1271_station *wl_sta;
wl_sta = (struct wl1271_station *)sta->drv_priv;
hlid = wl_sta->hlid;
} else {
ret = -EINVAL;
goto out;
}
ba_bitmap = &wl->links[hlid].ba_bitmap;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl1271_debug(DEBUG_MAC80211, "mac80211 ampdu: Rx tid %d action %d",
tid, action);
switch (action) {
case IEEE80211_AMPDU_RX_START:
if (!wlvif->ba_support || !wlvif->ba_allowed) {
ret = -ENOTSUPP;
break;
}
if (wl->ba_rx_session_count >= wl->ba_rx_session_count_max) {
ret = -EBUSY;
wl1271_debug(DEBUG_RX, "exceeded max RX BA sessions");
break;
}
if (*ba_bitmap & BIT(tid)) {
ret = -EINVAL;
wl1271_error("cannot enable RX BA session on active "
"tid: %d", tid);
break;
}
ret = wl12xx_acx_set_ba_receiver_session(wl, tid, *ssn, true,
hlid,
params->buf_size);
if (!ret) {
*ba_bitmap |= BIT(tid);
wl->ba_rx_session_count++;
}
break;
case IEEE80211_AMPDU_RX_STOP:
if (!(*ba_bitmap & BIT(tid))) {
/*
* this happens on reconfig - so only output a debug
* message for now, and don't fail the function.
*/
wl1271_debug(DEBUG_MAC80211,
"no active RX BA session on tid: %d",
tid);
ret = 0;
break;
}
ret = wl12xx_acx_set_ba_receiver_session(wl, tid, 0, false,
hlid, 0);
if (!ret) {
*ba_bitmap &= ~BIT(tid);
wl->ba_rx_session_count--;
}
break;
/*
 * The BA initiator session is managed in the FW independently.
 * Falling through here on purpose for all TX AMPDU commands.
 */
case IEEE80211_AMPDU_TX_START:
case IEEE80211_AMPDU_TX_STOP_CONT:
case IEEE80211_AMPDU_TX_STOP_FLUSH:
case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
case IEEE80211_AMPDU_TX_OPERATIONAL:
ret = -EINVAL;
break;
default:
wl1271_error("Incorrect ampdu action id=%x\n", action);
ret = -EINVAL;
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static int wl12xx_set_bitrate_mask(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
const struct cfg80211_bitrate_mask *mask)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271 *wl = hw->priv;
int i, ret = 0;
wl1271_debug(DEBUG_MAC80211, "mac80211 set_bitrate_mask 0x%x 0x%x",
mask->control[NL80211_BAND_2GHZ].legacy,
mask->control[NL80211_BAND_5GHZ].legacy);
mutex_lock(&wl->mutex);
for (i = 0; i < WLCORE_NUM_BANDS; i++)
wlvif->bitrate_masks[i] =
wl1271_tx_enabled_rates_get(wl,
mask->control[i].legacy,
i);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
if (wlvif->bss_type == BSS_TYPE_STA_BSS &&
!test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
wl1271_set_band_rate(wl, wlvif);
wlvif->basic_rate =
wl1271_tx_min_rate_get(wl, wlvif->basic_rate_set);
ret = wl1271_acx_sta_rate_policies(wl, wlvif);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
}
out:
mutex_unlock(&wl->mutex);
return ret;
}
static void wl12xx_op_channel_switch(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel_switch *ch_switch)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 channel switch");
wl1271_tx_flush(wl);
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WLCORE_STATE_OFF)) {
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags))
ieee80211_chswitch_done(vif, false);
goto out;
} else if (unlikely(wl->state != WLCORE_STATE_ON)) {
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
/* TODO: change mac80211 to pass vif as param */
if (test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags)) {
unsigned long delay_usec;
ret = wl->ops->channel_switch(wl, wlvif, ch_switch);
if (ret)
goto out_sleep;
set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
/* indicate failure 5 seconds after channel switch time */
delay_usec = ieee80211_tu_to_usec(wlvif->beacon_int) *
ch_switch->count;
ieee80211_queue_delayed_work(hw, &wlvif->channel_switch_work,
usecs_to_jiffies(delay_usec) +
msecs_to_jiffies(5000));
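/*
 * Worked example (hypothetical values): beacon_int 100 TU
 * -> 102400 us per interval; with count == 10 the work above
 * fires ~1.024 s + 5 s from now.
 */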
}
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static const void *wlcore_get_beacon_ie(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u8 eid)
{
int ieoffset = offsetof(struct ieee80211_mgmt, u.beacon.variable);
struct sk_buff *beacon =
ieee80211_beacon_get(wl->hw, wl12xx_wlvif_to_vif(wlvif), 0);
if (!beacon)
return NULL;
return cfg80211_find_ie(eid,
beacon->data + ieoffset,
beacon->len - ieoffset);
}
static int wlcore_get_csa_count(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 *csa_count)
{
const u8 *ie;
const struct ieee80211_channel_sw_ie *ie_csa;
ie = wlcore_get_beacon_ie(wl, wlvif, WLAN_EID_CHANNEL_SWITCH);
if (!ie)
return -EINVAL;
ie_csa = (struct ieee80211_channel_sw_ie *)&ie[2];
*csa_count = ie_csa->count;
return 0;
}
static void wlcore_op_channel_switch_beacon(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct cfg80211_chan_def *chandef)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct ieee80211_channel_switch ch_switch = {
.block_tx = true,
.chandef = *chandef,
};
int ret;
wl1271_debug(DEBUG_MAC80211,
"mac80211 channel switch beacon (role %d)",
wlvif->role_id);
ret = wlcore_get_csa_count(wl, wlvif, &ch_switch.count);
if (ret < 0) {
wl1271_error("error getting beacon (for CSA counter)");
return;
}
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EBUSY;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl->ops->channel_switch(wl, wlvif, &ch_switch);
if (ret)
goto out_sleep;
set_bit(WLVIF_FLAG_CS_PROGRESS, &wlvif->flags);
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static void wlcore_op_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
u32 queues, bool drop)
{
struct wl1271 *wl = hw->priv;
wl1271_tx_flush(wl);
}
static int wlcore_op_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_channel *chan,
int duration,
enum ieee80211_roc_type type)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
struct wl1271 *wl = hw->priv;
int channel, active_roc, ret = 0;
channel = ieee80211_frequency_to_channel(chan->center_freq);
wl1271_debug(DEBUG_MAC80211, "mac80211 roc %d (%d)",
channel, wlvif->role_id);
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
/* return EBUSY if we can't ROC right now */
active_roc = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES);
if (wl->roc_vif || active_roc < WL12XX_MAX_ROLES) {
wl1271_warning("active roc on role %d", active_roc);
ret = -EBUSY;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wl12xx_start_dev(wl, wlvif, chan->band, channel);
if (ret < 0)
goto out_sleep;
wl->roc_vif = vif;
ieee80211_queue_delayed_work(hw, &wl->roc_complete_work,
msecs_to_jiffies(duration));
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static int __wlcore_roc_completed(struct wl1271 *wl)
{
struct wl12xx_vif *wlvif;
int ret;
/* already completed */
if (unlikely(!wl->roc_vif))
return 0;
wlvif = wl12xx_vif_to_data(wl->roc_vif);
if (!test_bit(WLVIF_FLAG_INITIALIZED, &wlvif->flags))
return -EBUSY;
ret = wl12xx_stop_dev(wl, wlvif);
if (ret < 0)
return ret;
wl->roc_vif = NULL;
return 0;
}
static int wlcore_roc_completed(struct wl1271 *wl)
{
int ret;
wl1271_debug(DEBUG_MAC80211, "roc complete");
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON)) {
ret = -EBUSY;
goto out;
}
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = __wlcore_roc_completed(wl);
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static void wlcore_roc_complete_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1271 *wl;
int ret;
dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1271, roc_complete_work);
ret = wlcore_roc_completed(wl);
if (!ret)
ieee80211_remain_on_channel_expired(wl->hw);
}
static int wlcore_op_cancel_remain_on_channel(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1271 *wl = hw->priv;
wl1271_debug(DEBUG_MAC80211, "mac80211 croc");
/* TODO: per-vif */
wl1271_tx_flush(wl);
/*
* we can't just flush_work here, because it might deadlock
* (as we might get called from the same workqueue)
*/
cancel_delayed_work_sync(&wl->roc_complete_work);
wlcore_roc_completed(wl);
return 0;
}
static void wlcore_op_sta_rc_update(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
u32 changed)
{
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
wl1271_debug(DEBUG_MAC80211, "mac80211 sta_rc_update");
if (!(changed & IEEE80211_RC_BW_CHANGED))
return;
/* this callback is atomic, so schedule a new work */
wlvif->rc_update_bw = sta->deflink.bandwidth;
memcpy(&wlvif->rc_ht_cap, &sta->deflink.ht_cap,
sizeof(sta->deflink.ht_cap));
ieee80211_queue_work(hw, &wlvif->rc_update_work);
}
static void wlcore_op_sta_statistics(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct station_info *sinfo)
{
struct wl1271 *wl = hw->priv;
struct wl12xx_vif *wlvif = wl12xx_vif_to_data(vif);
s8 rssi_dbm;
int ret;
wl1271_debug(DEBUG_MAC80211, "mac80211 get_rssi");
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out_sleep;
ret = wlcore_acx_average_rssi(wl, wlvif, &rssi_dbm);
if (ret < 0)
goto out_sleep;
sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL);
sinfo->signal = rssi_dbm;
out_sleep:
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static u32 wlcore_op_get_expected_throughput(struct ieee80211_hw *hw,
struct ieee80211_sta *sta)
{
struct wl1271_station *wl_sta = (struct wl1271_station *)sta->drv_priv;
struct wl1271 *wl = hw->priv;
u8 hlid = wl_sta->hlid;
/* return in units of Kbps */
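/* e.g. (illustrative) fw_rate_mbps == 54 -> 54 * 1000 == 54000 Kbps */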
return (wl->links[hlid].fw_rate_mbps * 1000);
}
static bool wl1271_tx_frames_pending(struct ieee80211_hw *hw)
{
struct wl1271 *wl = hw->priv;
bool ret = false;
mutex_lock(&wl->mutex);
if (unlikely(wl->state != WLCORE_STATE_ON))
goto out;
/* packets are considered pending if in the TX queue or the FW */
ret = (wl1271_tx_total_queue_count(wl) > 0) || (wl->tx_frames_cnt > 0);
out:
mutex_unlock(&wl->mutex);
return ret;
}
/* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1271_rates[] = {
{ .bitrate = 10,
.hw_value = CONF_HW_BIT_RATE_1MBPS,
.hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
{ .bitrate = 20,
.hw_value = CONF_HW_BIT_RATE_2MBPS,
.hw_value_short = CONF_HW_BIT_RATE_2MBPS,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 55,
.hw_value = CONF_HW_BIT_RATE_5_5MBPS,
.hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 110,
.hw_value = CONF_HW_BIT_RATE_11MBPS,
.hw_value_short = CONF_HW_BIT_RATE_11MBPS,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 60,
.hw_value = CONF_HW_BIT_RATE_6MBPS,
.hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
{ .bitrate = 90,
.hw_value = CONF_HW_BIT_RATE_9MBPS,
.hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
{ .bitrate = 120,
.hw_value = CONF_HW_BIT_RATE_12MBPS,
.hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
{ .bitrate = 180,
.hw_value = CONF_HW_BIT_RATE_18MBPS,
.hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
{ .bitrate = 240,
.hw_value = CONF_HW_BIT_RATE_24MBPS,
.hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
{ .bitrate = 360,
.hw_value = CONF_HW_BIT_RATE_36MBPS,
.hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
{ .bitrate = 480,
.hw_value = CONF_HW_BIT_RATE_48MBPS,
.hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
{ .bitrate = 540,
.hw_value = CONF_HW_BIT_RATE_54MBPS,
.hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1271_channels[] = {
{ .hw_value = 1, .center_freq = 2412, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 2, .center_freq = 2417, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 3, .center_freq = 2422, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 4, .center_freq = 2427, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 5, .center_freq = 2432, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 6, .center_freq = 2437, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 7, .center_freq = 2442, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 8, .center_freq = 2447, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 9, .center_freq = 2452, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 10, .center_freq = 2457, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 11, .center_freq = 2462, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 12, .center_freq = 2467, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 13, .center_freq = 2472, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 14, .center_freq = 2484, .max_power = WLCORE_MAX_TXPWR },
};
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1271_band_2ghz = {
.channels = wl1271_channels,
.n_channels = ARRAY_SIZE(wl1271_channels),
.bitrates = wl1271_rates,
.n_bitrates = ARRAY_SIZE(wl1271_rates),
};
/* 5 GHz data rates for WL1273 */
static struct ieee80211_rate wl1271_rates_5ghz[] = {
{ .bitrate = 60,
.hw_value = CONF_HW_BIT_RATE_6MBPS,
.hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
{ .bitrate = 90,
.hw_value = CONF_HW_BIT_RATE_9MBPS,
.hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
{ .bitrate = 120,
.hw_value = CONF_HW_BIT_RATE_12MBPS,
.hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
{ .bitrate = 180,
.hw_value = CONF_HW_BIT_RATE_18MBPS,
.hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
{ .bitrate = 240,
.hw_value = CONF_HW_BIT_RATE_24MBPS,
.hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
{ .bitrate = 360,
.hw_value = CONF_HW_BIT_RATE_36MBPS,
.hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
{ .bitrate = 480,
.hw_value = CONF_HW_BIT_RATE_48MBPS,
.hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
{ .bitrate = 540,
.hw_value = CONF_HW_BIT_RATE_54MBPS,
.hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
};
/* 5 GHz band channels for WL1273 */
static struct ieee80211_channel wl1271_channels_5ghz[] = {
{ .hw_value = 8, .center_freq = 5040, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 12, .center_freq = 5060, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 16, .center_freq = 5080, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 34, .center_freq = 5170, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 36, .center_freq = 5180, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 38, .center_freq = 5190, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 40, .center_freq = 5200, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 42, .center_freq = 5210, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 44, .center_freq = 5220, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 46, .center_freq = 5230, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 48, .center_freq = 5240, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 52, .center_freq = 5260, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 56, .center_freq = 5280, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 60, .center_freq = 5300, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 64, .center_freq = 5320, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 100, .center_freq = 5500, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 104, .center_freq = 5520, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 108, .center_freq = 5540, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 112, .center_freq = 5560, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 116, .center_freq = 5580, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 120, .center_freq = 5600, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 124, .center_freq = 5620, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 128, .center_freq = 5640, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 132, .center_freq = 5660, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 136, .center_freq = 5680, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 140, .center_freq = 5700, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 149, .center_freq = 5745, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 153, .center_freq = 5765, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 157, .center_freq = 5785, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 161, .center_freq = 5805, .max_power = WLCORE_MAX_TXPWR },
{ .hw_value = 165, .center_freq = 5825, .max_power = WLCORE_MAX_TXPWR },
};
static struct ieee80211_supported_band wl1271_band_5ghz = {
.channels = wl1271_channels_5ghz,
.n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
.bitrates = wl1271_rates_5ghz,
.n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
};
static const struct ieee80211_ops wl1271_ops = {
.start = wl1271_op_start,
.stop = wlcore_op_stop,
.add_interface = wl1271_op_add_interface,
.remove_interface = wl1271_op_remove_interface,
.change_interface = wl12xx_op_change_interface,
#ifdef CONFIG_PM
.suspend = wl1271_op_suspend,
.resume = wl1271_op_resume,
#endif
.config = wl1271_op_config,
.prepare_multicast = wl1271_op_prepare_multicast,
.configure_filter = wl1271_op_configure_filter,
.tx = wl1271_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.set_key = wlcore_op_set_key,
.hw_scan = wl1271_op_hw_scan,
.cancel_hw_scan = wl1271_op_cancel_hw_scan,
.sched_scan_start = wl1271_op_sched_scan_start,
.sched_scan_stop = wl1271_op_sched_scan_stop,
.bss_info_changed = wl1271_op_bss_info_changed,
.set_frag_threshold = wl1271_op_set_frag_threshold,
.set_rts_threshold = wl1271_op_set_rts_threshold,
.conf_tx = wl1271_op_conf_tx,
.get_tsf = wl1271_op_get_tsf,
.get_survey = wl1271_op_get_survey,
.sta_state = wl12xx_op_sta_state,
.ampdu_action = wl1271_op_ampdu_action,
.tx_frames_pending = wl1271_tx_frames_pending,
.set_bitrate_mask = wl12xx_set_bitrate_mask,
.set_default_unicast_key = wl1271_op_set_default_key_idx,
.channel_switch = wl12xx_op_channel_switch,
.channel_switch_beacon = wlcore_op_channel_switch_beacon,
.flush = wlcore_op_flush,
.remain_on_channel = wlcore_op_remain_on_channel,
.cancel_remain_on_channel = wlcore_op_cancel_remain_on_channel,
.add_chanctx = wlcore_op_add_chanctx,
.remove_chanctx = wlcore_op_remove_chanctx,
.change_chanctx = wlcore_op_change_chanctx,
.assign_vif_chanctx = wlcore_op_assign_vif_chanctx,
.unassign_vif_chanctx = wlcore_op_unassign_vif_chanctx,
.switch_vif_chanctx = wlcore_op_switch_vif_chanctx,
.sta_rc_update = wlcore_op_sta_rc_update,
.sta_statistics = wlcore_op_sta_statistics,
.get_expected_throughput = wlcore_op_get_expected_throughput,
CFG80211_TESTMODE_CMD(wl1271_tm_cmd)
};
u8 wlcore_rate_to_idx(struct wl1271 *wl, u8 rate, enum nl80211_band band)
{
u8 idx;
BUG_ON(band >= 2);
if (unlikely(rate >= wl->hw_tx_rate_tbl_size)) {
wl1271_error("Illegal RX rate from HW: %d", rate);
return 0;
}
idx = wl->band_rate_to_idx[band][rate];
if (unlikely(idx == CONF_HW_RXTX_RATE_UNSUPPORTED)) {
wl1271_error("Unsupported RX rate from HW: %d", rate);
return 0;
}
return idx;
}
static void wl12xx_derive_mac_addresses(struct wl1271 *wl, u32 oui, u32 nic)
{
int i;
wl1271_debug(DEBUG_PROBE, "base address: oui %06x nic %06x",
oui, nic);
if (nic + WLCORE_NUM_MAC_ADDRESSES - wl->num_mac_addr > 0xffffff)
wl1271_warning("NIC part of the MAC address wraps around!");
for (i = 0; i < wl->num_mac_addr; i++) {
wl->addresses[i].addr[0] = (u8)(oui >> 16);
wl->addresses[i].addr[1] = (u8)(oui >> 8);
wl->addresses[i].addr[2] = (u8) oui;
wl->addresses[i].addr[3] = (u8)(nic >> 16);
wl->addresses[i].addr[4] = (u8)(nic >> 8);
wl->addresses[i].addr[5] = (u8) nic;
nic++;
}
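/*
 * Illustrative sketch (hypothetical values): oui 0x080028 and nic
 * 0x123456 yield 08:00:28:12:34:56 for the first address, with the
 * nic part incrementing per address: ...:34:57, ...:34:58, etc.
 */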
/* at most, we may be one address short */
WARN_ON(wl->num_mac_addr + 1 < WLCORE_NUM_MAC_ADDRESSES);
/*
* turn on the LAA bit in the first address and use it as
* the last address.
*/
if (wl->num_mac_addr < WLCORE_NUM_MAC_ADDRESSES) {
int idx = WLCORE_NUM_MAC_ADDRESSES - 1;
memcpy(&wl->addresses[idx], &wl->addresses[0],
sizeof(wl->addresses[0]));
/* LAA bit */
wl->addresses[idx].addr[0] |= BIT(1);
}
wl->hw->wiphy->n_addresses = WLCORE_NUM_MAC_ADDRESSES;
wl->hw->wiphy->addresses = wl->addresses;
}
static int wl12xx_get_hw_info(struct wl1271 *wl)
{
int ret;
ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &wl->chip.id);
if (ret < 0)
goto out;
wl->fuse_oui_addr = 0;
wl->fuse_nic_addr = 0;
ret = wl->ops->get_pg_ver(wl, &wl->hw_pg_ver);
if (ret < 0)
goto out;
if (wl->ops->get_mac)
ret = wl->ops->get_mac(wl);
out:
return ret;
}
static int wl1271_register_hw(struct wl1271 *wl)
{
int ret;
u32 oui_addr = 0, nic_addr = 0;
struct platform_device *pdev = wl->pdev;
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
if (wl->mac80211_registered)
return 0;
if (wl->nvs_len >= 12) {
/* NOTE: The wl->nvs->nvs element must be first, in
* order to simplify the casting, we assume it is at
* the beginning of the wl->nvs structure.
*/
u8 *nvs_ptr = (u8 *)wl->nvs;
oui_addr =
(nvs_ptr[11] << 16) + (nvs_ptr[10] << 8) + nvs_ptr[6];
nic_addr =
(nvs_ptr[5] << 16) + (nvs_ptr[4] << 8) + nvs_ptr[3];
}
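/*
 * Assumed layout sketch (illustrative, not authoritative): with nvs
 * bytes [3]=0x56 [4]=0x34 [5]=0x12 and [6]=0x28 [10]=0x00 [11]=0x08,
 * the lines above reconstruct oui 0x080028 and nic 0x123456.
 */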
/* if the MAC address is zeroed in the NVS, derive it from the fuse */
if (oui_addr == 0 && nic_addr == 0) {
oui_addr = wl->fuse_oui_addr;
/* fuse has the BD_ADDR, the WLAN addresses are the next two */
nic_addr = wl->fuse_nic_addr + 1;
}
if (oui_addr == 0xdeadbe && nic_addr == 0xef0000) {
wl1271_warning("Detected unconfigured mac address in nvs, derive from fuse instead.");
if (!strcmp(pdev_data->family->name, "wl18xx")) {
wl1271_warning("This default nvs file can be removed from the file system");
} else {
wl1271_warning("Your device performance is not optimized.");
wl1271_warning("Please use the calibrator tool to configure your device.");
}
if (wl->fuse_oui_addr == 0 && wl->fuse_nic_addr == 0) {
wl1271_warning("Fuse mac address is zero. using random mac");
/* Use TI oui and a random nic */
oui_addr = WLCORE_TI_OUI_ADDRESS;
nic_addr = get_random_u32();
} else {
oui_addr = wl->fuse_oui_addr;
/* fuse has the BD_ADDR, the WLAN addresses are the next two */
nic_addr = wl->fuse_nic_addr + 1;
}
}
wl12xx_derive_mac_addresses(wl, oui_addr, nic_addr);
ret = ieee80211_register_hw(wl->hw);
if (ret < 0) {
wl1271_error("unable to register mac80211 hw: %d", ret);
goto out;
}
wl->mac80211_registered = true;
wl1271_debugfs_init(wl);
wl1271_notice("loaded");
out:
return ret;
}
static void wl1271_unregister_hw(struct wl1271 *wl)
{
if (wl->plt)
wl1271_plt_stop(wl);
ieee80211_unregister_hw(wl->hw);
wl->mac80211_registered = false;
}
static int wl1271_init_ieee80211(struct wl1271 *wl)
{
int i;
static const u32 cipher_suites[] = {
WLAN_CIPHER_SUITE_WEP40,
WLAN_CIPHER_SUITE_WEP104,
WLAN_CIPHER_SUITE_TKIP,
WLAN_CIPHER_SUITE_CCMP,
WL1271_CIPHER_SUITE_GEM,
};
/* The tx descriptor buffer */
wl->hw->extra_tx_headroom = sizeof(struct wl1271_tx_hw_descr);
if (wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE)
wl->hw->extra_tx_headroom += WL1271_EXTRA_SPACE_TKIP;
/* unit us */
/* FIXME: find a proper value */
wl->hw->max_listen_interval = wl->conf.conn.max_listen_interval;
ieee80211_hw_set(wl->hw, SUPPORT_FAST_XMIT);
ieee80211_hw_set(wl->hw, CHANCTX_STA_CSA);
ieee80211_hw_set(wl->hw, SUPPORTS_PER_STA_GTK);
ieee80211_hw_set(wl->hw, QUEUE_CONTROL);
ieee80211_hw_set(wl->hw, TX_AMPDU_SETUP_IN_HW);
ieee80211_hw_set(wl->hw, AMPDU_AGGREGATION);
ieee80211_hw_set(wl->hw, AP_LINK_PS);
ieee80211_hw_set(wl->hw, SPECTRUM_MGMT);
ieee80211_hw_set(wl->hw, REPORTS_TX_ACK_STATUS);
ieee80211_hw_set(wl->hw, CONNECTION_MONITOR);
ieee80211_hw_set(wl->hw, HAS_RATE_CONTROL);
ieee80211_hw_set(wl->hw, SUPPORTS_DYNAMIC_PS);
ieee80211_hw_set(wl->hw, SIGNAL_DBM);
ieee80211_hw_set(wl->hw, SUPPORTS_PS);
ieee80211_hw_set(wl->hw, SUPPORTS_TX_FRAG);
wl->hw->wiphy->cipher_suites = cipher_suites;
wl->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_P2P_DEVICE) |
BIT(NL80211_IFTYPE_P2P_CLIENT) |
#ifdef CONFIG_MAC80211_MESH
BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
BIT(NL80211_IFTYPE_P2P_GO);
wl->hw->wiphy->max_scan_ssids = 1;
wl->hw->wiphy->max_sched_scan_ssids = 16;
wl->hw->wiphy->max_match_sets = 16;
/*
* Maximum length of elements in scanning probe request templates
* should be the maximum length possible for a template, without
* the IEEE 802.11 header of the template
*/
wl->hw->wiphy->max_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
sizeof(struct ieee80211_header);
wl->hw->wiphy->max_sched_scan_reqs = 1;
wl->hw->wiphy->max_sched_scan_ie_len = WL1271_CMD_TEMPL_MAX_SIZE -
sizeof(struct ieee80211_header);
wl->hw->wiphy->max_remain_on_channel_duration = 30000;
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD |
WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL |
WIPHY_FLAG_HAS_CHANNEL_SWITCH |
WIPHY_FLAG_IBSS_RSN;
wl->hw->wiphy->features |= NL80211_FEATURE_AP_SCAN;
/* make sure all our channels fit in the scanned_ch bitmask */
BUILD_BUG_ON(ARRAY_SIZE(wl1271_channels) +
ARRAY_SIZE(wl1271_channels_5ghz) >
WL1271_MAX_CHANNELS);
/*
* clear channel flags from the previous usage
* and restore max_power & max_antenna_gain values.
*/
for (i = 0; i < ARRAY_SIZE(wl1271_channels); i++) {
wl1271_band_2ghz.channels[i].flags = 0;
wl1271_band_2ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
wl1271_band_2ghz.channels[i].max_antenna_gain = 0;
}
for (i = 0; i < ARRAY_SIZE(wl1271_channels_5ghz); i++) {
wl1271_band_5ghz.channels[i].flags = 0;
wl1271_band_5ghz.channels[i].max_power = WLCORE_MAX_TXPWR;
wl1271_band_5ghz.channels[i].max_antenna_gain = 0;
}
/*
* We keep local copies of the band structs because we need to
* modify them on a per-device basis.
*/
memcpy(&wl->bands[NL80211_BAND_2GHZ], &wl1271_band_2ghz,
sizeof(wl1271_band_2ghz));
memcpy(&wl->bands[NL80211_BAND_2GHZ].ht_cap,
&wl->ht_cap[NL80211_BAND_2GHZ],
sizeof(*wl->ht_cap));
memcpy(&wl->bands[NL80211_BAND_5GHZ], &wl1271_band_5ghz,
sizeof(wl1271_band_5ghz));
memcpy(&wl->bands[NL80211_BAND_5GHZ].ht_cap,
&wl->ht_cap[NL80211_BAND_5GHZ],
sizeof(*wl->ht_cap));
wl->hw->wiphy->bands[NL80211_BAND_2GHZ] =
&wl->bands[NL80211_BAND_2GHZ];
wl->hw->wiphy->bands[NL80211_BAND_5GHZ] =
&wl->bands[NL80211_BAND_5GHZ];
/*
* allow 4 queues per supported mac address, plus
* 1 cab queue per mac and one global offchannel Tx queue
*/
wl->hw->queues = (NUM_TX_QUEUES + 1) * WLCORE_NUM_MAC_ADDRESSES + 1;
/* the last queue is the offchannel queue */
wl->hw->offchannel_tx_hw_queue = wl->hw->queues - 1;
wl->hw->max_rates = 1;
wl->hw->wiphy->reg_notifier = wl1271_reg_notify;
/* the FW answers probe-requests in AP-mode */
wl->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
wl->hw->wiphy->probe_resp_offload =
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
/* allowed interface combinations */
wl->hw->wiphy->iface_combinations = wl->iface_combinations;
wl->hw->wiphy->n_iface_combinations = wl->n_iface_combinations;
/* register vendor commands */
wlcore_set_vendor_commands(wl->hw->wiphy);
SET_IEEE80211_DEV(wl->hw, wl->dev);
wl->hw->sta_data_size = sizeof(struct wl1271_station);
wl->hw->vif_data_size = sizeof(struct wl12xx_vif);
wl->hw->max_rx_aggregation_subframes = wl->conf.ht.rx_ba_win_size;
return 0;
}
struct ieee80211_hw *wlcore_alloc_hw(size_t priv_size, u32 aggr_buf_size,
u32 mbox_size)
{
struct ieee80211_hw *hw;
struct wl1271 *wl;
int i, j, ret;
unsigned int order;
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1271_ops);
if (!hw) {
wl1271_error("could not alloc ieee80211_hw");
ret = -ENOMEM;
goto err_hw_alloc;
}
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
wl->priv = kzalloc(priv_size, GFP_KERNEL);
if (!wl->priv) {
wl1271_error("could not alloc wl priv");
ret = -ENOMEM;
goto err_priv_alloc;
}
INIT_LIST_HEAD(&wl->wlvif_list);
wl->hw = hw;
/*
* wl->num_links is not configured yet, so just use WLCORE_MAX_LINKS.
* We don't allocate any additional resource here, so that's fine.
*/
for (i = 0; i < NUM_TX_QUEUES; i++)
for (j = 0; j < WLCORE_MAX_LINKS; j++)
skb_queue_head_init(&wl->links[j].tx_queue[i]);
skb_queue_head_init(&wl->deferred_rx_queue);
skb_queue_head_init(&wl->deferred_tx_queue);
INIT_WORK(&wl->netstack_work, wl1271_netstack_work);
INIT_WORK(&wl->tx_work, wl1271_tx_work);
INIT_WORK(&wl->recovery_work, wl1271_recovery_work);
INIT_DELAYED_WORK(&wl->scan_complete_work, wl1271_scan_complete_work);
INIT_DELAYED_WORK(&wl->roc_complete_work, wlcore_roc_complete_work);
INIT_DELAYED_WORK(&wl->tx_watchdog_work, wl12xx_tx_watchdog_work);
wl->freezable_wq = create_freezable_workqueue("wl12xx_wq");
if (!wl->freezable_wq) {
ret = -ENOMEM;
goto err_hw;
}
wl->channel = 0;
wl->rx_counter = 0;
wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
wl->band = NL80211_BAND_2GHZ;
wl->channel_type = NL80211_CHAN_NO_HT;
wl->flags = 0;
wl->sg_enabled = true;
wl->sleep_auth = WL1271_PSM_ILLEGAL;
wl->recovery_count = 0;
wl->hw_pg_ver = -1;
wl->ap_ps_map = 0;
wl->ap_fw_ps_map = 0;
wl->quirks = 0;
wl->system_hlid = WL12XX_SYSTEM_HLID;
wl->active_sta_count = 0;
wl->active_link_count = 0;
wl->fwlog_size = 0;
/* The system link is always allocated */
__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);
memset(wl->tx_frames_map, 0, sizeof(wl->tx_frames_map));
for (i = 0; i < wl->num_tx_desc; i++)
wl->tx_frames[i] = NULL;
spin_lock_init(&wl->wl_lock);
wl->state = WLCORE_STATE_OFF;
wl->fw_type = WL12XX_FW_TYPE_NONE;
mutex_init(&wl->mutex);
mutex_init(&wl->flush_mutex);
init_completion(&wl->nvs_loading_complete);
order = get_order(aggr_buf_size);
wl->aggr_buf = (u8 *)__get_free_pages(GFP_KERNEL, order);
if (!wl->aggr_buf) {
ret = -ENOMEM;
goto err_wq;
}
wl->aggr_buf_size = aggr_buf_size;
wl->dummy_packet = wl12xx_alloc_dummy_packet(wl);
if (!wl->dummy_packet) {
ret = -ENOMEM;
goto err_aggr;
}
/* Allocate one page for the FW log */
wl->fwlog = (u8 *)get_zeroed_page(GFP_KERNEL);
if (!wl->fwlog) {
ret = -ENOMEM;
goto err_dummy_packet;
}
wl->mbox_size = mbox_size;
wl->mbox = kmalloc(wl->mbox_size, GFP_KERNEL | GFP_DMA);
if (!wl->mbox) {
ret = -ENOMEM;
goto err_fwlog;
}
wl->buffer_32 = kmalloc(sizeof(*wl->buffer_32), GFP_KERNEL);
if (!wl->buffer_32) {
ret = -ENOMEM;
goto err_mbox;
}
return hw;
err_mbox:
kfree(wl->mbox);
err_fwlog:
free_page((unsigned long)wl->fwlog);
err_dummy_packet:
dev_kfree_skb(wl->dummy_packet);
err_aggr:
free_pages((unsigned long)wl->aggr_buf, order);
err_wq:
destroy_workqueue(wl->freezable_wq);
err_hw:
wl1271_debugfs_exit(wl);
kfree(wl->priv);
err_priv_alloc:
ieee80211_free_hw(hw);
err_hw_alloc:
return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(wlcore_alloc_hw);
int wlcore_free_hw(struct wl1271 *wl)
{
/* Unblock any fwlog readers */
mutex_lock(&wl->mutex);
wl->fwlog_size = -1;
mutex_unlock(&wl->mutex);
wlcore_sysfs_free(wl);
kfree(wl->buffer_32);
kfree(wl->mbox);
free_page((unsigned long)wl->fwlog);
dev_kfree_skb(wl->dummy_packet);
free_pages((unsigned long)wl->aggr_buf, get_order(wl->aggr_buf_size));
wl1271_debugfs_exit(wl);
vfree(wl->fw);
wl->fw = NULL;
wl->fw_type = WL12XX_FW_TYPE_NONE;
kfree(wl->nvs);
wl->nvs = NULL;
kfree(wl->raw_fw_status);
kfree(wl->fw_status);
kfree(wl->tx_res_if);
destroy_workqueue(wl->freezable_wq);
kfree(wl->priv);
ieee80211_free_hw(wl->hw);
return 0;
}
EXPORT_SYMBOL_GPL(wlcore_free_hw);
#ifdef CONFIG_PM
static const struct wiphy_wowlan_support wlcore_wowlan_support = {
.flags = WIPHY_WOWLAN_ANY,
.n_patterns = WL1271_MAX_RX_FILTERS,
.pattern_min_len = 1,
.pattern_max_len = WL1271_RX_FILTER_MAX_PATTERN_SIZE,
};
#endif
static irqreturn_t wlcore_hardirq(int irq, void *cookie)
{
return IRQ_WAKE_THREAD;
}
static void wlcore_nvs_cb(const struct firmware *fw, void *context)
{
struct wl1271 *wl = context;
struct platform_device *pdev = wl->pdev;
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct resource *res;
int ret;
irq_handler_t hardirq_fn = NULL;
if (fw) {
wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!wl->nvs) {
wl1271_error("Could not allocate nvs data");
goto out;
}
wl->nvs_len = fw->size;
} else if (pdev_data->family->nvs_name) {
wl1271_debug(DEBUG_BOOT, "Could not get nvs file %s",
pdev_data->family->nvs_name);
wl->nvs = NULL;
wl->nvs_len = 0;
} else {
wl->nvs = NULL;
wl->nvs_len = 0;
}
ret = wl->ops->setup(wl);
if (ret < 0)
goto out_free_nvs;
BUG_ON(wl->num_tx_desc > WLCORE_MAX_TX_DESCRIPTORS);
/* adjust some runtime configuration parameters */
wlcore_adjust_conf(wl);
res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
if (!res) {
wl1271_error("Could not get IRQ resource");
goto out_free_nvs;
}
wl->irq = res->start;
wl->irq_flags = res->flags & IRQF_TRIGGER_MASK;
wl->if_ops = pdev_data->if_ops;
if (wl->irq_flags & (IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING))
hardirq_fn = wlcore_hardirq;
else
wl->irq_flags |= IRQF_ONESHOT;
ret = wl12xx_set_power_on(wl);
if (ret < 0)
goto out_free_nvs;
ret = wl12xx_get_hw_info(wl);
if (ret < 0) {
wl1271_error("couldn't get hw info");
wl1271_power_off(wl);
goto out_free_nvs;
}
ret = request_threaded_irq(wl->irq, hardirq_fn, wlcore_irq,
wl->irq_flags, pdev->name, wl);
if (ret < 0) {
wl1271_error("interrupt configuration failed");
wl1271_power_off(wl);
goto out_free_nvs;
}
#ifdef CONFIG_PM
device_init_wakeup(wl->dev, true);
ret = enable_irq_wake(wl->irq);
if (!ret) {
wl->irq_wake_enabled = true;
if (pdev_data->pwr_in_suspend)
wl->hw->wiphy->wowlan = &wlcore_wowlan_support;
}
res = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
if (res) {
wl->wakeirq = res->start;
wl->wakeirq_flags = res->flags & IRQF_TRIGGER_MASK;
ret = dev_pm_set_dedicated_wake_irq(wl->dev, wl->wakeirq);
if (ret)
wl->wakeirq = -ENODEV;
} else {
wl->wakeirq = -ENODEV;
}
#endif
disable_irq(wl->irq);
wl1271_power_off(wl);
ret = wl->ops->identify_chip(wl);
if (ret < 0)
goto out_irq;
ret = wl1271_init_ieee80211(wl);
if (ret)
goto out_irq;
ret = wl1271_register_hw(wl);
if (ret)
goto out_irq;
ret = wlcore_sysfs_init(wl);
if (ret)
goto out_unreg;
wl->initialized = true;
goto out;
out_unreg:
wl1271_unregister_hw(wl);
out_irq:
if (wl->wakeirq >= 0)
dev_pm_clear_wake_irq(wl->dev);
device_init_wakeup(wl->dev, false);
free_irq(wl->irq, wl);
out_free_nvs:
kfree(wl->nvs);
out:
release_firmware(fw);
complete_all(&wl->nvs_loading_complete);
}
static int __maybe_unused wlcore_runtime_suspend(struct device *dev)
{
struct wl1271 *wl = dev_get_drvdata(dev);
struct wl12xx_vif *wlvif;
int error;
/* We do not enter elp sleep in PLT mode */
if (wl->plt)
return 0;
/* Nothing to do if no ELP mode requested */
if (wl->sleep_auth != WL1271_PSM_ELP)
return 0;
wl12xx_for_each_wlvif(wl, wlvif) {
if (!test_bit(WLVIF_FLAG_IN_PS, &wlvif->flags) &&
test_bit(WLVIF_FLAG_IN_USE, &wlvif->flags))
return -EBUSY;
}
wl1271_debug(DEBUG_PSM, "chip to elp");
error = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_SLEEP);
if (error < 0) {
wl12xx_queue_recovery_work(wl);
return error;
}
set_bit(WL1271_FLAG_IN_ELP, &wl->flags);
return 0;
}
static int __maybe_unused wlcore_runtime_resume(struct device *dev)
{
struct wl1271 *wl = dev_get_drvdata(dev);
DECLARE_COMPLETION_ONSTACK(compl);
unsigned long flags;
int ret;
unsigned long start_time = jiffies;
bool recovery = false;
/* Nothing to do if no ELP mode requested */
if (!test_bit(WL1271_FLAG_IN_ELP, &wl->flags))
return 0;
wl1271_debug(DEBUG_PSM, "waking up chip from elp");
spin_lock_irqsave(&wl->wl_lock, flags);
wl->elp_compl = &compl;
spin_unlock_irqrestore(&wl->wl_lock, flags);
ret = wlcore_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG, ELPCTRL_WAKE_UP);
if (ret < 0) {
recovery = true;
} else if (!test_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags)) {
ret = wait_for_completion_timeout(&compl,
msecs_to_jiffies(WL1271_WAKEUP_TIMEOUT));
if (ret == 0) {
wl1271_warning("ELP wakeup timeout!");
recovery = true;
}
}
spin_lock_irqsave(&wl->wl_lock, flags);
wl->elp_compl = NULL;
spin_unlock_irqrestore(&wl->wl_lock, flags);
clear_bit(WL1271_FLAG_IN_ELP, &wl->flags);
if (recovery) {
set_bit(WL1271_FLAG_INTENDED_FW_RECOVERY, &wl->flags);
wl12xx_queue_recovery_work(wl);
} else {
wl1271_debug(DEBUG_PSM, "wakeup time: %u ms",
jiffies_to_msecs(jiffies - start_time));
}
return 0;
}
static const struct dev_pm_ops wlcore_pm_ops = {
SET_RUNTIME_PM_OPS(wlcore_runtime_suspend,
wlcore_runtime_resume,
NULL)
};
int wlcore_probe(struct wl1271 *wl, struct platform_device *pdev)
{
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
const char *nvs_name;
int ret = 0;
if (!wl->ops || !wl->ptable || !pdev_data)
return -EINVAL;
wl->dev = &pdev->dev;
wl->pdev = pdev;
platform_set_drvdata(pdev, wl);
if (pdev_data->family && pdev_data->family->nvs_name) {
nvs_name = pdev_data->family->nvs_name;
ret = request_firmware_nowait(THIS_MODULE, FW_ACTION_UEVENT,
nvs_name, &pdev->dev, GFP_KERNEL,
wl, wlcore_nvs_cb);
if (ret < 0) {
wl1271_error("request_firmware_nowait failed for %s: %d",
nvs_name, ret);
complete_all(&wl->nvs_loading_complete);
}
} else {
wlcore_nvs_cb(NULL, wl);
}
wl->dev->driver->pm = &wlcore_pm_ops;
pm_runtime_set_autosuspend_delay(wl->dev, 50);
pm_runtime_use_autosuspend(wl->dev);
pm_runtime_enable(wl->dev);
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_probe);
int wlcore_remove(struct platform_device *pdev)
{
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
struct wl1271 *wl = platform_get_drvdata(pdev);
int error;
error = pm_runtime_get_sync(wl->dev);
if (error < 0)
dev_warn(wl->dev, "PM runtime failed: %i\n", error);
wl->dev->driver->pm = NULL;
if (pdev_data->family && pdev_data->family->nvs_name)
wait_for_completion(&wl->nvs_loading_complete);
if (!wl->initialized)
return 0;
if (wl->wakeirq >= 0) {
dev_pm_clear_wake_irq(wl->dev);
wl->wakeirq = -ENODEV;
}
device_init_wakeup(wl->dev, false);
if (wl->irq_wake_enabled)
disable_irq_wake(wl->irq);
wl1271_unregister_hw(wl);
pm_runtime_put_sync(wl->dev);
pm_runtime_dont_use_autosuspend(wl->dev);
pm_runtime_disable(wl->dev);
free_irq(wl->irq, wl);
wlcore_free_hw(wl);
return 0;
}
EXPORT_SYMBOL_GPL(wlcore_remove);
u32 wl12xx_debug_level = DEBUG_NONE;
EXPORT_SYMBOL_GPL(wl12xx_debug_level);
module_param_named(debug_level, wl12xx_debug_level, uint, 0600);
MODULE_PARM_DESC(debug_level, "wl12xx debugging level");
module_param_named(fwlog, fwlog_param, charp, 0);
MODULE_PARM_DESC(fwlog,
"FW logger options: continuous, dbgpins or disable");
module_param(fwlog_mem_blocks, int, 0600);
MODULE_PARM_DESC(fwlog_mem_blocks, "fwlog mem_blocks");
module_param(bug_on_recovery, int, 0600);
MODULE_PARM_DESC(bug_on_recovery, "BUG() on fw recovery");
module_param(no_recovery, int, 0600);
MODULE_PARM_DESC(no_recovery, "Prevent HW recovery. FW will remain stuck.");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Luciano Coelho <[email protected]>");
MODULE_AUTHOR("Juuso Oikarinen <[email protected]>");
|
linux-master
|
drivers/net/wireless/ti/wlcore/main.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2008-2009 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "event.h"
#include "ps.h"
#include "scan.h"
#include "wl12xx_80211.h"
#include "hw_ops.h"
#define WL18XX_LOGGER_SDIO_BUFF_MAX (0x1020)
#define WL18XX_DATA_RAM_BASE_ADDRESS (0x20000000)
#define WL18XX_LOGGER_SDIO_BUFF_ADDR (0x40159c)
#define WL18XX_LOGGER_BUFF_OFFSET (sizeof(struct fw_logger_information))
#define WL18XX_LOGGER_READ_POINT_OFFSET (12)
int wlcore_event_fw_logger(struct wl1271 *wl)
{
int ret;
struct fw_logger_information fw_log;
u8 *buffer;
u32 internal_fw_addrbase = WL18XX_DATA_RAM_BASE_ADDRESS;
u32 addr = WL18XX_LOGGER_SDIO_BUFF_ADDR;
u32 addr_ptr;
u32 buff_start_ptr;
u32 buff_read_ptr;
u32 buff_end_ptr;
u32 available_len;
u32 actual_len;
u32 clear_ptr;
size_t len;
u32 start_loc;
buffer = kzalloc(WL18XX_LOGGER_SDIO_BUFF_MAX, GFP_KERNEL);
if (!buffer) {
wl1271_error("Fail to allocate fw logger memory");
actual_len = 0;
goto out;
}
ret = wlcore_read(wl, addr, buffer, WL18XX_LOGGER_SDIO_BUFF_MAX,
false);
if (ret < 0) {
wl1271_error("Fail to read logger buffer, error_id = %d",
ret);
actual_len = 0;
goto free_out;
}
memcpy(&fw_log, buffer, sizeof(fw_log));
actual_len = le32_to_cpu(fw_log.actual_buff_size);
if (actual_len == 0)
goto free_out;
/* Calculate the internal pointer to the fwlog structure */
addr_ptr = internal_fw_addrbase + addr;
/* Calculate the internal pointers to the start and end of log buffer */
buff_start_ptr = addr_ptr + WL18XX_LOGGER_BUFF_OFFSET;
buff_end_ptr = buff_start_ptr + le32_to_cpu(fw_log.max_buff_size);
/* Read the read pointer and validate it */
buff_read_ptr = le32_to_cpu(fw_log.buff_read_ptr);
if (buff_read_ptr < buff_start_ptr ||
buff_read_ptr >= buff_end_ptr) {
wl1271_error("buffer read pointer out of bounds: %x not in (%x-%x)\n",
buff_read_ptr, buff_start_ptr, buff_end_ptr);
goto free_out;
}
start_loc = buff_read_ptr - addr_ptr;
available_len = buff_end_ptr - buff_read_ptr;
/* Copy initial part up to the end of ring buffer */
len = min(actual_len, available_len);
wl12xx_copy_fwlog(wl, &buffer[start_loc], len);
clear_ptr = addr_ptr + start_loc + actual_len;
if (clear_ptr == buff_end_ptr)
clear_ptr = buff_start_ptr;
/* Copy any remaining part from beginning of ring buffer */
len = actual_len - len;
if (len) {
wl12xx_copy_fwlog(wl,
&buffer[WL18XX_LOGGER_BUFF_OFFSET],
len);
clear_ptr = addr_ptr + WL18XX_LOGGER_BUFF_OFFSET + len;
}
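/*
 * Ring-buffer sketch (hypothetical numbers): with the read pointer
 * 0x300 bytes before buff_end_ptr and actual_len == 0x500, the first
 * copy takes 0x300 bytes up to the end of the ring and the second
 * copy takes the remaining 0x200 bytes from the buffer start.
 */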
/* Update the read pointer */
ret = wlcore_write32(wl, addr + WL18XX_LOGGER_READ_POINT_OFFSET,
clear_ptr);
free_out:
kfree(buffer);
out:
return actual_len;
}
EXPORT_SYMBOL_GPL(wlcore_event_fw_logger);
void wlcore_event_rssi_trigger(struct wl1271 *wl, s8 *metric_arr)
{
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
enum nl80211_cqm_rssi_threshold_event event;
s8 metric = metric_arr[0];
wl1271_debug(DEBUG_EVENT, "RSSI trigger metric: %d", metric);
/* TODO: check actual multi-role support */
wl12xx_for_each_wlvif_sta(wl, wlvif) {
if (metric <= wlvif->rssi_thold)
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW;
else
event = NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH;
vif = wl12xx_wlvif_to_vif(wlvif);
if (event != wlvif->last_rssi_event)
ieee80211_cqm_rssi_notify(vif, event, metric,
GFP_KERNEL);
wlvif->last_rssi_event = event;
}
}
EXPORT_SYMBOL_GPL(wlcore_event_rssi_trigger);
static void wl1271_stop_ba_event(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
u8 hlid = wlvif->sta.hlid;
if (!wl->links[hlid].ba_bitmap)
return;
ieee80211_stop_rx_ba_session(vif, wl->links[hlid].ba_bitmap,
vif->bss_conf.bssid);
} else {
u8 hlid;
struct wl1271_link *lnk;
for_each_set_bit(hlid, wlvif->ap.sta_hlid_map,
wl->num_links) {
lnk = &wl->links[hlid];
if (!lnk->ba_bitmap)
continue;
ieee80211_stop_rx_ba_session(vif,
lnk->ba_bitmap,
lnk->addr);
}
}
}
void wlcore_event_soft_gemini_sense(struct wl1271 *wl, u8 enable)
{
struct wl12xx_vif *wlvif;
if (enable) {
set_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
} else {
clear_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags);
wl12xx_for_each_wlvif_sta(wl, wlvif) {
wl1271_recalc_rx_streaming(wl, wlvif);
}
}
}
EXPORT_SYMBOL_GPL(wlcore_event_soft_gemini_sense);
void wlcore_event_sched_scan_completed(struct wl1271 *wl,
u8 status)
{
wl1271_debug(DEBUG_EVENT, "PERIODIC_SCAN_COMPLETE_EVENT (status 0x%0x)",
status);
if (wl->sched_vif) {
ieee80211_sched_scan_stopped(wl->hw);
wl->sched_vif = NULL;
}
}
EXPORT_SYMBOL_GPL(wlcore_event_sched_scan_completed);
void wlcore_event_ba_rx_constraint(struct wl1271 *wl,
unsigned long roles_bitmap,
unsigned long allowed_bitmap)
{
struct wl12xx_vif *wlvif;
wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx allowed=0x%lx",
__func__, roles_bitmap, allowed_bitmap);
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
!test_bit(wlvif->role_id , &roles_bitmap))
continue;
wlvif->ba_allowed = !!test_bit(wlvif->role_id,
&allowed_bitmap);
if (!wlvif->ba_allowed)
wl1271_stop_ba_event(wl, wlvif);
}
}
EXPORT_SYMBOL_GPL(wlcore_event_ba_rx_constraint);
void wlcore_event_channel_switch(struct wl1271 *wl,
unsigned long roles_bitmap,
bool success)
{
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
wl1271_debug(DEBUG_EVENT, "%s: roles=0x%lx success=%d",
__func__, roles_bitmap, success);
wl12xx_for_each_wlvif(wl, wlvif) {
if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
!test_bit(wlvif->role_id , &roles_bitmap))
continue;
if (!test_and_clear_bit(WLVIF_FLAG_CS_PROGRESS,
&wlvif->flags))
continue;
vif = wl12xx_wlvif_to_vif(wlvif);
if (wlvif->bss_type == BSS_TYPE_STA_BSS) {
ieee80211_chswitch_done(vif, success);
cancel_delayed_work(&wlvif->channel_switch_work);
} else {
set_bit(WLVIF_FLAG_BEACON_DISABLED, &wlvif->flags);
ieee80211_csa_finish(vif);
}
}
}
EXPORT_SYMBOL_GPL(wlcore_event_channel_switch);
void wlcore_event_dummy_packet(struct wl1271 *wl)
{
if (wl->plt) {
wl1271_info("Got DUMMY_PACKET event in PLT mode. FW bug, ignoring.");
return;
}
wl1271_debug(DEBUG_EVENT, "DUMMY_PACKET_ID_EVENT_ID");
wl1271_tx_dummy_packet(wl);
}
EXPORT_SYMBOL_GPL(wlcore_event_dummy_packet);
static void wlcore_disconnect_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
u32 num_packets = wl->conf.tx.max_tx_retries;
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
struct ieee80211_sta *sta;
const u8 *addr;
int h;
for_each_set_bit(h, &sta_bitmap, wl->num_links) {
bool found = false;
/* find the ap vif connected to this sta */
wl12xx_for_each_wlvif_ap(wl, wlvif) {
if (!test_bit(h, wlvif->ap.sta_hlid_map))
continue;
found = true;
break;
}
if (!found)
continue;
vif = wl12xx_wlvif_to_vif(wlvif);
addr = wl->links[h].addr;
rcu_read_lock();
sta = ieee80211_find_sta(vif, addr);
if (sta) {
wl1271_debug(DEBUG_EVENT, "remove sta %d", h);
ieee80211_report_low_ack(sta, num_packets);
}
rcu_read_unlock();
}
}
void wlcore_event_max_tx_failure(struct wl1271 *wl, unsigned long sta_bitmap)
{
wl1271_debug(DEBUG_EVENT, "MAX_TX_FAILURE_EVENT_ID");
wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_max_tx_failure);
void wlcore_event_inactive_sta(struct wl1271 *wl, unsigned long sta_bitmap)
{
wl1271_debug(DEBUG_EVENT, "INACTIVE_STA_EVENT_ID");
wlcore_disconnect_sta(wl, sta_bitmap);
}
EXPORT_SYMBOL_GPL(wlcore_event_inactive_sta);
void wlcore_event_roc_complete(struct wl1271 *wl)
{
wl1271_debug(DEBUG_EVENT, "REMAIN_ON_CHANNEL_COMPLETE_EVENT_ID");
if (wl->roc_vif)
ieee80211_ready_on_channel(wl->hw);
}
EXPORT_SYMBOL_GPL(wlcore_event_roc_complete);
void wlcore_event_beacon_loss(struct wl1271 *wl, unsigned long roles_bitmap)
{
/*
* We are a HW_MONITOR device. On beacon loss, queue the
* connection loss work. Cancel it on a REGAINED event.
*/
struct wl12xx_vif *wlvif;
struct ieee80211_vif *vif;
int delay = wl->conf.conn.synch_fail_thold *
wl->conf.conn.bss_lose_timeout;
wl1271_info("Beacon loss detected. roles:0x%lx", roles_bitmap);
wl12xx_for_each_wlvif_sta(wl, wlvif) {
if (wlvif->role_id == WL12XX_INVALID_ROLE_ID ||
!test_bit(wlvif->role_id , &roles_bitmap))
continue;
vif = wl12xx_wlvif_to_vif(wlvif);
/* don't attempt roaming in case of p2p */
if (wlvif->p2p) {
ieee80211_connection_loss(vif);
continue;
}
/*
* if the work is already queued, it will fire at its original
* time; we don't want to delay the connection loss indication
* any further.
*/
ieee80211_queue_delayed_work(wl->hw,
&wlvif->connection_loss_work,
msecs_to_jiffies(delay));
ieee80211_cqm_beacon_loss_notify(vif, GFP_KERNEL);
}
}
EXPORT_SYMBOL_GPL(wlcore_event_beacon_loss);
int wl1271_event_unmask(struct wl1271 *wl)
{
int ret;
wl1271_debug(DEBUG_EVENT, "unmasking event_mask 0x%x", wl->event_mask);
ret = wl1271_acx_event_mbox_mask(wl, ~(wl->event_mask));
if (ret < 0)
return ret;
return 0;
}
int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
{
int ret;
wl1271_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);
if (mbox_num > 1)
return -EINVAL;
/* first we read the mbox descriptor */
ret = wlcore_read(wl, wl->mbox_ptr[mbox_num], wl->mbox,
wl->mbox_size, false);
if (ret < 0)
return ret;
/* process the descriptor */
ret = wl->ops->process_mailbox_events(wl);
if (ret < 0)
return ret;
/*
* TODO: we just need this because one bit is in a different
* place. Is there any better way?
*/
ret = wl->ops->ack_event(wl);
return ret;
}
|
linux-master
|
drivers/net/wireless/ti/wlcore/event.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2009 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/etherdevice.h>
#include <linux/pm_runtime.h>
#include <linux/spinlock.h>
#include "wlcore.h"
#include "debug.h"
#include "io.h"
#include "ps.h"
#include "tx.h"
#include "event.h"
#include "hw_ops.h"
/*
* TODO: this is here just for now, it must be removed when the data
* operations are in place.
*/
#include "../wl12xx/reg.h"
static int wl1271_set_default_wep_key(struct wl1271 *wl,
struct wl12xx_vif *wlvif, u8 id)
{
int ret;
bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
if (is_ap)
ret = wl12xx_cmd_set_default_wep_key(wl, id,
wlvif->ap.bcast_hlid);
else
ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
if (ret < 0)
return ret;
wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
return 0;
}
static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
{
int id;
id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
if (id >= wl->num_tx_desc)
return -EBUSY;
__set_bit(id, wl->tx_frames_map);
wl->tx_frames[id] = skb;
wl->tx_frames_cnt++;
return id;
}
void wl1271_free_tx_id(struct wl1271 *wl, int id)
{
if (__test_and_clear_bit(id, wl->tx_frames_map)) {
if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
wl->tx_frames[id] = NULL;
wl->tx_frames_cnt--;
}
}
EXPORT_SYMBOL(wl1271_free_tx_id);
static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
struct sk_buff *skb)
{
struct ieee80211_hdr *hdr;
hdr = (struct ieee80211_hdr *)(skb->data +
sizeof(struct wl1271_tx_hw_descr));
if (!ieee80211_is_auth(hdr->frame_control))
return;
/*
* add the station to the known list before transmitting the
* authentication response. this way it won't get de-authed by FW
* when transmitting too soon.
*/
wl1271_acx_set_inconnection_sta(wl, wlvif, hdr->addr1);
/*
* ROC for 1 second on the AP channel for completing the connection.
* Note the ROC will be continued by the update_sta_state callbacks
* once the station reaches the associated state.
*/
wlcore_update_inconn_sta(wl, wlvif, NULL, true);
wlvif->pending_auth_reply_time = jiffies;
cancel_delayed_work(&wlvif->pending_auth_complete_work);
ieee80211_queue_delayed_work(wl->hw,
&wlvif->pending_auth_complete_work,
msecs_to_jiffies(WLCORE_PEND_AUTH_ROC_TIMEOUT));
}
static void wl1271_tx_regulate_link(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u8 hlid)
{
bool fw_ps;
u8 tx_pkts;
if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
return;
fw_ps = test_bit(hlid, &wl->ap_fw_ps_map);
tx_pkts = wl->links[hlid].allocated_pkts;
/*
* if in FW PS and there is enough data in FW we can put the link
* into high-level PS and clean out its TX queues.
* Make an exception if this is the only connected link. In this
* case FW-memory congestion is less of a problem.
* Note that a single connected STA means 2*ap_count + 1 active links,
* since we must account for the global and broadcast AP links
* for each AP. The "fw_ps" check assures us the other link is a STA
* connected to the AP. Otherwise the FW would not set the PSM bit.
*/
if (wl->active_link_count > (wl->ap_count*2 + 1) && fw_ps &&
tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
wl12xx_ps_link_start(wl, wlvif, hlid, true);
}
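/*
 * Worked example for the threshold above (illustrative, not driver
 * logic): with a single AP interface (ap_count == 1) the global and
 * broadcast links already account for two active links, so one
 * connected STA gives active_link_count == 3 == 2*ap_count + 1. Only
 * when the count exceeds that, i.e. a second peer link exists, is
 * FW-memory congestion likely enough to justify forcing the link into
 * power save.
 */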
bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
{
return wl->dummy_packet == skb;
}
EXPORT_SYMBOL(wl12xx_is_dummy_packet);
static u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, struct ieee80211_sta *sta)
{
if (sta) {
struct wl1271_station *wl_sta;
wl_sta = (struct wl1271_station *)sta->drv_priv;
return wl_sta->hlid;
} else {
struct ieee80211_hdr *hdr;
if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
return wl->system_hlid;
hdr = (struct ieee80211_hdr *)skb->data;
if (is_multicast_ether_addr(ieee80211_get_DA(hdr)))
return wlvif->ap.bcast_hlid;
else
return wlvif->ap.global_hlid;
}
}
u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, struct ieee80211_sta *sta)
{
struct ieee80211_tx_info *control;
if (wlvif->bss_type == BSS_TYPE_AP_BSS)
return wl12xx_tx_get_hlid_ap(wl, wlvif, skb, sta);
control = IEEE80211_SKB_CB(skb);
if (control->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
wl1271_debug(DEBUG_TX, "tx offchannel");
return wlvif->dev_hlid;
}
return wlvif->sta.hlid;
}
unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
unsigned int packet_length)
{
if ((wl->quirks & WLCORE_QUIRK_TX_PAD_LAST_FRAME) ||
!(wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN))
return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
else
return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
}
EXPORT_SYMBOL(wlcore_calc_packet_alignment);
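/*
 * Illustrative sketch (not part of the driver): both branches above are
 * the kernel's ALIGN(), i.e. rounding up to the next multiple of a
 * power-of-two boundary. With the boundaries the driver uses (4 for
 * WL1271_TX_ALIGN_TO, 256 for the wl128x SDIO block size) and a
 * hypothetical 61-byte packet:
 */
static inline unsigned int demo_align_up(unsigned int len, unsigned int a)
{
	/* equivalent to ALIGN(len, a) for power-of-two a */
	return (len + a - 1) & ~(a - 1);
}
/* demo_align_up(61, 4) == 64, demo_align_up(61, 256) == 256 */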
static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, u32 extra, u32 buf_offset,
u8 hlid, bool is_gem)
{
struct wl1271_tx_hw_descr *desc;
u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
u32 total_blocks;
int id, ret = -EBUSY, ac;
u32 spare_blocks;
if (buf_offset + total_len > wl->aggr_buf_size)
return -EAGAIN;
spare_blocks = wlcore_hw_get_spare_blocks(wl, is_gem);
/* allocate free identifier for the packet */
id = wl1271_alloc_tx_id(wl, skb);
if (id < 0)
return id;
total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
if (total_blocks <= wl->tx_blocks_available) {
desc = skb_push(skb, total_len - skb->len);
wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
spare_blocks);
desc->id = id;
wl->tx_blocks_available -= total_blocks;
wl->tx_allocated_blocks += total_blocks;
/*
* If the FW was empty before, arm the Tx watchdog. Also do
* this on the first Tx after resume, as we always cancel the
* watchdog on suspend.
*/
if (wl->tx_allocated_blocks == total_blocks ||
test_and_clear_bit(WL1271_FLAG_REINIT_TX_WDOG, &wl->flags))
wl12xx_rearm_tx_watchdog_locked(wl);
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
wl->tx_allocated_pkts[ac]++;
if (test_bit(hlid, wl->links_map))
wl->links[hlid].allocated_pkts++;
ret = 0;
wl1271_debug(DEBUG_TX,
"tx_allocate: size: %d, blocks: %d, id: %d",
total_len, total_blocks, id);
} else {
wl1271_free_tx_id(wl, id);
}
return ret;
}
static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, u32 extra,
struct ieee80211_tx_info *control, u8 hlid)
{
struct wl1271_tx_hw_descr *desc;
int ac, rate_idx;
s64 hosttime;
u16 tx_attr = 0;
__le16 frame_control;
struct ieee80211_hdr *hdr;
u8 *frame_start;
bool is_dummy;
desc = (struct wl1271_tx_hw_descr *) skb->data;
frame_start = (u8 *)(desc + 1);
hdr = (struct ieee80211_hdr *)(frame_start + extra);
frame_control = hdr->frame_control;
/* relocate space for security header */
if (extra) {
int hdrlen = ieee80211_hdrlen(frame_control);
memmove(frame_start, hdr, hdrlen);
skb_set_network_header(skb, skb_network_offset(skb) + extra);
}
/* configure packet life time */
hosttime = (ktime_get_boottime_ns() >> 10);
desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
is_dummy = wl12xx_is_dummy_packet(wl, skb);
if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
else
desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
/* queue */
ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
desc->tid = skb->priority;
if (is_dummy) {
/*
* FW expects the dummy packet to have an invalid session id -
 * any session id that is different from the one set in the join
*/
tx_attr = (SESSION_COUNTER_INVALID <<
TX_HW_ATTR_OFST_SESSION_COUNTER) &
TX_HW_ATTR_SESSION_COUNTER;
tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
} else if (wlvif) {
u8 session_id = wl->session_ids[hlid];
if ((wl->quirks & WLCORE_QUIRK_AP_ZERO_SESSION_ID) &&
(wlvif->bss_type == BSS_TYPE_AP_BSS))
session_id = 0;
/* configure the tx attributes */
tx_attr = session_id << TX_HW_ATTR_OFST_SESSION_COUNTER;
}
desc->hlid = hlid;
if (is_dummy || !wlvif)
rate_idx = 0;
else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
/*
* if the packets are data packets
* send them with AP rate policies (EAPOLs are an exception),
* otherwise use default basic rates
*/
if (skb->protocol == cpu_to_be16(ETH_P_PAE))
rate_idx = wlvif->sta.basic_rate_idx;
else if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
rate_idx = wlvif->sta.p2p_rate_idx;
else if (ieee80211_is_data(frame_control))
rate_idx = wlvif->sta.ap_rate_idx;
else
rate_idx = wlvif->sta.basic_rate_idx;
} else {
if (hlid == wlvif->ap.global_hlid)
rate_idx = wlvif->ap.mgmt_rate_idx;
else if (hlid == wlvif->ap.bcast_hlid ||
skb->protocol == cpu_to_be16(ETH_P_PAE) ||
!ieee80211_is_data(frame_control))
/*
* send non-data, bcast and EAPOLs using the
* min basic rate
*/
rate_idx = wlvif->ap.bcast_rate_idx;
else
rate_idx = wlvif->ap.ucast_rate_idx[ac];
}
tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
/* for WEP shared auth - no fw encryption is needed */
if (ieee80211_is_auth(frame_control) &&
ieee80211_has_protected(frame_control))
tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
/* send EAPOL frames as voice */
if (control->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO)
tx_attr |= TX_HW_ATTR_EAPOL_FRAME;
desc->tx_attr = cpu_to_le16(tx_attr);
wlcore_hw_set_tx_desc_csum(wl, desc, skb);
wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
}
/* caller must hold wl->mutex */
static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, u32 buf_offset, u8 hlid)
{
struct ieee80211_tx_info *info;
u32 extra = 0;
int ret = 0;
u32 total_len;
bool is_dummy;
bool is_gem = false;
if (!skb) {
wl1271_error("discarding null skb");
return -EINVAL;
}
if (hlid == WL12XX_INVALID_LINK_ID) {
wl1271_error("invalid hlid. dropping skb 0x%p", skb);
return -EINVAL;
}
info = IEEE80211_SKB_CB(skb);
is_dummy = wl12xx_is_dummy_packet(wl, skb);
if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
extra = WL1271_EXTRA_SPACE_TKIP;
if (info->control.hw_key) {
bool is_wep;
u8 idx = info->control.hw_key->hw_key_idx;
u32 cipher = info->control.hw_key->cipher;
is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
(cipher == WLAN_CIPHER_SUITE_WEP104);
if (WARN_ON(is_wep && wlvif && wlvif->default_key != idx)) {
ret = wl1271_set_default_wep_key(wl, wlvif, idx);
if (ret < 0)
return ret;
wlvif->default_key = idx;
}
is_gem = (cipher == WL1271_CIPHER_SUITE_GEM);
}
ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid,
is_gem);
if (ret < 0)
return ret;
wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
wl1271_tx_ap_update_inconnection_sta(wl, wlvif, skb);
wl1271_tx_regulate_link(wl, wlvif, hlid);
}
/*
* The length of each packet is stored in terms of
* words. Thus, we must pad the skb data to make sure its
* length is aligned. The number of padding bytes is computed
* and set in wl1271_tx_fill_hdr.
* In special cases, we want to align to a specific block size
* (eg. for wl128x with SDIO we align to 256).
*/
total_len = wlcore_calc_packet_alignment(wl, skb->len);
memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
/* Revert side effects in the dummy packet skb, so it can be reused */
if (is_dummy)
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
return total_len;
}
u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
enum nl80211_band rate_band)
{
struct ieee80211_supported_band *band;
u32 enabled_rates = 0;
int bit;
band = wl->hw->wiphy->bands[rate_band];
for (bit = 0; bit < band->n_bitrates; bit++) {
if (rate_set & 0x1)
enabled_rates |= band->bitrates[bit].hw_value;
rate_set >>= 1;
}
/* MCS rate indications are on bits 16 - 31 */
rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
for (bit = 0; bit < 16; bit++) {
if (rate_set & 0x1)
enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
rate_set >>= 1;
}
return enabled_rates;
}
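/*
 * Worked example for the function above (hypothetical band data): with
 * n_bitrates == 12 legacy rates, the first loop consumes bits 0..11 of
 * rate_set; the shift by (HW_HT_RATES_OFFSET - 12) then discards the
 * unused bits so that bit 16 of the original word lines up with
 * CONF_HW_BIT_RATE_MCS_0. E.g. rate_set = BIT(16) | BIT(1) enables
 * MCS0 plus the band's second legacy rate.
 */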
void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
{
int i;
struct wl12xx_vif *wlvif;
wl12xx_for_each_wlvif(wl, wlvif) {
for (i = 0; i < NUM_TX_QUEUES; i++) {
if (wlcore_is_queue_stopped_by_reason(wl, wlvif, i,
WLCORE_QUEUE_STOP_REASON_WATERMARK) &&
wlvif->tx_queue_count[i] <=
WL1271_TX_QUEUE_LOW_WATERMARK)
/* firmware buffer has space, restart queues */
wlcore_wake_queue(wl, wlvif, i,
WLCORE_QUEUE_STOP_REASON_WATERMARK);
}
}
}
static int wlcore_select_ac(struct wl1271 *wl)
{
int i, q = -1, ac;
u32 min_pkts = 0xffffffff;
/*
* Find a non-empty ac where:
* 1. There are packets to transmit
 * 2. The FW has the fewest allocated packets
*
* We prioritize the ACs according to VO>VI>BE>BK
*/
for (i = 0; i < NUM_TX_QUEUES; i++) {
ac = wl1271_tx_get_queue(i);
if (wl->tx_queue_count[ac] &&
wl->tx_allocated_pkts[ac] < min_pkts) {
q = ac;
min_pkts = wl->tx_allocated_pkts[q];
}
}
return q;
}
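/*
 * Illustrative sketch (not part of the driver): wlcore_select_ac() is an
 * argmin over the ACs, restricted to queues that hold packets. Stripped
 * of driver state, with hypothetical per-AC counters (the driver also
 * remaps the index through wl1271_tx_get_queue() so that ties resolve
 * in VO>VI>BE>BK order):
 */
static inline int demo_select_ac(const unsigned int *queued,
				 const unsigned int *allocated, int num_acs)
{
	unsigned int min_pkts = 0xffffffff;
	int i, q = -1;

	for (i = 0; i < num_acs; i++) {
		if (queued[i] && allocated[i] < min_pkts) {
			q = i;
			min_pkts = allocated[i];
		}
	}
	return q; /* -1 when every queue is empty */
}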
static struct sk_buff *wlcore_lnk_dequeue(struct wl1271 *wl,
struct wl1271_link *lnk, u8 q)
{
struct sk_buff *skb;
unsigned long flags;
skb = skb_dequeue(&lnk->tx_queue[q]);
if (skb) {
spin_lock_irqsave(&wl->wl_lock, flags);
WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
wl->tx_queue_count[q]--;
if (lnk->wlvif) {
WARN_ON_ONCE(lnk->wlvif->tx_queue_count[q] <= 0);
lnk->wlvif->tx_queue_count[q]--;
}
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
return skb;
}
static struct sk_buff *wlcore_lnk_dequeue_high_prio(struct wl1271 *wl,
u8 hlid, u8 ac,
u8 *low_prio_hlid)
{
struct wl1271_link *lnk = &wl->links[hlid];
if (!wlcore_hw_lnk_high_prio(wl, hlid, lnk)) {
if (*low_prio_hlid == WL12XX_INVALID_LINK_ID &&
!skb_queue_empty(&lnk->tx_queue[ac]) &&
wlcore_hw_lnk_low_prio(wl, hlid, lnk))
/* we found the first non-empty low priority queue */
*low_prio_hlid = hlid;
return NULL;
}
return wlcore_lnk_dequeue(wl, lnk, ac);
}
static struct sk_buff *wlcore_vif_dequeue_high_prio(struct wl1271 *wl,
struct wl12xx_vif *wlvif,
u8 ac, u8 *hlid,
u8 *low_prio_hlid)
{
struct sk_buff *skb = NULL;
int i, h, start_hlid;
/* start from the link after the last one */
start_hlid = (wlvif->last_tx_hlid + 1) % wl->num_links;
/* dequeue according to AC, round robin on each link */
for (i = 0; i < wl->num_links; i++) {
h = (start_hlid + i) % wl->num_links;
/* only consider connected stations */
if (!test_bit(h, wlvif->links_map))
continue;
skb = wlcore_lnk_dequeue_high_prio(wl, h, ac,
low_prio_hlid);
if (!skb)
continue;
wlvif->last_tx_hlid = h;
break;
}
if (!skb)
wlvif->last_tx_hlid = 0;
*hlid = wlvif->last_tx_hlid;
return skb;
}
static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl, u8 *hlid)
{
unsigned long flags;
struct wl12xx_vif *wlvif = wl->last_wlvif;
struct sk_buff *skb = NULL;
int ac;
u8 low_prio_hlid = WL12XX_INVALID_LINK_ID;
ac = wlcore_select_ac(wl);
if (ac < 0)
goto out;
/* continue from last wlvif (round robin) */
if (wlvif) {
wl12xx_for_each_wlvif_continue(wl, wlvif) {
if (!wlvif->tx_queue_count[ac])
continue;
skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
&low_prio_hlid);
if (!skb)
continue;
wl->last_wlvif = wlvif;
break;
}
}
/* dequeue from the system HLID before restarting the wlvif list */
if (!skb) {
skb = wlcore_lnk_dequeue_high_prio(wl, wl->system_hlid,
ac, &low_prio_hlid);
if (skb) {
*hlid = wl->system_hlid;
wl->last_wlvif = NULL;
}
}
/* Do a new pass over the wlvif list. But no need to continue
* after last_wlvif. The previous pass should have found it. */
if (!skb) {
wl12xx_for_each_wlvif(wl, wlvif) {
if (!wlvif->tx_queue_count[ac])
goto next;
skb = wlcore_vif_dequeue_high_prio(wl, wlvif, ac, hlid,
&low_prio_hlid);
if (skb) {
wl->last_wlvif = wlvif;
break;
}
next:
if (wlvif == wl->last_wlvif)
break;
}
}
/* no high priority skbs found - but maybe a low priority one? */
if (!skb && low_prio_hlid != WL12XX_INVALID_LINK_ID) {
struct wl1271_link *lnk = &wl->links[low_prio_hlid];
skb = wlcore_lnk_dequeue(wl, lnk, ac);
WARN_ON(!skb); /* we checked this before */
*hlid = low_prio_hlid;
/* ensure proper round robin in the vif/link levels */
wl->last_wlvif = lnk->wlvif;
if (lnk->wlvif)
lnk->wlvif->last_tx_hlid = low_prio_hlid;
}
out:
if (!skb &&
test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
int q;
skb = wl->dummy_packet;
*hlid = wl->system_hlid;
q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
spin_lock_irqsave(&wl->wl_lock, flags);
WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
wl->tx_queue_count[q]--;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
return skb;
}
static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
struct sk_buff *skb, u8 hlid)
{
unsigned long flags;
int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
if (wl12xx_is_dummy_packet(wl, skb)) {
set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
} else {
skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
/* make sure we dequeue the same packet next time */
wlvif->last_tx_hlid = (hlid + wl->num_links - 1) %
wl->num_links;
}
spin_lock_irqsave(&wl->wl_lock, flags);
wl->tx_queue_count[q]++;
if (wlvif)
wlvif->tx_queue_count[q]++;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
static bool wl1271_tx_is_data_present(struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
return ieee80211_is_data_present(hdr->frame_control);
}
void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
{
struct wl12xx_vif *wlvif;
u32 timeout;
u8 hlid;
if (!wl->conf.rx_streaming.interval)
return;
if (!wl->conf.rx_streaming.always &&
!test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
return;
timeout = wl->conf.rx_streaming.duration;
wl12xx_for_each_wlvif_sta(wl, wlvif) {
bool found = false;
for_each_set_bit(hlid, active_hlids, wl->num_links) {
if (test_bit(hlid, wlvif->links_map)) {
found = true;
break;
}
}
if (!found)
continue;
/* enable rx streaming */
if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
ieee80211_queue_work(wl->hw,
&wlvif->rx_streaming_enable_work);
mod_timer(&wlvif->rx_streaming_timer,
jiffies + msecs_to_jiffies(timeout));
}
}
/*
* Returns failure values only in case of failed bus ops within this function.
* wl1271_prepare_tx_frame retvals won't be returned in order to avoid
* triggering recovery by higher layers when not necessary.
 * In case a FW command fails within wl1271_prepare_tx_frame, a recovery
* will be queued in wl1271_cmd_send. -EAGAIN/-EBUSY from prepare_tx_frame
* can occur and are legitimate so don't propagate. -EINVAL will emit a WARNING
* within prepare_tx_frame code but there's nothing we should do about those
* as well.
*/
int wlcore_tx_work_locked(struct wl1271 *wl)
{
struct wl12xx_vif *wlvif;
struct sk_buff *skb;
struct wl1271_tx_hw_descr *desc;
u32 buf_offset = 0, last_len = 0;
bool sent_packets = false;
unsigned long active_hlids[BITS_TO_LONGS(WLCORE_MAX_LINKS)] = {0};
int ret = 0;
int bus_ret = 0;
u8 hlid;
if (unlikely(wl->state != WLCORE_STATE_ON))
return 0;
while ((skb = wl1271_skb_dequeue(wl, &hlid))) {
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
bool has_data = false;
wlvif = NULL;
if (!wl12xx_is_dummy_packet(wl, skb))
wlvif = wl12xx_vif_to_data(info->control.vif);
else
hlid = wl->system_hlid;
has_data = wlvif && wl1271_tx_is_data_present(skb);
ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset,
hlid);
if (ret == -EAGAIN) {
/*
* Aggregation buffer is full.
* Flush buffer and try again.
*/
wl1271_skb_queue_head(wl, wlvif, skb, hlid);
buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset,
last_len);
bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA,
wl->aggr_buf, buf_offset, true);
if (bus_ret < 0)
goto out;
sent_packets = true;
buf_offset = 0;
continue;
} else if (ret == -EBUSY) {
/*
* Firmware buffer is full.
* Queue back last skb, and stop aggregating.
*/
wl1271_skb_queue_head(wl, wlvif, skb, hlid);
/* No work left, avoid scheduling redundant tx work */
set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
goto out_ack;
} else if (ret < 0) {
if (wl12xx_is_dummy_packet(wl, skb))
/*
* fw still expects dummy packet,
* so re-enqueue it
*/
wl1271_skb_queue_head(wl, wlvif, skb, hlid);
else
ieee80211_free_txskb(wl->hw, skb);
goto out_ack;
}
last_len = ret;
buf_offset += last_len;
wl->tx_packets_count++;
if (has_data) {
desc = (struct wl1271_tx_hw_descr *) skb->data;
__set_bit(desc->hlid, active_hlids);
}
}
out_ack:
if (buf_offset) {
buf_offset = wlcore_hw_pre_pkt_send(wl, buf_offset, last_len);
bus_ret = wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
buf_offset, true);
if (bus_ret < 0)
goto out;
sent_packets = true;
}
if (sent_packets) {
/*
* Interrupt the firmware with the new packets. This is only
* required for older hardware revisions
*/
if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION) {
bus_ret = wlcore_write32(wl, WL12XX_HOST_WR_ACCESS,
wl->tx_packets_count);
if (bus_ret < 0)
goto out;
}
wl1271_handle_tx_low_watermark(wl);
}
wl12xx_rearm_rx_streaming(wl, active_hlids);
out:
return bus_ret;
}
void wl1271_tx_work(struct work_struct *work)
{
struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
int ret;
mutex_lock(&wl->mutex);
ret = pm_runtime_resume_and_get(wl->dev);
if (ret < 0)
goto out;
ret = wlcore_tx_work_locked(wl);
if (ret < 0) {
pm_runtime_put_noidle(wl->dev);
wl12xx_queue_recovery_work(wl);
goto out;
}
pm_runtime_mark_last_busy(wl->dev);
pm_runtime_put_autosuspend(wl->dev);
out:
mutex_unlock(&wl->mutex);
}
static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
{
u8 flags = 0;
/*
* TODO: use wl12xx constants when this code is moved to wl12xx, as
* only it uses Tx-completion.
*/
if (rate_class_index <= 8)
flags |= IEEE80211_TX_RC_MCS;
if (rate_class_index == 0)
flags |= IEEE80211_TX_RC_SHORT_GI;
return flags;
}
static void wl1271_tx_complete_packet(struct wl1271 *wl,
struct wl1271_tx_hw_res_descr *result)
{
struct ieee80211_tx_info *info;
struct ieee80211_vif *vif;
struct wl12xx_vif *wlvif;
struct sk_buff *skb;
int id = result->id;
int rate = -1;
u8 rate_flags = 0;
u8 retries = 0;
/* check for id legality */
if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
wl1271_warning("TX result illegal id: %d", id);
return;
}
skb = wl->tx_frames[id];
info = IEEE80211_SKB_CB(skb);
if (wl12xx_is_dummy_packet(wl, skb)) {
wl1271_free_tx_id(wl, id);
return;
}
/* info->control is valid as long as we don't update info->status */
vif = info->control.vif;
wlvif = wl12xx_vif_to_data(vif);
/* update the TX status info */
if (result->status == TX_SUCCESS) {
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
info->flags |= IEEE80211_TX_STAT_ACK;
rate = wlcore_rate_to_idx(wl, result->rate_class_index,
wlvif->band);
rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
retries = result->ack_failures;
} else if (result->status == TX_RETRY_EXCEEDED) {
wl->stats.excessive_retries++;
retries = result->ack_failures;
}
info->status.rates[0].idx = rate;
info->status.rates[0].count = retries;
info->status.rates[0].flags = rate_flags;
info->status.ack_signal = -1;
wl->stats.retry_count += result->ack_failures;
/* remove private header from packet */
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
/* remove TKIP header space if present */
if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
hdrlen);
skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
}
wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
" status 0x%x",
result->id, skb, result->ack_failures,
result->rate_class_index, result->status);
/* return the packet to the stack */
skb_queue_tail(&wl->deferred_tx_queue, skb);
queue_work(wl->freezable_wq, &wl->netstack_work);
wl1271_free_tx_id(wl, result->id);
}
/* Called upon reception of a TX complete interrupt */
int wlcore_tx_complete(struct wl1271 *wl)
{
struct wl1271_acx_mem_map *memmap = wl->target_mem_map;
u32 count, fw_counter;
u32 i;
int ret;
/* read the tx results from the chipset */
ret = wlcore_read(wl, le32_to_cpu(memmap->tx_result),
wl->tx_res_if, sizeof(*wl->tx_res_if), false);
if (ret < 0)
goto out;
fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
/* write host counter to chipset (to ack) */
ret = wlcore_write32(wl, le32_to_cpu(memmap->tx_result) +
offsetof(struct wl1271_tx_hw_res_if,
tx_result_host_counter), fw_counter);
if (ret < 0)
goto out;
count = fw_counter - wl->tx_results_count;
wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
/* verify that the result buffer is not getting overrun */
if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
wl1271_warning("TX result overflow from chipset: %d", count);
/* process the results */
for (i = 0; i < count; i++) {
struct wl1271_tx_hw_res_descr *result;
u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;
/* process the packet */
result = &(wl->tx_res_if->tx_results_queue[offset]);
wl1271_tx_complete_packet(wl, result);
wl->tx_results_count++;
}
out:
return ret;
}
EXPORT_SYMBOL(wlcore_tx_complete);
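/*
 * Illustrative note (not part of the driver): the completion path above
 * relies on free-running u32 counters. Unsigned subtraction yields the
 * number of new results even across 32-bit wraparound, and masking the
 * running host counter with TX_HW_RESULT_QUEUE_LEN_MASK (the ring size
 * is a power of two) gives the slot index:
 */
static inline unsigned int demo_ring_pending(unsigned int fw_counter,
					     unsigned int host_counter)
{
	/* e.g. host == 0xfffffffe, fw == 0x00000001 -> 3 pending */
	return fw_counter - host_counter;
}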
void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
{
struct sk_buff *skb;
int i;
unsigned long flags;
struct ieee80211_tx_info *info;
int total[NUM_TX_QUEUES];
struct wl1271_link *lnk = &wl->links[hlid];
for (i = 0; i < NUM_TX_QUEUES; i++) {
total[i] = 0;
while ((skb = skb_dequeue(&lnk->tx_queue[i]))) {
wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
if (!wl12xx_is_dummy_packet(wl, skb)) {
info = IEEE80211_SKB_CB(skb);
info->status.rates[0].idx = -1;
info->status.rates[0].count = 0;
ieee80211_tx_status_ni(wl->hw, skb);
}
total[i]++;
}
}
spin_lock_irqsave(&wl->wl_lock, flags);
for (i = 0; i < NUM_TX_QUEUES; i++) {
wl->tx_queue_count[i] -= total[i];
if (lnk->wlvif)
lnk->wlvif->tx_queue_count[i] -= total[i];
}
spin_unlock_irqrestore(&wl->wl_lock, flags);
wl1271_handle_tx_low_watermark(wl);
}
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
int i;
/* TX failure */
for_each_set_bit(i, wlvif->links_map, wl->num_links) {
if (wlvif->bss_type == BSS_TYPE_AP_BSS &&
i != wlvif->ap.bcast_hlid && i != wlvif->ap.global_hlid) {
/* this calls wl12xx_free_link */
wl1271_free_sta(wl, wlvif, i);
} else {
u8 hlid = i;
wl12xx_free_link(wl, wlvif, &hlid);
}
}
wlvif->last_tx_hlid = 0;
for (i = 0; i < NUM_TX_QUEUES; i++)
wlvif->tx_queue_count[i] = 0;
}
/* caller must hold wl->mutex and TX must be stopped */
void wl12xx_tx_reset(struct wl1271 *wl)
{
int i;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
/* only reset the queues if something bad happened */
if (wl1271_tx_total_queue_count(wl) != 0) {
for (i = 0; i < wl->num_links; i++)
wl1271_tx_reset_link_queues(wl, i);
for (i = 0; i < NUM_TX_QUEUES; i++)
wl->tx_queue_count[i] = 0;
}
/*
* Make sure the driver is at a consistent state, in case this
* function is called from a context other than interface removal.
* This call will always wake the TX queues.
*/
wl1271_handle_tx_low_watermark(wl);
for (i = 0; i < wl->num_tx_desc; i++) {
if (wl->tx_frames[i] == NULL)
continue;
skb = wl->tx_frames[i];
wl1271_free_tx_id(wl, i);
wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
if (!wl12xx_is_dummy_packet(wl, skb)) {
/*
* Remove private headers before passing the skb to
* mac80211
*/
info = IEEE80211_SKB_CB(skb);
skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
if ((wl->quirks & WLCORE_QUIRK_TKIP_HEADER_SPACE) &&
info->control.hw_key &&
info->control.hw_key->cipher ==
WLAN_CIPHER_SUITE_TKIP) {
int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
skb->data, hdrlen);
skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
}
info->status.rates[0].idx = -1;
info->status.rates[0].count = 0;
ieee80211_tx_status_ni(wl->hw, skb);
}
}
}
#define WL1271_TX_FLUSH_TIMEOUT 500000
/* caller must *NOT* hold wl->mutex */
void wl1271_tx_flush(struct wl1271 *wl)
{
unsigned long timeout, start_time;
int i;
start_time = jiffies;
timeout = start_time + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
/* only one flush should be in progress, for consistent queue state */
mutex_lock(&wl->flush_mutex);
mutex_lock(&wl->mutex);
if (wl->tx_frames_cnt == 0 && wl1271_tx_total_queue_count(wl) == 0) {
mutex_unlock(&wl->mutex);
goto out;
}
wlcore_stop_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
while (!time_after(jiffies, timeout)) {
wl1271_debug(DEBUG_MAC80211, "flushing tx buffer: %d %d",
wl->tx_frames_cnt,
wl1271_tx_total_queue_count(wl));
/* force Tx and give the driver some time to flush data */
mutex_unlock(&wl->mutex);
if (wl1271_tx_total_queue_count(wl))
wl1271_tx_work(&wl->tx_work);
msleep(20);
mutex_lock(&wl->mutex);
if ((wl->tx_frames_cnt == 0) &&
(wl1271_tx_total_queue_count(wl) == 0)) {
wl1271_debug(DEBUG_MAC80211, "tx flush took %d ms",
jiffies_to_msecs(jiffies - start_time));
goto out_wake;
}
}
wl1271_warning("Unable to flush all TX buffers, "
"timed out (timeout %d ms",
WL1271_TX_FLUSH_TIMEOUT / 1000);
/* forcibly flush all Tx buffers on our queues */
for (i = 0; i < wl->num_links; i++)
wl1271_tx_reset_link_queues(wl, i);
out_wake:
wlcore_wake_queues(wl, WLCORE_QUEUE_STOP_REASON_FLUSH);
mutex_unlock(&wl->mutex);
out:
mutex_unlock(&wl->flush_mutex);
}
EXPORT_SYMBOL_GPL(wl1271_tx_flush);
u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
{
if (WARN_ON(!rate_set))
return 0;
return BIT(__ffs(rate_set));
}
EXPORT_SYMBOL_GPL(wl1271_tx_min_rate_get);
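/*
 * Illustrative note (not part of the driver): BIT(__ffs(x)) isolates the
 * lowest set bit of a non-zero word; in two's complement arithmetic the
 * same value is x & -x:
 */
static inline unsigned int demo_lowest_set_bit(unsigned int x)
{
	return x & -x; /* 0b101100 -> 0b000100 */
}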
void wlcore_stop_queue_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 queue, enum wlcore_queue_stop_reason reason)
{
int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
bool stopped = !!wl->queue_stop_reasons[hwq];
/* the queue should not already be stopped for this reason */
WARN_ON_ONCE(test_and_set_bit(reason, &wl->queue_stop_reasons[hwq]));
if (stopped)
return;
ieee80211_stop_queue(wl->hw, hwq);
}
void wlcore_stop_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason)
{
unsigned long flags;
spin_lock_irqsave(&wl->wl_lock, flags);
wlcore_stop_queue_locked(wl, wlvif, queue, reason);
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
void wlcore_wake_queue(struct wl1271 *wl, struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason)
{
unsigned long flags;
int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
spin_lock_irqsave(&wl->wl_lock, flags);
/* the queue should currently be stopped for this reason */
WARN_ON_ONCE(!test_and_clear_bit(reason, &wl->queue_stop_reasons[hwq]));
if (wl->queue_stop_reasons[hwq])
goto out;
ieee80211_wake_queue(wl->hw, hwq);
out:
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
void wlcore_stop_queues(struct wl1271 *wl,
enum wlcore_queue_stop_reason reason)
{
int i;
unsigned long flags;
spin_lock_irqsave(&wl->wl_lock, flags);
/* mark all possible queues as stopped */
for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
WARN_ON_ONCE(test_and_set_bit(reason,
&wl->queue_stop_reasons[i]));
/* use the global version to make sure all vifs in mac80211 we don't
 * know about are stopped.
*/
ieee80211_stop_queues(wl->hw);
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
void wlcore_wake_queues(struct wl1271 *wl,
enum wlcore_queue_stop_reason reason)
{
int i;
unsigned long flags;
spin_lock_irqsave(&wl->wl_lock, flags);
/* mark all possible queues as awake */
for (i = 0; i < WLCORE_NUM_MAC_ADDRESSES * NUM_TX_QUEUES; i++)
WARN_ON_ONCE(!test_and_clear_bit(reason,
&wl->queue_stop_reasons[i]));
/* use the global version to make sure all vifs in mac80211 we don't
 * know about are woken up.
*/
ieee80211_wake_queues(wl->hw);
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
bool wlcore_is_queue_stopped_by_reason(struct wl1271 *wl,
struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason)
{
unsigned long flags;
bool stopped;
spin_lock_irqsave(&wl->wl_lock, flags);
stopped = wlcore_is_queue_stopped_by_reason_locked(wl, wlvif, queue,
reason);
spin_unlock_irqrestore(&wl->wl_lock, flags);
return stopped;
}
bool wlcore_is_queue_stopped_by_reason_locked(struct wl1271 *wl,
struct wl12xx_vif *wlvif, u8 queue,
enum wlcore_queue_stop_reason reason)
{
int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
assert_spin_locked(&wl->wl_lock);
return test_bit(reason, &wl->queue_stop_reasons[hwq]);
}
bool wlcore_is_queue_stopped_locked(struct wl1271 *wl, struct wl12xx_vif *wlvif,
u8 queue)
{
int hwq = wlcore_tx_get_mac80211_queue(wlvif, queue);
assert_spin_locked(&wl->wl_lock);
return !!wl->queue_stop_reasons[hwq];
}
|
linux-master
|
drivers/net/wireless/ti/wlcore/tx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1271
*
* Copyright (C) 2008-2010 Nokia Corporation
*
* Contact: Luciano Coelho <[email protected]>
*/
#include <linux/slab.h>
#include <linux/export.h>
#include "debug.h"
#include "acx.h"
#include "boot.h"
#include "io.h"
#include "event.h"
#include "rx.h"
#include "hw_ops.h"
static int wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
{
u32 cpu_ctrl;
int ret;
/* 10.5.0 run the firmware (I) */
ret = wlcore_read_reg(wl, REG_ECPU_CONTROL, &cpu_ctrl);
if (ret < 0)
goto out;
/* 10.5.1 run the firmware (II) */
cpu_ctrl |= flag;
ret = wlcore_write_reg(wl, REG_ECPU_CONTROL, cpu_ctrl);
out:
return ret;
}
static int wlcore_boot_parse_fw_ver(struct wl1271 *wl,
struct wl1271_static_data *static_data)
{
int ret;
strncpy(wl->chip.fw_ver_str, static_data->fw_version,
sizeof(wl->chip.fw_ver_str));
/* make sure the string is NULL-terminated */
wl->chip.fw_ver_str[sizeof(wl->chip.fw_ver_str) - 1] = '\0';
ret = sscanf(wl->chip.fw_ver_str + 4, "%u.%u.%u.%u.%u",
&wl->chip.fw_ver[0], &wl->chip.fw_ver[1],
&wl->chip.fw_ver[2], &wl->chip.fw_ver[3],
&wl->chip.fw_ver[4]);
if (ret != 5) {
wl1271_warning("fw version incorrect value");
memset(wl->chip.fw_ver, 0, sizeof(wl->chip.fw_ver));
ret = -EINVAL;
goto out;
}
ret = wlcore_identify_fw(wl);
if (ret < 0)
goto out;
out:
return ret;
}
static int wlcore_validate_fw_ver(struct wl1271 *wl)
{
unsigned int *fw_ver = wl->chip.fw_ver;
unsigned int *min_ver = (wl->fw_type == WL12XX_FW_TYPE_MULTI) ?
wl->min_mr_fw_ver : wl->min_sr_fw_ver;
char min_fw_str[32] = "";
int off = 0;
int i;
/* the chip must be exactly equal */
if ((min_ver[FW_VER_CHIP] != WLCORE_FW_VER_IGNORE) &&
(min_ver[FW_VER_CHIP] != fw_ver[FW_VER_CHIP]))
goto fail;
/* the firmware type must be equal */
if ((min_ver[FW_VER_IF_TYPE] != WLCORE_FW_VER_IGNORE) &&
(min_ver[FW_VER_IF_TYPE] != fw_ver[FW_VER_IF_TYPE]))
goto fail;
/* the project number must be equal */
if ((min_ver[FW_VER_SUBTYPE] != WLCORE_FW_VER_IGNORE) &&
(min_ver[FW_VER_SUBTYPE] != fw_ver[FW_VER_SUBTYPE]))
goto fail;
/* the API version must be greater or equal */
if ((min_ver[FW_VER_MAJOR] != WLCORE_FW_VER_IGNORE) &&
(min_ver[FW_VER_MAJOR] > fw_ver[FW_VER_MAJOR]))
goto fail;
/* if the API version is equal... */
if (((min_ver[FW_VER_MAJOR] == WLCORE_FW_VER_IGNORE) ||
(min_ver[FW_VER_MAJOR] == fw_ver[FW_VER_MAJOR])) &&
/* ...the minor must be greater or equal */
((min_ver[FW_VER_MINOR] != WLCORE_FW_VER_IGNORE) &&
(min_ver[FW_VER_MINOR] > fw_ver[FW_VER_MINOR])))
goto fail;
return 0;
fail:
for (i = 0; i < NUM_FW_VER && off < sizeof(min_fw_str); i++)
if (min_ver[i] == WLCORE_FW_VER_IGNORE)
off += snprintf(min_fw_str + off,
sizeof(min_fw_str) - off,
"*.");
else
off += snprintf(min_fw_str + off,
sizeof(min_fw_str) - off,
"%u.", min_ver[i]);
wl1271_error("Your WiFi FW version (%u.%u.%u.%u.%u) is invalid.\n"
"Please use at least FW %s\n"
"You can get the latest firmwares at:\n"
"git://git.ti.com/wilink8-wlan/wl18xx_fw.git",
fw_ver[FW_VER_CHIP], fw_ver[FW_VER_IF_TYPE],
fw_ver[FW_VER_MAJOR], fw_ver[FW_VER_SUBTYPE],
fw_ver[FW_VER_MINOR], min_fw_str);
return -EINVAL;
}
static int wlcore_boot_static_data(struct wl1271 *wl)
{
struct wl1271_static_data *static_data;
size_t len = sizeof(*static_data) + wl->static_data_priv_len;
int ret;
static_data = kmalloc(len, GFP_KERNEL);
if (!static_data) {
ret = -ENOMEM;
goto out;
}
ret = wlcore_read(wl, wl->cmd_box_addr, static_data, len, false);
if (ret < 0)
goto out_free;
ret = wlcore_boot_parse_fw_ver(wl, static_data);
if (ret < 0)
goto out_free;
ret = wlcore_validate_fw_ver(wl);
if (ret < 0)
goto out_free;
ret = wlcore_handle_static_data(wl, static_data);
if (ret < 0)
goto out_free;
out_free:
kfree(static_data);
out:
return ret;
}
static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
size_t fw_data_len, u32 dest)
{
struct wlcore_partition_set partition;
int addr, chunk_num, partition_limit;
u8 *p, *chunk;
int ret;
/* whal_FwCtrl_LoadFwImageSm() */
wl1271_debug(DEBUG_BOOT, "starting firmware upload");
wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
fw_data_len, CHUNK_SIZE);
if ((fw_data_len % 4) != 0) {
wl1271_error("firmware length not multiple of four");
return -EIO;
}
chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
if (!chunk) {
wl1271_error("allocation for firmware upload chunk failed");
return -ENOMEM;
}
memcpy(&partition, &wl->ptable[PART_DOWN], sizeof(partition));
partition.mem.start = dest;
ret = wlcore_set_partition(wl, &partition);
if (ret < 0)
goto out;
/* 10.1 set partition limit and chunk num */
chunk_num = 0;
partition_limit = wl->ptable[PART_DOWN].mem.size;
while (chunk_num < fw_data_len / CHUNK_SIZE) {
/* 10.2 update partition, if needed */
addr = dest + (chunk_num + 2) * CHUNK_SIZE;
if (addr > partition_limit) {
addr = dest + chunk_num * CHUNK_SIZE;
partition_limit = chunk_num * CHUNK_SIZE +
wl->ptable[PART_DOWN].mem.size;
partition.mem.start = addr;
ret = wlcore_set_partition(wl, &partition);
if (ret < 0)
goto out;
}
/* 10.3 upload the chunk */
addr = dest + chunk_num * CHUNK_SIZE;
p = buf + chunk_num * CHUNK_SIZE;
memcpy(chunk, p, CHUNK_SIZE);
wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
p, addr);
ret = wlcore_write(wl, addr, chunk, CHUNK_SIZE, false);
if (ret < 0)
goto out;
chunk_num++;
}
/* 10.4 upload the last chunk */
addr = dest + chunk_num * CHUNK_SIZE;
p = buf + chunk_num * CHUNK_SIZE;
memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
fw_data_len % CHUNK_SIZE, p, addr);
ret = wlcore_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
out:
kfree(chunk);
return ret;
}
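/*
 * Illustrative sketch (not part of the driver): the loop above keeps a
 * fixed-size "download" window over target memory and re-bases it before
 * the writer would run past its end (it looks two chunks ahead, as the
 * driver does). The same window arithmetic with hypothetical sizes and
 * no bus I/O:
 */
static inline int demo_window_moves(unsigned int fw_len,
				    unsigned int chunk_size,
				    unsigned int window_size)
{
	unsigned int chunk, limit = window_size;
	int moves = 0;

	for (chunk = 0; chunk < fw_len / chunk_size; chunk++) {
		if ((chunk + 2) * chunk_size > limit) {
			limit = chunk * chunk_size + window_size;
			moves++; /* a wlcore_set_partition() call */
		}
	}
	return moves;
}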
int wlcore_boot_upload_firmware(struct wl1271 *wl)
{
u32 chunks, addr, len;
int ret = 0;
u8 *fw;
fw = wl->fw;
chunks = be32_to_cpup((__be32 *) fw);
fw += sizeof(u32);
wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
while (chunks--) {
addr = be32_to_cpup((__be32 *) fw);
fw += sizeof(u32);
len = be32_to_cpup((__be32 *) fw);
fw += sizeof(u32);
if (len > 300000) {
wl1271_info("firmware chunk too long: %u", len);
return -EINVAL;
}
wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
chunks, addr, len);
ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
if (ret != 0)
break;
fw += len;
}
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_boot_upload_firmware);
int wlcore_boot_upload_nvs(struct wl1271 *wl)
{
struct platform_device *pdev = wl->pdev;
struct wlcore_platdev_data *pdev_data = dev_get_platdata(&pdev->dev);
const char *nvs_name = "unknown";
size_t nvs_len, burst_len;
int i;
u32 dest_addr, val;
u8 *nvs_ptr, *nvs_aligned;
int ret;
if (wl->nvs == NULL) {
wl1271_error("NVS file is needed during boot");
return -ENODEV;
}
if (pdev_data && pdev_data->family)
nvs_name = pdev_data->family->nvs_name;
if (wl->quirks & WLCORE_QUIRK_LEGACY_NVS) {
struct wl1271_nvs_file *nvs =
(struct wl1271_nvs_file *)wl->nvs;
/*
* FIXME: the LEGACY NVS image support (NVS's missing the 5GHz
* band configurations) can be removed when those NVS files stop
* floating around.
*/
if (wl->nvs_len == sizeof(struct wl1271_nvs_file) ||
wl->nvs_len == WL1271_INI_LEGACY_NVS_FILE_SIZE) {
if (nvs->general_params.dual_mode_select)
wl->enable_11a = true;
}
if (wl->nvs_len != sizeof(struct wl1271_nvs_file) &&
(wl->nvs_len != WL1271_INI_LEGACY_NVS_FILE_SIZE ||
wl->enable_11a)) {
wl1271_error("%s size is not as expected: %zu != %zu",
nvs_name, wl->nvs_len,
sizeof(struct wl1271_nvs_file));
kfree(wl->nvs);
wl->nvs = NULL;
wl->nvs_len = 0;
return -EILSEQ;
}
/* only the first part of the NVS needs to be uploaded */
nvs_len = sizeof(nvs->nvs);
nvs_ptr = (u8 *) nvs->nvs;
} else {
struct wl128x_nvs_file *nvs = (struct wl128x_nvs_file *)wl->nvs;
if (wl->nvs_len == sizeof(struct wl128x_nvs_file)) {
if (nvs->general_params.dual_mode_select)
wl->enable_11a = true;
} else {
wl1271_error("%s size is not as expected: %zu != %zu",
nvs_name, wl->nvs_len,
sizeof(struct wl128x_nvs_file));
kfree(wl->nvs);
wl->nvs = NULL;
wl->nvs_len = 0;
return -EILSEQ;
}
/* only the first part of the NVS needs to be uploaded */
nvs_len = sizeof(nvs->nvs);
nvs_ptr = (u8 *)nvs->nvs;
}
/* update current MAC address to NVS */
nvs_ptr[11] = wl->addresses[0].addr[0];
nvs_ptr[10] = wl->addresses[0].addr[1];
nvs_ptr[6] = wl->addresses[0].addr[2];
nvs_ptr[5] = wl->addresses[0].addr[3];
nvs_ptr[4] = wl->addresses[0].addr[4];
nvs_ptr[3] = wl->addresses[0].addr[5];
/*
* Layout before the actual NVS tables:
* 1 byte : burst length.
* 2 bytes: destination address.
* n bytes: data to burst copy.
*
* This is ended by a 0 length, then the NVS tables.
*/
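	/*
	 * Hypothetical byte-level example of the layout above (not real
	 * NVS content):
	 *
	 *   02 a0 30 11 22 33 44 55 66 77 88 00 ...
	 *   |  |     |                       |
	 *   |  |     |                       next burst length; 0 ends
	 *   |  |     8 data bytes, burst-copied one 32-bit word at a time
	 *   |  dest_addr bytes (LSB masked with 0xfe) -> 0x30a0
	 *   burst length = 2 (counted in 32-bit words)
	 */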
/* FIXME: Do we need to check here whether the LSB is 1? */
while (nvs_ptr[0]) {
burst_len = nvs_ptr[0];
dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));
/*
* Due to our new wl1271_translate_reg_addr function,
* we need to add the register partition start address
* to the destination
*/
dest_addr += wl->curr_part.reg.start;
/* We move our pointer to the data */
nvs_ptr += 3;
for (i = 0; i < burst_len; i++) {
if (nvs_ptr + 3 >= (u8 *) wl->nvs + nvs_len)
goto out_badnvs;
val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
| (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
wl1271_debug(DEBUG_BOOT,
"nvs burst write 0x%x: 0x%x",
dest_addr, val);
ret = wlcore_write32(wl, dest_addr, val);
if (ret < 0)
return ret;
nvs_ptr += 4;
dest_addr += 4;
}
if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
goto out_badnvs;
}
/*
 * We've reached the first zero length; the first NVS table
 * is located at an aligned offset which is at least 7 bytes further.
 * NOTE: to simplify the casting, we assume the wl->nvs->nvs element
 * comes first, i.e. at the beginning of the wl->nvs structure.
*/
nvs_ptr = (u8 *)wl->nvs +
ALIGN(nvs_ptr - (u8 *)wl->nvs + 7, 4);
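	/*
	 * Worked example (hypothetical offsets): if nvs_ptr currently
	 * sits at offset 13 from wl->nvs, ALIGN(13 + 7, 4) == 20, an
	 * advance of exactly 7 bytes; at offset 14, ALIGN(21, 4) == 24,
	 * an advance of 10. The step is therefore always 7..10 bytes
	 * and lands on a 4-byte boundary.
	 */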
if (nvs_ptr >= (u8 *) wl->nvs + nvs_len)
goto out_badnvs;
nvs_len -= nvs_ptr - (u8 *)wl->nvs;
/* Now we must set the partition correctly */
ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
if (ret < 0)
return ret;
/* Copy the NVS tables to a new block to ensure alignment */
nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
if (!nvs_aligned)
return -ENOMEM;
/* And finally we upload the NVS tables */
ret = wlcore_write_data(wl, REG_CMD_MBOX_ADDRESS, nvs_aligned, nvs_len,
false);
kfree(nvs_aligned);
return ret;
out_badnvs:
wl1271_error("nvs data is malformed");
return -EILSEQ;
}
EXPORT_SYMBOL_GPL(wlcore_boot_upload_nvs);
int wlcore_boot_run_firmware(struct wl1271 *wl)
{
int loop, ret;
u32 chip_id, intr;
/* Make sure we have the boot partition */
ret = wlcore_set_partition(wl, &wl->ptable[PART_BOOT]);
if (ret < 0)
return ret;
ret = wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
if (ret < 0)
return ret;
ret = wlcore_read_reg(wl, REG_CHIP_ID_B, &chip_id);
if (ret < 0)
return ret;
wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
if (chip_id != wl->chip.id) {
wl1271_error("chip id doesn't match after firmware boot");
return -EIO;
}
/* wait for init to complete */
loop = 0;
while (loop++ < INIT_LOOP) {
udelay(INIT_LOOP_DELAY);
ret = wlcore_read_reg(wl, REG_INTERRUPT_NO_CLEAR, &intr);
if (ret < 0)
return ret;
if (intr == 0xffffffff) {
wl1271_error("error reading hardware complete "
"init indication");
return -EIO;
}
/* check that ACX_INTR_INIT_COMPLETE is enabled */
else if (intr & WL1271_ACX_INTR_INIT_COMPLETE) {
ret = wlcore_write_reg(wl, REG_INTERRUPT_ACK,
WL1271_ACX_INTR_INIT_COMPLETE);
if (ret < 0)
return ret;
break;
}
}
if (loop > INIT_LOOP) {
wl1271_error("timeout waiting for the hardware to "
"complete initialization");
return -EIO;
}
/* get hardware config command mail box */
ret = wlcore_read_reg(wl, REG_COMMAND_MAILBOX_PTR, &wl->cmd_box_addr);
if (ret < 0)
return ret;
wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x", wl->cmd_box_addr);
/* get hardware config event mail box */
ret = wlcore_read_reg(wl, REG_EVENT_MAILBOX_PTR, &wl->mbox_ptr[0]);
if (ret < 0)
return ret;
wl->mbox_ptr[1] = wl->mbox_ptr[0] + wl->mbox_size;
wl1271_debug(DEBUG_MAILBOX, "MBOX ptrs: 0x%x 0x%x",
wl->mbox_ptr[0], wl->mbox_ptr[1]);
ret = wlcore_boot_static_data(wl);
if (ret < 0) {
wl1271_error("error getting static data");
return ret;
}
/*
* in case of full asynchronous mode the firmware event must be
* ready to receive event from the command mailbox
*/
/* unmask required mbox events */
ret = wl1271_event_unmask(wl);
if (ret < 0) {
wl1271_error("EVENT mask setting failed");
return ret;
}
/* set the working partition to its "running" mode offset */
ret = wlcore_set_partition(wl, &wl->ptable[PART_WORK]);
/* firmware startup completed */
return ret;
}
EXPORT_SYMBOL_GPL(wlcore_boot_run_firmware);
|
linux-master
|
drivers/net/wireless/ti/wlcore/boot.c
|
// SPDX-License-Identifier: GPL-2.0
#include "acx.h"
#include <linux/module.h>
#include <linux/slab.h>
#include "wl1251.h"
#include "reg.h"
#include "cmd.h"
#include "ps.h"
int wl1251_acx_frame_rates(struct wl1251 *wl, u8 ctrl_rate, u8 ctrl_mod,
u8 mgt_rate, u8 mgt_mod)
{
struct acx_fw_gen_frame_rates *rates;
int ret;
wl1251_debug(DEBUG_ACX, "acx frame rates");
rates = kzalloc(sizeof(*rates), GFP_KERNEL);
if (!rates)
return -ENOMEM;
rates->tx_ctrl_frame_rate = ctrl_rate;
rates->tx_ctrl_frame_mod = ctrl_mod;
rates->tx_mgt_frame_rate = mgt_rate;
rates->tx_mgt_frame_mod = mgt_mod;
ret = wl1251_cmd_configure(wl, ACX_FW_GEN_FRAME_RATES,
rates, sizeof(*rates));
if (ret < 0) {
wl1251_error("Failed to set FW rates and modulation");
goto out;
}
out:
kfree(rates);
return ret;
}
int wl1251_acx_station_id(struct wl1251 *wl)
{
struct acx_dot11_station_id *mac;
int ret, i;
wl1251_debug(DEBUG_ACX, "acx dot11_station_id");
mac = kzalloc(sizeof(*mac), GFP_KERNEL);
if (!mac)
return -ENOMEM;
for (i = 0; i < ETH_ALEN; i++)
mac->mac[i] = wl->mac_addr[ETH_ALEN - 1 - i];
ret = wl1251_cmd_configure(wl, DOT11_STATION_ID, mac, sizeof(*mac));
kfree(mac);
return ret;
}
int wl1251_acx_default_key(struct wl1251 *wl, u8 key_id)
{
struct acx_dot11_default_key *default_key;
int ret;
wl1251_debug(DEBUG_ACX, "acx dot11_default_key (%d)", key_id);
default_key = kzalloc(sizeof(*default_key), GFP_KERNEL);
if (!default_key)
return -ENOMEM;
default_key->id = key_id;
ret = wl1251_cmd_configure(wl, DOT11_DEFAULT_KEY,
default_key, sizeof(*default_key));
if (ret < 0) {
wl1251_error("Couldn't set default key");
goto out;
}
wl->default_key = key_id;
out:
kfree(default_key);
return ret;
}
int wl1251_acx_wake_up_conditions(struct wl1251 *wl, u8 wake_up_event,
u8 listen_interval)
{
struct acx_wake_up_condition *wake_up;
int ret;
wl1251_debug(DEBUG_ACX, "acx wake up conditions");
wake_up = kzalloc(sizeof(*wake_up), GFP_KERNEL);
if (!wake_up)
return -ENOMEM;
wake_up->wake_up_event = wake_up_event;
wake_up->listen_interval = listen_interval;
ret = wl1251_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS,
wake_up, sizeof(*wake_up));
if (ret < 0) {
wl1251_warning("could not set wake up conditions: %d", ret);
goto out;
}
out:
kfree(wake_up);
return ret;
}
int wl1251_acx_sleep_auth(struct wl1251 *wl, u8 sleep_auth)
{
struct acx_sleep_auth *auth;
int ret;
wl1251_debug(DEBUG_ACX, "acx sleep auth");
auth = kzalloc(sizeof(*auth), GFP_KERNEL);
if (!auth)
return -ENOMEM;
auth->sleep_auth = sleep_auth;
ret = wl1251_cmd_configure(wl, ACX_SLEEP_AUTH, auth, sizeof(*auth));
kfree(auth);
return ret;
}
int wl1251_acx_fw_version(struct wl1251 *wl, char *buf, size_t len)
{
struct acx_revision *rev;
int ret;
wl1251_debug(DEBUG_ACX, "acx fw rev");
rev = kzalloc(sizeof(*rev), GFP_KERNEL);
if (!rev)
return -ENOMEM;
ret = wl1251_cmd_interrogate(wl, ACX_FW_REV, rev, sizeof(*rev));
if (ret < 0) {
wl1251_warning("ACX_FW_REV interrogate failed");
goto out;
}
/* be careful with the buffer sizes */
strncpy(buf, rev->fw_version, min(len, sizeof(rev->fw_version)));
/*
* if the firmware version string is exactly
 * sizeof(rev->fw_version) long, or len is less than
 * sizeof(rev->fw_version), it won't be null terminated
*/
buf[min(len, sizeof(rev->fw_version)) - 1] = '\0';
out:
kfree(rev);
return ret;
}
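/*
 * Illustrative sketch (not part of the driver): the copy-then-terminate
 * pattern above guards against strncpy()'s unterminated-result corner
 * case. A minimal standalone version (assumes dst_len >= 1):
 */
static inline void demo_copy_truncated(char *dst, unsigned long dst_len,
				       const char *src)
{
	unsigned long i;

	for (i = 0; i + 1 < dst_len && src[i]; i++)
		dst[i] = src[i];
	dst[i] = '\0'; /* always NUL-terminate, truncating if needed */
}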
int wl1251_acx_tx_power(struct wl1251 *wl, int power)
{
struct acx_current_tx_power *acx;
int ret;
wl1251_debug(DEBUG_ACX, "acx dot11_cur_tx_pwr");
if (power < 0 || power > 25)
return -EINVAL;
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->current_tx_power = power * 10;
ret = wl1251_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("configure of tx power failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1251_acx_feature_cfg(struct wl1251 *wl, u32 data_flow_options)
{
struct acx_feature_config *feature;
int ret;
wl1251_debug(DEBUG_ACX, "acx feature cfg");
feature = kzalloc(sizeof(*feature), GFP_KERNEL);
if (!feature)
return -ENOMEM;
/* DF_ENCRYPTION_DISABLE and DF_SNIFF_MODE_ENABLE can be set */
feature->data_flow_options = data_flow_options;
feature->options = 0;
ret = wl1251_cmd_configure(wl, ACX_FEATURE_CFG,
feature, sizeof(*feature));
if (ret < 0) {
wl1251_error("Couldn't set HW encryption");
goto out;
}
out:
kfree(feature);
return ret;
}
int wl1251_acx_mem_map(struct wl1251 *wl, struct acx_header *mem_map,
size_t len)
{
int ret;
wl1251_debug(DEBUG_ACX, "acx mem map");
ret = wl1251_cmd_interrogate(wl, ACX_MEM_MAP, mem_map, len);
if (ret < 0)
return ret;
return 0;
}
int wl1251_acx_data_path_params(struct wl1251 *wl,
struct acx_data_path_params_resp *resp)
{
struct acx_data_path_params *params;
int ret;
wl1251_debug(DEBUG_ACX, "acx data path params");
params = kzalloc(sizeof(*params), GFP_KERNEL);
if (!params)
return -ENOMEM;
params->rx_packet_ring_chunk_size = DP_RX_PACKET_RING_CHUNK_SIZE;
params->tx_packet_ring_chunk_size = DP_TX_PACKET_RING_CHUNK_SIZE;
params->rx_packet_ring_chunk_num = DP_RX_PACKET_RING_CHUNK_NUM;
params->tx_packet_ring_chunk_num = DP_TX_PACKET_RING_CHUNK_NUM;
params->tx_complete_threshold = 1;
params->tx_complete_ring_depth = FW_TX_CMPLT_BLOCK_SIZE;
params->tx_complete_timeout = DP_TX_COMPLETE_TIME_OUT;
ret = wl1251_cmd_configure(wl, ACX_DATA_PATH_PARAMS,
params, sizeof(*params));
if (ret < 0)
goto out;
/* FIXME: shouldn't this be ACX_DATA_PATH_RESP_PARAMS? */
ret = wl1251_cmd_interrogate(wl, ACX_DATA_PATH_PARAMS,
resp, sizeof(*resp));
if (ret < 0) {
wl1251_warning("failed to read data path parameters: %d", ret);
goto out;
} else if (resp->header.cmd.status != CMD_STATUS_SUCCESS) {
wl1251_warning("data path parameter acx status failed");
ret = -EIO;
goto out;
}
out:
kfree(params);
return ret;
}
int wl1251_acx_rx_msdu_life_time(struct wl1251 *wl, u32 life_time)
{
struct acx_rx_msdu_lifetime *acx;
int ret;
wl1251_debug(DEBUG_ACX, "acx rx msdu life time");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->lifetime = life_time;
ret = wl1251_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME,
acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("failed to set rx msdu life time: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1251_acx_rx_config(struct wl1251 *wl, u32 config, u32 filter)
{
struct acx_rx_config *rx_config;
int ret;
wl1251_debug(DEBUG_ACX, "acx rx config");
rx_config = kzalloc(sizeof(*rx_config), GFP_KERNEL);
if (!rx_config)
return -ENOMEM;
rx_config->config_options = config;
rx_config->filter_options = filter;
ret = wl1251_cmd_configure(wl, ACX_RX_CFG,
rx_config, sizeof(*rx_config));
if (ret < 0) {
wl1251_warning("failed to set rx config: %d", ret);
goto out;
}
out:
kfree(rx_config);
return ret;
}
int wl1251_acx_pd_threshold(struct wl1251 *wl)
{
struct acx_packet_detection *pd;
int ret;
wl1251_debug(DEBUG_ACX, "acx data pd threshold");
pd = kzalloc(sizeof(*pd), GFP_KERNEL);
if (!pd)
return -ENOMEM;
/* FIXME: threshold value not set */
ret = wl1251_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
if (ret < 0) {
wl1251_warning("failed to set pd threshold: %d", ret);
goto out;
}
out:
kfree(pd);
return ret;
}
int wl1251_acx_slot(struct wl1251 *wl, enum acx_slot_type slot_time)
{
struct acx_slot *slot;
int ret;
wl1251_debug(DEBUG_ACX, "acx slot");
slot = kzalloc(sizeof(*slot), GFP_KERNEL);
if (!slot)
return -ENOMEM;
slot->wone_index = STATION_WONE_INDEX;
slot->slot_time = slot_time;
ret = wl1251_cmd_configure(wl, ACX_SLOT, slot, sizeof(*slot));
if (ret < 0) {
wl1251_warning("failed to set slot time: %d", ret);
goto out;
}
out:
kfree(slot);
return ret;
}
int wl1251_acx_group_address_tbl(struct wl1251 *wl, bool enable,
void *mc_list, u32 mc_list_len)
{
struct acx_dot11_grp_addr_tbl *acx;
int ret;
wl1251_debug(DEBUG_ACX, "acx group address tbl");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
/* MAC filtering */
acx->enabled = enable;
acx->num_groups = mc_list_len;
memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
ret = wl1251_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL,
acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("failed to set group addr table: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1251_acx_service_period_timeout(struct wl1251 *wl)
{
struct acx_rx_timeout *rx_timeout;
int ret;
rx_timeout = kzalloc(sizeof(*rx_timeout), GFP_KERNEL);
if (!rx_timeout)
return -ENOMEM;
wl1251_debug(DEBUG_ACX, "acx service period timeout");
rx_timeout->ps_poll_timeout = RX_TIMEOUT_PS_POLL_DEF;
rx_timeout->upsd_timeout = RX_TIMEOUT_UPSD_DEF;
ret = wl1251_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT,
rx_timeout, sizeof(*rx_timeout));
if (ret < 0) {
wl1251_warning("failed to set service period timeout: %d",
ret);
goto out;
}
out:
kfree(rx_timeout);
return ret;
}
int wl1251_acx_rts_threshold(struct wl1251 *wl, u16 rts_threshold)
{
struct acx_rts_threshold *rts;
int ret;
wl1251_debug(DEBUG_ACX, "acx rts threshold");
rts = kzalloc(sizeof(*rts), GFP_KERNEL);
if (!rts)
return -ENOMEM;
rts->threshold = rts_threshold;
ret = wl1251_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
if (ret < 0) {
wl1251_warning("failed to set rts threshold: %d", ret);
goto out;
}
out:
kfree(rts);
return ret;
}
int wl1251_acx_beacon_filter_opt(struct wl1251 *wl, bool enable_filter)
{
struct acx_beacon_filter_option *beacon_filter;
int ret;
wl1251_debug(DEBUG_ACX, "acx beacon filter opt");
beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL);
if (!beacon_filter)
return -ENOMEM;
beacon_filter->enable = enable_filter;
beacon_filter->max_num_beacons = 0;
ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_OPT,
beacon_filter, sizeof(*beacon_filter));
if (ret < 0) {
wl1251_warning("failed to set beacon filter opt: %d", ret);
goto out;
}
out:
kfree(beacon_filter);
return ret;
}
int wl1251_acx_beacon_filter_table(struct wl1251 *wl)
{
struct acx_beacon_filter_ie_table *ie_table;
int idx = 0;
int ret;
wl1251_debug(DEBUG_ACX, "acx beacon filter table");
ie_table = kzalloc(sizeof(*ie_table), GFP_KERNEL);
if (!ie_table)
return -ENOMEM;
/* configure default beacon pass-through rules */
ie_table->num_ie = 1;
ie_table->table[idx++] = BEACON_FILTER_IE_ID_CHANNEL_SWITCH_ANN;
ie_table->table[idx++] = BEACON_RULE_PASS_ON_APPEARANCE;
ret = wl1251_cmd_configure(wl, ACX_BEACON_FILTER_TABLE,
ie_table, sizeof(*ie_table));
if (ret < 0) {
wl1251_warning("failed to set beacon filter table: %d", ret);
goto out;
}
out:
kfree(ie_table);
return ret;
}
int wl1251_acx_conn_monit_params(struct wl1251 *wl)
{
struct acx_conn_monit_params *acx;
int ret;
wl1251_debug(DEBUG_ACX, "acx connection monitor parameters");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->synch_fail_thold = SYNCH_FAIL_DEFAULT_THRESHOLD;
acx->bss_lose_timeout = NO_BEACON_DEFAULT_TIMEOUT;
ret = wl1251_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("failed to set connection monitor "
"parameters: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1251_acx_sg_enable(struct wl1251 *wl)
{
struct acx_bt_wlan_coex *pta;
int ret;
wl1251_debug(DEBUG_ACX, "acx sg enable");
pta = kzalloc(sizeof(*pta), GFP_KERNEL);
if (!pta)
return -ENOMEM;
pta->enable = SG_ENABLE;
ret = wl1251_cmd_configure(wl, ACX_SG_ENABLE, pta, sizeof(*pta));
if (ret < 0) {
wl1251_warning("failed to set softgemini enable: %d", ret);
goto out;
}
out:
kfree(pta);
return ret;
}
int wl1251_acx_sg_cfg(struct wl1251 *wl)
{
struct acx_bt_wlan_coex_param *param;
int ret;
wl1251_debug(DEBUG_ACX, "acx sg cfg");
param = kzalloc(sizeof(*param), GFP_KERNEL);
if (!param)
return -ENOMEM;
/* BT-WLAN coext parameters */
param->min_rate = RATE_INDEX_24MBPS;
param->bt_hp_max_time = PTA_BT_HP_MAXTIME_DEF;
param->wlan_hp_max_time = PTA_WLAN_HP_MAX_TIME_DEF;
param->sense_disable_timer = PTA_SENSE_DISABLE_TIMER_DEF;
param->rx_time_bt_hp = PTA_PROTECTIVE_RX_TIME_DEF;
param->tx_time_bt_hp = PTA_PROTECTIVE_TX_TIME_DEF;
param->rx_time_bt_hp_fast = PTA_PROTECTIVE_RX_TIME_FAST_DEF;
param->tx_time_bt_hp_fast = PTA_PROTECTIVE_TX_TIME_FAST_DEF;
param->wlan_cycle_fast = PTA_CYCLE_TIME_FAST_DEF;
param->bt_anti_starvation_period = PTA_ANTI_STARVE_PERIOD_DEF;
param->next_bt_lp_packet = PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF;
param->wake_up_beacon = PTA_TIME_BEFORE_BEACON_DEF;
param->hp_dm_max_guard_time = PTA_HPDM_MAX_TIME_DEF;
param->next_wlan_packet = PTA_TIME_OUT_NEXT_WLAN_DEF;
param->antenna_type = PTA_ANTENNA_TYPE_DEF;
param->signal_type = PTA_SIGNALING_TYPE_DEF;
param->afh_leverage_on = PTA_AFH_LEVERAGE_ON_DEF;
param->quiet_cycle_num = PTA_NUMBER_QUIET_CYCLE_DEF;
param->max_cts = PTA_MAX_NUM_CTS_DEF;
param->wlan_packets_num = PTA_NUMBER_OF_WLAN_PACKETS_DEF;
param->bt_packets_num = PTA_NUMBER_OF_BT_PACKETS_DEF;
param->missed_rx_avalanche = PTA_RX_FOR_AVALANCHE_DEF;
param->wlan_elp_hp = PTA_ELP_HP_DEF;
param->bt_anti_starvation_cycles = PTA_ANTI_STARVE_NUM_CYCLE_DEF;
param->ack_mode_dual_ant = PTA_ACK_MODE_DEF;
param->pa_sd_enable = PTA_ALLOW_PA_SD_DEF;
param->pta_auto_mode_enable = PTA_AUTO_MODE_NO_CTS_DEF;
param->bt_hp_respected_num = PTA_BT_HP_RESPECTED_DEF;
ret = wl1251_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
if (ret < 0) {
wl1251_warning("failed to set sg config: %d", ret);
goto out;
}
out:
kfree(param);
return ret;
}
int wl1251_acx_cca_threshold(struct wl1251 *wl)
{
struct acx_energy_detection *detection;
int ret;
wl1251_debug(DEBUG_ACX, "acx cca threshold");
detection = kzalloc(sizeof(*detection), GFP_KERNEL);
if (!detection)
return -ENOMEM;
detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D;
detection->tx_energy_detection = 0;
ret = wl1251_cmd_configure(wl, ACX_CCA_THRESHOLD,
detection, sizeof(*detection));
if (ret < 0)
wl1251_warning("failed to set cca threshold: %d", ret);
kfree(detection);
return ret;
}
int wl1251_acx_bcn_dtim_options(struct wl1251 *wl)
{
struct acx_beacon_broadcast *bb;
int ret;
wl1251_debug(DEBUG_ACX, "acx bcn dtim options");
bb = kzalloc(sizeof(*bb), GFP_KERNEL);
if (!bb)
return -ENOMEM;
bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE;
bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE;
bb->rx_broadcast_in_ps = RX_BROADCAST_IN_PS_DEF_VALUE;
bb->ps_poll_threshold = CONSECUTIVE_PS_POLL_FAILURE_DEF;
ret = wl1251_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb));
if (ret < 0) {
wl1251_warning("failed to set rx config: %d", ret);
goto out;
}
out:
kfree(bb);
return ret;
}
int wl1251_acx_aid(struct wl1251 *wl, u16 aid)
{
struct acx_aid *acx_aid;
int ret;
wl1251_debug(DEBUG_ACX, "acx aid");
acx_aid = kzalloc(sizeof(*acx_aid), GFP_KERNEL);
if (!acx_aid)
return -ENOMEM;
acx_aid->aid = aid;
ret = wl1251_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
if (ret < 0) {
wl1251_warning("failed to set aid: %d", ret);
goto out;
}
out:
kfree(acx_aid);
return ret;
}
int wl1251_acx_event_mbox_mask(struct wl1251 *wl, u32 event_mask)
{
struct acx_event_mask *mask;
int ret;
wl1251_debug(DEBUG_ACX, "acx event mbox mask");
mask = kzalloc(sizeof(*mask), GFP_KERNEL);
if (!mask)
return -ENOMEM;
/* high event mask is unused */
mask->high_event_mask = 0xffffffff;
mask->event_mask = event_mask;
ret = wl1251_cmd_configure(wl, ACX_EVENT_MBOX_MASK,
mask, sizeof(*mask));
if (ret < 0) {
wl1251_warning("failed to set acx_event_mbox_mask: %d", ret);
goto out;
}
out:
kfree(mask);
return ret;
}
int wl1251_acx_low_rssi(struct wl1251 *wl, s8 threshold, u8 weight,
u8 depth, enum wl1251_acx_low_rssi_type type)
{
struct acx_low_rssi *rssi;
int ret;
wl1251_debug(DEBUG_ACX, "acx low rssi");
rssi = kzalloc(sizeof(*rssi), GFP_KERNEL);
if (!rssi)
return -ENOMEM;
rssi->threshold = threshold;
rssi->weight = weight;
rssi->depth = depth;
rssi->type = type;
ret = wl1251_cmd_configure(wl, ACX_LOW_RSSI, rssi, sizeof(*rssi));
if (ret < 0)
wl1251_warning("failed to set low rssi threshold: %d", ret);
kfree(rssi);
return ret;
}
int wl1251_acx_set_preamble(struct wl1251 *wl, enum acx_preamble_type preamble)
{
struct acx_preamble *acx;
int ret;
wl1251_debug(DEBUG_ACX, "acx_set_preamble");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->preamble = preamble;
ret = wl1251_cmd_configure(wl, ACX_PREAMBLE_TYPE, acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("Setting of preamble failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1251_acx_cts_protect(struct wl1251 *wl,
enum acx_ctsprotect_type ctsprotect)
{
struct acx_ctsprotect *acx;
int ret;
wl1251_debug(DEBUG_ACX, "acx_set_ctsprotect");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->ctsprotect = ctsprotect;
ret = wl1251_cmd_configure(wl, ACX_CTS_PROTECTION, acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("Setting of ctsprotect failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1251_acx_tsf_info(struct wl1251 *wl, u64 *mactime)
{
struct acx_tsf_info *tsf_info;
int ret;
tsf_info = kzalloc(sizeof(*tsf_info), GFP_KERNEL);
if (!tsf_info)
return -ENOMEM;
ret = wl1251_cmd_interrogate(wl, ACX_TSF_INFO,
tsf_info, sizeof(*tsf_info));
if (ret < 0) {
wl1251_warning("ACX_FW_REV interrogate failed");
goto out;
}
*mactime = tsf_info->current_tsf_lsb |
((u64)tsf_info->current_tsf_msb << 32);
out:
kfree(tsf_info);
return ret;
}
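/*
 * Worked example (hypothetical register values): with
 * current_tsf_msb = 0x1 and current_tsf_lsb = 0x2, the reassembly
 * above yields *mactime = 0x2 | (0x1ULL << 32) = 0x100000002.
 */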
int wl1251_acx_statistics(struct wl1251 *wl, struct acx_statistics *stats)
{
int ret;
wl1251_debug(DEBUG_ACX, "acx statistics");
ret = wl1251_cmd_interrogate(wl, ACX_STATISTICS, stats,
sizeof(*stats));
if (ret < 0) {
wl1251_warning("acx statistics failed: %d", ret);
		return ret;
}
return 0;
}
int wl1251_acx_rate_policies(struct wl1251 *wl)
{
struct acx_rate_policy *acx;
int ret = 0;
wl1251_debug(DEBUG_ACX, "acx rate policies");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
	/* configure a default (one-size-fits-all) rate class plus a
	 * no-retry class */
acx->rate_class_cnt = 2;
acx->rate_class[0].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT;
acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT;
acx->rate_class[0].aflags = 0;
/* no-retry rate class */
acx->rate_class[1].enabled_rates = ACX_RATE_MASK_UNSPECIFIED;
acx->rate_class[1].short_retry_limit = 0;
acx->rate_class[1].long_retry_limit = 0;
acx->rate_class[1].aflags = 0;
ret = wl1251_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("Setting of rate policies failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1251_acx_mem_cfg(struct wl1251 *wl)
{
struct wl1251_acx_config_memory *mem_conf;
int ret, i;
wl1251_debug(DEBUG_ACX, "acx mem cfg");
mem_conf = kzalloc(sizeof(*mem_conf), GFP_KERNEL);
if (!mem_conf)
return -ENOMEM;
/* memory config */
mem_conf->mem_config.num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS);
mem_conf->mem_config.rx_mem_block_num = 35;
mem_conf->mem_config.tx_min_mem_block_num = 64;
mem_conf->mem_config.num_tx_queues = MAX_TX_QUEUES;
mem_conf->mem_config.host_if_options = HOSTIF_PKT_RING;
mem_conf->mem_config.num_ssid_profiles = 1;
mem_conf->mem_config.debug_buffer_size =
cpu_to_le16(TRACE_BUFFER_MAX_SIZE);
/* RX queue config */
mem_conf->rx_queue_config.dma_address = 0;
mem_conf->rx_queue_config.num_descs = ACX_RX_DESC_DEF;
mem_conf->rx_queue_config.priority = DEFAULT_RXQ_PRIORITY;
mem_conf->rx_queue_config.type = DEFAULT_RXQ_TYPE;
/* TX queue config */
for (i = 0; i < MAX_TX_QUEUES; i++) {
mem_conf->tx_queue_config[i].num_descs = ACX_TX_DESC_DEF;
mem_conf->tx_queue_config[i].attributes = i;
}
ret = wl1251_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
sizeof(*mem_conf));
if (ret < 0) {
wl1251_warning("wl1251 mem config failed: %d", ret);
goto out;
}
out:
kfree(mem_conf);
return ret;
}
int wl1251_acx_wr_tbtt_and_dtim(struct wl1251 *wl, u16 tbtt, u8 dtim)
{
struct wl1251_acx_wr_tbtt_and_dtim *acx;
int ret;
wl1251_debug(DEBUG_ACX, "acx tbtt and dtim");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->tbtt = tbtt;
acx->dtim = dtim;
ret = wl1251_cmd_configure(wl, ACX_WR_TBTT_AND_DTIM,
acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("failed to set tbtt and dtim: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1251_acx_bet_enable(struct wl1251 *wl, enum wl1251_acx_bet_mode mode,
u8 max_consecutive)
{
struct wl1251_acx_bet_enable *acx;
int ret;
wl1251_debug(DEBUG_ACX, "acx bet enable");
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->enable = mode;
acx->max_consecutive = max_consecutive;
ret = wl1251_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("wl1251 acx bet enable failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1251_acx_arp_ip_filter(struct wl1251 *wl, bool enable, __be32 address)
{
struct wl1251_acx_arp_filter *acx;
int ret;
wl1251_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->version = ACX_IPV4_VERSION;
acx->enable = enable;
if (enable)
memcpy(acx->address, &address, ACX_IPV4_ADDR_SIZE);
ret = wl1251_cmd_configure(wl, ACX_ARP_IP_FILTER,
acx, sizeof(*acx));
if (ret < 0)
wl1251_warning("failed to set arp ip filter: %d", ret);
kfree(acx);
return ret;
}
int wl1251_acx_ac_cfg(struct wl1251 *wl, u8 ac, u8 cw_min, u16 cw_max,
u8 aifs, u16 txop)
{
struct wl1251_acx_ac_cfg *acx;
int ret = 0;
wl1251_debug(DEBUG_ACX, "acx ac cfg %d cw_ming %d cw_max %d "
"aifs %d txop %d", ac, cw_min, cw_max, aifs, txop);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->ac = ac;
acx->cw_min = cw_min;
acx->cw_max = cw_max;
acx->aifsn = aifs;
acx->txop_limit = txop;
ret = wl1251_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("acx ac cfg failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
int wl1251_acx_tid_cfg(struct wl1251 *wl, u8 queue,
enum wl1251_acx_channel_type type,
u8 tsid, enum wl1251_acx_ps_scheme ps_scheme,
enum wl1251_acx_ack_policy ack_policy)
{
struct wl1251_acx_tid_cfg *acx;
int ret = 0;
wl1251_debug(DEBUG_ACX, "acx tid cfg %d type %d tsid %d "
"ps_scheme %d ack_policy %d", queue, type, tsid,
ps_scheme, ack_policy);
acx = kzalloc(sizeof(*acx), GFP_KERNEL);
if (!acx)
return -ENOMEM;
acx->queue = queue;
acx->type = type;
acx->tsid = tsid;
acx->ps_scheme = ps_scheme;
acx->ack_policy = ack_policy;
ret = wl1251_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
if (ret < 0) {
wl1251_warning("acx tid cfg failed: %d", ret);
goto out;
}
out:
kfree(acx);
return ret;
}
|
linux-master
|
drivers/net/wireless/ti/wl1251/acx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* wl12xx SDIO routines
*
* Copyright (C) 2005 Texas Instruments Incorporated
* Copyright (C) 2008 Google Inc
* Copyright (C) 2009 Bob Copeland ([email protected])
*/
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/pm_runtime.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include "wl1251.h"
struct wl1251_sdio {
struct sdio_func *func;
u32 elp_val;
};
static struct sdio_func *wl_to_func(struct wl1251 *wl)
{
struct wl1251_sdio *wl_sdio = wl->if_priv;
return wl_sdio->func;
}
static void wl1251_sdio_interrupt(struct sdio_func *func)
{
struct wl1251 *wl = sdio_get_drvdata(func);
wl1251_debug(DEBUG_IRQ, "IRQ");
/* FIXME should be synchronous for sdio */
ieee80211_queue_work(wl->hw, &wl->irq_work);
}
static const struct sdio_device_id wl1251_devices[] = {
{ SDIO_DEVICE(SDIO_VENDOR_ID_TI_WL1251, SDIO_DEVICE_ID_TI_WL1251) },
{}
};
MODULE_DEVICE_TABLE(sdio, wl1251_devices);
static void wl1251_sdio_read(struct wl1251 *wl, int addr,
void *buf, size_t len)
{
int ret;
struct sdio_func *func = wl_to_func(wl);
sdio_claim_host(func);
ret = sdio_memcpy_fromio(func, buf, addr, len);
if (ret)
wl1251_error("sdio read failed (%d)", ret);
sdio_release_host(func);
}
static void wl1251_sdio_write(struct wl1251 *wl, int addr,
void *buf, size_t len)
{
int ret;
struct sdio_func *func = wl_to_func(wl);
sdio_claim_host(func);
ret = sdio_memcpy_toio(func, addr, buf, len);
if (ret)
wl1251_error("sdio write failed (%d)", ret);
sdio_release_host(func);
}
static void wl1251_sdio_read_elp(struct wl1251 *wl, int addr, u32 *val)
{
int ret = 0;
struct wl1251_sdio *wl_sdio = wl->if_priv;
struct sdio_func *func = wl_sdio->func;
/*
* The hardware only supports RAW (read after write) access for
* reading, regular sdio_readb won't work here (it interprets
* the unused bits of CMD52 as write data even if we send read
* request).
*/
sdio_claim_host(func);
*val = sdio_writeb_readb(func, wl_sdio->elp_val, addr, &ret);
sdio_release_host(func);
if (ret)
wl1251_error("sdio_readb failed (%d)", ret);
}
static void wl1251_sdio_write_elp(struct wl1251 *wl, int addr, u32 val)
{
int ret = 0;
struct wl1251_sdio *wl_sdio = wl->if_priv;
struct sdio_func *func = wl_sdio->func;
sdio_claim_host(func);
sdio_writeb(func, val, addr, &ret);
sdio_release_host(func);
if (ret)
wl1251_error("sdio_writeb failed (%d)", ret);
else
wl_sdio->elp_val = val;
}
static void wl1251_sdio_reset(struct wl1251 *wl)
{
}
static void wl1251_sdio_enable_irq(struct wl1251 *wl)
{
struct sdio_func *func = wl_to_func(wl);
sdio_claim_host(func);
sdio_claim_irq(func, wl1251_sdio_interrupt);
sdio_release_host(func);
}
static void wl1251_sdio_disable_irq(struct wl1251 *wl)
{
struct sdio_func *func = wl_to_func(wl);
sdio_claim_host(func);
sdio_release_irq(func);
sdio_release_host(func);
}
/* Interrupts when using dedicated WLAN_IRQ pin */
static irqreturn_t wl1251_line_irq(int irq, void *cookie)
{
struct wl1251 *wl = cookie;
ieee80211_queue_work(wl->hw, &wl->irq_work);
return IRQ_HANDLED;
}
static void wl1251_enable_line_irq(struct wl1251 *wl)
{
return enable_irq(wl->irq);
}
static void wl1251_disable_line_irq(struct wl1251 *wl)
{
return disable_irq(wl->irq);
}
static int wl1251_sdio_set_power(struct wl1251 *wl, bool enable)
{
struct sdio_func *func = wl_to_func(wl);
int ret;
if (enable) {
ret = pm_runtime_get_sync(&func->dev);
if (ret < 0) {
pm_runtime_put_sync(&func->dev);
goto out;
}
sdio_claim_host(func);
sdio_enable_func(func);
sdio_release_host(func);
} else {
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
ret = pm_runtime_put_sync(&func->dev);
if (ret < 0)
goto out;
}
out:
return ret;
}
static struct wl1251_if_operations wl1251_sdio_ops = {
.read = wl1251_sdio_read,
.write = wl1251_sdio_write,
.write_elp = wl1251_sdio_write_elp,
.read_elp = wl1251_sdio_read_elp,
.reset = wl1251_sdio_reset,
.power = wl1251_sdio_set_power,
};
static int wl1251_sdio_probe(struct sdio_func *func,
const struct sdio_device_id *id)
{
int ret;
struct wl1251 *wl;
struct ieee80211_hw *hw;
struct wl1251_sdio *wl_sdio;
struct device_node *np = func->dev.of_node;
hw = wl1251_alloc_hw();
if (IS_ERR(hw))
return PTR_ERR(hw);
wl = hw->priv;
wl_sdio = kzalloc(sizeof(*wl_sdio), GFP_KERNEL);
if (wl_sdio == NULL) {
ret = -ENOMEM;
goto out_free_hw;
}
sdio_claim_host(func);
ret = sdio_enable_func(func);
if (ret)
goto release;
sdio_set_block_size(func, 512);
sdio_release_host(func);
SET_IEEE80211_DEV(hw, &func->dev);
wl_sdio->func = func;
wl->if_priv = wl_sdio;
wl->if_ops = &wl1251_sdio_ops;
if (np) {
wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
wl->irq = of_irq_get(np, 0);
if (wl->irq == -EPROBE_DEFER) {
ret = -EPROBE_DEFER;
goto disable;
}
}
if (wl->irq) {
irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = request_irq(wl->irq, wl1251_line_irq, 0, "wl1251", wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
goto disable;
}
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
wl1251_info("using dedicated interrupt line");
} else {
wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
wl1251_info("using SDIO interrupt");
}
ret = wl1251_init_ieee80211(wl);
if (ret)
goto out_free_irq;
sdio_set_drvdata(func, wl);
/* Tell PM core that we don't need the card to be powered now */
pm_runtime_put_noidle(&func->dev);
return ret;
out_free_irq:
if (wl->irq)
free_irq(wl->irq, wl);
disable:
sdio_claim_host(func);
sdio_disable_func(func);
release:
sdio_release_host(func);
kfree(wl_sdio);
out_free_hw:
wl1251_free_hw(wl);
return ret;
}
static void wl1251_sdio_remove(struct sdio_func *func)
{
struct wl1251 *wl = sdio_get_drvdata(func);
struct wl1251_sdio *wl_sdio = wl->if_priv;
	/* Undo the pm_runtime_put_noidle() done in wl1251_sdio_probe() */
pm_runtime_get_noresume(&func->dev);
if (wl->irq)
free_irq(wl->irq, wl);
wl1251_free_hw(wl);
kfree(wl_sdio);
sdio_claim_host(func);
sdio_release_irq(func);
sdio_disable_func(func);
sdio_release_host(func);
}
static int wl1251_suspend(struct device *dev)
{
/*
* Tell MMC/SDIO core it's OK to power down the card
* (if it isn't already), but not to remove it completely.
*/
return 0;
}
static int wl1251_resume(struct device *dev)
{
return 0;
}
static const struct dev_pm_ops wl1251_sdio_pm_ops = {
.suspend = wl1251_suspend,
.resume = wl1251_resume,
};
static struct sdio_driver wl1251_sdio_driver = {
.name = "wl1251_sdio",
.id_table = wl1251_devices,
.probe = wl1251_sdio_probe,
.remove = wl1251_sdio_remove,
.drv.pm = &wl1251_sdio_pm_ops,
};
static int __init wl1251_sdio_init(void)
{
int err;
err = sdio_register_driver(&wl1251_sdio_driver);
if (err)
wl1251_error("failed to register sdio driver: %d", err);
return err;
}
static void __exit wl1251_sdio_exit(void)
{
sdio_unregister_driver(&wl1251_sdio_driver);
wl1251_notice("unloaded");
}
module_init(wl1251_sdio_init);
module_exit(wl1251_sdio_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <[email protected]>");
|
linux-master
|
drivers/net/wireless/ti/wl1251/sdio.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1251
*
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*/
#include <linux/skbuff.h>
#include <linux/gfp.h>
#include <net/mac80211.h>
#include "wl1251.h"
#include "reg.h"
#include "io.h"
#include "rx.h"
#include "cmd.h"
#include "acx.h"
static void wl1251_rx_header(struct wl1251 *wl,
struct wl1251_rx_descriptor *desc)
{
u32 rx_packet_ring_addr;
rx_packet_ring_addr = wl->data_path->rx_packet_ring_addr;
if (wl->rx_current_buffer)
rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;
wl1251_mem_read(wl, rx_packet_ring_addr, desc, sizeof(*desc));
}
static void wl1251_rx_status(struct wl1251 *wl,
struct wl1251_rx_descriptor *desc,
struct ieee80211_rx_status *status,
u8 beacon)
{
u64 mactime;
int ret;
memset(status, 0, sizeof(struct ieee80211_rx_status));
status->band = NL80211_BAND_2GHZ;
status->mactime = desc->timestamp;
/*
* The rx status timestamp is a 32 bits value while the TSF is a
* 64 bits one.
* For IBSS merging, TSF is mandatory, so we have to get it
* somehow, so we ask for ACX_TSF_INFO.
* That could be moved to the get_tsf() hook, but unfortunately,
* this one must be atomic, while our SPI routines can sleep.
*/
if ((wl->bss_type == BSS_TYPE_IBSS) && beacon) {
ret = wl1251_acx_tsf_info(wl, &mactime);
if (ret == 0)
status->mactime = mactime;
}
status->signal = desc->rssi;
/*
* FIXME: guessing that snr needs to be divided by two, otherwise
* the values don't make any sense
*/
wl->noise = desc->rssi - desc->snr / 2;
status->freq = ieee80211_channel_to_frequency(desc->channel,
status->band);
status->flag |= RX_FLAG_MACTIME_START;
if (!wl->monitor_present && (desc->flags & RX_DESC_ENCRYPTION_MASK)) {
status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
if (likely(!(desc->flags & RX_DESC_DECRYPT_FAIL)))
status->flag |= RX_FLAG_DECRYPTED;
if (unlikely(desc->flags & RX_DESC_MIC_FAIL))
status->flag |= RX_FLAG_MMIC_ERROR;
}
if (unlikely(!(desc->flags & RX_DESC_VALID_FCS)))
status->flag |= RX_FLAG_FAILED_FCS_CRC;
switch (desc->rate) {
	/* skip 1 and 12 Mbps because they share the same code (0x0a) */
case RATE_2MBPS:
status->rate_idx = 1;
break;
case RATE_5_5MBPS:
status->rate_idx = 2;
break;
case RATE_11MBPS:
status->rate_idx = 3;
break;
case RATE_6MBPS:
status->rate_idx = 4;
break;
case RATE_9MBPS:
status->rate_idx = 5;
break;
case RATE_18MBPS:
status->rate_idx = 7;
break;
case RATE_24MBPS:
status->rate_idx = 8;
break;
case RATE_36MBPS:
status->rate_idx = 9;
break;
case RATE_48MBPS:
status->rate_idx = 10;
break;
case RATE_54MBPS:
status->rate_idx = 11;
break;
}
/* for 1 and 12 Mbps we have to check the modulation */
if (desc->rate == RATE_1MBPS) {
if (!(desc->mod_pre & OFDM_RATE_BIT))
/* CCK -> RATE_1MBPS */
status->rate_idx = 0;
else
/* OFDM -> RATE_12MBPS */
status->rate_idx = 6;
}
if (desc->mod_pre & SHORT_PREAMBLE_BIT)
status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
}
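/*
 * Sketch only, not driver code: the same mapping expressed as one
 * hypothetical helper, with the shared 0x0a code of RATE_1MBPS and
 * RATE_12MBPS resolved via the OFDM_RATE_BIT in mod_pre, exactly as
 * wl1251_rx_status() does above:
 */
static u8 __maybe_unused wl1251_rate_to_idx_example(u8 rate, u8 mod_pre)
{
	if (rate == RATE_1MBPS)		/* also matches RATE_12MBPS */
		return (mod_pre & OFDM_RATE_BIT) ? 6 : 0;
	switch (rate) {
	case RATE_2MBPS:	return 1;
	case RATE_5_5MBPS:	return 2;
	case RATE_11MBPS:	return 3;
	case RATE_6MBPS:	return 4;
	case RATE_9MBPS:	return 5;
	case RATE_18MBPS:	return 7;
	case RATE_24MBPS:	return 8;
	case RATE_36MBPS:	return 9;
	case RATE_48MBPS:	return 10;
	case RATE_54MBPS:	return 11;
	default:		return 0;
	}
}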
static void wl1251_rx_body(struct wl1251 *wl,
struct wl1251_rx_descriptor *desc)
{
struct sk_buff *skb;
struct ieee80211_rx_status status;
u8 *rx_buffer, beacon = 0;
u16 length, *fc;
u32 curr_id, last_id_inc, rx_packet_ring_addr;
length = WL1251_RX_ALIGN(desc->length - PLCP_HEADER_LENGTH);
curr_id = (desc->flags & RX_DESC_SEQNUM_MASK) >> RX_DESC_PACKETID_SHIFT;
last_id_inc = (wl->rx_last_id + 1) % (RX_MAX_PACKET_ID + 1);
if (last_id_inc != curr_id) {
wl1251_warning("curr ID:%d, last ID inc:%d",
curr_id, last_id_inc);
wl->rx_last_id = curr_id;
} else {
wl->rx_last_id = last_id_inc;
}
rx_packet_ring_addr = wl->data_path->rx_packet_ring_addr +
sizeof(struct wl1251_rx_descriptor) + 20;
if (wl->rx_current_buffer)
rx_packet_ring_addr += wl->data_path->rx_packet_ring_chunk_size;
skb = __dev_alloc_skb(length, GFP_KERNEL);
if (!skb) {
wl1251_error("Couldn't allocate RX frame");
return;
}
rx_buffer = skb_put(skb, length);
wl1251_mem_read(wl, rx_packet_ring_addr, rx_buffer, length);
/* The actual length doesn't include the target's alignment */
skb_trim(skb, desc->length - PLCP_HEADER_LENGTH);
fc = (u16 *)skb->data;
if ((*fc & IEEE80211_FCTL_STYPE) == IEEE80211_STYPE_BEACON)
beacon = 1;
wl1251_rx_status(wl, desc, &status, beacon);
wl1251_debug(DEBUG_RX, "rx skb 0x%p: %d B %s", skb, skb->len,
beacon ? "beacon" : "");
memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
ieee80211_rx_ni(wl->hw, skb);
}
static void wl1251_rx_ack(struct wl1251 *wl)
{
u32 data, addr;
if (wl->rx_current_buffer) {
addr = ACX_REG_INTERRUPT_TRIG_H;
data = INTR_TRIG_RX_PROC1;
} else {
addr = ACX_REG_INTERRUPT_TRIG;
data = INTR_TRIG_RX_PROC0;
}
wl1251_reg_write32(wl, addr, data);
/* Toggle buffer ring */
wl->rx_current_buffer = !wl->rx_current_buffer;
}
void wl1251_rx(struct wl1251 *wl)
{
struct wl1251_rx_descriptor *rx_desc;
if (wl->state != WL1251_STATE_ON)
return;
rx_desc = wl->rx_descriptor;
/* We first read the frame's header */
wl1251_rx_header(wl, rx_desc);
/* Now we can read the body */
wl1251_rx_body(wl, rx_desc);
/* Finally, we need to ACK the RX */
wl1251_rx_ack(wl);
}
|
linux-master
|
drivers/net/wireless/ti/wl1251/rx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1251
*
* Copyright (C) 2008 Nokia Corporation
*/
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/swab.h>
#include <linux/crc7.h>
#include <linux/spi/spi.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/regulator/consumer.h>
#include "wl1251.h"
#include "reg.h"
#include "spi.h"
struct wl1251_spi {
struct spi_device *spi;
struct gpio_desc *power_gpio;
};
static irqreturn_t wl1251_irq(int irq, void *cookie)
{
struct wl1251 *wl;
wl1251_debug(DEBUG_IRQ, "IRQ");
wl = cookie;
ieee80211_queue_work(wl->hw, &wl->irq_work);
return IRQ_HANDLED;
}
static void wl1251_spi_reset(struct wl1251 *wl)
{
struct wl1251_spi *wl_spi = wl->if_priv;
u8 *cmd;
struct spi_transfer t;
struct spi_message m;
cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
wl1251_error("could not allocate cmd for spi reset");
return;
}
memset(&t, 0, sizeof(t));
spi_message_init(&m);
memset(cmd, 0xff, WSPI_INIT_CMD_LEN);
t.tx_buf = cmd;
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
spi_sync(wl_spi->spi, &m);
wl1251_dump(DEBUG_SPI, "spi reset -> ", cmd, WSPI_INIT_CMD_LEN);
kfree(cmd);
}
static void wl1251_spi_wake(struct wl1251 *wl)
{
struct wl1251_spi *wl_spi = wl->if_priv;
struct spi_transfer t;
struct spi_message m;
u8 *cmd = kzalloc(WSPI_INIT_CMD_LEN, GFP_KERNEL);
if (!cmd) {
wl1251_error("could not allocate cmd for spi init");
return;
}
memset(&t, 0, sizeof(t));
spi_message_init(&m);
/* Set WSPI_INIT_COMMAND
	 * the data is sent from MSB to LSB
*/
cmd[0] = 0xff;
cmd[1] = 0xff;
cmd[2] = WSPI_INIT_CMD_START | WSPI_INIT_CMD_TX;
cmd[3] = 0;
cmd[4] = 0;
cmd[5] = HW_ACCESS_WSPI_INIT_CMD_MASK << 3;
cmd[5] |= HW_ACCESS_WSPI_FIXED_BUSY_LEN & WSPI_INIT_CMD_FIXEDBUSY_LEN;
cmd[6] = WSPI_INIT_CMD_IOD | WSPI_INIT_CMD_IP | WSPI_INIT_CMD_CS
| WSPI_INIT_CMD_WSPI | WSPI_INIT_CMD_WS;
if (HW_ACCESS_WSPI_FIXED_BUSY_LEN == 0)
cmd[6] |= WSPI_INIT_CMD_DIS_FIXEDBUSY;
else
cmd[6] |= WSPI_INIT_CMD_EN_FIXEDBUSY;
cmd[7] = crc7_be(0, cmd+2, WSPI_INIT_CMD_CRC_LEN) | WSPI_INIT_CMD_END;
/*
* The above is the logical order; it must actually be stored
* in the buffer byte-swapped.
*/
__swab32s((u32 *)cmd);
__swab32s((u32 *)cmd+1);
t.tx_buf = cmd;
t.len = WSPI_INIT_CMD_LEN;
spi_message_add_tail(&t, &m);
spi_sync(wl_spi->spi, &m);
wl1251_dump(DEBUG_SPI, "spi init -> ", cmd, WSPI_INIT_CMD_LEN);
kfree(cmd);
}
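/*
 * Worked example (hypothetical byte values): if the logical command is
 * cmd[0..7] = ff ff 5d 00 00 18 34 5a, the two __swab32s() calls above
 * reverse each 32-bit word in place, so the buffer actually sent reads
 * 00 5d ff ff 5a 34 18 00.
 */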
static void wl1251_spi_reset_wake(struct wl1251 *wl)
{
wl1251_spi_reset(wl);
wl1251_spi_wake(wl);
}
static void wl1251_spi_read(struct wl1251 *wl, int addr, void *buf,
size_t len)
{
struct wl1251_spi *wl_spi = wl->if_priv;
struct spi_transfer t[3];
struct spi_message m;
u8 *busy_buf;
u32 *cmd;
cmd = &wl->buffer_cmd;
busy_buf = wl->buffer_busyword;
*cmd = 0;
*cmd |= WSPI_CMD_READ;
*cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
*cmd |= addr & WSPI_CMD_BYTE_ADDR;
spi_message_init(&m);
memset(t, 0, sizeof(t));
t[0].tx_buf = cmd;
t[0].len = 4;
spi_message_add_tail(&t[0], &m);
/* Busy and non busy words read */
t[1].rx_buf = busy_buf;
t[1].len = WL1251_BUSY_WORD_LEN;
spi_message_add_tail(&t[1], &m);
t[2].rx_buf = buf;
t[2].len = len;
spi_message_add_tail(&t[2], &m);
spi_sync(wl_spi->spi, &m);
/* FIXME: check busy words */
wl1251_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
wl1251_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
}
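/*
 * Worked example (hypothetical transfer): reading 8 bytes from chip
 * address 0x1c composes the single command word
 * WSPI_CMD_READ | ((8 << WSPI_CMD_BYTE_LENGTH_OFFSET) &
 * WSPI_CMD_BYTE_LENGTH) | (0x1c & WSPI_CMD_BYTE_ADDR), sent in t[0];
 * the fixed busy words then come back in t[1] and the 8-byte payload
 * in t[2].
 */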
static void wl1251_spi_write(struct wl1251 *wl, int addr, void *buf,
size_t len)
{
struct wl1251_spi *wl_spi = wl->if_priv;
struct spi_transfer t[2];
struct spi_message m;
u32 *cmd;
cmd = &wl->buffer_cmd;
*cmd = 0;
*cmd |= WSPI_CMD_WRITE;
*cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
*cmd |= addr & WSPI_CMD_BYTE_ADDR;
spi_message_init(&m);
memset(t, 0, sizeof(t));
t[0].tx_buf = cmd;
t[0].len = sizeof(*cmd);
spi_message_add_tail(&t[0], &m);
t[1].tx_buf = buf;
t[1].len = len;
spi_message_add_tail(&t[1], &m);
spi_sync(wl_spi->spi, &m);
wl1251_dump(DEBUG_SPI, "spi_write cmd -> ", cmd, sizeof(*cmd));
wl1251_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
}
static void wl1251_spi_enable_irq(struct wl1251 *wl)
{
return enable_irq(wl->irq);
}
static void wl1251_spi_disable_irq(struct wl1251 *wl)
{
return disable_irq(wl->irq);
}
static int wl1251_spi_set_power(struct wl1251 *wl, bool enable)
{
struct wl1251_spi *wl_spi = wl->if_priv;
if (wl_spi->power_gpio)
gpiod_set_value_cansleep(wl_spi->power_gpio, enable);
return 0;
}
static const struct wl1251_if_operations wl1251_spi_ops = {
.read = wl1251_spi_read,
.write = wl1251_spi_write,
.reset = wl1251_spi_reset_wake,
.enable_irq = wl1251_spi_enable_irq,
.disable_irq = wl1251_spi_disable_irq,
.power = wl1251_spi_set_power,
};
static int wl1251_spi_probe(struct spi_device *spi)
{
struct device_node *np = spi->dev.of_node;
struct ieee80211_hw *hw;
struct wl1251_spi *wl_spi;
struct wl1251 *wl;
int ret;
if (!np)
return -ENODEV;
wl_spi = devm_kzalloc(&spi->dev, sizeof(*wl_spi), GFP_KERNEL);
if (!wl_spi)
return -ENOMEM;
wl_spi->spi = spi;
hw = wl1251_alloc_hw();
if (IS_ERR(hw))
return PTR_ERR(hw);
wl = hw->priv;
SET_IEEE80211_DEV(hw, &spi->dev);
spi_set_drvdata(spi, wl);
wl->if_priv = wl_spi;
wl->if_ops = &wl1251_spi_ops;
/* This is the only SPI value that we need to set here, the rest
* comes from the board-peripherals file
*/
spi->bits_per_word = 32;
ret = spi_setup(spi);
if (ret < 0) {
wl1251_error("spi_setup failed");
goto out_free;
}
wl->use_eeprom = of_property_read_bool(np, "ti,wl1251-has-eeprom");
wl_spi->power_gpio = devm_gpiod_get_optional(&spi->dev, "ti,power",
GPIOD_OUT_LOW);
ret = PTR_ERR_OR_ZERO(wl_spi->power_gpio);
if (ret) {
if (ret != -EPROBE_DEFER)
wl1251_error("Failed to request gpio: %d\n", ret);
goto out_free;
}
gpiod_set_consumer_name(wl_spi->power_gpio, "wl1251 power");
wl->irq = spi->irq;
if (wl->irq < 0) {
wl1251_error("irq missing in platform data");
ret = -ENODEV;
goto out_free;
}
irq_set_status_flags(wl->irq, IRQ_NOAUTOEN);
ret = devm_request_irq(&spi->dev, wl->irq, wl1251_irq, 0,
DRIVER_NAME, wl);
if (ret < 0) {
wl1251_error("request_irq() failed: %d", ret);
goto out_free;
}
irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
wl->vio = devm_regulator_get(&spi->dev, "vio");
if (IS_ERR(wl->vio)) {
ret = PTR_ERR(wl->vio);
wl1251_error("vio regulator missing: %d", ret);
goto out_free;
}
ret = regulator_enable(wl->vio);
if (ret)
goto out_free;
ret = wl1251_init_ieee80211(wl);
if (ret)
goto disable_regulator;
return 0;
disable_regulator:
regulator_disable(wl->vio);
out_free:
ieee80211_free_hw(hw);
return ret;
}
static void wl1251_spi_remove(struct spi_device *spi)
{
struct wl1251 *wl = spi_get_drvdata(spi);
wl1251_free_hw(wl);
regulator_disable(wl->vio);
}
static struct spi_driver wl1251_spi_driver = {
.driver = {
.name = DRIVER_NAME,
},
.probe = wl1251_spi_probe,
.remove = wl1251_spi_remove,
};
module_spi_driver(wl1251_spi_driver);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <[email protected]>");
MODULE_ALIAS("spi:wl1251");
|
linux-master
|
drivers/net/wireless/ti/wl1251/spi.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1251
*
* Copyright (C) 2008 Nokia Corporation
*/
#include "reg.h"
#include "ps.h"
#include "cmd.h"
#include "io.h"
/* in ms */
#define WL1251_WAKEUP_TIMEOUT 100
void wl1251_elp_work(struct work_struct *work)
{
struct delayed_work *dwork;
struct wl1251 *wl;
dwork = to_delayed_work(work);
wl = container_of(dwork, struct wl1251, elp_work);
wl1251_debug(DEBUG_PSM, "elp work");
mutex_lock(&wl->mutex);
if (wl->elp || wl->station_mode == STATION_ACTIVE_MODE)
goto out;
wl1251_debug(DEBUG_PSM, "chip to elp");
wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
wl->elp = true;
out:
mutex_unlock(&wl->mutex);
}
#define ELP_ENTRY_DELAY 5
/* Routines to toggle sleep mode while in ELP */
void wl1251_ps_elp_sleep(struct wl1251 *wl)
{
unsigned long delay;
if (wl->station_mode != STATION_ACTIVE_MODE) {
delay = msecs_to_jiffies(ELP_ENTRY_DELAY);
ieee80211_queue_delayed_work(wl->hw, &wl->elp_work, delay);
}
}
int wl1251_ps_elp_wakeup(struct wl1251 *wl)
{
unsigned long timeout, start;
u32 elp_reg;
cancel_delayed_work(&wl->elp_work);
if (!wl->elp)
return 0;
wl1251_debug(DEBUG_PSM, "waking up chip from elp");
start = jiffies;
timeout = jiffies + msecs_to_jiffies(WL1251_WAKEUP_TIMEOUT);
wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
/*
* FIXME: we should wait for irq from chip but, as a temporary
* solution to simplify locking, let's poll instead
*/
while (!(elp_reg & ELPCTRL_WLAN_READY)) {
if (time_after(jiffies, timeout)) {
wl1251_error("elp wakeup timeout");
return -ETIMEDOUT;
}
msleep(1);
elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
}
wl1251_debug(DEBUG_PSM, "wakeup time: %u ms",
jiffies_to_msecs(jiffies - start));
wl->elp = false;
return 0;
}
int wl1251_ps_set_mode(struct wl1251 *wl, enum wl1251_station_mode mode)
{
int ret;
switch (mode) {
case STATION_POWER_SAVE_MODE:
wl1251_debug(DEBUG_PSM, "entering psm");
/* enable beacon filtering */
ret = wl1251_acx_beacon_filter_opt(wl, true);
if (ret < 0)
return ret;
ret = wl1251_acx_wake_up_conditions(wl,
WAKE_UP_EVENT_DTIM_BITMAP,
wl->listen_int);
if (ret < 0)
return ret;
ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_ENABLE,
WL1251_DEFAULT_BET_CONSECUTIVE);
if (ret < 0)
return ret;
ret = wl1251_cmd_ps_mode(wl, CHIP_POWER_SAVE_MODE);
if (ret < 0)
return ret;
ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
if (ret < 0)
return ret;
break;
case STATION_IDLE:
wl1251_debug(DEBUG_PSM, "entering idle");
ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_ELP);
if (ret < 0)
return ret;
ret = wl1251_cmd_template_set(wl, CMD_DISCONNECT, NULL, 0);
if (ret < 0)
return ret;
break;
case STATION_ACTIVE_MODE:
default:
wl1251_debug(DEBUG_PSM, "leaving psm");
ret = wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
if (ret < 0)
return ret;
/* disable BET */
ret = wl1251_acx_bet_enable(wl, WL1251_ACX_BET_DISABLE,
WL1251_DEFAULT_BET_CONSECUTIVE);
if (ret < 0)
return ret;
/* disable beacon filtering */
ret = wl1251_acx_beacon_filter_opt(wl, false);
if (ret < 0)
return ret;
ret = wl1251_acx_wake_up_conditions(wl,
WAKE_UP_EVENT_DTIM_BITMAP,
wl->listen_int);
if (ret < 0)
return ret;
ret = wl1251_cmd_ps_mode(wl, CHIP_ACTIVE_MODE);
if (ret < 0)
return ret;
break;
}
wl->station_mode = mode;
return ret;
}
|
linux-master
|
drivers/net/wireless/ti/wl1251/ps.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl12xx
*
* Copyright (C) 2008 Nokia Corporation
*/
#include "wl1251.h"
#include "reg.h"
#include "io.h"
/* FIXME: this is static data nowadays and the table can be removed */
static enum wl12xx_acx_int_reg wl1251_io_reg_table[ACX_REG_TABLE_LEN] = {
[ACX_REG_INTERRUPT_TRIG] = (REGISTERS_BASE + 0x0474),
[ACX_REG_INTERRUPT_TRIG_H] = (REGISTERS_BASE + 0x0478),
[ACX_REG_INTERRUPT_MASK] = (REGISTERS_BASE + 0x0494),
[ACX_REG_HINT_MASK_SET] = (REGISTERS_BASE + 0x0498),
[ACX_REG_HINT_MASK_CLR] = (REGISTERS_BASE + 0x049C),
[ACX_REG_INTERRUPT_NO_CLEAR] = (REGISTERS_BASE + 0x04B0),
[ACX_REG_INTERRUPT_CLEAR] = (REGISTERS_BASE + 0x04A4),
[ACX_REG_INTERRUPT_ACK] = (REGISTERS_BASE + 0x04A8),
[ACX_REG_SLV_SOFT_RESET] = (REGISTERS_BASE + 0x0000),
[ACX_REG_EE_START] = (REGISTERS_BASE + 0x080C),
[ACX_REG_ECPU_CONTROL] = (REGISTERS_BASE + 0x0804)
};
static int wl1251_translate_reg_addr(struct wl1251 *wl, int addr)
{
/* If the address is lower than REGISTERS_BASE, it means that this is
* a chip-specific register address, so look it up in the registers
* table */
if (addr < REGISTERS_BASE) {
/* Make sure we don't go over the table */
if (addr >= ACX_REG_TABLE_LEN) {
wl1251_error("address out of range (%d)", addr);
return -EINVAL;
}
addr = wl1251_io_reg_table[addr];
}
return addr - wl->physical_reg_addr + wl->virtual_reg_addr;
}
static int wl1251_translate_mem_addr(struct wl1251 *wl, int addr)
{
return addr - wl->physical_mem_addr + wl->virtual_mem_addr;
}
void wl1251_mem_read(struct wl1251 *wl, int addr, void *buf, size_t len)
{
int physical;
physical = wl1251_translate_mem_addr(wl, addr);
wl->if_ops->read(wl, physical, buf, len);
}
void wl1251_mem_write(struct wl1251 *wl, int addr, void *buf, size_t len)
{
int physical;
physical = wl1251_translate_mem_addr(wl, addr);
wl->if_ops->write(wl, physical, buf, len);
}
u32 wl1251_mem_read32(struct wl1251 *wl, int addr)
{
return wl1251_read32(wl, wl1251_translate_mem_addr(wl, addr));
}
void wl1251_mem_write32(struct wl1251 *wl, int addr, u32 val)
{
wl1251_write32(wl, wl1251_translate_mem_addr(wl, addr), val);
}
u32 wl1251_reg_read32(struct wl1251 *wl, int addr)
{
return wl1251_read32(wl, wl1251_translate_reg_addr(wl, addr));
}
void wl1251_reg_write32(struct wl1251 *wl, int addr, u32 val)
{
wl1251_write32(wl, wl1251_translate_reg_addr(wl, addr), val);
}
/* Set the partitions to access the chip addresses.
*
* There are two VIRTUAL partitions (the memory partition and the
* registers partition), which are mapped to two different areas of the
* PHYSICAL (hardware) memory. This function also makes other checks to
* ensure that the partitions are not overlapping. In the diagram below, the
* memory partition comes before the register partition, but the opposite is
* also supported.
*
* PHYSICAL address
* space
*
* | |
* ...+----+--> mem_start
* VIRTUAL address ... | |
* space ... | | [PART_0]
* ... | |
* 0x00000000 <--+----+... ...+----+--> mem_start + mem_size
* | | ... | |
* |MEM | ... | |
* | | ... | |
 * part_size <--+----+... | | (unused area)
* | | ... | |
* |REG | ... | |
* part_size | | ... | |
* + <--+----+... ...+----+--> reg_start
* reg_size ... | |
* ... | | [PART_1]
* ... | |
* ...+----+--> reg_start + reg_size
* | |
*
*/
void wl1251_set_partition(struct wl1251 *wl,
u32 mem_start, u32 mem_size,
u32 reg_start, u32 reg_size)
{
struct wl1251_partition_set *partition;
partition = kmalloc(sizeof(*partition), GFP_KERNEL);
if (!partition) {
wl1251_error("can not allocate partition buffer");
return;
}
wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
mem_start, mem_size);
wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
reg_start, reg_size);
/* Make sure that the two partitions together don't exceed the
* address range */
if ((mem_size + reg_size) > HW_ACCESS_MEMORY_MAX_RANGE) {
wl1251_debug(DEBUG_SPI, "Total size exceeds maximum virtual"
" address range. Truncating partition[0].");
mem_size = HW_ACCESS_MEMORY_MAX_RANGE - reg_size;
wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
mem_start, mem_size);
wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
reg_start, reg_size);
}
if ((mem_start < reg_start) &&
((mem_start + mem_size) > reg_start)) {
/* Guarantee that the memory partition doesn't overlap the
* registers partition */
wl1251_debug(DEBUG_SPI, "End of partition[0] is "
"overlapping partition[1]. Adjusted.");
mem_size = reg_start - mem_start;
wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
mem_start, mem_size);
wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
reg_start, reg_size);
} else if ((reg_start < mem_start) &&
((reg_start + reg_size) > mem_start)) {
/* Guarantee that the register partition doesn't overlap the
* memory partition */
wl1251_debug(DEBUG_SPI, "End of partition[1] is"
" overlapping partition[0]. Adjusted.");
reg_size = mem_start - reg_start;
wl1251_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
mem_start, mem_size);
wl1251_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
reg_start, reg_size);
}
partition->mem.start = mem_start;
partition->mem.size = mem_size;
partition->reg.start = reg_start;
partition->reg.size = reg_size;
wl->physical_mem_addr = mem_start;
wl->physical_reg_addr = reg_start;
wl->virtual_mem_addr = 0;
wl->virtual_reg_addr = mem_size;
wl->if_ops->write(wl, HW_ACCESS_PART0_SIZE_ADDR, partition,
sizeof(*partition));
kfree(partition);
}
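/*
 * Worked example (hypothetical numbers): after
 * wl1251_set_partition(wl, 0x28000, 0x14000, 0x300000, 0x8800),
 * virtual_mem_addr = 0 and virtual_reg_addr = mem_size = 0x14000, so a
 * memory access to chip address 0x28004 is issued at bus offset
 * 0x28004 - 0x28000 + 0 = 0x4, and a register access to 0x300474 at
 * 0x300474 - 0x300000 + 0x14000 = 0x14474.
 */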
|
linux-master
|
drivers/net/wireless/ti/wl1251/io.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1251
*
* Copyright (C) 2009 Nokia Corporation
*/
#include "debugfs.h"
#include <linux/skbuff.h>
#include <linux/slab.h>
#include "wl1251.h"
#include "acx.h"
#include "ps.h"
/* ms */
#define WL1251_DEBUGFS_STATS_LIFETIME 1000
/* debugfs macros idea from mac80211 */
#define DEBUGFS_READONLY_FILE(name, buflen, fmt, value...) \
static ssize_t name## _read(struct file *file, char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
struct wl1251 *wl = file->private_data; \
char buf[buflen]; \
int res; \
\
res = scnprintf(buf, buflen, fmt "\n", ##value); \
return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
} \
\
static const struct file_operations name## _ops = { \
.read = name## _read, \
.open = simple_open, \
.llseek = generic_file_llseek, \
};
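/*
 * Expansion example (illustrative): DEBUGFS_READONLY_FILE(retry_count,
 * 20, "%u", wl->stats.retry_count) defines a retry_count_read() handler
 * that formats the counter into a 20-byte buffer, plus a matching
 * retry_count_ops file_operations table; that exact instantiation
 * appears further down in this file.
 */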
#define DEBUGFS_ADD(name, parent) \
wl->debugfs.name = debugfs_create_file(#name, 0400, parent, \
					       wl, &name## _ops)
#define DEBUGFS_DEL(name) \
do { \
debugfs_remove(wl->debugfs.name); \
wl->debugfs.name = NULL; \
} while (0)
#define DEBUGFS_FWSTATS_FILE(sub, name, buflen, fmt) \
static ssize_t sub## _ ##name## _read(struct file *file, \
char __user *userbuf, \
size_t count, loff_t *ppos) \
{ \
struct wl1251 *wl = file->private_data; \
char buf[buflen]; \
int res; \
\
wl1251_debugfs_update_stats(wl); \
\
res = scnprintf(buf, buflen, fmt "\n", \
wl->stats.fw_stats->sub.name); \
return simple_read_from_buffer(userbuf, count, ppos, buf, res); \
} \
\
static const struct file_operations sub## _ ##name## _ops = { \
.read = sub## _ ##name## _read, \
.open = simple_open, \
.llseek = generic_file_llseek, \
};
#define DEBUGFS_FWSTATS_ADD(sub, name) \
DEBUGFS_ADD(sub## _ ##name, wl->debugfs.fw_statistics)
#define DEBUGFS_FWSTATS_DEL(sub, name) \
DEBUGFS_DEL(sub## _ ##name)
static void wl1251_debugfs_update_stats(struct wl1251 *wl)
{
int ret;
mutex_lock(&wl->mutex);
ret = wl1251_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
if (wl->state == WL1251_STATE_ON &&
time_after(jiffies, wl->stats.fw_stats_update +
msecs_to_jiffies(WL1251_DEBUGFS_STATS_LIFETIME))) {
wl1251_acx_statistics(wl, wl->stats.fw_stats);
wl->stats.fw_stats_update = jiffies;
}
wl1251_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
}
DEBUGFS_FWSTATS_FILE(tx, internal_desc_overflow, 20, "%u");
DEBUGFS_FWSTATS_FILE(rx, out_of_mem, 20, "%u");
DEBUGFS_FWSTATS_FILE(rx, hdr_overflow, 20, "%u");
DEBUGFS_FWSTATS_FILE(rx, hw_stuck, 20, "%u");
DEBUGFS_FWSTATS_FILE(rx, dropped, 20, "%u");
DEBUGFS_FWSTATS_FILE(rx, fcs_err, 20, "%u");
DEBUGFS_FWSTATS_FILE(rx, xfr_hint_trig, 20, "%u");
DEBUGFS_FWSTATS_FILE(rx, path_reset, 20, "%u");
DEBUGFS_FWSTATS_FILE(rx, reset_counter, 20, "%u");
DEBUGFS_FWSTATS_FILE(dma, rx_requested, 20, "%u");
DEBUGFS_FWSTATS_FILE(dma, rx_errors, 20, "%u");
DEBUGFS_FWSTATS_FILE(dma, tx_requested, 20, "%u");
DEBUGFS_FWSTATS_FILE(dma, tx_errors, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, cmd_cmplt, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, fiqs, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, rx_headers, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, rx_mem_overflow, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, rx_rdys, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, irqs, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, tx_procs, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, decrypt_done, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, dma0_done, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, dma1_done, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, tx_exch_complete, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, commands, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, rx_procs, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, hw_pm_mode_changes, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, host_acknowledges, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, pci_pm, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, wakeups, 20, "%u");
DEBUGFS_FWSTATS_FILE(isr, low_rssi, 20, "%u");
DEBUGFS_FWSTATS_FILE(wep, addr_key_count, 20, "%u");
DEBUGFS_FWSTATS_FILE(wep, default_key_count, 20, "%u");
/* skipping wep.reserved */
DEBUGFS_FWSTATS_FILE(wep, key_not_found, 20, "%u");
DEBUGFS_FWSTATS_FILE(wep, decrypt_fail, 20, "%u");
DEBUGFS_FWSTATS_FILE(wep, packets, 20, "%u");
DEBUGFS_FWSTATS_FILE(wep, interrupt, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, ps_enter, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, elp_enter, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, missing_bcns, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, wake_on_host, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, wake_on_timer_exp, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, tx_with_ps, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, tx_without_ps, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, rcvd_beacons, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, power_save_off, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, enable_ps, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, disable_ps, 20, "%u");
DEBUGFS_FWSTATS_FILE(pwr, fix_tsf_ps, 20, "%u");
/* skipping cont_miss_bcns_spread for now */
DEBUGFS_FWSTATS_FILE(pwr, rcvd_awake_beacons, 20, "%u");
DEBUGFS_FWSTATS_FILE(mic, rx_pkts, 20, "%u");
DEBUGFS_FWSTATS_FILE(mic, calc_failure, 20, "%u");
DEBUGFS_FWSTATS_FILE(aes, encrypt_fail, 20, "%u");
DEBUGFS_FWSTATS_FILE(aes, decrypt_fail, 20, "%u");
DEBUGFS_FWSTATS_FILE(aes, encrypt_packets, 20, "%u");
DEBUGFS_FWSTATS_FILE(aes, decrypt_packets, 20, "%u");
DEBUGFS_FWSTATS_FILE(aes, encrypt_interrupt, 20, "%u");
DEBUGFS_FWSTATS_FILE(aes, decrypt_interrupt, 20, "%u");
DEBUGFS_FWSTATS_FILE(event, heart_beat, 20, "%u");
DEBUGFS_FWSTATS_FILE(event, calibration, 20, "%u");
DEBUGFS_FWSTATS_FILE(event, rx_mismatch, 20, "%u");
DEBUGFS_FWSTATS_FILE(event, rx_mem_empty, 20, "%u");
DEBUGFS_FWSTATS_FILE(event, rx_pool, 20, "%u");
DEBUGFS_FWSTATS_FILE(event, oom_late, 20, "%u");
DEBUGFS_FWSTATS_FILE(event, phy_transmit_error, 20, "%u");
DEBUGFS_FWSTATS_FILE(event, tx_stuck, 20, "%u");
DEBUGFS_FWSTATS_FILE(ps, pspoll_timeouts, 20, "%u");
DEBUGFS_FWSTATS_FILE(ps, upsd_timeouts, 20, "%u");
DEBUGFS_FWSTATS_FILE(ps, upsd_max_sptime, 20, "%u");
DEBUGFS_FWSTATS_FILE(ps, upsd_max_apturn, 20, "%u");
DEBUGFS_FWSTATS_FILE(ps, pspoll_max_apturn, 20, "%u");
DEBUGFS_FWSTATS_FILE(ps, pspoll_utilization, 20, "%u");
DEBUGFS_FWSTATS_FILE(ps, upsd_utilization, 20, "%u");
DEBUGFS_FWSTATS_FILE(rxpipe, rx_prep_beacon_drop, 20, "%u");
DEBUGFS_FWSTATS_FILE(rxpipe, descr_host_int_trig_rx_data, 20, "%u");
DEBUGFS_FWSTATS_FILE(rxpipe, beacon_buffer_thres_host_int_trig_rx_data,
20, "%u");
DEBUGFS_FWSTATS_FILE(rxpipe, missed_beacon_host_int_trig_rx_data, 20, "%u");
DEBUGFS_FWSTATS_FILE(rxpipe, tx_xfr_host_int_trig_rx_data, 20, "%u");
DEBUGFS_READONLY_FILE(retry_count, 20, "%u", wl->stats.retry_count);
DEBUGFS_READONLY_FILE(excessive_retries, 20, "%u",
wl->stats.excessive_retries);
static ssize_t tx_queue_len_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct wl1251 *wl = file->private_data;
u32 queue_len;
char buf[20];
int res;
queue_len = skb_queue_len(&wl->tx_queue);
res = scnprintf(buf, sizeof(buf), "%u\n", queue_len);
return simple_read_from_buffer(userbuf, count, ppos, buf, res);
}
static const struct file_operations tx_queue_len_ops = {
.read = tx_queue_len_read,
.open = simple_open,
.llseek = generic_file_llseek,
};
static ssize_t tx_queue_status_read(struct file *file, char __user *userbuf,
size_t count, loff_t *ppos)
{
struct wl1251 *wl = file->private_data;
char buf[3], status;
int len;
if (wl->tx_queue_stopped)
status = 's';
else
status = 'r';
len = scnprintf(buf, sizeof(buf), "%c\n", status);
return simple_read_from_buffer(userbuf, count, ppos, buf, len);
}
static const struct file_operations tx_queue_status_ops = {
.read = tx_queue_status_read,
.open = simple_open,
.llseek = generic_file_llseek,
};
static void wl1251_debugfs_delete_files(struct wl1251 *wl)
{
DEBUGFS_FWSTATS_DEL(tx, internal_desc_overflow);
DEBUGFS_FWSTATS_DEL(rx, out_of_mem);
DEBUGFS_FWSTATS_DEL(rx, hdr_overflow);
DEBUGFS_FWSTATS_DEL(rx, hw_stuck);
DEBUGFS_FWSTATS_DEL(rx, dropped);
DEBUGFS_FWSTATS_DEL(rx, fcs_err);
DEBUGFS_FWSTATS_DEL(rx, xfr_hint_trig);
DEBUGFS_FWSTATS_DEL(rx, path_reset);
DEBUGFS_FWSTATS_DEL(rx, reset_counter);
DEBUGFS_FWSTATS_DEL(dma, rx_requested);
DEBUGFS_FWSTATS_DEL(dma, rx_errors);
DEBUGFS_FWSTATS_DEL(dma, tx_requested);
DEBUGFS_FWSTATS_DEL(dma, tx_errors);
DEBUGFS_FWSTATS_DEL(isr, cmd_cmplt);
DEBUGFS_FWSTATS_DEL(isr, fiqs);
DEBUGFS_FWSTATS_DEL(isr, rx_headers);
DEBUGFS_FWSTATS_DEL(isr, rx_mem_overflow);
DEBUGFS_FWSTATS_DEL(isr, rx_rdys);
DEBUGFS_FWSTATS_DEL(isr, irqs);
DEBUGFS_FWSTATS_DEL(isr, tx_procs);
DEBUGFS_FWSTATS_DEL(isr, decrypt_done);
DEBUGFS_FWSTATS_DEL(isr, dma0_done);
DEBUGFS_FWSTATS_DEL(isr, dma1_done);
DEBUGFS_FWSTATS_DEL(isr, tx_exch_complete);
DEBUGFS_FWSTATS_DEL(isr, commands);
DEBUGFS_FWSTATS_DEL(isr, rx_procs);
DEBUGFS_FWSTATS_DEL(isr, hw_pm_mode_changes);
DEBUGFS_FWSTATS_DEL(isr, host_acknowledges);
DEBUGFS_FWSTATS_DEL(isr, pci_pm);
DEBUGFS_FWSTATS_DEL(isr, wakeups);
DEBUGFS_FWSTATS_DEL(isr, low_rssi);
DEBUGFS_FWSTATS_DEL(wep, addr_key_count);
DEBUGFS_FWSTATS_DEL(wep, default_key_count);
/* skipping wep.reserved */
DEBUGFS_FWSTATS_DEL(wep, key_not_found);
DEBUGFS_FWSTATS_DEL(wep, decrypt_fail);
DEBUGFS_FWSTATS_DEL(wep, packets);
DEBUGFS_FWSTATS_DEL(wep, interrupt);
DEBUGFS_FWSTATS_DEL(pwr, ps_enter);
DEBUGFS_FWSTATS_DEL(pwr, elp_enter);
DEBUGFS_FWSTATS_DEL(pwr, missing_bcns);
DEBUGFS_FWSTATS_DEL(pwr, wake_on_host);
DEBUGFS_FWSTATS_DEL(pwr, wake_on_timer_exp);
DEBUGFS_FWSTATS_DEL(pwr, tx_with_ps);
DEBUGFS_FWSTATS_DEL(pwr, tx_without_ps);
DEBUGFS_FWSTATS_DEL(pwr, rcvd_beacons);
DEBUGFS_FWSTATS_DEL(pwr, power_save_off);
DEBUGFS_FWSTATS_DEL(pwr, enable_ps);
DEBUGFS_FWSTATS_DEL(pwr, disable_ps);
DEBUGFS_FWSTATS_DEL(pwr, fix_tsf_ps);
/* skipping cont_miss_bcns_spread for now */
DEBUGFS_FWSTATS_DEL(pwr, rcvd_awake_beacons);
DEBUGFS_FWSTATS_DEL(mic, rx_pkts);
DEBUGFS_FWSTATS_DEL(mic, calc_failure);
DEBUGFS_FWSTATS_DEL(aes, encrypt_fail);
DEBUGFS_FWSTATS_DEL(aes, decrypt_fail);
DEBUGFS_FWSTATS_DEL(aes, encrypt_packets);
DEBUGFS_FWSTATS_DEL(aes, decrypt_packets);
DEBUGFS_FWSTATS_DEL(aes, encrypt_interrupt);
DEBUGFS_FWSTATS_DEL(aes, decrypt_interrupt);
DEBUGFS_FWSTATS_DEL(event, heart_beat);
DEBUGFS_FWSTATS_DEL(event, calibration);
DEBUGFS_FWSTATS_DEL(event, rx_mismatch);
DEBUGFS_FWSTATS_DEL(event, rx_mem_empty);
DEBUGFS_FWSTATS_DEL(event, rx_pool);
DEBUGFS_FWSTATS_DEL(event, oom_late);
DEBUGFS_FWSTATS_DEL(event, phy_transmit_error);
DEBUGFS_FWSTATS_DEL(event, tx_stuck);
DEBUGFS_FWSTATS_DEL(ps, pspoll_timeouts);
DEBUGFS_FWSTATS_DEL(ps, upsd_timeouts);
DEBUGFS_FWSTATS_DEL(ps, upsd_max_sptime);
DEBUGFS_FWSTATS_DEL(ps, upsd_max_apturn);
DEBUGFS_FWSTATS_DEL(ps, pspoll_max_apturn);
DEBUGFS_FWSTATS_DEL(ps, pspoll_utilization);
DEBUGFS_FWSTATS_DEL(ps, upsd_utilization);
DEBUGFS_FWSTATS_DEL(rxpipe, rx_prep_beacon_drop);
DEBUGFS_FWSTATS_DEL(rxpipe, descr_host_int_trig_rx_data);
DEBUGFS_FWSTATS_DEL(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
DEBUGFS_FWSTATS_DEL(rxpipe, missed_beacon_host_int_trig_rx_data);
DEBUGFS_FWSTATS_DEL(rxpipe, tx_xfr_host_int_trig_rx_data);
DEBUGFS_DEL(tx_queue_len);
DEBUGFS_DEL(tx_queue_status);
DEBUGFS_DEL(retry_count);
DEBUGFS_DEL(excessive_retries);
}
static void wl1251_debugfs_add_files(struct wl1251 *wl)
{
DEBUGFS_FWSTATS_ADD(tx, internal_desc_overflow);
DEBUGFS_FWSTATS_ADD(rx, out_of_mem);
DEBUGFS_FWSTATS_ADD(rx, hdr_overflow);
DEBUGFS_FWSTATS_ADD(rx, hw_stuck);
DEBUGFS_FWSTATS_ADD(rx, dropped);
DEBUGFS_FWSTATS_ADD(rx, fcs_err);
DEBUGFS_FWSTATS_ADD(rx, xfr_hint_trig);
DEBUGFS_FWSTATS_ADD(rx, path_reset);
DEBUGFS_FWSTATS_ADD(rx, reset_counter);
DEBUGFS_FWSTATS_ADD(dma, rx_requested);
DEBUGFS_FWSTATS_ADD(dma, rx_errors);
DEBUGFS_FWSTATS_ADD(dma, tx_requested);
DEBUGFS_FWSTATS_ADD(dma, tx_errors);
DEBUGFS_FWSTATS_ADD(isr, cmd_cmplt);
DEBUGFS_FWSTATS_ADD(isr, fiqs);
DEBUGFS_FWSTATS_ADD(isr, rx_headers);
DEBUGFS_FWSTATS_ADD(isr, rx_mem_overflow);
DEBUGFS_FWSTATS_ADD(isr, rx_rdys);
DEBUGFS_FWSTATS_ADD(isr, irqs);
DEBUGFS_FWSTATS_ADD(isr, tx_procs);
DEBUGFS_FWSTATS_ADD(isr, decrypt_done);
DEBUGFS_FWSTATS_ADD(isr, dma0_done);
DEBUGFS_FWSTATS_ADD(isr, dma1_done);
DEBUGFS_FWSTATS_ADD(isr, tx_exch_complete);
DEBUGFS_FWSTATS_ADD(isr, commands);
DEBUGFS_FWSTATS_ADD(isr, rx_procs);
DEBUGFS_FWSTATS_ADD(isr, hw_pm_mode_changes);
DEBUGFS_FWSTATS_ADD(isr, host_acknowledges);
DEBUGFS_FWSTATS_ADD(isr, pci_pm);
DEBUGFS_FWSTATS_ADD(isr, wakeups);
DEBUGFS_FWSTATS_ADD(isr, low_rssi);
DEBUGFS_FWSTATS_ADD(wep, addr_key_count);
DEBUGFS_FWSTATS_ADD(wep, default_key_count);
/* skipping wep.reserved */
DEBUGFS_FWSTATS_ADD(wep, key_not_found);
DEBUGFS_FWSTATS_ADD(wep, decrypt_fail);
DEBUGFS_FWSTATS_ADD(wep, packets);
DEBUGFS_FWSTATS_ADD(wep, interrupt);
DEBUGFS_FWSTATS_ADD(pwr, ps_enter);
DEBUGFS_FWSTATS_ADD(pwr, elp_enter);
DEBUGFS_FWSTATS_ADD(pwr, missing_bcns);
DEBUGFS_FWSTATS_ADD(pwr, wake_on_host);
DEBUGFS_FWSTATS_ADD(pwr, wake_on_timer_exp);
DEBUGFS_FWSTATS_ADD(pwr, tx_with_ps);
DEBUGFS_FWSTATS_ADD(pwr, tx_without_ps);
DEBUGFS_FWSTATS_ADD(pwr, rcvd_beacons);
DEBUGFS_FWSTATS_ADD(pwr, power_save_off);
DEBUGFS_FWSTATS_ADD(pwr, enable_ps);
DEBUGFS_FWSTATS_ADD(pwr, disable_ps);
DEBUGFS_FWSTATS_ADD(pwr, fix_tsf_ps);
/* skipping cont_miss_bcns_spread for now */
DEBUGFS_FWSTATS_ADD(pwr, rcvd_awake_beacons);
DEBUGFS_FWSTATS_ADD(mic, rx_pkts);
DEBUGFS_FWSTATS_ADD(mic, calc_failure);
DEBUGFS_FWSTATS_ADD(aes, encrypt_fail);
DEBUGFS_FWSTATS_ADD(aes, decrypt_fail);
DEBUGFS_FWSTATS_ADD(aes, encrypt_packets);
DEBUGFS_FWSTATS_ADD(aes, decrypt_packets);
DEBUGFS_FWSTATS_ADD(aes, encrypt_interrupt);
DEBUGFS_FWSTATS_ADD(aes, decrypt_interrupt);
DEBUGFS_FWSTATS_ADD(event, heart_beat);
DEBUGFS_FWSTATS_ADD(event, calibration);
DEBUGFS_FWSTATS_ADD(event, rx_mismatch);
DEBUGFS_FWSTATS_ADD(event, rx_mem_empty);
DEBUGFS_FWSTATS_ADD(event, rx_pool);
DEBUGFS_FWSTATS_ADD(event, oom_late);
DEBUGFS_FWSTATS_ADD(event, phy_transmit_error);
DEBUGFS_FWSTATS_ADD(event, tx_stuck);
DEBUGFS_FWSTATS_ADD(ps, pspoll_timeouts);
DEBUGFS_FWSTATS_ADD(ps, upsd_timeouts);
DEBUGFS_FWSTATS_ADD(ps, upsd_max_sptime);
DEBUGFS_FWSTATS_ADD(ps, upsd_max_apturn);
DEBUGFS_FWSTATS_ADD(ps, pspoll_max_apturn);
DEBUGFS_FWSTATS_ADD(ps, pspoll_utilization);
DEBUGFS_FWSTATS_ADD(ps, upsd_utilization);
DEBUGFS_FWSTATS_ADD(rxpipe, rx_prep_beacon_drop);
DEBUGFS_FWSTATS_ADD(rxpipe, descr_host_int_trig_rx_data);
DEBUGFS_FWSTATS_ADD(rxpipe, beacon_buffer_thres_host_int_trig_rx_data);
DEBUGFS_FWSTATS_ADD(rxpipe, missed_beacon_host_int_trig_rx_data);
DEBUGFS_FWSTATS_ADD(rxpipe, tx_xfr_host_int_trig_rx_data);
DEBUGFS_ADD(tx_queue_len, wl->debugfs.rootdir);
DEBUGFS_ADD(tx_queue_status, wl->debugfs.rootdir);
DEBUGFS_ADD(retry_count, wl->debugfs.rootdir);
DEBUGFS_ADD(excessive_retries, wl->debugfs.rootdir);
}
void wl1251_debugfs_reset(struct wl1251 *wl)
{
if (wl->stats.fw_stats != NULL)
memset(wl->stats.fw_stats, 0, sizeof(*wl->stats.fw_stats));
wl->stats.retry_count = 0;
wl->stats.excessive_retries = 0;
}
int wl1251_debugfs_init(struct wl1251 *wl)
{
wl->stats.fw_stats = kzalloc(sizeof(*wl->stats.fw_stats), GFP_KERNEL);
if (!wl->stats.fw_stats)
return -ENOMEM;
wl->debugfs.rootdir = debugfs_create_dir(KBUILD_MODNAME, NULL);
wl->debugfs.fw_statistics = debugfs_create_dir("fw-statistics",
wl->debugfs.rootdir);
wl->stats.fw_stats_update = jiffies;
wl1251_debugfs_add_files(wl);
return 0;
}
void wl1251_debugfs_exit(struct wl1251 *wl)
{
wl1251_debugfs_delete_files(wl);
kfree(wl->stats.fw_stats);
wl->stats.fw_stats = NULL;
debugfs_remove(wl->debugfs.fw_statistics);
wl->debugfs.fw_statistics = NULL;
debugfs_remove(wl->debugfs.rootdir);
wl->debugfs.rootdir = NULL;
}
|
linux-master
|
drivers/net/wireless/ti/wl1251/debugfs.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1251
*
* Copyright (C) 2009 Nokia Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/slab.h>
#include "init.h"
#include "wl12xx_80211.h"
#include "acx.h"
#include "cmd.h"
#include "reg.h"
int wl1251_hw_init_hwenc_config(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_feature_cfg(wl, 0);
if (ret < 0) {
wl1251_warning("couldn't set feature config");
return ret;
}
ret = wl1251_acx_default_key(wl, wl->default_key);
if (ret < 0) {
wl1251_warning("couldn't set default key");
return ret;
}
return 0;
}
int wl1251_hw_init_templates_config(struct wl1251 *wl)
{
int ret;
u8 partial_vbm[PARTIAL_VBM_MAX];
/* send empty templates for fw memory reservation */
ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, NULL,
sizeof(struct wl12xx_probe_req_template));
if (ret < 0)
return ret;
ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, NULL,
sizeof(struct wl12xx_null_data_template));
if (ret < 0)
return ret;
ret = wl1251_cmd_template_set(wl, CMD_PS_POLL, NULL,
sizeof(struct wl12xx_ps_poll_template));
if (ret < 0)
return ret;
	ret = wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, NULL,
				      sizeof(struct wl12xx_qos_null_data_template));
if (ret < 0)
return ret;
	ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, NULL,
				      sizeof(struct wl12xx_probe_resp_template));
if (ret < 0)
return ret;
	ret = wl1251_cmd_template_set(wl, CMD_BEACON, NULL,
				      sizeof(struct wl12xx_beacon_template));
if (ret < 0)
return ret;
/* tim templates, first reserve space then allocate an empty one */
memset(partial_vbm, 0, PARTIAL_VBM_MAX);
ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, PARTIAL_VBM_MAX, 0);
if (ret < 0)
return ret;
ret = wl1251_cmd_vbm(wl, TIM_ELE_ID, partial_vbm, 1, 0);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_rx_config(struct wl1251 *wl, u32 config, u32 filter)
{
int ret;
ret = wl1251_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF);
if (ret < 0)
return ret;
ret = wl1251_acx_rx_config(wl, config, filter);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_phy_config(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_pd_threshold(wl);
if (ret < 0)
return ret;
ret = wl1251_acx_slot(wl, DEFAULT_SLOT_TIME);
if (ret < 0)
return ret;
ret = wl1251_acx_group_address_tbl(wl, true, NULL, 0);
if (ret < 0)
return ret;
ret = wl1251_acx_service_period_timeout(wl);
if (ret < 0)
return ret;
ret = wl1251_acx_rts_threshold(wl, RTS_THRESHOLD_DEF);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_beacon_filter(struct wl1251 *wl)
{
int ret;
/* disable beacon filtering at this stage */
ret = wl1251_acx_beacon_filter_opt(wl, false);
if (ret < 0)
return ret;
ret = wl1251_acx_beacon_filter_table(wl);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_pta(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_sg_enable(wl);
if (ret < 0)
return ret;
ret = wl1251_acx_sg_cfg(wl);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_energy_detection(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_cca_threshold(wl);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_beacon_broadcast(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_bcn_dtim_options(wl);
if (ret < 0)
return ret;
return 0;
}
int wl1251_hw_init_power_auth(struct wl1251 *wl)
{
return wl1251_acx_sleep_auth(wl, WL1251_PSM_CAM);
}
int wl1251_hw_init_mem_config(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_mem_cfg(wl);
if (ret < 0)
return ret;
wl->target_mem_map = kzalloc(sizeof(struct wl1251_acx_mem_map),
GFP_KERNEL);
if (!wl->target_mem_map) {
wl1251_error("couldn't allocate target memory map");
return -ENOMEM;
}
/* now ask for the memory map the firmware has built */
ret = wl1251_acx_mem_map(wl, wl->target_mem_map,
sizeof(struct wl1251_acx_mem_map));
if (ret < 0) {
wl1251_error("couldn't retrieve firmware memory map");
kfree(wl->target_mem_map);
wl->target_mem_map = NULL;
return ret;
}
return 0;
}
static int wl1251_hw_init_txq_fill(u8 qid,
struct acx_tx_queue_qos_config *config,
u32 num_blocks)
{
config->qid = qid;
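/* The high/low watermarks below are per-AC percentages of the total
 * number of TX memory blocks. */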
switch (qid) {
case QOS_AC_BE:
config->high_threshold =
(QOS_TX_HIGH_BE_DEF * num_blocks) / 100;
config->low_threshold =
(QOS_TX_LOW_BE_DEF * num_blocks) / 100;
break;
case QOS_AC_BK:
config->high_threshold =
(QOS_TX_HIGH_BK_DEF * num_blocks) / 100;
config->low_threshold =
(QOS_TX_LOW_BK_DEF * num_blocks) / 100;
break;
case QOS_AC_VI:
config->high_threshold =
(QOS_TX_HIGH_VI_DEF * num_blocks) / 100;
config->low_threshold =
(QOS_TX_LOW_VI_DEF * num_blocks) / 100;
break;
case QOS_AC_VO:
config->high_threshold =
(QOS_TX_HIGH_VO_DEF * num_blocks) / 100;
config->low_threshold =
(QOS_TX_LOW_VO_DEF * num_blocks) / 100;
break;
default:
wl1251_error("Invalid TX queue id: %d", qid);
return -EINVAL;
}
return 0;
}
static int wl1251_hw_init_tx_queue_config(struct wl1251 *wl)
{
struct acx_tx_queue_qos_config *config;
struct wl1251_acx_mem_map *wl_mem_map = wl->target_mem_map;
int ret, i;
wl1251_debug(DEBUG_ACX, "acx tx queue config");
config = kzalloc(sizeof(*config), GFP_KERNEL);
if (!config) {
ret = -ENOMEM;
goto out;
}
for (i = 0; i < MAX_NUM_OF_AC; i++) {
ret = wl1251_hw_init_txq_fill(i, config,
wl_mem_map->num_tx_mem_blocks);
if (ret < 0)
goto out;
ret = wl1251_cmd_configure(wl, ACX_TX_QUEUE_CFG,
config, sizeof(*config));
if (ret < 0)
goto out;
}
wl1251_acx_ac_cfg(wl, AC_BE, CWMIN_BE, CWMAX_BE, AIFS_DIFS, TXOP_BE);
wl1251_acx_ac_cfg(wl, AC_BK, CWMIN_BK, CWMAX_BK, AIFS_DIFS, TXOP_BK);
wl1251_acx_ac_cfg(wl, AC_VI, CWMIN_VI, CWMAX_VI, AIFS_DIFS, TXOP_VI);
wl1251_acx_ac_cfg(wl, AC_VO, CWMIN_VO, CWMAX_VO, AIFS_DIFS, TXOP_VO);
out:
kfree(config);
return ret;
}
static int wl1251_hw_init_data_path_config(struct wl1251 *wl)
{
int ret;
/* asking for the data path parameters */
wl->data_path = kzalloc(sizeof(struct acx_data_path_params_resp),
GFP_KERNEL);
if (!wl->data_path)
return -ENOMEM;
ret = wl1251_acx_data_path_params(wl, wl->data_path);
if (ret < 0) {
kfree(wl->data_path);
wl->data_path = NULL;
return ret;
}
return 0;
}
int wl1251_hw_init(struct wl1251 *wl)
{
struct wl1251_acx_mem_map *wl_mem_map;
int ret;
ret = wl1251_hw_init_hwenc_config(wl);
if (ret < 0)
return ret;
/* Template settings */
ret = wl1251_hw_init_templates_config(wl);
if (ret < 0)
return ret;
/* Default memory configuration */
ret = wl1251_hw_init_mem_config(wl);
if (ret < 0)
return ret;
/* Default data path configuration */
ret = wl1251_hw_init_data_path_config(wl);
if (ret < 0)
goto out_free_memmap;
/* RX config */
ret = wl1251_hw_init_rx_config(wl,
RX_CFG_PROMISCUOUS | RX_CFG_TSF,
RX_FILTER_OPTION_DEF);
/* RX_CONFIG_OPTION_ANY_DST_ANY_BSS,
RX_FILTER_OPTION_FILTER_ALL); */
if (ret < 0)
goto out_free_data_path;
/* TX queues config */
ret = wl1251_hw_init_tx_queue_config(wl);
if (ret < 0)
goto out_free_data_path;
/* PHY layer config */
ret = wl1251_hw_init_phy_config(wl);
if (ret < 0)
goto out_free_data_path;
/* Initialize connection monitoring thresholds */
ret = wl1251_acx_conn_monit_params(wl);
if (ret < 0)
goto out_free_data_path;
/* Beacon filtering */
ret = wl1251_hw_init_beacon_filter(wl);
if (ret < 0)
goto out_free_data_path;
/* Bluetooth WLAN coexistence */
ret = wl1251_hw_init_pta(wl);
if (ret < 0)
goto out_free_data_path;
/* Energy detection */
ret = wl1251_hw_init_energy_detection(wl);
if (ret < 0)
goto out_free_data_path;
/* Beacons and broadcast settings */
ret = wl1251_hw_init_beacon_broadcast(wl);
if (ret < 0)
goto out_free_data_path;
/* Enable rx data path */
ret = wl1251_cmd_data_path_rx(wl, wl->channel, 1);
if (ret < 0)
goto out_free_data_path;
/* Enable tx data path */
ret = wl1251_cmd_data_path_tx(wl, wl->channel, 1);
if (ret < 0)
goto out_free_data_path;
/* Default power state */
ret = wl1251_hw_init_power_auth(wl);
if (ret < 0)
goto out_free_data_path;
wl_mem_map = wl->target_mem_map;
wl1251_info("%d tx blocks at 0x%x, %d rx blocks at 0x%x",
wl_mem_map->num_tx_mem_blocks,
wl->data_path->tx_control_addr,
wl_mem_map->num_rx_mem_blocks,
wl->data_path->rx_control_addr);
return 0;
out_free_data_path:
kfree(wl->data_path);
out_free_memmap:
kfree(wl->target_mem_map);
return ret;
}
| linux-master | drivers/net/wireless/ti/wl1251/init.c |
// SPDX-License-Identifier: GPL-2.0
#include "cmd.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/etherdevice.h>
#include "wl1251.h"
#include "reg.h"
#include "io.h"
#include "ps.h"
#include "acx.h"
/**
* wl1251_cmd_send - Send command to firmware
*
* @wl: wl struct
* @id: command id
* @buf: buffer containing the command, must work with dma
* @len: length of the buffer
*/
int wl1251_cmd_send(struct wl1251 *wl, u16 id, void *buf, size_t len)
{
struct wl1251_cmd_header *cmd;
unsigned long timeout;
u32 intr;
int ret = 0;
cmd = buf;
cmd->id = id;
cmd->status = 0;
WARN_ON(len % 4 != 0);
wl1251_mem_write(wl, wl->cmd_box_addr, buf, len);
wl1251_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
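/* Busy-poll the raw interrupt status until the firmware signals command
 * completion or the timeout expires; the bit is acked once set. */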
timeout = jiffies + msecs_to_jiffies(WL1251_COMMAND_TIMEOUT);
intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
while (!(intr & WL1251_ACX_INTR_CMD_COMPLETE)) {
if (time_after(jiffies, timeout)) {
wl1251_error("command complete timeout");
ret = -ETIMEDOUT;
goto out;
}
msleep(1);
intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
}
wl1251_reg_write32(wl, ACX_REG_INTERRUPT_ACK,
WL1251_ACX_INTR_CMD_COMPLETE);
out:
return ret;
}
/**
* wl1251_cmd_test - Send test command to firmware
*
* @wl: wl struct
* @buf: buffer containing the command, with all headers, must work with dma
* @buf_len: length of the buffer
* @answer: is answer needed
*/
int wl1251_cmd_test(struct wl1251 *wl, void *buf, size_t buf_len, u8 answer)
{
int ret;
wl1251_debug(DEBUG_CMD, "cmd test");
ret = wl1251_cmd_send(wl, CMD_TEST, buf, buf_len);
if (ret < 0) {
wl1251_warning("TEST command failed");
return ret;
}
if (answer) {
struct wl1251_command *cmd_answer;
/*
* The test command got in, we can read the answer.
* The answer would be a wl1251_command, where the
* parameter array contains the actual answer.
*/
wl1251_mem_read(wl, wl->cmd_box_addr, buf, buf_len);
cmd_answer = buf;
if (cmd_answer->header.status != CMD_STATUS_SUCCESS)
wl1251_error("TEST command answer error: %d",
cmd_answer->header.status);
}
return 0;
}
/**
* wl1251_cmd_interrogate - Read acx from firmware
*
* @wl: wl struct
* @id: acx id
* @buf: buffer for the response, including all headers, must work with dma
* @len: length of buf
*/
int wl1251_cmd_interrogate(struct wl1251 *wl, u16 id, void *buf, size_t len)
{
struct acx_header *acx = buf;
int ret;
wl1251_debug(DEBUG_CMD, "cmd interrogate");
acx->id = id;
/* payload length, does not include any headers */
acx->len = len - sizeof(*acx);
ret = wl1251_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx));
if (ret < 0) {
wl1251_error("INTERROGATE command failed");
goto out;
}
/* the interrogate command got in, we can read the answer */
wl1251_mem_read(wl, wl->cmd_box_addr, buf, len);
acx = buf;
if (acx->cmd.status != CMD_STATUS_SUCCESS)
wl1251_error("INTERROGATE command error: %d",
acx->cmd.status);
out:
return ret;
}
/**
* wl1251_cmd_configure - Write acx value to firmware
*
* @wl: wl struct
* @id: acx id
* @buf: buffer containing acx, including all headers, must work with dma
* @len: length of buf
*/
int wl1251_cmd_configure(struct wl1251 *wl, u16 id, void *buf, size_t len)
{
struct acx_header *acx = buf;
int ret;
wl1251_debug(DEBUG_CMD, "cmd configure");
acx->id = id;
/* payload length, does not include any headers */
acx->len = len - sizeof(*acx);
ret = wl1251_cmd_send(wl, CMD_CONFIGURE, acx, len);
if (ret < 0) {
wl1251_warning("CONFIGURE command NOK");
return ret;
}
return 0;
}
int wl1251_cmd_vbm(struct wl1251 *wl, u8 identity,
void *bitmap, u16 bitmap_len, u8 bitmap_control)
{
struct wl1251_cmd_vbm_update *vbm;
int ret;
wl1251_debug(DEBUG_CMD, "cmd vbm");
vbm = kzalloc(sizeof(*vbm), GFP_KERNEL);
if (!vbm)
return -ENOMEM;
/* Count and period will be filled by the target */
vbm->tim.bitmap_ctrl = bitmap_control;
if (bitmap_len > PARTIAL_VBM_MAX) {
wl1251_warning("cmd vbm len is %d B, truncating to %d",
bitmap_len, PARTIAL_VBM_MAX);
bitmap_len = PARTIAL_VBM_MAX;
}
memcpy(vbm->tim.pvb_field, bitmap, bitmap_len);
vbm->tim.identity = identity;
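/*
 * The TIM element body is DTIM count, DTIM period and bitmap control
 * (3 octets) plus the partial virtual bitmap; the command length also
 * covers the element ID and length octets (+5 total). This reading
 * assumes the standard IEEE 802.11 TIM element layout.
 */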
vbm->tim.length = bitmap_len + 3;
vbm->len = cpu_to_le16(bitmap_len + 5);
ret = wl1251_cmd_send(wl, CMD_VBM, vbm, sizeof(*vbm));
if (ret < 0) {
wl1251_error("VBM command failed");
goto out;
}
out:
kfree(vbm);
return ret;
}
int wl1251_cmd_data_path_rx(struct wl1251 *wl, u8 channel, bool enable)
{
struct cmd_enabledisable_path *cmd;
int ret;
u16 cmd_rx;
wl1251_debug(DEBUG_CMD, "cmd data path");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->channel = channel;
if (enable)
cmd_rx = CMD_ENABLE_RX;
else
cmd_rx = CMD_DISABLE_RX;
ret = wl1251_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd));
if (ret < 0) {
wl1251_error("rx %s cmd for channel %d failed",
enable ? "start" : "stop", channel);
goto out;
}
wl1251_debug(DEBUG_BOOT, "rx %s cmd channel %d",
enable ? "start" : "stop", channel);
out:
kfree(cmd);
return ret;
}
int wl1251_cmd_data_path_tx(struct wl1251 *wl, u8 channel, bool enable)
{
struct cmd_enabledisable_path *cmd;
int ret;
u16 cmd_tx;
wl1251_debug(DEBUG_CMD, "cmd data path");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->channel = channel;
if (enable)
cmd_tx = CMD_ENABLE_TX;
else
cmd_tx = CMD_DISABLE_TX;
ret = wl1251_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd));
if (ret < 0)
wl1251_error("tx %s cmd for channel %d failed",
enable ? "start" : "stop", channel);
else
wl1251_debug(DEBUG_BOOT, "tx %s cmd channel %d",
enable ? "start" : "stop", channel);
kfree(cmd);
return ret;
}
int wl1251_cmd_join(struct wl1251 *wl, u8 bss_type, u8 channel,
u16 beacon_interval, u8 dtim_interval)
{
struct cmd_join *join;
int ret, i;
u8 *bssid;
join = kzalloc(sizeof(*join), GFP_KERNEL);
if (!join)
return -ENOMEM;
wl1251_debug(DEBUG_CMD, "cmd join%s ch %d %d/%d",
bss_type == BSS_TYPE_IBSS ? " ibss" : "",
channel, beacon_interval, dtim_interval);
/* Reverse order BSSID */
bssid = (u8 *) &join->bssid_lsb;
for (i = 0; i < ETH_ALEN; i++)
bssid[i] = wl->bssid[ETH_ALEN - i - 1];
join->rx_config_options = wl->rx_config;
join->rx_filter_options = wl->rx_filter;
join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS |
RATE_MASK_5_5MBPS | RATE_MASK_11MBPS;
join->beacon_interval = beacon_interval;
join->dtim_interval = dtim_interval;
join->bss_type = bss_type;
join->channel = channel;
join->ctrl = JOIN_CMD_CTRL_TX_FLUSH;
ret = wl1251_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join));
if (ret < 0) {
wl1251_error("failed to initiate cmd join");
goto out;
}
out:
kfree(join);
return ret;
}
int wl1251_cmd_ps_mode(struct wl1251 *wl, u8 ps_mode)
{
struct wl1251_cmd_ps_params *ps_params = NULL;
int ret = 0;
wl1251_debug(DEBUG_CMD, "cmd set ps mode");
ps_params = kzalloc(sizeof(*ps_params), GFP_KERNEL);
if (!ps_params)
return -ENOMEM;
ps_params->ps_mode = ps_mode;
ps_params->send_null_data = 1;
ps_params->retries = 5;
ps_params->hang_over_period = 128;
ps_params->null_data_rate = 1; /* 1 Mbps */
ret = wl1251_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
sizeof(*ps_params));
if (ret < 0) {
wl1251_error("cmd set_ps_mode failed");
goto out;
}
out:
kfree(ps_params);
return ret;
}
int wl1251_cmd_read_memory(struct wl1251 *wl, u32 addr, void *answer,
size_t len)
{
struct cmd_read_write_memory *cmd;
int ret = 0;
wl1251_debug(DEBUG_CMD, "cmd read memory");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
WARN_ON(len > MAX_READ_SIZE);
len = min_t(size_t, len, MAX_READ_SIZE);
cmd->addr = addr;
cmd->size = len;
ret = wl1251_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd));
if (ret < 0) {
wl1251_error("read memory command failed: %d", ret);
goto out;
}
/* the read command got in, we can now read the answer */
wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
if (cmd->header.status != CMD_STATUS_SUCCESS)
wl1251_error("error in read command result: %d",
cmd->header.status);
memcpy(answer, cmd->value, len);
out:
kfree(cmd);
return ret;
}
int wl1251_cmd_template_set(struct wl1251 *wl, u16 cmd_id,
void *buf, size_t buf_len)
{
struct wl1251_cmd_packet_template *cmd;
size_t cmd_len;
int ret = 0;
wl1251_debug(DEBUG_CMD, "cmd template %d", cmd_id);
WARN_ON(buf_len > WL1251_MAX_TEMPLATE_SIZE);
buf_len = min_t(size_t, buf_len, WL1251_MAX_TEMPLATE_SIZE);
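/* Round the command up to a 4-byte multiple, as required by
 * wl1251_cmd_send() (see its WARN_ON). */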
cmd_len = ALIGN(sizeof(*cmd) + buf_len, 4);
cmd = kzalloc(cmd_len, GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->size = cpu_to_le16(buf_len);
if (buf)
memcpy(cmd->data, buf, buf_len);
ret = wl1251_cmd_send(wl, cmd_id, cmd, cmd_len);
if (ret < 0) {
wl1251_warning("cmd set_template failed: %d", ret);
goto out;
}
out:
kfree(cmd);
return ret;
}
int wl1251_cmd_scan(struct wl1251 *wl, u8 *ssid, size_t ssid_len,
struct ieee80211_channel *channels[],
unsigned int n_channels, unsigned int n_probes)
{
struct wl1251_cmd_scan *cmd;
int i, ret = 0;
wl1251_debug(DEBUG_CMD, "cmd scan channels %d", n_channels);
WARN_ON(n_channels > SCAN_MAX_NUM_OF_CHANNELS);
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->params.rx_config_options = cpu_to_le32(CFG_RX_ALL_GOOD);
cmd->params.rx_filter_options = cpu_to_le32(CFG_RX_PRSP_EN |
CFG_RX_MGMT_EN |
CFG_RX_BCN_EN);
cmd->params.scan_options = 0;
/*
* Use high priority scan when not associated to prevent fw issue
* causing never-ending scans (sometimes 20+ minutes).
* Note: This bug may be caused by the fw's DTIM handling.
*/
if (is_zero_ether_addr(wl->bssid))
cmd->params.scan_options |= cpu_to_le16(WL1251_SCAN_OPT_PRIORITY_HIGH);
cmd->params.num_channels = n_channels;
cmd->params.num_probe_requests = n_probes;
cmd->params.tx_rate = cpu_to_le16(1 << 1); /* 2 Mbps */
cmd->params.tid_trigger = 0;
for (i = 0; i < n_channels; i++) {
cmd->channels[i].min_duration =
cpu_to_le32(WL1251_SCAN_MIN_DURATION);
cmd->channels[i].max_duration =
cpu_to_le32(WL1251_SCAN_MAX_DURATION);
memset(&cmd->channels[i].bssid_lsb, 0xff, 4);
memset(&cmd->channels[i].bssid_msb, 0xff, 2);
cmd->channels[i].early_termination = 0;
cmd->channels[i].tx_power_att = 0;
cmd->channels[i].channel = channels[i]->hw_value;
}
if (ssid) {
int len = clamp_val(ssid_len, 0, IEEE80211_MAX_SSID_LEN);
cmd->params.ssid_len = len;
memcpy(cmd->params.ssid, ssid, len);
}
ret = wl1251_cmd_send(wl, CMD_SCAN, cmd, sizeof(*cmd));
if (ret < 0) {
wl1251_error("cmd scan failed: %d", ret);
goto out;
}
wl1251_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
if (cmd->header.status != CMD_STATUS_SUCCESS) {
wl1251_error("cmd scan status wasn't success: %d",
cmd->header.status);
ret = -EIO;
goto out;
}
out:
kfree(cmd);
return ret;
}
int wl1251_cmd_trigger_scan_to(struct wl1251 *wl, u32 timeout)
{
struct wl1251_cmd_trigger_scan_to *cmd;
int ret;
wl1251_debug(DEBUG_CMD, "cmd trigger scan to");
cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
if (!cmd)
return -ENOMEM;
cmd->timeout = timeout;
ret = wl1251_cmd_send(wl, CMD_TRIGGER_SCAN_TO, cmd, sizeof(*cmd));
if (ret < 0) {
wl1251_error("cmd trigger scan to failed: %d", ret);
goto out;
}
out:
kfree(cmd);
return ret;
}
| linux-master | drivers/net/wireless/ti/wl1251/cmd.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1251
*
* Copyright (C) 2008-2009 Nokia Corporation
*/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/firmware.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/crc32.h>
#include <linux/etherdevice.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include "wl1251.h"
#include "wl12xx_80211.h"
#include "reg.h"
#include "io.h"
#include "cmd.h"
#include "event.h"
#include "tx.h"
#include "rx.h"
#include "ps.h"
#include "init.h"
#include "debugfs.h"
#include "boot.h"
void wl1251_enable_interrupts(struct wl1251 *wl)
{
wl->if_ops->enable_irq(wl);
}
void wl1251_disable_interrupts(struct wl1251 *wl)
{
wl->if_ops->disable_irq(wl);
}
static int wl1251_power_off(struct wl1251 *wl)
{
return wl->if_ops->power(wl, false);
}
static int wl1251_power_on(struct wl1251 *wl)
{
return wl->if_ops->power(wl, true);
}
static int wl1251_fetch_firmware(struct wl1251 *wl)
{
const struct firmware *fw;
struct device *dev = wiphy_dev(wl->hw->wiphy);
int ret;
ret = request_firmware(&fw, WL1251_FW_NAME, dev);
if (ret < 0) {
wl1251_error("could not get firmware: %d", ret);
return ret;
}
if (fw->size % 4) {
wl1251_error("firmware size is not multiple of 32 bits: %zu",
fw->size);
ret = -EILSEQ;
goto out;
}
wl->fw_len = fw->size;
wl->fw = vmalloc(wl->fw_len);
if (!wl->fw) {
wl1251_error("could not allocate memory for the firmware");
ret = -ENOMEM;
goto out;
}
memcpy(wl->fw, fw->data, wl->fw_len);
ret = 0;
out:
release_firmware(fw);
return ret;
}
static int wl1251_fetch_nvs(struct wl1251 *wl)
{
const struct firmware *fw;
struct device *dev = wiphy_dev(wl->hw->wiphy);
int ret;
ret = request_firmware(&fw, WL1251_NVS_NAME, dev);
if (ret < 0) {
wl1251_error("could not get nvs file: %d", ret);
return ret;
}
if (fw->size % 4) {
wl1251_error("nvs size is not multiple of 32 bits: %zu",
fw->size);
ret = -EILSEQ;
goto out;
}
wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL);
if (!wl->nvs) {
wl1251_error("could not allocate memory for the nvs file");
ret = -ENOMEM;
goto out;
}
wl->nvs_len = fw->size;
ret = 0;
out:
release_firmware(fw);
return ret;
}
static void wl1251_fw_wakeup(struct wl1251 *wl)
{
u32 elp_reg;
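/* Request a wake-up through the ELP control register, then check the
 * WLAN-ready bit to confirm the core is up. */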
elp_reg = ELPCTRL_WAKE_UP;
wl1251_write_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
elp_reg = wl1251_read_elp(wl, HW_ACCESS_ELP_CTRL_REG_ADDR);
if (!(elp_reg & ELPCTRL_WLAN_READY))
wl1251_warning("WLAN not ready");
}
static int wl1251_chip_wakeup(struct wl1251 *wl)
{
int ret;
ret = wl1251_power_on(wl);
if (ret < 0)
return ret;
msleep(WL1251_POWER_ON_SLEEP);
wl->if_ops->reset(wl);
/* We don't need a real memory partition here, because we only want
* to use the registers at this point. */
wl1251_set_partition(wl,
0x00000000,
0x00000000,
REGISTERS_BASE,
REGISTERS_DOWN_SIZE);
/* ELP module wake up */
wl1251_fw_wakeup(wl);
/* whal_FwCtrl_BootSm() */
/* 0. read chip id from CHIP_ID */
wl->chip_id = wl1251_reg_read32(wl, CHIP_ID_B);
/* 1. check if chip id is valid */
switch (wl->chip_id) {
case CHIP_ID_1251_PG12:
wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)",
wl->chip_id);
break;
case CHIP_ID_1251_PG11:
wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG11)",
wl->chip_id);
break;
case CHIP_ID_1251_PG10:
default:
wl1251_error("unsupported chip id: 0x%x", wl->chip_id);
ret = -ENODEV;
goto out;
}
if (wl->fw == NULL) {
ret = wl1251_fetch_firmware(wl);
if (ret < 0)
goto out;
}
out:
return ret;
}
#define WL1251_IRQ_LOOP_COUNT 10
static void wl1251_irq_work(struct work_struct *work)
{
u32 intr, ctr = WL1251_IRQ_LOOP_COUNT;
struct wl1251 *wl =
container_of(work, struct wl1251, irq_work);
int ret;
mutex_lock(&wl->mutex);
wl1251_debug(DEBUG_IRQ, "IRQ work");
if (wl->state == WL1251_STATE_OFF)
goto out;
ret = wl1251_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1251_ACX_INTR_ALL);
intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
wl1251_debug(DEBUG_IRQ, "intr: 0x%x", intr);
do {
if (wl->data_path) {
wl->rx_counter = wl1251_mem_read32(
wl, wl->data_path->rx_control_addr);
/* We handle a firmware bug here */
switch ((wl->rx_counter - wl->rx_handled) & 0xf) {
case 0:
wl1251_debug(DEBUG_IRQ,
"RX: FW and host in sync");
intr &= ~WL1251_ACX_INTR_RX0_DATA;
intr &= ~WL1251_ACX_INTR_RX1_DATA;
break;
case 1:
wl1251_debug(DEBUG_IRQ, "RX: FW +1");
intr |= WL1251_ACX_INTR_RX0_DATA;
intr &= ~WL1251_ACX_INTR_RX1_DATA;
break;
case 2:
wl1251_debug(DEBUG_IRQ, "RX: FW +2");
intr |= WL1251_ACX_INTR_RX0_DATA;
intr |= WL1251_ACX_INTR_RX1_DATA;
break;
default:
wl1251_warning(
"RX: FW and host out of sync: %d",
wl->rx_counter - wl->rx_handled);
break;
}
wl->rx_handled = wl->rx_counter;
wl1251_debug(DEBUG_IRQ, "RX counter: %d",
wl->rx_counter);
}
intr &= wl->intr_mask;
if (intr == 0) {
wl1251_debug(DEBUG_IRQ, "INTR is 0");
goto out_sleep;
}
if (intr & WL1251_ACX_INTR_RX0_DATA) {
wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX0_DATA");
wl1251_rx(wl);
}
if (intr & WL1251_ACX_INTR_RX1_DATA) {
wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_RX1_DATA");
wl1251_rx(wl);
}
if (intr & WL1251_ACX_INTR_TX_RESULT) {
wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_TX_RESULT");
wl1251_tx_complete(wl);
}
if (intr & WL1251_ACX_INTR_EVENT_A) {
wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT_A");
wl1251_event_handle(wl, 0);
}
if (intr & WL1251_ACX_INTR_EVENT_B) {
wl1251_debug(DEBUG_IRQ, "WL1251_ACX_INTR_EVENT_B");
wl1251_event_handle(wl, 1);
}
if (intr & WL1251_ACX_INTR_INIT_COMPLETE)
wl1251_debug(DEBUG_IRQ,
"WL1251_ACX_INTR_INIT_COMPLETE");
if (--ctr == 0)
break;
intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR);
} while (intr);
out_sleep:
wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask));
wl1251_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
}
static int wl1251_join(struct wl1251 *wl, u8 bss_type, u8 channel,
u16 beacon_interval, u8 dtim_period)
{
int ret;
ret = wl1251_acx_frame_rates(wl, DEFAULT_HW_GEN_TX_RATE,
DEFAULT_HW_GEN_MODULATION_TYPE,
wl->tx_mgmt_frm_rate,
wl->tx_mgmt_frm_mod);
if (ret < 0)
goto out;
/*
* Join command applies filters, and if we are not associated,
* BSSID filter must be disabled for association to work.
*/
if (is_zero_ether_addr(wl->bssid))
wl->rx_config &= ~CFG_BSSID_FILTER_EN;
ret = wl1251_cmd_join(wl, bss_type, channel, beacon_interval,
dtim_period);
if (ret < 0)
goto out;
ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
if (ret < 0)
wl1251_warning("join timeout");
out:
return ret;
}
static void wl1251_op_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct wl1251 *wl = hw->priv;
unsigned long flags;
skb_queue_tail(&wl->tx_queue, skb);
/*
* The chip specific setup must run before the first TX packet -
* before that, the tx_work will not be initialized!
*/
ieee80211_queue_work(wl->hw, &wl->tx_work);
/*
* The workqueue is slow to process the tx_queue and we need to stop
* the queue here, otherwise the queue will get too long.
*/
if (skb_queue_len(&wl->tx_queue) >= WL1251_TX_QUEUE_HIGH_WATERMARK) {
wl1251_debug(DEBUG_TX, "op_tx: tx_queue full, stop queues");
spin_lock_irqsave(&wl->wl_lock, flags);
ieee80211_stop_queues(wl->hw);
wl->tx_queue_stopped = true;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
}
static int wl1251_op_start(struct ieee80211_hw *hw)
{
struct wl1251 *wl = hw->priv;
struct wiphy *wiphy = hw->wiphy;
int ret = 0;
wl1251_debug(DEBUG_MAC80211, "mac80211 start");
mutex_lock(&wl->mutex);
if (wl->state != WL1251_STATE_OFF) {
wl1251_error("cannot start because not in off state: %d",
wl->state);
ret = -EBUSY;
goto out;
}
ret = wl1251_chip_wakeup(wl);
if (ret < 0)
goto out;
ret = wl1251_boot(wl);
if (ret < 0)
goto out;
ret = wl1251_hw_init(wl);
if (ret < 0)
goto out;
ret = wl1251_acx_station_id(wl);
if (ret < 0)
goto out;
wl->state = WL1251_STATE_ON;
wl1251_info("firmware booted (%s)", wl->fw_ver);
/* update hw/fw version info in wiphy struct */
wiphy->hw_version = wl->chip_id;
strncpy(wiphy->fw_version, wl->fw_ver, sizeof(wiphy->fw_version));
out:
if (ret < 0)
wl1251_power_off(wl);
mutex_unlock(&wl->mutex);
return ret;
}
static void wl1251_op_stop(struct ieee80211_hw *hw)
{
struct wl1251 *wl = hw->priv;
wl1251_info("down");
wl1251_debug(DEBUG_MAC80211, "mac80211 stop");
mutex_lock(&wl->mutex);
WARN_ON(wl->state != WL1251_STATE_ON);
if (wl->scanning) {
struct cfg80211_scan_info info = {
.aborted = true,
};
ieee80211_scan_completed(wl->hw, &info);
wl->scanning = false;
}
wl->state = WL1251_STATE_OFF;
wl1251_disable_interrupts(wl);
mutex_unlock(&wl->mutex);
cancel_work_sync(&wl->irq_work);
cancel_work_sync(&wl->tx_work);
cancel_delayed_work_sync(&wl->elp_work);
mutex_lock(&wl->mutex);
/* let's notify MAC80211 about the remaining pending TX frames */
wl1251_tx_flush(wl);
wl1251_power_off(wl);
eth_zero_addr(wl->bssid);
wl->listen_int = 1;
wl->bss_type = MAX_BSS_TYPE;
wl->data_in_count = 0;
wl->rx_counter = 0;
wl->rx_handled = 0;
wl->rx_current_buffer = 0;
wl->rx_last_id = 0;
wl->next_tx_complete = 0;
wl->elp = false;
wl->station_mode = STATION_ACTIVE_MODE;
wl->psm_entry_retry = 0;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
wl->rssi_thold = 0;
wl->channel = WL1251_DEFAULT_CHANNEL;
wl->monitor_present = false;
wl->joined = false;
wl1251_debugfs_reset(wl);
mutex_unlock(&wl->mutex);
}
static int wl1251_op_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1251 *wl = hw->priv;
int ret = 0;
vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
IEEE80211_VIF_SUPPORTS_UAPSD |
IEEE80211_VIF_SUPPORTS_CQM_RSSI;
wl1251_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
vif->type, vif->addr);
mutex_lock(&wl->mutex);
if (wl->vif) {
ret = -EBUSY;
goto out;
}
wl->vif = vif;
switch (vif->type) {
case NL80211_IFTYPE_STATION:
wl->bss_type = BSS_TYPE_STA_BSS;
break;
case NL80211_IFTYPE_ADHOC:
wl->bss_type = BSS_TYPE_IBSS;
break;
default:
ret = -EOPNOTSUPP;
goto out;
}
if (!ether_addr_equal_unaligned(wl->mac_addr, vif->addr)) {
memcpy(wl->mac_addr, vif->addr, ETH_ALEN);
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = wl1251_acx_station_id(wl);
if (ret < 0)
goto out;
}
out:
mutex_unlock(&wl->mutex);
return ret;
}
static void wl1251_op_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct wl1251 *wl = hw->priv;
mutex_lock(&wl->mutex);
wl1251_debug(DEBUG_MAC80211, "mac80211 remove interface");
wl->vif = NULL;
eth_zero_addr(wl->bssid);
mutex_unlock(&wl->mutex);
}
static int wl1251_build_null_data(struct wl1251 *wl)
{
struct sk_buff *skb = NULL;
int size;
void *ptr;
int ret = -ENOMEM;
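/* In IBSS mode there is no AP to address a nullfunc frame to; an empty
 * template just reserves the firmware slot. */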
if (wl->bss_type == BSS_TYPE_IBSS) {
size = sizeof(struct wl12xx_null_data_template);
ptr = NULL;
} else {
skb = ieee80211_nullfunc_get(wl->hw, wl->vif, -1, false);
if (!skb)
goto out;
size = skb->len;
ptr = skb->data;
}
ret = wl1251_cmd_template_set(wl, CMD_NULL_DATA, ptr, size);
out:
dev_kfree_skb(skb);
if (ret)
wl1251_warning("cmd build null data failed: %d", ret);
return ret;
}
static int wl1251_build_qos_null_data(struct wl1251 *wl)
{
struct ieee80211_qos_hdr template;
memset(&template, 0, sizeof(template));
memcpy(template.addr1, wl->bssid, ETH_ALEN);
memcpy(template.addr2, wl->mac_addr, ETH_ALEN);
memcpy(template.addr3, wl->bssid, ETH_ALEN);
template.frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA |
IEEE80211_STYPE_QOS_NULLFUNC |
IEEE80211_FCTL_TODS);
/* FIXME: not sure what priority to use here */
template.qos_ctrl = cpu_to_le16(0);
return wl1251_cmd_template_set(wl, CMD_QOS_NULL_DATA, &template,
sizeof(template));
}
static bool wl1251_can_do_pm(struct ieee80211_conf *conf, struct wl1251 *wl)
{
return (conf->flags & IEEE80211_CONF_PS) && !wl->monitor_present;
}
static int wl1251_op_config(struct ieee80211_hw *hw, u32 changed)
{
struct wl1251 *wl = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int channel, ret = 0;
channel = ieee80211_frequency_to_channel(
conf->chandef.chan->center_freq);
wl1251_debug(DEBUG_MAC80211,
"mac80211 config ch %d monitor %s psm %s power %d",
channel,
conf->flags & IEEE80211_CONF_MONITOR ? "on" : "off",
conf->flags & IEEE80211_CONF_PS ? "on" : "off",
conf->power_level);
mutex_lock(&wl->mutex);
ret = wl1251_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
u32 mode;
if (conf->flags & IEEE80211_CONF_MONITOR) {
wl->monitor_present = true;
mode = DF_SNIFF_MODE_ENABLE | DF_ENCRYPTION_DISABLE;
} else {
wl->monitor_present = false;
mode = 0;
}
ret = wl1251_acx_feature_cfg(wl, mode);
if (ret < 0)
goto out_sleep;
}
if (channel != wl->channel) {
wl->channel = channel;
/*
* Use ENABLE_RX command for channel switching when no
* interface is present (monitor mode only).
* This leaves the tx path disabled in firmware, whereas
* the usual JOIN command seems to transmit some frames
* at firmware level.
*/
if (wl->vif == NULL) {
wl->joined = false;
ret = wl1251_cmd_data_path_rx(wl, wl->channel, 1);
} else {
ret = wl1251_join(wl, wl->bss_type, wl->channel,
wl->beacon_int, wl->dtim_period);
}
if (ret < 0)
goto out_sleep;
}
if (wl1251_can_do_pm(conf, wl) && !wl->psm_requested) {
wl1251_debug(DEBUG_PSM, "psm enabled");
wl->psm_requested = true;
wl->dtim_period = conf->ps_dtim_period;
ret = wl1251_acx_wr_tbtt_and_dtim(wl, wl->beacon_int,
wl->dtim_period);
if (ret < 0)
goto out_sleep;
/*
* mac80211 enables PSM only if we're already associated.
*/
ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
if (ret < 0)
goto out_sleep;
} else if (!wl1251_can_do_pm(conf, wl) && wl->psm_requested) {
wl1251_debug(DEBUG_PSM, "psm disabled");
wl->psm_requested = false;
if (wl->station_mode != STATION_ACTIVE_MODE) {
ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
if (ret < 0)
goto out_sleep;
}
}
if (changed & IEEE80211_CONF_CHANGE_IDLE && !wl->scanning) {
if (conf->flags & IEEE80211_CONF_IDLE) {
ret = wl1251_ps_set_mode(wl, STATION_IDLE);
if (ret < 0)
goto out_sleep;
} else {
ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
if (ret < 0)
goto out_sleep;
ret = wl1251_join(wl, wl->bss_type, wl->channel,
wl->beacon_int, wl->dtim_period);
if (ret < 0)
goto out_sleep;
}
}
if (conf->power_level != wl->power_level) {
ret = wl1251_acx_tx_power(wl, conf->power_level);
if (ret < 0)
goto out_sleep;
wl->power_level = conf->power_level;
}
out_sleep:
wl1251_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
return ret;
}
struct wl1251_filter_params {
bool enabled;
int mc_list_length;
u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
static u64 wl1251_op_prepare_multicast(struct ieee80211_hw *hw,
struct netdev_hw_addr_list *mc_list)
{
struct wl1251_filter_params *fp;
struct netdev_hw_addr *ha;
struct wl1251 *wl = hw->priv;
if (unlikely(wl->state == WL1251_STATE_OFF))
return 0;
fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
if (!fp) {
wl1251_error("Out of memory setting filters.");
return 0;
}
/* update multicast filtering parameters */
fp->mc_list_length = 0;
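/* If the list exceeds the firmware's group address table, disable the
 * filter so all multicast frames are received. */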
if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
fp->enabled = false;
} else {
fp->enabled = true;
netdev_hw_addr_list_for_each(ha, mc_list) {
memcpy(fp->mc_list[fp->mc_list_length],
ha->addr, ETH_ALEN);
fp->mc_list_length++;
}
}
return (u64)(unsigned long)fp;
}
#define WL1251_SUPPORTED_FILTERS (FIF_ALLMULTI | \
FIF_FCSFAIL | \
FIF_BCN_PRBRESP_PROMISC | \
FIF_CONTROL | \
FIF_OTHER_BSS | \
FIF_PROBE_REQ)
static void wl1251_op_configure_filter(struct ieee80211_hw *hw,
unsigned int changed,
unsigned int *total, u64 multicast)
{
struct wl1251_filter_params *fp = (void *)(unsigned long)multicast;
struct wl1251 *wl = hw->priv;
int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 configure filter");
*total &= WL1251_SUPPORTED_FILTERS;
changed &= WL1251_SUPPORTED_FILTERS;
if (changed == 0) {
/* no filters which we support changed */
kfree(fp);
return;
}
mutex_lock(&wl->mutex);
wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
if (*total & FIF_ALLMULTI)
/*
* CFG_MC_FILTER_EN in rx_config needs to be 0 to receive
* all multicast frames
*/
wl->rx_config &= ~CFG_MC_FILTER_EN;
if (*total & FIF_FCSFAIL)
wl->rx_filter |= CFG_RX_FCS_ERROR;
if (*total & FIF_BCN_PRBRESP_PROMISC) {
wl->rx_config &= ~CFG_BSSID_FILTER_EN;
wl->rx_config &= ~CFG_SSID_FILTER_EN;
}
if (*total & FIF_CONTROL)
wl->rx_filter |= CFG_RX_CTL_EN;
if (*total & FIF_OTHER_BSS || is_zero_ether_addr(wl->bssid))
wl->rx_config &= ~CFG_BSSID_FILTER_EN;
if (*total & FIF_PROBE_REQ)
wl->rx_filter |= CFG_RX_PREQ_EN;
if (wl->state == WL1251_STATE_OFF)
goto out;
ret = wl1251_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
if (*total & FIF_ALLMULTI)
ret = wl1251_acx_group_address_tbl(wl, false, NULL, 0);
else if (fp)
ret = wl1251_acx_group_address_tbl(wl, fp->enabled,
fp->mc_list,
fp->mc_list_length);
if (ret < 0)
goto out;
/* send filters to firmware */
wl1251_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
wl1251_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
kfree(fp);
}
/* HW encryption */
static int wl1251_set_key_type(struct wl1251 *wl,
struct wl1251_cmd_set_keys *key,
enum set_key_cmd cmd,
struct ieee80211_key_conf *mac80211_key,
const u8 *addr)
{
switch (mac80211_key->cipher) {
case WLAN_CIPHER_SUITE_WEP40:
case WLAN_CIPHER_SUITE_WEP104:
if (is_broadcast_ether_addr(addr))
key->key_type = KEY_WEP_DEFAULT;
else
key->key_type = KEY_WEP_ADDR;
mac80211_key->hw_key_idx = mac80211_key->keyidx;
break;
case WLAN_CIPHER_SUITE_TKIP:
if (is_broadcast_ether_addr(addr))
key->key_type = KEY_TKIP_MIC_GROUP;
else
key->key_type = KEY_TKIP_MIC_PAIRWISE;
mac80211_key->hw_key_idx = mac80211_key->keyidx;
break;
case WLAN_CIPHER_SUITE_CCMP:
if (is_broadcast_ether_addr(addr))
key->key_type = KEY_AES_GROUP;
else
key->key_type = KEY_AES_PAIRWISE;
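/* For CCMP, have mac80211 generate the IV/PN in the frame; WEP and
 * TKIP IVs appear to be left to the firmware (flag not set above). */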
mac80211_key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
break;
default:
wl1251_error("Unknown key cipher 0x%x", mac80211_key->cipher);
return -EOPNOTSUPP;
}
return 0;
}
static int wl1251_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct wl1251 *wl = hw->priv;
struct wl1251_cmd_set_keys *wl_cmd;
const u8 *addr;
int ret;
static const u8 bcast_addr[ETH_ALEN] =
{ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff };
wl1251_debug(DEBUG_MAC80211, "mac80211 set key");
wl_cmd = kzalloc(sizeof(*wl_cmd), GFP_KERNEL);
if (!wl_cmd) {
ret = -ENOMEM;
goto out;
}
addr = sta ? sta->addr : bcast_addr;
wl1251_debug(DEBUG_CRYPT, "CMD: 0x%x", cmd);
wl1251_dump(DEBUG_CRYPT, "ADDR: ", addr, ETH_ALEN);
wl1251_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
key->cipher, key->keyidx, key->keylen, key->flags);
wl1251_dump(DEBUG_CRYPT, "KEY: ", key->key, key->keylen);
if (is_zero_ether_addr(addr)) {
/* We don't support TX-only encryption */
ret = -EOPNOTSUPP;
goto out;
}
mutex_lock(&wl->mutex);
switch (cmd) {
case SET_KEY:
if (wl->monitor_present) {
ret = -EOPNOTSUPP;
goto out_unlock;
}
wl_cmd->key_action = KEY_ADD_OR_REPLACE;
break;
case DISABLE_KEY:
wl_cmd->key_action = KEY_REMOVE;
break;
default:
wl1251_error("Unsupported key cmd 0x%x", cmd);
break;
}
ret = wl1251_ps_elp_wakeup(wl);
if (ret < 0)
goto out_unlock;
ret = wl1251_set_key_type(wl, wl_cmd, cmd, key, addr);
if (ret < 0) {
wl1251_error("Set KEY type failed");
goto out_sleep;
}
if (wl_cmd->key_type != KEY_WEP_DEFAULT)
memcpy(wl_cmd->addr, addr, ETH_ALEN);
if ((wl_cmd->key_type == KEY_TKIP_MIC_GROUP) ||
(wl_cmd->key_type == KEY_TKIP_MIC_PAIRWISE)) {
/*
* We get the key in the following form:
* TKIP (16 bytes) - TX MIC (8 bytes) - RX MIC (8 bytes)
* but the target is expecting:
* TKIP - RX MIC - TX MIC
*/
memcpy(wl_cmd->key, key->key, 16);
memcpy(wl_cmd->key + 16, key->key + 24, 8);
memcpy(wl_cmd->key + 24, key->key + 16, 8);
} else {
memcpy(wl_cmd->key, key->key, key->keylen);
}
wl_cmd->key_size = key->keylen;
wl_cmd->id = key->keyidx;
wl_cmd->ssid_profile = 0;
wl1251_dump(DEBUG_CRYPT, "TARGET KEY: ", wl_cmd, sizeof(*wl_cmd));
ret = wl1251_cmd_send(wl, CMD_SET_KEYS, wl_cmd, sizeof(*wl_cmd));
if (ret < 0) {
wl1251_warning("could not set keys");
goto out_sleep;
}
out_sleep:
wl1251_ps_elp_sleep(wl);
out_unlock:
mutex_unlock(&wl->mutex);
out:
kfree(wl_cmd);
return ret;
}
static int wl1251_op_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct cfg80211_scan_request *req = &hw_req->req;
struct wl1251 *wl = hw->priv;
struct sk_buff *skb;
size_t ssid_len = 0;
u8 *ssid = NULL;
int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan");
if (req->n_ssids) {
ssid = req->ssids[0].ssid;
ssid_len = req->ssids[0].ssid_len;
}
mutex_lock(&wl->mutex);
if (wl->scanning) {
wl1251_debug(DEBUG_SCAN, "scan already in progress");
ret = -EINVAL;
goto out;
}
ret = wl1251_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
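/* Leave the idle state before scanning: go active and rejoin; the
 * out_idle error path below restores the idle state. */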
if (hw->conf.flags & IEEE80211_CONF_IDLE) {
ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
if (ret < 0)
goto out_sleep;
ret = wl1251_join(wl, wl->bss_type, wl->channel,
wl->beacon_int, wl->dtim_period);
if (ret < 0)
goto out_sleep;
}
skb = ieee80211_probereq_get(wl->hw, wl->vif->addr, ssid, ssid_len,
req->ie_len);
if (!skb) {
ret = -ENOMEM;
goto out_idle;
}
if (req->ie_len)
skb_put_data(skb, req->ie, req->ie_len);
ret = wl1251_cmd_template_set(wl, CMD_PROBE_REQ, skb->data,
skb->len);
dev_kfree_skb(skb);
if (ret < 0)
goto out_idle;
ret = wl1251_cmd_trigger_scan_to(wl, 0);
if (ret < 0)
goto out_idle;
wl->scanning = true;
ret = wl1251_cmd_scan(wl, ssid, ssid_len, req->channels,
req->n_channels, WL1251_SCAN_NUM_PROBES);
if (ret < 0) {
wl1251_debug(DEBUG_SCAN, "scan failed %d", ret);
wl->scanning = false;
goto out_idle;
}
goto out_sleep;
out_idle:
if (hw->conf.flags & IEEE80211_CONF_IDLE)
ret = wl1251_ps_set_mode(wl, STATION_IDLE);
out_sleep:
wl1251_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static int wl1251_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct wl1251 *wl = hw->priv;
int ret;
mutex_lock(&wl->mutex);
ret = wl1251_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
ret = wl1251_acx_rts_threshold(wl, (u16) value);
if (ret < 0)
wl1251_warning("wl1251_op_set_rts_threshold failed: %d", ret);
wl1251_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static void wl1251_op_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
u64 changed)
{
struct wl1251 *wl = hw->priv;
struct sk_buff *beacon, *skb;
bool enable;
int ret;
wl1251_debug(DEBUG_MAC80211, "mac80211 bss info changed");
mutex_lock(&wl->mutex);
ret = wl1251_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
if (changed & BSS_CHANGED_CQM) {
ret = wl1251_acx_low_rssi(wl, bss_conf->cqm_rssi_thold,
WL1251_DEFAULT_LOW_RSSI_WEIGHT,
WL1251_DEFAULT_LOW_RSSI_DEPTH,
WL1251_ACX_LOW_RSSI_TYPE_EDGE);
if (ret < 0)
goto out;
wl->rssi_thold = bss_conf->cqm_rssi_thold;
}
if ((changed & BSS_CHANGED_BSSID) &&
memcmp(wl->bssid, bss_conf->bssid, ETH_ALEN)) {
memcpy(wl->bssid, bss_conf->bssid, ETH_ALEN);
if (!is_zero_ether_addr(wl->bssid)) {
ret = wl1251_build_null_data(wl);
if (ret < 0)
goto out_sleep;
ret = wl1251_build_qos_null_data(wl);
if (ret < 0)
goto out_sleep;
ret = wl1251_join(wl, wl->bss_type, wl->channel,
wl->beacon_int, wl->dtim_period);
if (ret < 0)
goto out_sleep;
}
}
if (changed & BSS_CHANGED_ASSOC) {
if (vif->cfg.assoc) {
wl->beacon_int = bss_conf->beacon_int;
skb = ieee80211_pspoll_get(wl->hw, wl->vif);
if (!skb)
goto out_sleep;
ret = wl1251_cmd_template_set(wl, CMD_PS_POLL,
skb->data,
skb->len);
dev_kfree_skb(skb);
if (ret < 0)
goto out_sleep;
ret = wl1251_acx_aid(wl, vif->cfg.aid);
if (ret < 0)
goto out_sleep;
} else {
/* use defaults when not associated */
wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
}
}
if (changed & BSS_CHANGED_ERP_SLOT) {
if (bss_conf->use_short_slot)
ret = wl1251_acx_slot(wl, SLOT_TIME_SHORT);
else
ret = wl1251_acx_slot(wl, SLOT_TIME_LONG);
if (ret < 0) {
wl1251_warning("Set slot time failed %d", ret);
goto out_sleep;
}
}
if (changed & BSS_CHANGED_ERP_PREAMBLE) {
if (bss_conf->use_short_preamble)
wl1251_acx_set_preamble(wl, ACX_PREAMBLE_SHORT);
else
wl1251_acx_set_preamble(wl, ACX_PREAMBLE_LONG);
}
if (changed & BSS_CHANGED_ERP_CTS_PROT) {
if (bss_conf->use_cts_prot)
ret = wl1251_acx_cts_protect(wl, CTSPROTECT_ENABLE);
else
ret = wl1251_acx_cts_protect(wl, CTSPROTECT_DISABLE);
if (ret < 0) {
wl1251_warning("Set ctsprotect failed %d", ret);
goto out_sleep;
}
}
if (changed & BSS_CHANGED_ARP_FILTER) {
__be32 addr = vif->cfg.arp_addr_list[0];
WARN_ON(wl->bss_type != BSS_TYPE_STA_BSS);
enable = vif->cfg.arp_addr_cnt == 1 && vif->cfg.assoc;
ret = wl1251_acx_arp_ip_filter(wl, enable, addr);
if (ret < 0)
goto out_sleep;
}
if (changed & BSS_CHANGED_BEACON) {
beacon = ieee80211_beacon_get(hw, vif, 0);
if (!beacon)
goto out_sleep;
ret = wl1251_cmd_template_set(wl, CMD_BEACON, beacon->data,
beacon->len);
if (ret < 0) {
dev_kfree_skb(beacon);
goto out_sleep;
}
ret = wl1251_cmd_template_set(wl, CMD_PROBE_RESP, beacon->data,
beacon->len);
dev_kfree_skb(beacon);
if (ret < 0)
goto out_sleep;
ret = wl1251_join(wl, wl->bss_type, wl->channel,
wl->beacon_int, wl->dtim_period);
if (ret < 0)
goto out_sleep;
}
out_sleep:
wl1251_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
}
/* can't be const, mac80211 writes to this */
static struct ieee80211_rate wl1251_rates[] = {
{ .bitrate = 10,
.hw_value = 0x1,
.hw_value_short = 0x1, },
{ .bitrate = 20,
.hw_value = 0x2,
.hw_value_short = 0x2,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 55,
.hw_value = 0x4,
.hw_value_short = 0x4,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 110,
.hw_value = 0x20,
.hw_value_short = 0x20,
.flags = IEEE80211_RATE_SHORT_PREAMBLE },
{ .bitrate = 60,
.hw_value = 0x8,
.hw_value_short = 0x8, },
{ .bitrate = 90,
.hw_value = 0x10,
.hw_value_short = 0x10, },
{ .bitrate = 120,
.hw_value = 0x40,
.hw_value_short = 0x40, },
{ .bitrate = 180,
.hw_value = 0x80,
.hw_value_short = 0x80, },
{ .bitrate = 240,
.hw_value = 0x200,
.hw_value_short = 0x200, },
{ .bitrate = 360,
.hw_value = 0x400,
.hw_value_short = 0x400, },
{ .bitrate = 480,
.hw_value = 0x800,
.hw_value_short = 0x800, },
{ .bitrate = 540,
.hw_value = 0x1000,
.hw_value_short = 0x1000, },
};
/* can't be const, mac80211 writes to this */
static struct ieee80211_channel wl1251_channels[] = {
{ .hw_value = 1, .center_freq = 2412},
{ .hw_value = 2, .center_freq = 2417},
{ .hw_value = 3, .center_freq = 2422},
{ .hw_value = 4, .center_freq = 2427},
{ .hw_value = 5, .center_freq = 2432},
{ .hw_value = 6, .center_freq = 2437},
{ .hw_value = 7, .center_freq = 2442},
{ .hw_value = 8, .center_freq = 2447},
{ .hw_value = 9, .center_freq = 2452},
{ .hw_value = 10, .center_freq = 2457},
{ .hw_value = 11, .center_freq = 2462},
{ .hw_value = 12, .center_freq = 2467},
{ .hw_value = 13, .center_freq = 2472},
};
static int wl1251_op_conf_tx(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
unsigned int link_id, u16 queue,
const struct ieee80211_tx_queue_params *params)
{
enum wl1251_acx_ps_scheme ps_scheme;
struct wl1251 *wl = hw->priv;
int ret;
mutex_lock(&wl->mutex);
wl1251_debug(DEBUG_MAC80211, "mac80211 conf tx %d", queue);
ret = wl1251_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
/* mac80211 uses units of 32 usec */
ret = wl1251_acx_ac_cfg(wl, wl1251_tx_get_queue(queue),
params->cw_min, params->cw_max,
params->aifs, params->txop * 32);
if (ret < 0)
goto out_sleep;
if (params->uapsd)
ps_scheme = WL1251_ACX_PS_SCHEME_UPSD_TRIGGER;
else
ps_scheme = WL1251_ACX_PS_SCHEME_LEGACY;
ret = wl1251_acx_tid_cfg(wl, wl1251_tx_get_queue(queue),
CHANNEL_TYPE_EDCF,
wl1251_tx_get_queue(queue), ps_scheme,
WL1251_ACX_ACK_POLICY_LEGACY);
if (ret < 0)
goto out_sleep;
out_sleep:
wl1251_ps_elp_sleep(wl);
out:
mutex_unlock(&wl->mutex);
return ret;
}
static int wl1251_op_get_survey(struct ieee80211_hw *hw, int idx,
struct survey_info *survey)
{
struct wl1251 *wl = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
if (idx != 0)
return -ENOENT;
survey->channel = conf->chandef.chan;
survey->filled = SURVEY_INFO_NOISE_DBM;
survey->noise = wl->noise;
return 0;
}
/* can't be const, mac80211 writes to this */
static struct ieee80211_supported_band wl1251_band_2ghz = {
.channels = wl1251_channels,
.n_channels = ARRAY_SIZE(wl1251_channels),
.bitrates = wl1251_rates,
.n_bitrates = ARRAY_SIZE(wl1251_rates),
};
static const struct ieee80211_ops wl1251_ops = {
.start = wl1251_op_start,
.stop = wl1251_op_stop,
.add_interface = wl1251_op_add_interface,
.remove_interface = wl1251_op_remove_interface,
.config = wl1251_op_config,
.prepare_multicast = wl1251_op_prepare_multicast,
.configure_filter = wl1251_op_configure_filter,
.tx = wl1251_op_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.set_key = wl1251_op_set_key,
.hw_scan = wl1251_op_hw_scan,
.bss_info_changed = wl1251_op_bss_info_changed,
.set_rts_threshold = wl1251_op_set_rts_threshold,
.conf_tx = wl1251_op_conf_tx,
.get_survey = wl1251_op_get_survey,
};
static int wl1251_read_eeprom_byte(struct wl1251 *wl, off_t offset, u8 *data)
{
unsigned long timeout;
wl1251_reg_write32(wl, EE_ADDR, offset);
wl1251_reg_write32(wl, EE_CTL, EE_CTL_READ);
/* EE_CTL_READ clears when data is ready */
timeout = jiffies + msecs_to_jiffies(100);
while (1) {
if (!(wl1251_reg_read32(wl, EE_CTL) & EE_CTL_READ))
break;
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
msleep(1);
}
*data = wl1251_reg_read32(wl, EE_DATA);
return 0;
}
static int wl1251_read_eeprom(struct wl1251 *wl, off_t offset,
u8 *data, size_t len)
{
size_t i;
int ret;
wl1251_reg_write32(wl, EE_START, 0);
for (i = 0; i < len; i++) {
ret = wl1251_read_eeprom_byte(wl, offset + i, &data[i]);
if (ret < 0)
return ret;
}
return 0;
}
static int wl1251_read_eeprom_mac(struct wl1251 *wl)
{
u8 mac[ETH_ALEN];
int i, ret;
wl1251_set_partition(wl, 0, 0, REGISTERS_BASE, REGISTERS_DOWN_SIZE);
ret = wl1251_read_eeprom(wl, 0x1c, mac, sizeof(mac));
if (ret < 0) {
wl1251_warning("failed to read MAC address from EEPROM");
return ret;
}
/* MAC is stored in reverse order */
for (i = 0; i < ETH_ALEN; i++)
wl->mac_addr[i] = mac[ETH_ALEN - i - 1];
return 0;
}
#define NVS_OFF_MAC_LEN 0x19
#define NVS_OFF_MAC_ADDR_LO 0x1a
#define NVS_OFF_MAC_ADDR_HI 0x1b
#define NVS_OFF_MAC_DATA 0x1c
static int wl1251_check_nvs_mac(struct wl1251 *wl)
{
if (wl->nvs_len < 0x24)
return -ENODATA;
/* length is 2 and data address is 0x546c (ANDed with 0xfffe) */
if (wl->nvs[NVS_OFF_MAC_LEN] != 2 ||
wl->nvs[NVS_OFF_MAC_ADDR_LO] != 0x6d ||
wl->nvs[NVS_OFF_MAC_ADDR_HI] != 0x54)
return -EINVAL;
return 0;
}
static int wl1251_read_nvs_mac(struct wl1251 *wl)
{
u8 mac[ETH_ALEN];
int i, ret;
ret = wl1251_check_nvs_mac(wl);
if (ret)
return ret;
/* MAC is stored in reverse order */
for (i = 0; i < ETH_ALEN; i++)
mac[i] = wl->nvs[NVS_OFF_MAC_DATA + ETH_ALEN - i - 1];
/* 00:00:20:07:03:09 is the MAC in the example wl1251-nvs.bin file, so treat it as invalid */
if (ether_addr_equal_unaligned(mac, "\x00\x00\x20\x07\x03\x09"))
return -EINVAL;
memcpy(wl->mac_addr, mac, ETH_ALEN);
return 0;
}
static int wl1251_write_nvs_mac(struct wl1251 *wl)
{
int i, ret;
ret = wl1251_check_nvs_mac(wl);
if (ret)
return ret;
/* MAC is stored in reverse order */
for (i = 0; i < ETH_ALEN; i++)
wl->nvs[NVS_OFF_MAC_DATA + i] = wl->mac_addr[ETH_ALEN - i - 1];
return 0;
}
static int wl1251_register_hw(struct wl1251 *wl)
{
int ret;
if (wl->mac80211_registered)
return 0;
SET_IEEE80211_PERM_ADDR(wl->hw, wl->mac_addr);
ret = ieee80211_register_hw(wl->hw);
if (ret < 0) {
wl1251_error("unable to register mac80211 hw: %d", ret);
return ret;
}
wl->mac80211_registered = true;
wl1251_notice("loaded");
return 0;
}
int wl1251_init_ieee80211(struct wl1251 *wl)
{
int ret;
/* The tx descriptor buffer and the TKIP space */
wl->hw->extra_tx_headroom = sizeof(struct tx_double_buffer_desc)
+ WL1251_TKIP_IV_SPACE;
/* unit us */
/* FIXME: find a proper value */
ieee80211_hw_set(wl->hw, SIGNAL_DBM);
ieee80211_hw_set(wl->hw, SUPPORTS_PS);
wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_ADHOC);
wl->hw->wiphy->max_scan_ssids = 1;
/* We set max_scan_ie_len to an arbitrary value so that wpa_supplicant scans
 * do not fail, as the driver will ignore the extra passed IEs anyway
 */
wl->hw->wiphy->max_scan_ie_len = 512;
wl->hw->wiphy->bands[NL80211_BAND_2GHZ] = &wl1251_band_2ghz;
wl->hw->queues = 4;
if (wl->nvs == NULL && !wl->use_eeprom) {
ret = wl1251_fetch_nvs(wl);
if (ret < 0)
goto out;
}
if (wl->use_eeprom)
ret = wl1251_read_eeprom_mac(wl);
else
ret = wl1251_read_nvs_mac(wl);
if (ret == 0 && !is_valid_ether_addr(wl->mac_addr))
ret = -EINVAL;
if (ret < 0) {
/*
* In case our MAC address is not correctly set,
* we use a random but Nokia MAC.
*/
static const u8 nokia_oui[3] = {0x00, 0x1f, 0xdf};
memcpy(wl->mac_addr, nokia_oui, 3);
get_random_bytes(wl->mac_addr + 3, 3);
if (!wl->use_eeprom)
wl1251_write_nvs_mac(wl);
wl1251_warning("MAC address in eeprom or nvs data is not valid");
wl1251_warning("Setting random MAC address: %pM", wl->mac_addr);
}
ret = wl1251_register_hw(wl);
if (ret)
goto out;
wl1251_debugfs_init(wl);
wl1251_notice("initialized");
ret = 0;
out:
return ret;
}
EXPORT_SYMBOL_GPL(wl1251_init_ieee80211);
struct ieee80211_hw *wl1251_alloc_hw(void)
{
struct ieee80211_hw *hw;
struct wl1251 *wl;
int i;
hw = ieee80211_alloc_hw(sizeof(*wl), &wl1251_ops);
if (!hw) {
wl1251_error("could not alloc ieee80211_hw");
return ERR_PTR(-ENOMEM);
}
wl = hw->priv;
memset(wl, 0, sizeof(*wl));
wl->hw = hw;
wl->data_in_count = 0;
skb_queue_head_init(&wl->tx_queue);
INIT_DELAYED_WORK(&wl->elp_work, wl1251_elp_work);
wl->channel = WL1251_DEFAULT_CHANNEL;
wl->monitor_present = false;
wl->joined = false;
wl->scanning = false;
wl->bss_type = MAX_BSS_TYPE;
wl->default_key = 0;
wl->listen_int = 1;
wl->rx_counter = 0;
wl->rx_handled = 0;
wl->rx_current_buffer = 0;
wl->rx_last_id = 0;
wl->rx_config = WL1251_DEFAULT_RX_CONFIG;
wl->rx_filter = WL1251_DEFAULT_RX_FILTER;
wl->elp = false;
wl->station_mode = STATION_ACTIVE_MODE;
wl->psm_requested = false;
wl->psm_entry_retry = 0;
wl->tx_queue_stopped = false;
wl->power_level = WL1251_DEFAULT_POWER_LEVEL;
wl->rssi_thold = 0;
wl->beacon_int = WL1251_DEFAULT_BEACON_INT;
wl->dtim_period = WL1251_DEFAULT_DTIM_PERIOD;
wl->vif = NULL;
for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
wl->tx_frames[i] = NULL;
wl->next_tx_complete = 0;
INIT_WORK(&wl->irq_work, wl1251_irq_work);
INIT_WORK(&wl->tx_work, wl1251_tx_work);
wl->state = WL1251_STATE_OFF;
mutex_init(&wl->mutex);
spin_lock_init(&wl->wl_lock);
wl->tx_mgmt_frm_rate = DEFAULT_HW_GEN_TX_RATE;
wl->tx_mgmt_frm_mod = DEFAULT_HW_GEN_MODULATION_TYPE;
wl->rx_descriptor = kmalloc(sizeof(*wl->rx_descriptor), GFP_KERNEL);
if (!wl->rx_descriptor) {
wl1251_error("could not allocate memory for rx descriptor");
ieee80211_free_hw(hw);
return ERR_PTR(-ENOMEM);
}
return hw;
}
EXPORT_SYMBOL_GPL(wl1251_alloc_hw);
int wl1251_free_hw(struct wl1251 *wl)
{
ieee80211_unregister_hw(wl->hw);
wl1251_debugfs_exit(wl);
kfree(wl->target_mem_map);
kfree(wl->data_path);
vfree(wl->fw);
wl->fw = NULL;
kfree(wl->nvs);
wl->nvs = NULL;
kfree(wl->rx_descriptor);
wl->rx_descriptor = NULL;
ieee80211_free_hw(wl->hw);
return 0;
}
EXPORT_SYMBOL_GPL(wl1251_free_hw);
MODULE_DESCRIPTION("TI wl1251 Wireless LAN Driver Core");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Kalle Valo <[email protected]>");
MODULE_FIRMWARE(WL1251_FW_NAME);
MODULE_FIRMWARE(WL1251_NVS_NAME);
| linux-master | drivers/net/wireless/ti/wl1251/main.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1251
*
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*/
#include "wl1251.h"
#include "reg.h"
#include "io.h"
#include "event.h"
#include "ps.h"
static int wl1251_event_scan_complete(struct wl1251 *wl,
struct event_mailbox *mbox)
{
int ret = 0;
wl1251_debug(DEBUG_EVENT, "status: 0x%x, channels: %d",
mbox->scheduled_scan_status,
mbox->scheduled_scan_channels);
if (wl->scanning) {
struct cfg80211_scan_info info = {
.aborted = false,
};
ieee80211_scan_completed(wl->hw, &info);
wl1251_debug(DEBUG_MAC80211, "mac80211 hw scan completed");
wl->scanning = false;
if (wl->hw->conf.flags & IEEE80211_CONF_IDLE)
ret = wl1251_ps_set_mode(wl, STATION_IDLE);
}
return ret;
}
#define WL1251_PSM_ENTRY_RETRIES 3
static int wl1251_event_ps_report(struct wl1251 *wl,
struct event_mailbox *mbox)
{
int ret = 0;
wl1251_debug(DEBUG_EVENT, "ps status: %x", mbox->ps_status);
switch (mbox->ps_status) {
case EVENT_ENTER_POWER_SAVE_FAIL:
wl1251_debug(DEBUG_PSM, "PSM entry failed");
if (wl->station_mode != STATION_POWER_SAVE_MODE) {
/* remain in active mode */
wl->psm_entry_retry = 0;
break;
}
if (wl->psm_entry_retry < WL1251_PSM_ENTRY_RETRIES) {
wl->psm_entry_retry++;
ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
} else {
wl1251_error("Power save entry failed, giving up");
wl->psm_entry_retry = 0;
}
break;
case EVENT_ENTER_POWER_SAVE_SUCCESS:
case EVENT_EXIT_POWER_SAVE_FAIL:
case EVENT_EXIT_POWER_SAVE_SUCCESS:
default:
wl->psm_entry_retry = 0;
break;
}
return ret;
}
static void wl1251_event_mbox_dump(struct event_mailbox *mbox)
{
wl1251_debug(DEBUG_EVENT, "MBOX DUMP:");
wl1251_debug(DEBUG_EVENT, "\tvector: 0x%x", mbox->events_vector);
wl1251_debug(DEBUG_EVENT, "\tmask: 0x%x", mbox->events_mask);
}
static int wl1251_event_process(struct wl1251 *wl, struct event_mailbox *mbox)
{
int ret;
u32 vector;
wl1251_event_mbox_dump(mbox);
vector = mbox->events_vector & ~(mbox->events_mask);
wl1251_debug(DEBUG_EVENT, "vector: 0x%x", vector);
if (vector & SCAN_COMPLETE_EVENT_ID) {
ret = wl1251_event_scan_complete(wl, mbox);
if (ret < 0)
return ret;
}
if (vector & BSS_LOSE_EVENT_ID) {
wl1251_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
if (wl->psm_requested &&
wl->station_mode != STATION_ACTIVE_MODE) {
ret = wl1251_ps_set_mode(wl, STATION_ACTIVE_MODE);
if (ret < 0)
return ret;
}
}
if (vector & PS_REPORT_EVENT_ID) {
wl1251_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
ret = wl1251_event_ps_report(wl, mbox);
if (ret < 0)
return ret;
}
if (vector & SYNCHRONIZATION_TIMEOUT_EVENT_ID) {
wl1251_debug(DEBUG_EVENT, "SYNCHRONIZATION_TIMEOUT_EVENT");
/* indicate to the stack, that beacons have been lost */
if (wl->vif && wl->vif->type == NL80211_IFTYPE_STATION)
ieee80211_beacon_loss(wl->vif);
}
if (vector & REGAINED_BSS_EVENT_ID) {
if (wl->psm_requested) {
ret = wl1251_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
if (ret < 0)
return ret;
}
}
if (wl->vif && wl->rssi_thold) {
if (vector & ROAMING_TRIGGER_LOW_RSSI_EVENT_ID) {
wl1251_debug(DEBUG_EVENT,
"ROAMING_TRIGGER_LOW_RSSI_EVENT");
ieee80211_cqm_rssi_notify(wl->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_LOW,
0, GFP_KERNEL);
}
if (vector & ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID) {
wl1251_debug(DEBUG_EVENT,
"ROAMING_TRIGGER_REGAINED_RSSI_EVENT");
ieee80211_cqm_rssi_notify(wl->vif,
NL80211_CQM_RSSI_THRESHOLD_EVENT_HIGH,
0, GFP_KERNEL);
}
}
return 0;
}
/*
* Poll the mailbox event field until any of the bits in the mask is set or a
* timeout occurs (WL1251_EVENT_TIMEOUT in msecs)
*/
int wl1251_event_wait(struct wl1251 *wl, u32 mask, int timeout_ms)
{
u32 events_vector, event;
unsigned long timeout;
timeout = jiffies + msecs_to_jiffies(timeout_ms);
do {
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
msleep(1);
/* read from both event fields */
events_vector = wl1251_mem_read32(wl, wl->mbox_ptr[0]);
event = events_vector & mask;
events_vector = wl1251_mem_read32(wl, wl->mbox_ptr[1]);
event |= events_vector & mask;
} while (!event);
return 0;
}
int wl1251_event_unmask(struct wl1251 *wl)
{
int ret;
ret = wl1251_acx_event_mbox_mask(wl, ~(wl->event_mask));
if (ret < 0)
return ret;
return 0;
}
void wl1251_event_mbox_config(struct wl1251 *wl)
{
wl->mbox_ptr[0] = wl1251_reg_read32(wl, REG_EVENT_MAILBOX_PTR);
wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
wl1251_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
wl->mbox_ptr[0], wl->mbox_ptr[1]);
}
int wl1251_event_handle(struct wl1251 *wl, u8 mbox_num)
{
struct event_mailbox *mbox;
int ret;
wl1251_debug(DEBUG_EVENT, "EVENT on mbox %d", mbox_num);
if (mbox_num > 1)
return -EINVAL;
mbox = kmalloc(sizeof(*mbox), GFP_KERNEL);
if (!mbox) {
wl1251_error("can not allocate mbox buffer");
return -ENOMEM;
}
/* first we read the mbox descriptor */
wl1251_mem_read(wl, wl->mbox_ptr[mbox_num], mbox,
sizeof(*mbox));
/* process the descriptor */
ret = wl1251_event_process(wl, mbox);
kfree(mbox);
if (ret < 0)
return ret;
/* then we let the firmware know it can go on...*/
wl1251_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK);
return 0;
}
| linux-master | drivers/net/wireless/ti/wl1251/event.c |
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1251
*
* Copyright (c) 1998-2007 Texas Instruments Incorporated
* Copyright (C) 2008 Nokia Corporation
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include "wl1251.h"
#include "reg.h"
#include "tx.h"
#include "ps.h"
#include "io.h"
#include "event.h"
static bool wl1251_tx_double_buffer_busy(struct wl1251 *wl, u32 data_out_count)
{
int used, data_in_count;
data_in_count = wl->data_in_count;
if (data_in_count < data_out_count)
/* data_in_count has wrapped */
data_in_count += TX_STATUS_DATA_OUT_COUNT_MASK + 1;
used = data_in_count - data_out_count;
WARN_ON(used < 0);
WARN_ON(used > DP_TX_PACKET_RING_CHUNK_NUM);
return used >= DP_TX_PACKET_RING_CHUNK_NUM;
}
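/*
* Worked example, assuming a 4-bit wrap counter: with data_out_count == 14
* and data_in_count == 1, data_in_count first wraps to 17, giving
* used == 3 outstanding slots; the path is reported busy once 'used'
* reaches DP_TX_PACKET_RING_CHUNK_NUM.
*/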
static int wl1251_tx_path_status(struct wl1251 *wl)
{
u32 status, addr, data_out_count;
bool busy;
addr = wl->data_path->tx_control_addr;
status = wl1251_mem_read32(wl, addr);
data_out_count = status & TX_STATUS_DATA_OUT_COUNT_MASK;
busy = wl1251_tx_double_buffer_busy(wl, data_out_count);
if (busy)
return -EBUSY;
return 0;
}
static int wl1251_tx_id(struct wl1251 *wl, struct sk_buff *skb)
{
int i;
for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
if (wl->tx_frames[i] == NULL) {
wl->tx_frames[i] = skb;
return i;
}
return -EBUSY;
}
static void wl1251_tx_control(struct tx_double_buffer_desc *tx_hdr,
struct ieee80211_tx_info *control, u16 fc)
{
*(u16 *)&tx_hdr->control = 0;
tx_hdr->control.rate_policy = 0;
/* 802.11 packets */
tx_hdr->control.packet_type = 0;
/* Also disable retry and ACK policy for injected packets */
if ((control->flags & IEEE80211_TX_CTL_NO_ACK) ||
(control->flags & IEEE80211_TX_CTL_INJECTED)) {
tx_hdr->control.rate_policy = 1;
tx_hdr->control.ack_policy = 1;
}
tx_hdr->control.tx_complete = 1;
if ((fc & IEEE80211_FTYPE_DATA) &&
((fc & IEEE80211_STYPE_QOS_DATA) ||
(fc & IEEE80211_STYPE_QOS_NULLFUNC)))
tx_hdr->control.qos = 1;
}
/* RSN + MIC = 8 + 8 = 16 bytes (worst case - AES). */
#define MAX_MSDU_SECURITY_LENGTH 16
#define MAX_MPDU_SECURITY_LENGTH 16
#define WLAN_QOS_HDR_LEN 26
#define MAX_MPDU_HEADER_AND_SECURITY (MAX_MPDU_SECURITY_LENGTH + \
WLAN_QOS_HDR_LEN)
#define HW_BLOCK_SIZE 252
static void wl1251_tx_frag_block_num(struct tx_double_buffer_desc *tx_hdr)
{
u16 payload_len, frag_threshold, mem_blocks;
u16 num_mpdus, mem_blocks_per_frag;
frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD;
tx_hdr->frag_threshold = cpu_to_le16(frag_threshold);
payload_len = le16_to_cpu(tx_hdr->length) + MAX_MSDU_SECURITY_LENGTH;
if (payload_len > frag_threshold) {
mem_blocks_per_frag =
((frag_threshold + MAX_MPDU_HEADER_AND_SECURITY) /
HW_BLOCK_SIZE) + 1;
num_mpdus = payload_len / frag_threshold;
mem_blocks = num_mpdus * mem_blocks_per_frag;
payload_len -= num_mpdus * frag_threshold;
num_mpdus++;
} else {
mem_blocks_per_frag = 0;
mem_blocks = 0;
num_mpdus = 1;
}
mem_blocks += (payload_len / HW_BLOCK_SIZE) + 1;
if (num_mpdus > 1)
mem_blocks += min(num_mpdus, mem_blocks_per_frag);
tx_hdr->num_mem_blocks = mem_blocks;
}
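/*
* Worked example for the non-fragmented path: a 1500-byte MSDU gives
* payload_len = 1500 + 16 = 1516, below the 2352-byte
* IEEE80211_MAX_FRAG_THRESHOLD, so mem_blocks = 1516 / 252 + 1 = 7
* hardware blocks for a single MPDU.
*/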
static int wl1251_tx_fill_hdr(struct wl1251 *wl, struct sk_buff *skb,
struct ieee80211_tx_info *control)
{
struct tx_double_buffer_desc *tx_hdr;
struct ieee80211_rate *rate;
int id;
u16 fc;
if (!skb)
return -EINVAL;
id = wl1251_tx_id(wl, skb);
if (id < 0)
return id;
fc = *(u16 *)skb->data;
tx_hdr = skb_push(skb, sizeof(*tx_hdr));
tx_hdr->length = cpu_to_le16(skb->len - sizeof(*tx_hdr));
rate = ieee80211_get_tx_rate(wl->hw, control);
tx_hdr->rate = cpu_to_le16(rate->hw_value);
tx_hdr->expiry_time = cpu_to_le32(1 << 16);
tx_hdr->id = id;
tx_hdr->xmit_queue = wl1251_tx_get_queue(skb_get_queue_mapping(skb));
wl1251_tx_control(tx_hdr, control, fc);
wl1251_tx_frag_block_num(tx_hdr);
return 0;
}
/* We copy the packet to the target */
static int wl1251_tx_send_packet(struct wl1251 *wl, struct sk_buff *skb,
struct ieee80211_tx_info *control)
{
struct tx_double_buffer_desc *tx_hdr;
int len;
u32 addr;
if (!skb)
return -EINVAL;
tx_hdr = (struct tx_double_buffer_desc *) skb->data;
if (control->control.hw_key &&
control->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
int hdrlen;
__le16 fc;
u16 length;
u8 *pos;
fc = *(__le16 *)(skb->data + sizeof(*tx_hdr));
length = le16_to_cpu(tx_hdr->length) + WL1251_TKIP_IV_SPACE;
tx_hdr->length = cpu_to_le16(length);
hdrlen = ieee80211_hdrlen(fc);
pos = skb_push(skb, WL1251_TKIP_IV_SPACE);
memmove(pos, pos + WL1251_TKIP_IV_SPACE,
sizeof(*tx_hdr) + hdrlen);
}
/*
* Revisit. This is a workaround for getting non-aligned packets.
* This happens at least with EAPOL packets from user space.
* Our DMA requires packets to be aligned on a 4-byte boundary.
*/
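/*
* Example: with skb->data ending in 0x...2, offset = (4 - 2) & 3 = 2,
* so the payload is shifted up by two bytes to restore the 4-byte
* alignment the DMA engine expects.
*/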
if (unlikely((long)skb->data & 0x03)) {
int offset = (4 - (long)skb->data) & 0x03;
wl1251_debug(DEBUG_TX, "skb offset %d", offset);
/* check whether the current skb can be used */
if (skb_cloned(skb) || (skb_tailroom(skb) < offset)) {
struct sk_buff *newskb = skb_copy_expand(skb, 0, 3,
GFP_KERNEL);
if (unlikely(newskb == NULL))
return -EINVAL;
tx_hdr = (struct tx_double_buffer_desc *) newskb->data;
dev_kfree_skb_any(skb);
wl->tx_frames[tx_hdr->id] = skb = newskb;
offset = (4 - (long)skb->data) & 0x03;
wl1251_debug(DEBUG_TX, "new skb offset %d", offset);
}
/* align the buffer on a 4-byte boundary */
if (offset) {
unsigned char *src = skb->data;
skb_reserve(skb, offset);
memmove(skb->data, src, skb->len);
tx_hdr = (struct tx_double_buffer_desc *) skb->data;
}
}
/* Our skb->data at this point includes the HW header */
len = WL1251_TX_ALIGN(skb->len);
if (wl->data_in_count & 0x1)
addr = wl->data_path->tx_packet_ring_addr +
wl->data_path->tx_packet_ring_chunk_size;
else
addr = wl->data_path->tx_packet_ring_addr;
wl1251_mem_write(wl, addr, skb->data, len);
wl1251_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u rate 0x%x "
"queue %d", tx_hdr->id, skb, tx_hdr->length,
tx_hdr->rate, tx_hdr->xmit_queue);
return 0;
}
static void wl1251_tx_trigger(struct wl1251 *wl)
{
u32 data, addr;
if (wl->data_in_count & 0x1) {
addr = ACX_REG_INTERRUPT_TRIG_H;
data = INTR_TRIG_TX_PROC1;
} else {
addr = ACX_REG_INTERRUPT_TRIG;
data = INTR_TRIG_TX_PROC0;
}
wl1251_reg_write32(wl, addr, data);
/* Bumping data in */
wl->data_in_count = (wl->data_in_count + 1) &
TX_STATUS_DATA_OUT_COUNT_MASK;
}
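/*
* The trigger mirrors the address selection in wl1251_tx_send_packet():
* even values of data_in_count use the first half of the double buffer
* and fire INTR_TRIG_TX_PROC0, odd values use the second half and fire
* INTR_TRIG_TX_PROC1.
*/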
static void enable_tx_for_packet_injection(struct wl1251 *wl)
{
int ret;
ret = wl1251_cmd_join(wl, BSS_TYPE_STA_BSS, wl->channel,
wl->beacon_int, wl->dtim_period);
if (ret < 0) {
wl1251_warning("join failed");
return;
}
ret = wl1251_event_wait(wl, JOIN_EVENT_COMPLETE_ID, 100);
if (ret < 0) {
wl1251_warning("join timeout");
return;
}
wl->joined = true;
}
/* caller must hold wl->mutex */
static int wl1251_tx_frame(struct wl1251 *wl, struct sk_buff *skb)
{
struct ieee80211_tx_info *info;
int ret = 0;
u8 idx;
info = IEEE80211_SKB_CB(skb);
if (info->control.hw_key) {
if (unlikely(wl->monitor_present))
return -EINVAL;
idx = info->control.hw_key->hw_key_idx;
if (unlikely(wl->default_key != idx)) {
ret = wl1251_acx_default_key(wl, idx);
if (ret < 0)
return ret;
}
}
/* Enable tx path in monitor mode for packet injection */
if ((wl->vif == NULL) && !wl->joined)
enable_tx_for_packet_injection(wl);
ret = wl1251_tx_path_status(wl);
if (ret < 0)
return ret;
ret = wl1251_tx_fill_hdr(wl, skb, info);
if (ret < 0)
return ret;
ret = wl1251_tx_send_packet(wl, skb, info);
if (ret < 0)
return ret;
wl1251_tx_trigger(wl);
return ret;
}
void wl1251_tx_work(struct work_struct *work)
{
struct wl1251 *wl = container_of(work, struct wl1251, tx_work);
struct sk_buff *skb;
bool woken_up = false;
int ret;
mutex_lock(&wl->mutex);
if (unlikely(wl->state == WL1251_STATE_OFF))
goto out;
while ((skb = skb_dequeue(&wl->tx_queue))) {
if (!woken_up) {
ret = wl1251_ps_elp_wakeup(wl);
if (ret < 0)
goto out;
woken_up = true;
}
ret = wl1251_tx_frame(wl, skb);
if (ret == -EBUSY) {
skb_queue_head(&wl->tx_queue, skb);
goto out;
} else if (ret < 0) {
dev_kfree_skb(skb);
goto out;
}
}
out:
if (woken_up)
wl1251_ps_elp_sleep(wl);
mutex_unlock(&wl->mutex);
}
static const char *wl1251_tx_parse_status(u8 status)
{
/* 8 bit status field, one character per bit plus null */
static char buf[9];
int i = 0;
memset(buf, 0, sizeof(buf));
if (status & TX_DMA_ERROR)
buf[i++] = 'm';
if (status & TX_DISABLED)
buf[i++] = 'd';
if (status & TX_RETRY_EXCEEDED)
buf[i++] = 'r';
if (status & TX_TIMEOUT)
buf[i++] = 't';
if (status & TX_KEY_NOT_FOUND)
buf[i++] = 'k';
if (status & TX_ENCRYPT_FAIL)
buf[i++] = 'e';
if (status & TX_UNAVAILABLE_PRIORITY)
buf[i++] = 'p';
/* bit 0 is unused apparently */
return buf;
}
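/*
* Example: a frame that exceeded its retry limit and then timed out
* (TX_RETRY_EXCEEDED | TX_TIMEOUT) is rendered as "rt".
*/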
static void wl1251_tx_packet_cb(struct wl1251 *wl,
struct tx_result *result)
{
struct ieee80211_tx_info *info;
struct sk_buff *skb;
int hdrlen;
u8 *frame;
skb = wl->tx_frames[result->id];
if (skb == NULL) {
wl1251_error("SKB for packet %d is NULL", result->id);
return;
}
info = IEEE80211_SKB_CB(skb);
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) &&
!(info->flags & IEEE80211_TX_CTL_INJECTED) &&
(result->status == TX_SUCCESS))
info->flags |= IEEE80211_TX_STAT_ACK;
info->status.rates[0].count = result->ack_failures + 1;
wl->stats.retry_count += result->ack_failures;
/*
* We have to remove our private TX header before pushing
* the skb back to mac80211.
*/
frame = skb_pull(skb, sizeof(struct tx_double_buffer_desc));
if (info->control.hw_key &&
info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
hdrlen = ieee80211_get_hdrlen_from_skb(skb);
memmove(frame + WL1251_TKIP_IV_SPACE, frame, hdrlen);
skb_pull(skb, WL1251_TKIP_IV_SPACE);
}
wl1251_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
" status 0x%x (%s)",
result->id, skb, result->ack_failures, result->rate,
result->status, wl1251_tx_parse_status(result->status));
ieee80211_tx_status(wl->hw, skb);
wl->tx_frames[result->id] = NULL;
}
/* Called upon reception of a TX complete interrupt */
void wl1251_tx_complete(struct wl1251 *wl)
{
int i, result_index, num_complete = 0, queue_len;
struct tx_result *result, *result_ptr;
unsigned long flags;
if (unlikely(wl->state != WL1251_STATE_ON))
return;
result = kmalloc_array(FW_TX_CMPLT_BLOCK_SIZE, sizeof(*result), GFP_KERNEL);
if (!result) {
wl1251_error("can not allocate result buffer");
return;
}
/* First we read the result */
wl1251_mem_read(wl, wl->data_path->tx_complete_addr, result,
FW_TX_CMPLT_BLOCK_SIZE * sizeof(*result));
result_index = wl->next_tx_complete;
for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) {
result_ptr = &result[result_index];
if (result_ptr->done_1 == 1 &&
result_ptr->done_2 == 1) {
wl1251_tx_packet_cb(wl, result_ptr);
result_ptr->done_1 = 0;
result_ptr->done_2 = 0;
result_index = (result_index + 1) &
(FW_TX_CMPLT_BLOCK_SIZE - 1);
num_complete++;
} else {
break;
}
}
queue_len = skb_queue_len(&wl->tx_queue);
if ((num_complete > 0) && (queue_len > 0)) {
/* firmware buffer has space, reschedule tx_work */
wl1251_debug(DEBUG_TX, "tx_complete: reschedule tx_work");
ieee80211_queue_work(wl->hw, &wl->tx_work);
}
if (wl->tx_queue_stopped &&
queue_len <= WL1251_TX_QUEUE_LOW_WATERMARK) {
/* tx_queue has space, restart queues */
wl1251_debug(DEBUG_TX, "tx_complete: waking queues");
spin_lock_irqsave(&wl->wl_lock, flags);
ieee80211_wake_queues(wl->hw);
wl->tx_queue_stopped = false;
spin_unlock_irqrestore(&wl->wl_lock, flags);
}
/* Every completed frame needs to be acknowledged */
if (num_complete) {
/*
* If we've wrapped, we have to clear
* the results in 2 steps.
*/
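/*
* Example, assuming a 16-entry completion block: with
* next_tx_complete == 14 and num_complete == 4, result_index wraps
* to 2, so entries 14..15 and 0..1 are cleared with two writes.
*/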
if (result_index > wl->next_tx_complete) {
/* Only 1 write is needed */
wl1251_mem_write(wl,
wl->data_path->tx_complete_addr +
(wl->next_tx_complete *
sizeof(struct tx_result)),
&result[wl->next_tx_complete],
num_complete *
sizeof(struct tx_result));
} else if (result_index < wl->next_tx_complete) {
/* 2 writes are needed */
wl1251_mem_write(wl,
wl->data_path->tx_complete_addr +
(wl->next_tx_complete *
sizeof(struct tx_result)),
&result[wl->next_tx_complete],
(FW_TX_CMPLT_BLOCK_SIZE -
wl->next_tx_complete) *
sizeof(struct tx_result));
wl1251_mem_write(wl,
wl->data_path->tx_complete_addr,
result,
(num_complete -
FW_TX_CMPLT_BLOCK_SIZE +
wl->next_tx_complete) *
sizeof(struct tx_result));
} else {
/* We have to write the whole array */
wl1251_mem_write(wl,
wl->data_path->tx_complete_addr,
result,
FW_TX_CMPLT_BLOCK_SIZE *
sizeof(struct tx_result));
}
}
kfree(result);
wl->next_tx_complete = result_index;
}
/* caller must hold wl->mutex */
void wl1251_tx_flush(struct wl1251 *wl)
{
int i;
struct sk_buff *skb;
struct ieee80211_tx_info *info;
/* TX failure */
/* control->flags = 0; FIXME */
while ((skb = skb_dequeue(&wl->tx_queue))) {
info = IEEE80211_SKB_CB(skb);
wl1251_debug(DEBUG_TX, "flushing skb 0x%p", skb);
if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
continue;
ieee80211_tx_status(wl->hw, skb);
}
for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
if (wl->tx_frames[i] != NULL) {
skb = wl->tx_frames[i];
info = IEEE80211_SKB_CB(skb);
if (!(info->flags & IEEE80211_TX_CTL_REQ_TX_STATUS))
continue;
ieee80211_tx_status(wl->hw, skb);
wl->tx_frames[i] = NULL;
}
}
|
linux-master
|
drivers/net/wireless/ti/wl1251/tx.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* This file is part of wl1251
*
* Copyright (C) 2008 Nokia Corporation
*/
#include <linux/slab.h>
#include "reg.h"
#include "boot.h"
#include "io.h"
#include "spi.h"
#include "event.h"
#include "acx.h"
void wl1251_boot_target_enable_interrupts(struct wl1251 *wl)
{
wl1251_reg_write32(wl, ACX_REG_INTERRUPT_MASK, ~(wl->intr_mask));
wl1251_reg_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
}
int wl1251_boot_soft_reset(struct wl1251 *wl)
{
unsigned long timeout;
u32 boot_data;
/* perform soft reset */
wl1251_reg_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT);
/* SOFT_RESET is self clearing */
timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
while (1) {
boot_data = wl1251_reg_read32(wl, ACX_REG_SLV_SOFT_RESET);
wl1251_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
break;
if (time_after(jiffies, timeout)) {
/* 1.2 check pWhalBus->uSelfClearTime if the
* timeout was reached */
wl1251_error("soft reset timeout");
return -1;
}
udelay(SOFT_RESET_STALL_TIME);
}
/* disable Rx/Tx */
wl1251_reg_write32(wl, ENABLE, 0x0);
/* disable auto calibration on start */
wl1251_reg_write32(wl, SPARE_A2, 0xffff);
return 0;
}
int wl1251_boot_init_seq(struct wl1251 *wl)
{
u32 scr_pad6, init_data, tmp, elp_cmd, ref_freq;
/*
* col #1: INTEGER_DIVIDER
* col #2: FRACTIONAL_DIVIDER
* col #3: ATTN_BB
* col #4: ALPHA_BB
* col #5: STOP_TIME_BB
* col #6: BB_PLL_LOOP_FILTER
*/
static const u32 LUT[REF_FREQ_NUM][LUT_PARAM_NUM] = {
{ 83, 87381, 0xB, 5, 0xF00, 3}, /* REF_FREQ_19_2*/
{ 61, 141154, 0xB, 5, 0x1450, 2}, /* REF_FREQ_26_0*/
{ 41, 174763, 0xC, 6, 0x2D00, 1}, /* REF_FREQ_38_4*/
{ 40, 0, 0xC, 6, 0x2EE0, 1}, /* REF_FREQ_40_0*/
{ 47, 162280, 0xC, 6, 0x2760, 1} /* REF_FREQ_33_6 */
};
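/*
* ref_freq, read from the low byte of SCR_PAD6 below, selects one of
* the REF_FREQ_* rows of this table.
*/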
/* read NVS params */
scr_pad6 = wl1251_reg_read32(wl, SCR_PAD6);
wl1251_debug(DEBUG_BOOT, "scr_pad6 0x%x", scr_pad6);
/* read ELP_CMD */
elp_cmd = wl1251_reg_read32(wl, ELP_CMD);
wl1251_debug(DEBUG_BOOT, "elp_cmd 0x%x", elp_cmd);
/* set the BB calibration time to be 300 usec (PLL_CAL_TIME) */
ref_freq = scr_pad6 & 0x000000FF;
wl1251_debug(DEBUG_BOOT, "ref_freq 0x%x", ref_freq);
wl1251_reg_write32(wl, PLL_CAL_TIME, 0x9);
/*
* PG 1.2: set the clock buffer time to be 210 usec (CLK_BUF_TIME)
*/
wl1251_reg_write32(wl, CLK_BUF_TIME, 0x6);
/*
* set the clock detect feature to work in the restart wu procedure
* (ELP_CFG_MODE[14]) and select the clock source type
* (ELP_CFG_MODE[13:12])
*/
tmp = ((scr_pad6 & 0x0000FF00) << 4) | 0x00004000;
wl1251_reg_write32(wl, ELP_CFG_MODE, tmp);
/* PG 1.2: enable the BB PLL fix. Enable the PLL_LIMP_CLK_EN_CMD */
elp_cmd |= 0x00000040;
wl1251_reg_write32(wl, ELP_CMD, elp_cmd);
/* PG 1.2: Set the BB PLL stable time to be 1000usec
* (PLL_STABLE_TIME) */
wl1251_reg_write32(wl, CFG_PLL_SYNC_CNT, 0x20);
/* PG 1.2: read clock request time */
init_data = wl1251_reg_read32(wl, CLK_REQ_TIME);
/*
* PG 1.2: set the clock request time to be ref_clk_settling_time -
* 1ms = 4ms
*/
if (init_data > 0x21)
tmp = init_data - 0x21;
else
tmp = 0;
wl1251_reg_write32(wl, CLK_REQ_TIME, tmp);
/* set BB PLL configurations in RF AFE */
wl1251_reg_write32(wl, 0x003058cc, 0x4B5);
/* set RF_AFE_REG_5 */
wl1251_reg_write32(wl, 0x003058d4, 0x50);
/* set RF_AFE_CTRL_REG_2 */
wl1251_reg_write32(wl, 0x00305948, 0x11c001);
/*
* change RF PLL and BB PLL divider for VCO clock and adjust VCO
* bias current (RF_AFE_REG_13)
*/
wl1251_reg_write32(wl, 0x003058f4, 0x1e);
/* set BB PLL configurations */
tmp = LUT[ref_freq][LUT_PARAM_INTEGER_DIVIDER] | 0x00017000;
wl1251_reg_write32(wl, 0x00305840, tmp);
/* set fractional divider according to Appendix C-BB PLL
* Calculations
*/
tmp = LUT[ref_freq][LUT_PARAM_FRACTIONAL_DIVIDER];
wl1251_reg_write32(wl, 0x00305844, tmp);
/* set the initial data for the sigma delta */
wl1251_reg_write32(wl, 0x00305848, 0x3039);
/*
* set the accumulator attenuation value, calibration loop1
* (alpha), calibration loop2 (beta), calibration loop3 (gamma) and
* the VCO gain
*/
tmp = (LUT[ref_freq][LUT_PARAM_ATTN_BB] << 16) |
(LUT[ref_freq][LUT_PARAM_ALPHA_BB] << 12) | 0x1;
wl1251_reg_write32(wl, 0x00305854, tmp);
/*
* set the calibration stop time after holdoff time expires and set
* settling time HOLD_OFF_TIME_BB
*/
tmp = LUT[ref_freq][LUT_PARAM_STOP_TIME_BB] | 0x000A0000;
wl1251_reg_write32(wl, 0x00305858, tmp);
/*
* set BB PLL Loop filter capacitor3- BB_C3[2:0] and set BB PLL
* constant leakage current to linearize PFD to 0uA -
* BB_ILOOPF[7:3]
*/
tmp = LUT[ref_freq][LUT_PARAM_BB_PLL_LOOP_FILTER] | 0x00000030;
wl1251_reg_write32(wl, 0x003058f8, tmp);
/*
* set regulator output voltage for n divider to
* 1.35-BB_REFDIV[1:0], set charge pump current- BB_CPGAIN[4:2],
* set BB PLL Loop filter capacitor2- BB_C2[7:5], set gain of BB
* PLL auto-call to normal mode- BB_CALGAIN_3DB[8]
*/
wl1251_reg_write32(wl, 0x003058f0, 0x29);
/* enable restart wakeup sequence (ELP_CMD[0]) */
wl1251_reg_write32(wl, ELP_CMD, elp_cmd | 0x1);
/* restart sequence completed */
udelay(2000);
return 0;
}
static void wl1251_boot_set_ecpu_ctrl(struct wl1251 *wl, u32 flag)
{
u32 cpu_ctrl;
/* 10.5.0 run the firmware (I) */
cpu_ctrl = wl1251_reg_read32(wl, ACX_REG_ECPU_CONTROL);
/* 10.5.1 run the firmware (II) */
cpu_ctrl &= ~flag;
wl1251_reg_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
}
int wl1251_boot_run_firmware(struct wl1251 *wl)
{
int loop, ret;
u32 chip_id, acx_intr;
wl1251_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
chip_id = wl1251_reg_read32(wl, CHIP_ID_B);
wl1251_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
if (chip_id != wl->chip_id) {
wl1251_error("chip id doesn't match after firmware boot");
return -EIO;
}
/* wait for init to complete */
loop = 0;
while (loop++ < INIT_LOOP) {
udelay(INIT_LOOP_DELAY);
acx_intr = wl1251_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
if (acx_intr == 0xffffffff) {
wl1251_error("error reading hardware complete "
"init indication");
return -EIO;
}
/* check that ACX_INTR_INIT_COMPLETE is enabled */
else if (acx_intr & WL1251_ACX_INTR_INIT_COMPLETE) {
wl1251_reg_write32(wl, ACX_REG_INTERRUPT_ACK,
WL1251_ACX_INTR_INIT_COMPLETE);
break;
}
}
if (loop > INIT_LOOP) {
wl1251_error("timeout waiting for the hardware to "
"complete initialization");
return -EIO;
}
/* get hardware config command mail box */
wl->cmd_box_addr = wl1251_reg_read32(wl, REG_COMMAND_MAILBOX_PTR);
/* get hardware config event mail box */
wl->event_box_addr = wl1251_reg_read32(wl, REG_EVENT_MAILBOX_PTR);
/* set the working partition to its "running" mode offset */
wl1251_set_partition(wl, WL1251_PART_WORK_MEM_START,
WL1251_PART_WORK_MEM_SIZE,
WL1251_PART_WORK_REG_START,
WL1251_PART_WORK_REG_SIZE);
wl1251_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x",
wl->cmd_box_addr, wl->event_box_addr);
wl1251_acx_fw_version(wl, wl->fw_ver, sizeof(wl->fw_ver));
/*
* in case of full asynchronous mode the firmware must be ready to
* receive events from the command mailbox
*/
/* enable gpio interrupts */
wl1251_enable_interrupts(wl);
/* Enable target's interrupts */
wl->intr_mask = WL1251_ACX_INTR_RX0_DATA |
WL1251_ACX_INTR_RX1_DATA |
WL1251_ACX_INTR_TX_RESULT |
WL1251_ACX_INTR_EVENT_A |
WL1251_ACX_INTR_EVENT_B |
WL1251_ACX_INTR_INIT_COMPLETE;
wl1251_boot_target_enable_interrupts(wl);
wl->event_mask = SCAN_COMPLETE_EVENT_ID | BSS_LOSE_EVENT_ID |
SYNCHRONIZATION_TIMEOUT_EVENT_ID |
ROAMING_TRIGGER_LOW_RSSI_EVENT_ID |
ROAMING_TRIGGER_REGAINED_RSSI_EVENT_ID |
REGAINED_BSS_EVENT_ID | BT_PTA_SENSE_EVENT_ID |
BT_PTA_PREDICTION_EVENT_ID | JOIN_EVENT_COMPLETE_ID |
PS_REPORT_EVENT_ID;
ret = wl1251_event_unmask(wl);
if (ret < 0) {
wl1251_error("EVENT mask setting failed");
return ret;
}
wl1251_event_mbox_config(wl);
/* firmware startup completed */
return 0;
}
static int wl1251_boot_upload_firmware(struct wl1251 *wl)
{
int addr, chunk_num, partition_limit;
size_t fw_data_len, len;
u8 *p, *buf;
/* whal_FwCtrl_LoadFwImageSm() */
wl1251_debug(DEBUG_BOOT, "chip id before fw upload: 0x%x",
wl1251_reg_read32(wl, CHIP_ID_B));
/* 10.0 check firmware length and set partition */
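/* the 32-bit payload length sits big-endian at bytes 4..7 of the header */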
fw_data_len = (wl->fw[4] << 24) | (wl->fw[5] << 16) |
(wl->fw[6] << 8) | (wl->fw[7]);
wl1251_debug(DEBUG_BOOT, "fw_data_len %zu chunk_size %d", fw_data_len,
CHUNK_SIZE);
if ((fw_data_len % 4) != 0) {
wl1251_error("firmware length not multiple of four");
return -EIO;
}
buf = kmalloc(CHUNK_SIZE, GFP_KERNEL);
if (!buf) {
wl1251_error("allocation for firmware upload chunk failed");
return -ENOMEM;
}
wl1251_set_partition(wl, WL1251_PART_DOWN_MEM_START,
WL1251_PART_DOWN_MEM_SIZE,
WL1251_PART_DOWN_REG_START,
WL1251_PART_DOWN_REG_SIZE);
/* 10.1 set partition limit and chunk num */
chunk_num = 0;
partition_limit = WL1251_PART_DOWN_MEM_SIZE;
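/*
* The download window is slid forward whenever the chunk after the
* next one would cross partition_limit, so each chunk is always
* written through a valid memory partition.
*/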
while (chunk_num < fw_data_len / CHUNK_SIZE) {
/* 10.2 update partition, if needed */
addr = WL1251_PART_DOWN_MEM_START +
(chunk_num + 2) * CHUNK_SIZE;
if (addr > partition_limit) {
addr = WL1251_PART_DOWN_MEM_START +
chunk_num * CHUNK_SIZE;
partition_limit = chunk_num * CHUNK_SIZE +
WL1251_PART_DOWN_MEM_SIZE;
wl1251_set_partition(wl,
addr,
WL1251_PART_DOWN_MEM_SIZE,
WL1251_PART_DOWN_REG_START,
WL1251_PART_DOWN_REG_SIZE);
}
/* 10.3 upload the chunk */
addr = WL1251_PART_DOWN_MEM_START + chunk_num * CHUNK_SIZE;
p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE;
wl1251_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
p, addr);
/* need to copy the chunk for dma */
len = CHUNK_SIZE;
memcpy(buf, p, len);
wl1251_mem_write(wl, addr, buf, len);
chunk_num++;
}
/* 10.4 upload the last chunk */
addr = WL1251_PART_DOWN_MEM_START + chunk_num * CHUNK_SIZE;
p = wl->fw + FW_HDR_SIZE + chunk_num * CHUNK_SIZE;
/* need to copy the chunk for dma */
len = fw_data_len % CHUNK_SIZE;
memcpy(buf, p, len);
wl1251_debug(DEBUG_BOOT, "uploading fw last chunk (%zu B) 0x%p to 0x%x",
len, p, addr);
wl1251_mem_write(wl, addr, buf, len);
kfree(buf);
return 0;
}
static int wl1251_boot_upload_nvs(struct wl1251 *wl)
{
size_t nvs_len, nvs_bytes_written, burst_len;
int nvs_start, i;
u32 dest_addr, val;
u8 *nvs_ptr, *nvs;
nvs = wl->nvs;
if (nvs == NULL)
return -ENODEV;
nvs_ptr = nvs;
nvs_len = wl->nvs_len;
nvs_start = wl->fw_len;
/*
* Layout before the actual NVS tables:
* 1 byte : burst length.
* 2 bytes: destination address.
* n bytes: data to burst copy.
*
* This is ended by a 0 length, then the NVS tables.
*/
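/*
* Hypothetical record "01 f0 01 aa bb cc dd": burst_len 1,
* dest_addr 0x01f0 (bit 0 of the address byte is masked off), one
* little-endian word 0xddccbbaa written to 0x01f0.
*/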
while (nvs_ptr[0]) {
burst_len = nvs_ptr[0];
dest_addr = (nvs_ptr[1] & 0xfe) | ((u32)(nvs_ptr[2] << 8));
/* We move our pointer to the data */
nvs_ptr += 3;
for (i = 0; i < burst_len; i++) {
val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
| (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
wl1251_debug(DEBUG_BOOT,
"nvs burst write 0x%x: 0x%x",
dest_addr, val);
wl1251_mem_write32(wl, dest_addr, val);
nvs_ptr += 4;
dest_addr += 4;
}
}
/*
* We've reached the first zero length, the first NVS table
* is 7 bytes further.
*/
nvs_ptr += 7;
nvs_len -= nvs_ptr - nvs;
nvs_len = ALIGN(nvs_len, 4);
/* Now we must set the partition correctly */
wl1251_set_partition(wl, nvs_start,
WL1251_PART_DOWN_MEM_SIZE,
WL1251_PART_DOWN_REG_START,
WL1251_PART_DOWN_REG_SIZE);
/* And finally we upload the NVS tables */
nvs_bytes_written = 0;
while (nvs_bytes_written < nvs_len) {
val = (nvs_ptr[0] | (nvs_ptr[1] << 8)
| (nvs_ptr[2] << 16) | (nvs_ptr[3] << 24));
wl1251_debug(DEBUG_BOOT,
"nvs write table 0x%x: 0x%x",
nvs_start, val);
wl1251_mem_write32(wl, nvs_start, val);
nvs_ptr += 4;
nvs_bytes_written += 4;
nvs_start += 4;
}
return 0;
}
int wl1251_boot(struct wl1251 *wl)
{
int ret = 0, minor_minor_e2_ver;
u32 tmp, boot_data;
/* halt embedded ARM CPU while loading firmware */
wl1251_reg_write32(wl, ACX_REG_ECPU_CONTROL, ECPU_CONTROL_HALT);
ret = wl1251_boot_soft_reset(wl);
if (ret < 0)
goto out;
/* 2. start processing NVS file */
if (wl->use_eeprom) {
wl1251_reg_write32(wl, ACX_REG_EE_START, START_EEPROM_MGR);
/* Wait for EEPROM NVS burst read to complete */
msleep(40);
wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, USE_EEPROM);
} else {
ret = wl1251_boot_upload_nvs(wl);
if (ret < 0)
goto out;
/* write firmware's last address (i.e. its length) to
* ACX_EEPROMLESS_IND_REG */
wl1251_reg_write32(wl, ACX_EEPROMLESS_IND_REG, wl->fw_len);
}
/* 6. read the EEPROM parameters */
tmp = wl1251_reg_read32(wl, SCR_PAD2);
/* 7. read bootdata */
wl->boot_attr.radio_type = (tmp & 0x0000FF00) >> 8;
wl->boot_attr.major = (tmp & 0x00FF0000) >> 16;
tmp = wl1251_reg_read32(wl, SCR_PAD3);
/* 8. check bootdata and call restart sequence */
wl->boot_attr.minor = (tmp & 0x00FF0000) >> 16;
minor_minor_e2_ver = (tmp & 0xFF000000) >> 24;
wl1251_debug(DEBUG_BOOT, "radioType 0x%x majorE2Ver 0x%x "
"minorE2Ver 0x%x minor_minor_e2_ver 0x%x",
wl->boot_attr.radio_type, wl->boot_attr.major,
wl->boot_attr.minor, minor_minor_e2_ver);
ret = wl1251_boot_init_seq(wl);
if (ret < 0)
goto out;
/* 9. NVS processing done */
boot_data = wl1251_reg_read32(wl, ACX_REG_ECPU_CONTROL);
wl1251_debug(DEBUG_BOOT, "halt boot_data 0x%x", boot_data);
/* 10. check that ECPU_CONTROL_HALT bits are set in
* pWhalBus->uBootData and start uploading firmware
*/
if ((boot_data & ECPU_CONTROL_HALT) == 0) {
wl1251_error("boot failed, ECPU_CONTROL_HALT not set");
ret = -EIO;
goto out;
}
ret = wl1251_boot_upload_firmware(wl);
if (ret < 0)
goto out;
/* 10.5 start firmware */
ret = wl1251_boot_run_firmware(wl);
if (ret < 0)
goto out;
out:
return ret;
}
|
linux-master
|
drivers/net/wireless/ti/wl1251/boot.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* at76c503/at76c505 USB driver
*
* Copyright (c) 2002 - 2003 Oliver Kurth
* Copyright (c) 2004 Joerg Albert <[email protected]>
* Copyright (c) 2004 Nick Jones
* Copyright (c) 2004 Balint Seeber <[email protected]>
* Copyright (c) 2007 Guido Guenther <[email protected]>
* Copyright (c) 2007 Kalle Valo <[email protected]>
* Copyright (c) 2010 Sebastian Smolorz <[email protected]>
*
* This file is part of the Berlios driver for USB WLAN devices based on the
* Atmel AT76C503A/505/505A.
*
* Some iw_handler code was taken from airo.c, (C) 1999 Benjamin Reed
*
* TODO list is at the wiki:
*
* https://wireless.wiki.kernel.org/en/users/Drivers/at76c50x-usb#TODO
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/usb.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <net/ieee80211_radiotap.h>
#include <linux/firmware.h>
#include <linux/leds.h>
#include <net/mac80211.h>
#include "at76c50x-usb.h"
/* Version information */
#define DRIVER_NAME "at76c50x-usb"
#define DRIVER_VERSION "0.17"
#define DRIVER_DESC "Atmel at76x USB Wireless LAN Driver"
/* at76_debug bits */
#define DBG_PROGRESS 0x00000001 /* authentication/association */
#define DBG_BSS_TABLE 0x00000002 /* show BSS table after scans */
#define DBG_IOCTL 0x00000004 /* ioctl calls / settings */
#define DBG_MAC_STATE 0x00000008 /* MAC state transitions */
#define DBG_TX_DATA 0x00000010 /* tx header */
#define DBG_TX_DATA_CONTENT 0x00000020 /* tx content */
#define DBG_TX_MGMT 0x00000040 /* tx management */
#define DBG_RX_DATA 0x00000080 /* rx data header */
#define DBG_RX_DATA_CONTENT 0x00000100 /* rx data content */
#define DBG_RX_MGMT 0x00000200 /* rx mgmt frame headers */
#define DBG_RX_BEACON 0x00000400 /* rx beacon */
#define DBG_RX_CTRL 0x00000800 /* rx control */
#define DBG_RX_MGMT_CONTENT 0x00001000 /* rx mgmt content */
#define DBG_RX_FRAGS 0x00002000 /* rx data fragment handling */
#define DBG_DEVSTART 0x00004000 /* fw download, device start */
#define DBG_URB 0x00008000 /* rx urb status, ... */
#define DBG_RX_ATMEL_HDR 0x00010000 /* Atmel-specific Rx headers */
#define DBG_PROC_ENTRY 0x00020000 /* procedure entries/exits */
#define DBG_PM 0x00040000 /* power management settings */
#define DBG_BSS_MATCH 0x00080000 /* BSS match failures */
#define DBG_PARAMS 0x00100000 /* show configured parameters */
#define DBG_WAIT_COMPLETE 0x00200000 /* command completion */
#define DBG_RX_FRAGS_SKB 0x00400000 /* skb header of Rx fragments */
#define DBG_BSS_TABLE_RM 0x00800000 /* purging bss table entries */
#define DBG_MONITOR_MODE 0x01000000 /* monitor mode */
#define DBG_MIB 0x02000000 /* dump all MIBs on startup */
#define DBG_MGMT_TIMER 0x04000000 /* dump mgmt_timer ops */
#define DBG_WE_EVENTS 0x08000000 /* dump wireless events */
#define DBG_FW 0x10000000 /* firmware download */
#define DBG_DFU 0x20000000 /* device firmware upgrade */
#define DBG_CMD 0x40000000
#define DBG_MAC80211 0x80000000
#define DBG_DEFAULTS 0
/* Use our own dbg macro */
#define at76_dbg(bits, format, arg...) \
do { \
if (at76_debug & (bits)) \
printk(KERN_DEBUG DRIVER_NAME ": " format "\n", ##arg); \
} while (0)
#define at76_dbg_dump(bits, buf, len, format, arg...) \
do { \
if (at76_debug & (bits)) { \
printk(KERN_DEBUG DRIVER_NAME ": " format "\n", ##arg); \
print_hex_dump_bytes("", DUMP_PREFIX_OFFSET, buf, len); \
} \
} while (0)
static uint at76_debug = DBG_DEFAULTS;
/* Protect against concurrent firmware loading and parsing */
static DEFINE_MUTEX(fw_mutex);
static struct fwentry firmwares[] = {
[0] = { "" },
[BOARD_503_ISL3861] = { "atmel_at76c503-i3861.bin" },
[BOARD_503_ISL3863] = { "atmel_at76c503-i3863.bin" },
[BOARD_503] = { "atmel_at76c503-rfmd.bin" },
[BOARD_503_ACC] = { "atmel_at76c503-rfmd-acc.bin" },
[BOARD_505] = { "atmel_at76c505-rfmd.bin" },
[BOARD_505_2958] = { "atmel_at76c505-rfmd2958.bin" },
[BOARD_505A] = { "atmel_at76c505a-rfmd2958.bin" },
[BOARD_505AMX] = { "atmel_at76c505amx-rfmd.bin" },
};
MODULE_FIRMWARE("atmel_at76c503-i3861.bin");
MODULE_FIRMWARE("atmel_at76c503-i3863.bin");
MODULE_FIRMWARE("atmel_at76c503-rfmd.bin");
MODULE_FIRMWARE("atmel_at76c503-rfmd-acc.bin");
MODULE_FIRMWARE("atmel_at76c505-rfmd.bin");
MODULE_FIRMWARE("atmel_at76c505-rfmd2958.bin");
MODULE_FIRMWARE("atmel_at76c505a-rfmd2958.bin");
MODULE_FIRMWARE("atmel_at76c505amx-rfmd.bin");
#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops)
static const struct usb_device_id dev_table[] = {
/*
* at76c503-i3861
*/
/* Generic AT76C503/3861 device */
{ USB_DEVICE(0x03eb, 0x7603), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* Linksys WUSB11 v2.1/v2.6 */
{ USB_DEVICE(0x066b, 0x2211), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* Netgear MA101 rev. A */
{ USB_DEVICE(0x0864, 0x4100), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* Tekram U300C / Allnet ALL0193 */
{ USB_DEVICE(0x0b3b, 0x1612), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* HP HN210W J7801A */
{ USB_DEVICE(0x03f0, 0x011c), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* Sitecom/Z-Com/Zyxel M4Y-750 */
{ USB_DEVICE(0x0cde, 0x0001), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* Dynalink/Askey WLL013 (intersil) */
{ USB_DEVICE(0x069a, 0x0320), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* EZ connect 11Mbps USB Wireless Adapter SMC2662W v1 */
{ USB_DEVICE(0x0d5c, 0xa001), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* BenQ AWL300 */
{ USB_DEVICE(0x04a5, 0x9000), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* Addtron AWU-120, Compex WLU11 */
{ USB_DEVICE(0x05dd, 0xff31), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* Intel AP310 AnyPoint II USB */
{ USB_DEVICE(0x8086, 0x0200), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* Dynalink L11U */
{ USB_DEVICE(0x0d8e, 0x7100), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* Arescom WL-210, FCC id 07J-GL2411USB */
{ USB_DEVICE(0x0d8e, 0x7110), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* I-O DATA WN-B11/USB */
{ USB_DEVICE(0x04bb, 0x0919), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/* BT Voyager 1010 */
{ USB_DEVICE(0x069a, 0x0821), USB_DEVICE_DATA(BOARD_503_ISL3861) },
/*
* at76c503-i3863
*/
/* Generic AT76C503/3863 device */
{ USB_DEVICE(0x03eb, 0x7604), USB_DEVICE_DATA(BOARD_503_ISL3863) },
/* Samsung SWL-2100U */
{ USB_DEVICE(0x055d, 0xa000), USB_DEVICE_DATA(BOARD_503_ISL3863) },
/*
* at76c503-rfmd
*/
/* Generic AT76C503/RFMD device */
{ USB_DEVICE(0x03eb, 0x7605), USB_DEVICE_DATA(BOARD_503) },
/* Dynalink/Askey WLL013 (rfmd) */
{ USB_DEVICE(0x069a, 0x0321), USB_DEVICE_DATA(BOARD_503) },
/* Linksys WUSB11 v2.6 */
{ USB_DEVICE(0x077b, 0x2219), USB_DEVICE_DATA(BOARD_503) },
/* Network Everywhere NWU11B */
{ USB_DEVICE(0x077b, 0x2227), USB_DEVICE_DATA(BOARD_503) },
/* Netgear MA101 rev. B */
{ USB_DEVICE(0x0864, 0x4102), USB_DEVICE_DATA(BOARD_503) },
/* D-Link DWL-120 rev. E */
{ USB_DEVICE(0x2001, 0x3200), USB_DEVICE_DATA(BOARD_503) },
/* Actiontec 802UAT1, HWU01150-01UK */
{ USB_DEVICE(0x1668, 0x7605), USB_DEVICE_DATA(BOARD_503) },
/* AirVast W-Buddie WN210 */
{ USB_DEVICE(0x03eb, 0x4102), USB_DEVICE_DATA(BOARD_503) },
/* Dick Smith Electronics XH1153 802.11b USB adapter */
{ USB_DEVICE(0x1371, 0x5743), USB_DEVICE_DATA(BOARD_503) },
/* CNet CNUSB611 */
{ USB_DEVICE(0x1371, 0x0001), USB_DEVICE_DATA(BOARD_503) },
/* FiberLine FL-WL200U */
{ USB_DEVICE(0x1371, 0x0002), USB_DEVICE_DATA(BOARD_503) },
/* BenQ AWL400 USB stick */
{ USB_DEVICE(0x04a5, 0x9001), USB_DEVICE_DATA(BOARD_503) },
/* 3Com 3CRSHEW696 */
{ USB_DEVICE(0x0506, 0x0a01), USB_DEVICE_DATA(BOARD_503) },
/* Siemens Santis ADSL USB WLAN adapter WLL 013 */
{ USB_DEVICE(0x0681, 0x001b), USB_DEVICE_DATA(BOARD_503) },
/* Belkin F5D6050, version 2 */
{ USB_DEVICE(0x050d, 0x0050), USB_DEVICE_DATA(BOARD_503) },
/* iBlitzz, BWU613 (not *B or *SB) */
{ USB_DEVICE(0x07b8, 0xb000), USB_DEVICE_DATA(BOARD_503) },
/* Gigabyte GN-WLBM101 */
{ USB_DEVICE(0x1044, 0x8003), USB_DEVICE_DATA(BOARD_503) },
/* Planex GW-US11S */
{ USB_DEVICE(0x2019, 0x3220), USB_DEVICE_DATA(BOARD_503) },
/* Internal WLAN adapter in h5[4,5]xx series iPAQs */
{ USB_DEVICE(0x049f, 0x0032), USB_DEVICE_DATA(BOARD_503) },
/* Corega Wireless LAN USB-11 mini */
{ USB_DEVICE(0x07aa, 0x0011), USB_DEVICE_DATA(BOARD_503) },
/* Corega Wireless LAN USB-11 mini2 */
{ USB_DEVICE(0x07aa, 0x0018), USB_DEVICE_DATA(BOARD_503) },
/* Uniden PCW100 */
{ USB_DEVICE(0x05dd, 0xff35), USB_DEVICE_DATA(BOARD_503) },
/*
* at76c503-rfmd-acc
*/
/* SMC2664W */
{ USB_DEVICE(0x083a, 0x3501), USB_DEVICE_DATA(BOARD_503_ACC) },
/* Belkin F5D6050, SMC2662W v2, SMC2662W-AR */
{ USB_DEVICE(0x0d5c, 0xa002), USB_DEVICE_DATA(BOARD_503_ACC) },
/*
* at76c505-rfmd
*/
/* Generic AT76C505/RFMD */
{ USB_DEVICE(0x03eb, 0x7606), USB_DEVICE_DATA(BOARD_505) },
/*
* at76c505-rfmd2958
*/
/* Generic AT76C505/RFMD, OvisLink WL-1130USB */
{ USB_DEVICE(0x03eb, 0x7613), USB_DEVICE_DATA(BOARD_505_2958) },
/* Fiberline FL-WL240U */
{ USB_DEVICE(0x1371, 0x0014), USB_DEVICE_DATA(BOARD_505_2958) },
/* CNet CNUSB-611G */
{ USB_DEVICE(0x1371, 0x0013), USB_DEVICE_DATA(BOARD_505_2958) },
/* Linksys WUSB11 v2.8 */
{ USB_DEVICE(0x1915, 0x2233), USB_DEVICE_DATA(BOARD_505_2958) },
/* Xterasys XN-2122B, IBlitzz BWU613B/BWU613SB */
{ USB_DEVICE(0x12fd, 0x1001), USB_DEVICE_DATA(BOARD_505_2958) },
/* Corega USB WLAN Stick 11 */
{ USB_DEVICE(0x07aa, 0x7613), USB_DEVICE_DATA(BOARD_505_2958) },
/* Microstar MSI Box MS6978 */
{ USB_DEVICE(0x0db0, 0x1020), USB_DEVICE_DATA(BOARD_505_2958) },
/*
* at76c505a-rfmd2958
*/
/* Generic AT76C505A device */
{ USB_DEVICE(0x03eb, 0x7614), USB_DEVICE_DATA(BOARD_505A) },
/* Generic AT76C505AS device */
{ USB_DEVICE(0x03eb, 0x7617), USB_DEVICE_DATA(BOARD_505A) },
/* Siemens Gigaset USB WLAN Adapter 11 */
{ USB_DEVICE(0x1690, 0x0701), USB_DEVICE_DATA(BOARD_505A) },
/* OQO Model 01+ Internal Wi-Fi */
{ USB_DEVICE(0x1557, 0x0002), USB_DEVICE_DATA(BOARD_505A) },
/*
* at76c505amx-rfmd
*/
/* Generic AT76C505AMX device */
{ USB_DEVICE(0x03eb, 0x7615), USB_DEVICE_DATA(BOARD_505AMX) },
{ }
};
MODULE_DEVICE_TABLE(usb, dev_table);
/* Supported rates of this hardware, bit 7 marks basic rates */
static const u8 hw_rates[] = { 0x82, 0x84, 0x0b, 0x16 };
static const char *const preambles[] = { "long", "short", "auto" };
/* Firmware download */
/* DFU states */
#define STATE_IDLE 0x00
#define STATE_DETACH 0x01
#define STATE_DFU_IDLE 0x02
#define STATE_DFU_DOWNLOAD_SYNC 0x03
#define STATE_DFU_DOWNLOAD_BUSY 0x04
#define STATE_DFU_DOWNLOAD_IDLE 0x05
#define STATE_DFU_MANIFEST_SYNC 0x06
#define STATE_DFU_MANIFEST 0x07
#define STATE_DFU_MANIFEST_WAIT_RESET 0x08
#define STATE_DFU_UPLOAD_IDLE 0x09
#define STATE_DFU_ERROR 0x0a
/* DFU commands */
#define DFU_DETACH 0
#define DFU_DNLOAD 1
#define DFU_UPLOAD 2
#define DFU_GETSTATUS 3
#define DFU_CLRSTATUS 4
#define DFU_GETSTATE 5
#define DFU_ABORT 6
#define FW_BLOCK_SIZE 1024
struct dfu_status {
unsigned char status;
unsigned char poll_timeout[3];
unsigned char state;
unsigned char string;
} __packed;
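/*
* Sketch of the DFU download loop below: poll the state, push
* FW_BLOCK_SIZE chunks while the device reports (DOWNLOAD_)IDLE, honor
* the poll timeouts in DOWNLOAD_SYNC/BUSY and MANIFEST_SYNC, and treat
* MANIFEST or MANIFEST_WAIT_RESET as completion.
*/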
static inline int at76_is_intersil(enum board_type board)
{
return (board == BOARD_503_ISL3861 || board == BOARD_503_ISL3863);
}
static inline int at76_is_503rfmd(enum board_type board)
{
return (board == BOARD_503 || board == BOARD_503_ACC);
}
static inline int at76_is_505a(enum board_type board)
{
return (board == BOARD_505A || board == BOARD_505AMX);
}
/* Load a block of the first (internal) part of the firmware */
static int at76_load_int_fw_block(struct usb_device *udev, int blockno,
void *block, int size)
{
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), DFU_DNLOAD,
USB_TYPE_CLASS | USB_DIR_OUT |
USB_RECIP_INTERFACE, blockno, 0, block, size,
USB_CTRL_GET_TIMEOUT);
}
static int at76_dfu_get_status(struct usb_device *udev,
struct dfu_status *status)
{
int ret;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), DFU_GETSTATUS,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
0, 0, status, sizeof(struct dfu_status),
USB_CTRL_GET_TIMEOUT);
return ret;
}
static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
{
int ret;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), DFU_GETSTATE,
USB_TYPE_CLASS | USB_DIR_IN | USB_RECIP_INTERFACE,
0, 0, state, 1, USB_CTRL_GET_TIMEOUT);
return ret;
}
/* Convert timeout from the DFU status to jiffies */
static inline unsigned long at76_get_timeout(struct dfu_status *s)
{
return msecs_to_jiffies((s->poll_timeout[2] << 16)
| (s->poll_timeout[1] << 8)
| (s->poll_timeout[0]));
}
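/*
* Example: poll_timeout = {0x64, 0x00, 0x00} encodes 0x000064 = 100,
* i.e. a 100 ms wait converted to jiffies.
*/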
/* Load internal firmware from the buffer. If manifest_sync_timeout > 0, use
* its value in jiffies in the MANIFEST_SYNC state. */
static int at76_usbdfu_download(struct usb_device *udev, u8 *buf, u32 size,
int manifest_sync_timeout)
{
int ret = 0;
int need_dfu_state = 1;
int is_done = 0;
u32 dfu_timeout = 0;
int bsize = 0;
int blockno = 0;
struct dfu_status *dfu_stat_buf = NULL;
u8 *dfu_state = NULL;
u8 *block = NULL;
at76_dbg(DBG_DFU, "%s( %p, %u, %d)", __func__, buf, size,
manifest_sync_timeout);
if (!size) {
dev_err(&udev->dev, "FW buffer length invalid!\n");
return -EINVAL;
}
dfu_stat_buf = kmalloc(sizeof(struct dfu_status), GFP_KERNEL);
if (!dfu_stat_buf) {
ret = -ENOMEM;
goto exit;
}
block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL);
if (!block) {
ret = -ENOMEM;
goto exit;
}
dfu_state = kmalloc(sizeof(u8), GFP_KERNEL);
if (!dfu_state) {
ret = -ENOMEM;
goto exit;
}
*dfu_state = 0;
do {
if (need_dfu_state) {
ret = at76_dfu_get_state(udev, dfu_state);
if (ret < 0) {
dev_err(&udev->dev,
"cannot get DFU state: %d\n", ret);
goto exit;
}
need_dfu_state = 0;
}
switch (*dfu_state) {
case STATE_DFU_DOWNLOAD_SYNC:
at76_dbg(DBG_DFU, "STATE_DFU_DOWNLOAD_SYNC");
ret = at76_dfu_get_status(udev, dfu_stat_buf);
if (ret >= 0) {
*dfu_state = dfu_stat_buf->state;
dfu_timeout = at76_get_timeout(dfu_stat_buf);
need_dfu_state = 0;
} else
dev_err(&udev->dev,
"at76_dfu_get_status returned %d\n",
ret);
break;
case STATE_DFU_DOWNLOAD_BUSY:
at76_dbg(DBG_DFU, "STATE_DFU_DOWNLOAD_BUSY");
need_dfu_state = 1;
at76_dbg(DBG_DFU, "DFU: Resetting device");
schedule_timeout_interruptible(dfu_timeout);
break;
case STATE_DFU_DOWNLOAD_IDLE:
at76_dbg(DBG_DFU, "DOWNLOAD...");
fallthrough;
case STATE_DFU_IDLE:
at76_dbg(DBG_DFU, "DFU IDLE");
bsize = min_t(int, size, FW_BLOCK_SIZE);
memcpy(block, buf, bsize);
at76_dbg(DBG_DFU, "int fw, size left = %5d, "
"bsize = %4d, blockno = %2d", size, bsize,
blockno);
ret =
at76_load_int_fw_block(udev, blockno, block, bsize);
buf += bsize;
size -= bsize;
blockno++;
if (ret != bsize)
dev_err(&udev->dev,
"at76_load_int_fw_block returned %d\n",
ret);
need_dfu_state = 1;
break;
case STATE_DFU_MANIFEST_SYNC:
at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST_SYNC");
ret = at76_dfu_get_status(udev, dfu_stat_buf);
if (ret < 0)
break;
*dfu_state = dfu_stat_buf->state;
dfu_timeout = at76_get_timeout(dfu_stat_buf);
need_dfu_state = 0;
/* override the timeout from the status response,
needed for AT76C505A */
if (manifest_sync_timeout > 0)
dfu_timeout = manifest_sync_timeout;
at76_dbg(DBG_DFU, "DFU: Waiting for manifest phase");
schedule_timeout_interruptible(dfu_timeout);
break;
case STATE_DFU_MANIFEST:
at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST");
is_done = 1;
break;
case STATE_DFU_MANIFEST_WAIT_RESET:
at76_dbg(DBG_DFU, "STATE_DFU_MANIFEST_WAIT_RESET");
is_done = 1;
break;
case STATE_DFU_UPLOAD_IDLE:
at76_dbg(DBG_DFU, "STATE_DFU_UPLOAD_IDLE");
break;
case STATE_DFU_ERROR:
at76_dbg(DBG_DFU, "STATE_DFU_ERROR");
ret = -EPIPE;
break;
default:
at76_dbg(DBG_DFU, "DFU UNKNOWN STATE (%d)", *dfu_state);
ret = -EINVAL;
break;
}
} while (!is_done && (ret >= 0));
exit:
kfree(dfu_state);
kfree(block);
kfree(dfu_stat_buf);
if (ret >= 0)
ret = 0;
return ret;
}
/* LED trigger */
static int tx_activity;
static void at76_ledtrig_tx_timerfunc(struct timer_list *unused);
static DEFINE_TIMER(ledtrig_tx_timer, at76_ledtrig_tx_timerfunc);
DEFINE_LED_TRIGGER(ledtrig_tx);
static void at76_ledtrig_tx_timerfunc(struct timer_list *unused)
{
static int tx_lastactivity;
if (tx_lastactivity != tx_activity) {
tx_lastactivity = tx_activity;
led_trigger_event(ledtrig_tx, LED_FULL);
mod_timer(&ledtrig_tx_timer, jiffies + HZ / 4);
} else
led_trigger_event(ledtrig_tx, LED_OFF);
}
static void at76_ledtrig_tx_activity(void)
{
tx_activity++;
if (!timer_pending(&ledtrig_tx_timer))
mod_timer(&ledtrig_tx_timer, jiffies + HZ / 4);
}
static int at76_remap(struct usb_device *udev)
{
int ret;
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0a,
USB_TYPE_VENDOR | USB_DIR_OUT |
USB_RECIP_INTERFACE, 0, 0, NULL, 0,
USB_CTRL_GET_TIMEOUT);
if (ret < 0)
return ret;
return 0;
}
static int at76_get_op_mode(struct usb_device *udev)
{
int ret;
u8 saved;
u8 *op_mode;
op_mode = kmalloc(1, GFP_NOIO);
if (!op_mode)
return -ENOMEM;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x33,
USB_TYPE_VENDOR | USB_DIR_IN |
USB_RECIP_INTERFACE, 0x01, 0, op_mode, 1,
USB_CTRL_GET_TIMEOUT);
saved = *op_mode;
kfree(op_mode);
if (ret < 0)
return ret;
else if (ret < 1)
return -EIO;
else
return saved;
}
/* Load a block of the second ("external") part of the firmware */
static inline int at76_load_ext_fw_block(struct usb_device *udev, int blockno,
void *block, int size)
{
return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0e,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
0x0802, blockno, block, size,
USB_CTRL_GET_TIMEOUT);
}
static inline int at76_get_hw_cfg(struct usb_device *udev,
union at76_hwcfg *buf, int buf_size)
{
return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x33,
USB_TYPE_VENDOR | USB_DIR_IN |
USB_RECIP_INTERFACE, 0x0a02, 0,
buf, buf_size, USB_CTRL_GET_TIMEOUT);
}
/* Intersil boards use a different "value" for GetHWConfig requests */
static inline int at76_get_hw_cfg_intersil(struct usb_device *udev,
union at76_hwcfg *buf, int buf_size)
{
return usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x33,
USB_TYPE_VENDOR | USB_DIR_IN |
USB_RECIP_INTERFACE, 0x0902, 0,
buf, buf_size, USB_CTRL_GET_TIMEOUT);
}
/* Get the hardware configuration for the adapter and put it to the appropriate
* fields of 'priv' (the GetHWConfig request and interpretation of the result
* depends on the board type) */
static int at76_get_hw_config(struct at76_priv *priv)
{
int ret;
union at76_hwcfg *hwcfg = kmalloc(sizeof(*hwcfg), GFP_KERNEL);
if (!hwcfg)
return -ENOMEM;
if (at76_is_intersil(priv->board_type)) {
ret = at76_get_hw_cfg_intersil(priv->udev, hwcfg,
sizeof(hwcfg->i));
if (ret < 0)
goto exit;
memcpy(priv->mac_addr, hwcfg->i.mac_addr, ETH_ALEN);
priv->regulatory_domain = hwcfg->i.regulatory_domain;
} else if (at76_is_503rfmd(priv->board_type)) {
ret = at76_get_hw_cfg(priv->udev, hwcfg, sizeof(hwcfg->r3));
if (ret < 0)
goto exit;
memcpy(priv->mac_addr, hwcfg->r3.mac_addr, ETH_ALEN);
priv->regulatory_domain = hwcfg->r3.regulatory_domain;
} else {
ret = at76_get_hw_cfg(priv->udev, hwcfg, sizeof(hwcfg->r5));
if (ret < 0)
goto exit;
memcpy(priv->mac_addr, hwcfg->r5.mac_addr, ETH_ALEN);
priv->regulatory_domain = hwcfg->r5.regulatory_domain;
}
exit:
kfree(hwcfg);
if (ret < 0)
wiphy_err(priv->hw->wiphy, "cannot get HW Config (error %d)\n",
ret);
return ret;
}
static struct reg_domain const *at76_get_reg_domain(u16 code)
{
int i;
static struct reg_domain const fd_tab[] = {
{ 0x10, "FCC (USA)", 0x7ff }, /* ch 1-11 */
{ 0x20, "IC (Canada)", 0x7ff }, /* ch 1-11 */
{ 0x30, "ETSI (most of Europe)", 0x1fff }, /* ch 1-13 */
{ 0x31, "Spain", 0x600 }, /* ch 10-11 */
{ 0x32, "France", 0x1e00 }, /* ch 10-13 */
{ 0x40, "MKK (Japan)", 0x2000 }, /* ch 14 */
{ 0x41, "MKK1 (Japan)", 0x3fff }, /* ch 1-14 */
{ 0x50, "Israel", 0x3fc }, /* ch 3-9 */
{ 0x00, "<unknown>", 0xffffffff } /* ch 1-32 */
};
/* Last entry is fallback for unknown domain code */
for (i = 0; i < ARRAY_SIZE(fd_tab) - 1; i++)
if (code == fd_tab[i].code)
break;
return &fd_tab[i];
}
static inline int at76_get_mib(struct usb_device *udev, u16 mib, void *buf,
int buf_size)
{
int ret;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x33,
USB_TYPE_VENDOR | USB_DIR_IN |
USB_RECIP_INTERFACE, mib << 8, 0, buf, buf_size,
USB_CTRL_GET_TIMEOUT);
if (ret >= 0 && ret != buf_size)
return -EIO;
return ret;
}
/* Return positive number for status, negative for an error */
static inline int at76_get_cmd_status(struct usb_device *udev, u8 cmd)
{
u8 *stat_buf;
int ret;
stat_buf = kmalloc(40, GFP_NOIO);
if (!stat_buf)
return -ENOMEM;
ret = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), 0x22,
USB_TYPE_VENDOR | USB_DIR_IN |
USB_RECIP_INTERFACE, cmd, 0, stat_buf,
40, USB_CTRL_GET_TIMEOUT);
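/* byte 5 of the 40-byte response carries the command status code */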
if (ret >= 0)
ret = stat_buf[5];
kfree(stat_buf);
return ret;
}
#define MAKE_CMD_CASE(c) case (c): return #c
static const char *at76_get_cmd_string(u8 cmd_status)
{
switch (cmd_status) {
MAKE_CMD_CASE(CMD_SET_MIB);
MAKE_CMD_CASE(CMD_GET_MIB);
MAKE_CMD_CASE(CMD_SCAN);
MAKE_CMD_CASE(CMD_JOIN);
MAKE_CMD_CASE(CMD_START_IBSS);
MAKE_CMD_CASE(CMD_RADIO_ON);
MAKE_CMD_CASE(CMD_RADIO_OFF);
MAKE_CMD_CASE(CMD_STARTUP);
}
return "UNKNOWN";
}
static int at76_set_card_command(struct usb_device *udev, u8 cmd, void *buf,
int buf_size)
{
int ret;
struct at76_command *cmd_buf = kmalloc(sizeof(struct at76_command) +
buf_size, GFP_KERNEL);
if (!cmd_buf)
return -ENOMEM;
cmd_buf->cmd = cmd;
cmd_buf->reserved = 0;
cmd_buf->size = cpu_to_le16(buf_size);
memcpy(cmd_buf->data, buf, buf_size);
at76_dbg_dump(DBG_CMD, cmd_buf, sizeof(struct at76_command) + buf_size,
"issuing command %s (0x%02x)",
at76_get_cmd_string(cmd), cmd);
ret = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x0e,
USB_TYPE_VENDOR | USB_DIR_OUT | USB_RECIP_DEVICE,
0, 0, cmd_buf,
sizeof(struct at76_command) + buf_size,
USB_CTRL_GET_TIMEOUT);
kfree(cmd_buf);
return ret;
}
#define MAKE_CMD_STATUS_CASE(c) case (c): return #c
static const char *at76_get_cmd_status_string(u8 cmd_status)
{
switch (cmd_status) {
MAKE_CMD_STATUS_CASE(CMD_STATUS_IDLE);
MAKE_CMD_STATUS_CASE(CMD_STATUS_COMPLETE);
MAKE_CMD_STATUS_CASE(CMD_STATUS_UNKNOWN);
MAKE_CMD_STATUS_CASE(CMD_STATUS_INVALID_PARAMETER);
MAKE_CMD_STATUS_CASE(CMD_STATUS_FUNCTION_NOT_SUPPORTED);
MAKE_CMD_STATUS_CASE(CMD_STATUS_TIME_OUT);
MAKE_CMD_STATUS_CASE(CMD_STATUS_IN_PROGRESS);
MAKE_CMD_STATUS_CASE(CMD_STATUS_HOST_FAILURE);
MAKE_CMD_STATUS_CASE(CMD_STATUS_SCAN_FAILED);
}
return "UNKNOWN";
}
/* Wait until the command is completed */
static int at76_wait_completion(struct at76_priv *priv, int cmd)
{
int status = 0;
unsigned long timeout = jiffies + CMD_COMPLETION_TIMEOUT;
do {
status = at76_get_cmd_status(priv->udev, cmd);
if (status < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_cmd_status failed: %d\n",
status);
break;
}
at76_dbg(DBG_WAIT_COMPLETE,
"%s: Waiting on cmd %d, status = %d (%s)",
wiphy_name(priv->hw->wiphy), cmd, status,
at76_get_cmd_status_string(status));
if (status != CMD_STATUS_IN_PROGRESS
&& status != CMD_STATUS_IDLE)
break;
schedule_timeout_interruptible(HZ / 10); /* 100 ms */
if (time_after(jiffies, timeout)) {
wiphy_err(priv->hw->wiphy,
"completion timeout for command %d\n", cmd);
status = -ETIMEDOUT;
break;
}
} while (1);
return status;
}
static int at76_set_mib(struct at76_priv *priv, struct set_mib_buffer *buf)
{
int ret;
ret = at76_set_card_command(priv->udev, CMD_SET_MIB, buf,
offsetof(struct set_mib_buffer,
data) + buf->size);
if (ret < 0)
return ret;
ret = at76_wait_completion(priv, CMD_SET_MIB);
if (ret != CMD_STATUS_COMPLETE) {
wiphy_info(priv->hw->wiphy,
"set_mib: at76_wait_completion failed with %d\n",
ret);
ret = -EIO;
}
return ret;
}
/* Return < 0 on error, == 0 if no command sent, == 1 if cmd sent */
static int at76_set_radio(struct at76_priv *priv, int enable)
{
int ret;
int cmd;
if (priv->radio_on == enable)
return 0;
cmd = enable ? CMD_RADIO_ON : CMD_RADIO_OFF;
ret = at76_set_card_command(priv->udev, cmd, NULL, 0);
if (ret < 0)
wiphy_err(priv->hw->wiphy,
"at76_set_card_command(%d) failed: %d\n", cmd, ret);
else
ret = 1;
priv->radio_on = enable;
return ret;
}
/* Set current power save mode (AT76_PM_OFF/AT76_PM_ON/AT76_PM_SMART) */
static int at76_set_pm_mode(struct at76_priv *priv)
{
int ret = 0;
priv->mib_buf.type = MIB_MAC_MGMT;
priv->mib_buf.size = 1;
priv->mib_buf.index = offsetof(struct mib_mac_mgmt, power_mgmt_mode);
priv->mib_buf.data.byte = priv->pm_mode;
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
wiphy_err(priv->hw->wiphy, "set_mib (pm_mode) failed: %d\n",
ret);
return ret;
}
static int at76_set_preamble(struct at76_priv *priv, u8 type)
{
int ret = 0;
priv->mib_buf.type = MIB_LOCAL;
priv->mib_buf.size = 1;
priv->mib_buf.index = offsetof(struct mib_local, preamble_type);
priv->mib_buf.data.byte = type;
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
wiphy_err(priv->hw->wiphy, "set_mib (preamble) failed: %d\n",
ret);
return ret;
}
static int at76_set_frag(struct at76_priv *priv, u16 size)
{
int ret = 0;
priv->mib_buf.type = MIB_MAC;
priv->mib_buf.size = 2;
priv->mib_buf.index = offsetof(struct mib_mac, frag_threshold);
priv->mib_buf.data.word = cpu_to_le16(size);
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
wiphy_err(priv->hw->wiphy,
"set_mib (frag threshold) failed: %d\n", ret);
return ret;
}
static int at76_set_rts(struct at76_priv *priv, u16 size)
{
int ret = 0;
priv->mib_buf.type = MIB_MAC;
priv->mib_buf.size = 2;
priv->mib_buf.index = offsetof(struct mib_mac, rts_threshold);
priv->mib_buf.data.word = cpu_to_le16(size);
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
wiphy_err(priv->hw->wiphy, "set_mib (rts) failed: %d\n", ret);
return ret;
}
static int at76_set_autorate_fallback(struct at76_priv *priv, int onoff)
{
int ret = 0;
priv->mib_buf.type = MIB_LOCAL;
priv->mib_buf.size = 1;
priv->mib_buf.index = offsetof(struct mib_local, txautorate_fallback);
priv->mib_buf.data.byte = onoff;
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
wiphy_err(priv->hw->wiphy,
"set_mib (autorate fallback) failed: %d\n", ret);
return ret;
}
static void at76_dump_mib_mac_addr(struct at76_priv *priv)
{
int i;
int ret;
struct mib_mac_addr *m = kmalloc(sizeof(struct mib_mac_addr),
GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_MAC_ADDR, m,
sizeof(struct mib_mac_addr));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_ADDR) failed: %d\n", ret);
goto exit;
}
at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: mac_addr %pM res 0x%x 0x%x",
wiphy_name(priv->hw->wiphy),
m->mac_addr, m->res[0], m->res[1]);
for (i = 0; i < ARRAY_SIZE(m->group_addr); i++)
at76_dbg(DBG_MIB, "%s: MIB MAC_ADDR: group addr %d: %pM, "
"status %d", wiphy_name(priv->hw->wiphy), i,
m->group_addr[i], m->group_addr_status[i]);
exit:
kfree(m);
}
static void at76_dump_mib_mac_wep(struct at76_priv *priv)
{
int i;
int ret;
int key_len;
struct mib_mac_wep *m = kmalloc(sizeof(struct mib_mac_wep), GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_MAC_WEP, m,
sizeof(struct mib_mac_wep));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_WEP) failed: %d\n", ret);
goto exit;
}
at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: priv_invoked %u def_key_id %u "
"key_len %u excl_unencr %u wep_icv_err %u wep_excluded %u "
"encr_level %u key %d", wiphy_name(priv->hw->wiphy),
m->privacy_invoked, m->wep_default_key_id,
m->wep_key_mapping_len, m->exclude_unencrypted,
le32_to_cpu(m->wep_icv_error_count),
le32_to_cpu(m->wep_excluded_count), m->encryption_level,
m->wep_default_key_id);
key_len = (m->encryption_level == 1) ?
WEP_SMALL_KEY_LEN : WEP_LARGE_KEY_LEN;
for (i = 0; i < WEP_KEYS; i++)
at76_dbg(DBG_MIB, "%s: MIB MAC_WEP: key %d: %*phD",
wiphy_name(priv->hw->wiphy), i,
key_len, m->wep_default_keyvalue[i]);
exit:
kfree(m);
}
static void at76_dump_mib_mac_mgmt(struct at76_priv *priv)
{
int ret;
struct mib_mac_mgmt *m = kmalloc(sizeof(struct mib_mac_mgmt),
GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_MAC_MGMT, m,
sizeof(struct mib_mac_mgmt));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC_MGMT) failed: %d\n", ret);
goto exit;
}
at76_dbg(DBG_MIB, "%s: MIB MAC_MGMT: beacon_period %d CFP_max_duration "
"%d medium_occupancy_limit %d station_id 0x%x ATIM_window %d "
"CFP_mode %d privacy_opt_impl %d DTIM_period %d CFP_period %d "
"current_bssid %pM current_essid %*phD current_bss_type %d "
"pm_mode %d ibss_change %d res %d "
"multi_domain_capability_implemented %d "
"international_roaming %d country_string %.3s",
wiphy_name(priv->hw->wiphy), le16_to_cpu(m->beacon_period),
le16_to_cpu(m->CFP_max_duration),
le16_to_cpu(m->medium_occupancy_limit),
le16_to_cpu(m->station_id), le16_to_cpu(m->ATIM_window),
m->CFP_mode, m->privacy_option_implemented, m->DTIM_period,
m->CFP_period, m->current_bssid,
IW_ESSID_MAX_SIZE, m->current_essid,
m->current_bss_type, m->power_mgmt_mode, m->ibss_change,
m->res, m->multi_domain_capability_implemented,
m->multi_domain_capability_enabled, m->country_string);
exit:
kfree(m);
}
static void at76_dump_mib_mac(struct at76_priv *priv)
{
int ret;
struct mib_mac *m = kmalloc(sizeof(struct mib_mac), GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_MAC, m, sizeof(struct mib_mac));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MAC) failed: %d\n", ret);
goto exit;
}
at76_dbg(DBG_MIB, "%s: MIB MAC: max_tx_msdu_lifetime %d "
"max_rx_lifetime %d frag_threshold %d rts_threshold %d "
"cwmin %d cwmax %d short_retry_time %d long_retry_time %d "
"scan_type %d scan_channel %d probe_delay %u "
"min_channel_time %d max_channel_time %d listen_int %d "
"desired_ssid %*phD desired_bssid %pM desired_bsstype %d",
wiphy_name(priv->hw->wiphy),
le32_to_cpu(m->max_tx_msdu_lifetime),
le32_to_cpu(m->max_rx_lifetime),
le16_to_cpu(m->frag_threshold), le16_to_cpu(m->rts_threshold),
le16_to_cpu(m->cwmin), le16_to_cpu(m->cwmax),
m->short_retry_time, m->long_retry_time, m->scan_type,
m->scan_channel, le16_to_cpu(m->probe_delay),
le16_to_cpu(m->min_channel_time),
le16_to_cpu(m->max_channel_time),
le16_to_cpu(m->listen_interval),
IW_ESSID_MAX_SIZE, m->desired_ssid,
m->desired_bssid, m->desired_bsstype);
exit:
kfree(m);
}
static void at76_dump_mib_phy(struct at76_priv *priv)
{
int ret;
struct mib_phy *m = kmalloc(sizeof(struct mib_phy), GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_PHY, m, sizeof(struct mib_phy));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (PHY) failed: %d\n", ret);
goto exit;
}
at76_dbg(DBG_MIB, "%s: MIB PHY: ed_threshold %d slot_time %d "
"sifs_time %d preamble_length %d plcp_header_length %d "
"mpdu_max_length %d cca_mode_supported %d operation_rate_set "
"0x%x 0x%x 0x%x 0x%x channel_id %d current_cca_mode %d "
"phy_type %d current_reg_domain %d",
wiphy_name(priv->hw->wiphy), le32_to_cpu(m->ed_threshold),
le16_to_cpu(m->slot_time), le16_to_cpu(m->sifs_time),
le16_to_cpu(m->preamble_length),
le16_to_cpu(m->plcp_header_length),
le16_to_cpu(m->mpdu_max_length),
le16_to_cpu(m->cca_mode_supported), m->operation_rate_set[0],
m->operation_rate_set[1], m->operation_rate_set[2],
m->operation_rate_set[3], m->channel_id, m->current_cca_mode,
m->phy_type, m->current_reg_domain);
exit:
kfree(m);
}
static void at76_dump_mib_local(struct at76_priv *priv)
{
int ret;
struct mib_local *m = kmalloc(sizeof(*m), GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_LOCAL, m, sizeof(*m));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (LOCAL) failed: %d\n", ret);
goto exit;
}
at76_dbg(DBG_MIB, "%s: MIB LOCAL: beacon_enable %d "
"txautorate_fallback %d ssid_size %d promiscuous_mode %d "
"preamble_type %d", wiphy_name(priv->hw->wiphy),
m->beacon_enable,
m->txautorate_fallback, m->ssid_size, m->promiscuous_mode,
m->preamble_type);
exit:
kfree(m);
}
static void at76_dump_mib_mdomain(struct at76_priv *priv)
{
int ret;
struct mib_mdomain *m = kmalloc(sizeof(struct mib_mdomain), GFP_KERNEL);
if (!m)
return;
ret = at76_get_mib(priv->udev, MIB_MDOMAIN, m,
sizeof(struct mib_mdomain));
if (ret < 0) {
wiphy_err(priv->hw->wiphy,
"at76_get_mib (MDOMAIN) failed: %d\n", ret);
goto exit;
}
at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: channel_list %*phD",
wiphy_name(priv->hw->wiphy),
(int)sizeof(m->channel_list), m->channel_list);
at76_dbg(DBG_MIB, "%s: MIB MDOMAIN: tx_powerlevel %*phD",
wiphy_name(priv->hw->wiphy),
(int)sizeof(m->tx_powerlevel), m->tx_powerlevel);
exit:
kfree(m);
}
/* Enable monitor mode */
static int at76_start_monitor(struct at76_priv *priv)
{
struct at76_req_scan scan;
int ret;
memset(&scan, 0, sizeof(struct at76_req_scan));
eth_broadcast_addr(scan.bssid);
scan.channel = priv->channel;
scan.scan_type = SCAN_TYPE_PASSIVE;
scan.international_scan = 0;
scan.min_channel_time = cpu_to_le16(priv->scan_min_time);
scan.max_channel_time = cpu_to_le16(priv->scan_max_time);
scan.probe_delay = cpu_to_le16(0);
ret = at76_set_card_command(priv->udev, CMD_SCAN, &scan, sizeof(scan));
if (ret >= 0)
ret = at76_get_cmd_status(priv->udev, CMD_SCAN);
return ret;
}
/* Calculate padding from txbuf->wlength (which excludes the USB TX header),
   likely to compensate for a flaw in the AT76C503A USB part ... */
static inline int at76_calc_padding(int wlen)
{
/* add the USB TX header */
wlen += AT76_TX_HDRLEN;
wlen = wlen % 64;
if (wlen < 50)
return 50 - wlen;
if (wlen >= 61)
return 64 + 50 - wlen;
return 0;
}
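/* A worked example of the rule above (a sketch; AT76_TX_HDRLEN is
 * assumed to be the 8-byte Atmel TX header): an MPDU of 30 bytes
 * gives wlen = 38; 38 % 64 = 38 < 50, so 12 bytes of padding are
 * added and the bulk transfer is 38 + 12 = 50 bytes. An MPDU of
 * 55 bytes gives wlen = 63; 63 >= 61, so 64 + 50 - 63 = 51 bytes
 * of padding are added, for a 114-byte transfer. Either way the
 * total length modulo 64 lands in the 50..60 window that the USB
 * part apparently requires. */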
static void at76_rx_callback(struct urb *urb)
{
struct at76_priv *priv = urb->context;
tasklet_schedule(&priv->rx_tasklet);
}
static int at76_submit_rx_urb(struct at76_priv *priv)
{
int ret;
int size;
struct sk_buff *skb = priv->rx_skb;
if (!priv->rx_urb) {
wiphy_err(priv->hw->wiphy, "%s: priv->rx_urb is NULL\n",
__func__);
return -EFAULT;
}
if (!skb) {
skb = dev_alloc_skb(sizeof(struct at76_rx_buffer));
if (!skb) {
wiphy_err(priv->hw->wiphy,
"cannot allocate rx skbuff\n");
ret = -ENOMEM;
goto exit;
}
priv->rx_skb = skb;
} else {
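/* Reuse the previous skb: skb_push() with the full headroom moves
 * the data pointer back to the start of the buffer and skb_trim()
 * drops any stale payload, so the whole buffer is available again
 * for the next bulk-in transfer. */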
skb_push(skb, skb_headroom(skb));
skb_trim(skb, 0);
}
size = skb_tailroom(skb);
usb_fill_bulk_urb(priv->rx_urb, priv->udev, priv->rx_pipe,
skb_put(skb, size), size, at76_rx_callback, priv);
ret = usb_submit_urb(priv->rx_urb, GFP_ATOMIC);
if (ret < 0) {
if (ret == -ENODEV)
at76_dbg(DBG_DEVSTART,
"usb_submit_urb returned -ENODEV");
else
wiphy_err(priv->hw->wiphy,
"rx, usb_submit_urb failed: %d\n", ret);
}
exit:
if (ret < 0 && ret != -ENODEV)
wiphy_err(priv->hw->wiphy,
"cannot submit rx urb - please unload the driver and/or power cycle the device\n");
return ret;
}
/* Download external firmware */
static int at76_load_external_fw(struct usb_device *udev, struct fwentry *fwe)
{
int ret;
int op_mode;
int blockno = 0;
int bsize;
u8 *block;
u8 *buf = fwe->extfw;
int size = fwe->extfw_size;
if (!buf || !size)
return -ENOENT;
op_mode = at76_get_op_mode(udev);
at76_dbg(DBG_DEVSTART, "opmode %d", op_mode);
if (op_mode != OPMODE_NORMAL_NIC_WITHOUT_FLASH) {
dev_err(&udev->dev, "unexpected opmode %d\n", op_mode);
return -EINVAL;
}
block = kmalloc(FW_BLOCK_SIZE, GFP_KERNEL);
if (!block)
return -ENOMEM;
at76_dbg(DBG_DEVSTART, "downloading external firmware");
/* for fw >= 0.100, the device needs an extra empty block */
do {
bsize = min_t(int, size, FW_BLOCK_SIZE);
memcpy(block, buf, bsize);
at76_dbg(DBG_DEVSTART,
"ext fw, size left = %5d, bsize = %4d, blockno = %2d",
size, bsize, blockno);
ret = at76_load_ext_fw_block(udev, blockno, block, bsize);
if (ret != bsize) {
dev_err(&udev->dev,
"loading %dth firmware block failed: %d\n",
blockno, ret);
ret = -EIO;
goto exit;
}
buf += bsize;
size -= bsize;
blockno++;
} while (bsize > 0);
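/* Note that the loop condition tests the size of the block just
 * sent, so after the final data block one more iteration runs with
 * bsize == 0 and transfers the empty block that fw >= 0.100 needs
 * (see the comment above). */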
if (at76_is_505a(fwe->board_type)) {
at76_dbg(DBG_DEVSTART, "200 ms delay for 505a");
schedule_timeout_interruptible(HZ / 5 + 1);
}
exit:
kfree(block);
if (ret < 0)
dev_err(&udev->dev,
"downloading external firmware failed: %d\n", ret);
return ret;
}
/* Download internal firmware */
static int at76_load_internal_fw(struct usb_device *udev, struct fwentry *fwe)
{
int ret;
int need_remap = !at76_is_505a(fwe->board_type);
ret = at76_usbdfu_download(udev, fwe->intfw, fwe->intfw_size,
need_remap ? 0 : 2 * HZ);
if (ret < 0) {
dev_err(&udev->dev,
"downloading internal fw failed with %d\n", ret);
goto exit;
}
at76_dbg(DBG_DEVSTART, "sending REMAP");
/* no REMAP for 505A (see SF driver) */
if (need_remap) {
ret = at76_remap(udev);
if (ret < 0) {
dev_err(&udev->dev,
"sending REMAP failed with %d\n", ret);
goto exit;
}
}
at76_dbg(DBG_DEVSTART, "sleeping for 2 seconds");
schedule_timeout_interruptible(2 * HZ + 1);
usb_reset_device(udev);
exit:
return ret;
}
static int at76_startup_device(struct at76_priv *priv)
{
struct at76_card_config *ccfg = &priv->card_config;
int ret;
at76_dbg(DBG_PARAMS,
"%s param: ssid %.*s (%*phD) mode %s ch %d wep %s key %d "
"keylen %d", wiphy_name(priv->hw->wiphy), priv->essid_size,
priv->essid, IW_ESSID_MAX_SIZE, priv->essid,
priv->iw_mode == IW_MODE_ADHOC ? "adhoc" : "infra",
priv->channel, priv->wep_enabled ? "enabled" : "disabled",
priv->wep_key_id, priv->wep_keys_len[priv->wep_key_id]);
at76_dbg(DBG_PARAMS,
"%s param: preamble %s rts %d retry %d frag %d "
"txrate %s auth_mode %d", wiphy_name(priv->hw->wiphy),
preambles[priv->preamble_type], priv->rts_threshold,
priv->short_retry_limit, priv->frag_threshold,
priv->txrate == TX_RATE_1MBIT ? "1MBit" : priv->txrate ==
TX_RATE_2MBIT ? "2MBit" : priv->txrate ==
TX_RATE_5_5MBIT ? "5.5MBit" : priv->txrate ==
TX_RATE_11MBIT ? "11MBit" : priv->txrate ==
TX_RATE_AUTO ? "auto" : "<invalid>", priv->auth_mode);
at76_dbg(DBG_PARAMS,
"%s param: pm_mode %d pm_period %d auth_mode %s "
"scan_times %d %d scan_mode %s",
wiphy_name(priv->hw->wiphy), priv->pm_mode, priv->pm_period,
priv->auth_mode == WLAN_AUTH_OPEN ? "open" : "shared_secret",
priv->scan_min_time, priv->scan_max_time,
priv->scan_mode == SCAN_TYPE_ACTIVE ? "active" : "passive");
memset(ccfg, 0, sizeof(struct at76_card_config));
ccfg->promiscuous_mode = 0;
ccfg->short_retry_limit = priv->short_retry_limit;
if (priv->wep_enabled) {
if (priv->wep_keys_len[priv->wep_key_id] > WEP_SMALL_KEY_LEN)
ccfg->encryption_type = 2;
else
ccfg->encryption_type = 1;
/* jal: always exclude unencrypted if WEP is active */
ccfg->exclude_unencrypted = 1;
} else {
ccfg->exclude_unencrypted = 0;
ccfg->encryption_type = 0;
}
ccfg->rts_threshold = cpu_to_le16(priv->rts_threshold);
ccfg->fragmentation_threshold = cpu_to_le16(priv->frag_threshold);
memcpy(ccfg->basic_rate_set, hw_rates, 4);
/* jal: really needed, we do a set_mib for autorate later ??? */
ccfg->auto_rate_fallback = (priv->txrate == TX_RATE_AUTO ? 1 : 0);
ccfg->channel = priv->channel;
ccfg->privacy_invoked = priv->wep_enabled;
memcpy(ccfg->current_ssid, priv->essid, IW_ESSID_MAX_SIZE);
ccfg->ssid_len = priv->essid_size;
ccfg->wep_default_key_id = priv->wep_key_id;
memcpy(ccfg->wep_default_key_value, priv->wep_keys,
sizeof(priv->wep_keys));
ccfg->short_preamble = priv->preamble_type;
ccfg->beacon_period = cpu_to_le16(priv->beacon_period);
ret = at76_set_card_command(priv->udev, CMD_STARTUP, &priv->card_config,
sizeof(struct at76_card_config));
if (ret < 0) {
wiphy_err(priv->hw->wiphy, "at76_set_card_command failed: %d\n",
ret);
return ret;
}
at76_wait_completion(priv, CMD_STARTUP);
/* remove BSSID from previous run */
eth_zero_addr(priv->bssid);
priv->scanning = false;
if (at76_set_radio(priv, 1) == 1)
at76_wait_completion(priv, CMD_RADIO_ON);
ret = at76_set_preamble(priv, priv->preamble_type);
if (ret < 0)
return ret;
ret = at76_set_frag(priv, priv->frag_threshold);
if (ret < 0)
return ret;
ret = at76_set_rts(priv, priv->rts_threshold);
if (ret < 0)
return ret;
ret = at76_set_autorate_fallback(priv,
priv->txrate == TX_RATE_AUTO ? 1 : 0);
if (ret < 0)
return ret;
ret = at76_set_pm_mode(priv);
if (ret < 0)
return ret;
if (at76_debug & DBG_MIB) {
at76_dump_mib_mac(priv);
at76_dump_mib_mac_addr(priv);
at76_dump_mib_mac_mgmt(priv);
at76_dump_mib_mac_wep(priv);
at76_dump_mib_mdomain(priv);
at76_dump_mib_phy(priv);
at76_dump_mib_local(priv);
}
return 0;
}
/* Enable or disable promiscuous mode */
static void at76_work_set_promisc(struct work_struct *work)
{
struct at76_priv *priv = container_of(work, struct at76_priv,
work_set_promisc);
int ret = 0;
if (priv->device_unplugged)
return;
mutex_lock(&priv->mtx);
priv->mib_buf.type = MIB_LOCAL;
priv->mib_buf.size = 1;
priv->mib_buf.index = offsetof(struct mib_local, promiscuous_mode);
priv->mib_buf.data.byte = priv->promisc ? 1 : 0;
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
wiphy_err(priv->hw->wiphy,
"set_mib (promiscuous_mode) failed: %d\n", ret);
mutex_unlock(&priv->mtx);
}
/* Submit Rx urb back to the device */
static void at76_work_submit_rx(struct work_struct *work)
{
struct at76_priv *priv = container_of(work, struct at76_priv,
work_submit_rx);
mutex_lock(&priv->mtx);
at76_submit_rx_urb(priv);
mutex_unlock(&priv->mtx);
}
/* This is a workaround to make scanning work:
 * currently mac80211 does not process frames with no frequency
 * information.
 * However, during a scan the HW performs the sweep by itself, and we
 * have no way of knowing where the radio is actually tuned.
 * This function tries its best to guess that information:
 * during a scan, if the current frame is a beacon or a probe response,
 * the channel information is extracted from it.
 * When not scanning, for other frames, or if for whatever reason we
 * fail to parse beacons and probe responses, this function returns
 * priv->channel, which should be correct at least when we are not
 * scanning.
 */
static inline int at76_guess_freq(struct at76_priv *priv)
{
size_t el_off;
const u8 *el;
int channel = priv->channel;
int len = priv->rx_skb->len;
struct ieee80211_hdr *hdr = (void *)priv->rx_skb->data;
if (!priv->scanning)
goto exit;
if (len < 24)
goto exit;
if (ieee80211_is_probe_resp(hdr->frame_control)) {
el_off = offsetof(struct ieee80211_mgmt, u.probe_resp.variable);
el = ((struct ieee80211_mgmt *)hdr)->u.probe_resp.variable;
} else if (ieee80211_is_beacon(hdr->frame_control)) {
el_off = offsetof(struct ieee80211_mgmt, u.beacon.variable);
el = ((struct ieee80211_mgmt *)hdr)->u.beacon.variable;
} else {
goto exit;
}
len -= el_off;
el = cfg80211_find_ie(WLAN_EID_DS_PARAMS, el, len);
if (el && el[1] > 0)
channel = el[2];
exit:
return ieee80211_channel_to_frequency(channel, NL80211_BAND_2GHZ);
}
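/* For example, a beacon carrying the DS Parameter Set IE bytes
 * 0x03 0x01 0x0b (element id 3, length 1, channel 11) would be
 * reported on 2462 MHz while scanning. */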
static void at76_rx_tasklet(struct tasklet_struct *t)
{
struct at76_priv *priv = from_tasklet(priv, t, rx_tasklet);
struct urb *urb = priv->rx_urb;
struct at76_rx_buffer *buf;
struct ieee80211_rx_status rx_status = { 0 };
if (priv->device_unplugged) {
at76_dbg(DBG_DEVSTART, "device unplugged");
at76_dbg(DBG_DEVSTART, "urb status %d", urb->status);
return;
}
if (!priv->rx_skb || !priv->rx_skb->data)
return;
buf = (struct at76_rx_buffer *)priv->rx_skb->data;
if (urb->status != 0) {
if (urb->status != -ENOENT && urb->status != -ECONNRESET)
at76_dbg(DBG_URB,
"%s %s: - nonzero Rx bulk status received: %d",
__func__, wiphy_name(priv->hw->wiphy),
urb->status);
return;
}
at76_dbg(DBG_RX_ATMEL_HDR,
"%s: rx frame: rate %d rssi %d noise %d link %d",
wiphy_name(priv->hw->wiphy), buf->rx_rate, buf->rssi,
buf->noise_level, buf->link_quality);
skb_pull(priv->rx_skb, AT76_RX_HDRLEN);
skb_trim(priv->rx_skb, le16_to_cpu(buf->wlength));
at76_dbg_dump(DBG_RX_DATA, priv->rx_skb->data,
priv->rx_skb->len, "RX: len=%d", priv->rx_skb->len);
rx_status.signal = buf->rssi;
rx_status.flag |= RX_FLAG_DECRYPTED;
rx_status.flag |= RX_FLAG_IV_STRIPPED;
rx_status.band = NL80211_BAND_2GHZ;
rx_status.freq = at76_guess_freq(priv);
at76_dbg(DBG_MAC80211, "calling ieee80211_rx_irqsafe(): %d/%d",
priv->rx_skb->len, priv->rx_skb->data_len);
memcpy(IEEE80211_SKB_RXCB(priv->rx_skb), &rx_status, sizeof(rx_status));
ieee80211_rx_irqsafe(priv->hw, priv->rx_skb);
/* Use a new skb for the next receive */
priv->rx_skb = NULL;
at76_submit_rx_urb(priv);
}
/* Load firmware into kernel memory and parse it */
static struct fwentry *at76_load_firmware(struct usb_device *udev,
enum board_type board_type)
{
int ret;
char *str;
struct at76_fw_header *fwh;
struct fwentry *fwe = &firmwares[board_type];
mutex_lock(&fw_mutex);
if (fwe->loaded) {
at76_dbg(DBG_FW, "re-using previously loaded fw");
goto exit;
}
at76_dbg(DBG_FW, "downloading firmware %s", fwe->fwname);
ret = request_firmware(&fwe->fw, fwe->fwname, &udev->dev);
if (ret < 0) {
dev_err(&udev->dev, "firmware %s not found!\n",
fwe->fwname);
dev_err(&udev->dev,
"you may need to download the firmware from http://developer.berlios.de/projects/at76c503a/\n");
goto exit;
}
at76_dbg(DBG_FW, "got it.");
fwh = (struct at76_fw_header *)(fwe->fw->data);
if (fwe->fw->size <= sizeof(*fwh)) {
dev_err(&udev->dev,
"firmware is too short (0x%zx)\n", fwe->fw->size);
goto exit;
}
/* CRC currently not checked */
fwe->board_type = le32_to_cpu(fwh->board_type);
if (fwe->board_type != board_type) {
dev_err(&udev->dev,
"board type mismatch, requested %u, got %u\n",
board_type, fwe->board_type);
goto exit;
}
fwe->fw_version.major = fwh->major;
fwe->fw_version.minor = fwh->minor;
fwe->fw_version.patch = fwh->patch;
fwe->fw_version.build = fwh->build;
str = (char *)fwh + le32_to_cpu(fwh->str_offset);
fwe->intfw = (u8 *)fwh + le32_to_cpu(fwh->int_fw_offset);
fwe->intfw_size = le32_to_cpu(fwh->int_fw_len);
fwe->extfw = (u8 *)fwh + le32_to_cpu(fwh->ext_fw_offset);
fwe->extfw_size = le32_to_cpu(fwh->ext_fw_len);
fwe->loaded = 1;
dev_printk(KERN_DEBUG, &udev->dev,
"using firmware %s (version %d.%d.%d-%d)\n",
fwe->fwname, fwh->major, fwh->minor, fwh->patch, fwh->build);
at76_dbg(DBG_DEVSTART, "board %u, int %d:%d, ext %d:%d", board_type,
le32_to_cpu(fwh->int_fw_offset), le32_to_cpu(fwh->int_fw_len),
le32_to_cpu(fwh->ext_fw_offset), le32_to_cpu(fwh->ext_fw_len));
at76_dbg(DBG_DEVSTART, "firmware id %s", str);
exit:
mutex_unlock(&fw_mutex);
if (fwe->loaded)
return fwe;
else
return NULL;
}
static int at76_join(struct at76_priv *priv)
{
struct at76_req_join join;
int ret;
memset(&join, 0, sizeof(struct at76_req_join));
memcpy(join.essid, priv->essid, priv->essid_size);
join.essid_size = priv->essid_size;
memcpy(join.bssid, priv->bssid, ETH_ALEN);
join.bss_type = INFRASTRUCTURE_MODE;
join.channel = priv->channel;
join.timeout = cpu_to_le16(2000);
at76_dbg(DBG_MAC80211, "%s: sending CMD_JOIN", __func__);
ret = at76_set_card_command(priv->udev, CMD_JOIN, &join,
sizeof(struct at76_req_join));
if (ret < 0) {
wiphy_err(priv->hw->wiphy, "at76_set_card_command failed: %d\n",
ret);
return 0;
}
ret = at76_wait_completion(priv, CMD_JOIN);
at76_dbg(DBG_MAC80211, "%s: CMD_JOIN returned: 0x%02x", __func__, ret);
if (ret != CMD_STATUS_COMPLETE) {
wiphy_err(priv->hw->wiphy, "at76_wait_completion failed: %d\n",
ret);
return 0;
}
at76_set_pm_mode(priv);
return 0;
}
static void at76_work_join_bssid(struct work_struct *work)
{
struct at76_priv *priv = container_of(work, struct at76_priv,
work_join_bssid);
if (priv->device_unplugged)
return;
mutex_lock(&priv->mtx);
if (is_valid_ether_addr(priv->bssid))
at76_join(priv);
mutex_unlock(&priv->mtx);
}
static void at76_mac80211_tx_callback(struct urb *urb)
{
struct at76_priv *priv = urb->context;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(priv->tx_skb);
at76_dbg(DBG_MAC80211, "%s()", __func__);
switch (urb->status) {
case 0:
/* success */
info->flags |= IEEE80211_TX_STAT_ACK;
break;
case -ENOENT:
case -ECONNRESET:
/* fail, urb has been unlinked */
/* FIXME: add error message */
break;
default:
at76_dbg(DBG_URB, "%s - nonzero tx status received: %d",
__func__, urb->status);
break;
}
memset(&info->status, 0, sizeof(info->status));
ieee80211_tx_status_irqsafe(priv->hw, priv->tx_skb);
priv->tx_skb = NULL;
ieee80211_wake_queues(priv->hw);
}
static void at76_mac80211_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
{
struct at76_priv *priv = hw->priv;
struct at76_tx_buffer *tx_buffer = priv->bulk_out_buffer;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
int padding, submit_len, ret;
at76_dbg(DBG_MAC80211, "%s()", __func__);
if (priv->tx_urb->status == -EINPROGRESS) {
wiphy_err(priv->hw->wiphy,
"%s called while tx urb is pending\n", __func__);
dev_kfree_skb_any(skb);
return;
}
/* The following code lines are important when the device is going to
* authenticate with a new bssid. The driver must send CMD_JOIN before
* an authentication frame is transmitted. For this to succeed, the
* correct bssid of the AP must be known. As mac80211 does not inform
* drivers about the bssid prior to the authentication process the
* following workaround is necessary. If the TX frame is an
* authentication frame extract the bssid and send the CMD_JOIN. */
if (ieee80211_is_auth(mgmt->frame_control)) {
if (!ether_addr_equal_64bits(priv->bssid, mgmt->bssid)) {
memcpy(priv->bssid, mgmt->bssid, ETH_ALEN);
ieee80211_queue_work(hw, &priv->work_join_bssid);
dev_kfree_skb_any(skb);
return;
}
}
ieee80211_stop_queues(hw);
at76_ledtrig_tx_activity(); /* tell ledtrigger we send a packet */
WARN_ON(priv->tx_skb != NULL);
priv->tx_skb = skb;
padding = at76_calc_padding(skb->len);
submit_len = AT76_TX_HDRLEN + skb->len + padding;
/* setup 'Atmel' header */
memset(tx_buffer, 0, sizeof(*tx_buffer));
tx_buffer->padding = padding;
tx_buffer->wlength = cpu_to_le16(skb->len);
tx_buffer->tx_rate = ieee80211_get_tx_rate(hw, info)->hw_value;
memset(tx_buffer->reserved, 0, sizeof(tx_buffer->reserved));
memcpy(tx_buffer->packet, skb->data, skb->len);
at76_dbg(DBG_TX_DATA, "%s tx: wlen 0x%x pad 0x%x rate %d hdr",
wiphy_name(priv->hw->wiphy), le16_to_cpu(tx_buffer->wlength),
tx_buffer->padding, tx_buffer->tx_rate);
/* send stuff */
at76_dbg_dump(DBG_TX_DATA_CONTENT, tx_buffer, submit_len,
"%s(): tx_buffer %d bytes:", __func__, submit_len);
usb_fill_bulk_urb(priv->tx_urb, priv->udev, priv->tx_pipe, tx_buffer,
submit_len, at76_mac80211_tx_callback, priv);
ret = usb_submit_urb(priv->tx_urb, GFP_ATOMIC);
if (ret) {
wiphy_err(priv->hw->wiphy, "error in tx submit urb: %d\n", ret);
if (ret == -EINVAL)
wiphy_err(priv->hw->wiphy,
"-EINVAL: tx urb %p hcpriv %p complete %p\n",
priv->tx_urb,
priv->tx_urb->hcpriv, priv->tx_urb->complete);
}
}
static int at76_mac80211_start(struct ieee80211_hw *hw)
{
struct at76_priv *priv = hw->priv;
int ret;
at76_dbg(DBG_MAC80211, "%s()", __func__);
mutex_lock(&priv->mtx);
ret = at76_submit_rx_urb(priv);
if (ret < 0) {
wiphy_err(priv->hw->wiphy, "open: submit_rx_urb failed: %d\n",
ret);
goto error;
}
at76_startup_device(priv);
at76_start_monitor(priv);
error:
mutex_unlock(&priv->mtx);
return 0;
}
static void at76_mac80211_stop(struct ieee80211_hw *hw)
{
struct at76_priv *priv = hw->priv;
at76_dbg(DBG_MAC80211, "%s()", __func__);
cancel_delayed_work(&priv->dwork_hw_scan);
cancel_work_sync(&priv->work_join_bssid);
cancel_work_sync(&priv->work_set_promisc);
mutex_lock(&priv->mtx);
if (!priv->device_unplugged) {
/* We are called by "ifconfig ethX down", not because the
* device is no longer available. */
at76_set_radio(priv, 0);
/* We unlink rx_urb because at76_open() re-submits it.
* If unplugged, at76_delete_device() takes care of it. */
usb_kill_urb(priv->rx_urb);
}
mutex_unlock(&priv->mtx);
}
static int at76_add_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct at76_priv *priv = hw->priv;
int ret = 0;
at76_dbg(DBG_MAC80211, "%s()", __func__);
mutex_lock(&priv->mtx);
switch (vif->type) {
case NL80211_IFTYPE_STATION:
priv->iw_mode = IW_MODE_INFRA;
break;
default:
ret = -EOPNOTSUPP;
goto exit;
}
exit:
mutex_unlock(&priv->mtx);
return ret;
}
static void at76_remove_interface(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
at76_dbg(DBG_MAC80211, "%s()", __func__);
}
static void at76_dwork_hw_scan(struct work_struct *work)
{
struct at76_priv *priv = container_of(work, struct at76_priv,
dwork_hw_scan.work);
struct cfg80211_scan_info info = {
.aborted = false,
};
int ret;
if (priv->device_unplugged)
return;
mutex_lock(&priv->mtx);
ret = at76_get_cmd_status(priv->udev, CMD_SCAN);
at76_dbg(DBG_MAC80211, "%s: CMD_SCAN status 0x%02x", __func__, ret);
/* FIXME: add maximum time for scan to complete */
if (ret != CMD_STATUS_COMPLETE) {
ieee80211_queue_delayed_work(priv->hw, &priv->dwork_hw_scan,
SCAN_POLL_INTERVAL);
mutex_unlock(&priv->mtx);
return;
}
if (is_valid_ether_addr(priv->bssid))
at76_join(priv);
priv->scanning = false;
mutex_unlock(&priv->mtx);
ieee80211_scan_completed(priv->hw, &info);
ieee80211_wake_queues(priv->hw);
}
static int at76_hw_scan(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_scan_request *hw_req)
{
struct cfg80211_scan_request *req = &hw_req->req;
struct at76_priv *priv = hw->priv;
struct at76_req_scan scan;
u8 *ssid = NULL;
int ret, len = 0;
at76_dbg(DBG_MAC80211, "%s():", __func__);
if (priv->device_unplugged)
return 0;
mutex_lock(&priv->mtx);
ieee80211_stop_queues(hw);
memset(&scan, 0, sizeof(struct at76_req_scan));
eth_broadcast_addr(scan.bssid);
if (req->n_ssids) {
scan.scan_type = SCAN_TYPE_ACTIVE;
ssid = req->ssids[0].ssid;
len = req->ssids[0].ssid_len;
} else {
scan.scan_type = SCAN_TYPE_PASSIVE;
}
if (len) {
memcpy(scan.essid, ssid, len);
scan.essid_size = len;
}
scan.min_channel_time = cpu_to_le16(priv->scan_min_time);
scan.max_channel_time = cpu_to_le16(priv->scan_max_time);
scan.probe_delay = cpu_to_le16(priv->scan_min_time * 1000);
scan.international_scan = 0;
at76_dbg(DBG_MAC80211, "%s: sending CMD_SCAN", __func__);
ret = at76_set_card_command(priv->udev, CMD_SCAN, &scan, sizeof(scan));
if (ret < 0) {
wiphy_err(priv->hw->wiphy, "CMD_SCAN failed: %d\n", ret);
goto exit;
}
priv->scanning = true;
ieee80211_queue_delayed_work(priv->hw, &priv->dwork_hw_scan,
SCAN_POLL_INTERVAL);
exit:
mutex_unlock(&priv->mtx);
return 0;
}
static int at76_config(struct ieee80211_hw *hw, u32 changed)
{
struct at76_priv *priv = hw->priv;
at76_dbg(DBG_MAC80211, "%s(): channel %d",
__func__, hw->conf.chandef.chan->hw_value);
at76_dbg_dump(DBG_MAC80211, priv->bssid, ETH_ALEN, "bssid:");
mutex_lock(&priv->mtx);
priv->channel = hw->conf.chandef.chan->hw_value;
if (is_valid_ether_addr(priv->bssid))
at76_join(priv);
else
at76_start_monitor(priv);
mutex_unlock(&priv->mtx);
return 0;
}
static void at76_bss_info_changed(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *conf,
u64 changed)
{
struct at76_priv *priv = hw->priv;
at76_dbg(DBG_MAC80211, "%s():", __func__);
if (!(changed & BSS_CHANGED_BSSID))
return;
at76_dbg_dump(DBG_MAC80211, conf->bssid, ETH_ALEN, "bssid:");
mutex_lock(&priv->mtx);
memcpy(priv->bssid, conf->bssid, ETH_ALEN);
if (is_valid_ether_addr(priv->bssid))
/* mac80211 is joining a bss */
at76_join(priv);
mutex_unlock(&priv->mtx);
}
/* must be atomic */
static void at76_configure_filter(struct ieee80211_hw *hw,
unsigned int changed_flags,
unsigned int *total_flags, u64 multicast)
{
struct at76_priv *priv = hw->priv;
int flags;
at76_dbg(DBG_MAC80211, "%s(): changed_flags=0x%08x "
"total_flags=0x%08x",
__func__, changed_flags, *total_flags);
flags = changed_flags & AT76_SUPPORTED_FILTERS;
*total_flags = AT76_SUPPORTED_FILTERS;
/* Bail out after updating flags to prevent a WARN_ON in mac80211. */
if (priv->device_unplugged)
return;
/* FIXME: access to priv->promisc should be protected with
* priv->mtx, but it's impossible because this function needs to be
* atomic */
if (flags && !priv->promisc) {
/* mac80211 wants us to enable promiscuous mode */
priv->promisc = 1;
} else if (!flags && priv->promisc) {
/* we need to disable promiscuous mode */
priv->promisc = 0;
} else
return;
ieee80211_queue_work(hw, &priv->work_set_promisc);
}
static int at76_set_wep(struct at76_priv *priv)
{
int ret = 0;
struct mib_mac_wep *mib_data = &priv->mib_buf.data.wep_mib;
priv->mib_buf.type = MIB_MAC_WEP;
priv->mib_buf.size = sizeof(struct mib_mac_wep);
priv->mib_buf.index = 0;
memset(mib_data, 0, sizeof(*mib_data));
if (priv->wep_enabled) {
if (priv->wep_keys_len[priv->wep_key_id] > WEP_SMALL_KEY_LEN)
mib_data->encryption_level = 2;
else
mib_data->encryption_level = 1;
/* always exclude unencrypted if WEP is active */
mib_data->exclude_unencrypted = 1;
} else {
mib_data->exclude_unencrypted = 0;
mib_data->encryption_level = 0;
}
mib_data->privacy_invoked = priv->wep_enabled;
mib_data->wep_default_key_id = priv->wep_key_id;
memcpy(mib_data->wep_default_keyvalue, priv->wep_keys,
sizeof(priv->wep_keys));
ret = at76_set_mib(priv, &priv->mib_buf);
if (ret < 0)
wiphy_err(priv->hw->wiphy,
"set_mib (wep) failed: %d\n", ret);
return ret;
}
static int at76_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
struct ieee80211_vif *vif, struct ieee80211_sta *sta,
struct ieee80211_key_conf *key)
{
struct at76_priv *priv = hw->priv;
int i;
at76_dbg(DBG_MAC80211, "%s(): cmd %d key->cipher %d key->keyidx %d "
"key->keylen %d",
__func__, cmd, key->cipher, key->keyidx, key->keylen);
if ((key->cipher != WLAN_CIPHER_SUITE_WEP40) &&
(key->cipher != WLAN_CIPHER_SUITE_WEP104))
return -EOPNOTSUPP;
key->hw_key_idx = key->keyidx;
mutex_lock(&priv->mtx);
switch (cmd) {
case SET_KEY:
memcpy(priv->wep_keys[key->keyidx], key->key, key->keylen);
priv->wep_keys_len[key->keyidx] = key->keylen;
/* FIXME: find out how to do this properly */
priv->wep_key_id = key->keyidx;
break;
case DISABLE_KEY:
default:
priv->wep_keys_len[key->keyidx] = 0;
break;
}
priv->wep_enabled = 0;
for (i = 0; i < WEP_KEYS; i++) {
if (priv->wep_keys_len[i] != 0)
priv->wep_enabled = 1;
}
at76_set_wep(priv);
mutex_unlock(&priv->mtx);
return 0;
}
static const struct ieee80211_ops at76_ops = {
.tx = at76_mac80211_tx,
.wake_tx_queue = ieee80211_handle_wake_tx_queue,
.add_interface = at76_add_interface,
.remove_interface = at76_remove_interface,
.config = at76_config,
.bss_info_changed = at76_bss_info_changed,
.configure_filter = at76_configure_filter,
.start = at76_mac80211_start,
.stop = at76_mac80211_stop,
.hw_scan = at76_hw_scan,
.set_key = at76_set_key,
};
/* Allocate network device and initialize private data */
static struct at76_priv *at76_alloc_new_device(struct usb_device *udev)
{
struct ieee80211_hw *hw;
struct at76_priv *priv;
hw = ieee80211_alloc_hw(sizeof(struct at76_priv), &at76_ops);
if (!hw) {
printk(KERN_ERR DRIVER_NAME ": could not register"
" ieee80211_hw\n");
return NULL;
}
priv = hw->priv;
priv->hw = hw;
priv->udev = udev;
mutex_init(&priv->mtx);
INIT_WORK(&priv->work_set_promisc, at76_work_set_promisc);
INIT_WORK(&priv->work_submit_rx, at76_work_submit_rx);
INIT_WORK(&priv->work_join_bssid, at76_work_join_bssid);
INIT_DELAYED_WORK(&priv->dwork_hw_scan, at76_dwork_hw_scan);
tasklet_setup(&priv->rx_tasklet, at76_rx_tasklet);
priv->pm_mode = AT76_PM_OFF;
priv->pm_period = 0; /* unit: us */
return priv;
}
static int at76_alloc_urbs(struct at76_priv *priv,
struct usb_interface *interface)
{
struct usb_endpoint_descriptor *endpoint, *ep_in, *ep_out;
int i;
int buffer_size;
struct usb_host_interface *iface_desc;
at76_dbg(DBG_PROC_ENTRY, "%s: ENTER", __func__);
at76_dbg(DBG_URB, "%s: NumEndpoints %d ", __func__,
interface->cur_altsetting->desc.bNumEndpoints);
ep_in = NULL;
ep_out = NULL;
iface_desc = interface->cur_altsetting;
for (i = 0; i < iface_desc->desc.bNumEndpoints; i++) {
endpoint = &iface_desc->endpoint[i].desc;
at76_dbg(DBG_URB, "%s: %d. endpoint: addr 0x%x attr 0x%x",
__func__, i, endpoint->bEndpointAddress,
endpoint->bmAttributes);
if (!ep_in && usb_endpoint_is_bulk_in(endpoint))
ep_in = endpoint;
if (!ep_out && usb_endpoint_is_bulk_out(endpoint))
ep_out = endpoint;
}
if (!ep_in || !ep_out) {
dev_err(&interface->dev, "bulk endpoints missing\n");
return -ENXIO;
}
priv->rx_pipe = usb_rcvbulkpipe(priv->udev, ep_in->bEndpointAddress);
priv->tx_pipe = usb_sndbulkpipe(priv->udev, ep_out->bEndpointAddress);
priv->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
priv->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
if (!priv->rx_urb || !priv->tx_urb) {
dev_err(&interface->dev, "cannot allocate URB\n");
return -ENOMEM;
}
buffer_size = sizeof(struct at76_tx_buffer) + MAX_PADDING_SIZE;
priv->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL);
if (!priv->bulk_out_buffer)
return -ENOMEM;
at76_dbg(DBG_PROC_ENTRY, "%s: EXIT", __func__);
return 0;
}
static struct ieee80211_rate at76_rates[] = {
{ .bitrate = 10, .hw_value = TX_RATE_1MBIT, },
{ .bitrate = 20, .hw_value = TX_RATE_2MBIT, },
{ .bitrate = 55, .hw_value = TX_RATE_5_5MBIT, },
{ .bitrate = 110, .hw_value = TX_RATE_11MBIT, },
};
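/* mac80211 bitrates are given in units of 100 kbit/s, so the table
 * above maps 1, 2, 5.5 and 11 Mbit/s onto the firmware's hardware
 * rate codes. */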
static struct ieee80211_channel at76_channels[] = {
{ .center_freq = 2412, .hw_value = 1 },
{ .center_freq = 2417, .hw_value = 2 },
{ .center_freq = 2422, .hw_value = 3 },
{ .center_freq = 2427, .hw_value = 4 },
{ .center_freq = 2432, .hw_value = 5 },
{ .center_freq = 2437, .hw_value = 6 },
{ .center_freq = 2442, .hw_value = 7 },
{ .center_freq = 2447, .hw_value = 8 },
{ .center_freq = 2452, .hw_value = 9 },
{ .center_freq = 2457, .hw_value = 10 },
{ .center_freq = 2462, .hw_value = 11 },
{ .center_freq = 2467, .hw_value = 12 },
{ .center_freq = 2472, .hw_value = 13 },
{ .center_freq = 2484, .hw_value = 14 }
};
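/* Standard 2.4 GHz channel plan: center_freq = 2407 + 5 * hw_value
 * for channels 1-13, plus the Japan-only channel 14 at 2484 MHz. */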
static struct ieee80211_supported_band at76_supported_band = {
.channels = at76_channels,
.n_channels = ARRAY_SIZE(at76_channels),
.bitrates = at76_rates,
.n_bitrates = ARRAY_SIZE(at76_rates),
};
/* Register network device and initialize the hardware */
static int at76_init_new_device(struct at76_priv *priv,
struct usb_interface *interface)
{
struct wiphy *wiphy;
size_t len;
int ret;
/* set up the endpoint information */
/* check out the endpoints */
at76_dbg(DBG_DEVSTART, "USB interface: %d endpoints",
interface->cur_altsetting->desc.bNumEndpoints);
ret = at76_alloc_urbs(priv, interface);
if (ret < 0)
goto exit;
/* MAC address */
ret = at76_get_hw_config(priv);
if (ret < 0) {
dev_err(&interface->dev, "cannot get MAC address\n");
goto exit;
}
priv->domain = at76_get_reg_domain(priv->regulatory_domain);
priv->channel = DEF_CHANNEL;
priv->iw_mode = IW_MODE_INFRA;
priv->rts_threshold = DEF_RTS_THRESHOLD;
priv->frag_threshold = DEF_FRAG_THRESHOLD;
priv->short_retry_limit = DEF_SHORT_RETRY_LIMIT;
priv->txrate = TX_RATE_AUTO;
priv->preamble_type = PREAMBLE_TYPE_LONG;
priv->beacon_period = 100;
priv->auth_mode = WLAN_AUTH_OPEN;
priv->scan_min_time = DEF_SCAN_MIN_TIME;
priv->scan_max_time = DEF_SCAN_MAX_TIME;
priv->scan_mode = SCAN_TYPE_ACTIVE;
priv->device_unplugged = 0;
/* mac80211 initialisation */
wiphy = priv->hw->wiphy;
priv->hw->wiphy->max_scan_ssids = 1;
priv->hw->wiphy->max_scan_ie_len = 0;
priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
priv->hw->wiphy->bands[NL80211_BAND_2GHZ] = &at76_supported_band;
ieee80211_hw_set(priv->hw, RX_INCLUDES_FCS);
ieee80211_hw_set(priv->hw, SIGNAL_UNSPEC);
priv->hw->max_signal = 100;
SET_IEEE80211_DEV(priv->hw, &interface->dev);
SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
len = sizeof(wiphy->fw_version);
snprintf(wiphy->fw_version, len, "%d.%d.%d-%d",
priv->fw_version.major, priv->fw_version.minor,
priv->fw_version.patch, priv->fw_version.build);
wiphy->hw_version = priv->board_type;
wiphy_ext_feature_set(wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
ret = ieee80211_register_hw(priv->hw);
if (ret) {
printk(KERN_ERR "cannot register mac80211 hw (status %d)!\n",
ret);
goto exit;
}
priv->mac80211_registered = 1;
wiphy_info(priv->hw->wiphy, "USB %s, MAC %pM, firmware %d.%d.%d-%d\n",
dev_name(&interface->dev), priv->mac_addr,
priv->fw_version.major, priv->fw_version.minor,
priv->fw_version.patch, priv->fw_version.build);
wiphy_info(priv->hw->wiphy, "regulatory domain 0x%02x: %s\n",
priv->regulatory_domain, priv->domain->name);
exit:
return ret;
}
static void at76_delete_device(struct at76_priv *priv)
{
at76_dbg(DBG_PROC_ENTRY, "%s: ENTER", __func__);
/* The device is gone, don't bother turning it off */
priv->device_unplugged = 1;
tasklet_kill(&priv->rx_tasklet);
if (priv->mac80211_registered)
ieee80211_unregister_hw(priv->hw);
if (priv->tx_urb) {
usb_kill_urb(priv->tx_urb);
usb_free_urb(priv->tx_urb);
}
if (priv->rx_urb) {
usb_kill_urb(priv->rx_urb);
usb_free_urb(priv->rx_urb);
}
at76_dbg(DBG_PROC_ENTRY, "%s: unlinked urbs", __func__);
kfree(priv->bulk_out_buffer);
del_timer_sync(&ledtrig_tx_timer);
kfree_skb(priv->rx_skb);
at76_dbg(DBG_PROC_ENTRY, "%s: before freeing priv/ieee80211_hw",
__func__);
ieee80211_free_hw(priv->hw);
at76_dbg(DBG_PROC_ENTRY, "%s: EXIT", __func__);
}
static int at76_probe(struct usb_interface *interface,
const struct usb_device_id *id)
{
int ret;
struct at76_priv *priv;
struct fwentry *fwe;
struct usb_device *udev;
int op_mode;
int need_ext_fw = 0;
struct mib_fw_version *fwv = NULL;
int board_type = (int)id->driver_info;
udev = usb_get_dev(interface_to_usbdev(interface));
fwv = kmalloc(sizeof(*fwv), GFP_KERNEL);
if (!fwv) {
ret = -ENOMEM;
goto exit;
}
/* Load firmware into kernel memory */
fwe = at76_load_firmware(udev, board_type);
if (!fwe) {
ret = -ENOENT;
goto exit;
}
op_mode = at76_get_op_mode(udev);
at76_dbg(DBG_DEVSTART, "opmode %d", op_mode);
/* we get OPMODE_NONE with 2.4.23, SMC2662W-AR ???
we get 204 with 2.4.23, Fiberline FL-WL240u (505A+RFMD2958) ??? */
if (op_mode == OPMODE_HW_CONFIG_MODE) {
dev_err(&interface->dev,
"cannot handle a device in HW_CONFIG_MODE\n");
ret = -EBUSY;
goto exit;
}
if (op_mode != OPMODE_NORMAL_NIC_WITH_FLASH
&& op_mode != OPMODE_NORMAL_NIC_WITHOUT_FLASH) {
/* download internal firmware part */
dev_printk(KERN_DEBUG, &interface->dev,
"downloading internal firmware\n");
ret = at76_load_internal_fw(udev, fwe);
if (ret < 0) {
dev_err(&interface->dev,
"error %d downloading internal firmware\n",
ret);
}
goto exit;
}
/* Internal firmware already inside the device. Get the firmware
* version to test whether external firmware is loaded.
* This works only for newer firmware, e.g. the Intersil 0.90.x
* firmware says "control timeout on ep0in" and subsequent
* at76_get_op_mode() calls fail too :-( */
/* if version >= 0.100.x.y or a device with built-in flash, we can
* query the device for the fw version */
if ((fwe->fw_version.major > 0 || fwe->fw_version.minor >= 100)
|| (op_mode == OPMODE_NORMAL_NIC_WITH_FLASH)) {
ret = at76_get_mib(udev, MIB_FW_VERSION, fwv, sizeof(*fwv));
if (ret < 0 || (fwv->major | fwv->minor) == 0)
need_ext_fw = 1;
} else
/* No way to check firmware version, reload to be sure */
need_ext_fw = 1;
if (need_ext_fw) {
dev_printk(KERN_DEBUG, &interface->dev,
"downloading external firmware\n");
ret = at76_load_external_fw(udev, fwe);
if (ret < 0)
goto exit;
/* Re-check firmware version */
ret = at76_get_mib(udev, MIB_FW_VERSION, fwv, sizeof(*fwv));
if (ret < 0) {
dev_err(&interface->dev,
"error %d getting firmware version\n", ret);
goto exit;
}
}
priv = at76_alloc_new_device(udev);
if (!priv) {
ret = -ENOMEM;
goto exit;
}
usb_set_intfdata(interface, priv);
memcpy(&priv->fw_version, fwv, sizeof(struct mib_fw_version));
priv->board_type = board_type;
ret = at76_init_new_device(priv, interface);
if (ret < 0)
at76_delete_device(priv);
exit:
kfree(fwv);
if (ret < 0)
usb_put_dev(udev);
return ret;
}
static void at76_disconnect(struct usb_interface *interface)
{
struct at76_priv *priv;
priv = usb_get_intfdata(interface);
usb_set_intfdata(interface, NULL);
/* Disconnect after loading internal firmware */
if (!priv)
return;
wiphy_info(priv->hw->wiphy, "disconnecting\n");
at76_delete_device(priv);
usb_put_dev(priv->udev);
dev_info(&interface->dev, "disconnected\n");
}
/* Structure for registering this driver with the USB subsystem */
static struct usb_driver at76_driver = {
.name = DRIVER_NAME,
.probe = at76_probe,
.disconnect = at76_disconnect,
.id_table = dev_table,
.disable_hub_initiated_lpm = 1,
};
static int __init at76_mod_init(void)
{
int result;
printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " loading\n");
/* register this driver with the USB subsystem */
result = usb_register(&at76_driver);
if (result < 0)
printk(KERN_ERR DRIVER_NAME
": usb_register failed (status %d)\n", result);
else
led_trigger_register_simple("at76_usb-tx", &ledtrig_tx);
return result;
}
static void __exit at76_mod_exit(void)
{
int i;
printk(KERN_INFO DRIVER_DESC " " DRIVER_VERSION " unloading\n");
usb_deregister(&at76_driver);
for (i = 0; i < ARRAY_SIZE(firmwares); i++)
release_firmware(firmwares[i].fw);
led_trigger_unregister_simple(ledtrig_tx);
}
module_param_named(debug, at76_debug, uint, 0600);
MODULE_PARM_DESC(debug, "Debugging level");
module_init(at76_mod_init);
module_exit(at76_mod_exit);
MODULE_AUTHOR("Oliver Kurth <[email protected]>");
MODULE_AUTHOR("Joerg Albert <[email protected]>");
MODULE_AUTHOR("Alex <[email protected]>");
MODULE_AUTHOR("Nick Jones");
MODULE_AUTHOR("Balint Seeber <[email protected]>");
MODULE_AUTHOR("Pavel Roskin <[email protected]>");
MODULE_AUTHOR("Guido Guenther <[email protected]>");
MODULE_AUTHOR("Kalle Valo <[email protected]>");
MODULE_AUTHOR("Sebastian Smolorz <[email protected]>");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/net/wireless/atmel/at76c50x-usb.c
|
/*** -*- linux-c -*- **********************************************************
Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
Copyright 2000-2001 ATMEL Corporation.
Copyright 2003 Simon Kelley.
This code was developed from version 2.1.1 of the Atmel drivers,
released by Atmel corp. under the GPL in December 2002. It also
includes code from the Linux aironet drivers (C) Benjamin Reed,
and the Linux PCMCIA package, (C) David Hinds.
For all queries about this code, please contact the current author,
Simon Kelley <[email protected]> and not Atmel Corporation.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Atmel wireless lan drivers; if not, see
<http://www.gnu.org/licenses/>.
******************************************************************************/
#ifdef __IN_PCMCIA_PACKAGE__
#include <pcmcia/k_compat.h>
#endif
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/netdevice.h>
#include <linux/moduleparam.h>
#include <linux/device.h>
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
#include <pcmcia/ciscode.h>
#include <asm/io.h>
#include <linux/wireless.h>
#include "atmel.h"
/*====================================================================*/
MODULE_AUTHOR("Simon Kelley");
MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
MODULE_LICENSE("GPL");
/*====================================================================*/
static int atmel_config(struct pcmcia_device *link);
static void atmel_release(struct pcmcia_device *link);
static void atmel_detach(struct pcmcia_device *p_dev);
struct local_info {
struct net_device *eth_dev;
};
static int atmel_probe(struct pcmcia_device *p_dev)
{
struct local_info *local;
int ret;
dev_dbg(&p_dev->dev, "atmel_attach()\n");
/* Allocate space for private device-specific data */
local = kzalloc(sizeof(*local), GFP_KERNEL);
if (!local)
return -ENOMEM;
p_dev->priv = local;
ret = atmel_config(p_dev);
if (ret)
goto err_free_priv;
return 0;
err_free_priv:
kfree(p_dev->priv);
return ret;
}
static void atmel_detach(struct pcmcia_device *link)
{
dev_dbg(&link->dev, "atmel_detach\n");
atmel_release(link);
kfree(link->priv);
}
/* Call-back function to interrogate PCMCIA-specific information
about the current existence of the card */
static int card_present(void *arg)
{
struct pcmcia_device *link = (struct pcmcia_device *)arg;
if (pcmcia_dev_present(link))
return 1;
return 0;
}
static int atmel_config_check(struct pcmcia_device *p_dev, void *priv_data)
{
if (p_dev->config_index == 0)
return -EINVAL;
return pcmcia_request_io(p_dev);
}
static int atmel_config(struct pcmcia_device *link)
{
int ret;
const struct pcmcia_device_id *did;
did = dev_get_drvdata(&link->dev);
dev_dbg(&link->dev, "atmel_config\n");
link->config_flags |= CONF_ENABLE_IRQ | CONF_AUTO_SET_VPP |
CONF_AUTO_AUDIO | CONF_AUTO_SET_IO;
if (pcmcia_loop_config(link, atmel_config_check, NULL))
goto failed;
if (!link->irq) {
dev_err(&link->dev, "atmel: cannot assign IRQ: check that CONFIG_ISA is set in kernel config.\n");
goto failed;
}
ret = pcmcia_enable_device(link);
if (ret)
goto failed;
((struct local_info *)link->priv)->eth_dev =
init_atmel_card(link->irq,
link->resource[0]->start,
did ? did->driver_info : ATMEL_FW_TYPE_NONE,
&link->dev,
card_present,
link);
if (!((struct local_info *)link->priv)->eth_dev)
goto failed;
return 0;
failed:
atmel_release(link);
return -ENODEV;
}
static void atmel_release(struct pcmcia_device *link)
{
struct net_device *dev = ((struct local_info *)link->priv)->eth_dev;
dev_dbg(&link->dev, "atmel_release\n");
if (dev)
stop_atmel_card(dev);
((struct local_info *)link->priv)->eth_dev = NULL;
pcmcia_disable_device(link);
}
static int atmel_suspend(struct pcmcia_device *link)
{
struct local_info *local = link->priv;
netif_device_detach(local->eth_dev);
return 0;
}
static int atmel_resume(struct pcmcia_device *link)
{
struct local_info *local = link->priv;
atmel_open(local->eth_dev);
netif_device_attach(local->eth_dev);
return 0;
}
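/* On suspend the net device is only detached so the stack stops
 * using it; on resume the card is reopened (restoring its state)
 * before the device is attached again. */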
/*====================================================================*/
/* We use the driver_info field to store the correct firmware type for a card. */
#define PCMCIA_DEVICE_MANF_CARD_INFO(manf, card, info) { \
.match_flags = PCMCIA_DEV_ID_MATCH_MANF_ID| \
PCMCIA_DEV_ID_MATCH_CARD_ID, \
.manf_id = (manf), \
.card_id = (card), \
.driver_info = (kernel_ulong_t)(info), }
#define PCMCIA_DEVICE_PROD_ID12_INFO(v1, v2, vh1, vh2, info) { \
.match_flags = PCMCIA_DEV_ID_MATCH_PROD_ID1| \
PCMCIA_DEV_ID_MATCH_PROD_ID2, \
.prod_id = { (v1), (v2), NULL, NULL }, \
.prod_id_hash = { (vh1), (vh2), 0, 0 }, \
.driver_info = (kernel_ulong_t)(info), }
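/* Expanded, the first entry in the table below is equivalent to:
 *
 * { .match_flags = PCMCIA_DEV_ID_MATCH_MANF_ID |
 *                  PCMCIA_DEV_ID_MATCH_CARD_ID,
 *   .manf_id = 0x0101, .card_id = 0x0620,
 *   .driver_info = (kernel_ulong_t)ATMEL_FW_TYPE_502_3COM, }
 *
 * atmel_config() later retrieves the matched entry via
 * dev_get_drvdata() and reads the firmware type back out of
 * .driver_info. */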
static const struct pcmcia_device_id atmel_ids[] = {
PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0620, ATMEL_FW_TYPE_502_3COM),
PCMCIA_DEVICE_MANF_CARD_INFO(0x0101, 0x0696, ATMEL_FW_TYPE_502_3COM),
PCMCIA_DEVICE_MANF_CARD_INFO(0x01bf, 0x3302, ATMEL_FW_TYPE_502E),
PCMCIA_DEVICE_MANF_CARD_INFO(0xd601, 0x0007, ATMEL_FW_TYPE_502),
PCMCIA_DEVICE_PROD_ID12_INFO("11WAVE", "11WP611AL-E", 0x9eb2da1f, 0xc9a0d3f9, ATMEL_FW_TYPE_502E),
PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR", 0xabda4164, 0x41b37e1f, ATMEL_FW_TYPE_502),
PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_D", 0xabda4164, 0x3675d704, ATMEL_FW_TYPE_502D),
PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C502AR_E", 0xabda4164, 0x4172e792, ATMEL_FW_TYPE_502E),
PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504_R", 0xabda4164, 0x917f3d72, ATMEL_FW_TYPE_504_2958),
PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504", 0xabda4164, 0x5040670a, ATMEL_FW_TYPE_504),
PCMCIA_DEVICE_PROD_ID12_INFO("ATMEL", "AT76C504A", 0xabda4164, 0xe15ed87f, ATMEL_FW_TYPE_504A_2958),
PCMCIA_DEVICE_PROD_ID12_INFO("BT", "Voyager 1020 Laptop Adapter", 0xae49b86a, 0x1e957cd5, ATMEL_FW_TYPE_502),
PCMCIA_DEVICE_PROD_ID12_INFO("CNet", "CNWLC 11Mbps Wireless PC Card V-5", 0xbc477dde, 0x502fae6b, ATMEL_FW_TYPE_502E),
PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN PC Card", 0x5b878724, 0x122f1df6, ATMEL_FW_TYPE_502),
PCMCIA_DEVICE_PROD_ID12_INFO("IEEE 802.11b", "Wireless LAN Card S", 0x5b878724, 0x5fba533a, ATMEL_FW_TYPE_504_2958),
PCMCIA_DEVICE_PROD_ID12_INFO("OEM", "11Mbps Wireless LAN PC Card V-3", 0xfea54c90, 0x1c5b0f68, ATMEL_FW_TYPE_502),
PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W", 0xc4f8b18b, 0x30f38774, ATMEL_FW_TYPE_502D),
PCMCIA_DEVICE_PROD_ID12_INFO("SMC", "2632W-V2", 0xc4f8b18b, 0x172d1377, ATMEL_FW_TYPE_502),
PCMCIA_DEVICE_PROD_ID12_INFO("Wireless", "PC_CARD", 0xa407ecdd, 0x119f6314, ATMEL_FW_TYPE_502D),
PCMCIA_DEVICE_PROD_ID12_INFO("WLAN", "802.11b PC CARD", 0x575c516c, 0xb1f6dbc4, ATMEL_FW_TYPE_502D),
PCMCIA_DEVICE_PROD_ID12_INFO("LG", "LW2100N", 0xb474d43a, 0x6b1fec94, ATMEL_FW_TYPE_502E),
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, atmel_ids);
static struct pcmcia_driver atmel_driver = {
.owner = THIS_MODULE,
.name = "atmel_cs",
.probe = atmel_probe,
.remove = atmel_detach,
.id_table = atmel_ids,
.suspend = atmel_suspend,
.resume = atmel_resume,
};
module_pcmcia_driver(atmel_driver);
/*
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
In addition:
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
3. The name of the author may not be used to endorse or promote
products derived from this software without specific prior written
permission.
THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/
|
linux-master
|
drivers/net/wireless/atmel/atmel_cs.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*** -*- linux-c -*- **********************************************************
Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
Copyright 2004 Simon Kelley.
******************************************************************************/
#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include "atmel.h"
MODULE_AUTHOR("Simon Kelley");
MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
MODULE_LICENSE("GPL");
static const struct pci_device_id card_ids[] = {
{ 0x1114, 0x0506, PCI_ANY_ID, PCI_ANY_ID },
{ 0, }
};
MODULE_DEVICE_TABLE(pci, card_ids);
static int atmel_pci_probe(struct pci_dev *, const struct pci_device_id *);
static void atmel_pci_remove(struct pci_dev *);
static struct pci_driver atmel_driver = {
.name = "atmel",
.id_table = card_ids,
.probe = atmel_pci_probe,
.remove = atmel_pci_remove,
};
static int atmel_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *pent)
{
struct net_device *dev;
if (pci_enable_device(pdev))
return -ENODEV;
pci_set_master(pdev);
dev = init_atmel_card(pdev->irq, pdev->resource[1].start,
ATMEL_FW_TYPE_506,
&pdev->dev, NULL, NULL);
if (!dev) {
pci_disable_device(pdev);
return -ENODEV;
}
pci_set_drvdata(pdev, dev);
return 0;
}
static void atmel_pci_remove(struct pci_dev *pdev)
{
stop_atmel_card(pci_get_drvdata(pdev));
}
module_pci_driver(atmel_driver);
|
linux-master
|
drivers/net/wireless/atmel/atmel_pci.c
|
/*** -*- linux-c -*- **********************************************************
Driver for Atmel at76c502 at76c504 and at76c506 wireless cards.
Copyright 2000-2001 ATMEL Corporation.
Copyright 2003-2004 Simon Kelley.
This code was developed from version 2.1.1 of the Atmel drivers,
released by Atmel corp. under the GPL in December 2002. It also
includes code from the Linux aironet drivers (C) Benjamin Reed,
and the Linux PCMCIA package, (C) David Hinds and the Linux wireless
extensions, (C) Jean Tourrilhes.
The firmware module for reading the MAC address of the card comes from
net.russotto.AtmelMACFW, written by Matthew T. Russotto and copyright
by him. net.russotto.AtmelMACFW is used under the GPL license version 2.
This file contains the module in binary form and, under the terms
of the GPL, in source form. The source is located at the end of the file.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This software is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with Atmel wireless lan drivers; if not, see
<http://www.gnu.org/licenses/>.
For all queries about this code, please contact the current author,
Simon Kelley <[email protected]> and not Atmel Corporation.
Credit is due to HP UK and Cambridge Online Systems Ltd for supplying
hardware used during development of this driver.
******************************************************************************/
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <asm/byteorder.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/delay.h>
#include <linux/wireless.h>
#include <net/iw_handler.h>
#include <linux/crc32.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/device.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/jiffies.h>
#include <net/cfg80211.h>
#include "atmel.h"
#define DRIVER_MAJOR 0
#define DRIVER_MINOR 98
MODULE_AUTHOR("Simon Kelley");
MODULE_DESCRIPTION("Support for Atmel at76c50x 802.11 wireless ethernet cards.");
MODULE_LICENSE("GPL");
/* The name of the firmware file to be loaded;
   overrides any automatic selection */
static char *firmware = NULL;
module_param(firmware, charp, 0);
/* table of firmware file names */
static struct {
AtmelFWType fw_type;
const char *fw_file;
const char *fw_file_ext;
} fw_table[] = {
{ ATMEL_FW_TYPE_502, "atmel_at76c502", "bin" },
{ ATMEL_FW_TYPE_502D, "atmel_at76c502d", "bin" },
{ ATMEL_FW_TYPE_502E, "atmel_at76c502e", "bin" },
{ ATMEL_FW_TYPE_502_3COM, "atmel_at76c502_3com", "bin" },
{ ATMEL_FW_TYPE_504, "atmel_at76c504", "bin" },
{ ATMEL_FW_TYPE_504_2958, "atmel_at76c504_2958", "bin" },
{ ATMEL_FW_TYPE_504A_2958, "atmel_at76c504a_2958", "bin" },
{ ATMEL_FW_TYPE_506, "atmel_at76c506", "bin" },
{ ATMEL_FW_TYPE_NONE, NULL, NULL }
};
MODULE_FIRMWARE("atmel_at76c502-wpa.bin");
MODULE_FIRMWARE("atmel_at76c502.bin");
MODULE_FIRMWARE("atmel_at76c502d-wpa.bin");
MODULE_FIRMWARE("atmel_at76c502d.bin");
MODULE_FIRMWARE("atmel_at76c502e-wpa.bin");
MODULE_FIRMWARE("atmel_at76c502e.bin");
MODULE_FIRMWARE("atmel_at76c502_3com-wpa.bin");
MODULE_FIRMWARE("atmel_at76c502_3com.bin");
MODULE_FIRMWARE("atmel_at76c504-wpa.bin");
MODULE_FIRMWARE("atmel_at76c504.bin");
MODULE_FIRMWARE("atmel_at76c504_2958-wpa.bin");
MODULE_FIRMWARE("atmel_at76c504_2958.bin");
MODULE_FIRMWARE("atmel_at76c504a_2958-wpa.bin");
MODULE_FIRMWARE("atmel_at76c504a_2958.bin");
MODULE_FIRMWARE("atmel_at76c506-wpa.bin");
MODULE_FIRMWARE("atmel_at76c506.bin");
#define MAX_SSID_LENGTH 32
#define MGMT_JIFFIES (256 * HZ / 100)
#define MAX_BSS_ENTRIES 64
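/* MGMT_JIFFIES is 256 * HZ / 100, i.e. the management timer fires
 * roughly every 2.56 seconds regardless of the HZ setting. */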
/* registers */
#define GCR 0x00 /* (SIR0) General Configuration Register */
#define BSR 0x02 /* (SIR1) Bank Switching Select Register */
#define AR 0x04
#define DR 0x08
#define MR1 0x12 /* Mirror Register 1 */
#define MR2 0x14 /* Mirror Register 2 */
#define MR3 0x16 /* Mirror Register 3 */
#define MR4 0x18 /* Mirror Register 4 */
#define GPR1 0x0c
#define GPR2 0x0e
#define GPR3 0x10
/*
* Constants for the GCR register.
*/
#define GCR_REMAP 0x0400 /* Remap internal SRAM to 0 */
#define GCR_SWRES 0x0080 /* BIU reset (ARM and PAI are NOT reset) */
#define GCR_CORES 0x0060 /* Core Reset (ARM and PAI are reset) */
#define GCR_ENINT 0x0002 /* Enable Interrupts */
#define GCR_ACKINT 0x0008 /* Acknowledge Interrupts */
#define BSS_SRAM 0x0200 /* AMBA module selection --> SRAM */
#define BSS_IRAM 0x0100 /* AMBA module selection --> IRAM */
/*
* Constants for the MR registers.
*/
#define MAC_INIT_COMPLETE 0x0001 /* MAC init has been completed */
#define MAC_BOOT_COMPLETE 0x0010 /* MAC boot has been completed */
#define MAC_INIT_OK 0x0002 /* MAC init was successful */
#define MIB_MAX_DATA_BYTES 212
#define MIB_HEADER_SIZE 4 /* first four fields */
struct get_set_mib {
u8 type;
u8 size;
u8 index;
u8 reserved;
u8 data[MIB_MAX_DATA_BYTES];
};
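/* The whole block is MIB_HEADER_SIZE + MIB_MAX_DATA_BYTES =
 * 4 + 212 = 216 bytes; type, size and index identify which MIB
 * variables the data[] payload refers to. */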
struct rx_desc {
u32 Next;
u16 MsduPos;
u16 MsduSize;
u8 State;
u8 Status;
u8 Rate;
u8 Rssi;
u8 LinkQuality;
u8 PreambleType;
u16 Duration;
u32 RxTime;
};
#define RX_DESC_FLAG_VALID 0x80
#define RX_DESC_FLAG_CONSUMED 0x40
#define RX_DESC_FLAG_IDLE 0x00
#define RX_STATUS_SUCCESS 0x00
#define RX_DESC_MSDU_POS_OFFSET 4
#define RX_DESC_MSDU_SIZE_OFFSET 6
#define RX_DESC_FLAGS_OFFSET 8
#define RX_DESC_STATUS_OFFSET 9
#define RX_DESC_RSSI_OFFSET 11
#define RX_DESC_LINK_QUALITY_OFFSET 12
#define RX_DESC_PREAMBLE_TYPE_OFFSET 13
#define RX_DESC_DURATION_OFFSET 14
#define RX_DESC_RX_TIME_OFFSET 16
struct tx_desc {
u32 NextDescriptor;
u16 TxStartOfFrame;
u16 TxLength;
u8 TxState;
u8 TxStatus;
u8 RetryCount;
u8 TxRate;
u8 KeyIndex;
u8 ChiperType;
u8 ChipreLength;
u8 Reserved1;
u8 Reserved;
u8 PacketType;
u16 HostTxLength;
};
#define TX_DESC_NEXT_OFFSET 0
#define TX_DESC_POS_OFFSET 4
#define TX_DESC_SIZE_OFFSET 6
#define TX_DESC_FLAGS_OFFSET 8
#define TX_DESC_STATUS_OFFSET 9
#define TX_DESC_RETRY_OFFSET 10
#define TX_DESC_RATE_OFFSET 11
#define TX_DESC_KEY_INDEX_OFFSET 12
#define TX_DESC_CIPHER_TYPE_OFFSET 13
#define TX_DESC_CIPHER_LENGTH_OFFSET 14
#define TX_DESC_PACKET_TYPE_OFFSET 17
#define TX_DESC_HOST_LENGTH_OFFSET 18
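/* As with the rx_desc offsets above, these byte offsets mirror the
 * struct tx_desc layout, presumably so the driver can read and
 * write individual descriptor fields in the card's shared memory
 * by offset instead of copying the whole structure. */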
/*
* Host-MAC interface
*/
#define TX_STATUS_SUCCESS 0x00
#define TX_FIRM_OWN 0x80
#define TX_DONE 0x40
#define TX_ERROR 0x01
#define TX_PACKET_TYPE_DATA 0x01
#define TX_PACKET_TYPE_MGMT 0x02
#define ISR_EMPTY 0x00 /* no bits set in ISR */
#define ISR_TxCOMPLETE 0x01 /* packet transmitted */
#define ISR_RxCOMPLETE 0x02 /* packet received */
#define ISR_RxFRAMELOST 0x04 /* Rx Frame lost */
#define ISR_FATAL_ERROR 0x08 /* Fatal error */
#define ISR_COMMAND_COMPLETE 0x10 /* command completed */
#define ISR_OUT_OF_RANGE 0x20 /* out of range */
#define ISR_IBSS_MERGE 0x40 /* (4.1.2.30): IBSS merge */
#define ISR_GENERIC_IRQ 0x80
#define Local_Mib_Type 0x01
#define Mac_Address_Mib_Type 0x02
#define Mac_Mib_Type 0x03
#define Statistics_Mib_Type 0x04
#define Mac_Mgmt_Mib_Type 0x05
#define Mac_Wep_Mib_Type 0x06
#define Phy_Mib_Type 0x07
#define Multi_Domain_MIB 0x08
#define MAC_MGMT_MIB_CUR_BSSID_POS 14
#define MAC_MIB_FRAG_THRESHOLD_POS 8
#define MAC_MIB_RTS_THRESHOLD_POS 10
#define MAC_MIB_SHORT_RETRY_POS 16
#define MAC_MIB_LONG_RETRY_POS 17
#define MAC_MIB_SHORT_RETRY_LIMIT_POS 16
#define MAC_MGMT_MIB_BEACON_PER_POS 0
#define MAC_MGMT_MIB_STATION_ID_POS 6
#define MAC_MGMT_MIB_CUR_PRIVACY_POS 11
#define MAC_MGMT_MIB_CUR_BSSID_POS 14
#define MAC_MGMT_MIB_PS_MODE_POS 53
#define MAC_MGMT_MIB_LISTEN_INTERVAL_POS 54
#define MAC_MGMT_MIB_MULTI_DOMAIN_IMPLEMENTED 56
#define MAC_MGMT_MIB_MULTI_DOMAIN_ENABLED 57
#define PHY_MIB_CHANNEL_POS 14
#define PHY_MIB_RATE_SET_POS 20
#define PHY_MIB_REG_DOMAIN_POS 26
#define LOCAL_MIB_AUTO_TX_RATE_POS 3
#define LOCAL_MIB_SSID_SIZE 5
#define LOCAL_MIB_TX_PROMISCUOUS_POS 6
#define LOCAL_MIB_TX_MGMT_RATE_POS 7
#define LOCAL_MIB_TX_CONTROL_RATE_POS 8
#define LOCAL_MIB_PREAMBLE_TYPE 9
#define MAC_ADDR_MIB_MAC_ADDR_POS 0
#define CMD_Set_MIB_Vars 0x01
#define CMD_Get_MIB_Vars 0x02
#define CMD_Scan 0x03
#define CMD_Join 0x04
#define CMD_Start 0x05
#define CMD_EnableRadio 0x06
#define CMD_DisableRadio 0x07
#define CMD_SiteSurvey 0x0B
#define CMD_STATUS_IDLE 0x00
#define CMD_STATUS_COMPLETE 0x01
#define CMD_STATUS_UNKNOWN 0x02
#define CMD_STATUS_INVALID_PARAMETER 0x03
#define CMD_STATUS_FUNCTION_NOT_SUPPORTED 0x04
#define CMD_STATUS_TIME_OUT 0x07
#define CMD_STATUS_IN_PROGRESS 0x08
#define CMD_STATUS_REJECTED_RADIO_OFF 0x09
#define CMD_STATUS_HOST_ERROR 0xFF
#define CMD_STATUS_BUSY 0xFE
#define CMD_BLOCK_COMMAND_OFFSET 0
#define CMD_BLOCK_STATUS_OFFSET 1
#define CMD_BLOCK_PARAMETERS_OFFSET 4
#define SCAN_OPTIONS_SITE_SURVEY 0x80
#define MGMT_FRAME_BODY_OFFSET 24
#define MAX_AUTHENTICATION_RETRIES 3
#define MAX_ASSOCIATION_RETRIES 3
#define AUTHENTICATION_RESPONSE_TIME_OUT 1000
#define MAX_WIRELESS_BODY 2316 /* mtu is 2312, CRC is 4 */
#define LOOP_RETRY_LIMIT 500000
#define ACTIVE_MODE 1
#define PS_MODE 2
#define MAX_ENCRYPTION_KEYS 4
#define MAX_ENCRYPTION_KEY_SIZE 40
/*
* 802.11 related definitions
*/
/*
* Regulatory Domains
*/
#define REG_DOMAIN_FCC 0x10 /* Channels 1-11 USA */
#define REG_DOMAIN_DOC 0x20 /* Channels 1-11 Canada */
#define REG_DOMAIN_ETSI 0x30 /* Channels 1-13 Europe (excluding Spain/France) */
#define REG_DOMAIN_SPAIN 0x31 /* Channels 10-11 Spain */
#define REG_DOMAIN_FRANCE 0x32 /* Channels 10-13 France */
#define REG_DOMAIN_MKK 0x40 /* Channel 14 Japan */
#define REG_DOMAIN_MKK1 0x41 /* Channels 1-14 Japan (MKK1) */
#define REG_DOMAIN_ISRAEL 0x50 /* Channels 3-9 Israel */
#define BSS_TYPE_AD_HOC 1
#define BSS_TYPE_INFRASTRUCTURE 2
#define SCAN_TYPE_ACTIVE 0
#define SCAN_TYPE_PASSIVE 1
#define LONG_PREAMBLE 0
#define SHORT_PREAMBLE 1
#define AUTO_PREAMBLE 2
#define DATA_FRAME_WS_HEADER_SIZE 30
/* promiscuous mode control */
#define PROM_MODE_OFF 0x0
#define PROM_MODE_UNKNOWN 0x1
#define PROM_MODE_CRC_FAILED 0x2
#define PROM_MODE_DUPLICATED 0x4
#define PROM_MODE_MGMT 0x8
#define PROM_MODE_CTRL 0x10
#define PROM_MODE_BAD_PROTOCOL 0x20
#define IFACE_INT_STATUS_OFFSET 0
#define IFACE_INT_MASK_OFFSET 1
#define IFACE_LOCKOUT_HOST_OFFSET 2
#define IFACE_LOCKOUT_MAC_OFFSET 3
#define IFACE_FUNC_CTRL_OFFSET 28
#define IFACE_MAC_STAT_OFFSET 30
#define IFACE_GENERIC_INT_TYPE_OFFSET 32
#define CIPHER_SUITE_NONE 0
#define CIPHER_SUITE_WEP_64 1
#define CIPHER_SUITE_TKIP 2
#define CIPHER_SUITE_AES 3
#define CIPHER_SUITE_CCX 4
#define CIPHER_SUITE_WEP_128 5
/*
* IFACE MACROS & definitions
*/
/*
* FuncCtrl field:
*/
#define FUNC_CTRL_TxENABLE 0x10
#define FUNC_CTRL_RxENABLE 0x20
#define FUNC_CTRL_INIT_COMPLETE 0x01
/* A stub firmware image which reads the MAC address from NVRAM on the card.
For copyright information and source see the end of this file. */
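/* (The opcode bytes below are little-endian ARM machine code.) */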
static u8 mac_reader[] = {
0x06, 0x00, 0x00, 0xea, 0x04, 0x00, 0x00, 0xea, 0x03, 0x00, 0x00, 0xea, 0x02, 0x00, 0x00, 0xea,
0x01, 0x00, 0x00, 0xea, 0x00, 0x00, 0x00, 0xea, 0xff, 0xff, 0xff, 0xea, 0xfe, 0xff, 0xff, 0xea,
0xd3, 0x00, 0xa0, 0xe3, 0x00, 0xf0, 0x21, 0xe1, 0x0e, 0x04, 0xa0, 0xe3, 0x00, 0x10, 0xa0, 0xe3,
0x81, 0x11, 0xa0, 0xe1, 0x00, 0x10, 0x81, 0xe3, 0x00, 0x10, 0x80, 0xe5, 0x1c, 0x10, 0x90, 0xe5,
0x10, 0x10, 0xc1, 0xe3, 0x1c, 0x10, 0x80, 0xe5, 0x01, 0x10, 0xa0, 0xe3, 0x08, 0x10, 0x80, 0xe5,
0x02, 0x03, 0xa0, 0xe3, 0x00, 0x10, 0xa0, 0xe3, 0xb0, 0x10, 0xc0, 0xe1, 0xb4, 0x10, 0xc0, 0xe1,
0xb8, 0x10, 0xc0, 0xe1, 0xbc, 0x10, 0xc0, 0xe1, 0x56, 0xdc, 0xa0, 0xe3, 0x21, 0x00, 0x00, 0xeb,
0x0a, 0x00, 0xa0, 0xe3, 0x1a, 0x00, 0x00, 0xeb, 0x10, 0x00, 0x00, 0xeb, 0x07, 0x00, 0x00, 0xeb,
0x02, 0x03, 0xa0, 0xe3, 0x02, 0x14, 0xa0, 0xe3, 0xb4, 0x10, 0xc0, 0xe1, 0x4c, 0x10, 0x9f, 0xe5,
0xbc, 0x10, 0xc0, 0xe1, 0x10, 0x10, 0xa0, 0xe3, 0xb8, 0x10, 0xc0, 0xe1, 0xfe, 0xff, 0xff, 0xea,
0x00, 0x40, 0x2d, 0xe9, 0x00, 0x20, 0xa0, 0xe3, 0x02, 0x3c, 0xa0, 0xe3, 0x00, 0x10, 0xa0, 0xe3,
0x28, 0x00, 0x9f, 0xe5, 0x37, 0x00, 0x00, 0xeb, 0x00, 0x40, 0xbd, 0xe8, 0x1e, 0xff, 0x2f, 0xe1,
0x00, 0x40, 0x2d, 0xe9, 0x12, 0x2e, 0xa0, 0xe3, 0x06, 0x30, 0xa0, 0xe3, 0x00, 0x10, 0xa0, 0xe3,
0x02, 0x04, 0xa0, 0xe3, 0x2f, 0x00, 0x00, 0xeb, 0x00, 0x40, 0xbd, 0xe8, 0x1e, 0xff, 0x2f, 0xe1,
0x00, 0x02, 0x00, 0x02, 0x80, 0x01, 0x90, 0xe0, 0x01, 0x00, 0x00, 0x0a, 0x01, 0x00, 0x50, 0xe2,
0xfc, 0xff, 0xff, 0xea, 0x1e, 0xff, 0x2f, 0xe1, 0x80, 0x10, 0xa0, 0xe3, 0xf3, 0x06, 0xa0, 0xe3,
0x00, 0x10, 0x80, 0xe5, 0x00, 0x10, 0xa0, 0xe3, 0x00, 0x10, 0x80, 0xe5, 0x01, 0x10, 0xa0, 0xe3,
0x04, 0x10, 0x80, 0xe5, 0x00, 0x10, 0x80, 0xe5, 0x0e, 0x34, 0xa0, 0xe3, 0x1c, 0x10, 0x93, 0xe5,
0x02, 0x1a, 0x81, 0xe3, 0x1c, 0x10, 0x83, 0xe5, 0x58, 0x11, 0x9f, 0xe5, 0x30, 0x10, 0x80, 0xe5,
0x54, 0x11, 0x9f, 0xe5, 0x34, 0x10, 0x80, 0xe5, 0x38, 0x10, 0x80, 0xe5, 0x3c, 0x10, 0x80, 0xe5,
0x10, 0x10, 0x90, 0xe5, 0x08, 0x00, 0x90, 0xe5, 0x1e, 0xff, 0x2f, 0xe1, 0xf3, 0x16, 0xa0, 0xe3,
0x08, 0x00, 0x91, 0xe5, 0x05, 0x00, 0xa0, 0xe3, 0x0c, 0x00, 0x81, 0xe5, 0x10, 0x00, 0x91, 0xe5,
0x02, 0x00, 0x10, 0xe3, 0xfc, 0xff, 0xff, 0x0a, 0xff, 0x00, 0xa0, 0xe3, 0x0c, 0x00, 0x81, 0xe5,
0x10, 0x00, 0x91, 0xe5, 0x02, 0x00, 0x10, 0xe3, 0xfc, 0xff, 0xff, 0x0a, 0x08, 0x00, 0x91, 0xe5,
0x10, 0x00, 0x91, 0xe5, 0x01, 0x00, 0x10, 0xe3, 0xfc, 0xff, 0xff, 0x0a, 0x08, 0x00, 0x91, 0xe5,
0xff, 0x00, 0x00, 0xe2, 0x1e, 0xff, 0x2f, 0xe1, 0x30, 0x40, 0x2d, 0xe9, 0x00, 0x50, 0xa0, 0xe1,
0x03, 0x40, 0xa0, 0xe1, 0xa2, 0x02, 0xa0, 0xe1, 0x08, 0x00, 0x00, 0xe2, 0x03, 0x00, 0x80, 0xe2,
0xd8, 0x10, 0x9f, 0xe5, 0x00, 0x00, 0xc1, 0xe5, 0x01, 0x20, 0xc1, 0xe5, 0xe2, 0xff, 0xff, 0xeb,
0x01, 0x00, 0x10, 0xe3, 0xfc, 0xff, 0xff, 0x1a, 0x14, 0x00, 0xa0, 0xe3, 0xc4, 0xff, 0xff, 0xeb,
0x04, 0x20, 0xa0, 0xe1, 0x05, 0x10, 0xa0, 0xe1, 0x02, 0x00, 0xa0, 0xe3, 0x01, 0x00, 0x00, 0xeb,
0x30, 0x40, 0xbd, 0xe8, 0x1e, 0xff, 0x2f, 0xe1, 0x70, 0x40, 0x2d, 0xe9, 0xf3, 0x46, 0xa0, 0xe3,
0x00, 0x30, 0xa0, 0xe3, 0x00, 0x00, 0x50, 0xe3, 0x08, 0x00, 0x00, 0x9a, 0x8c, 0x50, 0x9f, 0xe5,
0x03, 0x60, 0xd5, 0xe7, 0x0c, 0x60, 0x84, 0xe5, 0x10, 0x60, 0x94, 0xe5, 0x02, 0x00, 0x16, 0xe3,
0xfc, 0xff, 0xff, 0x0a, 0x01, 0x30, 0x83, 0xe2, 0x00, 0x00, 0x53, 0xe1, 0xf7, 0xff, 0xff, 0x3a,
0xff, 0x30, 0xa0, 0xe3, 0x0c, 0x30, 0x84, 0xe5, 0x08, 0x00, 0x94, 0xe5, 0x10, 0x00, 0x94, 0xe5,
0x01, 0x00, 0x10, 0xe3, 0xfc, 0xff, 0xff, 0x0a, 0x08, 0x00, 0x94, 0xe5, 0x00, 0x00, 0xa0, 0xe3,
0x00, 0x00, 0x52, 0xe3, 0x0b, 0x00, 0x00, 0x9a, 0x10, 0x50, 0x94, 0xe5, 0x02, 0x00, 0x15, 0xe3,
0xfc, 0xff, 0xff, 0x0a, 0x0c, 0x30, 0x84, 0xe5, 0x10, 0x50, 0x94, 0xe5, 0x01, 0x00, 0x15, 0xe3,
0xfc, 0xff, 0xff, 0x0a, 0x08, 0x50, 0x94, 0xe5, 0x01, 0x50, 0xc1, 0xe4, 0x01, 0x00, 0x80, 0xe2,
0x02, 0x00, 0x50, 0xe1, 0xf3, 0xff, 0xff, 0x3a, 0xc8, 0x00, 0xa0, 0xe3, 0x98, 0xff, 0xff, 0xeb,
0x70, 0x40, 0xbd, 0xe8, 0x1e, 0xff, 0x2f, 0xe1, 0x01, 0x0c, 0x00, 0x02, 0x01, 0x02, 0x00, 0x02,
0x00, 0x01, 0x00, 0x02
};
struct atmel_private {
void *card; /* Bus-dependent structure; varies for PCcard */
int (*present_callback)(void *); /* And callback which uses it */
char firmware_id[32];
AtmelFWType firmware_type;
u8 *firmware;
int firmware_length;
struct timer_list management_timer;
struct net_device *dev;
struct device *sys_dev;
struct iw_statistics wstats;
spinlock_t irqlock, timerlock; /* spinlocks */
enum { BUS_TYPE_PCCARD, BUS_TYPE_PCI } bus_type;
enum {
CARD_TYPE_PARALLEL_FLASH,
CARD_TYPE_SPI_FLASH,
CARD_TYPE_EEPROM
} card_type;
int do_rx_crc; /* If we need to CRC incoming packets */
int probe_crc; /* set if we don't yet know */
int crc_ok_cnt, crc_ko_cnt; /* counters for probing */
u16 rx_desc_head;
u16 tx_desc_free, tx_desc_head, tx_desc_tail, tx_desc_previous;
u16 tx_free_mem, tx_buff_head, tx_buff_tail;
u16 frag_seq, frag_len, frag_no;
u8 frag_source[6];
u8 wep_is_on, default_key, exclude_unencrypted, encryption_level;
u8 group_cipher_suite, pairwise_cipher_suite;
u8 wep_keys[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
int wep_key_len[MAX_ENCRYPTION_KEYS];
int use_wpa, radio_on_broken; /* firmware dependent stuff. */
u16 host_info_base;
struct host_info_struct {
/* NB this is matched to the hardware, don't change. */
u8 volatile int_status;
u8 volatile int_mask;
u8 volatile lockout_host;
u8 volatile lockout_mac;
u16 tx_buff_pos;
u16 tx_buff_size;
u16 tx_desc_pos;
u16 tx_desc_count;
u16 rx_buff_pos;
u16 rx_buff_size;
u16 rx_desc_pos;
u16 rx_desc_count;
u16 build_version;
u16 command_pos;
u16 major_version;
u16 minor_version;
u16 func_ctrl;
u16 mac_status;
u16 generic_IRQ_type;
u8 reserved[2];
} host_info;
enum {
STATION_STATE_SCANNING,
STATION_STATE_JOINNING,
STATION_STATE_AUTHENTICATING,
STATION_STATE_ASSOCIATING,
STATION_STATE_READY,
STATION_STATE_REASSOCIATING,
STATION_STATE_DOWN,
STATION_STATE_MGMT_ERROR
} station_state;
int operating_mode, power_mode;
unsigned long last_qual;
int beacons_this_sec;
int channel;
int reg_domain, config_reg_domain;
int tx_rate;
int auto_tx_rate;
int rts_threshold;
int frag_threshold;
int long_retry, short_retry;
int preamble;
int default_beacon_period, beacon_period, listen_interval;
int CurrentAuthentTransactionSeqNum, ExpectedAuthentTransactionSeqNum;
int AuthenticationRequestRetryCnt, AssociationRequestRetryCnt, ReAssociationRequestRetryCnt;
enum {
SITE_SURVEY_IDLE,
SITE_SURVEY_IN_PROGRESS,
SITE_SURVEY_COMPLETED
} site_survey_state;
unsigned long last_survey;
int station_was_associated, station_is_associated;
int fast_scan;
struct bss_info {
int channel;
int SSIDsize;
int RSSI;
int UsingWEP;
int preamble;
int beacon_period;
int BSStype;
u8 BSSID[6];
u8 SSID[MAX_SSID_LENGTH];
} BSSinfo[MAX_BSS_ENTRIES];
int BSS_list_entries, current_BSS;
int connect_to_any_BSS;
int SSID_size, new_SSID_size;
u8 CurrentBSSID[6], BSSID[6];
u8 SSID[MAX_SSID_LENGTH], new_SSID[MAX_SSID_LENGTH];
u64 last_beacon_timestamp;
u8 rx_buf[MAX_WIRELESS_BODY];
};
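/* 1, 2, 5.5 and 11 Mbit/s in the 802.11 supported-rates encoding:
   units of 500 kbit/s, with the top bit (set on the first two
   entries) marking a basic rate. */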
static u8 atmel_basic_rates[4] = {0x82, 0x84, 0x0b, 0x16};
static const struct {
int reg_domain;
int min, max;
char *name;
} channel_table[] = { { REG_DOMAIN_FCC, 1, 11, "USA" },
{ REG_DOMAIN_DOC, 1, 11, "Canada" },
{ REG_DOMAIN_ETSI, 1, 13, "Europe" },
{ REG_DOMAIN_SPAIN, 10, 11, "Spain" },
{ REG_DOMAIN_FRANCE, 10, 13, "France" },
{ REG_DOMAIN_MKK, 14, 14, "MKK" },
{ REG_DOMAIN_MKK1, 1, 14, "MKK1" },
{ REG_DOMAIN_ISRAEL, 3, 9, "Israel"} };
static void build_wpa_mib(struct atmel_private *priv);
static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static void atmel_copy_to_card(struct net_device *dev, u16 dest,
const unsigned char *src, u16 len);
static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest,
u16 src, u16 len);
static void atmel_set_gcr(struct net_device *dev, u16 mask);
static void atmel_clear_gcr(struct net_device *dev, u16 mask);
static int atmel_lock_mac(struct atmel_private *priv);
static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data);
static void atmel_command_irq(struct atmel_private *priv);
static int atmel_validate_channel(struct atmel_private *priv, int channel);
static void atmel_management_frame(struct atmel_private *priv,
struct ieee80211_hdr *header,
u16 frame_len, u8 rssi);
static void atmel_management_timer(struct timer_list *t);
static void atmel_send_command(struct atmel_private *priv, int command,
void *cmd, int cmd_size);
static int atmel_send_command_wait(struct atmel_private *priv, int command,
void *cmd, int cmd_size);
static void atmel_transmit_management_frame(struct atmel_private *priv,
struct ieee80211_hdr *header,
u8 *body, int body_len);
static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index);
static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index,
u8 data);
static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index,
u16 data);
static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index,
const u8 *data, int data_len);
static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index,
u8 *data, int data_len);
static void atmel_scan(struct atmel_private *priv, int specific_ssid);
static void atmel_join_bss(struct atmel_private *priv, int bss_index);
static void atmel_smooth_qual(struct atmel_private *priv);
static void atmel_writeAR(struct net_device *dev, u16 data);
static int probe_atmel_card(struct net_device *dev);
static int reset_atmel_card(struct net_device *dev);
static void atmel_enter_state(struct atmel_private *priv, int new_state);
int atmel_open(struct net_device *dev);
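/* Address helpers: map a logical offset to a card memory address
   within the host_info block (atmel_hi), the command block (atmel_co),
   or the nth rx/tx descriptor (atmel_rx/atmel_tx). */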
static inline u16 atmel_hi(struct atmel_private *priv, u16 offset)
{
return priv->host_info_base + offset;
}
static inline u16 atmel_co(struct atmel_private *priv, u16 offset)
{
return priv->host_info.command_pos + offset;
}
static inline u16 atmel_rx(struct atmel_private *priv, u16 offset, u16 desc)
{
return priv->host_info.rx_desc_pos + (sizeof(struct rx_desc) * desc) + offset;
}
static inline u16 atmel_tx(struct atmel_private *priv, u16 offset, u16 desc)
{
return priv->host_info.tx_desc_pos + (sizeof(struct tx_desc) * desc) + offset;
}
static inline u8 atmel_read8(struct net_device *dev, u16 offset)
{
return inb(dev->base_addr + offset);
}
static inline void atmel_write8(struct net_device *dev, u16 offset, u8 data)
{
outb(data, dev->base_addr + offset);
}
static inline u16 atmel_read16(struct net_device *dev, u16 offset)
{
return inw(dev->base_addr + offset);
}
static inline void atmel_write16(struct net_device *dev, u16 offset, u16 data)
{
outw(data, dev->base_addr + offset);
}
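/* Card memory is accessed indirectly: write the target address to the
   AR register, then move the data through the DR data register. */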
static inline u8 atmel_rmem8(struct atmel_private *priv, u16 pos)
{
atmel_writeAR(priv->dev, pos);
return atmel_read8(priv->dev, DR);
}
static inline void atmel_wmem8(struct atmel_private *priv, u16 pos, u16 data)
{
atmel_writeAR(priv->dev, pos);
atmel_write8(priv->dev, DR, data);
}
static inline u16 atmel_rmem16(struct atmel_private *priv, u16 pos)
{
atmel_writeAR(priv->dev, pos);
return atmel_read16(priv->dev, DR);
}
static inline void atmel_wmem16(struct atmel_private *priv, u16 pos, u16 data)
{
atmel_writeAR(priv->dev, pos);
atmel_write16(priv->dev, DR, data);
}
static const struct iw_handler_def atmel_handler_def;
static void tx_done_irq(struct atmel_private *priv)
{
int i;
for (i = 0;
atmel_rmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_head)) == TX_DONE &&
i < priv->host_info.tx_desc_count;
i++) {
u8 status = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_STATUS_OFFSET, priv->tx_desc_head));
u16 msdu_size = atmel_rmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_head));
u8 type = atmel_rmem8(priv, atmel_tx(priv, TX_DESC_PACKET_TYPE_OFFSET, priv->tx_desc_head));
atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_head), 0);
priv->tx_free_mem += msdu_size;
priv->tx_desc_free++;
if (priv->tx_buff_head + msdu_size > (priv->host_info.tx_buff_pos + priv->host_info.tx_buff_size))
priv->tx_buff_head = 0;
else
priv->tx_buff_head += msdu_size;
if (priv->tx_desc_head < (priv->host_info.tx_desc_count - 1))
priv->tx_desc_head++;
else
priv->tx_desc_head = 0;
if (type == TX_PACKET_TYPE_DATA) {
if (status == TX_STATUS_SUCCESS)
priv->dev->stats.tx_packets++;
else
priv->dev->stats.tx_errors++;
netif_wake_queue(priv->dev);
}
}
}
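/* Find room for a frame of 'len' bytes in the card's circular tx
   buffer. Returns the card address to copy to, or 0 if the ring is
   too full. A frame is never wrapped: if the gap at the top of the
   buffer is too small, allocation restarts from the bottom. */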
static u16 find_tx_buff(struct atmel_private *priv, u16 len)
{
u16 bottom_free = priv->host_info.tx_buff_size - priv->tx_buff_tail;
if (priv->tx_desc_free == 3 || priv->tx_free_mem < len)
return 0;
if (bottom_free >= len)
return priv->host_info.tx_buff_pos + priv->tx_buff_tail;
if (priv->tx_free_mem - bottom_free >= len) {
priv->tx_buff_tail = 0;
return priv->host_info.tx_buff_pos;
}
return 0;
}
static void tx_update_descriptor(struct atmel_private *priv, int is_bcast,
u16 len, u16 buff, u8 type)
{
atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, priv->tx_desc_tail), buff);
atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, priv->tx_desc_tail), len);
if (!priv->use_wpa)
atmel_wmem16(priv, atmel_tx(priv, TX_DESC_HOST_LENGTH_OFFSET, priv->tx_desc_tail), len);
atmel_wmem8(priv, atmel_tx(priv, TX_DESC_PACKET_TYPE_OFFSET, priv->tx_desc_tail), type);
atmel_wmem8(priv, atmel_tx(priv, TX_DESC_RATE_OFFSET, priv->tx_desc_tail), priv->tx_rate);
atmel_wmem8(priv, atmel_tx(priv, TX_DESC_RETRY_OFFSET, priv->tx_desc_tail), 0);
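/* cipher_length appears to be the per-frame encryption overhead the
   firmware must allow for: 8 bytes for WEP, 12 for TKIP (values taken
   as given from this code; the exact IV/ICV breakdown is not spelled
   out here). */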
if (priv->use_wpa) {
int cipher_type, cipher_length;
if (is_bcast) {
cipher_type = priv->group_cipher_suite;
if (cipher_type == CIPHER_SUITE_WEP_64 ||
cipher_type == CIPHER_SUITE_WEP_128)
cipher_length = 8;
else if (cipher_type == CIPHER_SUITE_TKIP)
cipher_length = 12;
else if (priv->pairwise_cipher_suite == CIPHER_SUITE_WEP_64 ||
priv->pairwise_cipher_suite == CIPHER_SUITE_WEP_128) {
cipher_type = priv->pairwise_cipher_suite;
cipher_length = 8;
} else {
cipher_type = CIPHER_SUITE_NONE;
cipher_length = 0;
}
} else {
cipher_type = priv->pairwise_cipher_suite;
if (cipher_type == CIPHER_SUITE_WEP_64 ||
cipher_type == CIPHER_SUITE_WEP_128)
cipher_length = 8;
else if (cipher_type == CIPHER_SUITE_TKIP)
cipher_length = 12;
else if (priv->group_cipher_suite == CIPHER_SUITE_WEP_64 ||
priv->group_cipher_suite == CIPHER_SUITE_WEP_128) {
cipher_type = priv->group_cipher_suite;
cipher_length = 8;
} else {
cipher_type = CIPHER_SUITE_NONE;
cipher_length = 0;
}
}
atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_TYPE_OFFSET, priv->tx_desc_tail),
cipher_type);
atmel_wmem8(priv, atmel_tx(priv, TX_DESC_CIPHER_LENGTH_OFFSET, priv->tx_desc_tail),
cipher_length);
}
atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, priv->tx_desc_tail), 0x80000000L);
atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, priv->tx_desc_tail), TX_FIRM_OWN);
if (priv->tx_desc_previous != priv->tx_desc_tail)
atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, priv->tx_desc_previous), 0);
priv->tx_desc_previous = priv->tx_desc_tail;
if (priv->tx_desc_tail < (priv->host_info.tx_desc_count - 1))
priv->tx_desc_tail++;
else
priv->tx_desc_tail = 0;
priv->tx_desc_free--;
priv->tx_free_mem -= len;
}
static netdev_tx_t start_tx(struct sk_buff *skb, struct net_device *dev)
{
struct atmel_private *priv = netdev_priv(dev);
struct ieee80211_hdr header;
unsigned long flags;
u16 buff, frame_ctl, len = (ETH_ZLEN < skb->len) ? skb->len : ETH_ZLEN;
if (priv->card && priv->present_callback &&
!(*priv->present_callback)(priv->card)) {
dev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
if (priv->station_state != STATION_STATE_READY) {
dev->stats.tx_errors++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
/* first ensure the timer func cannot run */
spin_lock_bh(&priv->timerlock);
/* then stop the hardware ISR */
spin_lock_irqsave(&priv->irqlock, flags);
/* NB: doing the above in the opposite order will deadlock */
/* The Wireless Header is 30 bytes. In the Ethernet packet we "cut" the
first 12 bytes (containing DA/SA) and put them in the appropriate
fields of the Wireless Header. Thus the packet length is then the
initial length + 18 (+30 - 12) */
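/* Worked example (illustrative): for a 1500-byte Ethernet frame,
   len = 1500 and we request 1500 + 18 = 1518 bytes of card buffer:
   a 30-byte wireless header followed by the 1488 payload bytes that
   remain once the 12 DA/SA bytes are stripped. */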
if (!(buff = find_tx_buff(priv, len + 18))) {
dev->stats.tx_dropped++;
spin_unlock_irqrestore(&priv->irqlock, flags);
spin_unlock_bh(&priv->timerlock);
netif_stop_queue(dev);
return NETDEV_TX_BUSY;
}
frame_ctl = IEEE80211_FTYPE_DATA;
header.duration_id = 0;
header.seq_ctrl = 0;
if (priv->wep_is_on)
frame_ctl |= IEEE80211_FCTL_PROTECTED;
if (priv->operating_mode == IW_MODE_ADHOC) {
skb_copy_from_linear_data(skb, &header.addr1, ETH_ALEN);
memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
memcpy(&header.addr3, priv->BSSID, ETH_ALEN);
} else {
frame_ctl |= IEEE80211_FCTL_TODS;
memcpy(&header.addr1, priv->CurrentBSSID, ETH_ALEN);
memcpy(&header.addr2, dev->dev_addr, ETH_ALEN);
skb_copy_from_linear_data(skb, &header.addr3, ETH_ALEN);
}
if (priv->use_wpa)
memcpy(&header.addr4, rfc1042_header, ETH_ALEN);
header.frame_control = cpu_to_le16(frame_ctl);
/* Copy the wireless header into the card */
atmel_copy_to_card(dev, buff, (unsigned char *)&header, DATA_FRAME_WS_HEADER_SIZE);
/* Copy the packet sans its 802.3 header addresses which have been replaced */
atmel_copy_to_card(dev, buff + DATA_FRAME_WS_HEADER_SIZE, skb->data + 12, len - 12);
priv->tx_buff_tail += len - 12 + DATA_FRAME_WS_HEADER_SIZE;
/* low bit of first byte of destination tells us if broadcast */
tx_update_descriptor(priv, *(skb->data) & 0x01, len + 18, buff, TX_PACKET_TYPE_DATA);
dev->stats.tx_bytes += len;
spin_unlock_irqrestore(&priv->irqlock, flags);
spin_unlock_bh(&priv->timerlock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static void atmel_transmit_management_frame(struct atmel_private *priv,
struct ieee80211_hdr *header,
u8 *body, int body_len)
{
u16 buff;
int len = MGMT_FRAME_BODY_OFFSET + body_len;
if (!(buff = find_tx_buff(priv, len)))
return;
atmel_copy_to_card(priv->dev, buff, (u8 *)header, MGMT_FRAME_BODY_OFFSET);
atmel_copy_to_card(priv->dev, buff + MGMT_FRAME_BODY_OFFSET, body, body_len);
priv->tx_buff_tail += len;
tx_update_descriptor(priv, header->addr1[0] & 0x01, len, buff, TX_PACKET_TYPE_MGMT);
}
static void fast_rx_path(struct atmel_private *priv,
struct ieee80211_hdr *header,
u16 msdu_size, u16 rx_packet_loc, u32 crc)
{
/* fast path: an unfragmented packet is copied directly into an skb */
u8 mac4[6];
struct sk_buff *skb;
unsigned char *skbp;
/* get the final mac-4 header field; this tells us the encapsulation */
atmel_copy_to_host(priv->dev, mac4, rx_packet_loc + 24, 6);
msdu_size -= 6;
if (priv->do_rx_crc) {
crc = crc32_le(crc, mac4, 6);
msdu_size -= 4;
}
if (!(skb = dev_alloc_skb(msdu_size + 14))) {
priv->dev->stats.rx_dropped++;
return;
}
skb_reserve(skb, 2);
skbp = skb_put(skb, msdu_size + 12);
atmel_copy_to_host(priv->dev, skbp + 12, rx_packet_loc + 30, msdu_size);
if (priv->do_rx_crc) {
u32 netcrc;
crc = crc32_le(crc, skbp + 12, msdu_size);
atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + 30 + msdu_size, 4);
if ((crc ^ 0xffffffff) != netcrc) {
priv->dev->stats.rx_crc_errors++;
dev_kfree_skb(skb);
return;
}
}
memcpy(skbp, header->addr1, ETH_ALEN); /* destination address */
if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
memcpy(&skbp[ETH_ALEN], header->addr3, ETH_ALEN);
else
memcpy(&skbp[ETH_ALEN], header->addr2, ETH_ALEN); /* source address */
skb->protocol = eth_type_trans(skb, priv->dev);
skb->ip_summed = CHECKSUM_NONE;
netif_rx(skb);
priv->dev->stats.rx_bytes += 12 + msdu_size;
priv->dev->stats.rx_packets++;
}
/* Test to see if the packet in card memory at packet_loc has a valid CRC.
It doesn't matter that this is slow: it is only used to probe the first few
packets. */
static int probe_crc(struct atmel_private *priv, u16 packet_loc, u16 msdu_size)
{
int i = msdu_size - 4;
u32 netcrc, crc = 0xffffffff;
if (msdu_size < 4)
return 0;
atmel_copy_to_host(priv->dev, (void *)&netcrc, packet_loc + i, 4);
atmel_writeAR(priv->dev, packet_loc);
while (i--) {
u8 octet = atmel_read8(priv->dev, DR);
crc = crc32_le(crc, &octet, 1);
}
return (crc ^ 0xffffffff) == netcrc;
}
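/* Reassembly of fragmented data frames. Fragment 0 records the
   source, sequence number and DA/SA in priv->rx_buf and priv->frag_*;
   each later fragment must match the recorded source, sequence and
   expected fragment number or it is discarded. The rebuilt MSDU is
   handed up once the more-fragments bit clears. */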
static void frag_rx_path(struct atmel_private *priv,
struct ieee80211_hdr *header,
u16 msdu_size, u16 rx_packet_loc, u32 crc, u16 seq_no,
u8 frag_no, int more_frags)
{
u8 mac4[ETH_ALEN];
u8 source[ETH_ALEN];
struct sk_buff *skb;
if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
memcpy(source, header->addr3, ETH_ALEN);
else
memcpy(source, header->addr2, ETH_ALEN);
rx_packet_loc += 24; /* skip header */
if (priv->do_rx_crc)
msdu_size -= 4;
if (frag_no == 0) { /* first fragment */
atmel_copy_to_host(priv->dev, mac4, rx_packet_loc, ETH_ALEN);
msdu_size -= ETH_ALEN;
rx_packet_loc += ETH_ALEN;
if (priv->do_rx_crc)
crc = crc32_le(crc, mac4, 6);
priv->frag_seq = seq_no;
priv->frag_no = 1;
priv->frag_len = msdu_size;
memcpy(priv->frag_source, source, ETH_ALEN);
memcpy(&priv->rx_buf[ETH_ALEN], source, ETH_ALEN);
memcpy(priv->rx_buf, header->addr1, ETH_ALEN);
atmel_copy_to_host(priv->dev, &priv->rx_buf[12], rx_packet_loc, msdu_size);
if (priv->do_rx_crc) {
u32 netcrc;
crc = crc32_le(crc, &priv->rx_buf[12], msdu_size);
atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
if ((crc ^ 0xffffffff) != netcrc) {
priv->dev->stats.rx_crc_errors++;
eth_broadcast_addr(priv->frag_source);
}
}
} else if (priv->frag_no == frag_no &&
priv->frag_seq == seq_no &&
memcmp(priv->frag_source, source, ETH_ALEN) == 0) {
atmel_copy_to_host(priv->dev, &priv->rx_buf[12 + priv->frag_len],
rx_packet_loc, msdu_size);
if (priv->do_rx_crc) {
u32 netcrc;
crc = crc32_le(crc,
&priv->rx_buf[12 + priv->frag_len],
msdu_size);
atmel_copy_to_host(priv->dev, (void *)&netcrc, rx_packet_loc + msdu_size, 4);
if ((crc ^ 0xffffffff) != netcrc) {
priv->dev->stats.rx_crc_errors++;
eth_broadcast_addr(priv->frag_source);
more_frags = 1; /* don't send broken assembly */
}
}
priv->frag_len += msdu_size;
priv->frag_no++;
if (!more_frags) { /* last one */
eth_broadcast_addr(priv->frag_source);
if (!(skb = dev_alloc_skb(priv->frag_len + 14))) {
priv->dev->stats.rx_dropped++;
} else {
skb_reserve(skb, 2);
skb_put_data(skb, priv->rx_buf,
priv->frag_len + 12);
skb->protocol = eth_type_trans(skb, priv->dev);
skb->ip_summed = CHECKSUM_NONE;
netif_rx(skb);
priv->dev->stats.rx_bytes += priv->frag_len + 12;
priv->dev->stats.rx_packets++;
}
}
} else
priv->wstats.discard.fragment++;
}
static void rx_done_irq(struct atmel_private *priv)
{
int i;
struct ieee80211_hdr header;
for (i = 0;
atmel_rmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head)) == RX_DESC_FLAG_VALID &&
i < priv->host_info.rx_desc_count;
i++) {
u16 msdu_size, rx_packet_loc, frame_ctl, seq_control;
u8 status = atmel_rmem8(priv, atmel_rx(priv, RX_DESC_STATUS_OFFSET, priv->rx_desc_head));
u32 crc = 0xffffffff;
if (status != RX_STATUS_SUCCESS) {
if (status == 0xc1) /* determined by experiment */
priv->wstats.discard.nwid++;
else
priv->dev->stats.rx_errors++;
goto next;
}
msdu_size = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_SIZE_OFFSET, priv->rx_desc_head));
rx_packet_loc = atmel_rmem16(priv, atmel_rx(priv, RX_DESC_MSDU_POS_OFFSET, priv->rx_desc_head));
if (msdu_size < 30) {
priv->dev->stats.rx_errors++;
goto next;
}
/* Get header as far as end of seq_ctrl */
atmel_copy_to_host(priv->dev, (char *)&header, rx_packet_loc, 24);
frame_ctl = le16_to_cpu(header.frame_control);
seq_control = le16_to_cpu(header.seq_ctrl);
/* Probe for CRC use here if needed. Once five packets have
arrived with the same CRC status, we assume we know what's
happening and stop probing. */
if (priv->probe_crc) {
if (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED)) {
priv->do_rx_crc = probe_crc(priv, rx_packet_loc, msdu_size);
} else {
priv->do_rx_crc = probe_crc(priv, rx_packet_loc + 24, msdu_size - 24);
}
if (priv->do_rx_crc) {
if (priv->crc_ok_cnt++ > 5)
priv->probe_crc = 0;
} else {
if (priv->crc_ko_cnt++ > 5)
priv->probe_crc = 0;
}
}
/* don't CRC header when WEP in use */
if (priv->do_rx_crc && (!priv->wep_is_on || !(frame_ctl & IEEE80211_FCTL_PROTECTED))) {
crc = crc32_le(0xffffffff, (unsigned char *)&header, 24);
}
msdu_size -= 24; /* header */
if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_DATA) {
int more_fragments = frame_ctl & IEEE80211_FCTL_MOREFRAGS;
u8 packet_fragment_no = seq_control & IEEE80211_SCTL_FRAG;
u16 packet_sequence_no = (seq_control & IEEE80211_SCTL_SEQ) >> 4;
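/* Unfragmented frames take the fast path straight into an skb;
   anything fragmented goes through the reassembly buffer. */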
if (!more_fragments && packet_fragment_no == 0) {
fast_rx_path(priv, &header, msdu_size, rx_packet_loc, crc);
} else {
frag_rx_path(priv, &header, msdu_size, rx_packet_loc, crc,
packet_sequence_no, packet_fragment_no, more_fragments);
}
}
if ((frame_ctl & IEEE80211_FCTL_FTYPE) == IEEE80211_FTYPE_MGMT) {
/* copy rest of packet into buffer */
atmel_copy_to_host(priv->dev, (unsigned char *)&priv->rx_buf, rx_packet_loc + 24, msdu_size);
/* we use the same buffer for frag reassembly and control packets */
eth_broadcast_addr(priv->frag_source);
if (priv->do_rx_crc) {
/* the last 4 octets are the CRC */
msdu_size -= 4;
crc = crc32_le(crc, (unsigned char *)&priv->rx_buf, msdu_size);
if ((crc ^ 0xffffffff) != (*((u32 *)&priv->rx_buf[msdu_size]))) {
priv->dev->stats.rx_crc_errors++;
goto next;
}
}
atmel_management_frame(priv, &header, msdu_size,
atmel_rmem8(priv, atmel_rx(priv, RX_DESC_RSSI_OFFSET, priv->rx_desc_head)));
}
next:
/* release descriptor */
atmel_wmem8(priv, atmel_rx(priv, RX_DESC_FLAGS_OFFSET, priv->rx_desc_head), RX_DESC_FLAG_CONSUMED);
if (priv->rx_desc_head < (priv->host_info.rx_desc_count - 1))
priv->rx_desc_head++;
else
priv->rx_desc_head = 0;
}
}
static irqreturn_t service_interrupt(int irq, void *dev_id)
{
struct net_device *dev = (struct net_device *) dev_id;
struct atmel_private *priv = netdev_priv(dev);
u8 isr;
int i = -1;
static const u8 irq_order[] = {
ISR_OUT_OF_RANGE,
ISR_RxCOMPLETE,
ISR_TxCOMPLETE,
ISR_RxFRAMELOST,
ISR_FATAL_ERROR,
ISR_COMMAND_COMPLETE,
ISR_IBSS_MERGE,
ISR_GENERIC_IRQ
};
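/* Interrupt sources are serviced one at a time, in the fixed
   priority order of the table above (first match wins), looping
   until the ISR reads back empty. */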
if (priv->card && priv->present_callback &&
!(*priv->present_callback)(priv->card))
return IRQ_HANDLED;
/* In this state upper-level code assumes it can mess with
the card unhampered by interrupts which may change register state.
Note that even though the card shouldn't generate interrupts
the interrupt line may be shared. This allows card setup
to go on without disabling interrupts for a long time. */
if (priv->station_state == STATION_STATE_DOWN)
return IRQ_NONE;
atmel_clear_gcr(dev, GCR_ENINT); /* disable interrupts */
while (1) {
if (!atmel_lock_mac(priv)) {
/* failed to contact card */
printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name);
return IRQ_HANDLED;
}
isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET));
atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
if (!isr) {
atmel_set_gcr(dev, GCR_ENINT); /* enable interrupts */
return i == -1 ? IRQ_NONE : IRQ_HANDLED;
}
atmel_set_gcr(dev, GCR_ACKINT); /* acknowledge interrupt */
for (i = 0; i < ARRAY_SIZE(irq_order); i++)
if (isr & irq_order[i])
break;
if (!atmel_lock_mac(priv)) {
/* failed to contact card */
printk(KERN_ALERT "%s: failed to contact MAC.\n", dev->name);
return IRQ_HANDLED;
}
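/* Re-read the ISR under the MAC lockout, clear only the bit being
   serviced (the XOR below) and write the rest back, so any other
   pending sources are not lost. */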
isr = atmel_rmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET));
isr ^= irq_order[i];
atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_STATUS_OFFSET), isr);
atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
switch (irq_order[i]) {
case ISR_OUT_OF_RANGE:
if (priv->operating_mode == IW_MODE_INFRA &&
priv->station_state == STATION_STATE_READY) {
priv->station_is_associated = 0;
atmel_scan(priv, 1);
}
break;
case ISR_RxFRAMELOST:
priv->wstats.discard.misc++;
fallthrough;
case ISR_RxCOMPLETE:
rx_done_irq(priv);
break;
case ISR_TxCOMPLETE:
tx_done_irq(priv);
break;
case ISR_FATAL_ERROR:
printk(KERN_ALERT "%s: *** FATAL error interrupt ***\n", dev->name);
atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
break;
case ISR_COMMAND_COMPLETE:
atmel_command_irq(priv);
break;
case ISR_IBSS_MERGE:
atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS,
priv->CurrentBSSID, 6);
/* The WPA stuff cares about the current AP address */
if (priv->use_wpa)
build_wpa_mib(priv);
break;
case ISR_GENERIC_IRQ:
printk(KERN_INFO "%s: Generic_irq received.\n", dev->name);
break;
}
}
}
static struct iw_statistics *atmel_get_wireless_stats(struct net_device *dev)
{
struct atmel_private *priv = netdev_priv(dev);
/* update the link quality here in case we are seeing no beacons
at all to drive the process */
atmel_smooth_qual(priv);
priv->wstats.status = priv->station_state;
if (priv->operating_mode == IW_MODE_INFRA) {
if (priv->station_state != STATION_STATE_READY) {
priv->wstats.qual.qual = 0;
priv->wstats.qual.level = 0;
priv->wstats.qual.updated = (IW_QUAL_QUAL_INVALID
| IW_QUAL_LEVEL_INVALID);
}
priv->wstats.qual.noise = 0;
priv->wstats.qual.updated |= IW_QUAL_NOISE_INVALID;
} else {
/* Quality levels cannot be determined in ad-hoc mode,
because we can 'hear' more than one remote station. */
priv->wstats.qual.qual = 0;
priv->wstats.qual.level = 0;
priv->wstats.qual.noise = 0;
priv->wstats.qual.updated = IW_QUAL_QUAL_INVALID
| IW_QUAL_LEVEL_INVALID
| IW_QUAL_NOISE_INVALID;
priv->wstats.miss.beacon = 0;
}
return &priv->wstats;
}
static int atmel_set_mac_address(struct net_device *dev, void *p)
{
struct sockaddr *addr = p;
eth_hw_addr_set(dev, addr->sa_data);
return atmel_open(dev);
}
EXPORT_SYMBOL(atmel_open);
int atmel_open(struct net_device *dev)
{
struct atmel_private *priv = netdev_priv(dev);
int i, channel, err;
/* any scheduled timer is no longer needed and might screw things up. */
del_timer_sync(&priv->management_timer);
/* Interrupts will not touch the card once in this state... */
priv->station_state = STATION_STATE_DOWN;
if (priv->new_SSID_size) {
memcpy(priv->SSID, priv->new_SSID, priv->new_SSID_size);
priv->SSID_size = priv->new_SSID_size;
priv->new_SSID_size = 0;
}
priv->BSS_list_entries = 0;
priv->AuthenticationRequestRetryCnt = 0;
priv->AssociationRequestRetryCnt = 0;
priv->ReAssociationRequestRetryCnt = 0;
priv->CurrentAuthentTransactionSeqNum = 0x0001;
priv->ExpectedAuthentTransactionSeqNum = 0x0002;
priv->site_survey_state = SITE_SURVEY_IDLE;
priv->station_is_associated = 0;
err = reset_atmel_card(dev);
if (err)
return err;
if (priv->config_reg_domain) {
priv->reg_domain = priv->config_reg_domain;
atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS, priv->reg_domain);
} else {
priv->reg_domain = atmel_get_mib8(priv, Phy_Mib_Type, PHY_MIB_REG_DOMAIN_POS);
for (i = 0; i < ARRAY_SIZE(channel_table); i++)
if (priv->reg_domain == channel_table[i].reg_domain)
break;
if (i == ARRAY_SIZE(channel_table)) {
priv->reg_domain = REG_DOMAIN_MKK1;
printk(KERN_ALERT "%s: failed to get regulatory domain: assuming MKK1.\n", dev->name);
}
}
if ((channel = atmel_validate_channel(priv, priv->channel)))
priv->channel = channel;
/* this moves station_state on.... */
atmel_scan(priv, 1);
atmel_set_gcr(priv->dev, GCR_ENINT); /* enable interrupts */
return 0;
}
static int atmel_close(struct net_device *dev)
{
struct atmel_private *priv = netdev_priv(dev);
/* Send event to userspace that we are disassociating */
if (priv->station_state == STATION_STATE_READY) {
union iwreq_data wrqu;
wrqu.data.length = 0;
wrqu.data.flags = 0;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
eth_zero_addr(wrqu.ap_addr.sa_data);
wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
}
atmel_enter_state(priv, STATION_STATE_DOWN);
if (priv->bus_type == BUS_TYPE_PCCARD)
atmel_write16(dev, GCR, 0x0060);
atmel_write16(dev, GCR, 0x0040);
return 0;
}
static int atmel_validate_channel(struct atmel_private *priv, int channel)
{
/* check that channel is OK, if so return zero,
else return suitable default channel */
int i;
for (i = 0; i < ARRAY_SIZE(channel_table); i++)
if (priv->reg_domain == channel_table[i].reg_domain) {
if (channel >= channel_table[i].min &&
channel <= channel_table[i].max)
return 0;
else
return channel_table[i].min;
}
return 0;
}
#ifdef CONFIG_PROC_FS
static int atmel_proc_show(struct seq_file *m, void *v)
{
struct atmel_private *priv = m->private;
int i;
char *s, *r, *c;
seq_printf(m, "Driver version:\t\t%d.%d\n", DRIVER_MAJOR, DRIVER_MINOR);
if (priv->station_state != STATION_STATE_DOWN) {
seq_printf(m,
"Firmware version:\t%d.%d build %d\n"
"Firmware location:\t",
priv->host_info.major_version,
priv->host_info.minor_version,
priv->host_info.build_version);
if (priv->card_type != CARD_TYPE_EEPROM)
seq_puts(m, "on card\n");
else if (priv->firmware)
seq_printf(m, "%s loaded by host\n", priv->firmware_id);
else
seq_printf(m, "%s loaded by hotplug\n", priv->firmware_id);
switch (priv->card_type) {
case CARD_TYPE_PARALLEL_FLASH:
c = "Parallel flash";
break;
case CARD_TYPE_SPI_FLASH:
c = "SPI flash\n";
break;
case CARD_TYPE_EEPROM:
c = "EEPROM";
break;
default:
c = "<unknown>";
}
r = "<unknown>";
for (i = 0; i < ARRAY_SIZE(channel_table); i++)
if (priv->reg_domain == channel_table[i].reg_domain)
r = channel_table[i].name;
seq_printf(m, "MAC memory type:\t%s\n", c);
seq_printf(m, "Regulatory domain:\t%s\n", r);
seq_printf(m, "Host CRC checking:\t%s\n",
priv->do_rx_crc ? "On" : "Off");
seq_printf(m, "WPA-capable firmware:\t%s\n",
priv->use_wpa ? "Yes" : "No");
}
switch (priv->station_state) {
case STATION_STATE_SCANNING:
s = "Scanning";
break;
case STATION_STATE_JOINNING:
s = "Joining";
break;
case STATION_STATE_AUTHENTICATING:
s = "Authenticating";
break;
case STATION_STATE_ASSOCIATING:
s = "Associating";
break;
case STATION_STATE_READY:
s = "Ready";
break;
case STATION_STATE_REASSOCIATING:
s = "Reassociating";
break;
case STATION_STATE_MGMT_ERROR:
s = "Management error";
break;
case STATION_STATE_DOWN:
s = "Down";
break;
default:
s = "<unknown>";
}
seq_printf(m, "Current state:\t\t%s\n", s);
return 0;
}
#endif
static const struct net_device_ops atmel_netdev_ops = {
.ndo_open = atmel_open,
.ndo_stop = atmel_close,
.ndo_set_mac_address = atmel_set_mac_address,
.ndo_start_xmit = start_tx,
.ndo_do_ioctl = atmel_ioctl,
.ndo_validate_addr = eth_validate_addr,
};
struct net_device *init_atmel_card(unsigned short irq, unsigned long port,
const AtmelFWType fw_type,
struct device *sys_dev,
int (*card_present)(void *), void *card)
{
struct net_device *dev;
struct atmel_private *priv;
int rc;
/* Create the network device object. */
dev = alloc_etherdev(sizeof(*priv));
if (!dev)
return NULL;
if (dev_alloc_name(dev, dev->name) < 0) {
printk(KERN_ERR "atmel: Couldn't get name!\n");
goto err_out_free;
}
priv = netdev_priv(dev);
priv->dev = dev;
priv->sys_dev = sys_dev;
priv->present_callback = card_present;
priv->card = card;
priv->firmware = NULL;
priv->firmware_type = fw_type;
if (firmware) /* module parameter */
strscpy(priv->firmware_id, firmware, sizeof(priv->firmware_id));
priv->bus_type = card_present ? BUS_TYPE_PCCARD : BUS_TYPE_PCI;
priv->station_state = STATION_STATE_DOWN;
priv->do_rx_crc = 0;
/* For PCMCIA cards, some chips need CRC, some don't,
so we have to probe. */
if (priv->bus_type == BUS_TYPE_PCCARD) {
priv->probe_crc = 1;
priv->crc_ok_cnt = priv->crc_ko_cnt = 0;
} else
priv->probe_crc = 0;
priv->last_qual = jiffies;
priv->last_beacon_timestamp = 0;
memset(priv->frag_source, 0xff, sizeof(priv->frag_source));
eth_zero_addr(priv->BSSID);
priv->CurrentBSSID[0] = 0xFF; /* Initialize to something invalid.... */
priv->station_was_associated = 0;
priv->last_survey = jiffies;
priv->preamble = LONG_PREAMBLE;
priv->operating_mode = IW_MODE_INFRA;
priv->connect_to_any_BSS = 0;
priv->config_reg_domain = 0;
priv->reg_domain = 0;
priv->tx_rate = 3;
priv->auto_tx_rate = 1;
priv->channel = 4;
priv->power_mode = 0;
priv->SSID[0] = '\0';
priv->SSID_size = 0;
priv->new_SSID_size = 0;
priv->frag_threshold = 2346;
priv->rts_threshold = 2347;
priv->short_retry = 7;
priv->long_retry = 4;
priv->wep_is_on = 0;
priv->default_key = 0;
priv->encryption_level = 0;
priv->exclude_unencrypted = 0;
priv->group_cipher_suite = priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
priv->use_wpa = 0;
memset(priv->wep_keys, 0, sizeof(priv->wep_keys));
memset(priv->wep_key_len, 0, sizeof(priv->wep_key_len));
priv->default_beacon_period = priv->beacon_period = 100;
priv->listen_interval = 1;
timer_setup(&priv->management_timer, atmel_management_timer, 0);
spin_lock_init(&priv->irqlock);
spin_lock_init(&priv->timerlock);
dev->netdev_ops = &atmel_netdev_ops;
dev->wireless_handlers = &atmel_handler_def;
dev->irq = irq;
dev->base_addr = port;
/* MTU range: 68 - 2312 */
dev->min_mtu = 68;
dev->max_mtu = MAX_WIRELESS_BODY - ETH_FCS_LEN;
SET_NETDEV_DEV(dev, sys_dev);
if ((rc = request_irq(dev->irq, service_interrupt, IRQF_SHARED, dev->name, dev))) {
printk(KERN_ERR "%s: register interrupt %d failed, rc %d\n", dev->name, irq, rc);
goto err_out_free;
}
if (!request_region(dev->base_addr, 32,
priv->bus_type == BUS_TYPE_PCCARD ? "atmel_cs" : "atmel_pci")) {
goto err_out_irq;
}
if (register_netdev(dev))
goto err_out_res;
if (!probe_atmel_card(dev)) {
unregister_netdev(dev);
goto err_out_res;
}
netif_carrier_off(dev);
if (!proc_create_single_data("driver/atmel", 0, NULL, atmel_proc_show,
priv))
printk(KERN_WARNING "atmel: unable to create /proc entry.\n");
printk(KERN_INFO "%s: Atmel at76c50x. Version %d.%d. MAC %pM\n",
dev->name, DRIVER_MAJOR, DRIVER_MINOR, dev->dev_addr);
return dev;
err_out_res:
release_region(dev->base_addr, 32);
err_out_irq:
free_irq(dev->irq, dev);
err_out_free:
free_netdev(dev);
return NULL;
}
EXPORT_SYMBOL(init_atmel_card);
void stop_atmel_card(struct net_device *dev)
{
struct atmel_private *priv = netdev_priv(dev);
/* put a brick on it... */
if (priv->bus_type == BUS_TYPE_PCCARD)
atmel_write16(dev, GCR, 0x0060);
atmel_write16(dev, GCR, 0x0040);
del_timer_sync(&priv->management_timer);
unregister_netdev(dev);
remove_proc_entry("driver/atmel", NULL);
free_irq(dev->irq, dev);
kfree(priv->firmware);
release_region(dev->base_addr, 32);
free_netdev(dev);
}
EXPORT_SYMBOL(stop_atmel_card);
static int atmel_set_essid(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_point *dwrq = &wrqu->essid;
struct atmel_private *priv = netdev_priv(dev);
/* Check if we asked for `any' */
if (dwrq->flags == 0) {
priv->connect_to_any_BSS = 1;
} else {
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
priv->connect_to_any_BSS = 0;
/* Check the size of the string */
if (dwrq->length > MAX_SSID_LENGTH)
return -E2BIG;
if (index != 0)
return -EINVAL;
memcpy(priv->new_SSID, extra, dwrq->length);
priv->new_SSID_size = dwrq->length;
}
return -EINPROGRESS;
}
static int atmel_get_essid(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_point *dwrq = &wrqu->essid;
struct atmel_private *priv = netdev_priv(dev);
/* Get the current SSID */
if (priv->new_SSID_size != 0) {
memcpy(extra, priv->new_SSID, priv->new_SSID_size);
dwrq->length = priv->new_SSID_size;
} else {
memcpy(extra, priv->SSID, priv->SSID_size);
dwrq->length = priv->SSID_size;
}
dwrq->flags = !priv->connect_to_any_BSS; /* active */
return 0;
}
static int atmel_get_wap(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct sockaddr *awrq = &wrqu->ap_addr;
struct atmel_private *priv = netdev_priv(dev);
memcpy(awrq->sa_data, priv->CurrentBSSID, ETH_ALEN);
awrq->sa_family = ARPHRD_ETHER;
return 0;
}
static int atmel_set_encode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_point *dwrq = &wrqu->encoding;
struct atmel_private *priv = netdev_priv(dev);
/* Basic checking: do we have a key to set ?
* Note : with the new API, it's impossible to get a NULL pointer.
* Therefore, we need to check a key size == 0 instead.
* New version of iwconfig properly set the IW_ENCODE_NOKEY flag
* when no key is present (only change flags), but older versions
* don't do it. - Jean II */
if (dwrq->length > 0) {
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
int current_index = priv->default_key;
/* Check the size of the key */
if (dwrq->length > 13) {
return -EINVAL;
}
/* Check the index (none -> use current) */
if (index < 0 || index >= 4)
index = current_index;
else
priv->default_key = index;
/* Set the length */
if (dwrq->length > 5)
priv->wep_key_len[index] = 13;
else
if (dwrq->length > 0)
priv->wep_key_len[index] = 5;
else
/* Disable the key */
priv->wep_key_len[index] = 0;
/* Check if the key is not marked as invalid */
if (!(dwrq->flags & IW_ENCODE_NOKEY)) {
/* Cleanup */
memset(priv->wep_keys[index], 0, 13);
/* Copy the key in the driver */
memcpy(priv->wep_keys[index], extra, dwrq->length);
}
/* WE specify that if a valid key is set, encryption
* should be enabled (user may turn it off later)
* This is also how "iwconfig ethX key on" works */
if (index == current_index &&
priv->wep_key_len[index] > 0) {
priv->wep_is_on = 1;
priv->exclude_unencrypted = 1;
if (priv->wep_key_len[index] > 5) {
priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
priv->encryption_level = 2;
} else {
priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
priv->encryption_level = 1;
}
}
} else {
/* Do we want to just set the transmit key index ? */
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
if (index >= 0 && index < 4) {
priv->default_key = index;
} else
/* Don't complain if only change the mode */
if (!(dwrq->flags & IW_ENCODE_MODE))
return -EINVAL;
}
/* Read the flags */
if (dwrq->flags & IW_ENCODE_DISABLED) {
priv->wep_is_on = 0;
priv->encryption_level = 0;
priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
} else {
priv->wep_is_on = 1;
if (priv->wep_key_len[priv->default_key] > 5) {
priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
priv->encryption_level = 2;
} else {
priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
priv->encryption_level = 1;
}
}
if (dwrq->flags & IW_ENCODE_RESTRICTED)
priv->exclude_unencrypted = 1;
if (dwrq->flags & IW_ENCODE_OPEN)
priv->exclude_unencrypted = 0;
return -EINPROGRESS; /* Call commit handler */
}
static int atmel_get_encode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_point *dwrq = &wrqu->encoding;
struct atmel_private *priv = netdev_priv(dev);
int index = (dwrq->flags & IW_ENCODE_INDEX) - 1;
if (!priv->wep_is_on)
dwrq->flags = IW_ENCODE_DISABLED;
else {
if (priv->exclude_unencrypted)
dwrq->flags = IW_ENCODE_RESTRICTED;
else
dwrq->flags = IW_ENCODE_OPEN;
}
/* Which key do we want ? -1 -> tx index */
if (index < 0 || index >= 4)
index = priv->default_key;
dwrq->flags |= index + 1;
/* Copy the key to the user buffer */
dwrq->length = priv->wep_key_len[index];
if (dwrq->length > 16) {
dwrq->length = 0;
} else {
memset(extra, 0, 16);
memcpy(extra, priv->wep_keys[index], dwrq->length);
}
return 0;
}
static int atmel_set_encodeext(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct atmel_private *priv = netdev_priv(dev);
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
int idx, key_len, alg = ext->alg, set_key = 1;
/* Determine and validate the key index */
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
if (idx < 1 || idx > 4)
return -EINVAL;
idx--;
} else
idx = priv->default_key;
if (encoding->flags & IW_ENCODE_DISABLED)
alg = IW_ENCODE_ALG_NONE;
if (ext->ext_flags & IW_ENCODE_EXT_SET_TX_KEY) {
priv->default_key = idx;
set_key = ext->key_len > 0 ? 1 : 0;
}
if (set_key) {
/* Set the requested key first */
switch (alg) {
case IW_ENCODE_ALG_NONE:
priv->wep_is_on = 0;
priv->encryption_level = 0;
priv->pairwise_cipher_suite = CIPHER_SUITE_NONE;
break;
case IW_ENCODE_ALG_WEP:
if (ext->key_len > 5) {
priv->wep_key_len[idx] = 13;
priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_128;
priv->encryption_level = 2;
} else if (ext->key_len > 0) {
priv->wep_key_len[idx] = 5;
priv->pairwise_cipher_suite = CIPHER_SUITE_WEP_64;
priv->encryption_level = 1;
} else {
return -EINVAL;
}
priv->wep_is_on = 1;
memset(priv->wep_keys[idx], 0, 13);
key_len = min((int)ext->key_len, priv->wep_key_len[idx]);
memcpy(priv->wep_keys[idx], ext->key, key_len);
break;
default:
return -EINVAL;
}
}
return -EINPROGRESS;
}
static int atmel_get_encodeext(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct atmel_private *priv = netdev_priv(dev);
struct iw_point *encoding = &wrqu->encoding;
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
int idx, max_key_len;
max_key_len = encoding->length - sizeof(*ext);
if (max_key_len < 0)
return -EINVAL;
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx) {
if (idx < 1 || idx > 4)
return -EINVAL;
idx--;
} else
idx = priv->default_key;
encoding->flags = idx + 1;
memset(ext, 0, sizeof(*ext));
if (!priv->wep_is_on) {
ext->alg = IW_ENCODE_ALG_NONE;
ext->key_len = 0;
encoding->flags |= IW_ENCODE_DISABLED;
} else {
if (priv->encryption_level > 0)
ext->alg = IW_ENCODE_ALG_WEP;
else
return -EINVAL;
ext->key_len = priv->wep_key_len[idx];
memcpy(ext->key, priv->wep_keys[idx], ext->key_len);
encoding->flags |= IW_ENCODE_ENABLED;
}
return 0;
}
static int atmel_set_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct atmel_private *priv = netdev_priv(dev);
struct iw_param *param = &wrqu->param;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_WPA_VERSION:
case IW_AUTH_CIPHER_PAIRWISE:
case IW_AUTH_CIPHER_GROUP:
case IW_AUTH_KEY_MGMT:
case IW_AUTH_RX_UNENCRYPTED_EAPOL:
case IW_AUTH_PRIVACY_INVOKED:
/*
* atmel does not use these parameters
*/
break;
case IW_AUTH_DROP_UNENCRYPTED:
priv->exclude_unencrypted = param->value ? 1 : 0;
break;
case IW_AUTH_80211_AUTH_ALG: {
if (param->value & IW_AUTH_ALG_SHARED_KEY) {
priv->exclude_unencrypted = 1;
} else if (param->value & IW_AUTH_ALG_OPEN_SYSTEM) {
priv->exclude_unencrypted = 0;
} else
return -EINVAL;
break;
}
case IW_AUTH_WPA_ENABLED:
/* Silently accept disable of WPA */
if (param->value > 0)
return -EOPNOTSUPP;
break;
default:
return -EOPNOTSUPP;
}
return -EINPROGRESS;
}
static int atmel_get_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct atmel_private *priv = netdev_priv(dev);
struct iw_param *param = &wrqu->param;
switch (param->flags & IW_AUTH_INDEX) {
case IW_AUTH_DROP_UNENCRYPTED:
param->value = priv->exclude_unencrypted;
break;
case IW_AUTH_80211_AUTH_ALG:
if (priv->exclude_unencrypted == 1)
param->value = IW_AUTH_ALG_SHARED_KEY;
else
param->value = IW_AUTH_ALG_OPEN_SYSTEM;
break;
case IW_AUTH_WPA_ENABLED:
param->value = 0;
break;
default:
return -EOPNOTSUPP;
}
return 0;
}
static int atmel_get_name(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
strcpy(wrqu->name, "IEEE 802.11-DS");
return 0;
}
static int atmel_set_rate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_param *vwrq = &wrqu->bitrate;
struct atmel_private *priv = netdev_priv(dev);
if (vwrq->fixed == 0) {
priv->tx_rate = 3;
priv->auto_tx_rate = 1;
} else {
priv->auto_tx_rate = 0;
/* Which type of value ? */
if ((vwrq->value < 4) && (vwrq->value >= 0)) {
/* Setting by rate index */
priv->tx_rate = vwrq->value;
} else {
/* Setting by frequency value */
switch (vwrq->value) {
case 1000000:
priv->tx_rate = 0;
break;
case 2000000:
priv->tx_rate = 1;
break;
case 5500000:
priv->tx_rate = 2;
break;
case 11000000:
priv->tx_rate = 3;
break;
default:
return -EINVAL;
}
}
}
return -EINPROGRESS;
}
static int atmel_set_mode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
__u32 *uwrq = &wrqu->mode;
struct atmel_private *priv = netdev_priv(dev);
if (*uwrq != IW_MODE_ADHOC && *uwrq != IW_MODE_INFRA)
return -EINVAL;
priv->operating_mode = *uwrq;
return -EINPROGRESS;
}
static int atmel_get_mode(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
__u32 *uwrq = &wrqu->mode;
struct atmel_private *priv = netdev_priv(dev);
*uwrq = priv->operating_mode;
return 0;
}
static int atmel_get_rate(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_param *vwrq = &wrqu->bitrate;
struct atmel_private *priv = netdev_priv(dev);
if (priv->auto_tx_rate) {
vwrq->fixed = 0;
vwrq->value = 11000000;
} else {
vwrq->fixed = 1;
switch (priv->tx_rate) {
case 0:
vwrq->value = 1000000;
break;
case 1:
vwrq->value = 2000000;
break;
case 2:
vwrq->value = 5500000;
break;
case 3:
vwrq->value = 11000000;
break;
}
}
return 0;
}
static int atmel_set_power(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_param *vwrq = &wrqu->power;
struct atmel_private *priv = netdev_priv(dev);
priv->power_mode = vwrq->disabled ? 0 : 1;
return -EINPROGRESS;
}
static int atmel_get_power(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_param *vwrq = &wrqu->power;
struct atmel_private *priv = netdev_priv(dev);
vwrq->disabled = priv->power_mode ? 0 : 1;
vwrq->flags = IW_POWER_ON;
return 0;
}
static int atmel_set_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_param *vwrq = &wrqu->retry;
struct atmel_private *priv = netdev_priv(dev);
if (!vwrq->disabled && (vwrq->flags & IW_RETRY_LIMIT)) {
if (vwrq->flags & IW_RETRY_LONG)
priv->long_retry = vwrq->value;
else if (vwrq->flags & IW_RETRY_SHORT)
priv->short_retry = vwrq->value;
else {
/* No modifier : set both */
priv->long_retry = vwrq->value;
priv->short_retry = vwrq->value;
}
return -EINPROGRESS;
}
return -EINVAL;
}
static int atmel_get_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_param *vwrq = &wrqu->retry;
struct atmel_private *priv = netdev_priv(dev);
vwrq->disabled = 0; /* Can't be disabled */
/* Note : by default, display the short retry number */
if (vwrq->flags & IW_RETRY_LONG) {
vwrq->flags = IW_RETRY_LIMIT | IW_RETRY_LONG;
vwrq->value = priv->long_retry;
} else {
vwrq->flags = IW_RETRY_LIMIT;
vwrq->value = priv->short_retry;
if (priv->long_retry != priv->short_retry)
vwrq->flags |= IW_RETRY_SHORT;
}
return 0;
}
static int atmel_set_rts(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_param *vwrq = &wrqu->rts;
struct atmel_private *priv = netdev_priv(dev);
int rthr = vwrq->value;
if (vwrq->disabled)
rthr = 2347;
if ((rthr < 0) || (rthr > 2347)) {
return -EINVAL;
}
priv->rts_threshold = rthr;
return -EINPROGRESS; /* Call commit handler */
}
static int atmel_get_rts(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_param *vwrq = &wrqu->rts;
struct atmel_private *priv = netdev_priv(dev);
vwrq->value = priv->rts_threshold;
vwrq->disabled = (vwrq->value >= 2347);
vwrq->fixed = 1;
return 0;
}
static int atmel_set_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_param *vwrq = &wrqu->frag;
struct atmel_private *priv = netdev_priv(dev);
int fthr = vwrq->value;
if (vwrq->disabled)
fthr = 2346;
if ((fthr < 256) || (fthr > 2346)) {
return -EINVAL;
}
fthr &= ~0x1; /* Get an even value - is it really needed ??? */
priv->frag_threshold = fthr;
return -EINPROGRESS; /* Call commit handler */
}
static int atmel_get_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_param *vwrq = &wrqu->frag;
struct atmel_private *priv = netdev_priv(dev);
vwrq->value = priv->frag_threshold;
vwrq->disabled = (vwrq->value >= 2346);
vwrq->fixed = 1;
return 0;
}
static int atmel_set_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_freq *fwrq = &wrqu->freq;
struct atmel_private *priv = netdev_priv(dev);
int rc = -EINPROGRESS; /* Call commit handler */
/* If setting by frequency, convert to a channel */
if (fwrq->e == 1) {
int f = fwrq->m / 100000;
/* Hack to fall through... */
fwrq->e = 0;
fwrq->m = ieee80211_frequency_to_channel(f);
}
/* Setting by channel number */
if (fwrq->m < 0 || fwrq->m > 1000 || fwrq->e > 0)
rc = -EOPNOTSUPP;
else {
int channel = fwrq->m;
if (atmel_validate_channel(priv, channel) == 0) {
priv->channel = channel;
} else {
rc = -EINVAL;
}
}
return rc;
}
static int atmel_get_freq(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_freq *fwrq = &wrqu->freq;
struct atmel_private *priv = netdev_priv(dev);
fwrq->m = priv->channel;
fwrq->e = 0;
return 0;
}
static int atmel_set_scan(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *dwrq,
char *extra)
{
struct atmel_private *priv = netdev_priv(dev);
unsigned long flags;
/* Note : you may have realised that, as this is a SET operation,
* this is privileged and therefore a normal user can't
* perform scanning.
* This is not an error: while the device performs scanning,
* traffic doesn't flow, so it's a perfect DoS...
* Jean II */
if (priv->station_state == STATION_STATE_DOWN)
return -EAGAIN;
/* Timeout old surveys. */
if (time_after(jiffies, priv->last_survey + 20 * HZ))
priv->site_survey_state = SITE_SURVEY_IDLE;
priv->last_survey = jiffies;
/* Initiate a scan command */
if (priv->site_survey_state == SITE_SURVEY_IN_PROGRESS)
return -EBUSY;
del_timer_sync(&priv->management_timer);
spin_lock_irqsave(&priv->irqlock, flags);
priv->site_survey_state = SITE_SURVEY_IN_PROGRESS;
priv->fast_scan = 0;
atmel_scan(priv, 0);
spin_unlock_irqrestore(&priv->irqlock, flags);
return 0;
}
static int atmel_get_scan(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_point *dwrq = &wrqu->data;
struct atmel_private *priv = netdev_priv(dev);
int i;
char *current_ev = extra;
struct iw_event iwe;
if (priv->site_survey_state != SITE_SURVEY_COMPLETED)
return -EAGAIN;
for (i = 0; i < priv->BSS_list_entries; i++) {
iwe.cmd = SIOCGIWAP;
iwe.u.ap_addr.sa_family = ARPHRD_ETHER;
memcpy(iwe.u.ap_addr.sa_data, priv->BSSinfo[i].BSSID, ETH_ALEN);
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_ADDR_LEN);
iwe.u.data.length = priv->BSSinfo[i].SSIDsize;
if (iwe.u.data.length > 32)
iwe.u.data.length = 32;
iwe.cmd = SIOCGIWESSID;
iwe.u.data.flags = 1;
current_ev = iwe_stream_add_point(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, priv->BSSinfo[i].SSID);
iwe.cmd = SIOCGIWMODE;
iwe.u.mode = priv->BSSinfo[i].BSStype;
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_UINT_LEN);
iwe.cmd = SIOCGIWFREQ;
iwe.u.freq.m = priv->BSSinfo[i].channel;
iwe.u.freq.e = 0;
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_FREQ_LEN);
/* Add quality statistics */
iwe.cmd = IWEVQUAL;
iwe.u.qual.level = priv->BSSinfo[i].RSSI;
iwe.u.qual.qual = iwe.u.qual.level;
/* iwe.u.qual.noise = SOMETHING */
current_ev = iwe_stream_add_event(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, IW_EV_QUAL_LEN);
iwe.cmd = SIOCGIWENCODE;
if (priv->BSSinfo[i].UsingWEP)
iwe.u.data.flags = IW_ENCODE_ENABLED | IW_ENCODE_NOKEY;
else
iwe.u.data.flags = IW_ENCODE_DISABLED;
iwe.u.data.length = 0;
current_ev = iwe_stream_add_point(info, current_ev,
extra + IW_SCAN_MAX_DATA,
&iwe, NULL);
}
/* Length of data */
dwrq->length = (current_ev - extra);
dwrq->flags = 0;
return 0;
}
static int atmel_get_range(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct iw_point *dwrq = &wrqu->data;
struct atmel_private *priv = netdev_priv(dev);
struct iw_range *range = (struct iw_range *) extra;
int k, i, j;
dwrq->length = sizeof(struct iw_range);
memset(range, 0, sizeof(struct iw_range));
range->min_nwid = 0x0000;
range->max_nwid = 0x0000;
range->num_channels = 0;
for (j = 0; j < ARRAY_SIZE(channel_table); j++)
if (priv->reg_domain == channel_table[j].reg_domain) {
range->num_channels = channel_table[j].max - channel_table[j].min + 1;
break;
}
if (range->num_channels != 0) {
for (k = 0, i = channel_table[j].min; i <= channel_table[j].max; i++) {
range->freq[k].i = i; /* List index */
/* Values in MHz: m = MHz * 10^5, with e = 1 (value = m * 10) */
range->freq[k].m = 100000 *
ieee80211_channel_to_frequency(i, NL80211_BAND_2GHZ);
range->freq[k++].e = 1;
}
range->num_frequency = k;
}
range->max_qual.qual = 100;
range->max_qual.level = 100;
range->max_qual.noise = 0;
range->max_qual.updated = IW_QUAL_NOISE_INVALID;
range->avg_qual.qual = 50;
range->avg_qual.level = 50;
range->avg_qual.noise = 0;
range->avg_qual.updated = IW_QUAL_NOISE_INVALID;
range->sensitivity = 0;
range->bitrate[0] = 1000000;
range->bitrate[1] = 2000000;
range->bitrate[2] = 5500000;
range->bitrate[3] = 11000000;
range->num_bitrates = 4;
range->min_rts = 0;
range->max_rts = 2347;
range->min_frag = 256;
range->max_frag = 2346;
range->encoding_size[0] = 5;
range->encoding_size[1] = 13;
range->num_encoding_sizes = 2;
range->max_encoding_tokens = 4;
range->pmp_flags = IW_POWER_ON;
range->pmt_flags = IW_POWER_ON;
range->pm_capa = 0;
range->we_version_source = WIRELESS_EXT;
range->we_version_compiled = WIRELESS_EXT;
range->retry_capa = IW_RETRY_LIMIT;
range->retry_flags = IW_RETRY_LIMIT;
range->r_time_flags = 0;
range->min_retry = 1;
range->max_retry = 65535;
return 0;
}
static int atmel_set_wap(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu,
char *extra)
{
struct sockaddr *awrq = &wrqu->ap_addr;
struct atmel_private *priv = netdev_priv(dev);
int i;
static const u8 any[] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
static const u8 off[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 };
unsigned long flags;
if (awrq->sa_family != ARPHRD_ETHER)
return -EINVAL;
if (!memcmp(any, awrq->sa_data, 6) ||
!memcmp(off, awrq->sa_data, 6)) {
del_timer_sync(&priv->management_timer);
spin_lock_irqsave(&priv->irqlock, flags);
atmel_scan(priv, 1);
spin_unlock_irqrestore(&priv->irqlock, flags);
return 0;
}
for (i = 0; i < priv->BSS_list_entries; i++) {
if (memcmp(priv->BSSinfo[i].BSSID, awrq->sa_data, 6) == 0) {
if (!priv->wep_is_on && priv->BSSinfo[i].UsingWEP) {
return -EINVAL;
} else if (priv->wep_is_on && !priv->BSSinfo[i].UsingWEP) {
return -EINVAL;
} else {
del_timer_sync(&priv->management_timer);
spin_lock_irqsave(&priv->irqlock, flags);
atmel_join_bss(priv, i);
spin_unlock_irqrestore(&priv->irqlock, flags);
return 0;
}
}
}
return -EINVAL;
}
static int atmel_config_commit(struct net_device *dev,
struct iw_request_info *info, /* NULL */
union iwreq_data *zwrq, /* NULL */
char *extra) /* NULL */
{
return atmel_open(dev);
}
static const iw_handler atmel_handler[] =
{
IW_HANDLER(SIOCSIWCOMMIT, atmel_config_commit),
IW_HANDLER(SIOCGIWNAME, atmel_get_name),
IW_HANDLER(SIOCSIWFREQ, atmel_set_freq),
IW_HANDLER(SIOCGIWFREQ, atmel_get_freq),
IW_HANDLER(SIOCSIWMODE, atmel_set_mode),
IW_HANDLER(SIOCGIWMODE, atmel_get_mode),
IW_HANDLER(SIOCGIWRANGE, atmel_get_range),
IW_HANDLER(SIOCSIWAP, atmel_set_wap),
IW_HANDLER(SIOCGIWAP, atmel_get_wap),
IW_HANDLER(SIOCSIWSCAN, atmel_set_scan),
IW_HANDLER(SIOCGIWSCAN, atmel_get_scan),
IW_HANDLER(SIOCSIWESSID, atmel_set_essid),
IW_HANDLER(SIOCGIWESSID, atmel_get_essid),
IW_HANDLER(SIOCSIWRATE, atmel_set_rate),
IW_HANDLER(SIOCGIWRATE, atmel_get_rate),
IW_HANDLER(SIOCSIWRTS, atmel_set_rts),
IW_HANDLER(SIOCGIWRTS, atmel_get_rts),
IW_HANDLER(SIOCSIWFRAG, atmel_set_frag),
IW_HANDLER(SIOCGIWFRAG, atmel_get_frag),
IW_HANDLER(SIOCSIWRETRY, atmel_set_retry),
IW_HANDLER(SIOCGIWRETRY, atmel_get_retry),
IW_HANDLER(SIOCSIWENCODE, atmel_set_encode),
IW_HANDLER(SIOCGIWENCODE, atmel_get_encode),
IW_HANDLER(SIOCSIWPOWER, atmel_set_power),
IW_HANDLER(SIOCGIWPOWER, atmel_get_power),
IW_HANDLER(SIOCSIWAUTH, atmel_set_auth),
IW_HANDLER(SIOCGIWAUTH, atmel_get_auth),
IW_HANDLER(SIOCSIWENCODEEXT, atmel_set_encodeext),
IW_HANDLER(SIOCGIWENCODEEXT, atmel_get_encodeext),
};
static const iw_handler atmel_private_handler[] =
{
NULL, /* SIOCIWFIRSTPRIV */
};
struct atmel_priv_ioctl {
char id[32];
unsigned char __user *data;
unsigned short len;
};
#define ATMELFWL SIOCIWFIRSTPRIV
#define ATMELIDIFC (ATMELFWL + 1)
#define ATMELRD (ATMELFWL + 2)
#define ATMELMAGIC 0x51807
#define REGDOMAINSZ 20
static const struct iw_priv_args atmel_private_args[] = {
{
.cmd = ATMELFWL,
.set_args = IW_PRIV_TYPE_BYTE
| IW_PRIV_SIZE_FIXED
| sizeof(struct atmel_priv_ioctl),
.get_args = IW_PRIV_TYPE_NONE,
.name = "atmelfwl"
}, {
.cmd = ATMELIDIFC,
.set_args = IW_PRIV_TYPE_NONE,
.get_args = IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1,
.name = "atmelidifc"
}, {
.cmd = ATMELRD,
.set_args = IW_PRIV_TYPE_CHAR | REGDOMAINSZ,
.get_args = IW_PRIV_TYPE_NONE,
.name = "regdomain"
},
};
static const struct iw_handler_def atmel_handler_def = {
.num_standard = ARRAY_SIZE(atmel_handler),
.num_private = ARRAY_SIZE(atmel_private_handler),
.num_private_args = ARRAY_SIZE(atmel_private_args),
.standard = atmel_handler,
.private = atmel_private_handler,
.private_args = (struct iw_priv_args *) atmel_private_args,
.get_wireless_stats = atmel_get_wireless_stats
};
static int atmel_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
{
int i, rc = 0;
struct atmel_private *priv = netdev_priv(dev);
struct atmel_priv_ioctl com;
struct iwreq *wrq = (struct iwreq *) rq;
unsigned char *new_firmware;
char domain[REGDOMAINSZ + 1];
switch (cmd) {
case ATMELIDIFC:
wrq->u.param.value = ATMELMAGIC;
break;
case ATMELFWL:
if (copy_from_user(&com, rq->ifr_data, sizeof(com))) {
rc = -EFAULT;
break;
}
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
break;
}
new_firmware = memdup_user(com.data, com.len);
if (IS_ERR(new_firmware)) {
rc = PTR_ERR(new_firmware);
break;
}
kfree(priv->firmware);
priv->firmware = new_firmware;
priv->firmware_length = com.len;
strncpy(priv->firmware_id, com.id, 31);
priv->firmware_id[31] = '\0';
break;
case ATMELRD:
if (copy_from_user(domain, rq->ifr_data, REGDOMAINSZ)) {
rc = -EFAULT;
break;
}
if (!capable(CAP_NET_ADMIN)) {
rc = -EPERM;
break;
}
domain[REGDOMAINSZ] = 0;
rc = -EINVAL;
for (i = 0; i < ARRAY_SIZE(channel_table); i++) {
if (!strcasecmp(channel_table[i].name, domain)) {
priv->config_reg_domain = channel_table[i].reg_domain;
rc = 0;
}
}
if (rc == 0 && priv->station_state != STATION_STATE_DOWN)
rc = atmel_open(dev);
break;
default:
rc = -EOPNOTSUPP;
}
return rc;
}
struct auth_body {
__le16 alg;
__le16 trans_seq;
__le16 status;
u8 el_id;
u8 chall_text_len;
u8 chall_text[253];
};
static void atmel_enter_state(struct atmel_private *priv, int new_state)
{
int old_state = priv->station_state;
if (new_state == old_state)
return;
priv->station_state = new_state;
if (new_state == STATION_STATE_READY) {
netif_start_queue(priv->dev);
netif_carrier_on(priv->dev);
}
if (old_state == STATION_STATE_READY) {
netif_carrier_off(priv->dev);
if (netif_running(priv->dev))
netif_stop_queue(priv->dev);
priv->last_beacon_timestamp = 0;
}
}
static void atmel_scan(struct atmel_private *priv, int specific_ssid)
{
struct {
u8 BSSID[ETH_ALEN];
u8 SSID[MAX_SSID_LENGTH];
u8 scan_type;
u8 channel;
__le16 BSS_type;
__le16 min_channel_time;
__le16 max_channel_time;
u8 options;
u8 SSID_size;
} cmd;
eth_broadcast_addr(cmd.BSSID);
if (priv->fast_scan) {
cmd.SSID_size = priv->SSID_size;
memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
cmd.min_channel_time = cpu_to_le16(10);
cmd.max_channel_time = cpu_to_le16(50);
} else {
priv->BSS_list_entries = 0;
cmd.SSID_size = 0;
cmd.min_channel_time = cpu_to_le16(10);
cmd.max_channel_time = cpu_to_le16(120);
}
cmd.options = 0;
if (!specific_ssid)
cmd.options |= SCAN_OPTIONS_SITE_SURVEY;
cmd.channel = (priv->channel & 0x7f);
cmd.scan_type = SCAN_TYPE_ACTIVE;
cmd.BSS_type = cpu_to_le16(priv->operating_mode == IW_MODE_ADHOC ?
BSS_TYPE_AD_HOC : BSS_TYPE_INFRASTRUCTURE);
atmel_send_command(priv, CMD_Scan, &cmd, sizeof(cmd));
/* This must come after all hardware access to avoid being messed up
by stuff happening in interrupt context after we leave STATE_DOWN */
atmel_enter_state(priv, STATION_STATE_SCANNING);
}
static void join(struct atmel_private *priv, int type)
{
struct {
u8 BSSID[6];
u8 SSID[MAX_SSID_LENGTH];
u8 BSS_type; /* this is a short in a scan command - weird */
u8 channel;
__le16 timeout;
u8 SSID_size;
u8 reserved;
} cmd;
cmd.SSID_size = priv->SSID_size;
memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
memcpy(cmd.BSSID, priv->CurrentBSSID, ETH_ALEN);
cmd.channel = (priv->channel & 0x7f);
cmd.BSS_type = type;
cmd.timeout = cpu_to_le16(2000);
atmel_send_command(priv, CMD_Join, &cmd, sizeof(cmd));
}
static void start(struct atmel_private *priv, int type)
{
struct {
u8 BSSID[6];
u8 SSID[MAX_SSID_LENGTH];
u8 BSS_type;
u8 channel;
u8 SSID_size;
u8 reserved[3];
} cmd;
cmd.SSID_size = priv->SSID_size;
memcpy(cmd.SSID, priv->SSID, priv->SSID_size);
memcpy(cmd.BSSID, priv->BSSID, ETH_ALEN);
cmd.BSS_type = type;
cmd.channel = (priv->channel & 0x7f);
atmel_send_command(priv, CMD_Start, &cmd, sizeof(cmd));
}
static void handle_beacon_probe(struct atmel_private *priv, u16 capability,
u8 channel)
{
int rejoin = 0;
int new = capability & WLAN_CAPABILITY_SHORT_PREAMBLE ?
SHORT_PREAMBLE : LONG_PREAMBLE;
if (priv->preamble != new) {
priv->preamble = new;
rejoin = 1;
atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, new);
}
if (priv->channel != channel) {
priv->channel = channel;
rejoin = 1;
atmel_set_mib8(priv, Phy_Mib_Type, PHY_MIB_CHANNEL_POS, channel);
}
if (rejoin) {
priv->station_is_associated = 0;
atmel_enter_state(priv, STATION_STATE_JOINNING);
if (priv->operating_mode == IW_MODE_INFRA)
join(priv, BSS_TYPE_INFRASTRUCTURE);
else
join(priv, BSS_TYPE_AD_HOC);
}
}
static void send_authentication_request(struct atmel_private *priv, u16 system,
u8 *challenge, int challenge_len)
{
struct ieee80211_hdr header;
struct auth_body auth;
header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_AUTH);
header.duration_id = cpu_to_le16(0x8000);
header.seq_ctrl = 0;
memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
if (priv->wep_is_on && priv->CurrentAuthentTransactionSeqNum != 1)
/* no WEP for authentication frames with TrSeqNo 1 */
header.frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
auth.alg = cpu_to_le16(system);
auth.status = 0;
auth.trans_seq = cpu_to_le16(priv->CurrentAuthentTransactionSeqNum);
priv->ExpectedAuthentTransactionSeqNum = priv->CurrentAuthentTransactionSeqNum+1;
priv->CurrentAuthentTransactionSeqNum += 2;
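/* The fixed auth body is 6 bytes (alg, trans_seq, status); a shared-key
   challenge adds a 2-byte element header plus the challenge text,
   hence the 6 and 8 + challenge_len sizes below. */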
if (challenge_len != 0) {
auth.el_id = 16; /* challenge_text */
auth.chall_text_len = challenge_len;
memcpy(auth.chall_text, challenge, challenge_len);
atmel_transmit_management_frame(priv, &header, (u8 *)&auth, 8 + challenge_len);
} else {
atmel_transmit_management_frame(priv, &header, (u8 *)&auth, 6);
}
}
static void send_association_request(struct atmel_private *priv, int is_reassoc)
{
u8 *ssid_el_p;
int bodysize;
struct ieee80211_hdr header;
struct ass_req_format {
__le16 capability;
__le16 listen_interval;
u8 ap[ETH_ALEN]; /* nothing after here directly accessible */
u8 ssid_el_id;
u8 ssid_len;
u8 ssid[MAX_SSID_LENGTH];
u8 sup_rates_el_id;
u8 sup_rates_len;
u8 rates[4];
} body;
header.frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
(is_reassoc ? IEEE80211_STYPE_REASSOC_REQ : IEEE80211_STYPE_ASSOC_REQ));
header.duration_id = cpu_to_le16(0x8000);
header.seq_ctrl = 0;
memcpy(header.addr1, priv->CurrentBSSID, ETH_ALEN);
memcpy(header.addr2, priv->dev->dev_addr, ETH_ALEN);
memcpy(header.addr3, priv->CurrentBSSID, ETH_ALEN);
body.capability = cpu_to_le16(WLAN_CAPABILITY_ESS);
if (priv->wep_is_on)
body.capability |= cpu_to_le16(WLAN_CAPABILITY_PRIVACY);
if (priv->preamble == SHORT_PREAMBLE)
body.capability |= cpu_to_le16(WLAN_CAPABILITY_SHORT_PREAMBLE);
body.listen_interval = cpu_to_le16(priv->listen_interval * priv->beacon_period);
/* current AP address - only in reassoc frame */
if (is_reassoc) {
memcpy(body.ap, priv->CurrentBSSID, ETH_ALEN);
ssid_el_p = &body.ssid_el_id;
bodysize = 18 + priv->SSID_size;
} else {
ssid_el_p = &body.ap[0];
bodysize = 12 + priv->SSID_size;
}
ssid_el_p[0] = WLAN_EID_SSID;
ssid_el_p[1] = priv->SSID_size;
memcpy(ssid_el_p + 2, priv->SSID, priv->SSID_size);
ssid_el_p[2 + priv->SSID_size] = WLAN_EID_SUPP_RATES;
ssid_el_p[3 + priv->SSID_size] = 4; /* len of supported rates */
memcpy(ssid_el_p + 4 + priv->SSID_size, atmel_basic_rates, 4);
atmel_transmit_management_frame(priv, &header, (void *)&body, bodysize);
}
static int is_frame_from_current_bss(struct atmel_private *priv,
struct ieee80211_hdr *header)
{
if (le16_to_cpu(header->frame_control) & IEEE80211_FCTL_FROMDS)
return memcmp(header->addr3, priv->CurrentBSSID, 6) == 0;
else
return memcmp(header->addr2, priv->CurrentBSSID, 6) == 0;
}
static int retrieve_bss(struct atmel_private *priv)
{
int i;
int max_rssi = -128;
int max_index = -1;
if (priv->BSS_list_entries == 0)
return -1;
if (priv->connect_to_any_BSS) {
/* Select the BSS with the best RSSI that has the same type and
   WEP mode and is not marked as 'bad' (i.e. we had previously
   failed to connect to this BSS with the settings that we
   currently use) */
priv->current_BSS = 0;
for (i = 0; i < priv->BSS_list_entries; i++) {
if (priv->operating_mode == priv->BSSinfo[i].BSStype &&
((!priv->wep_is_on && !priv->BSSinfo[i].UsingWEP) ||
(priv->wep_is_on && priv->BSSinfo[i].UsingWEP)) &&
!(priv->BSSinfo[i].channel & 0x80) &&
priv->BSSinfo[i].RSSI >= max_rssi) {
max_rssi = priv->BSSinfo[i].RSSI;
priv->current_BSS = max_index = i;
}
}
}
return max_index;
}
for (i = 0; i < priv->BSS_list_entries; i++) {
if (priv->SSID_size == priv->BSSinfo[i].SSIDsize &&
memcmp(priv->SSID, priv->BSSinfo[i].SSID, priv->SSID_size) == 0 &&
priv->operating_mode == priv->BSSinfo[i].BSStype &&
atmel_validate_channel(priv, priv->BSSinfo[i].channel) == 0) {
if (priv->BSSinfo[i].RSSI >= max_rssi) {
max_rssi = priv->BSSinfo[i].RSSI;
max_index = i;
}
}
}
return max_index;
}
static void store_bss_info(struct atmel_private *priv,
struct ieee80211_hdr *header, u16 capability,
u16 beacon_period, u8 channel, u8 rssi, u8 ssid_len,
u8 *ssid, int is_beacon)
{
u8 *bss = capability & WLAN_CAPABILITY_ESS ? header->addr2 : header->addr3;
int i, index;
for (index = -1, i = 0; i < priv->BSS_list_entries; i++)
if (memcmp(bss, priv->BSSinfo[i].BSSID, ETH_ALEN) == 0)
index = i;
/* If we process a probe and an entry from this BSS exists
we will update the BSS entry with the info from this BSS.
If we process a beacon we will only update RSSI */
if (index == -1) {
if (priv->BSS_list_entries == MAX_BSS_ENTRIES)
return;
index = priv->BSS_list_entries++;
memcpy(priv->BSSinfo[index].BSSID, bss, ETH_ALEN);
priv->BSSinfo[index].RSSI = rssi;
} else {
if (rssi > priv->BSSinfo[index].RSSI)
priv->BSSinfo[index].RSSI = rssi;
if (is_beacon)
return;
}
priv->BSSinfo[index].channel = channel;
priv->BSSinfo[index].beacon_period = beacon_period;
priv->BSSinfo[index].UsingWEP = capability & WLAN_CAPABILITY_PRIVACY;
memcpy(priv->BSSinfo[index].SSID, ssid, ssid_len);
priv->BSSinfo[index].SSIDsize = ssid_len;
if (capability & WLAN_CAPABILITY_IBSS)
priv->BSSinfo[index].BSStype = IW_MODE_ADHOC;
else if (capability & WLAN_CAPABILITY_ESS)
priv->BSSinfo[index].BSStype = IW_MODE_INFRA;
priv->BSSinfo[index].preamble = capability & WLAN_CAPABILITY_SHORT_PREAMBLE ?
SHORT_PREAMBLE : LONG_PREAMBLE;
}
static void authenticate(struct atmel_private *priv, u16 frame_len)
{
struct auth_body *auth = (struct auth_body *)priv->rx_buf;
u16 status = le16_to_cpu(auth->status);
u16 trans_seq_no = le16_to_cpu(auth->trans_seq);
u16 system = le16_to_cpu(auth->alg);
if (status == WLAN_STATUS_SUCCESS && !priv->wep_is_on) {
/* no WEP */
if (priv->station_was_associated) {
atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
send_association_request(priv, 1);
return;
} else {
atmel_enter_state(priv, STATION_STATE_ASSOCIATING);
send_association_request(priv, 0);
return;
}
}
if (status == WLAN_STATUS_SUCCESS && priv->wep_is_on) {
int should_associate = 0;
/* WEP */
if (trans_seq_no != priv->ExpectedAuthentTransactionSeqNum)
return;
if (system == WLAN_AUTH_OPEN) {
if (trans_seq_no == 0x0002) {
should_associate = 1;
}
} else if (system == WLAN_AUTH_SHARED_KEY) {
if (trans_seq_no == 0x0002 &&
auth->el_id == WLAN_EID_CHALLENGE) {
send_authentication_request(priv, system, auth->chall_text, auth->chall_text_len);
return;
} else if (trans_seq_no == 0x0004) {
should_associate = 1;
}
}
if (should_associate) {
if (priv->station_was_associated) {
atmel_enter_state(priv, STATION_STATE_REASSOCIATING);
send_association_request(priv, 1);
return;
} else {
atmel_enter_state(priv, STATION_STATE_ASSOCIATING);
send_association_request(priv, 0);
return;
}
}
}
if (status == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG) {
/* Flip back and forth between WEP auth modes until the maximum
 * number of authentication tries has been exceeded.
 */
if (system == WLAN_AUTH_OPEN) {
priv->CurrentAuthentTransactionSeqNum = 0x001;
priv->exclude_unencrypted = 1;
send_authentication_request(priv, WLAN_AUTH_SHARED_KEY, NULL, 0);
return;
} else if (system == WLAN_AUTH_SHARED_KEY
&& priv->wep_is_on) {
priv->CurrentAuthentTransactionSeqNum = 0x001;
priv->exclude_unencrypted = 0;
send_authentication_request(priv, WLAN_AUTH_OPEN, NULL, 0);
return;
} else if (priv->connect_to_any_BSS) {
int bss_index;
priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
if ((bss_index = retrieve_bss(priv)) != -1) {
atmel_join_bss(priv, bss_index);
return;
}
}
}
priv->AuthenticationRequestRetryCnt = 0;
atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
priv->station_is_associated = 0;
}
static void associate(struct atmel_private *priv, u16 frame_len, u16 subtype)
{
struct ass_resp_format {
__le16 capability;
__le16 status;
__le16 ass_id;
u8 el_id;
u8 length;
u8 rates[4];
} *ass_resp = (struct ass_resp_format *)priv->rx_buf;
u16 status = le16_to_cpu(ass_resp->status);
u16 ass_id = le16_to_cpu(ass_resp->ass_id);
u16 rates_len = ass_resp->length > 4 ? 4 : ass_resp->length;
union iwreq_data wrqu;
if (frame_len < 8 + rates_len)
return;
if (status == WLAN_STATUS_SUCCESS) {
if (subtype == IEEE80211_STYPE_ASSOC_RESP)
priv->AssociationRequestRetryCnt = 0;
else
priv->ReAssociationRequestRetryCnt = 0;
atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
MAC_MGMT_MIB_STATION_ID_POS, ass_id & 0x3fff);
atmel_set_mib(priv, Phy_Mib_Type,
PHY_MIB_RATE_SET_POS, ass_resp->rates, rates_len);
if (priv->power_mode == 0) {
priv->listen_interval = 1;
atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
} else {
priv->listen_interval = 2;
atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
MAC_MGMT_MIB_PS_MODE_POS, PS_MODE);
atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 2);
}
priv->station_is_associated = 1;
priv->station_was_associated = 1;
atmel_enter_state(priv, STATION_STATE_READY);
/* Send association event to userspace */
wrqu.data.length = 0;
wrqu.data.flags = 0;
memcpy(wrqu.ap_addr.sa_data, priv->CurrentBSSID, ETH_ALEN);
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
return;
}
if (subtype == IEEE80211_STYPE_ASSOC_RESP &&
status != WLAN_STATUS_ASSOC_DENIED_RATES &&
status != WLAN_STATUS_CAPS_UNSUPPORTED &&
priv->AssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
priv->AssociationRequestRetryCnt++;
send_association_request(priv, 0);
return;
}
if (subtype == IEEE80211_STYPE_REASSOC_RESP &&
status != WLAN_STATUS_ASSOC_DENIED_RATES &&
status != WLAN_STATUS_CAPS_UNSUPPORTED &&
priv->ReAssociationRequestRetryCnt < MAX_ASSOCIATION_RETRIES) {
mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
priv->ReAssociationRequestRetryCnt++;
send_association_request(priv, 1);
return;
}
atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
priv->station_is_associated = 0;
if (priv->connect_to_any_BSS) {
int bss_index;
priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
if ((bss_index = retrieve_bss(priv)) != -1)
atmel_join_bss(priv, bss_index);
}
}
static void atmel_join_bss(struct atmel_private *priv, int bss_index)
{
struct bss_info *bss = &priv->BSSinfo[bss_index];
memcpy(priv->CurrentBSSID, bss->BSSID, ETH_ALEN);
memcpy(priv->SSID, bss->SSID, priv->SSID_size = bss->SSIDsize);
/* The WPA stuff cares about the current AP address */
if (priv->use_wpa)
build_wpa_mib(priv);
/* When switching to AdHoc turn OFF Power Save if needed */
if (bss->BSStype == IW_MODE_ADHOC &&
priv->operating_mode != IW_MODE_ADHOC &&
priv->power_mode) {
priv->power_mode = 0;
priv->listen_interval = 1;
atmel_set_mib8(priv, Mac_Mgmt_Mib_Type,
MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
atmel_set_mib16(priv, Mac_Mgmt_Mib_Type,
MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
}
priv->operating_mode = bss->BSStype;
priv->channel = bss->channel & 0x7f;
priv->beacon_period = bss->beacon_period;
if (priv->preamble != bss->preamble) {
priv->preamble = bss->preamble;
atmel_set_mib8(priv, Local_Mib_Type,
LOCAL_MIB_PREAMBLE_TYPE, bss->preamble);
}
if (!priv->wep_is_on && bss->UsingWEP) {
atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
priv->station_is_associated = 0;
return;
}
if (priv->wep_is_on && !bss->UsingWEP) {
atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
priv->station_is_associated = 0;
return;
}
atmel_enter_state(priv, STATION_STATE_JOINNING);
if (priv->operating_mode == IW_MODE_INFRA)
join(priv, BSS_TYPE_INFRASTRUCTURE);
else
join(priv, BSS_TYPE_AD_HOC);
}
static void restart_search(struct atmel_private *priv)
{
int bss_index;
if (!priv->connect_to_any_BSS) {
atmel_scan(priv, 1);
} else {
priv->BSSinfo[(int)(priv->current_BSS)].channel |= 0x80;
if ((bss_index = retrieve_bss(priv)) != -1)
atmel_join_bss(priv, bss_index);
else
atmel_scan(priv, 0);
}
}
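/* Scale the raw hardware RSSI to a 0-100 level and average it with the
   previous reading, rounding halves up. The maximum raw value differs
   per radio revision and was determined by experiment. */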
static void smooth_rssi(struct atmel_private *priv, u8 rssi)
{
u8 old = priv->wstats.qual.level;
u8 max_rssi = 42; /* 502-rmfd-revd max by experiment, default for now */
switch (priv->firmware_type) {
case ATMEL_FW_TYPE_502E:
max_rssi = 63; /* 502-rmfd-reve max by experiment */
break;
default:
break;
}
rssi = rssi * 100 / max_rssi;
if ((rssi + old) % 2)
priv->wstats.qual.level = (rssi + old) / 2 + 1;
else
priv->wstats.qual.level = (rssi + old) / 2;
priv->wstats.qual.updated |= IW_QUAL_LEVEL_UPDATED;
priv->wstats.qual.updated &= ~IW_QUAL_LEVEL_INVALID;
}
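/* For each elapsed second, decay the quality by half and add a term
   proportional to the beacons received. E.g. with a 100 TU beacon period,
   all ~10 beacons per second received and level 50, the per-second gain is
   10 * 100 * (50 + 100) / 4000 = 37, so qual settles around 74. */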
static void atmel_smooth_qual(struct atmel_private *priv)
{
unsigned long time_diff = (jiffies - priv->last_qual) / HZ;
while (time_diff--) {
priv->last_qual += HZ;
priv->wstats.qual.qual = priv->wstats.qual.qual / 2;
priv->wstats.qual.qual +=
priv->beacons_this_sec * priv->beacon_period * (priv->wstats.qual.level + 100) / 4000;
priv->beacons_this_sec = 0;
}
priv->wstats.qual.updated |= IW_QUAL_QUAL_UPDATED;
priv->wstats.qual.updated &= ~IW_QUAL_QUAL_INVALID;
}
/* deals with incoming management frames. */
static void atmel_management_frame(struct atmel_private *priv,
struct ieee80211_hdr *header,
u16 frame_len, u8 rssi)
{
u16 subtype;
subtype = le16_to_cpu(header->frame_control) & IEEE80211_FCTL_STYPE;
switch (subtype) {
case IEEE80211_STYPE_BEACON:
case IEEE80211_STYPE_PROBE_RESP:
/* beacon frame has multiple variable-length fields -
never let an engineer loose with a data structure design. */
{
struct beacon_format {
__le64 timestamp;
__le16 interval;
__le16 capability;
u8 ssid_el_id;
u8 ssid_length;
/* ssid here */
u8 rates_el_id;
u8 rates_length;
/* rates here */
u8 ds_el_id;
u8 ds_length;
/* ds here */
} *beacon = (struct beacon_format *)priv->rx_buf;
u8 channel, rates_length, ssid_length;
u64 timestamp = le64_to_cpu(beacon->timestamp);
u16 beacon_interval = le16_to_cpu(beacon->interval);
u16 capability = le16_to_cpu(beacon->capability);
u8 *beaconp = priv->rx_buf;
ssid_length = beacon->ssid_length;
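/* Offsets into the raw frame: the fixed part is 12 bytes
   (timestamp 8, interval 2, capability 2), so the SSID element
   header sits at bytes 12-13, the rates element length at
   15 + ssid_length, and the DS (channel) byte at
   18 + ssid_length + rates_length. */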
/* this blows chunks. */
if (frame_len < 14 || frame_len < ssid_length + 15)
return;
rates_length = beaconp[beacon->ssid_length + 15];
if (frame_len < ssid_length + rates_length + 18)
return;
if (ssid_length > MAX_SSID_LENGTH)
return;
channel = beaconp[ssid_length + rates_length + 18];
if (priv->station_state == STATION_STATE_READY) {
smooth_rssi(priv, rssi);
if (is_frame_from_current_bss(priv, header)) {
priv->beacons_this_sec++;
atmel_smooth_qual(priv);
if (priv->last_beacon_timestamp) {
/* Note: truncate this to 32 bits - the kernel can't divide a 64-bit value directly */
u32 beacon_delay = timestamp - priv->last_beacon_timestamp;
int beacons = beacon_delay / (beacon_interval * 1000);
if (beacons > 1)
priv->wstats.miss.beacon += beacons - 1;
}
priv->last_beacon_timestamp = timestamp;
handle_beacon_probe(priv, capability, channel);
}
}
if (priv->station_state == STATION_STATE_SCANNING)
store_bss_info(priv, header, capability,
beacon_interval, channel, rssi,
ssid_length,
&beacon->rates_el_id,
subtype == IEEE80211_STYPE_BEACON);
}
break;
case IEEE80211_STYPE_AUTH:
if (priv->station_state == STATION_STATE_AUTHENTICATING)
authenticate(priv, frame_len);
break;
case IEEE80211_STYPE_ASSOC_RESP:
case IEEE80211_STYPE_REASSOC_RESP:
if (priv->station_state == STATION_STATE_ASSOCIATING ||
priv->station_state == STATION_STATE_REASSOCIATING)
associate(priv, frame_len, subtype);
break;
case IEEE80211_STYPE_DISASSOC:
if (priv->station_is_associated &&
priv->operating_mode == IW_MODE_INFRA &&
is_frame_from_current_bss(priv, header)) {
priv->station_was_associated = 0;
priv->station_is_associated = 0;
atmel_enter_state(priv, STATION_STATE_JOINNING);
join(priv, BSS_TYPE_INFRASTRUCTURE);
}
break;
case IEEE80211_STYPE_DEAUTH:
if (priv->operating_mode == IW_MODE_INFRA &&
is_frame_from_current_bss(priv, header)) {
priv->station_was_associated = 0;
atmel_enter_state(priv, STATION_STATE_JOINNING);
join(priv, BSS_TYPE_INFRASTRUCTURE);
}
break;
}
}
/* run when timer expires */
static void atmel_management_timer(struct timer_list *t)
{
struct atmel_private *priv = from_timer(priv, t, management_timer);
unsigned long flags;
/* Check if the card has been yanked. */
if (priv->card && priv->present_callback &&
!(*priv->present_callback)(priv->card))
return;
spin_lock_irqsave(&priv->irqlock, flags);
switch (priv->station_state) {
case STATION_STATE_AUTHENTICATING:
if (priv->AuthenticationRequestRetryCnt >= MAX_AUTHENTICATION_RETRIES) {
atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
priv->station_is_associated = 0;
priv->AuthenticationRequestRetryCnt = 0;
restart_search(priv);
} else {
int auth = WLAN_AUTH_OPEN;
priv->AuthenticationRequestRetryCnt++;
priv->CurrentAuthentTransactionSeqNum = 0x0001;
mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
if (priv->wep_is_on && priv->exclude_unencrypted)
auth = WLAN_AUTH_SHARED_KEY;
send_authentication_request(priv, auth, NULL, 0);
}
break;
case STATION_STATE_ASSOCIATING:
if (priv->AssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
priv->station_is_associated = 0;
priv->AssociationRequestRetryCnt = 0;
restart_search(priv);
} else {
priv->AssociationRequestRetryCnt++;
mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
send_association_request(priv, 0);
}
break;
case STATION_STATE_REASSOCIATING:
if (priv->ReAssociationRequestRetryCnt == MAX_ASSOCIATION_RETRIES) {
atmel_enter_state(priv, STATION_STATE_MGMT_ERROR);
priv->station_is_associated = 0;
priv->ReAssociationRequestRetryCnt = 0;
restart_search(priv);
} else {
priv->ReAssociationRequestRetryCnt++;
mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
send_association_request(priv, 1);
}
break;
default:
break;
}
spin_unlock_irqrestore(&priv->irqlock, flags);
}
static void atmel_command_irq(struct atmel_private *priv)
{
u8 status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET));
u8 command = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET));
int fast_scan;
union iwreq_data wrqu;
if (status == CMD_STATUS_IDLE ||
status == CMD_STATUS_IN_PROGRESS)
return;
switch (command) {
case CMD_Start:
if (status == CMD_STATUS_COMPLETE) {
priv->station_was_associated = priv->station_is_associated;
atmel_get_mib(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_BSSID_POS,
(u8 *)priv->CurrentBSSID, 6);
atmel_enter_state(priv, STATION_STATE_READY);
}
break;
case CMD_Scan:
fast_scan = priv->fast_scan;
priv->fast_scan = 0;
if (status != CMD_STATUS_COMPLETE) {
atmel_scan(priv, 1);
} else {
int bss_index = retrieve_bss(priv);
int notify_scan_complete = 1;
if (bss_index != -1) {
atmel_join_bss(priv, bss_index);
} else if (priv->operating_mode == IW_MODE_ADHOC &&
priv->SSID_size != 0) {
start(priv, BSS_TYPE_AD_HOC);
} else {
priv->fast_scan = !fast_scan;
atmel_scan(priv, 1);
notify_scan_complete = 0;
}
priv->site_survey_state = SITE_SURVEY_COMPLETED;
if (notify_scan_complete) {
wrqu.data.length = 0;
wrqu.data.flags = 0;
wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL);
}
}
break;
case CMD_SiteSurvey:
priv->fast_scan = 0;
if (status != CMD_STATUS_COMPLETE)
return;
priv->site_survey_state = SITE_SURVEY_COMPLETED;
if (priv->station_is_associated) {
atmel_enter_state(priv, STATION_STATE_READY);
wrqu.data.length = 0;
wrqu.data.flags = 0;
wireless_send_event(priv->dev, SIOCGIWSCAN, &wrqu, NULL);
} else {
atmel_scan(priv, 1);
}
break;
case CMD_Join:
if (status == CMD_STATUS_COMPLETE) {
if (priv->operating_mode == IW_MODE_ADHOC) {
priv->station_was_associated = priv->station_is_associated;
atmel_enter_state(priv, STATION_STATE_READY);
} else {
int auth = WLAN_AUTH_OPEN;
priv->AuthenticationRequestRetryCnt = 0;
atmel_enter_state(priv, STATION_STATE_AUTHENTICATING);
mod_timer(&priv->management_timer, jiffies + MGMT_JIFFIES);
priv->CurrentAuthentTransactionSeqNum = 0x0001;
if (priv->wep_is_on && priv->exclude_unencrypted)
auth = WLAN_AUTH_SHARED_KEY;
send_authentication_request(priv, auth, NULL, 0);
}
return;
}
atmel_scan(priv, 1);
}
}
static int atmel_wakeup_firmware(struct atmel_private *priv)
{
struct host_info_struct *iface = &priv->host_info;
u16 mr1, mr3;
int i;
if (priv->card_type == CARD_TYPE_SPI_FLASH)
atmel_set_gcr(priv->dev, GCR_REMAP);
/* wake up on-board processor */
atmel_clear_gcr(priv->dev, 0x0040);
atmel_write16(priv->dev, BSR, BSS_SRAM);
if (priv->card_type == CARD_TYPE_SPI_FLASH)
mdelay(100);
/* and wait for it */
for (i = LOOP_RETRY_LIMIT; i; i--) {
mr1 = atmel_read16(priv->dev, MR1);
mr3 = atmel_read16(priv->dev, MR3);
if (mr3 & MAC_BOOT_COMPLETE)
break;
if (mr1 & MAC_BOOT_COMPLETE &&
priv->bus_type == BUS_TYPE_PCCARD)
break;
}
if (i == 0) {
printk(KERN_ALERT "%s: MAC failed to boot.\n", priv->dev->name);
return -EIO;
}
if ((priv->host_info_base = atmel_read16(priv->dev, MR2)) == 0xffff) {
printk(KERN_ALERT "%s: card missing.\n", priv->dev->name);
return -ENODEV;
}
/* now signal host setup completion through the FunCtrl field of the
   IFACE, then poll MR1/MR3 to detect completion of MAC initialization
   and check the completion status; the caller goes on to set the
   interrupt mask, enable interrupts and initialise the Tx and Rx paths */
atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET), FUNC_CTRL_INIT_COMPLETE);
for (i = LOOP_RETRY_LIMIT; i; i--) {
mr1 = atmel_read16(priv->dev, MR1);
mr3 = atmel_read16(priv->dev, MR3);
if (mr3 & MAC_INIT_COMPLETE)
break;
if (mr1 & MAC_INIT_COMPLETE &&
priv->bus_type == BUS_TYPE_PCCARD)
break;
}
if (i == 0) {
printk(KERN_ALERT "%s: MAC failed to initialise.\n",
priv->dev->name);
return -EIO;
}
/* Check for MAC_INIT_OK only on the register that the MAC_INIT_OK was set */
if ((mr3 & MAC_INIT_COMPLETE) &&
!(atmel_read16(priv->dev, MR3) & MAC_INIT_OK)) {
printk(KERN_ALERT "%s: MAC failed MR3 self-test.\n", priv->dev->name);
return -EIO;
}
if ((mr1 & MAC_INIT_COMPLETE) &&
!(atmel_read16(priv->dev, MR1) & MAC_INIT_OK)) {
printk(KERN_ALERT "%s: MAC failed MR1 self-test.\n", priv->dev->name);
return -EIO;
}
atmel_copy_to_host(priv->dev, (unsigned char *)iface,
priv->host_info_base, sizeof(*iface));
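/* The firmware's host_info block is little-endian; convert each
   field to host byte order in place. */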
iface->tx_buff_pos = le16_to_cpu(iface->tx_buff_pos);
iface->tx_buff_size = le16_to_cpu(iface->tx_buff_size);
iface->tx_desc_pos = le16_to_cpu(iface->tx_desc_pos);
iface->tx_desc_count = le16_to_cpu(iface->tx_desc_count);
iface->rx_buff_pos = le16_to_cpu(iface->rx_buff_pos);
iface->rx_buff_size = le16_to_cpu(iface->rx_buff_size);
iface->rx_desc_pos = le16_to_cpu(iface->rx_desc_pos);
iface->rx_desc_count = le16_to_cpu(iface->rx_desc_count);
iface->build_version = le16_to_cpu(iface->build_version);
iface->command_pos = le16_to_cpu(iface->command_pos);
iface->major_version = le16_to_cpu(iface->major_version);
iface->minor_version = le16_to_cpu(iface->minor_version);
iface->func_ctrl = le16_to_cpu(iface->func_ctrl);
iface->mac_status = le16_to_cpu(iface->mac_status);
return 0;
}
/* determine type of memory and MAC address */
static int probe_atmel_card(struct net_device *dev)
{
int rc = 0;
struct atmel_private *priv = netdev_priv(dev);
u8 addr[ETH_ALEN] = {};
/* reset pccard */
if (priv->bus_type == BUS_TYPE_PCCARD)
atmel_write16(dev, GCR, 0x0060);
atmel_write16(dev, GCR, 0x0040);
msleep(500);
if (atmel_read16(dev, MR2) == 0) {
/* No stored firmware so load a small stub which just
tells us the MAC address */
int i;
priv->card_type = CARD_TYPE_EEPROM;
atmel_write16(dev, BSR, BSS_IRAM);
atmel_copy_to_card(dev, 0, mac_reader, sizeof(mac_reader));
atmel_set_gcr(dev, GCR_REMAP);
atmel_clear_gcr(priv->dev, 0x0040);
atmel_write16(dev, BSR, BSS_SRAM);
for (i = LOOP_RETRY_LIMIT; i; i--)
if (atmel_read16(dev, MR3) & MAC_BOOT_COMPLETE)
break;
if (i == 0) {
printk(KERN_ALERT "%s: MAC failed to boot MAC address reader.\n", dev->name);
} else {
atmel_copy_to_host(dev, addr, atmel_read16(dev, MR2), 6);
eth_hw_addr_set(dev, addr);
/* got address, now squash it again until the network
interface is opened */
if (priv->bus_type == BUS_TYPE_PCCARD)
atmel_write16(dev, GCR, 0x0060);
atmel_write16(dev, GCR, 0x0040);
rc = 1;
}
} else if (atmel_read16(dev, MR4) == 0) {
/* Mac address easy in this case. */
priv->card_type = CARD_TYPE_PARALLEL_FLASH;
atmel_write16(dev, BSR, 1);
atmel_copy_to_host(dev, addr, 0xc000, 6);
eth_hw_addr_set(dev, addr);
atmel_write16(dev, BSR, 0x200);
rc = 1;
} else {
/* Standard firmware in flash, boot it up and ask
for the Mac Address */
priv->card_type = CARD_TYPE_SPI_FLASH;
if (atmel_wakeup_firmware(priv) == 0) {
atmel_get_mib(priv, Mac_Address_Mib_Type, 0, addr, 6);
eth_hw_addr_set(dev, addr);
/* got address, now squash it again until the network
interface is opened */
if (priv->bus_type == BUS_TYPE_PCCARD)
atmel_write16(dev, GCR, 0x0060);
atmel_write16(dev, GCR, 0x0040);
rc = 1;
}
}
if (rc) {
if (dev->dev_addr[0] == 0xFF) {
static const u8 default_mac[] = {
0x00, 0x04, 0x25, 0x00, 0x00, 0x00
};
printk(KERN_ALERT "%s: *** Invalid MAC address. UPGRADE Firmware ****\n", dev->name);
eth_hw_addr_set(dev, default_mac);
}
}
return rc;
}
/* Move the encryption information into the MIB structure.
   This routine is for the pre-WPA firmware: later firmware has
   a different format MIB and a different routine. */
static void build_wep_mib(struct atmel_private *priv)
{
struct { /* NB this is matched to the hardware, don't change. */
u8 wep_is_on;
u8 default_key; /* 0..3 */
u8 reserved;
u8 exclude_unencrypted;
u32 WEPICV_error_count;
u32 WEP_excluded_count;
u8 wep_keys[MAX_ENCRYPTION_KEYS][13];
u8 encryption_level; /* 0, 1, 2 */
u8 reserved2[3];
} mib;
int i;
mib.wep_is_on = priv->wep_is_on;
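/* encryption_level selects the WEP strength from the default key's
   length: more than 5 bytes means 104-bit WEP (2), otherwise
   40-bit (1); 0 disables encryption. */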
if (priv->wep_is_on) {
if (priv->wep_key_len[priv->default_key] > 5)
mib.encryption_level = 2;
else
mib.encryption_level = 1;
} else {
mib.encryption_level = 0;
}
mib.default_key = priv->default_key;
mib.exclude_unencrypted = priv->exclude_unencrypted;
for (i = 0; i < MAX_ENCRYPTION_KEYS; i++)
memcpy(mib.wep_keys[i], priv->wep_keys[i], 13);
atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib));
}
static void build_wpa_mib(struct atmel_private *priv)
{
/* This is for the later (WPA enabled) firmware. */
struct { /* NB this is matched to the hardware, don't change. */
u8 cipher_default_key_value[MAX_ENCRYPTION_KEYS][MAX_ENCRYPTION_KEY_SIZE];
u8 receiver_address[ETH_ALEN];
u8 wep_is_on;
u8 default_key; /* 0..3 */
u8 group_key;
u8 exclude_unencrypted;
u8 encryption_type;
u8 reserved;
u32 WEPICV_error_count;
u32 WEP_excluded_count;
u8 key_RSC[4][8];
} mib;
int i;
mib.wep_is_on = priv->wep_is_on;
mib.exclude_unencrypted = priv->exclude_unencrypted;
memcpy(mib.receiver_address, priv->CurrentBSSID, ETH_ALEN);
/* zero all the keys before adding in valid ones. */
memset(mib.cipher_default_key_value, 0, sizeof(mib.cipher_default_key_value));
if (priv->wep_is_on) {
/* There's a comment in the Atmel code to the effect that this
   is only valid when still using WEP; it may need to be set to
   something else to use WPA */
memset(mib.key_RSC, 0, sizeof(mib.key_RSC));
mib.default_key = mib.group_key = 255;
for (i = 0; i < MAX_ENCRYPTION_KEYS; i++) {
if (priv->wep_key_len[i] > 0) {
memcpy(mib.cipher_default_key_value[i], priv->wep_keys[i], MAX_ENCRYPTION_KEY_SIZE);
if (i == priv->default_key) {
mib.default_key = i;
mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 7;
mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->pairwise_cipher_suite;
} else {
mib.group_key = i;
priv->group_cipher_suite = priv->pairwise_cipher_suite;
mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-1] = 1;
mib.cipher_default_key_value[i][MAX_ENCRYPTION_KEY_SIZE-2] = priv->group_cipher_suite;
}
}
}
if (mib.default_key == 255)
mib.default_key = mib.group_key != 255 ? mib.group_key : 0;
if (mib.group_key == 255)
mib.group_key = mib.default_key;
}
atmel_set_mib(priv, Mac_Wep_Mib_Type, 0, (u8 *)&mib, sizeof(mib));
}
static int reset_atmel_card(struct net_device *dev)
{
/* do everything necessary to wake up the hardware, including
waiting for the lightning strike and throwing the knife switch....
set all the Mib values which matter in the card to match
their settings in the atmel_private structure. Some of these
can be altered on the fly, but many (WEP, infrastructure or ad-hoc)
can only be changed by tearing down the world and coming back through
here.
This routine is also responsible for initialising some
hardware-specific fields in the atmel_private structure,
including a copy of the firmware's hostinfo structure
which is the route into the rest of the firmware datastructures. */
struct atmel_private *priv = netdev_priv(dev);
u8 configuration;
int old_state = priv->station_state;
int err = 0;
/* data to add to the firmware names, in priority order;
   this implements firmware versioning */
static char *firmware_modifier[] = {
"-wpa",
"",
NULL
};
/* reset pccard */
if (priv->bus_type == BUS_TYPE_PCCARD)
atmel_write16(priv->dev, GCR, 0x0060);
/* stop card , disable interrupts */
atmel_write16(priv->dev, GCR, 0x0040);
if (priv->card_type == CARD_TYPE_EEPROM) {
/* copy in firmware if needed */
const struct firmware *fw_entry = NULL;
const unsigned char *fw;
int len = priv->firmware_length;
if (!(fw = priv->firmware)) {
if (priv->firmware_type == ATMEL_FW_TYPE_NONE) {
if (strlen(priv->firmware_id) == 0) {
printk(KERN_INFO
"%s: card type is unknown: assuming at76c502 firmware is OK.\n",
dev->name);
printk(KERN_INFO
"%s: if not, use the firmware= module parameter.\n",
dev->name);
strcpy(priv->firmware_id, "atmel_at76c502.bin");
}
err = request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev);
if (err != 0) {
printk(KERN_ALERT
"%s: firmware %s is missing, cannot continue.\n",
dev->name, priv->firmware_id);
return err;
}
} else {
int fw_index = 0;
int success = 0;
/* get firmware filename entry based on firmware type ID */
while (fw_table[fw_index].fw_type != priv->firmware_type
&& fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE)
fw_index++;
/* construct the actual firmware file name */
if (fw_table[fw_index].fw_type != ATMEL_FW_TYPE_NONE) {
int i;
for (i = 0; firmware_modifier[i]; i++) {
snprintf(priv->firmware_id, 32, "%s%s.%s", fw_table[fw_index].fw_file,
firmware_modifier[i], fw_table[fw_index].fw_file_ext);
priv->firmware_id[31] = '\0';
if (request_firmware(&fw_entry, priv->firmware_id, priv->sys_dev) == 0) {
success = 1;
break;
}
}
}
if (!success) {
printk(KERN_ALERT
"%s: firmware %s is missing, cannot start.\n",
dev->name, priv->firmware_id);
priv->firmware_id[0] = '\0';
return -ENOENT;
}
}
fw = fw_entry->data;
len = fw_entry->size;
}
if (len <= 0x6000) {
atmel_write16(priv->dev, BSR, BSS_IRAM);
atmel_copy_to_card(priv->dev, 0, fw, len);
atmel_set_gcr(priv->dev, GCR_REMAP);
} else {
/* Remap */
atmel_set_gcr(priv->dev, GCR_REMAP);
atmel_write16(priv->dev, BSR, BSS_IRAM);
atmel_copy_to_card(priv->dev, 0, fw, 0x6000);
atmel_write16(priv->dev, BSR, 0x2ff);
atmel_copy_to_card(priv->dev, 0x8000, &fw[0x6000], len - 0x6000);
}
release_firmware(fw_entry);
}
err = atmel_wakeup_firmware(priv);
if (err != 0)
return err;
/* Check the version and set the correct flag for wpa stuff,
old and new firmware is incompatible.
The pre-wpa 3com firmware reports major version 5,
the wpa 3com firmware is major version 4 and doesn't need
the 3com broken-ness filter. */
priv->use_wpa = (priv->host_info.major_version == 4);
priv->radio_on_broken = (priv->host_info.major_version == 5);
/* unmask all irq sources */
atmel_wmem8(priv, atmel_hi(priv, IFACE_INT_MASK_OFFSET), 0xff);
/* int Tx system and enable Tx */
atmel_wmem8(priv, atmel_tx(priv, TX_DESC_FLAGS_OFFSET, 0), 0);
atmel_wmem32(priv, atmel_tx(priv, TX_DESC_NEXT_OFFSET, 0), 0x80000000L);
atmel_wmem16(priv, atmel_tx(priv, TX_DESC_POS_OFFSET, 0), 0);
atmel_wmem16(priv, atmel_tx(priv, TX_DESC_SIZE_OFFSET, 0), 0);
priv->tx_desc_free = priv->host_info.tx_desc_count;
priv->tx_desc_head = 0;
priv->tx_desc_tail = 0;
priv->tx_desc_previous = 0;
priv->tx_free_mem = priv->host_info.tx_buff_size;
priv->tx_buff_head = 0;
priv->tx_buff_tail = 0;
configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET));
atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET),
configuration | FUNC_CTRL_TxENABLE);
/* init Rx system and enable */
priv->rx_desc_head = 0;
configuration = atmel_rmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET));
atmel_wmem8(priv, atmel_hi(priv, IFACE_FUNC_CTRL_OFFSET),
configuration | FUNC_CTRL_RxENABLE);
if (!priv->radio_on_broken) {
if (atmel_send_command_wait(priv, CMD_EnableRadio, NULL, 0) ==
CMD_STATUS_REJECTED_RADIO_OFF) {
printk(KERN_INFO "%s: cannot turn the radio on.\n",
dev->name);
return -EIO;
}
}
/* set up enough MIB values to run. */
atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_AUTO_TX_RATE_POS, priv->auto_tx_rate);
atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_TX_PROMISCUOUS_POS, PROM_MODE_OFF);
atmel_set_mib16(priv, Mac_Mib_Type, MAC_MIB_RTS_THRESHOLD_POS, priv->rts_threshold);
atmel_set_mib16(priv, Mac_Mib_Type, MAC_MIB_FRAG_THRESHOLD_POS, priv->frag_threshold);
atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_SHORT_RETRY_POS, priv->short_retry);
atmel_set_mib8(priv, Mac_Mib_Type, MAC_MIB_LONG_RETRY_POS, priv->long_retry);
atmel_set_mib8(priv, Local_Mib_Type, LOCAL_MIB_PREAMBLE_TYPE, priv->preamble);
atmel_set_mib(priv, Mac_Address_Mib_Type, MAC_ADDR_MIB_MAC_ADDR_POS,
priv->dev->dev_addr, 6);
atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_PS_MODE_POS, ACTIVE_MODE);
atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_LISTEN_INTERVAL_POS, 1);
atmel_set_mib16(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_BEACON_PER_POS, priv->default_beacon_period);
atmel_set_mib(priv, Phy_Mib_Type, PHY_MIB_RATE_SET_POS, atmel_basic_rates, 4);
atmel_set_mib8(priv, Mac_Mgmt_Mib_Type, MAC_MGMT_MIB_CUR_PRIVACY_POS, priv->wep_is_on);
if (priv->use_wpa)
build_wpa_mib(priv);
else
build_wep_mib(priv);
if (old_state == STATION_STATE_READY) {
union iwreq_data wrqu;
wrqu.data.length = 0;
wrqu.data.flags = 0;
wrqu.ap_addr.sa_family = ARPHRD_ETHER;
eth_zero_addr(wrqu.ap_addr.sa_data);
wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
}
return 0;
}
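/* Commands pass through a mailbox block in shared card memory: the
   parameters are copied in first, then the opcode; clearing the status
   byte hands the block to the MAC, which writes a completion status back. */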
static void atmel_send_command(struct atmel_private *priv, int command,
void *cmd, int cmd_size)
{
if (cmd)
atmel_copy_to_card(priv->dev, atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET),
cmd, cmd_size);
atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_COMMAND_OFFSET), command);
atmel_wmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET), 0);
}
static int atmel_send_command_wait(struct atmel_private *priv, int command,
void *cmd, int cmd_size)
{
int i, status;
atmel_send_command(priv, command, cmd, cmd_size);
for (i = 5000; i; i--) {
status = atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_STATUS_OFFSET));
if (status != CMD_STATUS_IDLE &&
status != CMD_STATUS_IN_PROGRESS)
break;
udelay(20);
}
if (i == 0) {
printk(KERN_ALERT "%s: failed to contact MAC.\n", priv->dev->name);
status = CMD_STATUS_HOST_ERROR;
} else {
if (command != CMD_EnableRadio)
status = CMD_STATUS_COMPLETE;
}
return status;
}
static u8 atmel_get_mib8(struct atmel_private *priv, u8 type, u8 index)
{
struct get_set_mib m;
m.type = type;
m.size = 1;
m.index = index;
atmel_send_command_wait(priv, CMD_Get_MIB_Vars, &m, MIB_HEADER_SIZE + 1);
return atmel_rmem8(priv, atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET + MIB_HEADER_SIZE));
}
static void atmel_set_mib8(struct atmel_private *priv, u8 type, u8 index, u8 data)
{
struct get_set_mib m;
m.type = type;
m.size = 1;
m.index = index;
m.data[0] = data;
atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 1);
}
static void atmel_set_mib16(struct atmel_private *priv, u8 type, u8 index,
u16 data)
{
struct get_set_mib m;
m.type = type;
m.size = 2;
m.index = index;
m.data[0] = data;
m.data[1] = data >> 8;
atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + 2);
}
static void atmel_set_mib(struct atmel_private *priv, u8 type, u8 index,
const u8 *data, int data_len)
{
struct get_set_mib m;
m.type = type;
m.size = data_len;
m.index = index;
if (data_len > MIB_MAX_DATA_BYTES)
printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name);
memcpy(m.data, data, data_len);
atmel_send_command_wait(priv, CMD_Set_MIB_Vars, &m, MIB_HEADER_SIZE + data_len);
}
static void atmel_get_mib(struct atmel_private *priv, u8 type, u8 index,
u8 *data, int data_len)
{
struct get_set_mib m;
m.type = type;
m.size = data_len;
m.index = index;
if (data_len > MIB_MAX_DATA_BYTES)
printk(KERN_ALERT "%s: MIB buffer too small.\n", priv->dev->name);
atmel_send_command_wait(priv, CMD_Get_MIB_Vars, &m, MIB_HEADER_SIZE + data_len);
atmel_copy_to_host(priv->dev, data,
atmel_co(priv, CMD_BLOCK_PARAMETERS_OFFSET + MIB_HEADER_SIZE), data_len);
}
static void atmel_writeAR(struct net_device *dev, u16 data)
{
int i;
outw(data, dev->base_addr + AR);
/* Address register appears to need some convincing..... */
for (i = 0; data != inw(dev->base_addr + AR) && i < 10; i++)
outw(data, dev->base_addr + AR);
}
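/* The DR data port is 16 bits wide and the address register evidently
   auto-increments (a single atmel_writeAR() precedes each streamed copy),
   so bulk transfers move 16-bit words, with 8-bit accesses to handle an
   unaligned leading byte and an odd trailing byte. */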
static void atmel_copy_to_card(struct net_device *dev, u16 dest,
const unsigned char *src, u16 len)
{
int i;
atmel_writeAR(dev, dest);
if (dest % 2) {
atmel_write8(dev, DR, *src);
src++; len--;
}
for (i = len; i > 1 ; i -= 2) {
u8 lb = *src++;
u8 hb = *src++;
atmel_write16(dev, DR, lb | (hb << 8));
}
if (i)
atmel_write8(dev, DR, *src);
}
static void atmel_copy_to_host(struct net_device *dev, unsigned char *dest,
u16 src, u16 len)
{
int i;
atmel_writeAR(dev, src);
if (src % 2) {
*dest = atmel_read8(dev, DR);
dest++; len--;
}
for (i = len; i > 1 ; i -= 2) {
u16 hw = atmel_read16(dev, DR);
*dest++ = hw;
*dest++ = hw >> 8;
}
if (i)
*dest = atmel_read8(dev, DR);
}
static void atmel_set_gcr(struct net_device *dev, u16 mask)
{
outw(inw(dev->base_addr + GCR) | mask, dev->base_addr + GCR);
}
static void atmel_clear_gcr(struct net_device *dev, u16 mask)
{
outw(inw(dev->base_addr + GCR) & ~mask, dev->base_addr + GCR);
}
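/* Mutual exclusion with the on-card MAC, Dekker-style: wait for the MAC's
   lockout flag to drop, raise ours, then re-check for a race; on collision
   back off and retry up to 20 times. Returns 1 on success, 0 on timeout. */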
static int atmel_lock_mac(struct atmel_private *priv)
{
int i, j = 20;
retry:
for (i = 5000; i; i--) {
if (!atmel_rmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_HOST_OFFSET)))
break;
udelay(20);
}
if (!i)
return 0; /* timed out */
atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 1);
if (atmel_rmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_HOST_OFFSET))) {
atmel_wmem8(priv, atmel_hi(priv, IFACE_LOCKOUT_MAC_OFFSET), 0);
if (!j--)
return 0; /* timed out */
goto retry;
}
return 1;
}
static void atmel_wmem32(struct atmel_private *priv, u16 pos, u32 data)
{
atmel_writeAR(priv->dev, pos);
atmel_write16(priv->dev, DR, data); /* card is little-endian */
atmel_write16(priv->dev, DR, data >> 16);
}
/***************************************************************************/
/* There follows the source form of the MAC address reading firmware */
/***************************************************************************/
#if 0
/* Copyright 2003 Matthew T. Russotto */
/* But derived from the Atmel 76C502 firmware written by Atmel and */
/* included in "atmel wireless lan drivers" package */
/*
This file is part of net.russotto.AtmelMACFW, hereto referred to
as AtmelMACFW
AtmelMACFW is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License version 2
as published by the Free Software Foundation.
AtmelMACFW is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with AtmelMACFW; if not, see <http://www.gnu.org/licenses/>.
****************************************************************************/
/* This firmware should work on the 76C502 RFMD, RFMD_D, and RFMD_E */
/* It will probably work on the 76C504 and 76C502 RFMD_3COM */
/* It only works on SPI EEPROM versions of the card. */
/* This firmware initializes the SPI controller and clock, reads the MAC */
/* address from the EEPROM into SRAM, and puts the SRAM offset of the MAC */
/* address in MR2, and sets MR3 to 0x10 to indicate it is done */
/* It also puts a complete copy of the EEPROM in SRAM with the offset in */
/* MR4, for investigational purposes (maybe we can determine chip type */
/* from that?) */
.org 0
.set MRBASE, 0x8000000
.set CPSR_INITIAL, 0xD3 /* IRQ/FIQ disabled, ARM mode, Supervisor state */
.set CPSR_USER, 0xD1 /* IRQ/FIQ disabled, ARM mode, USER state */
.set SRAM_BASE, 0x02000000
.set SP_BASE, 0x0F300000
.set UNK_BASE, 0x0F000000 /* Some internal device, but which one? */
.set SPI_CGEN_BASE, 0x0E000000 /* Some internal device, but which one? */
.set UNK3_BASE, 0x02014000 /* Some internal device, but which one? */
.set STACK_BASE, 0x5600
.set SP_SR, 0x10
.set SP_TDRE, 2 /* status register bit -- TDR empty */
.set SP_RDRF, 1 /* status register bit -- RDR full */
.set SP_SWRST, 0x80
.set SP_SPIEN, 0x1
.set SP_CR, 0 /* control register */
.set SP_MR, 4 /* mode register */
.set SP_RDR, 0x08 /* Read Data Register */
.set SP_TDR, 0x0C /* Transmit Data Register */
.set SP_CSR0, 0x30 /* chip select registers */
.set SP_CSR1, 0x34
.set SP_CSR2, 0x38
.set SP_CSR3, 0x3C
.set NVRAM_CMD_RDSR, 5 /* read status register */
.set NVRAM_CMD_READ, 3 /* read data */
.set NVRAM_SR_RDY, 1 /* RDY bit. This bit is inverted */
.set SPI_8CLOCKS, 0xFF /* Writing this to the TDR doesn't do anything to the
serial output, since SO is normally high. But it
does cause 8 clock cycles and thus 8 bits to be
clocked in to the chip. See Atmel's SPI
controller (e.g. AT91M55800) timing and 4K
SPI EEPROM manuals */
.set NVRAM_SCRATCH, 0x02000100 /* arbitrary area for scratchpad memory */
.set NVRAM_IMAGE, 0x02000200
.set NVRAM_LENGTH, 0x0200
.set MAC_ADDRESS_MIB, SRAM_BASE
.set MAC_ADDRESS_LENGTH, 6
.set MAC_BOOT_FLAG, 0x10
.set MR1, 0
.set MR2, 4
.set MR3, 8
.set MR4, 0xC
RESET_VECTOR:
b RESET_HANDLER
UNDEF_VECTOR:
b HALT1
SWI_VECTOR:
b HALT1
IABORT_VECTOR:
b HALT1
DABORT_VECTOR:
RESERVED_VECTOR:
b HALT1
IRQ_VECTOR:
b HALT1
FIQ_VECTOR:
b HALT1
HALT1: b HALT1
RESET_HANDLER:
mov r0, #CPSR_INITIAL
msr CPSR_c, r0 /* This is probably unnecessary */
/* I'm guessing this is initializing clock generator electronics for SPI */
ldr r0, =SPI_CGEN_BASE
mov r1, #0
mov r1, r1, lsl #3
orr r1, r1, #0
str r1, [r0]
ldr r1, [r0, #28]
bic r1, r1, #16
str r1, [r0, #28]
mov r1, #1
str r1, [r0, #8]
ldr r0, =MRBASE
mov r1, #0
strh r1, [r0, #MR1]
strh r1, [r0, #MR2]
strh r1, [r0, #MR3]
strh r1, [r0, #MR4]
mov sp, #STACK_BASE
bl SP_INIT
mov r0, #10
bl DELAY9
bl GET_MAC_ADDR
bl GET_WHOLE_NVRAM
ldr r0, =MRBASE
ldr r1, =MAC_ADDRESS_MIB
strh r1, [r0, #MR2]
ldr r1, =NVRAM_IMAGE
strh r1, [r0, #MR4]
mov r1, #MAC_BOOT_FLAG
strh r1, [r0, #MR3]
HALT2: b HALT2
.func Get_Whole_NVRAM, GET_WHOLE_NVRAM
GET_WHOLE_NVRAM:
stmdb sp!, {lr}
mov r2, #0 /* 0th bytes of NVRAM */
mov r3, #NVRAM_LENGTH
mov r1, #0 /* not used in routine */
ldr r0, =NVRAM_IMAGE
bl NVRAM_XFER
ldmia sp!, {lr}
bx lr
.endfunc
.func Get_MAC_Addr, GET_MAC_ADDR
GET_MAC_ADDR:
stmdb sp!, {lr}
mov r2, #0x120 /* address of MAC Address within NVRAM */
mov r3, #MAC_ADDRESS_LENGTH
mov r1, #0 /* not used in routine */
ldr r0, =MAC_ADDRESS_MIB
bl NVRAM_XFER
ldmia sp!, {lr}
bx lr
.endfunc
.ltorg
.func Delay9, DELAY9
DELAY9:
adds r0, r0, r0, LSL #3 /* r0 = r0 * 9 */
DELAYLOOP:
beq DELAY9_done
subs r0, r0, #1
b DELAYLOOP
DELAY9_done:
bx lr
.endfunc
.func SP_Init, SP_INIT
SP_INIT:
mov r1, #SP_SWRST
ldr r0, =SP_BASE
str r1, [r0, #SP_CR] /* reset the SPI */
mov r1, #0
str r1, [r0, #SP_CR] /* release SPI from reset state */
mov r1, #SP_SPIEN
str r1, [r0, #SP_MR] /* set the SPI to MASTER mode*/
str r1, [r0, #SP_CR] /* enable the SPI */
/* My guess would be this turns on the SPI clock */
ldr r3, =SPI_CGEN_BASE
ldr r1, [r3, #28]
orr r1, r1, #0x2000
str r1, [r3, #28]
ldr r1, =0x2000c01
str r1, [r0, #SP_CSR0]
ldr r1, =0x2000201
str r1, [r0, #SP_CSR1]
str r1, [r0, #SP_CSR2]
str r1, [r0, #SP_CSR3]
ldr r1, [r0, #SP_SR]
ldr r0, [r0, #SP_RDR]
bx lr
.endfunc
.func NVRAM_Init, NVRAM_INIT
NVRAM_INIT:
ldr r1, =SP_BASE
ldr r0, [r1, #SP_RDR]
mov r0, #NVRAM_CMD_RDSR
str r0, [r1, #SP_TDR]
SP_loop1:
ldr r0, [r1, #SP_SR]
tst r0, #SP_TDRE
beq SP_loop1
mov r0, #SPI_8CLOCKS
str r0, [r1, #SP_TDR]
SP_loop2:
ldr r0, [r1, #SP_SR]
tst r0, #SP_TDRE
beq SP_loop2
ldr r0, [r1, #SP_RDR]
SP_loop3:
ldr r0, [r1, #SP_SR]
tst r0, #SP_RDRF
beq SP_loop3
ldr r0, [r1, #SP_RDR]
and r0, r0, #255
bx lr
.endfunc
.func NVRAM_Xfer, NVRAM_XFER
/* r0 = dest address */
/* r1 = not used */
/* r2 = src address within NVRAM */
/* r3 = length */
NVRAM_XFER:
stmdb sp!, {r4, r5, lr}
mov r5, r0 /* save r0 (dest address) */
mov r4, r3 /* save r3 (length) */
mov r0, r2, LSR #5 /* SPI memories put A8 in the command field */
and r0, r0, #8
add r0, r0, #NVRAM_CMD_READ
ldr r1, =NVRAM_SCRATCH
strb r0, [r1, #0] /* save command in NVRAM_SCRATCH[0] */
strb r2, [r1, #1] /* save low byte of source address in NVRAM_SCRATCH[1] */
_local1:
bl NVRAM_INIT
tst r0, #NVRAM_SR_RDY
bne _local1
mov r0, #20
bl DELAY9
mov r2, r4 /* length */
mov r1, r5 /* dest address */
mov r0, #2 /* bytes to transfer in command */
bl NVRAM_XFER2
ldmia sp!, {r4, r5, lr}
bx lr
.endfunc
.func NVRAM_Xfer2, NVRAM_XFER2
NVRAM_XFER2:
stmdb sp!, {r4, r5, r6, lr}
ldr r4, =SP_BASE
mov r3, #0
cmp r0, #0
bls _local2
ldr r5, =NVRAM_SCRATCH
_local4:
ldrb r6, [r5, r3]
str r6, [r4, #SP_TDR]
_local3:
ldr r6, [r4, #SP_SR]
tst r6, #SP_TDRE
beq _local3
add r3, r3, #1
cmp r3, r0 /* r0 is # of bytes to send out (command+addr) */
blo _local4
_local2:
mov r3, #SPI_8CLOCKS
str r3, [r4, #SP_TDR]
ldr r0, [r4, #SP_RDR]
_local5:
ldr r0, [r4, #SP_SR]
tst r0, #SP_RDRF
beq _local5
ldr r0, [r4, #SP_RDR] /* what's this byte? It's the byte read while writing the TDR -- nonsense, because the NVRAM doesn't read and write at the same time */
mov r0, #0
cmp r2, #0 /* r2 is # of bytes to copy in */
bls _local6
_local7:
ldr r5, [r4, #SP_SR]
tst r5, #SP_TDRE
beq _local7
str r3, [r4, #SP_TDR] /* r3 has SPI_8CLOCKS */
_local8:
ldr r5, [r4, #SP_SR]
tst r5, #SP_RDRF
beq _local8
ldr r5, [r4, #SP_RDR] /* but didn't we read this byte above? */
strb r5, [r1], #1 /* postindexed */
add r0, r0, #1
cmp r0, r2
blo _local7 /* since we don't send another address, the NVRAM must be capable of sequential reads */
_local6:
mov r0, #200
bl DELAY9
ldmia sp!, {r4, r5, r6, lr}
bx lr
#endif
|
linux-master
|
drivers/net/wireless/atmel/atmel.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/* Freescale QUICC Engine HDLC Device Driver
*
* Copyright 2016 Freescale Semiconductor Inc.
*/
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/hdlc.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/platform_device.h>
#include <linux/sched.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/stddef.h>
#include <soc/fsl/qe/qe_tdm.h>
#include <uapi/linux/if_arp.h>
#include "fsl_ucc_hdlc.h"
#define DRV_DESC "Freescale QE UCC HDLC Driver"
#define DRV_NAME "ucc_hdlc"
#define TDM_PPPOHT_SLIC_MAXIN
#define RX_BD_ERRORS (R_CD_S | R_OV_S | R_CR_S | R_AB_S | R_NO_S | R_LG_S)
static struct ucc_tdm_info utdm_primary_info = {
.uf_info = {
.tsa = 0,
.cdp = 0,
.cds = 1,
.ctsp = 1,
.ctss = 1,
.revd = 0,
.urfs = 256,
.utfs = 256,
.urfet = 128,
.urfset = 192,
.utfet = 128,
.utftt = 0x40,
.ufpt = 256,
.mode = UCC_FAST_PROTOCOL_MODE_HDLC,
.ttx_trx = UCC_FAST_GUMR_TRANSPARENT_TTX_TRX_NORMAL,
.tenc = UCC_FAST_TX_ENCODING_NRZ,
.renc = UCC_FAST_RX_ENCODING_NRZ,
.tcrc = UCC_FAST_16_BIT_CRC,
.synl = UCC_FAST_SYNC_LEN_NOT_USED,
},
.si_info = {
#ifdef TDM_PPPOHT_SLIC_MAXIN
.simr_rfsd = 1,
.simr_tfsd = 2,
#else
.simr_rfsd = 0,
.simr_tfsd = 0,
#endif
.simr_crt = 0,
.simr_sl = 0,
.simr_ce = 1,
.simr_fe = 1,
.simr_gm = 0,
},
};
static struct ucc_tdm_info utdm_info[UCC_MAX_NUM];
static int uhdlc_init(struct ucc_hdlc_private *priv)
{
struct ucc_tdm_info *ut_info;
struct ucc_fast_info *uf_info;
u32 cecr_subblock;
u16 bd_status;
int ret, i;
void *bd_buffer;
dma_addr_t bd_dma_addr;
s32 riptr;
s32 tiptr;
u32 gumr;
ut_info = priv->ut_info;
uf_info = &ut_info->uf_info;
if (priv->tsa) {
uf_info->tsa = 1;
uf_info->ctsp = 1;
uf_info->cds = 1;
uf_info->ctss = 1;
} else {
uf_info->cds = 0;
uf_info->ctsp = 0;
uf_info->ctss = 0;
}
/* This sets the HPM field in the CMXUCR register, which configures an
 * open-drain connected HDLC bus
 */
if (priv->hdlc_bus)
uf_info->brkpt_support = 1;
uf_info->uccm_mask = ((UCC_HDLC_UCCE_RXB | UCC_HDLC_UCCE_RXF |
UCC_HDLC_UCCE_TXB) << 16);
ret = ucc_fast_init(uf_info, &priv->uccf);
if (ret) {
dev_err(priv->dev, "Failed to init uccf.");
return ret;
}
priv->uf_regs = priv->uccf->uf_regs;
ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
/* Loopback mode */
if (priv->loopback) {
dev_info(priv->dev, "Loopback Mode\n");
/* use the same clock when working in loopback */
qe_setbrg(ut_info->uf_info.rx_clock, 20000000, 1);
gumr = ioread32be(&priv->uf_regs->gumr);
gumr |= (UCC_FAST_GUMR_LOOPBACK | UCC_FAST_GUMR_CDS |
UCC_FAST_GUMR_TCI);
gumr &= ~(UCC_FAST_GUMR_CTSP | UCC_FAST_GUMR_RSYN);
iowrite32be(gumr, &priv->uf_regs->gumr);
}
/* Initialize SI */
if (priv->tsa)
ucc_tdm_init(priv->utdm, priv->ut_info);
/* Write to QE CECR, UCCx channel to Stop Transmission */
cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
ret = qe_issue_cmd(QE_STOP_TX, cecr_subblock,
QE_CR_PROTOCOL_UNSPECIFIED, 0);
/* Set UPSMR normal mode (needs fixing) */
iowrite32be(0, &priv->uf_regs->upsmr);
/* hdlc_bus mode */
if (priv->hdlc_bus) {
u32 upsmr;
dev_info(priv->dev, "HDLC bus Mode\n");
upsmr = ioread32be(&priv->uf_regs->upsmr);
/* bus mode and retransmit enable, with collision window
* set to 8 bytes
*/
upsmr |= UCC_HDLC_UPSMR_RTE | UCC_HDLC_UPSMR_BUS |
UCC_HDLC_UPSMR_CW8;
iowrite32be(upsmr, &priv->uf_regs->upsmr);
/* explicitly disable CDS & CTSP */
gumr = ioread32be(&priv->uf_regs->gumr);
gumr &= ~(UCC_FAST_GUMR_CDS | UCC_FAST_GUMR_CTSP);
/* set automatic sync to explicitly ignore CD signal */
gumr |= UCC_FAST_GUMR_SYNL_AUTO;
iowrite32be(gumr, &priv->uf_regs->gumr);
}
priv->rx_ring_size = RX_BD_RING_LEN;
priv->tx_ring_size = TX_BD_RING_LEN;
/* Alloc Rx BD */
priv->rx_bd_base = dma_alloc_coherent(priv->dev,
RX_BD_RING_LEN * sizeof(struct qe_bd),
&priv->dma_rx_bd, GFP_KERNEL);
if (!priv->rx_bd_base) {
dev_err(priv->dev, "Cannot allocate MURAM memory for RxBDs\n");
ret = -ENOMEM;
goto free_uccf;
}
/* Alloc Tx BD */
priv->tx_bd_base = dma_alloc_coherent(priv->dev,
TX_BD_RING_LEN * sizeof(struct qe_bd),
&priv->dma_tx_bd, GFP_KERNEL);
if (!priv->tx_bd_base) {
dev_err(priv->dev, "Cannot allocate MURAM memory for TxBDs\n");
ret = -ENOMEM;
goto free_rx_bd;
}
/* Alloc parameter ram for ucc hdlc */
priv->ucc_pram_offset = qe_muram_alloc(sizeof(struct ucc_hdlc_param),
ALIGNMENT_OF_UCC_HDLC_PRAM);
if (priv->ucc_pram_offset < 0) {
dev_err(priv->dev, "Can not allocate MURAM for hdlc parameter.\n");
ret = -ENOMEM;
goto free_tx_bd;
}
priv->rx_skbuff = kcalloc(priv->rx_ring_size,
sizeof(*priv->rx_skbuff),
GFP_KERNEL);
if (!priv->rx_skbuff) {
ret = -ENOMEM;
goto free_ucc_pram;
}
priv->tx_skbuff = kcalloc(priv->tx_ring_size,
sizeof(*priv->tx_skbuff),
GFP_KERNEL);
if (!priv->tx_skbuff) {
ret = -ENOMEM;
goto free_rx_skbuff;
}
priv->skb_curtx = 0;
priv->skb_dirtytx = 0;
priv->curtx_bd = priv->tx_bd_base;
priv->dirty_tx = priv->tx_bd_base;
priv->currx_bd = priv->rx_bd_base;
priv->currx_bdnum = 0;
/* init parameter base */
cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
ret = qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
qe_muram_addr(priv->ucc_pram_offset);
/* Zero out parameter ram */
memset_io(priv->ucc_pram, 0, sizeof(struct ucc_hdlc_param));
/* Alloc riptr, tiptr */
riptr = qe_muram_alloc(32, 32);
if (riptr < 0) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Receive internal temp data pointer\n");
ret = -ENOMEM;
goto free_tx_skbuff;
}
tiptr = qe_muram_alloc(32, 32);
if (tiptr < 0) {
dev_err(priv->dev, "Cannot allocate MURAM mem for Transmit internal temp data pointer\n");
ret = -ENOMEM;
goto free_riptr;
}
if (riptr != (u16)riptr || tiptr != (u16)tiptr) {
dev_err(priv->dev, "MURAM allocation out of addressable range\n");
ret = -ENOMEM;
goto free_tiptr;
}
/* Set RIPTR, TIPTR */
iowrite16be(riptr, &priv->ucc_pram->riptr);
iowrite16be(tiptr, &priv->ucc_pram->tiptr);
/* Set MRBLR */
iowrite16be(MAX_RX_BUF_LENGTH, &priv->ucc_pram->mrblr);
/* Set RBASE, TBASE */
iowrite32be(priv->dma_rx_bd, &priv->ucc_pram->rbase);
iowrite32be(priv->dma_tx_bd, &priv->ucc_pram->tbase);
/* Set RSTATE, TSTATE */
iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->rstate);
iowrite32be(BMR_GBL | BMR_BIG_ENDIAN, &priv->ucc_pram->tstate);
/* Set C_MASK, C_PRES for 16bit CRC */
iowrite32be(CRC_16BIT_MASK, &priv->ucc_pram->c_mask);
iowrite32be(CRC_16BIT_PRES, &priv->ucc_pram->c_pres);
iowrite16be(MAX_FRAME_LENGTH, &priv->ucc_pram->mflr);
iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfthr);
iowrite16be(DEFAULT_RFTHR, &priv->ucc_pram->rfcnt);
iowrite16be(priv->hmask, &priv->ucc_pram->hmask);
iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr1);
iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr2);
iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr3);
iowrite16be(DEFAULT_HDLC_ADDR, &priv->ucc_pram->haddr4);
/* Get BD buffer */
bd_buffer = dma_alloc_coherent(priv->dev,
(RX_BD_RING_LEN + TX_BD_RING_LEN) * MAX_RX_BUF_LENGTH,
&bd_dma_addr, GFP_KERNEL);
if (!bd_buffer) {
dev_err(priv->dev, "Could not allocate buffer descriptors\n");
ret = -ENOMEM;
goto free_tiptr;
}
priv->rx_buffer = bd_buffer;
priv->tx_buffer = bd_buffer + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
priv->dma_rx_addr = bd_dma_addr;
priv->dma_tx_addr = bd_dma_addr + RX_BD_RING_LEN * MAX_RX_BUF_LENGTH;
for (i = 0; i < RX_BD_RING_LEN; i++) {
if (i < (RX_BD_RING_LEN - 1))
bd_status = R_E_S | R_I_S;
else
bd_status = R_E_S | R_I_S | R_W_S;
priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
}
for (i = 0; i < TX_BD_RING_LEN; i++) {
if (i < (TX_BD_RING_LEN - 1))
bd_status = T_I_S | T_TC_S;
else
bd_status = T_I_S | T_TC_S | T_W_S;
priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
}
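/* Note on the two loops above: every BD is marked empty/interrupting
 * (R_E_S | R_I_S for rx, T_I_S | T_TC_S for tx); the last BD in each
 * ring additionally carries the wrap bit (R_W_S / T_W_S), so the QE
 * walks the rings circularly without any host-side pointer fixup. */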
dma_wmb();
return 0;
free_tiptr:
qe_muram_free(tiptr);
free_riptr:
qe_muram_free(riptr);
free_tx_skbuff:
kfree(priv->tx_skbuff);
free_rx_skbuff:
kfree(priv->rx_skbuff);
free_ucc_pram:
qe_muram_free(priv->ucc_pram_offset);
free_tx_bd:
dma_free_coherent(priv->dev,
TX_BD_RING_LEN * sizeof(struct qe_bd),
priv->tx_bd_base, priv->dma_tx_bd);
free_rx_bd:
dma_free_coherent(priv->dev,
RX_BD_RING_LEN * sizeof(struct qe_bd),
priv->rx_bd_base, priv->dma_rx_bd);
free_uccf:
ucc_fast_free(priv->uccf);
return ret;
}
static netdev_tx_t ucc_hdlc_tx(struct sk_buff *skb, struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)hdlc->priv;
struct qe_bd *bd;
u16 bd_status;
unsigned long flags;
__be16 *proto_head;
switch (dev->type) {
case ARPHRD_RAWHDLC:
if (skb_headroom(skb) < HDLC_HEAD_LEN) {
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
netdev_err(dev, "No enough space for hdlc head\n");
return -ENOMEM;
}
skb_push(skb, HDLC_HEAD_LEN);
proto_head = (__be16 *)skb->data;
*proto_head = htons(DEFAULT_HDLC_HEAD);
dev->stats.tx_bytes += skb->len;
break;
case ARPHRD_PPP:
proto_head = (__be16 *)skb->data;
if (*proto_head != htons(DEFAULT_PPP_HEAD)) {
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
netdev_err(dev, "Wrong ppp header\n");
return NETDEV_TX_OK;
}
dev->stats.tx_bytes += skb->len;
break;
case ARPHRD_ETHER:
dev->stats.tx_bytes += skb->len;
break;
default:
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
netdev_sent_queue(dev, skb->len);
spin_lock_irqsave(&priv->lock, flags);
dma_rmb();
/* Start from the next BD that should be filled */
bd = priv->curtx_bd;
bd_status = be16_to_cpu(bd->status);
/* Save the skb pointer so we can free it later */
priv->tx_skbuff[priv->skb_curtx] = skb;
/* Update the current skb pointer (wrapping if this was the last) */
priv->skb_curtx =
(priv->skb_curtx + 1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
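/* TX_RING_MOD_MASK presumably expands to (len - 1); with a power-of-two
 * ring length (e.g. 16, mask 0xf) the AND wraps the index 15 -> 0
 * without a conditional. */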
/* copy skb data to tx buffer for sdma processing */
memcpy(priv->tx_buffer + (be32_to_cpu(bd->buf) - priv->dma_tx_addr),
skb->data, skb->len);
/* set bd status and length */
bd_status = (bd_status & T_W_S) | T_R_S | T_I_S | T_L_S | T_TC_S;
bd->length = cpu_to_be16(skb->len);
bd->status = cpu_to_be16(bd_status);
/* Move to next BD in the ring */
if (!(bd_status & T_W_S))
bd += 1;
else
bd = priv->tx_bd_base;
if (bd == priv->dirty_tx) {
if (!netif_queue_stopped(dev))
netif_stop_queue(dev);
}
priv->curtx_bd = bd;
spin_unlock_irqrestore(&priv->lock, flags);
return NETDEV_TX_OK;
}
static int hdlc_tx_restart(struct ucc_hdlc_private *priv)
{
u32 cecr_subblock;
cecr_subblock =
ucc_fast_get_qe_cr_subblock(priv->ut_info->uf_info.ucc_num);
qe_issue_cmd(QE_RESTART_TX, cecr_subblock,
QE_CR_PROTOCOL_UNSPECIFIED, 0);
return 0;
}
static int hdlc_tx_done(struct ucc_hdlc_private *priv)
{
/* Start from the next BD that should be filled */
struct net_device *dev = priv->ndev;
unsigned int bytes_sent = 0;
int howmany = 0;
struct qe_bd *bd; /* BD pointer */
u16 bd_status;
int tx_restart = 0;
dma_rmb();
bd = priv->dirty_tx;
bd_status = be16_to_cpu(bd->status);
/* Normal processing. */
while ((bd_status & T_R_S) == 0) {
struct sk_buff *skb;
if (bd_status & T_UN_S) { /* Underrun */
dev->stats.tx_fifo_errors++;
tx_restart = 1;
}
if (bd_status & T_CT_S) { /* Carrier lost */
dev->stats.tx_carrier_errors++;
tx_restart = 1;
}
/* BD contains already transmitted buffer. */
/* Handle the transmitted buffer and release */
/* the BD to be used with the current frame */
skb = priv->tx_skbuff[priv->skb_dirtytx];
if (!skb)
break;
howmany++;
bytes_sent += skb->len;
dev->stats.tx_packets++;
memset(priv->tx_buffer +
(be32_to_cpu(bd->buf) - priv->dma_tx_addr),
0, skb->len);
dev_consume_skb_irq(skb);
priv->tx_skbuff[priv->skb_dirtytx] = NULL;
priv->skb_dirtytx =
(priv->skb_dirtytx +
1) & TX_RING_MOD_MASK(TX_BD_RING_LEN);
/* We freed a buffer, so now we can restart transmission */
if (netif_queue_stopped(dev))
netif_wake_queue(dev);
/* Advance the confirmation BD pointer */
if (!(bd_status & T_W_S))
bd += 1;
else
bd = priv->tx_bd_base;
bd_status = be16_to_cpu(bd->status);
}
priv->dirty_tx = bd;
if (tx_restart)
hdlc_tx_restart(priv);
netdev_completed_queue(dev, howmany, bytes_sent);
return 0;
}
static int hdlc_rx_done(struct ucc_hdlc_private *priv, int rx_work_limit)
{
struct net_device *dev = priv->ndev;
struct sk_buff *skb = NULL;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct qe_bd *bd;
u16 bd_status;
u16 length, howmany = 0;
u8 *bdbuffer;
dma_rmb();
bd = priv->currx_bd;
bd_status = be16_to_cpu(bd->status);
/* while there are received buffers and BD is full (~R_E) */
while (!((bd_status & (R_E_S)) || (--rx_work_limit < 0))) {
if (bd_status & (RX_BD_ERRORS)) {
dev->stats.rx_errors++;
if (bd_status & R_CD_S)
dev->stats.collisions++;
if (bd_status & R_OV_S)
dev->stats.rx_fifo_errors++;
if (bd_status & R_CR_S)
dev->stats.rx_crc_errors++;
if (bd_status & R_AB_S)
dev->stats.rx_over_errors++;
if (bd_status & R_NO_S)
dev->stats.rx_frame_errors++;
if (bd_status & R_LG_S)
dev->stats.rx_length_errors++;
goto recycle;
}
bdbuffer = priv->rx_buffer +
(priv->currx_bdnum * MAX_RX_BUF_LENGTH);
length = be16_to_cpu(bd->length);
switch (dev->type) {
case ARPHRD_RAWHDLC:
bdbuffer += HDLC_HEAD_LEN;
length -= (HDLC_HEAD_LEN + HDLC_CRC_SIZE);
skb = dev_alloc_skb(length);
if (!skb) {
dev->stats.rx_dropped++;
return -ENOMEM;
}
skb_put(skb, length);
skb->len = length;
skb->dev = dev;
memcpy(skb->data, bdbuffer, length);
break;
case ARPHRD_PPP:
case ARPHRD_ETHER:
length -= HDLC_CRC_SIZE;
skb = dev_alloc_skb(length);
if (!skb) {
dev->stats.rx_dropped++;
return -ENOMEM;
}
skb_put(skb, length);
skb->len = length;
skb->dev = dev;
memcpy(skb->data, bdbuffer, length);
break;
}
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
howmany++;
if (hdlc->proto)
skb->protocol = hdlc_type_trans(skb, dev);
netif_receive_skb(skb);
recycle:
bd->status = cpu_to_be16((bd_status & R_W_S) | R_E_S | R_I_S);
/* update to point at the next bd */
if (bd_status & R_W_S) {
priv->currx_bdnum = 0;
bd = priv->rx_bd_base;
} else {
if (priv->currx_bdnum < (RX_BD_RING_LEN - 1))
priv->currx_bdnum += 1;
else
priv->currx_bdnum = RX_BD_RING_LEN - 1;
bd += 1;
}
bd_status = be16_to_cpu(bd->status);
}
dma_rmb();
priv->currx_bd = bd;
return howmany;
}
static int ucc_hdlc_poll(struct napi_struct *napi, int budget)
{
struct ucc_hdlc_private *priv = container_of(napi,
struct ucc_hdlc_private,
napi);
int howmany;
/* Tx event processing */
spin_lock(&priv->lock);
hdlc_tx_done(priv);
spin_unlock(&priv->lock);
howmany = 0;
howmany += hdlc_rx_done(priv, budget - howmany);
if (howmany < budget) {
napi_complete_done(napi, howmany);
qe_setbits_be32(priv->uccf->p_uccm,
(UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS) << 16);
}
return howmany;
}
static irqreturn_t ucc_hdlc_irq_handler(int irq, void *dev_id)
{
struct ucc_hdlc_private *priv = (struct ucc_hdlc_private *)dev_id;
struct net_device *dev = priv->ndev;
struct ucc_fast_private *uccf;
u32 ucce;
u32 uccm;
uccf = priv->uccf;
ucce = ioread32be(uccf->p_ucce);
uccm = ioread32be(uccf->p_uccm);
ucce &= uccm;
iowrite32be(ucce, uccf->p_ucce);
if (!ucce)
return IRQ_NONE;
if ((ucce >> 16) & (UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)) {
if (napi_schedule_prep(&priv->napi)) {
uccm &= ~((UCCE_HDLC_RX_EVENTS | UCCE_HDLC_TX_EVENTS)
<< 16);
iowrite32be(uccm, uccf->p_uccm);
__napi_schedule(&priv->napi);
}
}
/* Errors and other events */
if (ucce >> 16 & UCC_HDLC_UCCE_BSY)
dev->stats.rx_missed_errors++;
if (ucce >> 16 & UCC_HDLC_UCCE_TXE)
dev->stats.tx_errors++;
return IRQ_HANDLED;
}
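/* The handler above and ucc_hdlc_poll() form the usual NAPI pair: the
 * IRQ path masks RX/TX events in UCCM before scheduling, and the poll
 * routine re-enables them (qe_setbits_be32 on p_uccm) only after
 * consuming less than its budget, keeping interrupts off under load. */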
static int uhdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(te1_settings);
te1_settings line;
struct ucc_hdlc_private *priv = netdev_priv(dev);
switch (ifs->type) {
case IF_GET_IFACE:
ifs->type = IF_IFACE_E1;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&line, 0, sizeof(line));
line.clock_type = priv->clocking;
if (copy_to_user(ifs->ifs_ifsu.sync, &line, size))
return -EFAULT;
return 0;
default:
return hdlc_ioctl(dev, ifs);
}
}
static int uhdlc_open(struct net_device *dev)
{
u32 cecr_subblock;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ucc_hdlc_private *priv = hdlc->priv;
struct ucc_tdm *utdm = priv->utdm;
if (priv->hdlc_busy != 1) {
if (request_irq(priv->ut_info->uf_info.irq,
ucc_hdlc_irq_handler, 0, "hdlc", priv))
return -ENODEV;
cecr_subblock = ucc_fast_get_qe_cr_subblock(
priv->ut_info->uf_info.ucc_num);
qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
QE_CR_PROTOCOL_UNSPECIFIED, 0);
ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
/* Enable the TDM port */
if (priv->tsa)
qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
priv->hdlc_busy = 1;
netif_device_attach(priv->ndev);
napi_enable(&priv->napi);
netdev_reset_queue(dev);
netif_start_queue(dev);
hdlc_open(dev);
}
return 0;
}
static void uhdlc_memclean(struct ucc_hdlc_private *priv)
{
qe_muram_free(ioread16be(&priv->ucc_pram->riptr));
qe_muram_free(ioread16be(&priv->ucc_pram->tiptr));
if (priv->rx_bd_base) {
dma_free_coherent(priv->dev,
RX_BD_RING_LEN * sizeof(struct qe_bd),
priv->rx_bd_base, priv->dma_rx_bd);
priv->rx_bd_base = NULL;
priv->dma_rx_bd = 0;
}
if (priv->tx_bd_base) {
dma_free_coherent(priv->dev,
TX_BD_RING_LEN * sizeof(struct qe_bd),
priv->tx_bd_base, priv->dma_tx_bd);
priv->tx_bd_base = NULL;
priv->dma_tx_bd = 0;
}
if (priv->ucc_pram) {
qe_muram_free(priv->ucc_pram_offset);
priv->ucc_pram = NULL;
priv->ucc_pram_offset = 0;
}
kfree(priv->rx_skbuff);
priv->rx_skbuff = NULL;
kfree(priv->tx_skbuff);
priv->tx_skbuff = NULL;
if (priv->uf_regs) {
iounmap(priv->uf_regs);
priv->uf_regs = NULL;
}
if (priv->uccf) {
ucc_fast_free(priv->uccf);
priv->uccf = NULL;
}
if (priv->rx_buffer) {
dma_free_coherent(priv->dev,
RX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
priv->rx_buffer, priv->dma_rx_addr);
priv->rx_buffer = NULL;
priv->dma_rx_addr = 0;
}
if (priv->tx_buffer) {
dma_free_coherent(priv->dev,
TX_BD_RING_LEN * MAX_RX_BUF_LENGTH,
priv->tx_buffer, priv->dma_tx_addr);
priv->tx_buffer = NULL;
priv->dma_tx_addr = 0;
}
}
static int uhdlc_close(struct net_device *dev)
{
struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
struct ucc_tdm *utdm = priv->utdm;
u32 cecr_subblock;
napi_disable(&priv->napi);
cecr_subblock = ucc_fast_get_qe_cr_subblock(
priv->ut_info->uf_info.ucc_num);
qe_issue_cmd(QE_GRACEFUL_STOP_TX, cecr_subblock,
(u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
qe_issue_cmd(QE_CLOSE_RX_BD, cecr_subblock,
(u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
if (priv->tsa)
qe_clrbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
free_irq(priv->ut_info->uf_info.irq, priv);
netif_stop_queue(dev);
netdev_reset_queue(dev);
priv->hdlc_busy = 0;
return 0;
}
static int ucc_hdlc_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
struct ucc_hdlc_private *priv = dev_to_hdlc(dev)->priv;
if (encoding != ENCODING_NRZ &&
encoding != ENCODING_NRZI)
return -EINVAL;
if (parity != PARITY_NONE &&
parity != PARITY_CRC32_PR1_CCITT &&
parity != PARITY_CRC16_PR0_CCITT &&
parity != PARITY_CRC16_PR1_CCITT)
return -EINVAL;
priv->encoding = encoding;
priv->parity = parity;
return 0;
}
#ifdef CONFIG_PM
static void store_clk_config(struct ucc_hdlc_private *priv)
{
struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;
/* store si clk */
priv->cmxsi1cr_h = ioread32be(&qe_mux_reg->cmxsi1cr_h);
priv->cmxsi1cr_l = ioread32be(&qe_mux_reg->cmxsi1cr_l);
/* store si sync */
priv->cmxsi1syr = ioread32be(&qe_mux_reg->cmxsi1syr);
/* store ucc clk */
memcpy_fromio(priv->cmxucr, qe_mux_reg->cmxucr, 4 * sizeof(u32));
}
static void resume_clk_config(struct ucc_hdlc_private *priv)
{
struct qe_mux __iomem *qe_mux_reg = &qe_immr->qmx;
memcpy_toio(qe_mux_reg->cmxucr, priv->cmxucr, 4 * sizeof(u32));
iowrite32be(priv->cmxsi1cr_h, &qe_mux_reg->cmxsi1cr_h);
iowrite32be(priv->cmxsi1cr_l, &qe_mux_reg->cmxsi1cr_l);
iowrite32be(priv->cmxsi1syr, &qe_mux_reg->cmxsi1syr);
}
static int uhdlc_suspend(struct device *dev)
{
struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
struct ucc_fast __iomem *uf_regs;
if (!priv)
return -EINVAL;
if (!netif_running(priv->ndev))
return 0;
netif_device_detach(priv->ndev);
napi_disable(&priv->napi);
uf_regs = priv->uf_regs;
/* backup gumr guemr*/
priv->gumr = ioread32be(&uf_regs->gumr);
priv->guemr = ioread8(&uf_regs->guemr);
priv->ucc_pram_bak = kmalloc(sizeof(*priv->ucc_pram_bak),
GFP_KERNEL);
if (!priv->ucc_pram_bak)
return -ENOMEM;
/* backup HDLC parameter */
memcpy_fromio(priv->ucc_pram_bak, priv->ucc_pram,
sizeof(struct ucc_hdlc_param));
/* store the clk configuration */
store_clk_config(priv);
/* save power */
ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
return 0;
}
static int uhdlc_resume(struct device *dev)
{
struct ucc_hdlc_private *priv = dev_get_drvdata(dev);
struct ucc_tdm *utdm;
struct ucc_tdm_info *ut_info;
struct ucc_fast __iomem *uf_regs;
struct ucc_fast_private *uccf;
struct ucc_fast_info *uf_info;
int i;
u32 cecr_subblock;
u16 bd_status;
if (!priv)
return -EINVAL;
if (!netif_running(priv->ndev))
return 0;
utdm = priv->utdm;
ut_info = priv->ut_info;
uf_info = &ut_info->uf_info;
uf_regs = priv->uf_regs;
uccf = priv->uccf;
/* restore gumr guemr */
iowrite8(priv->guemr, &uf_regs->guemr);
iowrite32be(priv->gumr, &uf_regs->gumr);
/* Set Virtual Fifo registers */
iowrite16be(uf_info->urfs, &uf_regs->urfs);
iowrite16be(uf_info->urfet, &uf_regs->urfet);
iowrite16be(uf_info->urfset, &uf_regs->urfset);
iowrite16be(uf_info->utfs, &uf_regs->utfs);
iowrite16be(uf_info->utfet, &uf_regs->utfet);
iowrite16be(uf_info->utftt, &uf_regs->utftt);
/* utfb, urfb are offsets from MURAM base */
iowrite32be(uccf->ucc_fast_tx_virtual_fifo_base_offset, &uf_regs->utfb);
iowrite32be(uccf->ucc_fast_rx_virtual_fifo_base_offset, &uf_regs->urfb);
/* Rx Tx and sync clock routing */
resume_clk_config(priv);
iowrite32be(uf_info->uccm_mask, &uf_regs->uccm);
iowrite32be(0xffffffff, &uf_regs->ucce);
ucc_fast_disable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
/* rebuild SIRAM */
if (priv->tsa)
ucc_tdm_init(priv->utdm, priv->ut_info);
/* Write to QE CECR, UCCx channel to Stop Transmission */
cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
qe_issue_cmd(QE_STOP_TX, cecr_subblock,
(u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
/* Set UPSMR normal mode */
iowrite32be(0, &uf_regs->upsmr);
/* init parameter base */
cecr_subblock = ucc_fast_get_qe_cr_subblock(uf_info->ucc_num);
qe_issue_cmd(QE_ASSIGN_PAGE_TO_DEVICE, cecr_subblock,
QE_CR_PROTOCOL_UNSPECIFIED, priv->ucc_pram_offset);
priv->ucc_pram = (struct ucc_hdlc_param __iomem *)
qe_muram_addr(priv->ucc_pram_offset);
/* restore ucc parameter */
memcpy_toio(priv->ucc_pram, priv->ucc_pram_bak,
sizeof(struct ucc_hdlc_param));
kfree(priv->ucc_pram_bak);
/* rebuild BD entry */
for (i = 0; i < RX_BD_RING_LEN; i++) {
if (i < (RX_BD_RING_LEN - 1))
bd_status = R_E_S | R_I_S;
else
bd_status = R_E_S | R_I_S | R_W_S;
priv->rx_bd_base[i].status = cpu_to_be16(bd_status);
priv->rx_bd_base[i].buf = cpu_to_be32(priv->dma_rx_addr + i * MAX_RX_BUF_LENGTH);
}
for (i = 0; i < TX_BD_RING_LEN; i++) {
if (i < (TX_BD_RING_LEN - 1))
bd_status = T_I_S | T_TC_S;
else
bd_status = T_I_S | T_TC_S | T_W_S;
priv->tx_bd_base[i].status = cpu_to_be16(bd_status);
priv->tx_bd_base[i].buf = cpu_to_be32(priv->dma_tx_addr + i * MAX_RX_BUF_LENGTH);
}
dma_wmb();
/* if hdlc is busy enable TX and RX */
if (priv->hdlc_busy == 1) {
cecr_subblock = ucc_fast_get_qe_cr_subblock(
priv->ut_info->uf_info.ucc_num);
qe_issue_cmd(QE_INIT_TX_RX, cecr_subblock,
(u8)QE_CR_PROTOCOL_UNSPECIFIED, 0);
ucc_fast_enable(priv->uccf, COMM_DIR_RX | COMM_DIR_TX);
/* Enable the TDM port */
if (priv->tsa)
qe_setbits_8(&utdm->si_regs->siglmr1_h, 0x1 << utdm->tdm_port);
}
napi_enable(&priv->napi);
netif_device_attach(priv->ndev);
return 0;
}
static const struct dev_pm_ops uhdlc_pm_ops = {
.suspend = uhdlc_suspend,
.resume = uhdlc_resume,
.freeze = uhdlc_suspend,
.thaw = uhdlc_resume,
};
#define HDLC_PM_OPS (&uhdlc_pm_ops)
#else
#define HDLC_PM_OPS NULL
#endif
static void uhdlc_tx_timeout(struct net_device *ndev, unsigned int txqueue)
{
netdev_err(ndev, "%s\n", __func__);
}
static const struct net_device_ops uhdlc_ops = {
.ndo_open = uhdlc_open,
.ndo_stop = uhdlc_close,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_siocwandev = uhdlc_ioctl,
.ndo_tx_timeout = uhdlc_tx_timeout,
};
static int hdlc_map_iomem(char *name, int init_flag, void __iomem **ptr)
{
struct device_node *np;
struct platform_device *pdev;
struct resource *res;
static int siram_init_flag;
int ret = 0;
np = of_find_compatible_node(NULL, NULL, name);
if (!np)
return -EINVAL;
pdev = of_find_device_by_node(np);
if (!pdev) {
pr_err("%pOFn: failed to lookup pdev\n", np);
of_node_put(np);
return -EINVAL;
}
of_node_put(np);
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
ret = -EINVAL;
goto error_put_device;
}
*ptr = ioremap(res->start, resource_size(res));
if (!*ptr) {
ret = -ENOMEM;
goto error_put_device;
}
/* We've remapped the addresses, and we don't need the device any
* more, so we should release it.
*/
put_device(&pdev->dev);
if (init_flag && siram_init_flag == 0) {
memset_io(*ptr, 0, resource_size(res));
siram_init_flag = 1;
}
return 0;
error_put_device:
put_device(&pdev->dev);
return ret;
}
static int ucc_hdlc_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
struct ucc_hdlc_private *uhdlc_priv = NULL;
struct ucc_tdm_info *ut_info;
struct ucc_tdm *utdm = NULL;
struct resource res;
struct net_device *dev;
hdlc_device *hdlc;
int ucc_num;
const char *sprop;
int ret;
u32 val;
ret = of_property_read_u32_index(np, "cell-index", 0, &val);
if (ret) {
dev_err(&pdev->dev, "Invalid ucc property\n");
return -ENODEV;
}
ucc_num = val - 1;
if (ucc_num > (UCC_MAX_NUM - 1) || ucc_num < 0) {
dev_err(&pdev->dev, ": Invalid UCC num\n");
return -EINVAL;
}
memcpy(&utdm_info[ucc_num], &utdm_primary_info,
sizeof(utdm_primary_info));
ut_info = &utdm_info[ucc_num];
ut_info->uf_info.ucc_num = ucc_num;
sprop = of_get_property(np, "rx-clock-name", NULL);
if (sprop) {
ut_info->uf_info.rx_clock = qe_clock_source(sprop);
if ((ut_info->uf_info.rx_clock < QE_CLK_NONE) ||
(ut_info->uf_info.rx_clock > QE_CLK24)) {
dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
return -EINVAL;
}
} else {
dev_err(&pdev->dev, "Invalid rx-clock-name property\n");
return -EINVAL;
}
sprop = of_get_property(np, "tx-clock-name", NULL);
if (sprop) {
ut_info->uf_info.tx_clock = qe_clock_source(sprop);
if ((ut_info->uf_info.tx_clock < QE_CLK_NONE) ||
(ut_info->uf_info.tx_clock > QE_CLK24)) {
dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
return -EINVAL;
}
} else {
dev_err(&pdev->dev, "Invalid tx-clock-name property\n");
return -EINVAL;
}
ret = of_address_to_resource(np, 0, &res);
if (ret)
return -EINVAL;
ut_info->uf_info.regs = res.start;
ut_info->uf_info.irq = irq_of_parse_and_map(np, 0);
uhdlc_priv = kzalloc(sizeof(*uhdlc_priv), GFP_KERNEL);
if (!uhdlc_priv)
return -ENOMEM;
dev_set_drvdata(&pdev->dev, uhdlc_priv);
uhdlc_priv->dev = &pdev->dev;
uhdlc_priv->ut_info = ut_info;
uhdlc_priv->tsa = of_property_read_bool(np, "fsl,tdm-interface");
uhdlc_priv->loopback = of_property_read_bool(np, "fsl,ucc-internal-loopback");
uhdlc_priv->hdlc_bus = of_property_read_bool(np, "fsl,hdlc-bus");
if (uhdlc_priv->tsa == 1) {
utdm = kzalloc(sizeof(*utdm), GFP_KERNEL);
if (!utdm) {
ret = -ENOMEM;
dev_err(&pdev->dev, "No mem to alloc ucc tdm data\n");
goto free_uhdlc_priv;
}
uhdlc_priv->utdm = utdm;
ret = ucc_of_parse_tdm(np, utdm, ut_info);
if (ret)
goto free_utdm;
ret = hdlc_map_iomem("fsl,t1040-qe-si", 0,
(void __iomem **)&utdm->si_regs);
if (ret)
goto free_utdm;
ret = hdlc_map_iomem("fsl,t1040-qe-siram", 1,
(void __iomem **)&utdm->siram);
if (ret)
goto unmap_si_regs;
}
if (of_property_read_u16(np, "fsl,hmask", &uhdlc_priv->hmask))
uhdlc_priv->hmask = DEFAULT_ADDR_MASK;
ret = uhdlc_init(uhdlc_priv);
if (ret) {
dev_err(&pdev->dev, "Failed to init uhdlc\n");
goto undo_uhdlc_init;
}
dev = alloc_hdlcdev(uhdlc_priv);
if (!dev) {
ret = -ENOMEM;
pr_err("ucc_hdlc: unable to allocate memory\n");
goto undo_uhdlc_init;
}
uhdlc_priv->ndev = dev;
hdlc = dev_to_hdlc(dev);
dev->tx_queue_len = 16;
dev->netdev_ops = &uhdlc_ops;
dev->watchdog_timeo = 2 * HZ;
hdlc->attach = ucc_hdlc_attach;
hdlc->xmit = ucc_hdlc_tx;
netif_napi_add_weight(dev, &uhdlc_priv->napi, ucc_hdlc_poll, 32);
if (register_hdlc_device(dev)) {
ret = -ENOBUFS;
pr_err("ucc_hdlc: unable to register hdlc device\n");
goto free_dev;
}
return 0;
free_dev:
free_netdev(dev);
undo_uhdlc_init:
if (utdm)
iounmap(utdm->siram);
unmap_si_regs:
if (utdm)
iounmap(utdm->si_regs);
free_utdm:
if (uhdlc_priv->tsa)
kfree(utdm);
free_uhdlc_priv:
kfree(uhdlc_priv);
return ret;
}
static int ucc_hdlc_remove(struct platform_device *pdev)
{
struct ucc_hdlc_private *priv = dev_get_drvdata(&pdev->dev);
uhdlc_memclean(priv);
if (priv->utdm && priv->utdm->si_regs) {
iounmap(priv->utdm->si_regs);
priv->utdm->si_regs = NULL;
}
if (priv->utdm && priv->utdm->siram) {
iounmap(priv->utdm->siram);
priv->utdm->siram = NULL;
}
kfree(priv);
dev_info(&pdev->dev, "UCC based hdlc module removed\n");
return 0;
}
static const struct of_device_id fsl_ucc_hdlc_of_match[] = {
{
.compatible = "fsl,ucc-hdlc",
},
{},
};
MODULE_DEVICE_TABLE(of, fsl_ucc_hdlc_of_match);
static struct platform_driver ucc_hdlc_driver = {
.probe = ucc_hdlc_probe,
.remove = ucc_hdlc_remove,
.driver = {
.name = DRV_NAME,
.pm = HDLC_PM_OPS,
.of_match_table = fsl_ucc_hdlc_of_match,
},
};
module_platform_driver(ucc_hdlc_driver);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(DRV_DESC);
|
linux-master
|
drivers/net/wan/fsl_ucc_hdlc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic HDLC support routines for Linux
* X.25 support
*
* Copyright (C) 1999 - 2006 Krzysztof Halasa <[email protected]>
*/
#include <linux/errno.h>
#include <linux/gfp.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/lapb.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <net/x25device.h>
struct x25_state {
x25_hdlc_proto settings;
bool up;
spinlock_t up_lock; /* Protects "up" */
struct sk_buff_head rx_queue;
struct tasklet_struct rx_tasklet;
};
static int x25_ioctl(struct net_device *dev, struct if_settings *ifs);
static struct x25_state *state(hdlc_device *hdlc)
{
return hdlc->state;
}
static void x25_rx_queue_kick(struct tasklet_struct *t)
{
struct x25_state *x25st = from_tasklet(x25st, t, rx_tasklet);
struct sk_buff *skb = skb_dequeue(&x25st->rx_queue);
while (skb) {
netif_receive_skb_core(skb);
skb = skb_dequeue(&x25st->rx_queue);
}
}
/* These functions are callbacks called by LAPB layer */
static void x25_connect_disconnect(struct net_device *dev, int reason, int code)
{
struct x25_state *x25st = state(dev_to_hdlc(dev));
struct sk_buff *skb;
unsigned char *ptr;
skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
if (!skb)
return;
ptr = skb_put(skb, 1);
*ptr = code;
skb->protocol = x25_type_trans(skb, dev);
skb_queue_tail(&x25st->rx_queue, skb);
tasklet_schedule(&x25st->rx_tasklet);
}
static void x25_connected(struct net_device *dev, int reason)
{
x25_connect_disconnect(dev, reason, X25_IFACE_CONNECT);
}
static void x25_disconnected(struct net_device *dev, int reason)
{
x25_connect_disconnect(dev, reason, X25_IFACE_DISCONNECT);
}
static int x25_data_indication(struct net_device *dev, struct sk_buff *skb)
{
struct x25_state *x25st = state(dev_to_hdlc(dev));
unsigned char *ptr;
if (skb_cow(skb, 1)) {
kfree_skb(skb);
return NET_RX_DROP;
}
skb_push(skb, 1);
ptr = skb->data;
*ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
skb_queue_tail(&x25st->rx_queue, skb);
tasklet_schedule(&x25st->rx_tasklet);
return NET_RX_SUCCESS;
}
static void x25_data_transmit(struct net_device *dev, struct sk_buff *skb)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
skb_reset_network_header(skb);
skb->protocol = hdlc_type_trans(skb, dev);
if (dev_nit_active(dev))
dev_queue_xmit_nit(skb, dev);
hdlc->xmit(skb, dev); /* Ignore return value :-( */
}
static netdev_tx_t x25_xmit(struct sk_buff *skb, struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct x25_state *x25st = state(hdlc);
int result;
/* There should be a pseudo header of 1 byte added by upper layers.
* Check to make sure it is there before reading it.
*/
if (skb->len < 1) {
kfree_skb(skb);
return NETDEV_TX_OK;
}
spin_lock_bh(&x25st->up_lock);
if (!x25st->up) {
spin_unlock_bh(&x25st->up_lock);
kfree_skb(skb);
return NETDEV_TX_OK;
}
switch (skb->data[0]) {
case X25_IFACE_DATA: /* Data to be transmitted */
skb_pull(skb, 1);
result = lapb_data_request(dev, skb);
if (result != LAPB_OK)
dev_kfree_skb(skb);
spin_unlock_bh(&x25st->up_lock);
return NETDEV_TX_OK;
case X25_IFACE_CONNECT:
result = lapb_connect_request(dev);
if (result != LAPB_OK) {
if (result == LAPB_CONNECTED)
/* Send connect confirm. msg to level 3 */
x25_connected(dev, 0);
else
netdev_err(dev, "LAPB connect request failed, error code = %i\n",
result);
}
break;
case X25_IFACE_DISCONNECT:
result = lapb_disconnect_request(dev);
if (result != LAPB_OK) {
if (result == LAPB_NOTCONNECTED)
/* Send disconnect confirm. msg to level 3 */
x25_disconnected(dev, 0);
else
netdev_err(dev, "LAPB disconnect request failed, error code = %i\n",
result);
}
break;
default: /* to be defined */
break;
}
spin_unlock_bh(&x25st->up_lock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
static int x25_open(struct net_device *dev)
{
static const struct lapb_register_struct cb = {
.connect_confirmation = x25_connected,
.connect_indication = x25_connected,
.disconnect_confirmation = x25_disconnected,
.disconnect_indication = x25_disconnected,
.data_indication = x25_data_indication,
.data_transmit = x25_data_transmit,
};
hdlc_device *hdlc = dev_to_hdlc(dev);
struct x25_state *x25st = state(hdlc);
struct lapb_parms_struct params;
int result;
result = lapb_register(dev, &cb);
if (result != LAPB_OK)
return -ENOMEM;
result = lapb_getparms(dev, ¶ms);
if (result != LAPB_OK)
return -EINVAL;
if (state(hdlc)->settings.dce)
params.mode = params.mode | LAPB_DCE;
if (state(hdlc)->settings.modulo == 128)
params.mode = params.mode | LAPB_EXTENDED;
params.window = state(hdlc)->settings.window;
params.t1 = state(hdlc)->settings.t1;
params.t2 = state(hdlc)->settings.t2;
params.n2 = state(hdlc)->settings.n2;
result = lapb_setparms(dev, ¶ms);
if (result != LAPB_OK)
return -EINVAL;
spin_lock_bh(&x25st->up_lock);
x25st->up = true;
spin_unlock_bh(&x25st->up_lock);
return 0;
}
static void x25_close(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct x25_state *x25st = state(hdlc);
spin_lock_bh(&x25st->up_lock);
x25st->up = false;
spin_unlock_bh(&x25st->up_lock);
lapb_unregister(dev);
tasklet_kill(&x25st->rx_tasklet);
}
static int x25_rx(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct x25_state *x25st = state(hdlc);
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb) {
dev->stats.rx_dropped++;
return NET_RX_DROP;
}
spin_lock_bh(&x25st->up_lock);
if (!x25st->up) {
spin_unlock_bh(&x25st->up_lock);
kfree_skb(skb);
dev->stats.rx_dropped++;
return NET_RX_DROP;
}
if (lapb_data_received(dev, skb) == LAPB_OK) {
spin_unlock_bh(&x25st->up_lock);
return NET_RX_SUCCESS;
}
spin_unlock_bh(&x25st->up_lock);
dev->stats.rx_errors++;
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
static struct hdlc_proto proto = {
.open = x25_open,
.close = x25_close,
.ioctl = x25_ioctl,
.netif_rx = x25_rx,
.xmit = x25_xmit,
.module = THIS_MODULE,
};
static int x25_ioctl(struct net_device *dev, struct if_settings *ifs)
{
x25_hdlc_proto __user *x25_s = ifs->ifs_ifsu.x25;
const size_t size = sizeof(x25_hdlc_proto);
hdlc_device *hdlc = dev_to_hdlc(dev);
x25_hdlc_proto new_settings;
int result;
switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
ifs->type = IF_PROTO_X25;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(x25_s, &state(hdlc)->settings, size))
return -EFAULT;
return 0;
case IF_PROTO_X25:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
/* backward compatibility */
if (ifs->size == 0) {
new_settings.dce = 0;
new_settings.modulo = 8;
new_settings.window = 7;
new_settings.t1 = 3;
new_settings.t2 = 1;
new_settings.n2 = 10;
} else {
if (copy_from_user(&new_settings, x25_s, size))
return -EFAULT;
if ((new_settings.dce != 0 &&
new_settings.dce != 1) ||
(new_settings.modulo != 8 &&
new_settings.modulo != 128) ||
new_settings.window < 1 ||
(new_settings.modulo == 8 &&
new_settings.window > 7) ||
(new_settings.modulo == 128 &&
new_settings.window > 127) ||
new_settings.t1 < 1 ||
new_settings.t1 > 255 ||
new_settings.t2 < 1 ||
new_settings.t2 > 255 ||
new_settings.n2 < 1 ||
new_settings.n2 > 255)
return -EINVAL;
}
result = hdlc->attach(dev, ENCODING_NRZ,
PARITY_CRC16_PR1_CCITT);
if (result)
return result;
result = attach_hdlc_protocol(dev, &proto,
sizeof(struct x25_state));
if (result)
return result;
memcpy(&state(hdlc)->settings, &new_settings, size);
state(hdlc)->up = false;
spin_lock_init(&state(hdlc)->up_lock);
skb_queue_head_init(&state(hdlc)->rx_queue);
tasklet_setup(&state(hdlc)->rx_tasklet, x25_rx_queue_kick);
/* There's no header_ops so hard_header_len should be 0. */
dev->hard_header_len = 0;
/* When transmitting data:
* first we'll remove a pseudo header of 1 byte,
* then we'll prepend an LAPB header of at most 3 bytes.
*/
dev->needed_headroom = 3 - 1;
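/* i.e. a net 2 bytes of extra headroom: 1 byte of pseudo header is
 * removed before at most 3 bytes of LAPB header are prepended. */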
dev->type = ARPHRD_X25;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_off(dev);
return 0;
}
return -EINVAL;
}
static int __init hdlc_x25_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
static void __exit hdlc_x25_exit(void)
{
unregister_hdlc_protocol(&proto);
}
module_init(hdlc_x25_init);
module_exit(hdlc_x25_exit);
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("X.25 protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/net/wan/hdlc_x25.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic HDLC support routines for Linux
* Frame Relay support
*
* Copyright (C) 1999 - 2006 Krzysztof Halasa <[email protected]>
*
Theory of PVC state
DCE mode:
(exist,new) -> 0,0 when "PVC create" or if "link unreliable"
0,x -> 1,1 if "link reliable" when sending FULL STATUS
1,1 -> 1,0 if received FULL STATUS ACK
(active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
-> 1 when "PVC up" and (exist,new) = 1,0
DTE mode:
(exist,new,active) = FULL STATUS if "link reliable"
= 0, 0, 0 if "link unreliable"
No LMI:
active = open and "link reliable"
exist = new = not used
CCITT LMI: ITU-T Q.933 Annex A
ANSI LMI: ANSI T1.617 Annex D
CISCO LMI: the original, aka "Gang of Four" LMI
*/
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK
#undef DEBUG_PROTO
#undef DEBUG_PVC
#define FR_UI 0x03
#define FR_PAD 0x00
#define NLPID_IP 0xCC
#define NLPID_IPV6 0x8E
#define NLPID_SNAP 0x80
#define NLPID_PAD 0x00
#define NLPID_CCITT_ANSI_LMI 0x08
#define NLPID_CISCO_LMI 0x09
#define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */
#define LMI_CISCO_DLCI 1023
#define LMI_CALLREF 0x00 /* Call Reference */
#define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE 0x01 /* report type */
#define LMI_CCITT_REPTYPE 0x51
#define LMI_ANSI_CISCO_ALIVE 0x03 /* keep alive */
#define LMI_CCITT_ALIVE 0x53
#define LMI_ANSI_CISCO_PVCSTAT 0x07 /* PVC status */
#define LMI_CCITT_PVCSTAT 0x57
#define LMI_FULLREP 0x00 /* full report */
#define LMI_INTEGRITY 0x01 /* link integrity report */
#define LMI_SINGLE 0x02 /* single PVC report */
#define LMI_STATUS_ENQUIRY 0x75
#define LMI_STATUS 0x7D /* reply */
#define LMI_REPT_LEN 1 /* report type element length */
#define LMI_INTEG_LEN 2 /* link integrity element length */
#define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */
#define LMI_ANSI_LENGTH 14
struct fr_hdr {
#if defined(__LITTLE_ENDIAN_BITFIELD)
unsigned ea1: 1;
unsigned cr: 1;
unsigned dlcih: 6;
unsigned ea2: 1;
unsigned de: 1;
unsigned becn: 1;
unsigned fecn: 1;
unsigned dlcil: 4;
#else
unsigned dlcih: 6;
unsigned cr: 1;
unsigned ea1: 1;
unsigned dlcil: 4;
unsigned fecn: 1;
unsigned becn: 1;
unsigned de: 1;
unsigned ea2: 1;
#endif
} __packed;
struct pvc_device {
struct net_device *frad;
struct net_device *main;
struct net_device *ether; /* bridged Ethernet interface */
struct pvc_device *next; /* Sorted in ascending DLCI order */
int dlci;
int open_count;
struct {
unsigned int new: 1;
unsigned int active: 1;
unsigned int exist: 1;
unsigned int deleted: 1;
unsigned int fecn: 1;
unsigned int becn: 1;
unsigned int bandwidth; /* Cisco LMI reporting only */
} state;
};
struct frad_state {
fr_proto settings;
struct pvc_device *first_pvc;
int dce_pvc_count;
struct timer_list timer;
struct net_device *dev;
unsigned long last_poll;
int reliable;
int dce_changed;
int request;
int fullrep_sent;
u32 last_errors; /* last errors bit list */
u8 n391cnt;
u8 txseq; /* TX sequence number */
u8 rxseq; /* RX sequence number */
};
static int fr_ioctl(struct net_device *dev, struct if_settings *ifs);
static inline u16 q922_to_dlci(u8 *hdr)
{
return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}
static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
hdr[0] = (dlci >> 2) & 0xFC;
hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}
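/* Worked example (illustrative): DLCI 16 = 0b0000010000; the upper six
 * bits (000001) land in hdr[0] bits 7..2 and the lower four (0000) in
 * hdr[1] bits 7..4, so dlci_to_q922() yields hdr = { 0x04, 0x01 } with
 * the EA bit set in hdr[1], and q922_to_dlci() on those bytes gives 16
 * back. */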
static inline struct frad_state *state(hdlc_device *hdlc)
{
return (struct frad_state *)(hdlc->state);
}
static inline struct pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
{
struct pvc_device *pvc = state(hdlc)->first_pvc;
while (pvc) {
if (pvc->dlci == dlci)
return pvc;
if (pvc->dlci > dlci)
return NULL; /* the list is sorted */
pvc = pvc->next;
}
return NULL;
}
static struct pvc_device *add_pvc(struct net_device *dev, u16 dlci)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;
while (*pvc_p) {
if ((*pvc_p)->dlci == dlci)
return *pvc_p;
if ((*pvc_p)->dlci > dlci)
break; /* the list is sorted */
pvc_p = &(*pvc_p)->next;
}
pvc = kzalloc(sizeof(*pvc), GFP_ATOMIC);
#ifdef DEBUG_PVC
printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
#endif
if (!pvc)
return NULL;
pvc->dlci = dlci;
pvc->frad = dev;
pvc->next = *pvc_p; /* Put it in the chain */
*pvc_p = pvc;
return pvc;
}
static inline int pvc_is_used(struct pvc_device *pvc)
{
return pvc->main || pvc->ether;
}
static inline void pvc_carrier(int on, struct pvc_device *pvc)
{
if (on) {
if (pvc->main)
if (!netif_carrier_ok(pvc->main))
netif_carrier_on(pvc->main);
if (pvc->ether)
if (!netif_carrier_ok(pvc->ether))
netif_carrier_on(pvc->ether);
} else {
if (pvc->main)
if (netif_carrier_ok(pvc->main))
netif_carrier_off(pvc->main);
if (pvc->ether)
if (netif_carrier_ok(pvc->ether))
netif_carrier_off(pvc->ether);
}
}
static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
struct pvc_device **pvc_p = &state(hdlc)->first_pvc;
while (*pvc_p) {
if (!pvc_is_used(*pvc_p)) {
struct pvc_device *pvc = *pvc_p;
#ifdef DEBUG_PVC
printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
#endif
*pvc_p = pvc->next;
kfree(pvc);
continue;
}
pvc_p = &(*pvc_p)->next;
}
}
static inline struct net_device **get_dev_p(struct pvc_device *pvc,
int type)
{
if (type == ARPHRD_ETHER)
return &pvc->ether;
else
return &pvc->main;
}
static int fr_hard_header(struct sk_buff *skb, u16 dlci)
{
if (!skb->dev) { /* Control packets */
switch (dlci) {
case LMI_CCITT_ANSI_DLCI:
skb_push(skb, 4);
skb->data[3] = NLPID_CCITT_ANSI_LMI;
break;
case LMI_CISCO_DLCI:
skb_push(skb, 4);
skb->data[3] = NLPID_CISCO_LMI;
break;
default:
return -EINVAL;
}
} else if (skb->dev->type == ARPHRD_DLCI) {
switch (skb->protocol) {
case htons(ETH_P_IP):
skb_push(skb, 4);
skb->data[3] = NLPID_IP;
break;
case htons(ETH_P_IPV6):
skb_push(skb, 4);
skb->data[3] = NLPID_IPV6;
break;
default:
skb_push(skb, 10);
skb->data[3] = FR_PAD;
skb->data[4] = NLPID_SNAP;
/* OUI 00-00-00 indicates an Ethertype follows */
skb->data[5] = 0x00;
skb->data[6] = 0x00;
skb->data[7] = 0x00;
/* This should be an Ethertype: */
*(__be16 *)(skb->data + 8) = skb->protocol;
}
} else if (skb->dev->type == ARPHRD_ETHER) {
skb_push(skb, 10);
skb->data[3] = FR_PAD;
skb->data[4] = NLPID_SNAP;
/* OUI 00-80-C2 stands for the 802.1 organization */
skb->data[5] = 0x00;
skb->data[6] = 0x80;
skb->data[7] = 0xC2;
/* PID 00-07 stands for Ethernet frames without FCS */
skb->data[8] = 0x00;
skb->data[9] = 0x07;
} else {
return -EINVAL;
}
dlci_to_q922(skb->data, dlci);
skb->data[2] = FR_UI;
return 0;
}
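/* Sketch of the resulting wire header (assuming DLCI 16): an IP packet
 * on an ARPHRD_DLCI device gets the 4 bytes 04 01 03 CC (Q.922 address,
 * UI control, NLPID_IP), while a bridged ARPHRD_ETHER frame gets the
 * 10-byte SNAP form 04 01 03 00 80 00 80 C2 00 07. */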
static int pvc_open(struct net_device *dev)
{
struct pvc_device *pvc = dev->ml_priv;
if ((pvc->frad->flags & IFF_UP) == 0)
return -EIO; /* Frad must be UP in order to activate PVC */
if (pvc->open_count++ == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
if (state(hdlc)->settings.lmi == LMI_NONE)
pvc->state.active = netif_carrier_ok(pvc->frad);
pvc_carrier(pvc->state.active, pvc);
state(hdlc)->dce_changed = 1;
}
return 0;
}
static int pvc_close(struct net_device *dev)
{
struct pvc_device *pvc = dev->ml_priv;
if (--pvc->open_count == 0) {
hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
if (state(hdlc)->settings.lmi == LMI_NONE)
pvc->state.active = 0;
if (state(hdlc)->settings.dce) {
state(hdlc)->dce_changed = 1;
pvc->state.active = 0;
}
}
return 0;
}
static int pvc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
struct pvc_device *pvc = dev->ml_priv;
fr_proto_pvc_info info;
if (ifs->type == IF_GET_PROTO) {
if (dev->type == ARPHRD_ETHER)
ifs->type = IF_PROTO_FR_ETH_PVC;
else
ifs->type = IF_PROTO_FR_PVC;
if (ifs->size < sizeof(info)) {
/* data size wanted */
ifs->size = sizeof(info);
return -ENOBUFS;
}
info.dlci = pvc->dlci;
memcpy(info.master, pvc->frad->name, IFNAMSIZ);
if (copy_to_user(ifs->ifs_ifsu.fr_pvc_info,
&info, sizeof(info)))
return -EFAULT;
return 0;
}
return -EINVAL;
}
static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct pvc_device *pvc = dev->ml_priv;
if (!pvc->state.active)
goto drop;
if (dev->type == ARPHRD_ETHER) {
int pad = ETH_ZLEN - skb->len;
if (pad > 0) { /* Pad the frame with zeros */
if (__skb_pad(skb, pad, false))
goto drop;
skb_put(skb, pad);
}
}
/* We already requested the header space with dev->needed_headroom.
* So this is just a protection in case the upper layer didn't take
* dev->needed_headroom into consideration.
*/
if (skb_headroom(skb) < 10) {
struct sk_buff *skb2 = skb_realloc_headroom(skb, 10);
if (!skb2)
goto drop;
dev_kfree_skb(skb);
skb = skb2;
}
skb->dev = dev;
if (fr_hard_header(skb, pvc->dlci))
goto drop;
dev->stats.tx_bytes += skb->len;
dev->stats.tx_packets++;
if (pvc->state.fecn) /* TX Congestion counter */
dev->stats.tx_compressed++;
skb->dev = pvc->frad;
skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
dev_queue_xmit(skb);
return NETDEV_TX_OK;
drop:
dev->stats.tx_dropped++;
kfree_skb(skb);
return NETDEV_TX_OK;
}
static inline void fr_log_dlci_active(struct pvc_device *pvc)
{
netdev_info(pvc->frad, "DLCI %d [%s%s%s]%s %s\n",
pvc->dlci,
pvc->main ? pvc->main->name : "",
pvc->main && pvc->ether ? " " : "",
pvc->ether ? pvc->ether->name : "",
pvc->state.new ? " new" : "",
!pvc->state.exist ? "deleted" :
pvc->state.active ? "active" : "inactive");
}
static inline u8 fr_lmi_nextseq(u8 x)
{
x++;
return x ? x : 1;
}
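/* The TX sequence number thus cycles through 1..255, wrapping 255 -> 1
 * and never 0: fr_lmi_recv() treats a received sequence of 0 as an
 * error and forces a full-report request. */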
static void fr_lmi_send(struct net_device *dev, int fullrep)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct sk_buff *skb;
struct pvc_device *pvc = state(hdlc)->first_pvc;
int lmi = state(hdlc)->settings.lmi;
int dce = state(hdlc)->settings.dce;
int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
u8 *data;
int i = 0;
if (dce && fullrep) {
len += state(hdlc)->dce_pvc_count * (2 + stat_len);
if (len > HDLC_MAX_MRU) {
netdev_warn(dev, "Too many PVCs while sending LMI full report\n");
return;
}
}
skb = dev_alloc_skb(len);
if (!skb)
return;
memset(skb->data, 0, len);
skb_reserve(skb, 4);
if (lmi == LMI_CISCO)
fr_hard_header(skb, LMI_CISCO_DLCI);
else
fr_hard_header(skb, LMI_CCITT_ANSI_DLCI);
data = skb_tail_pointer(skb);
data[i++] = LMI_CALLREF;
data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
if (lmi == LMI_ANSI)
data[i++] = LMI_ANSI_LOCKSHIFT;
data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
LMI_ANSI_CISCO_REPTYPE;
data[i++] = LMI_REPT_LEN;
data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
data[i++] = LMI_INTEG_LEN;
data[i++] = state(hdlc)->txseq =
fr_lmi_nextseq(state(hdlc)->txseq);
data[i++] = state(hdlc)->rxseq;
if (dce && fullrep) {
while (pvc) {
data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
LMI_ANSI_CISCO_PVCSTAT;
data[i++] = stat_len;
/* LMI start/restart */
if (state(hdlc)->reliable && !pvc->state.exist) {
pvc->state.exist = pvc->state.new = 1;
fr_log_dlci_active(pvc);
}
/* ifconfig PVC up */
if (pvc->open_count && !pvc->state.active &&
pvc->state.exist && !pvc->state.new) {
pvc_carrier(1, pvc);
pvc->state.active = 1;
fr_log_dlci_active(pvc);
}
if (lmi == LMI_CISCO) {
data[i] = pvc->dlci >> 8;
data[i + 1] = pvc->dlci & 0xFF;
} else {
data[i] = (pvc->dlci >> 4) & 0x3F;
data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
data[i + 2] = 0x80;
}
if (pvc->state.new)
data[i + 2] |= 0x08;
else if (pvc->state.active)
data[i + 2] |= 0x02;
i += stat_len;
pvc = pvc->next;
}
}
skb_put(skb, i);
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
dev_queue_xmit(skb);
}
static void fr_set_link_state(int reliable, struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct pvc_device *pvc = state(hdlc)->first_pvc;
state(hdlc)->reliable = reliable;
if (reliable) {
netif_dormant_off(dev);
state(hdlc)->n391cnt = 0; /* Request full status */
state(hdlc)->dce_changed = 1;
if (state(hdlc)->settings.lmi == LMI_NONE) {
while (pvc) { /* Activate all PVCs */
pvc_carrier(1, pvc);
pvc->state.exist = pvc->state.active = 1;
pvc->state.new = 0;
pvc = pvc->next;
}
}
} else {
netif_dormant_on(dev);
while (pvc) { /* Deactivate all PVCs */
pvc_carrier(0, pvc);
pvc->state.exist = pvc->state.active = 0;
pvc->state.new = 0;
if (!state(hdlc)->settings.dce)
pvc->state.bandwidth = 0;
pvc = pvc->next;
}
}
}
static void fr_timer(struct timer_list *t)
{
struct frad_state *st = from_timer(st, t, timer);
struct net_device *dev = st->dev;
hdlc_device *hdlc = dev_to_hdlc(dev);
int i, cnt = 0, reliable;
u32 list;
if (state(hdlc)->settings.dce) {
reliable = state(hdlc)->request &&
time_before(jiffies, state(hdlc)->last_poll +
state(hdlc)->settings.t392 * HZ);
state(hdlc)->request = 0;
} else {
state(hdlc)->last_errors <<= 1; /* Shift the list */
if (state(hdlc)->request) {
if (state(hdlc)->reliable)
netdev_info(dev, "No LMI status reply received\n");
state(hdlc)->last_errors |= 1;
}
list = state(hdlc)->last_errors;
for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
cnt += (list & 1); /* errors count */
reliable = (cnt < state(hdlc)->settings.n392);
}
if (state(hdlc)->reliable != reliable) {
netdev_info(dev, "Link %sreliable\n", reliable ? "" : "un");
fr_set_link_state(reliable, dev);
}
if (state(hdlc)->settings.dce) {
state(hdlc)->timer.expires = jiffies +
state(hdlc)->settings.t392 * HZ;
} else {
if (state(hdlc)->n391cnt)
state(hdlc)->n391cnt--;
fr_lmi_send(dev, state(hdlc)->n391cnt == 0);
state(hdlc)->last_poll = jiffies;
state(hdlc)->request = 1;
state(hdlc)->timer.expires = jiffies +
state(hdlc)->settings.t391 * HZ;
}
add_timer(&state(hdlc)->timer);
}
static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct pvc_device *pvc;
u8 rxseq, txseq;
int lmi = state(hdlc)->settings.lmi;
int dce = state(hdlc)->settings.dce;
int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;
if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
LMI_CCITT_CISCO_LENGTH)) {
netdev_info(dev, "Short LMI frame\n");
return 1;
}
if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
NLPID_CCITT_ANSI_LMI)) {
netdev_info(dev, "Received non-LMI frame with LMI DLCI\n");
return 1;
}
if (skb->data[4] != LMI_CALLREF) {
netdev_info(dev, "Invalid LMI Call reference (0x%02X)\n",
skb->data[4]);
return 1;
}
if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
netdev_info(dev, "Invalid LMI Message type (0x%02X)\n",
skb->data[5]);
return 1;
}
if (lmi == LMI_ANSI) {
if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
netdev_info(dev, "Not ANSI locking shift in LMI message (0x%02X)\n",
skb->data[6]);
return 1;
}
i = 7;
} else {
i = 6;
}
if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
LMI_ANSI_CISCO_REPTYPE)) {
netdev_info(dev, "Not an LMI Report type IE (0x%02X)\n",
skb->data[i]);
return 1;
}
if (skb->data[++i] != LMI_REPT_LEN) {
netdev_info(dev, "Invalid LMI Report type IE length (%u)\n",
skb->data[i]);
return 1;
}
reptype = skb->data[++i];
if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
netdev_info(dev, "Unsupported LMI Report type (0x%02X)\n",
reptype);
return 1;
}
if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
LMI_ANSI_CISCO_ALIVE)) {
netdev_info(dev, "Not an LMI Link integrity verification IE (0x%02X)\n",
skb->data[i]);
return 1;
}
if (skb->data[++i] != LMI_INTEG_LEN) {
netdev_info(dev, "Invalid LMI Link integrity verification IE length (%u)\n",
skb->data[i]);
return 1;
}
i++;
state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
rxseq = skb->data[i++]; /* Should confirm our sequence */
txseq = state(hdlc)->txseq;
if (dce)
state(hdlc)->last_poll = jiffies;
error = 0;
if (!state(hdlc)->reliable)
error = 1;
if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
state(hdlc)->n391cnt = 0;
error = 1;
}
if (dce) {
if (state(hdlc)->fullrep_sent && !error) {
/* Stop sending full report - the last one has been confirmed by DTE */
state(hdlc)->fullrep_sent = 0;
pvc = state(hdlc)->first_pvc;
while (pvc) {
if (pvc->state.new) {
pvc->state.new = 0;
/* Tell DTE that new PVC is now active */
state(hdlc)->dce_changed = 1;
}
pvc = pvc->next;
}
}
if (state(hdlc)->dce_changed) {
reptype = LMI_FULLREP;
state(hdlc)->fullrep_sent = 1;
state(hdlc)->dce_changed = 0;
}
state(hdlc)->request = 1; /* got request */
fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
return 0;
}
/* DTE */
state(hdlc)->request = 0; /* got response, no request pending */
if (error)
return 0;
if (reptype != LMI_FULLREP)
return 0;
pvc = state(hdlc)->first_pvc;
while (pvc) {
pvc->state.deleted = 1;
pvc = pvc->next;
}
no_ram = 0;
while (skb->len >= i + 2 + stat_len) {
u16 dlci;
u32 bw;
unsigned int active, new;
if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
LMI_ANSI_CISCO_PVCSTAT)) {
netdev_info(dev, "Not an LMI PVC status IE (0x%02X)\n",
skb->data[i]);
return 1;
}
if (skb->data[++i] != stat_len) {
netdev_info(dev, "Invalid LMI PVC status IE length (%u)\n",
skb->data[i]);
return 1;
}
i++;
new = !!(skb->data[i + 2] & 0x08);
active = !!(skb->data[i + 2] & 0x02);
if (lmi == LMI_CISCO) {
dlci = (skb->data[i] << 8) | skb->data[i + 1];
bw = (skb->data[i + 3] << 16) |
(skb->data[i + 4] << 8) |
(skb->data[i + 5]);
} else {
dlci = ((skb->data[i] & 0x3F) << 4) |
((skb->data[i + 1] & 0x78) >> 3);
bw = 0;
}
pvc = add_pvc(dev, dlci);
if (!pvc && !no_ram) {
netdev_warn(dev, "Memory squeeze on fr_lmi_recv()\n");
no_ram = 1;
}
if (pvc) {
pvc->state.exist = 1;
pvc->state.deleted = 0;
if (active != pvc->state.active ||
new != pvc->state.new ||
bw != pvc->state.bandwidth ||
!pvc->state.exist) {
pvc->state.new = new;
pvc->state.active = active;
pvc->state.bandwidth = bw;
pvc_carrier(active, pvc);
fr_log_dlci_active(pvc);
}
}
i += stat_len;
}
pvc = state(hdlc)->first_pvc;
while (pvc) {
if (pvc->state.deleted && pvc->state.exist) {
pvc_carrier(0, pvc);
pvc->state.active = pvc->state.new = 0;
pvc->state.exist = 0;
pvc->state.bandwidth = 0;
fr_log_dlci_active(pvc);
}
pvc = pvc->next;
}
/* Next full report after N391 polls */
state(hdlc)->n391cnt = state(hdlc)->settings.n391;
return 0;
}
static int fr_snap_parse(struct sk_buff *skb, struct pvc_device *pvc)
{
/* OUI 00-00-00 indicates an Ethertype follows */
if (skb->data[0] == 0x00 &&
skb->data[1] == 0x00 &&
skb->data[2] == 0x00) {
if (!pvc->main)
return -1;
skb->dev = pvc->main;
skb->protocol = *(__be16 *)(skb->data + 3); /* Ethertype */
skb_pull(skb, 5);
skb_reset_mac_header(skb);
return 0;
/* OUI 00-80-C2 stands for the 802.1 organization */
} else if (skb->data[0] == 0x00 &&
skb->data[1] == 0x80 &&
skb->data[2] == 0xC2) {
/* PID 00-07 stands for Ethernet frames without FCS */
if (skb->data[3] == 0x00 &&
skb->data[4] == 0x07) {
if (!pvc->ether)
return -1;
skb_pull(skb, 5);
if (skb->len < ETH_HLEN)
return -1;
skb->protocol = eth_type_trans(skb, pvc->ether);
return 0;
/* PID unsupported */
} else {
return -1;
}
/* OUI unsupported */
} else {
return -1;
}
}
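/* On receive the driver expects RFC 2427 (FRF 3.1) encapsulation: a
 * two-byte Q.922 address (10-bit DLCI, EA bit clear in the first octet
 * and set in the second), the UI control octet 0x03, then an NLPID.
 * A hedged worked example, following the encoding that dlci_to_q922()
 * uses elsewhere in this file: an IPv4 packet on DLCI 100 starts
 * 0x18 0x41 0x03 0xCC, and q922_to_dlci() recovers
 * ((0x18 & 0xFC) << 2) | ((0x41 & 0xF0) >> 4) = 96 + 4 = 100.
 */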
static int fr_rx(struct sk_buff *skb)
{
struct net_device *frad = skb->dev;
hdlc_device *hdlc = dev_to_hdlc(frad);
struct fr_hdr *fh = (struct fr_hdr *)skb->data;
u8 *data = skb->data;
u16 dlci;
struct pvc_device *pvc;
struct net_device *dev;
if (skb->len < 4 || fh->ea1 || !fh->ea2 || data[2] != FR_UI)
goto rx_error;
dlci = q922_to_dlci(skb->data);
if ((dlci == LMI_CCITT_ANSI_DLCI &&
(state(hdlc)->settings.lmi == LMI_ANSI ||
state(hdlc)->settings.lmi == LMI_CCITT)) ||
(dlci == LMI_CISCO_DLCI &&
state(hdlc)->settings.lmi == LMI_CISCO)) {
if (fr_lmi_recv(frad, skb))
goto rx_error;
dev_kfree_skb_any(skb);
return NET_RX_SUCCESS;
}
pvc = find_pvc(hdlc, dlci);
if (!pvc) {
#ifdef DEBUG_PKT
netdev_info(frad, "No PVC for received frame's DLCI %d\n",
dlci);
#endif
goto rx_drop;
}
if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
dlci, fh->fecn ? "N" : "FF");
#endif
pvc->state.fecn ^= 1;
}
if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
dlci, fh->becn ? "N" : "FF");
#endif
pvc->state.becn ^= 1;
}
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb) {
frad->stats.rx_dropped++;
return NET_RX_DROP;
}
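	/* Dispatch on the NLPID octet (ISO/IEC TR 9577): routed IPv4
	 * (NLPID_IP) and IPv6 (NLPID_IPV6) frames go straight up the
	 * stack, while a pad octet followed by NLPID_SNAP introduces a
	 * 5-byte SNAP header that fr_snap_parse() unwraps (bridged
	 * Ethernet and other Ethertype protocols).
	 */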
if (data[3] == NLPID_IP) {
if (!pvc->main)
goto rx_drop;
skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
skb->dev = pvc->main;
skb->protocol = htons(ETH_P_IP);
skb_reset_mac_header(skb);
} else if (data[3] == NLPID_IPV6) {
if (!pvc->main)
goto rx_drop;
skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
skb->dev = pvc->main;
skb->protocol = htons(ETH_P_IPV6);
skb_reset_mac_header(skb);
} else if (data[3] == FR_PAD) {
if (skb->len < 5)
goto rx_error;
if (data[4] == NLPID_SNAP) { /* A SNAP header follows */
skb_pull(skb, 5);
if (skb->len < 5) /* Incomplete SNAP header */
goto rx_error;
if (fr_snap_parse(skb, pvc))
goto rx_drop;
} else {
goto rx_drop;
}
} else {
netdev_info(frad, "Unsupported protocol, NLPID=%x length=%i\n",
data[3], skb->len);
goto rx_drop;
}
dev = skb->dev;
dev->stats.rx_packets++; /* PVC traffic */
dev->stats.rx_bytes += skb->len;
if (pvc->state.becn)
dev->stats.rx_compressed++;
netif_rx(skb);
return NET_RX_SUCCESS;
rx_error:
frad->stats.rx_errors++; /* Mark error */
rx_drop:
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
static void fr_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
printk(KERN_DEBUG "fr_start\n");
#endif
if (state(hdlc)->settings.lmi != LMI_NONE) {
state(hdlc)->reliable = 0;
state(hdlc)->dce_changed = 1;
state(hdlc)->request = 0;
state(hdlc)->fullrep_sent = 0;
state(hdlc)->last_errors = 0xFFFFFFFF;
state(hdlc)->n391cnt = 0;
state(hdlc)->txseq = state(hdlc)->rxseq = 0;
state(hdlc)->dev = dev;
timer_setup(&state(hdlc)->timer, fr_timer, 0);
/* First poll after 1 s */
state(hdlc)->timer.expires = jiffies + HZ;
add_timer(&state(hdlc)->timer);
} else {
fr_set_link_state(1, dev);
}
}
static void fr_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
printk(KERN_DEBUG "fr_stop\n");
#endif
if (state(hdlc)->settings.lmi != LMI_NONE)
del_timer_sync(&state(hdlc)->timer);
fr_set_link_state(0, dev);
}
static void fr_close(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct pvc_device *pvc = state(hdlc)->first_pvc;
while (pvc) { /* Shutdown all PVCs for this FRAD */
if (pvc->main)
dev_close(pvc->main);
if (pvc->ether)
dev_close(pvc->ether);
pvc = pvc->next;
}
}
static void pvc_setup(struct net_device *dev)
{
dev->type = ARPHRD_DLCI;
dev->flags = IFF_POINTOPOINT;
dev->hard_header_len = 0;
dev->addr_len = 2;
netif_keep_dst(dev);
}
static const struct net_device_ops pvc_ops = {
.ndo_open = pvc_open,
.ndo_stop = pvc_close,
.ndo_start_xmit = pvc_xmit,
.ndo_siocwandev = pvc_ioctl,
};
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
hdlc_device *hdlc = dev_to_hdlc(frad);
struct pvc_device *pvc;
struct net_device *dev;
int used;
pvc = add_pvc(frad, dlci);
if (!pvc) {
netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
return -ENOBUFS;
}
if (*get_dev_p(pvc, type))
return -EEXIST;
used = pvc_is_used(pvc);
if (type == ARPHRD_ETHER)
dev = alloc_netdev(0, "pvceth%d", NET_NAME_UNKNOWN,
ether_setup);
else
dev = alloc_netdev(0, "pvc%d", NET_NAME_UNKNOWN, pvc_setup);
if (!dev) {
		netdev_warn(frad, "Memory squeeze on fr_add_pvc()\n");
delete_unused_pvcs(hdlc);
return -ENOBUFS;
}
if (type == ARPHRD_ETHER) {
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
eth_hw_addr_random(dev);
} else {
__be16 addr = htons(dlci);
dev_addr_set(dev, (u8 *)&addr);
dlci_to_q922(dev->broadcast, dlci);
}
dev->netdev_ops = &pvc_ops;
dev->mtu = HDLC_MAX_MTU;
dev->min_mtu = 68;
dev->max_mtu = HDLC_MAX_MTU;
dev->needed_headroom = 10;
dev->priv_flags |= IFF_NO_QUEUE;
dev->ml_priv = pvc;
if (register_netdevice(dev) != 0) {
free_netdev(dev);
delete_unused_pvcs(hdlc);
return -EIO;
}
dev->needs_free_netdev = true;
*get_dev_p(pvc, type) = dev;
if (!used) {
state(hdlc)->dce_changed = 1;
state(hdlc)->dce_pvc_count++;
}
return 0;
}
static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
struct pvc_device *pvc;
struct net_device *dev;
pvc = find_pvc(hdlc, dlci);
if (!pvc)
return -ENOENT;
dev = *get_dev_p(pvc, type);
if (!dev)
return -ENOENT;
if (dev->flags & IFF_UP)
return -EBUSY; /* PVC in use */
unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
*get_dev_p(pvc, type) = NULL;
if (!pvc_is_used(pvc)) {
state(hdlc)->dce_pvc_count--;
state(hdlc)->dce_changed = 1;
}
delete_unused_pvcs(hdlc);
return 0;
}
static void fr_destroy(struct net_device *frad)
{
hdlc_device *hdlc = dev_to_hdlc(frad);
struct pvc_device *pvc = state(hdlc)->first_pvc;
state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
state(hdlc)->dce_pvc_count = 0;
state(hdlc)->dce_changed = 1;
while (pvc) {
struct pvc_device *next = pvc->next;
/* destructors will free_netdev() main and ether */
if (pvc->main)
unregister_netdevice(pvc->main);
if (pvc->ether)
unregister_netdevice(pvc->ether);
kfree(pvc);
pvc = next;
}
}
static struct hdlc_proto proto = {
.close = fr_close,
.start = fr_start,
.stop = fr_stop,
.detach = fr_destroy,
.ioctl = fr_ioctl,
.netif_rx = fr_rx,
.module = THIS_MODULE,
};
static int fr_ioctl(struct net_device *dev, struct if_settings *ifs)
{
fr_proto __user *fr_s = ifs->ifs_ifsu.fr;
const size_t size = sizeof(fr_proto);
fr_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
fr_proto_pvc pvc;
int result;
switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
return -EINVAL;
ifs->type = IF_PROTO_FR;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(fr_s, &state(hdlc)->settings, size))
return -EFAULT;
return 0;
case IF_PROTO_FR:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&new_settings, fr_s, size))
return -EFAULT;
if (new_settings.lmi == LMI_DEFAULT)
new_settings.lmi = LMI_ANSI;
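		/* Sanity limits on the LMI parameters (mirroring the
		 * checks below): T391 >= 1, T392 >= 2, N391 >= 1,
		 * 1 <= N392 <= N393 <= 32, and dce must be 0 or 1.
		 */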
if ((new_settings.lmi != LMI_NONE &&
new_settings.lmi != LMI_ANSI &&
new_settings.lmi != LMI_CCITT &&
new_settings.lmi != LMI_CISCO) ||
new_settings.t391 < 1 ||
new_settings.t392 < 2 ||
new_settings.n391 < 1 ||
new_settings.n392 < 1 ||
new_settings.n393 < new_settings.n392 ||
new_settings.n393 > 32 ||
(new_settings.dce != 0 &&
new_settings.dce != 1))
return -EINVAL;
result = hdlc->attach(dev, ENCODING_NRZ,
PARITY_CRC16_PR1_CCITT);
if (result)
return result;
if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
result = attach_hdlc_protocol(dev, &proto,
sizeof(struct frad_state));
if (result)
return result;
state(hdlc)->first_pvc = NULL;
state(hdlc)->dce_pvc_count = 0;
}
memcpy(&state(hdlc)->settings, &new_settings, size);
dev->type = ARPHRD_FRAD;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
return 0;
case IF_PROTO_FR_ADD_PVC:
case IF_PROTO_FR_DEL_PVC:
case IF_PROTO_FR_ADD_ETH_PVC:
case IF_PROTO_FR_DEL_ETH_PVC:
if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
return -EINVAL;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&pvc, ifs->ifs_ifsu.fr_pvc,
sizeof(fr_proto_pvc)))
return -EFAULT;
if (pvc.dlci <= 0 || pvc.dlci >= 1024)
return -EINVAL; /* Only 10 bits, DLCI 0 reserved */
if (ifs->type == IF_PROTO_FR_ADD_ETH_PVC ||
ifs->type == IF_PROTO_FR_DEL_ETH_PVC)
result = ARPHRD_ETHER; /* bridged Ethernet device */
else
result = ARPHRD_DLCI;
if (ifs->type == IF_PROTO_FR_ADD_PVC ||
ifs->type == IF_PROTO_FR_ADD_ETH_PVC)
return fr_add_pvc(dev, pvc.dlci, result);
else
return fr_del_pvc(hdlc, pvc.dlci, result);
}
return -EINVAL;
}
static int __init hdlc_fr_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
static void __exit hdlc_fr_exit(void)
{
unregister_hdlc_protocol(&proto);
}
module_init(hdlc_fr_init);
module_exit(hdlc_fr_exit);
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/net/wan/hdlc_fr.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* wanXL serial card driver for Linux
* host part
*
* Copyright (C) 2003 Krzysztof Halasa <[email protected]>
*
* Status:
* - Only DTE (external clock) support with NRZ and NRZI encodings
* - wanXL100 will require minor driver modifications, no access to hw
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <linux/pci.h>
#include <linux/dma-mapping.h>
#include <linux/delay.h>
#include <asm/io.h>
#include "wanxl.h"
static const char *version = "wanXL serial card driver version: 0.48";
#define PLX_CTL_RESET 0x40000000 /* adapter reset */
#undef DEBUG_PKT
#undef DEBUG_PCI
/* MAILBOX #1 - PUTS COMMANDS */
#define MBX1_CMD_ABORTJ 0x85000000 /* Abort and Jump */
#ifdef __LITTLE_ENDIAN
#define MBX1_CMD_BSWAP 0x8C000001 /* little-endian Byte Swap Mode */
#else
#define MBX1_CMD_BSWAP 0x8C000000 /* big-endian Byte Swap Mode */
#endif
/* MAILBOX #2 - DRAM SIZE */
#define MBX2_MEMSZ_MASK 0xFFFF0000 /* PUTS Memory Size Register mask */
struct port {
struct net_device *dev;
struct card *card;
spinlock_t lock; /* for wanxl_xmit */
int node; /* physical port #0 - 3 */
unsigned int clock_type;
int tx_in, tx_out;
struct sk_buff *tx_skbs[TX_BUFFERS];
};
struct card_status {
desc_t rx_descs[RX_QUEUE_LENGTH];
port_status_t port_status[4];
};
struct card {
int n_ports; /* 1, 2 or 4 ports */
u8 irq;
u8 __iomem *plx; /* PLX PCI9060 virtual base address */
struct pci_dev *pdev; /* for pci_name(pdev) */
int rx_in;
struct sk_buff *rx_skbs[RX_QUEUE_LENGTH];
struct card_status *status; /* shared between host and card */
dma_addr_t status_address;
struct port ports[]; /* 1 - 4 port structures follow */
};
static inline struct port *dev_to_port(struct net_device *dev)
{
return (struct port *)dev_to_hdlc(dev)->priv;
}
static inline port_status_t *get_status(struct port *port)
{
return &port->card->status->port_status[port->node];
}
#ifdef DEBUG_PCI
static inline dma_addr_t pci_map_single_debug(struct pci_dev *pdev, void *ptr,
size_t size, int direction)
{
dma_addr_t addr = dma_map_single(&pdev->dev, ptr, size, direction);
if (addr + size > 0x100000000LL)
pr_crit("%s: pci_map_single() returned memory at 0x%llx!\n",
pci_name(pdev), (unsigned long long)addr);
return addr;
}
#undef pci_map_single
#define pci_map_single pci_map_single_debug
#endif
/* Cable and/or personality module change interrupt service */
static inline void wanxl_cable_intr(struct port *port)
{
u32 value = get_status(port)->cable;
int valid = 1;
const char *cable, *pm, *dte = "", *dsr = "", *dcd = "";
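	/* The cable status word packs everything we need (a hedged
	 * reading of the decode below): bits 2..0 give the cable type,
	 * bits 2..0 above STATUS_CABLE_PM_SHIFT give the personality
	 * module type, and separate flag bits report DSR, DCD and
	 * DTE/DCE mode.
	 */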
switch (value & 0x7) {
case STATUS_CABLE_V35:
cable = "V.35";
break;
case STATUS_CABLE_X21:
cable = "X.21";
break;
case STATUS_CABLE_V24:
cable = "V.24";
break;
case STATUS_CABLE_EIA530:
cable = "EIA530";
break;
case STATUS_CABLE_NONE:
cable = "no";
break;
default:
cable = "invalid";
}
switch ((value >> STATUS_CABLE_PM_SHIFT) & 0x7) {
case STATUS_CABLE_V35:
pm = "V.35";
break;
case STATUS_CABLE_X21:
pm = "X.21";
break;
case STATUS_CABLE_V24:
pm = "V.24";
break;
case STATUS_CABLE_EIA530:
pm = "EIA530";
break;
case STATUS_CABLE_NONE:
pm = "no personality";
valid = 0;
break;
default:
pm = "invalid personality";
valid = 0;
}
if (valid) {
if ((value & 7) == ((value >> STATUS_CABLE_PM_SHIFT) & 7)) {
dsr = (value & STATUS_CABLE_DSR) ? ", DSR ON" :
", DSR off";
dcd = (value & STATUS_CABLE_DCD) ? ", carrier ON" :
", carrier off";
}
dte = (value & STATUS_CABLE_DCE) ? " DCE" : " DTE";
}
netdev_info(port->dev, "%s%s module, %s cable%s%s\n",
pm, dte, cable, dsr, dcd);
if (value & STATUS_CABLE_DCD)
netif_carrier_on(port->dev);
else
netif_carrier_off(port->dev);
}
/* Transmit complete interrupt service */
static inline void wanxl_tx_intr(struct port *port)
{
struct net_device *dev = port->dev;
while (1) {
desc_t *desc = &get_status(port)->tx_descs[port->tx_in];
struct sk_buff *skb = port->tx_skbs[port->tx_in];
switch (desc->stat) {
case PACKET_FULL:
case PACKET_EMPTY:
netif_wake_queue(dev);
return;
case PACKET_UNDERRUN:
dev->stats.tx_errors++;
dev->stats.tx_fifo_errors++;
break;
default:
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
}
desc->stat = PACKET_EMPTY; /* Free descriptor */
dma_unmap_single(&port->card->pdev->dev, desc->address,
skb->len, DMA_TO_DEVICE);
dev_consume_skb_irq(skb);
port->tx_in = (port->tx_in + 1) % TX_BUFFERS;
}
}
/* Receive complete interrupt service */
static inline void wanxl_rx_intr(struct card *card)
{
desc_t *desc;
while (desc = &card->status->rx_descs[card->rx_in],
desc->stat != PACKET_EMPTY) {
		if ((desc->stat & PACKET_PORT_MASK) >= card->n_ports) {
pr_crit("%s: received packet for nonexistent port\n",
pci_name(card->pdev));
} else {
struct sk_buff *skb = card->rx_skbs[card->rx_in];
struct port *port = &card->ports[desc->stat &
PACKET_PORT_MASK];
struct net_device *dev = port->dev;
if (!skb) {
dev->stats.rx_dropped++;
} else {
dma_unmap_single(&card->pdev->dev,
desc->address, BUFFER_LENGTH,
DMA_FROM_DEVICE);
skb_put(skb, desc->length);
#ifdef DEBUG_PKT
printk(KERN_DEBUG "%s RX(%i):", dev->name,
skb->len);
debug_frame(skb);
#endif
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
skb->protocol = hdlc_type_trans(skb, dev);
netif_rx(skb);
skb = NULL;
}
if (!skb) {
skb = dev_alloc_skb(BUFFER_LENGTH);
desc->address = skb ?
dma_map_single(&card->pdev->dev,
skb->data,
BUFFER_LENGTH,
DMA_FROM_DEVICE) : 0;
card->rx_skbs[card->rx_in] = skb;
}
}
desc->stat = PACKET_EMPTY; /* Free descriptor */
card->rx_in = (card->rx_in + 1) % RX_QUEUE_LENGTH;
}
}
static irqreturn_t wanxl_intr(int irq, void *dev_id)
{
struct card *card = dev_id;
int i;
u32 stat;
int handled = 0;
while ((stat = readl(card->plx + PLX_DOORBELL_FROM_CARD)) != 0) {
handled = 1;
writel(stat, card->plx + PLX_DOORBELL_FROM_CARD);
for (i = 0; i < card->n_ports; i++) {
if (stat & (1 << (DOORBELL_FROM_CARD_TX_0 + i)))
wanxl_tx_intr(&card->ports[i]);
if (stat & (1 << (DOORBELL_FROM_CARD_CABLE_0 + i)))
wanxl_cable_intr(&card->ports[i]);
}
if (stat & (1 << DOORBELL_FROM_CARD_RX))
wanxl_rx_intr(card);
}
return IRQ_RETVAL(handled);
}
static netdev_tx_t wanxl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port *port = dev_to_port(dev);
desc_t *desc;
spin_lock(&port->lock);
desc = &get_status(port)->tx_descs[port->tx_out];
if (desc->stat != PACKET_EMPTY) {
/* should never happen - previous xmit should stop queue */
#ifdef DEBUG_PKT
printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
netif_stop_queue(dev);
spin_unlock(&port->lock);
return NETDEV_TX_BUSY; /* request packet to be queued */
}
#ifdef DEBUG_PKT
printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
debug_frame(skb);
#endif
port->tx_skbs[port->tx_out] = skb;
desc->address = dma_map_single(&port->card->pdev->dev, skb->data,
skb->len, DMA_TO_DEVICE);
desc->length = skb->len;
desc->stat = PACKET_FULL;
writel(1 << (DOORBELL_TO_CARD_TX_0 + port->node),
port->card->plx + PLX_DOORBELL_TO_CARD);
port->tx_out = (port->tx_out + 1) % TX_BUFFERS;
if (get_status(port)->tx_descs[port->tx_out].stat != PACKET_EMPTY) {
netif_stop_queue(dev);
#ifdef DEBUG_PKT
printk(KERN_DEBUG "%s: transmitter buffer full\n", dev->name);
#endif
}
spin_unlock(&port->lock);
return NETDEV_TX_OK;
}
static int wanxl_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
struct port *port = dev_to_port(dev);
if (encoding != ENCODING_NRZ &&
encoding != ENCODING_NRZI)
return -EINVAL;
if (parity != PARITY_NONE &&
parity != PARITY_CRC32_PR1_CCITT &&
parity != PARITY_CRC16_PR1_CCITT &&
parity != PARITY_CRC32_PR0_CCITT &&
parity != PARITY_CRC16_PR0_CCITT)
return -EINVAL;
get_status(port)->encoding = encoding;
get_status(port)->parity = parity;
return 0;
}
static int wanxl_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings line;
struct port *port = dev_to_port(dev);
switch (ifs->type) {
case IF_GET_IFACE:
ifs->type = IF_IFACE_SYNC_SERIAL;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&line, 0, sizeof(line));
line.clock_type = get_status(port)->clocking;
line.clock_rate = 0;
line.loopback = 0;
if (copy_to_user(ifs->ifs_ifsu.sync, &line, size))
return -EFAULT;
return 0;
case IF_IFACE_SYNC_SERIAL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&line, ifs->ifs_ifsu.sync,
size))
return -EFAULT;
if (line.clock_type != CLOCK_EXT &&
line.clock_type != CLOCK_TXFROMRX)
return -EINVAL; /* No such clock setting */
if (line.loopback != 0)
return -EINVAL;
get_status(port)->clocking = line.clock_type;
return 0;
default:
return hdlc_ioctl(dev, ifs);
}
}
static int wanxl_open(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
u8 __iomem *dbr = port->card->plx + PLX_DOORBELL_TO_CARD;
unsigned long timeout;
int i;
if (get_status(port)->open) {
netdev_err(dev, "port already open\n");
return -EIO;
}
i = hdlc_open(dev);
if (i)
return i;
port->tx_in = port->tx_out = 0;
for (i = 0; i < TX_BUFFERS; i++)
get_status(port)->tx_descs[i].stat = PACKET_EMPTY;
/* signal the card */
writel(1 << (DOORBELL_TO_CARD_OPEN_0 + port->node), dbr);
timeout = jiffies + HZ;
do {
if (get_status(port)->open) {
netif_start_queue(dev);
return 0;
}
} while (time_after(timeout, jiffies));
netdev_err(dev, "unable to open port\n");
	/* ask the card to close the port, should it still be alive */
writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node), dbr);
return -EFAULT;
}
static int wanxl_close(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
unsigned long timeout;
int i;
hdlc_close(dev);
/* signal the card */
writel(1 << (DOORBELL_TO_CARD_CLOSE_0 + port->node),
port->card->plx + PLX_DOORBELL_TO_CARD);
timeout = jiffies + HZ;
do {
if (!get_status(port)->open)
break;
} while (time_after(timeout, jiffies));
if (get_status(port)->open)
netdev_err(dev, "unable to close port\n");
netif_stop_queue(dev);
for (i = 0; i < TX_BUFFERS; i++) {
desc_t *desc = &get_status(port)->tx_descs[i];
if (desc->stat != PACKET_EMPTY) {
desc->stat = PACKET_EMPTY;
dma_unmap_single(&port->card->pdev->dev,
desc->address, port->tx_skbs[i]->len,
DMA_TO_DEVICE);
dev_kfree_skb(port->tx_skbs[i]);
}
}
return 0;
}
static struct net_device_stats *wanxl_get_stats(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
dev->stats.rx_over_errors = get_status(port)->rx_overruns;
dev->stats.rx_frame_errors = get_status(port)->rx_frame_errors;
dev->stats.rx_errors = dev->stats.rx_over_errors +
dev->stats.rx_frame_errors;
return &dev->stats;
}
static int wanxl_puts_command(struct card *card, u32 cmd)
{
unsigned long timeout = jiffies + 5 * HZ;
writel(cmd, card->plx + PLX_MAILBOX_1);
do {
if (readl(card->plx + PLX_MAILBOX_1) == 0)
return 0;
schedule();
} while (time_after(timeout, jiffies));
return -1;
}
static void wanxl_reset(struct card *card)
{
u32 old_value = readl(card->plx + PLX_CONTROL) & ~PLX_CTL_RESET;
writel(0x80, card->plx + PLX_MAILBOX_0);
writel(old_value | PLX_CTL_RESET, card->plx + PLX_CONTROL);
readl(card->plx + PLX_CONTROL); /* wait for posted write */
udelay(1);
writel(old_value, card->plx + PLX_CONTROL);
readl(card->plx + PLX_CONTROL); /* wait for posted write */
}
static void wanxl_pci_remove_one(struct pci_dev *pdev)
{
struct card *card = pci_get_drvdata(pdev);
int i;
for (i = 0; i < card->n_ports; i++) {
unregister_hdlc_device(card->ports[i].dev);
free_netdev(card->ports[i].dev);
}
/* unregister and free all host resources */
if (card->irq)
free_irq(card->irq, card);
wanxl_reset(card);
for (i = 0; i < RX_QUEUE_LENGTH; i++)
if (card->rx_skbs[i]) {
dma_unmap_single(&card->pdev->dev,
card->status->rx_descs[i].address,
BUFFER_LENGTH, DMA_FROM_DEVICE);
dev_kfree_skb(card->rx_skbs[i]);
}
if (card->plx)
iounmap(card->plx);
if (card->status)
dma_free_coherent(&pdev->dev, sizeof(struct card_status),
card->status, card->status_address);
pci_release_regions(pdev);
pci_disable_device(pdev);
kfree(card);
}
#include "wanxlfw.inc"
static const struct net_device_ops wanxl_ops = {
.ndo_open = wanxl_open,
.ndo_stop = wanxl_close,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_siocwandev = wanxl_ioctl,
.ndo_get_stats = wanxl_get_stats,
};
static int wanxl_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
struct card *card;
u32 ramsize, stat;
unsigned long timeout;
u32 plx_phy; /* PLX PCI base address */
u32 mem_phy; /* memory PCI base addr */
u8 __iomem *mem; /* memory virtual base addr */
int i, ports;
#ifndef MODULE
pr_info_once("%s\n", version);
#endif
i = pci_enable_device(pdev);
if (i)
return i;
	/* The QUICC can only access the first 256 MB of host RAM directly,
	 * but the PLX9060 DMA engine uses full 32-bit addresses for actual
	 * packet data transfers
	 */
/* FIXME when PCI/DMA subsystems are fixed.
* We set both dma_mask and consistent_dma_mask to 28 bits
* and pray pci_alloc_consistent() will use this info. It should
* work on most platforms
*/
if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(28)) ||
dma_set_mask(&pdev->dev, DMA_BIT_MASK(28))) {
pr_err("No usable DMA configuration\n");
pci_disable_device(pdev);
return -EIO;
}
i = pci_request_regions(pdev, "wanXL");
if (i) {
pci_disable_device(pdev);
return i;
}
switch (pdev->device) {
case PCI_DEVICE_ID_SBE_WANXL100:
ports = 1;
break;
case PCI_DEVICE_ID_SBE_WANXL200:
ports = 2;
break;
default:
ports = 4;
}
card = kzalloc(struct_size(card, ports, ports), GFP_KERNEL);
if (!card) {
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
}
pci_set_drvdata(pdev, card);
card->pdev = pdev;
card->status = dma_alloc_coherent(&pdev->dev,
sizeof(struct card_status),
&card->status_address, GFP_KERNEL);
if (!card->status) {
wanxl_pci_remove_one(pdev);
return -ENOBUFS;
}
#ifdef DEBUG_PCI
printk(KERN_DEBUG "wanXL %s: pci_alloc_consistent() returned memory"
" at 0x%LX\n", pci_name(pdev),
(unsigned long long)card->status_address);
#endif
/* FIXME when PCI/DMA subsystems are fixed.
* We set both dma_mask and consistent_dma_mask back to 32 bits
* to indicate the card can do 32-bit DMA addressing
*/
if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)) ||
dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
pr_err("No usable DMA configuration\n");
wanxl_pci_remove_one(pdev);
return -EIO;
}
/* set up PLX mapping */
plx_phy = pci_resource_start(pdev, 0);
card->plx = ioremap(plx_phy, 0x70);
if (!card->plx) {
pr_err("ioremap() failed\n");
wanxl_pci_remove_one(pdev);
return -EFAULT;
}
#if RESET_WHILE_LOADING
wanxl_reset(card);
#endif
timeout = jiffies + 20 * HZ;
while ((stat = readl(card->plx + PLX_MAILBOX_0)) != 0) {
if (time_before(timeout, jiffies)) {
pr_warn("%s: timeout waiting for PUTS to complete\n",
pci_name(pdev));
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
switch (stat & 0xC0) {
case 0x00: /* hmm - PUTS completed with non-zero code? */
case 0x80: /* PUTS still testing the hardware */
break;
default:
pr_warn("%s: PUTS test 0x%X failed\n",
pci_name(pdev), stat & 0x30);
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
schedule();
}
/* get on-board memory size (PUTS detects no more than 4 MB) */
ramsize = readl(card->plx + PLX_MAILBOX_2) & MBX2_MEMSZ_MASK;
/* set up on-board RAM mapping */
mem_phy = pci_resource_start(pdev, 2);
/* sanity check the board's reported memory size */
if (ramsize < BUFFERS_ADDR +
(TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports) {
pr_warn("%s: no enough on-board RAM (%u bytes detected, %u bytes required)\n",
pci_name(pdev), ramsize,
BUFFERS_ADDR +
(TX_BUFFERS + RX_BUFFERS) * BUFFER_LENGTH * ports);
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
if (wanxl_puts_command(card, MBX1_CMD_BSWAP)) {
pr_warn("%s: unable to Set Byte Swap Mode\n", pci_name(pdev));
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
for (i = 0; i < RX_QUEUE_LENGTH; i++) {
struct sk_buff *skb = dev_alloc_skb(BUFFER_LENGTH);
card->rx_skbs[i] = skb;
if (skb)
card->status->rx_descs[i].address =
dma_map_single(&card->pdev->dev, skb->data,
BUFFER_LENGTH, DMA_FROM_DEVICE);
}
mem = ioremap(mem_phy, PDM_OFFSET + sizeof(firmware));
if (!mem) {
pr_err("ioremap() failed\n");
wanxl_pci_remove_one(pdev);
return -EFAULT;
}
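	/* Boot handoff layout (a hedged summary of the writes below):
	 * the firmware image is copied to card RAM at PDM_OFFSET, the
	 * bus addresses of the per-port status blocks go at
	 * PDM_OFFSET + 4 + 4 * port, the card_status base address at
	 * PDM_OFFSET + 20, and finally the entry offset (PDM_OFFSET)
	 * is written at offset 0.
	 */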
for (i = 0; i < sizeof(firmware); i += 4)
writel(ntohl(*(__be32 *)(firmware + i)), mem + PDM_OFFSET + i);
for (i = 0; i < ports; i++)
writel(card->status_address +
(void *)&card->status->port_status[i] -
(void *)card->status, mem + PDM_OFFSET + 4 + i * 4);
writel(card->status_address, mem + PDM_OFFSET + 20);
writel(PDM_OFFSET, mem);
iounmap(mem);
writel(0, card->plx + PLX_MAILBOX_5);
if (wanxl_puts_command(card, MBX1_CMD_ABORTJ)) {
pr_warn("%s: unable to Abort and Jump\n", pci_name(pdev));
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
timeout = jiffies + 5 * HZ;
do {
stat = readl(card->plx + PLX_MAILBOX_5);
if (stat)
break;
schedule();
} while (time_after(timeout, jiffies));
if (!stat) {
pr_warn("%s: timeout while initializing card firmware\n",
pci_name(pdev));
wanxl_pci_remove_one(pdev);
return -ENODEV;
}
#if DETECT_RAM
ramsize = stat;
#endif
pr_info("%s: at 0x%X, %u KB of RAM at 0x%X, irq %u\n",
pci_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);
/* Allocate IRQ */
if (request_irq(pdev->irq, wanxl_intr, IRQF_SHARED, "wanXL", card)) {
pr_warn("%s: could not allocate IRQ%i\n",
pci_name(pdev), pdev->irq);
wanxl_pci_remove_one(pdev);
return -EBUSY;
}
card->irq = pdev->irq;
for (i = 0; i < ports; i++) {
hdlc_device *hdlc;
struct port *port = &card->ports[i];
struct net_device *dev = alloc_hdlcdev(port);
if (!dev) {
pr_err("%s: unable to allocate memory\n",
pci_name(pdev));
wanxl_pci_remove_one(pdev);
return -ENOMEM;
}
port->dev = dev;
hdlc = dev_to_hdlc(dev);
spin_lock_init(&port->lock);
dev->tx_queue_len = 50;
dev->netdev_ops = &wanxl_ops;
hdlc->attach = wanxl_attach;
hdlc->xmit = wanxl_xmit;
port->card = card;
port->node = i;
get_status(port)->clocking = CLOCK_EXT;
if (register_hdlc_device(dev)) {
pr_err("%s: unable to register hdlc device\n",
pci_name(pdev));
free_netdev(dev);
wanxl_pci_remove_one(pdev);
return -ENOBUFS;
}
card->n_ports++;
}
pr_info("%s: port", pci_name(pdev));
for (i = 0; i < ports; i++)
pr_cont("%s #%i: %s",
i ? "," : "", i, card->ports[i].dev->name);
pr_cont("\n");
for (i = 0; i < ports; i++)
wanxl_cable_intr(&card->ports[i]); /* get carrier status etc.*/
return 0;
}
static const struct pci_device_id wanxl_pci_tbl[] = {
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL100, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL200, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_SBE, PCI_DEVICE_ID_SBE_WANXL400, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ 0, }
};
static struct pci_driver wanxl_pci_driver = {
.name = "wanXL",
.id_table = wanxl_pci_tbl,
.probe = wanxl_pci_init_one,
.remove = wanxl_pci_remove_one,
};
static int __init wanxl_init_module(void)
{
#ifdef MODULE
pr_info("%s\n", version);
#endif
return pci_register_driver(&wanxl_pci_driver);
}
static void __exit wanxl_cleanup_module(void)
{
pci_unregister_driver(&wanxl_pci_driver);
}
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("SBE Inc. wanXL serial port driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, wanxl_pci_tbl);
module_init(wanxl_init_module);
module_exit(wanxl_cleanup_module);
|
linux-master
|
drivers/net/wan/wanxl.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/*
* "LAPB via ethernet" driver release 001
*
* This code REQUIRES 2.1.15 or higher/ NET3.038
*
* This is a "pseudo" network driver to allow LAPB over Ethernet.
*
* This driver can use any ethernet destination address, and can be
* limited to accept frames from one dedicated ethernet card only.
*
* History
* LAPBETH 001 Jonathan Naylor Cloned from bpqether.c
* 2000-10-29 Henner Eisen lapb_data_indication() return status.
* 2000-11-14 Henner Eisen dev_hold/put, NETDEV_GOING_DOWN support
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/slab.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/net.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/stat.h>
#include <linux/module.h>
#include <linux/lapb.h>
#include <linux/init.h>
#include <net/x25device.h>
static const u8 bcast_addr[6] = { 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF };
/* If this number is made larger, check that the temporary string buffer
* in lapbeth_new_device is large enough to store the probe device name.
*/
#define MAXLAPBDEV 100
struct lapbethdev {
struct list_head node;
struct net_device *ethdev; /* link to ethernet device */
struct net_device *axdev; /* lapbeth device (lapb#) */
bool up;
spinlock_t up_lock; /* Protects "up" */
struct sk_buff_head rx_queue;
struct napi_struct napi;
};
static LIST_HEAD(lapbeth_devices);
static void lapbeth_connected(struct net_device *dev, int reason);
static void lapbeth_disconnected(struct net_device *dev, int reason);
/* ------------------------------------------------------------------------ */
/* Get the LAPB device for the ethernet device
*/
static struct lapbethdev *lapbeth_get_x25_dev(struct net_device *dev)
{
struct lapbethdev *lapbeth;
list_for_each_entry_rcu(lapbeth, &lapbeth_devices, node, lockdep_rtnl_is_held()) {
if (lapbeth->ethdev == dev)
return lapbeth;
}
return NULL;
}
static __inline__ int dev_is_ethdev(struct net_device *dev)
{
return dev->type == ARPHRD_ETHER && strncmp(dev->name, "dummy", 5);
}
/* ------------------------------------------------------------------------ */
static int lapbeth_napi_poll(struct napi_struct *napi, int budget)
{
struct lapbethdev *lapbeth = container_of(napi, struct lapbethdev,
napi);
struct sk_buff *skb;
int processed = 0;
for (; processed < budget; ++processed) {
skb = skb_dequeue(&lapbeth->rx_queue);
if (!skb)
break;
netif_receive_skb_core(skb);
}
if (processed < budget)
napi_complete(napi);
return processed;
}
/* Receive a LAPB frame via an ethernet interface.
*/
static int lapbeth_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *ptype, struct net_device *orig_dev)
{
int len, err;
struct lapbethdev *lapbeth;
if (dev_net(dev) != &init_net)
goto drop;
skb = skb_share_check(skb, GFP_ATOMIC);
if (!skb)
return NET_RX_DROP;
if (!pskb_may_pull(skb, 2))
goto drop;
rcu_read_lock();
lapbeth = lapbeth_get_x25_dev(dev);
if (!lapbeth)
goto drop_unlock_rcu;
spin_lock_bh(&lapbeth->up_lock);
if (!lapbeth->up)
goto drop_unlock;
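	/* Each frame starts with the 2-byte little-endian length prefix
	 * added by lapbeth_data_transmit(), e.g. 0x2C 0x01 decodes to
	 * 0x2c + 0x01 * 256 = 300 bytes of LAPB payload.
	 */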
len = skb->data[0] + skb->data[1] * 256;
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
skb_pull(skb, 2); /* Remove the length bytes */
skb_trim(skb, len); /* Set the length of the data */
err = lapb_data_received(lapbeth->axdev, skb);
if (err != LAPB_OK) {
printk(KERN_DEBUG "lapbether: lapb_data_received err - %d\n", err);
goto drop_unlock;
}
out:
spin_unlock_bh(&lapbeth->up_lock);
rcu_read_unlock();
return 0;
drop_unlock:
kfree_skb(skb);
goto out;
drop_unlock_rcu:
rcu_read_unlock();
drop:
kfree_skb(skb);
return 0;
}
static int lapbeth_data_indication(struct net_device *dev, struct sk_buff *skb)
{
struct lapbethdev *lapbeth = netdev_priv(dev);
unsigned char *ptr;
if (skb_cow(skb, 1)) {
kfree_skb(skb);
return NET_RX_DROP;
}
skb_push(skb, 1);
ptr = skb->data;
*ptr = X25_IFACE_DATA;
skb->protocol = x25_type_trans(skb, dev);
skb_queue_tail(&lapbeth->rx_queue, skb);
napi_schedule(&lapbeth->napi);
return NET_RX_SUCCESS;
}
/* Send a LAPB frame via an ethernet interface
*/
static netdev_tx_t lapbeth_xmit(struct sk_buff *skb,
struct net_device *dev)
{
struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
spin_lock_bh(&lapbeth->up_lock);
if (!lapbeth->up)
goto drop;
/* There should be a pseudo header of 1 byte added by upper layers.
* Check to make sure it is there before reading it.
*/
if (skb->len < 1)
goto drop;
switch (skb->data[0]) {
case X25_IFACE_DATA:
break;
case X25_IFACE_CONNECT:
err = lapb_connect_request(dev);
if (err == LAPB_CONNECTED)
lapbeth_connected(dev, LAPB_OK);
else if (err != LAPB_OK)
pr_err("lapb_connect_request error: %d\n", err);
goto drop;
case X25_IFACE_DISCONNECT:
err = lapb_disconnect_request(dev);
if (err == LAPB_NOTCONNECTED)
lapbeth_disconnected(dev, LAPB_OK);
else if (err != LAPB_OK)
pr_err("lapb_disconnect_request err: %d\n", err);
fallthrough;
default:
goto drop;
}
skb_pull(skb, 1);
err = lapb_data_request(dev, skb);
if (err != LAPB_OK) {
pr_err("lapb_data_request error - %d\n", err);
goto drop;
}
out:
spin_unlock_bh(&lapbeth->up_lock);
return NETDEV_TX_OK;
drop:
kfree_skb(skb);
goto out;
}
static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb)
{
struct lapbethdev *lapbeth = netdev_priv(ndev);
unsigned char *ptr;
struct net_device *dev;
int size = skb->len;
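	/* Prepend the 2-byte little-endian length header that
	 * lapbeth_rcv() strips on the far end; e.g. a 300-byte frame
	 * gets the prefix 0x2C 0x01 (low byte first).
	 */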
ptr = skb_push(skb, 2);
*ptr++ = size % 256;
*ptr++ = size / 256;
ndev->stats.tx_packets++;
ndev->stats.tx_bytes += size;
skb->dev = dev = lapbeth->ethdev;
skb->protocol = htons(ETH_P_DEC);
skb_reset_network_header(skb);
dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0);
dev_queue_xmit(skb);
}
static void lapbeth_connected(struct net_device *dev, int reason)
{
struct lapbethdev *lapbeth = netdev_priv(dev);
unsigned char *ptr;
struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
if (!skb)
return;
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_CONNECT;
skb->protocol = x25_type_trans(skb, dev);
skb_queue_tail(&lapbeth->rx_queue, skb);
napi_schedule(&lapbeth->napi);
}
static void lapbeth_disconnected(struct net_device *dev, int reason)
{
struct lapbethdev *lapbeth = netdev_priv(dev);
unsigned char *ptr;
struct sk_buff *skb = __dev_alloc_skb(1, GFP_ATOMIC | __GFP_NOMEMALLOC);
if (!skb)
return;
ptr = skb_put(skb, 1);
*ptr = X25_IFACE_DISCONNECT;
skb->protocol = x25_type_trans(skb, dev);
skb_queue_tail(&lapbeth->rx_queue, skb);
napi_schedule(&lapbeth->napi);
}
/* Set AX.25 callsign
*/
static int lapbeth_set_mac_address(struct net_device *dev, void *addr)
{
struct sockaddr *sa = addr;
dev_addr_set(dev, sa->sa_data);
return 0;
}
static const struct lapb_register_struct lapbeth_callbacks = {
.connect_confirmation = lapbeth_connected,
.connect_indication = lapbeth_connected,
.disconnect_confirmation = lapbeth_disconnected,
.disconnect_indication = lapbeth_disconnected,
.data_indication = lapbeth_data_indication,
.data_transmit = lapbeth_data_transmit,
};
/* open/close a device
*/
static int lapbeth_open(struct net_device *dev)
{
struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
napi_enable(&lapbeth->napi);
err = lapb_register(dev, &lapbeth_callbacks);
if (err != LAPB_OK) {
napi_disable(&lapbeth->napi);
pr_err("lapb_register error: %d\n", err);
return -ENODEV;
}
spin_lock_bh(&lapbeth->up_lock);
lapbeth->up = true;
spin_unlock_bh(&lapbeth->up_lock);
return 0;
}
static int lapbeth_close(struct net_device *dev)
{
struct lapbethdev *lapbeth = netdev_priv(dev);
int err;
spin_lock_bh(&lapbeth->up_lock);
lapbeth->up = false;
spin_unlock_bh(&lapbeth->up_lock);
err = lapb_unregister(dev);
if (err != LAPB_OK)
pr_err("lapb_unregister error: %d\n", err);
napi_disable(&lapbeth->napi);
return 0;
}
/* ------------------------------------------------------------------------ */
static const struct net_device_ops lapbeth_netdev_ops = {
.ndo_open = lapbeth_open,
.ndo_stop = lapbeth_close,
.ndo_start_xmit = lapbeth_xmit,
.ndo_set_mac_address = lapbeth_set_mac_address,
};
static void lapbeth_setup(struct net_device *dev)
{
dev->netdev_ops = &lapbeth_netdev_ops;
dev->needs_free_netdev = true;
dev->type = ARPHRD_X25;
dev->hard_header_len = 0;
dev->mtu = 1000;
dev->addr_len = 0;
}
/* Setup a new device.
*/
static int lapbeth_new_device(struct net_device *dev)
{
struct net_device *ndev;
struct lapbethdev *lapbeth;
int rc = -ENOMEM;
ASSERT_RTNL();
if (dev->type != ARPHRD_ETHER)
return -EINVAL;
ndev = alloc_netdev(sizeof(*lapbeth), "lapb%d", NET_NAME_UNKNOWN,
lapbeth_setup);
if (!ndev)
goto out;
/* When transmitting data:
* first this driver removes a pseudo header of 1 byte,
* then the lapb module prepends an LAPB header of at most 3 bytes,
* then this driver prepends a length field of 2 bytes,
* then the underlying Ethernet device prepends its own header.
*/
ndev->needed_headroom = -1 + 3 + 2 + dev->hard_header_len
+ dev->needed_headroom;
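	/* e.g. over plain Ethernet (hard_header_len = 14,
	 * needed_headroom = 0) this evaluates to
	 * -1 + 3 + 2 + 14 + 0 = 18 bytes of headroom.
	 */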
ndev->needed_tailroom = dev->needed_tailroom;
lapbeth = netdev_priv(ndev);
lapbeth->axdev = ndev;
dev_hold(dev);
lapbeth->ethdev = dev;
lapbeth->up = false;
spin_lock_init(&lapbeth->up_lock);
skb_queue_head_init(&lapbeth->rx_queue);
netif_napi_add_weight(ndev, &lapbeth->napi, lapbeth_napi_poll, 16);
rc = -EIO;
if (register_netdevice(ndev))
goto fail;
list_add_rcu(&lapbeth->node, &lapbeth_devices);
rc = 0;
out:
return rc;
fail:
dev_put(dev);
free_netdev(ndev);
goto out;
}
/* Free a lapb network device.
*/
static void lapbeth_free_device(struct lapbethdev *lapbeth)
{
dev_put(lapbeth->ethdev);
list_del_rcu(&lapbeth->node);
unregister_netdevice(lapbeth->axdev);
}
/* Handle device status changes.
*
* Called from notifier with RTNL held.
*/
static int lapbeth_device_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct lapbethdev *lapbeth;
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
if (dev_net(dev) != &init_net)
return NOTIFY_DONE;
if (!dev_is_ethdev(dev) && !lapbeth_get_x25_dev(dev))
return NOTIFY_DONE;
switch (event) {
case NETDEV_UP:
/* New ethernet device -> new LAPB interface */
if (!lapbeth_get_x25_dev(dev))
lapbeth_new_device(dev);
break;
case NETDEV_GOING_DOWN:
/* ethernet device closes -> close LAPB interface */
lapbeth = lapbeth_get_x25_dev(dev);
if (lapbeth)
dev_close(lapbeth->axdev);
break;
case NETDEV_UNREGISTER:
/* ethernet device disappears -> remove LAPB interface */
lapbeth = lapbeth_get_x25_dev(dev);
if (lapbeth)
lapbeth_free_device(lapbeth);
break;
}
return NOTIFY_DONE;
}
/* ------------------------------------------------------------------------ */
static struct packet_type lapbeth_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_DEC),
.func = lapbeth_rcv,
};
static struct notifier_block lapbeth_dev_notifier = {
.notifier_call = lapbeth_device_event,
};
static const char banner[] __initconst =
KERN_INFO "LAPB Ethernet driver version 0.02\n";
static int __init lapbeth_init_driver(void)
{
dev_add_pack(&lapbeth_packet_type);
register_netdevice_notifier(&lapbeth_dev_notifier);
printk(banner);
return 0;
}
module_init(lapbeth_init_driver);
static void __exit lapbeth_cleanup_driver(void)
{
struct lapbethdev *lapbeth;
struct list_head *entry, *tmp;
dev_remove_pack(&lapbeth_packet_type);
unregister_netdevice_notifier(&lapbeth_dev_notifier);
rtnl_lock();
list_for_each_safe(entry, tmp, &lapbeth_devices) {
lapbeth = list_entry(entry, struct lapbethdev, node);
dev_put(lapbeth->ethdev);
unregister_netdevice(lapbeth->axdev);
}
rtnl_unlock();
}
module_exit(lapbeth_cleanup_driver);
MODULE_AUTHOR("Jonathan Naylor <[email protected]>");
MODULE_DESCRIPTION("The unofficial LAPB over Ethernet driver");
MODULE_LICENSE("GPL");
|
linux-master
|
drivers/net/wan/lapbether.c
|
// SPDX-License-Identifier: GPL-2.0-or-later
/* FarSync WAN driver for Linux (2.6.x kernel version)
*
* Actually sync driver for X.21, V.35 and V.24 on FarSync T-series cards
*
* Copyright (C) 2001-2004 FarSite Communications Ltd.
* www.farsite.co.uk
*
* Author: R.J.Dunlop <[email protected]>
* Maintainer: Kevin Curtis <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/pci.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/if.h>
#include <linux/hdlc.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include "farsync.h"
/* Module info
*/
MODULE_AUTHOR("R.J.Dunlop <[email protected]>");
MODULE_DESCRIPTION("FarSync T-Series WAN driver. FarSite Communications Ltd.");
MODULE_LICENSE("GPL");
/* Driver configuration and global parameters
* ==========================================
*/
/* Number of ports (per card) and cards supported
*/
#define FST_MAX_PORTS 4
#define FST_MAX_CARDS 32
/* Default parameters for the link
*/
#define FST_TX_QUEUE_LEN 100 /* At 8Mbps a longer queue length is
* useful
*/
#define FST_TXQ_DEPTH 16 /* This one is for the buffering
* of frames on the way down to the card
* so that we can keep the card busy
* and maximise throughput
*/
#define FST_HIGH_WATER_MARK 12 /* Point at which we flow control
* network layer
*/
#define FST_LOW_WATER_MARK 8 /* Point at which we remove flow
* control from network layer
*/
#define FST_MAX_MTU 8000 /* Huge but possible */
#define FST_DEF_MTU 1500 /* Common sane value */
#define FST_TX_TIMEOUT (2 * HZ)
#ifdef ARPHRD_RAWHDLC
#define ARPHRD_MYTYPE ARPHRD_RAWHDLC /* Raw frames */
#else
#define ARPHRD_MYTYPE ARPHRD_HDLC /* Cisco-HDLC (keepalives etc) */
#endif
/* Modules parameters and associated variables
*/
static int fst_txq_low = FST_LOW_WATER_MARK;
static int fst_txq_high = FST_HIGH_WATER_MARK;
static int fst_max_reads = 7;
static int fst_excluded_cards;
static int fst_excluded_list[FST_MAX_CARDS];
module_param(fst_txq_low, int, 0);
module_param(fst_txq_high, int, 0);
module_param(fst_max_reads, int, 0);
module_param(fst_excluded_cards, int, 0);
module_param_array(fst_excluded_list, int, NULL, 0);
/* Card shared memory layout
* =========================
*/
#pragma pack(1)
/* This information is derived in part from the FarSite FarSync Smc.h
* file. Unfortunately various name clashes and the non-portability of the
* bit field declarations in that file have meant that I have chosen to
* recreate the information here.
*
* The SMC (Shared Memory Configuration) has a version number that is
* incremented every time there is a significant change. This number can
* be used to check that we have not got out of step with the firmware
* contained in the .CDE files.
*/
#define SMC_VERSION 24
#define FST_MEMSIZE 0x100000 /* Size of card memory (1Mb) */
#define SMC_BASE 0x00002000L /* Base offset of the shared memory window main
* configuration structure
*/
#define BFM_BASE 0x00010000L /* Base offset of the shared memory window DMA
* buffers
*/
#define LEN_TX_BUFFER 8192 /* Size of packet buffers */
#define LEN_RX_BUFFER 8192
#define LEN_SMALL_TX_BUFFER 256 /* Size of obsolete buffs used for DOS diags */
#define LEN_SMALL_RX_BUFFER 256
#define NUM_TX_BUFFER 2 /* Must be power of 2. Fixed by firmware */
#define NUM_RX_BUFFER 8
/* Interrupt retry time in milliseconds */
#define INT_RETRY_TIME 2
/* The Am186CH/CC processors support a SmartDMA mode using circular pools
* of buffer descriptors. The structure is almost identical to that used
* in the LANCE Ethernet controllers. Details available as PDF from the
* AMD web site: https://www.amd.com/products/epd/processors/\
* 2.16bitcont/3.am186cxfa/a21914/21914.pdf
*/
struct txdesc { /* Transmit descriptor */
volatile u16 ladr; /* Low order address of packet. This is a
* linear address in the Am186 memory space
*/
volatile u8 hadr; /* High order address. Low 4 bits only, high 4
* bits must be zero
*/
volatile u8 bits; /* Status and config */
volatile u16 bcnt; /* 2s complement of packet size in low 15 bits.
* Transmit terminal count interrupt enable in
* top bit.
*/
u16 unused; /* Not used in Tx */
};
struct rxdesc { /* Receive descriptor */
volatile u16 ladr; /* Low order address of packet */
volatile u8 hadr; /* High order address */
volatile u8 bits; /* Status and config */
volatile u16 bcnt; /* 2s complement of buffer size in low 15 bits.
* Receive terminal count interrupt enable in
* top bit.
*/
volatile u16 mcnt; /* Message byte count (15 bits) */
};
/* Convert a length into the 15 bit 2's complement */
/* #define cnv_bcnt(len) (( ~(len) + 1 ) & 0x7FFF ) */
/* Since we need to set the high bit to enable the completion interrupt this
* can be made a lot simpler
*/
#define cnv_bcnt(len) (-(len))
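/* e.g. cnv_bcnt(1500) = -1500 = 0xFA24 as a u16: the low 15 bits carry
 * the 2's complement count (0x7A24) and bit 15 comes out set, which
 * doubles as the terminal count interrupt enable described above.
 */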
/* Status and config bits for the above */
#define DMA_OWN 0x80 /* SmartDMA owns the descriptor */
#define TX_STP 0x02 /* Tx: start of packet */
#define TX_ENP 0x01 /* Tx: end of packet */
#define RX_ERR 0x40 /* Rx: error (OR of next 4 bits) */
#define RX_FRAM 0x20 /* Rx: framing error */
#define RX_OFLO 0x10 /* Rx: overflow error */
#define RX_CRC 0x08 /* Rx: CRC error */
#define RX_HBUF 0x04 /* Rx: buffer error */
#define RX_STP 0x02 /* Rx: start of packet */
#define RX_ENP 0x01 /* Rx: end of packet */
/* Interrupts from the card are caused by various events which are presented
* in a circular buffer as several events may be processed on one physical int
*/
#define MAX_CIRBUFF 32
struct cirbuff {
u8 rdindex; /* read, then increment and wrap */
u8 wrindex; /* write, then increment and wrap */
u8 evntbuff[MAX_CIRBUFF];
};
/* Interrupt event codes.
* Where appropriate the two low order bits indicate the port number
*/
#define CTLA_CHG 0x18 /* Control signal changed */
#define CTLB_CHG 0x19
#define CTLC_CHG 0x1A
#define CTLD_CHG 0x1B
#define INIT_CPLT 0x20 /* Initialisation complete */
#define INIT_FAIL 0x21 /* Initialisation failed */
#define ABTA_SENT 0x24 /* Abort sent */
#define ABTB_SENT 0x25
#define ABTC_SENT 0x26
#define ABTD_SENT 0x27
#define TXA_UNDF 0x28 /* Transmission underflow */
#define TXB_UNDF 0x29
#define TXC_UNDF 0x2A
#define TXD_UNDF 0x2B
#define F56_INT 0x2C
#define M32_INT 0x2D
#define TE1_ALMA 0x30
/* Port physical configuration. See farsync.h for field values */
struct port_cfg {
u16 lineInterface; /* Physical interface type */
u8 x25op; /* Unused at present */
u8 internalClock; /* 1 => internal clock, 0 => external */
u8 transparentMode; /* 1 => on, 0 => off */
u8 invertClock; /* 0 => normal, 1 => inverted */
u8 padBytes[6]; /* Padding */
u32 lineSpeed; /* Speed in bps */
};
/* TE1 port physical configuration */
struct su_config {
u32 dataRate;
u8 clocking;
u8 framing;
u8 structure;
u8 interface;
u8 coding;
u8 lineBuildOut;
u8 equalizer;
u8 transparentMode;
u8 loopMode;
u8 range;
u8 txBufferMode;
u8 rxBufferMode;
u8 startingSlot;
u8 losThreshold;
u8 enableIdleCode;
u8 idleCode;
u8 spare[44];
};
/* TE1 Status */
struct su_status {
u32 receiveBufferDelay;
u32 framingErrorCount;
u32 codeViolationCount;
u32 crcErrorCount;
u32 lineAttenuation;
u8 portStarted;
u8 lossOfSignal;
u8 receiveRemoteAlarm;
u8 alarmIndicationSignal;
u8 spare[40];
};
/* Finally sling all the above together into the shared memory structure.
* Sorry it's a hodge podge of arrays, structures and unused bits, it's been
* evolving under NT for some time so I guess we're stuck with it.
* The structure starts at offset SMC_BASE.
* See farsync.h for some field values.
*/
struct fst_shared {
/* DMA descriptor rings */
struct rxdesc rxDescrRing[FST_MAX_PORTS][NUM_RX_BUFFER];
struct txdesc txDescrRing[FST_MAX_PORTS][NUM_TX_BUFFER];
/* Obsolete small buffers */
u8 smallRxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_SMALL_RX_BUFFER];
u8 smallTxBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_SMALL_TX_BUFFER];
u8 taskStatus; /* 0x00 => initialising, 0x01 => running,
* 0xFF => halted
*/
u8 interruptHandshake; /* Set to 0x01 by adapter to signal interrupt,
* set to 0xEE by host to acknowledge interrupt
*/
u16 smcVersion; /* Must match SMC_VERSION */
u32 smcFirmwareVersion; /* 0xIIVVRRBB where II = product ID, VV = major
* version, RR = revision and BB = build
*/
u16 txa_done; /* Obsolete completion flags */
u16 rxa_done;
u16 txb_done;
u16 rxb_done;
u16 txc_done;
u16 rxc_done;
u16 txd_done;
u16 rxd_done;
u16 mailbox[4]; /* Diagnostics mailbox. Not used */
struct cirbuff interruptEvent; /* interrupt causes */
u32 v24IpSts[FST_MAX_PORTS]; /* V.24 control input status */
u32 v24OpSts[FST_MAX_PORTS]; /* V.24 control output status */
struct port_cfg portConfig[FST_MAX_PORTS];
u16 clockStatus[FST_MAX_PORTS]; /* lsb: 0=> present, 1=> absent */
u16 cableStatus; /* lsb: 0=> present, 1=> absent */
u16 txDescrIndex[FST_MAX_PORTS]; /* transmit descriptor ring index */
u16 rxDescrIndex[FST_MAX_PORTS]; /* receive descriptor ring index */
u16 portMailbox[FST_MAX_PORTS][2]; /* command, modifier */
u16 cardMailbox[4]; /* Not used */
/* Number of times the card thinks the host has
* missed an interrupt by not acknowledging
* within 2mS (I guess NT has problems)
*/
u32 interruptRetryCount;
/* Driver private data used as an ID. We'll not
* use this as I'd rather keep such things
* in main memory rather than on the PCI bus
*/
u32 portHandle[FST_MAX_PORTS];
/* Count of Tx underflows for stats */
u32 transmitBufferUnderflow[FST_MAX_PORTS];
/* Debounced V.24 control input status */
u32 v24DebouncedSts[FST_MAX_PORTS];
/* Adapter debounce timers. Don't touch */
u32 ctsTimer[FST_MAX_PORTS];
u32 ctsTimerRun[FST_MAX_PORTS];
u32 dcdTimer[FST_MAX_PORTS];
u32 dcdTimerRun[FST_MAX_PORTS];
u32 numberOfPorts; /* Number of ports detected at startup */
u16 _reserved[64];
u16 cardMode; /* Bit-mask to enable features:
* Bit 0: 1 enables LED identify mode
*/
u16 portScheduleOffset;
struct su_config suConfig; /* TE1 Bits */
struct su_status suStatus;
u32 endOfSmcSignature; /* endOfSmcSignature MUST be the last member of
* the structure and marks the end of shared
* memory. Adapter code initializes it as
* END_SIG.
*/
};
/* endOfSmcSignature value */
#define END_SIG 0x12345678
/* Mailbox values. (portMailbox) */
#define NOP 0 /* No operation */
#define ACK 1 /* Positive acknowledgement to PC driver */
#define NAK 2 /* Negative acknowledgement to PC driver */
#define STARTPORT 3 /* Start an HDLC port */
#define STOPPORT 4 /* Stop an HDLC port */
#define ABORTTX 5 /* Abort the transmitter for a port */
#define SETV24O 6 /* Set V24 outputs */
/* PLX Chip Register Offsets */
#define CNTRL_9052 0x50 /* Control Register */
#define CNTRL_9054 0x6c /* Control Register */
#define INTCSR_9052 0x4c /* Interrupt control/status register */
#define INTCSR_9054 0x68 /* Interrupt control/status register */
/* 9054 DMA Registers */
/* Note that we will be using DMA Channel 0 for copying rx data
* and Channel 1 for copying tx data
*/
#define DMAMODE0 0x80
#define DMAPADR0 0x84
#define DMALADR0 0x88
#define DMASIZ0 0x8c
#define DMADPR0 0x90
#define DMAMODE1 0x94
#define DMAPADR1 0x98
#define DMALADR1 0x9c
#define DMASIZ1 0xa0
#define DMADPR1 0xa4
#define DMACSR0 0xa8
#define DMACSR1 0xa9
#define DMAARB 0xac
#define DMATHR 0xb0
#define DMADAC0 0xb4
#define DMADAC1 0xb8
#define DMAMARBR 0xac
#define FST_MIN_DMA_LEN 64
#define FST_RX_DMA_INT 0x01
#define FST_TX_DMA_INT 0x02
#define FST_CARD_INT 0x04
/* Larger buffers are positioned in memory at offset BFM_BASE */
struct buf_window {
u8 txBuffer[FST_MAX_PORTS][NUM_TX_BUFFER][LEN_TX_BUFFER];
u8 rxBuffer[FST_MAX_PORTS][NUM_RX_BUFFER][LEN_RX_BUFFER];
};
/* Calculate offset of a buffer object within the shared memory window */
#define BUF_OFFSET(X) (BFM_BASE + offsetof(struct buf_window, X))
#pragma pack()
/* Device driver private information
* =================================
*/
/* Per port (line or channel) information
*/
struct fst_port_info {
struct net_device *dev; /* Device struct - must be first */
struct fst_card_info *card; /* Card we're associated with */
int index; /* Port index on the card */
int hwif; /* Line hardware (lineInterface copy) */
int run; /* Port is running */
int mode; /* Normal or FarSync raw */
int rxpos; /* Next Rx buffer to use */
int txpos; /* Next Tx buffer to use */
int txipos; /* Next Tx buffer to check for free */
int start; /* Indication of start/stop to network */
/* A sixteen entry transmit queue
*/
int txqs; /* index to get next buffer to tx */
int txqe; /* index to queue next packet */
struct sk_buff *txq[FST_TXQ_DEPTH]; /* The queue */
int rxqdepth;
};
/* Per card information
*/
struct fst_card_info {
char __iomem *mem; /* Card memory mapped to kernel space */
char __iomem *ctlmem; /* Control memory for PCI cards */
unsigned int phys_mem; /* Physical memory window address */
unsigned int phys_ctlmem; /* Physical control memory address */
unsigned int irq; /* Interrupt request line number */
unsigned int nports; /* Number of serial ports */
unsigned int type; /* Type index of card */
unsigned int state; /* State of card */
spinlock_t card_lock; /* Lock for SMP access */
unsigned short pci_conf; /* PCI card config in I/O space */
/* Per port info */
struct fst_port_info ports[FST_MAX_PORTS];
struct pci_dev *device; /* Information about the pci device */
int card_no; /* Inst of the card on the system */
int family; /* TxP or TxU */
int dmarx_in_progress;
int dmatx_in_progress;
unsigned long int_count;
unsigned long int_time_ave;
void *rx_dma_handle_host;
dma_addr_t rx_dma_handle_card;
void *tx_dma_handle_host;
dma_addr_t tx_dma_handle_card;
struct sk_buff *dma_skb_rx;
struct fst_port_info *dma_port_rx;
struct fst_port_info *dma_port_tx;
int dma_len_rx;
int dma_len_tx;
int dma_txpos;
int dma_rxpos;
};
/* Convert an HDLC device pointer into a port info pointer and similar */
#define dev_to_port(D) (dev_to_hdlc(D)->priv)
#define port_to_dev(P) ((P)->dev)
/* Shared memory window access macros
*
* We have a nice memory based structure above, which could be directly
* mapped on i386 but might not work on other architectures unless we use
* the readb,w,l and writeb,w,l macros. Unfortunately these macros take
* physical offsets so we have to convert. The only saving grace is that
* this should all collapse back to a simple indirection eventually.
*/
#define WIN_OFFSET(X) ((long)&(((struct fst_shared *)SMC_BASE)->X))
#define FST_RDB(C, E) (readb((C)->mem + WIN_OFFSET(E)))
#define FST_RDW(C, E) (readw((C)->mem + WIN_OFFSET(E)))
#define FST_RDL(C, E) (readl((C)->mem + WIN_OFFSET(E)))
#define FST_WRB(C, E, B) (writeb((B), (C)->mem + WIN_OFFSET(E)))
#define FST_WRW(C, E, W) (writew((W), (C)->mem + WIN_OFFSET(E)))
#define FST_WRL(C, E, L) (writel((L), (C)->mem + WIN_OFFSET(E)))
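/* e.g. FST_RDW(card, txDescrIndex[2]) reads the u16 at
 * card->mem + SMC_BASE + offsetof(struct fst_shared, txDescrIndex[2]);
 * the cast through SMC_BASE is just offsetof() with the window base
 * folded in.
 */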
/* Debug support
*/
#if FST_DEBUG
static int fst_debug_mask = { FST_DEBUG };
/* Most common debug activity is to print something if the corresponding bit
* is set in the debug mask. Note: this uses a non-ANSI extension in GCC to
 * support variable numbers of macro parameters. The do { } while (0)
 * wrapper prevents us from eating someone else's else clause.
*/
#define dbg(F, fmt, args...) \
do { \
if (fst_debug_mask & (F)) \
printk(KERN_DEBUG pr_fmt(fmt), ##args); \
} while (0)
#else
#define dbg(F, fmt, args...) \
do { \
if (0) \
printk(KERN_DEBUG pr_fmt(fmt), ##args); \
} while (0)
#endif
/* PCI ID lookup table
*/
static const struct pci_device_id fst_pci_dev_id[] = {
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2P},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4P, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T4P},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T1U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T1U},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T2U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T2U},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_T4U, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_T4U},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
{PCI_VENDOR_ID_FARSITE, PCI_DEVICE_ID_FARSITE_TE1C, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, FST_TYPE_TE1},
{0,} /* End */
};
MODULE_DEVICE_TABLE(pci, fst_pci_dev_id);
/* Device Driver Work Queues
*
* So that we don't spend too much time processing events in the
* Interrupt Service routine, we will declare a work queue per Card
* and make the ISR schedule a task in the queue for later execution.
* In the 2.4 kernel we used to use the immediate queue for BHs.
* Now that those are gone, tasklets seem to be a better fit than work
* queues.
*/
static void do_bottom_half_tx(struct fst_card_info *card);
static void do_bottom_half_rx(struct fst_card_info *card);
static void fst_process_tx_work_q(struct tasklet_struct *unused);
static void fst_process_int_work_q(struct tasklet_struct *unused);
static DECLARE_TASKLET(fst_tx_task, fst_process_tx_work_q);
static DECLARE_TASKLET(fst_int_task, fst_process_int_work_q);
static struct fst_card_info *fst_card_array[FST_MAX_CARDS];
static DEFINE_SPINLOCK(fst_work_q_lock);
static u64 fst_work_txq;
static u64 fst_work_intq;
static void
fst_q_work_item(u64 *queue, int card_index)
{
unsigned long flags;
u64 mask;
/* Grab the queue exclusively
*/
spin_lock_irqsave(&fst_work_q_lock, flags);
/* Making an entry in the queue is simply a matter of setting
* a bit for the card, indicating that there is work to do in the
* bottom half for that card. Note the limitation of 64 cards;
* that ought to be enough.
*/
mask = (u64)1 << card_index;
*queue |= mask;
spin_unlock_irqrestore(&fst_work_q_lock, flags);
}
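/* Typical call sequence (mirrors what the ISR below does): mark the card
* in the interrupt work queue, then kick the corresponding tasklet.
*
* fst_q_work_item(&fst_work_intq, card->card_no);
* tasklet_schedule(&fst_int_task);
*/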
static void
fst_process_tx_work_q(struct tasklet_struct *unused)
{
unsigned long flags;
u64 work_txq;
int i;
/* Grab the queue exclusively
*/
dbg(DBG_TX, "fst_process_tx_work_q\n");
spin_lock_irqsave(&fst_work_q_lock, flags);
work_txq = fst_work_txq;
fst_work_txq = 0;
spin_unlock_irqrestore(&fst_work_q_lock, flags);
/* Call the bottom half for each card with work waiting
*/
for (i = 0; i < FST_MAX_CARDS; i++) {
if (work_txq & 0x01) {
if (fst_card_array[i]) {
dbg(DBG_TX, "Calling tx bh for card %d\n", i);
do_bottom_half_tx(fst_card_array[i]);
}
}
work_txq = work_txq >> 1;
}
}
static void
fst_process_int_work_q(struct tasklet_struct *unused)
{
unsigned long flags;
u64 work_intq;
int i;
/* Grab the queue exclusively
*/
dbg(DBG_INTR, "fst_process_int_work_q\n");
spin_lock_irqsave(&fst_work_q_lock, flags);
work_intq = fst_work_intq;
fst_work_intq = 0;
spin_unlock_irqrestore(&fst_work_q_lock, flags);
/* Call the bottom half for each card with work waiting
*/
for (i = 0; i < FST_MAX_CARDS; i++) {
if (work_intq & 0x01) {
if (fst_card_array[i]) {
dbg(DBG_INTR,
"Calling rx & tx bh for card %d\n", i);
do_bottom_half_rx(fst_card_array[i]);
do_bottom_half_tx(fst_card_array[i]);
}
}
work_intq = work_intq >> 1;
}
}
/* Card control functions
* ======================
*/
/* Place the processor in reset state
*
* Used to be a simple write to card control space but a glitch in the latest
* AMD Am186CH processor means that we now have to do it by asserting and de-
asserting the PLX chip PCI Adapter Software Reset (bit 30 in the CNTRL
register at offset 9052_CNTRL). Note the updates for the TXU.
*/
static inline void
fst_cpureset(struct fst_card_info *card)
{
unsigned char interrupt_line_register;
unsigned int regval;
if (card->family == FST_FAMILY_TXU) {
if (pci_read_config_byte
(card->device, PCI_INTERRUPT_LINE, &interrupt_line_register)) {
dbg(DBG_ASS,
"Error in reading interrupt line register\n");
}
/* Assert PLX software reset and Am186 hardware reset
* and then deassert the PLX software reset but 186 still in reset
*/
outw(0x440f, card->pci_conf + CNTRL_9054 + 2);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
/* We are delaying here to allow the 9054 to reset itself
*/
usleep_range(10, 20);
outw(0x240f, card->pci_conf + CNTRL_9054 + 2);
/* We are delaying here to allow the 9054 to reload its eeprom
*/
usleep_range(10, 20);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
if (pci_write_config_byte
(card->device, PCI_INTERRUPT_LINE, interrupt_line_register)) {
dbg(DBG_ASS,
"Error in writing interrupt line register\n");
}
} else {
regval = inl(card->pci_conf + CNTRL_9052);
outl(regval | 0x40000000, card->pci_conf + CNTRL_9052);
outl(regval & ~0x40000000, card->pci_conf + CNTRL_9052);
}
}
/* Release the processor from reset
*/
static inline void
fst_cpurelease(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU) {
/* Force posted writes to complete
*/
(void)readb(card->mem);
/* Release LRESET DO = 1
* Then release Local Hold, DO = 1
*/
outw(0x040e, card->pci_conf + CNTRL_9054 + 2);
outw(0x040f, card->pci_conf + CNTRL_9054 + 2);
} else {
(void)readb(card->ctlmem);
}
}
/* Clear the card's interrupt flag
*/
static inline void
fst_clear_intr(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU) {
(void)readb(card->ctlmem);
} else {
/* Poke the appropriate PLX chip register (same as enabling interrupts)
*/
outw(0x0543, card->pci_conf + INTCSR_9052);
}
}
/* Enable card interrupts
*/
static inline void
fst_enable_intr(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU)
outl(0x0f0c0900, card->pci_conf + INTCSR_9054);
else
outw(0x0543, card->pci_conf + INTCSR_9052);
}
/* Disable card interrupts
*/
static inline void
fst_disable_intr(struct fst_card_info *card)
{
if (card->family == FST_FAMILY_TXU)
outl(0x00000000, card->pci_conf + INTCSR_9054);
else
outw(0x0000, card->pci_conf + INTCSR_9052);
}
/* Process the result of trying to pass a received frame up the stack
*/
static void
fst_process_rx_status(int rx_status, char *name)
{
switch (rx_status) {
case NET_RX_SUCCESS:
{
/* Nothing to do here
*/
break;
}
case NET_RX_DROP:
{
dbg(DBG_ASS, "%s: Received packet dropped\n", name);
break;
}
}
}
/* Initialise DMA for the PLX 9054
*/
static inline void
fst_init_dma(struct fst_card_info *card)
{
/* This is only required for the PLX 9054
*/
if (card->family == FST_FAMILY_TXU) {
pci_set_master(card->device);
outl(0x00020441, card->pci_conf + DMAMODE0);
outl(0x00020441, card->pci_conf + DMAMODE1);
outl(0x0, card->pci_conf + DMATHR);
}
}
/* Tx dma complete interrupt
*/
static void
fst_tx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
int len, int txpos)
{
struct net_device *dev = port_to_dev(port);
/* Everything is now set, just tell the card to go
*/
dbg(DBG_TX, "fst_tx_dma_complete\n");
FST_WRB(card, txDescrRing[port->index][txpos].bits,
DMA_OWN | TX_STP | TX_ENP);
dev->stats.tx_packets++;
dev->stats.tx_bytes += len;
netif_trans_update(dev);
}
/* Mark it for our own raw sockets interface
*/
static __be16 farsync_type_trans(struct sk_buff *skb, struct net_device *dev)
{
skb->dev = dev;
skb_reset_mac_header(skb);
skb->pkt_type = PACKET_HOST;
return htons(ETH_P_CUST);
}
/* Rx dma complete interrupt
*/
static void
fst_rx_dma_complete(struct fst_card_info *card, struct fst_port_info *port,
int len, struct sk_buff *skb, int rxp)
{
struct net_device *dev = port_to_dev(port);
int pi;
int rx_status;
dbg(DBG_TX, "fst_rx_dma_complete\n");
pi = port->index;
skb_put_data(skb, card->rx_dma_handle_host, len);
/* Reset buffer descriptor */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
/* Update stats */
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
/* Push upstream */
dbg(DBG_RX, "Pushing the frame up the stack\n");
if (port->mode == FST_RAW)
skb->protocol = farsync_type_trans(skb, dev);
else
skb->protocol = hdlc_type_trans(skb, dev);
rx_status = netif_rx(skb);
fst_process_rx_status(rx_status, port_to_dev(port)->name);
if (rx_status == NET_RX_DROP)
dev->stats.rx_dropped++;
}
/* Receive a frame through the DMA
*/
static inline void
fst_rx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
/* This routine will setup the DMA and start it
*/
dbg(DBG_RX, "In fst_rx_dma %x %x %d\n", (u32)dma, mem, len);
if (card->dmarx_in_progress)
dbg(DBG_ASS, "In fst_rx_dma while dma in progress\n");
outl(dma, card->pci_conf + DMAPADR0); /* Copy to here */
outl(mem, card->pci_conf + DMALADR0); /* from here */
outl(len, card->pci_conf + DMASIZ0); /* for this length */
outl(0x0000000c, card->pci_conf + DMADPR0); /* In this direction */
/* We use the dmarx_in_progress flag to flag the channel as busy
*/
card->dmarx_in_progress = 1;
outb(0x03, card->pci_conf + DMACSR0); /* Start the transfer */
}
/* Send a frame through the DMA
*/
static inline void
fst_tx_dma(struct fst_card_info *card, dma_addr_t dma, u32 mem, int len)
{
/* This routine will setup the DMA and start it.
*/
dbg(DBG_TX, "In fst_tx_dma %x %x %d\n", (u32)dma, mem, len);
if (card->dmatx_in_progress)
dbg(DBG_ASS, "In fst_tx_dma while dma in progress\n");
outl(dma, card->pci_conf + DMAPADR1); /* Copy from here */
outl(mem, card->pci_conf + DMALADR1); /* to here */
outl(len, card->pci_conf + DMASIZ1); /* for this length */
outl(0x00000004, card->pci_conf + DMADPR1); /* In this direction */
/* We use the dmatx_in_progress to flag the channel as busy
*/
card->dmatx_in_progress = 1;
outb(0x03, card->pci_conf + DMACSR1); /* Start the transfer */
}
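/* Both routines follow the same PLX 9054 block-DMA recipe: PCI address,
* local (card) address, byte count, then a descriptor-pointer value whose
* direction bit differs (0xc for card-to-host on receive, 0x4 for
* host-to-card on transmit, presumably per the PLX 9054 data book), and
* finally enable-and-start via DMACSRx. Completion is signalled through
* INTCSR_9054 and handled in fst_intr() below.
*/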
/* Issue a Mailbox command for a port.
* Note we issue them on a fire-and-forget basis, not expecting to see an
* error and not waiting for completion.
*/
static void
fst_issue_cmd(struct fst_port_info *port, unsigned short cmd)
{
struct fst_card_info *card;
unsigned short mbval;
unsigned long flags;
int safety;
card = port->card;
spin_lock_irqsave(&card->card_lock, flags);
mbval = FST_RDW(card, portMailbox[port->index][0]);
safety = 0;
/* Wait for any previous command to complete */
while (mbval > NAK) {
spin_unlock_irqrestore(&card->card_lock, flags);
schedule_timeout_uninterruptible(1);
spin_lock_irqsave(&card->card_lock, flags);
if (++safety > 2000) {
pr_err("Mailbox safety timeout\n");
break;
}
mbval = FST_RDW(card, portMailbox[port->index][0]);
}
if (safety > 0)
dbg(DBG_CMD, "Mailbox clear after %d jiffies\n", safety);
if (mbval == NAK)
dbg(DBG_CMD, "issue_cmd: previous command was NAK'd\n");
FST_WRW(card, portMailbox[port->index][0], cmd);
if (cmd == ABORTTX || cmd == STARTPORT) {
port->txpos = 0;
port->txipos = 0;
port->start = 0;
}
spin_unlock_irqrestore(&card->card_lock, flags);
}
/* Port output signals control
*/
static inline void
fst_op_raise(struct fst_port_info *port, unsigned int outputs)
{
outputs |= FST_RDL(port->card, v24OpSts[port->index]);
FST_WRL(port->card, v24OpSts[port->index], outputs);
if (port->run)
fst_issue_cmd(port, SETV24O);
}
static inline void
fst_op_lower(struct fst_port_info *port, unsigned int outputs)
{
outputs = ~outputs & FST_RDL(port->card, v24OpSts[port->index]);
FST_WRL(port->card, v24OpSts[port->index], outputs);
if (port->run)
fst_issue_cmd(port, SETV24O);
}
/* Setup port Rx buffers
*/
static void
fst_rx_config(struct fst_port_info *port)
{
int i;
int pi;
unsigned int offset;
unsigned long flags;
struct fst_card_info *card;
pi = port->index;
card = port->card;
spin_lock_irqsave(&card->card_lock, flags);
for (i = 0; i < NUM_RX_BUFFER; i++) {
offset = BUF_OFFSET(rxBuffer[pi][i][0]);
FST_WRW(card, rxDescrRing[pi][i].ladr, (u16)offset);
FST_WRB(card, rxDescrRing[pi][i].hadr, (u8)(offset >> 16));
FST_WRW(card, rxDescrRing[pi][i].bcnt, cnv_bcnt(LEN_RX_BUFFER));
FST_WRW(card, rxDescrRing[pi][i].mcnt, LEN_RX_BUFFER);
FST_WRB(card, rxDescrRing[pi][i].bits, DMA_OWN);
}
port->rxpos = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
}
/* Setup port Tx buffers
*/
static void
fst_tx_config(struct fst_port_info *port)
{
int i;
int pi;
unsigned int offset;
unsigned long flags;
struct fst_card_info *card;
pi = port->index;
card = port->card;
spin_lock_irqsave(&card->card_lock, flags);
for (i = 0; i < NUM_TX_BUFFER; i++) {
offset = BUF_OFFSET(txBuffer[pi][i][0]);
FST_WRW(card, txDescrRing[pi][i].ladr, (u16)offset);
FST_WRB(card, txDescrRing[pi][i].hadr, (u8)(offset >> 16));
FST_WRW(card, txDescrRing[pi][i].bcnt, 0);
FST_WRB(card, txDescrRing[pi][i].bits, 0);
}
port->txpos = 0;
port->txipos = 0;
port->start = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
}
/* TE1 Alarm change interrupt event
*/
static void
fst_intr_te1_alarm(struct fst_card_info *card, struct fst_port_info *port)
{
u8 los;
u8 rra;
u8 ais;
los = FST_RDB(card, suStatus.lossOfSignal);
rra = FST_RDB(card, suStatus.receiveRemoteAlarm);
ais = FST_RDB(card, suStatus.alarmIndicationSignal);
if (los) {
/* Lost the link
*/
if (netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "Net carrier off\n");
netif_carrier_off(port_to_dev(port));
}
} else {
/* Link available
*/
if (!netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "Net carrier on\n");
netif_carrier_on(port_to_dev(port));
}
}
if (los)
dbg(DBG_INTR, "Assert LOS Alarm\n");
else
dbg(DBG_INTR, "De-assert LOS Alarm\n");
if (rra)
dbg(DBG_INTR, "Assert RRA Alarm\n");
else
dbg(DBG_INTR, "De-assert RRA Alarm\n");
if (ais)
dbg(DBG_INTR, "Assert AIS Alarm\n");
else
dbg(DBG_INTR, "De-assert AIS Alarm\n");
}
/* Control signal change interrupt event
*/
static void
fst_intr_ctlchg(struct fst_card_info *card, struct fst_port_info *port)
{
int signals;
signals = FST_RDL(card, v24DebouncedSts[port->index]);
if (signals & ((port->hwif == X21 || port->hwif == X21D)
? IPSTS_INDICATE : IPSTS_DCD)) {
if (!netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "DCD active\n");
netif_carrier_on(port_to_dev(port));
}
} else {
if (netif_carrier_ok(port_to_dev(port))) {
dbg(DBG_INTR, "DCD lost\n");
netif_carrier_off(port_to_dev(port));
}
}
}
/* Log Rx Errors
*/
static void
fst_log_rx_error(struct fst_card_info *card, struct fst_port_info *port,
unsigned char dmabits, int rxp, unsigned short len)
{
struct net_device *dev = port_to_dev(port);
/* Increment the appropriate error counter
*/
dev->stats.rx_errors++;
if (dmabits & RX_OFLO) {
dev->stats.rx_fifo_errors++;
dbg(DBG_ASS, "Rx fifo error on card %d port %d buffer %d\n",
card->card_no, port->index, rxp);
}
if (dmabits & RX_CRC) {
dev->stats.rx_crc_errors++;
dbg(DBG_ASS, "Rx crc error on card %d port %d\n",
card->card_no, port->index);
}
if (dmabits & RX_FRAM) {
dev->stats.rx_frame_errors++;
dbg(DBG_ASS, "Rx frame error on card %d port %d\n",
card->card_no, port->index);
}
if (dmabits == (RX_STP | RX_ENP)) {
dev->stats.rx_length_errors++;
dbg(DBG_ASS, "Rx length error (%d) on card %d port %d\n",
len, card->card_no, port->index);
}
}
/* Rx Error Recovery
*/
static void
fst_recover_rx_error(struct fst_card_info *card, struct fst_port_info *port,
unsigned char dmabits, int rxp, unsigned short len)
{
int i;
int pi;
pi = port->index;
/* Discard buffer descriptors until we see the start of the
* next frame. Note that for long frames this could be in
* a subsequent interrupt.
*/
i = 0;
while ((dmabits & (DMA_OWN | RX_STP)) == 0) {
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
rxp = (rxp + 1) % NUM_RX_BUFFER;
if (++i > NUM_RX_BUFFER) {
dbg(DBG_ASS, "intr_rx: Discarding more bufs"
" than we have\n");
break;
}
dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
dbg(DBG_ASS, "DMA Bits of next buffer was %x\n", dmabits);
}
dbg(DBG_ASS, "There were %d subsequent buffers in error\n", i);
/* Discard the terminal buffer */
if (!(dmabits & DMA_OWN)) {
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
rxp = (rxp + 1) % NUM_RX_BUFFER;
}
port->rxpos = rxp;
}
/* Rx complete interrupt
*/
static void
fst_intr_rx(struct fst_card_info *card, struct fst_port_info *port)
{
unsigned char dmabits;
int pi;
int rxp;
int rx_status;
unsigned short len;
struct sk_buff *skb;
struct net_device *dev = port_to_dev(port);
/* Check we have a buffer to process */
pi = port->index;
rxp = port->rxpos;
dmabits = FST_RDB(card, rxDescrRing[pi][rxp].bits);
if (dmabits & DMA_OWN) {
dbg(DBG_RX | DBG_INTR, "intr_rx: No buffer port %d pos %d\n",
pi, rxp);
return;
}
if (card->dmarx_in_progress)
return;
/* Get buffer length */
len = FST_RDW(card, rxDescrRing[pi][rxp].mcnt);
/* Discard the CRC */
len -= 2;
if (len == 0) {
/* This seems to happen on the TE1 interface sometimes
* so throw the frame away and log the event.
*/
pr_err("Frame received with 0 length. Card %d Port %d\n",
card->card_no, port->index);
/* Return descriptor to card */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
rxp = (rxp + 1) % NUM_RX_BUFFER;
port->rxpos = rxp;
return;
}
/* Check buffer length and for other errors. We insist on one packet
* in one buffer. This simplifies things greatly, and since we've
* allocated 8K it shouldn't be a real-world limitation.
*/
dbg(DBG_RX, "intr_rx: %d,%d: flags %x len %d\n", pi, rxp, dmabits, len);
if (dmabits != (RX_STP | RX_ENP) || len > LEN_RX_BUFFER - 2) {
fst_log_rx_error(card, port, dmabits, rxp, len);
fst_recover_rx_error(card, port, dmabits, rxp, len);
return;
}
/* Allocate SKB */
skb = dev_alloc_skb(len);
if (!skb) {
dbg(DBG_RX, "intr_rx: can't allocate buffer\n");
dev->stats.rx_dropped++;
/* Return descriptor to card */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
rxp = (rxp + 1) % NUM_RX_BUFFER;
port->rxpos = rxp;
return;
}
/* We know the length we need to receive, len.
* It's not worth using the DMA for reads of less than
* FST_MIN_DMA_LEN
*/
if (len < FST_MIN_DMA_LEN || card->family == FST_FAMILY_TXP) {
memcpy_fromio(skb_put(skb, len),
card->mem + BUF_OFFSET(rxBuffer[pi][rxp][0]),
len);
/* Reset buffer descriptor */
FST_WRB(card, rxDescrRing[pi][rxp].bits, DMA_OWN);
/* Update stats */
dev->stats.rx_packets++;
dev->stats.rx_bytes += len;
/* Push upstream */
dbg(DBG_RX, "Pushing frame up the stack\n");
if (port->mode == FST_RAW)
skb->protocol = farsync_type_trans(skb, dev);
else
skb->protocol = hdlc_type_trans(skb, dev);
rx_status = netif_rx(skb);
fst_process_rx_status(rx_status, port_to_dev(port)->name);
if (rx_status == NET_RX_DROP)
dev->stats.rx_dropped++;
} else {
card->dma_skb_rx = skb;
card->dma_port_rx = port;
card->dma_len_rx = len;
card->dma_rxpos = rxp;
fst_rx_dma(card, card->rx_dma_handle_card,
BUF_OFFSET(rxBuffer[pi][rxp][0]), len);
}
if (rxp != port->rxpos) {
dbg(DBG_ASS, "About to increment rxpos by more than 1\n");
dbg(DBG_ASS, "rxp = %d rxpos = %d\n", rxp, port->rxpos);
}
rxp = (rxp + 1) % NUM_RX_BUFFER;
port->rxpos = rxp;
}
/* The bottom half of the ISR */
static void
do_bottom_half_tx(struct fst_card_info *card)
{
struct fst_port_info *port;
int pi;
int txq_length;
struct sk_buff *skb;
unsigned long flags;
struct net_device *dev;
/* Find a free buffer for the transmit
* Step through each port on this card
*/
dbg(DBG_TX, "do_bottom_half_tx\n");
for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
if (!port->run)
continue;
dev = port_to_dev(port);
while (!(FST_RDB(card, txDescrRing[pi][port->txpos].bits) &
DMA_OWN) &&
!(card->dmatx_in_progress)) {
/* There doesn't seem to be a txdone event per se.
* We have to deduce it by checking the DMA_OWN
* bit on the next buffer we think we can use.
*/
spin_lock_irqsave(&card->card_lock, flags);
txq_length = port->txqe - port->txqs;
if (txq_length < 0) {
/* This is the case where one has wrapped and the
* maths gives us a negative number
*/
txq_length = txq_length + FST_TXQ_DEPTH;
}
spin_unlock_irqrestore(&card->card_lock, flags);
if (txq_length > 0) {
/* There is something to send
*/
spin_lock_irqsave(&card->card_lock, flags);
skb = port->txq[port->txqs];
port->txqs++;
if (port->txqs == FST_TXQ_DEPTH)
port->txqs = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
/* copy the data and set the required indicators on the
* card.
*/
FST_WRW(card, txDescrRing[pi][port->txpos].bcnt,
cnv_bcnt(skb->len));
if (skb->len < FST_MIN_DMA_LEN ||
card->family == FST_FAMILY_TXP) {
/* Enqueue the packet with normal io */
memcpy_toio(card->mem +
BUF_OFFSET(txBuffer[pi]
[port->
txpos][0]),
skb->data, skb->len);
FST_WRB(card,
txDescrRing[pi][port->txpos].
bits,
DMA_OWN | TX_STP | TX_ENP);
dev->stats.tx_packets++;
dev->stats.tx_bytes += skb->len;
netif_trans_update(dev);
} else {
/* Or do it through dma */
memcpy(card->tx_dma_handle_host,
skb->data, skb->len);
card->dma_port_tx = port;
card->dma_len_tx = skb->len;
card->dma_txpos = port->txpos;
fst_tx_dma(card,
card->tx_dma_handle_card,
BUF_OFFSET(txBuffer[pi]
[port->txpos][0]),
skb->len);
}
if (++port->txpos >= NUM_TX_BUFFER)
port->txpos = 0;
/* If we have flow control on, can we now release it?
*/
if (port->start) {
if (txq_length < fst_txq_low) {
netif_wake_queue(port_to_dev
(port));
port->start = 0;
}
}
dev_kfree_skb(skb);
} else {
/* Nothing to send so break out of the while loop
*/
break;
}
}
}
}
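/* A note on the queue arithmetic above: txqe is the enqueue index and
* txqs the dequeue index, so the number of queued frames is
* (txqe - txqs) mod FST_TXQ_DEPTH. For example, with a depth of 8
* (illustrative value), txqe = 1 and txqs = 6 give a raw difference of
* -5, corrected to 3 frames outstanding.
*/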
static void
do_bottom_half_rx(struct fst_card_info *card)
{
struct fst_port_info *port;
int pi;
int rx_count = 0;
/* Check for rx completions on all ports on this card */
dbg(DBG_RX, "do_bottom_half_rx\n");
for (pi = 0, port = card->ports; pi < card->nports; pi++, port++) {
if (!port->run)
continue;
while (!(FST_RDB(card, rxDescrRing[pi][port->rxpos].bits)
& DMA_OWN) && !(card->dmarx_in_progress)) {
if (rx_count > fst_max_reads) {
/* Don't spend forever in receive processing
* Schedule another event
*/
fst_q_work_item(&fst_work_intq, card->card_no);
tasklet_schedule(&fst_int_task);
break; /* Leave the loop */
}
fst_intr_rx(card, port);
rx_count++;
}
}
}
/* The interrupt service routine
* Dev_id is our fst_card_info pointer
*/
static irqreturn_t
fst_intr(int dummy, void *dev_id)
{
struct fst_card_info *card = dev_id;
struct fst_port_info *port;
int rdidx; /* Event buffer indices */
int wridx;
int event; /* Actual event for processing */
unsigned int dma_intcsr = 0;
unsigned int do_card_interrupt;
unsigned int int_retry_count;
/* Check to see if the interrupt was for this card
* return if not
* Note that the call to clear the interrupt is important
*/
dbg(DBG_INTR, "intr: %d %p\n", card->irq, card);
if (card->state != FST_RUNNING) {
pr_err("Interrupt received for card %d in a non running state (%d)\n",
card->card_no, card->state);
/* It is possible to really be running, i.e. we have re-loaded
* a running card
* Clear and reprime the interrupt source
*/
fst_clear_intr(card);
return IRQ_HANDLED;
}
/* Clear and reprime the interrupt source */
fst_clear_intr(card);
/* Is the interrupt for this card (handshake == 1)
*/
do_card_interrupt = 0;
if (FST_RDB(card, interruptHandshake) == 1) {
do_card_interrupt += FST_CARD_INT;
/* Set the software acknowledge */
FST_WRB(card, interruptHandshake, 0xEE);
}
if (card->family == FST_FAMILY_TXU) {
/* Is it a DMA Interrupt
*/
dma_intcsr = inl(card->pci_conf + INTCSR_9054);
if (dma_intcsr & 0x00200000) {
/* DMA Channel 0 (Rx transfer complete)
*/
dbg(DBG_RX, "DMA Rx xfer complete\n");
outb(0x8, card->pci_conf + DMACSR0);
fst_rx_dma_complete(card, card->dma_port_rx,
card->dma_len_rx, card->dma_skb_rx,
card->dma_rxpos);
card->dmarx_in_progress = 0;
do_card_interrupt += FST_RX_DMA_INT;
}
if (dma_intcsr & 0x00400000) {
/* DMA Channel 1 (Tx transfer complete)
*/
dbg(DBG_TX, "DMA Tx xfer complete\n");
outb(0x8, card->pci_conf + DMACSR1);
fst_tx_dma_complete(card, card->dma_port_tx,
card->dma_len_tx, card->dma_txpos);
card->dmatx_in_progress = 0;
do_card_interrupt += FST_TX_DMA_INT;
}
}
/* Have we been missing interrupts?
*/
int_retry_count = FST_RDL(card, interruptRetryCount);
if (int_retry_count) {
dbg(DBG_ASS, "Card %d int_retry_count is %d\n",
card->card_no, int_retry_count);
FST_WRL(card, interruptRetryCount, 0);
}
if (!do_card_interrupt)
return IRQ_HANDLED;
/* Schedule the bottom half of the ISR */
fst_q_work_item(&fst_work_intq, card->card_no);
tasklet_schedule(&fst_int_task);
/* Drain the event queue */
rdidx = FST_RDB(card, interruptEvent.rdindex) & 0x1f;
wridx = FST_RDB(card, interruptEvent.wrindex) & 0x1f;
while (rdidx != wridx) {
event = FST_RDB(card, interruptEvent.evntbuff[rdidx]);
port = &card->ports[event & 0x03];
dbg(DBG_INTR, "Processing Interrupt event: %x\n", event);
switch (event) {
case TE1_ALMA:
dbg(DBG_INTR, "TE1 Alarm intr\n");
if (port->run)
fst_intr_te1_alarm(card, port);
break;
case CTLA_CHG:
case CTLB_CHG:
case CTLC_CHG:
case CTLD_CHG:
if (port->run)
fst_intr_ctlchg(card, port);
break;
case ABTA_SENT:
case ABTB_SENT:
case ABTC_SENT:
case ABTD_SENT:
dbg(DBG_TX, "Abort complete port %d\n", port->index);
break;
case TXA_UNDF:
case TXB_UNDF:
case TXC_UNDF:
case TXD_UNDF:
/* Difficult to see how we'd get this given that we
* always load up the entire packet for DMA.
*/
dbg(DBG_TX, "Tx underflow port %d\n", port->index);
port_to_dev(port)->stats.tx_errors++;
port_to_dev(port)->stats.tx_fifo_errors++;
dbg(DBG_ASS, "Tx underflow on card %d port %d\n",
card->card_no, port->index);
break;
case INIT_CPLT:
dbg(DBG_INIT, "Card init OK intr\n");
break;
case INIT_FAIL:
dbg(DBG_INIT, "Card init FAILED intr\n");
card->state = FST_IFAILED;
break;
default:
pr_err("intr: unknown card event %d. ignored\n", event);
break;
}
/* Bump and wrap the index */
if (++rdidx >= MAX_CIRBUFF)
rdidx = 0;
}
FST_WRB(card, interruptEvent.rdindex, rdidx);
return IRQ_HANDLED;
}
/* Check that the shared memory configuration is one that we can handle
* and that some basic parameters are correct
*/
static void
check_started_ok(struct fst_card_info *card)
{
int i;
/* Check structure version and end marker */
if (FST_RDW(card, smcVersion) != SMC_VERSION) {
pr_err("Bad shared memory version %d expected %d\n",
FST_RDW(card, smcVersion), SMC_VERSION);
card->state = FST_BADVERSION;
return;
}
if (FST_RDL(card, endOfSmcSignature) != END_SIG) {
pr_err("Missing shared memory signature\n");
card->state = FST_BADVERSION;
return;
}
/* Firmware status flag, 0x00 = initialising, 0x01 = OK, 0xFF = fail */
i = FST_RDB(card, taskStatus);
if (i == 0x01) {
card->state = FST_RUNNING;
} else if (i == 0xFF) {
pr_err("Firmware initialisation failed. Card halted\n");
card->state = FST_HALTED;
return;
} else if (i != 0x00) {
pr_err("Unknown firmware status 0x%x\n", i);
card->state = FST_HALTED;
return;
}
/* Finally check the number of ports reported by firmware against the
* number we assumed at card detection. Should never happen with
* existing firmware etc so we just report it for the moment.
*/
if (FST_RDL(card, numberOfPorts) != card->nports) {
pr_warn("Port count mismatch on card %d. Firmware thinks %d we say %d\n",
card->card_no,
FST_RDL(card, numberOfPorts), card->nports);
}
}
static int
set_conf_from_info(struct fst_card_info *card, struct fst_port_info *port,
struct fstioc_info *info)
{
int err;
unsigned char my_framing;
/* Set things according to the user-set valid flags.
* Several of the old options have been invalidated/replaced by the
* generic hdlc package.
*/
err = 0;
if (info->valid & FSTVAL_PROTO) {
if (info->proto == FST_RAW)
port->mode = FST_RAW;
else
port->mode = FST_GEN_HDLC;
}
if (info->valid & FSTVAL_CABLE)
err = -EINVAL;
if (info->valid & FSTVAL_SPEED)
err = -EINVAL;
if (info->valid & FSTVAL_PHASE)
FST_WRB(card, portConfig[port->index].invertClock,
info->invertClock);
if (info->valid & FSTVAL_MODE)
FST_WRW(card, cardMode, info->cardMode);
if (info->valid & FSTVAL_TE1) {
FST_WRL(card, suConfig.dataRate, info->lineSpeed);
FST_WRB(card, suConfig.clocking, info->clockSource);
my_framing = FRAMING_E1;
if (info->framing == E1)
my_framing = FRAMING_E1;
if (info->framing == T1)
my_framing = FRAMING_T1;
if (info->framing == J1)
my_framing = FRAMING_J1;
FST_WRB(card, suConfig.framing, my_framing);
FST_WRB(card, suConfig.structure, info->structure);
FST_WRB(card, suConfig.interface, info->interface);
FST_WRB(card, suConfig.coding, info->coding);
FST_WRB(card, suConfig.lineBuildOut, info->lineBuildOut);
FST_WRB(card, suConfig.equalizer, info->equalizer);
FST_WRB(card, suConfig.transparentMode, info->transparentMode);
FST_WRB(card, suConfig.loopMode, info->loopMode);
FST_WRB(card, suConfig.range, info->range);
FST_WRB(card, suConfig.txBufferMode, info->txBufferMode);
FST_WRB(card, suConfig.rxBufferMode, info->rxBufferMode);
FST_WRB(card, suConfig.startingSlot, info->startingSlot);
FST_WRB(card, suConfig.losThreshold, info->losThreshold);
if (info->idleCode)
FST_WRB(card, suConfig.enableIdleCode, 1);
else
FST_WRB(card, suConfig.enableIdleCode, 0);
FST_WRB(card, suConfig.idleCode, info->idleCode);
#if FST_DEBUG
if (info->valid & FSTVAL_TE1) {
printk("Setting TE1 data\n");
printk("Line Speed = %d\n", info->lineSpeed);
printk("Start slot = %d\n", info->startingSlot);
printk("Clock source = %d\n", info->clockSource);
printk("Framing = %d\n", my_framing);
printk("Structure = %d\n", info->structure);
printk("interface = %d\n", info->interface);
printk("Coding = %d\n", info->coding);
printk("Line build out = %d\n", info->lineBuildOut);
printk("Equaliser = %d\n", info->equalizer);
printk("Transparent mode = %d\n",
info->transparentMode);
printk("Loop mode = %d\n", info->loopMode);
printk("Range = %d\n", info->range);
printk("Tx Buffer mode = %d\n", info->txBufferMode);
printk("Rx Buffer mode = %d\n", info->rxBufferMode);
printk("LOS Threshold = %d\n", info->losThreshold);
printk("Idle Code = %d\n", info->idleCode);
}
#endif
}
#if FST_DEBUG
if (info->valid & FSTVAL_DEBUG)
fst_debug_mask = info->debug;
#endif
return err;
}
static void
gather_conf_info(struct fst_card_info *card, struct fst_port_info *port,
struct fstioc_info *info)
{
int i;
memset(info, 0, sizeof(struct fstioc_info));
i = port->index;
info->kernelVersion = LINUX_VERSION_CODE;
info->nports = card->nports;
info->type = card->type;
info->state = card->state;
info->proto = FST_GEN_HDLC;
info->index = i;
#if FST_DEBUG
info->debug = fst_debug_mask;
#endif
/* Only mark information as valid if card is running.
* Copy the data anyway in case it is useful for diagnostics
*/
info->valid = ((card->state == FST_RUNNING) ? FSTVAL_ALL : FSTVAL_CARD)
#if FST_DEBUG
| FSTVAL_DEBUG
#endif
;
info->lineInterface = FST_RDW(card, portConfig[i].lineInterface);
info->internalClock = FST_RDB(card, portConfig[i].internalClock);
info->lineSpeed = FST_RDL(card, portConfig[i].lineSpeed);
info->invertClock = FST_RDB(card, portConfig[i].invertClock);
info->v24IpSts = FST_RDL(card, v24IpSts[i]);
info->v24OpSts = FST_RDL(card, v24OpSts[i]);
info->clockStatus = FST_RDW(card, clockStatus[i]);
info->cableStatus = FST_RDW(card, cableStatus);
info->cardMode = FST_RDW(card, cardMode);
info->smcFirmwareVersion = FST_RDL(card, smcFirmwareVersion);
/* The T2U can report cable presence for both A or B
* in bits 0 and 1 of cableStatus. See which port we are and
* do the mapping.
*/
if (card->family == FST_FAMILY_TXU) {
if (port->index == 0) {
/* Port A
*/
info->cableStatus = info->cableStatus & 1;
} else {
/* Port B
*/
info->cableStatus = info->cableStatus >> 1;
info->cableStatus = info->cableStatus & 1;
}
}
/* Some additional bits if we are TE1
*/
if (card->type == FST_TYPE_TE1) {
info->lineSpeed = FST_RDL(card, suConfig.dataRate);
info->clockSource = FST_RDB(card, suConfig.clocking);
info->framing = FST_RDB(card, suConfig.framing);
info->structure = FST_RDB(card, suConfig.structure);
info->interface = FST_RDB(card, suConfig.interface);
info->coding = FST_RDB(card, suConfig.coding);
info->lineBuildOut = FST_RDB(card, suConfig.lineBuildOut);
info->equalizer = FST_RDB(card, suConfig.equalizer);
info->loopMode = FST_RDB(card, suConfig.loopMode);
info->range = FST_RDB(card, suConfig.range);
info->txBufferMode = FST_RDB(card, suConfig.txBufferMode);
info->rxBufferMode = FST_RDB(card, suConfig.rxBufferMode);
info->startingSlot = FST_RDB(card, suConfig.startingSlot);
info->losThreshold = FST_RDB(card, suConfig.losThreshold);
if (FST_RDB(card, suConfig.enableIdleCode))
info->idleCode = FST_RDB(card, suConfig.idleCode);
else
info->idleCode = 0;
info->receiveBufferDelay =
FST_RDL(card, suStatus.receiveBufferDelay);
info->framingErrorCount =
FST_RDL(card, suStatus.framingErrorCount);
info->codeViolationCount =
FST_RDL(card, suStatus.codeViolationCount);
info->crcErrorCount = FST_RDL(card, suStatus.crcErrorCount);
info->lineAttenuation = FST_RDL(card, suStatus.lineAttenuation);
info->lossOfSignal = FST_RDB(card, suStatus.lossOfSignal);
info->receiveRemoteAlarm =
FST_RDB(card, suStatus.receiveRemoteAlarm);
info->alarmIndicationSignal =
FST_RDB(card, suStatus.alarmIndicationSignal);
}
}
static int
fst_set_iface(struct fst_card_info *card, struct fst_port_info *port,
struct if_settings *ifs)
{
sync_serial_settings sync;
int i;
if (ifs->size != sizeof(sync))
return -ENOMEM;
if (copy_from_user(&sync, ifs->ifs_ifsu.sync, sizeof(sync)))
return -EFAULT;
if (sync.loopback)
return -EINVAL;
i = port->index;
switch (ifs->type) {
case IF_IFACE_V35:
FST_WRW(card, portConfig[i].lineInterface, V35);
port->hwif = V35;
break;
case IF_IFACE_V24:
FST_WRW(card, portConfig[i].lineInterface, V24);
port->hwif = V24;
break;
case IF_IFACE_X21:
FST_WRW(card, portConfig[i].lineInterface, X21);
port->hwif = X21;
break;
case IF_IFACE_X21D:
FST_WRW(card, portConfig[i].lineInterface, X21D);
port->hwif = X21D;
break;
case IF_IFACE_T1:
FST_WRW(card, portConfig[i].lineInterface, T1);
port->hwif = T1;
break;
case IF_IFACE_E1:
FST_WRW(card, portConfig[i].lineInterface, E1);
port->hwif = E1;
break;
case IF_IFACE_SYNC_SERIAL:
break;
default:
return -EINVAL;
}
switch (sync.clock_type) {
case CLOCK_EXT:
FST_WRB(card, portConfig[i].internalClock, EXTCLK);
break;
case CLOCK_INT:
FST_WRB(card, portConfig[i].internalClock, INTCLK);
break;
default:
return -EINVAL;
}
FST_WRL(card, portConfig[i].lineSpeed, sync.clock_rate);
return 0;
}
static int
fst_get_iface(struct fst_card_info *card, struct fst_port_info *port,
struct if_settings *ifs)
{
sync_serial_settings sync;
int i;
/* First check what line type is set; we'll default to reporting X.21
* if nothing is set, as IF_IFACE_SYNC_SERIAL implies it can't be
* changed.
*/
switch (port->hwif) {
case E1:
ifs->type = IF_IFACE_E1;
break;
case T1:
ifs->type = IF_IFACE_T1;
break;
case V35:
ifs->type = IF_IFACE_V35;
break;
case V24:
ifs->type = IF_IFACE_V24;
break;
case X21D:
ifs->type = IF_IFACE_X21D;
break;
case X21:
default:
ifs->type = IF_IFACE_X21;
break;
}
if (!ifs->size)
return 0; /* only type requested */
if (ifs->size < sizeof(sync))
return -ENOMEM;
i = port->index;
memset(&sync, 0, sizeof(sync));
sync.clock_rate = FST_RDL(card, portConfig[i].lineSpeed);
/* Lucky card and linux use same encoding here */
sync.clock_type = FST_RDB(card, portConfig[i].internalClock) ==
INTCLK ? CLOCK_INT : CLOCK_EXT;
sync.loopback = 0;
if (copy_to_user(ifs->ifs_ifsu.sync, &sync, sizeof(sync)))
return -EFAULT;
ifs->size = sizeof(sync);
return 0;
}
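/* A minimal user-space sketch of the matching query (illustrative only;
* error handling omitted and "sync0" is an assumed interface name):
*
* struct ifreq ifr;
* sync_serial_settings sync;
*
* strcpy(ifr.ifr_name, "sync0");
* ifr.ifr_settings.type = IF_GET_IFACE;
* ifr.ifr_settings.size = sizeof(sync);
* ifr.ifr_settings.ifs_ifsu.sync = &sync;
* ioctl(fd, SIOCWANDEV, &ifr);
*
* On return ifr_settings.type identifies the line interface and sync
* holds the clock rate and clock type read back by fst_get_iface().
*/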
static int
fst_siocdevprivate(struct net_device *dev, struct ifreq *ifr, void __user *data, int cmd)
{
struct fst_card_info *card;
struct fst_port_info *port;
struct fstioc_write wrthdr;
struct fstioc_info info;
unsigned long flags;
void *buf;
dbg(DBG_IOCTL, "ioctl: %x, %p\n", cmd, data);
port = dev_to_port(dev);
card = port->card;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
switch (cmd) {
case FSTCPURESET:
fst_cpureset(card);
card->state = FST_RESET;
return 0;
case FSTCPURELEASE:
fst_cpurelease(card);
card->state = FST_STARTING;
return 0;
case FSTWRITE: /* Code write (download) */
/* First copy in the header with the length and offset of data
* to write
*/
if (!data)
return -EINVAL;
if (copy_from_user(&wrthdr, data, sizeof(struct fstioc_write)))
return -EFAULT;
/* Sanity check the parameters. We don't support writes that would
* run past the end of the card memory window.
*/
if (wrthdr.size > FST_MEMSIZE || wrthdr.offset > FST_MEMSIZE ||
wrthdr.size + wrthdr.offset > FST_MEMSIZE)
return -ENXIO;
/* Now copy the data to the card. */
buf = memdup_user(data + sizeof(struct fstioc_write),
wrthdr.size);
if (IS_ERR(buf))
return PTR_ERR(buf);
memcpy_toio(card->mem + wrthdr.offset, buf, wrthdr.size);
kfree(buf);
/* Writes to the memory of a card in the reset state constitute
* a download
*/
if (card->state == FST_RESET)
card->state = FST_DOWNLOAD;
return 0;
case FSTGETCONF:
/* If card has just been started check the shared memory config
* version and marker
*/
if (card->state == FST_STARTING) {
check_started_ok(card);
/* If everything checked out enable card interrupts */
if (card->state == FST_RUNNING) {
spin_lock_irqsave(&card->card_lock, flags);
fst_enable_intr(card);
FST_WRB(card, interruptHandshake, 0xEE);
spin_unlock_irqrestore(&card->card_lock, flags);
}
}
if (!data)
return -EINVAL;
gather_conf_info(card, port, &info);
if (copy_to_user(data, &info, sizeof(info)))
return -EFAULT;
return 0;
case FSTSETCONF:
/* Most of the settings have been moved to the generic ioctls;
* this just covers debug and board ident now.
*/
if (card->state != FST_RUNNING) {
pr_err("Attempt to configure card %d in non-running state (%d)\n",
card->card_no, card->state);
return -EIO;
}
if (copy_from_user(&info, data, sizeof(info)))
return -EFAULT;
return set_conf_from_info(card, port, &info);
default:
return -EINVAL;
}
}
static int
fst_ioctl(struct net_device *dev, struct if_settings *ifs)
{
struct fst_card_info *card;
struct fst_port_info *port;
dbg(DBG_IOCTL, "SIOCDEVPRIVATE, %x\n", ifs->type);
port = dev_to_port(dev);
card = port->card;
if (!capable(CAP_NET_ADMIN))
return -EPERM;
switch (ifs->type) {
case IF_GET_IFACE:
return fst_get_iface(card, port, ifs);
case IF_IFACE_SYNC_SERIAL:
case IF_IFACE_V35:
case IF_IFACE_V24:
case IF_IFACE_X21:
case IF_IFACE_X21D:
case IF_IFACE_T1:
case IF_IFACE_E1:
return fst_set_iface(card, port, ifs);
case IF_PROTO_RAW:
port->mode = FST_RAW;
return 0;
case IF_GET_PROTO:
if (port->mode == FST_RAW) {
ifs->type = IF_PROTO_RAW;
return 0;
}
return hdlc_ioctl(dev, ifs);
default:
port->mode = FST_GEN_HDLC;
dbg(DBG_IOCTL, "Passing this type to hdlc %x\n",
ifs->type);
return hdlc_ioctl(dev, ifs);
}
}
static void
fst_openport(struct fst_port_info *port)
{
int signals;
/* Only init things if card is actually running. This allows open to
* succeed for downloads etc.
*/
if (port->card->state == FST_RUNNING) {
if (port->run) {
dbg(DBG_OPEN, "open: found port already running\n");
fst_issue_cmd(port, STOPPORT);
port->run = 0;
}
fst_rx_config(port);
fst_tx_config(port);
fst_op_raise(port, OPSTS_RTS | OPSTS_DTR);
fst_issue_cmd(port, STARTPORT);
port->run = 1;
signals = FST_RDL(port->card, v24DebouncedSts[port->index]);
if (signals & ((port->hwif == X21 || port->hwif == X21D)
? IPSTS_INDICATE : IPSTS_DCD))
netif_carrier_on(port_to_dev(port));
else
netif_carrier_off(port_to_dev(port));
port->txqe = 0;
port->txqs = 0;
}
}
static void
fst_closeport(struct fst_port_info *port)
{
if (port->card->state == FST_RUNNING) {
if (port->run) {
port->run = 0;
fst_op_lower(port, OPSTS_RTS | OPSTS_DTR);
fst_issue_cmd(port, STOPPORT);
} else {
dbg(DBG_OPEN, "close: port not running\n");
}
}
}
static int
fst_open(struct net_device *dev)
{
int err;
struct fst_port_info *port;
port = dev_to_port(dev);
if (!try_module_get(THIS_MODULE))
return -EBUSY;
if (port->mode != FST_RAW) {
err = hdlc_open(dev);
if (err) {
module_put(THIS_MODULE);
return err;
}
}
fst_openport(port);
netif_wake_queue(dev);
return 0;
}
static int
fst_close(struct net_device *dev)
{
struct fst_port_info *port;
struct fst_card_info *card;
unsigned char tx_dma_done;
unsigned char rx_dma_done;
port = dev_to_port(dev);
card = port->card;
tx_dma_done = inb(card->pci_conf + DMACSR1);
rx_dma_done = inb(card->pci_conf + DMACSR0);
dbg(DBG_OPEN,
"Port Close: tx_dma_in_progress = %d (%x) rx_dma_in_progress = %d (%x)\n",
card->dmatx_in_progress, tx_dma_done, card->dmarx_in_progress,
rx_dma_done);
netif_stop_queue(dev);
fst_closeport(dev_to_port(dev));
if (port->mode != FST_RAW)
hdlc_close(dev);
module_put(THIS_MODULE);
return 0;
}
static int
fst_attach(struct net_device *dev, unsigned short encoding, unsigned short parity)
{
/* Settings are currently fixed in the FarSync card, so we just check and forget
*/
if (encoding != ENCODING_NRZ || parity != PARITY_CRC16_PR1_CCITT)
return -EINVAL;
return 0;
}
static void
fst_tx_timeout(struct net_device *dev, unsigned int txqueue)
{
struct fst_port_info *port;
struct fst_card_info *card;
port = dev_to_port(dev);
card = port->card;
dev->stats.tx_errors++;
dev->stats.tx_aborted_errors++;
dbg(DBG_ASS, "Tx timeout card %d port %d\n",
card->card_no, port->index);
fst_issue_cmd(port, ABORTTX);
netif_trans_update(dev);
netif_wake_queue(dev);
port->start = 0;
}
static netdev_tx_t
fst_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct fst_card_info *card;
struct fst_port_info *port;
unsigned long flags;
int txq_length;
port = dev_to_port(dev);
card = port->card;
dbg(DBG_TX, "fst_start_xmit: length = %d\n", skb->len);
/* Drop packet with error if we don't have carrier */
if (!netif_carrier_ok(dev)) {
dev_kfree_skb(skb);
dev->stats.tx_errors++;
dev->stats.tx_carrier_errors++;
dbg(DBG_ASS,
"Tried to transmit but no carrier on card %d port %d\n",
card->card_no, port->index);
return NETDEV_TX_OK;
}
/* Drop it if it's too big! MTU failure? */
if (skb->len > LEN_TX_BUFFER) {
dbg(DBG_ASS, "Packet too large %d vs %d\n", skb->len,
LEN_TX_BUFFER);
dev_kfree_skb(skb);
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
/* We always queue the packet so that the bottom half is the only
* place we transmit from. Check there is room in the port txq.
*/
spin_lock_irqsave(&card->card_lock, flags);
txq_length = port->txqe - port->txqs;
if (txq_length < 0) {
/* This is the case where the next free has wrapped but the
* last used hasn't
*/
txq_length = txq_length + FST_TXQ_DEPTH;
}
spin_unlock_irqrestore(&card->card_lock, flags);
if (txq_length > fst_txq_high) {
/* We have got enough buffers in the pipeline. Ask the network
* layer to stop sending frames down
*/
netif_stop_queue(dev);
port->start = 1; /* I'm using this to signal stop sent up */
}
if (txq_length == FST_TXQ_DEPTH - 1) {
/* This shouldn't have happened but such is life
*/
dev_kfree_skb(skb);
dev->stats.tx_errors++;
dbg(DBG_ASS, "Tx queue overflow card %d port %d\n",
card->card_no, port->index);
return NETDEV_TX_OK;
}
/* queue the buffer
*/
spin_lock_irqsave(&card->card_lock, flags);
port->txq[port->txqe] = skb;
port->txqe++;
if (port->txqe == FST_TXQ_DEPTH)
port->txqe = 0;
spin_unlock_irqrestore(&card->card_lock, flags);
/* Schedule the bottom half, which now does transmit processing */
fst_q_work_item(&fst_work_txq, card->card_no);
tasklet_schedule(&fst_tx_task);
return NETDEV_TX_OK;
}
/* Card setup having checked hardware resources.
* Should be pretty bizarre if we get an error here (kernel memory
* exhaustion is one possibility). If we do see a problem we report it
* via a printk and leave the corresponding interface and all that follow
* disabled.
*/
static char *type_strings[] = {
"no hardware", /* Should never be seen */
"FarSync T2P",
"FarSync T4P",
"FarSync T1U",
"FarSync T2U",
"FarSync T4U",
"FarSync TE1"
};
static int
fst_init_card(struct fst_card_info *card)
{
int i;
int err;
/* We're working on a number of ports based on the card ID. If the
* firmware detects something different later (should never happen)
* we'll have to revise it in some way then.
*/
for (i = 0; i < card->nports; i++) {
err = register_hdlc_device(card->ports[i].dev);
if (err < 0) {
pr_err("Cannot register HDLC device for port %d (errno %d)\n",
i, -err);
while (i--)
unregister_hdlc_device(card->ports[i].dev);
return err;
}
}
pr_info("%s-%s: %s IRQ%d, %d ports\n",
port_to_dev(&card->ports[0])->name,
port_to_dev(&card->ports[card->nports - 1])->name,
type_strings[card->type], card->irq, card->nports);
return 0;
}
static const struct net_device_ops fst_ops = {
.ndo_open = fst_open,
.ndo_stop = fst_close,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_siocwandev = fst_ioctl,
.ndo_siocdevprivate = fst_siocdevprivate,
.ndo_tx_timeout = fst_tx_timeout,
};
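/* Note the transmit indirection: ndo_start_xmit is the generic
* hdlc_start_xmit(), which dispatches through hdlc->xmit (pointed at
* fst_start_xmit() in fst_add_one() below), so the HDLC core keeps a
* uniform transmit entry point across drivers.
*/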
/* Initialise card when detected.
* Returns 0 to indicate success, or errno otherwise.
*/
static int
fst_add_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
static int no_of_cards_added;
struct fst_card_info *card;
int err = 0;
int i;
printk_once(KERN_INFO
pr_fmt("FarSync WAN driver " FST_USER_VERSION
" (c) 2001-2004 FarSite Communications Ltd.\n"));
#if FST_DEBUG
dbg(DBG_ASS, "The value of debug mask is %x\n", fst_debug_mask);
#endif
/* We are going to be clever and allow certain cards not to be
* configured. An exclude list can be provided in /etc/modules.conf
*/
if (fst_excluded_cards != 0) {
/* There are cards to exclude
*/
for (i = 0; i < fst_excluded_cards; i++) {
if (pdev->devfn >> 3 == fst_excluded_list[i]) {
pr_info("FarSync PCI device %d not assigned\n",
(pdev->devfn) >> 3);
return -EBUSY;
}
}
}
/* Allocate driver private data */
card = kzalloc(sizeof(struct fst_card_info), GFP_KERNEL);
if (!card)
return -ENOMEM;
/* Try to enable the device */
err = pci_enable_device(pdev);
if (err) {
pr_err("Failed to enable card. Err %d\n", -err);
goto enable_fail;
}
err = pci_request_regions(pdev, "FarSync");
if (err) {
pr_err("Failed to allocate regions. Err %d\n", -err);
goto regions_fail;
}
/* Get virtual addresses of memory regions */
card->pci_conf = pci_resource_start(pdev, 1);
card->phys_mem = pci_resource_start(pdev, 2);
card->phys_ctlmem = pci_resource_start(pdev, 3);
card->mem = ioremap(card->phys_mem, FST_MEMSIZE);
if (!card->mem) {
pr_err("Physical memory remap failed\n");
err = -ENODEV;
goto ioremap_physmem_fail;
}
card->ctlmem = ioremap(card->phys_ctlmem, 0x10);
if (!card->ctlmem) {
pr_err("Control memory remap failed\n");
err = -ENODEV;
goto ioremap_ctlmem_fail;
}
dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
/* Register the interrupt handler */
if (request_irq(pdev->irq, fst_intr, IRQF_SHARED, FST_DEV_NAME, card)) {
pr_err("Unable to register interrupt %d\n", card->irq);
err = -ENODEV;
goto irq_fail;
}
/* Record info we need */
card->irq = pdev->irq;
card->type = ent->driver_data;
card->family = ((ent->driver_data == FST_TYPE_T2P) ||
(ent->driver_data == FST_TYPE_T4P))
? FST_FAMILY_TXP : FST_FAMILY_TXU;
if (ent->driver_data == FST_TYPE_T1U ||
ent->driver_data == FST_TYPE_TE1)
card->nports = 1;
else
card->nports = ((ent->driver_data == FST_TYPE_T2P) ||
(ent->driver_data == FST_TYPE_T2U)) ? 2 : 4;
card->state = FST_UNINIT;
spin_lock_init(&card->card_lock);
for (i = 0; i < card->nports; i++) {
struct net_device *dev = alloc_hdlcdev(&card->ports[i]);
hdlc_device *hdlc;
if (!dev) {
while (i--)
free_netdev(card->ports[i].dev);
pr_err("FarSync: out of memory\n");
err = -ENOMEM;
goto hdlcdev_fail;
}
card->ports[i].dev = dev;
card->ports[i].card = card;
card->ports[i].index = i;
card->ports[i].run = 0;
hdlc = dev_to_hdlc(dev);
/* Fill in the net device info */
/* Since this is a PCI setup this is purely
* informational. Give them the buffer addresses
* and basic card I/O.
*/
dev->mem_start = card->phys_mem
+ BUF_OFFSET(txBuffer[i][0][0]);
dev->mem_end = card->phys_mem
+ BUF_OFFSET(txBuffer[i][NUM_TX_BUFFER - 1][LEN_TX_BUFFER - 1]);
dev->base_addr = card->pci_conf;
dev->irq = card->irq;
dev->netdev_ops = &fst_ops;
dev->tx_queue_len = FST_TX_QUEUE_LEN;
dev->watchdog_timeo = FST_TX_TIMEOUT;
hdlc->attach = fst_attach;
hdlc->xmit = fst_start_xmit;
}
card->device = pdev;
dbg(DBG_PCI, "type %d nports %d irq %d\n", card->type,
card->nports, card->irq);
dbg(DBG_PCI, "conf %04x mem %08x ctlmem %08x\n",
card->pci_conf, card->phys_mem, card->phys_ctlmem);
/* Reset the card's processor */
fst_cpureset(card);
card->state = FST_RESET;
/* Initialise DMA (if required) */
fst_init_dma(card);
/* Record driver data for later use */
pci_set_drvdata(pdev, card);
/* Remainder of card setup */
if (no_of_cards_added >= FST_MAX_CARDS) {
pr_err("FarSync: too many cards\n");
err = -ENOMEM;
goto card_array_fail;
}
fst_card_array[no_of_cards_added] = card;
card->card_no = no_of_cards_added++; /* Record instance and bump it */
err = fst_init_card(card);
if (err)
goto init_card_fail;
if (card->family == FST_FAMILY_TXU) {
/* Allocate DMA buffers for transmit and receive
*/
card->rx_dma_handle_host =
dma_alloc_coherent(&card->device->dev, FST_MAX_MTU,
&card->rx_dma_handle_card, GFP_KERNEL);
if (!card->rx_dma_handle_host) {
pr_err("Could not allocate rx dma buffer\n");
err = -ENOMEM;
goto rx_dma_fail;
}
card->tx_dma_handle_host =
dma_alloc_coherent(&card->device->dev, FST_MAX_MTU,
&card->tx_dma_handle_card, GFP_KERNEL);
if (!card->tx_dma_handle_host) {
pr_err("Could not allocate tx dma buffer\n");
err = -ENOMEM;
goto tx_dma_fail;
}
}
return 0; /* Success */
tx_dma_fail:
dma_free_coherent(&card->device->dev, FST_MAX_MTU,
card->rx_dma_handle_host, card->rx_dma_handle_card);
rx_dma_fail:
fst_disable_intr(card);
for (i = 0 ; i < card->nports ; i++)
unregister_hdlc_device(card->ports[i].dev);
init_card_fail:
fst_card_array[card->card_no] = NULL;
card_array_fail:
for (i = 0 ; i < card->nports ; i++)
free_netdev(card->ports[i].dev);
hdlcdev_fail:
free_irq(card->irq, card);
irq_fail:
iounmap(card->ctlmem);
ioremap_ctlmem_fail:
iounmap(card->mem);
ioremap_physmem_fail:
pci_release_regions(pdev);
regions_fail:
pci_disable_device(pdev);
enable_fail:
kfree(card);
return err;
}
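/* The error unwind above is the usual reversed-label ladder: each goto
* target releases exactly what was acquired before the failing step, so
* a failure at any point backs out cleanly in the opposite order.
*/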
/* Cleanup and close down a card
*/
static void
fst_remove_one(struct pci_dev *pdev)
{
struct fst_card_info *card;
int i;
card = pci_get_drvdata(pdev);
for (i = 0; i < card->nports; i++) {
struct net_device *dev = port_to_dev(&card->ports[i]);
unregister_hdlc_device(dev);
free_netdev(dev);
}
fst_disable_intr(card);
free_irq(card->irq, card);
iounmap(card->ctlmem);
iounmap(card->mem);
pci_release_regions(pdev);
if (card->family == FST_FAMILY_TXU) {
/* Free dma buffers
*/
dma_free_coherent(&card->device->dev, FST_MAX_MTU,
card->rx_dma_handle_host,
card->rx_dma_handle_card);
dma_free_coherent(&card->device->dev, FST_MAX_MTU,
card->tx_dma_handle_host,
card->tx_dma_handle_card);
}
fst_card_array[card->card_no] = NULL;
kfree(card);
}
static struct pci_driver fst_driver = {
.name = FST_NAME,
.id_table = fst_pci_dev_id,
.probe = fst_add_one,
.remove = fst_remove_one,
};
static int __init
fst_init(void)
{
int i;
for (i = 0; i < FST_MAX_CARDS; i++)
fst_card_array[i] = NULL;
return pci_register_driver(&fst_driver);
}
static void __exit
fst_cleanup_module(void)
{
pr_info("FarSync WAN driver unloading\n");
pci_unregister_driver(&fst_driver);
}
module_init(fst_init);
module_exit(fst_cleanup_module);
|
linux-master
|
drivers/net/wan/farsync.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic HDLC support routines for Linux
* Point-to-point protocol support
*
* Copyright (C) 1999 - 2008 Krzysztof Halasa <[email protected]>
*/
#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#define DEBUG_CP 0 /* also bytes# to dump */
#define DEBUG_STATE 0
#define DEBUG_HARD_HEADER 0
#define HDLC_ADDR_ALLSTATIONS 0xFF
#define HDLC_CTRL_UI 0x03
#define PID_LCP 0xC021
#define PID_IP 0x0021
#define PID_IPCP 0x8021
#define PID_IPV6 0x0057
#define PID_IPV6CP 0x8057
enum {IDX_LCP = 0, IDX_IPCP, IDX_IPV6CP, IDX_COUNT};
enum {CP_CONF_REQ = 1, CP_CONF_ACK, CP_CONF_NAK, CP_CONF_REJ, CP_TERM_REQ,
CP_TERM_ACK, CP_CODE_REJ, LCP_PROTO_REJ, LCP_ECHO_REQ, LCP_ECHO_REPLY,
LCP_DISC_REQ, CP_CODES};
#if DEBUG_CP
static const char *const code_names[CP_CODES] = {
"0", "ConfReq", "ConfAck", "ConfNak", "ConfRej", "TermReq",
"TermAck", "CodeRej", "ProtoRej", "EchoReq", "EchoReply", "Discard"
};
static char debug_buffer[64 + 3 * DEBUG_CP];
#endif
enum {LCP_OPTION_MRU = 1, LCP_OPTION_ACCM, LCP_OPTION_MAGIC = 5};
struct hdlc_header {
u8 address;
u8 control;
__be16 protocol;
};
struct cp_header {
u8 code;
u8 id;
__be16 len;
};
struct proto {
struct net_device *dev;
struct timer_list timer;
unsigned long timeout;
u16 pid; /* protocol ID */
u8 state;
u8 cr_id; /* ID of last Configuration-Request */
u8 restart_counter;
};
struct ppp {
struct proto protos[IDX_COUNT];
spinlock_t lock;
unsigned long last_pong;
unsigned int req_timeout, cr_retries, term_retries;
unsigned int keepalive_interval, keepalive_timeout;
u8 seq; /* local sequence number for requests */
u8 echo_id; /* ID of last Echo-Request (LCP) */
};
enum {CLOSED = 0, STOPPED, STOPPING, REQ_SENT, ACK_RECV, ACK_SENT, OPENED,
STATES, STATE_MASK = 0xF};
enum {START = 0, STOP, TO_GOOD, TO_BAD, RCR_GOOD, RCR_BAD, RCA, RCN, RTR, RTA,
RUC, RXJ_GOOD, RXJ_BAD, EVENTS};
enum {INV = 0x10, IRC = 0x20, ZRC = 0x40, SCR = 0x80, SCA = 0x100,
SCN = 0x200, STR = 0x400, STA = 0x800, SCJ = 0x1000};
#if DEBUG_STATE
static const char *const state_names[STATES] = {
"Closed", "Stopped", "Stopping", "ReqSent", "AckRecv", "AckSent",
"Opened"
};
static const char *const event_names[EVENTS] = {
"Start", "Stop", "TO+", "TO-", "RCR+", "RCR-", "RCA", "RCN",
"RTR", "RTA", "RUC", "RXJ+", "RXJ-"
};
#endif
static struct sk_buff_head tx_queue; /* used when holding the spin lock */
static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs);
static inline struct ppp *get_ppp(struct net_device *dev)
{
return (struct ppp *)dev_to_hdlc(dev)->state;
}
static inline struct proto *get_proto(struct net_device *dev, u16 pid)
{
struct ppp *ppp = get_ppp(dev);
switch (pid) {
case PID_LCP:
return &ppp->protos[IDX_LCP];
case PID_IPCP:
return &ppp->protos[IDX_IPCP];
case PID_IPV6CP:
return &ppp->protos[IDX_IPV6CP];
default:
return NULL;
}
}
static inline const char *proto_name(u16 pid)
{
switch (pid) {
case PID_LCP:
return "LCP";
case PID_IPCP:
return "IPCP";
case PID_IPV6CP:
return "IPV6CP";
default:
return NULL;
}
}
static __be16 ppp_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct hdlc_header *data = (struct hdlc_header *)skb->data;
if (skb->len < sizeof(struct hdlc_header))
return htons(ETH_P_HDLC);
if (data->address != HDLC_ADDR_ALLSTATIONS ||
data->control != HDLC_CTRL_UI)
return htons(ETH_P_HDLC);
switch (data->protocol) {
case cpu_to_be16(PID_IP):
skb_pull(skb, sizeof(struct hdlc_header));
return htons(ETH_P_IP);
case cpu_to_be16(PID_IPV6):
skb_pull(skb, sizeof(struct hdlc_header));
return htons(ETH_P_IPV6);
default:
return htons(ETH_P_HDLC);
}
}
static int ppp_hard_header(struct sk_buff *skb, struct net_device *dev,
u16 type, const void *daddr, const void *saddr,
unsigned int len)
{
struct hdlc_header *data;
#if DEBUG_HARD_HEADER
printk(KERN_DEBUG "%s: ppp_hard_header() called\n", dev->name);
#endif
skb_push(skb, sizeof(struct hdlc_header));
data = (struct hdlc_header *)skb->data;
data->address = HDLC_ADDR_ALLSTATIONS;
data->control = HDLC_CTRL_UI;
switch (type) {
case ETH_P_IP:
data->protocol = htons(PID_IP);
break;
case ETH_P_IPV6:
data->protocol = htons(PID_IPV6);
break;
case PID_LCP:
case PID_IPCP:
case PID_IPV6CP:
data->protocol = htons(type);
break;
default: /* unknown protocol */
data->protocol = 0;
}
return sizeof(struct hdlc_header);
}
static void ppp_tx_flush(void)
{
struct sk_buff *skb;
while ((skb = skb_dequeue(&tx_queue)) != NULL)
dev_queue_xmit(skb);
}
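/* Control frames are built while ppp->lock is held, so they are parked
* on tx_queue rather than sent inline; callers invoke ppp_tx_flush()
* once the lock is dropped, presumably because dev_queue_xmit() could
* re-enter the protocol code if called under the spin lock (see the
* comment at the tx_queue declaration).
*/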
static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code,
u8 id, unsigned int len, const void *data)
{
struct sk_buff *skb;
struct cp_header *cp;
unsigned int magic_len = 0;
static u32 magic;
#if DEBUG_CP
int i;
char *ptr;
#endif
if (pid == PID_LCP && (code == LCP_ECHO_REQ || code == LCP_ECHO_REPLY))
magic_len = sizeof(magic);
skb = dev_alloc_skb(sizeof(struct hdlc_header) +
sizeof(struct cp_header) + magic_len + len);
if (!skb)
return;
skb_reserve(skb, sizeof(struct hdlc_header));
cp = skb_put(skb, sizeof(struct cp_header));
cp->code = code;
cp->id = id;
cp->len = htons(sizeof(struct cp_header) + magic_len + len);
if (magic_len)
skb_put_data(skb, &magic, magic_len);
if (len)
skb_put_data(skb, data, len);
#if DEBUG_CP
BUG_ON(code >= CP_CODES);
ptr = debug_buffer;
*ptr = '\x0';
for (i = 0; i < min_t(unsigned int, magic_len + len, DEBUG_CP); i++) {
sprintf(ptr, " %02X", skb->data[sizeof(struct cp_header) + i]);
ptr += strlen(ptr);
}
printk(KERN_DEBUG "%s: TX %s [%s id 0x%X]%s\n", dev->name,
proto_name(pid), code_names[code], id, debug_buffer);
#endif
ppp_hard_header(skb, dev, pid, NULL, NULL, 0);
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
skb_queue_tail(&tx_queue, skb);
}
/* State transition table (compare STD-51)
Events Actions
TO+ = Timeout with counter > 0 irc = Initialize-Restart-Count
TO- = Timeout with counter expired zrc = Zero-Restart-Count
RCR+ = Receive-Configure-Request (Good) scr = Send-Configure-Request
RCR- = Receive-Configure-Request (Bad)
RCA = Receive-Configure-Ack sca = Send-Configure-Ack
RCN = Receive-Configure-Nak/Rej scn = Send-Configure-Nak/Rej
RTR = Receive-Terminate-Request str = Send-Terminate-Request
RTA = Receive-Terminate-Ack sta = Send-Terminate-Ack
RUC = Receive-Unknown-Code scj = Send-Code-Reject
RXJ+ = Receive-Code-Reject (permitted)
or Receive-Protocol-Reject
RXJ- = Receive-Code-Reject (catastrophic)
or Receive-Protocol-Reject
*/
static int cp_table[EVENTS][STATES] = {
/* CLOSED STOPPED STOPPING REQ_SENT ACK_RECV ACK_SENT OPENED
0 1 2 3 4 5 6 */
{IRC|SCR|3, INV , INV , INV , INV , INV , INV }, /* START */
{ INV , 0 , 0 , 0 , 0 , 0 , 0 }, /* STOP */
{ INV , INV ,STR|2, SCR|3 ,SCR|3, SCR|5 , INV }, /* TO+ */
{ INV , INV , 1 , 1 , 1 , 1 , INV }, /* TO- */
{ STA|0 ,IRC|SCR|SCA|5, 2 , SCA|5 ,SCA|6, SCA|5 ,SCR|SCA|5}, /* RCR+ */
{ STA|0 ,IRC|SCR|SCN|3, 2 , SCN|3 ,SCN|4, SCN|3 ,SCR|SCN|3}, /* RCR- */
{ STA|0 , STA|1 , 2 , IRC|4 ,SCR|3, 6 , SCR|3 }, /* RCA */
{ STA|0 , STA|1 , 2 ,IRC|SCR|3,SCR|3,IRC|SCR|5, SCR|3 }, /* RCN */
{ STA|0 , STA|1 ,STA|2, STA|3 ,STA|3, STA|3 ,ZRC|STA|2}, /* RTR */
{ 0 , 1 , 1 , 3 , 3 , 5 , SCR|3 }, /* RTA */
{ SCJ|0 , SCJ|1 ,SCJ|2, SCJ|3 ,SCJ|4, SCJ|5 , SCJ|6 }, /* RUC */
{ 0 , 1 , 2 , 3 , 3 , 5 , 6 }, /* RXJ+ */
{ 0 , 1 , 1 , 1 , 1 , 1 ,IRC|STR|2}, /* RXJ- */
};
/* SCA: RCR+ must supply id, len and data
SCN: RCR- must supply code, id, len and data
STA: RTR must supply id
SCJ: RUC must supply CP packet len and data */
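/* Worked example (a reading aid, not driver logic): RCR+ while STOPPED
* yields IRC|SCR|SCA|5, i.e. ppp_cp_event() initialises the restart
* counter, sends a Configure-Request, acks the received request with a
* Configure-Ack, and moves the protocol to ACK_SENT (state 5).
*/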
static void ppp_cp_event(struct net_device *dev, u16 pid, u16 event, u8 code,
u8 id, unsigned int len, const void *data)
{
int old_state, action;
struct ppp *ppp = get_ppp(dev);
struct proto *proto = get_proto(dev, pid);
old_state = proto->state;
BUG_ON(old_state >= STATES);
BUG_ON(event >= EVENTS);
#if DEBUG_STATE
printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) %s ...\n", dev->name,
proto_name(pid), event_names[event], state_names[proto->state]);
#endif
action = cp_table[event][old_state];
proto->state = action & STATE_MASK;
if (action & (SCR | STR)) /* set Configure-Req/Terminate-Req timer */
mod_timer(&proto->timer, proto->timeout =
jiffies + ppp->req_timeout * HZ);
if (action & ZRC)
proto->restart_counter = 0;
if (action & IRC)
proto->restart_counter = (proto->state == STOPPING) ?
ppp->term_retries : ppp->cr_retries;
if (action & SCR) /* send Configure-Request */
ppp_tx_cp(dev, pid, CP_CONF_REQ, proto->cr_id = ++ppp->seq,
0, NULL);
if (action & SCA) /* send Configure-Ack */
ppp_tx_cp(dev, pid, CP_CONF_ACK, id, len, data);
if (action & SCN) /* send Configure-Nak/Reject */
ppp_tx_cp(dev, pid, code, id, len, data);
if (action & STR) /* send Terminate-Request */
ppp_tx_cp(dev, pid, CP_TERM_REQ, ++ppp->seq, 0, NULL);
if (action & STA) /* send Terminate-Ack */
ppp_tx_cp(dev, pid, CP_TERM_ACK, id, 0, NULL);
if (action & SCJ) /* send Code-Reject */
ppp_tx_cp(dev, pid, CP_CODE_REJ, ++ppp->seq, len, data);
if (old_state != OPENED && proto->state == OPENED) {
netdev_info(dev, "%s up\n", proto_name(pid));
if (pid == PID_LCP) {
netif_dormant_off(dev);
ppp_cp_event(dev, PID_IPCP, START, 0, 0, 0, NULL);
ppp_cp_event(dev, PID_IPV6CP, START, 0, 0, 0, NULL);
ppp->last_pong = jiffies;
mod_timer(&proto->timer, proto->timeout =
jiffies + ppp->keepalive_interval * HZ);
}
}
if (old_state == OPENED && proto->state != OPENED) {
netdev_info(dev, "%s down\n", proto_name(pid));
if (pid == PID_LCP) {
netif_dormant_on(dev);
ppp_cp_event(dev, PID_IPCP, STOP, 0, 0, 0, NULL);
ppp_cp_event(dev, PID_IPV6CP, STOP, 0, 0, 0, NULL);
}
}
if (old_state != CLOSED && proto->state == CLOSED)
del_timer(&proto->timer);
#if DEBUG_STATE
printk(KERN_DEBUG "%s: %s ppp_cp_event(%s) ... %s\n", dev->name,
proto_name(pid), event_names[event], state_names[proto->state]);
#endif
}
static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id,
unsigned int req_len, const u8 *data)
{
static u8 const valid_accm[6] = { LCP_OPTION_ACCM, 6, 0, 0, 0, 0 };
const u8 *opt;
u8 *out;
unsigned int len = req_len, nak_len = 0, rej_len = 0;
out = kmalloc(len, GFP_ATOMIC);
if (!out) {
dev->stats.rx_dropped++;
return; /* out of memory, ignore CR packet */
}
for (opt = data; len; len -= opt[1], opt += opt[1]) {
if (len < 2 || opt[1] < 2 || len < opt[1])
goto err_out;
if (pid == PID_LCP)
switch (opt[0]) {
case LCP_OPTION_MRU:
continue; /* MRU always OK and > 1500 bytes? */
case LCP_OPTION_ACCM: /* async control character map */
if (opt[1] < sizeof(valid_accm))
goto err_out;
if (!memcmp(opt, valid_accm,
sizeof(valid_accm)))
continue;
if (!rej_len) { /* NAK it */
memcpy(out + nak_len, valid_accm,
sizeof(valid_accm));
nak_len += sizeof(valid_accm);
continue;
}
break;
case LCP_OPTION_MAGIC:
if (len < 6)
goto err_out;
if (opt[1] != 6 || (!opt[2] && !opt[3] &&
!opt[4] && !opt[5]))
break; /* reject invalid magic number */
continue;
}
/* reject this option */
memcpy(out + rej_len, opt, opt[1]);
rej_len += opt[1];
}
if (rej_len)
ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_REJ, id, rej_len, out);
else if (nak_len)
ppp_cp_event(dev, pid, RCR_BAD, CP_CONF_NAK, id, nak_len, out);
else
ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data);
kfree(out);
return;
err_out:
dev->stats.rx_errors++;
kfree(out);
}
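/* Example: if a Configure-Request carries an unknown option, that option
 * is copied into out[] and the whole request is answered with a
 * Configure-Reject; a bad ACCM value alone (nothing rejected) is answered
 * with a Configure-Nak carrying valid_accm instead. Reject thus takes
 * precedence over Nak, as RFC 1661 requires.
 */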
static int ppp_rx(struct sk_buff *skb)
{
struct hdlc_header *hdr = (struct hdlc_header *)skb->data;
struct net_device *dev = skb->dev;
struct ppp *ppp = get_ppp(dev);
struct proto *proto;
struct cp_header *cp;
unsigned long flags;
unsigned int len;
u16 pid;
#if DEBUG_CP
int i;
char *ptr;
#endif
spin_lock_irqsave(&ppp->lock, flags);
/* Check HDLC header */
if (skb->len < sizeof(struct hdlc_header))
goto rx_error;
cp = skb_pull(skb, sizeof(struct hdlc_header));
if (hdr->address != HDLC_ADDR_ALLSTATIONS ||
hdr->control != HDLC_CTRL_UI)
goto rx_error;
pid = ntohs(hdr->protocol);
proto = get_proto(dev, pid);
if (!proto) {
if (ppp->protos[IDX_LCP].state == OPENED)
ppp_tx_cp(dev, PID_LCP, LCP_PROTO_REJ,
++ppp->seq, skb->len + 2, &hdr->protocol);
goto rx_error;
}
len = ntohs(cp->len);
if (len < sizeof(struct cp_header) /* no complete CP header? */ ||
skb->len < len /* truncated packet? */)
goto rx_error;
skb_pull(skb, sizeof(struct cp_header));
len -= sizeof(struct cp_header);
/* HDLC and CP headers stripped from skb */
#if DEBUG_CP
if (cp->code < CP_CODES)
sprintf(debug_buffer, "[%s id 0x%X]", code_names[cp->code],
cp->id);
else
sprintf(debug_buffer, "[code %u id 0x%X]", cp->code, cp->id);
ptr = debug_buffer + strlen(debug_buffer);
for (i = 0; i < min_t(unsigned int, len, DEBUG_CP); i++) {
sprintf(ptr, " %02X", skb->data[i]);
ptr += strlen(ptr);
}
printk(KERN_DEBUG "%s: RX %s %s\n", dev->name, proto_name(pid),
debug_buffer);
#endif
/* LCP only */
if (pid == PID_LCP)
switch (cp->code) {
case LCP_PROTO_REJ:
pid = ntohs(*(__be16 *)skb->data);
if (pid == PID_LCP || pid == PID_IPCP ||
pid == PID_IPV6CP)
ppp_cp_event(dev, pid, RXJ_BAD, 0, 0,
0, NULL);
goto out;
case LCP_ECHO_REQ: /* send Echo-Reply */
if (len >= 4 && proto->state == OPENED)
ppp_tx_cp(dev, PID_LCP, LCP_ECHO_REPLY,
cp->id, len - 4, skb->data + 4);
goto out;
case LCP_ECHO_REPLY:
if (cp->id == ppp->echo_id)
ppp->last_pong = jiffies;
goto out;
case LCP_DISC_REQ: /* discard */
goto out;
}
/* LCP, IPCP and IPV6CP */
switch (cp->code) {
case CP_CONF_REQ:
ppp_cp_parse_cr(dev, pid, cp->id, len, skb->data);
break;
case CP_CONF_ACK:
if (cp->id == proto->cr_id)
ppp_cp_event(dev, pid, RCA, 0, 0, 0, NULL);
break;
case CP_CONF_REJ:
case CP_CONF_NAK:
if (cp->id == proto->cr_id)
ppp_cp_event(dev, pid, RCN, 0, 0, 0, NULL);
break;
case CP_TERM_REQ:
ppp_cp_event(dev, pid, RTR, 0, cp->id, 0, NULL);
break;
case CP_TERM_ACK:
ppp_cp_event(dev, pid, RTA, 0, 0, 0, NULL);
break;
case CP_CODE_REJ:
ppp_cp_event(dev, pid, RXJ_BAD, 0, 0, 0, NULL);
break;
default:
len += sizeof(struct cp_header);
if (len > dev->mtu)
len = dev->mtu;
ppp_cp_event(dev, pid, RUC, 0, 0, len, cp);
break;
}
goto out;
rx_error:
dev->stats.rx_errors++;
out:
spin_unlock_irqrestore(&ppp->lock, flags);
dev_kfree_skb_any(skb);
ppp_tx_flush();
return NET_RX_DROP;
}
static void ppp_timer(struct timer_list *t)
{
struct proto *proto = from_timer(proto, t, timer);
struct ppp *ppp = get_ppp(proto->dev);
unsigned long flags;
spin_lock_irqsave(&ppp->lock, flags);
/* mod_timer could be called after we entered this function but
* before we got the lock.
*/
if (timer_pending(&proto->timer)) {
spin_unlock_irqrestore(&ppp->lock, flags);
return;
}
switch (proto->state) {
case STOPPING:
case REQ_SENT:
case ACK_RECV:
case ACK_SENT:
if (proto->restart_counter) {
ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
0, NULL);
proto->restart_counter--;
} else if (netif_carrier_ok(proto->dev))
ppp_cp_event(proto->dev, proto->pid, TO_GOOD, 0, 0,
0, NULL);
else
ppp_cp_event(proto->dev, proto->pid, TO_BAD, 0, 0,
0, NULL);
break;
case OPENED:
if (proto->pid != PID_LCP)
break;
if (time_after(jiffies, ppp->last_pong +
ppp->keepalive_timeout * HZ)) {
netdev_info(proto->dev, "Link down\n");
ppp_cp_event(proto->dev, PID_LCP, STOP, 0, 0, 0, NULL);
ppp_cp_event(proto->dev, PID_LCP, START, 0, 0, 0, NULL);
} else { /* send keep-alive packet */
ppp->echo_id = ++ppp->seq;
ppp_tx_cp(proto->dev, PID_LCP, LCP_ECHO_REQ,
ppp->echo_id, 0, NULL);
proto->timer.expires = jiffies +
ppp->keepalive_interval * HZ;
add_timer(&proto->timer);
}
break;
}
spin_unlock_irqrestore(&ppp->lock, flags);
ppp_tx_flush();
}
static void ppp_start(struct net_device *dev)
{
struct ppp *ppp = get_ppp(dev);
int i;
for (i = 0; i < IDX_COUNT; i++) {
struct proto *proto = &ppp->protos[i];
proto->dev = dev;
timer_setup(&proto->timer, ppp_timer, 0);
proto->state = CLOSED;
}
ppp->protos[IDX_LCP].pid = PID_LCP;
ppp->protos[IDX_IPCP].pid = PID_IPCP;
ppp->protos[IDX_IPV6CP].pid = PID_IPV6CP;
ppp_cp_event(dev, PID_LCP, START, 0, 0, 0, NULL);
}
static void ppp_stop(struct net_device *dev)
{
ppp_cp_event(dev, PID_LCP, STOP, 0, 0, 0, NULL);
}
static void ppp_close(struct net_device *dev)
{
ppp_tx_flush();
}
static struct hdlc_proto proto = {
.start = ppp_start,
.stop = ppp_stop,
.close = ppp_close,
.type_trans = ppp_type_trans,
.ioctl = ppp_ioctl,
.netif_rx = ppp_rx,
.module = THIS_MODULE,
};
static const struct header_ops ppp_header_ops = {
.create = ppp_hard_header,
};
static int ppp_ioctl(struct net_device *dev, struct if_settings *ifs)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct ppp *ppp;
int result;
switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
ifs->type = IF_PROTO_PPP;
return 0; /* return protocol only, no settable parameters */
case IF_PROTO_PPP:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
/* no settable parameters */
result = hdlc->attach(dev, ENCODING_NRZ,
PARITY_CRC16_PR1_CCITT);
if (result)
return result;
result = attach_hdlc_protocol(dev, &proto, sizeof(struct ppp));
if (result)
return result;
ppp = get_ppp(dev);
spin_lock_init(&ppp->lock);
ppp->req_timeout = 2;
ppp->cr_retries = 10;
ppp->term_retries = 2;
ppp->keepalive_interval = 10;
ppp->keepalive_timeout = 60;
dev->hard_header_len = sizeof(struct hdlc_header);
dev->header_ops = &ppp_header_ops;
dev->type = ARPHRD_PPP;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_on(dev);
return 0;
}
return -EINVAL;
}
static int __init hdlc_ppp_init(void)
{
skb_queue_head_init(&tx_queue);
register_hdlc_protocol(&proto);
return 0;
}
static void __exit hdlc_ppp_exit(void)
{
unregister_hdlc_protocol(&proto);
}
module_init(hdlc_ppp_init);
module_exit(hdlc_ppp_exit);
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("PPP protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/net/wan/hdlc_ppp.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Hitachi SCA HD64570 driver for Linux
*
* Copyright (C) 1998-2003 Krzysztof Halasa <[email protected]>
*
* Source of information: Hitachi HD64570 SCA User's Manual
*
* We use the following SCA memory map:
*
* Packet buffer descriptor rings - starting from winbase or win0base:
* rx_ring_buffers * sizeof(pkt_desc) = logical channel #0 RX ring
* tx_ring_buffers * sizeof(pkt_desc) = logical channel #0 TX ring
* rx_ring_buffers * sizeof(pkt_desc) = logical channel #1 RX ring (if used)
* tx_ring_buffers * sizeof(pkt_desc) = logical channel #1 TX ring (if used)
*
* Packet data buffers - starting from winbase + buff_offset:
* rx_ring_buffers * HDLC_MAX_MRU = logical channel #0 RX buffers
* tx_ring_buffers * HDLC_MAX_MRU = logical channel #0 TX buffers
* rx_ring_buffers * HDLC_MAX_MRU = logical channel #1 RX buffers (if used)
* tx_ring_buffers * HDLC_MAX_MRU = logical channel #1 TX buffers (if used)
*/
#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/fcntl.h>
#include <linux/hdlc.h>
#include <linux/in.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <asm/io.h>
#include <linux/uaccess.h>
#include "hd64570.h"
#define get_msci(port) (phy_node(port) ? MSCI1_OFFSET : MSCI0_OFFSET)
#define get_dmac_rx(port) (phy_node(port) ? DMAC1RX_OFFSET : DMAC0RX_OFFSET)
#define get_dmac_tx(port) (phy_node(port) ? DMAC1TX_OFFSET : DMAC0TX_OFFSET)
#define SCA_INTR_MSCI(node) (node ? 0x10 : 0x01)
#define SCA_INTR_DMAC_RX(node) (node ? 0x20 : 0x02)
#define SCA_INTR_DMAC_TX(node) (node ? 0x40 : 0x04)
static inline struct net_device *port_to_dev(port_t *port)
{
return port->dev;
}
static inline int sca_intr_status(card_t *card)
{
u8 result = 0;
u8 isr0 = sca_in(ISR0, card);
u8 isr1 = sca_in(ISR1, card);
if (isr1 & 0x03)
result |= SCA_INTR_DMAC_RX(0);
if (isr1 & 0x0C)
result |= SCA_INTR_DMAC_TX(0);
if (isr1 & 0x30)
result |= SCA_INTR_DMAC_RX(1);
if (isr1 & 0xC0)
result |= SCA_INTR_DMAC_TX(1);
if (isr0 & 0x0F)
result |= SCA_INTR_MSCI(0);
if (isr0 & 0xF0)
result |= SCA_INTR_MSCI(1);
if (!(result & SCA_INTR_DMAC_TX(0)))
if (sca_in(DSR_TX(0), card) & DSR_EOM)
result |= SCA_INTR_DMAC_TX(0);
if (!(result & SCA_INTR_DMAC_TX(1)))
if (sca_in(DSR_TX(1), card) & DSR_EOM)
result |= SCA_INTR_DMAC_TX(1);
return result;
}
static inline port_t *dev_to_port(struct net_device *dev)
{
return dev_to_hdlc(dev)->priv;
}
static inline u16 next_desc(port_t *port, u16 desc, int transmit)
{
return (desc + 1) % (transmit ? port_to_card(port)->tx_ring_buffers
: port_to_card(port)->rx_ring_buffers);
}
static inline u16 desc_abs_number(port_t *port, u16 desc, int transmit)
{
u16 rx_buffs = port_to_card(port)->rx_ring_buffers;
u16 tx_buffs = port_to_card(port)->tx_ring_buffers;
desc %= (transmit ? tx_buffs : rx_buffs); // called with "X + 1" etc.
return log_node(port) * (rx_buffs + tx_buffs) +
transmit * rx_buffs + desc;
}
static inline u16 desc_offset(port_t *port, u16 desc, int transmit)
{
/* Descriptor offset always fits in 16 bits */
return desc_abs_number(port, desc, transmit) * sizeof(pkt_desc);
}
static inline pkt_desc __iomem *desc_address(port_t *port, u16 desc,
int transmit)
{
#ifdef PAGE0_ALWAYS_MAPPED
return (pkt_desc __iomem *)(win0base(port_to_card(port))
+ desc_offset(port, desc, transmit));
#else
return (pkt_desc __iomem *)(winbase(port_to_card(port))
+ desc_offset(port, desc, transmit));
#endif
}
static inline u32 buffer_offset(port_t *port, u16 desc, int transmit)
{
return port_to_card(port)->buff_offset +
desc_abs_number(port, desc, transmit) * (u32)HDLC_MAX_MRU;
}
static inline void sca_set_carrier(port_t *port)
{
if (!(sca_in(get_msci(port) + ST3, port_to_card(port)) & ST3_DCD)) {
#ifdef DEBUG_LINK
printk(KERN_DEBUG "%s: sca_set_carrier on\n",
port_to_dev(port)->name);
#endif
netif_carrier_on(port_to_dev(port));
} else {
#ifdef DEBUG_LINK
printk(KERN_DEBUG "%s: sca_set_carrier off\n",
port_to_dev(port)->name);
#endif
netif_carrier_off(port_to_dev(port));
}
}
static void sca_init_port(port_t *port)
{
card_t *card = port_to_card(port);
int transmit, i;
port->rxin = 0;
port->txin = 0;
port->txlast = 0;
#ifndef PAGE0_ALWAYS_MAPPED
openwin(card, 0);
#endif
for (transmit = 0; transmit < 2; transmit++) {
u16 dmac = transmit ? get_dmac_tx(port) : get_dmac_rx(port);
u16 buffs = transmit ? card->tx_ring_buffers
: card->rx_ring_buffers;
for (i = 0; i < buffs; i++) {
pkt_desc __iomem *desc = desc_address(port, i, transmit);
u16 chain_off = desc_offset(port, i + 1, transmit);
u32 buff_off = buffer_offset(port, i, transmit);
writew(chain_off, &desc->cp);
writel(buff_off, &desc->bp);
writew(0, &desc->len);
writeb(0, &desc->stat);
}
/* DMA disable - to halt state */
sca_out(0, transmit ? DSR_TX(phy_node(port)) :
DSR_RX(phy_node(port)), card);
/* software ABORT - to initial state */
sca_out(DCR_ABORT, transmit ? DCR_TX(phy_node(port)) :
DCR_RX(phy_node(port)), card);
/* current desc addr */
sca_out(0, dmac + CPB, card); /* pointer base */
sca_outw(desc_offset(port, 0, transmit), dmac + CDAL, card);
if (!transmit)
sca_outw(desc_offset(port, buffs - 1, transmit),
dmac + EDAL, card);
else
sca_outw(desc_offset(port, 0, transmit), dmac + EDAL,
card);
/* clear frame end interrupt counter */
sca_out(DCR_CLEAR_EOF, transmit ? DCR_TX(phy_node(port)) :
DCR_RX(phy_node(port)), card);
if (!transmit) { /* Receive */
/* set buffer length */
sca_outw(HDLC_MAX_MRU, dmac + BFLL, card);
/* Chain mode, Multi-frame */
sca_out(0x14, DMR_RX(phy_node(port)), card);
sca_out(DIR_EOME | DIR_BOFE, DIR_RX(phy_node(port)),
card);
/* DMA enable */
sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
} else { /* Transmit */
/* Chain mode, Multi-frame */
sca_out(0x14, DMR_TX(phy_node(port)), card);
/* enable underflow interrupts */
sca_out(DIR_BOFE, DIR_TX(phy_node(port)), card);
}
}
sca_set_carrier(port);
}
#ifdef NEED_SCA_MSCI_INTR
/* MSCI interrupt service */
static inline void sca_msci_intr(port_t *port)
{
u16 msci = get_msci(port);
card_t *card = port_to_card(port);
u8 stat = sca_in(msci + ST1, card); /* read MSCI ST1 status */
/* Reset MSCI TX underrun and CDCD status bit */
sca_out(stat & (ST1_UDRN | ST1_CDCD), msci + ST1, card);
if (stat & ST1_UDRN) {
/* TX Underrun error detected */
port_to_dev(port)->stats.tx_errors++;
port_to_dev(port)->stats.tx_fifo_errors++;
}
if (stat & ST1_CDCD)
sca_set_carrier(port);
}
#endif
static inline void sca_rx(card_t *card, port_t *port, pkt_desc __iomem *desc,
u16 rxin)
{
struct net_device *dev = port_to_dev(port);
struct sk_buff *skb;
u16 len;
u32 buff;
u32 maxlen;
u8 page;
len = readw(&desc->len);
skb = dev_alloc_skb(len);
if (!skb) {
dev->stats.rx_dropped++;
return;
}
buff = buffer_offset(port, rxin, 0);
page = buff / winsize(card);
buff = buff % winsize(card);
maxlen = winsize(card) - buff;
openwin(card, page);
if (len > maxlen) {
memcpy_fromio(skb->data, winbase(card) + buff, maxlen);
openwin(card, page + 1);
memcpy_fromio(skb->data + maxlen, winbase(card), len - maxlen);
} else {
memcpy_fromio(skb->data, winbase(card) + buff, len);
}
#ifndef PAGE0_ALWAYS_MAPPED
openwin(card, 0); /* select pkt_desc table page back */
#endif
skb_put(skb, len);
#ifdef DEBUG_PKT
printk(KERN_DEBUG "%s RX(%i):", dev->name, skb->len);
debug_frame(skb);
#endif
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
skb->protocol = hdlc_type_trans(skb, dev);
netif_rx(skb);
}
/* Receive DMA interrupt service */
static inline void sca_rx_intr(port_t *port)
{
struct net_device *dev = port_to_dev(port);
u16 dmac = get_dmac_rx(port);
card_t *card = port_to_card(port);
u8 stat = sca_in(DSR_RX(phy_node(port)), card); /* read DMA Status */
/* Reset DSR status bits */
sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
DSR_RX(phy_node(port)), card);
if (stat & DSR_BOF)
/* Dropped one or more frames */
dev->stats.rx_over_errors++;
while (1) {
u32 desc_off = desc_offset(port, port->rxin, 0);
pkt_desc __iomem *desc;
u32 cda = sca_inw(dmac + CDAL, card);
if (cda >= desc_off && (cda < desc_off + sizeof(pkt_desc)))
break; /* No frame received */
desc = desc_address(port, port->rxin, 0);
stat = readb(&desc->stat);
if (!(stat & ST_RX_EOM))
port->rxpart = 1; /* partial frame received */
else if ((stat & ST_ERROR_MASK) || port->rxpart) {
dev->stats.rx_errors++;
if (stat & ST_RX_OVERRUN)
dev->stats.rx_fifo_errors++;
else if ((stat & (ST_RX_SHORT | ST_RX_ABORT |
ST_RX_RESBIT)) || port->rxpart)
dev->stats.rx_frame_errors++;
else if (stat & ST_RX_CRC)
dev->stats.rx_crc_errors++;
if (stat & ST_RX_EOM)
port->rxpart = 0; /* received last fragment */
} else {
sca_rx(card, port, desc, port->rxin);
}
/* Set new error descriptor address */
sca_outw(desc_off, dmac + EDAL, card);
port->rxin = next_desc(port, port->rxin, 0);
}
/* make sure RX DMA is enabled */
sca_out(DSR_DE, DSR_RX(phy_node(port)), card);
}
/* Transmit DMA interrupt service */
static inline void sca_tx_intr(port_t *port)
{
struct net_device *dev = port_to_dev(port);
u16 dmac = get_dmac_tx(port);
card_t *card = port_to_card(port);
u8 stat;
spin_lock(&port->lock);
stat = sca_in(DSR_TX(phy_node(port)), card); /* read DMA Status */
/* Reset DSR status bits */
sca_out((stat & (DSR_EOT | DSR_EOM | DSR_BOF | DSR_COF)) | DSR_DWE,
DSR_TX(phy_node(port)), card);
while (1) {
pkt_desc __iomem *desc;
u32 desc_off = desc_offset(port, port->txlast, 1);
u32 cda = sca_inw(dmac + CDAL, card);
if (cda >= desc_off && (cda < desc_off + sizeof(pkt_desc)))
break; /* Transmitter is or will be sending this frame */
desc = desc_address(port, port->txlast, 1);
dev->stats.tx_packets++;
dev->stats.tx_bytes += readw(&desc->len);
writeb(0, &desc->stat); /* Free descriptor */
port->txlast = next_desc(port, port->txlast, 1);
}
netif_wake_queue(dev);
spin_unlock(&port->lock);
}
static irqreturn_t sca_intr(int irq, void *dev_id)
{
card_t *card = dev_id;
int i;
u8 stat;
int handled = 0;
u8 page = sca_get_page(card);
while ((stat = sca_intr_status(card)) != 0) {
handled = 1;
for (i = 0; i < 2; i++) {
port_t *port = get_port(card, i);
if (port) {
if (stat & SCA_INTR_MSCI(i))
sca_msci_intr(port);
if (stat & SCA_INTR_DMAC_RX(i))
sca_rx_intr(port);
if (stat & SCA_INTR_DMAC_TX(i))
sca_tx_intr(port);
}
}
}
openwin(card, page); /* Restore original page */
return IRQ_RETVAL(handled);
}
static void sca_set_port(port_t *port)
{
card_t *card = port_to_card(port);
u16 msci = get_msci(port);
u8 md2 = sca_in(msci + MD2, card);
unsigned int tmc, br = 10, brv = 1024;
if (port->settings.clock_rate > 0) {
/* Try lower br for better accuracy */
do {
br--;
brv >>= 1; /* brv = 2^9 = 512 max in specs */
/* Baud Rate = CLOCK_BASE / TMC / 2^BR */
tmc = CLOCK_BASE / brv / port->settings.clock_rate;
} while (br > 1 && tmc <= 128);
if (tmc < 1) {
tmc = 1;
br = 0; /* For baud=CLOCK_BASE we use tmc=1 br=0 */
brv = 1;
} else if (tmc > 255) {
tmc = 256; /* tmc=0 means 256 - low baud rates */
}
port->settings.clock_rate = CLOCK_BASE / brv / tmc;
} else {
br = 9; /* Minimum clock rate */
tmc = 256; /* 8bit = 0 */
port->settings.clock_rate = CLOCK_BASE / (256 * 512);
}
port->rxs = (port->rxs & ~CLK_BRG_MASK) | br;
port->txs = (port->txs & ~CLK_BRG_MASK) | br;
port->tmc = tmc;
/* baud divisor - time constant*/
sca_out(port->tmc, msci + TMC, card);
/* Set BRG bits */
sca_out(port->rxs, msci + RXS, card);
sca_out(port->txs, msci + TXS, card);
if (port->settings.loopback)
md2 |= MD2_LOOPBACK;
else
md2 &= ~MD2_LOOPBACK;
sca_out(md2, msci + MD2, card);
}
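/* Worked example of the BRG search above (CLOCK_BASE is supplied by the
 * card driver; 9.8304 MHz is assumed here for illustration): for a
 * requested clock_rate of 9600 the loop stops at br = 2 (brv = 4) with
 * tmc = 9830400 / 4 / 9600 = 256, written to the 8-bit TMC register as 0
 * ("256"), giving exactly 9830400 / 4 / 256 = 9600 bps.
 */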
static void sca_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
card_t *card = port_to_card(port);
u16 msci = get_msci(port);
u8 md0, md2;
switch (port->encoding) {
case ENCODING_NRZ:
md2 = MD2_NRZ;
break;
case ENCODING_NRZI:
md2 = MD2_NRZI;
break;
case ENCODING_FM_MARK:
md2 = MD2_FM_MARK;
break;
case ENCODING_FM_SPACE:
md2 = MD2_FM_SPACE;
break;
default:
md2 = MD2_MANCHESTER;
}
if (port->settings.loopback)
md2 |= MD2_LOOPBACK;
switch (port->parity) {
case PARITY_CRC16_PR0:
md0 = MD0_HDLC | MD0_CRC_16_0;
break;
case PARITY_CRC16_PR1:
md0 = MD0_HDLC | MD0_CRC_16;
break;
case PARITY_CRC16_PR0_CCITT:
md0 = MD0_HDLC | MD0_CRC_ITU_0;
break;
case PARITY_CRC16_PR1_CCITT:
md0 = MD0_HDLC | MD0_CRC_ITU;
break;
default:
md0 = MD0_HDLC | MD0_CRC_NONE;
}
sca_out(CMD_RESET, msci + CMD, card);
sca_out(md0, msci + MD0, card);
sca_out(0x00, msci + MD1, card); /* no address field check */
sca_out(md2, msci + MD2, card);
sca_out(0x7E, msci + IDL, card); /* flag character 0x7E */
sca_out(CTL_IDLE, msci + CTL, card);
/* Allow at least 8 bytes before requesting RX DMA operation */
/* TX with higher priority and possibly with shorter transfers */
sca_out(0x07, msci + RRC, card); /* +1=RXRDY/DMA activation condition*/
sca_out(0x10, msci + TRC0, card); /* = TXRDY/DMA activation condition*/
sca_out(0x14, msci + TRC1, card); /* +1=TXRDY/DMA deactiv condition */
/* We're using the following interrupts:
* - TXINT (DMAC completed all transmissions, underrun or DCD change)
* - all DMA interrupts
*/
sca_set_carrier(port);
/* MSCI TX INT and RX INT A IRQ enable */
sca_out(IE0_TXINT | IE0_RXINTA, msci + IE0, card);
sca_out(IE1_UDRN | IE1_CDCD, msci + IE1, card);
sca_out(sca_in(IER0, card) | (phy_node(port) ? 0xC0 : 0x0C),
IER0, card); /* TXINT and RXINT */
/* enable DMA IRQ */
sca_out(sca_in(IER1, card) | (phy_node(port) ? 0xF0 : 0x0F),
IER1, card);
sca_out(port->tmc, msci + TMC, card); /* Restore registers */
sca_out(port->rxs, msci + RXS, card);
sca_out(port->txs, msci + TXS, card);
sca_out(CMD_TX_ENABLE, msci + CMD, card);
sca_out(CMD_RX_ENABLE, msci + CMD, card);
netif_start_queue(dev);
}
static void sca_close(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
card_t *card = port_to_card(port);
/* reset channel */
sca_out(CMD_RESET, get_msci(port) + CMD, port_to_card(port));
/* disable MSCI interrupts */
sca_out(sca_in(IER0, card) & (phy_node(port) ? 0x0F : 0xF0),
IER0, card);
/* disable DMA interrupts */
sca_out(sca_in(IER1, card) & (phy_node(port) ? 0x0F : 0xF0),
IER1, card);
netif_stop_queue(dev);
}
static int sca_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
if (encoding != ENCODING_NRZ &&
encoding != ENCODING_NRZI &&
encoding != ENCODING_FM_MARK &&
encoding != ENCODING_FM_SPACE &&
encoding != ENCODING_MANCHESTER)
return -EINVAL;
if (parity != PARITY_NONE &&
parity != PARITY_CRC16_PR0 &&
parity != PARITY_CRC16_PR1 &&
parity != PARITY_CRC16_PR0_CCITT &&
parity != PARITY_CRC16_PR1_CCITT)
return -EINVAL;
dev_to_port(dev)->encoding = encoding;
dev_to_port(dev)->parity = parity;
return 0;
}
#ifdef DEBUG_RINGS
static void sca_dump_rings(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
card_t *card = port_to_card(port);
u16 cnt;
#ifndef PAGE0_ALWAYS_MAPPED
u8 page = sca_get_page(card);
openwin(card, 0);
#endif
printk(KERN_DEBUG "RX ring: CDA=%u EDA=%u DSR=%02X in=%u %sactive",
sca_inw(get_dmac_rx(port) + CDAL, card),
sca_inw(get_dmac_rx(port) + EDAL, card),
sca_in(DSR_RX(phy_node(port)), card), port->rxin,
sca_in(DSR_RX(phy_node(port)), card) & DSR_DE ? "" : "in");
for (cnt = 0; cnt < port_to_card(port)->rx_ring_buffers; cnt++)
pr_cont(" %02X", readb(&(desc_address(port, cnt, 0)->stat)));
pr_cont("\n");
printk(KERN_DEBUG "TX ring: CDA=%u EDA=%u DSR=%02X in=%u "
"last=%u %sactive",
sca_inw(get_dmac_tx(port) + CDAL, card),
sca_inw(get_dmac_tx(port) + EDAL, card),
sca_in(DSR_TX(phy_node(port)), card), port->txin, port->txlast,
sca_in(DSR_TX(phy_node(port)), card) & DSR_DE ? "" : "in");
for (cnt = 0; cnt < port_to_card(port)->tx_ring_buffers; cnt++)
pr_cont(" %02X", readb(&(desc_address(port, cnt, 1)->stat)));
pr_cont("\n");
printk(KERN_DEBUG "MSCI: MD: %02x %02x %02x, ST: %02x %02x %02x %02x,"
" FST: %02x CST: %02x %02x\n",
sca_in(get_msci(port) + MD0, card),
sca_in(get_msci(port) + MD1, card),
sca_in(get_msci(port) + MD2, card),
sca_in(get_msci(port) + ST0, card),
sca_in(get_msci(port) + ST1, card),
sca_in(get_msci(port) + ST2, card),
sca_in(get_msci(port) + ST3, card),
sca_in(get_msci(port) + FST, card),
sca_in(get_msci(port) + CST0, card),
sca_in(get_msci(port) + CST1, card));
printk(KERN_DEBUG "ISR: %02x %02x %02x\n", sca_in(ISR0, card),
sca_in(ISR1, card), sca_in(ISR2, card));
#ifndef PAGE0_ALWAYS_MAPPED
openwin(card, page); /* Restore original page */
#endif
}
#endif /* DEBUG_RINGS */
static netdev_tx_t sca_xmit(struct sk_buff *skb, struct net_device *dev)
{
port_t *port = dev_to_port(dev);
card_t *card = port_to_card(port);
pkt_desc __iomem *desc;
u32 buff, len;
u8 page;
u32 maxlen;
spin_lock_irq(&port->lock);
desc = desc_address(port, port->txin + 1, 1);
BUG_ON(readb(&desc->stat)); /* previous xmit should stop queue */
#ifdef DEBUG_PKT
printk(KERN_DEBUG "%s TX(%i):", dev->name, skb->len);
debug_frame(skb);
#endif
desc = desc_address(port, port->txin, 1);
buff = buffer_offset(port, port->txin, 1);
len = skb->len;
page = buff / winsize(card);
buff = buff % winsize(card);
maxlen = winsize(card) - buff;
openwin(card, page);
if (len > maxlen) {
memcpy_toio(winbase(card) + buff, skb->data, maxlen);
openwin(card, page + 1);
memcpy_toio(winbase(card), skb->data + maxlen, len - maxlen);
} else {
memcpy_toio(winbase(card) + buff, skb->data, len);
}
#ifndef PAGE0_ALWAYS_MAPPED
openwin(card, 0); /* select pkt_desc table page back */
#endif
writew(len, &desc->len);
writeb(ST_TX_EOM, &desc->stat);
port->txin = next_desc(port, port->txin, 1);
sca_outw(desc_offset(port, port->txin, 1),
get_dmac_tx(port) + EDAL, card);
sca_out(DSR_DE, DSR_TX(phy_node(port)), card); /* Enable TX DMA */
desc = desc_address(port, port->txin + 1, 1);
if (readb(&desc->stat)) /* allow 1 packet gap */
netif_stop_queue(dev);
spin_unlock_irq(&port->lock);
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
#ifdef NEED_DETECT_RAM
static u32 sca_detect_ram(card_t *card, u8 __iomem *rambase, u32 ramsize)
{
/* Round RAM size to 32 bits, fill from end to start */
u32 i = ramsize &= ~3;
u32 size = winsize(card);
openwin(card, (i - 4) / size); /* select last window */
do {
i -= 4;
if ((i + 4) % size == 0)
openwin(card, i / size);
writel(i ^ 0x12345678, rambase + i % size);
} while (i > 0);
for (i = 0; i < ramsize ; i += 4) {
if (i % size == 0)
openwin(card, i / size);
if (readl(rambase + i % size) != (i ^ 0x12345678))
break;
}
return i;
}
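/* Example: probing a card that has 64 KB of RAM with ramsize = 128 KB,
 * the read-back loop first mismatches at i = 0x10000 - whether the extra
 * addresses wrap around onto the low 64 KB (which the end-to-start write
 * loop rewrites afterwards) or are simply not backed by memory - so
 * 0x10000 is returned as the usable size.
 */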
#endif /* NEED_DETECT_RAM */
static void sca_init(card_t *card, int wait_states)
{
sca_out(wait_states, WCRL, card); /* Wait Control */
sca_out(wait_states, WCRM, card);
sca_out(wait_states, WCRH, card);
sca_out(0, DMER, card); /* DMA Master disable */
sca_out(0x03, PCR, card); /* DMA priority */
sca_out(0, DSR_RX(0), card); /* DMA disable - to halt state */
sca_out(0, DSR_TX(0), card);
sca_out(0, DSR_RX(1), card);
sca_out(0, DSR_TX(1), card);
sca_out(DMER_DME, DMER, card); /* DMA Master enable */
}
|
linux-master
|
drivers/net/wan/hd64570.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic HDLC support routines for Linux
* Cisco HDLC support
*
* Copyright (C) 2000 - 2006 Krzysztof Halasa <[email protected]>
*/
#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#undef DEBUG_HARD_HEADER
#define CISCO_MULTICAST 0x8F /* Cisco multicast address */
#define CISCO_UNICAST 0x0F /* Cisco unicast address */
#define CISCO_KEEPALIVE 0x8035 /* Cisco keepalive protocol */
#define CISCO_SYS_INFO 0x2000 /* Cisco interface/system info */
#define CISCO_ADDR_REQ 0 /* Cisco address request */
#define CISCO_ADDR_REPLY 1 /* Cisco address reply */
#define CISCO_KEEPALIVE_REQ 2 /* Cisco keepalive request */
struct hdlc_header {
u8 address;
u8 control;
__be16 protocol;
} __packed;
struct cisco_packet {
__be32 type; /* code */
__be32 par1;
__be32 par2;
__be16 rel; /* reliability */
__be32 time;
} __packed;
#define CISCO_PACKET_LEN 18
#define CISCO_BIG_PACKET_LEN 20
struct cisco_state {
cisco_proto settings;
struct timer_list timer;
struct net_device *dev;
spinlock_t lock;
unsigned long last_poll;
int up;
u32 txseq; /* TX sequence number, 0 = none */
u32 rxseq; /* RX sequence number */
};
static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs);
static inline struct cisco_state *state(hdlc_device *hdlc)
{
return (struct cisco_state *)hdlc->state;
}
static int cisco_hard_header(struct sk_buff *skb, struct net_device *dev,
u16 type, const void *daddr, const void *saddr,
unsigned int len)
{
struct hdlc_header *data;
#ifdef DEBUG_HARD_HEADER
netdev_dbg(dev, "%s called\n", __func__);
#endif
skb_push(skb, sizeof(struct hdlc_header));
data = (struct hdlc_header *)skb->data;
if (type == CISCO_KEEPALIVE)
data->address = CISCO_MULTICAST;
else
data->address = CISCO_UNICAST;
data->control = 0;
data->protocol = htons(type);
return sizeof(struct hdlc_header);
}
static void cisco_keepalive_send(struct net_device *dev, u32 type,
__be32 par1, __be32 par2)
{
struct sk_buff *skb;
struct cisco_packet *data;
skb = dev_alloc_skb(sizeof(struct hdlc_header) +
sizeof(struct cisco_packet));
if (!skb)
return;
skb_reserve(skb, 4);
cisco_hard_header(skb, dev, CISCO_KEEPALIVE, NULL, NULL, 0);
data = (struct cisco_packet *)(skb->data + 4);
data->type = htonl(type);
data->par1 = par1;
data->par2 = par2;
data->rel = cpu_to_be16(0xFFFF);
/* we will need do_div here if 1000 % HZ != 0 */
data->time = htonl((jiffies - INITIAL_JIFFIES) * (1000 / HZ));
skb_put(skb, sizeof(struct cisco_packet));
skb->priority = TC_PRIO_CONTROL;
skb->dev = dev;
skb->protocol = htons(ETH_P_HDLC);
skb_reset_network_header(skb);
dev_queue_xmit(skb);
}
static __be16 cisco_type_trans(struct sk_buff *skb, struct net_device *dev)
{
struct hdlc_header *data = (struct hdlc_header *)skb->data;
if (skb->len < sizeof(struct hdlc_header))
return cpu_to_be16(ETH_P_HDLC);
if (data->address != CISCO_MULTICAST &&
data->address != CISCO_UNICAST)
return cpu_to_be16(ETH_P_HDLC);
switch (data->protocol) {
case cpu_to_be16(ETH_P_IP):
case cpu_to_be16(ETH_P_IPX):
case cpu_to_be16(ETH_P_IPV6):
skb_pull(skb, sizeof(struct hdlc_header));
return data->protocol;
default:
return cpu_to_be16(ETH_P_HDLC);
}
}
static int cisco_rx(struct sk_buff *skb)
{
struct net_device *dev = skb->dev;
hdlc_device *hdlc = dev_to_hdlc(dev);
struct cisco_state *st = state(hdlc);
struct hdlc_header *data = (struct hdlc_header *)skb->data;
struct cisco_packet *cisco_data;
struct in_device *in_dev;
__be32 addr, mask;
u32 ack;
if (skb->len < sizeof(struct hdlc_header))
goto rx_error;
if (data->address != CISCO_MULTICAST &&
data->address != CISCO_UNICAST)
goto rx_error;
switch (ntohs(data->protocol)) {
case CISCO_SYS_INFO:
/* Packet is not needed, drop it. */
dev_kfree_skb_any(skb);
return NET_RX_SUCCESS;
case CISCO_KEEPALIVE:
if ((skb->len != sizeof(struct hdlc_header) +
CISCO_PACKET_LEN) &&
(skb->len != sizeof(struct hdlc_header) +
CISCO_BIG_PACKET_LEN)) {
netdev_info(dev, "Invalid length of Cisco control packet (%d bytes)\n",
skb->len);
goto rx_error;
}
cisco_data = (struct cisco_packet *)(skb->data + sizeof
(struct hdlc_header));
switch (ntohl(cisco_data->type)) {
case CISCO_ADDR_REQ: /* Stolen from syncppp.c :-) */
rcu_read_lock();
in_dev = __in_dev_get_rcu(dev);
addr = 0;
mask = ~cpu_to_be32(0); /* is the mask correct? */
if (in_dev != NULL) {
const struct in_ifaddr *ifa;
in_dev_for_each_ifa_rcu(ifa, in_dev) {
if (strcmp(dev->name,
ifa->ifa_label) == 0) {
addr = ifa->ifa_local;
mask = ifa->ifa_mask;
break;
}
}
cisco_keepalive_send(dev, CISCO_ADDR_REPLY,
addr, mask);
}
rcu_read_unlock();
dev_kfree_skb_any(skb);
return NET_RX_SUCCESS;
case CISCO_ADDR_REPLY:
netdev_info(dev, "Unexpected Cisco IP address reply\n");
goto rx_error;
case CISCO_KEEPALIVE_REQ:
spin_lock(&st->lock);
st->rxseq = ntohl(cisco_data->par1);
ack = ntohl(cisco_data->par2);
if (ack && (ack == st->txseq ||
/* our current REQ may be in transit */
ack == st->txseq - 1)) {
st->last_poll = jiffies;
if (!st->up) {
u32 sec, min, hrs, days;
sec = ntohl(cisco_data->time) / 1000;
min = sec / 60; sec -= min * 60;
hrs = min / 60; min -= hrs * 60;
days = hrs / 24; hrs -= days * 24;
netdev_info(dev, "Link up (peer uptime %ud%uh%um%us)\n",
days, hrs, min, sec);
netif_dormant_off(dev);
st->up = 1;
}
}
spin_unlock(&st->lock);
dev_kfree_skb_any(skb);
return NET_RX_SUCCESS;
} /* switch (keepalive type) */
} /* switch (protocol) */
netdev_info(dev, "Unsupported protocol %x\n", ntohs(data->protocol));
dev_kfree_skb_any(skb);
return NET_RX_DROP;
rx_error:
dev->stats.rx_errors++; /* Mark error */
dev_kfree_skb_any(skb);
return NET_RX_DROP;
}
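/* Example of the keepalive handshake handled above: if our last request
 * went out with par1 = txseq = 5, a peer request arriving with par2 = 5
 * (or 4, since our newest request may still be in transit) counts as an
 * acknowledgement: it refreshes last_poll and, if the link was marked
 * down, brings it up.
 */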
static void cisco_timer(struct timer_list *t)
{
struct cisco_state *st = from_timer(st, t, timer);
struct net_device *dev = st->dev;
spin_lock(&st->lock);
if (st->up &&
time_after(jiffies, st->last_poll + st->settings.timeout * HZ)) {
st->up = 0;
netdev_info(dev, "Link down\n");
netif_dormant_on(dev);
}
cisco_keepalive_send(dev, CISCO_KEEPALIVE_REQ, htonl(++st->txseq),
htonl(st->rxseq));
spin_unlock(&st->lock);
st->timer.expires = jiffies + st->settings.interval * HZ;
add_timer(&st->timer);
}
static void cisco_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct cisco_state *st = state(hdlc);
unsigned long flags;
spin_lock_irqsave(&st->lock, flags);
st->up = st->txseq = st->rxseq = 0;
spin_unlock_irqrestore(&st->lock, flags);
st->dev = dev;
timer_setup(&st->timer, cisco_timer, 0);
st->timer.expires = jiffies + HZ; /* First poll after 1 s */
add_timer(&st->timer);
}
static void cisco_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
struct cisco_state *st = state(hdlc);
unsigned long flags;
del_timer_sync(&st->timer);
spin_lock_irqsave(&st->lock, flags);
netif_dormant_on(dev);
st->up = st->txseq = 0;
spin_unlock_irqrestore(&st->lock, flags);
}
static struct hdlc_proto proto = {
.start = cisco_start,
.stop = cisco_stop,
.type_trans = cisco_type_trans,
.ioctl = cisco_ioctl,
.netif_rx = cisco_rx,
.module = THIS_MODULE,
};
static const struct header_ops cisco_header_ops = {
.create = cisco_hard_header,
};
static int cisco_ioctl(struct net_device *dev, struct if_settings *ifs)
{
cisco_proto __user *cisco_s = ifs->ifs_ifsu.cisco;
const size_t size = sizeof(cisco_proto);
cisco_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
int result;
switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
ifs->type = IF_PROTO_CISCO;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(cisco_s, &state(hdlc)->settings, size))
return -EFAULT;
return 0;
case IF_PROTO_CISCO:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&new_settings, cisco_s, size))
return -EFAULT;
if (new_settings.interval < 1 ||
new_settings.timeout < 2)
return -EINVAL;
result = hdlc->attach(dev, ENCODING_NRZ,
PARITY_CRC16_PR1_CCITT);
if (result)
return result;
result = attach_hdlc_protocol(dev, &proto,
sizeof(struct cisco_state));
if (result)
return result;
memcpy(&state(hdlc)->settings, &new_settings, size);
spin_lock_init(&state(hdlc)->lock);
dev->header_ops = &cisco_header_ops;
dev->hard_header_len = sizeof(struct hdlc_header);
dev->type = ARPHRD_CISCO;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_on(dev);
return 0;
}
return -EINVAL;
}
static int __init hdlc_cisco_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
static void __exit hdlc_cisco_exit(void)
{
unregister_hdlc_protocol(&proto);
}
module_init(hdlc_cisco_init);
module_exit(hdlc_cisco_exit);
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("Cisco HDLC protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/net/wan/hdlc_cisco.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic HDLC support routines for Linux
*
* Copyright (C) 1999 - 2008 Krzysztof Halasa <[email protected]>
*
* Currently supported:
* * raw IP-in-HDLC
* * Cisco HDLC
* * Frame Relay with ANSI or CCITT LMI (both user and network side)
* * PPP
* * X.25
*
* Use sethdlc utility to set line parameters, protocol and PVCs
*
* How does it work:
* - proto->open(), close(), start(), stop() calls are serialized.
* The order is: open, [ start, stop ... ] close ...
* - proto->start() and stop() are called with spin_lock_irq held.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/notifier.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/net_namespace.h>
static const char *version = "HDLC support module revision 1.22";
#undef DEBUG_LINK
static struct hdlc_proto *first_proto;
static int hdlc_rcv(struct sk_buff *skb, struct net_device *dev,
struct packet_type *p, struct net_device *orig_dev)
{
struct hdlc_device *hdlc;
/* First make sure "dev" is an HDLC device */
if (!(dev->priv_flags & IFF_WAN_HDLC)) {
kfree_skb(skb);
return NET_RX_SUCCESS;
}
hdlc = dev_to_hdlc(dev);
if (!net_eq(dev_net(dev), &init_net)) {
kfree_skb(skb);
return 0;
}
BUG_ON(!hdlc->proto->netif_rx);
return hdlc->proto->netif_rx(skb);
}
netdev_tx_t hdlc_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto->xmit)
return hdlc->proto->xmit(skb, dev);
return hdlc->xmit(skb, dev); /* call hardware driver directly */
}
EXPORT_SYMBOL(hdlc_start_xmit);
static inline void hdlc_proto_start(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto->start)
hdlc->proto->start(dev);
}
static inline void hdlc_proto_stop(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
if (hdlc->proto->stop)
hdlc->proto->stop(dev);
}
static int hdlc_device_event(struct notifier_block *this, unsigned long event,
void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
hdlc_device *hdlc;
unsigned long flags;
int on;
if (!net_eq(dev_net(dev), &init_net))
return NOTIFY_DONE;
if (!(dev->priv_flags & IFF_WAN_HDLC))
return NOTIFY_DONE; /* not an HDLC device */
if (event != NETDEV_CHANGE)
return NOTIFY_DONE; /* Only interested in carrier changes */
on = netif_carrier_ok(dev);
#ifdef DEBUG_LINK
printk(KERN_DEBUG "%s: hdlc_device_event NETDEV_CHANGE, carrier %i\n",
dev->name, on);
#endif
hdlc = dev_to_hdlc(dev);
spin_lock_irqsave(&hdlc->state_lock, flags);
if (hdlc->carrier == on)
goto carrier_exit; /* no change in DCD line level */
hdlc->carrier = on;
if (!hdlc->open)
goto carrier_exit;
if (hdlc->carrier) {
netdev_info(dev, "Carrier detected\n");
hdlc_proto_start(dev);
} else {
netdev_info(dev, "Carrier lost\n");
hdlc_proto_stop(dev);
}
carrier_exit:
spin_unlock_irqrestore(&hdlc->state_lock, flags);
return NOTIFY_DONE;
}
/* Must be called by hardware driver when HDLC device is being opened */
int hdlc_open(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
printk(KERN_DEBUG "%s: hdlc_open() carrier %i open %i\n", dev->name,
hdlc->carrier, hdlc->open);
#endif
if (!hdlc->proto)
return -ENOSYS; /* no protocol attached */
if (hdlc->proto->open) {
int result = hdlc->proto->open(dev);
if (result)
return result;
}
spin_lock_irq(&hdlc->state_lock);
if (hdlc->carrier) {
netdev_info(dev, "Carrier detected\n");
hdlc_proto_start(dev);
} else {
netdev_info(dev, "No carrier\n");
}
hdlc->open = 1;
spin_unlock_irq(&hdlc->state_lock);
return 0;
}
EXPORT_SYMBOL(hdlc_open);
/* Must be called by hardware driver when HDLC device is being closed */
void hdlc_close(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
printk(KERN_DEBUG "%s: hdlc_close() carrier %i open %i\n", dev->name,
hdlc->carrier, hdlc->open);
#endif
spin_lock_irq(&hdlc->state_lock);
hdlc->open = 0;
if (hdlc->carrier)
hdlc_proto_stop(dev);
spin_unlock_irq(&hdlc->state_lock);
if (hdlc->proto->close)
hdlc->proto->close(dev);
}
EXPORT_SYMBOL(hdlc_close);
int hdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
struct hdlc_proto *proto = first_proto;
int result;
if (dev_to_hdlc(dev)->proto) {
result = dev_to_hdlc(dev)->proto->ioctl(dev, ifs);
if (result != -EINVAL)
return result;
}
/* Not handled by currently attached protocol (if any) */
while (proto) {
result = proto->ioctl(dev, ifs);
if (result != -EINVAL)
return result;
proto = proto->next;
}
return -EINVAL;
}
EXPORT_SYMBOL(hdlc_ioctl);
static const struct header_ops hdlc_null_ops;
static void hdlc_setup_dev(struct net_device *dev)
{
/* Re-init all variables changed by HDLC protocol drivers,
* including ether_setup() called from hdlc_raw_eth.c.
*/
dev->flags = IFF_POINTOPOINT | IFF_NOARP;
dev->priv_flags = IFF_WAN_HDLC;
dev->mtu = HDLC_MAX_MTU;
dev->min_mtu = 68;
dev->max_mtu = HDLC_MAX_MTU;
dev->type = ARPHRD_RAWHDLC;
dev->hard_header_len = 0;
dev->needed_headroom = 0;
dev->addr_len = 0;
dev->header_ops = &hdlc_null_ops;
}
static void hdlc_setup(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
hdlc_setup_dev(dev);
hdlc->carrier = 1;
hdlc->open = 0;
spin_lock_init(&hdlc->state_lock);
}
struct net_device *alloc_hdlcdev(void *priv)
{
struct net_device *dev;
dev = alloc_netdev(sizeof(struct hdlc_device), "hdlc%d",
NET_NAME_UNKNOWN, hdlc_setup);
if (dev)
dev_to_hdlc(dev)->priv = priv;
return dev;
}
EXPORT_SYMBOL(alloc_hdlcdev);
void unregister_hdlc_device(struct net_device *dev)
{
rtnl_lock();
detach_hdlc_protocol(dev);
unregister_netdevice(dev);
rtnl_unlock();
}
EXPORT_SYMBOL(unregister_hdlc_device);
int attach_hdlc_protocol(struct net_device *dev, struct hdlc_proto *proto,
size_t size)
{
int err;
err = detach_hdlc_protocol(dev);
if (err)
return err;
if (!try_module_get(proto->module))
return -ENOSYS;
if (size) {
dev_to_hdlc(dev)->state = kmalloc(size, GFP_KERNEL);
if (!dev_to_hdlc(dev)->state) {
module_put(proto->module);
return -ENOBUFS;
}
}
dev_to_hdlc(dev)->proto = proto;
return 0;
}
EXPORT_SYMBOL(attach_hdlc_protocol);
int detach_hdlc_protocol(struct net_device *dev)
{
hdlc_device *hdlc = dev_to_hdlc(dev);
int err;
if (hdlc->proto) {
err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
err = notifier_to_errno(err);
if (err) {
netdev_err(dev, "Refused to change device type\n");
return err;
}
if (hdlc->proto->detach)
hdlc->proto->detach(dev);
module_put(hdlc->proto->module);
hdlc->proto = NULL;
}
kfree(hdlc->state);
hdlc->state = NULL;
hdlc_setup_dev(dev);
return 0;
}
EXPORT_SYMBOL(detach_hdlc_protocol);
void register_hdlc_protocol(struct hdlc_proto *proto)
{
rtnl_lock();
proto->next = first_proto;
first_proto = proto;
rtnl_unlock();
}
EXPORT_SYMBOL(register_hdlc_protocol);
void unregister_hdlc_protocol(struct hdlc_proto *proto)
{
struct hdlc_proto **p;
rtnl_lock();
p = &first_proto;
while (*p != proto) {
BUG_ON(!*p);
p = &((*p)->next);
}
*p = proto->next;
rtnl_unlock();
}
EXPORT_SYMBOL(unregister_hdlc_protocol);
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("HDLC support module");
MODULE_LICENSE("GPL v2");
static struct packet_type hdlc_packet_type __read_mostly = {
.type = cpu_to_be16(ETH_P_HDLC),
.func = hdlc_rcv,
};
static struct notifier_block hdlc_notifier = {
.notifier_call = hdlc_device_event,
};
static int __init hdlc_module_init(void)
{
int result;
pr_info("%s\n", version);
result = register_netdevice_notifier(&hdlc_notifier);
if (result)
return result;
dev_add_pack(&hdlc_packet_type);
return 0;
}
static void __exit hdlc_module_exit(void)
{
dev_remove_pack(&hdlc_packet_type);
unregister_netdevice_notifier(&hdlc_notifier);
}
module_init(hdlc_module_init);
module_exit(hdlc_module_exit);
|
linux-master
|
drivers/net/wan/hdlc.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Intel IXP4xx HSS (synchronous serial port) driver for Linux
*
* Copyright (C) 2007-2008 Krzysztof Hałasa <[email protected]>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/bitops.h>
#include <linux/cdev.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/fs.h>
#include <linux/hdlc.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/mfd/syscon.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/gpio/consumer.h>
#include <linux/of.h>
#include <linux/soc/ixp4xx/npe.h>
#include <linux/soc/ixp4xx/qmgr.h>
#include <linux/soc/ixp4xx/cpu.h>
/* This is what all IXP4xx platforms we know of use; if more frequencies
* are needed, we need to migrate to the clock framework.
*/
#define IXP4XX_TIMER_FREQ 66666000
#define DEBUG_DESC 0
#define DEBUG_RX 0
#define DEBUG_TX 0
#define DEBUG_PKT_BYTES 0
#define DEBUG_CLOSE 0
#define DRV_NAME "ixp4xx_hss"
#define PKT_EXTRA_FLAGS 0 /* orig 1 */
#define PKT_NUM_PIPES 1 /* 1, 2 or 4 */
#define PKT_PIPE_FIFO_SIZEW 4 /* total 4 dwords per HSS */
#define RX_DESCS 16 /* also length of all RX queues */
#define TX_DESCS 16 /* also length of all TX queues */
#define POOL_ALLOC_SIZE (sizeof(struct desc) * (RX_DESCS + TX_DESCS))
#define RX_SIZE (HDLC_MAX_MRU + 4) /* NPE needs more space */
#define MAX_CLOSE_WAIT 1000 /* microseconds */
#define HSS_COUNT 2
#define FRAME_SIZE 256 /* doesn't matter at this point */
#define FRAME_OFFSET 0
#define MAX_CHANNELS (FRAME_SIZE / 8)
#define NAPI_WEIGHT 16
/* Queue IDs */
#define HSS0_PKT_RX_QUEUE 13 /* orig size = 32 dwords */
#define HSS0_PKT_TX0_QUEUE 14 /* orig size = 16 dwords */
#define HSS0_PKT_TX1_QUEUE 15
#define HSS0_PKT_TX2_QUEUE 16
#define HSS0_PKT_TX3_QUEUE 17
#define HSS0_PKT_RXFREE0_QUEUE 18 /* orig size = 16 dwords */
#define HSS0_PKT_RXFREE1_QUEUE 19
#define HSS0_PKT_RXFREE2_QUEUE 20
#define HSS0_PKT_RXFREE3_QUEUE 21
#define HSS0_PKT_TXDONE_QUEUE 22 /* orig size = 64 dwords */
#define HSS1_PKT_RX_QUEUE 0
#define HSS1_PKT_TX0_QUEUE 5
#define HSS1_PKT_TX1_QUEUE 6
#define HSS1_PKT_TX2_QUEUE 7
#define HSS1_PKT_TX3_QUEUE 8
#define HSS1_PKT_RXFREE0_QUEUE 1
#define HSS1_PKT_RXFREE1_QUEUE 2
#define HSS1_PKT_RXFREE2_QUEUE 3
#define HSS1_PKT_RXFREE3_QUEUE 4
#define HSS1_PKT_TXDONE_QUEUE 9
#define NPE_PKT_MODE_HDLC 0
#define NPE_PKT_MODE_RAW 1
#define NPE_PKT_MODE_56KMODE 2
#define NPE_PKT_MODE_56KENDIAN_MSB 4
/* PKT_PIPE_HDLC_CFG_WRITE flags */
#define PKT_HDLC_IDLE_ONES 0x1 /* default = flags */
#define PKT_HDLC_CRC_32 0x2 /* default = CRC-16 */
#define PKT_HDLC_MSB_ENDIAN 0x4 /* default = LE */
/* hss_config, PCRs */
/* Frame sync sampling, default = active low */
#define PCR_FRM_SYNC_ACTIVE_HIGH 0x40000000
#define PCR_FRM_SYNC_FALLINGEDGE 0x80000000
#define PCR_FRM_SYNC_RISINGEDGE 0xC0000000
/* Frame sync pin: input (default) or output generated off a given clk edge */
#define PCR_FRM_SYNC_OUTPUT_FALLING 0x20000000
#define PCR_FRM_SYNC_OUTPUT_RISING 0x30000000
/* Frame and data clock sampling on edge, default = falling */
#define PCR_FCLK_EDGE_RISING 0x08000000
#define PCR_DCLK_EDGE_RISING 0x04000000
/* Clock direction, default = input */
#define PCR_SYNC_CLK_DIR_OUTPUT 0x02000000
/* Generate/Receive frame pulses, default = enabled */
#define PCR_FRM_PULSE_DISABLED 0x01000000
/* Data rate is full (default) or half the configured clk speed */
#define PCR_HALF_CLK_RATE 0x00200000
/* Invert data between NPE and HSS FIFOs? (default = no) */
#define PCR_DATA_POLARITY_INVERT 0x00100000
/* TX/RX endianness, default = LSB */
#define PCR_MSB_ENDIAN 0x00080000
/* Normal (default) / open drain mode (TX only) */
#define PCR_TX_PINS_OPEN_DRAIN 0x00040000
/* No framing bit transmitted and expected on RX? (default = framing bit) */
#define PCR_SOF_NO_FBIT 0x00020000
/* Drive data pins? */
#define PCR_TX_DATA_ENABLE 0x00010000
/* Voice 56k type: drive the data pins low (default), high, high Z */
#define PCR_TX_V56K_HIGH 0x00002000
#define PCR_TX_V56K_HIGH_IMP 0x00004000
/* Unassigned type: drive the data pins low (default), high, high Z */
#define PCR_TX_UNASS_HIGH 0x00000800
#define PCR_TX_UNASS_HIGH_IMP 0x00001000
/* T1 @ 1.544MHz only: Fbit dictated in FIFO (default) or high Z */
#define PCR_TX_FB_HIGH_IMP 0x00000400
/* 56k data endianness - which bit unused: high (default) or low */
#define PCR_TX_56KE_BIT_0_UNUSED 0x00000200
/* 56k data transmission type: 32/8 bit data (default) or 56K data */
#define PCR_TX_56KS_56K_DATA 0x00000100
/* hss_config, cCR */
/* Number of packetized clients, default = 1 */
#define CCR_NPE_HFIFO_2_HDLC 0x04000000
#define CCR_NPE_HFIFO_3_OR_4HDLC 0x08000000
/* default = no loopback */
#define CCR_LOOPBACK 0x02000000
/* HSS number, default = 0 (first) */
#define CCR_SECOND_HSS 0x01000000
/* hss_config, clkCR: main:10, num:10, denom:12 */
#define CLK42X_SPEED_EXP ((0x3FF << 22) | (2 << 12) | 15) /*65 KHz*/
#define CLK42X_SPEED_512KHZ ((130 << 22) | (2 << 12) | 15)
#define CLK42X_SPEED_1536KHZ ((43 << 22) | (18 << 12) | 47)
#define CLK42X_SPEED_1544KHZ ((43 << 22) | (33 << 12) | 192)
#define CLK42X_SPEED_2048KHZ ((32 << 22) | (34 << 12) | 63)
#define CLK42X_SPEED_4096KHZ ((16 << 22) | (34 << 12) | 127)
#define CLK42X_SPEED_8192KHZ ((8 << 22) | (34 << 12) | 255)
#define CLK46X_SPEED_512KHZ ((130 << 22) | (24 << 12) | 127)
#define CLK46X_SPEED_1536KHZ ((43 << 22) | (152 << 12) | 383)
#define CLK46X_SPEED_1544KHZ ((43 << 22) | (66 << 12) | 385)
#define CLK46X_SPEED_2048KHZ ((32 << 22) | (280 << 12) | 511)
#define CLK46X_SPEED_4096KHZ ((16 << 22) | (280 << 12) | 1023)
#define CLK46X_SPEED_8192KHZ ((8 << 22) | (280 << 12) | 2047)
/* HSS_CONFIG_CLOCK_CR register consists of 3 parts:
* A (10 bits), B (10 bits) and C (12 bits).
* IXP42x HSS clock generator operation (verified with an oscilloscope):
* Each clock bit takes 7.5 ns (1 / 133.xx MHz).
* The clock sequence consists of (C - B) states of 0s and 1s, each state is
* A bits wide. It's followed by (B + 1) states of 0s and 1s, each state is
* (A + 1) bits wide.
*
* The resulting average clock frequency (assuming 33.333 MHz oscillator) is:
* freq = 66.666 MHz / (A + (B + 1) / (C + 1))
* minimum freq = 66.666 MHz / (A + 1)
* maximum freq = 66.666 MHz / A
*
* Example: A = 2, B = 2, C = 7, CLOCK_CR register = 2 << 22 | 2 << 12 | 7
* freq = 66.666 MHz / (2 + (2 + 1) / (7 + 1)) = 28.07 MHz (Mb/s).
* The clock sequence is: 1100110011 (5 doubles) 000111000 (3 triples).
* The sequence takes (C - B) * A + (B + 1) * (A + 1) = 5 * 2 + 3 * 3 bits
* = 19 bits (each 7.5 ns long) = 142.5 ns (then the sequence repeats).
* The sequence consists of 4 complete clock periods, thus the average
* frequency (= clock rate) is 4 / 142.5 ns = 28.07 MHz (Mb/s).
* (max specified clock rate for IXP42x HSS is 8.192 Mb/s).
*/
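/* Illustrative sketch only (not part of the driver): decode a clkCR word
 * into its A/B/C fields per the "main:10, num:10, denom:12" layout above
 * and compute the average output clock from
 * freq = 66.666 MHz / (A + (B + 1) / (C + 1)), rearranged for integer
 * math. Assumes <linux/math64.h> for div64_u64(); the helper name is
 * invented for this example.
 */
static inline u32 clk_cr_average_hz(u32 reg)
{
	u32 a = (reg >> 22) & 0x3FF;	/* main */
	u32 b = (reg >> 12) & 0x3FF;	/* num */
	u32 c = reg & 0xFFF;		/* denom */

	/* freq = 66666000 * (C + 1) / (A * (C + 1) + B + 1) */
	return (u32)div64_u64((u64)IXP4XX_TIMER_FREQ * (c + 1),
			      (u64)a * (c + 1) + b + 1);
}
/* e.g. CLK42X_SPEED_2048KHZ (A = 32, B = 34, C = 63) gives ~2.048 MHz. */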
/* hss_config, LUT entries */
#define TDMMAP_UNASSIGNED 0
#define TDMMAP_HDLC 1 /* HDLC - packetized */
#define TDMMAP_VOICE56K 2 /* Voice56K - 7-bit channelized */
#define TDMMAP_VOICE64K 3 /* Voice64K - 8-bit channelized */
/* offsets into HSS config */
#define HSS_CONFIG_TX_PCR 0x00 /* port configuration registers */
#define HSS_CONFIG_RX_PCR 0x04
#define HSS_CONFIG_CORE_CR 0x08 /* loopback control, HSS# */
#define HSS_CONFIG_CLOCK_CR 0x0C /* clock generator control */
#define HSS_CONFIG_TX_FCR 0x10 /* frame configuration registers */
#define HSS_CONFIG_RX_FCR 0x14
#define HSS_CONFIG_TX_LUT 0x18 /* channel look-up tables */
#define HSS_CONFIG_RX_LUT 0x38
/* NPE command codes */
/* writes the ConfigWord value to the location specified by offset */
#define PORT_CONFIG_WRITE 0x40
/* triggers the NPE to load the contents of the configuration table */
#define PORT_CONFIG_LOAD 0x41
/* triggers the NPE to return an HssErrorReadResponse message */
#define PORT_ERROR_READ 0x42
/* triggers the NPE to reset internal status and enable the HssPacketized
* operation for the flow specified by pPipe
*/
#define PKT_PIPE_FLOW_ENABLE 0x50
#define PKT_PIPE_FLOW_DISABLE 0x51
#define PKT_NUM_PIPES_WRITE 0x52
#define PKT_PIPE_FIFO_SIZEW_WRITE 0x53
#define PKT_PIPE_HDLC_CFG_WRITE 0x54
#define PKT_PIPE_IDLE_PATTERN_WRITE 0x55
#define PKT_PIPE_RX_SIZE_WRITE 0x56
#define PKT_PIPE_MODE_WRITE 0x57
/* HDLC packet status values - desc->status */
#define ERR_SHUTDOWN 1 /* stop or shutdown occurrence */
#define ERR_HDLC_ALIGN 2 /* HDLC alignment error */
#define ERR_HDLC_FCS 3 /* HDLC Frame Check Sequence (FCS) error */
#define ERR_RXFREE_Q_EMPTY 4 /* RX-free queue became empty while receiving
* this packet (if buf_len < pkt_len)
*/
#define ERR_HDLC_TOO_LONG 5 /* HDLC frame size too long */
#define ERR_HDLC_ABORT 6 /* abort sequence received */
#define ERR_DISCONNECTING 7 /* disconnect is in progress */
#ifdef __ARMEB__
typedef struct sk_buff buffer_t;
#define free_buffer dev_kfree_skb
#define free_buffer_irq dev_consume_skb_irq
#else
typedef void buffer_t;
#define free_buffer kfree
#define free_buffer_irq kfree
#endif
struct port {
struct device *dev;
struct npe *npe;
unsigned int txreadyq;
unsigned int rxtrigq;
unsigned int rxfreeq;
unsigned int rxq;
unsigned int txq;
unsigned int txdoneq;
struct gpio_desc *cts;
struct gpio_desc *rts;
struct gpio_desc *dcd;
struct gpio_desc *dtr;
struct gpio_desc *clk_internal;
struct net_device *netdev;
struct napi_struct napi;
buffer_t *rx_buff_tab[RX_DESCS], *tx_buff_tab[TX_DESCS];
struct desc *desc_tab; /* coherent */
dma_addr_t desc_tab_phys;
unsigned int id;
unsigned int clock_type, clock_rate, loopback;
unsigned int initialized, carrier;
u8 hdlc_cfg;
u32 clock_reg;
};
/* NPE message structure */
struct msg {
#ifdef __ARMEB__
u8 cmd, unused, hss_port, index;
union {
struct { u8 data8a, data8b, data8c, data8d; };
struct { u16 data16a, data16b; };
struct { u32 data32; };
};
#else
u8 index, hss_port, unused, cmd;
union {
struct { u8 data8d, data8c, data8b, data8a; };
struct { u16 data16b, data16a; };
struct { u32 data32; };
};
#endif
};
/* HDLC packet descriptor */
struct desc {
u32 next; /* pointer to next buffer, unused */
#ifdef __ARMEB__
u16 buf_len; /* buffer length */
u16 pkt_len; /* packet length */
u32 data; /* pointer to data buffer in RAM */
u8 status;
u8 error_count;
u16 __reserved;
#else
u16 pkt_len; /* packet length */
u16 buf_len; /* buffer length */
u32 data; /* pointer to data buffer in RAM */
u16 __reserved;
u8 error_count;
u8 status;
#endif
u32 __reserved1[4];
};
#define rx_desc_phys(port, n) ((port)->desc_tab_phys + \
(n) * sizeof(struct desc))
#define rx_desc_ptr(port, n) (&(port)->desc_tab[n])
#define tx_desc_phys(port, n) ((port)->desc_tab_phys + \
((n) + RX_DESCS) * sizeof(struct desc))
#define tx_desc_ptr(port, n) (&(port)->desc_tab[(n) + RX_DESCS])
/*****************************************************************************
* global variables
****************************************************************************/
static int ports_open;
static struct dma_pool *dma_pool;
static DEFINE_SPINLOCK(npe_lock);
/*****************************************************************************
* utility functions
****************************************************************************/
static inline struct port *dev_to_port(struct net_device *dev)
{
return dev_to_hdlc(dev)->priv;
}
#ifndef __ARMEB__
static inline void memcpy_swab32(u32 *dest, u32 *src, int cnt)
{
int i;
for (i = 0; i < cnt; i++)
dest[i] = swab32(src[i]);
}
#endif
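/* Editor's note: on little-endian builds every 32-bit word of HDLC data
 * is byte-swapped on its way to/from the NPE buffers (presumably because
 * the NPE presents the payload big-endian). For example, given
 * u32 src = 0x11223344, memcpy_swab32(&dst, &src, 1) leaves
 * dst == 0x44332211.
 */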
/*****************************************************************************
* HSS access
****************************************************************************/
static void hss_npe_send(struct port *port, struct msg *msg, const char *what)
{
u32 *val = (u32 *)msg;
if (npe_send_message(port->npe, msg, what)) {
pr_crit("HSS-%i: unable to send command [%08X:%08X] to %s\n",
port->id, val[0], val[1], npe_name(port->npe));
BUG();
}
}
static void hss_config_set_lut(struct port *port)
{
struct msg msg;
int ch;
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
for (ch = 0; ch < MAX_CHANNELS; ch++) {
msg.data32 >>= 2;
msg.data32 |= TDMMAP_HDLC << 30;
if (ch % 16 == 15) {
msg.index = HSS_CONFIG_TX_LUT + ((ch / 4) & ~3);
hss_npe_send(port, &msg, "HSS_SET_TX_LUT");
msg.index += HSS_CONFIG_RX_LUT - HSS_CONFIG_TX_LUT;
hss_npe_send(port, &msg, "HSS_SET_RX_LUT");
}
}
}
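/* Editor's note: the loop above packs one 2-bit TDMMAP_HDLC code per
 * channel, 16 channels per 32-bit word: each iteration shifts the
 * accumulator right by 2 and inserts the new code at bits 31:30, so
 * after 16 channels the word holds channels ch-15..ch in ascending bit
 * order. On every 16th channel (ch % 16 == 15) the word is flushed to
 * the TX and RX LUTs; ((ch / 4) & ~3) advances the byte index by 4 per
 * word, e.g. index 0 for channels 0-15 and 4 for channels 16-31.
 */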
static void hss_config(struct port *port)
{
struct msg msg;
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_TX_PCR;
msg.data32 = PCR_FRM_PULSE_DISABLED | PCR_MSB_ENDIAN |
PCR_TX_DATA_ENABLE | PCR_SOF_NO_FBIT;
if (port->clock_type == CLOCK_INT)
msg.data32 |= PCR_SYNC_CLK_DIR_OUTPUT;
hss_npe_send(port, &msg, "HSS_SET_TX_PCR");
msg.index = HSS_CONFIG_RX_PCR;
msg.data32 ^= PCR_TX_DATA_ENABLE | PCR_DCLK_EDGE_RISING;
hss_npe_send(port, &msg, "HSS_SET_RX_PCR");
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_CORE_CR;
msg.data32 = (port->loopback ? CCR_LOOPBACK : 0) |
(port->id ? CCR_SECOND_HSS : 0);
hss_npe_send(port, &msg, "HSS_SET_CORE_CR");
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_CLOCK_CR;
msg.data32 = port->clock_reg;
hss_npe_send(port, &msg, "HSS_SET_CLOCK_CR");
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_TX_FCR;
msg.data16a = FRAME_OFFSET;
msg.data16b = FRAME_SIZE - 1;
hss_npe_send(port, &msg, "HSS_SET_TX_FCR");
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_WRITE;
msg.hss_port = port->id;
msg.index = HSS_CONFIG_RX_FCR;
msg.data16a = FRAME_OFFSET;
msg.data16b = FRAME_SIZE - 1;
hss_npe_send(port, &msg, "HSS_SET_RX_FCR");
hss_config_set_lut(port);
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_CONFIG_LOAD;
msg.hss_port = port->id;
hss_npe_send(port, &msg, "HSS_LOAD_CONFIG");
if (npe_recv_message(port->npe, &msg, "HSS_LOAD_CONFIG") ||
/* HSS_LOAD_CONFIG for port #1 returns port_id = #4 */
msg.cmd != PORT_CONFIG_LOAD || msg.data32) {
pr_crit("HSS-%i: HSS_LOAD_CONFIG failed\n", port->id);
BUG();
}
/* HDLC may stop working without this - check FIXME */
npe_recv_message(port->npe, &msg, "FLUSH_IT");
}
static void hss_set_hdlc_cfg(struct port *port)
{
struct msg msg;
memset(&msg, 0, sizeof(msg));
msg.cmd = PKT_PIPE_HDLC_CFG_WRITE;
msg.hss_port = port->id;
msg.data8a = port->hdlc_cfg; /* rx_cfg */
msg.data8b = port->hdlc_cfg | (PKT_EXTRA_FLAGS << 3); /* tx_cfg */
hss_npe_send(port, &msg, "HSS_SET_HDLC_CFG");
}
static u32 hss_get_status(struct port *port)
{
struct msg msg;
memset(&msg, 0, sizeof(msg));
msg.cmd = PORT_ERROR_READ;
msg.hss_port = port->id;
hss_npe_send(port, &msg, "PORT_ERROR_READ");
if (npe_recv_message(port->npe, &msg, "PORT_ERROR_READ")) {
pr_crit("HSS-%i: unable to read HSS status\n", port->id);
BUG();
}
return msg.data32;
}
static void hss_start_hdlc(struct port *port)
{
struct msg msg;
memset(&msg, 0, sizeof(msg));
msg.cmd = PKT_PIPE_FLOW_ENABLE;
msg.hss_port = port->id;
msg.data32 = 0;
hss_npe_send(port, &msg, "HSS_ENABLE_PKT_PIPE");
}
static void hss_stop_hdlc(struct port *port)
{
struct msg msg;
memset(&msg, 0, sizeof(msg));
msg.cmd = PKT_PIPE_FLOW_DISABLE;
msg.hss_port = port->id;
hss_npe_send(port, &msg, "HSS_DISABLE_PKT_PIPE");
hss_get_status(port); /* make sure it's halted */
}
static int hss_load_firmware(struct port *port)
{
struct msg msg;
int err;
if (port->initialized)
return 0;
if (!npe_running(port->npe)) {
err = npe_load_firmware(port->npe, npe_name(port->npe),
port->dev);
if (err)
return err;
}
/* HDLC mode configuration */
memset(&msg, 0, sizeof(msg));
msg.cmd = PKT_NUM_PIPES_WRITE;
msg.hss_port = port->id;
msg.data8a = PKT_NUM_PIPES;
hss_npe_send(port, &msg, "HSS_SET_PKT_PIPES");
msg.cmd = PKT_PIPE_FIFO_SIZEW_WRITE;
msg.data8a = PKT_PIPE_FIFO_SIZEW;
hss_npe_send(port, &msg, "HSS_SET_PKT_FIFO");
msg.cmd = PKT_PIPE_MODE_WRITE;
msg.data8a = NPE_PKT_MODE_HDLC;
/* msg.data8b = inv_mask */
/* msg.data8c = or_mask */
hss_npe_send(port, &msg, "HSS_SET_PKT_MODE");
msg.cmd = PKT_PIPE_RX_SIZE_WRITE;
msg.data16a = HDLC_MAX_MRU; /* including CRC */
hss_npe_send(port, &msg, "HSS_SET_PKT_RX_SIZE");
msg.cmd = PKT_PIPE_IDLE_PATTERN_WRITE;
msg.data32 = 0x7F7F7F7F; /* ??? FIXME */
hss_npe_send(port, &msg, "HSS_SET_PKT_IDLE");
port->initialized = 1;
return 0;
}
/*****************************************************************************
* packetized (HDLC) operation
****************************************************************************/
static inline void debug_pkt(struct net_device *dev, const char *func,
u8 *data, int len)
{
#if DEBUG_PKT_BYTES
int i;
printk(KERN_DEBUG "%s: %s(%i)", dev->name, func, len);
for (i = 0; i < len; i++) {
if (i >= DEBUG_PKT_BYTES)
break;
printk("%s%02X", !(i % 4) ? " " : "", data[i]);
}
printk("\n");
#endif
}
static inline void debug_desc(u32 phys, struct desc *desc)
{
#if DEBUG_DESC
printk(KERN_DEBUG "%X: %X %3X %3X %08X %X %X\n",
phys, desc->next, desc->buf_len, desc->pkt_len,
desc->data, desc->status, desc->error_count);
#endif
}
static inline int queue_get_desc(unsigned int queue, struct port *port,
int is_tx)
{
u32 phys, tab_phys, n_desc;
struct desc *tab;
phys = qmgr_get_entry(queue);
if (!phys)
return -1;
BUG_ON(phys & 0x1F);
tab_phys = is_tx ? tx_desc_phys(port, 0) : rx_desc_phys(port, 0);
tab = is_tx ? tx_desc_ptr(port, 0) : rx_desc_ptr(port, 0);
n_desc = (phys - tab_phys) / sizeof(struct desc);
BUG_ON(n_desc >= (is_tx ? TX_DESCS : RX_DESCS));
debug_desc(phys, &tab[n_desc]);
BUG_ON(tab[n_desc].next);
return n_desc;
}
static inline void queue_put_desc(unsigned int queue, u32 phys,
struct desc *desc)
{
debug_desc(phys, desc);
BUG_ON(phys & 0x1F);
qmgr_put_entry(queue, phys);
/* Don't check for queue overflow here, we've allocated sufficient
* length and queues >= 32 don't support this check anyway.
*/
}
static inline void dma_unmap_tx(struct port *port, struct desc *desc)
{
#ifdef __ARMEB__
dma_unmap_single(&port->netdev->dev, desc->data,
desc->buf_len, DMA_TO_DEVICE);
#else
dma_unmap_single(&port->netdev->dev, desc->data & ~3,
ALIGN((desc->data & 3) + desc->buf_len, 4),
DMA_TO_DEVICE);
#endif
}
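/* Editor's note: on little-endian builds hss_hdlc_xmit() below maps a
 * 4-byte-aligned bounce buffer and stores phys + offset in desc->data,
 * so the low two bits of desc->data carry the original alignment.
 * dma_unmap_tx() undoes that: desc->data & ~3 recovers the mapped
 * address and ALIGN((desc->data & 3) + buf_len, 4) the mapped length,
 * e.g. a 61-byte frame at offset 3 unmaps as 64 bytes.
 */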
static void hss_hdlc_set_carrier(void *pdev, int carrier)
{
struct net_device *netdev = pdev;
struct port *port = dev_to_port(netdev);
unsigned long flags;
spin_lock_irqsave(&npe_lock, flags);
port->carrier = carrier;
if (!port->loopback) {
if (carrier)
netif_carrier_on(netdev);
else
netif_carrier_off(netdev);
}
spin_unlock_irqrestore(&npe_lock, flags);
}
static void hss_hdlc_rx_irq(void *pdev)
{
struct net_device *dev = pdev;
struct port *port = dev_to_port(dev);
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_rx_irq\n", dev->name);
#endif
qmgr_disable_irq(port->rxq);
napi_schedule(&port->napi);
}
static int hss_hdlc_poll(struct napi_struct *napi, int budget)
{
struct port *port = container_of(napi, struct port, napi);
struct net_device *dev = port->netdev;
unsigned int rxq = port->rxq;
unsigned int rxfreeq = port->rxfreeq;
int received = 0;
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_poll\n", dev->name);
#endif
while (received < budget) {
struct sk_buff *skb;
struct desc *desc;
int n;
#ifdef __ARMEB__
struct sk_buff *temp;
u32 phys;
#endif
n = queue_get_desc(rxq, port, 0);
if (n < 0) {
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_poll"
" napi_complete\n", dev->name);
#endif
napi_complete(napi);
qmgr_enable_irq(rxq);
if (!qmgr_stat_empty(rxq) &&
napi_reschedule(napi)) {
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_poll"
" napi_reschedule succeeded\n",
dev->name);
#endif
qmgr_disable_irq(rxq);
continue;
}
#if DEBUG_RX
printk(KERN_DEBUG "%s: hss_hdlc_poll all done\n",
dev->name);
#endif
return received; /* all work done */
}
desc = rx_desc_ptr(port, n);
#if 0 /* FIXME - error_count counts modulo 256, perhaps we should use it */
if (desc->error_count)
printk(KERN_DEBUG "%s: hss_hdlc_poll status 0x%02X"
" errors %u\n", dev->name, desc->status,
desc->error_count);
#endif
skb = NULL;
switch (desc->status) {
case 0:
#ifdef __ARMEB__
skb = netdev_alloc_skb(dev, RX_SIZE);
if (skb) {
phys = dma_map_single(&dev->dev, skb->data,
RX_SIZE,
DMA_FROM_DEVICE);
if (dma_mapping_error(&dev->dev, phys)) {
dev_kfree_skb(skb);
skb = NULL;
}
}
#else
skb = netdev_alloc_skb(dev, desc->pkt_len);
#endif
if (!skb)
dev->stats.rx_dropped++;
break;
case ERR_HDLC_ALIGN:
case ERR_HDLC_ABORT:
dev->stats.rx_frame_errors++;
dev->stats.rx_errors++;
break;
case ERR_HDLC_FCS:
dev->stats.rx_crc_errors++;
dev->stats.rx_errors++;
break;
case ERR_HDLC_TOO_LONG:
dev->stats.rx_length_errors++;
dev->stats.rx_errors++;
break;
default: /* FIXME - remove printk */
netdev_err(dev, "hss_hdlc_poll: status 0x%02X errors %u\n",
desc->status, desc->error_count);
dev->stats.rx_errors++;
}
if (!skb) {
/* put the desc back on RX-ready queue */
desc->buf_len = RX_SIZE;
desc->pkt_len = desc->status = 0;
queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
continue;
}
/* process received frame */
#ifdef __ARMEB__
temp = skb;
skb = port->rx_buff_tab[n];
dma_unmap_single(&dev->dev, desc->data,
RX_SIZE, DMA_FROM_DEVICE);
#else
dma_sync_single_for_cpu(&dev->dev, desc->data,
RX_SIZE, DMA_FROM_DEVICE);
memcpy_swab32((u32 *)skb->data, (u32 *)port->rx_buff_tab[n],
ALIGN(desc->pkt_len, 4) / 4);
#endif
skb_put(skb, desc->pkt_len);
debug_pkt(dev, "hss_hdlc_poll", skb->data, skb->len);
skb->protocol = hdlc_type_trans(skb, dev);
dev->stats.rx_packets++;
dev->stats.rx_bytes += skb->len;
netif_receive_skb(skb);
/* put the new buffer on RX-free queue */
#ifdef __ARMEB__
port->rx_buff_tab[n] = temp;
desc->data = phys;
#endif
desc->buf_len = RX_SIZE;
desc->pkt_len = 0;
queue_put_desc(rxfreeq, rx_desc_phys(port, n), desc);
received++;
}
#if DEBUG_RX
printk(KERN_DEBUG "hss_hdlc_poll: end, not all work done\n");
#endif
return received; /* not all work done */
}
static void hss_hdlc_txdone_irq(void *pdev)
{
struct net_device *dev = pdev;
struct port *port = dev_to_port(dev);
int n_desc;
#if DEBUG_TX
printk(KERN_DEBUG DRV_NAME ": hss_hdlc_txdone_irq\n");
#endif
while ((n_desc = queue_get_desc(port->txdoneq,
port, 1)) >= 0) {
struct desc *desc;
int start;
desc = tx_desc_ptr(port, n_desc);
dev->stats.tx_packets++;
dev->stats.tx_bytes += desc->pkt_len;
dma_unmap_tx(port, desc);
#if DEBUG_TX
printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq free %p\n",
dev->name, port->tx_buff_tab[n_desc]);
#endif
free_buffer_irq(port->tx_buff_tab[n_desc]);
port->tx_buff_tab[n_desc] = NULL;
start = qmgr_stat_below_low_watermark(port->txreadyq);
queue_put_desc(port->txreadyq,
tx_desc_phys(port, n_desc), desc);
if (start) { /* TX-ready queue was empty */
#if DEBUG_TX
printk(KERN_DEBUG "%s: hss_hdlc_txdone_irq xmit"
" ready\n", dev->name);
#endif
netif_wake_queue(dev);
}
}
}
static int hss_hdlc_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct port *port = dev_to_port(dev);
unsigned int txreadyq = port->txreadyq;
int len, offset, bytes, n;
void *mem;
u32 phys;
struct desc *desc;
#if DEBUG_TX
printk(KERN_DEBUG "%s: hss_hdlc_xmit\n", dev->name);
#endif
if (unlikely(skb->len > HDLC_MAX_MRU)) {
dev_kfree_skb(skb);
dev->stats.tx_errors++;
return NETDEV_TX_OK;
}
debug_pkt(dev, "hss_hdlc_xmit", skb->data, skb->len);
len = skb->len;
#ifdef __ARMEB__
offset = 0; /* no need to keep alignment */
bytes = len;
mem = skb->data;
#else
offset = (int)skb->data & 3; /* keep 32-bit alignment */
bytes = ALIGN(offset + len, 4);
mem = kmalloc(bytes, GFP_ATOMIC);
if (!mem) {
dev_kfree_skb(skb);
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
memcpy_swab32(mem, (u32 *)((uintptr_t)skb->data & ~3), bytes / 4);
dev_kfree_skb(skb);
#endif
phys = dma_map_single(&dev->dev, mem, bytes, DMA_TO_DEVICE);
if (dma_mapping_error(&dev->dev, phys)) {
#ifdef __ARMEB__
dev_kfree_skb(skb);
#else
kfree(mem);
#endif
dev->stats.tx_dropped++;
return NETDEV_TX_OK;
}
n = queue_get_desc(txreadyq, port, 1);
BUG_ON(n < 0);
desc = tx_desc_ptr(port, n);
#ifdef __ARMEB__
port->tx_buff_tab[n] = skb;
#else
port->tx_buff_tab[n] = mem;
#endif
desc->data = phys + offset;
desc->buf_len = desc->pkt_len = len;
wmb();
queue_put_desc(port->txq, tx_desc_phys(port, n), desc);
if (qmgr_stat_below_low_watermark(txreadyq)) { /* empty */
#if DEBUG_TX
printk(KERN_DEBUG "%s: hss_hdlc_xmit queue full\n", dev->name);
#endif
netif_stop_queue(dev);
/* we could miss TX ready interrupt */
if (!qmgr_stat_below_low_watermark(txreadyq)) {
#if DEBUG_TX
printk(KERN_DEBUG "%s: hss_hdlc_xmit ready again\n",
dev->name);
#endif
netif_wake_queue(dev);
}
}
#if DEBUG_TX
printk(KERN_DEBUG "%s: hss_hdlc_xmit end\n", dev->name);
#endif
return NETDEV_TX_OK;
}
static int request_hdlc_queues(struct port *port)
{
int err;
err = qmgr_request_queue(port->rxfreeq, RX_DESCS, 0, 0,
"%s:RX-free", port->netdev->name);
if (err)
return err;
err = qmgr_request_queue(port->rxq, RX_DESCS, 0, 0,
"%s:RX", port->netdev->name);
if (err)
goto rel_rxfree;
err = qmgr_request_queue(port->txq, TX_DESCS, 0, 0,
"%s:TX", port->netdev->name);
if (err)
goto rel_rx;
err = qmgr_request_queue(port->txreadyq, TX_DESCS, 0, 0,
"%s:TX-ready", port->netdev->name);
if (err)
goto rel_tx;
err = qmgr_request_queue(port->txdoneq, TX_DESCS, 0, 0,
"%s:TX-done", port->netdev->name);
if (err)
goto rel_txready;
return 0;
rel_txready:
qmgr_release_queue(port->txreadyq);
rel_tx:
qmgr_release_queue(port->txq);
rel_rx:
qmgr_release_queue(port->rxq);
rel_rxfree:
qmgr_release_queue(port->rxfreeq);
printk(KERN_DEBUG "%s: unable to request hardware queues\n",
port->netdev->name);
return err;
}
static void release_hdlc_queues(struct port *port)
{
qmgr_release_queue(port->rxfreeq);
qmgr_release_queue(port->rxq);
qmgr_release_queue(port->txdoneq);
qmgr_release_queue(port->txq);
qmgr_release_queue(port->txreadyq);
}
static int init_hdlc_queues(struct port *port)
{
int i;
if (!ports_open) {
dma_pool = dma_pool_create(DRV_NAME, &port->netdev->dev,
POOL_ALLOC_SIZE, 32, 0);
if (!dma_pool)
return -ENOMEM;
}
port->desc_tab = dma_pool_zalloc(dma_pool, GFP_KERNEL,
&port->desc_tab_phys);
if (!port->desc_tab)
return -ENOMEM;
memset(port->rx_buff_tab, 0, sizeof(port->rx_buff_tab)); /* tables */
memset(port->tx_buff_tab, 0, sizeof(port->tx_buff_tab));
/* Setup RX buffers */
for (i = 0; i < RX_DESCS; i++) {
struct desc *desc = rx_desc_ptr(port, i);
buffer_t *buff;
void *data;
#ifdef __ARMEB__
buff = netdev_alloc_skb(port->netdev, RX_SIZE);
if (!buff)
return -ENOMEM;
data = buff->data;
#else
buff = kmalloc(RX_SIZE, GFP_KERNEL);
if (!buff)
return -ENOMEM;
data = buff;
#endif
desc->buf_len = RX_SIZE;
desc->data = dma_map_single(&port->netdev->dev, data,
RX_SIZE, DMA_FROM_DEVICE);
if (dma_mapping_error(&port->netdev->dev, desc->data)) {
free_buffer(buff);
return -EIO;
}
port->rx_buff_tab[i] = buff;
}
return 0;
}
static void destroy_hdlc_queues(struct port *port)
{
int i;
if (port->desc_tab) {
for (i = 0; i < RX_DESCS; i++) {
struct desc *desc = rx_desc_ptr(port, i);
buffer_t *buff = port->rx_buff_tab[i];
if (buff) {
dma_unmap_single(&port->netdev->dev,
desc->data, RX_SIZE,
DMA_FROM_DEVICE);
free_buffer(buff);
}
}
for (i = 0; i < TX_DESCS; i++) {
struct desc *desc = tx_desc_ptr(port, i);
buffer_t *buff = port->tx_buff_tab[i];
if (buff) {
dma_unmap_tx(port, desc);
free_buffer(buff);
}
}
dma_pool_free(dma_pool, port->desc_tab, port->desc_tab_phys);
port->desc_tab = NULL;
}
if (!ports_open && dma_pool) {
dma_pool_destroy(dma_pool);
dma_pool = NULL;
}
}
static irqreturn_t hss_hdlc_dcd_irq(int irq, void *data)
{
struct net_device *dev = data;
struct port *port = dev_to_port(dev);
int val;
val = gpiod_get_value(port->dcd);
hss_hdlc_set_carrier(dev, val);
return IRQ_HANDLED;
}
static int hss_hdlc_open(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
unsigned long flags;
int i, err = 0;
int val;
err = hdlc_open(dev);
if (err)
return err;
err = hss_load_firmware(port);
if (err)
goto err_hdlc_close;
err = request_hdlc_queues(port);
if (err)
goto err_hdlc_close;
err = init_hdlc_queues(port);
if (err)
goto err_destroy_queues;
spin_lock_irqsave(&npe_lock, flags);
	/* Set the carrier: the GPIO is flagged active low, so this returns
	 * 1 when DCD is asserted.
	 */
val = gpiod_get_value(port->dcd);
hss_hdlc_set_carrier(dev, val);
/* Set up an IRQ for DCD */
err = request_irq(gpiod_to_irq(port->dcd), hss_hdlc_dcd_irq, 0, "IXP4xx HSS", dev);
if (err) {
dev_err(&dev->dev, "ixp4xx_hss: failed to request DCD IRQ (%i)\n", err);
goto err_unlock;
}
/* GPIOs are flagged active low so this asserts DTR and RTS */
gpiod_set_value(port->dtr, 1);
gpiod_set_value(port->rts, 1);
spin_unlock_irqrestore(&npe_lock, flags);
/* Populate queues with buffers, no failure after this point */
for (i = 0; i < TX_DESCS; i++)
queue_put_desc(port->txreadyq,
tx_desc_phys(port, i), tx_desc_ptr(port, i));
for (i = 0; i < RX_DESCS; i++)
queue_put_desc(port->rxfreeq,
rx_desc_phys(port, i), rx_desc_ptr(port, i));
napi_enable(&port->napi);
netif_start_queue(dev);
qmgr_set_irq(port->rxq, QUEUE_IRQ_SRC_NOT_EMPTY,
hss_hdlc_rx_irq, dev);
qmgr_set_irq(port->txdoneq, QUEUE_IRQ_SRC_NOT_EMPTY,
hss_hdlc_txdone_irq, dev);
qmgr_enable_irq(port->txdoneq);
ports_open++;
hss_set_hdlc_cfg(port);
hss_config(port);
hss_start_hdlc(port);
	/* we may already have RX data; the NAPI poll will re-enable the IRQ */
napi_schedule(&port->napi);
return 0;
err_unlock:
spin_unlock_irqrestore(&npe_lock, flags);
err_destroy_queues:
destroy_hdlc_queues(port);
release_hdlc_queues(port);
err_hdlc_close:
hdlc_close(dev);
return err;
}
static int hss_hdlc_close(struct net_device *dev)
{
struct port *port = dev_to_port(dev);
unsigned long flags;
int i, buffs = RX_DESCS; /* allocated RX buffers */
spin_lock_irqsave(&npe_lock, flags);
ports_open--;
qmgr_disable_irq(port->rxq);
netif_stop_queue(dev);
napi_disable(&port->napi);
hss_stop_hdlc(port);
while (queue_get_desc(port->rxfreeq, port, 0) >= 0)
buffs--;
while (queue_get_desc(port->rxq, port, 0) >= 0)
buffs--;
if (buffs)
netdev_crit(dev, "unable to drain RX queue, %i buffer(s) left in NPE\n",
buffs);
buffs = TX_DESCS;
while (queue_get_desc(port->txq, port, 1) >= 0)
buffs--; /* cancel TX */
i = 0;
do {
while (queue_get_desc(port->txreadyq, port, 1) >= 0)
buffs--;
if (!buffs)
break;
} while (++i < MAX_CLOSE_WAIT);
if (buffs)
netdev_crit(dev, "unable to drain TX queue, %i buffer(s) left in NPE\n",
buffs);
#if DEBUG_CLOSE
if (!buffs)
printk(KERN_DEBUG "Draining TX queues took %i cycles\n", i);
#endif
qmgr_disable_irq(port->txdoneq);
free_irq(gpiod_to_irq(port->dcd), dev);
/* GPIOs are flagged active low so this de-asserts DTR and RTS */
gpiod_set_value(port->dtr, 0);
gpiod_set_value(port->rts, 0);
spin_unlock_irqrestore(&npe_lock, flags);
destroy_hdlc_queues(port);
release_hdlc_queues(port);
hdlc_close(dev);
return 0;
}
static int hss_hdlc_attach(struct net_device *dev, unsigned short encoding,
unsigned short parity)
{
struct port *port = dev_to_port(dev);
if (encoding != ENCODING_NRZ)
return -EINVAL;
switch (parity) {
case PARITY_CRC16_PR1_CCITT:
port->hdlc_cfg = 0;
return 0;
case PARITY_CRC32_PR1_CCITT:
port->hdlc_cfg = PKT_HDLC_CRC_32;
return 0;
default:
return -EINVAL;
}
}
static u32 check_clock(u32 timer_freq, u32 rate, u32 a, u32 b, u32 c,
u32 *best, u32 *best_diff, u32 *reg)
{
/* a is 10-bit, b is 10-bit, c is 12-bit */
u64 new_rate;
u32 new_diff;
new_rate = timer_freq * (u64)(c + 1);
do_div(new_rate, a * (c + 1) + b + 1);
new_diff = abs((u32)new_rate - rate);
if (new_diff < *best_diff) {
*best = new_rate;
*best_diff = new_diff;
*reg = (a << 22) | (b << 12) | c;
}
return new_diff;
}
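/* Editor's note: the generated rate works out to
 * timer_freq * (c + 1) / (a * (c + 1) + b + 1). A worked example,
 * assuming the 66.666 MHz timer implied by the comments below: the
 * a = 0x3FF floor used by find_best_clock() with b = 1, c = 1 gives
 * 66666000 * 2 / (1023 * 2 + 1 + 1) = 133332000 / 2048 = 65103 Hz,
 * matching the "ca. 65 kb/s" note there.
 */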
static void find_best_clock(u32 timer_freq, u32 rate, u32 *best, u32 *reg)
{
u32 a, b, diff = 0xFFFFFFFF;
a = timer_freq / rate;
if (a > 0x3FF) { /* 10-bit value - we can go as slow as ca. 65 kb/s */
check_clock(timer_freq, rate, 0x3FF, 1, 1, best, &diff, reg);
return;
}
if (a == 0) { /* > 66.666 MHz */
a = 1; /* minimum divider is 1 (a = 0, b = 1, c = 1) */
rate = timer_freq;
}
if (rate * a == timer_freq) { /* don't divide by 0 later */
check_clock(timer_freq, rate, a - 1, 1, 1, best, &diff, reg);
return;
}
for (b = 0; b < 0x400; b++) {
u64 c = (b + 1) * (u64)rate;
do_div(c, timer_freq - rate * a);
c--;
if (c >= 0xFFF) { /* 12-bit - no need to check more 'b's */
if (b == 0 && /* also try a bit higher rate */
!check_clock(timer_freq, rate, a - 1, 1, 1, best,
&diff, reg))
return;
check_clock(timer_freq, rate, a, b, 0xFFF, best,
&diff, reg);
return;
}
if (!check_clock(timer_freq, rate, a, b, c, best, &diff, reg))
return;
if (!check_clock(timer_freq, rate, a, b, c + 1, best, &diff,
reg))
return;
}
}
static int hss_hdlc_set_clock(struct port *port, unsigned int clock_type)
{
switch (clock_type) {
case CLOCK_DEFAULT:
case CLOCK_EXT:
gpiod_set_value(port->clk_internal, 0);
return CLOCK_EXT;
case CLOCK_INT:
gpiod_set_value(port->clk_internal, 1);
return CLOCK_INT;
default:
return -EINVAL;
}
}
static int hss_hdlc_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
struct port *port = dev_to_port(dev);
unsigned long flags;
int clk;
switch (ifs->type) {
case IF_GET_IFACE:
ifs->type = IF_IFACE_V35;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
memset(&new_line, 0, sizeof(new_line));
new_line.clock_type = port->clock_type;
new_line.clock_rate = port->clock_rate;
new_line.loopback = port->loopback;
if (copy_to_user(line, &new_line, size))
return -EFAULT;
return 0;
case IF_IFACE_SYNC_SERIAL:
case IF_IFACE_V35:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
return -EFAULT;
clk = new_line.clock_type;
hss_hdlc_set_clock(port, clk);
if (clk != CLOCK_EXT && clk != CLOCK_INT)
return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
port->clock_type = clk; /* Update settings */
if (clk == CLOCK_INT) {
find_best_clock(IXP4XX_TIMER_FREQ,
new_line.clock_rate,
&port->clock_rate, &port->clock_reg);
} else {
port->clock_rate = 0;
port->clock_reg = CLK42X_SPEED_2048KHZ;
}
port->loopback = new_line.loopback;
spin_lock_irqsave(&npe_lock, flags);
if (dev->flags & IFF_UP)
hss_config(port);
if (port->loopback || port->carrier)
netif_carrier_on(port->netdev);
else
netif_carrier_off(port->netdev);
spin_unlock_irqrestore(&npe_lock, flags);
return 0;
default:
return hdlc_ioctl(dev, ifs);
}
}
/*****************************************************************************
* initialization
****************************************************************************/
static const struct net_device_ops hss_hdlc_ops = {
.ndo_open = hss_hdlc_open,
.ndo_stop = hss_hdlc_close,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_siocwandev = hss_hdlc_ioctl,
};
static int ixp4xx_hss_probe(struct platform_device *pdev)
{
struct of_phandle_args queue_spec;
struct of_phandle_args npe_spec;
struct device *dev = &pdev->dev;
struct net_device *ndev;
struct device_node *np;
struct regmap *rmap;
struct port *port;
hdlc_device *hdlc;
int err;
u32 val;
	/*
	 * Go into the syscon and check that the HSS and HDLC features are
	 * available; without them this driver cannot work.
	 */
rmap = syscon_regmap_lookup_by_compatible("syscon");
if (IS_ERR(rmap))
return dev_err_probe(dev, PTR_ERR(rmap),
"failed to look up syscon\n");
val = cpu_ixp4xx_features(rmap);
if ((val & (IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) !=
(IXP4XX_FEATURE_HDLC | IXP4XX_FEATURE_HSS)) {
dev_err(dev, "HDLC and HSS feature unavailable in platform\n");
return -ENODEV;
}
np = dev->of_node;
port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
if (!port)
return -ENOMEM;
err = of_parse_phandle_with_fixed_args(np, "intel,npe-handle", 1, 0,
&npe_spec);
if (err)
return dev_err_probe(dev, err, "no NPE engine specified\n");
/* NPE ID 0x00, 0x10, 0x20... */
port->npe = npe_request(npe_spec.args[0] << 4);
if (!port->npe) {
dev_err(dev, "unable to obtain NPE instance\n");
return -ENODEV;
}
/* Get the TX ready queue as resource from queue manager */
err = of_parse_phandle_with_fixed_args(np, "intek,queue-chl-txready", 1, 0,
&queue_spec);
if (err)
return dev_err_probe(dev, err, "no txready queue phandle\n");
port->txreadyq = queue_spec.args[0];
/* Get the RX trig queue as resource from queue manager */
err = of_parse_phandle_with_fixed_args(np, "intek,queue-chl-rxtrig", 1, 0,
&queue_spec);
if (err)
return dev_err_probe(dev, err, "no rxtrig queue phandle\n");
port->rxtrigq = queue_spec.args[0];
/* Get the RX queue as resource from queue manager */
err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-rx", 1, 0,
&queue_spec);
if (err)
return dev_err_probe(dev, err, "no RX queue phandle\n");
port->rxq = queue_spec.args[0];
/* Get the TX queue as resource from queue manager */
err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-tx", 1, 0,
&queue_spec);
if (err)
return dev_err_probe(dev, err, "no RX queue phandle\n");
port->txq = queue_spec.args[0];
/* Get the RX free queue as resource from queue manager */
err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-rxfree", 1, 0,
&queue_spec);
if (err)
return dev_err_probe(dev, err, "no RX free queue phandle\n");
port->rxfreeq = queue_spec.args[0];
/* Get the TX done queue as resource from queue manager */
err = of_parse_phandle_with_fixed_args(np, "intek,queue-pkt-txdone", 1, 0,
&queue_spec);
if (err)
return dev_err_probe(dev, err, "no TX done queue phandle\n");
port->txdoneq = queue_spec.args[0];
/* Obtain all the line control GPIOs */
port->cts = devm_gpiod_get(dev, "cts", GPIOD_OUT_LOW);
if (IS_ERR(port->cts))
return dev_err_probe(dev, PTR_ERR(port->cts), "unable to get CTS GPIO\n");
port->rts = devm_gpiod_get(dev, "rts", GPIOD_OUT_LOW);
if (IS_ERR(port->rts))
return dev_err_probe(dev, PTR_ERR(port->rts), "unable to get RTS GPIO\n");
port->dcd = devm_gpiod_get(dev, "dcd", GPIOD_IN);
if (IS_ERR(port->dcd))
return dev_err_probe(dev, PTR_ERR(port->dcd), "unable to get DCD GPIO\n");
port->dtr = devm_gpiod_get(dev, "dtr", GPIOD_OUT_LOW);
if (IS_ERR(port->dtr))
return dev_err_probe(dev, PTR_ERR(port->dtr), "unable to get DTR GPIO\n");
port->clk_internal = devm_gpiod_get(dev, "clk-internal", GPIOD_OUT_LOW);
if (IS_ERR(port->clk_internal))
return dev_err_probe(dev, PTR_ERR(port->clk_internal),
"unable to get CLK internal GPIO\n");
	ndev = alloc_hdlcdev(port);
	port->netdev = ndev;
	if (!ndev) {
err = -ENOMEM;
goto err_plat;
}
SET_NETDEV_DEV(ndev, &pdev->dev);
hdlc = dev_to_hdlc(ndev);
hdlc->attach = hss_hdlc_attach;
hdlc->xmit = hss_hdlc_xmit;
ndev->netdev_ops = &hss_hdlc_ops;
ndev->tx_queue_len = 100;
port->clock_type = CLOCK_EXT;
port->clock_rate = 0;
port->clock_reg = CLK42X_SPEED_2048KHZ;
port->id = pdev->id;
port->dev = &pdev->dev;
netif_napi_add_weight(ndev, &port->napi, hss_hdlc_poll, NAPI_WEIGHT);
err = register_hdlc_device(ndev);
if (err)
goto err_free_netdev;
platform_set_drvdata(pdev, port);
netdev_info(ndev, "initialized\n");
return 0;
err_free_netdev:
free_netdev(ndev);
err_plat:
npe_release(port->npe);
return err;
}
static int ixp4xx_hss_remove(struct platform_device *pdev)
{
struct port *port = platform_get_drvdata(pdev);
unregister_hdlc_device(port->netdev);
free_netdev(port->netdev);
npe_release(port->npe);
return 0;
}
static struct platform_driver ixp4xx_hss_driver = {
.driver.name = DRV_NAME,
.probe = ixp4xx_hss_probe,
.remove = ixp4xx_hss_remove,
};
module_platform_driver(ixp4xx_hss_driver);
MODULE_AUTHOR("Krzysztof Halasa");
MODULE_DESCRIPTION("Intel IXP4xx HSS driver");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:ixp4xx_hss");
|
linux-master
|
drivers/net/wan/ixp4xx_hss.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic HDLC support routines for Linux
* HDLC support
*
* Copyright (C) 1999 - 2006 Krzysztof Halasa <[email protected]>
*/
#include <linux/errno.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
static int raw_ioctl(struct net_device *dev, struct if_settings *ifs);
static __be16 raw_type_trans(struct sk_buff *skb, struct net_device *dev)
{
return cpu_to_be16(ETH_P_IP);
}
static struct hdlc_proto proto = {
.type_trans = raw_type_trans,
.ioctl = raw_ioctl,
.module = THIS_MODULE,
};
static int raw_ioctl(struct net_device *dev, struct if_settings *ifs)
{
raw_hdlc_proto __user *raw_s = ifs->ifs_ifsu.raw_hdlc;
const size_t size = sizeof(raw_hdlc_proto);
raw_hdlc_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
int result;
switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
ifs->type = IF_PROTO_HDLC;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(raw_s, hdlc->state, size))
return -EFAULT;
return 0;
case IF_PROTO_HDLC:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&new_settings, raw_s, size))
return -EFAULT;
if (new_settings.encoding == ENCODING_DEFAULT)
new_settings.encoding = ENCODING_NRZ;
if (new_settings.parity == PARITY_DEFAULT)
new_settings.parity = PARITY_CRC16_PR1_CCITT;
result = hdlc->attach(dev, new_settings.encoding,
new_settings.parity);
if (result)
return result;
result = attach_hdlc_protocol(dev, &proto,
sizeof(raw_hdlc_proto));
if (result)
return result;
memcpy(hdlc->state, &new_settings, size);
dev->type = ARPHRD_RAWHDLC;
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_off(dev);
return 0;
}
return -EINVAL;
}
static int __init hdlc_raw_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
static void __exit hdlc_raw_exit(void)
{
unregister_hdlc_protocol(&proto);
}
module_init(hdlc_raw_init);
module_exit(hdlc_raw_exit);
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("Raw HDLC protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/net/wan/hdlc_raw.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* SDL Inc. RISCom/N2 synchronous serial card driver for Linux
*
* Copyright (C) 1998-2003 Krzysztof Halasa <[email protected]>
*
* For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/>
*
* Note: integrated CSU/DSU/DDS are not supported by this driver
*
* Sources of information:
* Hitachi HD64570 SCA User's Manual
* SDL Inc. PPP/HDLC/CISCO driver
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <asm/io.h>
#include "hd64570.h"
static const char *version = "SDL RISCom/N2 driver version: 1.15";
static const char *devname = "RISCom/N2";
#undef DEBUG_PKT
#define DEBUG_RINGS
#define USE_WINDOWSIZE 16384
#define USE_BUS16BITS 1
#define CLOCK_BASE 9830400 /* 9.8304 MHz */
#define MAX_PAGES 16 /* 16 RAM pages at max */
#define MAX_RAM_SIZE 0x80000 /* 512 KB */
#if MAX_RAM_SIZE > MAX_PAGES * USE_WINDOWSIZE
#undef MAX_RAM_SIZE
#define MAX_RAM_SIZE (MAX_PAGES * USE_WINDOWSIZE)
#endif
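/* Editor's note: with the values above the clamp always fires:
 * MAX_PAGES * USE_WINDOWSIZE = 16 * 16384 = 256 KB, so MAX_RAM_SIZE is
 * effectively 256 KB rather than 512 KB; the #if only matters if
 * USE_WINDOWSIZE or MAX_PAGES is changed.
 */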
#define N2_IOPORTS 0x10
#define NEED_DETECT_RAM
#define NEED_SCA_MSCI_INTR
#define MAX_TX_BUFFERS 10
static char *hw; /* pointer to hw=xxx command line string */
/* RISCom/N2 Board Registers */
/* PC Control Register */
#define N2_PCR 0
#define PCR_RUNSCA 1 /* Run 64570 */
#define PCR_VPM 2 /* Enable VPM - needed if using RAM above 1 MB */
#define PCR_ENWIN 4 /* Open window */
#define PCR_BUS16 8 /* 16-bit bus */
/* Memory Base Address Register */
#define N2_BAR 2
/* Page Scan Register */
#define N2_PSR 4
#define WIN16K 0x00
#define WIN32K 0x20
#define WIN64K 0x40
#define PSR_WINBITS 0x60
#define PSR_DMAEN 0x80
#define PSR_PAGEBITS 0x0F
/* Modem Control Reg */
#define N2_MCR 6
#define CLOCK_OUT_PORT1 0x80
#define CLOCK_OUT_PORT0 0x40
#define TX422_PORT1 0x20
#define TX422_PORT0 0x10
#define DSR_PORT1 0x08
#define DSR_PORT0 0x04
#define DTR_PORT1 0x02
#define DTR_PORT0 0x01
typedef struct port_s {
struct net_device *dev;
struct card_s *card;
spinlock_t lock; /* TX lock */
sync_serial_settings settings;
int valid; /* port enabled */
int rxpart; /* partial frame received, next frame invalid*/
unsigned short encoding;
unsigned short parity;
u16 rxin; /* rx ring buffer 'in' pointer */
u16 txin; /* tx ring buffer 'in' and 'last' pointers */
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
u8 phy_node; /* physical port # - 0 or 1 */
u8 log_node; /* logical port # */
} port_t;
typedef struct card_s {
u8 __iomem *winbase; /* ISA window base address */
u32 phy_winbase; /* ISA physical base address */
u32 ram_size; /* number of bytes */
u16 io; /* IO Base address */
u16 buff_offset; /* offset of first buffer of first channel */
u16 rx_ring_buffers; /* number of buffers in a ring */
u16 tx_ring_buffers;
u8 irq; /* IRQ (3-15) */
port_t ports[2];
struct card_s *next_card;
} card_t;
static card_t *first_card;
static card_t **new_card = &first_card;
#define sca_reg(reg, card) (0x8000 | (card)->io | \
((reg) & 0x0F) | (((reg) & 0xF0) << 6))
#define sca_in(reg, card) inb(sca_reg(reg, card))
#define sca_out(value, reg, card) outb(value, sca_reg(reg, card))
#define sca_inw(reg, card) inw(sca_reg(reg, card))
#define sca_outw(value, reg, card) outw(value, sca_reg(reg, card))
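/* Editor's note: sca_reg() folds an HD64570 register offset into ISA
 * I/O space: the low nibble stays in bits 3:0, the high nibble moves to
 * bits 13:10, and bit 15 is always set. A worked example, assuming
 * io = 0x300: reg 0x24 maps to
 * 0x8000 | 0x300 | 0x04 | (0x20 << 6) = 0x8B04.
 */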
#define port_to_card(port) ((port)->card)
#define log_node(port) ((port)->log_node)
#define phy_node(port) ((port)->phy_node)
#define winsize(card) (USE_WINDOWSIZE)
#define winbase(card) ((card)->winbase)
#define get_port(card, port) ((card)->ports[port].valid ? \
&(card)->ports[port] : NULL)
static __inline__ u8 sca_get_page(card_t *card)
{
return inb(card->io + N2_PSR) & PSR_PAGEBITS;
}
static __inline__ void openwin(card_t *card, u8 page)
{
u8 psr = inb(card->io + N2_PSR);
outb((psr & ~PSR_PAGEBITS) | page, card->io + N2_PSR);
}
#include "hd64570.c"
static void n2_set_iface(port_t *port)
{
card_t *card = port->card;
int io = card->io;
u8 mcr = inb(io + N2_MCR);
u8 msci = get_msci(port);
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
switch (port->settings.clock_type) {
case CLOCK_INT:
mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
rxs |= CLK_BRG_RX; /* BRG output */
txs |= CLK_RXCLK_TX; /* RX clock */
break;
case CLOCK_TXINT:
mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
rxs |= CLK_LINE_RX; /* RXC input */
txs |= CLK_BRG_TX; /* BRG output */
break;
case CLOCK_TXFROMRX:
mcr |= port->phy_node ? CLOCK_OUT_PORT1 : CLOCK_OUT_PORT0;
rxs |= CLK_LINE_RX; /* RXC input */
txs |= CLK_RXCLK_TX; /* RX clock */
break;
default: /* Clock EXTernal */
mcr &= port->phy_node ? ~CLOCK_OUT_PORT1 : ~CLOCK_OUT_PORT0;
rxs |= CLK_LINE_RX; /* RXC input */
txs |= CLK_LINE_TX; /* TXC input */
}
outb(mcr, io + N2_MCR);
port->rxs = rxs;
port->txs = txs;
sca_out(rxs, msci + RXS, card);
sca_out(txs, msci + TXS, card);
sca_set_port(port);
}
static int n2_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
int io = port->card->io;
u8 mcr = inb(io + N2_MCR) |
(port->phy_node ? TX422_PORT1 : TX422_PORT0);
int result;
result = hdlc_open(dev);
if (result)
return result;
mcr &= port->phy_node ? ~DTR_PORT1 : ~DTR_PORT0; /* set DTR ON */
outb(mcr, io + N2_MCR);
outb(inb(io + N2_PCR) | PCR_ENWIN, io + N2_PCR); /* open window */
outb(inb(io + N2_PSR) | PSR_DMAEN, io + N2_PSR); /* enable dma */
sca_open(dev);
n2_set_iface(port);
return 0;
}
static int n2_close(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
int io = port->card->io;
u8 mcr = inb(io + N2_MCR) |
(port->phy_node ? TX422_PORT1 : TX422_PORT0);
sca_close(dev);
mcr |= port->phy_node ? DTR_PORT1 : DTR_PORT0; /* set DTR OFF */
outb(mcr, io + N2_MCR);
hdlc_close(dev);
return 0;
}
static int n2_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd)
{
#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
return 0;
}
#endif
return -EOPNOTSUPP;
}
static int n2_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
port_t *port = dev_to_port(dev);
switch (ifs->type) {
case IF_GET_IFACE:
ifs->type = IF_IFACE_SYNC_SERIAL;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
return -EFAULT;
return 0;
case IF_IFACE_SYNC_SERIAL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
return -EFAULT;
if (new_line.clock_type != CLOCK_EXT &&
new_line.clock_type != CLOCK_TXFROMRX &&
new_line.clock_type != CLOCK_INT &&
new_line.clock_type != CLOCK_TXINT)
return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
memcpy(&port->settings, &new_line, size); /* Update settings */
n2_set_iface(port);
return 0;
default:
return hdlc_ioctl(dev, ifs);
}
}
static void n2_destroy_card(card_t *card)
{
int cnt;
for (cnt = 0; cnt < 2; cnt++)
if (card->ports[cnt].card) {
struct net_device *dev = port_to_dev(&card->ports[cnt]);
unregister_hdlc_device(dev);
}
if (card->irq)
free_irq(card->irq, card);
if (card->winbase) {
iounmap(card->winbase);
release_mem_region(card->phy_winbase, USE_WINDOWSIZE);
}
if (card->io)
release_region(card->io, N2_IOPORTS);
if (card->ports[0].dev)
free_netdev(card->ports[0].dev);
if (card->ports[1].dev)
free_netdev(card->ports[1].dev);
kfree(card);
}
static const struct net_device_ops n2_ops = {
.ndo_open = n2_open,
.ndo_stop = n2_close,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_siocwandev = n2_ioctl,
.ndo_siocdevprivate = n2_siocdevprivate,
};
static int __init n2_run(unsigned long io, unsigned long irq,
unsigned long winbase, long valid0, long valid1)
{
card_t *card;
u8 cnt, pcr;
int i;
if (io < 0x200 || io > 0x3FF || (io % N2_IOPORTS) != 0) {
pr_err("invalid I/O port value\n");
return -ENODEV;
}
if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ {
pr_err("invalid IRQ value\n");
return -ENODEV;
}
if (winbase < 0xA0000 || winbase > 0xFFFFF || (winbase & 0xFFF) != 0) {
pr_err("invalid RAM value\n");
return -ENODEV;
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (!card)
return -ENOBUFS;
card->ports[0].dev = alloc_hdlcdev(&card->ports[0]);
card->ports[1].dev = alloc_hdlcdev(&card->ports[1]);
if (!card->ports[0].dev || !card->ports[1].dev) {
pr_err("unable to allocate memory\n");
n2_destroy_card(card);
return -ENOMEM;
}
if (!request_region(io, N2_IOPORTS, devname)) {
pr_err("I/O port region in use\n");
n2_destroy_card(card);
return -EBUSY;
}
card->io = io;
if (request_irq(irq, sca_intr, 0, devname, card)) {
pr_err("could not allocate IRQ\n");
n2_destroy_card(card);
return -EBUSY;
}
card->irq = irq;
if (!request_mem_region(winbase, USE_WINDOWSIZE, devname)) {
pr_err("could not request RAM window\n");
n2_destroy_card(card);
return -EBUSY;
}
card->phy_winbase = winbase;
card->winbase = ioremap(winbase, USE_WINDOWSIZE);
if (!card->winbase) {
pr_err("ioremap() failed\n");
n2_destroy_card(card);
return -EFAULT;
}
outb(0, io + N2_PCR);
outb(winbase >> 12, io + N2_BAR);
switch (USE_WINDOWSIZE) {
case 16384:
outb(WIN16K, io + N2_PSR);
break;
case 32768:
outb(WIN32K, io + N2_PSR);
break;
case 65536:
outb(WIN64K, io + N2_PSR);
break;
default:
pr_err("invalid window size\n");
n2_destroy_card(card);
return -ENODEV;
}
pcr = PCR_ENWIN | PCR_VPM | (USE_BUS16BITS ? PCR_BUS16 : 0);
outb(pcr, io + N2_PCR);
card->ram_size = sca_detect_ram(card, card->winbase, MAX_RAM_SIZE);
/* number of TX + RX buffers for one port */
i = card->ram_size / ((valid0 + valid1) * (sizeof(pkt_desc) +
HDLC_MAX_MRU));
card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
card->rx_ring_buffers = i - card->tx_ring_buffers;
card->buff_offset = (valid0 + valid1) * sizeof(pkt_desc) *
(card->tx_ring_buffers + card->rx_ring_buffers);
pr_info("RISCom/N2 %u KB RAM, IRQ%u, using %u TX + %u RX packets rings\n",
card->ram_size / 1024, card->irq,
card->tx_ring_buffers, card->rx_ring_buffers);
if (card->tx_ring_buffers < 1) {
pr_err("RAM test failed\n");
n2_destroy_card(card);
return -EIO;
}
pcr |= PCR_RUNSCA; /* run SCA */
outb(pcr, io + N2_PCR);
outb(0, io + N2_MCR);
sca_init(card, 0);
for (cnt = 0; cnt < 2; cnt++) {
port_t *port = &card->ports[cnt];
struct net_device *dev = port_to_dev(port);
hdlc_device *hdlc = dev_to_hdlc(dev);
if ((cnt == 0 && !valid0) || (cnt == 1 && !valid1))
continue;
port->phy_node = cnt;
port->valid = 1;
if ((cnt == 1) && valid0)
port->log_node = 1;
spin_lock_init(&port->lock);
dev->irq = irq;
dev->mem_start = winbase;
dev->mem_end = winbase + USE_WINDOWSIZE - 1;
dev->tx_queue_len = 50;
dev->netdev_ops = &n2_ops;
hdlc->attach = sca_attach;
hdlc->xmit = sca_xmit;
port->settings.clock_type = CLOCK_EXT;
port->card = card;
if (register_hdlc_device(dev)) {
pr_warn("unable to register hdlc device\n");
port->card = NULL;
n2_destroy_card(card);
return -ENOBUFS;
}
sca_init_port(port); /* Set up SCA memory */
netdev_info(dev, "RISCom/N2 node %d\n", port->phy_node);
}
*new_card = card;
new_card = &card->next_card;
return 0;
}
static int __init n2_init(void)
{
if (!hw) {
#ifdef MODULE
pr_info("no card initialized\n");
#endif
return -EINVAL; /* no parameters specified, abort */
}
pr_info("%s\n", version);
do {
unsigned long io, irq, ram;
long valid[2] = { 0, 0 }; /* Default = both ports disabled */
io = simple_strtoul(hw, &hw, 0);
if (*hw++ != ',')
break;
irq = simple_strtoul(hw, &hw, 0);
if (*hw++ != ',')
break;
ram = simple_strtoul(hw, &hw, 0);
if (*hw++ != ',')
break;
while (1) {
if (*hw == '0' && !valid[0])
valid[0] = 1; /* Port 0 enabled */
else if (*hw == '1' && !valid[1])
valid[1] = 1; /* Port 1 enabled */
else
break;
hw++;
}
if (!valid[0] && !valid[1])
break; /* at least one port must be used */
if (*hw == ':' || *hw == '\x0')
n2_run(io, irq, ram, valid[0], valid[1]);
if (*hw == '\x0')
return first_card ? 0 : -EINVAL;
} while (*hw++ == ':');
pr_err("invalid hardware parameters\n");
return first_card ? 0 : -EINVAL;
}
static void __exit n2_cleanup(void)
{
card_t *card = first_card;
while (card) {
card_t *ptr = card;
card = card->next_card;
n2_destroy_card(ptr);
}
}
module_init(n2_init);
module_exit(n2_cleanup);
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("RISCom/N2 serial port driver");
MODULE_LICENSE("GPL v2");
module_param(hw, charp, 0444);
MODULE_PARM_DESC(hw, "io,irq,ram,ports:io,irq,...");
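/* Editor's note: an illustrative invocation (the values are assumptions,
 * adjust to the actual jumper settings):
 *
 *	modprobe n2 hw=0x300,10,0xD0000,01
 *
 * i.e. I/O base 0x300, IRQ 10, RAM window at 0xD0000, ports 0 and 1
 * both enabled; parameters for additional cards follow after a ':'.
 */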
|
linux-master
|
drivers/net/wan/n2.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Cyclades PC300 synchronous serial card driver for Linux
*
* Copyright (C) 2000-2008 Krzysztof Halasa <[email protected]>
*
* For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/>.
*
* Sources of information:
* Hitachi HD64572 SCA-II User's Manual
* Original Cyclades PC300 Linux driver
*
* This driver currently supports only PC300/RSV (V.24/V.35) and
* PC300/X21 cards.
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/in.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/moduleparam.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <asm/io.h>
#include "hd64572.h"
#undef DEBUG_PKT
#define DEBUG_RINGS
#define PC300_PLX_SIZE 0x80 /* PLX control window size (128 B) */
#define PC300_SCA_SIZE 0x400 /* SCA window size (1 KB) */
#define MAX_TX_BUFFERS 10
static int pci_clock_freq = 33000000;
static int use_crystal_clock;
static unsigned int CLOCK_BASE;
/* Masks to access the init_ctrl PLX register */
#define PC300_CLKSEL_MASK (0x00000004UL)
#define PC300_CHMEDIA_MASK(port) (0x00000020UL << ((port) * 3))
#define PC300_CTYPE_MASK (0x00000800UL)
enum { PC300_RSV = 1, PC300_X21, PC300_TE }; /* card types */
/* PLX PCI9050-1 local configuration and shared runtime registers.
* This structure can be used to access 9050 registers (memory mapped).
*/
typedef struct {
u32 loc_addr_range[4]; /* 00-0Ch : Local Address Ranges */
u32 loc_rom_range; /* 10h : Local ROM Range */
u32 loc_addr_base[4]; /* 14-20h : Local Address Base Addrs */
u32 loc_rom_base; /* 24h : Local ROM Base */
u32 loc_bus_descr[4]; /* 28-34h : Local Bus Descriptors */
u32 rom_bus_descr; /* 38h : ROM Bus Descriptor */
u32 cs_base[4]; /* 3C-48h : Chip Select Base Addrs */
u32 intr_ctrl_stat; /* 4Ch : Interrupt Control/Status */
u32 init_ctrl; /* 50h : EEPROM ctrl, Init Ctrl, etc */
} plx9050;
typedef struct port_s {
struct napi_struct napi;
struct net_device *netdev;
struct card_s *card;
spinlock_t lock; /* TX lock */
sync_serial_settings settings;
int rxpart; /* partial frame received, next frame invalid*/
unsigned short encoding;
unsigned short parity;
unsigned int iface;
u16 rxin; /* rx ring buffer 'in' pointer */
u16 txin; /* tx ring buffer 'in' and 'last' pointers */
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
u8 chan; /* physical port # - 0 or 1 */
} port_t;
typedef struct card_s {
int type; /* RSV, X21, etc. */
int n_ports; /* 1 or 2 ports */
u8 __iomem *rambase; /* buffer memory base (virtual) */
u8 __iomem *scabase; /* SCA memory base (virtual) */
plx9050 __iomem *plxbase; /* PLX registers memory base (virtual) */
u32 init_ctrl_value; /* Saved value - 9050 bug workaround */
u16 rx_ring_buffers; /* number of buffers in a ring */
u16 tx_ring_buffers;
u16 buff_offset; /* offset of first buffer of first channel */
u8 irq; /* interrupt request level */
port_t ports[2];
} card_t;
#define get_port(card, port) ((port) < (card)->n_ports ? \
(&(card)->ports[port]) : (NULL))
#include "hd64572.c"
static void pc300_set_iface(port_t *port)
{
card_t *card = port->card;
u32 __iomem *init_ctrl = &card->plxbase->init_ctrl;
u16 msci = get_msci(port);
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
sca_out(EXS_TES1, (port->chan ? MSCI1_OFFSET : MSCI0_OFFSET) + EXS,
port->card);
switch (port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG; /* BRG output */
txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
break;
case CLOCK_TXINT:
rxs |= CLK_LINE; /* RXC input */
txs |= CLK_PIN_OUT | CLK_BRG; /* BRG output */
break;
case CLOCK_TXFROMRX:
rxs |= CLK_LINE; /* RXC input */
txs |= CLK_PIN_OUT | CLK_TX_RXCLK; /* RX clock */
break;
default: /* EXTernal clock */
rxs |= CLK_LINE; /* RXC input */
txs |= CLK_PIN_OUT | CLK_LINE; /* TXC input */
break;
}
port->rxs = rxs;
port->txs = txs;
sca_out(rxs, msci + RXS, card);
sca_out(txs, msci + TXS, card);
sca_set_port(port);
if (port->card->type == PC300_RSV) {
if (port->iface == IF_IFACE_V35)
writel(card->init_ctrl_value |
PC300_CHMEDIA_MASK(port->chan), init_ctrl);
else
writel(card->init_ctrl_value &
~PC300_CHMEDIA_MASK(port->chan), init_ctrl);
}
}
static int pc300_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
int result = hdlc_open(dev);
if (result)
return result;
sca_open(dev);
pc300_set_iface(port);
return 0;
}
static int pc300_close(struct net_device *dev)
{
sca_close(dev);
hdlc_close(dev);
return 0;
}
static int pc300_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd)
{
#ifdef DEBUG_RINGS
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
return 0;
}
#endif
return -EOPNOTSUPP;
}
static int pc300_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
int new_type;
port_t *port = dev_to_port(dev);
if (ifs->type == IF_GET_IFACE) {
ifs->type = port->iface;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
return -EFAULT;
return 0;
}
if (port->card->type == PC300_X21 &&
(ifs->type == IF_IFACE_SYNC_SERIAL ||
ifs->type == IF_IFACE_X21))
new_type = IF_IFACE_X21;
else if (port->card->type == PC300_RSV &&
(ifs->type == IF_IFACE_SYNC_SERIAL ||
ifs->type == IF_IFACE_V35))
new_type = IF_IFACE_V35;
else if (port->card->type == PC300_RSV &&
ifs->type == IF_IFACE_V24)
new_type = IF_IFACE_V24;
else
return hdlc_ioctl(dev, ifs);
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
return -EFAULT;
if (new_line.clock_type != CLOCK_EXT &&
new_line.clock_type != CLOCK_TXFROMRX &&
new_line.clock_type != CLOCK_INT &&
new_line.clock_type != CLOCK_TXINT)
return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
memcpy(&port->settings, &new_line, size); /* Update settings */
port->iface = new_type;
pc300_set_iface(port);
return 0;
}
static void pc300_pci_remove_one(struct pci_dev *pdev)
{
int i;
card_t *card = pci_get_drvdata(pdev);
for (i = 0; i < 2; i++)
if (card->ports[i].card)
unregister_hdlc_device(card->ports[i].netdev);
if (card->irq)
free_irq(card->irq, card);
if (card->rambase)
iounmap(card->rambase);
if (card->scabase)
iounmap(card->scabase);
if (card->plxbase)
iounmap(card->plxbase);
pci_release_regions(pdev);
pci_disable_device(pdev);
if (card->ports[0].netdev)
free_netdev(card->ports[0].netdev);
if (card->ports[1].netdev)
free_netdev(card->ports[1].netdev);
kfree(card);
}
static const struct net_device_ops pc300_ops = {
.ndo_open = pc300_open,
.ndo_stop = pc300_close,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_siocwandev = pc300_ioctl,
.ndo_siocdevprivate = pc300_siocdevprivate,
};
static int pc300_pci_init_one(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
card_t *card;
u32 __iomem *p;
int i;
u32 ramsize;
u32 ramphys; /* buffer memory base */
u32 scaphys; /* SCA memory base */
u32 plxphys; /* PLX registers memory base */
i = pci_enable_device(pdev);
if (i)
return i;
i = pci_request_regions(pdev, "PC300");
if (i) {
pci_disable_device(pdev);
return i;
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (!card) {
pci_release_regions(pdev);
pci_disable_device(pdev);
return -ENOBUFS;
}
pci_set_drvdata(pdev, card);
if (pci_resource_len(pdev, 0) != PC300_PLX_SIZE ||
pci_resource_len(pdev, 2) != PC300_SCA_SIZE ||
pci_resource_len(pdev, 3) < 16384) {
pr_err("invalid card EEPROM parameters\n");
pc300_pci_remove_one(pdev);
return -EFAULT;
}
plxphys = pci_resource_start(pdev, 0) & PCI_BASE_ADDRESS_MEM_MASK;
card->plxbase = ioremap(plxphys, PC300_PLX_SIZE);
scaphys = pci_resource_start(pdev, 2) & PCI_BASE_ADDRESS_MEM_MASK;
card->scabase = ioremap(scaphys, PC300_SCA_SIZE);
ramphys = pci_resource_start(pdev, 3) & PCI_BASE_ADDRESS_MEM_MASK;
card->rambase = pci_ioremap_bar(pdev, 3);
if (!card->plxbase || !card->scabase || !card->rambase) {
pr_err("ioremap() failed\n");
pc300_pci_remove_one(pdev);
return -ENOMEM;
}
/* PLX PCI 9050 workaround for local configuration register read bug */
pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, scaphys);
card->init_ctrl_value = readl(&((plx9050 __iomem *)card->scabase)->init_ctrl);
pci_write_config_dword(pdev, PCI_BASE_ADDRESS_0, plxphys);
if (pdev->device == PCI_DEVICE_ID_PC300_TE_1 ||
pdev->device == PCI_DEVICE_ID_PC300_TE_2)
card->type = PC300_TE; /* not fully supported */
else if (card->init_ctrl_value & PC300_CTYPE_MASK)
card->type = PC300_X21;
else
card->type = PC300_RSV;
if (pdev->device == PCI_DEVICE_ID_PC300_RX_1 ||
pdev->device == PCI_DEVICE_ID_PC300_TE_1)
card->n_ports = 1;
else
card->n_ports = 2;
for (i = 0; i < card->n_ports; i++) {
card->ports[i].netdev = alloc_hdlcdev(&card->ports[i]);
if (!card->ports[i].netdev) {
pr_err("unable to allocate memory\n");
pc300_pci_remove_one(pdev);
return -ENOMEM;
}
}
/* Reset PLX */
p = &card->plxbase->init_ctrl;
writel(card->init_ctrl_value | 0x40000000, p);
readl(p); /* Flush the write - do not use sca_flush */
udelay(1);
writel(card->init_ctrl_value, p);
readl(p); /* Flush the write - do not use sca_flush */
udelay(1);
/* Reload Config. Registers from EEPROM */
writel(card->init_ctrl_value | 0x20000000, p);
readl(p); /* Flush the write - do not use sca_flush */
udelay(1);
writel(card->init_ctrl_value, p);
readl(p); /* Flush the write - do not use sca_flush */
udelay(1);
ramsize = sca_detect_ram(card, card->rambase,
pci_resource_len(pdev, 3));
if (use_crystal_clock)
card->init_ctrl_value &= ~PC300_CLKSEL_MASK;
else
card->init_ctrl_value |= PC300_CLKSEL_MASK;
writel(card->init_ctrl_value, &card->plxbase->init_ctrl);
/* number of TX + RX buffers for one port */
i = ramsize / (card->n_ports * (sizeof(pkt_desc) + HDLC_MAX_MRU));
card->tx_ring_buffers = min(i / 2, MAX_TX_BUFFERS);
card->rx_ring_buffers = i - card->tx_ring_buffers;
card->buff_offset = card->n_ports * sizeof(pkt_desc) *
(card->tx_ring_buffers + card->rx_ring_buffers);
pr_info("PC300/%s, %u KB RAM at 0x%x, IRQ%u, using %u TX + %u RX packets rings\n",
card->type == PC300_X21 ? "X21" :
card->type == PC300_TE ? "TE" : "RSV",
ramsize / 1024, ramphys, pdev->irq,
card->tx_ring_buffers, card->rx_ring_buffers);
if (card->tx_ring_buffers < 1) {
pr_err("RAM test failed\n");
pc300_pci_remove_one(pdev);
return -EFAULT;
}
/* Enable interrupts on the PCI bridge, LINTi1 active low */
writew(0x0041, &card->plxbase->intr_ctrl_stat);
/* Allocate IRQ */
if (request_irq(pdev->irq, sca_intr, IRQF_SHARED, "pc300", card)) {
pr_warn("could not allocate IRQ%d\n", pdev->irq);
pc300_pci_remove_one(pdev);
return -EBUSY;
}
card->irq = pdev->irq;
sca_init(card, 0);
// COTE not set - allows better TX DMA settings
// sca_out(sca_in(PCR, card) | PCR_COTE, PCR, card);
sca_out(0x10, BTCR, card);
for (i = 0; i < card->n_ports; i++) {
port_t *port = &card->ports[i];
struct net_device *dev = port->netdev;
hdlc_device *hdlc = dev_to_hdlc(dev);
port->chan = i;
spin_lock_init(&port->lock);
dev->irq = card->irq;
dev->mem_start = ramphys;
dev->mem_end = ramphys + ramsize - 1;
dev->tx_queue_len = 50;
dev->netdev_ops = &pc300_ops;
hdlc->attach = sca_attach;
hdlc->xmit = sca_xmit;
port->settings.clock_type = CLOCK_EXT;
port->card = card;
if (card->type == PC300_X21)
port->iface = IF_IFACE_X21;
else
port->iface = IF_IFACE_V35;
sca_init_port(port);
if (register_hdlc_device(dev)) {
pr_err("unable to register hdlc device\n");
port->card = NULL;
pc300_pci_remove_one(pdev);
return -ENOBUFS;
}
netdev_info(dev, "PC300 channel %d\n", port->chan);
}
return 0;
}
static const struct pci_device_id pc300_pci_tbl[] = {
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_RX_2, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_1, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ PCI_VENDOR_ID_CYCLADES, PCI_DEVICE_ID_PC300_TE_2, PCI_ANY_ID,
PCI_ANY_ID, 0, 0, 0 },
{ 0, }
};
static struct pci_driver pc300_pci_driver = {
.name = "PC300",
.id_table = pc300_pci_tbl,
.probe = pc300_pci_init_one,
.remove = pc300_pci_remove_one,
};
static int __init pc300_init_module(void)
{
if (pci_clock_freq < 1000000 || pci_clock_freq > 80000000) {
pr_err("Invalid PCI clock frequency\n");
return -EINVAL;
}
if (use_crystal_clock != 0 && use_crystal_clock != 1) {
pr_err("Invalid 'use_crystal_clock' value\n");
return -EINVAL;
}
CLOCK_BASE = use_crystal_clock ? 24576000 : pci_clock_freq;
return pci_register_driver(&pc300_pci_driver);
}
static void __exit pc300_cleanup_module(void)
{
pci_unregister_driver(&pc300_pci_driver);
}
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("Cyclades PC300 serial port driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(pci, pc300_pci_tbl);
module_param(pci_clock_freq, int, 0444);
MODULE_PARM_DESC(pci_clock_freq, "System PCI clock frequency in Hz");
module_param(use_crystal_clock, int, 0444);
MODULE_PARM_DESC(use_crystal_clock,
"Use 24.576 MHz clock instead of PCI clock");
module_init(pc300_init_module);
module_exit(pc300_cleanup_module);
|
linux-master
|
drivers/net/wan/pc300too.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Generic HDLC support routines for Linux
* HDLC Ethernet emulation support
*
* Copyright (C) 2002-2006 Krzysztof Halasa <[email protected]>
*/
#include <linux/errno.h>
#include <linux/etherdevice.h>
#include <linux/gfp.h>
#include <linux/hdlc.h>
#include <linux/if_arp.h>
#include <linux/inetdevice.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pkt_sched.h>
#include <linux/poll.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
static int raw_eth_ioctl(struct net_device *dev, struct if_settings *ifs);
static netdev_tx_t eth_tx(struct sk_buff *skb, struct net_device *dev)
{
int pad = ETH_ZLEN - skb->len;
if (pad > 0) { /* Pad the frame with zeros */
int len = skb->len;
if (skb_tailroom(skb) < pad)
if (pskb_expand_head(skb, 0, pad, GFP_ATOMIC)) {
dev->stats.tx_dropped++;
dev_kfree_skb(skb);
return NETDEV_TX_OK;
}
skb_put(skb, pad);
memset(skb->data + len, 0, pad);
}
return dev_to_hdlc(dev)->xmit(skb, dev);
}
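/* Worked example: ETH_ZLEN is 60 (minimum Ethernet frame length without
 * FCS), so a 46-byte skb gets pad = 14. If tailroom allows, the frame is
 * extended in place; otherwise pskb_expand_head() grows the buffer first.
 * The new bytes are zeroed so short frames never leak stale memory onto
 * the wire.
 */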
static struct hdlc_proto proto = {
.type_trans = eth_type_trans,
.xmit = eth_tx,
.ioctl = raw_eth_ioctl,
.module = THIS_MODULE,
};
static int raw_eth_ioctl(struct net_device *dev, struct if_settings *ifs)
{
raw_hdlc_proto __user *raw_s = ifs->ifs_ifsu.raw_hdlc;
const size_t size = sizeof(raw_hdlc_proto);
raw_hdlc_proto new_settings;
hdlc_device *hdlc = dev_to_hdlc(dev);
unsigned int old_qlen;
int result;
switch (ifs->type) {
case IF_GET_PROTO:
if (dev_to_hdlc(dev)->proto != &proto)
return -EINVAL;
ifs->type = IF_PROTO_HDLC_ETH;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(raw_s, hdlc->state, size))
return -EFAULT;
return 0;
case IF_PROTO_HDLC_ETH:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (dev->flags & IFF_UP)
return -EBUSY;
if (copy_from_user(&new_settings, raw_s, size))
return -EFAULT;
if (new_settings.encoding == ENCODING_DEFAULT)
new_settings.encoding = ENCODING_NRZ;
if (new_settings.parity == PARITY_DEFAULT)
new_settings.parity = PARITY_CRC16_PR1_CCITT;
result = hdlc->attach(dev, new_settings.encoding,
new_settings.parity);
if (result)
return result;
result = attach_hdlc_protocol(dev, &proto,
sizeof(raw_hdlc_proto));
if (result)
return result;
memcpy(hdlc->state, &new_settings, size);
old_qlen = dev->tx_queue_len;
ether_setup(dev);
dev->tx_queue_len = old_qlen;
dev->priv_flags &= ~IFF_TX_SKB_SHARING;
eth_hw_addr_random(dev);
call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
netif_dormant_off(dev);
return 0;
}
return -EINVAL;
}
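/* Usage sketch, assuming the standard sethdlc(8) utility (not part of
 * this file): switching a port to Ethernet emulation from userspace,
 *
 *   sethdlc hdlc0 hdlc-eth
 *
 * arrives here as an IF_PROTO_HDLC_ETH request; afterwards the random
 * MAC address set by eth_hw_addr_random() is visible via
 * "ip link show hdlc0".
 */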
static int __init hdlc_eth_init(void)
{
register_hdlc_protocol(&proto);
return 0;
}
static void __exit hdlc_eth_exit(void)
{
unregister_hdlc_protocol(&proto);
}
module_init(hdlc_eth_init);
module_exit(hdlc_eth_exit);
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("Ethernet encapsulation support for generic HDLC");
MODULE_LICENSE("GPL v2");
|
linux-master
|
drivers/net/wan/hdlc_raw_eth.c
|
// SPDX-License-Identifier: GPL-2.0-only
/*
* Moxa C101 synchronous serial card driver for Linux
*
* Copyright (C) 2000-2003 Krzysztof Halasa <[email protected]>
*
* For information see <https://www.kernel.org/pub/linux/utils/net/hdlc/>
*
* Sources of information:
* Hitachi HD64570 SCA User's Manual
* Moxa C101 User's Manual
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/capability.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/hdlc.h>
#include <linux/delay.h>
#include <asm/io.h>
#include "hd64570.h"
static const char *version = "Moxa C101 driver version: 1.15";
static const char *devname = "C101";
#undef DEBUG_PKT
#define DEBUG_RINGS
#define C101_PAGE 0x1D00
#define C101_DTR 0x1E00
#define C101_SCA 0x1F00
#define C101_WINDOW_SIZE 0x2000
#define C101_MAPPED_RAM_SIZE 0x4000
#define RAM_SIZE (256 * 1024)
#define TX_RING_BUFFERS 10
#define RX_RING_BUFFERS ((RAM_SIZE - C101_WINDOW_SIZE) / \
(sizeof(pkt_desc) + HDLC_MAX_MRU) - TX_RING_BUFFERS)
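/* Worked example: HDLC_MAX_MRU is 1600 and sizeof(pkt_desc) is 10-12
 * bytes depending on padding (assumption based on the hd64570.h struct),
 * so RX_RING_BUFFERS = (262144 - 8192) / ~1610 - 10 = 147: the 256 KB of
 * on-board RAM minus the 8 KB register window, split into descriptor +
 * max-MRU sized slots, less the 10 TX buffers.
 */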
#define CLOCK_BASE 9830400 /* 9.8304 MHz */
#define PAGE0_ALWAYS_MAPPED
static char *hw; /* pointer to hw=xxx command line string */
typedef struct card_s {
struct net_device *dev;
spinlock_t lock; /* TX lock */
u8 __iomem *win0base; /* ISA window base address */
u32 phy_winbase; /* ISA physical base address */
sync_serial_settings settings;
int rxpart; /* partial frame received, next frame invalid*/
unsigned short encoding;
unsigned short parity;
u16 rx_ring_buffers; /* number of buffers in a ring */
u16 tx_ring_buffers;
u16 buff_offset; /* offset of first buffer of first channel */
u16 rxin; /* rx ring buffer 'in' pointer */
u16 txin; /* tx ring buffer 'in' and 'last' pointers */
u16 txlast;
u8 rxs, txs, tmc; /* SCA registers */
u8 irq; /* IRQ (3-15) */
u8 page;
struct card_s *next_card;
} card_t;
typedef card_t port_t;
static card_t *first_card;
static card_t **new_card = &first_card;
#define sca_in(reg, card) readb((card)->win0base + C101_SCA + (reg))
#define sca_out(value, reg, card) writeb(value, (card)->win0base + C101_SCA + (reg))
#define sca_inw(reg, card) readw((card)->win0base + C101_SCA + (reg))
/* EDA address register must be set in EDAL, EDAH order - 8 bit ISA bus */
#define sca_outw(value, reg, card) do { \
writeb(value & 0xFF, (card)->win0base + C101_SCA + (reg)); \
writeb((value >> 8) & 0xFF, (card)->win0base + C101_SCA + (reg + 1));\
} while (0)
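/* Example of the ordering constraint: sca_outw(0x1234, EDAL, card)
 * writes 0x34 to EDAL and then 0x12 to the following register (EDAH),
 * the low-byte-first sequence the EDA register pair requires on this
 * 8-bit ISA bus.
 */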
#define port_to_card(port) (port)
#define log_node(port) (0)
#define phy_node(port) (0)
#define winsize(card) (C101_WINDOW_SIZE)
#define win0base(card) ((card)->win0base)
#define winbase(card) ((card)->win0base + 0x2000)
#define get_port(card, port) (card)
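/* The C101 is a single-port card, so card_t doubles as port_t (see the
 * typedef above) and the port/card mapping macros collapse to constants
 * or identities; this lets the shared hd64570.c code below treat it
 * like the multi-port cards.
 */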
static void sca_msci_intr(port_t *port);
static inline u8 sca_get_page(card_t *card)
{
return card->page;
}
static inline void openwin(card_t *card, u8 page)
{
card->page = page;
writeb(page, card->win0base + C101_PAGE);
}
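/* Paging sketch, inferred from the constants above: winbase() maps an
 * 8 KB window (C101_WINDOW_SIZE) at win0base + 0x2000, and writing n to
 * the C101_PAGE register selects which 8 KB page of the card's 256 KB
 * RAM appears in it; the included hd64570.c drives this via openwin().
 */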
#include "hd64570.c"
static inline void set_carrier(port_t *port)
{
if (!(sca_in(MSCI1_OFFSET + ST3, port) & ST3_DCD))
netif_carrier_on(port_to_dev(port));
else
netif_carrier_off(port_to_dev(port));
}
static void sca_msci_intr(port_t *port)
{
u8 stat = sca_in(MSCI0_OFFSET + ST1, port); /* read MSCI ST1 status */
/* Reset MSCI TX underrun and CDCD (ignored) status bit */
sca_out(stat & (ST1_UDRN | ST1_CDCD), MSCI0_OFFSET + ST1, port);
if (stat & ST1_UDRN) {
/* TX Underrun error detected */
port_to_dev(port)->stats.tx_errors++;
port_to_dev(port)->stats.tx_fifo_errors++;
}
stat = sca_in(MSCI1_OFFSET + ST1, port); /* read MSCI1 ST1 status */
/* Reset MSCI CDCD status bit - uses ch#2 DCD input */
sca_out(stat & ST1_CDCD, MSCI1_OFFSET + ST1, port);
if (stat & ST1_CDCD)
set_carrier(port);
}
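/* Clock source matrix implemented by the switch below:
 * CLOCK_INT: RX clock from BRG, TX clock from RX clock
 * CLOCK_TXINT: RX clock from line (RXC), TX clock from BRG
 * CLOCK_TXFROMRX: RX clock from line (RXC), TX clock from RX clock
 * CLOCK_EXT: RX clock from line (RXC), TX clock from line (TXC)
 */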
static void c101_set_iface(port_t *port)
{
u8 rxs = port->rxs & CLK_BRG_MASK;
u8 txs = port->txs & CLK_BRG_MASK;
switch (port->settings.clock_type) {
case CLOCK_INT:
rxs |= CLK_BRG_RX; /* BRG output */
txs |= CLK_RXCLK_TX; /* RX clock */
break;
case CLOCK_TXINT:
rxs |= CLK_LINE_RX; /* RXC input */
txs |= CLK_BRG_TX; /* BRG output */
break;
case CLOCK_TXFROMRX:
rxs |= CLK_LINE_RX; /* RXC input */
txs |= CLK_RXCLK_TX; /* RX clock */
break;
default: /* EXTernal clock */
rxs |= CLK_LINE_RX; /* RXC input */
txs |= CLK_LINE_TX; /* TXC input */
}
port->rxs = rxs;
port->txs = txs;
sca_out(rxs, MSCI1_OFFSET + RXS, port);
sca_out(txs, MSCI1_OFFSET + TXS, port);
sca_set_port(port);
}
static int c101_open(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
int result;
result = hdlc_open(dev);
if (result)
return result;
writeb(1, port->win0base + C101_DTR);
sca_out(0, MSCI1_OFFSET + CTL, port); /* RTS uses ch#2 output */
sca_open(dev);
/* DCD is connected to port 2 !@#$%^& - disable MSCI0 CDCD interrupt */
sca_out(IE1_UDRN, MSCI0_OFFSET + IE1, port);
sca_out(IE0_TXINT, MSCI0_OFFSET + IE0, port);
set_carrier(port);
/* enable MSCI1 CDCD interrupt */
sca_out(IE1_CDCD, MSCI1_OFFSET + IE1, port);
sca_out(IE0_RXINTA, MSCI1_OFFSET + IE0, port);
sca_out(0x48, IER0, port); /* TXINT #0 and RXINT #1 */
c101_set_iface(port);
return 0;
}
static int c101_close(struct net_device *dev)
{
port_t *port = dev_to_port(dev);
sca_close(dev);
writeb(0, port->win0base + C101_DTR);
sca_out(CTL_NORTS, MSCI1_OFFSET + CTL, port);
hdlc_close(dev);
return 0;
}
static int c101_siocdevprivate(struct net_device *dev, struct ifreq *ifr,
void __user *data, int cmd)
{
#ifdef DEBUG_RINGS
port_t *port = dev_to_port(dev);
if (cmd == SIOCDEVPRIVATE) {
sca_dump_rings(dev);
printk(KERN_DEBUG "MSCI1: ST: %02x %02x %02x %02x\n",
sca_in(MSCI1_OFFSET + ST0, port),
sca_in(MSCI1_OFFSET + ST1, port),
sca_in(MSCI1_OFFSET + ST2, port),
sca_in(MSCI1_OFFSET + ST3, port));
return 0;
}
#endif
return -EOPNOTSUPP;
}
static int c101_ioctl(struct net_device *dev, struct if_settings *ifs)
{
const size_t size = sizeof(sync_serial_settings);
sync_serial_settings new_line;
sync_serial_settings __user *line = ifs->ifs_ifsu.sync;
port_t *port = dev_to_port(dev);
switch (ifs->type) {
case IF_GET_IFACE:
ifs->type = IF_IFACE_SYNC_SERIAL;
if (ifs->size < size) {
ifs->size = size; /* data size wanted */
return -ENOBUFS;
}
if (copy_to_user(line, &port->settings, size))
return -EFAULT;
return 0;
case IF_IFACE_SYNC_SERIAL:
if (!capable(CAP_NET_ADMIN))
return -EPERM;
if (copy_from_user(&new_line, line, size))
return -EFAULT;
if (new_line.clock_type != CLOCK_EXT &&
new_line.clock_type != CLOCK_TXFROMRX &&
new_line.clock_type != CLOCK_INT &&
new_line.clock_type != CLOCK_TXINT)
return -EINVAL; /* No such clock setting */
if (new_line.loopback != 0 && new_line.loopback != 1)
return -EINVAL;
memcpy(&port->settings, &new_line, size); /* Update settings */
c101_set_iface(port);
return 0;
default:
return hdlc_ioctl(dev, ifs);
}
}
static void c101_destroy_card(card_t *card)
{
readb(card->win0base + C101_PAGE); /* Resets SCA? */
if (card->irq)
free_irq(card->irq, card);
if (card->win0base) {
iounmap(card->win0base);
release_mem_region(card->phy_winbase, C101_MAPPED_RAM_SIZE);
}
free_netdev(card->dev);
kfree(card);
}
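/* Note: this teardown tolerates a partially initialized card (irq and
 * win0base are checked before being released), so every error path in
 * c101_run() below can call it unconditionally.
 */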
static const struct net_device_ops c101_ops = {
.ndo_open = c101_open,
.ndo_stop = c101_close,
.ndo_start_xmit = hdlc_start_xmit,
.ndo_siocwandev = c101_ioctl,
.ndo_siocdevprivate = c101_siocdevprivate,
};
static int __init c101_run(unsigned long irq, unsigned long winbase)
{
struct net_device *dev;
hdlc_device *hdlc;
card_t *card;
int result;
if (irq < 3 || irq > 15 || irq == 6) /* FIXME */ {
pr_err("invalid IRQ value\n");
return -ENODEV;
}
if (winbase < 0xC0000 || winbase > 0xDFFFF || (winbase & 0x3FFF) != 0) {
pr_err("invalid RAM value\n");
return -ENODEV;
}
card = kzalloc(sizeof(card_t), GFP_KERNEL);
if (!card)
return -ENOBUFS;
card->dev = alloc_hdlcdev(card);
if (!card->dev) {
pr_err("unable to allocate memory\n");
kfree(card);
return -ENOBUFS;
}
if (request_irq(irq, sca_intr, 0, devname, card)) {
pr_err("could not allocate IRQ\n");
c101_destroy_card(card);
return -EBUSY;
}
card->irq = irq;
if (!request_mem_region(winbase, C101_MAPPED_RAM_SIZE, devname)) {
pr_err("could not request RAM window\n");
c101_destroy_card(card);
return -EBUSY;
}
card->phy_winbase = winbase;
card->win0base = ioremap(winbase, C101_MAPPED_RAM_SIZE);
if (!card->win0base) {
pr_err("could not map I/O address\n");
c101_destroy_card(card);
return -EFAULT;
}
card->tx_ring_buffers = TX_RING_BUFFERS;
card->rx_ring_buffers = RX_RING_BUFFERS;
card->buff_offset = C101_WINDOW_SIZE; /* Bytes 1D00-1FFF reserved */
readb(card->win0base + C101_PAGE); /* Resets SCA? */
udelay(100);
writeb(0, card->win0base + C101_PAGE);
writeb(0, card->win0base + C101_DTR); /* Power-up for RAM? */
sca_init(card, 0);
dev = port_to_dev(card);
hdlc = dev_to_hdlc(dev);
spin_lock_init(&card->lock);
dev->irq = irq;
dev->mem_start = winbase;
dev->mem_end = winbase + C101_MAPPED_RAM_SIZE - 1;
dev->tx_queue_len = 50;
dev->netdev_ops = &c101_ops;
hdlc->attach = sca_attach;
hdlc->xmit = sca_xmit;
card->settings.clock_type = CLOCK_EXT;
result = register_hdlc_device(dev);
if (result) {
pr_warn("unable to register hdlc device\n");
c101_destroy_card(card);
return result;
}
sca_init_port(card); /* Set up C101 memory */
set_carrier(card);
netdev_info(dev, "Moxa C101 on IRQ%u, using %u TX + %u RX packets rings\n",
card->irq, card->tx_ring_buffers, card->rx_ring_buffers);
*new_card = card;
new_card = &card->next_card;
return 0;
}
static int __init c101_init(void)
{
if (!hw) {
#ifdef MODULE
pr_info("no card initialized\n");
#endif
return -EINVAL; /* no parameters specified, abort */
}
pr_info("%s\n", version);
do {
unsigned long irq, ram;
irq = simple_strtoul(hw, &hw, 0);
if (*hw++ != ',')
break;
ram = simple_strtoul(hw, &hw, 0);
if (*hw == ':' || *hw == '\x0')
c101_run(irq, ram);
if (*hw == '\x0')
return first_card ? 0 : -EINVAL;
} while (*hw++ == ':');
pr_err("invalid hardware parameters\n");
return first_card ? 0 : -EINVAL;
}
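/* Parameter format example (hypothetical values, matching the
 * "irq,ram:irq,..." syntax parsed above):
 *
 *   modprobe c101 hw=5,0xdc000:9,0xd4000
 *
 * registers two cards, at IRQ 5 / window 0xDC000 and IRQ 9 / window
 * 0xD4000. Each window must lie in 0xC0000-0xDFFFF on a 16 KB boundary
 * and IRQs 3-15 except 6 are accepted (see c101_run() above).
 */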
static void __exit c101_cleanup(void)
{
card_t *card = first_card;
while (card) {
card_t *ptr = card;
card = card->next_card;
unregister_hdlc_device(port_to_dev(ptr));
c101_destroy_card(ptr);
}
}
module_init(c101_init);
module_exit(c101_cleanup);
MODULE_AUTHOR("Krzysztof Halasa <[email protected]>");
MODULE_DESCRIPTION("Moxa C101 serial port driver");
MODULE_LICENSE("GPL v2");
module_param(hw, charp, 0444);
MODULE_PARM_DESC(hw, "irq,ram:irq,...");
|
linux-master
|
drivers/net/wan/c101.c
|